path | concatenated_notebook |
---|---|
tensorflow_privacy/privacy/membership_inference_attack/codelab.ipynb | ###Markdown
Copyright 2020 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Assess privacy risks with TensorFlow Privacy Membership Inference Attacks. Overview: In this codelab we'll train a simple image classification model on the CIFAR10 dataset, and then use the "membership inference attack" against this model to assess if the attacker is able to "guess" whether a particular sample was present in the training set. Setup: First, set this notebook's runtime to use a GPU, under Runtime > Change runtime type > Hardware accelerator. Then, begin importing the necessary libraries.
###Code
#@title Import statements.
import numpy as np
from typing import Tuple, Text
from scipy import special
import tensorflow as tf
import tensorflow_datasets as tfds
# Set verbosity.
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
from warnings import simplefilter
from sklearn.exceptions import ConvergenceWarning
simplefilter(action="ignore", category=ConvergenceWarning)
simplefilter(action="ignore", category=FutureWarning)
###Output
_____no_output_____
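###Markdown
As a tiny aside to the overview above, here is a self-contained sketch (illustrative only, with synthetic losses; not part of the original codelab) of the idea behind the simplest "threshold" membership inference attack: training-set ("member") samples tend to have lower loss, so predicting "member" whenever the loss falls below a cutoff already gives an informative ROC/AUC.
###Code
import numpy as np
from sklearn.metrics import roc_auc_score
rng = np.random.RandomState(0)
# Synthetic per-example losses: member samples tend to have lower loss than non-members.
member_losses = rng.normal(loc=0.5, scale=0.3, size=1000)
nonmember_losses = rng.normal(loc=1.0, scale=0.3, size=1000)
# Score each sample by negative loss; the AUC summarizes the attack over all possible cutoffs.
scores = -np.concatenate([member_losses, nonmember_losses])
is_member = np.concatenate([np.ones(1000), np.zeros(1000)])
print('Toy threshold-attack AUC:', roc_auc_score(is_member, scores))
###Output
_____no_output_____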
###Markdown
Install TensorFlow Privacy.
###Code
!pip3 install git+https://github.com/tensorflow/privacy
from tensorflow_privacy.privacy.membership_inference_attack import membership_inference_attack as mia
###Output
_____no_output_____
###Markdown
Train a simple model on CIFAR10 with Keras.
###Code
dataset = 'cifar10'
num_classes = 10
num_conv = 3
activation = 'relu'
optimizer = 'adam'
lr = 0.02
momentum = 0.9
batch_size = 250
epochs = 100 # Privacy risks are especially visible with lots of epochs.
def small_cnn(input_shape: Tuple[int],
num_classes: int,
num_conv: int,
activation: Text = 'relu') -> tf.keras.models.Sequential:
"""Setup a small CNN for image classification.
Args:
input_shape: Integer tuple for the shape of the images.
num_classes: Number of prediction classes.
num_conv: Number of convolutional layers.
activation: The activation function to use for conv and dense layers.
Returns:
The Keras model.
"""
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Input(shape=input_shape))
# Conv layers
for _ in range(num_conv):
model.add(tf.keras.layers.Conv2D(32, (3, 3), activation=activation))
model.add(tf.keras.layers.MaxPooling2D())
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(64, activation=activation))
model.add(tf.keras.layers.Dense(num_classes))
return model
print('Loading the dataset.')
train_ds = tfds.as_numpy(
tfds.load(dataset, split=tfds.Split.TRAIN, batch_size=-1))
test_ds = tfds.as_numpy(
tfds.load(dataset, split=tfds.Split.TEST, batch_size=-1))
x_train = train_ds['image'].astype('float32') / 255.
y_train_indices = train_ds['label'][:, np.newaxis]
x_test = test_ds['image'].astype('float32') / 255.
y_test_indices = test_ds['label'][:, np.newaxis]
# Convert class vectors to binary class matrices.
y_train = tf.keras.utils.to_categorical(y_train_indices, num_classes)
y_test = tf.keras.utils.to_categorical(y_test_indices, num_classes)
input_shape = x_train.shape[1:]
model = small_cnn(
input_shape, num_classes, num_conv=num_conv, activation=activation)
print('Optimizer ', optimizer)
print('learning rate %f' % lr)
optimizer = tf.keras.optimizers.SGD(lr=lr, momentum=momentum)
loss = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
model.compile(loss=loss, optimizer=optimizer, metrics=['accuracy'])
model.summary()
model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test),
shuffle=True)
print('Finished training.')
#@title Calculate logits, probabilities and loss values for training and test sets.
#@markdown We will use these values later in the membership inference attack to
#@markdown separate training and test samples.
print('Predict on train...')
logits_train = model.predict(x_train, batch_size=batch_size)
print('Predict on test...')
logits_test = model.predict(x_test, batch_size=batch_size)
print('Apply softmax to get probabilities from logits...')
prob_train = special.softmax(logits_train, axis=1)
prob_test = special.softmax(logits_test, axis=1)
print('Compute losses...')
cce = tf.keras.backend.categorical_crossentropy
constant = tf.keras.backend.constant
loss_train = cce(constant(y_train), constant(prob_train), from_logits=False).numpy()
loss_test = cce(constant(y_test), constant(prob_test), from_logits=False).numpy()
###Output
_____no_output_____
###Markdown
Run membership inference attacks.
###Code
#@markdown We will now execute a membership inference attack against the
#@markdown previously trained CIFAR10 model. This will generate a number of
#@markdown scores (most notably, attacker advantage and AUC for the membership
#@markdown inference classifier). An AUC of close to 0.5 means that the attack
#@markdown isn't able to identify training samples, which means that the model
#@markdown doesn't have privacy issues according to this test. Higher values,
#@markdown on the contrary, indicate potential privacy issues.
labels_train = np.argmax(y_train, axis=1)
labels_test = np.argmax(y_test, axis=1)
results_without_classifiers = mia.run_all_attacks(
loss_train,
loss_test,
logits_train,
logits_test,
labels_train,
labels_test,
attack_classifiers=[],
)
print(results_without_classifiers)
# Note: This will take a while, since it also trains ML models to
# separate train/test examples. If it's taking too long, use
# the `run_all_attacks` function instead.
attack_result_summary = mia.run_all_attacks_and_create_summary(
loss_train,
loss_test,
logits_train,
logits_test,
labels_train,
labels_test,
)[0]
print(attack_result_summary)
###Output
_____no_output_____
###Markdown
Copyright 2020 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Assess privacy risks with TensorFlow Privacy Membership Inference Attacks. Overview: In this codelab we'll train a simple image classification model on the CIFAR10 dataset, and then use the "membership inference attack" against this model to assess if the attacker is able to "guess" whether a particular sample was present in the training set. Setup: First, set this notebook's runtime to use a GPU, under Runtime > Change runtime type > Hardware accelerator. Then, begin importing the necessary libraries.
###Code
#@title Import statements.
import numpy as np
from typing import Tuple, Text
from scipy import special
import tensorflow as tf
import tensorflow_datasets as tfds
# Set verbosity.
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
from warnings import simplefilter
from sklearn.exceptions import ConvergenceWarning
simplefilter(action="ignore", category=ConvergenceWarning)
simplefilter(action="ignore", category=FutureWarning)
###Output
_____no_output_____
###Markdown
Install TensorFlow Privacy.
###Code
!pip3 install git+https://github.com/tensorflow/privacy
from tensorflow_privacy.privacy.membership_inference_attack import membership_inference_attack as mia
###Output
_____no_output_____
###Markdown
Train a model
###Code
#@markdown Train a simple model on CIFAR10 with Keras.
dataset = 'cifar10'
num_classes = 10
num_conv = 3
activation = 'relu'
lr = 0.02
momentum = 0.9
batch_size = 250
epochs = 100 # Privacy risks are especially visible with lots of epochs.
def small_cnn(input_shape: Tuple[int],
num_classes: int,
num_conv: int,
activation: Text = 'relu') -> tf.keras.models.Sequential:
"""Setup a small CNN for image classification.
Args:
input_shape: Integer tuple for the shape of the images.
num_classes: Number of prediction classes.
num_conv: Number of convolutional layers.
activation: The activation function to use for conv and dense layers.
Returns:
The Keras model.
"""
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Input(shape=input_shape))
# Conv layers
for _ in range(num_conv):
model.add(tf.keras.layers.Conv2D(32, (3, 3), activation=activation))
model.add(tf.keras.layers.MaxPooling2D())
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(64, activation=activation))
model.add(tf.keras.layers.Dense(num_classes))
return model
print('Loading the dataset.')
train_ds = tfds.as_numpy(
tfds.load(dataset, split=tfds.Split.TRAIN, batch_size=-1))
test_ds = tfds.as_numpy(
tfds.load(dataset, split=tfds.Split.TEST, batch_size=-1))
x_train = train_ds['image'].astype('float32') / 255.
y_train_indices = train_ds['label'][:, np.newaxis]
x_test = test_ds['image'].astype('float32') / 255.
y_test_indices = test_ds['label'][:, np.newaxis]
# Convert class vectors to binary class matrices.
y_train = tf.keras.utils.to_categorical(y_train_indices, num_classes)
y_test = tf.keras.utils.to_categorical(y_test_indices, num_classes)
input_shape = x_train.shape[1:]
model = small_cnn(
input_shape, num_classes, num_conv=num_conv, activation=activation)
print('learning rate %f' % lr)
optimizer = tf.keras.optimizers.SGD(lr=lr, momentum=momentum)
loss = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
model.compile(loss=loss, optimizer=optimizer, metrics=['accuracy'])
model.summary()
model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test),
shuffle=True)
print('Finished training.')
###Output
_____no_output_____
###Markdown
Calculate logits, probabilities and loss values for training and test sets. We will use these values later in the membership inference attack to separate training and test samples.
###Code
print('Predict on train...')
logits_train = model.predict(x_train, batch_size=batch_size)
print('Predict on test...')
logits_test = model.predict(x_test, batch_size=batch_size)
print('Apply softmax to get probabilities from logits...')
prob_train = special.softmax(logits_train, axis=1)
prob_test = special.softmax(logits_test, axis=1)
print('Compute losses...')
cce = tf.keras.backend.categorical_crossentropy
constant = tf.keras.backend.constant
loss_train = cce(constant(y_train), constant(prob_train), from_logits=False).numpy()
loss_test = cce(constant(y_test), constant(prob_test), from_logits=False).numpy()
###Output
_____no_output_____
###Markdown
Run membership inference attacks. We will now execute a membership inference attack against the previously trained CIFAR10 model. This will generate a number of scores, most notably, attacker advantage and AUC for the membership inference classifier. An AUC of close to 0.5 means that the attack wasn't able to identify training samples, which means that the model doesn't have privacy issues according to this test. Higher values, on the contrary, indicate potential privacy issues.
###Code
from tensorflow_privacy.privacy.membership_inference_attack.data_structures import AttackInputData
from tensorflow_privacy.privacy.membership_inference_attack.data_structures import SlicingSpec
from tensorflow_privacy.privacy.membership_inference_attack.data_structures import AttackType
import tensorflow_privacy.privacy.membership_inference_attack.plotting as plotting
labels_train = np.argmax(y_train, axis=1)
labels_test = np.argmax(y_test, axis=1)
input = AttackInputData(
logits_train = logits_train,
logits_test = logits_test,
loss_train = loss_train,
loss_test = loss_test,
labels_train = labels_train,
labels_test = labels_test
)
# Run several attacks for different data slices
attacks_result = mia.run_attacks(input,
SlicingSpec(
entire_dataset = True,
by_class = True,
by_classification_correctness = True
),
attack_types = [
AttackType.THRESHOLD_ATTACK,
AttackType.LOGISTIC_REGRESSION])
# Plot the ROC curve of the best classifier
fig = plotting.plot_roc_curve(
attacks_result.get_result_with_max_auc().roc_curve)
# Print a user-friendly summary of the attacks
print(attacks_result.summary(by_slices = True))
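# Optional sketch (not in the original codelab): read the headline metrics directly from the
# best attack object instead of the text summary. `get_result_with_max_auc()` is used above;
# the `tpr`/`fpr` arrays on `roc_curve` are assumed attributes of the library's RocCurve class.
best_attack = attacks_result.get_result_with_max_auc()
tpr, fpr = best_attack.roc_curve.tpr, best_attack.roc_curve.fpr
print('Best attack AUC: %.3f' % np.trapz(tpr, fpr))
print('Best attack advantage (max |TPR - FPR|): %.3f' % np.max(np.abs(tpr - fpr)))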
###Output
_____no_output_____ |
notebooks/demo_Hangul.ipynb | ###Markdown
"data/nine_dreams/ninedreams.txt" IS REQUIRED SPECIFY FILE ENCODING TYOE IN PYTHON
###Code
# -*- coding: utf-8 -*-
print ("UTF-8 ENCODING")
###Output
UTF-8 ENCODING
###Markdown
LOAD PACKAGES
###Code
import chardet # https://github.com/chardet/chardet
import glob
import codecs
import sys
import os
from TextLoader import *
from Hangulpy3 import *
print ("PACKAGES LOADED")
###Output
PACKAGES LOADED
###Markdown
CONVERT UTF8-ENCODED TXT FILE
###Code
def conv_file(fromfile, tofile):
with open(fromfile, "rb") as f:
sample_text=f.read(10240)
pred = chardet.detect(sample_text)
if not pred['encoding'] in ('EUC-KR', 'UTF-8', 'CP949', 'UTF-16LE'):
print ("WARNING! Unknown encoding! : %s = %s") % (fromfile, pred['encoding'])
pred['encoding'] = "CP949" # 못찾으면 기본이 CP949
formfile = fromfile + ".unknown"
elif pred['confidence'] < 0.9:
print ("WARNING! Unsured encofing! : %s = %s / %s")
% (fromfile, pred['confidence'], pred['encoding'])
formfile = fromfile + ".notsure"
with codecs.open(fromfile, "r", encoding=pred['encoding'], errors="ignore") as f:
with codecs.open(tofile, "w+", encoding="utf8") as t:
all_text = f.read()
t.write(all_text)
###Output
_____no_output_____
###Markdown
"data/nine_dreams/ninedreams_utf8.txt" IS GENERATED
###Code
# SOURCE TXT FILE
fromfile = "data/nine_dreams/ninedreams.txt"
# TARGET TXT FILE
tofile = "data/nine_dreams/ninedreams_utf8.txt"
conv_file(fromfile, tofile)
print ("UTF8-CONVERTING DONE")
print (" [%s] IS GENERATED" % (tofile))
###Output
UTF8-CONVERTING DONE
[data/nine_dreams/ninedreams_utf8.txt] IS GENERATED
###Markdown
DECOMPOSE HANGUL (THIS PART IS IMPORTANT!)
###Code
def dump_file(filename):
result=u"" # <= UNICODE STRING
with codecs.open(filename,"r", encoding="UTF8") as f:
for line in f.readlines():
line = tuple(line)
result = result + decompose_text(line)
return result
print ("FUNCTION READY")
###Output
FUNCTION READY
###Markdown
PYTHON 2 AND 3 COMPATIBILITY
###Code
if sys.version_info.major == 2:
parsed_txt = dump_file(tofile).encode("utf8")
else:
parsed_txt = dump_file(tofile)
print ("Parsing %s done" % (tofile))
# PRINT FIRST 100 CHARACTERS
print (parsed_txt[:100])
###Output
Parsing data/nine_dreams/ninedreams_utf8.txt done
ㅎㅏㄴᴥㄱㅜㄱᴥ ㄱㅜㄱᴥㅁㅜㄴᴥㅎㅏㄱᴥㅅㅏᴥㅅㅏㅇᴥ ㅇㅕㅇᴥ�
###Markdown
"data/nine_dreams/input.txt" IS GENERATED
###Code
with open("data/nine_dreams/input.txt", "w") as text_file:
text_file.write(parsed_txt)
print ("Saved to a txt file")
print (text_file)
###Output
Saved to a txt file
<closed file 'data/nine_dreams/input.txt', mode 'w' at 0x7f62ae9a58a0>
###Markdown
COMPOSE HANGUL CHARACTER FROM PHONEME
###Code
data=[u'\u3147', u'\u3157', u'\u1d25', u'\u3134', u'\u3161', u'\u3139', u'\u1d25'
, u' ', u'\u314f', u'\u3147', u'\u3145', u'\u314f', u'\u1d25', u'\u1d25'
, u'\u3163', u'\u1d25', u' ', u'\u3147', u'\u1d25', u'\u3155', u'\u1d25'
, u'\u3134', u'\u314f', u'\u1d25', u'\u3155', u'\u3147', u'\u1d25'
, u'\u315b', u'\u3131', u'\u1d25', u'\u3147', u'\u3139', u'\u3146'
, u'\u1d25', u'\u3137', u'\u314f', u'\u314e', u'\u3139', u'\u1d25'
, u'\u3134', u'\u1d25', u'\u3145', u'\u3163', u'\u1d25', u'\u1d25'
, u'\u314f', u'\u1d25', u'\u314e', u'\u314f', u'\u3147', u'\u3131'
, u'\u3157', u'\u3134', u'\u1d25', u'\u1d25', u'\u315b', u'\u1d25'
, u'\u3148', u'\u3153', u'\u3136', u'\u1d25', u' ', u'\u3145', u'\u3150'
, u'\u3141', u'\u3136', u'\u3161', u'\u3134', u'\u3163', u'\u1d25', u'.'
, u'\u3148', u'\u3153', u'\u3134', u'\u314e', u'\u3153', u'\u1d25', u'\u1d25'
, u'\u3147', u'\u314f', u'\u3134', u'\u3148', u'\u314f', u'\u3139', u'\u315d'
, u'\u314c', u'\u1d25', u'\u3161', u'\u3134', u'\u3148', u'\u3163', u'\u313a'
, u'\u1d25', u' ', u'\u3147', u'\u3161', u'\u3146', u'\u1d25', u'?', u'\u3134'
, u'\u1d25', u'\u314e', u'\u3163', u'\u1d25', u'\u3147', u'\u3148', u'\u314f'
]
print (automata("".join(data)))
###Output
오늘 ㅏㅇ사ㅣ ㅇㅕ나ㅕㅇㅛㄱㅇㄹㅆ닿ㄹㄴ시ㅏ항곤ㅛ젆 샘ㄶㅡ니.젆ㅓ앉ㅏ뤝ㅡㄴ짉 읐?ㄴ히ㅇ
###Markdown
GENERATE "vocab.pkl" and "data.npy" in "data/nine_dreams/" FROM "data/nine_dreams/input.txt"
###Code
data_dir = "data/nine_dreams"
batch_size = 50
seq_length = 50
data_loader = TextLoader(data_dir, batch_size, seq_length)
###Output
loading preprocessed files
###Markdown
DATA_LOADER IS:
###Code
print ( "type of 'data_loader' is %s, length is %d"
% (type(data_loader.vocab), len(data_loader.vocab)) )
###Output
type of 'data_loader' is <type 'dict'>, length is 76
###Markdown
DATA_LOADER.VOCAB IS:
###Code
print ("data_loader.vocab looks like \n%s " % (data_loader.vocab,))
###Output
data_loader.vocab looks like
{u'_': 69, u'6': 59, u':': 57, u'\n': 19, u'4': 67, u'5': 63, u'>': 75, u'!': 52, u' ': 1, u'"': 28, u'\u1d25': 0, u"'": 49, u')': 46, u'(': 45, u'-': 65, u',': 27, u'.': 24, u'\u3131': 7, u'0': 73, u'\u3133': 60, u'\u3132': 29, u'\u3135': 50, u'\u3134': 4, u'\u3137': 13, u'\u3136': 44, u'\u3139': 5, u'\u3138': 32, u'\u313b': 55, u'\u313a': 48, u'\u313c': 54, u'?': 41, u'3': 66, u'\u3141': 12, u'\u3140': 51, u'\u3143': 47, u'\u3142': 17, u'\u3145': 10, u'\u3144': 43, u'\u3147': 2, u'\u3146': 22, u'\u3149': 40, u'\u3148': 15, u'\u314b': 42, u'\u314a': 23, u'\u314d': 31, u'\u314c': 30, u'\u314f': 3, u'\u314e': 14, u'\u3151': 34, u'\u3150': 21, u'\u3153': 11, u'\u3152': 74, u'\u3155': 18, u'\u3154': 20, u'\u3157': 9, u'\u3156': 39, u'\u3159': 53, u'\u3158': 26, u'\u315b': 38, u'\u315a': 33, u'\u315d': 36, u'\u315c': 16, u'\u315f': 35, u'\u315e': 61, u'\u3161': 8, u'\u3160': 37, u'\u3163': 6, u'\u3162': 25, u'\x1a': 72, u'9': 64, u'7': 71, u'2': 62, u'1': 58, u'\u313f': 56, u'\u313e': 70, u'8': 68}
###Markdown
DATA_LOADER.CHARS IS:
###Code
print ( "type of 'data_loader.chars' is %s, length is %d"
% (type(data_loader.chars), len(data_loader.chars)) )
###Output
type of 'data_loader.chars' is <type 'tuple'>, length is 76
###Markdown
CHARS CONVERTS INDEX -> CHAR
###Code
print ("data_loader.chars looks like \n%s " % (data_loader.chars,))
for i, char in enumerate(data_loader.chars):
# GET INDEX OF THE CHARACTER
idx = data_loader.vocab[char]
print ("[%02d] %03s (%02d)"
% (i, automata("".join(char)), idx))
###Output
[00] (00)
[01] (01)
[02] (02)
[03] ㅏ (03)
[04] (04)
[05] (05)
[06] ㅣ (06)
[07] (07)
[08] ㅡ (08)
[09] ㅗ (09)
[10] (10)
[11] ㅓ (11)
[12] (12)
[13] (13)
[14] (14)
[15] (15)
[16] ㅜ (16)
[17] (17)
[18] ㅕ (18)
[19]
(19)
[20] ㅔ (20)
[21] ㅐ (21)
[22] (22)
[23] (23)
[24] . (24)
[25] ㅢ (25)
[26] ㅘ (26)
[27] , (27)
[28] " (28)
[29] (29)
[30] (30)
[31] (31)
[32] (32)
[33] ㅚ (33)
[34] ㅑ (34)
[35] ㅟ (35)
[36] ㅝ (36)
[37] ㅠ (37)
[38] ㅛ (38)
[39] ㅖ (39)
[40] (40)
[41] ? (41)
[42] (42)
[43] ㅄ (43)
[44] ㄶ (44)
[45] ( (45)
[46] ) (46)
[47] (47)
[48] ㄺ (48)
[49] ' (49)
[50] ㄵ (50)
[51] ㅀ (51)
[52] ! (52)
[53] ㅙ (53)
[54] ㄼ (54)
[55] ㄻ (55)
[56] ㄿ (56)
[57] : (57)
[58] 1 (58)
[59] 6 (59)
[60] ㄳ (60)
[61] ㅞ (61)
[62] 2 (62)
[63] 5 (63)
[64] 9 (64)
[65] - (65)
[66] 3 (66)
[67] 4 (67)
[68] 8 (68)
[69] _ (69)
[70] ㄾ (70)
[71] 7 (71)
[72] (72)
[73] 0 (73)
[74] ㅒ (74)
[75] > (75)
|
lecture02.ingestion/lecture02.ingestion.ipynb | ###Markdown
Lecture 01 : intro, inputs, numpy, pandas. 1. Inputs: CSV / Text. We will start by ingesting plain text.
###Code
from __future__ import print_function
import csv
my_reader = csv.DictReader(open('data/eu_revolving_loans.csv', 'r'))
###Output
_____no_output_____
###Markdown
DictReader returns a "generator" -- which means that we only have 1 chance to read the returning row dictionaries. Let's just print out line by line to see what we are reading in:
###Code
for line in my_reader:
print(line)
###Output
_____no_output_____
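###Markdown
Because the reader is a generator, iterating over it a second time yields nothing. A small optional workaround (not in the original lecture): materialize the rows into a list so they can be re-used.
###Code
rows = list(csv.DictReader(open('data/eu_revolving_loans.csv', 'r')))
print(len(rows))
print(rows[0])
###Output
_____no_output_____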
###Markdown
Since the data is tabular format, pandas is ideally suited for such data. There are convenient pandas import functions for reading in tabular data. Pandas provides direct csv ingestion into "data frames":
###Code
import pandas as pd
df = pd.read_csv('data/eu_revolving_loans.csv')
df.head()
###Output
_____no_output_____
###Markdown
As we briefly discussed last week, simply reading in without any configuration generates a fairly messy data frame. We should try to specify some helping hints to pandas as to where the header rows are and which is the index column:
###Code
df = pd.read_csv('data/eu_revolving_loans.csv', header=[1,2,4], index_col=0)
df.head()
###Output
_____no_output_____
###Markdown
2. Inputs: Excel Many organizations still use Excel as the common medium for communicating data and analysis. We will look quickly at how to ingest Excel data. There are many packages available to read Excel files. We will use one popular one here.
###Code
from __future__ import print_function
from openpyxl import load_workbook
###Output
_____no_output_____
###Markdown
Let's take a look at the excel file that we want to read into Jupyter
###Code
!open 'data/climate_change_download_0.xlsx'
###Output
_____no_output_____
###Markdown
Here is how we can read the Excel file into the Jupyter environment.
###Code
wb = load_workbook(filename='data/climate_change_download_0.xlsx')
###Output
_____no_output_____
###Markdown
What are the "sheets" in this workbook?
###Code
wb.get_sheet_names()
###Output
_____no_output_____
###Markdown
We will focus on the sheet 'Data':
###Code
ws = wb.get_sheet_by_name('Data')
###Output
_____no_output_____
###Markdown
For the sheet "Data", let's print out the content cell-by-cell to view the content.
###Code
for row in ws.rows:
for cell in row:
print(cell.value)
###Output
_____no_output_____
###Markdown
Pandas also provides direct Excel data ingest:
###Code
import pandas as pd
df = pd.read_excel('data/climate_change_download_0.xlsx')
df.head()
###Output
_____no_output_____
###Markdown
Here is another example with multiple sheets:
###Code
df = pd.read_excel('data/GHE_DALY_Global_2000_2012.xls', sheetname='Global2012', header=[4,5])
###Output
_____no_output_____
###Markdown
This dataframe has a "multi-level" index:
###Code
df.columns
###Output
_____no_output_____
###Markdown
How do we export a dataframe back to Excel?
###Code
df.to_excel('data/my_excel.xlsx')
!open 'data/my_excel.xlsx'
###Output
_____no_output_____
###Markdown
3. Inputs: PDF PDF is also a common communication medium about data and analysis. Let's look at how one can read data from PDF into Python.
###Code
import pdftables
my_pdf = open('data/WEF_GlobalCompetitivenessReport_2014-15.pdf', 'rb')
chart_page = pdftables.get_pdf_page(my_pdf, 29)
###Output
_____no_output_____
###Markdown
PDF is a proprietary file format with specific tagging that has been reverse engineered. Let's take a look at some structures in this file.
###Code
table = pdftables.page_to_tables(chart_page)
titles = zip(table[0][0], table[0][1])[:5]
titles = [''.join([title[0], title[1]]) for title in titles]
print(titles)
###Output
_____no_output_____
###Markdown
There is a table with structured data that we can peel out:
###Code
all_rows = []
for row_data in table[0][2:]:
all_rows.extend([row_data[:5], row_data[5:]])
print(all_rows)
###Output
_____no_output_____
###Markdown
4. Configurations
###Code
from ConfigParser import ConfigParser
config = ConfigParser()
config.read('../cfg/sample.cfg')
config.sections()
###Output
_____no_output_____
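###Markdown
The cfg file itself is plain INI-style text. Below is a hypothetical layout for `../cfg/sample.cfg` (the real file is not included here) matching the section/option names used in the cells that follow, plus how a value is read back.
###Code
# Hypothetical contents of ../cfg/sample.cfg (INI format, parsed by ConfigParser):
#
# [twitter]
# consumer_key = ...
# consumer_secret = ...
# access_token = ...
# access_token_secret = ...
#
# [openweathermap]
# api_key = ...
#
# Values are read back with (section, option) pairs:
weather_key = config.get('openweathermap', 'api_key')
print(config.sections())
###Output
_____no_output_____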
###Markdown
5. APIs Getting Twitter data from API. Relevant links to the exercise here:
- Twitter Streaming: https://dev.twitter.com/streaming/overview
- API client: https://github.com/tweepy/tweepy
- Twitter app: https://apps.twitter.com
Create an authentication handler
###Code
import tweepy
auth = tweepy.OAuthHandler(config.get('twitter', 'consumer_key'), config.get('twitter', 'consumer_secret'))
auth.set_access_token(config.get('twitter','access_token'), config.get('twitter','access_token_secret'))
auth
###Output
_____no_output_____
###Markdown
Create an API endpoint
###Code
api = tweepy.API(auth)
###Output
_____no_output_____
###Markdown
Try REST-ful API call to Twitter
###Code
python_tweets = api.search('turkey')
for tweet in python_tweets:
print(tweet.text)
###Output
_____no_output_____
###Markdown
For streaming API call, we should run a standalone python program: tweetering.py
Input & Output to OpenWeatherMap API. Relevant links to the exercise here:
- http://openweathermap.org/
- http://openweathermap.org/current
API call:
```
api.openweathermap.org/data/2.5/weather?q={city name}
api.openweathermap.org/data/2.5/weather?q={city name},{country code}
```
Parameters:
> q  city name and country code divided by comma, use ISO 3166 country codes
Examples of API calls:
```
api.openweathermap.org/data/2.5/weather?q=London
api.openweathermap.org/data/2.5/weather?q=London,uk
```
###Code
from pprint import pprint
import requests
weather_key = config.get('openweathermap', 'api_key')
res = requests.get("http://api.openweathermap.org/data/2.5/weather",
params={"q": "San Francisco", "appid": weather_key, "units": "metric"})
pprint(res.json())
###Output
_____no_output_____
###Markdown
6. Python requests "requests" is a wonderful HTTP library for Python, with the right level of abstraction to avoid lots of tedious plumbing (manually add query strings to your URLs, or to form-encode your POST data). Keep-alive and HTTP connection pooling are 100% automatic, powered by urllib3, which is embedded within Requests.
```
>>> r = requests.get('https://api.github.com/user', auth=('user', 'pass'))
>>> r.status_code
200
>>> r.headers['content-type']
'application/json; charset=utf8'
>>> r.encoding
'utf-8'
>>> r.text
u'{"type":"User"...'
>>> r.json()
{u'private_gists': 419, u'total_private_repos': 77, ...}
```
There is a lot of great documentation at the python-requests [site](http://docs.python-requests.org/en/master/) -- we are extracting selected highlights from there for your convenience here. Making a request: Making a request with Requests is very simple. Begin by importing the Requests module:
###Code
import requests
###Output
_____no_output_____
###Markdown
Now, let's try to get a webpage. For this example, let's get GitHub's public timeline
###Code
r = requests.get('https://api.github.com/events')
###Output
_____no_output_____
###Markdown
Now, we have a Response object called r. We can get all the information we need from this object.Requests' simple API means that all forms of HTTP request are as obvious. For example, this is how you make an HTTP POST request:
###Code
r = requests.post('http://httpbin.org/post', data = {'key':'value'})
###Output
_____no_output_____
###Markdown
What about the other HTTP request types: PUT, DELETE, HEAD and OPTIONS? These are all just as simple:
###Code
r = requests.put('http://httpbin.org/put', data = {'key':'value'})
r = requests.delete('http://httpbin.org/delete')
r = requests.head('http://httpbin.org/get')
r = requests.options('http://httpbin.org/get')
###Output
_____no_output_____
###Markdown
Passing Parameters In URLs. You often want to send some sort of data in the URL's query string. If you were constructing the URL by hand, this data would be given as key/value pairs in the URL after a question mark, e.g. httpbin.org/get?key=val. Requests allows you to provide these arguments as a dictionary, using the params keyword argument. As an example, if you wanted to pass key1=value1 and key2=value2 to httpbin.org/get, you would use the following code:
###Code
payload = {'key1': 'value1', 'key2': 'value2'}
r = requests.get('http://httpbin.org/get', params=payload)
###Output
_____no_output_____
###Markdown
You can see that the URL has been correctly encoded by printing the URL:
###Code
print(r.url)
###Output
_____no_output_____
###Markdown
Note that any dictionary key whose value is None will not be added to the URL's query string. You can also pass a list of items as a value:
###Code
payload = {'key1': 'value1', 'key2': ['value2', 'value3']}
r = requests.get('http://httpbin.org/get', params=payload)
print(r.url)
###Output
_____no_output_____
###Markdown
Response Content. We can read the content of the server's response. Consider the GitHub timeline again:
###Code
import requests
r = requests.get('https://api.github.com/events')
r.text
###Output
_____no_output_____
###Markdown
Requests will automatically decode content from the server. Most unicode charsets are seamlessly decoded. When you make a request, Requests makes educated guesses about the encoding of the response based on the HTTP headers. The text encoding guessed by Requests is used when you access r.text. You can find out what encoding Requests is using, and change it, using the r.encoding property:
###Code
r.encoding
r.encoding = 'ISO-8859-1'
###Output
_____no_output_____
###Markdown
If you change the encoding, Requests will use the new value of r.encoding whenever you call r.text. You might want to do this in any situation where you can apply special logic to work out what the encoding of the content will be. For example, HTTP and XML have the ability to specify their encoding in their body. In situations like this, you should use r.content to find the encoding, and then set r.encoding. This will let you use r.text with the correct encoding. Requests will also use custom encodings in the event that you need them. If you have created your own encoding and registered it with the codecs module, you can simply use the codec name as the value of r.encoding and Requests will handle the decoding for you. JSON Response Content. There's also a builtin JSON decoder, in case you're dealing with JSON data:
###Code
import requests
r = requests.get('https://api.github.com/events')
r.json()
###Output
_____no_output_____
###Markdown
In case the JSON decoding fails, r.json raises an exception. For example, if the response gets a 204 (No Content), or if the response contains invalid JSON, attempting r.json raises ValueError: No JSON object could be decoded. It should be noted that the success of the call to r.json does not indicate the success of the response. Some servers may return a JSON object in a failed response (e.g. error details with HTTP 500). Such JSON will be decoded and returned. To check that a request is successful, use r.raise_for_status() or check r.status_code is what you expect.
###Code
r.status_code
###Output
_____no_output_____
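###Markdown
An optional aside (not in the original notes): `raise_for_status()` turns HTTP error codes into exceptions, which is often more convenient than checking `status_code` by hand. The endpoint below is deliberately invalid so that a 404 error is raised.
###Code
bad = requests.get('https://api.github.com/this-endpoint-does-not-exist')
try:
    bad.raise_for_status()
except requests.exceptions.HTTPError as err:
    print(err)
###Output
_____no_output_____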
###Markdown
Custom Headers. If you'd like to add HTTP headers to a request, simply pass in a dict to the headers parameter. For example, we didn't specify our user-agent in the previous example:
###Code
url = 'https://api.github.com/some/endpoint'
headers = {'user-agent': 'my-app/0.0.1'}
r = requests.get(url, headers=headers)
###Output
_____no_output_____
###Markdown
Note: Custom headers are given less precedence than more specific sources of information. For instance:
- Authorization headers set with headers= will be overridden if credentials are specified in .netrc, which in turn will be overridden by the auth= parameter.
- Authorization headers will be removed if you get redirected off-host.
- Proxy-Authorization headers will be overridden by proxy credentials provided in the URL.
- Content-Length headers will be overridden when we can determine the length of the content.
Response Headers. We can view the server's response headers using a Python dictionary:
###Code
r.headers
###Output
_____no_output_____
###Markdown
The dictionary is special, though: it's made just for HTTP headers. According to RFC 7230, HTTP Header names are case-insensitive. So, we can access the headers using any capitalization we want:
###Code
r.headers['Content-Type']
r.headers.get('content-type')
###Output
_____no_output_____
###Markdown
Cookies. If a response contains some Cookies, you can quickly access them:
###Code
url = 'http://www.cnn.com'
r = requests.get(url)
print(r.cookies.items())
###Output
_____no_output_____
###Markdown
To send your own cookies to the server, you can use the cookies parameter:
###Code
url = 'http://httpbin.org/cookies'
cookies = dict(cookies_are='working')
r = requests.get(url, cookies=cookies)
r.text
###Output
_____no_output_____
###Markdown
Redirection and History. By default Requests will perform location redirection for all verbs except HEAD. We can use the history property of the Response object to track redirection. The Response.history list contains the Response objects that were created in order to complete the request. The list is sorted from the oldest to the most recent response. For example, GitHub redirects all HTTP requests to HTTPS:
###Code
r = requests.get('http://github.com')
r.url
r.status_code
r.history
###Output
_____no_output_____
###Markdown
If you're using GET, OPTIONS, POST, PUT, PATCH or DELETE, you can disable redirection handling with the allow_redirects parameter:
###Code
r = requests.get('http://github.com', allow_redirects=False)
r.status_code
r.history
###Output
_____no_output_____
###Markdown
If you're using HEAD, you can enable redirection as well:
###Code
r = requests.head('http://github.com', allow_redirects=True)
r.url
r.history
###Output
_____no_output_____
###Markdown
Timeouts. You can tell Requests to stop waiting for a response after a given number of seconds with the timeout parameter:
###Code
requests.get('http://github.com', timeout=1)
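# Optional aside (not in the original notes): if the server does not answer within the timeout,
# requests raises requests.exceptions.Timeout, which can be caught like any other exception.
try:
    requests.get('http://github.com', timeout=0.001)
except requests.exceptions.Timeout:
    print('request timed out')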
###Output
_____no_output_____ |
07 NLP/kaggle hw/solution.ipynb | ###Markdown
###Code
# !pip3 install kaggle
from google.colab import files
files.upload()
!mkdir ~/.kaggle
!cp kaggle.json ~/.kaggle/
!chmod 600 ~/.kaggle/kaggle.json
!kaggle competitions download -c toxic-comments-classification-apdl-2021
!ls
import pandas as pd
import numpy as np
from sklearn.metrics import *
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
train = pd.read_csv('train_data.csv.zip', compression='zip')
test = pd.read_csv('test_data.csv.zip', compression='zip')
train.toxic.describe()
train.sample(5)
test.sample(5)
x_train, x_test, y_train, y_test = train_test_split(train.comment, train.toxic, random_state=0, stratify=train.toxic)
y_train.describe()
y_test.describe()
###Output
_____no_output_____
###Markdown
Bag of words
###Code
from sklearn.linear_model import LogisticRegression
from sklearn.feature_extraction.text import CountVectorizer
from nltk import ngrams
vec = CountVectorizer(ngram_range=(1, 2)) # build a BoW over words
bow = vec.fit_transform(x_train)
vec2 = CountVectorizer(ngram_range=(1, 2)) # build a BoW over words
bow2 = vec2.fit_transform(train.comment)
list(vec2.vocabulary_.items())[:10]
bow.mean()
clf = LogisticRegression(random_state=0, max_iter=500, class_weight='balanced')
clf.fit(bow, y_train)
clf2 = LogisticRegression(random_state=0, max_iter=500, class_weight='balanced')
clf2.fit(bow2, train.toxic)
pred = clf.predict(vec.transform(x_test))
print(classification_report(pred, y_test))
test
bow_test_pred = test.copy()
bow_test_pred['toxic'] = clf.predict(vec.transform(test.comment))
bow_test_pred['toxic'] = bow_test_pred['toxic'].astype(int)
bow_test_pred.drop('comment', axis=1, inplace=True)
bow_test_pred
bow_test_pred2 = test.copy()
bow_test_pred2['toxic'] = clf2.predict(vec2.transform(test.comment))
bow_test_pred2['toxic'] = bow_test_pred2['toxic'].astype(int)
bow_test_pred2.drop('comment', axis=1, inplace=True)
bow_test_pred2
bow_test_pred.to_csv('bow_v1.csv', index=False)
bow_test_pred2.to_csv('bow_v2.csv', index=False)
confusion_matrix(bow_test_pred.toxic, bow_test_pred2.toxic)
# !kaggle competitions submit -c toxic-comments-classification-apdl-2021 -f bow_v2.csv -m "kirill_setdekov first bow v2 submission all data"
###Output
Warning: Looks like you're using an outdated API Version, please consider updating (server 1.5.12 / client 1.5.4)
100% 23.6k/23.6k [00:09<00:00, 2.45kB/s]
Successfully submitted to Toxic comments classification
###Markdown
TF-IDF
###Code
from sklearn.feature_extraction.text import TfidfVectorizer
vec = TfidfVectorizer(ngram_range=(1, 1))
bow = vec.fit_transform(x_train)
clf2 = LogisticRegression(random_state=1, max_iter = 500)
clf2.fit(bow, y_train)
pred = clf2.predict(vec.transform(x_test))
print(classification_report(pred, y_test))
tf_idf = test.copy()
tf_idf['toxic'] = clf2.predict(vec.transform(test.comment))
tf_idf['toxic'] = tf_idf['toxic'].astype(int)
tf_idf.drop('comment', axis=1, inplace=True)
tf_idf
tf_idf.to_csv('tf_idf_v1.csv', index=False)
# !kaggle competitions submit -c toxic-comments-classification-apdl-2021 -f tf_idf_v1.csv -m "kirill_setdekov tfidf v1 submission"
###Output
_____no_output_____
###Markdown
Symbol n-Grams
###Code
vec = CountVectorizer(analyzer='char', ngram_range=(1, 5))
bowsimb = vec.fit_transform(x_train)
from sklearn.preprocessing import MaxAbsScaler
scaler = MaxAbsScaler()
scaler.fit(bowsimb)
bowsimb = scaler.transform(bowsimb)
clf3 = LogisticRegression(random_state=0, max_iter=1000)
clf3.fit(bowsimb, y_train)
pred = clf3.predict(scaler.transform(vec.transform(x_test)))
print(classification_report(pred, y_test))
importances = list(zip(vec.vocabulary_, clf.coef_[0]))
importances[0]
sorted_importances = sorted(importances, key = lambda x: -abs(x[1]))
sorted_importances[:20]
symbol_ngrams = test.copy()
symbol_ngrams['toxic'] = clf3.predict(scaler.
transform(vec.transform(test.comment)))
symbol_ngrams['toxic'] = symbol_ngrams['toxic'].astype(int)
symbol_ngrams.drop('comment', axis=1, inplace=True)
symbol_ngrams
symbol_ngrams.to_csv('symbol_ngrams_v1.csv', index=False)
from sklearn.metrics import confusion_matrix
confusion_matrix(symbol_ngrams.toxic, tf_idf.toxic)
# !kaggle competitions submit -c toxic-comments-classification-apdl-2021 -f symbol_ngrams_v1.csv -m "kirill_setdekov symbol_ngrams_v1 v1 submission"
###Output
_____no_output_____
###Markdown
FastText
###Code
!pip3 install fasttext
import fasttext
with open('ft_train_data.txt', 'w') as f:
for pair in list(zip(x_train, y_train)):
text, label = pair
f.write(f'__label__{int(label)} {text.lower()}\n')
with open('ft_test_data.txt', 'w') as f:
for pair in list(zip(x_test, y_test)):
text, label = pair
f.write(f'__label__{int(label)} {text.lower()}\n')
with open('ft_all.txt', 'w') as f:
for pair in list(zip(train.comment, train.toxic)):
text, label = pair
f.write(f'__label__{int(label)} {text.lower()}\n')
classifier = fasttext.train_supervised('ft_train_data.txt')#, 'model')
result = classifier.test('ft_test_data.txt')
print('P@1:', result[1])#.precision)
print('R@1:', result[2])#.recall)
print('Number of examples:', result[0])#.nexamples)
classifier2 = fasttext.train_supervised('ft_all.txt')#, 'model')
k = 0
for item in [i.lower() for i in test.comment]:
item = item.replace("\n"," ")
k +=1
k
prediction = []
for item in [i.lower() for i in test.comment]:
item = item.replace("\n"," ")
prediction.append(classifier.predict(item))
prediction2 = []
for item in [i.lower() for i in test.comment]:
item = item.replace("\n"," ")
prediction2.append(classifier2.predict(item))
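# fastText predictions look like (('__label__1',), probs); split('__')[2][0] pulls out the class digit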
pred = [int(label[0][0].split('__')[2][0]) for label in prediction]
pred2 = [int(label[0][0].split('__')[2][0]) for label in prediction2]
fasttext_pred = test.copy()
fasttext_pred['toxic'] = pred
fasttext_pred.drop('comment', axis=1, inplace=True)
fasttext_pred
fasttext_pred2 = test.copy()
fasttext_pred2['toxic'] = pred2
fasttext_pred2.drop('comment', axis=1, inplace=True)
fasttext_pred2
confusion_matrix(symbol_ngrams.toxic, fasttext_pred.toxic)
confusion_matrix(fasttext_pred2.toxic, fasttext_pred.toxic)
fasttext_pred.to_csv('fasttext_pred_v1.csv', index=False)
fasttext_pred2.to_csv('fasttext_pred_v2.csv', index=False)
!kaggle competitions submit -c toxic-comments-classification-apdl-2021 -f fasttext_pred_v2.csv -m "kirill_setdekov fasttext_pred v2 submission"
###Output
Warning: Looks like you're using an outdated API Version, please consider updating (server 1.5.12 / client 1.5.4)
100% 23.6k/23.6k [00:07<00:00, 3.36kB/s]
Successfully submitted to Toxic comments classification
###Markdown
CNN
###Code
from torchtext.legacy import data
pd.read_csv('train_data.csv.zip', compression='zip')
!unzip train_data.csv.zip
!unzip test_data.csv.zip
# the Field and LabelField classes define how the data will be stored and processed when it is read in
TEXT = data.Field(tokenize='spacy') # 'spacy' means the spacy module will handle tokenization
LABEL = data.LabelField()
ds = data.TabularDataset(
path='train_data.csv', format='csv',
skip_header=True,
fields=[('comment', TEXT),
('toxic', LABEL)]
)
pd.read_csv('test_data.csv')
test = data.TabularDataset(
path='test_data.csv', format='csv',
skip_header=True,
fields=[('id', TEXT), ('comment', TEXT)]
)
next(ds.comment)
next(ds.toxic)
TEXT.build_vocab(ds, max_size=25000, vectors="glove.6B.100d")
LABEL.build_vocab(ds)
TEXT.vocab.itos[:20]
len(TEXT.vocab.itos)
train, val = ds.split(split_ratio=0.9, stratified=True, strata_field='toxic') # the default split ratio is 0.7
print(len(train))
print(len(val))
print(len(test))
BATCH_SIZE = 64
train_iterator, valid_iterator, test_iterator = data.BucketIterator.splits(
(train, val, test),
batch_size=BATCH_SIZE,
sort=True,
sort_key=lambda x: len(x.comment), # sort texts by length so that sentences of similar length end up together and less padding is added
repeat=False)
for i, batch in enumerate(valid_iterator):
print(batch.batch_size)
# pass
batch.fields
batch.batch_size
batch.comment
batch.toxic
len(batch.toxic)
import torch.nn as nn
class CNN(nn.Module):
def __init__(self, vocab_size, embedding_dim, n_filters, filter_sizes, output_dim, dropout_proba):
super().__init__()
self.embedding = nn.Embedding(vocab_size, embedding_dim)
self.conv_0 = nn.Conv2d(in_channels=1, out_channels=n_filters, kernel_size=(filter_sizes[0], embedding_dim))
self.conv_1 = nn.Conv2d(in_channels=1, out_channels=n_filters, kernel_size=(filter_sizes[1], embedding_dim))
self.conv_2 = nn.Conv2d(in_channels=1, out_channels=n_filters, kernel_size=(filter_sizes[2], embedding_dim))
self.fc = nn.Linear(len(filter_sizes) * n_filters, output_dim)
self.dropout = nn.Dropout(dropout_proba)
def forward(self, x):
#x = [sent len, batch size]
# print(x.shape)
x = x.permute(1, 0)
#x = [batch size, sent len]
embedded = self.embedding(x)
#print(embedded.shape)
#embedded = [batch size, sent len, emb dim]
embedded = embedded.unsqueeze(1)
#embedded = [batch size, 1, sent len, emb dim]
conv_0 = self.conv_0(embedded)
#print(conv_0.shape)
conv_0 = conv_0.squeeze(3)
#print(conv_0.shape)
conved_0 = F.relu(conv_0)
conved_1 = F.relu(self.conv_1(embedded).squeeze(3))
conved_2 = F.relu(self.conv_2(embedded).squeeze(3))
#conv_n = [batch size, n_filters, sent len - filter_sizes[n]]
# print(conved_0.shape)
pool_0 = F.max_pool1d(conved_0, conved_0.shape[2])
# print(pool_0.shape)
pooled_0 = pool_0.squeeze(2)
# print(pooled_0.shape)
pooled_1 = F.max_pool1d(conved_1, conved_1.shape[2]).squeeze(2)
pooled_2 = F.max_pool1d(conved_2, conved_2.shape[2]).squeeze(2)
#pooled_n = [batch size, n_filters]
cat = self.dropout(torch.cat((pooled_0, pooled_1, pooled_2), dim=1))
#cat = [batch size, n_filters * len(filter_sizes)]
return self.fc(cat)
import torch.nn.functional as F
def binary_accuracy(preds, y):
rounded_preds = torch.round(F.sigmoid(preds))
correct = (rounded_preds == y).float()
acc = correct.sum() / len(correct)
return acc
def train_func(model, iterator, optimizer, criterion):
epoch_loss = 0
epoch_acc = 0
model.train()
for batch in iterator:
optimizer.zero_grad()
predictions = model(batch.comment.cuda()).squeeze(1)
loss = criterion(predictions.float(), batch.toxic.float().cuda())
acc = binary_accuracy(predictions.float(), batch.toxic.float().cuda())
loss.backward()
optimizer.step()
epoch_loss += loss
epoch_acc += acc
return epoch_loss / len(iterator), epoch_acc / len(iterator)
def evaluate_func(model, iterator, criterion):
epoch_loss = 0
epoch_acc = 0
model.eval()
with torch.no_grad():
for batch in iterator:
predictions = model(batch.comment.cuda()).squeeze(1)
loss = criterion(predictions.float(), batch.toxic.float().cuda())
acc = binary_accuracy(predictions.float(), batch.toxic.float().cuda())
epoch_loss += loss
epoch_acc += acc
return epoch_loss / len(iterator), epoch_acc / len(iterator)
INPUT_DIM = len(TEXT.vocab)
EMBEDDING_DIM = 100
N_FILTERS = 100
FILTER_SIZES = [2,3,4]
OUTPUT_DIM = 1
DROPOUT_PROBA = 0.5
model = CNN(INPUT_DIM, EMBEDDING_DIM, N_FILTERS, FILTER_SIZES, OUTPUT_DIM, DROPOUT_PROBA)
INPUT_DIM
model
pretrained_embeddings = TEXT.vocab.vectors
model.embedding.weight.data.copy_(pretrained_embeddings)
import torch.optim as optim
optimizer = optim.Adam(model.parameters()) # we passed all parameters to the optimizer, so the embeddings will be fine-tuned as well
criterion = nn.BCEWithLogitsLoss() # бинарная кросс-энтропия с логитами
model = model.cuda() # train on the GPU! =)
model.embedding
from torchsummary import summary
# summary(model, (14))
import torch
N_EPOCHS = 8
for epoch in range(N_EPOCHS):
train_loss, train_acc = train_func(model, train_iterator, optimizer, criterion)
valid_loss, valid_acc = evaluate_func(model, valid_iterator, criterion)
print(f'Epoch: {epoch+1:02}, Train Loss: {train_loss:.3f}, Train Acc: {train_acc*100:.2f}%, Val. Loss: {valid_loss:.3f}, Val. Acc: {valid_acc*100:.2f}%')
test.examples
model.eval()
cnn_res = []
with torch.no_grad():
for batch in test_iterator:
predictions = model(batch.comment.cuda())
cnn_res.append(predictions)
testout = pd.read_csv('test_data.csv.zip', compression='zip')
cnnpred = testout.copy()
cnnpred['toxic'] = [float(item) for sublist in cnn_res for item in sublist]
cnnpred.drop('comment', axis=1, inplace=True)
cnnpred
cnnpred['toxic'] = (cnnpred['toxic'] > 0).astype(int)
cnnpred
cnnpred.to_csv('cnnpred_v4.csv', index=False)
!kaggle competitions submit -c toxic-comments-classification-apdl-2021 -f cnnpred_v4.csv -m "kirill_setdekov cnn v4 with threshold 0"
###Output
_____no_output_____
###Markdown
word2vec> not done, skip this model
###Code
! wget https://nlp.stanford.edu/data/glove.6B.zip
# imports needed by this (unfinished) cell
import re
from nltk import sent_tokenize
from gensim.models import word2vec
with open("alice.txt", 'r', encoding='utf-8') as f:
text = f.read()
text = re.sub('\n', ' ', text)
sents = sent_tokenize(text)
punct = '!"#$%&()*+,-./:;<=>?@[\]^_`{|}~„“«»†*—/\-‘’'
clean_sents = []
for sent in sents:
s = [w.lower().strip(punct) for w in sent.split()]
clean_sents.append(s)
print(clean_sents[:2])
model_path = "movie_reviews.model"
print("Saving model...")
model_en.save(model_path)
model = word2vec.Word2Vec.load(model_path)
model.build_vocab(clean_sents, update=True)
model.train(clean_sents, total_examples=model.corpus_count, epochs=5)
###Output
_____no_output_____
###Markdown
bow on random forest
###Code
! pip install pymystem3
! pip install --force-reinstall pymorphy2
!pip install pymorphy2-dicts-ru
import pymorphy2
import re
morph = pymorphy2.MorphAnalyzer()
# убираем все небуквенные символы
regex = re.compile("[А-Яа-яA-z]+")
def words_only(text, regex=regex):
try:
return regex.findall(text.lower())
except:
return []
for i in train.comment[10].split():
lemmas = morph.parse(i)
print(lemmas[0])
from functools import lru_cache
@lru_cache(maxsize=128)
def lemmatize_word(token, pymorphy=morph):
return pymorphy.parse(token)[0].normal_form
def lemmatize_text(text):
return [lemmatize_word(w) for w in text]
tokens = words_only(train.comment[10])
print(lemmatize_text(tokens))
from nltk.corpus import stopwords
import nltk
nltk.download('stopwords')
mystopwords = stopwords.words('russian')
def remove_stopwords(lemmas, stopwords = mystopwords):
return [w for w in lemmas if not w in stopwords]
lemmas = lemmatize_text(tokens)
print(*remove_stopwords(lemmas))
def remove_stopwords(lemmas, stopwords = mystopwords):
return [w for w in lemmas if not w in stopwords and len(w) > 3]
print(*remove_stopwords(lemmas))
def clean_text(text):
tokens = words_only(text)
lemmas = lemmatize_text(tokens)
return remove_stopwords(lemmas)
for i in range(20):
print(* clean_text(train.comment[i]))
from tqdm.auto import trange
new_comments = []
for i in trange(len(train.comment), desc='loop'):
new_comments.append(" ".join(clean_text(train.comment[i])))
new_comments[:10]
vec3 = CountVectorizer(ngram_range=(1, 2)) # строим BoW для слов
bow3 = vec3.fit_transform(new_comments)
list(vec3.vocabulary_.items())[100:120]
bow3
clf3 = LogisticRegression(random_state=0, max_iter=500, class_weight='balanced')
clf3.fit(bow3, train.toxic)
pred = clf3.predict(bow3)
print(classification_report(pred, train.toxic))
test
new_commentstest = []
for i in trange(len(test.comment), desc='loop'):
new_commentstest.append(" ".join(clean_text(test.comment[i])))
bow_test_pred3 = test.copy()
bow_test_pred3['newcomment'] = new_commentstest
bow_test_pred3.tail()
bow_test_pred3['toxic'] = clf3.predict(vec3.transform(bow_test_pred3.newcomment))
bow_test_pred3['toxic'] = bow_test_pred3['toxic'].astype(int)
bow_test_pred3.drop('comment', axis=1, inplace=True)
bow_test_pred3.drop('newcomment', axis=1, inplace=True)
bow_test_pred3
confusion_matrix(bow_test_pred2.toxic, bow_test_pred3.toxic)
bow_test_pred3.to_csv('bow_v3.csv', index=False)
# !kaggle competitions submit -c toxic-comments-classification-apdl-2021 -f bow_v3.csv -m "kirill_setdekov bow3 with preprocessing"
!pip install scikit-learn==0.24
from sklearn.ensemble import RandomForestClassifier
from sklearn.experimental import enable_halving_search_cv # noqa
from sklearn.model_selection import HalvingGridSearchCV
###Output
_____no_output_____
###Markdown
not run - too slow
###Code
# rnd_reg = RandomForestClassifier( )
# # hyper-parameter space
# param_grid_RF = {
# 'n_estimators' : [10,20,50,100,200,500,1000],
# 'max_features' : [0.6,0.8,"auto","sqrt"],
# }
# search_two = HalvingGridSearchCV(rnd_reg, param_grid_RF, factor=5, scoring='accuracy',
# n_jobs=-1, random_state=0, verbose=2).fit(bow3, train.toxic)
# search_two.best_params_
rnd_reg_2 = RandomForestClassifier(n_estimators=1000, verbose=5, n_jobs=-1)
search_no = rnd_reg_2.fit(bow3, train.toxic)
bow_test_pred4 = test.copy()
bow_test_pred4['newcomment'] = new_commentstest
bow_test_pred4.tail()
bow_test_pred4['toxic'] = search_no.predict(vec3.transform(bow_test_pred4.newcomment))
bow_test_pred4['toxic'] = bow_test_pred4['toxic'].astype(int)
bow_test_pred4.drop('comment', axis=1, inplace=True)
bow_test_pred4.drop('newcomment', axis=1, inplace=True)
bow_test_pred4
confusion_matrix(bow_test_pred4.toxic, bow_test_pred3.toxic)
bow_test_pred4.to_csv('bow_v4.csv', index=False)
!kaggle competitions submit -c toxic-comments-classification-apdl-2021 -f bow_v4.csv -m "kirill_setdekov bow4 with preprocessing and RF"
###Output
_____no_output_____ |
sst_science/West_Coast_HeatWave.ipynb | ###Markdown
Satellite sea surface temperatures along the West Coast of the United States during the 2014–2016 northeast Pacific marine heat wave.
In 2016 we published a [paper](https://agupubs.onlinelibrary.wiley.com/doi/10.1002/2016GL071039) on the heat wave in the ocean off the California coast. This analysis was the last time I used Matlab to process scientific data. To make Figure 1, here are the following steps:
- Download 4 TB of data from NASA PO.DAAC data archive via FTP
- Go through each day of data and subset to the West Coast Region to reduce size and save each subsetted day
- Go through 2002-2012 and create a daily climatology and save all 365 days of the climatology
- Go through each day of data and calculate the anomaly and save each day's anomaly
This whole process took about 1 month. Once the anomalies were calculated, then I could start to do analyses and explore the data. Below we will do this using MUR SST data on AWS Open Data Program in a few minutes using Python.
###Code
import warnings
import numpy as np
import pandas as pd
import xarray as xr
import fsspec
import matplotlib.pyplot as plt
warnings.simplefilter('ignore') # filter some warning messages
xr.set_options(display_style="html") #display dataset nicely
dir_out = './../../data/zarr_testing/'
file_aws = 'https://mur-sst.s3.us-west-2.amazonaws.com/zarr-v1'
file_aws_time = 'https://mur-sst.s3.us-west-2.amazonaws.com/zarr'
%%time
ds_sst = xr.open_zarr(file_aws,consolidated=True)
ds_sst
#region for figure 1
xlat1,xlat2 = 33,48
xlon1,xlon2 = -132, -118,
date1,date2 = '2002-01-01','2013-01-01'
subset = ds_sst.sel(lat=slice(xlat1,xlat2),lon=slice(xlon1,xlon2))
subset
###Output
_____no_output_____
###Markdown
Just plot a random day to make sure it looks correct
###Code
subset.analysed_sst[0,:,:].plot()
###Output
_____no_output_____
###Markdown
How big is this dataset?
- Because xarray uses lazy loading, we have access to this entire dataset but it only loads what it needs to for calculations
###Code
print('GB data = ',subset.nbytes/(1024 * 1024 * 1024))
###Output
GB data = 201.89575985074043
###Markdown
Calculate the Monthly Sea Surface Temperature Anomalies
###Code
sst_monthly = subset.resample(time='1MS').mean('time',keep_attrs=True,skipna=False)
climatology_mean_monthly = sst_monthly.sel(time=slice(date1,date2)).groupby('time.month').mean('time',keep_attrs=True,skipna=False)
sst_anomaly_monthly = sst_monthly.groupby('time.month')-climatology_mean_monthly #take out annual mean to remove trends
sst_anomaly_monthly
sst_anomaly_monthly.analysed_sst[0,:,:].plot(vmin=-3,vmax=3,cmap='RdYlBu_r')
sst_anomaly_monthly.analysed_sst.sel(time='2015-03').plot(vmin=-3,vmax=3,cmap='RdYlBu_r')
#plt.pcolormesh(tem.lon,tem.lat,tem.analysed_sst,transform=ccrs.PlateCarree(),cmap=vik_map,vmin=-2,vmax=2)
#ax.coastlines(resolution='50m', color='black', linewidth=1)
#ax.add_feature(cfeature.LAND)
#ax.add_feature(cfeature.STATES.with_scale('10m'))
#ax.set_extent([-132.27,-117,32,48])
#plt.colorbar(ax=ax,label='SST Anomaly (K)')
#tt=plt.text(-122,47,tstr,fontsize=16)
###Output
_____no_output_____
###Markdown
Let's try and re-do figure 2 which uses 5-day average SST anomalies
###Code
sst_5day = subset.resample(time='5D').mean('time',keep_attrs=True,skipna=False)
climatology_mean_5day = sst_5day.sel(time=slice(date1,date2)).groupby('time.day').mean('time',keep_attrs=True,skipna=False)
sst_anomaly_5day = sst_5day.groupby('time.day')-climatology_mean_5day #take out annual mean to remove trends
sst_anomaly_5day
%%time
max_5day = sst_anomaly_5day.analysed_sst.sel(time=slice('2012','2016')).max("time")
max_5day
#running out of memory right now. maybe need to breakdown into yearly bits or something. could try using time arranged zarr file store
#max_5day.plot(vmin=0,vmax=5,cmap='jet')
###Output
_____no_output_____
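###Markdown
A possible workaround for the memory issue noted above (a sketch that assumes the same `sst_anomaly_5day` object): reduce one year at a time so only a single year of anomalies is loaded, then combine the yearly maxima.
###Code
# Sketch: compute the max anomaly year by year to keep the working set small.
yearly_max = []
for year in range(2012, 2017):
    chunk = sst_anomaly_5day.analysed_sst.sel(time=str(year)).max('time')
    yearly_max.append(chunk.compute())
max_5day_chunked = xr.concat(yearly_max, dim='year').max('year')
max_5day_chunked.plot(vmin=0, vmax=5, cmap='jet')
###Output
_____no_output_____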
###Markdown
Switch to same data, but it is chunked differently - it is optimized for timeseries rather than spatial analysis
###Code
ds_sst = xr.open_zarr(file_aws_time,consolidated=True)
ds_sst
%%time
sst_newport_nearshore = ds_sst.analysed_sst.sel(lat=44.6,lon=-124.11,method='nearest').rolling(time=30, center=True).mean().load()
sst_newport_offshore = ds_sst.analysed_sst.sel(lat=44.6,lon=-134.11,method='nearest').rolling(time=30, center=True).mean().load()
plt.plot(sst_newport_nearshore.time.dt.dayofyear,sst_newport_nearshore)
###Output
_____no_output_____ |
bayes-opt.ipynb | ###Markdown
Based on this: * https://scikit-optimize.github.io/stable/auto_examples/bayesian-optimization.htmlsphx-glr-auto-examples-bayesian-optimization-py
###Code
import numpy as np
np.random.seed(1234)
import matplotlib.pyplot as plt
from skopt.plots import plot_gaussian_process
from skopt import Optimizer
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from skopt import gp_minimize
import numpy as np
%matplotlib inline
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import Matern
noise_level = 0.1
# Our 1D toy problem, this is the function we are trying to
# minimize
def objective(x, noise_level=noise_level):
return np.sin(5 * x[0]) * (1 - np.tanh(x[0] ** 2))\
+ np.random.randn() * noise_level
def objective_wo_noise(x):
return objective(x, noise_level=0)
opt_gp = Optimizer([(-2.0, 2.0)], base_estimator="GP", n_initial_points=5,
acq_optimizer="sampling", random_state=42)
# let's do this by hand first...
X = np.linspace(-2, 2, 100)
y = np.vectorize(lambda x: objective_wo_noise([x]))(X)
plt.plot(X, y)
# Generate data and fit GP
rng = np.random.RandomState(4)
kernel = Matern(length_scale=1.0, nu=2.5)
gp = GaussianProcessRegressor(kernel=kernel, alpha=0.0)
# take 5 points...
X = rng.uniform(-2, 2, 5)
X = np.sort(X)
y = np.vectorize(lambda x: objective_wo_noise([x]))(X)
gp.fit(X.reshape(-1, 1), y)
# how should we approach this? One curve?
X_ = np.linspace(-2, 2, 100)
y_mean, y_std = gp.predict(X_.reshape(-1, 1), return_std=True)
y_samples = gp.sample_y(X_.reshape(-1, 1), 1)
plt.plot(X_, y_samples, 'r')
plt.plot(X_, np.vectorize(lambda x: objective_wo_noise([x]))(X_))
plt.plot(X, y, 'ro')
# if we add some noise...
X_ = np.linspace(-2, 2, 100)
y_mean, y_std = gp.predict(X_.reshape(-1, 1), return_std=True)
y_samples = gp.sample_y(X_.reshape(-1, 1), 100)
plt.plot(X_, y_samples)
# plt.plot(X_, np.vectorize(lambda x: objective_wo_noise([x]))(X_))
plt.plot(X, y, 'ro')
###Output
_____no_output_____
###Markdown
How do we pick the next point to evaluate?From here there are several ways to pick the next point. Two common approaches are the upper (or lower) confidence bound, which trades off exploration and exploitation, and expected improvement.
###Code
plt.plot(X_, y_mean, 'r', X, y, 'ro')
plt.grid(True)
plt.fill_between(X_, y_mean - y_std, y_mean + y_std,
alpha=0.5, color='k')
# for example, let's just consider the lower bound
# kappa controls the exploration/exploitation.
kappa = 0.5
plt.plot(X_, y_mean, 'r', X, y, 'ro', X_, y_mean - y_std, 'b', X_, y_mean - kappa*y_std, 'k')
plt.grid(True)
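# Under a lower-confidence-bound rule, the next point to evaluate for this
# minimisation problem is the minimiser of the bound (kappa sets the trade-off).
print("LCB suggests evaluating next at x =", X_[np.argmin(y_mean - kappa * y_std)])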
# expected improvement for a minimisation problem: improvement measures how far the
# GP mean sits below the best observed value, with a small exploration margin xi
from scipy.stats import norm
best_y = np.min(y)
xi = 0.01  # exploration margin
sigma = np.maximum(y_std, 1e-12)  # guard against zero predictive std at training points
z = (best_y - y_mean - xi) / sigma
ei = (best_y - y_mean - xi) * norm.cdf(z) + sigma * norm.pdf(z)
plt.plot(X_, y_mean, 'r', X, y, 'ro', X_, y_mean - y_std, 'b', X_, ei, 'k')
plt.grid(True)
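# The next point to evaluate is the maximiser of the acquisition function over the
# candidate grid (a sketch of what skopt's acq_optimizer does internally).
print("EI suggests evaluating next at x =", X_[np.argmax(ei)])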
###Output
_____no_output_____
###Markdown
Let's use scikit-optimize instead...
###Code
res = gp_minimize(objective_wo_noise, # the function to minimize
[(-2.0, 2.0)], # the bounds on each dimension of x
acq_func="EI", # the acquisition function
n_calls=10, # the number of evaluations of f
n_random_starts=1, # the number of random initialization points
x0 = [[x] for x in X],
random_state=1234) # the random seed
from skopt.plots import plot_convergence
plot_convergence(res);
plot_gaussian_process(res, n_calls=0,
objective=objective_wo_noise,
show_title=False)
plot_gaussian_process(res, n_calls=0,
show_legend=True, show_title=False,
show_mu=False, show_acq_func=True,
show_observations=False,
show_next_point=True)
plot_gaussian_process(res, n_calls=1,
objective=objective_wo_noise,
show_title=False)
plot_gaussian_process(res, n_calls=1,
show_legend=True, show_title=False,
show_mu=False, show_acq_func=True,
show_observations=False,
show_next_point=True)
plt.figure(figsize=(20,20))
for n_iter in range(5):
# Plot true function.
plt.subplot(5, 2, 2*n_iter+1)
if n_iter == 0:
show_legend = True
else:
show_legend = False
ax = plot_gaussian_process(res, n_calls=n_iter,
objective=objective_wo_noise,
noise_level=noise_level,
show_legend=show_legend, show_title=False,
show_next_point=False, show_acq_func=False)
ax.set_ylabel("")
ax.set_xlabel("")
# Plot EI(x)
plt.subplot(5, 2, 2*n_iter+2)
ax = plot_gaussian_process(res, n_calls=n_iter,
show_legend=show_legend, show_title=False,
show_mu=False, show_acq_func=True,
show_observations=False,
show_next_point=True)
ax.set_ylabel("")
ax.set_xlabel("")
plt.show()
###Output
_____no_output_____ |
NG_LDA.ipynb | ###Markdown
Import Packages
###Code
import re
import numpy as np
import pandas as pd
from pprint import pprint
# Gensim
import gensim
import gensim.corpora as corpora
from gensim.utils import simple_preprocess
from gensim.models import CoherenceModel
# spacy for lemmatization
import spacy
# Plotting tools
import pyLDAvis
import pyLDAvis.gensim # don't skip this
import matplotlib.pyplot as plt
%matplotlib inline
# Enable logging for gensim - optional
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.ERROR)
import warnings
warnings.filterwarnings("ignore",category=DeprecationWarning)
# NLTK Stop words
from nltk.corpus import stopwords
stop_words = stopwords.words('english')
stop_words.extend(['from', 'subject', 're', 'edu', 'use'])
###Output
_____no_output_____
###Markdown
Importing Lyrics data
###Code
# Import Dataset
df1 = pd.read_csv('')  # path left blank in the original notebook
df1.head()
# df = df1.head(10)
df = df1  # work with the full dataset; swap in the line above to use a small sample instead
print(df.genre.unique())
print(df.artist.unique())
print(df.year.unique())
###Output
[2007 2008 2006 2016 2004]
###Markdown
Clean the lyrics (keep only letters and spaces) and tokenize each song into words
###Code
# Convert to list
data = df.lyrics.values.tolist()
data = [re.sub('[^a-zA-Z ]', '', str(sent)) for sent in data]
pprint(data[:1])
def sent_to_words(sentences):
    for sentence in sentences:
        yield(gensim.utils.simple_preprocess(str(sentence), deacc=True))  # deacc=True removes punctuation
data_words = list(sent_to_words(data))
print(data_words[:1])
###Output
[['mother', 'dear', 'motherits', 'cold', 'tonightlike', 'every', 'otherevery', 'other', 'nightbut', 'never', 'feelfeel', 'it', 'anywayim', 'gonna', 'need', 'sooni', 'can', 'feel', 'itfeel', 'it', 'beginbut', 'dont', 'knowhow', 'im', 'gonna', 'payit', 'must', 'be', 'aboutmid', 'december', 'right', 'nowand', 'think', 'im', 'not', 'real', 'surehow', 'old', 'feeli', 'lost', 'my', 'thoughtsin', 'some', 'dreamoh', 'mother', 'dontknow', 'howi', 'got', 'where', 'ambut', 'ill', 'try', 'to', 'explain', 'anyhowsee', 'graduallyyou', 'get', 'sucked', 'ininto', 'itwithout', 'ever', 'whats', 'happeningand', 'that', 'is', 'whenthe', 'downward', 'spiral', 'beginsanyway', 'back', 'to', 'how', 'it', 'all', 'startedit', 'started', 'with', 'dopewhy', 'not', 'after', 'all', 'it', 'was', 'just', 'the', 'once', 'told', 'myselfi', 'didnt', 'even', 'like', 'it', 'very', 'muchbut', 'the', 'people', 'was', 'with', 'all', 'did', 'itthen', 'tried', 'speedwhy', 'not', 'after', 'all', 'it', 'was', 'just', 'the', 'once', 'told', 'myselfthe', 'next', 'thing', 'knew', 'was', 'doing', 'couple', 'of', 'grams', 'weekthen', 'friend', 'introduced', 'me', 'to', 'smack', 'chasing', 'the', 'dragonwhy', 'not', 'after', 'all', 'it', 'was', 'just', 'the', 'once', 'told', 'myselfwhy', 'not', 'after', 'all', 'it', 'wasnt', 'using', 'needlethen', 'started', 'doing', 'what', 'said', 'id', 'never', 'do']]
###Markdown
Creating Bigram and Trigram ModelsBigrams are two words frequently occurring together in the document; trigrams are three words frequently occurring together. Some examples are ‘front_bumper’, ‘oil_leak’, ‘maryland_college_park’, etc. Gensim’s Phrases model can build and implement bigrams, trigrams, quadgrams and more. The two important arguments to Phrases are min_count and threshold. The higher the values of these parameters, the harder it is for words to be combined into bigrams.
###Code
# Build the bigram and trigram models
bigram = gensim.models.Phrases(data_words, min_count=5, threshold=100) # higher threshold fewer phrases.
trigram = gensim.models.Phrases(bigram[data_words], threshold=100)
# Faster way to get a sentence clubbed as a trigram/bigram
bigram_mod = gensim.models.phrases.Phraser(bigram)
trigram_mod = gensim.models.phrases.Phraser(trigram)
# See trigram example
print(bigram_mod[data_words[0]])
###Output
['mother', 'dear', 'motherits', 'cold', 'tonightlike', 'every', 'otherevery', 'other', 'nightbut', 'never', 'feelfeel', 'it', 'anywayim', 'gonna', 'need', 'sooni', 'can', 'feel', 'itfeel', 'it', 'beginbut', 'dont', 'knowhow', 'im', 'gonna', 'payit', 'must', 'be', 'aboutmid', 'december', 'right', 'nowand', 'think', 'im', 'not', 'real', 'surehow', 'old', 'feeli', 'lost', 'my', 'thoughtsin', 'some', 'dreamoh', 'mother', 'dontknow', 'howi', 'got', 'where', 'ambut', 'ill', 'try', 'to', 'explain', 'anyhowsee', 'graduallyyou', 'get', 'sucked', 'ininto', 'itwithout', 'ever', 'whats', 'happeningand', 'that', 'is', 'whenthe', 'downward', 'spiral', 'beginsanyway', 'back', 'to', 'how', 'it', 'all', 'startedit', 'started', 'with', 'dopewhy', 'not', 'after', 'all', 'it', 'was', 'just', 'the', 'once', 'told', 'myselfi', 'didnt', 'even', 'like', 'it', 'very', 'muchbut', 'the', 'people', 'was', 'with', 'all', 'did', 'itthen', 'tried', 'speedwhy', 'not', 'after', 'all', 'it', 'was', 'just', 'the', 'once', 'told', 'myselfthe', 'next', 'thing', 'knew', 'was', 'doing', 'couple', 'of', 'grams', 'weekthen', 'friend', 'introduced', 'me', 'to', 'smack', 'chasing', 'the', 'dragonwhy', 'not', 'after', 'all', 'it', 'was', 'just', 'the', 'once', 'told', 'myselfwhy', 'not', 'after', 'all', 'it', 'wasnt', 'using', 'needlethen', 'started', 'doing', 'what', 'said', 'id', 'never', 'do']
###Markdown
Remove Stopwords, Make Bigrams and LemmatizeThe bigrams model is ready. Let’s define the functions to remove the stopwords, make bigrams and lemmatization and call them sequentially.
###Code
# Define functions for stopwords, bigrams, trigrams and lemmatization
def remove_stopwords(texts):
return [[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts]
def make_bigrams(texts):
return [bigram_mod[doc] for doc in texts]
def make_trigrams(texts):
return [trigram_mod[bigram_mod[doc]] for doc in texts]
def lemmatization(texts, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']):
"""https://spacy.io/api/annotation"""
texts_out = []
for sent in texts:
doc = nlp(" ".join(sent))
texts_out.append([token.lemma_ for token in doc if token.pos_ in allowed_postags])
return texts_out
###Output
_____no_output_____
###Markdown
Let’s call the functions in order.
###Code
# Remove Stop Words
data_words_nostops = remove_stopwords(data_words)
# Form Bigrams
data_words_bigrams = make_bigrams(data_words_nostops)
# Initialize spacy 'en' model, keeping only tagger component (for efficiency)
# python3 -m spacy download en
nlp = spacy.load('en', disable=['parser', 'ner'])
# Do lemmatization keeping only noun, adj, vb, adv
data_lemmatized = lemmatization(data_words_bigrams, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV'])
print(data_lemmatized[:1])
###Output
[['mother', 'dear', 'motherit', 'cold', 'tonightlike', 'otherevery', 'nightbut', 'never', 'feelfeel', 'anywayim', 'go', 'need', 'sooni', 'feel', 'itfeel', 'beginbut', 'do', 'not', 'knowhow', 'be', 'go', 'payit', 'must', 'aboutmid', 'december', 'right', 'nowand', 'think', 'be', 'real', 'surehow', 'old', 'feeli', 'lose', 'thoughtsin', 'dreamoh', 'mother', 'dontknow', 'howi', 'get', 'ambut', 'ill', 'try', 'explain', 'anyhowsee', 'graduallyyou', 'get', 'suck', 'ininto', 'itwithout', 'ever', 's', 'happeningand', 'downward', 'spiral', 'beginsanyway', 'back', 'startedit', 'start', 'dopewhy', 'tell', 'myselfi', 'do', 'not', 'even', 'like', 'muchbut', 'people', 'itthen', 'try', 'speedwhy', 'tell', 'myselfthe', 'next', 'thing', 'know', 'couple', 'gram', 'weekthen', 'friend', 'introduce', 'smack', 'chase', 'dragonwhy', 'tell', 'myselfwhy', 'be', 'not', 'use', 'needlethen', 'start', 'say', 'never']]
###Markdown
Create the Dictionary and Corpus needed for Topic ModelingThe two main inputs to the LDA topic model are the dictionary(id2word) and the corpus. Let’s create them.
###Code
# Create Dictionary
id2word = corpora.Dictionary(data_lemmatized)
# Create Corpus
texts = data_lemmatized
# Term Document Frequency
corpus = [id2word.doc2bow(text) for text in texts]
# View
print(corpus[:1])
###Output
[[(0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 3), (6, 1), (7, 1), (8, 1), (9, 1), (10, 1), (11, 1), (12, 1), (13, 2), (14, 1), (15, 1), (16, 1), (17, 1), (18, 1), (19, 1), (20, 1), (21, 1), (22, 1), (23, 1), (24, 1), (25, 1), (26, 2), (27, 2), (28, 1), (29, 1), (30, 1), (31, 1), (32, 1), (33, 1), (34, 1), (35, 1), (36, 1), (37, 1), (38, 1), (39, 1), (40, 1), (41, 1), (42, 2), (43, 1), (44, 1), (45, 1), (46, 1), (47, 1), (48, 1), (49, 1), (50, 1), (51, 2), (52, 1), (53, 1), (54, 3), (55, 1), (56, 1), (57, 1), (58, 1), (59, 1), (60, 1), (61, 1), (62, 1), (63, 1), (64, 1), (65, 1), (66, 1), (67, 1), (68, 2), (69, 1), (70, 1), (71, 1), (72, 3), (73, 1), (74, 1), (75, 1), (76, 1), (77, 2), (78, 1), (79, 1)]]
###Markdown
Gensim creates a unique id for each word in the document. The produced corpus shown above is a mapping of (word_id, word_frequency). For example, (0, 1) above implies that word id 0 occurs once in the first document, and the remaining pairs read the same way. This is used as the input by the LDA model. If you want to see what word a given id corresponds to, pass the id as a key to the dictionary.
###Code
id2word[10]
# Human readable format of corpus (term-frequency)
[[(id2word[id], freq) for id, freq in cp] for cp in corpus[:1]]
###Output
_____no_output_____
###Markdown
Building the Topic ModelWe have everything required to train the LDA model. In addition to the corpus and dictionary, you need to provide the number of topics as well. Apart from that, alpha and eta are hyperparameters that affect the sparsity of the topics. According to the Gensim docs, both default to a 1.0/num_topics prior. chunksize is the number of documents to be used in each training chunk, update_every determines how often the model parameters should be updated, and passes is the total number of training passes.
###Code
# Build LDA model
lda_model = gensim.models.ldamodel.LdaModel(corpus=corpus,
id2word=id2word,
num_topics=20,
random_state=100,
update_every=1,
chunksize=100,
passes=10,
alpha='auto',
per_word_topics=True)
# Print the Keyword in the 10 topics
pprint(lda_model.print_topics())
doc_lda = lda_model[corpus]
# Compute Perplexity
print('\nPerplexity: ', lda_model.log_perplexity(corpus)) # a measure of how good the model is. lower the better.
# Compute Coherence Score
coherence_model_lda = CoherenceModel(model=lda_model, texts=data_lemmatized, dictionary=id2word, coherence='c_v')
coherence_lda = coherence_model_lda.get_coherence()
print('\nCoherence Score: ', coherence_lda)
# Visualize the topics
pyLDAvis.enable_notebook()
vis = pyLDAvis.gensim.prepare(lda_model, corpus, id2word)
vis
mallet_path = '/Users/neha/Downloads/mallet-2.0.8/bin/mallet' # update this path
ldamallet = gensim.models.wrappers.LdaMallet(mallet_path, corpus=corpus, num_topics=20, id2word=id2word)
# Show Topics
pprint(ldamallet.show_topics(formatted=False))
# Compute Coherence Score
coherence_model_ldamallet = CoherenceModel(model=ldamallet, texts=data_lemmatized, dictionary=id2word, coherence='c_v')
coherence_ldamallet = coherence_model_ldamallet.get_coherence()
print('\nCoherence Score: ', coherence_ldamallet)
###Output
[(8,
[('stand', 0.14285714285714285),
('betterpull', 0.08571428571428572),
('cry', 0.08571428571428572),
('youbaby', 0.05714285714285714),
('town', 0.02857142857142857),
('magazinewhen', 0.02857142857142857),
('pageantive', 0.02857142857142857),
('saywont', 0.02857142857142857),
('make', 0.02857142857142857),
('graduallyyou', 0.02857142857142857)]),
(7,
[('shiti', 0.1111111111111111),
('wallplatinum', 0.07407407407407407),
('meoh', 0.07407407407407407),
('trust', 0.037037037037037035),
('wanna', 0.037037037037037035),
('perdere', 0.037037037037037035),
('heartyou', 0.037037037037037035),
('porqueseh', 0.037037037037037035),
('howi', 0.037037037037037035),
('soand', 0.037037037037037035)]),
(6,
[('back', 0.2608695652173913),
('stop', 0.043478260869565216),
('friend', 0.043478260869565216),
('sonare', 0.043478260869565216),
('trace', 0.043478260869565216),
('payit', 0.043478260869565216),
('ancho', 0.043478260869565216),
('dad', 0.043478260869565216),
('coquettesmister', 0.043478260869565216),
('slick', 0.043478260869565216)]),
(9,
[('ill', 0.125),
('youdont', 0.09375),
('putnobody', 0.09375),
('care', 0.03125),
('heart', 0.03125),
('bite', 0.03125),
('herall', 0.03125),
('bridge', 0.03125),
('tuxedo', 0.03125),
('bybut', 0.03125)]),
(10,
[('start', 0.08),
('amissin', 0.08),
('nowand', 0.04),
('knowi', 0.04),
('hairgotta', 0.04),
('happeningand', 0.04),
('guess', 0.04),
('nightbut', 0.04),
('wrongso', 0.04),
('drink', 0.04)]),
(2,
[('watch', 0.15),
('open', 0.1),
('life', 0.1),
('people', 0.05),
('mei', 0.05),
('marshall', 0.05),
('knowif', 0.05),
('sighfor', 0.05),
('bell', 0.05),
('kissagainst', 0.05)]),
(5,
[('mother', 0.09523809523809523),
('create', 0.047619047619047616),
('youfriend', 0.047619047619047616),
('itwithout', 0.047619047619047616),
('watch', 0.047619047619047616),
('didyou', 0.047619047619047616),
('hide', 0.047619047619047616),
('time', 0.047619047619047616),
('youand', 0.047619047619047616),
('wait', 0.047619047619047616)]),
(17,
[('sit', 0.07352941176470588),
('electriclay', 0.04411764705882353),
('careful', 0.04411764705882353),
('pence', 0.04411764705882353),
('street', 0.04411764705882353),
('work', 0.04411764705882353),
('mount', 0.04411764705882353),
('boot', 0.04411764705882353),
('metricyou', 0.04411764705882353),
('bad', 0.04411764705882353)]),
(1,
[('kiss', 0.3),
('yanever', 0.075),
('realize', 0.05),
('uhhuhi', 0.05),
('yanow', 0.05),
('startedit', 0.025),
('insidei', 0.025),
('good', 0.025),
('awake', 0.025),
('introduce', 0.025)]),
(13,
[('rocknroll', 0.08955223880597014),
('famous', 0.08955223880597014),
('record', 0.05970149253731343),
('dollar', 0.05970149253731343),
('allyeah', 0.04477611940298507),
('beverlywe', 0.04477611940298507),
('alla', 0.04477611940298507),
('place', 0.04477611940298507),
('allwe', 0.04477611940298507),
('maserati', 0.04477611940298507)])]
Coherence Score: 0.612603937086269
###Markdown
How to find the optimal number of topics for LDA?My approach to finding the optimal number of topics is to build many LDA models with different numbers of topics (k) and pick the one that gives the highest coherence value. Choosing a ‘k’ that marks the end of a rapid growth of topic coherence usually offers meaningful and interpretable topics. Picking an even higher value can sometimes provide more granular sub-topics. If you see the same keywords being repeated in multiple topics, it’s probably a sign that ‘k’ is too large. The compute_coherence_values() function (see below) trains multiple LDA models and returns the models and their corresponding coherence scores.
###Code
def compute_coherence_values(dictionary, corpus, texts, limit, start=2, step=3):
"""
Compute c_v coherence for various number of topics
Parameters:
----------
dictionary : Gensim dictionary
corpus : Gensim corpus
texts : List of input texts
limit : Max num of topics
Returns:
-------
model_list : List of LDA topic models
coherence_values : Coherence values corresponding to the LDA model with respective number of topics
"""
coherence_values = []
model_list = []
for num_topics in range(start, limit, step):
        model = gensim.models.wrappers.LdaMallet(mallet_path, corpus=corpus, num_topics=num_topics, id2word=dictionary)
model_list.append(model)
coherencemodel = CoherenceModel(model=model, texts=texts, dictionary=dictionary, coherence='c_v')
coherence_values.append(coherencemodel.get_coherence())
return model_list, coherence_values
# Can take a long time to run.
model_list, coherence_values = compute_coherence_values(dictionary=id2word, corpus=corpus, texts=data_lemmatized, start=2, limit=40, step=6)
# Show graph
limit=40; start=2; step=6;
x = range(start, limit, step)
plt.plot(x, coherence_values)
plt.xlabel("Num Topics")
plt.ylabel("Coherence score")
plt.legend(("coherence_values"), loc='best')
plt.show()
# Print the coherence scores
for m, cv in zip(x, coherence_values):
print("Num Topics =", m, " has Coherence Value of", round(cv, 4))
# Select the model and print the topics
optimal_model = model_list[3]
model_topics = optimal_model.show_topics(formatted=False)
pprint(optimal_model.print_topics(num_words=10))
def format_topics_sentences(ldamodel=lda_model, corpus=corpus, texts=data):
# Init output
sent_topics_df = pd.DataFrame()
# Get main topic in each document
for i, row in enumerate(ldamodel[corpus]):
row = sorted(row, key=lambda x: (x[1]), reverse=True)
# Get the Dominant topic, Perc Contribution and Keywords for each document
for j, (topic_num, prop_topic) in enumerate(row):
if j == 0: # => dominant topic
wp = ldamodel.show_topic(topic_num)
topic_keywords = ", ".join([word for word, prop in wp])
sent_topics_df = sent_topics_df.append(pd.Series([int(topic_num), round(prop_topic,4), topic_keywords]), ignore_index=True)
else:
break
sent_topics_df.columns = ['Dominant_Topic', 'Perc_Contribution', 'Topic_Keywords']
# Add original text to the end of the output
contents = pd.Series(texts)
sent_topics_df = pd.concat([sent_topics_df, contents], axis=1)
return(sent_topics_df)
df_topic_sents_keywords = format_topics_sentences(ldamodel=optimal_model, corpus=corpus, texts=data)
# Format
df_dominant_topic = df_topic_sents_keywords.reset_index()
df_dominant_topic.columns = ['Document_No', 'Dominant_Topic', 'Topic_Perc_Contrib', 'Keywords', 'Text']
# Show
df_dominant_topic.head(10)
###Output
_____no_output_____
###Markdown
Find the most representative document for each topicSometimes just the topic keywords may not be enough to make sense of what a topic is about. So, to help with understanding the topic, you can find the documents a given topic has contributed to the most and infer the topic by reading that document. Whew!!
###Code
# Group top 5 sentences under each topic
sent_topics_sorteddf_mallet = pd.DataFrame()
sent_topics_outdf_grpd = df_topic_sents_keywords.groupby('Dominant_Topic')
for i, grp in sent_topics_outdf_grpd:
sent_topics_sorteddf_mallet = pd.concat([sent_topics_sorteddf_mallet,
grp.sort_values(['Perc_Contribution'], ascending=[0]).head(1)],
axis=0)
# Reset Index
sent_topics_sorteddf_mallet.reset_index(drop=True, inplace=True)
# Format
sent_topics_sorteddf_mallet.columns = ['Topic_Num', "Topic_Perc_Contrib", "Keywords", "Text"]
# Show
sent_topics_sorteddf_mallet.head()
###Output
_____no_output_____ |
notebooks/12/12_vibrating_building.ipynb | ###Markdown
Modes of a Vibrating BuildingIn this notebook we will find the vibrational modes of a simple model of a building. We will assume that the mass of the floors are much more than the mass of the walls and that the lateral stiffness of the walls can be modeled by a simple linear spring. We will investigate how the building may vibrate under initial conditions that could be caused by a gust of wind and during ground vibration.
###Code
from IPython.display import YouTubeVideo
YouTubeVideo('g0cz-oDfUg0', width=600)
YouTubeVideo('hSwjkG3nv1c', width=600)
YouTubeVideo('kzVvd4Dk6sw', width=600)
import numpy as np
import matplotlib.pyplot as plt
from resonance.linear_systems import FourStoryBuildingSystem
###Output
_____no_output_____
###Markdown
This gives a bit nicer printing of large NumPy arrays.
###Code
np.set_printoptions(precision=5, linewidth=100, suppress=True)
%matplotlib notebook
###Output
_____no_output_____
###Markdown
Simulate the four story building
###Code
sys = FourStoryBuildingSystem()
sys.constants
sys.coordinates
sys.plot_configuration();
traj = sys.free_response(30, sample_rate=10)
traj[list(sys.coordinates.keys())].plot(subplots=True);
sys.animate_configuration(fps=10)
M, C, K = sys.canonical_coefficients()
M
C
K
###Output
_____no_output_____
###Markdown
ExerciseThe system can be normalized by the mass matrix and transformed into a symmetric eigenvalue problem by introducing the new coordinate vector:$$\mathbf{q}=\mathbf{L}^T\mathbf{x}$$$\mathbf{L}$ is the Cholesky decomposition of the symmetric mass matrix, i.e. $\mathbf{M}=\mathbf{L}\mathbf{L}^T$.The equation of motion becomes:$$\ddot{\mathbf{q}} + \tilde{\mathbf{K}} \mathbf{q} = 0$$Compute $\tilde{\mathbf{K}}$.
###Code
L = np.linalg.cholesky(M)
L
M**0.5
import numpy.linalg as la
from numpy.linalg import inv
K_tilde = inv(L) @ K @ inv(L.T)
K_tilde
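# Numerical check (a sketch): the mass-normalised stiffness matrix should be
# symmetric, which guarantees real eigenvalues and orthogonal eigenvectors.
np.allclose(K_tilde, K_tilde.T)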
###Output
_____no_output_____
###Markdown
Notice that $\tilde{\mathbf{K}}$ is symmetric, so we are guaranteed to get real eigenvalues and orthogonal eigenvectors when solving this system. ExerciseFind the eigenvalues and eigenvectors. Create the spectral matrix $\mathbf{\Lambda}$ and the matrix $P$ which contains the orthonormal eigenvectors of $\tilde{\mathbf{K}}$.$$\mathbf{P} = \left[ \mathbf{v}_1, \ldots, \mathbf{v}_4 \right]$$
###Code
evals, evecs = np.linalg.eig(K_tilde)
evals
evecs
Lambda = np.diag(evals)
Lambda
P = evecs
###Output
_____no_output_____
###Markdown
ExerciseProve that the eigenvectors in $\mathbf{P}$ are orthonormal.
###Code
np.dot(P[:, 0], P[:, 1])
np.linalg.norm(P[:, 0])
P[:, 0].T @ P[:, 1]
P[:, 0].T @ P[:, 0]
###Output
_____no_output_____
###Markdown
An orthonormal matrix has the property that its transpose multiplied by itself is the identity matrix.
###Code
P.T @ P
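# Equivalent numerical check (sketch): compare against the 4x4 identity matrix.
np.allclose(P.T @ P, np.eye(4))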
###Output
_____no_output_____
###Markdown
ExerciseFind the natural frequencies of the system in both radians per second and Hertz, and store them in arrays named `ws` and `fs`, ordered to match the eigenvalues.
###Code
ws = np.sqrt(evals)
ws
fs = ws / 2 / np.pi
fs
###Output
_____no_output_____
###Markdown
ExerciseTransform the eigenvectors back into the coordinate system associated with $\mathbf{x}$. $$\mathbf{S} = \left[ \mathbf{u}_1, \ldots, \mathbf{u}_4 \right]$$
###Code
S = np.linalg.inv(L.T) @ P
S
sys.coordinates
###Output
_____no_output_____
###Markdown
Exercise: visualize the modeshapesThe eigenmodes (mode shapes) are contained in each column of $\mathbf{S}$. Create a plot for each mode shape with these specifications:- The title of each plot should be the frequency of the corresponding modeshape in Hz.- The y axis should be made up of the values [0, 3, 6, 9, 12] meters.- The x axis should plot the five values. The first should be zero and the remaining values should be the components of the mode shape in order of the component associated with the lowest floor to the highest.- Plot lines with small circles at each data point.
###Code
S[:, 0]
np.hstack((0, S[:, 0]))
u1 = S[:, 0]
u1
u1[::-1]
S[:, 2]
fig, axes = plt.subplots(1, 4)
for i in range(4):
axes[i].plot(np.hstack((0, S[:, i])), [0, 3, 6, 9, 12], marker='o')
axes[i].set_title('{:1.2f} Hz'.format(fs[i]))
plt.tight_layout()
fs[0]
S[:, 0]
sys.coordinates['x1'] = S[0, 2]
sys.coordinates['x2'] = S[1, 2]
sys.coordinates['x3'] = S[2, 2]
sys.coordinates['x4'] = S[3, 2]
traj = sys.free_response(30, sample_rate=10)
traj[list(sys.coordinates.keys())].plot(subplots=True)
sys.animate_configuration(fps=10)
###Output
_____no_output_____
###Markdown
Simulating the trajectoryThe trajectory of the building's coordinates can be found with:$$\mathbf{x}(t) = \sum_{i=1}^n c_i \sin(\omega_i t + \phi_i) \mathbf{u}_i$$where$$\phi_i = \arctan \frac{\omega_i \mathbf{v}_i^T \mathbf{q}_0}{\mathbf{v}_i^T \dot{\mathbf{q}}_0}$$and$$c_i = \frac{\mathbf{v}^T_i \mathbf{q}_0}{\sin\phi_i}$$$c_i$ are the modal participation factors and reflect what proportion of each mode is excited given specific initial conditions. If the initial conditions are the eigenmode, $\mathbf{u}_i$, then all but the $i$th $c_i$ will be zero. ExerciseShow that if $\mathbf{q}_0 = \mathbf{v}_i$ then $c_i = 1$ and all other modal participation factors are 0. Also, report all of the phase angles, $\phi_i$, in degrees.
###Code
for i in range(4):
x0 = S[:, i]
xd0 = np.zeros(4)
print(x0)
q0 = L.T @ x0
qd0 = L.T @ xd0
    phis = np.arctan2(ws * P.T @ q0, P.T @ qd0)  # denominator uses the modal initial velocities, as in the phase formula above
print(np.rad2deg(phis))
cs = P.T @ q0 / np.sin(phis)
print(cs)
print('=' * 40)
###Output
_____no_output_____
###Markdown
ExerciseCreate a function called `simulate()` that returns the trajectories of the coordinates given an array of monotonically increasing time values and the initial conditions of the system.It should look like:```pythondef simulate(t, x0, xd0): """Returns the state trajectory. Parameters ========== t : ndarray, shape(m,) Monotonic values of time. x0 : ndarray, shape(n,) The initial conditions of each coordinate. xd0 : ndarray, shape(n,) The initial conditions of each speed. Returns ======= x : ndarray, shape(m, n) The trajectories of each state. """ your code here return x```
###Code
def simulate(t, x0, xd0):
q0 = L.T @ x0
qd0 = L.T @ xd0
    phis = np.arctan2(ws * P.T @ q0, P.T @ qd0)  # modal initial velocities in the denominator, matching the phase formula
cs = P.T @ q0 / np.sin(phis)
x = np.zeros((len(x0), len(t)))
for ci, wi, phii, ui in zip(cs, ws, phis, S.T):
x += ci * np.sin(wi * t + phii) * np.tile(ui, (len(t), 1)).T
return x
###Output
_____no_output_____
###Markdown
ExerciseUsing the plotting function below, show that the results found here are the same as the simulations from the `FourStoryBuildingSystem` given the same initial conditions.
###Code
def plot_trajectories(t, x):
fig, axes = plt.subplots(4, 1)
for i, ax in enumerate(axes.flatten()):
ax.plot(t, x[i])
ax.set_ylabel(r'$x_{}$ [m]'.format(i + 1))
ax.set_xlabel('Time [s]')
plt.tight_layout()
t = np.linspace(0, 50, num=50 * 60)
x0 = np.array([0.001, 0.010, 0.020, 0.025])
xd0 = np.zeros(4)
x = simulate(t, x0, xd0)
plot_trajectories(t, x)
###Output
_____no_output_____
###Markdown
This shows the plot of a single mode:
###Code
x = simulate(t, S[:, 0], np.zeros(4))
plot_trajectories(t, x)
###Output
_____no_output_____ |
docs/documentation/utilities/friction_factor.ipynb | ###Markdown
`friction_factor`
###Code
from particula.util import friction_factor
help(friction_factor)
###Output
_____no_output_____ |
notebooks/hacker_news_demo.ipynb | ###Markdown
Hacker News aitextgenA demo on how aitextgen can be used to create bespoke Hacker News submission titles.**NOTE**: This is released as a proof of concept for mini-GPT-2 models; quality of titles may vary.
###Code
from aitextgen import aitextgen
###Output
_____no_output_____
###Markdown
Loading the Hacker News ModelThe `minimaxir/hacker-news` model was finetuned on HN submissions up until May 12th with at least 5 points. It uses a custom GPT-2 architecture that is only 30 MB on disk (compared to the 124M-parameter GPT-2's 500 MB on disk). Running the cell will download the model and cache it into `/aitextgen`.
###Code
ai = aitextgen(model="minimaxir/hacker-news")
###Output
INFO:aitextgen:Loading minimaxir/hacker-news model from /aitextgen.
###Markdown
GenerationSince the model is so small, generation happens almost immediately, even in bulk.
###Code
ai.generate()
ai.generate(5)
###Output
Ask HN: Should I start writing a blog post in Python?
==========
The Psychology of Human Misjudgment (2012)
==========
New York' New Year: $99 Linux PC
==========
C++11/12 Released
==========
Dynamic types in Go
###Markdown
Prompted InputYou can seed input with a `prompt` to get specific types of HN posts. The prompt will be **bolded** in the output.
###Code
ai.generate(5, prompt="Ask HN")
ai.generate(5, prompt="Show HN")
ai.generate(5, prompt="Elon Musk")
ai.generate(5, prompt="Google says")
###Output
[1mGoogle says[0m its employees are working with Amazon and Apple
==========
[1mGoogle says[0m it’s peaked
==========
[1mGoogle says[0m it is flea banning visible to people who worked in U.S.
==========
[1mGoogle says[0m it will not allow enemy mine to secure sensitive information
==========
[1mGoogle says[0m no to Google for Java
###Markdown
Bulk Generation to FileYou can use `generate_to_file()` to create many HN titles.
###Code
ai.generate_to_file(1000, batch_size=20)
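# A quick peek at the generated file (a sketch): the file name below comes from the
# INFO log this cell prints, so adjust it to match your own run.
with open("ATG_20200517_235441_14821584.txt") as f:
    print(f.read()[:500])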
###Output
INFO:aitextgen:Generating 1,000 texts to ATG_20200517_235441_14821584.txt
|
3.2_Simple-Scatter-Plots.ipynb | ###Markdown
Simple Scatter Plots Another commonly used plot type is the simple scatter plot, a close cousin of the line plot.Instead of points being joined by line segments, here the points are represented individually with a dot, circle, or other shape.We’ll start by setting up the notebook for plotting and importing the functions we will use:
###Code
%matplotlib inline
import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')
import numpy as np
###Output
_____no_output_____
###Markdown
Scatter Plots with ``plt.plot``In the previous section we looked at ``plt.plot``/``ax.plot`` to produce line plots.It turns out that this same function can produce scatter plots as well:
###Code
x = np.linspace(0, 10, 30)
y = np.sin(x)
plt.plot(x, y, 'o', color='black');
###Output
_____no_output_____
###Markdown
The third argument in the function call is a character that represents the type of symbol used for the plotting. Just as you can specify options such as ``'-'``, ``'--'`` to control the line style, the marker style has its own set of short string codes. The full list of available symbols can be seen in the documentation of ``plt.plot``, or in Matplotlib's online documentation. Most of the possibilities are fairly intuitive, and we'll show a number of the more common ones here:
###Code
rng = np.random.RandomState(0)
for marker in ['o', '.', ',', 'x', '+', 'v', '^', '<', '>', 's', 'd']:
plt.plot(rng.rand(5), rng.rand(5), marker,
label="marker='{0}'".format(marker))
plt.legend(numpoints=1)
plt.xlim(0, 1.8);
###Output
_____no_output_____
###Markdown
For even more possibilities, these character codes can be used together with line and color codes to plot points along with a line connecting them:
###Code
plt.plot(x, y, '-ok');
###Output
_____no_output_____
###Markdown
Additional keyword arguments to ``plt.plot`` specify a wide range of properties of the lines and markers:
###Code
plt.plot(x, y, '-p', color='gray',
markersize=15, linewidth=4,
markerfacecolor='white',
markeredgecolor='gray',
markeredgewidth=2)
plt.ylim(-1.2, 1.2);
###Output
_____no_output_____
###Markdown
This type of flexibility in the ``plt.plot`` function allows for a wide variety of possible visualization options.For a full description of the options available, refer to the ``plt.plot`` documentation. Scatter Plots with ``plt.scatter``A second, more powerful method of creating scatter plots is the ``plt.scatter`` function, which can be used very similarly to the ``plt.plot`` function:
###Code
plt.scatter(x, y, marker='o');
###Output
_____no_output_____
###Markdown
The primary difference of ``plt.scatter`` from ``plt.plot`` is that it can be used to create scatter plots where the properties of each individual point (size, face color, edge color, etc.) can be individually controlled or mapped to data.Let's show this by creating a random scatter plot with points of many colors and sizes.In order to better see the overlapping results, we'll also use the ``alpha`` keyword to adjust the transparency level:
###Code
rng = np.random.RandomState(0)
x = rng.randn(100)
y = rng.randn(100)
colors = rng.rand(100)
sizes = 1000 * rng.rand(100)
plt.scatter(x, y, c=colors, s=sizes, alpha=0.3,
cmap='viridis')
plt.colorbar(); # show color scale
###Output
_____no_output_____
###Markdown
Notice that the color argument is automatically mapped to a color scale (shown here by the ``colorbar()`` command), and that the size argument is given in pixels.In this way, the color and size of points can be used to convey information in the visualization, in order to visualize multidimensional data.For example, we might use the Iris data from Scikit-Learn, where each sample is one of three types of flowers that has had the size of its petals and sepals carefully measured:
###Code
from sklearn.datasets import load_iris
iris = load_iris()
features = iris.data.T
plt.scatter(features[0], features[1], alpha=0.2,
s=100*features[3], c=iris.target, cmap='viridis')
plt.xlabel(iris.feature_names[0])
plt.ylabel(iris.feature_names[1]);
###Output
_____no_output_____ |
notebooks/03_categorical_pipeline_sol_01.ipynb | ###Markdown
📃 Solution for Exercise M1.04The goal of this exercise is to evaluate the impact of using an arbitrary integer encoding for categorical variables along with a linear classification model such as Logistic Regression. To do so, let's try to use `OrdinalEncoder` to preprocess the categorical variables. This preprocessor is assembled in a pipeline with `LogisticRegression`. The generalization performance of the pipeline can be evaluated by cross-validation and then compared to the score obtained when using `OneHotEncoder` or to some other baseline score. First, we load the dataset.
###Code
import pandas as pd
adult_census = pd.read_csv("../datasets/adult-census.csv")
target_name = "class"
target = adult_census[target_name]
data = adult_census.drop(columns=[target_name, "education-num"])
###Output
_____no_output_____
###Markdown
In the previous notebook, we used `sklearn.compose.make_column_selector` toautomatically select columns with a specific data type (also called `dtype`).Here, we will use this selector to get only the columns containing strings(column with `object` dtype) that correspond to categorical features in ourdataset.
###Code
from sklearn.compose import make_column_selector as selector
categorical_columns_selector = selector(dtype_include=object)
categorical_columns = categorical_columns_selector(data)
data_categorical = data[categorical_columns]
###Output
_____no_output_____
###Markdown
We filter our dataset so that it contains only categorical features. Define a scikit-learn pipeline composed of an `OrdinalEncoder` and a `LogisticRegression` classifier. Because `OrdinalEncoder` can raise errors if it sees an unknown category at prediction time, you can set the `handle_unknown="use_encoded_value"` and `unknown_value` parameters. You can refer to the [scikit-learn documentation](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.OrdinalEncoder.html) for more details regarding these parameters.
###Code
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import OrdinalEncoder
from sklearn.linear_model import LogisticRegression
model = make_pipeline(
OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1),
LogisticRegression(max_iter=500))
###Output
_____no_output_____
###Markdown
Your model is now defined. Evaluate it using a cross-validation using`sklearn.model_selection.cross_validate`.
###Code
from sklearn.model_selection import cross_validate
cv_results = cross_validate(model, data_categorical, target)
scores = cv_results["test_score"]
print("The mean cross-validation accuracy is: "
f"{scores.mean():.3f} +/- {scores.std():.3f}")
###Output
_____no_output_____
###Markdown
Using an arbitrary mapping from string labels to integers as done here causesthe linear model to make bad assumptions on the relative ordering ofcategories.This prevents the model from learning anything predictive enough and thecross-validated score is even lower than the baseline we obtained by ignoringthe input data and just constantly predicting the most frequent class:
###Code
from sklearn.dummy import DummyClassifier
cv_results = cross_validate(DummyClassifier(strategy="most_frequent"),
data_categorical, target)
scores = cv_results["test_score"]
print("The mean cross-validation accuracy is: "
f"{scores.mean():.3f} +/- {scores.std():.3f}")
###Output
_____no_output_____
###Markdown
Now, we would like to compare the generalization performance of our previousmodel with a new model where instead of using an `OrdinalEncoder`, we willuse a `OneHotEncoder`. Repeat the model evaluation using cross-validation.Compare the score of both models and conclude on the impact of choosing aspecific encoding strategy when using a linear model.
###Code
from sklearn.preprocessing import OneHotEncoder
model = make_pipeline(
OneHotEncoder(handle_unknown="ignore"),
LogisticRegression(max_iter=500))
cv_results = cross_validate(model, data_categorical, target)
scores = cv_results["test_score"]
print("The mean cross-validation accuracy is: "
f"{scores.mean():.3f} +/- {scores.std():.3f}")
###Output
_____no_output_____
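###Markdown
 As a side note (not part of the original exercise), it can be instructive to fit the one-hot encoder on its own and see how many binary columns it creates for these categorical features; the sketch below reuses the `data_categorical` frame defined earlier.
###Code
# Fit the encoder alone and inspect the shape of the encoded matrix.
encoder = OneHotEncoder(handle_unknown="ignore")
print(encoder.fit_transform(data_categorical).shape)
###Output
_____no_output_____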
###Markdown
📃 Solution for Exercise M1.04The goal of this exercise is to evaluate the impact of using an arbitraryinteger encoding for categorical variables along with a linearclassification model such as Logistic Regression.To do so, let's try to use `OrdinalEncoder` to preprocess the categoricalvariables. This preprocessor is assembled in a pipeline with`LogisticRegression`. The statistical performance of the pipeline can beevaluated as usual by cross-validation and then compared to the scoreobtained when using `OneHotEncoder` or to some other baseline score.Because `OrdinalEncoder` can raise errors if it sees an unknown category atprediction time, you can set the `handle_unknown` and `unknown_value`parameters.
###Code
import pandas as pd
adult_census = pd.read_csv("../datasets/adult-census.csv")
target_name = "class"
target = adult_census[target_name]
data = adult_census.drop(columns=[target_name, "education-num"])
###Output
_____no_output_____
###Markdown
We can select the categorical columns based on the `object` dtype.
###Code
from sklearn.compose import make_column_selector as selector
categorical_columns_selector = selector(dtype_include=object)
categorical_columns = categorical_columns_selector(data)
data_categorical = data[categorical_columns]
###Output
_____no_output_____
###Markdown
Now, let's make our predictive pipeline by encoding categories with an ordinal encoder before feeding a logistic regression.
###Code
from sklearn.model_selection import cross_validate
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import OrdinalEncoder
from sklearn.linear_model import LogisticRegression
model = make_pipeline(
OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1),
LogisticRegression(max_iter=500))
cv_results = cross_validate(model, data_categorical, target)
scores = cv_results["test_score"]
print("The mean cross-validation accuracy is: "
f"{scores.mean():.3f} +/- {scores.std():.3f}")
###Output
_____no_output_____
###Markdown
Using an arbitrary mapping from string labels to integers as done here causesthe linear model to make bad assumptions on the relative ordering ofcategories.This prevents the model from learning anything predictive enough and thecross-validated score is even lower than the baseline we obtained by ignoringthe input data and just constantly predicting the most frequent class:
###Code
from sklearn.dummy import DummyClassifier
cv_results = cross_validate(DummyClassifier(strategy="most_frequent"),
data_categorical, target)
scores = cv_results["test_score"]
print("The mean cross-validation accuracy is: "
f"{scores.mean():.3f} +/- {scores.std():.3f}")
###Output
_____no_output_____
###Markdown
By comparison, a categorical encoding that does not assume any ordering inthe categories can lead to a significantly higher score:
###Code
from sklearn.preprocessing import OneHotEncoder
model = make_pipeline(
OneHotEncoder(handle_unknown="ignore"),
LogisticRegression(max_iter=500))
cv_results = cross_validate(model, data_categorical, target)
scores = cv_results["test_score"]
print("The mean cross-validation accuracy is: "
f"{scores.mean():.3f} +/- {scores.std():.3f}")
###Output
_____no_output_____
###Markdown
📃 Solution for Exercise 01The goal of this exercise is to evaluate the impact of using an arbitraryinteger encoding for categorical variables along with a linearclassification model such as Logistic Regression.To do so, let's try to use `OrdinalEncoder` to preprocess the categoricalvariables. This preprocessor is assembled in a pipeline with`LogisticRegression`. The statistical performance of the pipeline can beevaluated as usual by cross-validation and then compared to the scoreobtained when using `OneHotEncoder` or to some other baseline score.Because `OrdinalEncoder` can raise errors if it sees an unknown category atprediction time, you can set the `handle_unknown` and `unknown_value`parameters.
###Code
import pandas as pd
adult_census = pd.read_csv("../datasets/adult-census.csv")
target_name = "class"
target = adult_census[target_name]
data = adult_census.drop(columns=[target_name, "fnlwgt", "education-num"])
###Output
_____no_output_____
###Markdown
We can select the categorical columns based on the `object` dtype.
###Code
from sklearn.compose import make_column_selector as selector
categorical_columns_selector = selector(dtype_include=object)
categorical_columns = categorical_columns_selector(data)
data_categorical = data[categorical_columns]
###Output
_____no_output_____
###Markdown
Now, let's make our predictive pipeline by encoding categories with an ordinal encoder before feeding a logistic regression.
###Code
from sklearn.model_selection import cross_validate
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import OrdinalEncoder
from sklearn.linear_model import LogisticRegression
model = make_pipeline(
OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1),
LogisticRegression(max_iter=500))
cv_results = cross_validate(model, data_categorical, target)
scores = cv_results["test_score"]
print(f"The different scores obtained are: \n{scores}")
print(f"The accuracy is: {scores.mean():.3f} +- {scores.std():.3f}")
###Output
_____no_output_____
###Markdown
Using an arbitrary mapping from string labels to integers as done here causesthe linear model to make bad assumptions on the relative ordering ofcategories.This prevents the model from learning anything predictive enough and thecross-validated score is even lower than the baseline we obtained by ignoringthe input data and just constantly predicting the most frequent class:
###Code
from sklearn.dummy import DummyClassifier
cv_results = cross_validate(DummyClassifier(strategy="most_frequent"),
data_categorical, target)
scores = cv_results["test_score"]
print(f"The different scores obtained are: \n{scores}")
print(f"The accuracy is: {scores.mean():.3f} +- {scores.std():.3f}")
###Output
_____no_output_____
###Markdown
By comparison, a categorical encoding that does not assume any ordering inthe categories can lead to a significantly higher score:
###Code
from sklearn.preprocessing import OneHotEncoder
model = make_pipeline(
OneHotEncoder(handle_unknown="ignore"),
LogisticRegression(max_iter=500))
cv_results = cross_validate(model, data_categorical, target)
scores = cv_results["test_score"]
print(f"The different scores obtained are: \n{scores}")
print(f"The accuracy is: {scores.mean():.3f} +- {scores.std():.3f}")
###Output
_____no_output_____
###Markdown
📃 Solution for Exercise M1.04The goal of this exercise is to evaluate the impact of using an arbitraryinteger encoding for categorical variables along with a linearclassification model such as Logistic Regression.To do so, let's try to use `OrdinalEncoder` to preprocess the categoricalvariables. This preprocessor is assembled in a pipeline with`LogisticRegression`. The generalization performance of the pipeline can beevaluated by cross-validation and then compared to the score obtained whenusing `OneHotEncoder` or to some other baseline score.First, we load the dataset.
###Code
import pandas as pd
adult_census = pd.read_csv("../datasets/adult-census.csv")
target_name = "class"
target = adult_census[target_name]
data = adult_census.drop(columns=[target_name, "education-num"])
###Output
_____no_output_____
###Markdown
In the previous notebook, we used `sklearn.compose.make_column_selector` toautomatically select columns with a specific data type (also called `dtype`).Here, we will use this selector to get only the columns containing strings(column with `object` dtype) that correspond to categorical features in ourdataset.
###Code
from sklearn.compose import make_column_selector as selector
categorical_columns_selector = selector(dtype_include=object)
categorical_columns = categorical_columns_selector(data)
data_categorical = data[categorical_columns]
###Output
_____no_output_____
###Markdown
Define a scikit-learn pipeline composed of an `OrdinalEncoder` and a`LogisticRegression` classifier.Because `OrdinalEncoder` can raise errors if it sees an unknown category atprediction time, you can set the `handle_unknown="use_encoded_value"` and`unknown_value` parameters. You can refer to the[scikit-learn documentation](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.OrdinalEncoder.html)for more details regarding these parameters.
###Code
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import OrdinalEncoder
from sklearn.linear_model import LogisticRegression
# solution
model = make_pipeline(
OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1),
LogisticRegression(max_iter=500))
###Output
_____no_output_____
###Markdown
Your model is now defined. Evaluate it using a cross-validation using`sklearn.model_selection.cross_validate`.
###Code
from sklearn.model_selection import cross_validate
# solution
cv_results = cross_validate(model, data_categorical, target)
scores = cv_results["test_score"]
print("The mean cross-validation accuracy is: "
f"{scores.mean():.3f} +/- {scores.std():.3f}")
###Output
_____no_output_____
###Markdown
Using an arbitrary mapping from string labels to integers as done here causesthe linear model to make bad assumptions on the relative ordering ofcategories.This prevents the model from learning anything predictive enough and thecross-validated score is even lower than the baseline we obtained by ignoringthe input data and just constantly predicting the most frequent class:
###Code
from sklearn.dummy import DummyClassifier
cv_results = cross_validate(DummyClassifier(strategy="most_frequent"),
data_categorical, target)
scores = cv_results["test_score"]
print("The mean cross-validation accuracy is: "
f"{scores.mean():.3f} +/- {scores.std():.3f}")
###Output
_____no_output_____
###Markdown
Now, we would like to compare the generalization performance of our previousmodel with a new model where instead of using an `OrdinalEncoder`, we willuse a `OneHotEncoder`. Repeat the model evaluation using cross-validation.Compare the score of both models and conclude on the impact of choosing aspecific encoding strategy when using a linear model.
###Code
from sklearn.preprocessing import OneHotEncoder
# solution
model = make_pipeline(
OneHotEncoder(handle_unknown="ignore"),
LogisticRegression(max_iter=500))
cv_results = cross_validate(model, data_categorical, target)
scores = cv_results["test_score"]
print("The mean cross-validation accuracy is: "
f"{scores.mean():.3f} +/- {scores.std():.3f}")
###Output
_____no_output_____
###Markdown
📃 Solution for Exercise M1.04The goal of this exercise is to evaluate the impact of using an arbitraryinteger encoding for categorical variables along with a linearclassification model such as Logistic Regression.To do so, let's try to use `OrdinalEncoder` to preprocess the categoricalvariables. This preprocessor is assembled in a pipeline with`LogisticRegression`. The generalization performance of the pipeline can beevaluated by cross-validation and then compared to the score obtained whenusing `OneHotEncoder` or to some other baseline score.First, we load the dataset.
###Code
import pandas as pd
adult_census = pd.read_csv("../datasets/adult-census.csv")
target_name = "class"
target = adult_census[target_name]
data = adult_census.drop(columns=[target_name, "education-num"])
###Output
_____no_output_____
###Markdown
In the previous notebook, we used `sklearn.compose.make_column_selector` toautomatically select columns with a specific data type (also called `dtype`).Here, we will use this selector to get only the columns containing strings(column with `object` dtype) that correspond to categorical features in ourdataset.
###Code
from sklearn.compose import make_column_selector as selector
categorical_columns_selector = selector(dtype_include=object)
categorical_columns = categorical_columns_selector(data)
data_categorical = data[categorical_columns]
###Output
_____no_output_____
###Markdown
Define a scikit-learn pipeline composed of an `OrdinalEncoder` and a`LogisticRegression` classifier.Because `OrdinalEncoder` can raise errors if it sees an unknown category atprediction time, you can set the `handle_unknown="use_encoded_value"` and`unknown_value` parameters. You can refer to the[scikit-learn documentation](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.OrdinalEncoder.html)for more details regarding these parameters.
###Code
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import OrdinalEncoder
from sklearn.linear_model import LogisticRegression
# solution
model = make_pipeline(
OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1),
LogisticRegression(max_iter=500))
###Output
_____no_output_____
###Markdown
Your model is now defined. Evaluate it using cross-validation with `sklearn.model_selection.cross_validate`.NoteBe aware that if an error happens during the cross-validation, cross_validate will raise a warning and return NaN (Not a Number) as scores. To make it raise a standard Python exception with a traceback, you can pass the error_score="raise" argument in the call to cross_validate. An exception will be raised instead of a warning at the first encountered problem and cross_validate will stop right away instead of returning NaN values. This is particularly handy when developing complex machine learning pipelines.
###Code
from sklearn.model_selection import cross_validate
# solution
cv_results = cross_validate(model, data_categorical, target)
scores = cv_results["test_score"]
print("The mean cross-validation accuracy is: "
f"{scores.mean():.3f} +/- {scores.std():.3f}")
###Output
_____no_output_____
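###Markdown
 As the note above suggests, `error_score="raise"` can be passed to `cross_validate` while debugging; a minimal sketch, reusing the pipeline and data defined above:
###Code
# Re-run the evaluation but fail loudly on any error instead of returning NaN scores.
cv_results_debug = cross_validate(model, data_categorical, target,
                                  error_score="raise")
print(cv_results_debug["test_score"])
###Output
_____no_output_____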
###Markdown
Using an arbitrary mapping from string labels to integers as done here causesthe linear model to make bad assumptions on the relative ordering ofcategories.This prevents the model from learning anything predictive enough and thecross-validated score is even lower than the baseline we obtained by ignoringthe input data and just constantly predicting the most frequent class:
###Code
from sklearn.dummy import DummyClassifier
cv_results = cross_validate(DummyClassifier(strategy="most_frequent"),
data_categorical, target)
scores = cv_results["test_score"]
print("The mean cross-validation accuracy is: "
f"{scores.mean():.3f} +/- {scores.std():.3f}")
###Output
_____no_output_____
###Markdown
Now, we would like to compare the generalization performance of our previousmodel with a new model where instead of using an `OrdinalEncoder`, we willuse a `OneHotEncoder`. Repeat the model evaluation using cross-validation.Compare the score of both models and conclude on the impact of choosing aspecific encoding strategy when using a linear model.
###Code
from sklearn.preprocessing import OneHotEncoder
# solution
model = make_pipeline(
OneHotEncoder(handle_unknown="ignore"),
LogisticRegression(max_iter=500))
cv_results = cross_validate(model, data_categorical, target)
scores = cv_results["test_score"]
print("The mean cross-validation accuracy is: "
f"{scores.mean():.3f} +/- {scores.std():.3f}")
###Output
_____no_output_____
###Markdown
📃 Solution for Exercise M1.04The goal of this exercise is to evaluate the impact of using an arbitraryinteger encoding for categorical variables along with a linearclassification model such as Logistic Regression.To do so, let's try to use `OrdinalEncoder` to preprocess the categoricalvariables. This preprocessor is assembled in a pipeline with`LogisticRegression`. The statistical performance of the pipeline can beevaluated by cross-validation and then compared to the score obtained whenusing `OneHotEncoder` or to some other baseline score.First, we load the dataset.
###Code
import pandas as pd
adult_census = pd.read_csv("../datasets/adult-census.csv")
target_name = "class"
target = adult_census[target_name]
data = adult_census.drop(columns=[target_name, "education-num"])
###Output
_____no_output_____
###Markdown
In the previous notebook, we used `sklearn.compose.make_column_selector` toautomatically select columns with a specific data type (also called `dtype`).Here, we will use this selector to get only the columns containing strings(column with `object` dtype) that correspond to categorical features in ourdataset.
###Code
from sklearn.compose import make_column_selector as selector
categorical_columns_selector = selector(dtype_include=object)
categorical_columns = categorical_columns_selector(data)
data_categorical = data[categorical_columns]
###Output
_____no_output_____
###Markdown
We filter our dataset so that it contains only categorical features. Define a scikit-learn pipeline composed of an `OrdinalEncoder` and a `LogisticRegression` classifier. Because `OrdinalEncoder` can raise errors if it sees an unknown category at prediction time, you can set the `handle_unknown="use_encoded_value"` and `unknown_value` parameters. You can refer to the [scikit-learn documentation](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.OrdinalEncoder.html) for more details regarding these parameters.
###Code
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import OrdinalEncoder
from sklearn.linear_model import LogisticRegression
model = make_pipeline(
OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1),
LogisticRegression(max_iter=500))
###Output
_____no_output_____
###Markdown
Your model is now defined. Evaluate it using a cross-validation using`sklearn.model_selection.cross_validate`.
###Code
from sklearn.model_selection import cross_validate
cv_results = cross_validate(model, data_categorical, target)
scores = cv_results["test_score"]
print("The mean cross-validation accuracy is: "
f"{scores.mean():.3f} +/- {scores.std():.3f}")
###Output
The mean cross-validation accuracy is: 0.755 +/- 0.002
###Markdown
Using an arbitrary mapping from string labels to integers as done here causesthe linear model to make bad assumptions on the relative ordering ofcategories.This prevents the model from learning anything predictive enough and thecross-validated score is even lower than the baseline we obtained by ignoringthe input data and just constantly predicting the most frequent class:
###Code
from sklearn.dummy import DummyClassifier
cv_results = cross_validate(DummyClassifier(strategy="most_frequent"),
data_categorical, target)
scores = cv_results["test_score"]
print("The mean cross-validation accuracy is: "
f"{scores.mean():.3f} +/- {scores.std():.3f}")
###Output
The mean cross-validation accuracy is: 0.761 +/- 0.000
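###Markdown
To make the issue concrete, here is a small illustration (the toy `education` values below are invented for this example rather than taken from the dataset) of the arbitrary integer codes that `OrdinalEncoder` assigns, by default following alphabetical order:
###Code
# Hypothetical toy column: the encoder maps Bachelors -> 0, HS-grad -> 1,
# Masters -> 2 purely by alphabetical order, not by any meaningful ranking.
toy = pd.DataFrame({"education": ["HS-grad", "Masters", "Bachelors", "HS-grad"]})
OrdinalEncoder().fit_transform(toy)
###Output
_____no_output_____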
###Markdown
Now, we would like to compare the statistical performance of our previous model with a new model where, instead of using an `OrdinalEncoder`, we will use a `OneHotEncoder`. Repeat the model evaluation using cross-validation. Compare the score of both models and conclude on the impact of choosing a specific encoding strategy when using a linear model.
###Code
from sklearn.preprocessing import OneHotEncoder
model = make_pipeline(
OneHotEncoder(handle_unknown="ignore"),
LogisticRegression(max_iter=500))
cv_results = cross_validate(model, data_categorical, target)
scores = cv_results["test_score"]
print("The mean cross-validation accuracy is: "
f"{scores.mean():.3f} +/- {scores.std():.3f}")
###Output
The mean cross-validation accuracy is: 0.833 +/- 0.002
###Markdown
📃 Solution for Exercise M1.04. The goal of this exercise is to evaluate the impact of using an arbitrary integer encoding for categorical variables along with a linear classification model such as Logistic Regression. To do so, let's try to use `OrdinalEncoder` to preprocess the categorical variables. This preprocessor is assembled in a pipeline with `LogisticRegression`. The statistical performance of the pipeline can be evaluated by cross-validation and then compared to the score obtained when using `OneHotEncoder` or to some other baseline score. First, we load the dataset.
###Code
import pandas as pd
adult_census = pd.read_csv("../datasets/adult-census.csv")
target_name = "class"
target = adult_census[target_name]
data = adult_census.drop(columns=[target_name, "education-num"])
###Output
_____no_output_____
###Markdown
In the previous notebook, we used `sklearn.compose.make_column_selector` to automatically select columns with a specific data type (also called `dtype`). Here, we will use this selector to get only the columns containing strings (columns with `object` dtype) that correspond to categorical features in our dataset.
###Code
from sklearn.compose import make_column_selector as selector
categorical_columns_selector = selector(dtype_include=object)
categorical_columns = categorical_columns_selector(data)
data_categorical = data[categorical_columns]
###Output
_____no_output_____
###Markdown
We filter our dataset so that it contains only categorical features. Define a scikit-learn pipeline composed of an `OrdinalEncoder` and a `LogisticRegression` classifier. Because `OrdinalEncoder` can raise errors if it sees an unknown category at prediction time, you can set the `handle_unknown="use_encoded_value"` and `unknown_value` parameters. You can refer to the [scikit-learn documentation](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.OrdinalEncoder.html) for more details regarding these parameters.
###Code
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import OrdinalEncoder
from sklearn.linear_model import LogisticRegression
model = make_pipeline(
OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1),
LogisticRegression(max_iter=500))
###Output
_____no_output_____
###Markdown
Your model is now defined. Evaluate it with cross-validation using `sklearn.model_selection.cross_validate`.
###Code
from sklearn.model_selection import cross_validate
cv_results = cross_validate(model, data_categorical, target)
scores = cv_results["test_score"]
print("The mean cross-validation accuracy is: "
f"{scores.mean():.3f} +/- {scores.std():.3f}")
###Output
_____no_output_____
###Markdown
Using an arbitrary mapping from string labels to integers as done here causes the linear model to make bad assumptions on the relative ordering of categories. This prevents the model from learning anything predictive enough, and the cross-validated score is even lower than the baseline we obtained by ignoring the input data and just constantly predicting the most frequent class:
###Code
from sklearn.dummy import DummyClassifier
cv_results = cross_validate(DummyClassifier(strategy="most_frequent"),
data_categorical, target)
scores = cv_results["test_score"]
print("The mean cross-validation accuracy is: "
f"{scores.mean():.3f} +/- {scores.std():.3f}")
###Output
_____no_output_____
###Markdown
Now, we would like to compare the statistical performance of our previous model with a new model where, instead of using an `OrdinalEncoder`, we will use a `OneHotEncoder`. Repeat the model evaluation using cross-validation. Compare the score of both models and conclude on the impact of choosing a specific encoding strategy when using a linear model.
###Code
from sklearn.preprocessing import OneHotEncoder
model = make_pipeline(
OneHotEncoder(handle_unknown="ignore"),
LogisticRegression(max_iter=500))
cv_results = cross_validate(model, data_categorical, target)
scores = cv_results["test_score"]
print("The mean cross-validation accuracy is: "
f"{scores.mean():.3f} +/- {scores.std():.3f}")
###Output
_____no_output_____
###Markdown
📃 Solution for Exercise 01. The goal of this exercise is to evaluate the impact of using an arbitrary integer encoding for categorical variables along with a linear classification model such as Logistic Regression. To do so, let's try to use `OrdinalEncoder` to preprocess the categorical variables. This preprocessor is assembled in a pipeline with `LogisticRegression`. The statistical performance of the pipeline can be evaluated as usual by cross-validation and then compared to the score obtained when using `OneHotEncoder` or to some other baseline score. Because `OrdinalEncoder` can raise errors if it sees an unknown category at prediction time, you can set the `handle_unknown` and `unknown_value` parameters.
###Code
import pandas as pd
adult_census = pd.read_csv("../datasets/adult-census.csv")
target_name = "class"
target = adult_census[target_name]
data = adult_census.drop(columns=[target_name, "fnlwgt", "education-num"])
###Output
_____no_output_____
###Markdown
We can select the categorical columns based on the `object` dtype.
###Code
from sklearn.compose import make_column_selector as selector
categorical_columns_selector = selector(dtype_include=object)
categorical_columns = categorical_columns_selector(data)
data_categorical = data[categorical_columns]
###Output
_____no_output_____
###Markdown
Now, let's make our predictive pipeline by encoding categories with an ordinal encoder before feeding them to a logistic regression.
###Code
from sklearn.model_selection import cross_validate
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import OrdinalEncoder
from sklearn.linear_model import LogisticRegression
model = make_pipeline(
OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1),
LogisticRegression(max_iter=500))
cv_results = cross_validate(model, data_categorical, target)
scores = cv_results["test_score"]
print(f"The different scores obtained are: \n{scores}")
print(f"The accuracy is: {scores.mean():.3f} +- {scores.std():.3f}")
###Output
_____no_output_____
###Markdown
Using an arbitrary mapping from string labels to integers as done here causes the linear model to make bad assumptions on the relative ordering of categories. This prevents the model from learning anything predictive enough, and the cross-validated score is even lower than the baseline we obtained by ignoring the input data and just constantly predicting the most frequent class:
###Code
from sklearn.dummy import DummyClassifier
cv_results = cross_validate(DummyClassifier(strategy="most_frequent"),
data_categorical, target)
scores = cv_results["test_score"]
print(f"The different scores obtained are: \n{scores}")
print(f"The accuracy is: {scores.mean():.3f} +- {scores.std():.3f}")
###Output
_____no_output_____
###Markdown
By comparison, a categorical encoding that does not assume any ordering in the categories can lead to a significantly higher score:
###Code
from sklearn.preprocessing import OneHotEncoder
model = make_pipeline(
OneHotEncoder(handle_unknown="ignore"),
LogisticRegression(max_iter=500))
cv_results = cross_validate(model, data_categorical, target)
scores = cv_results["test_score"]
print(f"The different scores obtained are: \n{scores}")
print(f"The accuracy is: {scores.mean():.3f} +- {scores.std():.3f}")
###Output
_____no_output_____
###Markdown
📃 Solution for Exercise 01. The goal of this exercise is to evaluate the impact of using an arbitrary integer encoding for categorical variables along with a linear classification model such as Logistic Regression. To do so, let's try to use `OrdinalEncoder` to preprocess the categorical variables. This preprocessor is assembled in a pipeline with `LogisticRegression`. The statistical performance of the pipeline can be evaluated as usual by cross-validation and then compared to the score obtained when using `OneHotEncoder` or to some other baseline score. Because `OrdinalEncoder` can raise errors if it sees an unknown category at prediction time, you can set the `handle_unknown` and `unknown_value` parameters.
###Code
import pandas as pd
adult_census = pd.read_csv("../datasets/adult-census.csv")
target_name = "class"
target = adult_census[target_name]
data = adult_census.drop(columns=[target_name, "education-num"])
###Output
_____no_output_____
###Markdown
We can select the categorical columns based on the `object` dtype.
###Code
from sklearn.compose import make_column_selector as selector
categorical_columns_selector = selector(dtype_include=object)
categorical_columns = categorical_columns_selector(data)
data_categorical = data[categorical_columns]
###Output
_____no_output_____
###Markdown
Now, let's make our predictive pipeline by encoding categories with an ordinal encoder before feeding them to a logistic regression.
###Code
from sklearn.model_selection import cross_validate
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import OrdinalEncoder
from sklearn.linear_model import LogisticRegression
model = make_pipeline(
OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1),
LogisticRegression(max_iter=500))
cv_results = cross_validate(model, data_categorical, target)
scores = cv_results["test_score"]
print("The mean cross-validation accuracy is: "
f"{scores.mean():.3f} +/- {scores.std():.3f}")
###Output
_____no_output_____
###Markdown
Using an arbitrary mapping from string labels to integers as done here causes the linear model to make bad assumptions on the relative ordering of categories. This prevents the model from learning anything predictive enough, and the cross-validated score is even lower than the baseline we obtained by ignoring the input data and just constantly predicting the most frequent class:
###Code
from sklearn.dummy import DummyClassifier
cv_results = cross_validate(DummyClassifier(strategy="most_frequent"),
data_categorical, target)
scores = cv_results["test_score"]
print("The mean cross-validation accuracy is: "
f"{scores.mean():.3f} +/- {scores.std():.3f}")
###Output
_____no_output_____
###Markdown
By comparison, a categorical encoding that does not assume any ordering in the categories can lead to a significantly higher score:
###Code
from sklearn.preprocessing import OneHotEncoder
model = make_pipeline(
OneHotEncoder(handle_unknown="ignore"),
LogisticRegression(max_iter=500))
cv_results = cross_validate(model, data_categorical, target)
scores = cv_results["test_score"]
print("The mean cross-validation accuracy is: "
f"{scores.mean():.3f} +/- {scores.std():.3f}")
###Output
_____no_output_____
###Markdown
📃 Solution for Exercise M1.04. The goal of this exercise is to evaluate the impact of using an arbitrary integer encoding for categorical variables along with a linear classification model such as Logistic Regression. To do so, let's try to use `OrdinalEncoder` to preprocess the categorical variables. This preprocessor is assembled in a pipeline with `LogisticRegression`. The generalization performance of the pipeline can be evaluated by cross-validation and then compared to the score obtained when using `OneHotEncoder` or to some other baseline score. First, we load the dataset.
###Code
import pandas as pd
adult_census = pd.read_csv("../datasets/adult-census.csv")
target_name = "class"
target = adult_census[target_name]
data = adult_census.drop(columns=[target_name, "education-num"])
###Output
_____no_output_____
###Markdown
In the previous notebook, we used `sklearn.compose.make_column_selector` to automatically select columns with a specific data type (also called `dtype`). Here, we will use this selector to get only the columns containing strings (columns with `object` dtype) that correspond to categorical features in our dataset.
###Code
from sklearn.compose import make_column_selector as selector
categorical_columns_selector = selector(dtype_include=object)
categorical_columns = categorical_columns_selector(data)
data_categorical = data[categorical_columns]
###Output
_____no_output_____
###Markdown
Define a scikit-learn pipeline composed of an `OrdinalEncoder` and a `LogisticRegression` classifier. Because `OrdinalEncoder` can raise errors if it sees an unknown category at prediction time, you can set the `handle_unknown="use_encoded_value"` and `unknown_value` parameters. You can refer to the [scikit-learn documentation](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.OrdinalEncoder.html) for more details regarding these parameters.
###Code
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import OrdinalEncoder
from sklearn.linear_model import LogisticRegression
# solution
model = make_pipeline(
OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1),
LogisticRegression(max_iter=500))
###Output
_____no_output_____
###Markdown
Your model is now defined. Evaluate it with cross-validation using `sklearn.model_selection.cross_validate`.
###Code
from sklearn.model_selection import cross_validate
# solution
cv_results = cross_validate(model, data_categorical, target)
scores = cv_results["test_score"]
print("The mean cross-validation accuracy is: "
f"{scores.mean():.3f} +/- {scores.std():.3f}")
###Output
_____no_output_____
###Markdown
Using an arbitrary mapping from string labels to integers as done here causes the linear model to make bad assumptions on the relative ordering of categories. This prevents the model from learning anything predictive enough, and the cross-validated score is even lower than the baseline we obtained by ignoring the input data and just constantly predicting the most frequent class:
###Code
from sklearn.dummy import DummyClassifier
cv_results = cross_validate(DummyClassifier(strategy="most_frequent"),
data_categorical, target)
scores = cv_results["test_score"]
print("The mean cross-validation accuracy is: "
f"{scores.mean():.3f} +/- {scores.std():.3f}")
###Output
_____no_output_____
###Markdown
Now, we would like to compare the generalization performance of our previous model with a new model where, instead of using an `OrdinalEncoder`, we will use a `OneHotEncoder`. Repeat the model evaluation using cross-validation. Compare the score of both models and conclude on the impact of choosing a specific encoding strategy when using a linear model.
###Code
from sklearn.preprocessing import OneHotEncoder
# solution
model = make_pipeline(
OneHotEncoder(handle_unknown="ignore"),
LogisticRegression(max_iter=500))
cv_results = cross_validate(model, data_categorical, target)
scores = cv_results["test_score"]
print("The mean cross-validation accuracy is: "
f"{scores.mean():.3f} +/- {scores.std():.3f}")
###Output
_____no_output_____
###Markdown
📃 Solution for Exercise M1.04. The goal of this exercise is to evaluate the impact of using an arbitrary integer encoding for categorical variables along with a linear classification model such as Logistic Regression. To do so, let's try to use `OrdinalEncoder` to preprocess the categorical variables. This preprocessor is assembled in a pipeline with `LogisticRegression`. The generalization performance of the pipeline can be evaluated by cross-validation and then compared to the score obtained when using `OneHotEncoder` or to some other baseline score. First, we load the dataset.
###Code
import pandas as pd
adult_census = pd.read_csv("../datasets/adult-census.csv")
target_name = "class"
target = adult_census[target_name]
data = adult_census.drop(columns=[target_name, "education-num"])
###Output
_____no_output_____
###Markdown
In the previous notebook, we used `sklearn.compose.make_column_selector` to automatically select columns with a specific data type (also called `dtype`). Here, we will use this selector to get only the columns containing strings (columns with `object` dtype) that correspond to categorical features in our dataset.
###Code
from sklearn.compose import make_column_selector as selector
categorical_columns_selector = selector(dtype_include=object)
categorical_columns = categorical_columns_selector(data)
data_categorical = data[categorical_columns]
###Output
_____no_output_____
###Markdown
Define a scikit-learn pipeline composed of an `OrdinalEncoder` and a `LogisticRegression` classifier. Because `OrdinalEncoder` can raise errors if it sees an unknown category at prediction time, you can set the `handle_unknown="use_encoded_value"` and `unknown_value` parameters. You can refer to the [scikit-learn documentation](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.OrdinalEncoder.html) for more details regarding these parameters.
###Code
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import OrdinalEncoder
from sklearn.linear_model import LogisticRegression
# solution
model = make_pipeline(
OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1),
LogisticRegression(max_iter=500))
###Output
_____no_output_____
###Markdown
Your model is now defined. Evaluate it with cross-validation using `sklearn.model_selection.cross_validate`. Note: if an error happens during cross-validation, `cross_validate` will raise a warning and return NaN (Not a Number) as scores. To make it raise a standard Python exception with a traceback, you can pass the `error_score="raise"` argument in the call to `cross_validate`. An exception will then be raised at the first encountered problem instead of a warning, and `cross_validate` will stop right away instead of returning NaN values. This is particularly handy when developing complex machine learning pipelines.
###Code
from sklearn.model_selection import cross_validate
# solution
cv_results = cross_validate(model, data_categorical, target)
scores = cv_results["test_score"]
print("The mean cross-validation accuracy is: "
f"{scores.mean():.3f} +/- {scores.std():.3f}")
###Output
_____no_output_____
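###Markdown
As a quick illustration of the note above (a sketch only; with the error-free pipeline used here it returns the same scores as before), `error_score="raise"` can be passed like this while debugging:
###Code
# Debugging variant: fail fast with a traceback instead of returning NaN scores
cv_results_debug = cross_validate(model, data_categorical, target,
                                  error_score="raise")
cv_results_debug["test_score"].mean()
###Output
_____no_output_____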
###Markdown
Using an arbitrary mapping from string labels to integers as done here causes the linear model to make bad assumptions on the relative ordering of categories. This prevents the model from learning anything predictive enough, and the cross-validated score is even lower than the baseline we obtained by ignoring the input data and just constantly predicting the most frequent class:
###Code
from sklearn.dummy import DummyClassifier
cv_results = cross_validate(DummyClassifier(strategy="most_frequent"),
data_categorical, target)
scores = cv_results["test_score"]
print("The mean cross-validation accuracy is: "
f"{scores.mean():.3f} +/- {scores.std():.3f}")
###Output
_____no_output_____
###Markdown
Now, we would like to compare the generalization performance of our previous model with a new model where, instead of using an `OrdinalEncoder`, we will use a `OneHotEncoder`. Repeat the model evaluation using cross-validation. Compare the score of both models and conclude on the impact of choosing a specific encoding strategy when using a linear model.
###Code
from sklearn.preprocessing import OneHotEncoder
# solution
model = make_pipeline(
OneHotEncoder(handle_unknown="ignore"),
LogisticRegression(max_iter=500))
cv_results = cross_validate(model, data_categorical, target)
scores = cv_results["test_score"]
print("The mean cross-validation accuracy is: "
f"{scores.mean():.3f} +/- {scores.std():.3f}")
###Output
_____no_output_____ |
Data Analysis/Clean and Analyze Employee Exit Surveys/Clean and Analyze Employee Exit Surveys.ipynb | ###Markdown
Clean and Analyze Employee Exit Surveys. In this project, we'll clean and analyze exit surveys from employees of the Department of Education, Training and Employment (DETE) and the Technical and Further Education (TAFE) body of the Queensland government in Australia. The TAFE exit survey can be found here and the survey for the DETE can be found here. We'll pretend our stakeholders want us to combine the results for both surveys to answer the following question: Are employees who only worked for the institutes for a short period of time resigning due to some kind of dissatisfaction? What about employees who have been there longer? Introduction. First, we'll read in the datasets and do some initial exploration.
###Code
#Read in the data
import pandas as pd
import numpy as np
dete_survey = pd.read_csv('dete_survey.csv')
#Quick exploration of the data
pd.options.display.max_columns = 150 # to avoid truncated output
dete_survey.head()
dete_survey.info()
#Read in the data
tafe_survey = pd.read_csv("tafe_survey.csv")
#Quick exploration of the data
tafe_survey.head()
tafe_survey.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 702 entries, 0 to 701
Data columns (total 72 columns):
Record ID 702 non-null float64
Institute 702 non-null object
WorkArea 702 non-null object
CESSATION YEAR 695 non-null float64
Reason for ceasing employment 701 non-null object
Contributing Factors. Career Move - Public Sector 437 non-null object
Contributing Factors. Career Move - Private Sector 437 non-null object
Contributing Factors. Career Move - Self-employment 437 non-null object
Contributing Factors. Ill Health 437 non-null object
Contributing Factors. Maternity/Family 437 non-null object
Contributing Factors. Dissatisfaction 437 non-null object
Contributing Factors. Job Dissatisfaction 437 non-null object
Contributing Factors. Interpersonal Conflict 437 non-null object
Contributing Factors. Study 437 non-null object
Contributing Factors. Travel 437 non-null object
Contributing Factors. Other 437 non-null object
Contributing Factors. NONE 437 non-null object
Main Factor. Which of these was the main factor for leaving? 113 non-null object
InstituteViews. Topic:1. I feel the senior leadership had a clear vision and direction 608 non-null object
InstituteViews. Topic:2. I was given access to skills training to help me do my job better 613 non-null object
InstituteViews. Topic:3. I was given adequate opportunities for personal development 610 non-null object
InstituteViews. Topic:4. I was given adequate opportunities for promotion within %Institute]Q25LBL% 608 non-null object
InstituteViews. Topic:5. I felt the salary for the job was right for the responsibilities I had 615 non-null object
InstituteViews. Topic:6. The organisation recognised when staff did good work 607 non-null object
InstituteViews. Topic:7. Management was generally supportive of me 614 non-null object
InstituteViews. Topic:8. Management was generally supportive of my team 608 non-null object
InstituteViews. Topic:9. I was kept informed of the changes in the organisation which would affect me 610 non-null object
InstituteViews. Topic:10. Staff morale was positive within the Institute 602 non-null object
InstituteViews. Topic:11. If I had a workplace issue it was dealt with quickly 601 non-null object
InstituteViews. Topic:12. If I had a workplace issue it was dealt with efficiently 597 non-null object
InstituteViews. Topic:13. If I had a workplace issue it was dealt with discreetly 601 non-null object
WorkUnitViews. Topic:14. I was satisfied with the quality of the management and supervision within my work unit 609 non-null object
WorkUnitViews. Topic:15. I worked well with my colleagues 605 non-null object
WorkUnitViews. Topic:16. My job was challenging and interesting 607 non-null object
WorkUnitViews. Topic:17. I was encouraged to use my initiative in the course of my work 610 non-null object
WorkUnitViews. Topic:18. I had sufficient contact with other people in my job 613 non-null object
WorkUnitViews. Topic:19. I was given adequate support and co-operation by my peers to enable me to do my job 609 non-null object
WorkUnitViews. Topic:20. I was able to use the full range of my skills in my job 609 non-null object
WorkUnitViews. Topic:21. I was able to use the full range of my abilities in my job. ; Category:Level of Agreement; Question:YOUR VIEWS ABOUT YOUR WORK UNIT] 608 non-null object
WorkUnitViews. Topic:22. I was able to use the full range of my knowledge in my job 608 non-null object
WorkUnitViews. Topic:23. My job provided sufficient variety 611 non-null object
WorkUnitViews. Topic:24. I was able to cope with the level of stress and pressure in my job 610 non-null object
WorkUnitViews. Topic:25. My job allowed me to balance the demands of work and family to my satisfaction 611 non-null object
WorkUnitViews. Topic:26. My supervisor gave me adequate personal recognition and feedback on my performance 606 non-null object
WorkUnitViews. Topic:27. My working environment was satisfactory e.g. sufficient space, good lighting, suitable seating and working area 610 non-null object
WorkUnitViews. Topic:28. I was given the opportunity to mentor and coach others in order for me to pass on my skills and knowledge prior to my cessation date 609 non-null object
WorkUnitViews. Topic:29. There was adequate communication between staff in my unit 603 non-null object
WorkUnitViews. Topic:30. Staff morale was positive within my work unit 606 non-null object
Induction. Did you undertake Workplace Induction? 619 non-null object
InductionInfo. Topic:Did you undertake a Corporate Induction? 432 non-null object
InductionInfo. Topic:Did you undertake a Institute Induction? 483 non-null object
InductionInfo. Topic: Did you undertake Team Induction? 440 non-null object
InductionInfo. Face to Face Topic:Did you undertake a Corporate Induction; Category:How it was conducted? 555 non-null object
InductionInfo. On-line Topic:Did you undertake a Corporate Induction; Category:How it was conducted? 555 non-null object
InductionInfo. Induction Manual Topic:Did you undertake a Corporate Induction? 555 non-null object
InductionInfo. Face to Face Topic:Did you undertake a Institute Induction? 530 non-null object
InductionInfo. On-line Topic:Did you undertake a Institute Induction? 555 non-null object
InductionInfo. Induction Manual Topic:Did you undertake a Institute Induction? 553 non-null object
InductionInfo. Face to Face Topic: Did you undertake Team Induction; Category? 555 non-null object
InductionInfo. On-line Topic: Did you undertake Team Induction?process you undertook and how it was conducted.] 555 non-null object
InductionInfo. Induction Manual Topic: Did you undertake Team Induction? 555 non-null object
Workplace. Topic:Did you and your Manager develop a Performance and Professional Development Plan (PPDP)? 608 non-null object
Workplace. Topic:Does your workplace promote a work culture free from all forms of unlawful discrimination? 594 non-null object
Workplace. Topic:Does your workplace promote and practice the principles of employment equity? 587 non-null object
Workplace. Topic:Does your workplace value the diversity of its employees? 586 non-null object
Workplace. Topic:Would you recommend the Institute as an employer to others? 581 non-null object
Gender. What is your Gender? 596 non-null object
CurrentAge. Current Age 596 non-null object
Employment Type. Employment Type 596 non-null object
Classification. Classification 596 non-null object
LengthofServiceOverall. Overall Length of Service at Institute (in years) 596 non-null object
LengthofServiceCurrent. Length of Service at current workplace (in years) 596 non-null object
dtypes: float64(2), object(70)
memory usage: 395.0+ KB
###Markdown
We can make the following observations based on the work above:
* The dete_survey dataframe contains 'Not Stated' values that indicate values are missing, but they aren't represented as NaN.
* Both the dete_survey and tafe_survey contain many columns that we don't need to complete our analysis.
* Each dataframe contains many of the same columns, but the column names are different.
* There are multiple columns/answers that indicate an employee resigned because they were dissatisfied.
Identify Missing Values and Drop Unnecessary Columns. First, we'll correct the Not Stated values and drop some of the columns we don't need for our analysis.
###Code
# Read in the data again, but this time read 'Not Stated' values as 'NaN'
dete_survey = pd.read_csv('dete_survey.csv', na_values='Not Stated')
#Quick exploration of the data
dete_survey.head()
# Remove columns we don't need for our analysis
dete_survey_updated = dete_survey.drop(dete_survey.columns[28:49], axis=1)
tafe_survey_updated = tafe_survey.drop(tafe_survey.columns[17:66], axis=1)
#Check that the columns were dropped
print(dete_survey_updated.columns)
print(tafe_survey_updated.columns)
###Output
Index(['ID', 'SeparationType', 'Cease Date', 'DETE Start Date',
'Role Start Date', 'Position', 'Classification', 'Region',
'Business Unit', 'Employment Status', 'Career move to public sector',
'Career move to private sector', 'Interpersonal conflicts',
'Job dissatisfaction', 'Dissatisfaction with the department',
'Physical work environment', 'Lack of recognition',
'Lack of job security', 'Work location', 'Employment conditions',
'Maternity/family', 'Relocation', 'Study/Travel', 'Ill Health',
'Traumatic incident', 'Work life balance', 'Workload',
'None of the above', 'Gender', 'Age', 'Aboriginal', 'Torres Strait',
'South Sea', 'Disability', 'NESB'],
dtype='object')
Index(['Record ID', 'Institute', 'WorkArea', 'CESSATION YEAR',
'Reason for ceasing employment',
'Contributing Factors. Career Move - Public Sector ',
'Contributing Factors. Career Move - Private Sector ',
'Contributing Factors. Career Move - Self-employment',
'Contributing Factors. Ill Health',
'Contributing Factors. Maternity/Family',
'Contributing Factors. Dissatisfaction',
'Contributing Factors. Job Dissatisfaction',
'Contributing Factors. Interpersonal Conflict',
'Contributing Factors. Study', 'Contributing Factors. Travel',
'Contributing Factors. Other', 'Contributing Factors. NONE',
'Gender. What is your Gender?', 'CurrentAge. Current Age',
'Employment Type. Employment Type', 'Classification. Classification',
'LengthofServiceOverall. Overall Length of Service at Institute (in years)',
'LengthofServiceCurrent. Length of Service at current workplace (in years)'],
dtype='object')
###Markdown
Rename Columns. Next, we'll standardize the names of the columns we want to work with, because we eventually want to combine the dataframes.
###Code
# Clean the column names
dete_survey_updated.columns = dete_survey_updated.columns.str.lower().str.strip().str.replace(' ', '_')
# Check that the column names were updated correctly
dete_survey_updated.columns
# Update column names to match the names in dete_survey_updated
mapping = {'Record ID': 'id', 'CESSATION YEAR': 'cease_date', 'Reason for ceasing employment': 'separationtype', 'Gender. What is your Gender?': 'gender', 'CurrentAge. Current Age': 'age',
'Employment Type. Employment Type': 'employment_status',
'Classification. Classification': 'position',
'LengthofServiceOverall. Overall Length of Service at Institute (in years)': 'institute_service',
'LengthofServiceCurrent. Length of Service at current workplace (in years)': 'role_service'}
tafe_survey_updated = tafe_survey_updated.rename(mapping, axis = 1)
# Check that the specified column names were updated correctly
tafe_survey_updated.columns
###Output
_____no_output_____
###Markdown
Filter the Data. For this project, we'll only analyze survey respondents who resigned, so we'll only select separation types containing the string 'Resignation'.
###Code
# Check the unique values for the separationtype column
tafe_survey_updated['separationtype'].value_counts()
# Check the unique values for the separationtype column
dete_survey_updated['separationtype'].value_counts()
# Check the unique values for the separationtype column
dete_survey_updated['separationtype'].value_counts()
# Update all separation types containing the word "resignation" to 'Resignation'
dete_survey_updated['separationtype'] = dete_survey_updated['separationtype'].str.split('-').str[0]
# Check the values in the separationtype column were updated correctly
dete_survey_updated['separationtype'].value_counts()
# Select only the resignation separation types from each dataframe
dete_resignations = dete_survey_updated[dete_survey_updated['separationtype'] == 'Resignation'].copy()
tafe_resignations = tafe_survey_updated[tafe_survey_updated['separationtype'] == 'Resignation'].copy()
###Output
_____no_output_____
###Markdown
Verify the Data. Below, we clean and explore the cease_date and dete_start_date columns to make sure all of the years make sense. We'll use the following criteria:
* Since the cease_date is the last year of the person's employment and the dete_start_date is the person's first year of employment, it wouldn't make sense to have years after the current date.
* Given that most people in this field start working in their 20s, it's also unlikely that the dete_start_date was before the year 1940.
###Code
# Check the unique values
dete_resignations['cease_date'].value_counts()
# Extract the years and convert them to a float type
dete_resignations['cease_date'] = dete_resignations['cease_date'].str.split('/').str[-1]
dete_resignations['cease_date'] = dete_resignations['cease_date'].astype("float")
# Check the values again and look for outliers
dete_resignations['cease_date'].value_counts()
# Check the unique values and look for outliers
dete_resignations['dete_start_date'].value_counts().sort_values()
# Check the unique values
tafe_resignations['cease_date'].value_counts().sort_values()
###Output
_____no_output_____
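###Markdown
As an extra sanity check (added here for illustration; it is not part of the original analysis), we can count how many DETE rows would violate the two criteria above:
###Code
# Rows with a start date before 1940, and rows where the cease date
# precedes the start date; both counts are expected to be zero or very small.
print((dete_resignations['dete_start_date'] < 1940).sum())
print((dete_resignations['cease_date'] < dete_resignations['dete_start_date']).sum())
###Output
_____no_output_____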
###Markdown
Below are our findings:
* The years in both dataframes don't completely align. The tafe_survey_updated dataframe contains some cease dates in 2009, but the dete_survey_updated dataframe does not. The tafe_survey_updated dataframe also contains many more cease dates in 2010 than the dete_survey_updated dataframe. Since we aren't concerned with analyzing the results by year, we'll leave them as is.
Create a New Column. Since our end goal is to answer the question below, we need a column containing the length of time an employee spent in their workplace, or years of service, in both dataframes.
* End goal: Are employees who have only worked for the institutes for a short period of time resigning due to some kind of dissatisfaction? What about employees who have been at the job longer?
The tafe_resignations dataframe already contains a "service" column, which we renamed to institute_service. Below, we calculate the years of service in the dete_survey_updated dataframe by subtracting the dete_start_date from the cease_date and create a new column named institute_service.
###Code
# Calculate the length of time an employee spent in their respective workplace and create a new column
dete_resignations['institute_service'] = dete_resignations['cease_date'] - dete_resignations['dete_start_date']
# Quick check of the result
dete_resignations['institute_service'].head()
###Output
_____no_output_____
###Markdown
Identify Dissatisfied Employees. Next, we'll identify any employees who resigned because they were dissatisfied. Below are the columns we'll use to categorize employees as "dissatisfied" from each dataframe:
1. tafe_survey_updated:
* Contributing Factors. Dissatisfaction
* Contributing Factors. Job Dissatisfaction
2. dete_survey_updated:
* job_dissatisfaction
* dissatisfaction_with_the_department
* physical_work_environment
* lack_of_recognition
* lack_of_job_security
* work_location
* employment_conditions
* work_life_balance
* workload
If the employee indicated any of the factors above caused them to resign, we'll mark them as dissatisfied in a new column. After our changes, the new dissatisfied column will contain just the following values:
* True: indicates a person resigned because they were dissatisfied in some way
* False: indicates a person resigned because of a reason other than dissatisfaction with the job
* NaN: indicates the value is missing
###Code
# Check the unique values
tafe_resignations['Contributing Factors. Dissatisfaction'].value_counts()
# Check the unique values
tafe_resignations['Contributing Factors. Job Dissatisfaction'].value_counts()
# Update the values in the contributing factors columns to be either True, False, or NaN
def update_vals(x):
if x == '-':
return False
elif pd.isnull(x):
return np.nan
else:
return True
tafe_resignations['dissatisfied'] = tafe_resignations[['Contributing Factors. Dissatisfaction', 'Contributing Factors. Job Dissatisfaction']].applymap(update_vals).any(1, skipna=False)
tafe_resignations_up = tafe_resignations.copy()
# Check the unique values after the updates
tafe_resignations_up['dissatisfied'].value_counts(dropna=False)
# Update the values in columns related to dissatisfaction to be either True, False, or NaN
dete_resignations['dissatisfied'] = dete_resignations[['job_dissatisfaction',
'dissatisfaction_with_the_department', 'physical_work_environment',
'lack_of_recognition', 'lack_of_job_security', 'work_location',
'employment_conditions', 'work_life_balance',
'workload']].any(1, skipna=False)
dete_resignations_up = dete_resignations.copy()
dete_resignations_up['dissatisfied'].value_counts(dropna=False)
###Output
_____no_output_____
###Markdown
Combining the Data. Below, we'll add an institute column so that we can differentiate the data from each survey after we combine them. Then, we'll combine the dataframes and drop any remaining columns we don't need.
###Code
# Add an institute column
dete_resignations_up['institute'] = 'DETE'
tafe_resignations_up['institute'] = 'TAFE'
# Combine the dataframes
combined = pd.concat([dete_resignations_up, tafe_resignations_up], ignore_index=True)
# Verify the number of non null values in each column
combined.notnull().sum().sort_values()
# Drop columns with less than 500 non null values
combined_updated = combined.dropna(thresh = 500, axis =1).copy()
###Output
_____no_output_____
###Markdown
Clean the Service Column. Next, we'll clean the institute_service column and categorize employees according to the following definitions:
* New: less than 3 years in the workplace
* Experienced: 3-6 years in the workplace
* Established: 7-10 years in the workplace
* Veteran: 11 or more years in the workplace
Our analysis is based on this article, which makes the argument that understanding employees' needs according to career stage instead of age is more effective.
###Code
# Check the unique values
combined_updated['institute_service'].value_counts(dropna=False)
# Extract the years of service and convert the type to float
combined_updated['institute_service_up'] = combined_updated['institute_service'].astype('str').str.extract(r'(\d+)')
combined_updated['institute_service_up'] = combined_updated['institute_service_up'].astype('float')
# Check the years extracted are correct
combined_updated['institute_service_up'].value_counts()
# Convert years of service to categories
def transform_service(val):
if val >= 11:
return "Veteran"
elif 7 <= val < 11:
return "Established"
elif 3 <= val < 7:
return "Experienced"
elif pd.isnull(val):
return np.nan
else:
return "New"
combined_updated['service_cat'] = combined_updated['institute_service_up'].apply(transform_service)
# Quick check of the update
combined_updated['service_cat'].value_counts()
###Output
_____no_output_____
###Markdown
Perform Some Initial Analysis. Finally, we'll replace the missing values in the dissatisfied column with the most frequent value, False. Then, we'll calculate the percentage of employees who resigned due to dissatisfaction in each service_cat group and plot the results. Note that since we still have additional missing values left to deal with, this is meant to be an initial introduction to the analysis, not the final analysis.
###Code
# Verify the unique values
combined_updated['dissatisfied'].value_counts(dropna=False)
# Replace missing values with the most frequent value, False
combined_updated['dissatisfied'] = combined_updated['dissatisfied'].fillna(False)
# Calculate the percentage of employees who resigned due to dissatisfaction in each category
dis_pct = combined_updated.pivot_table(index='service_cat', values='dissatisfied')
# Plot the results
%matplotlib inline
dis_pct.plot(kind='bar', rot=30)
###Output
_____no_output_____ |
Week 3/numpy.ipynb | ###Markdown
NumPy. NumPy is the core library for scientific computing in Python. It provides a high-performance multidimensional array object, and tools for working with these arrays. Official NumPy documentation: https://numpy.org/doc/stable/reference/
###Code
# Install NumPy
# ! pip install numpy
###Output
_____no_output_____
###Markdown
Since NumPy is not loaded by default in Python, we import the library explicitly. Importing it with an alias (`np`) lets us access all of its functions and types through that prefix.
###Code
# Import NumPy
import numpy as np
###Output
_____no_output_____
###Markdown
NumPy Arrays. A grid of values, all of the same type. **Rank:** the number of dimensions of the array. **Shape:** a tuple of integers giving the size of the array along each dimension.
###Code
# Rank 1 array
a = np.array([1, 2, 3])
print(type(a)) # Prints data type
print(a.shape)
print(a[0], a[1], a[2]) # Indexing
a[0] = 5 # Assigning
print(a)
# Rank 2 array
b = np.array([ [1,2,3],
[4,5,6]
])
'''
# of elements in the outer brackets (rows) => 2
# of elements in each inner bracket (columns) => 3
'''
print(b.shape)
print(b[0, 0], b[0, 1], b[1, 0], b[1,2])
###Output
(2, 3)
1 2 4 6
###Markdown
Special Arrays
###Code
a = np.zeros((6,4)) # Create an array of all zeros
a
np.zeros_like(b,dtype=float)
b = np.ones((3,2)) # Create an array of all ones
b
c = np.full((6,4), 7) # Create a constant array
c
d = np.eye(5) # Create a 5x5 identity matrix
d
e = np.random.random((4,3)) # Create an array filled with random values
e
###Output
_____no_output_____
###Markdown
Indexing
###Code
a = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
a
a[:2,:3]
b = a[:2, 1:3]
b
print(a[0, 1]) # Prints "2"
b[0, 0] = 77 # b[0, 0] is the same piece of data as a[0, 1]
print(a[0, 1]) # Prints "77"
a[1, :]
a[1:2, :]
a[:, 1]
a[:, 1:2]
np.arange(2,10,2)
###Output
_____no_output_____
###Markdown
Boolean array indexing
###Code
a
bool_idx = (a>10)
bool_idx
a[bool_idx]
a [ a>10 ]
###Output
_____no_output_____
###Markdown
Data Types
###Code
x = np.array([1, 2])
print(x.dtype)
x = np.array([1.0, 2.0])
print(x.dtype)
x = np.array([1, 2], dtype=np.float64) # Forcing a particular datatype
print(x,x.dtype)
x.dtype
###Output
_____no_output_____
###Markdown
Operations
###Code
x = np.array([[1,2],[3,4]], dtype=np.float64)
y = np.array([[5,6],[7,8]], dtype=np.float64)
x,y
# Adding two arrays element-wise
print(x + y)
print(np.add(x, y))
# Substracting two arrays element-wise
print(x - y)
print(np.subtract(x, y))
# Mutiplication Element-wise
print(x * y)
print(np.multiply(x, y))
# Elementwise division
print(x / y)
print(np.divide(x, y))
# Elementwise square root
print(np.sqrt(x))
# Matrix Multiplication
print(x.dot(y))
print(np.dot(x, y))
x
# Sum of all elements in the array
np.sum(x)
print(np.sum(x, axis=0)) # Compute sum of each column
print(np.sum(x, axis=1)) # Compute sum of each row
a
# Transpose
a.T
###Output
_____no_output_____
###Markdown
Broadcasting
###Code
x = np.array([[1,2,3], [4,5,6], [7,8,9], [10, 11, 12]])
v = np.array([1, 0, 1])
y = x + v # Add v to each row of x using broadcasting
print(y)
x = np.array([[1,2,3], [4,5,6]])
y = np.array([4,5])
(x.T+y).T
x, x.shape
x.T, x.T.shape
y, y.shape
x.T+y
(x.T+y).T
x*2
x+2
###Output
_____no_output_____ |
notebooks/computer_science/Algorithms_and_data_structures_in_python/maps_and_dictionaries/example_1.ipynb | ###Markdown
Hash Codes. Consider the challenges associated with a 16-bit hash code for a character string `s` that sums the Unicode values of the characters in `s`. For example, let `s = "stop"`. Its Unicode character representation is:
###Code
for char in "stop":
print(char + ': ' + str(ord(char)))
sum([ord(x) for x in "stop"])
###Output
_____no_output_____
###Markdown
If we then sum these unicode values, we arrive as the following hash code:```stop -----------> 454```The problem is, the following strings will all map to the same value!```stop -----------> 454pots -----------> 454tops -----------> 454spot -----------> 454```A better hash code would take into account the _position_ of our characters. Polynomial Hash codeIf we refer to the characters of our string as $x_0, x_1, \dots, x_n$, we can then chose a non-zero constant, $a \neq 1$, and use a hash code:$$a^{n-1} x_0 + a^{n-2} x_1 + \dots + a^1 x_{n-1} + a^0 x_{n}$$This is simply a polynomial in $a$ that has our $x_i$ values as it's coefficients. This is known as a **polynomial** hash code.
###Code
1 << 32
2**32
2 << 2
###Output
_____no_output_____
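###Markdown
A minimal sketch of the polynomial hash code described above (the constant `a = 33` and the truncation to 16 bits are illustrative choices, not prescribed by the text):
###Code
def polynomial_hash(s, a=33, bits=16):
    """Polynomial hash code over the Unicode values of the characters of s."""
    h = 0
    for ch in s:
        # Horner's rule evaluates a^(n-1)*x_0 + ... + a*x_(n-2) + x_(n-1)
        h = h * a + ord(ch)
    return h % (1 << bits)  # keep only a 16-bit code
# Unlike the plain summation, anagrams of "stop" now hash to different values
for word in ["stop", "pots", "tops", "spot"]:
    print(word, '----->', polynomial_hash(word))
###Output
_____no_output_____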
###Markdown
Investigate hash map uniformity
###Code
import random
import numpy as np
import matplotlib.pyplot as plt
%config InlineBackend.figure_format='retina'
n = 0
prime = 109345121
scale = 1 + random.randrange(prime - 1)
shift = random.randrange(prime)
def my_hash_func(k, upper):
table = upper * [None]
hash_code = hash(k)
compressed_code = (hash_code * scale + shift) % prime % len(table)
return compressed_code
upper = 1000
inputs = list(range(0, upper))
hash_results = []
for i in inputs:
hash_results.append(my_hash_func(i, upper))
plt.figure(figsize=(15,10))
plt.plot(inputs, hash_results)
plt.figure(figsize=(15,10))
plt.scatter(inputs, hash_results)
def moving_average(x, w):
return np.convolve(x, np.ones(w), 'valid') / w
averages_over_window_size_5 = moving_average(hash_results, 5)
plt.hist(averages_over_window_size_5)
l = [4, 7, 9, 13, 1, 3, 7]
l1 = [1, 4, 7]; l2 = [3, 9, 13]
def merge_sort(l):
size = len(l)
midway = size // 2
first_half = l[:midway]
second_half = l[midway:]
if len(first_half) > 1 or len(second_half) > 1:
sorted_first_half = merge_sort(first_half)
sorted_second_half = merge_sort(second_half)
else:
sorted_first_half = first_half
sorted_second_half = second_half
sorted_l = merge(sorted_first_half, sorted_second_half)
return sorted_l
def merge(l1, l2):
"""Merge two sorted lists."""
i = 0
j = 0
lmerged = []
while (i <= len(l1) - 1) or (j <= len(l2) - 1):
if i == len(l1):
lmerged.extend(l2[j:])
break
if j == len(l2):
lmerged.extend(l1[i:])
break
if (i < len(l1)) and (l1[i] < l2[j]):
lmerged.append(l1[i])
i += 1
else:
lmerged.append(l2[j])
j += 1
return lmerged
merge_sort(l)
l = [random.choice(list(range(1000))) for x in range(1000)]
%%time
res = sorted(l)
%%time
res = merge_sort(l)
###Output
CPU times: user 6.33 ms, sys: 413 µs, total: 6.74 ms
Wall time: 6.4 ms
|
pymc3/examples/gaussian_mixture_model.ipynb | ###Markdown
Mixture Model in PyMC3. Original NB by Abe Flaxman, modified by Thomas Wiecki.
###Code
import numpy as np
import matplotlib.pyplot as plt
import pymc3 as pm, theano.tensor as tt
# simulate data from a known mixture distribution
np.random.seed(12345) # set random seed for reproducibility
k = 3
ndata = 500
spread = 5
centers = np.array([-spread, 0, spread])
# simulate data from mixture distribution
v = np.random.randint(0, k, ndata)
data = centers[v] + np.random.randn(ndata)
plt.hist(data);
# setup model
model = pm.Model()
with model:
# cluster sizes
a = pm.constant(np.array([1., 1., 1.]))
p = pm.Dirichlet('p', a=a, shape=k)
# ensure all clusters have some points
p_min_potential = pm.Potential('p_min_potential', tt.switch(tt.min(p) < .1, -np.inf, 0))
# cluster centers
means = pm.Normal('means', mu=[0, 0, 0], sd=15, shape=k)
# break symmetry
order_means_potential = pm.Potential('order_means_potential',
tt.switch(means[1]-means[0] < 0, -np.inf, 0)
+ tt.switch(means[2]-means[1] < 0, -np.inf, 0))
# measurement error
sd = pm.Uniform('sd', lower=0, upper=20)
# latent cluster of each observation
category = pm.Categorical('category',
p=p,
shape=ndata)
# likelihood for each observed value
points = pm.Normal('obs',
mu=means[category],
sd=sd,
observed=data)
# fit model
with model:
step1 = pm.Metropolis(vars=[p, sd, means])
step2 = pm.ElemwiseCategoricalStep(var=category, values=[0, 1, 2])
tr = pm.sample(10000, step=[step1, step2])
###Output
[-----------------100%-----------------] 10000 of 10000 complete in 93.9 sec
###Markdown
Full trace
###Code
pm.plots.traceplot(tr, ['p', 'sd', 'means']);
###Output
_____no_output_____
###Markdown
After convergence
###Code
# take a look at traceplot for some model parameters
# (with some burn-in and thinning)
pm.plots.traceplot(tr[5000::5], ['p', 'sd', 'means']);
# I prefer autocorrelation plots for serious confirmation of MCMC convergence
pm.autocorrplot(tr[5000::5], ['sd'])
###Output
_____no_output_____
###Markdown
Sampling of cluster for individual data point
###Code
i=0
plt.plot(tr['category'][5000::5, i], drawstyle='steps-mid')
plt.axis(ymin=-.1, ymax=2.1)
def cluster_posterior(i=0):
print('true cluster:', v[i])
print(' data value:', np.round(data[i],2))
plt.hist(tr['category'][5000::5,i], bins=[-.5,.5,1.5,2.5,], rwidth=.9)
plt.axis(xmin=-.5, xmax=2.5)
plt.xticks([0,1,2])
cluster_posterior(i)
###Output
true cluster: 2
data value: 3.29
###Markdown
Mixture Model in PyMC3. Original NB by Abe Flaxman, modified by Thomas Wiecki.
###Code
import numpy as np
import matplotlib.pyplot as plt
import pymc3 as pm, theano.tensor as tt
# simulate data from a known mixture distribution
np.random.seed(12345) # set random seed for reproducibility
k = 3
ndata = 500
spread = 5
centers = np.array([-spread, 0, spread])
# simulate data from mixture distribution
v = np.random.randint(0, k, ndata)
data = centers[v] + np.random.randn(ndata)
plt.hist(data);
# setup model
model = pm.Model()
with model:
# cluster sizes
a = pm.constant(np.array([1., 1., 1.]))
p = pm.Dirichlet('p', a=a, shape=k)
# ensure all clusters have some points
p_min_potential = pm.Potential('p_min_potential', tt.switch(tt.min(p) < .1, -np.inf, 0))
# cluster centers
means = pm.Normal('means', mu=[0, 0, 0], sd=15, shape=k)
# break symmetry
order_means_potential = pm.Potential('order_means_potential',
tt.switch(means[1]-means[0] < 0, -np.inf, 0)
+ tt.switch(means[2]-means[1] < 0, -np.inf, 0))
# measurement error
sd = pm.Uniform('sd', lower=0, upper=20)
# latent cluster of each observation
category = pm.Categorical('category',
p=p,
shape=ndata)
# likelihood for each observed value
points = pm.Normal('obs',
mu=means[category],
sd=sd,
observed=data)
# fit model
with model:
step1 = pm.Metropolis(vars=[p, sd, means])
step2 = pm.ElemwiseCategoricalStep(vars=[category], values=[0, 1, 2])
tr = pm.sample(10000, step=[step1, step2])
###Output
[-----------------100%-----------------] 10000 of 10000 complete in 93.9 sec
###Markdown
Full trace
###Code
pm.plots.traceplot(tr, ['p', 'sd', 'means']);
###Output
_____no_output_____
###Markdown
After convergence
###Code
# take a look at traceplot for some model parameters
# (with some burn-in and thinning)
pm.plots.traceplot(tr[5000::5], ['p', 'sd', 'means']);
# I prefer autocorrelation plots for serious confirmation of MCMC convergence
pm.autocorrplot(tr[5000::5], varnames=['sd'])
###Output
_____no_output_____
###Markdown
Sampling of cluster for individual data point
###Code
i=0
plt.plot(tr['category'][5000::5, i], drawstyle='steps-mid')
plt.axis(ymin=-.1, ymax=2.1)
def cluster_posterior(i=0):
print('true cluster:', v[i])
print(' data value:', np.round(data[i],2))
plt.hist(tr['category'][5000::5,i], bins=[-.5,.5,1.5,2.5,], rwidth=.9)
plt.axis(xmin=-.5, xmax=2.5)
plt.xticks([0,1,2])
cluster_posterior(i)
###Output
true cluster: 2
data value: 3.29
|
gather/gather_report.ipynb | ###Markdown
Figures 2 and 5 for gather paper
###Code
%matplotlib inline
import pylab
import pandas as pd
###Output
_____no_output_____
###Markdown
Preparation: load genome-grist summary CSVs
###Code
class SampleDFs:
def __init__(self, name, all_df, left_df, gather_df, names_df):
self.name = name
self.all_df = all_df
self.left_df = left_df
self.gather_df = gather_df
self.names_df = names_df
def load_sample_dfs(name, sample_id, subsample_to=None, debug=False):
print(f'loading sample {sample_id}')
# load mapping CSVs
all_df = pd.read_csv(f'inputs/mapping/{sample_id}.summary.csv')
left_df = pd.read_csv(f'inputs/leftover/{sample_id}.summary.csv')
# load gather CSV
gather_df = pd.read_csv(f'inputs/gather/{sample_id}.gather.csv')
# names!
names_df = pd.read_csv(f'inputs/gather/{sample_id}.genomes.info.csv')
# connect gather_df to all_df and left_df using 'genome_id'
def fix_name(x):
return "_".join(x.split('_')[:2]).split('.')[0]
gather_df['genome_id'] = gather_df['name'].apply(fix_name)
names_df['genome_id'] = names_df['ident'].apply(fix_name)
# this ensures that only rows that share genome_id are in all the dataframes
in_gather = set(gather_df.genome_id)
if debug:
print(f'{len(in_gather)} in gather results')
in_left = set(left_df.genome_id)
if debug:
print(f'{len(in_left)} in leftover results')
in_both = in_left.intersection(in_gather)
if debug:
print(f'{len(in_both)} in both')
print('diff gather example:', list(in_gather - in_both)[:5])
print('diff left example:', list(in_left - in_both)[:5])
assert not in_gather - in_both
assert not in_left - in_both
all_df = all_df[all_df.genome_id.isin(in_both)]
left_df = left_df[left_df.genome_id.isin(in_both)]
gather_df = gather_df[gather_df.genome_id.isin(in_both)]
names_df = names_df[names_df.genome_id.isin(in_both)]
# reassign index now that we've maybe dropped rows
all_df.index = range(len(all_df))
left_df.index = range(len(left_df))
gather_df.index = range(len(gather_df))
names_df.index = range(len(names_df))
assert len(all_df) == len(gather_df)
assert len(left_df) == len(gather_df)
assert len(names_df) == len(gather_df)
assert len(names_df) == len(in_both)
#in_left
# re-sort left_df and all_df to match gather_df order, using matching genome_id column
all_df = all_df.set_index("genome_id")
all_df = all_df.reindex(index=gather_df["genome_id"])
all_df = all_df.reset_index()
left_df = left_df.set_index("genome_id")
left_df = left_df.reindex(index=gather_df["genome_id"])
left_df = left_df.reset_index()
#left_df["mapped_bp"] = (1 - left_df["percent missed"]/100) * left_df["genome bp"]
#left_df["unique_mapped_coverage"] = left_df.coverage / (1 - left_df["percent missed"] / 100.0)
names_df = names_df.set_index("genome_id")
names_df = names_df.reindex(index=gather_df["genome_id"])
names_df = names_df.reset_index()
# subsample? take top N...
if subsample_to:
left_df = left_df[:subsample_to]
all_df = all_df[:subsample_to]
gather_df = gather_df[:subsample_to]
names_df = names_df[:subsample_to]
sample_df = SampleDFs(name, all_df, left_df, gather_df, names_df)
return sample_df
SUBSAMPLE_TO = 36
podar_mock = load_sample_dfs('(A) podar mock', 'SRR606249', subsample_to=SUBSAMPLE_TO,)
oil_well = load_sample_dfs('(D) oil well', 'SRR1976948', subsample_to=SUBSAMPLE_TO)
gut = load_sample_dfs('(C) gut', 'p8808mo11', subsample_to=SUBSAMPLE_TO)
zymo_mock = load_sample_dfs('(B) zymo mock', 'SRR12324253', subsample_to=SUBSAMPLE_TO)
###Output
loading sample SRR606249
loading sample SRR1976948
loading sample p8808mo11
loading sample SRR12324253
###Markdown
Figure 2: K-mer decomposition of a metagenome into constituent genomes.
###Code
fig, (ax1, ax2) = pylab.subplots(1, 2, figsize=(10, 8), constrained_layout=True)
#pylab.plot(left_df.covered_bp / 1e6, left_df.iloc[::-1].index, 'b.', label='mapped bp to this genome')
ax1.plot(podar_mock.gather_df.intersect_bp / 1e6, podar_mock.gather_df.iloc[::-1].index, 'g<',
label='total k-mers matched')
ax1.plot(podar_mock.gather_df.unique_intersect_bp / 1e6, podar_mock.gather_df.iloc[::-1].index, 'ro',
label='remaining k-mers matched')
positions = list(podar_mock.gather_df.index)
labels = list(reversed(podar_mock.names_df.display_name))
ax1.set_yticks(positions)
ax1.set_yticklabels(labels, fontsize='small')
ax1.set_xlabel('millions of k-mers')
ax1.axis(ymin=-1, ymax=SUBSAMPLE_TO)
ax1.legend(loc='lower right')
ax1.grid(True, axis='both')
ax2.plot(podar_mock.gather_df.f_match_orig * 100, podar_mock.gather_df.iloc[::-1].index, 'g<', label='total k-mer cover')
ax2.plot(podar_mock.gather_df.f_match * 100, podar_mock.gather_df.iloc[::-1].index, 'ro', label='remaining k-mer cover')
ax2.set_yticks(positions)
ax2.set_yticklabels([])
ax2.set_xlabel('% of genome covered')
ax2.legend(loc='lower left')
ax2.axis(xmin=40, xmax=102)
ax2.axis(ymin=-1, ymax=SUBSAMPLE_TO)
ax2.grid(True)
#fig.tight_layout()
None
fig.savefig('fig2.svg')
###Output
_____no_output_____
###Markdown
Figure 5: Hash-based k-mer decomposition of a metagenome into constituent genomes compares well to bases covered by read mapping.
###Code
import matplotlib.pyplot as plt
fig, axes = plt.subplots(figsize=(20, 12), nrows=2, ncols=2)
samples = (podar_mock, zymo_mock, gut, oil_well)
for n, (ax, sample) in enumerate(zip(axes.flat, samples)):
ax.plot(sample.left_df.index, sample.left_df.covered_bp / 1e6, 'b*', label='genome bases covered by mapped reads')
ax.plot(sample.gather_df.index, sample.gather_df.unique_intersect_bp / 1e6, 'ro', label='remaining genome hashes in metagenome')
ax.plot(sample.gather_df.index, (sample.gather_df.unique_intersect_bp - sample.left_df.covered_bp) / 1e6,
'-', label='difference b/t covered bp and hashes')
ax.plot(sample.gather_df.index, [0]*len(sample.gather_df), '--')
ax.axis(xmin=-0.5, xmax=len(sample.gather_df.index) - 0.5)
positions = list(sample.gather_df.index)
labels = [ i + 1 for i in positions ]
ax.set_xticks(positions)
ax.set_xticklabels(labels)
#print(sample.name, positions)
ax.set_xlabel('genome rank (ordered by gather results)')
ax.set_ylabel('number per genome (million)')
if n == 0:
ax.legend(loc='upper right')
ax.set_title(sample.name)
#ax.label_outer()
fig.tight_layout()
pylab.savefig('fig5.svg')
###Output
_____no_output_____ |
S_GAN_image.ipynb | ###Markdown
Check a sample from the validation dataset
###Code
# to see one image
cover,*rest = next(iter(valid_set))
_, H, W = cover.size()
cover = cover[None].to(device)
text = "We are busy in Neural Networks project. Anyhow, how is your day going?"
payload = make_payload(W, H, data_depth, text)
payload = payload.to(device)
#generated = encoder.forward(cover, payload)
generated = test(encoder,decoder,data_depth,epochs,cover,payload)
text_return = make_message(generated)
print(text_return)
###Output
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
###Markdown
Testing begins (from a loaded model) Test1 - Save steganographic images
###Code
##Take all images from test folder (one by one) and message requested by user to encode
from imageio import imread, imwrite
epochs = 64
data_depth = 4
test_folder = "div2k/myval/_"
save_dir = os.path.join("div2k/myval", str(data_depth)+"_"+str(epochs))
os.makedirs(save_dir, exist_ok=True)  # build the output path, then create the directory (os.mkdir returns None)
for filename in os.listdir(test_folder):
print(os.path.join(test_folder,filename))
cover_im = imread(os.path.join(test_folder,filename), pilmode='RGB') / 127.5 - 1.0
cover = torch.FloatTensor(cover_im).permute(2, 1, 0).unsqueeze(0)
cover_size = cover.size()
# _, _, height, width = cover.size()
text = "We are busy in Neural Networks project. The deadline is near. Anyhow, how is your day going?"
payload = make_payload(cover_size[3], cover_size[2], data_depth, text)
cover = cover.to(device)
payload = payload.to(device)
generated = encoder.forward(cover, payload)[0].clamp(-1.0, 1.0)
#print(generated.size())
generated = (generated.permute(2, 1, 0).detach().cpu().numpy() + 1.0) * 127.5
imwrite(os.path.join("div2k/myval/",str(data_depth)+"_"+str(epochs),(str(data_depth)+"_"+str(epochs)+"_"+filename)), generated.astype('uint8'))
###Output
div2k/myval/_/0805.png
div2k/myval/_/0804.png
div2k/myval/_/0833.png
div2k/myval/_/0855.png
div2k/myval/_/0874.png
div2k/myval/_/0894.png
###Markdown
Test2 - Take a steganographic image from a folder and decode
###Code
##[Individual]Take an image requested by user to decode
from imageio import imread, imwrite
steg_folder = "div2k/myval/4_64"
filename = "4_64_0855.png"
image = imread(os.path.join(steg_folder,filename), pilmode='RGB') / 127.5 - 1.0
plt.imshow(image)
image = torch.FloatTensor(image).permute(2, 1, 0).unsqueeze(0)
text_return = make_message(image)
print(text_return)
#f = open(steg_folder+".csv", "a")
#f.write("\n" + filename + "\t" + str(text_return))
###Output
WARNING:matplotlib.image:Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:22: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).
###Markdown
Test3 - Encode to decode in one cell
###Code
##Input to outut (both encode decode in one cell)
from imageio import imread, imwrite
cover_im = imread("div2k/myval/_/0805.png", pilmode='RGB') / 127.5 - 1.0
plt.imshow(cover_im)
cover = torch.FloatTensor(cover_im).permute(2, 1, 0).unsqueeze(0)
cover_size = cover.size()
# _, _, height, width = cover.size()
text = "We are busy in Neural Networks project. Anyhow, how is your day going?"
payload = make_payload(cover_size[3], cover_size[2], data_depth, text)
cover = cover.to(device)
payload = payload.to(device)
generated = encoder.forward(cover, payload)
text_return = make_message(generated)
print(text_return)
###Output
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:22: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).
###Markdown
Generate Difference Image
###Code
from skimage.metrics import structural_similarity as ssim
from imageio import imread, imwrite
diff_epochs = 64
diff_data_depth = 4
cover_folder = "div2k/myval/_"
steg_folder = "div2k/myval/"+str(diff_data_depth)+"_"+str(diff_epochs)
for filename in os.listdir(cover_folder):
print(os.path.join(cover_folder,filename))
cover = imread(os.path.join(cover_folder,filename), as_gray=True)
gen = imread(os.path.join(steg_folder,str(diff_data_depth)+"_"+str(diff_epochs)+"_"+filename), as_gray=True)
(score, diff) = ssim(cover, gen, full=True)
imwrite("div2k/myval/"+str(diff_data_depth)+"_"+str(diff_epochs)+"/"+"%d_%d_diff_%s"%(diff_data_depth,diff_epochs,filename),diff)
print("Score: ",score)
###Output
div2k/myval/_/0805.png
|
lab7/LogisticRegression-Tweets.ipynb | ###Markdown
Aim:* Extract features for logistic regression given some text* Implement logistic regression from scratch* Apply logistic regression on a natural language processing task* Test logistic regressionWe will be using a data set of tweets. Import functions and data
###Code
import nltk
from nltk.corpus import twitter_samples
import pandas as pd
nltk.download('twitter_samples')
nltk.download('stopwords')
import re
import string
import numpy as np
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.tokenize import TweetTokenizer
#process_tweet(): cleans the text, tokenizes it into separate words, removes stopwords, and converts words to stems.
def process_tweet(tweet):
"""Process tweet function.
Input:
tweet: a string containing a tweet
Output:
tweets_clean: a list of words containing the processed tweet
"""
stemmer = PorterStemmer()
stopwords_english = stopwords.words('english')
# remove stock market tickers like $GE
tweet = re.sub(r'\$\w*', '', tweet)
# remove old style retweet text "RT"
tweet = re.sub(r'^RT[\s]+', '', tweet)
# remove hyperlinks
tweet = re.sub(r'https?:\/\/.*[\r\n]*', '', tweet)
# remove hashtags
# only removing the hash # sign from the word
tweet = re.sub(r'#', '', tweet)
# tokenize tweets
tokenizer = TweetTokenizer(preserve_case=False, strip_handles=True,
reduce_len=True)
tweet_tokens = tokenizer.tokenize(tweet)
tweets_clean = []
for word in tweet_tokens:
if(word not in stopwords_english and word not in string.punctuation):
stem_word = stemmer.stem(word)
tweets_clean.append(stem_word)
#############################################################
# 1 remove stopwords
# 2 remove punctuation
# 3 stemming word
# 4 Add it to tweets_clean
return tweets_clean
#build_freqs counts how often a word in the 'corpus' (the entire set of tweets) was associated with
# a positive label '1' or
# a negative label '0',
#then builds the freqs dictionary, where each key is a (word,label) tuple,
#and the value is the count of its frequency within the corpus of tweets.
def build_freqs(tweets, ys):
"""Build frequencies.
Input:
tweets: a list of tweets
ys: an m x 1 array with the sentiment label of each tweet
(either 0 or 1)
Output:
freqs: a dictionary mapping each (word, sentiment) pair to its
frequency
"""
# Convert np array to list since zip needs an iterable.
# The squeeze is necessary or the list ends up with one element.
# Also note that this is just a NOP if ys is already a list.
yslist = np.squeeze(ys).tolist()
# Start with an empty dictionary and populate it by looping over all tweets
# and over all processed words in each tweet.
freqs = {}
for y, tweet in zip(yslist, tweets):
for word in process_tweet(tweet):
pair = (word, y)
#############################################################
#Update the count of pair if present, set it to 1 otherwise
if pair in freqs:
freqs[pair] += 1
else:
freqs[pair] = 1
return freqs
###Output
_____no_output_____
###Markdown
Prepare the data* The `twitter_samples` contains subsets of 5,000 positive tweets, 5,000 negative tweets, and the full set of 10,000 tweets.
###Code
# select the set of positive and negative tweets
all_positive_tweets = twitter_samples.strings('positive_tweets.json')
all_negative_tweets = twitter_samples.strings('negative_tweets.json')
###Output
_____no_output_____
###Markdown
* Train test split: 20% will be in the test set, and 80% in the training set.
###Code
# split the data into two pieces, one for training and one for testing
#############################################################
test_pos = all_positive_tweets[4000:]
train_pos = all_positive_tweets[:4000]
test_neg = all_negative_tweets[4000:]
train_neg = all_negative_tweets[:4000]
train_x = train_pos + train_neg
test_x = test_pos + test_neg
###Output
_____no_output_____
###Markdown
* Create the numpy array of positive labels and negative labels.
###Code
# combine positive and negative labels
train_y = np.append(np.ones((len(train_pos), 1)), np.zeros((len(train_neg), 1)), axis=0)
test_y = np.append(np.ones((len(test_pos), 1)), np.zeros((len(test_neg), 1)), axis=0)
###Output
_____no_output_____
###Markdown
* Create the frequency dictionary using the `build_freqs()` function.
###Code
# create frequency dictionary
#############################################################
freqs = build_freqs(train_x,train_y)
# check the output
print("type(freqs) = " + str(type(freqs)))
print("len(freqs) = " + str(len(freqs.keys())))
###Output
type(freqs) = <class 'dict'>
len(freqs) = 11339
###Markdown
* Here, the `freqs` dictionary is the frequency dictionary that's being built.
* The key is the tuple (word, label), such as ("happy",1) or ("happy",0). The value stored for each key is the count of how many times the word "happy" was associated with a positive label, or how many times "happy" was associated with a negative label. Process tweet
###Code
# Example
print('This is an example of a positive tweet: \n', train_x[0])
print('\nThis is an example of the processed version of the tweet: \n', process_tweet(train_x[0]))
###Output
This is an example of a positive tweet:
#FollowFriday @France_Inte @PKuchly57 @Milipol_Paris for being top engaged members in my community this week :)
This is an example of the processed version of the tweet:
['followfriday', 'top', 'engag', 'member', 'commun', 'week', ':)']
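###Markdown
As a small, hedged illustration (the word below is just an example), the `freqs` dictionary built above can also be queried directly; note that its keys store the *stemmed* word together with a float label such as 1.0 or 0.0.
###Code
# Hedged example: look up the stored counts for one stemmed word.
word = 'happi'  # the Porter stem of "happy"
print('positive count:', freqs.get((word, 1.0), 0))
print('negative count:', freqs.get((word, 0.0), 0))
###Output
_____no_output_____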
###Markdown
Logistic regression : Sigmoid$$ h(z) = \frac{1}{1+\exp^{-z}} $$It maps the input 'x' to a value that ranges between 0 and 1, and so it can be treated as a probability.
###Code
def sigmoid(z):
# calculate the sigmoid of z
#############################################################
h = 1/(1+np.exp(-z))
return h
###Output
_____no_output_____
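###Markdown
A quick sanity check of `sigmoid()` (a hedged example): the output at 0 should be exactly 0.5, and large positive inputs should approach 1.
###Code
# Hedged sanity check for sigmoid()
print(sigmoid(0))     # expected: 0.5
print(sigmoid(4.92))  # expected: roughly 0.993
###Output
_____no_output_____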
###Markdown
Logistic regression: regression and a sigmoidLogistic regression takes a regular linear regression, and applies a sigmoid to the output of the linear regression.Logistic regression$$ h(z) = \frac{1}{1+\exp^{-z}}$$$$z = \theta_0 x_0 + \theta_1 x_1 + \theta_2 x_2 + ... \theta_N x_N$$ Update the weights:Gradient Descent$$\nabla_{\theta_j}J(\theta) = \frac{1}{m} \sum_{i=1}^m(h^{(i)}-y^{(i)})x_j $$* To update the weight $\theta_j$, we adjust it by subtracting a fraction of the gradient determined by $\alpha$:$$\theta_j = \theta_j - \alpha \times \nabla_{\theta_j}J(\theta) $$* The learning rate $\alpha$ is a value that we choose to control how big a single update will be.
###Code
def gradientDescent(x, y, theta, alpha, num_iters):
# get 'm', the number of rows in matrix x
m = len(x)
for i in range(0, num_iters):
# get z, the dot product of x and theta
#############################################################
z = np.dot(x,theta)
# get the sigmoid of z
#############################################################
h = sigmoid(z)
# calculate the cost function
J = (-1/m)*(y.T @ np.log(h) + (1-y).T @ np.log(1-h))
# update the weights theta
#############################################################
grad = (1/m) * np.dot(x.T, h-y)
theta -= (alpha * grad)
J = float(J)
return J, theta
###Output
_____no_output_____
###Markdown
Extracting the features* Given a list of tweets, extract the features and store them in a matrix. You will extract two features. * The first feature is the number of positive words in a tweet. * The second feature is the number of negative words in a tweet. * Then train your logistic regression classifier on these features.* Test the classifier on a validation set.
###Code
def extract_features(tweet, freqs):
'''
Input:
tweet: a list of words for one tweet
freqs: a dictionary corresponding to the frequencies of each tuple (word, label)
Output:
x: a feature vector of dimension (1,3)
'''
# tokenizes, stems, and removes stopwords
#############################################################
word_l = process_tweet(tweet)
# 3 elements in the form of a 1 x 3 vector
x = np.zeros((1, 3))
#bias term is set to 1
x[0,0] = 1
# loop through each word in the list of words
for word in word_l:
# increment the word count for the positive label 1
#############################################################
x[0,1] += freqs.get((word,1.0),0)
# increment the word count for the negative label 0
#############################################################
x[0,2] += freqs.get((word,0.0),0)
assert(x.shape == (1, 3))
return x
# Check the function
# test 1
# test on training data
tmp1 = extract_features(train_x[0], freqs)
print(tmp1)
# test 2:
# check for when the words are not in the freqs dictionary
tmp2 = extract_features('Hariom pandya', freqs)
print(tmp2)
###Output
[[1. 0. 0.]]
###Markdown
Training Your ModelTo train the model:* Stack the features for all training examples into a matrix `X`. * Call `gradientDescent`
###Code
# collect the features 'x' and stack them into a matrix 'X'
X = np.zeros((len(train_x), 3))
for i in range(len(train_x)):
X[i, :]= extract_features(train_x[i], freqs)
# training labels corresponding to X
Y = train_y
# Apply gradient descent
J, theta = gradientDescent(X, Y, np.zeros((3, 1)), 1e-9, 1500)
print(f"The cost after training is {J:.8f}.")
###Output
The cost after training is 0.24215613.
###Markdown
Test logistic regressionPredict whether a tweet is positive or negative.* Given a tweet, process it, then extract the features.* Apply the model's learned weights on the features to get the logits.* Apply the sigmoid to the logits to get the prediction (a value between 0 and 1).$$y_{pred} = sigmoid(\mathbf{x} \cdot \theta)$$
###Code
def predict_tweet(tweet, freqs, theta):
'''
Input:
tweet: a string
freqs: a dictionary corresponding to the frequencies of each tuple (word, label)
theta: (3,1) vector of weights
Output:
y_pred: the probability of a tweet being positive or negative
'''
# extract the features of the tweet and store it into x
#############################################################
x = extract_features(tweet,freqs)
# make the prediction using x and theta
#############################################################
z = np.dot(x,theta)
y_pred = sigmoid(z)
return y_pred
# Run this cell to test your function
for tweet in ['I am happy', 'I am bad', 'this movie should have been great.', 'great', 'great great', 'great great great', 'great great great great']:
print( '%s -> %f' % (tweet, predict_tweet(tweet, freqs, theta)))
###Output
I am happy -> 0.518581
I am bad -> 0.494339
this movie should have been great. -> 0.515331
great -> 0.515464
great great -> 0.530899
great great great -> 0.546275
great great great great -> 0.561562
###Markdown
Check performance using the test set
###Code
def test_logistic_regression(test_x, test_y, freqs, theta):
"""
Input:
test_x: a list of tweets
test_y: (m, 1) vector with the corresponding labels for the list of tweets
freqs: a dictionary with the frequency of each pair (or tuple)
theta: weight vector of dimension (3, 1)
Output:
accuracy: (# of tweets classified correctly) / (total # of tweets)
"""
# the list for storing predictions
y_hat = []
for tweet in test_x:
# get the label prediction for the tweet
y_pred = predict_tweet(tweet, freqs, theta)
if y_pred > 0.5:
# append 1.0 to the list
y_hat.append(1)
else:
# append 0 to the list
y_hat.append(0)
# With the above implementation, y_hat is a list, but test_y is (m,1) array
# convert both to one-dimensional arrays in order to compare them using the '==' operator
count=0
y_hat=np.array(y_hat)
m=len(test_y)
print(m)
test_y=np.reshape(test_y,m)
print(y_hat.shape)
print(test_y.shape)
accuracy = ((test_y == y_hat).sum())/m
return accuracy
tmp_accuracy = test_logistic_regression(test_x, test_y, freqs, theta)
print(f"Logistic regression model's accuracy = {tmp_accuracy:.4f}")
###Output
2000
(2000,)
(2000,)
Logistic regression model's accuracy = 0.9950
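###Markdown
As a final, hedged example (the tweet below is made up), you can run the trained model on your own text and inspect both the processed tokens and the predicted probability.
###Code
# Hedged example: predict the sentiment of a custom tweet.
my_tweet = 'I learned a lot from this lab and it was fun :)'
print(process_tweet(my_tweet))
print(predict_tweet(my_tweet, freqs, theta))
###Output
_____no_output_____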
|
Jupyter Notebook Examples/Python/HPDS/4) BMI-Age Plot by Gender.ipynb | ###Markdown
Import the needed libraries
###Code
import PicSureHpdsLib
import pandas
import matplotlib
###Output
_____no_output_____
###Markdown
Create an instance of the datasource adapter and get a reference to the data resource
###Code
adapter = PicSureHpdsLib.BypassAdapter("http://pic-sure-hpds-nhanes:8080/PIC-SURE")
resource = adapter.useResource()
###Output
_____no_output_____
###Markdown
Get a listing of all "demographics" entries in the data dictionary. Show what actions can be done with the "demographic_entries" object
###Code
demographic_entries = resource.dictionary().find("\\demographics\\")
demographic_entries.help()
###Output
[HELP] PicSureHpdsLib.Client(connection).useResource(uuid).dictionary().find(term)
.count() Returns the number of entries in the dictionary that match the given term
.keys() Return the keys of the matching entries
.entries() Return a list of matching dictionary entries
.DataFrame() Return the entries in a Pandas-compatible format
[Examples]
results = PicSureHpdsLib.Client(connection).useResource(uuid).dictionary().find("asthma")
df = results.DataFrame()
###Markdown
Examine the demographic_entries results by converting it into a pandas DataFrame
###Code
demographic_entries.DataFrame()
resource.query().help()
resource.query().filter().help()
query_male = resource.query()
query_male.filter().add("\\demographics\\SEX\\", ["male"])
query_female = resource.query()
query_female.filter().add("\\demographics\\SEX\\", ["female"])
field_age = resource.dictionary().find("\\AGE\\")
field_BMI = resource.dictionary().find("\\Body Mass Index")
query_male.require().add(field_age.keys())
query_male.require().add(field_BMI.keys())
query_female.require().add(field_age.keys())
query_female.require().add(field_BMI.keys())
query_female.show()
###Output
.__________[ Query.Select() has NO SELECTIONS ]____________________________________________________________________________________________________________
.__________[ Query.Require() Settings ]_________________________________________________________________________________________
| _key__________________________________________________________________________________________________________________________
| \\demographics\\AGE\\ |
| \\examination\\body measures\\Body Mass Index (kg per m**2)\\ |
.__________[ Query.Filter() Settings ]_____________________________________________________________________________________________________________________
| _restriction_type_ | _key__________________________________________________________________________________________________________ | _restriction_values_
| categorical | \\demographics\\SEX\\ | ['female'] |
###Markdown
Convert the query results for females into a DataFrame and plot it by BMI and Age
###Code
df_f = query_female.getResultsDataFrame()
plot_f = df_f.plot.scatter(x="\\demographics\\AGE\\", y="\\examination\\body measures\\Body Mass Index (kg per m**2)\\", c="#ffbabb40")
# ____ Uncomment if graphs are not displaying ____
#plot_f.plot()
#matplotlib.pyplot.show()
###Output
_____no_output_____
###Markdown
Convert the query results for males into a DataFrame and plot it by BMI and Age
###Code
df_m = query_male.getResultsDataFrame()
plot_m = df_m.plot.scatter(x="\\demographics\\AGE\\", y="\\examination\\body measures\\Body Mass Index (kg per m**2)\\", c="#5a7dd040")
# ____ Uncomment if graphs are not displaying ____
#plot_m.plot()
#matplotlib.pyplot.show()
###Output
_____no_output_____
###Markdown
Replot the results using a single DataFrame containing both male and female
###Code
d = resource.dictionary()
criteria = []
criteria.extend(d.find("\\SEX\\").keys())
criteria.extend(d.find("\\Body Mass Index").keys())
criteria.extend(d.find("\\AGE\\").keys())
query_unified = resource.query()
query_unified.require().add(criteria)
df_mf = query_unified.getResultsDataFrame()
# map a color field for the plot to use
sex_colors = {'male':'#5a7dd040', 'female':'#ffbabb40'}
df_mf['\\sex_color\\'] = df_mf['\\demographics\\SEX\\'].map(sex_colors)
# plot data
plot_mf = df_mf.plot.scatter(x="\\demographics\\AGE\\", y="\\examination\\body measures\\Body Mass Index (kg per m**2)\\", c=df_mf['\\sex_color\\'])
# ____ Uncomment if graphs are not displaying ____
#plot_mf.plot()
#matplotlib.pyplot.show()
###Output
_____no_output_____
###Markdown
Replot data but trim outliers
###Code
q = df_mf["\\examination\\body measures\\Body Mass Index (kg per m**2)\\"].quantile(0.9999)
# create a masked array to remove outliers
test = df_mf.mask(df_mf["\\examination\\body measures\\Body Mass Index (kg per m**2)\\"] > q)
# plot data
plot_mf = test.plot.scatter(x="\\demographics\\AGE\\", y="\\examination\\body measures\\Body Mass Index (kg per m**2)\\", c=df_mf['\\sex_color\\'])
# ____ Uncomment if graphs are not displaying ____
#plot_mf.plot()
#matplotlib.pyplot.show()
###Output
_____no_output_____ |
Natural Language Processing with Attention Models/Week 4 - Chatbot/C4_W4_Ungraded_Lab_Revnet.ipynb | ###Markdown
Putting the "Re" in Reformer: Ungraded LabThis ungraded lab will explore Reversible Residual Networks. You will use these networks in this week's assignment that utilizes the Reformer model. It is based on on the Transformer model you already know, but with two unique features.* Locality Sensitive Hashing (LSH) Attention to reduce the compute cost of the dot product attention and* Reversible Residual Networks (RevNets) organization to reduce the storage requirements when doing backpropagation in training.In this ungraded lab we'll start with a quick review of Residual Networks and their implementation in Trax. Then we will discuss the Revnet architecture and its use in Reformer. Outline- [Part 1: Residual Networks](1) - [1.1 Branch](1.1) - [1.2 Residual Model](1.2)- [Part 2: Reversible Residual Networks](2) - [2.1 Trax Reversible Layers](2.1) - [2.2 Residual Model](2.2)
###Code
import trax
from trax import layers as tl # core building block
import numpy as np # regular ol' numpy
from trax.models.reformer.reformer import (
ReversibleHalfResidualV2 as ReversibleHalfResidual,
) # unique spot
from trax import fastmath # uses jax, offers numpy on steroids
from trax import shapes # data signatures: dimensionality and type
from trax.fastmath import numpy as jnp # For use in defining new layer types.
from trax.shapes import ShapeDtype
from trax.shapes import signature
###Output
_____no_output_____
###Markdown
Part 1.0 Residual Networks[Deep Residual Networks ](https://arxiv.org/abs/1512.03385) (Resnets) were introduced to improve convergence in deep networks. Residual Networks introduce a shortcut connection around one or more layers in a deep network as shown in the diagram below from the original paper.Figure 1: Residual Network diagram from original paperThe [Trax documentation](https://trax-ml.readthedocs.io/en/latest/notebooks/layers_intro.html2.-Inputs-and-Outputs) describes an implementation of Resnets using `branch`. We'll explore that here by implementing a simple resnet built from simple function based layers. Specifically, we'll build a 4 layer network based on two functions, 'F' and 'G'.Figure 2: 4 stage Residual networkDon't worry about the lengthy equations. Those are simply there to be referenced later in the notebook. Part 1.1 BranchTrax `branch` figures prominently in the residual network layer so we will first examine it. You can see from the figure above that we will need a function that will copy an input and send it down multiple paths. This is accomplished with a [branch layer](https://trax-ml.readthedocs.io/en/latest/trax.layers.htmlmodule-trax.layers.combinators), one of the Trax 'combinators'. Branch is a combinator that applies a list of layers in parallel to copies of inputs. Lets try it out! First we will need some layers to play with. Let's build some from functions.
###Code
# simple function taking one input and one output
bl_add1 = tl.Fn("add1", lambda x0: (x0 + 1), n_out=1)
bl_add2 = tl.Fn("add2", lambda x0: (x0 + 2), n_out=1)
bl_add3 = tl.Fn("add3", lambda x0: (x0 + 3), n_out=1)
# try them out
x = np.array([1])
print(bl_add1(x), bl_add2(x), bl_add3(x))
# some information about our new layers
print(
"name:",
bl_add1.name,
"number of inputs:",
bl_add1.n_in,
"number of outputs:",
bl_add1.n_out,
)
bl_3add1s = tl.Branch(bl_add1, bl_add2, bl_add3)
bl_3add1s
###Output
_____no_output_____
###Markdown
Trax uses the concept of a 'stack' to transfer data between layers.For Branch, for each of its layer arguments, it copies the `n_in` inputs from the stack and provides them to the layer, tracking the max_n_in, or the largest n_in required. It then pops the max_n_in elements from the stack.Figure 3: One in, one out BranchOn output, each layer, in succession pushes its results onto the stack. Note that the push/pull operations impact the top of the stack. Elements that are not part of the operation (n, and m in the diagram) remain intact.
###Code
# n_in = 1, Each bl_addx pushes n_out = 1 elements onto the stack
bl_3add1s(x)
# n = np.array([10]); m = np.array([20]) # n, m will remain on the stack
n = "n"
m = "m" # n, m will remain on the stack
bl_3add1s([x, n, m])
###Output
_____no_output_____
###Markdown
Each layer in the input list copies as many inputs from the stack as it needs, and their outputs are successively combined on stack. Put another way, each element of the branch can have differing numbers of inputs and outputs. Let's try a more complex example.
###Code
bl_addab = tl.Fn(
"addab", lambda x0, x1: (x0 + x1), n_out=1
) # Trax figures out how many inputs there are
bl_rep3x = tl.Fn(
"add2x", lambda x0: (x0, x0, x0), n_out=3
) # but you have to tell it how many outputs there are
bl_3ops = tl.Branch(bl_add1, bl_addab, bl_rep3x)
###Output
_____no_output_____
###Markdown
In this case, the number of inputs being copied from the stack varies with the layerFigure 4: variable in, variable out BranchThe stack when the operation is finished is 5 entries reflecting the total from each layer.
###Code
# Before Running this cell, what is the output you are expecting?
y = np.array([3])
bl_3ops([x, y, n, m])
###Output
_____no_output_____
###Markdown
Branch has a special feature to support Residual Network. If an argument is 'None', it will pull the top of stack and push it (at its location in the sequence) onto the output stackFigure 5: Branch for Residual
###Code
bl_2ops = tl.Branch(bl_add1, None)
bl_2ops([x, n, m])
###Output
_____no_output_____
###Markdown
Part 1.2 Residual ModelOK, your turn. Write a function 'MyResidual', that uses `tl.Branch` and `tl.Add` to build a residual layer. If you are curious about the Trax implementation, you can see the code [here](https://github.com/google/trax/blob/190ec6c3d941d8a9f30422f27ef0c95dc16d2ab1/trax/layers/combinators.py).
###Code
def MyResidual(layer):
return tl.Serial(
### START CODE HERE ###
# tl.----,
# tl.----,
### END CODE HERE ###
)
# Lets Try it
mr = MyResidual(bl_add1)
x = np.array([1])
mr([x, n, m])
###Output
_____no_output_____
###Markdown
**Expected Result**(array([3]), 'n', 'm') Great! Now, let's build the 4 layer residual Network in Figure 2. You can use `MyResidual`, or if you prefer, the tl.Residual in Trax, or a combination!
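###Markdown
For reference, here is one possible completion of `MyResidual` (a sketch, not the only valid answer): `tl.Branch(layer, None)` pushes both the layer output and an untouched copy of the input, and `tl.Add()` sums the top two stack entries.
###Code
# One possible completion (sketch) of MyResidual
def MyResidual(layer):
    return tl.Serial(
        tl.Branch(layer, None),  # push layer(x) and a copy of x
        tl.Add(),                # x + layer(x)
    )

mr = MyResidual(bl_add1)
mr([x, n, m])  # expected: (array([3]), 'n', 'm')
###Output
_____no_output_____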
###Code
Fl = tl.Fn("F", lambda x0: (2 * x0), n_out=1)
Gl = tl.Fn("G", lambda x0: (10 * x0), n_out=1)
x1 = np.array([1])
resfg = tl.Serial(
### START CODE HERE ###
# None, #Fl # x + F(x)
# None, #Gl # x + F(x) + G(x + F(x)) etc
# None, #Fl
# None, #Gl
### END CODE HERE ###
)
# Lets try it
resfg([x1, n, m])
###Output
_____no_output_____
###Markdown
**Expected Results**(array([1089]), 'n', 'm') Part 2.0 Reversible Residual NetworksThe Reformer utilized RevNets to reduce the storage requirements for performing backpropagation.Figure 6: Reversible Residual Networks The standard approach on the left above requires one to store the outputs of each stage for use during backprop. By using the organization to the right, one need only store the outputs of the last stage, y1, y2 in the diagram. Using those values and running the algorithm in reverse, one can reproduce the values required for backprop. This trades additional computation for memory space which is at a premium with the current generation of GPU's/TPU's.One thing to note is that the forward functions produced by two networks are similar, but they are not equivalent. Note for example the asymmetry in the output equations after two stages of operation.Figure 7: 'Normal' Residual network (Top) vs REversible Residual Network Part 2.1 Trax Reversible LayersLet's take a look at how this is used in the Reformer.
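###Markdown
Before moving on to the Reformer itself, here is one possible completion of the `resfg` exercise above (a sketch using Trax's built-in `tl.Residual`; a hand-written `MyResidual` would work the same way).
###Code
# One possible completion (sketch) of the 4-stage residual network
resfg_solution = tl.Serial(
    tl.Residual(Fl),  # x + F(x)
    tl.Residual(Gl),  # ... + G(...)
    tl.Residual(Fl),
    tl.Residual(Gl),
)
resfg_solution([x1, n, m])  # expected: (array([1089]), 'n', 'm')
###Output
_____no_output_____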
###Code
refm = trax.models.reformer.ReformerLM(
vocab_size=33000, n_layers=2, mode="train" # Add more options.
)
refm
###Output
_____no_output_____
###Markdown
Eliminating some of the detail, we can see the structure of the network.Figure 8: Key Structure of Reformer Reversible Network Layers in Trax We'll review the Trax layers used to implement the Reversible section of the Reformer. First we can note that not all of the reformer is reversible. Only the section in the ReversibleSerial layer is reversible. In a large Reformer model, that section is repeated many times making up the majority of the model.Figure 9: Functional Diagram of Trax elements in Reformer The implementation starts by duplicating the input to allow the two paths that are part of the reversible residual organization with [Dup](https://github.com/google/trax/blob/190ec6c3d941d8a9f30422f27ef0c95dc16d2ab1/trax/layers/combinators.pyL666). Note that this is accomplished by copying the top of stack and pushing two copies of it onto the stack. This then feeds into the ReversibleHalfResidual layer which we'll review in more detail below. This is followed by [ReversibleSwap](https://github.com/google/trax/blob/190ec6c3d941d8a9f30422f27ef0c95dc16d2ab1/trax/layers/reversible.pyL83). As the name implies, this performs a swap, in this case, the two topmost entries in the stack. This pattern is repeated until we reach the end of the ReversibleSerial section. At that point, the topmost 2 entries of the stack represent the two paths through the network. These are concatenated and pushed onto the stack. The result is an entry that is twice the size of the non-reversible version.Let's look more closely at the [ReversibleHalfResidual](https://github.com/google/trax/blob/190ec6c3d941d8a9f30422f27ef0c95dc16d2ab1/trax/layers/reversible.pyL154). This layer is responsible for executing the layer or layers provided as arguments and adding the output of those layers, the 'residual', to the top of the stack. Below is the 'forward' routine which implements this.Figure 10: ReversibleHalfResidual code and diagram Unlike the previous residual function, the value that is added is from the second path rather than the input to the set of sublayers in this layer. Note that the Layers called by the ReversibleHalfResidual forward function are not modified to support reverse functionality. This layer provides them a 'normal' view of the stack and takes care of reverse operation.Let's try out some of these layers! We'll start with the ones that just operate on the stack, Dup() and Swap().
###Code
x1 = np.array([1])
x2 = np.array([5])
# Dup() duplicates the Top of Stack and returns the stack
dl = tl.Dup()
dl(x1)
# ReversibleSwap() duplicates the Top of Stack and returns the stack
sl = tl.ReversibleSwap()
sl([x1, x2])
###Output
_____no_output_____
###Markdown
You are no doubt wondering "How is ReversibleSwap different from Swap?". Good question! Let's look:Figure 11: Two versions of Swap() The ReverseXYZ functions include a "reverse" complement to their "forward" function that provides the functionality to run in reverse when doing backpropagation. It can also be run in reverse by simply calling 'reverse'.
###Code
# Demonstrate reverse swap
print(x1, x2, sl.reverse([x1, x2]))
###Output
_____no_output_____
###Markdown
Let's try ReversibleHalfResidual. First, we'll need some layers.
###Code
Fl = tl.Fn("F", lambda x0: (2 * x0), n_out=1)
Gl = tl.Fn("G", lambda x0: (10 * x0), n_out=1)
###Output
_____no_output_____
###Markdown
Just a note about ReversibleHalfResidual. As this is written, it resides in the Reformer model and is a layer. It is invoked a bit differently than other layers. Rather than tl.XYZ, it is just ReversibleHalfResidual(layers..) as shown below. This may change in the future.
###Code
half_res_F = ReversibleHalfResidual(Fl)
print(type(half_res_F), "\n", half_res_F)
half_res_F([x1, x1]) # this is going to produce an error - why?
# we have to initialize the ReversibleHalfResidual layer to let it know what the input is going to look like
half_res_F.init(shapes.signature([x1, x1]))
half_res_F([x1, x1])
###Output
_____no_output_____
###Markdown
Notice the output: (DeviceArray([3], dtype=int32), array([1])). The first value, (DeviceArray([3], dtype=int32) is the output of the "Fl" layer and has been converted to a 'Jax' DeviceArray. The second array([1]) is just passed through (recall the diagram of ReversibleHalfResidual above). The final layer we need is the ReversibleSerial Layer. This is the reversible equivalent of the Serial layer and is used in the same manner to build a sequence of layers. Part 2.2 Build a reversible modelWe now have all the layers we need to build the model shown below. Let's build it in two parts. First we'll build 'blk' and then a list of blk's. And then 'mod'. Figure 12: Reversible Model we will build using Trax components
###Code
blk = [ # a list of the 4 layers shown above
### START CODE HERE ###
None,
None,
None,
None,
]
blks = [None, None]
### END CODE HERE ###
mod = tl.Serial(
### START CODE HERE ###
None,
None,
None,
### END CODE HERE ###
)
mod
###Output
_____no_output_____
###Markdown
**Expected Output**```Serial[ Dup_out2 ReversibleSerial_in2_out2[ ReversibleHalfResidualV2_in2_out2[ Serial[ F ] ] ReversibleSwap_in2_out2 ReversibleHalfResidualV2_in2_out2[ Serial[ G ] ] ReversibleSwap_in2_out2 ReversibleHalfResidualV2_in2_out2[ Serial[ F ] ] ReversibleSwap_in2_out2 ReversibleHalfResidualV2_in2_out2[ Serial[ G ] ] ReversibleSwap_in2_out2 ] Concatenate_in2]```
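###Markdown
For reference, one possible way to fill in the cell above (a sketch that reproduces the expected output): each block applies F and G as ReversibleHalfResidual layers with a ReversibleSwap after each, the block list is repeated twice, and the model wraps the reversible section with Dup and Concatenate.
###Code
# One possible completion (sketch) matching the expected output above
blk = [
    ReversibleHalfResidual(Fl),
    tl.ReversibleSwap(),
    ReversibleHalfResidual(Gl),
    tl.ReversibleSwap(),
]
blks = [blk, blk]  # repeat the block twice

mod = tl.Serial(
    tl.Dup(),                   # duplicate the input into two streams
    tl.ReversibleSerial(blks),  # the reversible residual section
    tl.Concatenate(),           # merge the two streams at the end
)
mod
###Output
_____no_output_____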
###Code
mod.init(shapes.signature(x1))
out = mod(x1)
out
###Output
_____no_output_____ |
notebooks/4_ica_dimensionality.ipynb | ###Markdown
Table of Contents1 Load Data2 Compare dimensionalities3 Find "single-gene" iModulons4 Plot Components
###Code
from pymodulon.core import IcaData
import os
import pandas as pd
import matplotlib.pyplot as plt
from scipy import stats
import numpy as np
from tqdm.notebook import tqdm
# Directory containing ICA outputs
DATA_DIR = '../data/interim/ica_runs'
###Output
_____no_output_____
###Markdown
Load Data
###Code
def load_M(dim):
return pd.read_csv(os.path.join(DATA_DIR,str(dim),'S.csv'),index_col=0)
def load_A(dim):
return pd.read_csv(os.path.join(DATA_DIR,str(dim),'A.csv'),index_col=0)
dims = sorted([int(x) for x in os.listdir(DATA_DIR)])
M_data = [load_M(dim) for dim in dims]
A_data = [load_A(dim) for dim in dims]
n_components = [m.shape[1] for m in M_data]
###Output
_____no_output_____
###Markdown
Compare dimensionalities
###Code
final_m = M_data[-1]
thresh = 0.7
n_final_mods = []
for m in tqdm(M_data):
corrs = pd.DataFrame(index=final_m.columns,columns=m.columns)
for col1 in final_m.columns:
for col2 in m.columns:
corrs.loc[col1,col2] = abs(stats.pearsonr(final_m[col1],m[col2])[0])
n_final_mods.append(len(np.where(corrs > thresh)[0]))
###Output
_____no_output_____
###Markdown
Find "single-gene" iModulonsAt a high enough dimensionality, some iModulons track the expression trajectory of a single iModulon
###Code
n_single_genes = []
for m in tqdm(M_data):
counter = 0
for col in m.columns:
sorted_genes = abs(m[col]).sort_values(ascending=False)
if sorted_genes.iloc[0] > 2 * sorted_genes.iloc[1]:
counter += 1
n_single_genes.append(counter)
###Output
_____no_output_____
###Markdown
Plot Components
###Code
non_single_components = np.array(n_components) - np.array(n_single_genes)
DF_stats = pd.DataFrame([n_components,n_final_mods,non_single_components,n_single_genes],
index=['Robust Components','Final Components','Multi-gene Components',
'Single Gene Components'],
columns=dims).T
DF_stats.sort_index(inplace=True)
dimensionality = DF_stats[DF_stats['Final Components'] >= DF_stats['Multi-gene Components']].iloc[0].name
print('Optimal Dimensionality:',dimensionality)
plt.plot(dims,n_components,label='Robust Components')
plt.plot(dims,n_final_mods,label='Final Components')
plt.plot(dims,non_single_components,label='Non-single-gene Components')
plt.plot(dims,n_single_genes,label='Single Gene Components')
plt.vlines(dimensionality,0,max(n_components),linestyle='dashed')
plt.xlabel('Dimensionality')
plt.ylabel('# Components')
plt.legend(bbox_to_anchor=(1,1))
DF_stats
###Output
_____no_output_____
###Markdown
Table of Contents1 Load Data2 Compare dimensionalities3 Find "single-gene" iModulons4 Plot Components
###Code
from pymodulon.core import IcaData
import os
import pandas as pd
import matplotlib.pyplot as plt
from scipy import stats
import numpy as np
from tqdm.notebook import tqdm
# Directory containing ICA outputs
DATA_DIR = '../data/ica_runs'
###Output
_____no_output_____
###Markdown
Load Data
###Code
def load_M(dim):
return pd.read_csv(os.path.join(DATA_DIR,str(dim),'S.csv'),index_col=0)
def load_A(dim):
return pd.read_csv(os.path.join(DATA_DIR,str(dim),'A.csv'),index_col=0)
dims = sorted([int(x) for x in os.listdir(DATA_DIR) if x != '.DS_Store'])
M_data = [load_M(dim) for dim in dims]
A_data = [load_A(dim) for dim in dims]
n_components = [m.shape[1] for m in M_data]
###Output
_____no_output_____
###Markdown
Compare dimensionalities
###Code
final_m = M_data[-1]
thresh = 0.7
n_final_mods = []
for m in tqdm(M_data):
corrs = pd.DataFrame(index=final_m.columns,columns=m.columns)
for col1 in final_m.columns:
for col2 in m.columns:
corrs.loc[col1,col2] = abs(stats.pearsonr(final_m[col1],m[col2])[0])
n_final_mods.append(len(np.where(corrs > thresh)[0]))
###Output
_____no_output_____
###Markdown
Find "single-gene" iModulonsAt a high enough dimensionality, some iModulons track the expression trajectory of a single iModulon
###Code
n_single_genes = []
for m in tqdm(M_data):
counter = 0
for col in m.columns:
sorted_genes = abs(m[col]).sort_values(ascending=False)
if sorted_genes.iloc[0] > 2 * sorted_genes.iloc[1]:
counter += 1
n_single_genes.append(counter)
###Output
_____no_output_____
###Markdown
Plot Components
###Code
non_single_components = np.array(n_components) - np.array(n_single_genes)
DF_stats = pd.DataFrame([n_components,n_final_mods,non_single_components,n_single_genes],
index=['Robust Components','Final Components','Multi-gene Components',
'Single Gene Components'],
columns=dims).T
DF_stats.sort_index(inplace=True)
dimensionality = DF_stats[DF_stats['Final Components'] >= DF_stats['Multi-gene Components']].iloc[0].name
print('Optimal Dimensionality:',dimensionality)
plt.plot(dims,n_components,label='Robust Components')
plt.plot(dims,n_final_mods,label='Final Components')
plt.plot(dims,non_single_components,label='Non-single-gene Components')
plt.plot(dims,n_single_genes,label='Single Gene Components')
plt.vlines(dimensionality,0,max(n_components),linestyle='dashed')
plt.xlabel('Dimensionality')
plt.ylabel('# Components')
plt.legend(bbox_to_anchor=(1,1))
plt.savefig('../data/figures/dimensionality.svg')
###Output
_____no_output_____ |
2019/lecture-code/lecture 8 - Multiple Comparisons.ipynb | ###Markdown
Lecture 8: p-hacking and Multiple Comparisons[J. Nathan Matias](https://github.com/natematias)[SOC412](https://natematias.com/courses/soc412/), February 2019In Lecture 8, we discussed Stephanie Lee's story about [Brian Wansink](https://www.buzzfeednews.com/article/stephaniemlee/brian-wansink-cornell-p-hacking.btypwrDwe5), a food researcher who was found guilty of multiple kinds of research misconduct, including "p-hacking," where researchers keep looking for an answer until they find one. In this lecture, we will discuss what p-hacking is and what researchers can do to protect against it in our own work. This example uses the [DeclareDesign](http://declaredesign.org/) library, which supports the simulation and evaluation of experiment designs. We will be using DeclareDesign to help with designing experiments in this class.What can you do in your research to protect yourself against the risk of p-hacking or against reductions in the credibility of your research if people accuse you of p-hacking?* Conduct a **power analysis** to choose a sample size that is large enough to observe the effect you're looking for (see below)* If you have multiple statistical tests in each experiment, [adjust your analysis for multiple comparisons](https://egap.org/methods-guides/10-things-you-need-know-about-multiple-comparisons).* [Pre-register](https://cos.io/prereg/) your study, being clear about whether your research is exploratory or confirmatory, and committing in advance to the statistical tests you're using to analyze the results* Use cross-validation with training and holdout samples to take an exploratory + confirmatory approach (requires a much larger sample size, typically greater than 2x) Load Libraries
###Code
options("scipen"=9, "digits"=4)
library(dplyr)
library(MASS)
library(ggplot2)
library(rlang)
library(corrplot)
library(Hmisc)
library(tidyverse)
library(viridis)
library(fabricatr)
library(DeclareDesign)
## Installed DeclareDesign 0.13 using the following command:
# install.packages("DeclareDesign", dependencies = TRUE,
# repos = c("http://R.declaredesign.org", "https://cloud.r-project.org"))
options(repr.plot.width=7, repr.plot.height=4)
set.seed(03456920)
sessionInfo()
###Output
_____no_output_____
###Markdown
What is a p-value? A p-value (which can be calculated differently for different kinds of statistical tests) is the probability, if the null hypothesis were true, of observing a result at least as extreme as the one actually obtained. When testing differences in means, we are usually testing the null hypothesis of no difference between the two distributions. In those cases, the p-value is the probability of observing a difference between the distributions that is at least as extreme as the one observed.You can think of the p-value as the probability represented by the area under the following t distribution of all of the possible outcomes for a given difference between means if the null hypothesis is true: Illustrating The Null HypothesisIn the following case, I generate 1,000 sets of normal distributions with exactly the same mean and standard deviation, and then plot the differences between those means:
###Code
### GENERATE n.samples simulations at n.sample.size observations
### using normal distributions at the specified means
### and record the difference in means and the p value of the observations
#
# `@diff.df: the dataframe to pass in
# `@n.sample.size: the sample sizes to draw from a normal distribution
generate.n.samples <- function(diff.df, n.sample.size = 500){
for(i in seq(nrow(diff.df))){
row = diff.df[i,]
a.dist = rnorm(n.sample.size, mean = row$a.mean, sd = row$a.sd)
        b.dist = rnorm(n.sample.size, mean = row$b.mean, sd = row$b.sd)
t <- t.test(a.dist, b.dist)
diff.df[i,]$p.value <- t$p.value
diff.df[i,]$mean.diff <- mean(b.dist) - mean(a.dist)
}
diff.df
}
#expand.grid
n.samples = 1000
null.hypothesis.df = data.frame(a.mean = 1, a.sd = 1,
b.mean = 1, b.sd = 1,
id=seq(n.samples),
mean.diff = NA,
p.value = NA)
null.hypothesis.df <- generate.n.samples(null.hypothesis.df, 200)
ggplot(null.hypothesis.df, aes(mean.diff)) +
geom_histogram(binwidth=0.01) +
xlim(-1.2,1.2) +
ggtitle("Simulated Differences in means under the null hypothesis")
ggplot(null.hypothesis.df, aes(mean.diff, p.value, color=factor(p.value < 0.05))) +
geom_point() +
geom_hline(yintercept = 0.05) +
ggtitle("Simulated p values under the null hypothesis")
print("How often is the p-value < 0.05?")
summary(null.hypothesis.df$p.value > 0.05)
###Output
[1] "How often is the p-value < 0.05?"
###Markdown
Illustrating A Difference in Means (first with a small sample size)
###Code
#expand.grid
small.sample.diff.df = data.frame(a.mean = 1, a.sd = 1,
b.mean = 1.2, b.sd = 1,
id=seq(n.samples),
mean.diff = NA,
p.value = NA)
small.sample.diff.df <- generate.n.samples(small.sample.diff.df, 20)
ggplot(small.sample.diff.df, aes(mean.diff)) +
geom_histogram(binwidth=0.01) +
xlim(-1.2,1.2) +
ggtitle("Simulated Differences in means under the a diff in means of 1 (n=20)")
ggplot(small.sample.diff.df, aes(mean.diff, p.value, color=factor(p.value < 0.05))) +
geom_point() +
geom_hline(yintercept = 0.05) +
ggtitle("Simulated p values under a diff in means of 0.2 (n = 20)")
print("How often is the p-value < 0.05?")
summary(small.sample.diff.df$p.value > 0.05)
print("How often is the p-value < 0.05? when the estimate is < 0 (false positive)?")
nrow(subset(small.sample.diff.df, mean.diff<0 &p.value < 0.05))
print("How often is the p-value >= 0.05 when the estimate is 0.2 or greater (false negative)?")
print(sprintf("%1.2f precent",
nrow(subset(small.sample.diff.df, mean.diff>=0.2 &p.value >= 0.05)) /
nrow(small.sample.diff.df)*100))
print("What is the smallest positive, statistically-significant result?")
sprintf("%1.2f, which is greater than the true difference of 0.2",
min(subset(small.sample.diff.df, mean.diff>0 & p.value < 0.05)$mean.diff))
print("If we only published statistically-significant results, what we would we think the true effect would be?")
sprintf("%1.2f, which is greater than the true difference of 0.2",
mean(subset(small.sample.diff.df, p.value < 0.05)$mean.diff))
print("If we published all experiment results, what we would we think the true effect would be?")
sprintf("%1.2f, which is very close to the true difference of 0.2",
mean(small.sample.diff.df$mean.diff))
###Output
[1] "If we published all experiment results, what we would we think the true effect would be?"
###Markdown
Illustrating A Difference in Means (with a larger sample size)
###Code
#expand.grid
larger.sample.diff.df = data.frame(a.mean = 1, a.sd = 1,
b.mean = 1.2, b.sd = 1,
id=seq(n.samples),
mean.diff = NA,
p.value = NA)
larger.sample.diff.df <- generate.n.samples(larger.sample.diff.df, 200)
ggplot(larger.sample.diff.df, aes(mean.diff)) +
geom_histogram(binwidth=0.01) +
xlim(-1.2,1.2) +
ggtitle("Simulated Differences in means under the a diff in means of 1 (n=200)")
ggplot(larger.sample.diff.df, aes(mean.diff, p.value, color=factor(p.value < 0.05))) +
geom_point() +
geom_hline(yintercept = 0.05) +
ggtitle("Simulated p values under a diff in means of 0.2 (n = 200)")
print("If we only published statistically-significant results, what we would we think the true effect would be?")
sprintf("%1.2f, which is greater than the true difference of 0.2",
mean(subset(larger.sample.diff.df, p.value < 0.05)$mean.diff))
print("How often is the p-value < 0.05?")
sprintf("%1.2f percent",
nrow(subset(larger.sample.diff.df,p.value < 0.05)) / nrow(larger.sample.diff.df)*100)
###Output
[1] "How often is the p-value < 0.05?"
###Markdown
Illustrating a Difference in Means (with an adequately large sample size)
###Code
adequate.sample.diff.df = data.frame(a.mean = 1, a.sd = 1,
b.mean = 1.2, b.sd = 1,
id=seq(n.samples),
mean.diff = NA,
p.value = NA)
adequate.sample.diff.df <- generate.n.samples(larger.sample.diff.df, 400)
ggplot(adequate.sample.diff.df, aes(mean.diff, p.value, color=factor(p.value < 0.05))) +
geom_point() +
geom_hline(yintercept = 0.05) +
ggtitle("Simulated p values under a diff in means of 0.2 (n = 400)")
print("How often is the p-value < 0.05?")
sprintf("%1.2f percent",
nrow(subset(adequate.sample.diff.df,p.value < 0.05)) / nrow(adequate.sample.diff.df)*100)
print("If we only published statistically-significant results, what we would we think the true effect would be?")
sprintf("%1.2f, which is greater than the true difference of 0.2",
mean(subset(adequate.sample.diff.df, p.value < 0.05)$mean.diff))
###Output
[1] "If we only published statistically-significant results, what we would we think the true effect would be?"
###Markdown
The Problem of Multiple ComparisonsIn the above example, I demonstrated that across 1,000 simulations under the null hypothesis and a decision rule of p = 0.05, roughly 5% of the results are statistically significant. This is similarly true for a single experiment with multiple outcome variables.
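###Markdown
As a quick check of that claim (a one-line sketch using the simulation above), the observed false-positive rate under the null should be close to 0.05:
###Code
# Sketch: share of null simulations with p < 0.05
mean(null.hypothesis.df$p.value < 0.05)
###Output
_____no_output_____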
###Code
## Generate n normally distributed outcome variables with no difference on average
#
#` @num.samples: sample size for the dataframe
#` @num.columns: how many outcome variables to observe
#` @common.mean: the mean of the outcomes
#` @common.sd: the standard deviation of the outcomes
generate.n.outcomes.null <- function( num.samples, num.columns, common.mean, common.sd){
df <- data.frame(id = seq(num.samples))
for(i in seq(num.columns)){
df[paste('row.',i,sep="")] <- rnorm(num.samples, mean=common.mean, sd=common.sd)
}
df
}
###Output
_____no_output_____
###Markdown
With 10 outcome variables, if we look for correlations between every pair of outcomes, we expect to see 5% false positives on average under the null hypothesis.
###Code
set.seed(487)
## generate the data
null.10.obs <- generate.n.outcomes.null(100, 10, 1, 3)
null.10.obs$id <- NULL
null.correlations <- cor(null.10.obs, method="pearson")
null.pvalues <- cor.mtest(null.10.obs, conf.level = 0.95, method="pearson")$p
corrplot(cor(null.10.obs, method="pearson"), sig.level = 0.05, p.mat = null.pvalues)
###Output
_____no_output_____
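###Markdown
One way to guard against this, mentioned at the top of the lecture, is to adjust the p-values for multiple comparisons. The sketch below uses base R's p.adjust() on the off-diagonal p-values; "holm" (like "bonferroni") controls the familywise error rate.
###Code
# Sketch: multiple-comparisons adjustment of the correlation p-values
raw.p <- null.pvalues[upper.tri(null.pvalues)]
adjusted.p <- p.adjust(raw.p, method = "holm")
print(sprintf("Unadjusted tests with p < 0.05: %d of %d", sum(raw.p < 0.05), length(raw.p)))
print(sprintf("Holm-adjusted tests with p < 0.05: %d of %d", sum(adjusted.p < 0.05), length(raw.p)))
###Output
_____no_output_____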
###Markdown
With multiple comparisons, increasing the sample size does not make the problem go away. Here, we use a sample of 10,000 observations instead of 100.
###Code
null.10.obs.large <- generate.n.outcomes.null(10000, 10, 1, 3)
null.10.obs.large$id <- NULL
null.correlations <- cor(null.10.obs.large, method="pearson")
null.pvalues <- cor.mtest(null.10.obs.large, conf.level = 0.95, method="pearson")$p
corrplot(cor(null.10.obs.large, method="pearson"), sig.level = 0.05, p.mat = null.pvalues)
###Output
_____no_output_____
###Markdown
Power AnalysisA power analysis is a process for deciding what sample size to use based on the chance of observing the minimum effect you are looking for in your study. This power analysis uses [DeclareDesign](http://declaredesign.org/). Another option is the [egap Power Analysis page.](https://egap.org/content/power-analysis-simulations-r)(we will discuss this in further detail in a subsequent class)
###Code
mean.a <- 0
effect.b <- 0.1
sample.size <- 500
design <-
declare_population(
N = sample.size
) +
declare_potential_outcomes(
YA_Z_0 = rnorm(n=N, mean = mean.a, sd=1),
YA_Z_1 = rnorm(n=N, mean = mean.a + effect.b, sd=1)
) +
declare_assignment(num_arms = 2,
conditions = (c("0", "1"))) +
declare_estimand(ate_YA_1_0 = effect.b) +
declare_reveal(outcome_variables = c("YA")) +
declare_estimator(YA ~ Z, estimand="ate_YA_1_0")
design
diagnose_design(design, sims=500, bootstrap_sims=500)
###Output
_____no_output_____ |
ML1 - Scikit Learn Methods (Complete).ipynb | ###Markdown
ClassificationWe'll take a tour of the methods for classification in sklearn. First let's load a toy dataset to use:
###Code
from sklearn.datasets import load_breast_cancer
breast = load_breast_cancer()
###Output
_____no_output_____
###Markdown
Let's take a look
###Code
# Convert it to a dataframe for better visuals
df = pd.DataFrame(breast.data)
df.columns = breast.feature_names
df
###Output
_____no_output_____
###Markdown
And now look at the targets
###Code
print(breast.target_names)
breast.target
###Output
['malignant' 'benign']
###Markdown
Classification Trees Using the scikit learn models is basically the same as in Julia's ScikitLearn.jl
###Code
from sklearn.tree import DecisionTreeClassifier
cart = DecisionTreeClassifier(max_depth=2, min_samples_leaf=140)
cart.fit(breast.data, breast.target)
###Output
_____no_output_____
###Markdown
Here's a helper function to plot the trees. Installing Graphviz (tedious) Windows1. Download graphviz from https://graphviz.gitlab.io/_pages/Download/Download_windows.html2. Install it by running the .msi file3. Set the pat variable: (a) Go to Control Panel > System and Security > System > Advanced System Settings > Environment Variables > Path > Edit (b) Add 'C:\Program Files (x86)\Graphviz2.38\bin'4. Run `conda install graphviz`5. Run `conda install python-graphviz` macOS and Linux1. Run `brew install graphviz` (install `brew` from https://docs.brew.sh/Installation if you don't have it)2. Run `conda install graphviz`3. Run `conda install python-graphviz`
###Code
import graphviz
import sklearn.tree
def visualize_tree(sktree):
dot_data = sklearn.tree.export_graphviz(sktree, out_file=None,
filled=True, rounded=True,
special_characters=False,
feature_names=df.columns)
return graphviz.Source(dot_data)
visualize_tree(cart)
###Output
_____no_output_____
###Markdown
We can get the label predictions with the `.predict` method
###Code
labels = cart.predict(breast.data)
labels
###Output
_____no_output_____
###Markdown
And similarly the predicted probabilities with `.predict_proba`
###Code
probs = cart.predict_proba(breast.data)
probs
###Output
_____no_output_____
###Markdown
Just like in Julia, the probabilities are returned for each class
###Code
probs.shape
###Output
_____no_output_____
###Markdown
We can extract the second column of the probs by slicing, just like how we did it in Julia
###Code
probs = cart.predict_proba(breast.data)[:,1]
probs
###Output
_____no_output_____
###Markdown
To evaluate the model, we can use functions from `sklearn.metrics`
###Code
from sklearn.metrics import roc_auc_score, accuracy_score, confusion_matrix
roc_auc_score(breast.target, probs)
accuracy_score(breast.target, labels)
confusion_matrix(breast.target, labels)
from lazypredict.Supervised import LazyClassifier
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
data = load_breast_cancer()
X = data.data
y= data.target
X_train, X_test, y_train, y_test = train_test_split(X, y,test_size=.5,random_state =123)
clf = LazyClassifier(verbose=0,ignore_warnings=True, custom_metric=None)
models,predictions = clf.fit(X_train, X_test, y_train, y_test)
models
###Output
C:\Users\omars\AppData\Roaming\Python\Python37\site-packages\sklearn\utils\deprecation.py:143: FutureWarning: The sklearn.utils.testing module is deprecated in version 0.22 and will be removed in version 0.24. The corresponding classes / functions should instead be imported from sklearn.utils. Anything that cannot be imported from sklearn.utils is now part of the private API.
warnings.warn(message, FutureWarning)
100%|██████████████████████████████████████████████████████████████████████████████████| 30/30 [00:03<00:00, 8.91it/s]
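###Markdown
Note that the tree metrics above are computed on the same data the model was trained on. As a hedged sketch (variable names here are illustrative), out-of-sample performance can be checked with a held-out split:
###Code
# Hedged sketch: evaluate the same tree settings on a held-out test split
from sklearn.model_selection import train_test_split
X_tr, X_te, y_tr, y_te = train_test_split(breast.data, breast.target, test_size=0.3, random_state=1)
cart_holdout = DecisionTreeClassifier(max_depth=2, min_samples_leaf=140)
cart_holdout.fit(X_tr, y_tr)
print(accuracy_score(y_te, cart_holdout.predict(X_te)))
print(roc_auc_score(y_te, cart_holdout.predict_proba(X_te)[:, 1]))
###Output
_____no_output_____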
###Markdown
Random Forests and BoostingWe use random forests and boosting in the same way as CART
###Code
from sklearn.ensemble import RandomForestClassifier
forest = RandomForestClassifier(n_estimators=100)
forest.fit(breast.data, breast.target)
labels = forest.predict(breast.data)
probs = forest.predict_proba(breast.data)[:,1]
print(roc_auc_score(breast.target, probs))
print(accuracy_score(breast.target, labels))
confusion_matrix(breast.target, labels)
from sklearn.ensemble import GradientBoostingClassifier
boost = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1)
boost.fit(breast.data, breast.target)
labels = boost.predict(breast.data)
probs = boost.predict_proba(breast.data)[:,1]
print(roc_auc_score(breast.target, probs))
print(accuracy_score(breast.target, labels))
confusion_matrix(breast.target, labels)
#!pip install xgboost
from xgboost import XGBClassifier
boost2 = XGBClassifier()
boost2.fit(breast.data, breast.target)
labels = boost2.predict(breast.data)
probs = boost2.predict_proba(breast.data)[:,1]
print(roc_auc_score(breast.target, probs))
print(accuracy_score(breast.target, labels))
confusion_matrix(breast.target, labels)
###Output
1.0
1.0
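###Markdown
A caution: the scores above (including the perfect 1.0s for boosting) are in-sample — each model is evaluated on the same rows it was fit on. A quick held-out check is sketched below; the split ratio and random seed are illustrative assumptions, not part of the original exercise.
###Code
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier

# Hold out 30% of the data and score on it instead of the training rows
Xb_train, Xb_test, yb_train, yb_test = train_test_split(
    breast.data, breast.target, test_size=0.3, random_state=1)

forest_holdout = RandomForestClassifier(n_estimators=100)
forest_holdout.fit(Xb_train, yb_train)

print(roc_auc_score(yb_test, forest_holdout.predict_proba(Xb_test)[:, 1]))
print(accuracy_score(yb_test, forest_holdout.predict(Xb_test)))
###Output
_____no_output_____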
###Markdown
Neural Networks
###Code
from sklearn.neural_network import MLPClassifier
mlp = MLPClassifier(max_iter=1000)
mlp.fit(breast.data, breast.target)
labels = mlp.predict(breast.data)
probs = mlp.predict_proba(breast.data)[:,1]
print(roc_auc_score(breast.target, probs))
print(accuracy_score(breast.target, labels))
confusion_matrix(breast.target, labels)
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from keras.utils import np_utils
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import Pipeline
# load dataset
X = breast.data
Y = breast.target
# convert integers to dummy variables (i.e. one hot encoded)
dummy_y = np_utils.to_categorical(Y)
# define baseline model
def baseline_model():
# create model
model = Sequential()
model.add(Dense(8, input_dim=30, activation='relu'))
model.add(Dense(2, activation='softmax'))
# Compile model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
estimator = KerasClassifier(build_fn=baseline_model, epochs=10, batch_size=16, verbose=1)
kfold = KFold(n_splits=5, shuffle=True)
results = cross_val_score(estimator, X, dummy_y, cv=kfold)
print("Baseline: %.2f%% (%.2f%%)" % (results.mean()*100, results.std()*100))
###Output
Epoch 1/10
29/29 [==============================] - 0s 6ms/step - loss: 36.4088 - accuracy: 0.3516
Epoch 2/10
29/29 [==============================] - 0s 3ms/step - loss: 17.8082 - accuracy: 0.3758
Epoch 3/10
29/29 [==============================] - 0s 4ms/step - loss: 15.4570 - accuracy: 0.4352
Epoch 4/10
29/29 [==============================] - 0s 4ms/step - loss: 12.8511 - accuracy: 0.4769
Epoch 5/10
29/29 [==============================] - 0s 4ms/step - loss: 10.6894 - accuracy: 0.5319
Epoch 6/10
29/29 [==============================] - 0s 5ms/step - loss: 9.3079 - accuracy: 0.5670
Epoch 7/10
29/29 [==============================] - 0s 4ms/step - loss: 8.3241 - accuracy: 0.5824
Epoch 8/10
29/29 [==============================] - 0s 5ms/step - loss: 7.3370 - accuracy: 0.6264
Epoch 9/10
29/29 [==============================] - 0s 4ms/step - loss: 6.6161 - accuracy: 0.6154
Epoch 10/10
29/29 [==============================] - 0s 4ms/step - loss: 5.6886 - accuracy: 0.6374
8/8 [==============================] - 0s 2ms/step - loss: 5.7604 - accuracy: 0.6491
Epoch 1/10
29/29 [==============================] - 0s 5ms/step - loss: 25.0130 - accuracy: 0.4593
Epoch 2/10
29/29 [==============================] - 0s 5ms/step - loss: 9.2743 - accuracy: 0.2923
Epoch 3/10
29/29 [==============================] - 0s 5ms/step - loss: 6.6822 - accuracy: 0.3385
Epoch 4/10
29/29 [==============================] - 0s 4ms/step - loss: 4.5664 - accuracy: 0.4198
Epoch 5/10
29/29 [==============================] - 0s 5ms/step - loss: 3.2452 - accuracy: 0.5253
Epoch 6/10
29/29 [==============================] - 0s 5ms/step - loss: 2.2986 - accuracy: 0.6000
Epoch 7/10
29/29 [==============================] - 0s 4ms/step - loss: 1.7611 - accuracy: 0.6923
Epoch 8/10
29/29 [==============================] - 0s 5ms/step - loss: 1.3770 - accuracy: 0.7231
Epoch 9/10
29/29 [==============================] - 0s 5ms/step - loss: 1.2754 - accuracy: 0.7407
Epoch 10/10
29/29 [==============================] - 0s 5ms/step - loss: 1.1074 - accuracy: 0.7912
8/8 [==============================] - 0s 1ms/step - loss: 0.8304 - accuracy: 0.7544
Epoch 1/10
29/29 [==============================] - 0s 2ms/step - loss: 19.6493 - accuracy: 0.5560
Epoch 2/10
29/29 [==============================] - 0s 4ms/step - loss: 2.1954 - accuracy: 0.9033
Epoch 3/10
29/29 [==============================] - 0s 4ms/step - loss: 1.7177 - accuracy: 0.8593
Epoch 4/10
29/29 [==============================] - 0s 4ms/step - loss: 1.5284 - accuracy: 0.8901
Epoch 5/10
29/29 [==============================] - 0s 3ms/step - loss: 1.5605 - accuracy: 0.9033
Epoch 6/10
29/29 [==============================] - 0s 3ms/step - loss: 1.4026 - accuracy: 0.8901
Epoch 7/10
29/29 [==============================] - 0s 3ms/step - loss: 1.3803 - accuracy: 0.8923: 0s - loss: 1.4837 - accuracy: 0.89
Epoch 8/10
29/29 [==============================] - 0s 3ms/step - loss: 1.2817 - accuracy: 0.9011
Epoch 9/10
29/29 [==============================] - 0s 3ms/step - loss: 1.4488 - accuracy: 0.8835
Epoch 10/10
29/29 [==============================] - 0s 2ms/step - loss: 1.2524 - accuracy: 0.8923
8/8 [==============================] - 0s 2ms/step - loss: 1.6985 - accuracy: 0.8684
Epoch 1/10
29/29 [==============================] - 0s 1ms/step - loss: 69.7143 - accuracy: 0.3714
Epoch 2/10
29/29 [==============================] - 0s 3ms/step - loss: 25.4033 - accuracy: 0.3912
Epoch 3/10
29/29 [==============================] - 0s 3ms/step - loss: 2.2258 - accuracy: 0.8725
Epoch 4/10
29/29 [==============================] - 0s 3ms/step - loss: 1.8741 - accuracy: 0.8637
Epoch 5/10
29/29 [==============================] - 0s 3ms/step - loss: 1.7284 - accuracy: 0.8813
Epoch 6/10
29/29 [==============================] - 0s 3ms/step - loss: 1.7166 - accuracy: 0.8747
Epoch 7/10
29/29 [==============================] - 0s 2ms/step - loss: 1.6775 - accuracy: 0.8923
Epoch 8/10
29/29 [==============================] - 0s 3ms/step - loss: 1.6431 - accuracy: 0.8923
Epoch 9/10
29/29 [==============================] - 0s 2ms/step - loss: 1.6271 - accuracy: 0.8703
Epoch 10/10
29/29 [==============================] - 0s 2ms/step - loss: 1.5868 - accuracy: 0.8879
8/8 [==============================] - 0s 2ms/step - loss: 1.1058 - accuracy: 0.8684
Epoch 1/10
29/29 [==============================] - 0s 2ms/step - loss: 9.9503 - accuracy: 0.3838
Epoch 2/10
29/29 [==============================] - 0s 2ms/step - loss: 6.8765 - accuracy: 0.3816
Epoch 3/10
29/29 [==============================] - 0s 2ms/step - loss: 5.1510 - accuracy: 0.4561
Epoch 4/10
29/29 [==============================] - 0s 3ms/step - loss: 3.8095 - accuracy: 0.5592
Epoch 5/10
29/29 [==============================] - 0s 3ms/step - loss: 3.1159 - accuracy: 0.6623
Epoch 6/10
29/29 [==============================] - 0s 2ms/step - loss: 2.6108 - accuracy: 0.6820
Epoch 7/10
29/29 [==============================] - 0s 3ms/step - loss: 2.1609 - accuracy: 0.7259
Epoch 8/10
29/29 [==============================] - 0s 2ms/step - loss: 1.9964 - accuracy: 0.7456
Epoch 9/10
29/29 [==============================] - 0s 2ms/step - loss: 1.7422 - accuracy: 0.7763
Epoch 10/10
29/29 [==============================] - 0s 2ms/step - loss: 1.6857 - accuracy: 0.7500
8/8 [==============================] - 0s 5ms/step - loss: 1.4334 - accuracy: 0.8319
Baseline: 79.44% (8.37%)
###Markdown
Logistic RegressionWe can also access logistic regression from sklearn
###Code
from sklearn.linear_model import LogisticRegression
logit = LogisticRegression(solver='liblinear')
logit.fit(breast.data, breast.target)
labels = logit.predict(breast.data)
probs = logit.predict_proba(breast.data)[:,1]
print(roc_auc_score(breast.target, probs))
print(accuracy_score(breast.target, labels))
confusion_matrix(breast.target, labels)
###Output
0.9945298874266688
0.9578207381370826
###Markdown
The sklearn implementation has options for regularization in logistic regression. You can choose between L1 and L2 regularization.

Note that this regularization is ad hoc and **not equivalent to robustness**. For a robust logistic regression, follow the approach from 15.680.

You control the regularization with the `penalty` and `C` hyperparameters. We can see that our model above used L2 regularization with $C=1$.

Exercise

Try out unregularized logistic regression as well as L1 regularization. Which of the three options seems best? What if you try changing $C$?
###Code
# No regularization
logit = LogisticRegression(C=1e10, solver='liblinear')
logit.fit(breast.data, breast.target)
labels = logit.predict(breast.data)
probs = logit.predict_proba(breast.data)[:,1]
print(roc_auc_score(breast.target, probs))
print(accuracy_score(breast.target, labels))
confusion_matrix(breast.target, labels)
# L1 regularization
logit = LogisticRegression(C=100, penalty='l1', solver='liblinear')
logit.fit(breast.data, breast.target)
labels = logit.predict(breast.data)
probs = logit.predict_proba(breast.data)[:,1]
print(roc_auc_score(breast.target, probs))
print(accuracy_score(breast.target, labels))
confusion_matrix(breast.target, labels)
###Output
0.9985201627820939
0.9876977152899824
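###Markdown
One way to see how `C` matters (smaller `C` means stronger regularization) is to sweep a few values and watch the in-sample accuracy. The grid below is an illustrative sketch, not part of the original exercise.
###Code
# Sweep the inverse regularization strength C for L2-regularized logistic regression
for C in [0.01, 1, 100, 1e10]:
    model = LogisticRegression(C=C, solver='liblinear')
    model.fit(breast.data, breast.target)
    print(C, accuracy_score(breast.target, model.predict(breast.data)))
###Output
_____no_output_____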
###Markdown
RegressionNow let's take a look at regression in sklearn. Again we can start by loading up a dataset.
###Code
from sklearn.datasets import load_boston
boston = load_boston()
print(boston.DESCR)
###Output
.. _boston_dataset:
Boston house prices dataset
---------------------------
**Data Set Characteristics:**
:Number of Instances: 506
:Number of Attributes: 13 numeric/categorical predictive. Median Value (attribute 14) is usually the target.
:Attribute Information (in order):
- CRIM per capita crime rate by town
- ZN proportion of residential land zoned for lots over 25,000 sq.ft.
- INDUS proportion of non-retail business acres per town
- CHAS Charles River dummy variable (= 1 if tract bounds river; 0 otherwise)
- NOX nitric oxides concentration (parts per 10 million)
- RM average number of rooms per dwelling
- AGE proportion of owner-occupied units built prior to 1940
- DIS weighted distances to five Boston employment centres
- RAD index of accessibility to radial highways
- TAX full-value property-tax rate per $10,000
- PTRATIO pupil-teacher ratio by town
- B 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town
- LSTAT % lower status of the population
- MEDV Median value of owner-occupied homes in $1000's
:Missing Attribute Values: None
:Creator: Harrison, D. and Rubinfeld, D.L.
This is a copy of UCI ML housing dataset.
https://archive.ics.uci.edu/ml/machine-learning-databases/housing/
This dataset was taken from the StatLib library which is maintained at Carnegie Mellon University.
The Boston house-price data of Harrison, D. and Rubinfeld, D.L. 'Hedonic
prices and the demand for clean air', J. Environ. Economics & Management,
vol.5, 81-102, 1978. Used in Belsley, Kuh & Welsch, 'Regression diagnostics
...', Wiley, 1980. N.B. Various transformations are used in the table on
pages 244-261 of the latter.
The Boston house-price data has been used in many machine learning papers that address regression
problems.
.. topic:: References
- Belsley, Kuh & Welsch, 'Regression diagnostics: Identifying Influential Data and Sources of Collinearity', Wiley, 1980. 244-261.
- Quinlan,R. (1993). Combining Instance-Based and Model-Based Learning. In Proceedings on the Tenth International Conference of Machine Learning, 236-243, University of Massachusetts, Amherst. Morgan Kaufmann.
###Markdown
Take a look at the X
###Code
df = pd.DataFrame(boston.data)
df.columns = boston.feature_names
df
boston.target
###Output
_____no_output_____
###Markdown
Regression TreesWe use regression trees in the same way as classification
###Code
from sklearn.tree import DecisionTreeRegressor
cart = DecisionTreeRegressor(max_depth=2, min_samples_leaf=5)
cart.fit(boston.data, boston.target)
visualize_tree(cart)
###Output
_____no_output_____
###Markdown
Like for classification, we get the predicted labels out with the `.predict` method
###Code
preds = cart.predict(boston.data)
preds
###Output
_____no_output_____
###Markdown
There are functions provided by `sklearn.metrics` to evaluate the predictions
###Code
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
print(mean_absolute_error(boston.target, preds))
print(mean_squared_error(boston.target, preds))
print(r2_score(boston.target, preds))
###Output
3.5736909785051676
25.69946745212606
0.695574477973027
###Markdown
Random Forests and BoostingRandom forests and boosting for regression work the same as in classification, except we use the `Regressor` version rather than `Classifier`. ExerciseTest and compare the (in-sample) performance of random forests and boosting on the Boston data with some sensible parameters.
###Code
from sklearn.ensemble import RandomForestRegressor
forest = RandomForestRegressor(n_estimators=100)
forest.fit(boston.data, boston.target)
preds = forest.predict(boston.data)
print(mean_absolute_error(boston.target, preds))
print(mean_squared_error(boston.target, preds))
print(r2_score(boston.target, preds))
from sklearn.ensemble import GradientBoostingRegressor
boost = GradientBoostingRegressor(n_estimators=100, learning_rate=0.2)
boost.fit(boston.data, boston.target)
preds = boost.predict(boston.data)
print(mean_absolute_error(boston.target, preds))
print(mean_squared_error(boston.target, preds))
print(r2_score(boston.target, preds))
from xgboost import XGBRegressor
boost2 = XGBRegressor()
boost2.fit(boston.data, boston.target)
preds = boost2.predict(boston.data)
print(mean_absolute_error(boston.target, preds))
print(mean_squared_error(boston.target, preds))
print(r2_score(boston.target, preds))
###Output
0.026413507235379965
0.0014430003436840648
0.9999829068001611
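###Markdown
As the exercise notes, these are in-sample numbers, and the near-perfect XGBoost R-squared comes from scoring on the training rows. For a fairer comparison, a cross-validated sketch is shown below; the 5-fold shuffled setup is an assumption, not part of the original exercise.
###Code
from sklearn.model_selection import cross_val_score, KFold

# Compare the three regressors with shuffled 5-fold cross-validated R-squared
cv = KFold(n_splits=5, shuffle=True, random_state=1)
for name, model in [('Random forest', RandomForestRegressor(n_estimators=100)),
                    ('Gradient boosting', GradientBoostingRegressor(n_estimators=100, learning_rate=0.2)),
                    ('XGBoost', XGBRegressor())]:
    scores = cross_val_score(model, boston.data, boston.target, cv=cv, scoring='r2')
    print(name, round(scores.mean(), 3))
###Output
_____no_output_____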
###Markdown
Neural Networks
###Code
from sklearn.neural_network import MLPRegressor
mlp = MLPRegressor(max_iter=1000)
mlp.fit(boston.data, boston.target)
preds = mlp.predict(boston.data)
print(mean_absolute_error(boston.target, preds))
print(mean_squared_error(boston.target, preds))
print(r2_score(boston.target, preds))
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasRegressor
from keras.utils import np_utils
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import Pipeline
# load dataset
X = boston.data
Y = boston.target
# define baseline model
def baseline_model():
# create model
model = Sequential()
model.add(Dense(13, input_dim=X.shape[1], kernel_initializer='normal', activation='relu'))
model.add(Dense(1, kernel_initializer='normal'))
# Compile model
model.compile(loss='mean_squared_error', optimizer='adam')
return model
estimator = KerasRegressor(build_fn=baseline_model, epochs=10, batch_size=16, verbose=1)
kfold = KFold(n_splits=5, shuffle=True)
results = cross_val_score(estimator, X, Y, cv=kfold)
print("Mean Squared Error: %.2f (%.2f)" % (abs(results.mean()), results.std()))
###Output
Epoch 1/10
26/26 [==============================] - 0s 2ms/step - loss: 416.3250
Epoch 2/10
26/26 [==============================] - 0s 4ms/step - loss: 189.9820
Epoch 3/10
26/26 [==============================] - 0s 5ms/step - loss: 147.3391
Epoch 4/10
26/26 [==============================] - 0s 4ms/step - loss: 123.4213
Epoch 5/10
26/26 [==============================] - 0s 4ms/step - loss: 101.8594
Epoch 6/10
26/26 [==============================] - 0s 4ms/step - loss: 86.1179
Epoch 7/10
26/26 [==============================] - 0s 4ms/step - loss: 76.7525: 0s - loss: 74.
Epoch 8/10
26/26 [==============================] - 0s 5ms/step - loss: 71.9598
Epoch 9/10
26/26 [==============================] - 0s 5ms/step - loss: 68.5318
Epoch 10/10
26/26 [==============================] - 0s 5ms/step - loss: 66.7393
7/7 [==============================] - 0s 4ms/step - loss: 68.3569
Epoch 1/10
26/26 [==============================] - 0s 4ms/step - loss: 229.8424
Epoch 2/10
26/26 [==============================] - 0s 5ms/step - loss: 152.8784
Epoch 3/10
26/26 [==============================] - 0s 5ms/step - loss: 124.9488
Epoch 4/10
26/26 [==============================] - 0s 5ms/step - loss: 101.3086A: 0s - loss: 103.713
Epoch 5/10
26/26 [==============================] - 0s 5ms/step - loss: 83.7563
Epoch 6/10
26/26 [==============================] - 0s 6ms/step - loss: 72.4537
Epoch 7/10
26/26 [==============================] - 0s 5ms/step - loss: 68.8848
Epoch 8/10
26/26 [==============================] - 0s 5ms/step - loss: 65.2835
Epoch 9/10
26/26 [==============================] - 0s 5ms/step - loss: 63.0682
Epoch 10/10
26/26 [==============================] - 0s 5ms/step - loss: 61.2580
7/7 [==============================] - 0s 4ms/step - loss: 78.1588
Epoch 1/10
26/26 [==============================] - 0s 3ms/step - loss: 394.2870
Epoch 2/10
26/26 [==============================] - 0s 4ms/step - loss: 154.2006
Epoch 3/10
26/26 [==============================] - 0s 4ms/step - loss: 121.5101
Epoch 4/10
26/26 [==============================] - 0s 5ms/step - loss: 96.5747
Epoch 5/10
26/26 [==============================] - 0s 5ms/step - loss: 79.6176
Epoch 6/10
26/26 [==============================] - 0s 5ms/step - loss: 71.7972
Epoch 7/10
26/26 [==============================] - 0s 6ms/step - loss: 67.3771
Epoch 8/10
26/26 [==============================] - 0s 6ms/step - loss: 65.3503
Epoch 9/10
26/26 [==============================] - 0s 5ms/step - loss: 63.4878
Epoch 10/10
26/26 [==============================] - 0s 5ms/step - loss: 62.6430
7/7 [==============================] - 0s 3ms/step - loss: 72.4628
Epoch 1/10
26/26 [==============================] - 0s 4ms/step - loss: 457.5549
Epoch 2/10
26/26 [==============================] - 0s 4ms/step - loss: 213.4543
Epoch 3/10
26/26 [==============================] - 0s 3ms/step - loss: 137.6380
Epoch 4/10
26/26 [==============================] - 0s 5ms/step - loss: 123.8971
Epoch 5/10
26/26 [==============================] - 0s 6ms/step - loss: 111.1874
Epoch 6/10
26/26 [==============================] - 0s 4ms/step - loss: 101.2769
Epoch 7/10
26/26 [==============================] - 0s 5ms/step - loss: 91.2895
Epoch 8/10
26/26 [==============================] - 0s 5ms/step - loss: 84.0696
Epoch 9/10
26/26 [==============================] - 0s 4ms/step - loss: 79.6263
Epoch 10/10
26/26 [==============================] - 0s 4ms/step - loss: 76.9124
7/7 [==============================] - 0s 1ms/step - loss: 49.1916
Epoch 1/10
26/26 [==============================] - 0s 1ms/step - loss: 337.1278
Epoch 2/10
26/26 [==============================] - 0s 2ms/step - loss: 109.4944
Epoch 3/10
26/26 [==============================] - 0s 3ms/step - loss: 95.3181
Epoch 4/10
26/26 [==============================] - 0s 3ms/step - loss: 88.5550
Epoch 5/10
26/26 [==============================] - 0s 3ms/step - loss: 83.2130
Epoch 6/10
26/26 [==============================] - 0s 2ms/step - loss: 79.5414
Epoch 7/10
26/26 [==============================] - 0s 4ms/step - loss: 76.1562
Epoch 8/10
26/26 [==============================] - 0s 4ms/step - loss: 74.1669
Epoch 9/10
26/26 [==============================] - 0s 5ms/step - loss: 73.5261
Epoch 10/10
26/26 [==============================] - 0s 5ms/step - loss: 74.5165
7/7 [==============================] - 0s 854us/step - loss: 74.1472
Mean Squared Error: 68.46 (10.14)
###Markdown
Linear Regression Models There are a large collection of linear regression models in sklearn. Let's start with a simple ordinary linear regression
###Code
from sklearn.linear_model import LinearRegression
linear = LinearRegression()
linear.fit(boston.data, boston.target)
preds = linear.predict(boston.data)
print(mean_absolute_error(boston.target, preds))
print(mean_squared_error(boston.target, preds))
print(r2_score(boston.target, preds))
###Output
3.270862810900317
21.894831181729206
0.7406426641094094
###Markdown
We can also take a look at the betas:
###Code
linear.coef_
###Output
_____no_output_____
###Markdown
We can use regularized models as well. Here is ridge regression:
###Code
from sklearn.linear_model import Ridge
ridge = Ridge(alpha=10)
ridge.fit(boston.data, boston.target)
preds = ridge.predict(boston.data)
print(mean_absolute_error(boston.target, preds))
print(mean_squared_error(boston.target, preds))
print(r2_score(boston.target, preds))
ridge.coef_
###Output
3.315169248123664
22.660363555639318
0.7315744764907257
###Markdown
And here is lasso
###Code
from sklearn.linear_model import Lasso
lasso = Lasso(alpha=1)
lasso.fit(boston.data, boston.target)
preds = lasso.predict(boston.data)
print(mean_absolute_error(boston.target, preds))
print(mean_squared_error(boston.target, preds))
print(r2_score(boston.target, preds))
lasso.coef_
###Output
3.6117102456478434
26.79609915726647
0.6825842212709925
###Markdown
There are many other linear regression models available. See the [linear model documentation](http://scikit-learn.org/stable/modules/linear_model.html) for more.

Exercise

The elastic net is another linear regression method that combines ridge and lasso regularization. Try running it on this dataset, referring to the documentation as needed to learn how to use it and control the hyperparameters.
###Code
from sklearn.linear_model import ElasticNet
elastic = ElasticNet(alpha=1, l1_ratio=.7)
elastic.fit(boston.data, boston.target)
preds = elastic.predict(boston.data)
print(mean_absolute_error(boston.target, preds))
print(mean_squared_error(boston.target, preds))
print(r2_score(boston.target, preds))
elastic.coef_
?DecisionTreeClassifier
###Output
_____no_output_____
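###Markdown
To make the "combines ridge and lasso" point concrete, one illustrative sweep (the parameter values are assumptions, not from the lesson) varies `l1_ratio` and counts how many coefficients are driven exactly to zero — lasso-like sparsity appears as `l1_ratio` approaches 1.
###Code
import numpy as np
from sklearn.linear_model import ElasticNet

# Sweep the ridge/lasso mixing parameter; report in-sample R-squared and coefficient sparsity
for l1_ratio in [0.01, 0.3, 0.7, 1.0]:
    en = ElasticNet(alpha=1, l1_ratio=l1_ratio)
    en.fit(boston.data, boston.target)
    n_zero = int(np.sum(en.coef_ == 0))
    print(l1_ratio, r2_score(boston.target, en.predict(boston.data)), n_zero)
###Output
_____no_output_____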
###Markdown
ClassificationWe'll take a tour of the methods for classification in sklearn. First let's load a toy dataset to use:
###Code
from sklearn.datasets import load_breast_cancer
breast = load_breast_cancer()
###Output
_____no_output_____
###Markdown
Let's take a look
###Code
# Convert it to a dataframe for better visuals
df = pd.DataFrame(breast.data)
df.columns = breast.feature_names
df
###Output
_____no_output_____
###Markdown
And now look at the targets
###Code
print(breast.target_names)
breast.target
###Output
_____no_output_____
###Markdown
Classification Trees Using the scikit learn models is basically the same as in Julia's ScikitLearn.jl
###Code
from sklearn.tree import DecisionTreeClassifier
cart = DecisionTreeClassifier(max_depth=2, min_samples_leaf=140)
cart.fit(breast.data, breast.target)
###Output
_____no_output_____
###Markdown
Here's a helper function to plot the trees.

Installing Graphviz (tedious)

Windows

1. Download graphviz from https://graphviz.gitlab.io/_pages/Download/Download_windows.html
2. Install it by running the .msi file
3. Set the path variable: (a) Go to Control Panel > System and Security > System > Advanced System Settings > Environment Variables > Path > Edit (b) Add 'C:\Program Files (x86)\Graphviz2.38\bin'
4. Run `conda install graphviz`
5. Run `conda install python-graphviz`

macOS and Linux

1. Run `brew install graphviz` (install `brew` from https://docs.brew.sh/Installation if you don't have it)
2. Run `conda install graphviz`
3. Run `conda install python-graphviz`
###Code
import graphviz
import sklearn.tree
def visualize_tree(sktree):
dot_data = sklearn.tree.export_graphviz(sktree, out_file=None,
filled=True, rounded=True,
special_characters=False,
feature_names=df.columns)
return graphviz.Source(dot_data)
visualize_tree(cart)
###Output
_____no_output_____
###Markdown
We can get the label predictions with the `.predict` method
###Code
labels = cart.predict(breast.data)
labels
###Output
_____no_output_____
###Markdown
And similarly the predicted probabilities with `.predict_proba`
###Code
probs = cart.predict_proba(breast.data)
probs
###Output
_____no_output_____
###Markdown
Just like in Julia, the probabilities are returned for each class
###Code
probs.shape
###Output
_____no_output_____
###Markdown
We can extract the second column of the probs by slicing, just like how we did it in Julia
###Code
probs = cart.predict_proba(breast.data)[:,1]
probs
###Output
_____no_output_____
###Markdown
To evaluate the model, we can use functions from `sklearn.metrics`
###Code
from sklearn.metrics import roc_auc_score, accuracy_score, confusion_matrix
roc_auc_score(breast.target, probs)
accuracy_score(breast.target, labels)
confusion_matrix(breast.target, labels)
###Output
_____no_output_____
###Markdown
Random Forests and BoostingWe use random forests and boosting in the same way as CART
###Code
from sklearn.ensemble import RandomForestClassifier
forest = RandomForestClassifier(n_estimators=100)
forest.fit(breast.data, breast.target)
labels = forest.predict(breast.data)
probs = forest.predict_proba(breast.data)[:,1]
print(roc_auc_score(breast.target, probs))
print(accuracy_score(breast.target, labels))
confusion_matrix(breast.target, labels)
from sklearn.ensemble import GradientBoostingClassifier
boost = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1)
boost.fit(breast.data, breast.target)
labels = boost.predict(breast.data)
probs = boost.predict_proba(breast.data)[:,1]
print(roc_auc_score(breast.target, probs))
print(accuracy_score(breast.target, labels))
confusion_matrix(breast.target, labels)
###Output
_____no_output_____
###Markdown
Logistic RegressionWe can also access logistic regression from sklearn
###Code
from sklearn.linear_model import LogisticRegression
logit = LogisticRegression()
logit.fit(breast.data, breast.target)
labels = logit.predict(breast.data)
probs = logit.predict_proba(breast.data)[:,1]
print(roc_auc_score(breast.target, probs))
print(accuracy_score(breast.target, labels))
confusion_matrix(breast.target, labels)
###Output
_____no_output_____
###Markdown
The sklearn implementation has options for regularization in logistic regression. You can choose between L1 and L2 regularization.

Note that this regularization is ad hoc and **not equivalent to robustness**. For a robust logistic regression, follow the approach from 15.680.

You control the regularization with the `penalty` and `C` hyperparameters. We can see that our model above used L2 regularization with $C=1$.

Exercise

Try out unregularized logistic regression as well as L1 regularization. Which of the three options seems best? What if you try changing $C$?
###Code
# No regularization
logit = LogisticRegression(C=1e10)
logit.fit(breast.data, breast.target)
labels = logit.predict(breast.data)
probs = logit.predict_proba(breast.data)[:,1]
print(roc_auc_score(breast.target, probs))
print(accuracy_score(breast.target, labels))
confusion_matrix(breast.target, labels)
# L1 regularization
logit = LogisticRegression(C=100, penalty='l1', solver='liblinear')  # liblinear supports the l1 penalty
logit.fit(breast.data, breast.target)
labels = logit.predict(breast.data)
probs = logit.predict_proba(breast.data)[:,1]
print(roc_auc_score(breast.target, probs))
print(accuracy_score(breast.target, labels))
confusion_matrix(breast.target, labels)
###Output
_____no_output_____
###Markdown
RegressionNow let's take a look at regression in sklearn. Again we can start by loading up a dataset.
###Code
from sklearn.datasets import load_boston
boston = load_boston()
print(boston.DESCR)
###Output
_____no_output_____
###Markdown
Take a look at the X
###Code
df = pd.DataFrame(boston.data)
df.columns = boston.feature_names
df
boston.target
###Output
_____no_output_____
###Markdown
Regression TreesWe use regression trees in the same way as classification
###Code
from sklearn.tree import DecisionTreeRegressor
cart = DecisionTreeRegressor(max_depth=2, min_samples_leaf=5)
cart.fit(boston.data, boston.target)
visualize_tree(cart)
###Output
_____no_output_____
###Markdown
Like for classification, we get the predicted labels out with the `.predict` method
###Code
preds = cart.predict(boston.data)
preds
###Output
_____no_output_____
###Markdown
There are functions provided by `sklearn.metrics` to evaluate the predictions
###Code
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
print(mean_absolute_error(boston.target, preds))
print(mean_squared_error(boston.target, preds))
print(r2_score(boston.target, preds))
###Output
_____no_output_____
###Markdown
Random Forests and BoostingRandom forests and boosting for regression work the same as in classification, except we use the `Regressor` version rather than `Classifier`. ExerciseTest and compare the (in-sample) performance of random forests and boosting on the Boston data with some sensible parameters.
###Code
from sklearn.ensemble import RandomForestRegressor
forest = RandomForestRegressor(n_estimators=100)
forest.fit(boston.data, boston.target)
preds = forest.predict(boston.data)
print(mean_absolute_error(boston.target, preds))
print(mean_squared_error(boston.target, preds))
print(r2_score(boston.target, preds))
from sklearn.ensemble import GradientBoostingRegressor
boost = GradientBoostingRegressor(n_estimators=100, learning_rate=0.2)
boost.fit(boston.data, boston.target)
preds = boost.predict(boston.data)
print(mean_absolute_error(boston.target, preds))
print(mean_squared_error(boston.target, preds))
print(r2_score(boston.target, preds))
###Output
_____no_output_____
###Markdown
Linear Regression Models There are a large collection of linear regression models in sklearn. Let's start with a simple ordinary linear regression
###Code
from sklearn.linear_model import LinearRegression
linear = LinearRegression()
linear.fit(boston.data, boston.target)
preds = linear.predict(boston.data)
print(mean_absolute_error(boston.target, preds))
print(mean_squared_error(boston.target, preds))
print(r2_score(boston.target, preds))
###Output
_____no_output_____
###Markdown
We can also take a look at the betas:
###Code
linear.coef_
###Output
_____no_output_____
###Markdown
We can use regularized models as well. Here is ridge regression:
###Code
from sklearn.linear_model import Ridge
ridge = Ridge(alpha=10)
ridge.fit(boston.data, boston.target)
preds = ridge.predict(boston.data)
print(mean_absolute_error(boston.target, preds))
print(mean_squared_error(boston.target, preds))
print(r2_score(boston.target, preds))
ridge.coef_
###Output
_____no_output_____
###Markdown
And here is lasso
###Code
from sklearn.linear_model import Lasso
lasso = Lasso(alpha=1)
lasso.fit(boston.data, boston.target)
preds = lasso.predict(boston.data)
print(mean_absolute_error(boston.target, preds))
print(mean_squared_error(boston.target, preds))
print(r2_score(boston.target, preds))
lasso.coef_
###Output
_____no_output_____
###Markdown
There are many other linear regression models available. See the [linear model documentation](http://scikit-learn.org/stable/modules/linear_model.html) for more. ExerciseThe elastic net is another linear regression method that combines ridge and lasso regularization. Try running it on this dataset, referring to the documentation as needed to learn how to use it and control the hyperparameters.
###Code
from sklearn.linear_model import ElasticNet
net = ElasticNet(l1_ratio=0.3, alpha=1)
net.fit(boston.data, boston.target)
preds = net.predict(boston.data)
print(mean_absolute_error(boston.target, preds))
print(mean_squared_error(boston.target, preds))
print(r2_score(boston.target, preds))
net.coef_
###Output
_____no_output_____ |
Resources/Data-Science/Machine-Learning/Multiple-Linear-Regression/sklearn - Multiple Linear Regression_.ipynb | ###Markdown
Multiple Linear Regression with sklearn - Exercise Solution

You are given a real estate dataset. Real estate is one of those examples that every regression course goes through as it is extremely easy to understand and there is (almost always) a certain causal relationship to be found.

The data is located in the file: 'real_estate_price_size_year.csv'. You are expected to create a multiple linear regression (similar to the one in the lecture), using the new data. Apart from that, please:

- Display the intercept and coefficient(s)
- Find the R-squared and Adjusted R-squared
- Compare the R-squared and the Adjusted R-squared
- Compare the R-squared of this regression and the simple linear regression where only 'size' was used
- Using the model make a prediction about an apartment with size 750 sq.ft. from 2009
- Find the univariate (or multivariate if you wish - see the article) p-values of the two variables. What can you say about them?
- Create a summary table with your findings

In this exercise, the dependent variable is 'price', while the independent variables are 'size' and 'year'.

Good luck!

Import the relevant libraries
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
from sklearn.linear_model import LinearRegression
###Output
_____no_output_____
###Markdown
Load the data
###Code
data = pd.read_csv('real_estate_price_size_year.csv')
data.head()
data.describe()
###Output
_____no_output_____
###Markdown
Create the regression Declare the dependent and the independent variables
###Code
x = data[['size','year']]
y = data['price']
###Output
_____no_output_____
###Markdown
Regression
###Code
reg = LinearRegression()
reg.fit(x,y)
###Output
_____no_output_____
###Markdown
Find the intercept
###Code
reg.intercept_
###Output
_____no_output_____
###Markdown
Find the coefficients
###Code
reg.coef_
###Output
_____no_output_____
###Markdown
Calculate the R-squared
###Code
reg.score(x,y)
###Output
_____no_output_____
###Markdown
Calculate the Adjusted R-squared
###Code
# Let's use the handy function we created
def adj_r2(x,y):
r2 = reg.score(x,y)
n = x.shape[0]
p = x.shape[1]
adjusted_r2 = 1-(1-r2)*(n-1)/(n-p-1)
return adjusted_r2
adj_r2(x,y)
###Output
_____no_output_____
###Markdown
Compare the R-squared and the Adjusted R-squared

It seems that the R-squared is only slightly larger than the Adjusted R-squared, implying that we were not penalized a lot for the inclusion of 2 independent variables.

Compare the Adjusted R-squared with the R-squared of the simple linear regression

Comparing the Adjusted R-squared with the R-squared of the simple linear regression (when only 'size' was used - a couple of lectures ago), we realize that 'Year' is not bringing too much value to the result.

Making predictions

Find the predicted price of an apartment that has a size of 750 sq.ft. from 2009.
###Code
reg.predict([[750,2009]])
###Output
_____no_output_____
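###Markdown
To make the earlier comparison concrete, a small sketch (not part of the original exercise) refits a size-only model and sets its R-squared beside the adjusted R-squared of the size-plus-year model computed above.
###Code
# Refit using only 'size' and compare with the adjusted R-squared of the full model
reg_simple = LinearRegression()
reg_simple.fit(data[['size']], y)
print('Simple (size only) R-squared:', reg_simple.score(data[['size']], y))
print('Multiple (size + year) adjusted R-squared:', adj_r2(x, y))
###Output
_____no_output_____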
###Markdown
Calculate the univariate p-values of the variables
###Code
from sklearn.feature_selection import f_regression
f_regression(x,y)
p_values = f_regression(x,y)[1]
p_values
p_values.round(3)
###Output
_____no_output_____
###Markdown
Create a summary table with your findings
###Code
reg_summary = pd.DataFrame(data = x.columns.values, columns=['Features'])
reg_summary ['Coefficients'] = reg.coef_
reg_summary ['p-values'] = p_values.round(3)
reg_summary
###Output
_____no_output_____ |
examples/cevae_example.ipynb | ###Markdown
IHDP semi-synthetic dataset

Hill introduced a semi-synthetic dataset constructed from the Infant Health and Development Program (IHDP). This dataset is based on a randomized experiment investigating the effect of home visits by specialists on future cognitive scores. The IHDP simulation is considered the de-facto standard benchmark for neural network treatment effect estimation methods.
###Code
# load all ihadp data
df = pd.DataFrame()
for i in range(1, 10):
data = pd.read_csv('./data/ihdp_npci_' + str(i) + '.csv', header=None)
df = pd.concat([data, df])
cols = ["treatment", "y_factual", "y_cfactual", "mu0", "mu1"] + [i for i in range(25)]
df.columns = cols
print(df.shape)
# replicate the data 100 times
replications = 100
df = pd.concat([df]*replications, ignore_index=True)
print(df.shape)
# set which features are binary
binfeats = [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]
# set which features are continuous
contfeats = [i for i in range(25) if i not in binfeats]
# reorder features with binary first and continuous after
perm = binfeats + contfeats
df = df.reset_index(drop=True)
df.head()
X = df[perm].values
treatment = df['treatment'].values
y = df['y_factual'].values
y_cf = df['y_cfactual'].values
tau = df.apply(lambda d: d['y_factual'] - d['y_cfactual'] if d['treatment']==1
else d['y_cfactual'] - d['y_factual'],
axis=1)
mu_0 = df['mu0'].values
mu_1 = df['mu1'].values
# seperate for train and test
itr, ite = train_test_split(np.arange(X.shape[0]), test_size=0.2, random_state=1)
X_train, treatment_train, y_train, y_cf_train, tau_train, mu_0_train, mu_1_train = X[itr], treatment[itr], y[itr], y_cf[itr], tau[itr], mu_0[itr], mu_1[itr]
X_val, treatment_val, y_val, y_cf_val, tau_val, mu_0_val, mu_1_val = X[ite], treatment[ite], y[ite], y_cf[ite], tau[ite], mu_0[ite], mu_1[ite]
###Output
_____no_output_____
###Markdown
CEVAE Model
###Code
# cevae model settings
outcome_dist = "normal"
latent_dim = 20
hidden_dim = 200
num_epochs = 5
batch_size = 1000
learning_rate = 0.001
learning_rate_decay = 0.01
num_layers = 2
cevae = CEVAE(outcome_dist=outcome_dist,
latent_dim=latent_dim,
hidden_dim=hidden_dim,
num_epochs=num_epochs,
batch_size=batch_size,
learning_rate=learning_rate,
learning_rate_decay=learning_rate_decay,
num_layers=num_layers)
# fit
losses = cevae.fit(X=torch.tensor(X_train, dtype=torch.float),
treatment=torch.tensor(treatment_train, dtype=torch.float),
y=torch.tensor(y_train, dtype=torch.float))
# predict
ite_train = cevae.predict(X_train)
ite_val = cevae.predict(X_val)
ate_train = ite_train.mean()
ate_val = ite_val.mean()
print(ate_train, ate_val)
###Output
0.58953923 0.5956359
###Markdown
Meta Learners
###Code
# fit propensity model
p_model = ElasticNetPropensityModel()
p_train = p_model.fit_predict(X_train, treatment_train)
p_val = p_model.fit_predict(X_val, treatment_val)
s_learner = BaseSRegressor(LGBMRegressor())
s_ate = s_learner.estimate_ate(X_train, treatment_train, y_train)[0]
s_ite_train = s_learner.fit_predict(X_train, treatment_train, y_train)
s_ite_val = s_learner.predict(X_val)
t_learner = BaseTRegressor(LGBMRegressor())
t_ate = t_learner.estimate_ate(X_train, treatment_train, y_train)[0][0]
t_ite_train = t_learner.fit_predict(X_train, treatment_train, y_train)
t_ite_val = t_learner.predict(X_val, treatment_val, y_val)
x_learner = BaseXRegressor(LGBMRegressor())
x_ate = x_learner.estimate_ate(X_train, treatment_train, y_train, p_train)[0][0]
x_ite_train = x_learner.fit_predict(X_train, treatment_train, y_train, p_train)
x_ite_val = x_learner.predict(X_val, treatment_val, y_val, p_val)
r_learner = BaseRRegressor(LGBMRegressor())
r_ate = r_learner.estimate_ate(X_train, treatment_train, y_train, p_train)[0][0]
r_ite_train = r_learner.fit_predict(X_train, treatment_train, y_train, p_train)
r_ite_val = r_learner.predict(X_val)
###Output
_____no_output_____
###Markdown
Model Results Comparison Training
###Code
df_preds_train = pd.DataFrame([s_ite_train.ravel(),
t_ite_train.ravel(),
x_ite_train.ravel(),
r_ite_train.ravel(),
ite_train.ravel(),
tau_train.ravel(),
treatment_train.ravel(),
y_train.ravel()],
index=['S','T','X','R','CEVAE','tau','w','y']).T
df_cumgain_train = get_cumgain(df_preds_train)
df_result_train = pd.DataFrame([s_ate, t_ate, x_ate, r_ate, ate_train, tau_train.mean()],
index=['S','T','X','R','CEVAE','actual'], columns=['ATE'])
df_result_train['MAE'] = [mean_absolute_error(t,p) for t,p in zip([s_ite_train, t_ite_train, x_ite_train, r_ite_train, ite_train],
[tau_train.values.reshape(-1,1)]*5 )
] + [None]
df_result_train['AUUC'] = auuc_score(df_preds_train)
df_result_train
plot_gain(df_preds_train)
###Output
_____no_output_____
###Markdown
Validation
###Code
df_preds_val = pd.DataFrame([s_ite_val.ravel(),
t_ite_val.ravel(),
x_ite_val.ravel(),
r_ite_val.ravel(),
ite_val.ravel(),
tau_val.ravel(),
treatment_val.ravel(),
y_val.ravel()],
index=['S','T','X','R','CEVAE','tau','w','y']).T
df_cumgain_val = get_cumgain(df_preds_val)
df_result_val = pd.DataFrame([s_ite_val.mean(), t_ite_val.mean(), x_ite_val.mean(), r_ite_val.mean(), ate_val, tau_val.mean()],
index=['S','T','X','R','CEVAE','actual'], columns=['ATE'])
df_result_val['MAE'] = [mean_absolute_error(t,p) for t,p in zip([s_ite_val, t_ite_val, x_ite_val, r_ite_val, ite_val],
[tau_val.values.reshape(-1,1)]*5 )
] + [None]
df_result_val['AUUC'] = auuc_score(df_preds_val)
df_result_val
plot_gain(df_preds_val)
###Output
_____no_output_____
###Markdown
Synthetic Data
###Code
y, X, w, tau, b, e = simulate_hidden_confounder(n=100000, p=5, sigma=1.0, adj=0.)
X_train, X_val, y_train, y_val, w_train, w_val, tau_train, tau_val, b_train, b_val, e_train, e_val = \
train_test_split(X, y, w, tau, b, e, test_size=0.2, random_state=123, shuffle=True)
preds_dict_train = {}
preds_dict_valid = {}
preds_dict_train['Actuals'] = tau_train
preds_dict_valid['Actuals'] = tau_val
preds_dict_train['generated_data'] = {
'y': y_train,
'X': X_train,
'w': w_train,
'tau': tau_train,
'b': b_train,
'e': e_train}
preds_dict_valid['generated_data'] = {
'y': y_val,
'X': X_val,
'w': w_val,
'tau': tau_val,
'b': b_val,
'e': e_val}
# Predict p_hat because e would not be directly observed in real-life
p_model = ElasticNetPropensityModel()
p_hat_train = p_model.fit_predict(X_train, w_train)
p_hat_val = p_model.fit_predict(X_val, w_val)
for base_learner, label_l in zip([BaseSRegressor, BaseTRegressor, BaseXRegressor, BaseRRegressor],
['S', 'T', 'X', 'R']):
for model, label_m in zip([LinearRegression, XGBRegressor], ['LR', 'XGB']):
# RLearner will need to fit on the p_hat
if label_l != 'R':
learner = base_learner(model())
# fit the model on training data only
learner.fit(X=X_train, treatment=w_train, y=y_train)
try:
preds_dict_train['{} Learner ({})'.format(
label_l, label_m)] = learner.predict(X=X_train, p=p_hat_train).flatten()
preds_dict_valid['{} Learner ({})'.format(
label_l, label_m)] = learner.predict(X=X_val, p=p_hat_val).flatten()
except TypeError:
preds_dict_train['{} Learner ({})'.format(
label_l, label_m)] = learner.predict(X=X_train, treatment=w_train, y=y_train).flatten()
preds_dict_valid['{} Learner ({})'.format(
label_l, label_m)] = learner.predict(X=X_val, treatment=w_val, y=y_val).flatten()
else:
learner = base_learner(model())
learner.fit(X=X_train, p=p_hat_train, treatment=w_train, y=y_train)
preds_dict_train['{} Learner ({})'.format(
label_l, label_m)] = learner.predict(X=X_train).flatten()
preds_dict_valid['{} Learner ({})'.format(
label_l, label_m)] = learner.predict(X=X_val).flatten()
# cevae model settings
outcome_dist = "normal"
latent_dim = 20
hidden_dim = 200
num_epochs = 5
batch_size = 1000
learning_rate = 1e-3
learning_rate_decay = 0.1
num_layers = 3
num_samples = 10
cevae = CEVAE(outcome_dist=outcome_dist,
latent_dim=latent_dim,
hidden_dim=hidden_dim,
num_epochs=num_epochs,
batch_size=batch_size,
learning_rate=learning_rate,
learning_rate_decay=learning_rate_decay,
num_layers=num_layers,
num_samples=num_samples)
# fit
losses = cevae.fit(X=torch.tensor(X_train, dtype=torch.float),
treatment=torch.tensor(w_train, dtype=torch.float),
y=torch.tensor(y_train, dtype=torch.float))
preds_dict_train['CEVAE'] = cevae.predict(X_train).flatten()
preds_dict_valid['CEVAE'] = cevae.predict(X_val).flatten()
actuals_train = preds_dict_train['Actuals']
actuals_validation = preds_dict_valid['Actuals']
synthetic_summary_train = pd.DataFrame({label: [preds.mean(), mse(preds, actuals_train)] for label, preds
in preds_dict_train.items() if 'generated' not in label.lower()},
index=['ATE', 'MSE']).T
synthetic_summary_train['Abs % Error of ATE'] = np.abs(
(synthetic_summary_train['ATE']/synthetic_summary_train.loc['Actuals', 'ATE']) - 1)
synthetic_summary_validation = pd.DataFrame({label: [preds.mean(), mse(preds, actuals_validation)]
for label, preds in preds_dict_valid.items()
if 'generated' not in label.lower()},
index=['ATE', 'MSE']).T
synthetic_summary_validation['Abs % Error of ATE'] = np.abs(
(synthetic_summary_validation['ATE']/synthetic_summary_validation.loc['Actuals', 'ATE']) - 1)
# calculate kl divergence for training
for label in synthetic_summary_train.index:
stacked_values = np.hstack((preds_dict_train[label], actuals_train))
stacked_low = np.percentile(stacked_values, 0.1)
stacked_high = np.percentile(stacked_values, 99.9)
bins = np.linspace(stacked_low, stacked_high, 100)
distr = np.histogram(preds_dict_train[label], bins=bins)[0]
distr = np.clip(distr/distr.sum(), 0.001, 0.999)
true_distr = np.histogram(actuals_train, bins=bins)[0]
true_distr = np.clip(true_distr/true_distr.sum(), 0.001, 0.999)
kl = entropy(distr, true_distr)
synthetic_summary_train.loc[label, 'KL Divergence'] = kl
# calculate kl divergence for validation
for label in synthetic_summary_validation.index:
stacked_values = np.hstack((preds_dict_valid[label], actuals_validation))
stacked_low = np.percentile(stacked_values, 0.1)
stacked_high = np.percentile(stacked_values, 99.9)
bins = np.linspace(stacked_low, stacked_high, 100)
distr = np.histogram(preds_dict_valid[label], bins=bins)[0]
distr = np.clip(distr/distr.sum(), 0.001, 0.999)
true_distr = np.histogram(actuals_validation, bins=bins)[0]
true_distr = np.clip(true_distr/true_distr.sum(), 0.001, 0.999)
kl = entropy(distr, true_distr)
synthetic_summary_validation.loc[label, 'KL Divergence'] = kl
df_preds_train = pd.DataFrame([preds_dict_train['S Learner (LR)'].ravel(),
preds_dict_train['S Learner (XGB)'].ravel(),
preds_dict_train['T Learner (LR)'].ravel(),
preds_dict_train['T Learner (XGB)'].ravel(),
preds_dict_train['X Learner (LR)'].ravel(),
preds_dict_train['X Learner (XGB)'].ravel(),
preds_dict_train['R Learner (LR)'].ravel(),
preds_dict_train['R Learner (XGB)'].ravel(),
preds_dict_train['CEVAE'].ravel(),
preds_dict_train['generated_data']['tau'].ravel(),
preds_dict_train['generated_data']['w'].ravel(),
preds_dict_train['generated_data']['y'].ravel()],
index=['S Learner (LR)','S Learner (XGB)',
'T Learner (LR)','T Learner (XGB)',
'X Learner (LR)','X Learner (XGB)',
'R Learner (LR)','R Learner (XGB)',
'CEVAE','tau','w','y']).T
synthetic_summary_train['AUUC'] = auuc_score(df_preds_train).iloc[:-1]
df_preds_validation = pd.DataFrame([preds_dict_valid['S Learner (LR)'].ravel(),
preds_dict_valid['S Learner (XGB)'].ravel(),
preds_dict_valid['T Learner (LR)'].ravel(),
preds_dict_valid['T Learner (XGB)'].ravel(),
preds_dict_valid['X Learner (LR)'].ravel(),
preds_dict_valid['X Learner (XGB)'].ravel(),
preds_dict_valid['R Learner (LR)'].ravel(),
preds_dict_valid['R Learner (XGB)'].ravel(),
preds_dict_valid['CEVAE'].ravel(),
preds_dict_valid['generated_data']['tau'].ravel(),
preds_dict_valid['generated_data']['w'].ravel(),
preds_dict_valid['generated_data']['y'].ravel()],
index=['S Learner (LR)','S Learner (XGB)',
'T Learner (LR)','T Learner (XGB)',
'X Learner (LR)','X Learner (XGB)',
'R Learner (LR)','R Learner (XGB)',
'CEVAE','tau','w','y']).T
synthetic_summary_validation['AUUC'] = auuc_score(df_preds_validation).iloc[:-1]
synthetic_summary_train
synthetic_summary_validation
plot_gain(df_preds_train)
plot_gain(df_preds_validation)
###Output
_____no_output_____ |
src/imjoy_viewer.ipynb | ###Markdown
Modify the url to point to the correct location of the zarr file
###Code
z_url = r"/mnt/KOMP_C8565_1.zarr"
z = zarr.open(z_url, mode="r") # open the zarr created above in jupyter kernel
###Output
_____no_output_____
###Markdown
Set up the ImJoy viewer extension
###Code
from imjoy import api
import zarr
def encode_zarr_store(zobj):
path_prefix = f"{zobj.path}/" if zobj.path else ""
def getItem(key, options = None):
return zobj.store[path_prefix + key]
def setItem(key, value):
zobj.store[path_prefix + key] = value
def containsItem(key, options = None):
if path_prefix + key in zobj.store:
return True
return {
"_rintf": True,
"_rtype": "zarr-array" if isinstance(zobj, zarr.Array) else "zarr-group",
"getItem": getItem,
"setItem": setItem,
"containsItem": containsItem,
}
api.registerCodec(
{"name": "zarr-array", "type": zarr.Array, "encoder": encode_zarr_store}
)
api.registerCodec(
{"name": "zarr-group", "type": zarr.Group, "encoder": encode_zarr_store}
)
class Plugin:
def __init__(self, images, view_state=None):
if not isinstance(images, list):
images = [images]
self.images = images
self.view_state = view_state
async def setup(self):
pass
async def run(self, ctx):
viewer = await api.createWindow(
type="vizarr", src="https://hms-dbmi.github.io/vizarr"
)
if self.view_state:
await viewer.set_view_state(self.view_state)
for img in self.images:
await viewer.add_image(img)
def run_vizarr(images, view_state=None):
api.export(Plugin(images, view_state))
###Output
_____no_output_____
###Markdown
Access the group '0' of the zarr file and visualize it
###Code
# Create Zarr
img = { "source": z['0'], "name": "KOMP_test_1" }
# Run vizarr
run_vizarr(img)
###Output
_____no_output_____ |
archived/MBZ-XML-TO-EXCEL-v0004.ipynb | ###Markdown
MBZ-XML-TO-EXCELFirst pubished version May 22, 2019. This is version 0.0004 (revision July 26, 2019)Licensed under the NCSA Open source licenseCopyright (c) 2019 Lawrence AngraveAll rights reserved.Developed by: Lawrence Angrave Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal with the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimers. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimers in the documentation and/or other materials provided with the distribution. Neither the names of Lawrence Angrave, University of Illinois nor the names of its contributors may be used to endorse or promote products derived from this Software without specific prior written permission. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE. Citations and acknowledgements welcomed!In a presentation, report or paper please recognise and acknowledge the the use of this software.Please contact [email protected] for a Bibliography citation. For presentations, the following is sufficientMBZ-XML-TO-EXCEL (https://github.com/angrave/Moodle-mbz-to-excel) by Lawrence Angrave.MBZ-XML-TO-EXCEL is an iLearn project, supported by an Institute of Education Sciences Award R305A180211If also using Geo-IP data, please cite IP2Location. For example,"This report uses geo-ip location data from IP2Location.com" Known limitations and issuesThe assessment sheet (generated from workshop.xml) may generate URLs that are longer than 255 characters, the largested supported by Excel. These very long URLs will be excludedNo verification of the data has been performed. It is unknown if the inferred timestamps based on the Unix Epoch timestamp require a timezone adjustment. RequirementsThis project uses Python3, Jupiter notebooks and Pandas. Set up
###Code
#import xml.etree.ElementTree as ET
#lxml supports line numbers
import lxml.etree as ET
from collections import OrderedDict
import pandas as pd
import numpy as np
import re
import os
import urllib
import datetime
import glob
import tarfile
import tempfile
import base64
# geoip support
import bisect
import ipaddress
# timestamp support
from datetime import datetime
# Extract text from html messages
from bs4 import BeautifulSoup
import uuid
import traceback
import xlsxwriter
excelengine = 'xlsxwriter'
# 'xlsxwriter' is currently recommended though it did not improve the write speed using generic pandas interface)
# Todo Perhaps using workbook interface directly will be faster? (https://xlsxwriter.readthedocs.io/)
# io.excel.xlsx.writer' (default, allegedly slow),
# 'pyexcelerate' (untested)
###Output
_____no_output_____
###Markdown
Load GeoIP data (optional)
###Code
def load_geoip_data(geoip_datadir):
global geoip_all_colnames, geoip_geo_columns,geoipv4_df,geoipv4_ipvalues
geoip_all_colnames = ['geoip_ipfrom'
,'geoip_ipto'
,'geoip_country_code'
,'geoip_country_name'
,'geoip_region_name'
,'geoip_city_name'
,'geoip_latitude'
,'geoip_longitude'
,'geoip_zip_code'
,'geoip_time_zone']
geoip_geo_columns = geoip_all_colnames[2:]
#geoip_datadir = 'geoip' #change to your local directory of where the downloaded zip has been unpacked
geoipv4_csv = os.path.join(geoip_datadir,'IP2LOCATION-LITE-DB11.CSV')
if os.path.exists(geoipv4_csv):
print("Reading geoip csv",geoipv4_csv)
geoipv4_df = pd.read_csv(geoipv4_csv, names= geoip_all_colnames)
geoipv4_ipvalues = geoipv4_df['geoip_ipfrom'].values
# bisect searching assumes geoipv4_ipvalues are in increasing order
else:
geoipv4_df = None
geoipv4_ipvalues = None
print("No GeoIP csv data at ",geoipv4_csv)
print("IP addresses will not be converted into geographic locations")
print("Free Geo-IP data can be downloaded from IP2LOCATION.com")
###Output
_____no_output_____
###Markdown
Phase 1 - Extract XMLs from mbz file and create hundreds of Excel files
###Code
# Each file can generate a list of tables (dataframes)
# Recursively process each element.
# For each non-leaf element we build an ordered dictionary of key-value pairs and attach this to an array for the particular element name
# <foo id='1' j='a'> becomes data['foo'] = [ {'id':'1', j:'a'} ]
# The exception is for leaf elements (no-child elements) in the form e.g. <blah>123</blah>
# We treat these equivalently to attributes on the surrounding (parent) xml element
# <foo id='1'><blah>123</blah></foo> becomes data['foo'] = [ {'id':'1', 'blah':'123'} ]
# and no data['blah'] is created
AUTOMATIC_IMPLICIT_XML_COLUMNS = 4 #SOURCE_LINE, PARENT_SHEET, PARENT_ROW_INDEX, PARENT_ID
def process_element(data,dest_basedir, tablename_list, context, e):
#deprecated has_no_children = len(e.getchildren()) == 0
has_no_children = len(e) == 0
has_no_attribs = len(e.attrib.keys()) == 0
text = e.text
has_text = text is not None
if has_text:
text = text.strip()
has_text = len(text) > 0
# Is this a leaf element e.g. <blah>123</blah>
# For the datasets we care about, leaves should not be tables; we only want their value
ignore_attribs_on_leaves = True
# This could be refactored to return a dictionary, so multiple attributes can be attached to the parent
if has_no_children and (has_no_attribs or ignore_attribs_on_leaves):
if not has_no_attribs:
print()
print("Warning: Ignoring attributes on leaf element:" + e.tag+ ":"+ str(e.attrib))
print()
return [e.tag,e.text] # Early return, attach the value to the parent (using the tag as the attribute name)
table_name = e.tag
if table_name not in data:
tablename_list.append(table_name)
data[table_name] = []
key_value_pairs = OrderedDict()
key_value_pairs['SOURCE_LINE'] = e.sourceline
key_value_pairs['PARENT_SHEET'] = context[0]
key_value_pairs['PARENT_ROW_INDEX'] = context[1]
key_value_pairs['PARENT_ID'] = context[2]
#print(e.sourceline)
# For correctness child_context needs to be after this line and before recursion
data[table_name].append(key_value_pairs)
myid = ''
if 'id' in e.attrib:
myid = e.attrib['id']
child_context = [table_name, len(data[table_name])-1, myid] # Used above context[0] during recursive call
for key in sorted(e.attrib.keys()):
key_value_pairs[key] = e.attrib[key]
for child in e.iterchildren():
# Could refactor here to use dictionary to enable multiple key-values from a discarded leaf
key,value = process_element(data,dest_basedir, tablename_list, child_context, child)
if value:
if key in key_value_pairs:
key_value_pairs[key] += ',' + str(value)
else:
key_value_pairs[key] = str(value)
if has_text:
key_value_pairs['TEXT'] = e.text # If at least some non-whitespace text, then use original text
return [e.tag,None]
def tablename_to_sheetname(elided_sheetnames, tablename):
sheetname = tablename
# Future: There may be characters that are invalid. If so, remove them here..
#Excel sheetnames are limited to 31 characters.
max_excel_sheetname_length = 31
if len(sheetname) <= max_excel_sheetname_length:
return sheetname
sheetname = sheetname[0:5] + '...' + sheetname[-20:]
elided_sheetnames.append(sheetname)
if elided_sheetnames.count(sheetname)>1:
sheetname += str( elided_sheetnames.count(sheetname) + 1)
return sheetname
def decode_base64_to_latin1(encoded_val):
try:
return str(base64.b64decode(encoded_val) , 'latin-1')
except Exception as e:
traceback.print_exc()
print("Not base64 latin1?", e)
return '??Not-latin1 text'
def decode_geoip(ip):
try:
ip = ip.strip()
if not ip or geoipv4_df is None:
return pd.Series(None, index=geoip_geo_columns)
ipv4 = int(ipaddress.IPv4Address(ip))
index = bisect.bisect(geoipv4_ipvalues, ipv4) - 1
entry = geoipv4_df.iloc[index]
assert entry.geoip_ipfrom <= ipv4 and entry.geoip_ipto >= ipv4
return entry[2:] # [geoip_geo_columns] # Drop ip_from and ip_to
except Exception as e:
traceback.print_exc()
print("Bad ip?",ip, e)
return pd.Series(None, index=geoip_geo_columns)
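# Illustrative note (not from the original notebook): decode_geoip turns a dotted IPv4
# string into its integer form (e.g. '8.8.8.8' -> 134744072), bisects the sorted
# geoipv4_ipvalues list to find the enclosing [ipfrom, ipto] range, and returns that
# row's geographic columns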
def decode_unixtimestamp_to_UTC(seconds):
if seconds == '':
return ''
try:
return datetime.utcfromtimestamp(int(seconds)).strftime('%Y-%m-%d %H:%M:%S')
except Exception as e:
traceback.print_exc()
print("Bad unix timestamp?", seconds , e)
return ''
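# e.g. decode_unixtimestamp_to_UTC('1577836800') returns '2020-01-01 00:00:00'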
def decode_html_to_text(html):
if html is np.nan:
return ''
try:
soup = BeautifulSoup(html,"lxml")
return soup.get_text()
except Exception as e:
traceback.print_exc()
print('Bad html?',html, e)
return '???'
def validate_anonid_data(anonid_df):
#Expected columns
for c in ['anonid','userid']:
if c not in anonid_df.columns:
raise ValueError('anonid_csv_file \'' + anonid_csv_file + '\' should have a column named ' + c)
# No duplicate userid entries
check_for_duplicates = anonid_df['userid'].duplicated(keep=False)
if check_for_duplicates.any():
print(anonid_df[check_for_duplicates])
raise Exception('See above - fix the duplicates userid entries found in \'' + anonid_csv_file +'\'')
anonid_df['userid'] = anonid_df['userid'].astype(str)
def userid_to_anonid(userid):
global anonid_df, generate_missing_anonid
if userid is np.nan or len(userid) == 0:
return ''
row = anonid_df[ anonid_df['userid'] == userid ]
if len( row ) == 1:
return row['anonid'].values[0]
if generate_missing_anonid:
result = uuid.uuid4().hex
anonid_df = anonid_df.append({ 'userid':userid, 'anonid':result}, ignore_index=True)
else:
result = ''
return result
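# Illustrative behaviour: a userid already present in anonid_df returns its stored anonid;
# an unknown userid gets a fresh 32-character uuid4 hex value (and is added to the mapping)
# when generate_missing_anonid is True, otherwise an empty string is returned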
def to_dataframe(table_name, table_data):
df = pd.DataFrame(table_data)
# Moodle dumps use $@NULL@$ for nulls
df.replace('$@NULL@$','',inplace = True)
# We found two base64 encoded columns in Moodle data-
for col in df.columns & ['other','configdata']:
df[ str(col) + '_base64'] = df[str(col)].map(decode_base64_to_latin1)
for col in df.columns & ['timestart','timefinish','added','backup_date','original_course_startdate','original_course_enddate','timeadded','firstaccess','lastaccess','lastlogin','currentlogin','timecreated','timemodified','created','modified']:
df[ str(col) + '_utc'] = df[str(col)].map(decode_unixtimestamp_to_UTC)
# Extract text from html content
for col in df.columns & ['message', 'description','commenttext','intro','conclusion','summary','feedbacktext','content','feedback','info', 'questiontext' , 'answertext']:
df[ str(col) + '_text'] = df[str(col)].map(decode_html_to_text)
# Moodle data has 'ip' and 'lastip' that are ipv4 dotted
# Currently only ipv4 is implemented. geoipv4_df is None if the csv file was not found
if geoipv4_df is not None:
for col in df.columns & ['ip','lastip']:
df = df.join( df[str(col)].apply(decode_geoip) )
for col in df.columns & ['userid','relateduserid' , 'realuserid']:
col=str(col)
if col == 'userid':
out = 'anonid'
else:
out = col[0:-6] + '_anonid'
df[ out ] = df[col].map(userid_to_anonid)
if delete_userids:
df.drop(columns=[col],inplace=True)
if table_name == 'user':
df['anonid'] = df['id'].map(userid_to_anonid)
# Can add more MOODLE PROCESSING HERE :-)
return df
def to_absolute_file_url(filepath):
return urllib.parse.urljoin( 'file:', urllib.request.pathname2url(os.path.abspath(filepath)))
def write_excel_sheets(source_file, excelwriter, data, tablename_list):
elided_sheetnames = []
table_sheet_mapping = dict()
table_sheet_mapping[''] = '' # Top level parents have empty PARENT_SHEET
for tablename in tablename_list:
sheetname = tablename_to_sheetname(elided_sheetnames, tablename)
table_sheet_mapping[tablename] = sheetname
for tablename in tablename_list:
df = to_dataframe(tablename, data[tablename])
#Convert table (=original xml tag) into real sheet name (not tag name)
if 'PARENT_SHEET' in df.columns:
df['PARENT_SHEET'] = df['PARENT_SHEET'].apply(lambda x: table_sheet_mapping[x])
df.index.rename(tablename, inplace=True)
df.insert(0, 'SOURCE_FILE',source_file ,allow_duplicates=True)
df.insert(1, 'SOURCE_TAG', tablename, allow_duplicates=True)
sheetname = table_sheet_mapping[tablename]
if sheetname != tablename:
print("Writing "+ tablename + " as sheet "+ sheetname)
else:
print("Writing sheet "+ sheetname)
df.to_excel(excelwriter, sheet_name=sheetname, index_label=tablename)
return table_sheet_mapping
def re_adopt_child_table(data, parent_tablename, parent_table, child_tablename):
child_table = data[child_tablename]
for row in child_table:
if 'PARENT_SHEET' not in row.keys():
continue
if row['PARENT_SHEET'] == parent_tablename:
idx = row['PARENT_ROW_INDEX']
# Time to follow the pointer
parent_row = parent_table[idx]
#row['PARENT_TAG'] = parent_row['PARENT_TAG']
row['PARENT_ROW_INDEX'] = parent_row['PARENT_ROW_INDEX']
row['PARENT_ID'] = parent_row['PARENT_ID']
row['PARENT_SHEET'] = parent_row['PARENT_SHEET']
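# Illustrative example (hypothetical table names): if a 'sections' table carries only the
# implicit columns and is about to be discarded, any 'section' rows whose PARENT_SHEET is
# 'sections' are re-pointed at the grandparent recorded in the corresponding 'sections' row,
# so the parent links stay meaningful after the empty table disappears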
def discard_empty_tables(data,tablename_list):
nonempty_tables = []
for tablename in tablename_list:
table = data[tablename]
# print(tablename, len(table),'rows')
if len(table) == 0:
# print("Skipping empty table",tablename)
continue
include = False
for row in table:
if len(row) > AUTOMATIC_IMPLICIT_XML_COLUMNS: # Found more than just the implicit SOURCE_LINE/PARENT_* columns
include = True
break
if include:
# print("Including",tablename)
nonempty_tables.append(tablename)
else:
# print("Skipping unnecessary table",tablename)
# Will need to fixup child items that still think this is their container
# More efficient if we kept a mapping of child tables, rather than iterate over tables
for childname in tablename_list:
re_adopt_child_table(data, tablename, table, childname)
pass
return nonempty_tables
def process_one_file(dest_basedir, relative_sub_dir, xml_filename, dry_run):
print('process_one_file(\''+dest_basedir+'\',\''+relative_sub_dir+'\',\''+xml_filename+'\')')
#print("Reading XML " + xml_filename)
#Original parser
xmlroot = ET.parse(xml_filename).getroot()
# Use lxml
#xmlroot = etree.parse(xml_filename)
#print("Processing...")
data = dict()
tablename_list = []
initial_context = ['','',''] # Todo : Consider missing integer index e.g. ['',None,'']
process_element(data, dest_basedir ,tablename_list, initial_context, xmlroot)
nonempty_tables = discard_empty_tables(data,tablename_list)
if len(nonempty_tables) == 0:
#print("no tables left to write")
return
# We use underscore to collate source subdirectories
basename = os.path.basename(xml_filename).replace('.xml','').replace('_','')
use_sub_dirs = False
if use_sub_dirs:
output_dir = os.path.join(dest_basedir, relative_sub_dir)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
output_filename = os.path.join(output_dir, basename + '.xlsx')
else:
sub = relative_sub_dir.replace(os.sep,'_').replace('.','')
if (len(sub) > 0) and sub[-1] != '_':
sub = sub + '_'
output_filename = os.path.join(dest_basedir, sub + basename + '.xlsx')
if dry_run: # For debugging
return
print("** Writing ", output_filename)
if os.path.exists(output_filename):
os.remove(output_filename)
excelwriter = pd.ExcelWriter(output_filename, engine= excelengine)
# absolute path is useful to open original files on local machine
if(False):
source_file = to_absolute_file_url(xml_filename)
else:
source_file = os.path.normpath(xml_filename)
try:
write_excel_sheets(source_file, excelwriter, data,nonempty_tables)
excelwriter.close()
except Exception as ex:
traceback.print_exc()
print(type(ex))
print(ex)
pass
finally:
excelwriter = None
print()
def process_directory(xml_basedir, out_basedir, relative_sub_dir,toplevel_xml_only, dry_run):
xml_dir = os.path.join(xml_basedir, relative_sub_dir)
file_list = sorted(os.listdir(xml_dir))
for filename in file_list:
if filename.endswith('.xml'):
print("Processing", filename)
process_one_file(out_basedir, relative_sub_dir, os.path.join(xml_dir,filename), dry_run)
if toplevel_xml_only:
return # No recursion into subdirs(e.g. for testing)
# Recurse
for filename in file_list:
candidate_sub_dir = os.path.join(relative_sub_dir, filename)
if os.path.isdir( os.path.join(xml_basedir, candidate_sub_dir)) :
process_directory(xml_basedir, out_basedir, candidate_sub_dir,toplevel_xml_only, dry_run)
def extract_xml_files_in_tar(tar_file, extract_dir):
os.makedirs(extract_dir)
extract_count = 0
for tarinfo in tar_file:
if os.path.splitext(tarinfo.name)[1] == ".xml":
#print(extract_dir, tarinfo.name)
tar_file.extract( tarinfo, path = extract_dir)
extract_count = extract_count + 1
return extract_count
def archive_file_to_output_dir(archive_file):
return os.path.splitext(archive_file)[0] + '-out'
def archive_file_to_xml_dir(archive_file):
return os.path.splitext(archive_file)[0] + '-xml'
def lazy_extract_mbz(archive_source_file,expanded_archive_directory,skip_expanding_if_xml_files_found):
has_xml_files = len( glob.glob( os.path.join(expanded_archive_directory,'*.xml') ) ) > 0
if has_xml_files and skip_expanding_if_xml_files_found:
print("*** Reusing existing xml files in", expanded_archive_directory)
return
if os.path.isdir(expanded_archive_directory):
print("*** Deleting existing files in", expanded_archive_directory)
raise "Comment out this line if it is going to delete the correct directory"
shutil.rmtree(expanded_archive_directory)
with tarfile.open(archive_source_file, mode='r|*') as tf:
print("*** Expanding",archive_source_file, "to", expanded_archive_directory)
extract_count = extract_xml_files_in_tar(tf, expanded_archive_directory)
print('***',extract_count,' xml files extracted')
def process_xml_files(expanded_archive_directory,out_basedir,toplevel_xml_only,dry_run, anonid_output_csv):
global anonid_df
print("*** Source xml directory :", expanded_archive_directory)
print("*** Output directory:", out_basedir)
if not os.path.isdir(out_basedir):
os.makedirs(out_basedir)
process_directory(expanded_archive_directory, out_basedir,'.',toplevel_xml_only,dry_run)
if anonid_output_csv:
filepath = os.path.join(out_basedir,anonid_output_csv)
print("Writing ",filepath,len(anonid_df.index),'rows')
anonid_df.to_csv( filepath, index = None, header=True)
print("*** Finished processing XML")
###Output
_____no_output_____
###Markdown
Phase 2 - Aggregate Excel documents
###Code
def list_xlsx_files_in_dir(xlsx_dir):
xlsx_files = sorted(glob.glob(os.path.join(xlsx_dir,'*.xlsx')))
xlsx_files = [file for file in xlsx_files if os.path.basename(file)[0] != '~' ]
return xlsx_files
# Phase 2 - Aggregate multiple xlsx that are split across multiple course sections into a single Excel file
def create_aggregate_sections_map(xlsx_dir):
xlsx_files = list_xlsx_files_in_dir(xlsx_dir)
sections_map = dict()
for source_file in xlsx_files:
path = source_file.split(os.path.sep) # TODO os.path.sep
nameparts = path[-1].split('_')
target = nameparts[:]
subnumber = None
if len(nameparts)>3 and nameparts[-3].isdigit(): subnumber = -3 # probably unnecessary as _ are removed from basename
if len(nameparts)>2 and nameparts[-2].isdigit(): subnumber = -2
if not subnumber: continue
target[subnumber] = 'ALLSECTIONS'
key = (os.path.sep.join(path[:-1])) + os.path.sep+ ( '_'.join(target))
if key not in sections_map.keys():
sections_map[key] = []
sections_map[key].append(source_file)
return sections_map
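# Illustrative mapping (hypothetical file names): 'sections_section_3_inforef.xlsx' and
# 'sections_section_4_inforef.xlsx' both map to the key '<output-dir>/sections_section_ALLSECTIONS_inforef.xlsx',
# so their sheets are later merged into one workbook per object across all course sections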
# Phase 3 - Aggregate over common objects
def create_aggregate_common_objects_map(xlsx_dir):
xlsx_files = list_xlsx_files_in_dir(xlsx_dir)
combined_map = dict()
# path/_activities_workshop_ALLSECTIONS_logstores.xlsx will map to key = path/ALL_logstores.xlsx
for source_file in xlsx_files:
path = source_file.split(os.path.sep) # TODO os.path.sep
nameparts = path[-1].split('_')
target = nameparts[-1]
if 'ALL_' == path[-1][:4]:
continue # Guard against restarts
key = (os.path.sep.join(path[:-1])) + os.path.sep+ ('ALL_' + target)
if key not in combined_map.keys():
combined_map[key] = []
combined_map[key].append(source_file)
return combined_map
def rebase_row(row,rebase_map):
if isinstance(row['PARENT_SHEET'] , str):
return str(int(row['PARENT_ROW_INDEX']) + int(rebase_map[ row['XLSX_SOURCEFILE'] + '#' + row['PARENT_SHEET'] ]))
else:
return ''
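# Illustrative example: if file B's rows for a sheet are appended after 60 rows that already
# came from file A, a child row from file B whose PARENT_ROW_INDEX was 4 is rebased to 64,
# so it still points at the correct (now shifted) parent row in the combined sheet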
def check_no_open_Excel_documents_in_Excel(dir):
# Excel creates temporary backup files that start with tilde when an Excel file is open in Excel
if not os.path.isdir(dir):
return
open_files = glob.glob(os.path.join(dir,'~*.xlsx'))
if len(open_files):
print( 'Please close ' + '\n'.join(open_files) + '\nin directory\n'+dir)
raise IOError('Excel files '+('\n'.join(open_files))+' are currently open in Excel')
def aggregate_multiple_excel_files(source_filenames):
allsheets = OrderedDict()
rebase_map = {}
# !! Poor sort - it assumes the integers are the same char length. Todo improve so that filename_5_ < filename_10_
for filename in sorted(source_filenames):
print('Reading and aggregating sheets in' , filename)
xl = pd.ExcelFile(filename)
for sheet in xl.sheet_names:
df = xl.parse(sheet)
df['XLSX_SOURCEFILE'] = filename
if sheet not in allsheets.keys():
allsheets[sheet] = df
rebase_map[filename+'#'+sheet] = 0
else:
row_offset = len(allsheets[sheet])
rebase_map[filename+'#'+sheet] = row_offset # We will need this to rebase parent values
df[ df.columns[0] ] += row_offset
allsheets[sheet] = allsheets[sheet].append(df, ignore_index =True, sort = False)
xl.close()
# print('rebase_map',rebase_map)
# The row index of the parent no longer starts at zero
print('Rebasing parent index entries in all sheets')
for sheet in allsheets.keys():
df = allsheets[sheet]
df['PARENT_ROW_INDEX'] = df.apply( lambda row: rebase_row( row,rebase_map), axis = 1)
df.drop('XLSX_SOURCEFILE', axis = 1, inplace = True)
return allsheets
def write_aggregated_model(output_filename, allsheets, dry_run):
print("Writing",output_filename)
if dry_run:
print("Dry run. Skipping ", allsheets.keys())
return
excelwriter = pd.ExcelWriter(output_filename, engine = excelengine)
try:
print("Writing Sheets ", allsheets.keys())
for sheetname,df in allsheets.items():
df.to_excel(excelwriter, sheet_name = sheetname, index_label = 'INDEX')
excelwriter.close()
except Exception as ex:
print(type(ex))
print(ex)
pass
finally:
excelwriter.close()
print('Writing finished\n')
def move_old_files(xlsx_dir, filemap, subdirname,dry_run):
xlsxpartsdir = os.path.join(xlsx_dir,subdirname)
if dry_run:
print('Dry run. Skipping move_old_files', filemap.items(),' to ', subdirname)
return
if not os.path.isdir(xlsxpartsdir):
os.mkdir(xlsxpartsdir)
for targetfile,sources in filemap.items():
for file in sources:
dest=os.path.join(xlsxpartsdir, os.path.basename(file))
print(dest)
os.rename(file, dest)
def aggregate_over_sections(xlsx_dir,dry_run):
sections_map= create_aggregate_sections_map(xlsx_dir)
for targetfile,sources in sections_map.items():
allsheets = aggregate_multiple_excel_files(sources)
write_aggregated_model(targetfile, allsheets, dry_run)
move_old_files(xlsx_dir, sections_map,'_EACH_SECTION_', dry_run)
def aggregate_over_common_objects(xlsx_dir,dry_run):
combined_map = create_aggregate_common_objects_map(xlsx_dir)
for targetfile,sources in combined_map.items():
allsheets = aggregate_multiple_excel_files(sources)
write_aggregated_model(targetfile, allsheets, dry_run)
move_old_files(xlsx_dir, combined_map, '_ALL_SECTIONS_', dry_run)
def create_column_metalist(xlsx_dir,dry_run):
xlsx_files = list_xlsx_files_in_dir(xlsx_dir)
metalist = []
for filename in xlsx_files:
print(filename)
xl = pd.ExcelFile(filename)
filename_local = os.path.basename(filename)
for sheet in xl.sheet_names:
df = xl.parse(sheet,nrows=1)
for column_name in df.columns:
metalist.append([filename_local,sheet,column_name])
xl.close()
meta_df = pd.DataFrame(metalist, columns=['file','sheet','column'])
meta_filename = os.path.join(xlsx_dir,'__All_COLUMNS.csv')
if dry_run:
print('Dry run. Skipping',meta_filename)
else:
meta_df.to_csv(meta_filename,sep='\t',index=False)
###Output
_____no_output_____
###Markdown
Run
###Code
# Configuration / settings here
archive_source_file = None
expanded_archive_directory = None
skip_expanding_if_xml_files_found = True
output_directory = None
generate_missing_anonid = True
geoip_datadir = None
anonid_csv_file = None
# A simple csv file with header 'userid','anonid'
anonid_output_filename='userids_anonids.csv' # None if mapping should not be written
delete_userids = False # User table will still have an 'id' column
#relateduserid, realuserid and userid columns in other tables are dropped
# Internal testing options
toplevel_xml_only = False # Don't process subdirectories. Occasionally useful for internal testing
dry_run = False # Don't write Excel files. Occasionally useful for internal testing
# Override the above here with the path to your mbz file (or expanded contents)
archive_source_file = os.path.join('..','example.mbz')
# ... or use expanded_archive_directory to point to an mbz file that has already been expanded into XML files
anonid_csv_file = None # os.path.join('..', 'example-userid-to-anonid.csv')
generate_missing_anonid = True
delete_userids = True
geoip_datadir= './geoip'
# Some typical numbers:
# A 400-student, 15-week course with 16 sections
# created a 4GB mbz which expanded to 367 MB of xml (the non-xml files were not extracted).
# About 30 minutes total processing time: 15 minutes to process the xml,
# 6 minutes for each aggregation step, 2 minutes for the column summary.
# Final output: 29 'ALL_' Excel files totalling 60MB (largest: ALL_quiz.xlsx 35MB, ALL_logstores 10MB, ALL_forum 5MB)
# The initial per-section output (moved to _EACH_SECTION_/) has 334 xlsx files,
# which is further reduced (see _ALL_SECTIONS_) to 67 files.
if not archive_source_file and not expanded_archive_directory:
raise ValueError('Nothing to do: No mbz archive file or archive directory (with .xml files) specified')
if archive_source_file and not os.path.isfile(archive_source_file) :
raise ValueError('archive_source_file (' + os.path.abspath(archive_source_file) + ") does not refer to an existing archive")
if not expanded_archive_directory:
expanded_archive_directory = archive_file_to_xml_dir(archive_source_file)
if not output_directory:
if archive_source_file:
output_directory = archive_file_to_output_dir(archive_source_file)
else:
raise ValueError('Please specify output_directory')
if anonid_csv_file:
print ('Using ' + anonid_csv_file + ' mapping')
anonid_df = pd.read_csv(anonid_csv_file)
validate_anonid_data(anonid_df)
else:
anonid_df = pd.DataFrame([{'userid':'-1','anonid':'example1234'}])
start_time = datetime.now()
print(start_time)
if(geoip_datadir and 'geoipv4_df' not in globals()):
load_geoip_data(geoip_datadir)
if archive_source_file:
lazy_extract_mbz(archive_source_file,expanded_archive_directory,skip_expanding_if_xml_files_found)
check_no_open_Excel_documents_in_Excel(output_directory)
# Now the actual processing can begin
process_xml_files(expanded_archive_directory,output_directory, toplevel_xml_only, dry_run, anonid_output_filename)
# At this point we have 100s of Excel documents (one per xml file), each with several sheets (~ one per xml tag)!
# We can aggregate over all of the course sections
aggregate_over_sections(output_directory, dry_run)
# Workshops, assignments etc have a similar structure, so we also aggregate over similar top-level objects
aggregate_over_common_objects(output_directory, dry_run)
create_column_metalist(output_directory, dry_run)
end_time = datetime.now()
print(end_time)
print(end_time-start_time)
###Output
_____no_output_____ |
analysis/histograms.ipynb | ###Markdown
HistogramsThis notebook demonstrates simple use of histograms in wn. Set up libraries and load exemplar dataset
###Code
# load libraries
import os
import opendp.whitenoise.core as wn
import numpy as np
import math
import statistics
# establish data information
data_path = os.path.join('.', 'data', 'PUMS_california_demographics_1000', 'data.csv')
var_names = ["age", "sex", "educ", "race", "income", "married"]
data = np.genfromtxt(data_path, delimiter=',', names=True)
age = list(data[:]['age'])
print("Dimension of dataset: " + str(data.shape))
print("Names of variables: " + str(data.dtype.names))
###Output
Dimension of dataset: (1000,)
Names of variables: ('age', 'sex', 'educ', 'race', 'income', 'married')
###Markdown
Creating DP Releases of HistogramsThe default method for generating a histogram in WhiteNoise is by releasing counts of each bin or category using the geometric mechanism. The geometric mechanism only returns integer values for any query, so resists some vulnerabilities of DP releases from floating point approximations (see Mironov 2012). It is also possible, however, to generate histograms from the more typical Laplace mechanism. We show both approaches below.Here we generate histograms on three types of variables:* A continuous variable, here `income`, where the set of numbers have to be divided into bins,* A boolean or dichotomous variable, here `sex`, that can only take on two values,* A categorical variable, here `education`, where there are distinct categories enumerated as strings.Note the education variable is coded in the data on a scale from 1 to 16, but we're leaving the coded values as strings throughout this notebook.
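Before using the library calls, a rough sketch may help build intuition (this is illustrative only and is not how the library itself is implemented): the geometric mechanism perturbs each integer count with two-sided geometric noise, the discrete analogue of Laplace noise, so the release stays integer-valued.
###Code
# Minimal sketch of a geometric mechanism (illustrative; the function name, defaults and seed are assumptions)
import numpy as np
def geometric_mechanism(counts, epsilon, sensitivity=1, seed=0):
    rng = np.random.default_rng(seed)
    p = 1 - np.exp(-epsilon / sensitivity)  # parameter of the two-sided geometric noise
    # difference of two i.i.d. geometric variables is two-sided geometric (discrete Laplace)
    noise = rng.geometric(p, size=len(counts)) - rng.geometric(p, size=len(counts))
    return np.asarray(counts) + noise  # noisy counts remain integers
print(geometric_mechanism([328, 183, 125, 105], epsilon=0.5))
###Output
_____no_output_____
###Markdown
The cells below use the library's own mechanisms directly.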
###Code
income_edges = list(range(0, 100000, 10000))
education_categories = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16"]
with wn.Analysis() as analysis:
data = wn.Dataset(path = data_path, column_names = var_names)
nsize = 1000
income_histogram = wn.dp_histogram(
wn.to_int(data['income'], lower=0, upper=100),
edges = income_edges,
upper = nsize,
null_value = 150,
privacy_usage = {'epsilon': 0.5}
)
income_prep = wn.histogram(wn.to_int(data['income'], lower=0, upper=100000),
edges=income_edges, null_value =-1)
income_histogram2 = wn.laplace_mechanism(income_prep, privacy_usage={"epsilon": 0.5, "delta": .000001})
sex_histogram = wn.dp_histogram(
wn.to_bool(data['sex'], true_label="0"),
upper = nsize,
privacy_usage = {'epsilon': 0.5}
)
sex_prep = wn.histogram(wn.to_bool(data['sex'], true_label="0"), null_value = True)
sex_histogram2 = wn.laplace_mechanism(sex_prep, privacy_usage={"epsilon": 0.5, "delta": .000001})
education_histogram = wn.dp_histogram(
data['educ'],
categories = education_categories,
null_value = "-1",
privacy_usage = {'epsilon': 0.5}
)
education_prep = wn.histogram(data['educ'],
categories = education_categories, null_value = "-1")
education_histogram2 = wn.laplace_mechanism(education_prep, privacy_usage={"epsilon": 0.5, "delta": .000001})
analysis.release()
print("Income histogram Geometric DP release: " + str(income_histogram.value))
print("Income histogram Laplace DP release: " + str(income_histogram2.value))
print("Sex histogram Geometric DP release: " + str(sex_histogram.value))
print("Sex histogram Laplace DP release: " + str(sex_histogram2.value))
print("Education histogram Geometric DP release:" + str(education_histogram.value))
print("Education histogram Laplace DP release: " + str(education_histogram2.value))
###Output
Income histogram Geometric DP release: [278 196 96 103 65 62 53 104 39 85]
Income histogram Laplace DP release: [295.3052913 186.53526817 123.29567384 100.85650317 60.21639407
47.01726179 40.39806265 19.93649819 16.99358144 75.37529966]
Sex histogram Geometric DP release: [485 514]
Sex histogram Laplace DP release: [486.63588064 539.19028398]
Education histogram Geometric DP release:[ 24 9 25 5 40 53 53 37 207 19 167 59 178 23 32 18 6]
Education histogram Laplace DP release: [ 32.99434939 18.3283286 41.24380174 10.64177579 17.71485788
16.44570654 35.4852772 54.55488846 197.43218538 59.72384568
169.34338544 75.37139662 179.65393207 57.39920629 19.23223424
5.08898451 9.42213613]
###Markdown
We can see most obviously that the releases from the Geometric mechanism are integer counts, while the Laplace releases are floating point numbers.Below, we will quickly create histograms of the actual private data, for a point of comparison to our differentially private releases:
###Code
import matplotlib.pyplot as plt
data = np.genfromtxt(data_path, delimiter=',', names=True)
income = list(data[:]['income'])
sex = list(data[:]['sex'])
education = list(data[:]['educ'])
# An "interface" to matplotlib.axes.Axes.hist() method
n_income, bins, patches = plt.hist(income, bins=list(range(0,110000,10000)), color='#0504aa',
alpha=0.7, rwidth=0.85)
plt.grid(axis='y', alpha=0.75)
plt.xlabel('Income')
plt.ylabel('Frequency')
plt.title('True Dataset Income Distribution')
plt.show()
n_sex, bins, patches = plt.hist(sex, bins=[-0.5,0.5,1.5], color='#0504aa',
alpha=0.7, rwidth=0.85)
plt.grid(axis='y', alpha=0.75)
plt.xlabel('Sex')
plt.ylabel('Frequency')
plt.title('True Dataset Sex Distribution')
plt.show()
n_educ, bins, patches = plt.hist(education, bins=list(range(1,19,1)), color='#0504aa',
alpha=0.7, rwidth=0.85)
plt.grid(axis='y', alpha=0.75)
plt.xlabel('Education')
plt.ylabel('Frequency')
plt.title('True Dataset Education Distribution')
plt.show()
###Output
_____no_output_____
###Markdown
Below we can see the differentially private releases of these variables in shades of red, against the "true" private counts in green.
###Code
import matplotlib.pyplot as plt
colorseq = ["forestgreen", "indianred", "orange", "orangered", "orchid"]
fig = plt.figure()
ax = fig.add_axes([0,0,1,1])
plt.ylim([-100,500])
#inccat = ["10k","20k","30k","40k","50k","60k","70k","80k","90k","100k"]
inccat = [10,20,30,40,50,60,70,80,90,100]
width=3
inccat_left = [x + width for x in inccat]
inccat_right = [x + 2*width for x in inccat]
ax.bar(inccat, n_income, width=width, color=colorseq[0], label='True Value')
ax.bar(inccat_left, income_histogram.value, width=width, color=colorseq[1], label='DP Geometric')
ax.bar(inccat_right, income_histogram2.value, width=width, color=colorseq[2], label='DP Laplace')
ax.legend()
plt.title('Histogram of Income')
plt.xlabel('Income, in thousands')
plt.ylabel('Count')
plt.show()
fig = plt.figure()
ax = fig.add_axes([0,0,1,1])
plt.ylim([0,800])
sexcat = [0,1]
width = 0.2
sexcat_left = [x + width for x in sexcat]
sexcat_right = [x + 2*width for x in sexcat]
ax.bar(sexcat, n_sex, width=width, color=colorseq[0], label='True Value')
ax.bar(sexcat_left, sex_histogram.value, width=width, color=colorseq[1], label='DP Geometric')
ax.bar(sexcat_right, sex_histogram2.value, width=width, color=colorseq[2], label='DP Laplace')
ax.legend()
plt.title('Histogram of Sex')
plt.ylabel('Count')
plt.show()
fig = plt.figure()
ax = fig.add_axes([0,0,1,1])
edcat = list(range(1,18))
width = 0.25
edcat_left = [x + width for x in edcat]
edcat_right = [x + 2*width for x in edcat]
ax.bar(edcat, n_educ, width=width, color=colorseq[0], label='True Value')
ax.bar(edcat_left, education_histogram.value, width=width, color=colorseq[1], label='DP Geometric')
ax.bar(edcat_right, education_histogram2.value, width=width, color=colorseq[2], label='DP Laplace')
ax.legend()
plt.title('Histogram of Education')
plt.xlabel('Educational Attainment Category')
plt.ylabel('Count')
plt.show()
###Output
_____no_output_____
###Markdown
HistogramsThis notebook demonstrates simple use of histograms in sn. Set up libraries and load exemplar dataset
###Code
# load libraries
import os
import opendp.smartnoise.core as sn
import numpy as np
import math
import statistics
# establish data information
data_path = os.path.join('.', 'data', 'PUMS_california_demographics_1000', 'data.csv')
var_names = ["age", "sex", "educ", "race", "income", "married"]
data = np.genfromtxt(data_path, delimiter=',', names=True)
age = list(data[:]['age'])
print("Dimension of dataset: " + str(data.shape))
print("Names of variables: " + str(data.dtype.names))
###Output
Dimension of dataset: (1000,)
Names of variables: ('age', 'sex', 'educ', 'race', 'income', 'married')
###Markdown
Creating DP Releases of HistogramsThe default method for generating a histogram in SmartNoise is by releasing counts of each bin or category using the geometric mechanism. The geometric mechanism only returns integer values for any query, so resists some vulnerabilities of DP releases from floating point approximations (see Mironov 2012). It is also possible, however, to generate histograms from the more typical Laplace mechanism, if `protect_floating_point` is disabled. We show both approaches below.Here we generate histograms on three types of variables:* A continuous variable, here `income`, where the set of numbers have to be divided into bins,* A boolean or dichotomous variable, here `sex`, that can only take on two values,* A categorical variable, here `education`, where there are distinct categories enumerated as strings.Note the education variable is coded in the data on a scale from 1 to 16, but we're leaving the coded values as strings throughout this notebook.
###Code
income_edges = list(range(0, 100000, 10000))
education_categories = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16"]
with sn.Analysis(protect_floating_point=False) as analysis:
data = sn.Dataset(path = data_path, column_names = var_names)
nsize = 1000
income_histogram = sn.dp_histogram(
sn.to_int(data['income'], lower=0, upper=100),
edges = income_edges,
upper = nsize,
null_value = 150,
privacy_usage = {'epsilon': 0.5}
)
income_prep = sn.histogram(sn.to_int(data['income'], lower=0, upper=100000),
edges=income_edges, null_value =-1)
income_histogram2 = sn.laplace_mechanism(income_prep, privacy_usage={"epsilon": 0.5, "delta": .000001})
sex_histogram = sn.dp_histogram(
sn.to_bool(data['sex'], true_label="0"),
upper = nsize,
privacy_usage = {'epsilon': 0.5}
)
sex_prep = sn.histogram(sn.to_bool(data['sex'], true_label="0"), null_value = True)
sex_histogram2 = sn.laplace_mechanism(sex_prep, privacy_usage={"epsilon": 0.5, "delta": .000001})
education_histogram = sn.dp_histogram(
data['educ'],
categories = education_categories,
null_value = "-1",
privacy_usage = {'epsilon': 0.5}
)
education_prep = sn.histogram(data['educ'],
categories = education_categories, null_value = "-1")
education_histogram2 = sn.laplace_mechanism(education_prep, privacy_usage={"epsilon": 0.5, "delta": .000001})
analysis.release()
print("Income histogram Geometric DP release: " + str(income_histogram.value))
print("Income histogram Laplace DP release: " + str(income_histogram2.value))
print("Sex histogram Geometric DP release: " + str(sex_histogram.value))
print("Sex histogram Laplace DP release: " + str(sex_histogram2.value))
print("Education histogram Geometric DP release:" + str(education_histogram.value))
print("Education histogram Laplace DP release: " + str(education_histogram2.value))
###Output
Income histogram Geometric DP release: [328 183 125 105 51 44 50 20 28 81]
Income histogram Laplace DP release: [328.43439275 179.14630012 128.92510327 100.32336682 57.80148524
45.24249663 44.09401206 19.1875304 21.75572722 73.4805747 ]
Sex histogram Geometric DP release: [490 517]
Sex histogram Laplace DP release: [485.21478911 518.7216044 ]
Education histogram Geometric DP release:[ 36 12 38 12 14 25 31 54 202 52 181 71 174 51 28 19 7]
Education histogram Laplace DP release: [ 32.86182951 15.89411893 33.02623805 16.02961592 9.07691342
28.04433679 31.6049838 48.61812995 200.63166861 59.07016954
158.95639487 80.70888165 177.9660686 56.21650881 20.67678776
14.14151341 2.37179348]
###Markdown
We can see most obviously that the releases from the Geometric mechanism are integer counts, while the Laplace releases are floating point numbers.Below, we will quickly create histograms of the actual private data, for a point of comparison to our differentially private releases:
###Code
import matplotlib.pyplot as plt
data = np.genfromtxt(data_path, delimiter=',', names=True)
income = list(data[:]['income'])
sex = list(data[:]['sex'])
education = list(data[:]['educ'])
# An "interface" to matplotlib.axes.Axes.hist() method
n_income, bins, patches = plt.hist(income, bins=list(range(0,110000,10000)), color='#0504aa',
alpha=0.7, rwidth=0.85)
plt.grid(axis='y', alpha=0.75)
plt.xlabel('Income')
plt.ylabel('Frequency')
plt.title('True Dataset Income Distribution')
plt.show()
n_sex, bins, patches = plt.hist(sex, bins=[-0.5,0.5,1.5], color='#0504aa',
alpha=0.7, rwidth=0.85)
plt.grid(axis='y', alpha=0.75)
plt.xlabel('Sex')
plt.ylabel('Frequency')
plt.title('True Dataset Sex Distribution')
plt.show()
n_educ, bins, patches = plt.hist(education, bins=list(range(1,19,1)), color='#0504aa',
alpha=0.7, rwidth=0.85)
plt.grid(axis='y', alpha=0.75)
plt.xlabel('Education')
plt.ylabel('Frequency')
plt.title('True Dataset Education Distribution')
plt.show()
###Output
_____no_output_____
###Markdown
Below we can see the differentially private releases of these variables in shades of red, against the "true" private counts in green.
###Code
import matplotlib.pyplot as plt
colorseq = ["forestgreen", "indianred", "orange", "orangered", "orchid"]
fig = plt.figure()
ax = fig.add_axes([0,0,1,1])
plt.ylim([-100,500])
#inccat = ["10k","20k","30k","40k","50k","60k","70k","80k","90k","100k"]
inccat = [10,20,30,40,50,60,70,80,90,100]
width=3
inccat_left = [x + width for x in inccat]
inccat_right = [x + 2*width for x in inccat]
ax.bar(inccat, n_income, width=width, color=colorseq[0], label='True Value')
ax.bar(inccat_left, income_histogram.value, width=width, color=colorseq[1], label='DP Geometric')
ax.bar(inccat_right, income_histogram2.value, width=width, color=colorseq[2], label='DP Laplace')
ax.legend()
plt.title('Histogram of Income')
plt.xlabel('Income, in thousands')
plt.ylabel('Count')
plt.show()
fig = plt.figure()
ax = fig.add_axes([0,0,1,1])
plt.ylim([0,800])
sexcat = [0,1]
width = 0.2
sexcat_left = [x + width for x in sexcat]
sexcat_right = [x + 2*width for x in sexcat]
ax.bar(sexcat, n_sex, width=width, color=colorseq[0], label='True Value')
ax.bar(sexcat_left, sex_histogram.value, width=width, color=colorseq[1], label='DP Geometric')
ax.bar(sexcat_right, sex_histogram2.value, width=width, color=colorseq[2], label='DP Laplace')
ax.legend()
plt.title('Histogram of Sex')
plt.ylabel('Count')
plt.show()
fig = plt.figure()
ax = fig.add_axes([0,0,1,1])
edcat = list(range(1,18))
width = 0.25
edcat_left = [x + width for x in edcat]
edcat_right = [x + 2*width for x in edcat]
ax.bar(edcat, n_educ, width=width, color=colorseq[0], label='True Value')
ax.bar(edcat_left, education_histogram.value, width=width, color=colorseq[1], label='DP Geometric')
ax.bar(edcat_right, education_histogram2.value, width=width, color=colorseq[2], label='DP Laplace')
ax.legend()
plt.title('Histogram of Education')
plt.xlabel('Educational Attainment Category')
plt.ylabel('Count')
plt.show()
###Output
_____no_output_____
###Markdown
Plot histograms
###Code
import os
import math
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from IPython.display import display, HTML
%matplotlib inline
def parse_if_number(s):
try: return float(s)
except: return True if s=="true" else False if s=="false" else s if s else None
def parse_ndarray(s):
return np.fromstring(s, sep=' ') if s else None
def get_file_name(name):
return name.replace(':', '-')
###Output
_____no_output_____
###Markdown
Config
###Code
inputFile = 'data.csv'
repetitionsCount = -1 # -1 = auto-detect
factors = ['R', 'T', 'm', 'D']
# Plots
histBinNum = 30 # Histograms
histCenter = True # Center distribution
plotSize = (10, 10)
plotStyle = 'seaborn-whitegrid'
# Save
saveFigures = False
# Filter scalars
scalarsFilter = ['Floorplan.userCount']
# Filter histograms
histFilter = ['Floorplan.copies:histogram', 'Floorplan.collisions:histogram', 'Floorplan.totalCollisions:histogram', 'Floorplan.msgsPerSlot:histogram']
histNames = [
('Floorplan.copies:histogram', 'Number of copies received by each user in an hear window', 1),
('Floorplan.collisions:histogram', 'Number of collisions received by the users', 1),
('Floorplan.totalCollisions:histogram', 'Number of colliding messages received by the users in each slot', 1),
('Floorplan.msgsPerSlot:histogram', 'Number of messages sent in each slot', 1),
]
###Output
_____no_output_____
###Markdown
Load scalars
###Code
df = pd.read_csv('exported_data/' + inputFile, converters = {
'attrvalue': parse_if_number,
'binedges': parse_ndarray,
'binvalues': parse_ndarray,
'vectime': parse_ndarray,
'vecvalue': parse_ndarray,
})
if repetitionsCount <= 0: # auto-detect
repetitionsCount = int(df[df.attrname == 'repetition']['attrvalue'].max()) + 1
print('Repetitions:', repetitionsCount)
scalars = df[(df.type == 'scalar') | ((df.type == 'itervar') & (df.attrname != 'TO')) | ((df.type == 'param') & (df.attrname == 'Floorplan.userCount')) | ((df.type == 'runattr') & (df.attrname == 'repetition'))]
scalars = scalars.assign(qname = scalars.attrname.combine_first(scalars.module + '.' + scalars.name))
for index, row in scalars[scalars.type == 'itervar'].iterrows():
val = scalars.loc[index, 'attrvalue']
if isinstance(val, str) and not all(c.isdigit() for c in val):
scalars.loc[index, 'attrvalue'] = eval(val)
scalars.value = scalars.value.combine_first(scalars.attrvalue.astype('float64'))
scalars_wide = scalars.pivot_table(index=['run'], columns='qname', values='value')
scalars_wide.sort_values([*factors, 'repetition'], inplace=True)
count = 0
for index in scalars_wide.index:
config = count // repetitionsCount
scalars_wide.loc[index, 'config'] = config
count += 1
scalars_wide = scalars_wide[['config', 'repetition', *factors, *scalarsFilter]]
# Computed
factorsCount = len(factors)
configsCount = len(scalars_wide)//repetitionsCount
print('Configs:', configsCount)
totalSims = configsCount*repetitionsCount
display(HTML("<style>div.output_scroll { height: auto; max-height: 48em; }</style>"))
pd.set_option('display.max_rows', totalSims)
pd.set_option('display.max_columns', 100)
if saveFigures:
os.makedirs('figures', exist_ok=True)
###Output
Configs: 16
###Markdown
Load histograms
###Code
histograms = df[df.type == 'histogram']
histograms = histograms.assign(qname = histograms.module + '.' + histograms.name)
histograms = histograms[histograms.qname.isin(histFilter)]
for index in scalars_wide.index:
r = index
cfg = scalars_wide.loc[index, 'config']
rep = scalars_wide.loc[index, 'repetition']
histograms.loc[histograms.run == r, 'config'] = cfg
histograms.loc[histograms.run == r, 'repetition'] = rep
for histname, _, _ in histNames:
histograms.loc[(histograms.run == r) & (histograms.qname == histname), 'binsize'] = histograms.loc[(histograms.run == r) & (histograms.qname == histname), 'binedges'].values[0][1] - histograms.loc[(histograms.run == r) & (histograms.qname == histname), 'binedges'].values[0][0]
histograms.loc[(histograms.run == r) & (histograms.qname == histname), 'binmin'] = histograms.loc[(histograms.run == r) & (histograms.qname == histname), 'binedges'].values[0].min()
histograms.loc[(histograms.run == r) & (histograms.qname == histname), 'binmax'] = histograms.loc[(histograms.run == r) & (histograms.qname == histname), 'binedges'].values[0].max()
histograms.sort_values(['config', 'repetition', 'qname'], inplace=True)
for cfg in range(0, configsCount):
for histname, _, _ in histNames:
histograms.loc[(histograms.config == cfg) & (histograms.qname == histname), 'binsizelcm'] = np.lcm.reduce(list(map(int, histograms.loc[(histograms.config == cfg) & (histograms.qname == histname), 'binsize'].values.tolist())))
histograms.loc[(histograms.config == cfg) & (histograms.qname == histname), 'binminall'] = histograms.loc[(histograms.config == cfg) & (histograms.qname == histname), 'binmin'].min()
histograms.loc[(histograms.config == cfg) & (histograms.qname == histname), 'binmaxall'] = histograms.loc[(histograms.config == cfg) & (histograms.qname == histname), 'binmax'].max()
histograms = histograms[['config', 'repetition', 'qname', 'binmin', 'binmax', 'binsize', 'binedges', 'binvalues', 'binminall', 'binmaxall', 'binsizelcm']]
###Output
_____no_output_____
###Markdown
Compute means and ranges
###Code
def get_values_for_bin(hist, low, high):
edges = hist['binedges'].values[0]
values = hist['binvalues'].values[0]
inbin = []
lowidx = 0
highidx = 0
for edge in edges:
if edge < low:
lowidx += 1
if edge < high:
highidx += 1
continue
break
minval = math.inf
maxval = -math.inf
for i in range(lowidx, highidx):
if i > len(values) - 1:
break
inbin.append(values[i])
if values[i] < minval:
minval = values[i]
if values[i] > maxval:
maxval = values[i]
if len(inbin) == 0:
return (minval, 0, maxval)
return (minval, sum(inbin) / len(inbin), maxval)
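# Illustrative example: with binedges [0, 1, 2, 3] and binvalues [5, 7, 9],
# get_values_for_bin(hist, 0, 2) collects the first two bins and returns (5, 6.0, 7),
# i.e. the (min, mean, max) of the per-bin counts falling inside the aggregated bin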
cols = ['config']
for histname, _, _ in histNames:
name = histname[histname.index('.')+1:histname.index(':')]
cols.append(name + 'Bins')
cols.append(name + 'MeanValues')
cols.append(name + 'LowValues')
cols.append(name + 'HighValues')
data = []
for cfg in range(0, configsCount):
curdata = [cfg]
for histname, _, stepMultiplier in histNames:
binmin = int(histograms.loc[(histograms.config == cfg) & (histograms.qname == histname), 'binminall'].values[0])
binstep = int(stepMultiplier) * int(histograms.loc[(histograms.config == cfg) & (histograms.qname == histname), 'binsizelcm'].values[0])
binmax = 1 + int(histograms.loc[(histograms.config == cfg) & (histograms.qname == histname), 'binmaxall'].values[0])
bins = np.arange(binmin, binmax, binstep)
totalSize = (binmax - binmin - 1)//binstep
meanValues = np.zeros(totalSize)
lowValues = np.full(totalSize, math.inf)
highValues = np.full(totalSize, -math.inf)
for rep in range(0, repetitionsCount):
curHist = histograms[(histograms.config == cfg) & (histograms.qname == histname) & (histograms.repetition == rep)]
num = 0
for binlow, binhigh in zip(range(binmin, binmax - 1, binstep), range(binmin + binstep, binmax + binstep, binstep)):
values = get_values_for_bin(curHist, binlow, binhigh)
if lowValues[num] > values[0]:
lowValues[num] = values[0]
meanValues[num] += values[1]
if highValues[num] < values[2]:
highValues[num] = values[2]
num += 1
for i in range(0, len(meanValues)):
meanValues[i] = meanValues[i] / repetitionsCount
curdata.append(bins)
curdata.append(meanValues)
curdata.append(lowValues)
curdata.append(highValues)
data.append(curdata)
plotdf = pd.DataFrame.from_records(data, columns=cols, index='config')
###Output
_____no_output_____
###Markdown
Plots
###Code
for cfg, hist in plotdf.iterrows():
print('Config ' + str(cfg))
display(scalars_wide.loc[(scalars_wide.repetition == 0) & (scalars_wide.config == cfg)][['config', *factors]])
for histName, histDesc, _ in histNames:
name = histName[histName.index('.')+1:histName.index(':')]
bins = hist[name + 'Bins']
means = hist[name + 'MeanValues']
lows = hist[name + 'LowValues']
highs = hist[name + 'HighValues']
bincenters = 0.5*(bins[1:]+bins[:-1])
ranges = [x for x in zip(lows, highs)]
ranges = np.array(ranges).T
plt.bar(bincenters, means, width=1, yerr=ranges, error_kw={'capsize': 3})
plt.title('Histogram for the ' + histDesc)
plt.xlabel(name)
if saveFigures:
fig = plt.gcf()
fig.savefig('figures/' + get_file_name(histName) + '-' + str(cfg) + '-perfplot.png')
plt.show()
print('#######################')
print()
###Output
Config 0
###Markdown
HistogramsThis notebook demonstrates simple use of histograms in sn. Set up libraries and load exemplar dataset
###Code
# load libraries
import os
import opendp.smartnoise.core as sn
import numpy as np
import math
import statistics
# establish data information
data_path = os.path.join('.', 'data', 'PUMS_california_demographics_1000', 'data.csv')
var_names = ["age", "sex", "educ", "race", "income", "married"]
data = np.genfromtxt(data_path, delimiter=',', names=True)
age = list(data[:]['age'])
print("Dimension of dataset: " + str(data.shape))
print("Names of variables: " + str(data.dtype.names))
###Output
Dimension of dataset: (1000,)
Names of variables: ('age', 'sex', 'educ', 'race', 'income', 'married')
###Markdown
Creating DP Releases of HistogramsThe default method for generating a histogram in SmartNoise is by releasing counts of each bin or category using the geometric mechanism. The geometric mechanism only returns integer values for any query, so resists some vulnerabilities of DP releases from floating point approximations (see Mironov 2012). It is also possible, however, to generate histograms from the more typical Laplace mechanism, if `protect_floating_point` is disabled. We show both approaches below.Here we generate histograms on three types of variables:* A continuous variable, here `income`, where the set of numbers have to be divided into bins,* A boolean or dichotomous variable, here `sex`, that can only take on two values,* A categorical variable, here `education`, where there are distinct categories enumerated as strings.Note the education variable is coded in the data on a scale from 1 to 16, but we're leaving the coded values as strings throughout this notebook.
###Code
income_edges = list(range(0, 100000, 10000))
education_categories = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16"]
with sn.Analysis(protect_floating_point=False) as analysis:
data = sn.Dataset(path = data_path, column_names = var_names)
nsize = 1000
income_histogram = sn.dp_histogram(
sn.to_int(data['income'], lower=0, upper=100),
edges = income_edges,
upper = nsize,
null_value = 150,
privacy_usage = {'epsilon': 0.5}
)
income_prep = sn.histogram(sn.to_int(data['income'], lower=0, upper=100000),
edges=income_edges, null_value =-1)
income_histogram2 = sn.laplace_mechanism(income_prep, privacy_usage={"epsilon": 0.5, "delta": .000001})
sex_histogram = sn.dp_histogram(
sn.to_bool(data['sex'], true_label="0"),
upper = nsize,
privacy_usage = {'epsilon': 0.5}
)
sex_prep = sn.histogram(sn.to_bool(data['sex'], true_label="0"), null_value = True)
sex_histogram2 = sn.laplace_mechanism(sex_prep, privacy_usage={"epsilon": 0.5, "delta": .000001})
education_histogram = sn.dp_histogram(
data['educ'],
categories = education_categories,
null_value = "-1",
privacy_usage = {'epsilon': 0.5}
)
education_prep = sn.histogram(data['educ'],
categories = education_categories, null_value = "-1")
education_histogram2 = sn.laplace_mechanism(education_prep, privacy_usage={"epsilon": 0.5, "delta": .000001})
analysis.release()
print("Income histogram Geometric DP release: " + str(income_histogram.value))
print("Income histogram Laplace DP release: " + str(income_histogram2.value))
print("Sex histogram Geometric DP release: " + str(sex_histogram.value))
print("Sex histogram Laplace DP release: " + str(sex_histogram2.value))
print("Education histogram Geometric DP release:" + str(education_histogram.value))
print("Education histogram Laplace DP release: " + str(education_histogram2.value))
###Output
Income histogram Geometric DP release: [328 183 125 105 51 44 50 20 28 81]
Income histogram Laplace DP release: [328.43439275 179.14630012 128.92510327 100.32336682 57.80148524
45.24249663 44.09401206 19.1875304 21.75572722 73.4805747 ]
Sex histogram Geometric DP release: [490 517]
Sex histogram Laplace DP release: [485.21478911 518.7216044 ]
Education histogram Geometric DP release:[ 36 12 38 12 14 25 31 54 202 52 181 71 174 51 28 19 7]
Education histogram Laplace DP release: [ 32.86182951 15.89411893 33.02623805 16.02961592 9.07691342
28.04433679 31.6049838 48.61812995 200.63166861 59.07016954
158.95639487 80.70888165 177.9660686 56.21650881 20.67678776
14.14151341 2.37179348]
###Markdown
We can see most obviously that the releases from the Geometric mechanism are integer counts, while the Laplace releases are floating point numbers.Below, we will quickly create histograms of the actual private data, for a point of comparison to our differentially private releases:
###Code
import matplotlib.pyplot as plt
data = np.genfromtxt(data_path, delimiter=',', names=True)
income = list(data[:]['income'])
sex = list(data[:]['sex'])
education = list(data[:]['educ'])
# An "interface" to matplotlib.axes.Axes.hist() method
n_income, bins, patches = plt.hist(income, bins=list(range(0,110000,10000)), color='#0504aa',
alpha=0.7, rwidth=0.85)
plt.grid(axis='y', alpha=0.75)
plt.xlabel('Income')
plt.ylabel('Frequency')
plt.title('True Dataset Income Distribution')
plt.show()
n_sex, bins, patches = plt.hist(sex, bins=[-0.5,0.5,1.5], color='#0504aa',
alpha=0.7, rwidth=0.85)
plt.grid(axis='y', alpha=0.75)
plt.xlabel('Sex')
plt.ylabel('Frequency')
plt.title('True Dataset Sex Distribution')
plt.show()
n_educ, bins, patches = plt.hist(education, bins=list(range(1,19,1)), color='#0504aa',
alpha=0.7, rwidth=0.85)
plt.grid(axis='y', alpha=0.75)
plt.xlabel('Education')
plt.ylabel('Frequency')
plt.title('True Dataset Education Distribution')
plt.show()
###Output
_____no_output_____
###Markdown
Below we can see the differentially private releases of these variables in shades of red, against the "true" private counts in green.
###Code
import matplotlib.pyplot as plt
colorseq = ["forestgreen", "indianred", "orange", "orangered", "orchid"]
fig = plt.figure()
ax = fig.add_axes([0,0,1,1])
plt.ylim([-100,500])
#inccat = ["10k","20k","30k","40k","50k","60k","70k","80k","90k","100k"]
inccat = [10,20,30,40,50,60,70,80,90,100]
width=3
inccat_left = [x + width for x in inccat]
inccat_right = [x + 2*width for x in inccat]
ax.bar(inccat, n_income, width=width, color=colorseq[0], label='True Value')
ax.bar(inccat_left, income_histogram.value, width=width, color=colorseq[1], label='DP Geometric')
ax.bar(inccat_right, income_histogram2.value, width=width, color=colorseq[2], label='DP Laplace')
ax.legend()
plt.title('Histogram of Income')
plt.xlabel('Income, in thousands')
plt.ylabel('Count')
plt.show()
fig = plt.figure()
ax = fig.add_axes([0,0,1,1])
plt.ylim([0,800])
sexcat = [0,1]
width = 0.2
sexcat_left = [x + width for x in sexcat]
sexcat_right = [x + 2*width for x in sexcat]
ax.bar(sexcat, n_sex, width=width, color=colorseq[0], label='True Value')
ax.bar(sexcat_left, sex_histogram.value, width=width, color=colorseq[1], label='DP Geometric')
ax.bar(sexcat_right, sex_histogram2.value, width=width, color=colorseq[2], label='DP Laplace')
ax.legend()
plt.title('Histogram of Sex')
plt.ylabel('Count')
plt.show()
fig = plt.figure()
ax = fig.add_axes([0,0,1,1])
edcat = list(range(1,18))
width = 0.25
edcat_left = [x + width for x in edcat]
edcat_right = [x + 2*width for x in edcat]
ax.bar(edcat, n_educ, width=width, color=colorseq[0], label='True Value')
ax.bar(edcat_left, education_histogram.value, width=width, color=colorseq[1], label='DP Geometric')
ax.bar(edcat_right, education_histogram2.value, width=width, color=colorseq[2], label='DP Laplace')
ax.legend()
plt.title('Histogram of Education')
plt.xlabel('Educational Attainment Category')
plt.ylabel('Count')
plt.show()
###Output
_____no_output_____ |
qiskit/advanced/aqua/finance/data_providers/time_series.ipynb | ###Markdown
 _*Qiskit Finance: Loading and Processing Stock-Market Time-Series Data*_The latest version of this notebook is available on https://github.com/qiskit/qiskit-tutorial.*** ContributorsJakub Marecek[1] Affiliation- [1]IBMQ IntroductionAcross many problems in finance, one starts with time series. Here, we showcase how to generate pseudo-random time-series, download actual stock-market time series from a number of common providers, and how to compute time-series similarity measures.
###Code
%matplotlib inline
from qiskit.finance.data_providers import *
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
import datetime
import matplotlib.pyplot as plt
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
data = RandomDataProvider(tickers=["TICKER1", "TICKER2"],
start = datetime.datetime(2016, 1, 1),
end = datetime.datetime(2016, 1, 30),
seed = 1)
data.run()
###Output
_____no_output_____
###Markdown
Once the data are loaded, you can run a variety of algorithms on those to aggregate the data. Notably, you can compute the covariance matrix or a variant, which would consider alternative time-series similarity measures based on dynamic time warping (DTW). In DTW, changes that vary in speed, e.g., one stock's price following another stock's price with a small delay, can be accommodated.
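As a point of reference, a bare-bones DTW distance between two short series can be computed with the classic dynamic-programming recursion (this sketch is illustrative only and is not the implementation used by the data providers):
###Code
# Minimal DTW sketch (illustrative; not part of qiskit.finance)
import numpy as np
def dtw_distance(x, y):
    x, y = np.asarray(x, dtype=float), np.asarray(y, dtype=float)
    n, m = len(x), len(y)
    D = np.full((n + 1, m + 1), np.inf)  # D[i, j]: best alignment cost of x[:i] and y[:j]
    D[0, 0] = 0.0
    for i in range(1, n + 1):
        for j in range(1, m + 1):
            cost = abs(x[i - 1] - y[j - 1])
            D[i, j] = cost + min(D[i - 1, j], D[i, j - 1], D[i - 1, j - 1])
    return D[n, m]
# A series that lags by one step still matches closely under DTW:
print(dtw_distance([1, 2, 3, 4], [1, 1, 2, 3, 4]))
###Output
_____no_output_____
###Markdown
The `get_similarity_matrix` call below exposes such a DTW-based measure through the data provider.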
###Code
means = data.get_mean_vector()
print("Means:")
print(means)
rho = data.get_similarity_matrix()
print("A time-series similarity measure:")
print(rho)
plt.imshow(rho)
plt.show()
cov = data.get_covariance_matrix()
print("A covariance matrix:")
print(cov)
plt.imshow(cov)
plt.show()
###Output
Means:
[16.66722941 72.03026566]
A time-series similarity measure:
[[1.0000000e+00 6.2284804e-04]
[6.2284804e-04 1.0000000e+00]]
###Markdown
If you wish, you can look into the underlying pseudo-random time-series as follows. Please note that the private class members (starting with underscore) may change in future releases of Qiskit.
###Code
print("The underlying evolution of stock prices:")
for (cnt, s) in enumerate(data._tickers):
plt.plot(data._data[cnt], label=s)
plt.legend()
plt.xticks(rotation=90)
plt.show()
for (cnt, s) in enumerate(data._tickers):
print(s)
print(data._data[cnt])
###Output
The underlying evolution of stock prices:
###Markdown
Clearly, you can adapt the number and names of tickers and the range of dates:
###Code
data = RandomDataProvider(tickers=["CompanyA", "CompanyB", "CompanyC"],
start = datetime.datetime(2015, 1, 1),
end = datetime.datetime(2016, 1, 30),
seed = 1)
data.run()
for (cnt, s) in enumerate(data._tickers):
plt.plot(data._data[cnt], label=s)
plt.legend()
plt.xticks(rotation=90)
plt.show()
###Output
_____no_output_____
###Markdown
Access to closing-price time-seriesWhile the access to real-time data usually requires a payment, it is possible to access historical (adjusted) closing prices via Wikipedia and Quandl free of charge, following registration at: https://www.quandl.com/?modal=register In the code below, one needs to specify actual tickers of actual NASDAQ issues and the access token you obtain from Quandl; by running the code below, you agree to the Quandl terms and conditions, including a liability waiver. Notice that at least two tickers are required for the computation of covariance and time-series matrices, but hundreds of tickers may go beyond the fair usage limits of Quandl.
###Code
stocks = ["REPLACEME1", "REPLACEME2"]
wiki = WikipediaDataProvider(
token = "REPLACEME",
tickers = stocks,
stockmarket = StockMarket.NASDAQ,
start = datetime.datetime(2016,1,1),
end = datetime.datetime(2016,1,30))
wiki.run()
###Output
_____no_output_____
###Markdown
Once the data are loaded, you can again compute the covariance matrix or its DTW variants.
###Code
if wiki._n <= 1:
raise Exception("Not enough data to plot covariance or time-series similarity. Please use at least two tickers.")
rho = wiki.get_similarity_matrix()
print("A time-series similarity measure:")
print(rho)
plt.imshow(rho)
plt.show()
cov = wiki.get_covariance_matrix()
print("A covariance matrix:")
print(cov)
plt.imshow(cov)
plt.show()
###Output
_____no_output_____
###Markdown
If you wish, you can look into the underlying time-series using:
###Code
print("The underlying evolution of stock prices:")
for (cnt, s) in enumerate(stocks):
plt.plot(wiki._data[cnt], label=s)
plt.legend()
plt.xticks(rotation=90)
plt.show()
for (cnt, s) in enumerate(stocks):
print(s)
print(wiki._data[cnt])
###Output
_____no_output_____
###Markdown
[Optional] Setup token to access recent, fine-grained time-seriesIf you would like to download professional data, you will have to set-up a token with one of the major providers. Let us now illustrate the data with NASDAQ Data on Demand, which can supply bid and ask prices in arbitrary resolution, as well as aggregates such as daily adjusted closing prices, for NASDAQ and NYSE issues. If you don't have a NASDAQ Data on Demand license, you can contact NASDAQ (cf. https://business.nasdaq.com/intel/GIS/Nasdaq-Data-on-Demand.html) to obtain a trial or paid license.If and when you have access to NASDAQ Data on Demand using your own token, you should replace REPLACE-ME below with the token. To assure the security of the connection, you should also have your own means of validating NASDAQ's certificates. The DataOnDemandProvider constructor has an optional argument `verify`, which can be `None` or a string or a boolean. If it is `None`, certifi certificates will be used (default). If verify is a string, it should be pointing to a certificate for the HTTPS connection to NASDAQ (dataondemand.nasdaq.com), either in the form of a CA_BUNDLE file or a directory wherein to look.
###Code
from qiskit.finance.data_providers.data_on_demand_provider import StockMarket
try:
nasdaq = DataOnDemandProvider(token = "REPLACE-ME",
tickers = stocks,
stockmarket = StockMarket.NASDAQ,
start = datetime.datetime(2016,1,1),
end = datetime.datetime(2016,1,2))
nasdaq.run()
nasdaq.plot()
except QiskitFinanceError as e:
print(e)
print("You need to replace REPLACE-ME with a valid token.")
###Output
_____no_output_____
###Markdown
Another major vendor of stock market data is Exchange Data International (EDI), whose API can be used to query over 100 emerging and frontier markets across Africa, Asia, the Far East, Latin America and the Middle East, as well as the more established ones. See https://www.exchange-data.com/pricing-data/adjusted-prices.php#exchange-coverage for an overview of the coverage. The access again requires a valid access token to replace REPLACE-ME below. The token can be obtained on a trial or paid-for basis at: https://www.quandl.com/ In the following example, you need to replace TICKER1 and TICKER2 with valid tickers at the London Stock Exchange.
###Code
from qiskit.finance.data_providers.exchangedataprovider import StockMarket
try:
lse = ExchangeDataProvider(token = "REPLACE-ME",
tickers = ["TICKER1", "TICKER2"],
stockmarket = StockMarket.LONDON,
start = datetime.datetime(2019,1,1),
end = datetime.datetime(2019,1,30))
lse.run()
lse.plot()
except QiskitFinanceError as e:
print(e)
print("You need to replace REPLACE-ME with a valid token.")
###Output
_____no_output_____
###Markdown
For the actual use of the data, please see the portfolio_optimization or portfolio_diversification notebooks.
###Code
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
###Output
_____no_output_____
###Markdown
Trusted Notebook" align="middle"> _*Qiskit Finance: Loading and Processing Stock-Market Time-Series Data*_The latest version of this notebook is available on https://github.com/qiskit/qiskit-tutorial.*** ContributorsJakub Marecek[1] Affiliation- [1]IBMQ IntroductionAcross many problems in finance, one starts with time series. Here, we showcase how to generate pseudo-random time-series, download actual stock-market time series from a number of common providers, and how to compute time-series similarity measures.
###Code
%matplotlib inline
from qiskit.aqua.translators.data_providers import *
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
import datetime
import matplotlib.pyplot as plt
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
data = RandomDataProvider(tickers=["TICKER1", "TICKER2"],
start = datetime.datetime(2016, 1, 1),
end = datetime.datetime(2016, 1, 30),
seed = 1)
data.run()
###Output
_____no_output_____
###Markdown
Once the data are loaded, you can run a variety of algorithms on those to aggregate the data. Notably, you can compute the covariance matrix or a variant, which would consider alternative time-series similarity measures based on dynamic time warping (DTW). In DTW, changes that vary in speed, e.g., one stock's price following another stock's price with a small delay, can be accommodated.
###Code
means = data.get_mean_vector()
print("Means:")
print(means)
rho = data.get_similarity_matrix()
print("A time-series similarity measure:")
print(rho)
plt.imshow(rho)
plt.show()
cov = data.get_covariance_matrix()
print("A covariance matrix:")
print(cov)
plt.imshow(cov)
plt.show()
###Output
Means:
[16.66722941 72.03026566]
A time-series similarity measure:
[[1.0000000e+00 6.2284804e-04]
[6.2284804e-04 1.0000000e+00]]
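###Markdown
To make the DTW idea above concrete, here is a small, self-contained sketch of the textbook dynamic-programming DTW distance between two short series. It is only an illustration of the recurrence, not the implementation behind `get_similarity_matrix`.
###Code
import numpy as np

def dtw_distance(a, b):
    """Textbook O(len(a) * len(b)) dynamic-programming DTW distance."""
    n, m = len(a), len(b)
    cost = np.full((n + 1, m + 1), np.inf)
    cost[0, 0] = 0.0
    for i in range(1, n + 1):
        for j in range(1, m + 1):
            d = abs(a[i - 1] - b[j - 1])
            cost[i, j] = d + min(cost[i - 1, j],      # series a waits
                                 cost[i, j - 1],      # series b waits
                                 cost[i - 1, j - 1])  # both advance
    return cost[n, m]

# The second toy series follows the first with a one-step delay;
# DTW can align the two despite the shift.
print(dtw_distance([1.0, 2.0, 3.0, 2.0, 1.0], [1.0, 1.0, 2.0, 3.0, 2.0]))
###Output
_____no_output_____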
###Markdown
If you wish, you can look into the underlying pseudo-random time-series using the code below. Please note that the private class members (starting with underscore) may change in future releases of Qiskit.
###Code
print("The underlying evolution of stock prices:")
for (cnt, s) in enumerate(data._tickers):
plt.plot(data._data[cnt], label=s)
plt.legend()
plt.xticks(rotation=90)
plt.show()
for (cnt, s) in enumerate(data._tickers):
print(s)
print(data._data[cnt])
###Output
The underlying evolution of stock prices:
###Markdown
Clearly, you can adapt the number and names of tickers and the range of dates:
###Code
data = RandomDataProvider(tickers=["CompanyA", "CompanyB", "CompanyC"],
start = datetime.datetime(2015, 1, 1),
end = datetime.datetime(2016, 1, 30),
seed = 1)
data.run()
for (cnt, s) in enumerate(data._tickers):
plt.plot(data._data[cnt], label=s)
plt.legend()
plt.xticks(rotation=90)
plt.show()
###Output
_____no_output_____
###Markdown
Access to closing-price time-series While access to real-time data usually requires a payment, it is possible to access historical (adjusted) closing prices via Wikipedia and Quandl free of charge, following registration at: https://www.quandl.com/?modal=register In the code below, one needs to specify actual tickers of actual NASDAQ issues and the access token you obtain from Quandl; by running the code below, you agree to the Quandl terms and conditions, including a liability waiver. Notice that at least two tickers are required for the computation of covariance and time-series matrices, but hundreds of tickers may go beyond the fair usage limits of Quandl.
###Code
stocks = ["REPLACEME1", "REPLACEME2"]
wiki = WikipediaDataProvider(
token = "REPLACEME",
tickers = stocks,
stockmarket = StockMarket.NASDAQ,
start = datetime.datetime(2016,1,1),
end = datetime.datetime(2016,1,30))
wiki.run()
###Output
_____no_output_____
###Markdown
Once the data are loaded, you can again compute the covariance matrix or its DTW variants.
###Code
if wiki._n <= 1:
raise Exception("Not enough data to plot covariance or time-series similarity. Please use at least two tickers.")
rho = wiki.get_similarity_matrix()
print("A time-series similarity measure:")
print(rho)
plt.imshow(rho)
plt.show()
cov = wiki.get_covariance_matrix()
print("A covariance matrix:")
print(cov)
plt.imshow(cov)
plt.show()
###Output
_____no_output_____
###Markdown
If you wish, you can look into the underlying time-series using:
###Code
print("The underlying evolution of stock prices:")
for (cnt, s) in enumerate(stocks):
plt.plot(wiki._data[cnt], label=s)
plt.legend()
plt.xticks(rotation=90)
plt.show()
for (cnt, s) in enumerate(stocks):
print(s)
print(wiki._data[cnt])
###Output
_____no_output_____
###Markdown
[Optional] Setup token to access recent, fine-grained time-series If you would like to download professional data, you will have to set up a token with one of the major providers. Let us now illustrate the data with NASDAQ Data on Demand, which can supply bid and ask prices in arbitrary resolution, as well as aggregates such as daily adjusted closing prices, for NASDAQ and NYSE issues. If you don't have a NASDAQ Data on Demand license, you can contact NASDAQ (cf. https://business.nasdaq.com/intel/GIS/Nasdaq-Data-on-Demand.html) to obtain a trial or paid license. If and when you have access to NASDAQ Data on Demand using your own token, you should replace REPLACE-ME below with the token. To assure the security of the connection, you should also have your own means of validating NASDAQ's certificates. The DataOnDemandProvider constructor has an optional argument `verify`, which can be `None`, a string, or a boolean. If it is `None`, certifi certificates will be used (default). If verify is a string, it should point to a certificate for the HTTPS connection to NASDAQ (dataondemand.nasdaq.com), either in the form of a CA_BUNDLE file or a directory wherein to look.
###Code
from qiskit.aqua.translators.data_providers.data_on_demand_provider import StockMarket
try:
nasdaq = DataOnDemandProvider(token = "REPLACE-ME",
tickers = stocks,
stockmarket = StockMarket.NASDAQ,
start = datetime.datetime(2016,1,1),
end = datetime.datetime(2016,1,2))
nasdaq.run()
nasdaq.plot()
except QiskitFinanceError as e:
print(e)
print("You need to replace REPLACE-ME with a valid token.")
###Output
_____no_output_____
###Markdown
Another major vendor of stock market data is Exchange Data International (EDI), whose API can be used to query over 100 emerging and frontier markets in Africa, Asia, the Far East, Latin America, and the Middle East, as well as the more established ones. See: https://www.exchange-data.com/pricing-data/adjusted-prices.php#exchange-coverage for an overview of the coverage. The access again requires a valid access token to replace REPLACE-ME below. The token can be obtained on a trial or paid-for basis at: https://www.quandl.com/ In the following example, you need to replace TICKER1 and TICKER2 with valid tickers at the London Stock Exchange.
###Code
from qiskit.aqua.translators.data_providers.exchangedataprovider import StockMarket
try:
lse = ExchangeDataProvider(token = "REPLACE-ME",
tickers = ["TICKER1", "TICKER2"],
stockmarket = StockMarket.LONDON,
start = datetime.datetime(2019,1,1),
end = datetime.datetime(2019,1,30))
lse.run()
lse.plot()
except QiskitFinanceError as e:
print(e)
print("You need to replace REPLACE-ME with a valid token.")
###Output
_____no_output_____
###Markdown
For the actual use of the data, please see the portfolio_optimization or portfolio_diversification notebooks.
###Code
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
###Output
_____no_output_____
###Markdown
Trusted Notebook" align="middle"> _*Qiskit Finance: Loading and Processing Stock-Market Time-Series Data*_The latest version of this notebook is available on https://github.com/qiskit/qiskit-tutorial.*** ContributorsJakub Marecek[1] Affiliation- [1]IBMQ IntroductionAcross many problems in finance, one starts with time series. Here, we showcase how to generate pseudo-random time-series, download actual stock-market time series from a number of common providers, and how to compute time-series similarity measures.
###Code
%matplotlib inline
from qiskit.aqua.translators.data_providers import *
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
import datetime
import matplotlib.pyplot as plt
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
data = RandomDataProvider(tickers=["TICKER1", "TICKER2"],
start = datetime.datetime(2016, 1, 1),
end = datetime.datetime(2016, 1, 30),
seed = 1)
data.run()
###Output
_____no_output_____
###Markdown
Once the data are loaded, you can run a variety of algorithms on those to aggregate the data. Notably, you can compute the covariance matrix or a variant, which would consider alternative time-series similarity measures based on dynamic time warping (DTW). In DTW, changes that vary in speed, e.g., one stock's price following another stock's price with a small delay, can be accommodated.
###Code
means = data.get_mean_vector()
print("Means:")
print(means)
rho = data.get_similarity_matrix()
print("A time-series similarity measure:")
print(rho)
plt.imshow(rho)
plt.show()
cov = data.get_covariance_matrix()
print("A covariance matrix:")
print(cov)
plt.imshow(cov)
plt.show()
###Output
Means:
[16.66722941 72.03026566]
A time-series similarity measure:
[[1.0000000e+00 6.2284804e-04]
[6.2284804e-04 1.0000000e+00]]
###Markdown
If you wish, you can look into the underlying pseudo-random time-series using the code below. Please note that the private class members (starting with underscore) may change in future releases of Qiskit.
###Code
print("The underlying evolution of stock prices:")
for (cnt, s) in enumerate(data._tickers):
plt.plot(data._data[cnt], label=s)
plt.legend()
plt.xticks(rotation=90)
plt.show()
for (cnt, s) in enumerate(data._tickers):
print(s)
print(data._data[cnt])
###Output
The underlying evolution of stock prices:
###Markdown
Clearly, you can adapt the number and names of tickers and the range of dates:
###Code
data = RandomDataProvider(tickers=["CompanyA", "CompanyB", "CompanyC"],
start = datetime.datetime(2015, 1, 1),
end = datetime.datetime(2016, 1, 30),
seed = 1)
data.run()
for (cnt, s) in enumerate(data._tickers):
plt.plot(data._data[cnt], label=s)
plt.legend()
plt.xticks(rotation=90)
plt.show()
###Output
_____no_output_____
###Markdown
Access to closing-price time-series While access to real-time data usually requires a payment, it is possible to access historical (adjusted) closing prices via Wikipedia and Quandl free of charge, following registration at: https://www.quandl.com/?modal=register In the code below, one needs to specify actual tickers of actual NASDAQ issues and the access token you obtain from Quandl; by running the code below, you agree to the Quandl terms and conditions, including a liability waiver. Notice that at least two tickers are required for the computation of covariance and time-series matrices, but hundreds of tickers may go beyond the fair usage limits of Quandl.
###Code
stocks = ["REPLACEME1", "REPLACEME2"]
wiki = WikipediaDataProvider(
token = "REPLACEME",
tickers = stocks,
stockmarket = StockMarket.NASDAQ,
start = datetime.datetime(2016,1,1),
end = datetime.datetime(2016,1,30))
wiki.run()
###Output
_____no_output_____
###Markdown
Once the data are loaded, you can again compute the covariance matrix or its DTW variants.
###Code
if wiki._n <= 1:
raise Exception("Not enough data to plot covariance or time-series similarity. Please use at least two tickers.")
rho = wiki.get_similarity_matrix()
print("A time-series similarity measure:")
print(rho)
plt.imshow(rho)
plt.show()
cov = wiki.get_covariance_matrix()
print("A covariance matrix:")
print(cov)
plt.imshow(cov)
plt.show()
###Output
_____no_output_____
###Markdown
If you wish, you can look into the underlying time-series using:
###Code
print("The underlying evolution of stock prices:")
for (cnt, s) in enumerate(stocks):
plt.plot(wiki._data[cnt], label=s)
plt.legend()
plt.xticks(rotation=90)
plt.show()
for (cnt, s) in enumerate(stocks):
print(s)
print(wiki._data[cnt])
###Output
_____no_output_____
###Markdown
[Optional] Setup token to access recent, fine-grained time-series If you would like to download professional data, you will have to set up a token with one of the major providers. Let us now illustrate the data with NASDAQ Data on Demand, which can supply bid and ask prices in arbitrary resolution, as well as aggregates such as daily adjusted closing prices, for NASDAQ and NYSE issues. If you don't have a NASDAQ Data on Demand license, you can contact NASDAQ (cf. https://business.nasdaq.com/intel/GIS/Nasdaq-Data-on-Demand.html) to obtain a trial or paid license. If and when you have access to NASDAQ Data on Demand using your own token, you should replace REPLACE-ME below with the token. To assure the security of the connection, you should also have your own means of validating NASDAQ's certificates. The DataOnDemandProvider constructor has an optional argument `verify`, which can be `None`, a string, or a boolean. If it is `None`, certifi certificates will be used (default). If verify is a string, it should point to a certificate for the HTTPS connection to NASDAQ (dataondemand.nasdaq.com), either in the form of a CA_BUNDLE file or a directory wherein to look.
###Code
from qiskit.aqua.translators.data_providers.data_on_demand_provider import StockMarket
try:
nasdaq = DataOnDemandProvider(token = "REPLACE-ME",
tickers = stocks,
stockmarket = StockMarket.NASDAQ,
start = datetime.datetime(2016,1,1),
end = datetime.datetime(2016,1,2))
nasdaq.run()
nasdaq.plot()
except QiskitFinanceError as e:
print(e)
print("You need to replace REPLACE-ME with a valid token.")
###Output
_____no_output_____
###Markdown
Another major vendor of stock market data is Exchange Data International (EDI), whose API can be used to query over 100 emerging and frontier markets in Africa, Asia, the Far East, Latin America, and the Middle East, as well as the more established ones. See: https://www.exchange-data.com/pricing-data/adjusted-prices.php#exchange-coverage for an overview of the coverage. The access again requires a valid access token to replace REPLACE-ME below. The token can be obtained on a trial or paid-for basis at: https://www.quandl.com/ In the following example, you need to replace TICKER1 and TICKER2 with valid tickers at the London Stock Exchange.
###Code
from qiskit.aqua.translators.data_providers.exchangedataprovider import StockMarket
try:
lse = ExchangeDataProvider(token = "REPLACE-ME",
tickers = ["TICKER1", "TICKER2"],
stockmarket = StockMarket.LONDON,
start = datetime.datetime(2019,1,1),
end = datetime.datetime(2019,1,30))
lse.run()
lse.plot()
except QiskitFinanceError as e:
print(e)
print("You need to replace REPLACE-ME with a valid token.")
###Output
_____no_output_____
###Markdown
For the actual use of the data, please see the portfolio_optimization or portfolio_diversification notebooks.
###Code
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
###Output
_____no_output_____
###Markdown
Trusted Notebook" align="middle"> _*Qiskit Finance: Loading and Processing Stock-Market Time-Series Data*_The latest version of this notebook is available on https://github.com/qiskit/qiskit-tutorial.*** ContributorsJakub Marecek[1] Affiliation- [1]IBMQ IntroductionAcross many problems in finance, one starts with time series. Here, we showcase how to generate pseudo-random time-series, download actual stock-market time series from a number of common providers, and how to compute time-series similarity measures.
###Code
%matplotlib inline
from qiskit.aqua.translators.data_providers import *
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
import datetime
import matplotlib.pyplot as plt
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
data = RandomDataProvider(tickers=["TICKER1", "TICKER2"],
start = datetime.datetime(2016, 1, 1),
end = datetime.datetime(2016, 1, 30),
seed = 1)
data.run()
###Output
_____no_output_____
###Markdown
Once the data are loaded, you can run a variety of algorithms on those to aggregate the data. Notably, you can compute the covariance matrix or a variant, which would consider alternative time-series similarity measures based on dynamic time warping (DTW). In DTW, changes that vary in speed, e.g., one stock's price following another stock's price with a small delay, can be accommodated.
###Code
means = data.get_mean_vector()
print("Means:")
print(means)
rho = data.get_similarity_matrix()
print("A time-series similarity measure:")
print(rho)
plt.imshow(rho)
plt.show()
cov = data.get_covariance_matrix()
print("A covariance matrix:")
print(cov)
plt.imshow(cov)
plt.show()
###Output
Means:
[16.66722941 72.03026566]
A time-series similarity measure:
[[1.0000000e+00 6.2284804e-04]
[6.2284804e-04 1.0000000e+00]]
###Markdown
If you wish, you can look into the underlying pseudo-random time-series using the code below. Please note that the private class members (starting with underscore) may change in future releases of Qiskit.
###Code
print("The underlying evolution of stock prices:")
for (cnt, s) in enumerate(data._tickers):
plt.plot(data._data[cnt], label=s)
plt.legend()
plt.xticks(rotation=90)
plt.show()
for (cnt, s) in enumerate(data._tickers):
print(s)
print(data._data[cnt])
###Output
The underlying evolution of stock prices:
###Markdown
Clearly, you can adapt the number and names of tickers and the range of dates:
###Code
data = RandomDataProvider(tickers=["CompanyA", "CompanyB", "CompanyC"],
start = datetime.datetime(2015, 1, 1),
end = datetime.datetime(2016, 1, 30),
seed = 1)
data.run()
for (cnt, s) in enumerate(data._tickers):
plt.plot(data._data[cnt], label=s)
plt.legend()
plt.xticks(rotation=90)
plt.show()
###Output
_____no_output_____
###Markdown
Access to closing-price time-series While access to real-time data usually requires a payment, it is possible to access historical (adjusted) closing prices via Wikipedia and Quandl free of charge, following registration at: https://www.quandl.com/?modal=register In the code below, one needs to specify actual tickers of actual NASDAQ issues and the access token you obtain from Quandl; by running the code below, you agree to the Quandl terms and conditions, including a liability waiver. Notice that at least two tickers are required for the computation of covariance and time-series matrices, but hundreds of tickers may go beyond the fair usage limits of Quandl.
###Code
stocks = ["REPLACEME1", "REPLACEME2"]
wiki = WikipediaDataProvider(
token = "REPLACEME",
tickers = stocks,
stockmarket = StockMarket.NASDAQ,
start = datetime.datetime(2016,1,1),
end = datetime.datetime(2016,1,30))
wiki.run()
###Output
_____no_output_____
###Markdown
Once the data are loaded, you can again compute the covariance matrix or its DTW variants.
###Code
if wiki._n <= 1:
raise Exception("Not enough data to plot covariance or time-series similarity. Please use at least two tickers.")
rho = wiki.get_similarity_matrix()
print("A time-series similarity measure:")
print(rho)
plt.imshow(rho)
plt.show()
cov = wiki.get_covariance_matrix()
print("A covariance matrix:")
print(cov)
plt.imshow(cov)
plt.show()
###Output
_____no_output_____
###Markdown
If you wish, you can look into the underlying time-series using:
###Code
print("The underlying evolution of stock prices:")
for (cnt, s) in enumerate(stocks):
plt.plot(wiki._data[cnt], label=s)
plt.legend()
plt.xticks(rotation=90)
plt.show()
for (cnt, s) in enumerate(stocks):
print(s)
print(wiki._data[cnt])
###Output
_____no_output_____
###Markdown
[Optional] Setup token to access recent, fine-grained time-series If you would like to download professional data, you will have to set up a token with one of the major providers. Let us now illustrate the data with NASDAQ Data on Demand, which can supply bid and ask prices in arbitrary resolution, as well as aggregates such as daily adjusted closing prices, for NASDAQ and NYSE issues. If you don't have a NASDAQ Data on Demand license, you can contact NASDAQ (cf. https://business.nasdaq.com/intel/GIS/Nasdaq-Data-on-Demand.html) to obtain a trial or paid license. If and when you have access to NASDAQ Data on Demand using your own token, you should replace REPLACE-ME below with the token. To assure the security of the connection, you should also have your own means of validating NASDAQ's certificates. The DataOnDemandProvider constructor has an optional argument `verify`, which can be `None`, a string, or a boolean. If it is `None`, certifi certificates will be used (default). If verify is a string, it should point to a certificate for the HTTPS connection to NASDAQ (dataondemand.nasdaq.com), either in the form of a CA_BUNDLE file or a directory wherein to look.
###Code
from qiskit.aqua.translators.data_providers.data_on_demand_provider import StockMarket
try:
nasdaq = DataOnDemandProvider(token = "REPLACE-ME",
tickers = stocks,
stockmarket = StockMarket.NASDAQ,
start = datetime.datetime(2016,1,1),
end = datetime.datetime(2016,1,2))
nasdaq.run()
nasdaq.plot()
except QiskitFinanceError as e:
print(e)
print("You need to replace REPLACE-ME with a valid token.")
###Output
_____no_output_____
###Markdown
Another major vendor of stock market data is Exchange Data International (EDI), whose API can be used to query over 100 emerging and frontier markets in Africa, Asia, the Far East, Latin America, and the Middle East, as well as the more established ones. See: https://www.exchange-data.com/pricing-data/adjusted-prices.php#exchange-coverage for an overview of the coverage. The access again requires a valid access token to replace REPLACE-ME below. The token can be obtained on a trial or paid-for basis at: https://www.quandl.com/ In the following example, you need to replace TICKER1 and TICKER2 with valid tickers at the London Stock Exchange.
###Code
from qiskit.aqua.translators.data_providers.exchangedataprovider import StockMarket
try:
lse = ExchangeDataProvider(token = "REPLACE-ME",
tickers = ["TICKER1", "TICKER2"],
stockmarket = StockMarket.LONDON,
start = datetime.datetime(2019,1,1),
end = datetime.datetime(2019,1,30))
lse.run()
lse.plot()
except QiskitFinanceError as e:
print(e)
print("You need to replace REPLACE-ME with a valid token.")
###Output
_____no_output_____
###Markdown
Qiskit Finance: Loading and Processing Stock-Market Time-Series Data Introduction Across many problems in finance, one starts with time series. Here, we showcase how to generate pseudo-random time-series, download actual stock-market time series from a number of common providers, and how to compute time-series similarity measures.
###Code
%matplotlib inline
from qiskit.aqua.translators.data_providers import *
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
import datetime
import matplotlib.pyplot as plt
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
data = RandomDataProvider(tickers=["TICKER1", "TICKER2"],
start = datetime.datetime(2016, 1, 1),
end = datetime.datetime(2016, 1, 30),
seed = 1)
data.run()
###Output
_____no_output_____
###Markdown
Once the data are loaded, you can run a variety of algorithms on those to aggregate the data. Notably, you can compute the covariance matrix or a variant, which would consider alternative time-series similarity measures based on dynamic time warping (DTW). In DTW, changes that vary in speed, e.g., one stock's price following another stock's price with a small delay, can be accommodated.
###Code
means = data.get_mean_vector()
print("Means:")
print(means)
rho = data.get_similarity_matrix()
print("A time-series similarity measure:")
print(rho)
plt.imshow(rho)
plt.show()
cov = data.get_covariance_matrix()
print("A covariance matrix:")
print(cov)
plt.imshow(cov)
plt.show()
###Output
Means:
[16.66722941 72.03026566]
A time-series similarity measure:
[[1.0000000e+00 6.2284804e-04]
[6.2284804e-04 1.0000000e+00]]
###Markdown
If you wish, you can look into the underlying pseudo-random time-series using the code below. Please note that the private class members (starting with underscore) may change in future releases of Qiskit.
###Code
print("The underlying evolution of stock prices:")
for (cnt, s) in enumerate(data._tickers):
plt.plot(data._data[cnt], label=s)
plt.legend()
plt.xticks(rotation=90)
plt.show()
for (cnt, s) in enumerate(data._tickers):
print(s)
print(data._data[cnt])
###Output
The underlying evolution of stock prices:
###Markdown
Clearly, you can adapt the number and names of tickers and the range of dates:
###Code
data = RandomDataProvider(tickers=["CompanyA", "CompanyB", "CompanyC"],
start = datetime.datetime(2015, 1, 1),
end = datetime.datetime(2016, 1, 30),
seed = 1)
data.run()
for (cnt, s) in enumerate(data._tickers):
plt.plot(data._data[cnt], label=s)
plt.legend()
plt.xticks(rotation=90)
plt.show()
###Output
_____no_output_____
###Markdown
Access to closing-price time-series While access to real-time data usually requires a payment, it is possible to access historical (adjusted) closing prices via Wikipedia and Quandl free of charge, following registration at: https://www.quandl.com/?modal=register In the code below, one needs to specify actual tickers of actual NASDAQ issues and the access token you obtain from Quandl; by running the code below, you agree to the Quandl terms and conditions, including a liability waiver. Notice that at least two tickers are required for the computation of covariance and time-series matrices, but hundreds of tickers may go beyond the fair usage limits of Quandl.
###Code
stocks = ["REPLACEME1", "REPLACEME2"]
wiki = WikipediaDataProvider(
token = "REPLACEME",
tickers = stocks,
stockmarket = StockMarket.NASDAQ,
start = datetime.datetime(2016,1,1),
end = datetime.datetime(2016,1,30))
wiki.run()
###Output
_____no_output_____
###Markdown
Once the data are loaded, you can again compute the covariance matrix or its DTW variants.
###Code
if wiki._n <= 1:
raise Exception("Not enough data to plot covariance or time-series similarity. Please use at least two tickers.")
rho = wiki.get_similarity_matrix()
print("A time-series similarity measure:")
print(rho)
plt.imshow(rho)
plt.show()
cov = wiki.get_covariance_matrix()
print("A covariance matrix:")
print(cov)
plt.imshow(cov)
plt.show()
###Output
_____no_output_____
###Markdown
If you wish, you can look into the underlying time-series using:
###Code
print("The underlying evolution of stock prices:")
for (cnt, s) in enumerate(stocks):
plt.plot(wiki._data[cnt], label=s)
plt.legend()
plt.xticks(rotation=90)
plt.show()
for (cnt, s) in enumerate(stocks):
print(s)
print(wiki._data[cnt])
###Output
_____no_output_____
###Markdown
[Optional] Setup token to access recent, fine-grained time-series If you would like to download professional data, you will have to set up a token with one of the major providers. Let us now illustrate the data with NASDAQ Data on Demand, which can supply bid and ask prices in arbitrary resolution, as well as aggregates such as daily adjusted closing prices, for NASDAQ and NYSE issues. If you don't have a NASDAQ Data on Demand license, you can contact NASDAQ (cf. https://business.nasdaq.com/intel/GIS/Nasdaq-Data-on-Demand.html) to obtain a trial or paid license. If and when you have access to NASDAQ Data on Demand using your own token, you should replace REPLACE-ME below with the token. To assure the security of the connection, you should also have your own means of validating NASDAQ's certificates. The DataOnDemandProvider constructor has an optional argument `verify`, which can be `None`, a string, or a boolean. If it is `None`, certifi certificates will be used (default). If verify is a string, it should point to a certificate for the HTTPS connection to NASDAQ (dataondemand.nasdaq.com), either in the form of a CA_BUNDLE file or a directory wherein to look.
###Code
from qiskit.aqua.translators.data_providers.data_on_demand_provider import StockMarket
try:
nasdaq = DataOnDemandProvider(token = "REPLACE-ME",
tickers = stocks,
stockmarket = StockMarket.NASDAQ,
start = datetime.datetime(2016,1,1),
end = datetime.datetime(2016,1,2))
nasdaq.run()
nasdaq.plot()
except QiskitFinanceError as e:
print(e)
print("You need to replace REPLACE-ME with a valid token.")
###Output
_____no_output_____
###Markdown
Another major vendor of stock market data is Exchange Data International (EDI), whose API can be used to query over 100 emerging and frontier markets in Africa, Asia, the Far East, Latin America, and the Middle East, as well as the more established ones. See: https://www.exchange-data.com/pricing-data/adjusted-prices.php#exchange-coverage for an overview of the coverage. The access again requires a valid access token to replace REPLACE-ME below. The token can be obtained on a trial or paid-for basis at: https://www.quandl.com/ In the following example, you need to replace TICKER1 and TICKER2 with valid tickers at the London Stock Exchange.
###Code
from qiskit.aqua.translators.data_providers.exchangedataprovider import StockMarket
try:
lse = ExchangeDataProvider(token = "REPLACE-ME",
tickers = ["TICKER1", "TICKER2"],
stockmarket = StockMarket.LONDON,
start = datetime.datetime(2019,1,1),
end = datetime.datetime(2019,1,30))
lse.run()
lse.plot()
except QiskitFinanceError as e:
print(e)
print("You need to replace REPLACE-ME with a valid token.")
###Output
_____no_output_____
###Markdown
For the actual use of the data, please see the portfolio_optimization or portfolio_diversification notebooks.
###Code
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
###Output
_____no_output_____ |
week_5/LSTM.ipynb | ###Markdown
What are the drawbacks of fully connected networks? * inability to capture temporal patterns in the context of previous points (an architectural limitation) * fixed input size * fixed output size. Where recurrent networks are applicable to time-series analysis: * a large number of exogenous features with a complex nonlinear relationship to the target series * a very complex temporal structure with overlapping seasonal and cyclic patterns * series with a frequently changing pattern, or with many anomalies * when a non-fixed length of inputs and outputs is needed (for example, multivariate series where you want to supply a different number of lags for different components). Data preparation specifics: the data must be normalized, otherwise the network will converge poorly and train slowly.
###Code
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
data = np.array(range(0, 100, 10)).reshape(-1, 1)
scaler = MinMaxScaler((0, 1))
scaler.fit(data)
transformed = scaler.transform(data)
transformed
inverse = scaler.inverse_transform(transformed)
inverse
###Output
_____no_output_____
###Markdown
Another data preparation detail: handling sequences of different lengths.
###Code
from keras.preprocessing.sequence import pad_sequences
sequences = [
[1, 2, 3, 4],
[3, 4, 5],
[5, 6],
[3]
]
pad_sequences(sequences, padding='pre')
pad_sequences(sequences, padding='post')
pad_sequences(sequences, maxlen=2)
pad_sequences(sequences, maxlen=2, truncating='post')
###Output
_____no_output_____
###Markdown
Which LSTM architectures are of interest in the context of time series? * one-to-one - predicting the next point from the previous one - no * one-to-many - predicting the next N points from the previous one - no * many-to-one - one-step-ahead prediction - to some extent * many-to-many - predicting a vector of the next m points from the previous n points - of greatest interest. A simple LSTM network
###Code
from typing import Tuple

from keras.models import Sequential
from keras.layers import LSTM, Dense
# `dataset` is assumed to be a mapping of file name -> pd.Series prepared earlier in the course materials
ts = dataset['daily-min-temperatures.csv']
ts.plot(figsize=(15, 5))
def transform_into_matrix(ts: pd.Series, num_lags: int) -> Tuple[np.array]:
"""
Transforms time series into lags matrix to allow
applying supervised learning algorithms
Parameters
------------
ts
Time series to transform
num_lags
Number of lags to use
Returns
--------
train, test: np.arrays of shapes (ts-num_lags, num_lags), (num_lags,)
"""
ts_values = ts.values
data = {}
for i in range(num_lags + 1):
data[f'lag_{num_lags - i}'] = np.roll(ts_values, -i)
lags_matrix = pd.DataFrame(data)[:-num_lags]
lags_matrix.index = ts.index[num_lags:]
return lags_matrix.drop('lag_0', axis=1).values, lags_matrix['lag_0'].values
NUM_LAGS = 14
X, y = transform_into_matrix(ts, NUM_LAGS)
X[0]
X = X.reshape((X.shape[0], X.shape[1], 1))
X[0]
split_idx = int(len(X)*0.8)
X_train, X_test = X[:split_idx], X[split_idx:]
y_train, y_test = y[:split_idx], y[split_idx:]
model = Sequential()
model.add(LSTM(50, activation='relu', input_shape=(NUM_LAGS, 1)))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mse')
model.fit(X_train, y_train, epochs=100)  # fit on the training split only, so the test evaluation below is fair
y_pred = model.predict(X_test)
pd.Series(y_test.flatten())[-50:].plot()
pd.Series(y_pred.flatten())[-50:].plot()
### this result is actually not much better than a naive forecast
from sklearn.metrics import mean_squared_error as mse
mse(y_test.flatten(), y_pred.flatten())
###Output
_____no_output_____
###Markdown
Stacked LSTM Add additional hidden layers to the network (use return_sequences=True) and compare the quality; one possible sketch is shown after this exercise.
###Code
model = Sequential()
# your code here
model.compile(optimizer='adam', loss='mse')
model.fit(X_train, y_train, epochs=100, verbose=0)
y_pred = model.predict(X_test)
pd.Series(y_test.flatten())[-50:].plot()
pd.Series(y_pred.flatten())[-50:].plot()
###Output
_____no_output_____
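###Markdown
One possible way to fill in the exercise above - a sketch only, assuming the same `NUM_LAGS`, `X_train`, `y_train`, `X_test` and `y_test` as before: stack two LSTM layers and keep `return_sequences=True` on every LSTM layer except the last one. The layer sizes and epoch count are illustrative assumptions, not tuned values.
###Code
# Sketch of a two-layer (stacked) LSTM; hyperparameters are illustrative.
stacked = Sequential()
stacked.add(LSTM(50, activation='relu', return_sequences=True, input_shape=(NUM_LAGS, 1)))
stacked.add(LSTM(50, activation='relu'))
stacked.add(Dense(1))
stacked.compile(optimizer='adam', loss='mse')
stacked.fit(X_train, y_train, epochs=10, verbose=0)
print(mse(y_test.flatten(), stacked.predict(X_test).flatten()))
###Output
_____no_output_____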
###Markdown
Bidirectional LSTM Let's make the LSTM layer of the network bidirectional using the additional Bidirectional wrapper layer and compare the quality; one possible sketch is shown after this exercise.
###Code
from keras.layers import Bidirectional
model = Sequential()
# your code here
model.compile(optimizer='adam', loss='mse')
model.fit(X_train, y_train, epochs=10, verbose=0)
y_pred = model.predict(X_test)
###Output
_____no_output_____
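###Markdown
A sketch of one way to complete the exercise above, wrapping the LSTM layer in `Bidirectional` (again assuming the same data as before and using illustrative, untuned hyperparameters):
###Code
# Sketch of a bidirectional LSTM; hyperparameters are illustrative.
from keras.layers import Bidirectional
bi_model = Sequential()
bi_model.add(Bidirectional(LSTM(50, activation='relu'), input_shape=(NUM_LAGS, 1)))
bi_model.add(Dense(1))
bi_model.compile(optimizer='adam', loss='mse')
bi_model.fit(X_train, y_train, epochs=10, verbose=0)
print(mse(y_test.flatten(), bi_model.predict(X_test).flatten()))
###Output
_____no_output_____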
###Markdown
Seq2Seq LSTM - for when you need to forecast several points ahead. Let's prepare the data
###Code
from typing import Tuple
def transform_ts_into_matrix(ts: pd.Series, num_lags_in: int, num_lags_out: int) -> Tuple[np.array, np.array]:
"""
    This function slides a window over the time series and, for every num_lags_in points
    taken as features, collects the num_lags_out following points as the target.
    Returns two np.array objects, X and y respectively
"""
sequence = ts.values
X, y = list(), list()
i = 0
outer_idx = num_lags_out
while outer_idx < len(sequence):
inner_idx = i + num_lags_in
outer_idx = inner_idx + num_lags_out
X_, y_ = sequence[i:inner_idx], sequence[inner_idx:outer_idx]
X.append(X_)
y.append(y_)
i += 1
return np.array(X), np.array(y)
# get X and y using the function above and split them into train and test sets
NUM_LAGS_IN = 28
NUM_LAGS_OUT = 7
X, y = transform_ts_into_matrix(ts, NUM_LAGS_IN, NUM_LAGS_OUT)
X = X.reshape((X.shape[0], X.shape[1], 1))
split_idx = int(len(X)*0.8)
X_train, X_test = X[:split_idx], X[split_idx:]
y_train, y_test = y[:split_idx], y[split_idx:]
# define the encoder
model = Sequential()
model.add(LSTM(100, activation='relu', input_shape=(NUM_LAGS_IN, 1)))
# add an intermediate layer that repeats the encoder output as the input sequence for the decoder
from keras.layers import RepeatVector
model.add(RepeatVector(NUM_LAGS_OUT))
# define the decoder
model.add(LSTM(50, activation='relu', return_sequences=True))
# define the output layer - the per-step output dimensionality is obtained via an additional TimeDistributed layer
from keras.layers import TimeDistributed
model.add(TimeDistributed(Dense(1)))
###Output
_____no_output_____
###Markdown
Train the model and get predictions on the test set
###Code
model.compile(optimizer='adam', loss='mse')
model.fit(X_train, y_train, epochs=10, verbose=0)
y_pred = model.predict(X_test)
###Output
_____no_output_____
###Markdown
An example with a multivariate series.
###Code
ts_multi = pd.read_csv('../data/stability_index.csv', index_col='timestamp', parse_dates=True)
ts_multi.fillna(ts_multi.mean(), axis=0, inplace=True)
def transform_multi_ts_into_matrix(ts: pd.DataFrame, num_lags: int):
"""
    This function slides a window over the time series
    and collects as features X an np.array of shape (len(ts)-num_lags, num_lags, n_dims)
    and as targets y an np.array of shape (len(ts)-num_lags, n_dims),
    where n_dims is the dimensionality of the multivariate series.
    That is, for every component of the time series we take the num_lags previous points of each component
    as features, and all components of the current point as the target
"""
sequence = ts.values
X, y = list(), list()
i = 0
end_i = num_lags
while end_i < len(sequence):
seq_x, seq_y = sequence[i:end_i], sequence[end_i]
X.append(seq_x)
y.append(seq_y)
i += 1
end_i = i + num_lags
return np.array(X), np.array(y)
NUM_LAGS = 14
N_DIMS = ts_multi.shape[1]
X, y = transform_multi_ts_into_matrix(ts_multi, NUM_LAGS)
X[0].shape
# define the encoder
model = Sequential()
model.add(LSTM(100, activation='relu', input_shape=(NUM_LAGS, N_DIMS)))
# add an intermediate layer that repeats the encoder output as the input sequence for the decoder
from keras.layers import RepeatVector
model.add(RepeatVector(N_DIMS))
# define the decoder
model.add(LSTM(50, activation='relu', return_sequences=True))
# define the output layer - the per-step output dimensionality is obtained via an additional TimeDistributed layer
from keras.layers import TimeDistributed
model.add(TimeDistributed(Dense(1)))
model.compile(optimizer='adam', loss='mse')
model.fit(X, y, epochs=50)
###Output
Epoch 1/50
130/130 [==============================] - 1s 9ms/step - loss: 4337.5293
Epoch 2/50
130/130 [==============================] - 1s 10ms/step - loss: 3359.5718
Epoch 3/50
130/130 [==============================] - 1s 11ms/step - loss: 2346.5215
Epoch 4/50
130/130 [==============================] - 1s 11ms/step - loss: 1834.7770
Epoch 5/50
130/130 [==============================] - 1s 9ms/step - loss: 1693.2955
Epoch 6/50
130/130 [==============================] - 1s 10ms/step - loss: 1496.0602
Epoch 7/50
130/130 [==============================] - 1s 10ms/step - loss: 1387.5758
Epoch 8/50
130/130 [==============================] - 1s 11ms/step - loss: 1265.3087
Epoch 9/50
130/130 [==============================] - 1s 9ms/step - loss: 1505.0157
Epoch 10/50
130/130 [==============================] - 1s 9ms/step - loss: 1514.8870
Epoch 11/50
130/130 [==============================] - 1s 8ms/step - loss: 1301.9706
Epoch 12/50
130/130 [==============================] - 1s 9ms/step - loss: 1278.9486
Epoch 13/50
130/130 [==============================] - 1s 9ms/step - loss: 1309.2554
Epoch 14/50
130/130 [==============================] - 1s 9ms/step - loss: 1628.4979
Epoch 15/50
130/130 [==============================] - 1s 9ms/step - loss: 1819.9342
Epoch 16/50
130/130 [==============================] - 1s 9ms/step - loss: 1520.2660
Epoch 17/50
130/130 [==============================] - 1s 9ms/step - loss: 1324.4885
Epoch 18/50
130/130 [==============================] - 1s 9ms/step - loss: 1299.3295
Epoch 19/50
130/130 [==============================] - 1s 9ms/step - loss: 1186.3156
Epoch 20/50
130/130 [==============================] - 1s 10ms/step - loss: 1122.8571
Epoch 21/50
130/130 [==============================] - 1s 9ms/step - loss: 1125.4316
Epoch 22/50
130/130 [==============================] - 1s 10ms/step - loss: 1119.9897
Epoch 23/50
130/130 [==============================] - 1s 9ms/step - loss: 1101.6624
Epoch 24/50
130/130 [==============================] - 1s 9ms/step - loss: 1097.9153
Epoch 25/50
130/130 [==============================] - 1s 9ms/step - loss: 1144.5050
Epoch 26/50
130/130 [==============================] - 1s 9ms/step - loss: 1181.8234
Epoch 27/50
130/130 [==============================] - 1s 10ms/step - loss: 1165.9486
Epoch 28/50
130/130 [==============================] - 1s 10ms/step - loss: 1132.3014
Epoch 29/50
130/130 [==============================] - 1s 10ms/step - loss: 1069.6210
Epoch 30/50
130/130 [==============================] - 1s 9ms/step - loss: 1028.5364
Epoch 31/50
130/130 [==============================] - 1s 9ms/step - loss: 1086.3086
Epoch 32/50
130/130 [==============================] - 1s 9ms/step - loss: 1303.5736
Epoch 33/50
130/130 [==============================] - 1s 9ms/step - loss: 1373.5681
Epoch 34/50
130/130 [==============================] - 1s 9ms/step - loss: 1222.9882
Epoch 35/50
130/130 [==============================] - 1s 9ms/step - loss: 1151.4961
Epoch 36/50
130/130 [==============================] - 1s 9ms/step - loss: 1116.9482
Epoch 37/50
130/130 [==============================] - 1s 11ms/step - loss: 1094.3457
Epoch 38/50
130/130 [==============================] - 1s 10ms/step - loss: 1046.0753
Epoch 39/50
130/130 [==============================] - 1s 9ms/step - loss: 1030.7870
Epoch 40/50
130/130 [==============================] - 1s 9ms/step - loss: 1446.4260
Epoch 41/50
130/130 [==============================] - 1s 9ms/step - loss: 1158.3619
Epoch 42/50
130/130 [==============================] - 1s 10ms/step - loss: 1058.0692
Epoch 43/50
130/130 [==============================] - 1s 10ms/step - loss: 1028.4990
Epoch 44/50
130/130 [==============================] - 1s 10ms/step - loss: 1020.4298
Epoch 45/50
130/130 [==============================] - 1s 10ms/step - loss: 1017.2426
Epoch 46/50
130/130 [==============================] - 1s 10ms/step - loss: 995.0058
Epoch 47/50
130/130 [==============================] - 1s 10ms/step - loss: 979.2719
Epoch 48/50
130/130 [==============================] - 1s 10ms/step - loss: 965.5411
Epoch 49/50
130/130 [==============================] - 1s 10ms/step - loss: 982.8457
Epoch 50/50
130/130 [==============================] - 1s 10ms/step - loss: 954.3374
|
Machine learning/Heart Diseases - Classification with Random Forest Classifier.ipynb | ###Markdown
Intro to Scikit-learn
###Code
# import packages
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pickle
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
# Get the data ready
heart = pd.read_csv('../Data/heart.csv')
heart.head()
# create features matrix
X = heart.drop('target', axis =1)
# create labels
y = heart.target
# choose the right model and hyperparameters
clf = RandomForestClassifier()
# keep the default hyperparameters
clf.get_params()
# fit the model to the data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=49)
# fit it
clf.fit(X_train, y_train);
# make a prediction
y_pred = clf.predict(X_test)
y_pred
# Evaluate the model on the training and test data
clf.score(X_train, y_train), clf.score(X_test, y_test)
print(classification_report(y_test, y_pred))
confusion_matrix(y_test, y_pred)
accuracy_score(y_test, y_pred)
# Improve the performance of the model
# Try different numbers of n_estimators
np.random.seed(42)
for i in range(10, 140, 20):
print(f'Trying model with {i} estimators')
clf = RandomForestClassifier(i)
clf.fit(X_train, y_train)
print(f'Model accuracy on test set: {clf.score(X_test, y_test)*100:.2f}%\n')
# redo the model and save it
clf = RandomForestClassifier(110)
clf.fit(X_train, y_train)
pickle.dump(clf, open('random_forest_model_1.pkl', 'wb'))
# reload the model
loaded_model = pickle.load(open('random_forest_model_1.pkl', 'rb'))
print(f'Model accuracy on test set: {loaded_model.score(X_test, y_test)*100:.2f}%\n')
###Output
Model accuracy on test set: 85.25%
|
notebooks/3.0-fb-gas_stations_along_route.ipynb | ###Markdown
Bounding Box Approach
###Code
def get_gas_stations_in_area(bounding_box):
""" bounding box is a (minx, miny, maxx, maxy) tuple""" # x = long, y = lat
min_long, min_lat, max_long, max_lat = bounding_box
assert min_long < max_long
assert min_lat < max_lat
return set(positions.cx[min_long:max_long,min_lat:max_lat].index)
def get_gas_stations_in_boxes(bounding_boxes):
ids = [get_gas_stations_in_area(box) for box in bounding_boxes]
return list(set.union(*ids))
boxes_potsdam_berlin = [((52.34416775186111, 13.092272842330203), (52.35864093016666, 13.187254280776756)),((52.35864093016666, 13.044782123106984), (52.37311410847222, 13.187254280776756)),((52.37311410847222, 13.021036763495317), (52.38758728677778, 13.210999640388309)),((52.38758728677778, 13.021036763495317), (52.41653364338889, 13.234744999999975)),((52.41653364338889, 13.139763561553536), (52.431006821694446, 13.234744999999975)),((52.431006821694446, 13.16350892116509), (52.44548, 13.258490359611642)),((52.44548, 13.16350892116509), (52.459953178305554, 13.282235719223195)),((52.459953178305554, 13.16350892116509), (52.474426356611126, 13.305981078834861)),((52.474426356611126, 13.187254280776756), (52.48889953491667, 13.305981078834861)),((52.48889953491667, 13.210999640388309), (52.503372713222234, 13.424707876892967)),((52.503372713222234, 13.234744999999975), (52.53231906983335, 13.448453236504633)),((52.53231906983335, 13.35347179805808), (52.5467922481389, 13.448453236504633))]
boxes_small_potsdam_berlin = [((52.380350697625, 13.044782123106984), (52.40206046508334, 13.068527482718537)),((52.37311410847222, 13.068527482718537), (52.40206046508334, 13.080400162524484)),((52.36587751931945, 13.080400162524484), (52.40206046508334, 13.104145522136037)),((52.35864093016666, 13.104145522136037), (52.394823875930555, 13.11601820194187)),((52.35864093016666, 13.11601820194187), (52.38758728677778, 13.127890881747703)),((52.35864093016666, 13.127890881747703), (52.394823875930555, 13.139763561553536)),((52.35864093016666, 13.139763561553536), (52.40206046508334, 13.16350892116509)),((52.35864093016666, 13.16350892116509), (52.41653364338889, 13.175381600970923)),((52.380350697625, 13.175381600970923), (52.45271658915278, 13.187254280776756)),((52.380350697625, 13.187254280776756), (52.459953178305554, 13.19912696058259)),((52.394823875930555, 13.19912696058259), (52.467189767458336, 13.210999640388309)),((52.431006821694446, 13.210999640388309), (52.4816629457639, 13.222872320194142)),((52.43824341084722, 13.222872320194142), (52.48889953491667, 13.234744999999975)),((52.44548, 13.234744999999975), (52.49613612406946, 13.246617679805809)),((52.459953178305554, 13.246617679805809), (52.51060930237501, 13.258490359611642)),((52.467189767458336, 13.258490359611642), (52.517845891527784, 13.270363039417362)),((52.474426356611126, 13.270363039417362), (52.52508248068057, 13.282235719223195)),((52.48889953491667, 13.282235719223195), (52.52508248068057, 13.294108399029028)),((52.49613612406946, 13.294108399029028), (52.52508248068057, 13.305981078834861)),((52.503372713222234, 13.305981078834861), (52.52508248068057, 13.377217157669747)),((52.503372713222234, 13.377217157669747), (52.53231906983335, 13.412835197087134)),((52.51060930237501, 13.412835197087134), (52.53231906983335, 13.424707876892967))]
def js_box_2_python_box(js_boxes):
return [(min_long, min_lat, max_long, max_lat) for ((min_lat,min_long),(max_lat,max_long)) in js_boxes]
boxes_potsdam_berlin_nice = js_box_2_python_box(boxes_potsdam_berlin)
res = get_gas_stations_in_boxes(boxes_potsdam_berlin_nice)
gpd.GeoSeries(gas_stations_df.loc[res]['Position']).plot()
boxes_potsdam_berlin_nice = js_box_2_python_box(boxes_small_potsdam_berlin)
res = get_gas_stations_in_boxes(boxes_potsdam_berlin_nice)
gpd.GeoSeries(gas_stations_df.loc[res]['Position']).plot();
###Output
_____no_output_____
###Markdown
Buffer Approach
###Code
path_potsdam_berlin = [(52.390530000000005, 13.064540000000001),(52.39041, 13.065890000000001),(52.39025, 13.06723),(52.39002000000001, 13.068810000000001),(52.389970000000005, 13.069350000000002),(52.38998, 13.06948),(52.389860000000006, 13.07028),(52.38973000000001, 13.07103),(52.38935000000001, 13.07352),(52.3892, 13.07463),(52.38918, 13.075120000000002),(52.389210000000006, 13.07553),(52.389300000000006, 13.0759),(52.3894, 13.076130000000001),(52.389520000000005, 13.07624),(52.38965, 13.07638),(52.389880000000005, 13.0767),(52.390100000000004, 13.077110000000001),(52.390330000000006, 13.077770000000001),(52.390440000000005, 13.078660000000001),(52.39052, 13.079400000000001),(52.390570000000004, 13.08004),(52.39056000000001, 13.08037),(52.390550000000005, 13.0806),(52.390530000000005, 13.080990000000002),(52.390420000000006, 13.083100000000002),(52.390440000000005, 13.083400000000001),(52.39038000000001, 13.083430000000002),(52.39011000000001, 13.0836),(52.38853, 13.084660000000001),(52.38801, 13.0851),(52.38774, 13.085410000000001),(52.38754, 13.085730000000002),(52.38729000000001, 13.086300000000001),(52.38689, 13.087610000000002),(52.386500000000005, 13.088960000000002),(52.38611, 13.09026),(52.38602, 13.090700000000002),(52.3858, 13.09121),(52.385290000000005, 13.092300000000002),(52.38477, 13.09331),(52.384040000000006, 13.094650000000001),(52.383500000000005, 13.095670000000002),(52.38302, 13.096580000000001),(52.37538000000001, 13.110970000000002),(52.37485, 13.112020000000001),(52.37471000000001, 13.112340000000001),(52.37436, 13.113220000000002),(52.373990000000006, 13.114300000000002),(52.37379000000001, 13.11494),(52.373580000000004, 13.11578),(52.37304, 13.11809),(52.37266, 13.119740000000002),(52.37252, 13.120540000000002),(52.37238000000001, 13.121540000000001),(52.37227000000001, 13.122710000000001),(52.37225, 13.12311),(52.372220000000006, 13.12376),(52.372220000000006, 13.124830000000001),(52.372260000000004, 13.128100000000002),(52.37229000000001, 13.131340000000002),(52.37234, 13.1369),(52.37232, 13.13785),(52.37228, 13.13859),(52.37220000000001, 13.13958),(52.37216, 13.140500000000001),(52.372150000000005, 13.141950000000001),(52.37218000000001, 13.14399),(52.37228, 13.147120000000001),(52.3723, 13.14906),(52.37232, 13.151140000000002),(52.37228, 13.15149),(52.37225, 13.151850000000001),(52.37219, 13.152070000000002),(52.372130000000006, 13.152210000000002),(52.372040000000005, 13.152360000000002),(52.371930000000006, 13.15248),(52.37181, 13.152560000000001),(52.37167, 13.152600000000001),(52.37153000000001, 13.152600000000001),(52.3714, 13.152550000000002),(52.371300000000005, 13.15248),(52.3712, 13.152370000000001),(52.37106000000001, 13.152130000000001),(52.37098, 13.151840000000002),(52.37095000000001, 13.151560000000002),(52.370960000000004, 13.15136),(52.371, 13.151090000000002),(52.37109, 13.150830000000001),(52.3712, 13.15066),(52.37129, 13.15056),(52.371460000000006, 13.15046),(52.37163, 13.150430000000002),(52.37181, 13.150400000000001),(52.37322, 13.150360000000001),(52.373670000000004, 13.150350000000001),(52.37375, 13.15032),(52.37451, 13.150310000000001),(52.375710000000005, 13.15028),(52.37670000000001, 13.150250000000002),(52.376960000000004, 13.150250000000002),(52.37715000000001, 13.150220000000001),(52.37742, 13.150160000000001),(52.377720000000004, 13.15013),(52.378040000000006, 13.150120000000001),(52.37812, 13.15009),(52.37825, 13.15004),(52.378800000000005, 13.15004),(52.379270000000005, 13.15009),(52.37962, 
13.150150000000002),(52.380010000000006, 13.150240000000002),(52.380370000000006, 13.150360000000001),(52.380990000000004, 13.150620000000002),(52.38165000000001, 13.15098),(52.383500000000005, 13.152170000000002),(52.38440000000001, 13.15277),(52.3858, 13.153670000000002),(52.387080000000005, 13.1545),(52.38745, 13.154760000000001),(52.38768, 13.15496),(52.38794000000001, 13.155190000000001),(52.388380000000005, 13.155660000000001),(52.38891, 13.156350000000002),(52.38927, 13.156920000000001),(52.38965, 13.15755),(52.38984000000001, 13.15792),(52.39011000000001, 13.158520000000001),(52.390460000000004, 13.15943),(52.39074, 13.160380000000002),(52.392900000000004, 13.169300000000002),(52.39408, 13.1742),(52.39439, 13.175370000000001),(52.394830000000006, 13.176800000000002),(52.395320000000005, 13.17805),(52.39578, 13.179070000000001),(52.39621, 13.17993),(52.39678000000001, 13.18092),(52.39714000000001, 13.18148),(52.3975, 13.181970000000002),(52.398340000000005, 13.183000000000002),(52.39922000000001, 13.184000000000001),(52.399530000000006, 13.18438),(52.40012, 13.18504),(52.400940000000006, 13.185910000000002),(52.40171, 13.186750000000002),(52.402260000000005, 13.187420000000001),(52.403830000000006, 13.18917),(52.407830000000004, 13.193690000000002),(52.40982, 13.19593),(52.410230000000006, 13.19631),(52.41085, 13.19678),(52.411280000000005, 13.197030000000002),(52.41158000000001, 13.197180000000001),(52.41223, 13.197420000000001),(52.412620000000004, 13.197510000000001),(52.413030000000006, 13.19757),(52.413880000000006, 13.19757),(52.41407, 13.197560000000001),(52.41452, 13.197470000000001),(52.41536000000001, 13.19729),(52.41561, 13.197210000000002),(52.416720000000005, 13.19697),(52.417570000000005, 13.196760000000001),(52.41827000000001, 13.196610000000002),(52.42042000000001, 13.196130000000002),(52.4217, 13.195850000000002),(52.422740000000005, 13.19561),(52.423030000000004, 13.195500000000001),(52.42322000000001, 13.195390000000002),(52.423410000000004, 13.195260000000001),(52.42360000000001, 13.195120000000001),(52.42381, 13.194930000000001),(52.42409000000001, 13.194640000000001),(52.42443, 13.194170000000002),(52.424820000000004, 13.1935),(52.425160000000005, 13.19293),(52.42549, 13.192450000000001),(52.425720000000005, 13.192160000000001),(52.42607, 13.191820000000002),(52.426300000000005, 13.191640000000001),(52.42649, 13.19152),(52.42685, 13.191350000000002),(52.427310000000006, 13.191230000000001),(52.427530000000004, 13.191210000000002),(52.427890000000005, 13.191230000000001),(52.42887, 13.191460000000001),(52.43121000000001, 13.19204),(52.43244000000001, 13.192340000000002),(52.43292, 13.19246),(52.433400000000006, 13.1926),(52.43365000000001, 13.19269),(52.43403000000001, 13.192870000000001),(52.434470000000005, 13.193150000000001),(52.43478, 13.19339),(52.43506000000001, 13.193650000000002),(52.435340000000004, 13.19396),(52.43573000000001, 13.194440000000002),(52.43797000000001, 13.197270000000001),(52.438610000000004, 13.198080000000001),(52.44021000000001, 13.2001),(52.44169, 13.20198),(52.44489, 13.206010000000001),(52.446180000000005, 13.207640000000001),(52.45031, 13.212860000000001),(52.47092000000001, 13.238930000000002),(52.472350000000006, 13.240730000000001),(52.47289000000001, 13.24136),(52.474680000000006, 13.243440000000001),(52.47838, 13.247610000000002),(52.48109, 13.250670000000001),(52.48225000000001, 13.25201),(52.482800000000005, 13.2527),(52.48602, 13.25679),(52.48906, 13.260610000000002),(52.491670000000006, 13.26392),(52.49271, 
13.26524),(52.49497, 13.268040000000001),(52.495160000000006, 13.268360000000001),(52.495760000000004, 13.26917),(52.496280000000006, 13.26984),(52.497170000000004, 13.27105),(52.497840000000004, 13.27194),(52.49857, 13.272870000000001),(52.49895000000001, 13.273460000000002),(52.49916, 13.273930000000002),(52.49929, 13.27434),(52.499390000000005, 13.274840000000001),(52.499460000000006, 13.275440000000001),(52.49949, 13.275970000000001),(52.49956, 13.277550000000002),(52.49963, 13.27838),(52.49969, 13.278830000000001),(52.499770000000005, 13.27918),(52.499900000000004, 13.279630000000001),(52.500060000000005, 13.28002),(52.500220000000006, 13.280330000000001),(52.50027000000001, 13.28035),(52.500370000000004, 13.28049),(52.50054, 13.280690000000002),(52.5007, 13.28082),(52.50085000000001, 13.280880000000002),(52.501020000000004, 13.2809),(52.50117, 13.280880000000002),(52.50155, 13.280740000000002),(52.50173, 13.280690000000002),(52.501960000000004, 13.28068),(52.502210000000005, 13.280780000000002),(52.502390000000005, 13.28086),(52.503310000000006, 13.28194),(52.50368, 13.282330000000002),(52.503930000000004, 13.282520000000002),(52.50423000000001, 13.28269),(52.504560000000005, 13.28279),(52.50522, 13.282820000000001),(52.50553000000001, 13.28284),(52.50583, 13.282890000000002),(52.50598, 13.282940000000002),(52.506350000000005, 13.283100000000001),(52.506620000000005, 13.28326),(52.508250000000004, 13.284370000000001),(52.509620000000005, 13.28527),(52.51070000000001, 13.28592),(52.511100000000006, 13.286100000000001),(52.511210000000005, 13.286150000000001),(52.51158, 13.286230000000002),(52.511700000000005, 13.286380000000001),(52.511810000000004, 13.286420000000001),(52.51239, 13.28658),(52.512570000000004, 13.28668),(52.512800000000006, 13.28687),(52.5129, 13.286890000000001),(52.51297, 13.286890000000001),(52.51299, 13.28706),(52.51301, 13.28738),(52.51308, 13.28842),(52.51274, 13.288520000000002),(52.51194, 13.288760000000002),(52.511300000000006, 13.288960000000001),(52.510560000000005, 13.289200000000001),(52.510380000000005, 13.289240000000001),(52.51043000000001, 13.289950000000001),(52.510510000000004, 13.291240000000002),(52.51066, 13.293750000000001),(52.51122, 13.30202),(52.51147, 13.30563),(52.51184000000001, 13.31169),(52.512080000000005, 13.315150000000001),(52.51239, 13.320010000000002),(52.51241, 13.320640000000001),(52.51234, 13.32089),(52.512280000000004, 13.320950000000002),(52.51218, 13.321090000000002),(52.51207, 13.32136),(52.51203, 13.3215),(52.51202000000001, 13.321800000000001),(52.51203, 13.322030000000002),(52.512060000000005, 13.322260000000002),(52.512150000000005, 13.322560000000001),(52.512280000000004, 13.32277),(52.512350000000005, 13.322840000000001),(52.51240000000001, 13.322880000000001),(52.51249000000001, 13.323070000000001),(52.512530000000005, 13.32314),(52.512550000000005, 13.32319),(52.512600000000006, 13.32333),(52.51263, 13.32342),(52.51265000000001, 13.323550000000001),(52.512950000000004, 13.32801),(52.513180000000006, 13.33182),(52.513470000000005, 13.33604),(52.5142, 13.346560000000002),(52.51433, 13.348690000000001),(52.51429, 13.34889),(52.51415, 13.349290000000002),(52.51404, 13.349480000000002),(52.513960000000004, 13.349680000000001),(52.51393, 13.349810000000002),(52.51391, 13.350100000000001),(52.51393, 13.35035),(52.513980000000004, 13.350570000000001),(52.514050000000005, 13.350740000000002),(52.514190000000006, 13.350950000000001),(52.51424, 13.350990000000001),(52.51444000000001, 13.351400000000002),(52.51453000000001, 
13.351650000000001),(52.5146, 13.352200000000002),(52.51512, 13.36029),(52.51549000000001, 13.36617),(52.51567000000001, 13.369250000000001),(52.515950000000004, 13.37339),(52.51612, 13.376000000000001),(52.51615, 13.376740000000002),(52.51603000000001, 13.37682),(52.51596000000001, 13.376920000000002),(52.51585000000001, 13.37719),(52.51578000000001, 13.37733),(52.515710000000006, 13.37742),(52.515600000000006, 13.37747),(52.515480000000004, 13.37747),(52.51491000000001, 13.37738),(52.51458, 13.377360000000001),(52.514630000000004, 13.378250000000001),(52.514680000000006, 13.379040000000002),(52.51485, 13.379980000000002),(52.515150000000006, 13.381620000000002),(52.51521, 13.3823),(52.515350000000005, 13.38447),(52.515460000000004, 13.386030000000002),(52.51586, 13.38597),(52.51628, 13.385900000000001),(52.51668, 13.385860000000001),(52.51675, 13.38733),(52.51682, 13.388470000000002),(52.51688000000001, 13.3892),(52.51690000000001, 13.389650000000001),(52.51699000000001, 13.39024),(52.517010000000006, 13.3907),(52.51711, 13.392230000000001),(52.51717000000001, 13.392970000000002),(52.51724, 13.39333),(52.51731, 13.39413),(52.517340000000004, 13.394860000000001),(52.517430000000004, 13.39628),(52.517500000000005, 13.397430000000002),(52.51762, 13.398850000000001),(52.517720000000004, 13.39943),(52.517790000000005, 13.39971),(52.517900000000004, 13.400020000000001),(52.51796, 13.400260000000001),(52.51803, 13.400490000000001),(52.518640000000005, 13.4021),(52.51887000000001, 13.40262),(52.519000000000005, 13.40295),(52.51939, 13.4037),(52.519890000000004, 13.404660000000002),(52.520010000000006, 13.404950000000001)]
pb = LineString([(x,y) for y,x in path_potsdam_berlin])
# 1 degree is roughly 111 km => a distance of 1 km corresponds to about 0.01 degrees
pb.buffer(.02)
m = MultiPoint(list(zip(gas_stations_df['Long'],gas_stations_df['Lat'])))
pb.buffer(.02).intersection(m)
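# Added note (not in the original notebook): the buffer is given in degrees, and since
# 1 degree of latitude is roughly 111 km, buffer(.02) is roughly a 2 km wide corridor
# around the route. A small helper makes the conversion explicit; km_to_degrees is a
# hypothetical name introduced here for illustration only.
def km_to_degrees(km):
    return km / 111.0  # rough conversion; ignores longitude distortion
# pb.buffer(km_to_degrees(2)).intersection(m) gives approximately the same selection.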
###Output
_____no_output_____
###Markdown
Keep a data set that is indexed by position
###Code
def hash_pos(lat,long):
return str(lat) + ':' + str(long)
gas_station_pos_index = gas_stations_df.copy()
gas_station_pos_index['str_pos'] = gas_station_pos_index.apply(lambda row: hash_pos(row.Lat,row.Long), axis=1)
gas_station_pos_index = gas_station_pos_index.reset_index().set_index('str_pos')
gas_stations_near_path = [hash_pos(point.y,point.x) for point in pb.buffer(.02).intersection(m) ]
gas_station_pos_index.loc[gas_stations_near_path]['id']
###Output
_____no_output_____
###Markdown
Find the point on the path closest to a gas station
###Code
gas_stations = pb.buffer(.02).intersection(m)
gas_stations[0].union(pb)
def closest_point_on_path(path,point):
return path.interpolate(path.project(point))
def length_on_line(path,point):
return path.project(point,normalized=True)
closest_point_on_path(pb,gas_stations[0])
length_on_line(pb,gas_stations[0])
gas_stations[-1].union(pb)
MultiPoint([closest_point_on_path(pb,p) for p in gas_stations])
pb.length * 111
[length_on_line(pb,p) for p in gas_stations]
###Output
_____no_output_____ |
2019-02-13-Wine-Dataset.ipynb | ###Markdown
Welcome to PyData Special Interest Group @ SF Python Project Night-----The goal is to have a sample dataset to explore together. We are going to explore the Wine recognition dataset 🍷It is a choose-your-own-adventure. If you are interested in visualization, do that. If you are interested in statistical modeling, explore that. If you are interested in machine learning or deep learning, try that.
###Code
# Here are common imports to get you started
import keras
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import sklearn
%matplotlib inline
# Let's load the data
from sklearn.datasets import load_wine
data = load_wine()
print(data.DESCR)
###Output
.. _wine_dataset:
Wine recognition dataset
------------------------
**Data Set Characteristics:**
:Number of Instances: 178 (50 in each of three classes)
:Number of Attributes: 13 numeric, predictive attributes and the class
:Attribute Information:
- Alcohol
- Malic acid
- Ash
- Alcalinity of ash
- Magnesium
- Total phenols
- Flavanoids
- Nonflavanoid phenols
- Proanthocyanins
- Color intensity
- Hue
- OD280/OD315 of diluted wines
- Proline
- class:
- class_0
- class_1
- class_2
:Summary Statistics:
============================= ==== ===== ======= =====
Min Max Mean SD
============================= ==== ===== ======= =====
Alcohol: 11.0 14.8 13.0 0.8
Malic Acid: 0.74 5.80 2.34 1.12
Ash: 1.36 3.23 2.36 0.27
Alcalinity of Ash: 10.6 30.0 19.5 3.3
Magnesium: 70.0 162.0 99.7 14.3
Total Phenols: 0.98 3.88 2.29 0.63
Flavanoids: 0.34 5.08 2.03 1.00
Nonflavanoid Phenols: 0.13 0.66 0.36 0.12
Proanthocyanins: 0.41 3.58 1.59 0.57
Colour Intensity: 1.3 13.0 5.1 2.3
Hue: 0.48 1.71 0.96 0.23
OD280/OD315 of diluted wines: 1.27 4.00 2.61 0.71
Proline: 278 1680 746 315
============================= ==== ===== ======= =====
:Missing Attribute Values: None
:Class Distribution: class_0 (59), class_1 (71), class_2 (48)
:Creator: R.A. Fisher
:Donor: Michael Marshall (MARSHALL%[email protected])
:Date: July, 1988
This is a copy of UCI ML Wine recognition datasets.
https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data
The data is the results of a chemical analysis of wines grown in the same
region in Italy by three different cultivators. There are thirteen different
measurements taken for different constituents found in the three types of
wine.
Original Owners:
Forina, M. et al, PARVUS -
An Extendible Package for Data Exploration, Classification and Correlation.
Institute of Pharmaceutical and Food Analysis and Technologies,
Via Brigata Salerno, 16147 Genoa, Italy.
Citation:
Lichman, M. (2013). UCI Machine Learning Repository
[http://archive.ics.uci.edu/ml]. Irvine, CA: University of California,
School of Information and Computer Science.
.. topic:: References
(1) S. Aeberhard, D. Coomans and O. de Vel,
Comparison of Classifiers in High Dimensional Settings,
Tech. Rep. no. 92-02, (1992), Dept. of Computer Science and Dept. of
Mathematics and Statistics, James Cook University of North Queensland.
(Also submitted to Technometrics).
The data was used with many others for comparing various
classifiers. The classes are separable, though only RDA
has achieved 100% correct classification.
(RDA : 100%, QDA 99.4%, LDA 98.9%, 1NN 96.1% (z-transformed data))
(All results using the leave-one-out technique)
(2) S. Aeberhard, D. Coomans and O. de Vel,
"THE CLASSIFICATION PERFORMANCE OF RDA"
Tech. Rep. no. 92-01, (1992), Dept. of Computer Science and Dept. of
Mathematics and Statistics, James Cook University of North Queensland.
(Also submitted to Journal of Chemometrics).
###Markdown
You can also learn more [here](https://archive.ics.uci.edu/ml/datasets/wine).
###Code
# This is a classification problem. Try to predict one of these categories.
list(data.target_names)
# Use these features / columns
data.feature_names
# Here is the raw data
data.data
# Now it is your turn to find something interesting
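# A minimal starter sketch (added; not part of the original notebook): wrap the arrays
# in a DataFrame and fit a quick baseline classifier. The variable names below
# (df_wine, clf, etc.) are illustrative choices, not from the original.
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
df_wine = pd.DataFrame(data.data, columns=data.feature_names)
df_wine['target'] = data.target
X_tr, X_te, y_tr, y_te = train_test_split(
    df_wine[data.feature_names], df_wine['target'], random_state=0)
clf = RandomForestClassifier(n_estimators=100, random_state=0).fit(X_tr, y_tr)
print('baseline test accuracy:', clf.score(X_te, y_te))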
###Output
_____no_output_____ |
01_sargassum_detection_coast.ipynb | ###Markdown
Detection of Sargassum on the coast and coastal waters Notebook for classifying and analyzing Sargassum in Bonaire with Sentinel-2 images* Decision Tree Classifier (DTC) and Maximum Likelihood Classifier (MLC) are employed* Training sites covering 8 different classes are used to extract pixel values (training samples) over all Sentinel-2 bands* 12 Sentinel bands and 8 spectral indices evaluated using Jeffries-Matusita distance (selected: NDVI, REP, B05 and B11) * 80:20 train-test ratio for splitting the training samples* K-Fold cross-validation performed for tuning the DTC model* MLC model developed with 4 different chi-square thresholds: 0% (base), 10%,20%,50%
###Code
import os
import re
import pandas as pd
import numpy as np
import rasterio as rio
from rasterio import Affine
from rasterio.mask import mask
import matplotlib.pyplot as plt
import seaborn as sns
from glob import glob
import geopandas as gpd
from joblib import dump,load
from rasterstats import zonal_stats
from tqdm import tqdm,tqdm_notebook
#custom functions
from Python.prep_raster import stack_bands,clip_raster,pixel_sample,computeIndexStack
from Python.data_treat import balance_sample,down_sample
from Python.spec_analysis import transpose_df,jmd2df
from Python.data_viz import specsign_plot,jmd_heatmap,ridgePlot,validation_curve_plot
from Python.mlc import mlClassifier
from Python.calc_acc import calc_acc
from Python.pred_raster import stack2pred, dtc_pred_stack
from Python.misc import get_feat_layer_order
#sklearn functions
from sklearn.model_selection import train_test_split,validation_curve
from sklearn.preprocessing import LabelEncoder
from sklearn.tree import DecisionTreeClassifier
#setup IO directories
parent_dir = os.path.join(os.path.abspath('..'),'objective1') #change according to preference
sub_dirs = ['fullstack','clippedstack','indexstack','predicted','stack2pred']
make_dirs = [os.makedirs(os.path.join(parent_dir,name),exist_ok=True) for name in sub_dirs]
###Output
_____no_output_____
###Markdown
Sentinel-2 data preparation* Resample coarse bands to 10m resolution* Stack multiband images * Calculate spectral indices
###Code
#dates considered for classification and analysis
dates = [20180304,20180309,20180314,20180319,20190108,20190128,20190212,20190304,20190309,
20190314,20190319,20190508,20190513,20190518,20190523,20190821,20191129]
#band names
bands = ['B01_60m','B02_10m','B03_10m','B04_10m','B05_20m','B06_20m',
'B07_20m','B08_10m','B8A_20m','B09_60m','B11_20m','B12_20m']
#get product file paths according to dates and tile ID T19PEP (covers Bonaire)
level2_dir = '...' #change according to preference
level2_files = glob(level2_dir+"/*.SAFE")
scene_paths=[file for date in dates for file in level2_files if str(date) in file and 'T19PEP' in file]
#sort multiband image paths according to date
image_collection ={}
for scene in scene_paths:
date = re.findall(r"(\d{8})T", scene)[0]
#collect all .jp2 band images in SAFE directory
all_images = [f for f in glob(scene + "*/**/*.jp2", recursive=True)]
img_paths = [img_path for band in bands for img_path in all_images if band in img_path]
image_collection[date] = img_paths
#check nr. of images per date
for key in image_collection.keys():print(f'Date: {key} Images: {len(image_collection[key])}')
#stack multiband images to a geotiff (computationally intensive!)
for date in tqdm(image_collection.keys(),position=0, leave=True):
ref10m= image_collection[date][1] #use band B02 (10m) as reference metadata
outfile = os.path.join(parent_dir,'fullstack',f'stack_{date}.tif')
stack_bands(image_collection[date],ref10m,outfile)
#crop multiband image stack and compute spectral indices
roi_file = './data/boundaries/coastline_lacbay.geojson' #polygon for cropping image
indices = ['NDVI','REP','FAI','GNDVI','NDVI_B8A','VB_FAH','SEI','SABI'] #list of indices used in the study
stack_files = glob(parent_dir + "/fullstack/*.tif")
for stack_file in tqdm(stack_files,position=0, leave=True):
filename = os.path.basename(stack_file).split('.')[0]
#cropping
clip_outfile = os.path.join(parent_dir,'clippedstack',filename+"_clipped.tif")
clip_raster(stack_file,roi_file,clip_outfile,fill=True,nodat=0)
#compute spectral indices
index_outfile = os.path.join(parent_dir,'indexstack',filename+"_index.tif")
computeIndexStack(clip_outfile,indices,index_outfile)
###Output
_____no_output_____
###Markdown
Sample pixel values from multiband images based on training sites * Training scenes from 4,9,14 and 19 March 2019
###Code
#get training sites and corresponding images
train_sites = [f for f in glob(r".\data\training_input\objective1\*_coast.geojson")]
dates = [20190304,20190309,20190314,20190319]
stack_bands = [f for date in dates for f in glob(parent_dir+'/clipped*/*_clipped.tif') if str(date) in f]
index_bands = [f for date in dates for f in glob(parent_dir+'/index*/*_index.tif') if str(date) in f]
#bands and indices to be sampled
band_names = ['B01','B02','B03','B04','B05','B06','B07','B08','B8A','B09','B11','B12']
indices = ['NDVI','REP','FAI','GNDVI','NDVI-B8A','VB-FAH','SEI','SABI']
dataset = []
for i in range(len(train_sites)):
#sample multibands and spectral indices
df_bands= pixel_sample(stack_bands[i],train_sites[i],band_names)
df_indices= pixel_sample(index_bands[i],train_sites[i],indices)
df_sample = pd.concat([df_bands,df_indices],axis=1)
df_sample = df_sample.loc[:,~df_sample.columns.duplicated()]
#downsample based on floating Sargassum (Sf)
df_downsampled = down_sample(df_sample,'C','Sf')
dataset.append(df_downsampled)
#final dataset
dataset=pd.concat(dataset,sort=False).reset_index(drop=True)
dataset.to_csv(r'./data/training_input/csv/training_samples_20190304_20190319_sargassum.csv',index=False)
###Output
_____no_output_____
###Markdown
Explore spectral signature * Jeffries-Matusita distance (JMD) used for feature selection ([reference](https://books.google.nl/books?id=RxHbb3enITYC&pg=PA52&lpg=PA52&dq=for+one+feature+and+two+classes+the+Bhattacharyya+distance+is+given+by&source=bl&ots=sTKLGl1POo&sig=ACfU3U2s7tv0LT9vfSUat98l4L9_dyUgeg&hl=nl&sa=X&ved=2ahUKEwiKgeHYwI7lAhWIIlAKHZfJAC0Q6AEwBnoECAkQAQ#v=onepage&q&f=false))* NDVI, REP, B05 and B11 are selected as input features for the classifiers
###Code
#load training sample
df = pd.read_csv('./data/training_input/csv/training_samples_20190304_20190319_sargassum.csv')
#plot spectral signature focused on 4 subclasses
specsign_plot(df,df.columns[4:16],classtype='C')
#plot JMD heatmap for each band
jmd_bands = [jmd2df(transpose_df(df,'C',band)) for band in df.columns[4:16]]
jmd_heatmap(jmd_bands)
#plot JMD heatmap for each spectral index
jmd_indices = [jmd2df(transpose_df(df,'C',band)) for band in df.columns[16:]]
jmd_heatmap(jmd_indices)
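# For reference, a minimal sketch (added) of the Jeffries-Matusita distance between two
# classes for a single feature, assuming normally distributed class samples. The
# repository's transpose_df/jmd2df helpers compute this pairwise over all classes;
# the function below only illustrates the underlying formula.
def jmd_two_classes(x1, x2):
    m1, m2 = np.mean(x1), np.mean(x2)
    v1, v2 = np.var(x1), np.var(x2)
    # Bhattacharyya distance between two 1-D Gaussians
    b = (0.125 * (m1 - m2) ** 2 / ((v1 + v2) / 2)
         + 0.5 * np.log(((v1 + v2) / 2) / np.sqrt(v1 * v2)))
    return 2 * (1 - np.exp(-b))  # JMD ranges from 0 (identical) to 2 (fully separable)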
#plot distribution of selected input features
sns.set_style('white')
ridgePlot(df[['C','NDVI','REP','B05','B11']],'C')
###Output
_____no_output_____
###Markdown
Build classifiers
###Code
#load training sample
df = pd.read_csv('./data/training_input/csv/training_samples_20190304_20190319_sargassum.csv')
predictors = ['NDVI','REP','B05','B11']
subset_df = df[['C']+predictors]
#split into train and test datasets 80:20
train,test = train_test_split(subset_df, train_size = 0.8,random_state=1,shuffle=True,stratify=np.array(subset_df['C']))
train = train.sort_values(by='C',ascending=True) #sort labels
#split pedictors from labels (for DTC)
le = LabelEncoder()
X_train,y_train = train[predictors],le.fit_transform(train['C'])
X_test,y_test = test[predictors],le.fit_transform(test['C'])
###Output
_____no_output_____
###Markdown
* Decision Tree Classifier
###Code
#perform k-fold (=10) cross-validation
#parameters considered in this step
max_depth = np.arange(1,40,2)
min_samples_split = list(range(2, 100,10))
max_leaf_nodes= list(range(2, 50,5))
min_samples_leaf= list(range(1, 100,10))
min_impurity_decrease=[0,0.00005,0.0001,0.0002,0.0005,0.001,0.0015,0.002,0.005,0.01,0.02,0.05,0.08]
criterion = ['gini','entropy']
#assign parameters to a dictionary
params = {'max_depth':max_depth,'min_samples_split':min_samples_split,
'max_leaf_nodes':max_leaf_nodes,'min_samples_leaf':min_samples_leaf,
'min_impurity_decrease':min_impurity_decrease,'criterion':criterion}
#plot validation curve
fig,axs = plt.subplots(3,2,figsize=(10,8))
axs = axs.ravel()
dtc = DecisionTreeClassifier(random_state=1,criterion='entropy') #default model
for (param_name,param_range),i in zip(params.items(),range(len(params.items()))):
train_scores,test_scores = validation_curve(dtc,X_train.values,y_train,cv=10,scoring='accuracy',
n_jobs=-1,param_range=param_range,param_name=param_name)
validation_curve_plot(train_scores,test_scores,param_range,param_name,axs[i])
plt.show()
#train dtc model based on best parameters
dtc = DecisionTreeClassifier(max_depth=5,random_state=2,criterion='entropy',min_samples_split=70,
max_leaf_nodes=15,min_samples_leaf=40,min_impurity_decrease=0.01,max_features=4)
dtc = dtc.fit(X_train,y_train)
#export model as joblib file
dump(dtc,r".\data\models\dtc_model_sargassum.joblib")
###Output
_____no_output_____
###Markdown
* Maximum Likelihood Classifier
###Code
#train mlc model
mlc = mlClassifier(train,'C')
#export model as joblib file
dump(mlc,r".\data\models\mlc_model_sargassum.joblib")
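# For intuition, a minimal sketch (added) of what a Gaussian maximum likelihood
# classifier does: fit a mean vector and covariance matrix per class, then assign each
# sample to the class with the highest log-likelihood. The actual mlClassifier in
# Python/mlc.py may differ in details (e.g. the chi-square threshold handling).
from scipy.stats import multivariate_normal
class SimpleMLC:
    def __init__(self, train_df, label_col):
        feats = train_df.drop(columns=label_col)
        self.classes = sorted(train_df[label_col].unique())
        self.dists = {c: multivariate_normal(feats[train_df[label_col] == c].mean().values,
                                             np.cov(feats[train_df[label_col] == c].values.T))
                      for c in self.classes}
    def predict(self, X):
        logp = np.column_stack([self.dists[c].logpdf(X) for c in self.classes])
        return np.array(self.classes)[np.argmax(logp, axis=1)]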
###Output
_____no_output_____
###Markdown
* Compute model accuracies (based on test split)
###Code
#load models
dtc = load(r".\data\models\dtc_model_sargassum.joblib")
mlc = load(r".\data\models\mlc_model_sargassum.joblib")
#DTC model accuracy
dtc_y_pred = dtc.predict(X_test)
con_mat_dtc = calc_acc(le.inverse_transform(y_test),le.inverse_transform(dtc_y_pred))
con_mat_dtc['classifier'] = 'DTC'
#MLC model accuracies with chi-square threshold
chi_table = {'MLC base':None,'MLC 10%':7.78,'MLC 20%':5.99,'MLC 50%':3.36}
mlc_conmats = []
for key,value in chi_table.items():
con_mat_mlc = mlc.classify_testdata(test,'C',threshold=value)
con_mat_mlc['classifier'] = key
mlc_conmats.append(con_mat_mlc)
#export model accuracies
mlc_conmats = pd.concat(mlc_conmats)
model_acc = pd.concat([con_mat_dtc,mlc_conmats])
model_acc.to_csv('./data/output/objective1/dtc_mlc_model_acc_obj1.csv')
###Output
_____no_output_____
###Markdown
Classification * create an image stack for prediction (stack2pred) for all scenes in objective1 folder* classify each stack2pred image with the DTC and MLC models
###Code
#get all multiband and spectral index images
stack_bands = glob(parent_dir+'/clipped*/*_clipped.tif')
index_bands = glob(parent_dir+'/index*/*_index.tif')
#get the order of the selected predictors in the multiband and spectral index images
predictors = ['NDVI','REP','B05','B11']
used_indices, used_bands = get_feat_layer_order(predictors)
stack2pred_paths = []
#create stack2pred rasters
for band_image,index_image in zip(stack_bands,index_bands):
date = re.findall(r"(\d{8})", band_image)[0]
outfile = os.path.join(f'{parent_dir}\stack2pred',f'stack2pred_{date}.tif')
stack2pred_paths.append(outfile)
stack2pred(index_image,band_image,used_indices,used_bands,outfile)
#load models
dtc = load(r".\data\models\dtc_model_sargassum.joblib")
mlc = load(r".\data\models\mlc_model_sargassum.joblib")
#stack2pred image paths
stack2pred_paths = glob(parent_dir+'*/stack2pred/stack2pred_*.tif')
#classify all stack2pred images
for path in stack2pred_paths:
date = re.findall(r"(\d{8})", path)[0]
#predict multiple mlc with thresholds
mlc_out = f'{parent_dir}/predicted/mlc/mlc_{date}_multi.tif'
os.makedirs(os.path.dirname(mlc_out),exist_ok=True)
if not os.path.exists(mlc_out):
chi_probs = [None,7.78,5.99,3.36]
mlc_preds = np.array([mlc.classify_raster_gx(path,out_file=None,threshold=prob) for prob in chi_probs])
#export multilayer mlc image
with rio.open(path) as src:
profile = src.profile.copy()
profile.update({'dtype': rio.uint16})
with rio.open(mlc_out ,'w',**profile) as dst:
dst.write(mlc_preds.astype(rio.uint16))
#predict and export DTC raster
dtc_out = f'{parent_dir}/predicted/dtc/dtc_{date}.tif'
os.makedirs(os.path.dirname(dtc_out),exist_ok=True)
if not os.path.exists(dtc_out):
dtc_pred_stack(dtc,path,dtc_out)
###Output
_____no_output_____
###Markdown
* MLC class posterior probability raster
###Code
#stack2pred image paths
stack2pred_paths = glob(parent_dir+'*/stack2pred/stack2pred_*.tif')
#compute probability raster for each stack2pred image
for path in stack2pred_paths:
    date = re.findall(r"(\d{8})", path)[0]
    mlc_prob_out = f'{parent_dir}/predicted/mlc/mlc_{date}_prob.tif'
    os.makedirs(os.path.dirname(mlc_prob_out),exist_ok=True)
    mlc.prob_rasters(path,mlc_prob_out)
###Output
_____no_output_____
###Markdown
External validity * Classify DTC and MLC results for a scene taken on 2019-05-18* Validation samples only covers Non-Floating Sargassum (Non-Sf) and Floating Sargassum (Sf)* Floating Sargassum (Sf) pixel value = 3 in the DTC and MLC rasters
###Code
#get file paths
val_samples = gpd.read_file(r'./data/training_input/objective1/sf_validation_20190518.geojson')
dtc_file = glob(parent_dir+'/predicted*/dtc/dtc*20190518*.tif')[0]
mlc_file = glob(parent_dir+'/predicted*/mlc/mlc*20190518*.tif')[0]
coords = [(val_samples.geometry[i][0].x,val_samples.geometry[i][0].y) for i in range(len(val_samples))]
with rio.open(dtc_file) as dtc_src, rio.open(mlc_file) as mlc_src:
#sample from dtc raster
val_samples['DTC'] = [pt[0] for pt in dtc_src.sample(coords)]
#sample from multilayer mlc raster
mlc_multi = pd.concat([pd.DataFrame(pt).T for pt in mlc_src.sample(coords)],ignore_index=True)
val_samples[['MLC base','MLC 10%','MLC 20%','MLC 50%']] = mlc_multi
#convert pixel values to 1 if Sf, else to 0 for others
val_samples[val_samples.columns[-5:]] = (val_samples[val_samples.columns[-5:]]==3).astype(int)
#compute classification (validation) accuracy
df_val = pd.DataFrame(val_samples.drop(columns='geometry'))
acc_val_dfs = []
for pred in df_val.columns[df_val.columns!='label']:
acc = calc_acc(df_val['label'].values, df_val[pred].values)
acc['classifier'] = pred
acc_val_dfs.append(acc)
acc_val_dfs = pd.concat(acc_val_dfs)
acc_val_dfs.to_csv('./data/output/objective1/dtc_mlc_external_val_obj1.csv')
###Output
_____no_output_____
###Markdown
* Plot model and validation accuracies
###Code
model_df = pd.read_csv('./data/output/objective1/dtc_mlc_model_acc_obj1.csv').set_index('Model')
val_df = pd.read_csv('./data/output/objective1/dtc_mlc_external_val_obj1.csv').set_index('Observed')
acc2plot = {'Model accuracy (8 classes)':model_df.loc['PA','UA'].str[:4].astype(float),
'Model F1-score (Sf)':model_df.loc['Sf','F1-score'].astype(float),
'Validation accuracy (2 classes)':val_df.loc['PA','UA'].str[:4].astype(float),
'Validation F1-score (Sf)':val_df.loc['1','F1-score'].astype(float)}
[plt.plot(val_df['classifier'].unique(),value,label=key) for key,value in acc2plot.items()]
plt.legend()
###Output
_____no_output_____
###Markdown
Comparative analysis * Compare Sargassum (Sf and Sl) classified area across different scenes for each model* Persistent misclassifications occur between the two Sargassum classes and other coastal features, hence a mask was applied.
###Code
#get classification result paths
dtc_paths = glob(parent_dir+'/predicted*/dtc/dtc*.tif')
mlc_paths = glob(parent_dir+'/predicted*/mlc/mlc*.tif')
#load mask
sl_mask = [gpd.read_file('./data/boundaries/sf_sl_mask.geojson').__geo_interface__['features'][0]['geometry']]
sf_mask = [gpd.read_file('./data/boundaries/sf_sl_mask.geojson').__geo_interface__['features'][1]['geometry']]
#collection of Sargassum classification results
data = dict.fromkeys(['Date','Sl MLC Base','Sl MLC 10%','Sl MLC 20%','Sl MLC 50%','Sl DTC',
'Sf MLC Base','Sf MLC 10%','Sf MLC 20%','Sf MLC 50%','Sf DTC'], [])
for i in range(len(mlc_paths)):
date = re.findall(r"(\d{8})", mlc_paths[i])
data['Date'] = data['Date']+ [str(pd.to_datetime(date)[0].date())]
with rio.open(dtc_paths[i]) as dtc_src, rio.open(mlc_paths[i]) as mlc_src:
#sf pixel count
dtc_img= mask(dataset=dtc_src,shapes=sf_mask,nodata=dtc_src.nodata,invert=True)[0]
data['Sf DTC'] = data['Sf DTC']+[np.unique(dtc_img, return_counts=True)[1][2]]
mlc_imgs= mask(dataset=mlc_src,shapes=sf_mask,nodata=mlc_src.nodata,invert=True)[0]
for k,sf_mlc_key in enumerate(list(data.keys())[6:-1]):
data[sf_mlc_key] = data[sf_mlc_key]+ [[np.unique(mlc_img, return_counts=True)[1][2] for mlc_img in mlc_imgs][k]]
#sl pixel count
dtc_img= mask(dataset=dtc_src,shapes=sl_mask,nodata=dtc_src.nodata,invert=False)[0]
data['Sl DTC'] = data['Sl DTC']+[np.unique(dtc_img, return_counts=True)[1][3]]
mlc_imgs= mask(dataset=mlc_src,shapes=sl_mask,nodata=mlc_src.nodata,invert=False)[0]
for j,sl_mlc_key in enumerate(list(data.keys())[1:5]):
data[sl_mlc_key] = data[sl_mlc_key]+[[np.unique(mlc_img, return_counts=True)[1][3] for mlc_img in mlc_imgs][j]]
#export data
data = pd.DataFrame(data)
data.to_csv('./data/output/objective1/classified_area_obj1.csv',index=False)
###Output
_____no_output_____
###Markdown
* Plot Sargassum classified area in 2019
###Code
#load data and subset only the 2019 results
data = pd.read_csv('./data/output/objective1/classified_area_obj1.csv',index_col='Date')[4:]
#plot Floating Sargassum (Sf) and Sargassum on land (Sl)
fig,axs = plt.subplots(1,2,figsize=(20,8))
axs[0].set_ylabel('Classified area (ha)')
plt.tight_layout()
fig.autofmt_xdate()
plots = [axs[0].plot(data[col]/100) if 'Sf' in col else axs[1].plot(data[col]/100) for col in data.columns]
legends = axs[0].legend(data.columns[:5],loc='upper right'),axs[1].legend(data.columns[5:],loc='upper right')
###Output
_____no_output_____
###Markdown
Sargassum coverage maps * Compute Sargassum coverage maps for the invasions in March and May 2019 and March 2018* A 20mx20m grid was used to calculate the coverage for each scene* MLC 20% results were used for Floating Sargassum (Sf) coverage map* MLC 50% results were used for Sargassum on land (Sl) coverage map* Note that code below takes about 10 minutes to run (due to small grid tile size)
###Code
#get classification result paths
mlc_paths = glob(parent_dir+'/predicted*/mlc/mlc*03*.tif')+glob(parent_dir+'/predicted*/mlc/mlc*05*.tif')
#load mask and grid data
mask_data = gpd.read_file('./data/boundaries/objective1/sf_sl_mask.geojson').__geo_interface__['features']
grid_file = gpd.read_file(r'./data/boundaries/objective1/20mgrid.geojson')
#collect geodataframes
data = []
for mlc_file in mlc_paths:
date = re.findall(r"(\d{8})", mlc_file)[0]
with rio.open(mlc_file) as src:
#iterate according to mask data (first item = sl, second item = sf)
#count the number of pixels in each grid tile (computationally intensive!)
for feat,label,val,inv,model in zip(mask_data,['sl','sf'],[4,3],[False,True],[3,2]):
img = mask(dataset=src,shapes=[feat['geometry']],nodata=src.nodata,invert=inv)[0][model]
zs = zonal_stats(grid_file,np.where(img==val,1,0),affine=src.transform,
prefix=f'{label}_{date}_',stats='count',geojson_out=True,nodata=0)
zs_filter = list(filter(lambda x: x['properties'][f'{label}_{date}_count']!=0, zs))
data.append(gpd.GeoDataFrame.from_features(zs_filter,crs=grid_file.crs))
#merge with grid file based on id
grid_file_copy = grid_file.copy()
for i in range(len(data)):
grid_file_copy = gpd.GeoDataFrame(grid_file_copy.merge(data[i][data[i].columns[1:]],on='id',how='outer'),
crs=grid_file.crs,geometry=grid_file.geometry).replace(np.nan,0)
#calculate coverage for each grid tile
sf_split = np.array_split(grid_file_copy[[i for i in grid_file_copy.columns if 'sf' in i ]],3,axis=1)
sl_split = np.array_split(grid_file_copy[[i for i in grid_file_copy.columns if 'sl' in i ]],3,axis=1)
scale_factor = (100/4/400) #(relative coverage of Sentinel-2 pixels in a 20x20m tile over 4 dates)
sf_covr = [sf_split[i].sum(1)*scale_factor for i in range(len(sf_split))]
sl_covr = [sl_split[i].sum(1)*scale_factor for i in range(len(sl_split))]
#export coverage maps
gdf_out = pd.concat([grid_file_copy[['geometry']]]+sf_covr+sl_covr,axis=1)
gdf_out.columns = ['geometry','sf_mar2018','sf_mar2019','sf_may2019','sl_mar2018','sl_mar2019','sl_may2019']
gdf_out = gdf_out[gdf_out[gdf_out.columns[1:]].sum(1)!=0]
gdf_out.to_file(r'./data/output/objective1/sargassum_coverage_coast.geojson',driver='GeoJSON')
###Output
_____no_output_____ |
CMA-ES/lambda/lambda.ipynb | ###Markdown
Study of the effect of $\lambda$ on CMA performance  **Abstract**: The size of $\lambda$ affects the time of a single run. According to the documentation, a reasonable $\lambda$ lies in $[5, 2n+10]$, and Hansen's recommended value is $4+3\times \lfloor \ln(N) \rfloor$. Here we fix mu=0.5 and sigma=0.3 and plot and analyse the results of different functions under different values of $\lambda$. First-stage tests * Functions: [rosen, bukin, griewank] * Minima: [0, 6.82, 0] * Dimension: [130] * $\lambda$: [5,18,20,50,80,110,140]
###Code
%pylab inline
import pandas as pd
from pandas import Series, DataFrame
import pickle
plt.rc('figure', figsize=(12, 8))
with open("data.tl",'r') as f:
result_list=pickle.load(f)
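# Quick reference (added): Hansen's recommended default population size is
# lambda = 4 + 3*floor(ln(N)); for the dimensions tested below (130 and 208)
# this gives 16 and 19 respectively.
for N in (130, 208):
    print(N, 4 + 3 * int(np.floor(np.log(N))))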
def convertdic(result_list):
res=[{}]
for row in result_list:
for i,d in enumerate(res):
if row[-1] not in d.keys():
d[row[-1]]=row[:-1]
break
if i==len(res)-1:
res.append({row[-1]:row[:-1]})
break
return res
def draw(title,tail):
bs=[row[:tail] for row in result_list if row[tail]==title]
bs=np.array(bs)
lmax=max(bs[:,-1])
bs=bs/bs.max(0)
bs=bs*[1,1,1,1,lmax]
bs=convertdic(bs)
df=DataFrame(bs[0],index=['countiter','countevals','result','time(s)'])
df=df.stack().unstack(0)
df.columns.name='values'
df.index.name='lambda'
df.plot(kind='bar',stacked=False,colormap='jet',alpha=0.9,title=title,figsize=(12,8));
df.plot(kind='area',stacked=False,colormap='jet',alpha=0.5,title=title,figsize=(12,8),xticks=np.arange(5,lmax,10));
def drawSigmaLines(t,xl):
sigmas=[[row[-3],row[-1]] for row in result_list if row[-2]==t]
ss=map(list,zip(*sigmas))[1]
M=max(map(len,ss))
for s in sigmas:
for i in range(M-len(s[1])):
s[1].append(None)
df1=DataFrame({s[0]:s[1] for s in sigmas})
df1.columns.name='sigma'
df1.index.name='lambda'
df1.plot(title=t,fontsize=10,linewidth=2,alpha=0.8,colormap='rainbow',xlim=(0,xl))
# bukin function
draw('bukin',-1)
# rosen function
draw('rosen',-1)
# griewank function
draw('griewank',-1)
###Output
_____no_output_____
###Markdown
Second-stage tests * Functions: [sphere, cigar, elli] * Minima: [0, 0, 0] * Dimension: [208] * $\lambda$: [5,10,14,18,20,22,26,60,100,140,180,220]
###Code
with open("data1.tl",'r') as f:
result_list=pickle.load(f)
# sphere function
draw('sphere',-2)
drawSigmaLines('sphere',300)
# cigar function
draw('cigar',-2)
drawSigmaLines('cigar',300)
# elli function
draw('elli',-2)
drawSigmaLines('elli',300)
###Output
_____no_output_____ |
model/MNIST_NN_Model.ipynb | ###Markdown
MNIST digit recognition Neural Network--- 1. Imports---
###Code
import pandas as pd
import matplotlib.pyplot as plt
from keras.datasets import mnist
from keras.models import Sequential
from keras.utils import np_utils
from keras.layers import Dense
###Output
_____no_output_____
###Markdown
2. Understanding the data--- 2.1. Load the dataset and split into train and test set
###Code
(X_train, y_train), (X_test, y_test) = mnist.load_data()
###Output
_____no_output_____
###Markdown
2.2. Data visualization
###Code
X_train.shape
###Output
_____no_output_____
###Markdown
- 60,000 training images- Each image is 28 x 28 pixels
###Code
y_train.shape
###Output
_____no_output_____
###Markdown
- 60,000 labels, one integer (0-9) per image- After one-hot encoding (section 3.3) these become arrays of size 10- For example, 1 is represented as [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
###Code
X_test.shape
###Output
_____no_output_____
###Markdown
- 10,000 test images- Each image is 28 x 28 pixels
###Code
y_test.shape
###Output
_____no_output_____
###Markdown
- 10,000 labels similar to __y_train__ 2.3. Images
###Code
plt.imshow(X_train[0], cmap=plt.get_cmap('gray'))
plt.imshow(X_train[1], cmap=plt.get_cmap('gray'))
plt.imshow(X_train[2], cmap=plt.get_cmap('gray'))
###Output
_____no_output_____
###Markdown
3. Data manipulation--- 3.1. Flatten 28 X 28 images into a 1 X 784 vector for each image
###Code
# X_train = X_train.reshape((X_train.shape[0], 28, 28, 1)).astype('float32')
# X_test = X_test.reshape((X_test.shape[0], 28, 28, 1)).astype('float32')
X_train = X_train.reshape((60000, 784))
X_train.shape
X_test = X_test.reshape((10000, 784))
X_test.shape
y_train.shape
y_test.shape
###Output
_____no_output_____
###Markdown
- y_train and y_test are of the required shape and don't need to be changed. 3.2. Normalize inputs from 0-255 in images to 0-1
###Code
X_train = X_train / 255
X_test = X_test / 255
###Output
_____no_output_____
###Markdown
3.3. One hot encode outputs
###Code
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
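# Illustrative check (added): to_categorical turns an integer label into a one-hot
# vector, e.g. 1 -> [0., 1., 0., ..., 0.], so the targets now have shape (n, 10).
print(np_utils.to_categorical([1], num_classes=10))
print(y_train.shape, y_test.shape)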
###Output
_____no_output_____
###Markdown
4. Build the model--- 4.1. Define model type (Neural Network)
###Code
model = Sequential()
###Output
_____no_output_____
###Markdown
4.2. Define architecture
###Code
model.add(Dense(784, activation='relu'))
model.add(Dense(10, activation='relu'))
model.add(Dense(10, activation='softmax'))
###Output
_____no_output_____
###Markdown
This is a dense neural network with architecture:| Layer | Activation function | Neurons || --- | --- | --- || 1 | ReLU | 784 || 2 | ReLU | 10 || 3 | Softmax | 10 | 4.3 Compile model
###Code
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
###Output
_____no_output_____
###Markdown
4.4. Training model
###Code
model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=30, batch_size=200, verbose=2)
###Output
Epoch 1/30
300/300 - 2s - loss: 0.4241 - accuracy: 0.8706 - val_loss: 0.1698 - val_accuracy: 0.9487
Epoch 2/30
300/300 - 2s - loss: 0.1324 - accuracy: 0.9619 - val_loss: 0.1176 - val_accuracy: 0.9662
Epoch 3/30
300/300 - 2s - loss: 0.0876 - accuracy: 0.9752 - val_loss: 0.1005 - val_accuracy: 0.9703
Epoch 4/30
300/300 - 2s - loss: 0.0625 - accuracy: 0.9817 - val_loss: 0.0811 - val_accuracy: 0.9753
Epoch 5/30
300/300 - 2s - loss: 0.0478 - accuracy: 0.9860 - val_loss: 0.0748 - val_accuracy: 0.9784
Epoch 6/30
300/300 - 2s - loss: 0.0360 - accuracy: 0.9898 - val_loss: 0.0737 - val_accuracy: 0.9801
Epoch 7/30
300/300 - 2s - loss: 0.0269 - accuracy: 0.9930 - val_loss: 0.0707 - val_accuracy: 0.9801
Epoch 8/30
300/300 - 2s - loss: 0.0203 - accuracy: 0.9947 - val_loss: 0.0703 - val_accuracy: 0.9810
Epoch 9/30
300/300 - 2s - loss: 0.0157 - accuracy: 0.9958 - val_loss: 0.0760 - val_accuracy: 0.9783
Epoch 10/30
300/300 - 2s - loss: 0.0132 - accuracy: 0.9967 - val_loss: 0.0861 - val_accuracy: 0.9769
Epoch 11/30
300/300 - 2s - loss: 0.0100 - accuracy: 0.9976 - val_loss: 0.0756 - val_accuracy: 0.9792
Epoch 12/30
300/300 - 2s - loss: 0.0074 - accuracy: 0.9985 - val_loss: 0.0770 - val_accuracy: 0.9795
Epoch 13/30
300/300 - 2s - loss: 0.0059 - accuracy: 0.9987 - val_loss: 0.0878 - val_accuracy: 0.9774
Epoch 14/30
300/300 - 2s - loss: 0.0078 - accuracy: 0.9980 - val_loss: 0.0913 - val_accuracy: 0.9791
Epoch 15/30
300/300 - 2s - loss: 0.0109 - accuracy: 0.9967 - val_loss: 0.1055 - val_accuracy: 0.9739
Epoch 16/30
300/300 - 2s - loss: 0.0062 - accuracy: 0.9984 - val_loss: 0.0879 - val_accuracy: 0.9796
Epoch 17/30
300/300 - 2s - loss: 0.0053 - accuracy: 0.9985 - val_loss: 0.0923 - val_accuracy: 0.9788
Epoch 18/30
300/300 - 2s - loss: 0.0033 - accuracy: 0.9992 - val_loss: 0.0833 - val_accuracy: 0.9813
Epoch 19/30
300/300 - 2s - loss: 8.4615e-04 - accuracy: 1.0000 - val_loss: 0.0830 - val_accuracy: 0.9826
Epoch 20/30
300/300 - 2s - loss: 3.8904e-04 - accuracy: 1.0000 - val_loss: 0.0846 - val_accuracy: 0.9820
Epoch 21/30
300/300 - 2s - loss: 2.9610e-04 - accuracy: 1.0000 - val_loss: 0.0868 - val_accuracy: 0.9816
Epoch 22/30
300/300 - 2s - loss: 2.5191e-04 - accuracy: 1.0000 - val_loss: 0.0869 - val_accuracy: 0.9816
Epoch 23/30
300/300 - 2s - loss: 2.1611e-04 - accuracy: 1.0000 - val_loss: 0.0884 - val_accuracy: 0.9818
Epoch 24/30
300/300 - 2s - loss: 1.8979e-04 - accuracy: 1.0000 - val_loss: 0.0892 - val_accuracy: 0.9823
Epoch 25/30
300/300 - 2s - loss: 1.6380e-04 - accuracy: 1.0000 - val_loss: 0.0904 - val_accuracy: 0.9819
Epoch 26/30
300/300 - 2s - loss: 1.4503e-04 - accuracy: 1.0000 - val_loss: 0.0902 - val_accuracy: 0.9830
Epoch 27/30
300/300 - 2s - loss: 1.2713e-04 - accuracy: 1.0000 - val_loss: 0.0929 - val_accuracy: 0.9816
Epoch 28/30
300/300 - 2s - loss: 1.0971e-04 - accuracy: 1.0000 - val_loss: 0.0926 - val_accuracy: 0.9822
Epoch 29/30
300/300 - 2s - loss: 1.0372e-04 - accuracy: 1.0000 - val_loss: 0.0931 - val_accuracy: 0.9823
Epoch 30/30
300/300 - 2s - loss: 8.4501e-05 - accuracy: 1.0000 - val_loss: 0.0950 - val_accuracy: 0.9815
###Markdown
4.5. Evaluate the model
###Code
scores = model.evaluate(X_test, y_test, verbose=0)
print("Test loss: ", scores[0])
print("Test Accuracy: ", (scores[1]))
print("Baseline Error: ", (100-scores[1]*100))
###Output
Test loss: 0.09502233564853668
Test Accuracy: 0.9815000295639038
Baseline Error: 1.8499970436096191
###Markdown
4.6. Save the model in a h5 file
###Code
model.save("model.h5")
###Output
_____no_output_____
###Markdown
5. Convert the model to a web friendly format---
###Code
!tensorflowjs_converter --input_format keras './model.h5' '../UI/model'
###Output
2021-03-21 17:58:11.637568: W tensorflow/stream_executor/platform/default/dso_loader.cc:60] Could not load dynamic library 'libcudart.so.11.0'; dlerror: libcudart.so.11.0: cannot open shared object file: No such file or directory
2021-03-21 17:58:11.637600: I tensorflow/stream_executor/cuda/cudart_stub.cc:29] Ignore above cudart dlerror if you do not have a GPU set up on your machine.
|
Python_Stock/Technical_Indicators/ADL.ipynb | ###Markdown
Accumulation Distribution Line (ADL) https://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:accumulation_distribution_line
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
# yfinance is used to fetch data
import yfinance as yf
yf.pdr_override()
# input
symbol = 'AAPL'
start = '2018-06-01'
end = '2019-01-01'
# Read data
df = yf.download(symbol,start,end)
# View Columns
df.head()
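# Comments added for clarity -- the ADL is built in three steps below:
# 1) Money Flow Multiplier = ((Close - Low) - (High - Close)) / (High - Low),
#    written equivalently here as (2*Close - Low - High) / (High - Low)
# 2) Money Flow Volume = Money Flow Multiplier * Volume
# 3) ADL = cumulative sum of Money Flow Volume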
df['MF Multiplier'] = (2*df['Adj Close'] - df['Low'] - df['High'])/(df['High']-df['Low'])
df['MF Volume'] = df['MF Multiplier']*df['Volume']
df['ADL'] = df['MF Volume'].cumsum()
df = df.drop(['MF Multiplier','MF Volume'],axis=1)
df['VolumePositive'] = df['Open'] < df['Adj Close']
fig = plt.figure(figsize=(14,10))
ax1 = plt.subplot(3, 1, 1)
ax1.plot(df['Adj Close'])
ax1.set_title('Stock '+ symbol +' Closing Price')
ax1.set_ylabel('Price')
ax1.legend(loc='best')
ax2 = plt.subplot(3, 1, 2)
ax2.plot(df['ADL'], label='Accumulation Distribution Line')
ax2.grid()
ax2.legend(loc='best')
ax2.set_ylabel('Accumulation Distribution Line')
ax3 = plt.subplot(3, 1, 3)
ax3v = ax3.twinx()
colors = df.VolumePositive.map({True: 'g', False: 'r'})
ax3v.bar(df.index, df['Volume'], color=colors, alpha=0.4)
ax3.set_ylabel('Volume')
ax3.grid()
ax3.set_xlabel('Date')
###Output
_____no_output_____
###Markdown
Candlestick with ADL
###Code
from matplotlib import dates as mdates
import datetime as dt
dfc = df.copy()
dfc['VolumePositive'] = dfc['Open'] < dfc['Adj Close']
#dfc = dfc.dropna()
dfc = dfc.reset_index()
dfc['Date'] = mdates.date2num(dfc['Date'].astype(dt.date))
dfc.head()
from mpl_finance import candlestick_ohlc
fig = plt.figure(figsize=(14,10))
ax1 = plt.subplot(3, 1, 1)
candlestick_ohlc(ax1,dfc.values, width=0.5, colorup='g', colordown='r', alpha=1.0)
ax1.xaxis_date()
ax1.xaxis.set_major_formatter(mdates.DateFormatter('%d-%m-%Y'))
ax1.grid(True, which='both')
ax1.minorticks_on()
ax1v = ax1.twinx()
colors = dfc.VolumePositive.map({True: 'g', False: 'r'})
ax1v.bar(dfc.Date, dfc['Volume'], color=colors, alpha=0.4)
ax1v.axes.yaxis.set_ticklabels([])
ax1v.set_ylim(0, 3*df.Volume.max())
ax1.set_title('Stock '+ symbol +' Closing Price')
ax1.set_ylabel('Price')
ax2 = plt.subplot(3, 1, 2)
ax2.plot(df['ADL'], label='Accumulation Distribution Line')
ax2.grid()
ax2.legend(loc='best')
ax2.set_ylabel('Accumulation Distribution Line')
ax3 = plt.subplot(3, 1, 3)
ax3v = ax3.twinx()
colors = df.VolumePositive.map({True: 'g', False: 'r'})
ax3v.bar(df.index, df['Volume'], color=colors, alpha=0.4)
ax3.set_ylabel('Volume')
ax3.grid()
ax3.set_xlabel('Date')
###Output
_____no_output_____
###Markdown
Accumulation Distribution Line (ADL) https://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:accumulation_distribution_line
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
# fix_yahoo_finance is used to fetch data
import fix_yahoo_finance as yf
yf.pdr_override()
# input
symbol = 'AAPL'
start = '2018-06-01'
end = '2019-01-01'
# Read data
df = yf.download(symbol,start,end)
# View Columns
df.head()
df['MF Multiplier'] = (2*df['Adj Close'] - df['Low'] - df['High'])/(df['High']-df['Low'])
df['MF Volume'] = df['MF Multiplier']*df['Volume']
df['ADL'] = df['MF Volume'].cumsum()
df = df.drop(['MF Multiplier','MF Volume'],axis=1)
df['VolumePositive'] = df['Open'] < df['Adj Close']
fig = plt.figure(figsize=(14,10))
ax1 = plt.subplot(3, 1, 1)
ax1.plot(df['Adj Close'])
ax1.set_title('Stock '+ symbol +' Closing Price')
ax1.set_ylabel('Price')
ax1.legend(loc='best')
ax2 = plt.subplot(3, 1, 2)
ax2.plot(df['ADL'], label='Accumulation Distribution Line')
ax2.grid()
ax2.legend(loc='best')
ax2.set_ylabel('Accumulation Distribution Line')
ax3 = plt.subplot(3, 1, 3)
ax3v = ax3.twinx()
colors = df.VolumePositive.map({True: 'g', False: 'r'})
ax3v.bar(df.index, df['Volume'], color=colors, alpha=0.4)
ax3.set_ylabel('Volume')
ax3.grid()
ax3.set_xlabel('Date')
###Output
_____no_output_____
###Markdown
Candlestick with ADL
###Code
from matplotlib import dates as mdates
import datetime as dt
dfc = df.copy()
dfc['VolumePositive'] = dfc['Open'] < dfc['Adj Close']
#dfc = dfc.dropna()
dfc = dfc.reset_index()
dfc['Date'] = mdates.date2num(dfc['Date'].astype(dt.date))
dfc.head()
from mpl_finance import candlestick_ohlc
fig = plt.figure(figsize=(14,10))
ax1 = plt.subplot(3, 1, 1)
candlestick_ohlc(ax1,dfc.values, width=0.5, colorup='g', colordown='r', alpha=1.0)
ax1.xaxis_date()
ax1.xaxis.set_major_formatter(mdates.DateFormatter('%d-%m-%Y'))
ax1.grid(True, which='both')
ax1.minorticks_on()
ax1v = ax1.twinx()
colors = dfc.VolumePositive.map({True: 'g', False: 'r'})
ax1v.bar(dfc.Date, dfc['Volume'], color=colors, alpha=0.4)
ax1v.axes.yaxis.set_ticklabels([])
ax1v.set_ylim(0, 3*df.Volume.max())
ax1.set_title('Stock '+ symbol +' Closing Price')
ax1.set_ylabel('Price')
ax2 = plt.subplot(3, 1, 2)
ax2.plot(df['ADL'], label='Accumulation Distribution Line')
ax2.grid()
ax2.legend(loc='best')
ax2.set_ylabel('Accumulation Distribution Line')
ax3 = plt.subplot(3, 1, 3)
ax3v = ax3.twinx()
colors = df.VolumePositive.map({True: 'g', False: 'r'})
ax3v.bar(df.index, df['Volume'], color=colors, alpha=0.4)
ax3.set_ylabel('Volume')
ax3.grid()
ax3.set_xlabel('Date')
###Output
_____no_output_____ |
notebooks/bnn_hmc_gaussian.ipynb | ###Markdown
(SG)HMC for inferring params of a 2d GaussianBased on https://github.com/google-research/google-research/blob/master/bnn_hmc/notebooks/mcmc_gaussian_test.ipynb
###Code
import jax
print(jax.devices())
!git clone https://github.com/google-research/google-research.git
%cd /content/google-research
!ls bnn_hmc
!pip install optax
###Output
Collecting optax
Downloading optax-0.0.9-py3-none-any.whl (118 kB)
Requirement already satisfied: absl-py>=0.7.1 in /usr/local/lib/python3.7/dist-packages (from optax) (0.12.0)
Requirement already satisfied: numpy>=1.18.0 in /usr/local/lib/python3.7/dist-packages (from optax) (1.19.5)
Collecting chex>=0.0.4
Downloading chex-0.0.8-py3-none-any.whl (57 kB)
Requirement already satisfied: jax>=0.1.55 in /usr/local/lib/python3.7/dist-packages (from optax) (0.2.19)
Requirement already satisfied: jaxlib>=0.1.37 in /usr/local/lib/python3.7/dist-packages (from optax) (0.1.70+cuda110)
Requirement already satisfied: six in /usr/local/lib/python3.7/dist-packages (from absl-py>=0.7.1->optax) (1.15.0)
Requirement already satisfied: dm-tree>=0.1.5 in /usr/local/lib/python3.7/dist-packages (from chex>=0.0.4->optax) (0.1.6)
Requirement already satisfied: toolz>=0.9.0 in /usr/local/lib/python3.7/dist-packages (from chex>=0.0.4->optax) (0.11.1)
Requirement already satisfied: opt-einsum in /usr/local/lib/python3.7/dist-packages (from jax>=0.1.55->optax) (3.3.0)
Requirement already satisfied: flatbuffers<3.0,>=1.12 in /usr/local/lib/python3.7/dist-packages (from jaxlib>=0.1.37->optax) (1.12)
Requirement already satisfied: scipy in /usr/local/lib/python3.7/dist-packages (from jaxlib>=0.1.37->optax) (1.4.1)
Installing collected packages: chex, optax
Successfully installed chex-0.0.8 optax-0.0.9
###Markdown
Setup
###Code
from jax.config import config
import jax
from jax import numpy as jnp
import numpy as onp
import numpy as np
import os
import sys
import time
import tqdm
import optax
import functools
from matplotlib import pyplot as plt
from bnn_hmc.utils import losses
from bnn_hmc.utils import train_utils
from bnn_hmc.utils import tree_utils
%matplotlib inline
%load_ext autoreload
%autoreload 2
###Output
_____no_output_____
###Markdown
Data and model
###Code
mu = jnp.zeros([2,])
# sigma = jnp.array([[1., .5], [.5, 1.]])
sigma = jnp.array([[1.e-4, 0], [0., 1.]])
sigma_l = jnp.linalg.cholesky(sigma)
sigma_inv = jnp.linalg.inv(sigma)
sigma_det = jnp.linalg.det(sigma)
onp.random.seed(0)
samples = onp.random.multivariate_normal(onp.asarray(mu), onp.asarray(sigma), size=1000)
plt.scatter(samples[:, 0], samples[:, 1], alpha=0.3)
plt.grid()
def log_density_fn(params):
assert params.shape == mu.shape, "Shape error"
diff = params - mu
k = mu.size
log_density = -jnp.log(2 * jnp.pi) * k / 2
log_density -= jnp.log(sigma_det) / 2
log_density -= diff.T @ sigma_inv @ diff / 2
return log_density
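# Sanity check (added sketch): the hand-written log density should match
# jax.scipy's multivariate normal log-pdf at an arbitrary test point.
from jax.scipy.stats import multivariate_normal as jmvn
_test_point = jnp.array([0.5, -0.3])
print(log_density_fn(_test_point), jmvn.logpdf(_test_point, mu, sigma))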
def log_likelihood_fn(_, params, *args, **kwargs):
return log_density_fn(params), jnp.array(jnp.nan)
def log_prior_fn(_):
return 0.
def log_prior_diff_fn(*args):
return 0.
fake_net_apply = None
fake_data = jnp.array([[jnp.nan,],]), jnp.array([[jnp.nan,],])
fake_net_state = jnp.array([jnp.nan,])
###Output
_____no_output_____
###Markdown
HMC
###Code
step_size = 1e-1
trajectory_len = jnp.pi / 2
max_num_leapfrog_steps = int(trajectory_len // step_size + 1)
print("Leapfrog steps per iteration:", max_num_leapfrog_steps)
update, get_log_prob_and_grad = train_utils.make_hmc_update(
fake_net_apply, log_likelihood_fn, log_prior_fn, log_prior_diff_fn,
max_num_leapfrog_steps, 1., 0.)
# Initial log-prob and grad values
# params = jnp.ones_like(mu)[None, :]
params = jnp.ones_like(mu)
log_prob, state_grad, log_likelihood, net_state = (
get_log_prob_and_grad(fake_data, params, fake_net_state))
%%time
num_iterations = 500
all_samples = []
key = jax.random.PRNGKey(0)
for iteration in tqdm.tqdm(range(num_iterations)):
(params, net_state, log_likelihood, state_grad, step_size, key,
accept_prob, accepted) = (
update(fake_data, params, net_state, log_likelihood, state_grad,
key, step_size, trajectory_len, True))
if accepted:
all_samples.append(onp.asarray(params).copy())
# print("It: {} \t Accept P: {} \t Accepted {} \t Log-likelihood: {}".format(
# iteration, accept_prob, accepted, log_likelihood))
len(all_samples)
log_prob, state_grad, log_likelihood, net_state
all_samples_cat = onp.stack(all_samples)
plt.scatter(all_samples_cat[:, 0], all_samples_cat[:, 1], alpha=0.3)
plt.grid()
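# Quick check (added): the empirical covariance of the accepted HMC samples
# should be close to the true covariance sigma.
print(np.cov(all_samples_cat.T))
print(sigma)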
###Output
_____no_output_____
###Markdown
Blackjax
###Code
!pip install blackjax
import jax
import jax.numpy as jnp
import jax.scipy.stats as stats
import matplotlib.pyplot as plt
import numpy as np
import blackjax.hmc as hmc
import blackjax.nuts as nuts
import blackjax.stan_warmup as stan_warmup
print(jax.devices())
potential = lambda x: -log_density_fn(**x)
num_integration_steps = 30
kernel_generator = lambda step_size, inverse_mass_matrix: hmc.kernel(
potential, step_size, inverse_mass_matrix, num_integration_steps
)
rng_key = jax.random.PRNGKey(0)
initial_position = {"params": np.zeros(2)}
initial_state = hmc.new_state(initial_position, potential)
print(initial_state)
%%time
nsteps = 500
final_state, (step_size, inverse_mass_matrix), info = stan_warmup.run(
rng_key,
kernel_generator,
initial_state,
nsteps,
)
%%time
kernel = nuts.kernel(potential, step_size, inverse_mass_matrix)
kernel = jax.jit(kernel)
def inference_loop(rng_key, kernel, initial_state, num_samples):
def one_step(state, rng_key):
state, _ = kernel(rng_key, state)
return state, state
keys = jax.random.split(rng_key, num_samples)
_, states = jax.lax.scan(one_step, initial_state, keys)
return states
%%time
nsamples = 500
states = inference_loop(rng_key, kernel, initial_state, nsamples)
samples = states.position["params"].block_until_ready()
print(samples.shape)
plt.scatter(samples[:, 0], samples[:, 1], alpha=0.3)
plt.grid()
###Output
_____no_output_____ |
mnist test.ipynb | ###Markdown
Filters
###Code
fig = plt.figure(figsize=(20, 20))
num_cols = 10
gs = fig.add_gridspec(num_filters //num_cols, num_cols, hspace=0, wspace=0)
axs = gs.subplots(sharex='col', sharey='row')
for i in range(num_filters):
axs[i // num_cols][i % num_cols].imshow(sparse_model.dictionary.w[:, i].reshape([14, 14]).cpu())
plt.show()
num_reconstructions = 10
fig = plt.figure(figsize=(20, 20))
gs = fig.add_gridspec(num_reconstructions, 2, hspace=0, wspace=0)
axs = gs.subplots(sharex='col', sharey='row')
for i in range(num_reconstructions):
axs[i][0].imshow(X.values[i].reshape([28, 28]))
reconstructions = sparse_model.forward(torch.from_numpy(X.values[i].reshape([-1, dict_filter_size])))
axs[i][1].imshow(reconstructions.reshape([28, 28]).cpu())
plt.show()
###Output
_____no_output_____ |
nodec_experiments/ct_lti/multi_sample/table_2.ipynb | ###Markdown
CT-LTI: Multiple Sample Performance Evaluation Table This table is found in appendix section A.4 and summarizes the performance comparison between NODEC and OC in relative terms of error and energy. Without extensive hyperparameter optimization we see that NODEC is competitive with OC for all graphs and initial-target state settings. Furthermore, please make sure that the required data folder is available at the paths used by the script. You may generate the required data by running the python script ```nodec_experiments/ct_lti/gen_parameters.py```. Please also make sure that a training procedure has produced results in the corresponding paths used below. Running ```nodec_experiments/ct_lti/multi_sample/train.ipynb``` with default paths is expected to generate the required results at that location. As neural network initialization is stochastic, please make sure that appropriate seeds are used, or expect some variance relative to the paper results.
###Code
%load_ext autoreload
%autoreload 2
import numpy as np
import pandas as pd
###Output
_____no_output_____
###Markdown
Gather data from filesBelow we gather the data from files generated by the ```train_and_eval.ipynb``` file. Please run this first if the data files are not present!
###Code
data_folder = '../../../../data/results/ct_lti/multi_sample/'
graphs = ['lattice', 'ba', 'tree']
graph_name = {'lattice' : 'Square Lattice', 'ba' : 'Barabasi Albert', 'tree' : 'Random Tree'}
resulting_rows = []
for graph in graphs:
graph_folder = data_folder + graph + '/'
interactions = [50, 500, 5000]
for interaction in interactions:
mse_diffs = []
energy_diffs = []
for i in range(100):
nnres = pd.read_csv(graph_folder+'nn_sample_'+str(i)+'_train_'+str(interaction)+'/epoch_metadata.csv')
ocres = pd.read_csv(graph_folder+'oc_sample'+str(i)+'_ninter_'+str(interaction)+'/epoch_metadata.csv')
nn_en = nnres['total_energy'].item()
oc_en = ocres['total_energy'].item()
nn_fl = nnres['final_loss'].item()
oc_fl = ocres['final_loss'].item()
mse_diffs.append((nn_fl-oc_fl)/oc_fl)
energy_diffs.append((nn_en-oc_en)/oc_en)
row = {'Graph' : graph_name[graph], 'Interaction Interval': 5.0/interaction,
'Median Energy' : round(np.quantile(energy_diffs, 0.5), 2),
'IQR Energy' : round(np.quantile(energy_diffs, 0.75)-np.quantile(energy_diffs,0.25), 2),
'Median MSE' : round(np.quantile(mse_diffs, 0.5), 2),
'IQR MSE' : round(np.quantile(mse_diffs, 0.75)-np.quantile(mse_diffs, 0.25), 2),
'Numerical Instabilities' : round((np.array(mse_diffs) > 10).mean(), 2)
}
resulting_rows.append(row)
###Output
_____no_output_____
###Markdown
Resulting Table
###Code
df = pd.DataFrame(resulting_rows).groupby(['Graph', 'Interaction Interval']).first()
styler = df.style.apply(lambda x: ["background: lightblue" if v <= 0.1 and i in [0,2] else "" for i,v in enumerate(x)], axis = 1)
styler
###Output
_____no_output_____ |
matplotlibteste.ipynb | ###Markdown
I want to use matplotlib to illustrate permutations. The first thing is to make numbered circles
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
circle1=plt.Circle((0,0),.1,color='r', alpha=0.2, clip_on=False)
plt.axes(aspect="equal")
fig = plt.gcf()
fig.gca().add_artist(circle1)
plt.axis("off")
circle1=plt.Circle((0,0),.1,color='r', alpha=0.2, clip_on=False)
circle2=plt.Circle((0,0.2),.1,color='y', alpha=0.2, clip_on=False)
circle3=plt.Circle((0,0.4),.1,color='b', alpha=0.2, clip_on=False)
circle4=plt.Circle((0,0.6),.1,color='g', alpha=0.2, clip_on=False)
circle5=plt.Circle((0,0.8),.1,color=(0.2,0.6,0.7), alpha=0.2, clip_on=False)
plt.axes(aspect="equal")
fig1 = plt.gcf()
plt.axis("off")
fig1.gca().add_artist(circle1)
fig1.gca().add_artist(circle2)
fig1.gca().add_artist(circle3)
fig1.gca().add_artist(circle4)
fig1.gca().add_artist(circle5)
circle1=plt.Circle((0,0),.1,color='r', alpha=0.2, clip_on=False)
circle2=plt.Circle((0,0.2),.1,color='y', alpha=0.2, clip_on=False)
circle3=plt.Circle((0,0.4),.1,color='b', alpha=0.2, clip_on=False)
circle4=plt.Circle((0,0.6),.1,color='g', alpha=0.2, clip_on=False)
circle5=plt.Circle((0,0.8),.1,color=(0.2,0.6,0.7), alpha=0.2, clip_on=False)
circled1=plt.Circle((1,0),.1,color='r', alpha=0.2, clip_on=False)
circled2=plt.Circle((1,0.2),.1,color='y', alpha=0.2, clip_on=False)
circled3=plt.Circle((1,0.4),.1,color='b', alpha=0.2, clip_on=False)
circled4=plt.Circle((1,0.6),.1,color='g', alpha=0.2, clip_on=False)
circled5=plt.Circle((1,0.8),.1,color=(0.2,0.6,0.7), alpha=0.2, clip_on=False)
plt.axes(aspect="equal")
fig1 = plt.gcf()
plt.axis("off")
fig1.gca().add_artist(circle1)
fig1.gca().add_artist(circle2)
fig1.gca().add_artist(circle3)
fig1.gca().add_artist(circle4)
fig1.gca().add_artist(circle5)
fig1.gca().add_artist(circled1)
fig1.gca().add_artist(circled2)
fig1.gca().add_artist(circled3)
fig1.gca().add_artist(circled4)
fig1.gca().add_artist(circled5)
circle1=plt.Circle((0,0),.1,color='r', alpha=0.2, clip_on=False)
circle2=plt.Circle((0,0.2),.1,color='y', alpha=0.2, clip_on=False)
circle3=plt.Circle((0,0.4),.1,color='b', alpha=0.2, clip_on=False)
circle4=plt.Circle((0,0.6),.1,color='g', alpha=0.2, clip_on=False)
circle5=plt.Circle((0,0.8),.1,color=(0.2,0.6,0.7), alpha=0.2, clip_on=False)
circled1=plt.Circle((1,0),.1,color='r', alpha=0.2, clip_on=False)
circled2=plt.Circle((1,0.2),.1,color='y', alpha=0.2, clip_on=False)
circled3=plt.Circle((1,0.4),.1,color='b', alpha=0.2, clip_on=False)
circled4=plt.Circle((1,0.6),.1,color='g', alpha=0.2, clip_on=False)
circled5=plt.Circle((1,0.8),.1,color=(0.2,0.6,0.7), alpha=0.2, clip_on=False)
plt.axes(aspect="equal")
fig1 = plt.gcf()
plt.axis("off")
fig1.gca().add_artist(circle1)
fig1.gca().add_artist(circle2)
fig1.gca().add_artist(circle3)
fig1.gca().add_artist(circle4)
fig1.gca().add_artist(circle5)
fig1.gca().add_artist(circled1)
fig1.gca().add_artist(circled2)
fig1.gca().add_artist(circled3)
fig1.gca().add_artist(circled4)
fig1.gca().add_artist(circled5)
# the edges
fig1.gca().plot([0.15,0.85],[0,0.8], color="red", alpha=0.6 )
fig1.gca().text(0.,0.,r'$5$', fontsize=20,verticalalignment='center', horizontalalignment='center')
fig1.gca().text(1,0,r'$5$', fontsize=20, verticalalignment='center', horizontalalignment='center')
fig1.gca().text(1,0.8,r'$1$', fontsize=20, verticalalignment='center', horizontalalignment='center')
fig1.gca().text(0,0.8,r'$1$', fontsize=20, verticalalignment='center', horizontalalignment='center')
fig1.gca().plot([0.15,0.85],[0.8,0.4], color=(.2,.6,.7), alpha=0.6 )
# now we will write the functions. first, the color of an integer
def cor(n):
''' Given an integer n, assigns a color'''
return (n/(n+1), 1- n/(n+1), 1-(n+2)/(n+5))
# test
circle1=plt.Circle((0,0),.1,color=cor(1), alpha=0.2, clip_on=False)
circle2=plt.Circle((0,0.2),.1,color=cor(3), alpha=0.2, clip_on=False)
plt.axes(aspect="equal")
fig1 = plt.gcf()
plt.axis("off")
fig1.gca().add_artist(circle1)
fig1.gca().add_artist(circle2)
def circulo(x,n):
'''Defines a circle with center (x, 0.2*n), radius 0.1 and color n'''
return plt.Circle((x,0.2*n), .1, color=cor(n), alpha=0.3, clip_on=False )
# test
plt.axes(aspect="equal")
fig1 = plt.gcf()
plt.axis("off")
fig1.gca().add_artist(circulo(0,3))
fig1.gca().add_artist(circulo(0,4))
# circle-stack function
def pilha_de_circulos(x,n):
'''Makes a stack of n circles at abscissa x'''
for k in range(n):
fig1.gca().add_artist(circulo(x,k))
fig1.gca().text(x,0.2*k,r'$'+str(k+1)+'$', fontsize=20,verticalalignment='center', horizontalalignment='center')
return
# test of this function:
plt.axes(aspect="equal")
fig1 = plt.gcf()
plt.axis("off")
pilha_de_circulos(0,3)
pilha_de_circulos(1,3)
pilha_de_circulos(2,3)
# now the mapa_permu function
def mapa_permu(x,p):
    ''' Draw the permutation p (a list) at position x'''
l=len(p)
x1= x+.15
x2= x+.85
for y in range(l):
fig1.gca().plot([x1,x2],[0.2*y,0.2*(p[y]-1)], color=cor(y), alpha=0.6 )
return
# test
plt.axes(aspect="equal")
fig1 = plt.gcf()
plt.axis("off")
pilha_de_circulos(0,3)
pilha_de_circulos(1,3)
pilha_de_circulos(2,3)
mapa_permu(0,[2,1,3])
mapa_permu(1.0, [3,1,2])
plt.axes(aspect="equal")
fig1 = plt.gcf()
plt.axis("off")
pilha_de_circulos(0,5)
pilha_de_circulos(1,5)
mapa_permu(0,[3,2,1,5,4])
def pgrafico(x,p):
    '''Draw the diagram of the permutation p starting at x'''
n=len(p)
fig1= plt.gcf()
plt.axis("off")
pilha_de_circulos(x,n)
pilha_de_circulos(x+1,n)
return mapa_permu(x,p)
# test
plt.axes(aspect="equal")
fig1= plt.gcf()
plt.axis("off")
pgrafico(0,[3,1,2])
###Output
_____no_output_____ |
homework/Homework07_Boyao.ipynb | ###Markdown
Homework03: Topic Modeling with Latent Semantic Analysis Latent Semantic Analysis (LSA) is a method for finding latent similarities between documents treated as a bag of words by using a low rank approximation. It is used for document classification, clustering and retrieval. For example, LSA can be used to search for prior art given a new patent application. In this homework, we will implement a small library for simple latent semantic analysis as a practical example of the application of SVD. The ideas are very similar to PCA. SVD is also used in recommender systems in a similar fashion (for an SVD-based recommender system library, see [Surprise](http://surpriselib.com)). We will implement a toy example of LSA to get familiar with the ideas. If you want to use LSA or similar methods for statistical language analysis, the most efficient Python libraries are probably [gensim](https://radimrehurek.com/gensim/) and [spaCy](https://spacy.io) - these also provide an online algorithm - i.e. the training information can be continuously updated. Other useful functions for processing natural language can be found in the [Natural Language Toolkit](http://www.nltk.org/). **Note**: The SVD from scipy.linalg performs a full decomposition, which is inefficient since we only need to decompose until we get the first k singular values. If the SVD from `scipy.linalg` is too slow, please use the `sparsesvd` function from the [sparsesvd](https://pypi.python.org/pypi/sparsesvd/) package to perform SVD instead. You can install in the usual way with ```!pip install sparsesvd```Then import the following```pythonfrom sparsesvd import sparsesvd from scipy.sparse import csc_matrix ```and use as follows```pythonsparsesvd(csc_matrix(M), k=10)``` **Exercise 1 (20 points)**. Calculating pairwise distance matrices.Suppose we want to construct a distance matrix between the rows of a matrix. For example, given the matrix ```pythonM = np.array([[1,2,3],[4,5,6]])```the distance matrix using Euclidean distance as the measure would be```python[[ 0.000 1.414 2.828] [ 1.414 0.000 1.414] [ 2.828 1.414 0.000]] ```if $M$ was a collection of column vectors.Write a function to calculate the pairwise-distance matrix given the matrix $M$ and some arbitrary distance function. Your functions should have the following signature:```def func_name(M, distance_func): pass```0. Write a distance function for the Euclidean, squared Euclidean and cosine measures.1. Write the function using looping for M as a collection of row vectors.2. Write the function using looping for M as a collection of column vectors.3. Write the function using broadcasting for M as a collection of row vectors.4. Write the function using broadcasting for M as a collection of column vectors. For 3 and 4, try to avoid using transposition (but if you get stuck, there will be no penalty for using transposition). Check that all four functions give the same result when applied to the given matrix $M$.
###Code
import numpy as np
import scipy.linalg as la
import string
import pandas as pd
from scipy import stats
np.set_printoptions(precision=4)
def Euc(x, y):
return np.sqrt(np.sum((x - y) ** 2))
def sqEuc(x, y):
return np.sum((x - y) ** 2)
def Cos(x, y):
return np.dot(x.T, y)/(np.linalg.norm(x) * np.linalg.norm(y))
M = np.array([[1,2,3],[4,5,6]])
def loop_row(M, distance_func):
n = M.shape[0]
dist = np.zeros((n, n))
for i in range(n):
for j in range(i + 1, n):
dist[i, j] = dist[j, i] = distance_func(M[i, :], M[j, :])
return dist
def loop_col(M, distance_func):
return loop_row(M.T, distance_func)
def broadcast_row(M, distance_func):
    # Gram-matrix trick: ||x||^2 + ||y||^2 - 2<x, y> gives squared Euclidean distances.
    sq = np.sum(M ** 2, axis=1)
    dist = sq + sq[:, np.newaxis] - 2 * np.dot(M, M.T)
    # take the square root so the result matches the plain Euclidean loop versions
    return np.sqrt(np.maximum(dist, 0)) if distance_func is Euc else dist
broadcast_row(M, Euc)
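# Illustrative sketch (not part of the original submission): part 4 asks for the
# broadcasting version over column vectors; `broadcast_col` is a name introduced here
# for illustration and reuses the same Gram-matrix trick on the columns of M.
def broadcast_col(M, distance_func):
    sq = np.sum(M ** 2, axis=0)
    dist = sq + sq[:, np.newaxis] - 2 * np.dot(M.T, M)
    return np.sqrt(np.maximum(dist, 0)) if distance_func is Euc else dist

broadcast_col(M, Euc)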
###Output
_____no_output_____
###Markdown
**Exercise 2 (20 points)**. Write 3 functions to calculate the term frequency (tf), the inverse document frequency (idf) and the product (tf-idf). Each function should take a single argument `docs`, which is a dictionary of (key=identifier, value=document text) pairs, and return an appropriately sized array. Convert '-' to ' ' (space), remove punctuation, convert text to lowercase and split on whitespace to generate a collection of terms from the document text.- tf = the number of occurrences of term $i$ in document $j$- idf = $\log \frac{n}{1 + \text{df}_i}$ where $n$ is the total number of documents and $\text{df}_i$ is the number of documents in which term $i$ occurs.Print the table of tf-idf values for the following document collection```s1 = "The quick brown fox"s2 = "Brown fox jumps over the jumps jumps jumps"s3 = "The the the lazy dog elephant."s4 = "The the the the the dog peacock lion tiger elephant"docs = {'s1': s1, 's2': s2, 's3': s3, 's4': s4}```
###Code
def tf(docs):
doc_words = [doc.strip().lower().translate(str.maketrans('-', ' ', string.punctuation)).split()
for key, doc in docs.items()]
words = [word for words in doc_words for word in words]
terms = set(words)
table = np.zeros((len(terms), len(docs)), dtype = 'int')
for i, term in enumerate(terms):
for j, doc in enumerate(doc_words):
table[i, j] = doc.count(term)
df = pd.DataFrame(table, columns = docs.keys(), index=terms)
return df
def idf(docs):
doc_words = [doc.strip().lower().translate(str.maketrans('-', ' ', string.punctuation)).split()
for key, doc in docs.items()]
words = [word for words in doc_words for word in words]
terms = set(words)
table = np.zeros((len(terms)), dtype = 'int')
for i, term in enumerate(terms):
for doc in doc_words:
table[i] += int(term in doc)
table = np.log(len(docs) / (1 + table))
df = pd.DataFrame(table, columns=['idf'], index = terms)
return df
def tfidf(docs):
tf_tbl = tf(docs)
idf_tbl = idf(docs)
tfidf_tbl = pd.DataFrame(np.array(tf_tbl) * np.array(idf_tbl),columns = docs.keys(), index = idf_tbl.index)
return tfidf_tbl
s1 = "The quick brown fox"
s2 = "Brown fox jumps over the jumps jumps jumps"
s3 = "The the the lazy dog elephant."
s4 = "The the the the the dog peacock lion tiger elephant"
docs = {'s1': s1, 's2': s2, 's3': s3, 's4': s4}
print(tf(docs))
print(idf(docs))
print(tfidf(docs))
###Output
s1 s2 s3 s4
lion 0 0 0 1
the 1 1 3 5
dog 0 0 1 1
elephant 0 0 1 1
tiger 0 0 0 1
lazy 0 0 1 0
peacock 0 0 0 1
over 0 1 0 0
quick 1 0 0 0
brown 1 1 0 0
fox 1 1 0 0
jumps 0 4 0 0
idf
lion 0.693147
the -0.223144
dog 0.287682
elephant 0.287682
tiger 0.693147
lazy 0.693147
peacock 0.693147
over 0.693147
quick 0.693147
brown 0.287682
fox 0.287682
jumps 0.693147
s1 s2 s3 s4
lion 0.000000 0.000000 0.000000 0.693147
the -0.223144 -0.223144 -0.669431 -1.115718
dog 0.000000 0.000000 0.287682 0.287682
elephant 0.000000 0.000000 0.287682 0.287682
tiger 0.000000 0.000000 0.000000 0.693147
lazy 0.000000 0.000000 0.693147 0.000000
peacock 0.000000 0.000000 0.000000 0.693147
over 0.000000 0.693147 0.000000 0.000000
quick 0.693147 0.000000 0.000000 0.000000
brown 0.287682 0.287682 0.000000 0.000000
fox 0.287682 0.287682 0.000000 0.000000
jumps 0.000000 2.772589 0.000000 0.000000
###Markdown
**Exercise 3 (20 points)**. 1. Write a function that takes a matrix $M$ and an integer $k$ as arguments, and reconstructs a reduced matrix using only the $k$ largest singular values. Use the `scipy.linalg.svd` function to perform the decomposition. This is the least squares approximation to the matrix $M$ in $k$ dimensions.2. Apply the function you just wrote to the following term-frequency matrix for a set of $9$ documents using $k=2$ and print the reconstructed matrix $M'$.```M = np.array([[1, 0, 0, 1, 0, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 0, 1, 0, 0, 0, 0], [0, 1, 1, 2, 0, 0, 0, 0, 0], [0, 1, 0, 0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 1, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 1, 1]])```3. Calculate the pairwise correlation matrix for the original matrix M and the reconstructed matrix using $k=2$ singular values (you may use [scipy.stats.spearmanr](http://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html) to do the calculations). Consider the first 5 sets of documents as one group $G1$ and the last 4 as another group $G2$ (i.e. first 5 and last 4 columns). What is the average within group correlation for $G1$, $G2$ and the average cross-group correlation for G1-G2 using either $M$ or $M'$? (Do not include self-correlation in the within-group calculations.)
###Code
def reconstruct(M, k):
U, s, Vt = la.svd(M, full_matrices = False)
M_reduced = U[:, :k] @ np.diag(s[:k]) @ Vt[:k, :]
return M_reduced
M = np.array([[1, 0, 0, 1, 0, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 0, 1, 0, 0, 0, 0],
[0, 1, 1, 2, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 1, 1]])
k = 2
Mp = reconstruct(M, k)
Mp
M_cor = stats.spearmanr(M).correlation
Mp_cor = stats.spearmanr(Mp).correlation
print(M_cor)
print(Mp_cor)
G1 = M[:, :5]
G2 = M[:, 5:]
G1_cor = stats.spearmanr(G1).correlation
G1_cor_mean = G1_cor[0, 1:].mean()
print(G1_cor_mean)
G2_cor = stats.spearmanr(G2).correlation
G2_cor_mean = G2_cor[0, 1:].mean()
print(G2_cor_mean)
G1_G2_cor = stats.spearmanr(G1, G2).correlation
G1_G2_cor_mean = G1_G2_cor[0, :].mean()
print(G1_G2_cor_mean)
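# Illustrative sketch (not part of the original answer): average the full correlation
# blocks rather than only the first row, excluding the diagonal self-correlations;
# `group_corr_summary` is a helper name introduced here for illustration.
def group_corr_summary(C, n1=5):
    n2 = C.shape[0] - n1
    within1 = C[:n1, :n1][~np.eye(n1, dtype=bool)].mean()
    within2 = C[n1:, n1:][~np.eye(n2, dtype=bool)].mean()
    cross = C[:n1, n1:].mean()
    return within1, within2, cross

print("M :", group_corr_summary(M_cor))
print("M':", group_corr_summary(Mp_cor))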
###Output
-0.11309645968036279
0.3407850581248793
-0.06125878345455297
###Markdown
**Exercise 4 (40 points)**. Clustering with LSA1. Begin by loading a PubMed database of selected article titles using 'pickle'. With the following:```import pickledocs = pickle.load(open('pubmed.pic', 'rb'))``` Create a tf-idf matrix for every term that appears at least once in any of the documents. What is the shape of the tf-idf matrix? 2. Perform SVD on the tf-idf matrix to obtain $U \Sigma V^T$ (often written as $T \Sigma D^T$ in this context with $T$ representing the terms and $D$ representing the documents). If we set all but the top $k$ singular values to 0, the reconstructed matrix is essentially $U_k \Sigma_k V_k^T$, where $U_k$ is $m \times k$, $\Sigma_k$ is $k \times k$ and $V_k^T$ is $k \times n$. Terms in this reduced space are represented by $U_k \Sigma_k$ and documents by $\Sigma_k V^T_k$. Reconstruct the matrix using the first $k=10$ singular values.3. Use agglomerative hierarchical clustering with complete linkage to plot a dendrogram and comment on the likely number of document clusters with $k = 100$. Use the dendrogram function from [SciPy](https://docs.scipy.org/doc/scipy-0.15.1/reference/generated/scipy.cluster.hierarchy.dendrogram.html).4. Determine how similar each of the original documents is to the new document `data/mystery.txt`. Since $A = U \Sigma V^T$, we also have $V = A^T U \Sigma^{-1}$ using orthogonality and the rule for transposing matrix products. This suggests that in order to map the new document to the same concept space, first find the tf-idf vector $v$ for the new document - this must contain all (and only) the terms present in the existing tf-idf matrix. Then the query vector $q$ is given by $v^T U_k \Sigma_k^{-1}$. Find the 10 documents most similar to the new document and the 10 most dissimilar.
###Code
import pickle
docs = pickle.load(open('pubmed.pic', 'rb'))
tfidf_df = tfidf(docs)
tfidf_m = np.array(tfidf_df)
tfidf_m.shape
tfidf_mp = reconstruct(tfidf_m, 10)
tfidf_mp
from matplotlib import pyplot as plt
from scipy.cluster.hierarchy import dendrogram, linkage
k = 100
Z = linkage(reconstruct(tfidf_m, k), 'complete')
plt.figure(figsize=(25, 10))
plt.title('Hierarchical Clustering Dendrogram')
plt.xlabel('index')
plt.ylabel('distance')
dendrogram(
Z,
truncate_mode = 'level',
p = 15,
leaf_font_size=15., # font size for the x axis labels
)
plt.show()
U, s, Vt = la.svd(tfidf_m, full_matrices = False)
with open("mystery.txt") as f:
newtext = f.read()
idf_df = idf(docs)
newtext_words = newtext.strip().lower().translate(str.maketrans('-', ' ', string.punctuation)).split()
terms = [term for term in idf_df.index]
tf_new = np.zeros((len(terms)), dtype = 'int')
for i, term in enumerate(terms):
tf_new[i] = newtext_words.count(term)
tfidf_new = np.array(idf_df) * tf_new.reshape((-1,1))
tfidf_new.T @ U[:, :k] @ la.inv(np.diag(s[:k]))
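# Illustrative sketch (not part of the original submission): rank the existing documents
# by cosine similarity between the query vector and each document in the k-dimensional
# concept space, then list the 10 most similar and the 10 most dissimilar titles.
q = (tfidf_new.T @ U[:, :k] @ la.inv(np.diag(s[:k]))).ravel()
doc_vecs = Vt[:k, :].T  # one row per document in concept space
sims = (doc_vecs @ q) / (np.linalg.norm(doc_vecs, axis=1) * np.linalg.norm(q))
doc_ids = np.array(list(docs.keys()))
order = np.argsort(sims)
print("10 most similar:", doc_ids[order[::-1][:10]])
print("10 most dissimilar:", doc_ids[order[:10]])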
###Output
_____no_output_____ |
Notebooks/.ipynb_checkpoints/Capo-checkpoint.ipynb | ###Markdown
Display the 6 plots
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy
import sklearn
import seaborn as sns
import xlrd
import funzioni as fn
import statsmodels.api as sm
###Output
_____no_output_____
###Markdown
For sex: M=0 and F=1
###Code
data=pd.read_excel('Data/Mini Project EFSA.xlsx')
data.rename(columns={'sex \n(0=M, 1=F)':'sex'}, inplace=True)
data
###Output
_____no_output_____
###Markdown
Plots for males
###Code
male_data=data[data.sex==0]
male_data
###Output
_____no_output_____
###Markdown
Endpoint 1
###Code
male_data_1=male_data[male_data.endpoint==1]
male_data_1.plot(x='dose',y='response',yerr='SD',kind='bar',figsize=(12,6))
###Output
_____no_output_____
###Markdown
Endpoint 2
###Code
male_data_2=male_data[male_data.endpoint==2]
male_data_2.plot(x='dose',y='response',yerr='SD',kind='bar',figsize=(12,6))
###Output
_____no_output_____
###Markdown
Endpoint 3
###Code
male_data_3=male_data[male_data.endpoint==3]
male_data_3.plot(x='dose',y='response',yerr='SD',kind='bar',figsize=(12,6))
###Output
_____no_output_____
###Markdown
Plots for females
###Code
female_data=data[data.sex==1]
female_data
###Output
_____no_output_____
###Markdown
Endpoint 1
###Code
female_data_1=female_data[female_data.endpoint==1]
female_data_1.plot(x='dose',y='response',yerr='SD',kind='bar',figsize=(12,6))
###Output
_____no_output_____
###Markdown
Endpoint 2
###Code
female_data_2=female_data[female_data.endpoint==2]
female_data_2.plot(x='dose',y='response',yerr='SD',kind='bar',figsize=(12,6))
###Output
_____no_output_____
###Markdown
Endpoint 3
###Code
female_data_3=female_data[female_data.endpoint==3]
female_data_3.plot(x='dose',y='response',yerr='SD',kind='bar',figsize=(12,6))
###Output
_____no_output_____
###Markdown
Plot experiments
###Code
data_Endpoint1 = data[data.endpoint == 1]
data_Endpoint2 = data[data.endpoint == 2]
data_Endpoint3 = data[data.endpoint == 3]
Y = data_Endpoint1.response
weights = data.SD
X = data_Endpoint1.drop(columns=["response", "SD", "endpoint"])
group_of_models_endpoint1 = fn.mainForward(X, Y, weights)
display(group_of_models_endpoint1)
pred=pd.DataFrame([data_Endpoint1['number of animals'],group_of_models_endpoint1.Y_pred[1]],index=["noa","y_pred"])
pred1=pred.T
pred=pd.DataFrame([data_Endpoint1['number of animals'],data_Endpoint1['sex'],group_of_models_endpoint1.Y_pred[2]],index=["noa","sex","y_pred"])
pred2=pred.T
pred=pd.DataFrame([data_Endpoint1['number of animals'],data_Endpoint1['sex'],data_Endpoint1['dose'],group_of_models_endpoint1.Y_pred[3]],index=["noa","sex","dose","y_pred"])
pred3=pred.T
pred3
fig, axs = plt.subplots(figsize=(15,20),nrows=3)
data_Endpoint1.plot(x='number of animals', y='response',s=100,marker='x', ax=axs[0],kind='scatter')
pred1.plot(x='noa', y='y_pred',color='r', ax=axs[0])
data_Endpoint1.plot(x='number of animals', y='response', s=100, marker='x', ax=axs[1], kind='scatter')
pred2.plot(x='noa', y='y_pred', color='r', ax=axs[1])  # assumed: the two-predictor fit belongs on the second axis
# 'model1' is assumed to be a fitted statsmodels results object; params is an attribute, not a method
p = model1.params
#Plot
x = np.arange(0,40)
ax = data_Endpoint1.plot(kind='scatter', x='number of animals', y='response')
ax.plot(x, p[0] + p[1] * x)  # assumed: straight line from the intercept and first slope coefficient
ax.set_xlim([0,30])
#Seaborn ('Xvalue'/'Yvalue' are placeholders for the columns to regress)
sns.lmplot(x='Xvalue', y='Yvalue', data=data)
group_of_models_endpoint1.plot(x = 'number_of_predictors', y = 'RSS')
#print(model1.summary())
#fig, ax = plt.subplots()
#fig = sm.graphics.plot_fit(model1, 0, ax=ax)
###Output
_____no_output_____ |
notebooks/ProductClassificationSoftmax[Training].ipynb | ###Markdown
Shopee-Product-Matching 1. If you want to learn more about this amazing competition hosted by [Shopee](https://www.kaggle.com/c/shopee-product-matching), please visit the following notebook: [Shopee EDA Image AutoEncoder](https://www.kaggle.com/code/chiragtagadiya/shopee-basic-autoencoder). 2. This notebook contains the EDA and Image AutoEncoder solution.
###Code
%config Completer.use_jedi = False
###Output
_____no_output_____
###Markdown
Import Packages
###Code
import sys
sys.path.append('../input/timmmaster')
import timm
import math
import os
import numpy as np
import cv2
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import StratifiedKFold
import timm
import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader
import torch.nn.functional as F
import albumentations
from albumentations.pytorch.transforms import ToTensorV2
from torch.optim import lr_scheduler
from tqdm import tqdm
import matplotlib.pyplot as plt
from sklearn import metrics
from datetime import date
from sklearn.metrics import f1_score, accuracy_score
###Output
_____no_output_____
###Markdown
Configuration Options
###Code
TRAIN_DIR = '../input/shopee-product-matching/train_images'
TEST_DIR = '../input/shopee-product-matching/test_images'
TRAIN_CSV = '../input/crossvalidationfolds/folds.csv'
MODEL_PATH = './'
class CFG:
seed = 123
img_size = 512
classes = 11014
fc_dim = 512
epochs = 15
batch_size = 32
num_workers = 3
model_name = 'tf_efficientnet_b4'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
scheduler_params = {
"lr_start": 1e-3,
"lr_max": 1e-5 * batch_size,
"lr_min": 1e-6,
"lr_ramp_ep": 5,
"lr_sus_ep": 0,
"lr_decay": 0.8,
}
model_path='../input/21-mar-lr-large/2022-03-20_softmax_512x512_tf_efficientnet_b4.pt'
isTraining=False
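# Illustrative sketch (assumption; the training loop below actually uses
# CosineAnnealingWarmRestarts): the scheduler_params above describe a ramp-up /
# sustain / decay schedule, which could be turned into a per-epoch learning rate like this.
def ramp_lr(epoch, p=CFG.scheduler_params):
    if epoch < p["lr_ramp_ep"]:
        return p["lr_start"] + (p["lr_max"] - p["lr_start"]) * epoch / p["lr_ramp_ep"]
    if epoch < p["lr_ramp_ep"] + p["lr_sus_ep"]:
        return p["lr_max"]
    decay_epochs = epoch - p["lr_ramp_ep"] - p["lr_sus_ep"]
    return max(p["lr_min"], p["lr_max"] * p["lr_decay"] ** decay_epochs)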
###Output
_____no_output_____
###Markdown
Solution Approach* In this competition it is given that, if two or more images have the **same label group**, then they are **similar products.** * Basically, we can use this information to turn the business problem into a **multi-class classification** problem.* From the image EDA, I found that we have **11014** different classes and that the dataset is **not balanced.*** In the plot below, we can clearly see that there are **hardly 1000 data points having more than 10 products per label.** In this notebook I used the **weighted sampler technique from PyTorch to handle the imbalanced classification problem.**
###Code
train_df=pd.read_csv('../input/shopee-product-matching/train.csv')
labelGroups = train_df.label_group.value_counts()
# print(labelGroups)
plt.figure(figsize=(15,5))
plt.plot(np.arange(len(labelGroups)), labelGroups.values)
plt.xlabel("Index for unique label_group_item", size=12)
plt.ylabel("Number of product data for label ", size=12)
plt.title("label vs label frequency", size=15)
plt.show()
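# Quick, illustrative check of the claim above: count how many label groups actually
# contain more than 10 product images.
label_counts = train_df.label_group.value_counts()
print("labels with more than 10 images:", (label_counts > 10).sum())
print("total labels:", label_counts.shape[0])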
###Output
_____no_output_____
###Markdown
Create Custom DataSet
###Code
class ShopeeDataset(Dataset):
def __init__(self, df,root_dir, isTraining=False, transform=None):
self.df = df
self.transform = transform
self.root_dir = root_dir
def __len__(self):
return len(self.df)
def __getitem__(self, idx):
# get row at index idx
# print("idx",idx)
row = self.df.iloc[idx]
# print(row)
label = row.label_group
image_path = os.path.join(self.root_dir, row.image)
# read image convert to RGB and apply augmentation
image = cv2.imread(image_path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
if self.transform:
aug = self.transform(image=image)
image = aug['image']
return image, torch.tensor(label).long()
###Output
_____no_output_____
###Markdown
Create Data Augmentation For training and validation Data
###Code
def getAugmentation(IMG_SIZE, isTraining=False):
if isTraining:
return albumentations.Compose([
albumentations.Resize(IMG_SIZE, IMG_SIZE, always_apply=True),
albumentations.HorizontalFlip(p=0.5),
albumentations.VerticalFlip(p=0.5),
albumentations.Rotate(limit=120, p=0.75),
albumentations.RandomBrightness(limit=(0.09, 0.6), p=0.5),
albumentations.Normalize(
mean = [0.485, 0.456, 0.406],
std = [0.229, 0.224, 0.225]
),
ToTensorV2(p=1.0)
])
else:
return albumentations.Compose([
albumentations.Resize(IMG_SIZE, IMG_SIZE, always_apply=True),
albumentations.Normalize(
mean = [0.485, 0.456, 0.406],
std = [0.229, 0.224, 0.225]
),
ToTensorV2(p=1.0)
])
###Output
_____no_output_____
###Markdown
Build Model
###Code
class ShopeeLabelGroupClassfier(nn.Module):
def __init__(self,
model_name='tf_efficientnet_b0',
loss_fn='softmax',
classes = CFG.classes,
fc_dim = CFG.fc_dim,
pretrained=True,
use_fc=True,
isTraining=False
):
super(ShopeeLabelGroupClassfier,self).__init__()
# create bottlenack backbone network from pretrained model
self.backbone = timm.create_model(model_name, pretrained=pretrained)
in_features = self.backbone.classifier.in_features
# we will put FC layers over backbone to classfy images based on label groups
self.backbone.classifier = nn.Identity()
self.backbone.global_pool = nn.Identity()
self.pooling = nn.AdaptiveAvgPool2d(1)
self.use_fc = use_fc
self.loss_fn =loss_fn
# build top fc layers
if self.use_fc:
self.dropout = nn.Dropout(0.2)
self.fc = nn.Linear(in_features,fc_dim )
self.bn = nn.BatchNorm1d(fc_dim)
in_features = fc_dim
self.loss_fn = loss_fn
if self.loss_fn=='softmax':
self.final = nn.Linear(in_features, CFG.classes)
def forward(self, image, label):
features = self.get_features(image)
if self.loss_fn=='softmax':
logits = self.final(features)
return logits
def get_features(self,inp):
batch_dim = inp.shape[0]
inp = self.backbone(inp)
inp = self.pooling(inp).view(batch_dim, -1)
if self.use_fc:
inp = self.dropout(inp)
inp = self.fc(inp)
inp = self.bn(inp)
return inp
# shoppe_label_classfier = ShopeeLabelGroupClassfier()
###Output
_____no_output_____
###Markdown
Training Single Epoch
###Code
def training_one_epoch(epoch_num,model, dataloader,optimizer, scheduler, device, loss_criteria):
avgloss = 0.0
# put model in traning model
model.train()
tq = tqdm(enumerate(dataloader), total=len(dataloader))
for idx, data in tq:
batch_size = data[0].shape[0]
images = data[0]
targets = data[1]
# zero out gradient
optimizer.zero_grad()
# put input and target to device
images = images.to(device)
targets = targets.to(device)
# pass input to the model
output = model(images,targets)
# get loss
loss = loss_criteria(output,targets)
# backpropogation
loss.backward()
# update learning rate step
optimizer.step()
# avg loss
avgloss += loss.item()
tq.set_postfix({'loss' : '%.6f' %float(avgloss/(idx+1)), 'LR' : optimizer.param_groups[0]['lr']})
# lr scheduler step after each epoch
scheduler.step()
return avgloss / len(dataloader)
###Output
_____no_output_____
###Markdown
Validating Single Epoch
###Code
def validation_one_epoch(model, dataloader, epoch, device, loss_criteria):
avgloss = 0.0
# put model in traning model
model.eval()
    tq = tqdm(enumerate(dataloader), total=len(dataloader), desc="Validation Epoch " + str(epoch+1))
y_true=[]
y_pred=[]
with torch.no_grad():
for idx, data in tq:
batch_size = data[0].shape[0]
images = data[0]
targets = data[1]
images = images.to(device)
targets = targets.to(device)
output = model(images,targets)
predicted_label=torch.argmax(output,1)
y_true.extend(targets.detach().cpu().numpy())
y_pred.extend(predicted_label.detach().cpu().numpy())
loss = loss_criteria(output,targets)
avgloss += loss.item()
tq.set_postfix({'validation loss' : '%.6f' %float(avgloss/(idx+1))})
f1_score_metric = f1_score(y_true, y_pred, average='micro')
tq.set_postfix({'validation f1 score' : '%.6f' %float(f1_score_metric)})
return avgloss / len(dataloader),f1_score_metric
###Output
_____no_output_____
###Markdown
Helper Function for Handling class imbalanced data
###Code
import numpy as np
def get_class_weights(data):
weight_dict=dict()
# Format of row : PostingId, Image, ImageHash, Title, LabelGroup
# LabelGroup index is 4 and it is representating class information
for row in data.values:
weight_dict[row[4]]=0
# Word dictionary keys will be label and value will be frequency of label in dataset
for row in data.values:
weight_dict[row[4]]+=1
# for each data point get label count data
class_sample_count= np.array([weight_dict[row[4]] for row in data.values])
# each data point weight will be inverse of frequency
weight = 1. / class_sample_count
weight=torch.from_numpy(weight)
return weight
###Output
_____no_output_____
###Markdown
Training Loop
###Code
def run_training():
data = pd.read_csv('../input/crossvalidationfolds/folds.csv')
# label encoding
labelencoder= LabelEncoder()
data['label_group_original']=data['label_group']
data['label_group'] = labelencoder.fit_transform(data['label_group'])
#data['weights'] = data['label_group'].map(1/data['label_group'].value_counts())
# create training_data and validation data initially not using k fold
train_data = data[data['fold']!=0]
# get weights for classes
samples_weight=get_class_weights(train_data)
print("samples_weight", len(samples_weight))
validation_data = data[data['fold']==0]
# training augmentation
train_aug = getAugmentation(CFG.img_size,isTraining=True )
validation_aug = getAugmentation(CFG.img_size, isTraining=False)
# create custom train and validation dataset
trainset = ShopeeDataset(train_data, TRAIN_DIR, isTraining=True, transform = train_aug)
validset = ShopeeDataset(validation_data, TRAIN_DIR, isTraining=False, transform = validation_aug)
print(len(data), len(samples_weight))
print(len(trainset))
# create data sampler
sampler = torch.utils.data.sampler.WeightedRandomSampler(samples_weight, num_samples=len(samples_weight))
# create custom training and validation data loader num_workers=CFG.num_workers,
train_dataloader = DataLoader(trainset, batch_size=CFG.batch_size,
drop_last=True,pin_memory=True, sampler=sampler)
validation_dataloader = DataLoader(validset, batch_size=CFG.batch_size,
drop_last=True,pin_memory=True)
# define loss function
loss_criteria = nn.CrossEntropyLoss()
loss_criteria.to(CFG.device)
# define model
model = ShopeeLabelGroupClassfier()
model.to(CFG.device)
# define optimzer
optimizer = torch.optim.Adam(model.parameters(),lr= CFG.scheduler_params['lr_start'])
# learning rate scheudler
scheduler = lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0=7, T_mult=1, eta_min=1e-6, last_epoch=-1)
history = {'train_loss':[],'validation_loss':[]}
for epoch in range(CFG.epochs):
# get current epoch training loss
avg_train_loss = training_one_epoch(epoch_num = epoch,
model = model,
dataloader = train_dataloader,
optimizer = optimizer,
scheduler = scheduler,
device = CFG.device,
loss_criteria = loss_criteria)
# get current epoch validation loss
        avg_validation_loss, val_f1 = validation_one_epoch(model = model,
dataloader = validation_dataloader,
epoch = epoch,
device = CFG.device,
loss_criteria = loss_criteria)
history['train_loss'].append(avg_train_loss)
history['validation_loss'].append(avg_validation_loss)
# save model
torch.save(model.state_dict(), MODEL_PATH + str(date.today()) +'_softmax_512x512_{}.pt'.format(CFG.model_name))
torch.save({
'epoch': epoch,
'model_state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
# 'scheduler': lr_scheduler.state_dict()
},
MODEL_PATH + str(date.today()) +'_softmax_512x512_{}_checkpoints.pt'.format(CFG.model_name)
)
return model, history
history=None
if CFG.isTraining:
model, history = run_training()
###Output
_____no_output_____
###Markdown
Plot Training and Validation Loss and Accuracy
###Code
if CFG.isTraining:
epoch_lst = [ i+1 for i in range(15)]
plt.plot(epoch_lst,history['train_loss'])
plt.xlabel("Epoch number")
plt.ylabel('Training Loss')
plt.title('Training Loss SoftMax Loss Function')
plt.show()
if CFG.isTraining:
plt.plot(epoch_lst,history['validation_loss'])
plt.xlabel("Epoch number")
plt.ylabel('Validation Loss')
plt.title('Validation Loss SoftMax Loss Function')
plt.show()
###Output
_____no_output_____
###Markdown
Prediction
###Code
def prediction(model):
data = pd.read_csv('../input/crossvalidationfolds/folds.csv')
# label encoding
labelencoder= LabelEncoder()
data['label_group'] = labelencoder.fit_transform(data['label_group'])
# Prepare Validation data
validation_data = data[data['fold']==0]
validation_aug = getAugmentation(CFG.img_size,isTraining=False)
validset = ShopeeDataset(validation_data, TRAIN_DIR, isTraining=False, transform = validation_aug)
test_data_loader = torch.utils.data.DataLoader(validset,batch_size=CFG.batch_size)
# put model in evalution mode
model.eval()
tq = tqdm(enumerate(test_data_loader))
y_true=[]
y_pred=[]
with torch.no_grad():
for idx, data in tq:
images = data[0]
targets = data[1]
images = images.to(CFG.device)
targets = targets.to(CFG.device)
y_true.extend(targets.detach().cpu().numpy())
output = model(images,targets)
outputs=torch.argmax(output,1)
y_pred.extend(outputs.detach().cpu().numpy())
f1_score_metric = f1_score(y_true, y_pred, average='micro')
return f1_score_metric
if not CFG.isTraining:
model = ShopeeLabelGroupClassfier(pretrained=False).to(CFG.device)
model.load_state_dict(torch.load(CFG.model_path))
f1=prediction(model)
print("F1 score {}".format(f1))
###Output
215it [02:25, 1.48it/s] |
SIC_AI_Coding_Exercises/SIC_AI_Chapter_02_Coding_Exercises/ex_0112.ipynb | ###Markdown
Coding Exercise 0112 1. Working with Excel documents:
###Code
# Install the library.
!pip install openpyxl
# Import the required libraries.
import openpyxl
import os
!wget --no-clobber https://github.com/stefannae/SIC-Artificial-Intelligence/raw/main/SIC_AI_Coding_Exercises/SIC_AI_Chapter_02_Coding_Exercises/my_excel_workbook.xlsx
# Go to the directory where the file is located.
os.chdir(r'~~') # Please, replace the path with your own.
###Output
_____no_output_____
###Markdown
1.1. Working with existing documents:
###Code
wb = openpyxl.load_workbook('my_excel_workbook.xlsx') # Open an workbook.
wb.sheetnames # Show the sheet names as a list.
sh = wb['Sheet1'] # Get the 'Sheet1' as an object.
cl = sh['A1'] # Get the 'A1' cell as an object.
print(cl.value) # Show the cell value.
print(sh['A1'].value) # Another way to show the value of 'A1' cell.
print(sh.cell(1,1).value) # Get the cell value by specifying the row and column positions.
# Show values from several cells.
for i in range(1,11):
print(sh.cell(i,1).value )
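# Illustrative alternative (assumes openpyxl >= 2.6): read the same block of cells
# with iter_rows instead of indexing one cell at a time.
for row in sh.iter_rows(min_row=1, max_row=10, max_col=1, values_only=True):
    print(row[0])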
###Output
_____no_output_____
###Markdown
1.2. Creating a new document:
###Code
# Create a new workbook.
my_wb = openpyxl.Workbook() # Create a new workbook object in the memory.
print(my_wb.sheetnames) # In this workbook there is only the 'Sheet'.
# Manipulating the content of new new workbook.
my_sh = my_wb['Sheet']
my_sh['A1'].value = 999 # Change the value of a cell.
my_sh['A2'] = 666 # Change the value of another cell. This is OK.
my_sh.title = 'MySheet1' # Change the sheet name.
my_sh2 = my_wb.create_sheet(index = 0, title = 'MySheet2') # Insert a new sheet at the first position.
my_sh2['A1'].value = 777 # Change the value of a cell in the new sheet.
print(my_wb.sheetnames)
# Save the workbook object as a file.
my_wb.save('my_new_excel_workbook.xlsx')
###Output
_____no_output_____ |
practice/week-14/Regression-All-in-One.ipynb | ###Markdown
BIG DATA ANALYTICS PROGRAMMING : Regression Task We work through a regression problem from start to finish.---References- https://github.com/rickiepark/handson-ml2/blob/master/02_end_to_end_machine_learning_project.ipynb 1. Load Dataset
###Code
import pandas as pd
import numpy as np
df = pd.read_csv("data/housing.csv")
###Output
_____no_output_____
###Markdown
2. Check basic information about the data
###Code
df.head()
df.info()
df['ocean_proximity'].value_counts()
df.describe()
%matplotlib inline
import matplotlib.pyplot as plt
df.hist(bins=50, figsize=(20,15))
plt.show()
###Output
_____no_output_____
###Markdown
3. Split the dataset into training and test sets up front
###Code
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import train_test_split
df["income_cat"] = pd.cut(df["median_income"],
bins=[0., 1.5, 3.0, 4.5, 6., np.inf],
labels=[1, 2, 3, 4, 5])
df.head()
df["income_cat"].value_counts()
df["income_cat"].hist()
train_set_random, test_set_random = train_test_split(df, test_size=0.2, random_state=42)
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train_index, test_index in split.split(df, df["income_cat"]):
strat_train_set = df.loc[train_index]
strat_test_set = df.loc[test_index]
strat_test_set["income_cat"].value_counts() / len(strat_test_set)
test_set_random['income_cat'].value_counts() / len(test_set_random)
df["income_cat"].value_counts() / len(df)
def income_cat_proportions(data):
return data["income_cat"].value_counts() / len(data)
compare_props = pd.DataFrame({
"Overall": income_cat_proportions(df),
"Stratified": income_cat_proportions(strat_test_set),
"Random": income_cat_proportions(test_set_random),
}).sort_index()
compare_props["Rand. %error"] = 100 * compare_props["Random"] / compare_props["Overall"] - 100
compare_props["Strat. %error"] = 100 * compare_props["Stratified"] / compare_props["Overall"] - 100
compare_props
for set_ in (strat_train_set, strat_test_set):
set_.drop("income_cat", axis=1, inplace=True)
df = strat_train_set.copy()
###Output
_____no_output_____
###Markdown
4. Exploratory data analysis
###Code
df.plot(kind="scatter", x="longitude", y="latitude")
df.plot(kind="scatter", x="longitude", y="latitude", alpha=0.1)
df.plot(kind="scatter", x="longitude", y="latitude", alpha=0.4,
s=df["population"]/100, label="population", figsize=(10,7),
c="median_house_value", cmap=plt.get_cmap("jet"), colorbar=True,
sharex=False)
plt.legend()
import matplotlib.image as mpimg
california_img=mpimg.imread("data/california.png")
ax = df.plot(kind="scatter", x="longitude", y="latitude", figsize=(10,7),
s=df['population']/100, label="Population",
c="median_house_value", cmap=plt.get_cmap("jet"),
colorbar=False, alpha=0.4,
)
plt.imshow(california_img, extent=[-124.55, -113.80, 32.45, 42.05], alpha=0.5,
cmap=plt.get_cmap("jet"))
plt.ylabel("Latitude", fontsize=14)
plt.xlabel("Longitude", fontsize=14)
prices = df["median_house_value"]
tick_values = np.linspace(prices.min(), prices.max(), 11)
cbar = plt.colorbar(ticks=tick_values/prices.max())
cbar.ax.set_yticklabels(["$%dk"%(round(v/1000)) for v in tick_values], fontsize=14)
cbar.set_label('Median House Value', fontsize=16)
plt.legend(fontsize=16)
plt.show()
corr_matrix = df.corr()
corr_matrix["median_house_value"].sort_values(ascending=False)
from pandas.plotting import scatter_matrix
attributes = ["median_house_value", "median_income", "total_rooms",
"housing_median_age"]
scatter_matrix(df[attributes], figsize=(12, 8))
df.plot(kind="scatter", x="median_income", y="median_house_value",
alpha=0.1)
plt.axis([0, 16, 0, 550000])
###Output
_____no_output_____
###Markdown
5. Create additional attributes
###Code
df["rooms_per_household"] = df["total_rooms"]/df["households"]
df["bedrooms_per_room"] = df["total_bedrooms"]/df["total_rooms"]
df["population_per_household"]=df["population"]/df["households"]
corr_matrix = df.corr()
corr_matrix["median_house_value"].sort_values(ascending=False)
df.plot(kind="scatter", x="bedrooms_per_room", y="median_house_value",
alpha=0.2)
plt.show()
df.describe()
###Output
_____no_output_____
###Markdown
6. Data preprocessing 6-1. Separate the label and handle missing values
###Code
df = strat_train_set.drop("median_house_value", axis=1) # drop the label to build the training features
df_labels = strat_train_set["median_house_value"].copy()
sample_incomplete_rows = df[df.isnull().any(axis=1)].head()
sample_incomplete_rows
sample_incomplete_rows.dropna(subset=["total_bedrooms"]) # option 1
sample_incomplete_rows.drop("total_bedrooms", axis=1) # option 2
median = df["total_bedrooms"].median()
df["total_bedrooms"].fillna(median, inplace=True) # option 3
df.info()
###Output
_____no_output_____
###Markdown
6-2. Encode the categorical data
###Code
df_cat = df[["ocean_proximity"]]
df_cat.head(10)
from sklearn.preprocessing import OrdinalEncoder
ordinal_encoder = OrdinalEncoder()
df_cat_encoded = ordinal_encoder.fit_transform(df_cat)
df_cat_encoded[:10]
ordinal_encoder.categories_
from sklearn.preprocessing import OneHotEncoder
cat_encoder = OneHotEncoder()
df_cat_1hot = cat_encoder.fit_transform(df_cat)
df_cat_1hot
cat_encoder.get_feature_names()
df_cat_1hot.toarray()
for index, category in enumerate(cat_encoder.get_feature_names()):
print(index)
print(category)
df[category] = df_cat_1hot.toarray()[:,index]
df.head()
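# Illustrative alternative (sketch): pandas can build the same one-hot columns in a single
# call; the scikit-learn encoder above is kept because the fitted encoder is reused on the
# test set later.
pd.get_dummies(df_cat, prefix="ocean").head()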
organized_df = df.drop("ocean_proximity", axis=1)
organized_df
###Output
_____no_output_____
###Markdown
6-3. Normalize the numerical data
###Code
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
X = scaler.fit_transform(organized_df)
y = df_labels.values
X
y
###Output
_____no_output_____
###Markdown
7. Apply a simple model to sanity-check the prepared dataset
###Code
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.linear_model import LinearRegression
reg = LinearRegression()
reg.fit(X, y)
###Output
_____no_output_____
###Markdown
7-1. Apply the preprocessing to the test dataset
###Code
def organizing(encoder, scaler, data):
for index, category in enumerate(encoder.get_feature_names()):
df_cat = data[["ocean_proximity"]]
data[category] = encoder.transform(df_cat).toarray()[:,index]
data.drop("ocean_proximity", axis=1, inplace=True)
X = scaler.transform(data)
return X
test_y = strat_test_set['median_house_value']
test_X = strat_test_set.drop("median_house_value", axis=1) # drop the label to keep only the test features
test_X.info()
test_X["total_bedrooms"].fillna(median,inplace=True)
test_X.info()
test_X = organizing(cat_encoder, scaler, test_X)
print(test_X)
###Output
_____no_output_____
###Markdown
7-2. Prediction
###Code
pred_y = reg.predict(test_X)
mse = mean_squared_error(test_y, pred_y)
rmse = np.sqrt(mse)
print(rmse)
mae = mean_absolute_error(test_y, pred_y)
print(mae)
###Output
_____no_output_____
###Markdown
8. Find the best model
###Code
from sklearn.utils import all_estimators
estimators = all_estimators(type_filter='regressor')
all_regs = []
for name, RegressorClass in estimators:
try:
reg = RegressorClass()
all_regs.append(reg)
print('Appending', name)
except:
pass
results = []
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestRegressor
rfr = RandomForestRegressor(random_state=42)
scores = cross_val_score(rfr, X, y,
scoring="neg_mean_squared_error", cv=10)
scores = np.sqrt(-scores)
print("점수:", scores)
print("평균:", scores.mean())
print("표준 편차:", scores.std())
SUPER_SLOW_REGRESSION = ["GaussianProcessRegressor","KernelRidge"]
for reg in all_regs:
reg_name = reg.__class__.__name__
if reg_name not in SUPER_SLOW_REGRESSION:
try:
# reg.fit(X, y)
scores = cross_val_score(reg, X, y, scoring="neg_mean_squared_error", cv=5)
scores = np.sqrt(-scores)
if not scores.mean():
break
print("{}: RMSE {}".format(reg.__class__.__name__, scores.mean()))
result = {
"Name":reg.__class__.__name__,
"RMSE":scores.mean()
}
results.append(result)
except:
pass
result_df = pd.DataFrame(results)
result_df
result_df.sort_values(by="RMSE")
###Output
_____no_output_____
###Markdown
9. Fine-tune the model
###Code
from sklearn.model_selection import GridSearchCV
param_grid = [
{'n_estimators': [50, 70, 100, 120, 150], 'max_features': [2, 4, 6, 8]},
]
forest_reg = RandomForestRegressor(random_state=42)
grid_search = GridSearchCV(forest_reg, param_grid, cv=5, verbose=2,
scoring='neg_mean_squared_error',
return_train_score=True)
grid_search.fit(X, y)
grid_search.best_params_
reg = RandomForestRegressor(max_features=6, n_estimators=150,random_state=42)
reg.fit(X,y)
pred_y = reg.predict(test_X)
mse = mean_squared_error(test_y, pred_y)
rmse = np.sqrt(mse)
mae = mean_absolute_error(test_y, pred_y)
print("RMSE {}, MAE {}".format(rmse,mae))
###Output
_____no_output_____
###Markdown
Q. Try again after removing the unimportant attributes!
###Code
feature_importances = grid_search.best_estimator_.feature_importances_
print(feature_importances)
features_with_importance = zip(df.columns, grid_search.best_estimator_.feature_importances_)
sorted(features_with_importance,key=lambda f : f[1], reverse=True)
###Output
_____no_output_____ |
_notebooks/2020-05-06-Shortest-Unsorted-Continuous-Subarray.ipynb | ###Markdown
"Shortest Unsorted Continuous Subarray"> "[[Leetcode]](https://leetcode.com/problems/shortest-unsorted-continuous-subarray/)[Arrays]"- toc: true - badges: true- comments: true- categories: [Problem Solving,Leetcode]- comments: true- author: Teja Kummarikuntla Problem StatementGiven an integer array, you need to find one continuous subarray that if you only sort this subarray in ascending order, then the whole array will be sorted in ascending order, too.You need to find the shortest such subarray and output its length. [URL](https://leetcode.com/problems/shortest-unsorted-continuous-subarray/) Example 1:```Input: [2, 6, 4, 8, 10, 9, 15]Output: 5``` Explanation: You need to sort [6, 4, 8, 10, 9] in ascending order to make the whole array sorted in ascending order. Note:```- Then length of the input array is in range [1, 10,000].- The input array may contain duplicates, so ascending order here means <=.``` Approach 1 [Reference](https://leetcode.com/problems/shortest-unsorted-continuous-subarray/discuss/609557/Python-Using-sorted-array-to-cross-check-(Runtime%3A-O(nlog(n)))
###Code
#collapse-hide
from typing import List
class Solution:
def findUnsortedSubarray(self, nums: List[int]) -> int:
sortedArr = sorted(nums)
startIndex = 0
endIndex = len(nums)-1
if nums == sortedArr:
return 0
while(nums[endIndex] == sortedArr[endIndex]):
endIndex -= 1
while(nums[startIndex] == sortedArr[startIndex]):
startIndex += 1
return (endIndex-startIndex)+1
sol = Solution()
sol.findUnsortedSubarray([2, 6, 4, 8, 10, 9, 15])
sol.findUnsortedSubarray([])
sol.findUnsortedSubarray([1, 2, 3, 4])
###Output
_____no_output_____ |
Applying KNN Classifier on Iris Dataset.ipynb | ###Markdown
Loading Required Libraries
###Code
# Loading Required Libraries
import numpy as np
import pandas as pd
import sklearn
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.datasets import load_iris
from matplotlib import pyplot as plt
from sklearn import datasets
from sklearn import tree
###Output
_____no_output_____
###Markdown
Exploring Iris Dataset
###Code
# Loading Datasets
iris_data = load_iris()
iris = pd.DataFrame(iris_data.data)
iris_targets = pd.DataFrame(iris_data.target)
# Printing the feature names of the Iris data
print ("Features Name : ", iris_data.feature_names)
# Printing the target names of the Iris data
print ("Targets Name : ", iris_data.target_names)
# Shape of Datasets
print ("Dataset Shape: ", iris.shape)
# First Five Sample features
print ("Dataset: ",iris.head())
# First Five Sample Targets
print ("Dataset: ",iris_targets.head())
###Output
Features Name : ['sepal length (cm)', 'sepal width (cm)', 'petal length (cm)', 'petal width (cm)']
Targets Name : ['setosa' 'versicolor' 'virginica']
Dataset Shape: (150, 4)
Dataset: 0 1 2 3
0 5.1 3.5 1.4 0.2
1 4.9 3.0 1.4 0.2
2 4.7 3.2 1.3 0.2
3 4.6 3.1 1.5 0.2
4 5.0 3.6 1.4 0.2
Dataset: 0
0 0
1 0
2 0
3 0
4 0
###Markdown
Splitting Dataset into training and testing sets
###Code
# Features and Targets
X = iris_data.data
Y = iris_data.target
# Splitting the Dataset into Training and Testing sets
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size = 0.2, random_state = 42)
###Output
_____no_output_____
###Markdown
Normalizing the dataset
###Code
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
X_train[0:4,:]
###Output
_____no_output_____
###Markdown
KNN Classifier
###Code
from sklearn.neighbors import KNeighborsClassifier
KNN = KNeighborsClassifier(n_neighbors = 5)
KNN.fit(X_train, y_train)
###Output
_____no_output_____
###Markdown
Predicting
###Code
Y_pred = KNN.predict(X_test)
###Output
_____no_output_____
###Markdown
Accuracy & Confusion Matrix
###Code
from sklearn.metrics import confusion_matrix
#Accuray of the Model
print("Accuracy:", accuracy_score(y_test, Y_pred)*100, "%")
print(confusion_matrix(y_test, Y_pred))
###Output
Accuracy: 100.0 %
[[10 0 0]
[ 0 9 0]
[ 0 0 11]]
###Markdown
Calculating Error for K Values
###Code
error = []
# Calculating error for K values between 1 and 40
for i in range(1, 40):
knn = KNeighborsClassifier(n_neighbors=i)
knn.fit(X_train, y_train)
pred_i = knn.predict(X_test)
error.append(np.mean(pred_i != y_test))
print(np.mean(pred_i != y_test))
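# Illustrative sketch: a more robust way to choose k is cross-validation on the training
# set instead of a single train/test split.
from sklearn.model_selection import GridSearchCV
param_grid = {'n_neighbors': list(range(1, 40))}
grid = GridSearchCV(KNeighborsClassifier(), param_grid, cv=5)
grid.fit(X_train, y_train)
print("best k:", grid.best_params_, "cv accuracy:", grid.best_score_)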
###Output
0.0
0.0
0.0
0.0
0.0
0.0
0.0
0.0
0.0
0.0
0.0
0.0
0.0
0.0
0.0
0.0
0.0
0.0
0.0
0.0
0.0
0.0
0.0
0.0
0.0
0.03333333333333333
0.0
0.03333333333333333
0.03333333333333333
0.03333333333333333
0.03333333333333333
0.03333333333333333
0.03333333333333333
0.03333333333333333
0.03333333333333333
0.03333333333333333
0.06666666666666667
0.03333333333333333
0.03333333333333333
###Markdown
Plotting Error for K Values
###Code
plt.figure(figsize=(12, 6))
plt.plot(range(1, 40), error, color='red', linestyle='dashed', marker='o',
markerfacecolor='blue', markersize=10)
plt.title('Error Rate K Value')
plt.xlabel('K Value')
plt.ylabel('Mean Error')
###Output
_____no_output_____ |
examples/private-set-intersection/PSI_Client_Syft_Data_Scientist.ipynb | ###Markdown
from: https://github.com/OpenMined/PSI/blob/master/private_set_intersection/python/tests.py Goto --------> [server-step-1] [Client-Step-1] get reveal_intersection
###Code
duet.store.pandas
reveal_intersection_ptr = duet.store["reveal_intersection"]
reveal_intersection = reveal_intersection_ptr.get(
request_block=True,
name="reveal_intersection",
reason="Are we revealing or not?",
timeout_secs=10,
delete_obj=False
)
reveal_intersection
###Output
_____no_output_____
###Markdown
send client_items_len
###Code
client = psi.client.CreateWithNewKey(reveal_intersection)
client_items = ["Element " + str(i) for i in range(1000)]
sy_client_items_len = sy.lib.python.Int(len(client_items))
sy_client_items_len_ptr = sy_client_items_len.send(duet, searchable=True, tags=["client_items_len"], description="client items length")
duet.store.pandas
###Output
_____no_output_____
###Markdown
Goto --------> [Server-Step-2] [Client-Step-2] get setup message
###Code
duet.store.pandas
setup_ptr = duet.store["setup"]
setup = setup_ptr.get(
request_block=True,
name="setup",
reason="To get the setup",
timeout_secs=10,
delete_obj=False
)
type(setup)
###Output
_____no_output_____
###Markdown
send request
###Code
request = client.CreateRequest(client_items)
request_ptr = request.send(duet, tags=["request"], searchable=True, description="client request")
duet.store.pandas
###Output
_____no_output_____
###Markdown
Goto --------> [Server-Step-3] [Client-Step-3] get response
###Code
duet.store.pandas
response_ptr = duet.store["response"]
response = response_ptr.get(
request_block=True,
name="response",
reason="To get the response",
timeout_secs=10,
)
type(response)
###Output
_____no_output_____
###Markdown
get result
###Code
if reveal_intersection:
intersection = client.GetIntersection(setup, response)
iset = set(intersection)
for idx in range(len(client_items)):
if idx % 2 == 0:
assert idx in iset
else:
assert idx not in iset
if not reveal_intersection:
intersection = client.GetIntersectionSize(setup, response)
assert intersection >= (len(client_items) / 2.0)
assert intersection <= (1.1 * len(client_items) / 2.0)
intersection
###Output
_____no_output_____
###Markdown
from: https://github.com/OpenMined/PSI/blob/master/private_set_intersection/python/tests.py Goto --------> [server-step-1] [Client-Step-1] get reveal_intersection
###Code
duet.store.pandas
reveal_intersection_ptr = duet.store["reveal_intersection"]
reveal_intersection = reveal_intersection_ptr.get(
request_block=True,
name="reveal_intersection",
reason="Are we revealing or not?",
timeout_secs=10,
delete_obj=False
)
reveal_intersection
###Output
_____no_output_____
###Markdown
send client_items_len
###Code
client = psi.client.CreateWithNewKey(reveal_intersection)
client_items = ["Element " + str(i) for i in range(1000)]
sy_client_items_len = sy.lib.python.Int(len(client_items))
sy_client_items_len_ptr = sy_client_items_len.send(duet, pointable=True, tags=["client_items_len"], description="client items length")
duet.store.pandas
###Output
_____no_output_____
###Markdown
Goto --------> [Server-Step-2] [Client-Step-2] get setup message
###Code
duet.store.pandas
setup_ptr = duet.store["setup"]
setup = setup_ptr.get(
request_block=True,
name="setup",
reason="To get the setup",
timeout_secs=10,
delete_obj=False
)
type(setup)
###Output
_____no_output_____
###Markdown
send request
###Code
request = client.CreateRequest(client_items)
request_ptr = request.send(duet, tags=["request"], pointable=True, description="client request")
duet.store.pandas
###Output
_____no_output_____
###Markdown
Goto --------> [Server-Step-3] [Client-Step-3] get response
###Code
duet.store.pandas
response_ptr = duet.store["response"]
response = response_ptr.get(
request_block=True,
name="response",
reason="To get the response",
timeout_secs=10,
)
type(response)
###Output
_____no_output_____
###Markdown
get result
###Code
if reveal_intersection:
intersection = client.GetIntersection(setup, response)
iset = set(intersection)
for idx in range(len(client_items)):
if idx % 2 == 0:
assert idx in iset
else:
assert idx not in iset
if not reveal_intersection:
intersection = client.GetIntersectionSize(setup, response)
assert intersection >= (len(client_items) / 2.0)
assert intersection <= (1.1 * len(client_items) / 2.0)
intersection
###Output
_____no_output_____ |
Train MRCNN.ipynb | ###Markdown
Validating the model
###Code
import os
import cv2
import sys
import random
import math
import re
import time
import numpy as np
import tensorflow as tf
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import skimage
import glob
# Root directory of the project
ROOT_DIR = '/content/drive/My Drive/Mask_RCNN'
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn import utils
from mrcnn import visualize
from mrcnn.visualize import display_images
import mrcnn.model as modellib
from mrcnn.model import log
import cloud
%matplotlib inline
# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
custom_WEIGHTS_PATH = "/content/drive/My Drive/Mask_RCNN/logs/cloud20200316T1012/mask_rcnn_cloud_0010.h5" # TODO: update this path
config = cloud.CloudConfig()
custom_DIR = os.path.join(ROOT_DIR, "Cloud_Dataset")
#Override the training configurations with a few
# changes for inferencing.
class InferenceConfig(config.__class__):
# Run detection on one image at a time
GPU_COUNT = 1
IMAGES_PER_GPU = 1
config = InferenceConfig()
config.display()
# Device to load the neural network on.
# Useful if you're training a model on the same
# machine, in which case use CPU and leave the
# GPU for training.
DEVICE = "/gpu:0" # /cpu:0 or /gpu:0
# Inspect the model in training or inference modes
# values: 'inference' or 'training'
# TODO: code for 'training' test mode not ready yet
TEST_MODE = "inference"
def get_ax(rows=1, cols=1, size=16):
"""Return a Matplotlib Axes array to be used in
all visualizations in the notebook. Provide a
central point to control graph sizes.
Adjust the size attribute to control how big to render images
"""
_, ax = plt.subplots(rows, cols, figsize=(size*cols, size*rows))
return ax
# Load validation dataset
dataset = cloud.CloudDataset()
dataset.load_custom(custom_DIR, "val")
# Must call before using the dataset
dataset.prepare()
print("Images: {}\nClasses: {}".format(len(dataset.image_ids), dataset.class_names))
# Create model in inference mode
with tf.device(DEVICE):
model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR,config=config)
# load the last model you trained
# weights_path = model.find_last()[1]
# Load weights
print("Loading weights ", custom_WEIGHTS_PATH)
model.load_weights(custom_WEIGHTS_PATH, by_name=True)
from importlib import reload
reload(visualize)
image_id = random.choice(dataset.image_ids)
image, image_meta, gt_class_id, gt_bbox, gt_mask =\
modellib.load_image_gt(dataset, config, image_id, use_mini_mask=False)
info = dataset.image_info[image_id]
print("image ID: {}.{} ({}) {}".format(info["source"], info["id"], image_id,
dataset.image_reference(image_id)))
# Run object detection
results = model.detect([image], verbose=1)
# Display results
ax = get_ax(1)
r = results[0]
visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],
dataset.class_names, r['scores'], ax=ax,
title="Predictions")
# log("gt_class_id", gt_class_id)
# log("gt_bbox", gt_bbox)
print(r['rois'])
# print(r['masks'])
# log("gt_mask", gt_mask)
###Output
image ID: cloud.satellite6.jpg (5) /content/drive/My Drive/Mask_RCNN/Cloud_Dataset/val/satellite6.jpg
Processing 1 images
image shape: (1024, 1024, 3) min: 0.00000 max: 255.00000 uint8
molded_images shape: (1, 1024, 1024, 3) min: -123.70000 max: 151.10000 float64
image_metas shape: (1, 14) min: 0.00000 max: 1024.00000 int64
anchors shape: (1, 261888, 4) min: -0.35390 max: 1.29134 float32
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:422: The name tf.global_variables is deprecated. Please use tf.compat.v1.global_variables instead.
[[721 319 769 439]
[386 461 753 771]
[260 317 589 853]]
|
notebooks/dflow.ipynb | ###Markdown
mmdflow: Detect water in a static image of an oil-water flow experiment. Description: A gray-scale image of an oil-water flow experiment is processed. This image is composed of a top-lateral view of a transparent pipe containing water, in the center, and oil, around the water. This procedure detects the region where the water is by using connected filtering, thresholding and shape smoothing.
###Code
import numpy as np
from PIL import Image
import ia870 as ia
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Reading: The gray-scale image of the water-oil flow experiment is read.
###Code
a_pil = Image.open('../data/flow.tif').convert('L')
a = np.array (a_pil)
(fig, axes) = plt.subplots(nrows=1, ncols=1,figsize=(5, 5))
axes.set_title('a')
axes.imshow(a, cmap='gray')
axes.axis('off')
###Output
_____no_output_____
###Markdown
Dark region enhancement: The dark region of the image is enhanced by the close top-hat operator.
###Code
b=ia.iacloseth(a,ia.iaseline(50,90));
(fig, axes) = plt.subplots(nrows=1, ncols=1,figsize=(5, 5))
axes.set_title('b')
axes.imshow(b, cmap='gray')
axes.axis('off')
###Output
_____no_output_____
###Markdown
Filtering: Connected filtering is applied to remove small artifacts present in the image.
###Code
c=ia.iacloserec(b,ia.iasebox(5));
(fig, axes) = plt.subplots(nrows=1, ncols=1,figsize=(5, 5))
axes.set_title('c')
axes.imshow(c, cmap='gray')
axes.axis('off')
###Output
_____no_output_____
###Markdown
Shape filtering: An alternating sequential filter is used for shape smoothing.
###Code
d=ia.iaasf(c,'co',ia.iasecross());
(fig, axes) = plt.subplots(nrows=1, ncols=1,figsize=(5, 5))
axes.set_title('d')
axes.imshow(d, cmap='gray')
axes.axis('off')
###Output
_____no_output_____
###Markdown
Thresholding: The original image and the thresholded image overlaid on the original are presented side by side.
###Code
e=ia.iathreshad(d,100);
(fig, axes) = plt.subplots(nrows=1, ncols=2,figsize=(10, 7))
axes[0].set_title('a')
axes[0].imshow(a, cmap='gray')
axes[0].axis('off')
axes[1].set_title('a, e')
axes[1].imshow(ia.iagshow(a, e).transpose(1, 2, 0), cmap='gray')
axes[1].axis('off')
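# Small addition (sketch): since `e` is the binary water mask, the fraction of the image
# occupied by water can be estimated directly from it.
water_fraction = float(np.mean(e > 0))
print('estimated water area fraction: {:.3f}'.format(water_fraction))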
###Output
_____no_output_____ |
notebooks/issues/53_move_local_template_to_config_dir.ipynb | ###Markdown
[53](https://github.com/blaylockbk/Herbie/issues/53) To extend Herbie, put local template in `~/.config/herbie`
###Code
from herbie.archive import Herbie
import herbie.models as models_template
H = Herbie('2017-10-21', model='gefs', variable='tmp', member=1)
H.SOURCES
H.download()
H.read_idx(':6 hour')
ds = H.xarray(":6 hour")
ds
from toolbox.cartopy_tools import common_features, pc
ax = common_features().ax
ds.t.plot(ax=ax, transform=pc)
from datetime import datetime
from os import remove
import matplotlib.pyplot as plt
from herbie.archive import Herbie
now = datetime.now()
today = datetime(now.year, now.month, now.day)
today_str = today.strftime('%Y-%m-%d %H:%M')
H = Herbie(today_str, model='hrrr', product='prs', save_dir='$TMPDIR')
H.download()
H.xarray('TMP:2 m')
H.local_grib.expand()
###Output
_____no_output_____ |
notebook/c620_Mathematical_Programming_Solver.ipynb | ###Markdown
###Code
!pip install autorch > log.txt
import joblib
import autorch
from autorch.function import sp2wt
import pandas as pd
import numpy as np
import torch
from torch import nn
from torch.optim import Adam
pd.options.display.max_rows = 999
df = pd.read_csv('/content/drive/MyDrive/台塑輕油案子/data/c620/cleaned/c620_train.csv',index_col=0)
c = joblib.load('/content/drive/MyDrive/台塑輕油案子/data/c620/col_names/c620_col_names.pkl')
df.head(3)
###Output
_____no_output_____
###Markdown
Build f(case, input_wt, op) = output_wt
###Code
# def columns
input_wt_col = c['x41']
case_col = c['case']
op_col = c['density']+c['yRefluxRate']+c['yHeatDuty']+c['yControl']
sp_col = c['vent_gas_sf'] +c['distillate_sf'] +c['sidedraw_sf'] +c['bottoms_sf']
output_wt_col = c['vent_gas_x'] +c['distillate_x'] +c['sidedraw_x'] +c['bottoms_x']
n_idx = [ [i,i+41,i+41*2,i+41*3] for i in range(41)]
# train
c620_f = autorch.utils.PartBulider(df,case_col+input_wt_col+op_col,sp_col,max_epochs=100,limit_y_range=True,normalize_idx_list=n_idx)
c620_f.net = nn.Sequential(nn.Linear(len(case_col+input_wt_col+op_col),256),nn.Linear(256,256),nn.Linear(256,len(sp_col)),nn.Sigmoid())
c620_f.optimizer = Adam(c620_f.net.parameters(),lr=1e-3)
c620_f.train()
# test
x_test = c620_f.data['X_test']
x41 = df.loc[x_test.index,c['x41']].values
sp = c620_f.predict(x_test).iloc[:,:41*4]
s1,s2,s3,s4 = sp.iloc[:,:41].values,sp.iloc[:,41:41*2].values,sp.iloc[:,41*2:41*3].values,sp.iloc[:,41*3:41*4].values
w1,w2,w3,w4 = sp2wt(x41,s1),sp2wt(x41,s2),sp2wt(x41,s3),sp2wt(x41,s4)
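# note (added; inferred from the variable names, not from the autorch source): sp2wt appears to
# combine the inlet composition x41 with each stream's predicted split fractions s1..s4 to give
# the outlet weight-percent compositions w1..w4 of the four product streams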
wt_pred = np.hstack((w1,w2,w3,w4))
wt_pred = pd.DataFrame(wt_pred,index=x_test.index,columns=output_wt_col)
wt_real = df.loc[x_test.index,output_wt_col]
res = c620_f.show_metrics(wt_real,wt_pred)
res
res.loc[['Tatoray Stripper C620 Operation_Sidedraw Production Rate and Composition_Benzene_wt%']]
a = wt_pred[['Tatoray Stripper C620 Operation_Sidedraw Production Rate and Composition_Benzene_wt%']].head(10)
b = wt_real[['Tatoray Stripper C620 Operation_Sidedraw Production Rate and Composition_Benzene_wt%']].head(10)
b.columns = ['輸出端']
a.join(b)
def F(case,input_wt,op):
sp = c620_f.predict(case.join(input_wt).join(op))
s1,s2,s3,s4 = sp.iloc[:,:41].values,sp.iloc[:,41:41*2].values,sp.iloc[:,41*2:41*3].values,sp.iloc[:,41*3:41*4].values
x41 = input_wt.values
w1,w2,w3,w4 = sp2wt(x41,s1),sp2wt(x41,s2),sp2wt(x41,s3),sp2wt(x41,s4)
wt_pred = np.hstack((w1,w2,w3,w4))
wt_pred = pd.DataFrame(wt_pred,index=input_wt.index,columns=output_wt_col)
return wt_pred
###Output
_____no_output_____
###Markdown
Build g(case, input_wt) = op
###Code
path = '/content/drive/MyDrive/台塑輕油案子/data/c620/模擬擴充資料0514.xlsx'
df_extend = pd.read_excel(path)
idx_extend = df_extend.iloc[0,6:].values
case_extend = df_extend.iloc[[5,6,7],6:].T
case_extend.columns = case_col
case_extend.index = idx_extend
case_extend.head(1)
wt_extend = df_extend.iloc[132:132+41,6:].T
wt_extend.columns = input_wt_col
wt_extend.index = idx_extend
wt_extend.head(1)
op_extend = df_extend.iloc[221:221+2,6:].T
op_extend.columns = op_col[-2:]
op_extend.index = idx_extend
op_extend.head(1)
G = joblib.load('/content/drive/MyDrive/台塑輕油案子/data/c620/model/c620_G.pkl')
op_pred = G.predict(case_extend.join(wt_extend))
op_pred = pd.DataFrame(op_pred,columns=op_col,index=idx_extend)
op_pred[op_col[-2:]] = op_extend
op_extend = op_pred
op_extend.head(1)
df_extend = case_extend.join(wt_extend).join(op_extend).astype('float32')
df_extend.head(1)
G = autorch.utils.PartBulider(df.append(df_extend),case_col+input_wt_col,op_col,max_epochs=42,limit_y_range=True)
G.net = nn.Sequential(nn.Linear(len(case_col+input_wt_col),256),nn.Linear(256,256),nn.Linear(256,len(op_col)),nn.Sigmoid())
G.optimizer = Adam(G.net.parameters(),lr=1e-3)
G.train()
G.test()
cond = (df['Tatoray Stripper C620 Operation_Sidedraw Production Rate and Composition_Benzene_wt%'] >= 69) & (df['Tatoray Stripper C620 Operation_Sidedraw Production Rate and Composition_Benzene_wt%'] <= 71)
sample = df[cond].sample(10)
case,input_wt,op = sample[case_col] ,sample[input_wt_col] ,sample[op_col]
F(case,input_wt,op)
sample[output_wt_col]
sample['Tatoray Stripper C620 Operation_Sidedraw Production Rate and Composition_Benzene_wt%']
###Output
_____no_output_____
###Markdown
Mathematical programming solver: adjust op so that 'Tatoray Stripper C620 Operation_Sidedraw Production Rate and Composition_Benzene_wt%' == 70
###Code
!pip install optuna > log.txt
sample[input_wt_col]
sample[['Tatoray Stripper C620 Operation_Sidedraw Production Rate and Composition_Benzene_wt%']]
op_max = df.append(df_extend)[op_col].max().to_dict()
op_min = df.append(df_extend)[op_col].min().to_dict()
op_max
op_min
joblib.dump(op_max,'/content/drive/MyDrive/台塑輕油案子/data/c620/map_dict/c620_op_max.pkl')
joblib.dump(op_min,'/content/drive/MyDrive/台塑輕油案子/data/c620/map_dict/c620_op_min.pkl')
import optuna
# objective function
def objective(trial):
    # controllable variables (the operating conditions the optimizer can adjust)
op_dict = {}
for name in op_col:
op_dict[name] = trial.suggest_uniform(name,op_min[name],op_max[name])
op = pd.DataFrame(op_dict,index=sample.index)
    # compute the loss
輸入端bz = sample[case_col]['Tatoray Stripper C620 Operation_Specifications_Spec 3 : Benzene in Sidedraw_wt%'].values
輸出端bz = F(sample[case_col],sample[input_wt_col],op)['Tatoray Stripper C620 Operation_Sidedraw Production Rate and Composition_Benzene_wt%'].values
loss = np.mean((輸入端bz - 輸出端bz)**2)
return loss
# run the search
study = optuna.create_study()
study.optimize(objective, n_trials=100)
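# note (added for clarity): after optimization, study.best_params holds the op settings with the
# lowest loss found, and study.best_value holds that loss (the squared benzene error here)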
op_opt = pd.DataFrame(study.best_params,index=sample.index)
# search results
op_opt
# check whether the optimized result meets the client's required 70 wt% benzene
a = sample[['Tatoray Stripper C620 Operation_Specifications_Spec 3 : Benzene in Sidedraw_wt%']]
b = F(case,input_wt,op_opt)[['Tatoray Stripper C620 Operation_Sidedraw Production Rate and Composition_Benzene_wt%']]
b.columns = ['優化結果']
a.join(b)
op_pred = G.predict(sample[case_col+input_wt_col])
import seaborn as sb
import matplotlib.pyplot as plt
for i in op_col:
sb.kdeplot(df[op_col][i],label='kde')
plt.axvline(op_opt[i][0],label='op_optimal',c='red')
plt.axvline(op_pred[i][0],label='op_pred',c='green')
plt.legend()
plt.show()
import joblib
G.shrink()
c620_f.shrink()
joblib.dump(G,'/content/drive/MyDrive/台塑輕油案子/data/c620/model/c620_G.pkl')
joblib.dump(c620_f,'/content/drive/MyDrive/台塑輕油案子/data/c620/model/c620_F.pkl')
###Output
_____no_output_____ |
Multiple_Text_Combination_and_Mapping.ipynb | ###Markdown
Multiple Text Combination and Mapping Project
The aim of this project was to find all possible answers to a quiz with 10 questions, each with two options.
**CONSIDERATIONS**
- Only one answer can be picked per question.
- The final output **should not have any duplicate** combination of answers.
- Lastly, assuming all items in the left list (option 1) stand for **ODD** (O) selections, while those in the right stand for **EVEN** (E), map the final output as **Os** and **Es**.
###Code
# Import necessary modules
import pandas as pd
import random
import numpy as np
# generate a dataframe of quiz possible answers
possible_ans = pd.DataFrame({
'opt_1': ['A','C','E','G','I','K','M','O','Q','S'],
'opt_2': ['B','D','F','H','J','L','N','P','R','T']
})
possible_ans
answers = [] #all possible lists of answers are stored here , this is a list of lists
x = 0
# create a loop that keeps generating random choices;
# 100000 attempts is far more than the 2**10 = 1024 possible combinations, so all of them are very likely to be found
while x< 100000:
# generate a random choice from each row across both columns, then write all the choices to a list
# store list in rand_choice
rand_choice = possible_ans.apply(lambda row : random.choice(row.tolist()),axis =1).tolist()
    # append the generated rand_choice to the 'answers' list, but only if it has not been added yet
if rand_choice not in answers:
answers.append(rand_choice)
x+=1
answers
print ('there are {} possible combination of answers'.format(len(answers)))
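# Deterministic alternative (sketch, not part of the original random approach): with 10 questions
# and 2 options each there are exactly 2**10 = 1024 combinations, and itertools.product
# enumerates all of them without relying on random sampling.
import itertools
all_combos = [list(t) for t in itertools.product(*possible_ans.values.tolist())]
print('exhaustive enumeration yields {} combinations'.format(len(all_combos)))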
answers
list_of_answers = pd.DataFrame(answers)
list_of_answers.to_csv('list_of_answers.csv')
list_of_answers
# the file exported earlier is re-imported to avoid changing the already established values,
# since those values were randomly generated
raw_text = pd.read_csv('/list_of_answers.csv',index_col = 0)
raw_text.head(10)
# concatenate answers across columns for all rows and save in new column
raw_text['possible_outcomes'] = raw_text.sum(axis=1)
raw_text
# Create a function to replace text with O's and E's by mapping using the translate() method
def map_text(value):
# define the map list
map_list = {
'A':'O','B':'E','C':'O','D':'E','E':'O','F':'E','G':'O','H':'E','I':'O','J':'E','K':'O',
'L':'E','M':'O','N':'E','O':'O','P':'E','Q':'O','R':'E','S':'O','T':'E'
}
# create a mapped table which the translate method will use
trans_table = value.maketrans(map_list)
# translate all values introduced into the function
value = value.translate(trans_table)
return value
# test the function
map_text('ACFGIKMPQS')
raw_text_2 = raw_text
raw_text_2.head()
# apply map_text function on the column with earlier saved possible outcomes
raw_text_2['replaced_values'] = raw_text_2['possible_outcomes'].apply(map_text)
raw_text_2
# save final output to csv
raw_text_2.to_csv('updated_list_of_answers.csv')
###Output
_____no_output_____ |
notebooks/4.1-mbml_kf_w_input_IT_unique.ipynb | ###Markdown
Kalman filters Italy
Table of contents
1. [Data](Data)
2. [Model with the vector c fixed as [0, 1]](Model-with-the-vector-c-fixed-as-[0,-1])
3. [Model with the vector c as a random variable with prior](Model-with-the-vector-c-as-a-random-variable-with-prior)
4. [Model without input (2 hidden variables)](Model-without-input)
###Code
import sys
from os.path import pardir, join
import pandas as pd
import numpy as np
import numpyro
import numpyro.distributions as dist
from numpyro import handlers
from numpyro.infer import MCMC, NUTS
import matplotlib.pyplot as plt
import jax
import jax.numpy as jnp
from jax import random, vmap
from jax.scipy.special import logsumexp
from jax import lax
np.random.seed(42)
plt.style.use('ggplot')
%matplotlib inline
plt.rcParams['figure.figsize'] = (16, 10)
from sklearn.preprocessing import StandardScaler
ROOT = pardir
DATA = join(ROOT, "data", "processed")
###Output
_____no_output_____
###Markdown
Data: The dataset from Italy has 57 unduplicated dates.
###Code
df = pd.read_csv(join(DATA, 'data_italy_sixcol.csv'))
df_filtered = df.groupby("Date").apply(lambda x: x.iloc[0])
data = df_filtered.values
X = data[:, 2:].astype(np.float_)
y = data[:,1].astype(np.float_)
n_train = 45
n_test = len(y)-n_train
idx_train = [*range(0,n_train)]
idx_test = [*range(n_train, len(y))]
y_train = y[idx_train]
y_test = y[idx_test]
###Output
_____no_output_____
###Markdown
Model with the vector c fixed as [0, 1]
###Code
sys.path.append(join(ROOT, "src", "models"))
sys.path.append(join(ROOT, "src", "visualization"))
from kf_input import model_wo_c, model_w_c
from train import train_kf
from visualize import get_samples, plot_samples, plot_forecast
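# Hedged sketch (added for illustration; the real model_wo_c / model_w_c live in
# src/models/kf_input.py, use a 2-dimensional hidden state with emission vector c = [0, 1],
# and may be parameterized differently): a simplified one-dimensional random-walk state driven
# by the exogenous inputs x_t and observed with Gaussian noise,
# i.e. z_t ~ N(z_{t-1} + x_t . beta, tau) and y_t ~ N(z_t, sigma).
def example_kf_with_input(y, x):
    beta = numpyro.sample('beta', dist.Normal(jnp.zeros(x.shape[1]), 1.).to_event(1))
    tau = numpyro.sample('tau', dist.HalfCauchy(1.))
    sigma = numpyro.sample('sigma', dist.HalfCauchy(1.))
    z = 0.
    for t in range(len(y)):
        z = numpyro.sample(f'z_{t}', dist.Normal(z + jnp.dot(x[t], beta), tau))
        numpyro.sample(f'y_{t}', dist.Normal(z, sigma), obs=y[t])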
mcmc = train_kf(model_wo_c, y_train, n_train, n_test, x=X)
hmc_samples = get_samples(mcmc)
plot_samples(hmc_samples, ["beta", "tau", "sigma"])
plot_forecast(hmc_samples, idx_train, idx_test, y_train, y_test)
###Output
_____no_output_____
###Markdown
Model with the vector c as a random variable with prior
###Code
mcmc2 = train_kf(model_w_c, y_train, n_train, n_test, x=X)
hmc_samples = get_samples(mcmc2)
plot_samples(hmc_samples, ["beta", "tau", "sigma"])
plot_forecast(hmc_samples, idx_train, idx_test, y_train, y_test)
###Output
_____no_output_____
###Markdown
Model without input
###Code
from kf import twoh_c_kf
mcmc3 = train_kf(twoh_c_kf, y_train, n_train, n_test, x=None)
hmc_samples = get_samples(mcmc3)
plot_samples(hmc_samples, ["beta", "tau", "sigma"])
plot_forecast(hmc_samples, idx_train, idx_test, y_train, y_test)
###Output
_____no_output_____ |
site/en/tutorials/keras/Intro_to_RNN.ipynb | ###Markdown
Copyright 2018 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title MIT License
#
# Copyright (c) 2017 François Chollet
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###Output
_____no_output_____
###Markdown
Understanding recurrent neural networks View on TensorFlow.org Run in Google Colab View source on GitHub This tutorial gives a brief introduction to recurrent neural networks (RNN). The code example in this tutorial is adapted from Chapter 6, Section 2 of [Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python?a_aid=keras&a_bid=76564dff). We'll use [tf.keras](https://www.tensorflow.org/guide/keras), a high-level API, to build and train a simple RNN model in TensorFlow. Introduction A major characteristic of traditional neural networks is that they process each input independently, with no state kept in between inputs. With such models, a sequence input, such as an entire movie review on IMDB, needs to be transformed into a single data point and processed in one go. In contrast, recurrent neural networks (RNN) process sequence input by iterating through the elements in the sequence, and maintain a state for all the data they have seen so far. Taking an IMDB movie review as an example, an RNN processes each review word by word. When processing a word, the RNN "remembers" the state of all the previous words in this review. The state of the RNN is reset when processing another independent input, such as another review.
###Code
import tensorflow as tf
from tensorflow import keras
print(tf.__version__)
###Output
_____no_output_____
###Markdown
A recurrent layer in Keras Keras recurrent layers can be run in two different modes: they return either the full sequences of successive outputs for each timestep (a 3D tensor of shape (batch_size, timesteps, output_features)), or return only the last output for each input sequence (a 2D tensor of shape (batch_size, output_features)). These two modes are controlled by the return_sequences constructor argument. Let's take a look at an example that uses a SimpleRNN layer and returns only the output at the last timestep:
###Code
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, SimpleRNN
model = Sequential()
model.add(Embedding(10000, 32))
model.add(SimpleRNN(32))
model.summary()
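# For reference: Embedding(10000, 32) outputs shape (batch, timesteps, 32), and SimpleRNN(32)
# without return_sequences reduces that to (batch, 32), i.e. only the last timestep's output is kept.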
###Output
_____no_output_____
###Markdown
The following example returns the full state sequence.
###Code
model = Sequential()
model.add(Embedding(10000, 32))
model.add(SimpleRNN(32, return_sequences=True))
model.summary()
###Output
_____no_output_____
###Markdown
It is sometimes useful to stack several recurrent layers one after the other in order to increase the representational power of a network. In such a setup, you have to get all intermediate layers to return full sequences:
###Code
model = Sequential()
model.add(Embedding(10000, 32))
model.add(SimpleRNN(32, return_sequences=True))
model.add(SimpleRNN(32, return_sequences=True))
model.add(SimpleRNN(32, return_sequences=True))
model.add(SimpleRNN(32)) # This last layer only returns the last outputs.
model.summary()
###Output
_____no_output_____
###Markdown
Now let's try to use such a model on the IMDB movie review classification problem. First, let's preprocess the data:
###Code
from tensorflow.keras.datasets import imdb
from tensorflow.keras.preprocessing import sequence
max_features = 10000 # number of words to consider as features
maxlen = 500 # cut texts after this number of words (among top max_features most common words)
batch_size = 32
print('Loading data...')
(input_train, y_train), (input_test, y_test) = imdb.load_data(num_words=max_features)
print(len(input_train), 'train sequences')
print(len(input_test), 'test sequences')
print('Pad sequences (samples x time)')
input_train = sequence.pad_sequences(input_train, maxlen=maxlen)
input_test = sequence.pad_sequences(input_test, maxlen=maxlen)
print('input_train shape:', input_train.shape, 'y_train shape:', y_train.shape)
print('input_test shape:', input_test.shape, 'y_test shape:', y_test.shape)
###Output
_____no_output_____
###Markdown
Let's train a simple recurrent network using an Embedding layer and a SimpleRNN layer:
###Code
from tensorflow.keras.layers import Dense
model = Sequential()
model.add(Embedding(max_features, 32))
model.add(SimpleRNN(32))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])
history = model.fit(input_train, y_train,
epochs=10,
batch_size=128,
validation_split=0.2)
###Output
_____no_output_____
###Markdown
Let's display the training and validation loss and accuracy:
###Code
import matplotlib.pyplot as plt
%matplotlib inline
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
test_loss, test_accuracy = model.evaluate(input_test, y_test)
print('Test Loss:', test_loss, 'Test Accuracy:', test_accuracy)
###Output
_____no_output_____
###Markdown
As a reminder, in the [basic text classification tutorial](https://www.tensorflow.org/tutorials/keras/basic_text_classification), our fairly naive approach to this very dataset got us to 88% test accuracy. Unfortunately, our small recurrent network doesn't perform very well at all compared to this baseline (only up to 85% validation accuracy, and 74% test accuracy). Part of the problem is that our inputs only consider the first 500 words rather than the full sequences -- hence our RNN has access to less information than our earlier baseline model. The remainder of the problem is simply that SimpleRNN isn't very good at processing long sequences, like text. Other types of recurrent layers perform much better. Let's take a look at some more advanced layers. A concrete LSTM example in Keras Although SimpleRNN should retain the information about inputs seen many timesteps before, in practice such long-term dependencies are impossible to learn because of the [vanishing gradient problem](https://en.wikipedia.org/wiki/Vanishing_gradient_problem). The Long Short-Term Memory (LSTM) algorithm was developed to overcome this problem. Please see [Understanding LSTM Networks](https://colah.github.io/posts/2015-08-Understanding-LSTMs/) for an introduction to LSTM. The example below is a network with an LSTM layer, similar to the one with SimpleRNN that we just presented. For simplicity, we only specify the output dimensionality of the LSTM layer, and leave every other argument (there are lots) to the Keras defaults.
###Code
from tensorflow.keras.layers import LSTM
model = Sequential()
model.add(Embedding(max_features, 32))
model.add(LSTM(32))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['acc'])
history = model.fit(input_train, y_train,
epochs=10,
batch_size=128,
validation_split=0.2)
###Output
_____no_output_____
###Markdown
Let's display the training and validation loss and accuracy:
###Code
import matplotlib.pyplot as plt
%matplotlib inline
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
test_loss, test_accuracy = model.evaluate(input_test, y_test)
print('Test Loss:', test_loss, 'Test Accuracy:', test_accuracy)
###Output
_____no_output_____
###Markdown
Copyright 2018 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title MIT License
#
# Copyright (c) 2017 François Chollet
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###Output
_____no_output_____
###Markdown
Understanding recurrent neural networks View on TensorFlow.org Run in Google Colab View source on GitHub This tutorial gives a brief introduction to recurrent neural networks (RNN). The code example in this tutorial is adapted from Chapter 6, Section 2 of [Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python?a_aid=keras&a_bid=76564dff). We'll use [tf.keras](https://www.tensorflow.org/guide/keras), a high-level API, to build and train a simple RNN model in TensorFlow. Introduction A major characteristic of traditional neural networks is that they process each input independently, with no state kept in between inputs. With such models, a sequence input, such as an entire movie review on IMDB, needs to be transformed into a single data point and processed in one go. In contrast, recurrent neural networks (RNN) process sequence input by iterating through the elements in the sequence, and maintain a state for all the data they have seen so far. As shown in the diagram below, ${X_t}$, ${O_t}$ and ${S_t}$ are respectively the input, output and hidden state at time step t. When we move from time step t to time step t + 1, the hidden state at time step t + 1 depends on both the input at t + 1 and the hidden state from the previous time step. Taking an IMDB movie review as an example, an RNN processes each review word by word. When processing a word, the RNN "remembers" the state of all the previous words in this review. The state of the RNN is reset when processing another independent input, such as another review.
###Code
import tensorflow as tf
from tensorflow import keras
print(tf.__version__)
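# Tiny numpy illustration (added; not part of the original tutorial) of the recurrence described
# above: the state s_t depends on the current input x_t and on the previous state s_{t-1}.
import numpy as np
W_x, W_s = np.random.randn(4, 3), np.random.randn(4, 4)  # toy weights, illustrative only
s = np.zeros(4)
for x_t in np.random.randn(5, 3):                        # a toy sequence of 5 timesteps
    s = np.tanh(W_x @ x_t + W_s @ s)                     # s_t = f(x_t, s_{t-1})
print('final state:', s)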
###Output
_____no_output_____
###Markdown
A recurrent layer in Keras Keras recurrent layers can be run in two different modes: they return either the full sequences of successive outputs for each timestep (a 3D tensor of shape (batch_size, timesteps, output_features)), or return only the last output for each input sequence (a 2D tensor of shape (batch_size, output_features)). These two modes are controlled by the return_sequences constructor argument. Let's take a look at an example that uses a SimpleRNN layer and returns only the output at the last timestep. Before we add a simple RNN layer, let's first create an embedding layer. An embedding layer organizes words with similar meanings into similar vectors. Compared to one-hot encoding, word embeddings are dense and relatively low-dimensional. To create an embedding layer, we pass two parameters: the first is the vocabulary size (the maximum number of distinct tokens), and the second is the embedding dimension. In this example, the embedding layer maps each of the 10000 most frequent words to a 32-dimensional vector.
###Code
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, SimpleRNN
model = Sequential()
model.add(Embedding(10000, 32))
model.add(SimpleRNN(32))
model.summary()
###Output
_____no_output_____
###Markdown
The following example returns the full state sequence.
###Code
model = Sequential()
model.add(Embedding(10000, 32))
model.add(SimpleRNN(32, return_sequences=True))
model.summary()
###Output
_____no_output_____
###Markdown
It is sometimes useful to stack several recurrent layers one after the other in order to increase the representational power of a network. In such a setup, you have to get all intermediate layers to return full sequences:
###Code
model = Sequential()
model.add(Embedding(10000, 32))
model.add(SimpleRNN(32, return_sequences=True))
model.add(SimpleRNN(32, return_sequences=True))
model.add(SimpleRNN(32, return_sequences=True))
model.add(SimpleRNN(32)) # This last layer only returns the last outputs.
model.summary()
###Output
_____no_output_____
###Markdown
Now let's try to use such a model on the IMDB movie review classification problem. First, let's load and preprocess the data. The preprocessing step applies padding to the sentences so that they all have the same length. Padding is required because we are going to fit the model in batch mode, and all inputs within a batch must have the same length.
###Code
from tensorflow.keras.datasets import imdb
from tensorflow.keras.preprocessing import sequence
max_features = 10000 # number of words to consider as features
maxlen = 500 # cut texts after this number of words (among top max_features most common words)
batch_size = 32
print('Loading data...')
(input_train, y_train), (input_test, y_test) = imdb.load_data(num_words=max_features)
print(len(input_train), 'train sequences')
print(len(input_test), 'test sequences')
print('Pad sequences (samples x time)')
input_train = sequence.pad_sequences(input_train, maxlen=maxlen)
input_test = sequence.pad_sequences(input_test, maxlen=maxlen)
print('input_train shape:', input_train.shape, 'y_train shape:', y_train.shape)
print('input_test shape:', input_test.shape, 'y_test shape:', y_test.shape)
###Output
_____no_output_____
###Markdown
Let's train a simple recurrent network using an Embedding layer and a SimpleRNN layer:
###Code
from tensorflow.keras.layers import Dense
model = Sequential()
model.add(Embedding(max_features, 32))
model.add(SimpleRNN(32))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])
history = model.fit(input_train, y_train,
epochs=10,
batch_size=128,
validation_split=0.2)
###Output
_____no_output_____
###Markdown
Let's display the training and validation loss and accuracy:
###Code
import matplotlib.pyplot as plt
%matplotlib inline
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
test_loss, test_accuracy = model.evaluate(input_test, y_test)
print('Test Loss:', test_loss, 'Test Accuracy:', test_accuracy)
###Output
_____no_output_____
###Markdown
As a reminder, in the [basic text classification tutorial](https://www.tensorflow.org/tutorials/keras/basic_text_classification), our fairly naive approach to this very dataset got us to 88% test accuracy. Unfortunately, our small recurrent network doesn't perform very well at all compared to this baseline (only up to 85% validation accuracy, and 74% test accuracy). Part of the problem is that our inputs only consider the first 500 words rather than the full sequences -- hence our RNN has access to less information than our earlier baseline model. The remainder of the problem is simply that SimpleRNN isn't very good at processing long sequences, like text. Other types of recurrent layers perform much better. Let's take a look at some more advanced layers. A concrete LSTM example in Keras Although SimpleRNN should retain the information about inputs seen many timesteps before, in practice such long-term dependencies are impossible to learn because of the [vanishing gradient problem](https://en.wikipedia.org/wiki/Vanishing_gradient_problem). The Long Short-Term Memory (LSTM) algorithm was developed to overcome this problem. Please see [Understanding LSTM Networks](https://colah.github.io/posts/2015-08-Understanding-LSTMs/) for an introduction to LSTM. The example below is a network with an LSTM layer, similar to the one with SimpleRNN that we just presented. For simplicity, we only specify the output dimensionality of the LSTM layer, and leave every other argument (there are lots) to the Keras defaults.
###Code
from tensorflow.keras.layers import LSTM
model = Sequential()
model.add(Embedding(max_features, 32))
model.add(LSTM(32))
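# For reference: LSTM(32) on the 32-dimensional embeddings has 4 * ((32 + 32) * 32 + 32) = 8320
# parameters; four gates, each with its own input kernel, recurrent kernel and bias.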
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['acc'])
history = model.fit(input_train, y_train,
epochs=10,
batch_size=128,
validation_split=0.2)
###Output
_____no_output_____
###Markdown
Let's display the training and validation loss and accuracy:
###Code
import matplotlib.pyplot as plt
%matplotlib inline
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
test_loss, test_accuracy = model.evaluate(input_test, y_test)
print('Test Loss:', test_loss, 'Test Accuracy:', test_accuracy)
###Output
_____no_output_____ |
04_transformer_tutorial_2nd_part/BERT_tutorial/transformer_2_tutorial.ipynb | ###Markdown
Transformer解读的第二部分, 这部分是实践的部分 二. transformer代码解读, 语料数据预处理, BERT的预训练和情感分析的应用: 首先是今天课程内容的顺序, 我将BERT代码解读放到了最后, 把主要内容排在了前面, 注意我们今天使用的是**PyTorch**深度学习框架, **其实用什么样的框架并不重要**, 本节课**代码的部分不是重点**, 重点是让大家可以掌握$NLP$中**语料预处理**和**建模**并**解决实际应用中出现的困难**的的一些思路, 那话说回来为什么用PyTorch呢? 我其实用Tensorflow的时间要比PyTorch长很多, 但是目前用了PyTorch之后, 我感觉对于NLP来说, PyTorch真的比Tensorflow好用多了, 因为Tensorflow属于静态图, 建模和调试都很麻烦. 尤其是序列模型要定义很多variable scope和name scope之类的, 也就是张量的作用域, 这些东西命名搞不好一不小心就会有bug, 而且有些bug不会报错, 当你发现计算结果不对, 要再返回头debug, 而且Tensorflow的静态图不支持调试, 要用sess.run把想要的结果计算出来才可以. 但是PyTorch是动态图, 就和写numpy一样, 非常方便调试, 而且用class面向对象方式建模, 先声明操作再执行操作, 这样基本不容易在数据流图上出现bug. 如果你从来没用过**PyTorch**我今天在后面代码部分会带大家大致熟悉一下, 主要是带大家熟悉一下PyTorch的特性, 具体教程官方文档中的快速入门(英文)写的就很好, https://pytorch.org/tutorials/: 1. 进一步理解$positional \ encoding$, 结合注意力矩阵可视化位置编码;2. **语言模型**的定义和**BERT解读**;3. BERT训练之前的准备工作, 语料预处理;4. BERT的预训练, 训练参数;5. 使用BERT预训练模型进行自然语言的情感分类;6. BERT代码解读(这部分因为长度原因放在单独一个视频里). 1. 进一步理解$positional \ encoding$, 结合注意力矩阵可视化位置编码;
###Code
# 导入依赖库
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import math
import plotly.graph_objs as go
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
from IPython.display import Image
init_notebook_mode(connected=True)
def get_positional_encoding(max_seq_len, embed_dim):
# 初始化一个positional encoding
# embed_dim: 字嵌入的维度
# max_seq_len: 最大的序列长度
positional_encoding = np.array([
[pos / np.power(10000, 2 * i / embed_dim) for i in range(embed_dim)]
if pos != 0 else np.zeros(embed_dim) for pos in range(max_seq_len)])
positional_encoding[1:, 0::2] = np.sin(positional_encoding[1:, 0::2]) # dim 2i 偶数
positional_encoding[1:, 1::2] = np.cos(positional_encoding[1:, 1::2]) # dim 2i+1 奇数
# 归一化, 用位置嵌入的每一行除以它的模长
# denominator = np.sqrt(np.sum(position_enc**2, axis=1, keepdims=True))
# position_enc = position_enc / (denominator + 1e-8)
return positional_encoding
positional_encoding = get_positional_encoding(max_seq_len=100, embed_dim=128)
# 3d可视化
relation_matrix = np.dot(positional_encoding, positional_encoding.T)[1:, 1:]
data = [go.Surface(z=relation_matrix)]
layout = go.Layout(scene={"xaxis": {'title': "sequence length"}, "yaxis": {"title": "sequence length"}})
fig = go.Figure(data=data, layout=layout)
iplot(fig)
###Output
_____no_output_____
###Markdown
上图中, 我们用位置编码矩阵乘以(矩阵乘)他本身的转置, 也就是$PE: \ [seq\_len, \ embedding\_dim ]$, 我们求$PEPE^T$, 得出的维度是$[seq\_len, \ seq\_len ]$. 我们看到上图中, 矩阵的对角线隆起, 也就是值比较大, 是因为一个矩阵乘以他本身的转置之后, 形成的矩阵的对角线正是这个矩阵的每一行$(row)$点乘这一行本身, 所以是值最大的区域(红色部分). 对于位置编码来说, 也就是当前位置与当前位置本身相关程度最高. 再往对角线两边看, 发现以对角线(红色山峰)区域为中心, 两边属于缓慢下降趋势, 这就说明了随着离当前位置越远, 其位置编码的相关程度就越低. 由此可见, 位置编码建立在时间维度的关联关系. 2. 语言模型的定义和BERT解读; 什么是语言模型, 其实用一个公式就可以表示$P(c_{1},\ldots ,c_{m})$, 假设我们有一句话, $c_{1}到c_{m}$是这句话里的$m$个字, 而语言模型就是求的是这句话出现的概率是多少. 比如说在一个语音识别的场景, 机器听到一句话是"wo wang dai san le(我忘带伞了)", 然后机器解析出两个句子, 一个是"我网袋散了", 另一个是"我忘带伞了", 也就是前者的概率大于后者. 然后语言模型就可以判断$P("我忘带伞了") > P("我网袋散了")$, 从而得出这句语音的正确解析结果是"我忘带伞了". BERT的全称是: Bidirectional Encoder Representations from Transformers, 如果翻译过来也就是**双向transformer编码表达**, 我们在上节课解读了transformer的编码器, 编码器输出的隐藏层就是自然语言序列的数学表达, 那么双向是什么意思呢? 我们来看一下下面这张图.  上图中$E_i$是指的单个字或词, $T_i$指的是最终计算得出的**隐藏层**, 还记得我们在Transformer(一)中讲到的注意力矩阵和注意力加权, 经过这样的操作之后, 序列里面的每一个字, **都含有这个字前面的信息和后面的信息**, 这就是**双向**的理解, 在这里, 一句话中每一个字, 经过注意力机制和加权之后, **当前这个字等于用这句话中其他所有字重新表达了一遍**, 每个字含有了这句话中所有成分的信息. 在BERT中, 主要是以两种预训练的方式来建立语言模型: BERT语言模型任务一: MASKED LM在BERT中, Masked LM(Masked language Model)构建了语言模型, 这也是BERT的预训练中任务之一, 简单来说, 就是**随机遮盖或替换**一句话里面任意字或词, 然后让模型通过上下文的理解预测那一个被遮盖或替换的部分, 之后**做$Loss$的时候只计算被遮盖部分的$Loss$**, 其实是一个很容易理解的任务, 实际操作方式如下: 1. 随机把一句话中$15 \% $的$token$替换成以下内容: 1) 这些$token$有$80 \% $的几率被替换成$[mask]$; 2) 有$10 \%$的几率被替换成任意一个其他的$token$; 3) 有$10 \%$的几率原封不动.2. 之后让模型**预测和还原**被遮盖掉或替换掉的部分, 模型最终输出的隐藏层的计算结果的维度是: $X_{hidden}: [batch\_size, \ seq\_len, \ embedding\_dim]$ 我们初始化一个映射层的权重$W_{vocab}$: $W_{vocab}: [embedding\_dim, \ vocab\_size]$ 我们用$W_{vocab}$完成隐藏维度到字向量数量的映射, 只要求$X_{hidden}$和$W_{vocab}$的矩阵乘(点积): $X_{hidden}W_{vocab}: [batch\_size, \ seq\_len, \ vocab\_size] $之后把上面的计算结果在$vocab\_size$(最后一个)维度做$softmax$归一化, 是每个字对应的$vocab\_size$的和为$1$, 我们就可以通过$vocab\_size$里概率最大的字来得到模型的预测结果, 就可以和我们准备好的$Label$做损失($Loss$)并反传梯度了. 注意做损失的时候, 只计算在第1步里当句中**随机遮盖或替换**的部分, 其余部分不做损失, 对于其他部分, 模型输出什么东西, 我们不在意. BERT语言模型任务二: Next Sentence Prediction1. 首先我们拿到属于上下文的一对句子, 也就是两个句子, 之后我们要在这两段连续的句子里面加一些特殊$token$: $[cls]$上一句话,$[sep]$下一句话.$[sep]$ 也就是在句子开头加一个$[cls]$, 在两句话之中和句末加$[sep]$, 具体地就像下图一样:  2. 我们看到上图中两句话是$[cls]$ my dog is cute $[sep]$ he likes playing $[sep]$, $[cls]$我的狗很可爱$[sep]$他喜欢玩耍$[sep]$, 除此之外, 我们还要准备同样格式的两句话, 但他们不属于上下文关系的情况; $[cls]$我的狗很可爱$[sep]$企鹅不擅长飞行$[sep]$, 可见这属于上下句不属于上下文关系的情况; 在实际的训练中, 我们让上面两种情况出现的比例为$1:1$, 也就是一半的时间输出的文本属于上下文关系, 一半时间不是.3. 我们进行完上述步骤之后, 还要随机初始化一个可训练的$segment \ embeddings$, 见上图中, 作用就是用$embeddings$的信息让模型分开上下句, 我们一把给上句全$0$的$token$, 下句啊全$1$的$token$, 让模型得以判断上下句的起止位置, 例如: $[cls]$我的狗很可爱$[sep]$企鹅不擅长飞行$[sep]$ $0 \quad \ 0 \ \ 0 \ \ 0 \ \ 0 \ \ 0 \ \ 0 \ \ 0 \ \ \ 1 \ \ 1 \ \ 1 \ \ 1 \ \ 1 \ \ 1 \ \ 1 \ \ 1 $ 上面$0$和$1$就是$segment \ embeddings$.4. 还记得我们上节课说过的, 注意力机制就是, 让每句话中的每一个字对应的那一条向量里, 都融入这句话所有字的信息, 那么我们在最终隐藏层的计算结果里, 只要取出$[cls]token$所对应的一条向量, 里面就含有整个句子的信息, 因为我们期望这个句子里面所有信息都会往$[cls]token$所对应的一条向量里汇总: 模型最终输出的隐藏层的计算结果的维度是: 我们$X_{hidden}: [batch\_size, \ seq\_len, \ embedding\_dim]$ 我们要取出$[cls]token$所对应的一条向量, $[cls]$对应着$\ seq\_len$维度的第$0$条: $cls\_vector = X_{hidden}[:, \ 0, \ :]$ $cls\_vector \in \mathbb{R}^{batch\_size, \ embedding\_dim}$ 之后我们再初始化一个权重, 完成从$embedding\_dim$维度到$1$的映射, 也就是逻辑回归, 之后用$sigmoid$函数激活, 就得到了而分类问题的推断. 我们用$\hat{y}$来表示模型的输出的推断, 他的值介于$(0, \ 1)$之间: $\hat{y} = sigmoid(Linear(cls\_vector)) \quad \hat{y} \in (0, \ 1)$至此$BERT$的训练方法就讲完了, 是不是很简单, 下面我们来为$BERT$的预训练准备数据. 3. BERT训练之前的准备工作, 语料预处理;__字典的制作, 参见目录./corpus/BERT_preprocessing.ipynb文件中的讲解__ 4. 
BERT的预训练, 训练参数;BERT论文中, 推荐的模型参数为: 基准模型$transformer\_block=12, \ embedding\_dimension=768, \ num\_heads=12, \ Total Param eters=110M)$, 可见其中共有$1.1$亿参数, 除此之外, 还有比基准模型还大的高性能模型, 参数量为$3$亿, 要想训练并使用这么大参数的模型, 需要充裕的计算资源! 但是经过我的实际测试, 结合我目前正在研究的命名实体识别, 语义分析, 关系抽取和知识图谱的需求, 发现其实这个参数比较过剩, 我们今天训练BERT所用的参数为$transformer\_block=6, \ embedding\_dimension=384, \ num\_heads=12, \ Total Param eters=23M)$, 可见我把参数缩减到$2$千万, 但即使这样, 使用一块11GB显存的2080Ti显卡, 训练维基百科语料的BERT也需要一周的时间. 注意我们今天所使用的模型, 是在开源项目 https://github.com/huggingface/pytorch-transformers 的基础上修改而来, 其中我添加了很多中文注释, 添加了预处理模块, 添加了动态padding优化了速度(在后面代码解读的部分会讲到), 添加了情感分析模块等; 中文维基百科语料: https://github.com/brightmart/nlp_chinese_corpus 我只是做了一下预处理, 以适应BERT的预训练, 预处理之后的语料可以在readme.md文件中的百度网盘地址下载; 我已经把使用维基百科语料预训练好的BERT模型上传到了百度网盘, 请在readme.md文件中查看, 我还想提醒大家一下, 网盘上的BERT预训练模型在训练的时候, 使用了一些简单的技巧, 但这些技巧并没有出现在这个教程开源的代码里面, 这是因为某些不方便的原因, 不过我可以告诉大家这些技巧, 大家可以自己实现一下, 另外, 不建议大家用我公开的BERT训练代码来重新训练BERT模型, 因为我上传的已经训练好的BERT性能要更好一些: BERT训练技巧: 1) 因为我们是按单个字为单位训练BERT, 所以在Masked LM里面, 把句子中的英文单词分出来, 将英文单词所在的区域一起遮盖掉, 让模型预测这个部分; 2) 很多句子里含有数字, 显然在Masked LM中, 让模型准确地预测数据是不现实的, 所以我们把原文中的数字(包括整数和小数)都替换成一个特殊token, NUM, 这样模型只要预测出这个地方应该是某些数字就可以来. BERT训练代码解读在第6部分 5. 使用BERT预训练模型进行自然语言的情感分类;1) **情感分析语料预处理**: 参见目录./corpus/sentiment_preprocessing.ipynb, 我用使用来酒店评论语料, 不过这个语料规模要比2018年用LSTM做情感分析的要大一些, 正面评论和负面评论各5000条, 其实这也是玩具级数据集, 用BERT参数这么大的模型, 训练会产生严重过拟合, 泛化能力差的情况, 这也是我们下面需要解决的问题; 2) 回顾在BERT的训练中Next Sentence Prediction中, 我们取出$[cls]$对应的那一条向量, 然后把他映射成1个数值并用$sigmoid$函数激活: $$\hat{y} = sigmoid(Linear(cls\_vector)) \quad \hat{y} \in (0, \ 1)$$3) **动态学习率和提前终止$(early \ stop)$**: 上一步我们将语料划分成了训练和测试集, 我们的训练方式是, 每个$epoch$, 用训练集训练. 对模型性能的衡量标准是$AUC$, $AUC$的衡量标准对二分类非常易用, 这里因为时间关系就不讲了, 如果大家不熟悉可以上网搜寻相关资料. 当前$epoch$训练完毕之后, 用测试集衡量当前训练结果, 并记下当前$epoch$的$AUC$, 如果当前的$AUC$较上一个$epoch$没有提升, 那就**降低学习率**, 实际操作是让当前的学习率降低$1/5$, 直到$10$个$epoch$测试集的$AUC$都没有提升, 就终止训练. 我们的初始学习率是$1e-6$, 因为我们是在维基百科预训练语料的基础上进行训练的, 属于下游任务, 只需要微调预训练模型就好. 4) **解决过拟合问题:** 但在实际操作中, 使用$\hat{y} = sigmoid(Linear(cls\_vector)) \quad \hat{y} \in (0, \ 1)$的方式, 发现虽然在训练集和测试集上$AUC$都很高, 但实际随便输入一些从各种网上随便找的一些酒店评论后, 发现泛化能力不好. 这是因为我们的训练数据集非常小, 即使区分训练集和测试集, 但因为整体数据形态比较单一, 模型遇到自己没见过的情况就很容易无法做出正确判断, 为了提高模型的泛化性能, 我尝试了另一种模型结构:  如上图, 我尝试$mean \ max \ pool$的一种把隐藏层的序列转换为一条向量的方式, 其实就是沿着$sequence \ length$的维度分别求均值和$max$, 之后拼起来成为一条向量, 之后同样映射成一个值再激活, 伪代码如下: $X_{hidden}: [batch\_size, \ seq\_len, \ embedding\_dim]$ $mean\_pooled = mean(X_{hidden}, \ dimension=seq\_len) \quad [batch\_size, \ embedding\_dim]$$max\_pooled = max(X_{hidden}, \ dimension=seq\_len) \quad [batch\_size, \ embedding\_dim]$$mean\_max\_pooled = concatenate(mean\_pooled, \ max\_pooled, \ dimension=embedding\_dim ) \quad [batch\_size, \ embedding\_dim * 2]$ 上式中$mean\_max\_pooled$也就是我们得到的一句话的数学表达, 含有这句话的信息, 其实这也是一种$DOC2VEC$的方法, 也就是把一句话转换成一条向量, 而且无论这句话有多长, 转换出来向量的维度都是一样的, 之后可以用这些向量做一些分类聚类等任务. 下一步我们同样做映射, 之后用$sigmoid$激活: $\hat{y} = sigmoid(Linear(mean\_max\_pooled)) \quad \hat{y} \in (0, \ 1)$ 怎样理解这样的操作呢, 隐藏层就是一句话的数学表达, 我们求均值和最大值正数学表达对这句话的平均响应, 和最大响应, 之后我们用线性映射来识别这些响应, 从而得到模型的推断结果. 我们还用了$weight \ decay$的方式, 其实就是$L2 \ normalization$, 在PyTorch里有接口可以直接调用, 一会会说到, 其实$L2$正则的作用就是防止参数的值变得过大或过小, 我们可以设想一下, 由于我们的训练数据很少, 所以实际使用模型进行推断的时候有些字和词或者句子结构的组合模型都是没见过的, 模型里面参数的值很大的话会造成遇到某一些特别的句子或者词语的时候, 模型对句子的响应过大, 导致最终输出的值偏离实际, 其实我们希望模型更从容淡定一些, 所以我们加入$L2 \ normalization$. 除此之外, 我们预训练的BERT有6个transformer block, 我们在情感分析的时候, 只用了3个, 因为后面实在是参数太多, 容易导致过拟合, 所以在第三个transformer block之后, 就截出隐藏层进行$pooling$了, 后面的transformer block都没有用到. 再除此之外, 我使用了$dropout$机制, $dropout$设为了$0.4$, 因为模型参数是在是太多, 所以在训练的时候直接让$40\%$的参数失能, 防止过拟合. 
经过以上方法, 模型训练集和测试机的$AUC$都达到了$0.95$以上, 而且经过实际的测试, 模型也可以基本比较正确的分辨出语句的情感极性. 5) **阈值微调:** 经过模型的推断, 输出的值介于0到1之间, 我们可以认为只要这个值在0.5以上, 就是正样本, 如果在0.5以下, 就是副样本, 其实这是不一定的, 0.5通常不是最佳的分类边界, 所以我写了一个用来寻找最佳阈值的脚本, 在./metrics/\_\_init\_\_.py里面. 这个脚本的方法是从0.01到0.99定义99个阈值, 高于阈值算正样本, 低于算副样本, 然后与测试集计算$f1 \ score$, 之后选出可以使$f1 \ score$最高的阈值, 在训练中, 每一个$epoch$都会运行一次寻找阈值的脚本.
###Code
import pandas as pd
df = pd.read_pickle("./sentiment_state_dict_mean_max_pool/df_log.pickle")
# 训练日志的尾部, 可见训练集train_auc和测试集test_auc都到达了0.95以上,
# 实际上测试集的auc比训练集还要高, 因为训练集有dropout
df.tail()
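# Hedged sketch (added; not from the original repo) of the mean-max pooling head described in the
# markdown above: hidden [batch, seq_len, embed_dim] -> concat(mean, max) over seq_len -> Linear -> sigmoid.
import torch
from torch import nn
def mean_max_pool_head(hidden, linear):
    # hidden: [batch_size, seq_len, embedding_dim]; linear: nn.Linear(2 * embedding_dim, 1)
    mean_pooled = hidden.mean(dim=1)
    max_pooled, _ = hidden.max(dim=1)
    pooled = torch.cat([mean_pooled, max_pooled], dim=-1)  # [batch_size, 2 * embedding_dim]
    return torch.sigmoid(linear(pooled)).squeeze(-1)       # y_hat in (0, 1), shape [batch_size]
# example usage (hypothetical names): head = nn.Linear(2 * 384, 1); y_hat = mean_max_pool_head(hidden_states, head)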
# 让我们来画一下图
import matplotlib.pyplot as plt
plt.plot(df["train_auc"].tolist(), c="b", label="train_auc")
plt.plot(df["test_auc"].tolist(), c="r", label="test_auc")
plt.xlabel("epochs")
plt.ylabel("AUC")
plt.yticks([i/10 for i in range(11)])
plt.grid()
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
6) **情感分析代码解读和实际测试**: 代码解读见视频讲解, 下面我们进行测试:
###Code
from Sentiment_Inference import *
model = Sentiment_Analysis(max_seq_len=300, batch_size=2)
# https://www.booking.com/reviews.zh-cn.html
test_list = [
"有几次回到酒店房间都没有被整理。两个人入住,只放了一套洗漱用品。",
"早餐时间询问要咖啡或茶,本来是好事,但每张桌子上没有放“怡口糖”(代糖),又显得没那么周到。房间里卫生间用品补充,有时有点漫不经心个人觉得酒店房间禁烟比较好",
'十六浦酒店有提供港澳码头的SHUTTLE BUS, 但氹仔没有订了普通房, 可能是会员的关系 UPGRADE到了DELUXE房,风景是绿色的河, 感观一般, 但房间还是不错的, 只是装修有点旧了另外品尝了酒店的自助晚餐, 种类不算多, 味道OK, 酒类也免费任饮, 这个不错最后就是在酒店的娱乐场赢了所有费用, 一切都值得了!',
'地理位置优越,出门就是步行街,也应该是耶路撒冷的中心地带,去老城走约20分钟。房间很实用,虽然不含早餐,但是楼下周边有很多小超市和餐厅、面包店,所以一切都不是问题。',
'实在失望!如果果晚唔系送朋友去码头翻香港一定会落酒店大堂投诉佢!太离谱了!我地吃个晚饭消费千几蚊 ,买单个黑色衫叫Annie果个唔知系部长定系经理录左我万几蚊!简直系离晒大谱的 !咁样的管理层咁大间酒店真的都不敢恭维!',
'酒店服务太棒了, 服务态度非常好, 房间很干净',
"服务各方面没有不周到而的地方, 各方面没有没想到的细节",
"房间设施比较旧,虽然是古典风格,但浴室的浴霸比较不好用。很不满意的是大厅坐下得消费,不人性化,而且糕点和沙拉很难吃,贵而且是用塑料盒子装的,5星级?特别是青团,58块钱4个,感觉放了好几天了,超级难吃。。。把外国朋友吓坏了。。。",
"南京东路地铁出来就能看到,很方便。酒店大堂和房间布置都有五星级的水准。",
"服务不及5星,前台非常不专业,入住时会告知你没房要等,不然就加钱升级房间。前台个个冰块脸,对待客人好像仇人一般,带着2岁的小孩前台竟然还要收早餐费。门口穿白衣的大爷是木头人,不会提供任何帮助。入住期间想要多一副牙刷给孩子用,竟然被问为什么。五星设施,一星服务,不会再入住!"
]
model(test_list)
text = "对于这个亲子房来说,没有浴缸对于比较小的小朋友来说可能会有点不太方便,小的时候不太会站立洗澡的,所以可能需要洗盆浴,我们宝宝4岁了,其实也没有关系,但是之前有自己经历过带6个月宝宝出去玩的,很多店家觉得浴缸浪费空间所以都只有淋浴房。但是自己给宝宝洗澡的时候就非常尴尬…不知道这家是不是可以有租用的。因为我们不是一定需要,也没有做询问。"
model(text)
###Output
对于这个亲子房来说,没有浴缸对于比较小的小朋友来说可能会有点不太方便,小的时候不太会站立洗澡的,所以可能需要洗盆浴,我们宝宝4岁了,其实也没有关系,但是之前有自己经历过带6个月宝宝出去玩的,很多店家觉得浴缸浪费空间所以都只有淋浴房。但是自己给宝宝洗澡的时候就非常尴尬…不知道这家是不是可以有租用的。因为我们不是一定需要,也没有做询问。
负样本, 输出值0.31
----------
###Markdown
中文自然语言处理Transformer模型(二)BERT的预训练实践与应用 这是Transformer解读的第二部分, 这部分是实践的部分, 如果你没有看第一部分: [汉语自然语言处理-从零解读碾压循环神经网络的transformer模型(一)](https://github.com/aespresso/a_journey_into_math_of_ml/tree/master/03_transformer_tutorial_1st_part) | 视频讲解: [B站讲解](https://www.bilibili.com/video/av58239477/) / [youtube](https://www.youtube.com/watch?v=wLKsaZWeuCM) | 二. transformer代码解读, 语料数据预处理, BERT的预训练和情感分析的应用: 首先是今天课程内容的顺序, 我将BERT代码解读放到了最后, 把主要内容排在了前面, 注意我们今天使用的是**PyTorch**深度学习框架, **其实用什么样的框架并不重要**, 本节课**代码的部分不是重点**, 重点是让大家可以掌握$NLP$中**语料预处理**和**建模**并**解决实际应用中出现的困难**的的一些思路, 那话说回来为什么用PyTorch呢? 我其实用Tensorflow的时间要比PyTorch长很多, 但是目前用了PyTorch之后, 我感觉对于NLP来说, PyTorch真的比Tensorflow好用多了, 因为Tensorflow属于静态图, 建模和调试都很麻烦. 尤其是序列模型要定义很多variable scope和name scope之类的, 也就是张量的作用域, 这些东西命名搞不好一不小心就会有bug, 而且有些bug不会报错, 当你发现计算结果不对, 要再返回头debug, 而且Tensorflow的静态图不支持调试, 要用sess.run把想要的结果计算出来才可以. 但是PyTorch是动态图, 就和写numpy一样, 非常方便调试, 而且用class面向对象方式建模, 先声明操作再执行操作, 这样基本不容易在数据流图上出现bug. 如果你从来没用过**PyTorch**我今天在后面代码部分会带大家大致熟悉一下, 主要是带大家熟悉一下PyTorch的特性, 具体教程官方文档中的快速入门(英文)写的就很好, https://pytorch.org/tutorials/: 1. 进一步理解$positional \ encoding$, 结合注意力矩阵可视化位置编码;2. **语言模型**的定义和**BERT解读**;3. BERT训练之前的准备工作, 语料预处理;4. BERT的预训练, 训练参数;5. 使用BERT预训练模型进行自然语言的情感分类;6. BERT代码解读(这部分因为长度原因放在单独一个视频里). 1. 进一步理解$positional \ encoding$, 结合注意力矩阵可视化位置编码;
###Code
# 导入依赖库
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import math
import plotly.graph_objs as go
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
from IPython.display import Image
init_notebook_mode(connected=True)
def get_positional_encoding(max_seq_len, embed_dim):
# 初始化一个positional encoding
# embed_dim: 字嵌入的维度
# max_seq_len: 最大的序列长度
positional_encoding = np.array([
[pos / np.power(10000, 2 * i / embed_dim) for i in range(embed_dim)]
if pos != 0 else np.zeros(embed_dim) for pos in range(max_seq_len)])
positional_encoding[1:, 0::2] = np.sin(positional_encoding[1:, 0::2]) # dim 2i 偶数
positional_encoding[1:, 1::2] = np.cos(positional_encoding[1:, 1::2]) # dim 2i+1 奇数
# 归一化, 用位置嵌入的每一行除以它的模长
# denominator = np.sqrt(np.sum(position_enc**2, axis=1, keepdims=True))
# position_enc = position_enc / (denominator + 1e-8)
return positional_encoding
positional_encoding = get_positional_encoding(max_seq_len=100, embed_dim=128)
# 3d可视化
relation_matrix = np.dot(positional_encoding, positional_encoding.T)[1:, 1:]
data = [go.Surface(z=relation_matrix)]
layout = go.Layout(scene={"xaxis": {'title': "sequence length"}, "yaxis": {"title": "sequence length"}})
fig = go.Figure(data=data, layout=layout)
iplot(fig)
###Output
_____no_output_____
###Markdown
上图中, 我们用位置编码矩阵乘以(矩阵乘)他本身的转置, 也就是$PE: \ [seq\_len, \ embedding\_dim ]$, 我们求$PEPE^T$, 得出的维度是$[seq\_len, \ seq\_len ]$. 我们看到上图中, 矩阵的对角线隆起, 也就是值比较大, 是因为一个矩阵乘以他本身的转置之后, 形成的矩阵的对角线正是这个矩阵的每一行$(row)$点乘这一行本身, 所以是值最大的区域(红色部分). 对于位置编码来说, 也就是当前位置与当前位置本身相关程度最高. 再往对角线两边看, 发现以对角线(红色山峰)区域为中心, 两边属于缓慢下降趋势, 这就说明了随着离当前位置越远, 其位置编码的相关程度就越低. 由此可见, 位置编码建立在时间维度的关联关系. 2. 语言模型的定义和BERT解读; 什么是语言模型, 其实用一个公式就可以表示$P(c_{1},\ldots ,c_{m})$, 假设我们有一句话, $c_{1}到c_{m}$是这句话里的$m$个字, 而语言模型就是求的是这句话出现的概率是多少. 比如说在一个语音识别的场景, 机器听到一句话是"wo wang dai san le(我忘带伞了)", 然后机器解析出两个句子, 一个是"我网袋散了", 另一个是"我忘带伞了", 也就是前者的概率大于后者. 然后语言模型就可以判断$P("我忘带伞了") > P("我网袋散了")$, 从而得出这句语音的正确解析结果是"我忘带伞了". BERT的全称是: Bidirectional Encoder Representations from Transformers, 如果翻译过来也就是**双向transformer编码表达**, 我们在上节课解读了transformer的编码器, 编码器输出的隐藏层就是自然语言序列的数学表达, 那么双向是什么意思呢? 我们来看一下下面这张图.  上图中$E_i$是指的单个字或词, $T_i$指的是最终计算得出的**隐藏层**, 还记得我们在Transformer(一)中讲到的注意力矩阵和注意力加权, 经过这样的操作之后, 序列里面的每一个字, **都含有这个字前面的信息和后面的信息**, 这就是**双向**的理解, 在这里, 一句话中每一个字, 经过注意力机制和加权之后, **当前这个字等于用这句话中其他所有字重新表达了一遍**, 每个字含有了这句话中所有成分的信息. 在BERT中, 主要是以两种预训练的方式来建立语言模型: BERT语言模型任务一: MASKED LM在BERT中, Masked LM(Masked language Model)构建了语言模型, 这也是BERT的预训练中任务之一, 简单来说, 就是**随机遮盖或替换**一句话里面任意字或词, 然后让模型通过上下文的理解预测那一个被遮盖或替换的部分, 之后**做$Loss$的时候只计算被遮盖部分的$Loss$**, 其实是一个很容易理解的任务, 实际操作方式如下: 1. 随机把一句话中$15 \% $的$token$替换成以下内容: 1) 这些$token$有$80 \% $的几率被替换成$[mask]$; 2) 有$10 \%$的几率被替换成任意一个其他的$token$; 3) 有$10 \%$的几率原封不动.2. 之后让模型**预测和还原**被遮盖掉或替换掉的部分, 模型最终输出的隐藏层的计算结果的维度是: $X_{hidden}: [batch\_size, \ seq\_len, \ embedding\_dim]$ 我们初始化一个映射层的权重$W_{vocab}$: $W_{vocab}: [embedding\_dim, \ vocab\_size]$ 我们用$W_{vocab}$完成隐藏维度到字向量数量的映射, 只要求$X_{hidden}$和$W_{vocab}$的矩阵乘(点积): $X_{hidden}W_{vocab}: [batch\_size, \ seq\_len, \ vocab\_size] $之后把上面的计算结果在$vocab\_size$(最后一个)维度做$softmax$归一化, 是每个字对应的$vocab\_size$的和为$1$, 我们就可以通过$vocab\_size$里概率最大的字来得到模型的预测结果, 就可以和我们准备好的$Label$做损失($Loss$)并反传梯度了. 注意做损失的时候, 只计算在第1步里当句中**随机遮盖或替换**的部分, 其余部分不做损失, 对于其他部分, 模型输出什么东西, 我们不在意. BERT语言模型任务二: Next Sentence Prediction1. 首先我们拿到属于上下文的一对句子, 也就是两个句子, 之后我们要在这两段连续的句子里面加一些特殊$token$: $[cls]$上一句话,$[sep]$下一句话.$[sep]$ 也就是在句子开头加一个$[cls]$, 在两句话之中和句末加$[sep]$, 具体地就像下图一样:  2. 我们看到上图中两句话是$[cls]$ my dog is cute $[sep]$ he likes playing $[sep]$, $[cls]$我的狗很可爱$[sep]$他喜欢玩耍$[sep]$, 除此之外, 我们还要准备同样格式的两句话, 但他们不属于上下文关系的情况; $[cls]$我的狗很可爱$[sep]$企鹅不擅长飞行$[sep]$, 可见这属于上下句不属于上下文关系的情况; 在实际的训练中, 我们让上面两种情况出现的比例为$1:1$, 也就是一半的时间输出的文本属于上下文关系, 一半时间不是.3. 我们进行完上述步骤之后, 还要随机初始化一个可训练的$segment \ embeddings$, 见上图中, 作用就是用$embeddings$的信息让模型分开上下句, 我们一把给上句全$0$的$token$, 下句啊全$1$的$token$, 让模型得以判断上下句的起止位置, 例如: $[cls]$我的狗很可爱$[sep]$企鹅不擅长飞行$[sep]$ $0 \quad \ 0 \ \ 0 \ \ 0 \ \ 0 \ \ 0 \ \ 0 \ \ 0 \ \ \ 1 \ \ 1 \ \ 1 \ \ 1 \ \ 1 \ \ 1 \ \ 1 \ \ 1 $ 上面$0$和$1$就是$segment \ embeddings$.4. 还记得我们上节课说过的, 注意力机制就是, 让每句话中的每一个字对应的那一条向量里, 都融入这句话所有字的信息, 那么我们在最终隐藏层的计算结果里, 只要取出$[cls]token$所对应的一条向量, 里面就含有整个句子的信息, 因为我们期望这个句子里面所有信息都会往$[cls]token$所对应的一条向量里汇总: 模型最终输出的隐藏层的计算结果的维度是: 我们$X_{hidden}: [batch\_size, \ seq\_len, \ embedding\_dim]$ 我们要取出$[cls]token$所对应的一条向量, $[cls]$对应着$\ seq\_len$维度的第$0$条: $cls\_vector = X_{hidden}[:, \ 0, \ :]$ $cls\_vector \in \mathbb{R}^{batch\_size, \ embedding\_dim}$ 之后我们再初始化一个权重, 完成从$embedding\_dim$维度到$1$的映射, 也就是逻辑回归, 之后用$sigmoid$函数激活, 就得到了而分类问题的推断. 我们用$\hat{y}$来表示模型的输出的推断, 他的值介于$(0, \ 1)$之间: $\hat{y} = sigmoid(Linear(cls\_vector)) \quad \hat{y} \in (0, \ 1)$至此$BERT$的训练方法就讲完了, 是不是很简单, 下面我们来为$BERT$的预训练准备数据. 3. BERT训练之前的准备工作, 语料预处理;__字典的制作, 参见目录./corpus/BERT_preprocessing.ipynb文件中的讲解__ 4. 
BERT的预训练, 训练参数;BERT论文中, 推荐的模型参数为: 基准模型$transformer\_block=12, \ embedding\_dimension=768, \ num\_heads=12, \ Total Param eters=110M)$, 可见其中共有$1.1$亿参数, 除此之外, 还有比基准模型还大的高性能模型, 参数量为$3$亿, 要想训练并使用这么大参数的模型, 需要充裕的计算资源! 但是经过我的实际测试, 结合我目前正在研究的命名实体识别, 语义分析, 关系抽取和知识图谱的需求, 发现其实这个参数比较过剩, 我们今天训练BERT所用的参数为$transformer\_block=6, \ embedding\_dimension=384, \ num\_heads=12, \ Total Param eters=23M)$, 可见我把参数缩减到$2$千万, 但即使这样, 使用一块11GB显存的2080Ti显卡, 训练维基百科语料的BERT也需要一周的时间. 注意我们今天所使用的模型, 是在开源项目 https://github.com/huggingface/pytorch-transformers 的基础上修改而来, 其中我添加了很多中文注释, 添加了预处理模块, 添加了动态padding优化了速度(在后面代码解读的部分会讲到), 添加了情感分析模块等; 中文维基百科语料: https://github.com/brightmart/nlp_chinese_corpus 我只是做了一下预处理, 以适应BERT的预训练, 预处理之后的语料可以在readme.md文件中的百度网盘地址下载; 我已经把使用维基百科语料预训练好的BERT模型上传到了百度网盘, 请在readme.md文件中查看, 我还想提醒大家一下, 网盘上的BERT预训练模型在训练的时候, 使用了一些简单的技巧, 但这些技巧并没有出现在这个教程开源的代码里面, 这是因为某些不方便的原因, 不过我可以告诉大家这些技巧, 大家可以自己实现一下, 另外, 不建议大家用我公开的BERT训练代码来重新训练BERT模型, 因为我上传的已经训练好的BERT性能要更好一些: BERT训练技巧: 1) 因为我们是按单个字为单位训练BERT, 所以在Masked LM里面, 把句子中的英文单词分出来, 将英文单词所在的区域一起遮盖掉, 让模型预测这个部分; 2) 很多句子里含有数字, 显然在Masked LM中, 让模型准确地预测数据是不现实的, 所以我们把原文中的数字(包括整数和小数)都替换成一个特殊token, NUM, 这样模型只要预测出这个地方应该是某些数字就可以来. BERT训练代码解读在第6部分 5. 使用BERT预训练模型进行自然语言的情感分类;1) **情感分析语料预处理**: 参见目录./corpus/sentiment_preprocessing.ipynb, 我用使用来酒店评论语料, 不过这个语料规模要比2018年用LSTM做情感分析的要大一些, 正面评论和负面评论各5000条, 其实这也是玩具级数据集, 用BERT参数这么大的模型, 训练会产生严重过拟合, 泛化能力差的情况, 这也是我们下面需要解决的问题; 2) 回顾在BERT的训练中Next Sentence Prediction中, 我们取出$[cls]$对应的那一条向量, 然后把他映射成1个数值并用$sigmoid$函数激活: $$\hat{y} = sigmoid(Linear(cls\_vector)) \quad \hat{y} \in (0, \ 1)$$3) **动态学习率和提前终止$(early \ stop)$**: 上一步我们将语料划分成了训练和测试集, 我们的训练方式是, 每个$epoch$, 用训练集训练. 对模型性能的衡量标准是$AUC$, $AUC$的衡量标准对二分类非常易用, 这里因为时间关系就不讲了, 如果大家不熟悉可以上网搜寻相关资料. 当前$epoch$训练完毕之后, 用测试集衡量当前训练结果, 并记下当前$epoch$的$AUC$, 如果当前的$AUC$较上一个$epoch$没有提升, 那就**降低学习率**, 实际操作是让当前的学习率降低$1/5$, 直到$10$个$epoch$测试集的$AUC$都没有提升, 就终止训练. 我们的初始学习率是$1e-6$, 因为我们是在维基百科预训练语料的基础上进行训练的, 属于下游任务, 只需要微调预训练模型就好. 4) **解决过拟合问题:** 但在实际操作中, 使用$\hat{y} = sigmoid(Linear(cls\_vector)) \quad \hat{y} \in (0, \ 1)$的方式, 发现虽然在训练集和测试集上$AUC$都很高, 但实际随便输入一些从各种网上随便找的一些酒店评论后, 发现泛化能力不好. 这是因为我们的训练数据集非常小, 即使区分训练集和测试集, 但因为整体数据形态比较单一, 模型遇到自己没见过的情况就很容易无法做出正确判断, 为了提高模型的泛化性能, 我尝试了另一种模型结构:  如上图, 我尝试$mean \ max \ pool$的一种把隐藏层的序列转换为一条向量的方式, 其实就是沿着$sequence \ length$的维度分别求均值和$max$, 之后拼起来成为一条向量, 之后同样映射成一个值再激活, 伪代码如下: $X_{hidden}: [batch\_size, \ seq\_len, \ embedding\_dim]$ $mean\_pooled = mean(X_{hidden}, \ dimension=seq\_len) \quad [batch\_size, \ embedding\_dim]$$max\_pooled = max(X_{hidden}, \ dimension=seq\_len) \quad [batch\_size, \ embedding\_dim]$$mean\_max\_pooled = concatenate(mean\_pooled, \ max\_pooled, \ dimension=embedding\_dim ) \quad [batch\_size, \ embedding\_dim * 2]$ 上式中$mean\_max\_pooled$也就是我们得到的一句话的数学表达, 含有这句话的信息, 其实这也是一种$DOC2VEC$的方法, 也就是把一句话转换成一条向量, 而且无论这句话有多长, 转换出来向量的维度都是一样的, 之后可以用这些向量做一些分类聚类等任务. 下一步我们同样做映射, 之后用$sigmoid$激活: $\hat{y} = sigmoid(Linear(mean\_max\_pooled)) \quad \hat{y} \in (0, \ 1)$ 怎样理解这样的操作呢, 隐藏层就是一句话的数学表达, 我们求均值和最大值正数学表达对这句话的平均响应, 和最大响应, 之后我们用线性映射来识别这些响应, 从而得到模型的推断结果. 我们还用了$weight \ decay$的方式, 其实就是$L2 \ normalization$, 在PyTorch里有接口可以直接调用, 一会会说到, 其实$L2$正则的作用就是防止参数的值变得过大或过小, 我们可以设想一下, 由于我们的训练数据很少, 所以实际使用模型进行推断的时候有些字和词或者句子结构的组合模型都是没见过的, 模型里面参数的值很大的话会造成遇到某一些特别的句子或者词语的时候, 模型对句子的响应过大, 导致最终输出的值偏离实际, 其实我们希望模型更从容淡定一些, 所以我们加入$L2 \ normalization$. 除此之外, 我们预训练的BERT有6个transformer block, 我们在情感分析的时候, 只用了3个, 因为后面实在是参数太多, 容易导致过拟合, 所以在第三个transformer block之后, 就截出隐藏层进行$pooling$了, 后面的transformer block都没有用到. 再除此之外, 我使用了$dropout$机制, $dropout$设为了$0.4$, 因为模型参数是在是太多, 所以在训练的时候直接让$40\%$的参数失能, 防止过拟合. 
经过以上方法, 模型训练集和测试机的$AUC$都达到了$0.95$以上, 而且经过实际的测试, 模型也可以基本比较正确的分辨出语句的情感极性. 5) **阈值微调:** 经过模型的推断, 输出的值介于0到1之间, 我们可以认为只要这个值在0.5以上, 就是正样本, 如果在0.5以下, 就是副样本, 其实这是不一定的, 0.5通常不是最佳的分类边界, 所以我写了一个用来寻找最佳阈值的脚本, 在./metrics/\_\_init\_\_.py里面. 这个脚本的方法是从0.01到0.99定义99个阈值, 高于阈值算正样本, 低于算副样本, 然后与测试集计算$f1 \ score$, 之后选出可以使$f1 \ score$最高的阈值, 在训练中, 每一个$epoch$都会运行一次寻找阈值的脚本.
###Code
import pandas as pd
df = pd.read_pickle("./sentiment_state_dict_mean_max_pool/df_log.pickle")
# 训练日志的尾部, 可见训练集train_auc和测试集test_auc都到达了0.95以上,
# 实际上测试集的auc比训练集还要高, 因为训练集有dropout
df.tail()
# 让我们来画一下图
import matplotlib.pyplot as plt
plt.plot(df["train_auc"].tolist(), c="b", label="train_auc")
plt.plot(df["test_auc"].tolist(), c="r", label="test_auc")
plt.xlabel("epochs")
plt.ylabel("AUC")
plt.yticks([i/10 for i in range(11)])
plt.grid()
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
6) **情感分析代码解读和实际测试**: 代码解读见视频讲解, 下面我们进行测试:
###Code
from Sentiment_Inference import *
model = Sentiment_Analysis(max_seq_len=300, batch_size=2)
# https://www.booking.com/reviews.zh-cn.html
test_list = [
"有几次回到酒店房间都没有被整理。两个人入住,只放了一套洗漱用品。",
"早餐时间询问要咖啡或茶,本来是好事,但每张桌子上没有放“怡口糖”(代糖),又显得没那么周到。房间里卫生间用品补充,有时有点漫不经心个人觉得酒店房间禁烟比较好",
'十六浦酒店有提供港澳码头的SHUTTLE BUS, 但氹仔没有订了普通房, 可能是会员的关系 UPGRADE到了DELUXE房,风景是绿色的河, 感观一般, 但房间还是不错的, 只是装修有点旧了另外品尝了酒店的自助晚餐, 种类不算多, 味道OK, 酒类也免费任饮, 这个不错最后就是在酒店的娱乐场赢了所有费用, 一切都值得了!',
'地理位置优越,出门就是步行街,也应该是耶路撒冷的中心地带,去老城走约20分钟。房间很实用,虽然不含早餐,但是楼下周边有很多小超市和餐厅、面包店,所以一切都不是问题。',
'实在失望!如果果晚唔系送朋友去码头翻香港一定会落酒店大堂投诉佢!太离谱了!我地吃个晚饭消费千几蚊 ,买单个黑色衫叫Annie果个唔知系部长定系经理录左我万几蚊!简直系离晒大谱的 !咁样的管理层咁大间酒店真的都不敢恭维!',
'酒店服务太棒了, 服务态度非常好, 房间很干净',
"服务各方面没有不周到而的地方, 各方面没有没想到的细节",
"房间设施比较旧,虽然是古典风格,但浴室的浴霸比较不好用。很不满意的是大厅坐下得消费,不人性化,而且糕点和沙拉很难吃,贵而且是用塑料盒子装的,5星级?特别是青团,58块钱4个,感觉放了好几天了,超级难吃。。。把外国朋友吓坏了。。。",
"南京东路地铁出来就能看到,很方便。酒店大堂和房间布置都有五星级的水准。",
"服务不及5星,前台非常不专业,入住时会告知你没房要等,不然就加钱升级房间。前台个个冰块脸,对待客人好像仇人一般,带着2岁的小孩前台竟然还要收早餐费。门口穿白衣的大爷是木头人,不会提供任何帮助。入住期间想要多一副牙刷给孩子用,竟然被问为什么。五星设施,一星服务,不会再入住!"
]
model(test_list)
text = "对于这个亲子房来说,没有浴缸对于比较小的小朋友来说可能会有点不太方便,小的时候不太会站立洗澡的,所以可能需要洗盆浴,我们宝宝4岁了,其实也没有关系,但是之前有自己经历过带6个月宝宝出去玩的,很多店家觉得浴缸浪费空间所以都只有淋浴房。但是自己给宝宝洗澡的时候就非常尴尬…不知道这家是不是可以有租用的。因为我们不是一定需要,也没有做询问。"
model(text)
###Output
对于这个亲子房来说,没有浴缸对于比较小的小朋友来说可能会有点不太方便,小的时候不太会站立洗澡的,所以可能需要洗盆浴,我们宝宝4岁了,其实也没有关系,但是之前有自己经历过带6个月宝宝出去玩的,很多店家觉得浴缸浪费空间所以都只有淋浴房。但是自己给宝宝洗澡的时候就非常尴尬…不知道这家是不是可以有租用的。因为我们不是一定需要,也没有做询问。
负样本, 输出值0.31
----------
|
VacationPy/.ipynb_checkpoints/VacationPy-checkpoint.ipynb | ###Markdown
VacationPy---- Note* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps. installed gmaps- pip install gmaps
###Code
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import gmaps
import os
# Import API key
from api_keys import g_key
###Output
_____no_output_____
###Markdown
Store Part I results into DataFrame* Load the csv exported in Part I to a DataFrame
###Code
# Create DataFrame from WeatherPy csv
weather_info = pd.read_csv("../output_data/city_weather.csv")
weather_info
###Output
_____no_output_____
###Markdown
Humidity Heatmap* Configure gmaps.* Use the Lat and Lng as locations and Humidity as the weight.* Add Heatmap layer to map.
###Code
# Configure gmaps
gmaps.configure(api_key=g_key)
# Store latitude and longitude in locations
locations = weather_info[["Lat","Lng"]]
# Store humidity as the weight in hweight
hweight = weather_info["Humidity"].astype(float)
# Plot Heatmap
fig = gmaps.figure(map_type="SATELLITE")
# Create heat layer
heat_layer = gmaps.heatmap_layer(locations, weights=hweight,dissipating=False, max_intensity=100,point_radius=2)
# Add layer
fig.add_layer(heat_layer)
# Display figure
fig
###Output
_____no_output_____
###Markdown
Create new DataFrame fitting weather criteria* Narrow down the cities to fit weather conditions.* Drop any rows will null values.
###Code
# Set perfect weather conditions
hotel_df = weather_info.loc[(weather_info["Max Temp"]>= 65)&(weather_info["Max Temp"]<=70)&(weather_info["Wind Speed"]<10)&(weather_info["Cloudiness"]==0)]
hotel_df
# Drop the other rows that do not fit the criteria above
# Reset the index and drop previous index number
hotel_df = hotel_df.dropna(how='any')
hotel_df = hotel_df.reset_index(drop=True)
hotel_df
###Output
_____no_output_____
###Markdown
Hotel Map* Store into variable named `hotel_df`.* Add a "Hotel Name" column to the DataFrame.* Set parameters to search for hotels with 5000 meters.* Hit the Google Places API for each city's coordinates.* Store the first Hotel result into the DataFrame.* Plot markers on top of the heatmap.
###Code
# Add "Hotel Name" column to the DataFrame
hotel_df["Hotel Name"]=""
hotel_df.head()
# create params
# create a for loop to iterate through each city
# then we want to grab hotel name by lat and lng from api
# store it back into dataframe
# Params dictionary to update each iteration
params = {
"radius":5000,
"types":"hotel",
"key":g_key
}
# Use Lat and Long to identify the hotel names
for index, row in hotel_df.iterrows():
lat = row["Lat"]
lng = row["Lng"]
# Change location each iteration while leaving original params in place
params["location"] = f"{lat},{lng}"
# Base URL to use for request
base_url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json"
# Make the request
hotel_name = requests.get(base_url, params=params).json()
# Since some data may be missing we incorporate a try-except to skip any that are missing a data point.
try:
hotel_df.loc[index, "Hotel Name"] = hotel_name["results"][3]["name"]
except (KeyError, IndexError):
print("Missing field/result...skipping.")
# Export to csv file
# Display dataframe with new Hotel Name information
hotel_df.to_csv("../output_data/hotel_df.csv")
hotel_df
# NOTE: Do not change any of the code in this cell
# Using the template add the hotel marks to the heatmap
info_box_template = """
<dl>
<dt>Name</dt><dd>{Hotel Name}</dd>
<dt>City</dt><dd>{City}</dd>
<dt>Country</dt><dd>{Country}</dd>
</dl>
"""
# Store the DataFrame Row
# NOTE: be sure to update with your DataFrame name
hotel_info = [info_box_template.format(**row) for index, row in hotel_df.iterrows()]
locations = hotel_df[["Lat", "Lng"]]
# Add marker layer ontop of heat map
# info_box_content displays information from hotel_info when clicked on
markers = gmaps.marker_layer(locations, info_box_content = hotel_info )
fig.add_layer(markers)
# Display figure
fig
# info_box_content source: API documentation¶. (n.d.). Retrieved July 23, 2020, from https://jupyter-gmaps.readthedocs.io/en/latest/api.html
###Output
_____no_output_____
###Markdown
VacationPy---- Note* Keep an eye on your API usage. Use https://developers.google.com/maps/reporting/gmp-reporting as reference for how to monitor your usage and billing.* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
###Code
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import gmaps
import os
import json
#!jupyter nbextension enable --py gmaps
# Import API key
from api_keys import g_key
###Output
_____no_output_____
###Markdown
Store Part I results into DataFrame* Load the csv exported in Part I to a DataFrame
###Code
weather_data = pd.read_csv('WeatherPy.csv')
weather_data.head()
weather_data.rename(columns = {'Lat':'Latitude', 'Lng': 'Longitude'}, inplace = True)
weather_data.head()
###Output
_____no_output_____
###Markdown
Humidity Heatmap* Configure gmaps.* Use the Lat and Lng as locations and Humidity as the weight.* Add Heatmap layer to map.
###Code
gmaps.configure(api_key=g_key)
locations = weather_data[["Latitude", "Longitude"]]
humidity = weather_data["Humidity"]
fig = gmaps.figure()
heat_layer = gmaps.heatmap_layer(locations, weights=humidity,
dissipating=False, max_intensity=10,
point_radius = 1)
fig.add_layer(heat_layer)
fig
# fig.savefig("map_export.png", dpi=300)  # gmaps figures have no savefig method; save a screenshot of the rendered map instead
###Output
_____no_output_____
###Markdown
Create new DataFrame fitting weather criteria* Narrow down the cities to fit weather conditions.* Drop any rows will null values.
###Code
cond_weather_data = weather_data.loc[(weather_data["Max Temp"]>=75) & (weather_data["Max Temp"]<=80) & (weather_data["Humidity"]<50) & (weather_data["Wind Speed"]<10)]
cond_weather_data.dropna()
###Output
_____no_output_____
###Markdown
Hotel Map* Store into variable named `hotel_df`.* Add a "Hotel Name" column to the DataFrame.* Set parameters to search for hotels with 5000 meters.* Hit the Google Places API for each city's coordinates.* Store the first Hotel result into the DataFrame.* Plot markers on top of the heatmap.
###Code
hotel_df = cond_weather_data
hotel_df['Hotel Name'] = ""
hotel_df.head()
for index, row in hotel_df.iterrows():
lat = row["Latitude"]
lng = row["Longitude"]
    # Build the request parameters for this city (the params dict was never defined above)
    params = {
        "location": f"{lat},{lng}",
        "radius": 5000,
        "types": "hotel",
        "key": g_key
    }
    base_url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json"
    # Parse the response as JSON so the results can be indexed below
    hotel_name = requests.get(base_url, params=params).json()
try:
hotel_df.loc[index, "Hotel Name"] = hotel_name["results"][0]["name"]
except (KeyError, IndexError):
hotel_df.loc[index, 'Hotel Name'] = "NaN"
hotel_df
# NOTE: Do not change any of the code in this cell
# Using the template add the hotel marks to the heatmap
info_box_template = """
<dl>
<dt>Name</dt><dd>{Hotel Name}</dd>
<dt>City</dt><dd>{City}</dd>
<dt>Country</dt><dd>{Country}</dd>
</dl>
"""
# Store the DataFrame Row
# NOTE: be sure to update with your DataFrame name
hotel_info = [info_box_template.format(**row) for index, row in hotel_df.iterrows()]
locations = hotel_df[["Latitude", "Longitude"]]
# Add marker layer ontop of heat map
#https://jupyter-gmaps.readthedocs.io/en/v0.3.4/gmaps.html
# Create a map using state centroid coordinates to set markers
# Marker locations must be coordinate pairs, not the hotel names
marker_locations = hotel_df[['Latitude', 'Longitude']]
# Create a marker_layer and fill the info boxes with the hotel names
fig = gmaps.figure()
markers = gmaps.marker_layer(marker_locations,
                             info_box_content=[f"Hotel Name: {row['Hotel Name']}" for index, row in hotel_df.iterrows()])
fig.add_layer(markers)
# Display figure
fig
###Output
_____no_output_____
###Markdown
VacationPy---- Note* Keep an eye on your API usage. Use https://developers.google.com/maps/reporting/gmp-reporting as reference for how to monitor your usage and billing.* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
###Code
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import gmaps
import os
# Import API key
from api_keys import g_key
###Output
_____no_output_____
###Markdown
Store Part I results into DataFrame* Load the csv exported in Part I to a DataFrame
###Code
weather_data = pd.read_csv("../WeatherPy/output_data/cities.csv")
weather_data
###Output
_____no_output_____
###Markdown
Humidity Heatmap* Configure gmaps.* Use the Lat and Lng as locations and Humidity as the weight.* Add Heatmap layer to map. Create new DataFrame fitting weather criteria* Narrow down the cities to fit weather conditions.* Drop any rows will null values. Hotel Map* Store into variable named `hotel_df`.* Add a "Hotel Name" column to the DataFrame.* Set parameters to search for hotels with 5000 meters.* Hit the Google Places API for each city's coordinates.* Store the first Hotel result into the DataFrame.* Plot markers on top of the heatmap.
###Code
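# The heatmap, weather-filtering and hotel-search cells were left unfilled in this copy of the
# notebook. A hedged sketch of the missing steps is added here so that the marker template cell
# below has a hotel_df to work with (assumption: cities.csv carries City, Country, Lat, Lng,
# Humidity, Max Temp, Wind Speed and Cloudiness columns, as in the other copies of this assignment).
gmaps.configure(api_key=g_key)
fig = gmaps.figure()
fig.add_layer(gmaps.heatmap_layer(weather_data[["Lat", "Lng"]],
                                  weights=weather_data["Humidity"],
                                  dissipating=False, max_intensity=100, point_radius=2))
# Narrow down to the ideal weather; the Places API lookup itself is omitted here,
# so the Hotel Name column is only a placeholder.
hotel_df = weather_data.loc[(weather_data["Max Temp"] > 70) &
                            (weather_data["Max Temp"] < 80) &
                            (weather_data["Wind Speed"] < 10) &
                            (weather_data["Cloudiness"] == 0)].dropna().copy()
hotel_df["Hotel Name"] = ""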
# NOTE: Do not change any of the code in this cell
# Using the template add the hotel marks to the heatmap
info_box_template = """
<dl>
<dt>Name</dt><dd>{Hotel Name}</dd>
<dt>City</dt><dd>{City}</dd>
<dt>Country</dt><dd>{Country}</dd>
</dl>
"""
# Store the DataFrame Row
# NOTE: be sure to update with your DataFrame name
hotel_info = [info_box_template.format(**row) for index, row in hotel_df.iterrows()]
locations = hotel_df[["Lat", "Lng"]]
# Add marker layer ontop of heat map
# Display figure
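# (Sketch) marker layer built from the locations and hotel_info above, shown on the figure
# created in the sketch at the top of this code block
markers = gmaps.marker_layer(locations, info_box_content=hotel_info)
fig.add_layer(markers)
fig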
###Output
_____no_output_____
###Markdown
VacationPy---- Note* Keep an eye on your API usage. Use https://developers.google.com/maps/reporting/gmp-reporting as reference for how to monitor your usage and billing.* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
###Code
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import gmaps
import os
# Import API key
from api_keys import g_key
###Output
_____no_output_____
###Markdown
Store Part I results into DataFrame* Load the csv exported in Part I to a DataFrame
###Code
cities = pd.read_csv("cities.csv", encoding="utf-8")
cities.head()
###Output
_____no_output_____
###Markdown
Humidity Heatmap* Configure gmaps.* Use the Lat and Lng as locations and Humidity as the weight.* Add Heatmap layer to map.
###Code
humidity = cities["Humidity"].astype(float)
maxhumidity = humidity.max()
locations = cities[["Lat", "Lng"]]
fig = gmaps.figure()
heat_layer = gmaps.heatmap_layer(locations, weights=humidity,dissipating=False, max_intensity=maxhumidity,point_radius=3)
fig.add_layer(heat_layer)
fig
###Output
_____no_output_____
###Markdown
Create new DataFrame fitting weather criteria* Narrow down the cities to fit weather conditions.* Drop any rows will null values.
###Code
hotel_df = cities.loc[(cities["Max Temp"] > 70) & (cities["Max Temp"] < 80) & (cities["Cloudiness"] == 0), :]
hotel_df = hotel_df.dropna(how='any')
hotel_df.reset_index(inplace=True)
del hotel_df['index']
hotel_df.head()
###Output
_____no_output_____
###Markdown
Hotel Map* Store into variable named `hotel_df`.* Add a "Hotel Name" column to the DataFrame.* Set parameters to search for hotels with 5000 meters.* Hit the Google Places API for each city's coordinates.* Store the first Hotel result into the DataFrame.* Plot markers on top of the heatmap.
###Code
hotellist = []
for i in range(len(hotel_df)):
lat = hotel_df.loc[i]['Lat']
lng = hotel_df.loc[i]['Lng']
params = {
"location": f"{lat},{lng}",
"radius": 5000,
"types" : "hotel",
"key": g_key
}
base_url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json"
requested = requests.get(base_url, params=params)
jsn = requested.json()
try:
hotellist.append(jsn['results'][0]['name'])
except:
hotellist.append("")
hotel_df["Hotel Name"] = hotellist
hotel_df = hotel_df.dropna(how='any')
hotel_df.head()
# NOTE: Do not change any of the code in this cell
# Using the template add the hotel marks to the heatmap
info_box_template = """
<dl>
<dt>Name</dt><dd>{Hotel Name}</dd>
<dt>City</dt><dd>{City}</dd>
<dt>Country</dt><dd>{Country}</dd>
</dl>
"""
# Store the DataFrame Row
# NOTE: be sure to update with your DataFrame name
hotel_info = [info_box_template.format(**row) for index, row in hotel_df.iterrows()]
locations = hotel_df[["Lat", "Lng"]]
# Add marker layer ontop of heat map
markers = gmaps.marker_layer(locations)
fig.add_layer(markers)
fig
# Display figure
###Output
_____no_output_____
###Markdown
VacationPy---- Note* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
###Code
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import gmaps
import os
import json
# Import API key
from api_keys import g_key
###Output
_____no_output_____
###Markdown
Store Part I results into DataFrame* Load the csv exported in Part I to a DataFrame
###Code
part_1_dataframe=pd.read_csv("../WeatherPy/city_weather_df.csv")
part_1_dataframe.head()
#dropping the unnamed
dropna_part_1_dataframe=part_1_dataframe.dropna()
del dropna_part_1_dataframe["Unnamed: 0"]
dropna_part_1_dataframe.head()
###Output
_____no_output_____
###Markdown
Humidity Heatmap* Configure gmaps.* Use the Lat and Lng as locations and Humidity as the weight.* Add Heatmap layer to map.
###Code
#Access maps with API key
gmaps.configure(api_key=g_key)
#getting coordinates
coord=pd.DataFrame(dropna_part_1_dataframe, columns=["Lat","Lng"])
#creating coordinate list
coord_list=coord.values.tolist()
#figure layout
figure_layout={
"width":"400px",
"height":"300px",
"border":"1px solid black",
"padding":"1px",
"margin":"0 auto 0 auto"
}
fig=gmaps.figure(layout=figure_layout)
#Weights
weights=dropna_part_1_dataframe.Humidity
heatmap=gmaps.heatmap_layer(coord_list, weights=weights)
#Heatmap
heatmap=gmaps.heatmap_layer(coord_list)
heatmap.max_intensity=2
heatmap.point_radius=3
fig.add_layer(heatmap)
fig
###Output
_____no_output_____
###Markdown
Create new DataFrame fitting weather criteria* Narrow down the cities to fit weather conditions.* Drop any rows will null values.
###Code
#Fit weather conditions: A max temperature between 55-65 degrees, wind speed less than 15 mph, 100% cloudiness, and below 20% humidty
hotel_df=dropna_part_1_dataframe.loc[(dropna_part_1_dataframe["Max Temp"]>=55) & (dropna_part_1_dataframe["Max Temp"]<=65) & (dropna_part_1_dataframe["Wind Speed"]<=15) & (dropna_part_1_dataframe["Cloudiness"]<=100) & (dropna_part_1_dataframe["Humidity"]<=20)]
hotel_df
###Output
_____no_output_____
###Markdown
Hotel Map* Store into variable named `hotel_df`.* Add a "Hotel Name" column to the DataFrame.* Set parameters to search for hotels with 5000 meters.* Hit the Google Places API for each city's coordinates.* Store the first Hotel result into the DataFrame.* Plot markers on top of the heatmap.
###Code
hotel_df["Hotel Name"]=""
hotel_df
#geocoordinates
target_coord="-35.10, 173.78"
target_search="Hotels"
target_radius=5000
target_type="lodging"
#setting up params
params={
"location":target_coord,
"keyword":target_search,
"radius":target_radius,
"type":target_type,
"key":g_key
}
#base url
base_url="https://maps.googleapis.com/maps/api/place/nearbysearch/json"
#run a request
response=requests.get(base_url, params=params)
#convert to json
hotels=response.json()
json.dumps(hotels, indent=4, sort_keys=True)
#For City #2
#geocoordinates
target_coord2="-35.23, 173.95"
target_search2="Hotels"
target_radius2=5000
target_type2="lodging"
#setting up params
params2={
"location":target_coord2,
"keyword":target_search2,
"radius":target_radius2,
"type":target_type2,
"key":g_key
}
#base url
base_url="https://maps.googleapis.com/maps/api/place/nearbysearch/json"
#run a request
response2=requests.get(base_url, params=params2)
#convert to json
hotels2=response2.json()
json.dumps(hotels2, indent=4, sort_keys=True)
#storing into dataframe
hotel_df["Hotel Name"]=[(hotels["results"][0]["name"]), (hotels2["results"][0]["name"])]
hotel_df
# NOTE: Do not change any of the code in this cell
# Using the template add the hotel marks to the heatmap
info_box_template = """
<dl>
<dt>Name</dt><dd>{Hotel Name}</dd>
<dt>City</dt><dd>{City}</dd>
<dt>Country</dt><dd>{Country}</dd>
</dl>
"""
# Store the DataFrame Row
# NOTE: be sure to update with your DataFrame name
hotel_info = [info_box_template.format(**row) for index, row in hotel_df.iterrows()]
locations = hotel_df[["Lat", "Lng"]]
# Add marker layer ontop of heat map
markers=gmaps.marker_layer(locations)
# Display figure
fig.add_layer(markers)
fig
###Output
_____no_output_____
###Markdown
VacationPy---- Note* Keep an eye on your API usage. Use https://developers.google.com/maps/reporting/gmp-reporting as reference for how to monitor your usage and billing.* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
###Code
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import gmaps
import os
import citipy
# Import API key
from config import g_key
%matplotlib inline
gmaps.configure(api_key=g_key)
fig1 = gmaps.figure()
fig1
###Output
_____no_output_____
###Markdown
Store Part I results into DataFrame* Load the csv exported in Part I to a DataFrame
###Code
vaca_cities_df = pd.read_csv (r'C:\Users\ICPC\Desktop\Data_Sci_Homework\Python-API-HW\WeatherPy\cities_df.csv')
print (vaca_cities_df)
###Output
City Cloudiness Country Date Humidity Lat \
0 colares 20.0 3600.0 1.620053e+09 55.0 38.7992
1 barrow 90.0 -28800.0 1.620053e+09 86.0 71.2906
2 avarua 90.0 -36000.0 1.620053e+09 73.0 -21.2078
3 hasaki 20.0 32400.0 1.620053e+09 38.0 35.7333
4 kunming 0.0 28800.0 1.620053e+09 52.0 25.0389
.. ... ... ... ... ... ...
546 mackenzie 90.0 -25200.0 1.620054e+09 87.0 55.2999
547 monrovia 75.0 0.0 1.620054e+09 66.0 6.3005
548 seoul 0.0 32400.0 1.620054e+09 58.0 37.5683
549 dubrajpur 90.0 19800.0 1.620054e+09 94.0 23.8000
550 rocha 64.0 -10800.0 1.620054e+09 52.0 -34.4833
Lng Max Temp Windspeed
0 -9.4469 73.00 10.36
1 -156.7887 15.80 3.27
2 -159.7750 75.20 8.05
3 140.8333 59.00 10.36
4 102.7183 66.20 13.42
.. ... ... ...
546 -123.1698 35.60 3.44
547 -10.7969 91.40 10.36
548 126.9778 53.60 3.44
549 87.3833 77.00 5.75
550 -54.3333 77.09 12.71
[551 rows x 9 columns]
###Markdown
Humidity Heatmap* Configure gmaps.* Use the Lat and Lng as locations and Humidity as the weight.* Add Heatmap layer to map.
###Code
humidity = vaca_cities_df["Humidity"]
maxhumidity = humidity.max()
locations = vaca_cities_df[["Lat", "Lng"]]
locations
figure = gmaps.figure()
heat_layer = gmaps.heatmap_layer(locations, weights=humidity,dissipating=False, max_intensity=maxhumidity,point_radius=2)
figure.add_layer(heat_layer)
figure
###Output
_____no_output_____
###Markdown
Create new DataFrame fitting weather criteria* Narrow down the cities to fit weather conditions.* Drop any rows will null values.
###Code
ideal_df = vaca_cities_df[(vaca_cities_df['Max Temp'].between(70,80)) & (vaca_cities_df['Windspeed']<10) & (vaca_cities_df['Cloudiness']==0)]
ideal_df.reset_index(inplace=True)
del ideal_df['index']
ideal_df
###Output
_____no_output_____
###Markdown
Hotel Map* Store into variable named `hotel_df`.* Add a "Hotel Name" column to the DataFrame.* Set parameters to search for hotels with 5000 meters.* Hit the Google Places API for each city's coordinates.* Store the first Hotel result into the DataFrame.* Plot markers on top of the heatmap.
###Code
hotel_df = []
for x in range(len(ideal_df)):
lat = ideal_df.loc[x]['Lat']
lng = ideal_df.loc[x]['Lng']
params = {
"location": f"{lat},{lng}",
"radius": 5000,
"types" : "hotel",
"key": g_key
}
base_url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json"
requested = requests.get(base_url, params=params)
response = requested.json()
try:
hotel_df.append(response['results'][0]['name'])
except:
hotel_df.append("")
ideal_df["Hotel Name"] = hotel_df
ideal_df
ideal_df = ideal_df.dropna(how='any')
ideal_df
# NOTE: Do not change any of the code in this cell
# Using the template add the hotel marks to the heatmap
info_box_template = """
<dl>
<dt>Name</dt><dd>{Hotel Name}</dd>
<dt>City</dt><dd>{City}</dd>
<dt>Country</dt><dd>{Country}</dd>
</dl>
"""
# Store the DataFrame Row
# NOTE: be sure to update with your DataFrame name
hotel_info = [info_box_template.format(**row) for index, row in ideal_df.iterrows()]
locations = ideal_df[["Lat", "Lng"]]
# Add marker layer ontop of heat map
# Plot markers on top of the heatmap
# Add marker layer ontop of heat map
markers = gmaps.marker_layer(locations, info_box_content = hotel_info)
figure.add_layer(markers)
# Display figure
figure
# Display figure
###Output
_____no_output_____
###Markdown
VacationPy---- Note* Keep an eye on your API usage. Use https://developers.google.com/maps/reporting/gmp-reporting as reference for how to monitor your usage and billing.* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
###Code
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import gmaps
import os
# Import API key
from api_keys import g_key
###Output
_____no_output_____
###Markdown
Store Part I results into DataFrame* Load the csv exported in Part I to a DataFrame
###Code
cities = "C:/Users/arc user/Desktop/python-api-challenge/WeatherPy/cities.csv"
weather_data = pd.read_csv(cities)
weather_data
###Output
_____no_output_____
###Markdown
Humidity Heatmap* Configure gmaps.* Use the Lat and Lng as locations and Humidity as the weight.* Add Heatmap layer to map.
###Code
# Configure gmaps
gmaps.configure(api_key=g_key)
%matplotlib inline
# Store latitude and longitude in locations
locations = weather_data[["Lat", "Lng"]]
#Store humidity
humidity = weather_data["Humidity"]
# Plot Heatmap
fig = gmaps.figure()
# Create heat layer
heat_layer = gmaps.heatmap_layer(locations, weights=humidity,
dissipating=False, max_intensity=100,
point_radius=5)
# Add layer
fig.add_layer(heat_layer)
# Display figure
fig
###Output
_____no_output_____
###Markdown
Create new DataFrame fitting weather criteria* Narrow down the cities to fit weather conditions.* Drop any rows will null values.
###Code
#A max temperature lower than 80 degrees but higher than 70 (converted to Celsius).
#Wind speed less than 10 mph.
#Zero cloudiness.
narrow_weather = weather_data.loc[(weather_data["Max Temp"]> 21.11)\
&(weather_data["Max Temp"]<26.66)&(weather_data["Wind Speed"]<10)&(weather_data["Cloudiness"]==0)].dropna()
###Output
_____no_output_____
###Markdown
Hotel Map* Store into variable named `hotel_df`.* Add a "Hotel Name" column to the DataFrame.* Set parameters to search for hotels with 5000 meters.* Hit the Google Places API for each city's coordinates.* Store the first Hotel result into the DataFrame.* Plot markers on top of the heatmap.
###Code
hotel_df = narrow_weather[["City", "Lat", "Lng"]].copy()
hotel_df["Hotel Name"]=""
hotel_df
# params dictionary to update each iteration
params = {
"radius": 5000,
"types": "lodging",
"key": g_key
}
# Use the lat/lng we recovered to identify hotels
for index, row in hotel_df.iterrows():
# get lat, lng from df
lat = row["Lat"]
lng = row["Lng"]
# change location each iteration while leaving original params in place
params["location"] = f"{lat},{lng}"
# Use the search term: "lodging" and our lat/lng
base_url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json"
# make request and print url
name_address = requests.get(base_url, params=params)
# print the name_address url, avoid doing for public github repos in order to avoid exposing key
# print(name_address.url)
# convert to json
name_address = name_address.json()
# print(json.dumps(name_address, indent=4, sort_keys=True))
# Since some data may be missing we incorporate a try-except to skip any that are missing a data point.
try:
hotel_df.loc[index, "Hotel Name"] = name_address["results"][0]["name"]
except (KeyError, IndexError):
print("Missing field/result... skipping.")
hotel_df
# NOTE: Do not change any of the code in this cell
# Using the template add the hotel marks to the heatmap
info_box_template = """
<dl>
<dt>Name</dt><dd>{Hotel Name}</dd>
<dt>City</dt><dd>{City}</dd>
</dl>
"""
# Store the DataFrame Row
# NOTE: be sure to update with your DataFrame name
hotel_info = [info_box_template.format(**row) for index, row in hotel_df.iterrows()]
locations = hotel_df[["Lat", "Lng"]]
# Add marker layer ontop of heat map
markers = gmaps.marker_layer(locations,info_box_content=hotel_info)
fig.add_layer(markers)
fig
# Display figure
###Output
_____no_output_____
###Markdown
VacationPy---- Note* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
###Code
pip install gmaps
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import gmaps
import os
# Import API key
from api_keys import g_key
###Output
_____no_output_____
###Markdown
Store Part I results into DataFrame* Load the csv exported in Part I to a DataFrame
###Code
Weather_file = "../WeatherPy/weathercities.csv"
#Weather_file
Weather_df2 = pd.read_csv(Weather_file)
Weather_df2 = Weather_df2.dropna()
Weather_df2
###Output
_____no_output_____
###Markdown
Humidity Heatmap* Configure gmaps.* Use the Lat and Lng as locations and Humidity as the weight.* Add Heatmap layer to map.
###Code
gmaps.configure(g_key)
locations = Weather_df2[["Lat", "Lng"]]
locations
humidity = Weather_df2["Humidity"].astype(float)
humidity
#Plotting Heatmap
figure = gmaps.figure()
# Create the heat layer
heatmap_layer = gmaps.heatmap_layer(locations, weights=humidity,
dissipating=False, max_intensity=10,
point_radius=1)
# Add layer
figure.add_layer(heatmap_layer)
# Display figure
figure
###Output
_____no_output_____
###Markdown
Create new DataFrame fitting weather criteria* Narrow down the cities to fit weather conditions.* Drop any rows will null values.
###Code
#Narrow down the DataFrame to find your ideal weather condition.
# A max temperature lower than 85 degrees but higher than 70.
# Wind speed less than 10 mph.
# Zero cloudiness.
# Drop any rows that don't contain all three conditions.
# You want to be sure the weather is ideal
NarrowDown_df = pd.DataFrame(Weather_df2, columns = ["City","Country", "Max Temp", "Wind Speed", "Cloudiness"])
max_temp = (NarrowDown_df["Max Temp"] <= 85) & (NarrowDown_df["Max Temp"] > 70)
wind_speed = NarrowDown_df["Wind Speed"] < 10
cloudiness = NarrowDown_df["Cloudiness"] == 0
NarrowDown_df[max_temp & wind_speed & cloudiness]
###Output
_____no_output_____
###Markdown
Hotel Map* Store into variable named `hotel_df`.* Add a "Hotel Name" column to the DataFrame.* Set parameters to search for hotels with 5000 meters.* Hit the Google Places API for each city's coordinates.* Store the first Hotel result into the DataFrame.* Plot markers on top of the heatmap.
###Code
# Store into variable named hotel_df.
hotel_df = pd.DataFrame(Weather_df2, columns=["City","Country","Lat","Lng"])
#Add a "Hotel Name" column to the DataFrame.
hotel_df["Hotel Name"] = ""
hotel_df
#Set parameters to search for hotels with 5000 meters.
hotel_name = []
# params dictionary to update each iteration
params = {
"radius": 5000,
"types": "hotel",
"keyword": "hotel",
"key": g_key
}
#-------------------------------------------------------------------------------------------------------------
# Nearest Restuatrant Exercise CODE
# use iterrows to iterate through pandas dataframe
for index, row in hotel_df.iterrows():
#get coordinates
lat = row["Lat"]
lng = row["Lng"]
# change location each iteration
params["location"] = f"{lat},{lng}"
# base url
base_url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json"
#make API request & convert to json
response = requests.get(base_url, params=params).json()
# extract results
results = response['results']
try:
hotel_name.append(response['results'][0]['name'])
except (KeyError, IndexError):
hotel_name.append(np.nan)
hotel_name
hotel_df.iloc[0][0]
#Store the first Hotel result into the DataFrame.
hotel_df['Hotel Name']= hotel_name
hotel_df.dropna()
hotel_df.to_csv('VacationHotels.csv')
#from gmaps in-class exercise
#Plot markers on top of the heatmap.
markers = gmaps.marker_layer(locations)
# Add the layer to the map
figure.add_layer(markers)
figure
# NOTE: Do not change any of the code in this cell
# Using the template add the hotel marks to the heatmap
info_box_template = """
<dl>
<dt>Name</dt><dd>{Hotel Name}</dd>
<dt>City</dt><dd>{City}</dd>
<dt>Country</dt><dd>{Country}</dd>
</dl>
"""
# Store the DataFrame Row
# NOTE: be sure to update with your DataFrame name
hotel_info = [info_box_template.format(**row) for index, row in hotel_df.iterrows()]
locations = hotel_df[["Lat", "Lng"]]
# Add marker layer ontop of heat map
# Display Map
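# (Sketch) marker layer with the hotel info boxes, layered on the heatmap figure built earlier
markers = gmaps.marker_layer(locations, info_box_content=hotel_info)
figure.add_layer(markers)
figure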
###Output
_____no_output_____
###Markdown
VacationPy---- Note* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
###Code
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import gmaps
import os
# Import API key
from api_keys import g_key
###Output
_____no_output_____
###Markdown
Store Part I results into DataFrame* Load the csv exported in Part I to a DataFrame
###Code
df = pd.read_csv('../clean_city_data.csv')
df
###Output
_____no_output_____
###Markdown
Humidity Heatmap* Configure gmaps.* Use the Lat and Lng as locations and Humidity as the weight.* Add Heatmap layer to map. Create new DataFrame fitting weather criteria* Narrow down the cities to fit weather conditions.* Drop any rows will null values. Hotel Map* Store into variable named `hotel_df`.* Add a "Hotel Name" column to the DataFrame.* Set parameters to search for hotels with 5000 meters.* Hit the Google Places API for each city's coordinates.* Store the first Hotel result into the DataFrame.* Plot markers on top of the heatmap.
###Code
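# The heatmap, weather-filtering and hotel-search cells were left unfilled in this copy of the
# notebook. A hedged sketch of the missing steps is added here so the marker template cell below
# has data to display (assumption: clean_city_data.csv carries City, Country, Lat, Lng, Humidity,
# Max Temp, Wind Speed and Cloudiness columns, like the other copies of this assignment).
gmaps.configure(api_key=g_key)
fig = gmaps.figure()
fig.add_layer(gmaps.heatmap_layer(df[["Lat", "Lng"]], weights=df["Humidity"],
                                  dissipating=False, max_intensity=100, point_radius=2))
hotel_df = df.loc[(df["Max Temp"].between(70, 80)) &
                  (df["Wind Speed"] < 10) &
                  (df["Cloudiness"] == 0)].dropna().copy()
hotel_df["Hotel Name"] = ""
# Fill the Hotel Name column with the first Google Places nearby-search result for each city
base_url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json"
params = {"radius": 5000, "types": "lodging", "key": g_key}
for index, row in hotel_df.iterrows():
    params["location"] = f"{row['Lat']},{row['Lng']}"
    results = requests.get(base_url, params=params).json().get("results", [])
    if results:
        hotel_df.loc[index, "Hotel Name"] = results[0]["name"]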
# NOTE: Do not change any of the code in this cell
# Using the template add the hotel marks to the heatmap
info_box_template = """
<dl>
<dt>Name</dt><dd>{Hotel Name}</dd>
<dt>City</dt><dd>{City}</dd>
<dt>Country</dt><dd>{Country}</dd>
</dl>
"""
# Store the DataFrame Row
# NOTE: be sure to update with your DataFrame name
hotel_info = [info_box_template.format(**row) for index, row in hotel_df.iterrows()]
locations = hotel_df[["Lat", "Lng"]]
# Add marker layer ontop of heat map
# Display figure
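# (Sketch) marker layer built from the hotel_info and locations above, shown on the heatmap figure
markers = gmaps.marker_layer(locations, info_box_content=hotel_info)
fig.add_layer(markers)
fig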
###Output
_____no_output_____
###Markdown
VacationPy---- Note* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
###Code
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import gmaps
import time
import json
import os
os.chdir('../config')
from config import gp
os.chdir('../WeatherPy')
# Import API key
###Output
_____no_output_____
###Markdown
Store Part I results into DataFrame* Load the csv exported in Part I to a DataFrame
###Code
load_csv = "../WeatherPy/output/cities.csv"
city_df = pd.read_csv(load_csv)
city_df.head()
###Output
_____no_output_____
###Markdown
Humidity Heatmap* Configure gmaps.* Use the Lat and Lng as locations and Humidity as the weight.* Add Heatmap layer to map.
###Code
# Configure gmaps with the imported API key before building any figures
gmaps.configure(api_key=gp)
locations = city_df[["Latitude", "Longitude"]]
humidity = city_df['Humidity'].astype(int)
# Plot Heatmap
fig = gmaps.figure()
# Create heat layer
heat_layer = gmaps.heatmap_layer(locations, weights=humidity,
dissipating=False, max_intensity=10,
point_radius=1)
# Add layer
fig.add_layer(heat_layer)
# Display figure
fig
###Output
_____no_output_____
###Markdown
Create new DataFrame fitting weather criteria* Narrow down the cities to fit weather conditions.* Drop any rows will null values.
###Code
# max temperature lower than 80 degrees but higher than 70.
# Wind speed less than 10 mph.
# Zero cloudiness.
narrowed_city_df = city_df.copy()
narrowed_city_df = narrowed_city_df[narrowed_city_df['Max Temp'] >= 70]
narrowed_city_df = narrowed_city_df[narrowed_city_df['Max Temp'] <= 80]
narrowed_city_df = narrowed_city_df[narrowed_city_df['Wind Speed'] <= 10]
narrowed_city_df = narrowed_city_df[narrowed_city_df['Cloudiness'] == 0]
narrowed_city_df = narrowed_city_df.dropna()
narrowed_city_df = narrowed_city_df.reset_index()
narrowed_city_df
###Output
_____no_output_____
###Markdown
Hotel Map* Store into variable named `hotel_df`.* Add a "Hotel Name" column to the DataFrame.* Set parameters to search for hotels with 5000 meters.* Hit the Google Places API for each city's coordinates.* Store the first Hotel result into the DataFrame.* Plot markers on top of the heatmap.
###Code
# create hotel_df with hotel name column
hotel_df = narrowed_city_df
hotel_df["Hotel Name"] = np.nan
hotel_df=hotel_df.rename(columns={"Latitude":"Lat","Longitude":"Lng"})
hotel_df
#search googleplaces for hotels within 5000 meters
#list for hotel names
hotels = []
#checker variable
x = 0
# for loop to search through city coords
for cities in hotel_df['City']:
target_coordinates = str(hotel_df['Lat'][x])+' , '+str(hotel_df['Lng'][x])
x+=1
target_radius = 5000
target_type = 'lodging'
params = {
"location": target_coordinates,
"radius": target_radius,
"type": target_type,
"key": gp
}
base_url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json"
# try/except to check if a city turns up
try:
response = requests.get(base_url, params=params)
hotel_results = response.json()
hotels.append(hotel_results['results'][0]['name'])
except IndexError:
hotels.append(np.nan)
#append dataframe with hotel names
hotel_df['Hotel Name'] = hotels
hotel_df
# Clean up the dataset by removing NaN rows
hotel_df_clean = hotel_df.dropna()
hotel_df_clean
# NOTE: Do not change any of the code in this cell
# Using the template add the hotel marks to the heatmap
info_box_template = """
<dl>
<dt>Name</dt><dd>{Hotel Name}</dd>
<dt>City</dt><dd>{City}</dd>
<dt>Country</dt><dd>{Country}</dd>
</dl>
"""
# Store the DataFrame Row
# NOTE: be sure to update with your DataFrame name
hotel_info = [info_box_template.format(**row) for index, row in hotel_df.iterrows()]
locations = hotel_df[["Lat", "Lng"]]
# Add marker layer ontop of heat map
# Add marker layer ontop of heat map and display
fig = gmaps.figure()
markers = gmaps.marker_layer(locations, info_box_content=hotel_info)
fig.add_layer(markers)
fig
# Display Map
###Output
_____no_output_____
###Markdown
VacationPy---- Note* Keep an eye on your API usage. Use https://developers.google.com/maps/reporting/gmp-reporting as reference for how to monitor your usage and billing.* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
###Code
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import gmaps
import os
import json
import time
# Import API key
from api_keys import g_key
###Output
_____no_output_____
###Markdown
Store Part I results into DataFrame* Load the csv exported in Part I to a DataFrame
###Code
weather_data = pd.read_csv("../WeatherPy/output_data/cities.csv")
weather_data
###Output
_____no_output_____
###Markdown
Humidity Heatmap* Configure gmaps.* Use the Lat and Lng as locations and Humidity as the weight.* Add Heatmap layer to map.
###Code
# Configure gmaps
gmaps.configure(api_key=g_key)
# Store latitude and longitude in locations
locations = weather_data[["Lat", "Lng"]]
# Store Humidity in humidity
humidity = weather_data["Humidity"]
# Plot Heatmap
fig = gmaps.figure(center=(46.0, -5.0), zoom_level=2)
max_intensity = np.max(humidity)
# Create heat layer
heat_layer = gmaps.heatmap_layer(locations, weights = humidity, dissipating=False, max_intensity=100, point_radius=3)
# Add layer
fig.add_layer(heat_layer)
# Display figure
fig
max_intensity
###Output
_____no_output_____
###Markdown
Create new DataFrame fitting weather criteria* Narrow down the cities to fit weather conditions.* Drop any rows will null values.
###Code
# Narrow down the cities with wind speed less than 10 mph, cloudiness equals to 0 and max temp between 60 and 80
narrowed_city_df = weather_data.loc[(weather_data["Wind Speed"] <= 10) & (weather_data["Cloudiness"] == 0) & \
(weather_data["Max Temp"] >= 70) & (weather_data["Max Temp"] <= 80)].dropna()
narrowed_city_df
###Output
_____no_output_____
###Markdown
Hotel Map* Store into variable named `hotel_df`.* Add a "Hotel Name" column to the DataFrame.* Set parameters to search for hotels with 5000 meters.* Hit the Google Places API for each city's coordinates.* Store the first Hotel result into the DataFrame.* Plot markers on top of the heatmap.
###Code
# Create a hotel_df
hotel_df = narrowed_city_df.loc[:,["City","Country", "Lat", "Lng"]]
# Add a "Hotel Name" column to the DataFrame.
hotel_df["Hotel Name"] = ""
# Display the result
hotel_df
base_url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json"
params = {"type" : "hotel",
"keyword" : "hotel",
"radius" : 5000,
"key" : g_key}
for index, row in hotel_df.iterrows():
# get city name, lat, lnt from df
lat = row["Lat"]
lng = row["Lng"]
city_name = row["City"]
# add keyword to params dict
params["location"] = f"{lat},{lng}"
# assemble url and make API request
print(f"Retrieving Results for Index {index}: {city_name}.")
response = requests.get(base_url, params=params).json()
# extract results
results = response['results']
# save the hotel name to dataframe
try:
print(f"Closest hotel in {city_name} is {results[0]['name']}.")
hotel_df.loc[index, "Hotel Name"] = results[0]['name']
# if there is no hotel available, show missing field
except (KeyError, IndexError):
print("Missing field/result... skipping.")
print("------------")
# Wait 1 sec to make another api request to avoid SSL Error
time.sleep(1)
# Print end of search once searching is completed
print("-------End of Search-------")
# Display the hotel dataframe
hotel_df
# NOTE: Do not change any of the code in this cell
# Using the template add the hotel marks to the heatmap
info_box_template = """
<dl>
<dt>Name</dt><dd>{Hotel Name}</dd>
<dt>City</dt><dd>{City}</dd>
<dt>Country</dt><dd>{Country}</dd>
</dl>
"""
# Store the DataFrame Row
# NOTE: be sure to update with your DataFrame name
hotel_info = [info_box_template.format(**row) for index, row in hotel_df.iterrows()]
locations = hotel_df[["Lat", "Lng"]]
# Add marker layer and info box content ontop of heat map
markers = gmaps.marker_layer(locations, info_box_content = hotel_info)
# Add the layer to the map
fig.add_layer(markers)
# Display Map
fig
###Output
_____no_output_____
###Markdown
VacationPy---- Note* Keep an eye on your API usage. Use https://developers.google.com/maps/reporting/gmp-reporting as reference for how to monitor your usage and billing.* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
###Code
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import gmaps
import os
# Import API key
from api_keys import g_key
###Output
_____no_output_____
###Markdown
Store Part I results into DataFrame* Load the csv exported in Part I to a DataFrame
###Code
###Output
_____no_output_____
###Markdown
Humidity Heatmap* Configure gmaps.* Use the Lat and Lng as locations and Humidity as the weight.* Add Heatmap layer to map. Create new DataFrame fitting weather criteria* Narrow down the cities to fit weather conditions.* Drop any rows will null values. Hotel Map* Store into variable named `hotel_df`.* Add a "Hotel Name" column to the DataFrame.* Set parameters to search for hotels with 5000 meters.* Hit the Google Places API for each city's coordinates.* Store the first Hotel result into the DataFrame.* Plot markers on top of the heatmap.
###Code
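# Nothing was loaded or computed in this copy of the notebook, so the template cell below has no
# hotel_df to format. As a self-contained, hypothetical illustration, build a one-row hotel_df by
# hand and fetch a hotel for it with the Places nearby-search endpoint used elsewhere in this
# assignment (the city and coordinates here are example values, not results from Part I).
gmaps.configure(api_key=g_key)
hotel_df = pd.DataFrame({
    "City": ["avarua"],
    "Country": ["CK"],
    "Lat": [-21.2078],
    "Lng": [-159.7750],
    "Hotel Name": [""]
})
params = {"location": "-21.2078,-159.7750", "radius": 5000, "types": "lodging", "key": g_key}
response = requests.get("https://maps.googleapis.com/maps/api/place/nearbysearch/json",
                        params=params).json()
if response.get("results"):
    hotel_df.loc[0, "Hotel Name"] = response["results"][0]["name"]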
# NOTE: Do not change any of the code in this cell
# Using the template add the hotel marks to the heatmap
info_box_template = """
<dl>
<dt>Name</dt><dd>{Hotel Name}</dd>
<dt>City</dt><dd>{City}</dd>
<dt>Country</dt><dd>{Country}</dd>
</dl>
"""
# Store the DataFrame Row
# NOTE: be sure to update with your DataFrame name
hotel_info = [info_box_template.format(**row) for index, row in hotel_df.iterrows()]
locations = hotel_df[["Lat", "Lng"]]
# Add marker layer ontop of heat map
# Display figure
###Output
_____no_output_____
###Markdown
VacationPy---- Note* Keep an eye on your API usage. Use https://developers.google.com/maps/reporting/gmp-reporting as reference for how to monitor your usage and billing.* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
###Code
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import gmaps
import os
# Output File (CSV)
output_data_file = "output_data/Hotels.csv"
# Import API key
from api_keys import g_key
###Output
_____no_output_____
###Markdown
Store Part I results into DataFrame* Load the csv exported in Part I to a DataFrame
###Code
city_data_file= "../output_data/cities.csv"
city_data_df = pd.read_csv(city_data_file)
city_data_df
###Output
_____no_output_____
###Markdown
Humidity Heatmap* Configure gmaps.* Use the Lat and Lng as locations and Humidity as the weight.* Add Heatmap layer to map.
###Code
gmaps.configure(api_key=g_key)
locations = city_data_df[['Lat','Lng']]
center_map = (0,0)
fig = gmaps.figure(map_type='ROADMAP')
heatmap_layer = gmaps.heatmap_layer(
city_data_df[["Lat", "Lng"]], weights=city_data_df["Humidity"],
max_intensity=100, point_radius=15)
fig = gmaps.figure(layout={'width' : '100%', 'height' : '675px'},center=(0,0),zoom_level=1.9)
fig.add_layer(heatmap_layer)
fig
###Output
_____no_output_____
###Markdown
Create new DataFrame fitting weather criteria* Narrow down the cities to fit weather conditions.* Drop any rows will null values.
###Code
# narrow down weather df
# Criteria actually used below: Max Temp < 75, Max Temp > 55, Wind Speed < 10 mph, Cloudiness < 1
best_weather_df=city_data_df[city_data_df["Max Temp"] <75]
best_weather_df=best_weather_df[best_weather_df["Max Temp"] >55]
best_weather_df=best_weather_df[best_weather_df["Wind Speed"] <10]
best_weather_df=best_weather_df[best_weather_df["Cloudiness"]<1]
best_weather_df.dropna()
best_weather_df
###Output
_____no_output_____
###Markdown
Hotel Map* Store into variable named `hotel_df`.* Add a "Hotel Name" column to the DataFrame.* Set parameters to search for hotels with 5000 meters.* Hit the Google Places API for each city's coordinates.* Store the first Hotel result into the DataFrame.* Plot markers on top of the heatmap.
###Code
hotel_df = best_weather_df
hotel_df["Hotel Name"] = ""
hotel_df.reset_index(inplace=True)
hotel_df.drop(columns='index')
lat = hotel_df["Lat"].astype(float)
lng = hotel_df["Lng"].astype(float)
hotel_df.head()
import pprint
lat = hotel_df["Lat"]
lng = hotel_df["Lng"]
hotels=[]
for i in range (len(hotel_df)):
target_type = "lodging"
location = f"{lat[i]},{lng[i]}"
keyword = "hotel"
radius = 5000
base_url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json?"
params= {
"location" : location,
"radius" : radius,
"types" : target_type,
"key" : g_key
}
try:
response = requests.get(base_url, params=params).json()
        pprint.pprint(response)
hotel_name = response
hotels.append(hotel_name["results"][0]["name"])
print(hotel_name["results"][0]["name"])
except:
hotels.append("")
print("No result found. Skipping ... ")
pass
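# Store the fetched hotel names back into the DataFrame (one entry was appended per row above)
hotel_df["Hotel Name"] = hotels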
# NOTE: Do not change any of the code in this cell
# Using the template add the hotel marks to the heatmap
info_box_template = """
<dl>
<dt>Name</dt><dd>{Hotel Name}</dd>
<dt>City</dt><dd>{City}</dd>
<dt>Country</dt><dd>{Country}</dd>
</dl>
"""
# Store the DataFrame Row
# NOTE: be sure to update with your DataFrame name
hotel_info = [info_box_template.format(**row) for index, row in hotel_df.iterrows()]
locations = hotel_df[["Lat", "Lng"]]
# Add marker layer ontop of heat map
# Display figure
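# (Sketch) marker layer with the hotel info boxes, layered on the humidity heatmap figure built earlier
markers = gmaps.marker_layer(locations, info_box_content=hotel_info)
fig.add_layer(markers)
fig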
###Output
_____no_output_____
###Markdown
VacationPy---- Note* Keep an eye on your API usage. Use https://developers.google.com/maps/reporting/gmp-reporting as reference for how to monitor your usage and billing.* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
###Code
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import gmaps
import os
import json
import pprint
# Import API key
from api_keys import g_key
###Output
_____no_output_____
###Markdown
Store Part I results into DataFrame* Load the csv exported in Part I to a DataFrame
###Code
cities_df = pd.read_csv('../cities.csv')
cities_df = cities_df.drop(['Unnamed: 0'], axis=1)
cities_df
###Output
_____no_output_____
###Markdown
Humidity Heatmap* Configure gmaps.* Use the Lat and Lng as locations and Humidity as the weight.* Add Heatmap layer to map.
###Code
gmaps.configure(api_key = g_key)
locations = cities_df[["latitude", "longitude"]]
humidity = cities_df["humidity"]
humidity
fig = gmaps.figure()
heat_layer = gmaps.heatmap_layer(locations, weights=humidity,dissipating=False,max_intensity=100,point_radius=1)
fig.add_layer(heat_layer)
fig
###Output
_____no_output_____
###Markdown
Create new DataFrame fitting weather criteria* Narrow down the cities to fit weather conditions.* Drop any rows will null values.
###Code
hotel_df = cities_df.loc[(cities_df['max temperature']<80) & (cities_df['max temperature']>70) & (cities_df['wind speed']<10) & (cities_df['cloudiness']==0)]
hotel_df
###Output
_____no_output_____
###Markdown
Hotel Map* Store into variable named `hotel_df`.* Add a "Hotel Name" column to the DataFrame.* Set parameters to search for hotels with 5000 meters.* Hit the Google Places API for each city's coordinates.* Store the first Hotel result into the DataFrame.* Plot markers on top of the heatmap.
###Code
hotel_df['Hotel Name'] = ""
target_radius = 5000
target_search = "Hotel"
base_url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json"
params = {
"radius": target_radius,
"keyword": target_search,
"key": g_key
}
for index, row in hotel_df.iterrows():
lat = row['latitude']
lon = row['longitude']
params['location'] = f"{lat},{lon}"
print(f"Retrieving Results for Index {index}.")
response = requests.get(base_url, params=params).json()
results = response['results']
try:
print(f"Closest hotel is {results[0]['name']}.")
print("------------")
hotel_df.loc[index, 'Hotel Name'] = results[0]['name']
except (KeyError, IndexError):
print("No hotel within 5000 meters")
hotel_df.loc[index, 'Hotel Name'] = "No hotel within 5000 meters"
print("------------")
hotel_df
# NOTE: Do not change any of the code in this cell
# Using the template add the hotel marks to the heatmap
info_box_template = """
<dl>
<dt>Name</dt><dd>{Hotel Name}</dd>
<dt>City</dt><dd>{city}</dd>
<dt>Country</dt><dd>{country}</dd>
</dl>
"""
# Store the DataFrame Row
# NOTE: be sure to update with your DataFrame name
hotel_info = [info_box_template.format(**row) for index, row in hotel_df.iterrows()]
locations = hotel_df[["latitude", "longitude"]]
humidity = hotel_df["humidity"]
# Add marker layer ontop of heat map
fig = gmaps.figure()
marker_layer = gmaps.marker_layer(locations, info_box_content=hotel_info)
heat_layer = gmaps.heatmap_layer(locations, weights=humidity,dissipating=False,max_intensity=80,point_radius=3)
fig.add_layer(heat_layer)
fig.add_layer(marker_layer)
fig
###Output
_____no_output_____
###Markdown
VacationPy---- Note* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
###Code
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import gmaps
import os
import json
# Import API key
from api_keys import g_key
###Output
_____no_output_____
###Markdown
Store Part I results into DataFrame* Load the csv exported in Part I to a DataFrame
###Code
cities_df = pd.read_csv("../WeatherPy/Cities.csv")
cities_df
###Output
_____no_output_____
###Markdown
Humidity Heatmap* Configure gmaps.* Use the Lat and Lng as locations and Humidity as the weight.* Add Heatmap layer to map.
###Code
gmaps.configure(api_key=g_key)
locations = cities_df[["Lat", "Lng"]]
Humidity = cities_df["Humidity"]
max_humidity = cities_df["Humidity"].max()
fig = gmaps.figure()
heatmap = gmaps.heatmap_layer(locations, weights=Humidity, max_intensity=max_humidity)
fig.add_layer(heatmap)
fig
###Output
_____no_output_____
###Markdown
Create new DataFrame fitting weather criteria* Narrow down the cities to fit weather conditions.* Drop any rows will null values.
###Code
hotel_df = cities_df.loc[(cities_df["Max Temp"] < 80) & (cities_df["Max Temp"] > 70),:]
hotel_df = hotel_df.loc[hotel_df["Wind Speed"] < 10,:]
hotel_df = hotel_df.loc[hotel_df["Cloudiness"] == 0,:]
hotel_df = hotel_df.loc[hotel_df["Humidity"] < 50,:]
hotel_df
###Output
_____no_output_____
###Markdown
Hotel Map* Store into variable named `hotel_df`.* Add a "Hotel Name" column to the DataFrame.* Set parameters to search for hotels with 5000 meters.* Hit the Google Places API for each city's coordinates.* Store the first Hotel result into the DataFrame.* Plot markers on top of the heatmap.
###Code
hotel_df["Hotel Name"] = ""
hotel_df["Hotel Lat"] = ""
hotel_df["Hotel Lng"] = ""
hotel_df.head()
url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json?"
radius = 5000
parameters = {"key": g_key,
"radius": radius,
"keyword": "hotel"}
failed = []
for index, row in hotel_df.iterrows():
parameters["location"] = f"{row[6]},{row[7]}"
response = requests.get(url, params=parameters).json()
try:
hotel_df.loc[index, "Hotel Name"] = response["results"][0]["name"]
hotel_df.loc[index, "Hotel Lat"] = response["results"][0]["geometry"]["location"]["lat"]
hotel_df.loc[index, "Hotel Lng"] = response["results"][0]["geometry"]["location"]["lng"]
except (KeyError, IndexError):
print("Couldn't find a hotel...")
failed.append(hotel_df.loc[index, "City"])
failed
for city in failed:
hotel_df = hotel_df[hotel_df["City"] != city]
hotel_df
# NOTE: Do not change any of the code in this cell
# Using the template add the hotel marks to the heatmap
info_box_template = """
<dl>
<dt>Name</dt><dd>{Hotel Name}</dd>
<dt>City</dt><dd>{City}</dd>
<dt>Country</dt><dd>{Country}</dd>
</dl>
"""
# Store the DataFrame Row
# NOTE: be sure to update with your DataFrame name
hotel_info = [info_box_template.format(**row) for index, row in hotel_df.iterrows()]
hotel_locations = hotel_df[["Hotel Lat", "Hotel Lng"]]
# Add marker layer ontop of heat map
markers = gmaps.marker_layer(hotel_locations, info_box_content=hotel_info)
fig.add_layer(markers)
# Display Map
fig
###Output
_____no_output_____
###Markdown
VacationPy---- Note* Keep an eye on your API usage. Use https://developers.google.com/maps/reporting/gmp-reporting as reference for how to monitor your usage and billing.* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
###Code
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import gmaps
import os
import json
# Import API key
from api_keys import g_key
# Configure gmaps
gmaps.configure(api_key=g_key)
###Output
_____no_output_____
###Markdown
Store Part I results into DataFrame* Load the csv exported in Part I to a DataFrame
###Code
weather_df = pd.read_csv('../WeatherPy/weather_data.csv')
###Output
_____no_output_____
###Markdown
Humidity Heatmap* Configure gmaps.* Use the Lat and Lng as locations and Humidity as the weight.* Add Heatmap layer to map.
###Code
# Latitude and longitude as locations.
latitude_longitude = weather_df[['lat', 'lng']]
# Humidity as weight.
humidity = weather_df["humidity"]
latitude_longitude.head()
# Add Heatmap layer to map.
figure_layout = {
'width': '500px',
'height': '400px',
'padding': '1px',
'margin': '0 auto 0 auto'
}
# Use the gmaps.figure
fig = gmaps.figure(layout=figure_layout,zoom_level=3,center=(25,25))
# Create heat layer
heat_layer = gmaps.heatmap_layer(latitude_longitude, weights=humidity,
dissipating=False, max_intensity=100,
point_radius=1.5)
# Add heat layer
fig.add_layer(heat_layer)
###Output
_____no_output_____
###Markdown
Create new DataFrame fitting weather criteria* Narrow down the cities to fit weather conditions.* Drop any rows will null values.
###Code
# Create the perfect vacation climate
# A max temperature lower than 80 degrees but higher than 70.
perfect_temperature = (weather_df.temperature < 80) & (weather_df.temperature > 70)
perfect_wind = weather_df.wind_speed < 10
perfect_cloudiness = weather_df.cloudiness == 0
perfect_vacation = perfect_temperature & perfect_wind & perfect_cloudiness
# Use boolean indexing to filter the weather_df dataframe - drop null values
ideal_weather = weather_df[perfect_vacation]
ideal_weather = ideal_weather.dropna()
ideal_weather = ideal_weather.reset_index()
ideal_weather.head(10)
###Output
_____no_output_____
###Markdown
Hotel Map* Store into variable named `hotel_df`.* Add a "Hotel Name" column to the DataFrame.* Set parameters to search for hotels with 5000 meters.* Hit the Google Places API for each city's coordinates.* Store the first Hotel result into the DataFrame.* Plot markers on top of the heatmap.
###Code
hotel_df = ideal_weather
# Add column for Hotel Name
hotel_df['Hotel Name'] = ""
hotel_df.head()
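# The Places API lookup described in the markdown above was left unfilled; below is a hedged
# sketch that fills the Hotel Name column (assumption: the weather CSV uses the lowercase
# 'lat'/'lng' column names seen earlier in this notebook).
base_url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json"
params = {"radius": 5000, "types": "lodging", "key": g_key}
for index, row in hotel_df.iterrows():
    params["location"] = f"{row['lat']},{row['lng']}"
    results = requests.get(base_url, params=params).json().get("results", [])
    if results:
        hotel_df.loc[index, "Hotel Name"] = results[0]["name"]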
# NOTE: Do not change any of the code in this cell
# Using the template add the hotel marks to the heatmap
info_box_template = """
<dl>
<dt>Name</dt><dd>{Hotel Name}</dd>
<dt>City</dt><dd>{City}</dd>
<dt>Country</dt><dd>{Country}</dd>
</dl>
"""
# Store the DataFrame Row
# NOTE: be sure to update with your DataFrame name
hotel_info = [info_box_template.format(**row) for index, row in hotel_df.iterrows()]
locations = hotel_df[["lat", "lng"]]
# Add marker layer ontop of heat map
# Display figure
###Output
_____no_output_____
###Markdown
VacationPy---- Note* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
###Code
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import gmaps
import os
# Import API key
from api_keys import g_key
###Output
_____no_output_____
###Markdown
Store Part I results into DataFrame* Load the csv exported in Part I to a DataFrame
###Code
###Output
_____no_output_____
###Markdown
Humidity Heatmap* Configure gmaps.* Use the Lat and Lng as locations and Humidity as the weight.* Add Heatmap layer to map. Create new DataFrame fitting weather criteria* Narrow down the cities to fit weather conditions.* Drop any rows will null values. Hotel Map* Store into variable named `hotel_df`.* Add a "Hotel Name" column to the DataFrame.* Set parameters to search for hotels with 5000 meters.* Hit the Google Places API for each city's coordinates.* Store the first Hotel result into the DataFrame.* Plot markers on top of the heatmap.
###Code
# NOTE: Do not change any of the code in this cell
# Using the template add the hotel marks to the heatmap
info_box_template = """
<dl>
<dt>Name</dt><dd>{Hotel Name}</dd>
<dt>City</dt><dd>{City}</dd>
<dt>Country</dt><dd>{Country}</dd>
</dl>
"""
# Store the DataFrame Row
# NOTE: be sure to update with your DataFrame name
hotel_info = [info_box_template.format(**row) for index, row in hotel_df.iterrows()]
locations = hotel_df[["Lat", "Lng"]]
# Add marker layer ontop of heat map
# Display figure
###Output
_____no_output_____
###Markdown
VacationPy---- Note* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
###Code
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import gmaps
import os
import json
# Import API key
from api_keys import g_key
###Output
_____no_output_____
###Markdown
Store Part I results into DataFrame* Load the csv exported in Part I to a DataFrame
###Code
cities_df =pd.read_csv("../WeatherPy/output_data/cities.csv")
#Drop NA values (if any) and remove the unnamed index column
cities_df=cities_df.drop(columns=["Unnamed: 0"])
cities_df = cities_df.dropna()
cities_df
###Output
_____no_output_____
###Markdown
Humidity Heatmap* Configure gmaps.* Use the Lat and Lng as locations and Humidity as the weight.* Add Heatmap layer to map.
###Code
# Configure gmaps with API key
gmaps.configure(api_key = g_key)
# Store 'Lat' and 'Lng' into locations
locations = cities_df [["Lat", "Lng"]].astype(float)
weights = cities_df["Humidity"].astype(float)
# Create a Humidity Heatmap layer
fig = gmaps.figure()
heat_layer = gmaps.heatmap_layer(locations, weights=weights ,
dissipating=False, max_intensity= max(weights),
point_radius = 5)
fig.add_layer(heat_layer)
# Note: plt.savefig captures the current matplotlib figure, not the gmaps widget, so this will not save the map itself
plt.savefig("output_data/Fig1.png")
fig
###Output
_____no_output_____
###Markdown
Analysis: Humidity appears to be higher in South America than in North America, and lower in Algeria, Sudan, and Egypt.
###Code
###Output
_____no_output_____
###Markdown
Create new DataFrame fitting weather criteria* Narrow down the cities to fit weather conditions.* Drop any rows with null values.
###Code
## Narrow down the DataFrame to find your ideal weather condition
new_cities = pd.DataFrame(cities_df, columns = ["City", "Max Temp", "Wind Speed", "Cloudiness"])
# The ideal condition based on the instruction:
# A max temperature lower than 80 degrees but higher than 70.
# Wind speed less than 10 mph.
# Zero cloudiness.
ideal_temp = (new_cities["Max Temp"]<80) & (new_cities["Max Temp"]>70)
ideal_wind = new_cities["Wind Speed"]<10
ideal_cloudiness = new_cities["Cloudiness"]==0
ideal_cities = new_cities.loc[ideal_cloudiness & ideal_temp & ideal_wind]
ideal_cities
###Output
_____no_output_____
###Markdown
Hotel Map* Store into variable named `hotel_df`.* Add a "Hotel Name" column to the DataFrame.* Set parameters to search for hotels within 5000 meters.* Hit the Google Places API for each city's coordinates.* Store the first Hotel result into the DataFrame.* Plot markers on top of the heatmap.
###Code
hotel_df = pd.DataFrame(cities_df, columns=["City", "Country", "Lat", "Lng"])
#Add a "Hotel Name" column to the DataFrame.
hotel_df["Hotel Name"] = ""
hotel_df
hotel_name = []
# params dictionary to update each iteration
params = {
"radius": 5000,
"types": "hotel",
"keyword": "hotel",
"key": g_key,
}
# Loop through the hotel_df and run a lat/long search for each city
for index, row in hotel_df.iterrows():
# get lat, lng from df
lat = row["Lat"]
lng = row["Lng"]
# change location each iteration while leaving original params in place
params["location"] = f"{lat},{lng}"
# Use the search term: "hotel" and our lat/lng
base_url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json"
# make request
hotel_names = requests.get(base_url, params=params).json()
try:
hotel_df.loc[index,"Hotel Name"]=hotel_names["results"][0]["name"]
except IndexError:
print("Missing field/result... skipping.")
# Save Data to csv
hotel_df.to_csv("output_data/hotel.csv")
# Visualize to confirm hotel data appears
hotel_df
hotel_df.to_csv("output_data/hotel.csv")
#pprint the hotel names
print(json.dumps(hotel_names, indent=4, sort_keys=True))
# NOTE: Do not change any of the code in this cell
# Using the template add the hotel marks to the heatmap
info_box_template = """
<dl>
<dt>Name</dt><dd>{Hotel Name}</dd>
<dt>City</dt><dd>{City}</dd>
<dt>Country</dt><dd>{Country}</dd>
</dl>
"""
# Store the DataFrame Row
# NOTE: be sure to update with your DataFrame name
hotel_info = [info_box_template.format(**row) for index, row in hotel_df.iterrows()]
locations = hotel_df[["Lat", "Lng"]]
# Add marker layer ontop of heat map
markers = gmaps.marker_layer(locations, info_box_content = hotel_info)
# Add the layer to the map
fig.add_layer(markers)
# Display Map
fig
###Output
_____no_output_____
###Markdown
VacationPy---- Note* Keep an eye on your API usage. Use https://developers.google.com/maps/reporting/gmp-reporting as reference for how to monitor your usage and billing.* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
###Code
!jupyter nbextension enable --py --sys-prefix widgetsnbextension
!pip install gmaps
!jupyter nbextension enable --py --sys-prefix gmaps
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import gmaps
import os
# Import API key
from api_keys import g_key
###Output
_____no_output_____
###Markdown
Store Part I results into DataFrame* Load the csv exported in Part I to a DataFrame
###Code
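# Hedged sketch (the CSV path below is an assumption; point it at your Part I export):
weather_data = pd.read_csv("../output_data/cities.csv")
weather_data.head()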
###Output
_____no_output_____
###Markdown
Humidity Heatmap* Configure gmaps.* Use the Lat and Lng as locations and Humidity as the weight.* Add Heatmap layer to map. Create new DataFrame fitting weather criteria* Narrow down the cities to fit weather conditions.* Drop any rows with null values. Hotel Map* Store into variable named `hotel_df`.* Add a "Hotel Name" column to the DataFrame.* Set parameters to search for hotels within 5000 meters.* Hit the Google Places API for each city's coordinates.* Store the first Hotel result into the DataFrame.* Plot markers on top of the heatmap.
###Code
# NOTE: Do not change any of the code in this cell
# Using the template add the hotel marks to the heatmap
info_box_template = """
<dl>
<dt>Name</dt><dd>{Hotel Name}</dd>
<dt>City</dt><dd>{City}</dd>
<dt>Country</dt><dd>{Country}</dd>
</dl>
"""
# Store the DataFrame Row
# NOTE: be sure to update with your DataFrame name
hotel_info = [info_box_template.format(**row) for index, row in hotel_df.iterrows()]
locations = hotel_df[["Lat", "Lng"]]
# Add marker layer ontop of heat map
# Display figure
###Output
_____no_output_____
###Markdown
VacationPy---- Note* Keep an eye on your API usage. Use https://developers.google.com/maps/reporting/gmp-reporting as reference for how to monitor your usage and billing.* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
###Code
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import gmaps
import os
import json
import time
# Import API key
from api_keys import g_key
###Output
_____no_output_____
###Markdown
Store Part I results into DataFrame* Load the csv exported in Part I to a DataFrame
###Code
path = os.path.join("..", "output_data", "cities.csv")
weather_df = pd.read_csv(path)
weather_df.head()
###Output
_____no_output_____
###Markdown
Humidity Heatmap* Configure gmaps.* Use the Lat and Lng as locations and Humidity as the weight.* Add Heatmap layer to map.
###Code
# Configure gmaps
gmaps.configure(api_key=g_key)
# Store latitude and longitude in locations
locations = weather_df[["Lat", "Lng"]]
# Store Humidity in humidity
humidity = weather_df["Humidity"]
# Plot Heatmap
fig = gmaps.figure(center=(46.0, -5.0), zoom_level=2)
max_intensity = np.max(humidity)
# Create heat layer
heatmap_layer = gmaps.heatmap_layer(locations, weights = humidity, dissipating=False, max_intensity=100, point_radius=3)
# Add layer
fig.add_layer(heatmap_layer)
# Display figure
fig
max_intensity
###Output
_____no_output_____
###Markdown
Create new DataFrame fitting weather criteria* Narrow down the cities to fit weather conditions.* Drop any rows with null values.
###Code
# Narrow down the cities with wind speed less than 10 mph, cloudiness equal to 0 and max temp between 70 and 80
new_weather_df = weather_df.loc[(weather_df["Wind Speed"] <= 10) & (weather_df["Cloudiness"] == 0) & \
(weather_df["Max Temp"] >= 70) & (weather_df["Max Temp"] <= 80)].dropna()
new_weather_df
###Output
_____no_output_____
###Markdown
Hotel Map* Store into variable named `hotel_df`.* Add a "Hotel Name" column to the DataFrame.* Set parameters to search for hotels within 5000 meters.* Hit the Google Places API for each city's coordinates.* Store the first Hotel result into the DataFrame.* Plot markers on top of the heatmap.
###Code
# Create a hotel_df
hotel_df = new_weather_df.loc[:,["City","Country", "Lat", "Lng"]]
# Add a "Hotel Name" column to the DataFrame.
hotel_df["Hotel Name"] = ""
# Display the result
hotel_df
base_url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json"
params = {"type" : "hotel",
"keyword" : "hotel",
"radius" : 5000,
"key" : g_key}
for index, row in hotel_df.iterrows():
# get city name, lat, lnt from df
lat = row["Lat"]
lng = row["Lng"]
city_name = row["City"]
# add keyword to params dict
params["location"] = f"{lat},{lng}"
# assemble url and make API request
print(f"Retrieving Results for Index {index}: {city_name}.")
response = requests.get(base_url, params=params).json()
# extract results
results = response['results']
# save the hotel name to dataframe
try:
print(f"Closest hotel in {city_name} is {results[0]['name']}.")
hotel_df.loc[index, "Hotel Name"] = results[0]['name']
# if there is no hotel available, show missing field
except (KeyError, IndexError):
print("Missing field/result... skipping.")
print("------------")
# Wait 1 sec to make another api request to avoid SSL Error
time.sleep(1)
# Print done when search is completed
print("Done")
# Display the hotel dataframe
hotel_df
# NOTE: Do not change any of the code in this cell
# Using the template add the hotel marks to the heatmap
info_box_template = """
<dl>
<dt>Name</dt><dd>{Hotel Name}</dd>
<dt>City</dt><dd>{City}</dd>
<dt>Country</dt><dd>{Country}</dd>
</dl>
"""
# Store the DataFrame Row
# NOTE: be sure to update with your DataFrame name
hotel_info = [info_box_template.format(**row) for index, row in hotel_df.iterrows()]
locations = hotel_df[["Lat", "Lng"]]
# Add marker layer ontop of heat map
# Add marker layer and info box content ontop of heat map
markers = gmaps.marker_layer(locations, info_box_content = hotel_info)
# Add the layer to the map
fig.add_layer(markers)
# Display figure
fig
###Output
_____no_output_____
###Markdown
VacationPy
###Code
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import gmaps
import os
# Other setup
%matplotlib inline
# Import API key
from llavero import gkey
###Output
_____no_output_____
###Markdown
Create a heat map that displays the humidity for every city from Part I.
###Code
# Import data from the last exercise
old_data = "../WeatherPy/weather_df.csv"
cities = pd.read_csv(old_data)
city_data = cities[cities["Cloudiness"] > 0] # Keep rows with cloudiness greater than zero (also drops missing values)
city_data
# Create a Humidity Heatmap Layer
loc_hm = city_data[['Lat','Lon']].astype(float)
loc_hm.head()
# Create a Humidity Heatmap Layer
hum_hm = city_data["Humidity"].astype(float)
hum_df = pd.DataFrame(hum_hm)
hum_df.head()
# create a humidity Heatmap layer
fig = gmaps.figure()
heat_layer = gmaps.heatmap_layer(loc_hm, weights=hum_hm,
dissipating=False, max_intensity=100,
point_radius = 1)
fig.add_layer(heat_layer)
fig
# Select criteria to map
# A max temperature lower than 25 degrees Celsius but higher than 18 degrees Celsius.
# Wind speed of 16 km/h or less.
# Cloudiness of 25% or less.
mycities = city_data
mycities = mycities[mycities['Hi Temp'] <= 25]
mycities = mycities[mycities['Hi Temp'] > 18]
mycities = mycities[mycities['Wind Speed'] <= 16]
mycities = mycities[mycities['Cloudiness'] <= 25]
mycities.head()
###Output
_____no_output_____
###Markdown
* Using Google Places API to find the first hotel for each city located within 5000 meters of your coordinates.* Plot the hotels on top of the humidity heatmap with each pin containing the Hotel Name, City, and Country. Hotel Map* Store into variable named hotel_df.* Add a "Hotel Name" column to the DataFrame.* Set parameters to search for hotels within 5000 meters.* Hit the Google Places API for each city's coordinates.* Store the first Hotel result into the DataFrame.* Plot markers on top of the heatmap.
###Code
hotel_df = mycities
hotel_df["Hotel"] = ""
hotel_df
# Look for nearby hotels in the selected regions
base_url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json"
hotel_per_city = hotel_df[['City','Lat','Lon']]
hotel = []
for index, row in hotel_per_city.iterrows():
lats = hotel_per_city['Lat'][index]
lngs = hotel_per_city['Lon'][index]
params = {
"location": f"{lats},{lngs}",
"rankby": "distance",
"keyword": "hotel",
"key": gkey,}
response = requests.get(base_url, params=params).json()
try:
results = response['results']
hotel.append(results[0]['name'])
except:
hotel_df = hotel_df.drop(index)
hotel
# Integrate hotel list to column in dataframe
hotel_df = pd.DataFrame(hotel_df)
hotel_df['Hotel Name'] = hotel
hotel_df
# NOTE: Do not change any of the code in this cell
# Using the template add the hotel marks to the heatmap
info_box_template = """
<dl>
<dt>Name</dt><dd>{Hotel Name}</dd>
<dt>City</dt><dd>{City}</dd>
<dt>Country</dt><dd>{Country}</dd>
</dl>
"""
# Store the DataFrame Row
# NOTE: be sure to update with your DataFrame name
hotel_info = [info_box_template.format(**row) for index, row in hotel_df.iterrows()]
locations = hotel_df[['Lat', 'Lon']]
# Plot Heatmap
fig = gmaps.figure()
# Create heat layer
heat_layer = gmaps.heatmap_layer(locations,
dissipating=False, max_intensity=10,
point_radius=1)
# Add layer
fig.add_layer(heat_layer)
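# --- Added sketch (not in the original cell) ---
# The instructions above ask for hotel markers with info boxes, but no marker
# layer is created here; a minimal addition reusing the template's hotel_info:
markers = gmaps.marker_layer(locations, info_box_content=hotel_info)
fig.add_layer(markers)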
# Display figure
fig
###Output
_____no_output_____
###Markdown
VacationPy---- Note* Keep an eye on your API usage. Use https://developers.google.com/maps/reporting/gmp-reporting as reference for how to monitor your usage and billing.* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
###Code
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import gmaps
import os
# Import API key
from config import g_key
###Output
_____no_output_____
###Markdown
Store Part I results into DataFrame* Load the csv exported in Part I to a DataFrame
###Code
# Read cities file and store into Pandas data frame
file_to_load = "../output_data/cities.csv"
cities_df = pd.read_csv(file_to_load)
cities_df.head()
###Output
_____no_output_____
###Markdown
Humidity Heatmap* Configure gmaps.* Use the Lat and Lng as locations and Humidity as the weight.* Add Heatmap layer to map.
###Code
#Configure gmaps
gmaps.configure(api_key=g_key)
#Use the lat and Lng as locations and humidity as the weight
geolocations = cities_df[["Lat", "Lng"]].astype(float)
humidity = cities_df["Humidity"].astype(float)
#Add Heatmap layer to map
fig = gmaps.figure(center=(20,0), zoom_level=1.5)
heat_layer = gmaps.heatmap_layer(geolocations, weights=humidity,
dissipating=False, max_intensity=500,
point_radius = 4)
fig.add_layer(heat_layer)
fig
###Output
_____no_output_____
###Markdown
Create new DataFrame fitting weather criteria* Narrow down the cities to fit weather conditions.* Drop any rows with null values.
###Code
# Narrow down the DataFrame to find your ideal weather condition.
# A max temperature lower than 80 degrees but higher than 70.
# Wind speed less than 10 mph.
# Zero cloudiness.
# Drop any rows that don't contain all three conditions. You want to be sure the weather is ideal.
narrow_df = cities_df.loc[(cities_df["Max Temp"] > 70) & (cities_df["Max Temp"] < 80)
& (cities_df["Wind Speed"] < 10)
& (cities_df["Cloudiness"] == 0 )]
narrow_df = narrow_df.dropna()
narrow_df
#Total 8 rows returned, reasonable count for api hits
###Output
_____no_output_____
###Markdown
Hotel Map* Store into variable named `hotel_df`.* Add a "Hotel Name" column to the DataFrame.* Set parameters to search for hotels within 5000 meters.* Hit the Google Places API for each city's coordinates.* Store the first Hotel result into the DataFrame.* Plot markers on top of the heatmap.
###Code
#Store filtered and narrow data frame from above to a new data frame that will include hotel information
#Note: received a SettingWithCopyWarning when simply using the = operator. Using loc and copy per documentation
hotel_df = narrow_df[0:len(narrow_df)].copy()
hotel_df["Hotel Name"] = ""
hotel_df["Hotel Address"] = ""
#Set parameters to search for hotels within 5000 meters
params = {
"radius" : 5000,
"keyword" : "hotel",
"key" : g_key
}
for index, row in hotel_df.iterrows():
# get lat, lng from df
lat = row["Lat"]
lng = row["Lng"]
# change location each iteration while leaving original params in place
params["location"] = f"{lat},{lng}"
# Use the lat/lng and the rest of the params as set earlier
base_url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json"
hotels = requests.get(base_url, params=params)
hotels = hotels.json()
try:
hotel_df.loc[index, "Hotel Name"] = hotels["results"][0]["name"]
hotel_df.loc[index, "Hotel Address"] = hotels["results"][0]["vicinity"]
print(f"Hotel found")
except (KeyError, IndexError):
print("Missing field/result... skipping.")
hotel_df
# NOTE: Do not change any of the code in this cell
# Using the template add the hotel marks to the heatmap
info_box_template = """
<dl>
<dt>Name</dt><dd>{Hotel Name}</dd>
<dt>City</dt><dd>{City}</dd>
<dt>Country</dt><dd>{Country}</dd>
</dl>
"""
# Store the DataFrame Row
# NOTE: be sure to update with your DataFrame name
hotel_info = [info_box_template.format(**row) for index, row in hotel_df.iterrows()]
locations = hotel_df[["Lat", "Lng"]]
# Add marker layer ontop of heat map
markers = gmaps.marker_layer(locations, info_box_content=hotel_info)
fig.add_layer(markers)
# Display figure
fig
#Please note, screenshot of the final image included within "output_data/map.png"
###Output
_____no_output_____
###Markdown
VacationPy---- Note* Keep an eye on your API usage. Use https://developers.google.com/maps/reporting/gmp-reporting as reference for how to monitor your usage and billing.* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
###Code
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import gmaps
import os
import json
import pprint
# Import API key
from api_keys import g_key
###Output
_____no_output_____
###Markdown
Store Part I results into DataFrame* Load the csv exported in Part I to a DataFrame
###Code
# create a reference to the csv file
cities_csv = "output_data/cities.csv"
# read the csv file into a dataframe
cities_df = pd.read_csv(cities_csv)
cities_df = cities_df.dropna()
del cities_df["Unnamed: 0"]
cities_df.head()
cities_df.count()
###Output
_____no_output_____
###Markdown
Humidity Heatmap* Configure gmaps.* Use the Lat and Lng as locations and Humidity as the weight.* Add Heatmap layer to map.
###Code
# Configure gmaps with API key
gmaps.configure(api_key=g_key)
# Store 'Lat' and 'Lng' as locations
locations = cities_df[["Lat", "Lng"]].astype(float)
humidity = cities_df["Humidity"].astype(float)
# Add heatmap layer to map
fig = gmaps.figure(center=(50.0, -35.0), zoom_level=2)
heat_layer = gmaps.heatmap_layer(locations, weights=humidity, dissipating=False, max_intensity=300, point_radius = 5)
fig.add_layer(heat_layer)
fig
###Output
_____no_output_____
###Markdown
Create new DataFrame fitting weather criteria* Narrow down the cities to fit weather conditions.* Drop any rows with null values.
###Code
# Narrow down the DataFrame to find your ideal weather condition.
# Drop any rows that don't contain all three conditions
# A max temperature lower than 80 degrees but higher than 70.
city_low_temp = cities_df[cities_df["Max Temp"] > 70]
city_max_temp = city_low_temp[city_low_temp["Max Temp"] < 80]
# Zero cloudiness
city_cloudless = city_max_temp[city_max_temp["Cloudiness"] == 0]
# Wind speed less than 40 mph
city_wind = city_cloudless[city_cloudless["Wind Speed"] < 40]
# Store into hotel variable name
hotel_df = city_wind
hotel_df
###Output
_____no_output_____
###Markdown
Hotel Map* Store into variable named `hotel_df`.* Add a "Hotel Name" column to the DataFrame.* Set parameters to search for hotels within 5000 meters.* Hit the Google Places API for each city's coordinates.* Store the first Hotel result into the DataFrame.* Plot markers on top of the heatmap.
###Code
reduced_cities_df = cities_df.loc[(cities_df["Wind Speed"] < 10) & (cities_df["Cloudiness"] == 0) & \
(cities_df["Max Temp"] > 70) & (cities_df["Max Temp"] < 80)].dropna()
reduced_cities_df.count()
hotel_df = city_wind.loc[:,["City", "Country", "Lat", "Lng"]]
hotel_df
import json
import pprint
base_url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json"
params = {"type" : "hotel",
"keyword" : "hotel",
"radius" : 5000,
"key" : g_key}
for index, row in hotel_df.iterrows():
lat = row["Lat"]
lng = row["Lng"]
city_name = row["City"]
# add keyword to params dict
params["location"] = f"{lat},{lng}"
# assemble url and make API request
print(f"Retrieving Results for Index {index}: {city_name}.")
response = requests.get(base_url, params=params).json()
pprint.pprint(response)
# extract results
results = response['results']
print(json.dumps(response, indent=4, sort_keys=True))
# save the hotel name to dataframe
try:
print(f"Closest hotel in {city_name} is {results[0]['name']}.")
hotel_df.loc[index, "Hotel Name"] = results[0]['name']
# if there is no hotel available, show missing field
except (KeyError, IndexError):
print("No hotel")
print("------------")
# Add "Hotel Name" column to DF
hotel_df["Hotel Name"] = ""
hotel_name = []
# Using Google Places API to find the first hotel for each city located within 5000 meters of your coordinates.
base_url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json"
# Set parameters
target_radius = 5000
target_type = "hotel"
params = {
"key": g_key,
"radius": target_radius,
"type": target_type
}
# Loop through the hotel_df and get the Lat/Lng for each city
for index, row in hotel_df.iterrows():
# get coordinates
lat = row["Lat"]
lng = row["Lng"]
city_name = row["City"]
params["location"] = f"{lat},{lng}"
print(f"Retrieving Results for Index {index}: {city_name}.")
response = requests.get(base_url, params=params).json()
# print response
results = response['results']
pprint.pprint(response)
    # save the hotel name to dataFrame (skip cities with no results)
    try:
        hotel_df.loc[index, "Hotel Name"] = results[0]['name']
    except (KeyError, IndexError):
        print(f"No hotel found near {city_name}... skipping.")
# Plot the hotels on top of the humidity heatmap with each pin containing the **Hotel Name**, **City**, and **Country**
hotel_df
# NOTE: Do not change any of the code in this cell
# Using the template add the hotel marks to the heatmap
info_box_template = """
<dl>
<dt>Name</dt><dd>{Hotel Name}</dd>
<dt>City</dt><dd>{City}</dd>
<dt>Country</dt><dd>{Country}</dd>
</dl>
"""
# Store the DataFrame Row
# NOTE: be sure to update with your DataFrame name
hotel_info = [info_box_template.format(**row) for index, row in hotel_df.iterrows()]
locations = hotel_df[["Lat", "Lng"]]
# Add marker layer ontop of heat map
marker = gmaps.marker_layer(locations, info_box_content=hotel_info)
fig.add_layer(marker)
# Display figure
fig
###Output
_____no_output_____
###Markdown
VacationPy---- Note* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
###Code
# Dependencies and Setup
%matplotlib inline
from ipywidgets.embed import embed_minimal_html #To export image to html
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import json
import os
# Import API key
from api_keys import g_key
###Output
_____no_output_____
###Markdown
Store Part I results into DataFrame* Load the csv exported in Part I to a DataFrame
###Code
#pip install gmaps
City_df=pd.read_csv("../WeatherPy/city_data.csv")
City_df.head()
###Output
_____no_output_____
###Markdown
Humidity Heatmap* Configure gmaps.* Use the Lat and Lng as locations and Humidity as the weight.* Add Heatmap layer to map.
###Code
import gmaps
## Configure gmaps with API key
gmaps.configure(api_key=g_key)
# Store 'Lat' and 'Lng' into locations
locations=City_df[['lat','lng']]
locations
Humidity_rate=City_df["Humidity"]
# # Create a Humidity Heatmap layer
fig = gmaps.figure(zoom_level=1,center=(10,10))
#fig.add_layer(gmaps.heatmap_layer(locations, weights=weights))
heat_layer = gmaps.heatmap_layer(locations, weights=Humidity_rate,
dissipating=False, max_intensity=100,
point_radius = 3
)
fig.add_layer(heat_layer)
embed_minimal_html('Heatmap.html', views=[fig])
fig
###Output
_____no_output_____
###Markdown
Create new DataFrame fitting weather criteria* Narrow down the cities to fit weather conditions.* Drop any rows with null values.
###Code
#new df with all null values dropped
New_City_df=City_df.dropna().reset_index()
New_City_df.head() #reduced to 567 rows
###Output
_____no_output_____
###Markdown
Hotel Map* Store into variable named `hotel_df`.* Add a "Hotel Name" column to the DataFrame.* Set parameters to search for hotels within 5000 meters.* Hit the Google Places API for each city's coordinates.* Store the first Hotel result into the DataFrame.* Plot markers on top of the heatmap.
###Code
#ideal vacation spots
Hotel_df=New_City_df.loc[((New_City_df["MaxTemp"]>70) &
(New_City_df["MaxTemp"]<75))&
(New_City_df["Wind_Speed"]<5)&
(New_City_df["Cloudiness"]<10)]
Hotel_df.head()
Hotel_df["Hotel Name"]= ""
Hotel_df.head()
base_url= "https://maps.googleapis.com/maps/api/place/nearbysearch/json"
lat=Hotel_df["lat"]
lng=Hotel_df["lng"]
params={
"key":g_key,
"radius":5000,
"types":"lodging"
}
for index,row in Hotel_df.iterrows():
lat=row["lat"]
lng=row["lng"]
params["location"]=f"{lat},{lng}"
response=requests.get(base_url,params=params).json()
# print(json.dumps(response, indent=4, sort_keys=True))
try:
Hotel_df.loc[index,"Hotel Name"]=response["results"][0]["name"]
except (KeyError,IndexError):
print("Missing data.....skipping")
Hotel_df.reset_index()
# NOTE: Do not change any of the code in this cell
# Using the template add the hotel marks to the heatmap
info_box_template = """
<dl>
<dt>Name</dt><dd>{Hotel Name}</dd>
<dt>City</dt><dd>{City Name}</dd>
<dt>Country</dt><dd>{Country}</dd>
</dl>
"""
# Store the DataFrame Row
# NOTE: be sure to update with your DataFrame name
hotel_info = [info_box_template.format(**row) for index, row in Hotel_df.iterrows()]
locations = Hotel_df[["lat", "lng"]]
# Add marker layer ontop of heat map
marker_layer = gmaps.marker_layer(locations, info_box_content=hotel_info)
fig1= gmaps.figure(zoom_level=1,center=(15,15))
fig1.add_layer(marker_layer)
fig1.add_layer(heat_layer)
embed_minimal_html('Hotel_markers.html', views=[fig1])
# Display figure
fig1
###Output
_____no_output_____
###Markdown
VacationPy---- Note* Keep an eye on your API usage. Use https://developers.google.com/maps/reporting/gmp-reporting as reference for how to monitor your usage and billing.* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
###Code
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import gmaps
import os
import json
# Import API key
from api_keys import g_key
###Output
_____no_output_____
###Markdown
Store Part I results into DataFrame* Load the csv exported in Part I to a DataFrame
###Code
# Load csv file
csv_file="../WeatherPy/City_Weather_Data.csv"
#Read csv file
weather_df=pd.read_csv(csv_file)
weather_df.head(10)
###Output
_____no_output_____
###Markdown
Humidity Heatmap* Configure gmaps.* Use the Lat and Lng as locations and Humidity as the weight.* Add Heatmap layer to map.
###Code
# Configure gmaps
gmaps.configure(api_key=g_key)
# Locations
locations = weather_df[["Lat", "Lng"]]
humidity =weather_df["Humidity"].astype(float)
# Plot Heatmap
fig = gmaps.figure()
# Create heat layer
heat_layer = gmaps.heatmap_layer(locations, weights=humidity,
dissipating=False, max_intensity=100,
point_radius=2)
# Add layer
fig.add_layer(heat_layer)
# Display figure
fig
###Output
_____no_output_____
###Markdown
Create new DataFrame fitting weather criteria* Narrow down the cities to fit weather conditions.* Drop any rows with null values.
###Code
# Filter vacation with zero cloudiness
vacation_no_cloud = weather_df[weather_df["Cloudiness"] == 0]
# Filter vacation with max temp above 70 degrees F
vacation_above_70_degrees = vacation_no_cloud[vacation_no_cloud["Max Temp"] > 70]
# Filter vacation with max temp below 80 degrees F
vacation_below_80_degrees = vacation_above_70_degrees[vacation_above_70_degrees["Max Temp"] < 80]
# Filter vacation with wind speed below 10 mph
vacation_slow_wind = vacation_below_80_degrees[vacation_below_80_degrees["Wind Speed"] < 10]
# Filter vacation with humidity below 60 %
perfect_vacation = vacation_slow_wind[vacation_slow_wind["Humidity"] < 60]
# Set Index
indexed_perfect_vacation = perfect_vacation.reset_index()
del indexed_perfect_vacation["index"]
indexed_perfect_vacation
vaca_locations = indexed_perfect_vacation[["Lat", "Lng"]]
vaca_humidity = indexed_perfect_vacation["Humidity"].astype(float)
# Plot Heatmap
vaca_fig = gmaps.figure()
# Create heat layer
vaca_heat_layer = gmaps.heatmap_layer(vaca_locations, weights=vaca_humidity,
dissipating=False, max_intensity=50,
point_radius=2.5)
# Add layer
vaca_fig.add_layer(vaca_heat_layer)
# Display figure
vaca_fig
###Output
_____no_output_____
###Markdown
Hotel Map* Store into variable named `hotel_df`.* Add a "Hotel Name" column to the DataFrame.* Set parameters to search for hotels within 5000 meters.* Hit the Google Places API for each city's coordinates.* Store the first Hotel result into the DataFrame.* Plot markers on top of the heatmap.
###Code
hotels = []
# Loop through narrowed down dataframe to get nearest hotel
for city in range(len(indexed_perfect_vacation["City"])):
lat = indexed_perfect_vacation.loc[city]["Lat"]
lng = indexed_perfect_vacation.loc[city]["Lng"]
city_coords = f"{lat},{lng}"
params = {
"location": city_coords,
"types": "lodging",
"radius": 5000,
"key": g_key
}
base_url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json"
hotel_request = requests.get(base_url, params=params)
hotel_response = hotel_request.json()
try:
hotels.append(hotel_response["results"][0]["name"])
except:
hotels.append("Nearest hotel not found")
# Dataframe with nearest hotel
indexed_perfect_vacation["Nearest Hotel"] = hotels
indexed_perfect_vacation
# Using the template add the hotel marks to the heatmap
info_box_template = """
<dl>
<dt>Name</dt><dd>{Nearest Hotel}</dd>
<dt>City</dt><dd>{City}</dd>
</dl>
"""
# Store the DataFrame Row
# NOTE: be sure to update with your DataFrame name
hotel_info = [info_box_template.format(**row) for index, row in indexed_perfect_vacation.iterrows()]
locations = indexed_perfect_vacation[["Lat", "Lng"]]
# Add marker layer ontop of heat map
markers = gmaps.marker_layer(locations, info_box_content= [f"Nearest Hotel: {hotel}" for hotel in hotels])
vaca_fig.add_layer(markers)
# Display figure
vaca_fig
###Output
_____no_output_____
###Markdown
VacationPy---- Note* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
###Code
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import gmaps
import os
# Import API key
from api_keys import g_key
###Output
_____no_output_____
###Markdown
Store Part I results into DataFrame* Load the csv exported in Part I to a DataFrame
###Code
weather_df = pd.read_csv('../output_data/cities.csv')
weather_df.head(3)
###Output
_____no_output_____
###Markdown
Humidity Heatmap* Configure gmaps.* Use the Lat and Lng as locations and Humidity as the weight.* Add Heatmap layer to map.
###Code
# Configure gmaps with API key
gmaps.configure(api_key=g_key)
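# --- Added sketch (not in the original cell) ---
# The heatmap step described above is otherwise missing from this notebook;
# a minimal version, assuming the Part I export uses "Lat", "Lng" and
# "Humidity" column names (adjust if your CSV differs):
all_locations = weather_df[["Lat", "Lng"]]
all_humidity = weather_df["Humidity"]
humidity_fig = gmaps.figure()
humidity_fig.add_layer(
    gmaps.heatmap_layer(all_locations, weights=all_humidity,
                        dissipating=False, max_intensity=100, point_radius=2)
)
humidity_fig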
###Output
_____no_output_____
###Markdown
Create new DataFrame fitting weather criteria* Narrow down the cities to fit weather conditions.* Drop any rows with null values.
###Code
# Create data with nice weather conditions
nice_weather_df = weather_df[(weather_df["Max Temp"] > 70) & (weather_df["Max Temp"] < 80) &
(weather_df["Wind Speed"] < 10) & (weather_df["Cloudiness"] == 0)]
###Output
_____no_output_____
###Markdown
Hotel Map* Store into variable named `hotel_df`.* Add a "Hotel Name" column to the DataFrame.* Set parameters to search for hotels within 5000 meters.* Hit the Google Places API for each city's coordinates.* Store the first Hotel result into the DataFrame.* Plot markers on top of the heatmap.
###Code
# initialize variables
target_lat = []
target_lng = []
humidity = []
hotel_name = []
locate = []
lat1 = []
lng1 = []
h_city = []
h_country = []
# Get latitude and longitude from google maps
for i in range(0, 10):
lat = nice_weather_df.iloc[i,6].astype(str)
lon = nice_weather_df.iloc[i,7].astype(str)
humid = nice_weather_df.iloc[i,5]
#city = nice_weather_df.iloc[i,1]
#country = nice_weather_df.iloc[i,3]
loc = f"{lat}, {lon}"
params = {
"location": loc,
"radius": 5000,
"type": "hotel",
"key": g_key
}
base_url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json"
response = requests.get(base_url, params=params).json()
results = response['results']
lat1 = results[0]["geometry"]['location']["lat"]
lng1 = results[0]["geometry"]['location']["lng"]
#loc1 = f"'{lat1}, {lng1}'"
target_lat.append(results[0]["geometry"]['location']["lat"])
target_lng.append(results[0]["geometry"]['location']["lng"])
#locate = results[0]["geometry"]['location']["lat"] )
humidity.append(nice_weather_df.iloc[i,5])
hotel_name.append(results[0]["name"])
h_city.append(nice_weather_df.iloc[i,1])
h_country.append(nice_weather_df.iloc[i,3])
# Create dataframe for google maps
hotel_df = pd.DataFrame({
"lat": target_lat,
"lng": target_lng,
"humid": humidity,
"hotel": hotel_name,
"city": h_city,
"country": h_country
})
# Create parameters for the map
locations = hotel_df[["lat", "lng"]].astype(float)
humidity = hotel_df["humid"].astype(float)
city_cntry = hotel_df["city"] + ", "+ hotel_df["country"]
hotel = "Hotel: "+ hotel_df["hotel"]
# Create map
fig = gmaps.figure()
# Add heatmap layer
heat_layer = gmaps.heatmap_layer(locations, weights=humidity,
dissipating=False, max_intensity=100,
point_radius = 2)
fig.add_layer(heat_layer)
# Add marker layer
marker_layer = gmaps.marker_layer(locations,hover_text=hotel,info_box_content=city_cntry)
fig.add_layer(marker_layer)
# Display map
fig
###Output
_____no_output_____
###Markdown
VacationPy---- Note* Keep an eye on your API usage. Use https://developers.google.com/maps/reporting/gmp-reporting as reference for how to monitor your usage and billing.* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
###Code
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import gmaps
import os
# Import API key
from api_keys import g_key
###Output
_____no_output_____
###Markdown
Store Part I results into DataFrame* Load the csv exported in Part I to a DataFrame
###Code
# Load the csv file from part 1
file = "../output_data/Clean_City_Data.csv"
clean = pd.read_csv(file)
# The raw DataFrame has an 'Unnamed: 0' column, so remove it so it looks better
#clean.head()
cleancities = clean.drop(columns=["Unnamed: 0"])
cleancities.head()
###Output
_____no_output_____
###Markdown
Humidity Heatmap* Configure gmaps.* Use the Lat and Lng as locations and Humidity as the weight.* Add Heatmap layer to map.
###Code
# Configure gmaps
gmaps.configure(api_key = g_key)
# Initiate variables
locations = cleancities[['Lat', 'Lng']].astype(float)
humidity = cleancities['Humidity'].astype(float)
# Add some specifications to heatmap
heatmap_specs = {
'width': '1000px',
'height': '500px',
'margin': '0 auto 0 auto'
}
# Create map
fig = gmaps.figure(layout=heatmap_specs, zoom_level=2, center=(0,0))
# Add layer details
heat_layer = gmaps.heatmap_layer(locations,
weights=humidity,
dissipating=False,
max_intensity=100,
point_radius=1)
fig.add_layer(heat_layer)
plt.savefig("../Images/humidty_heatmap.png")
fig
###Output
_____no_output_____
###Markdown
Create new DataFrame fitting weather criteria* Narrow down the cities to fit weather conditions.* Drop any rows with null values.
###Code
# Narrow down the DataFrame to find your ideal weather condition.
# Drop any rows that don't contain all three conditions. You want to be sure the weather is ideal.
# Set specifications
ideal_temp = (cleancities['Max Temp']>70) & (cleancities['Max Temp']<86)
ideal_wind = cleancities['Wind Speed']<10
ideal_humid = cleancities['Humidity']<50
# Collect all ideal specs
ideal_vaca = ideal_temp & ideal_wind & ideal_humid
# Create new df using collected specs
ideal_vaca_df = cleancities[ideal_vaca]
ideal_vaca_df = ideal_vaca_df.dropna()
# Limit the number of rows returned by your API requests to a reasonable number.
# Here, only the first 7 rows are displayed to keep the result set small
ideal_vaca_df.head(7)
###Output
_____no_output_____
###Markdown
Hotel Map* Store into variable named `hotel_df`.* Add a "Hotel Name" column to the DataFrame.* Set parameters to search for hotels within 5000 meters.* Hit the Google Places API for each city's coordinates.* Store the first Hotel result into the DataFrame.* Plot markers on top of the heatmap.
###Code
# Store into variable named hotel_df
# Assign the filtered DataFrame to the new variable name (note: this creates a reference, not a copy)
hotel_df = ideal_vaca_df
# Add a "Hotel Name" column to the df
# Use empty quotes for initial value, since we don't have that data yet
hotel_df['Hotel Name'] = ""
hotel_df.head()
# Hit the Google Places API for each city's coordinates
# Set parameters dictionary to search for hotels with 5000 meters
params = {
"radius": 5000,
"types": "hotels",
"keyword": "hotel",
"key": g_key}
# Start a for loop using iterrows
for index, row in hotel_df.iterrows():
# First, get the lat and long coords from our df
lat = row['Lat']
lng = row['Lng']
# Add a location parameter using lat and long that we just iterrated through
params['location'] = f"{lat},{lng}"
base_url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json"
response = requests.get(base_url, params=params).json()
# Store the first Hotel result into the DataFrame
try:
hotel_df.loc[index, "Hotel Name"] = response["results"][0]["name"]
except:
print("Missing data")
hotel_df.head()
# NOTE: Do not change any of the code in this cell
# Using the template add the hotel marks to the heatmap
info_box_template = """
<dl>
<dt>Name</dt><dd>{Hotel Name}</dd>
<dt>City</dt><dd>{City}</dd>
<dt>Country</dt><dd>{Country}</dd>
</dl>
"""
# Store the DataFrame Row
# NOTE: be sure to update with your DataFrame name
hotel_info = [info_box_template.format(**row) for index, row in hotel_df.iterrows()]
locations = hotel_df[["Lat", "Lng"]]
# Add some specifications to heatmap
# This part reminds me of CSS; the syntax is basically identical, which makes sense since we are styling the figure
heatmap_specs = {
'width': '1000px',
'height': '500px',
'margin': '0 auto 0 auto'
}
# Add marker layer ontop of heat map
fig = gmaps.figure(layout=heatmap_specs, zoom_level=2, center=(0,0))
hotel_markers = gmaps.marker_layer(locations, info_box_content=hotel_info)
fig.add_layer(heat_layer)
fig.add_layer(hotel_markers)
# Save figure
plt.savefig("../Images/hotel_heatmap.png")
# Display figure
fig
###Output
_____no_output_____
###Markdown
VacationPy---- Note* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
###Code
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import gmaps
import os
# Import API key
from api_keys import g_key
###Output
_____no_output_____
###Markdown
Store Part I results into DataFrame* Load the csv exported in Part I to a DataFrame
###Code
weather_df = pd.read_csv("../WeatherPy/weather.csv")
weather_df.head()
###Output
_____no_output_____
###Markdown
Humidity Heatmap* Configure gmaps.* Use the Lat and Lng as locations and Humidity as the weight.* Add Heatmap layer to map.
###Code
# Access maps with unique API key
gmaps.configure(api_key=g_key)
# Store latitude and longitude in locations
locations = weather_df[["Lat", "Lng"]]
humidity = weather_df["Humidity"]
# Plot Heatmap
fig = gmaps.figure()
# Create heat layer
heat_layer = gmaps.heatmap_layer(locations, weights=humidity,
dissipating=False, max_intensity=10,
point_radius=1)
# Add layer
fig.add_layer(heat_layer)
# Display figure
fig
heat_layer.max_intensity = 150
heat_layer.point_radius = 3
###Output
_____no_output_____
###Markdown
Create new DataFrame fitting weather criteria* Narrow down the cities to fit weather conditions.* Drop any rows with null values.
###Code
# Chain the filters so all four conditions apply (each step filters the previous result)
ideal_weather = weather_df[weather_df["Max Temp"]<80]
ideal_weather = ideal_weather[ideal_weather["Max Temp"]>70]
ideal_weather = ideal_weather[ideal_weather["Wind Speed"]<10]
ideal_weather = ideal_weather[ideal_weather["Cloudiness"] == 0]
ideal_weather = ideal_weather.dropna()
ideal_weather = ideal_weather.reset_index(drop=True)
ideal_weather
###Output
_____no_output_____
###Markdown
Hotel Map* Store into variable named `hotel_df`.* Add a "Hotel Name" column to the DataFrame.* Set parameters to search for hotels within 5000 meters.* Hit the Google Places API for each city's coordinates.* Store the first Hotel result into the DataFrame.* Plot markers on top of the heatmap.
###Code
locations = ideal_weather[["Lat", "Lng"]]
target_type = "hotel"
radius = 5000
base_url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json"
params = {
"location": str(locations.iloc[33,0]) + ", " +str(locations.iloc[33,1]),
"types": target_type,
"radius": radius,
"key": g_key
}
# Run request
response = requests.get(base_url, params)
ideal_hotel = response.json()
from pprint import pprint
pprint(ideal_hotel)
locations = ideal_weather[["Lat", "Lng"]]
target_type = "hotel"
radius = 5000
hotels = []
# Build URL using the Google Maps API
base_url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json"
for i in range(len(ideal_weather)):
params = {
"location": str(locations.iloc[i,0]) + ", " +str(locations.iloc[i,1]),
"types": target_type,
"radius": radius,
"key": g_key
}
# Run request
response = requests.get(base_url, params)
ideal_hotel = response.json()
try:
hotels.append(ideal_hotel["results"][1]["name"])
except:
print(f"Row {i} search has zero results")
# ideal_weather.drop(index=33,inplace=True)
# ideal_weather.drop(index=78,inplace=True)
hotel_df = ideal_weather
hotel_df["Hotel Name"] = hotels
hotel_df.head()
# NOTE: Do not change any of the code in this cell
# Using the template add the hotel marks to the heatmap
info_box_template = """
<dl>
<dt>Name</dt><dd>{Hotel Name}</dd>
<dt>City</dt><dd>{City}</dd>
<dt>Country</dt><dd>{Country}</dd>
</dl>
"""
# Store the DataFrame Row
# NOTE: be sure to update with your DataFrame name
hotel_info = [info_box_template.format(**row) for index, row in hotel_df.iterrows()]
locations = hotel_df[["Lat", "Lng"]]
# Add marker layer ontop of heat map
markers = gmaps.marker_layer(locations,hover_text=hotel_info)
fig.add_layer(markers)
# Display figure
fig
###Output
_____no_output_____ |
LilySu_Assignment2_LS_DS_112_Loading_Data.ipynb | ###Markdown
Lambda School Data Science - Loading DataData comes in many shapes and sizes - we'll start by loading tabular data, usually in csv format.Data set sources:- https://archive.ics.uci.edu/ml/datasets.html- https://github.com/awesomedata/awesome-public-datasets- https://registry.opendata.aws/ (beyond scope for now, but good to be aware of)Let's start with an example - [data about flags](https://archive.ics.uci.edu/ml/datasets/Flags). Lecture example - flag data
###Code
# Step 1 - find the actual file to download
# From navigating the page, clicking "Data Folder"
flag_data_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data'
# You can "shell out" in a notebook for more powerful tools
# https://jakevdp.github.io/PythonDataScienceHandbook/01.05-ipython-and-shell-commands.html
# Funny extension, but on inspection looks like a csv
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data
# Extensions are just a norm! You have to inspect to be sure what something is
# Step 2 - load the data
# How to deal with a csv? 🐼
import pandas as pd
flag_data = pd.read_csv(flag_data_url)
# Step 3 - verify we've got *something*
flag_data.head()
# Step 4 - Looks a bit odd - verify that it is what we want
flag_data.count()
!curl https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data | wc
# So we have 193 observations with funny names, file has 194 rows
# Looks like the file has no header row, but read_csv assumes it does
help(pd.read_csv)
# Alright, we can pass header=None to fix this
flag_data = pd.read_csv(flag_data_url, header=None)
flag_data.head()
flag_data.count()
flag_data.isna().sum()
###Output
_____no_output_____
###Markdown
Yes, but what does it *mean*?This data is fairly nice - it was "donated" and is already "clean" (no missing values). But there are no variable names - so we have to look at the codebook (also from the site).```1. name: Name of the country concerned2. landmass: 1=N.America, 2=S.America, 3=Europe, 4=Africa, 5=Asia, 6=Oceania3. zone: Geographic quadrant, based on Greenwich and the Equator; 1=NE, 2=SE, 3=SW, 4=NW4. area: in thousands of square km5. population: in round millions6. language: 1=English, 2=Spanish, 3=French, 4=German, 5=Slavic, 6=Other Indo-European, 7=Chinese, 8=Arabic, 9=Japanese/Turkish/Finnish/Magyar, 10=Others7. religion: 0=Catholic, 1=Other Christian, 2=Muslim, 3=Buddhist, 4=Hindu, 5=Ethnic, 6=Marxist, 7=Others8. bars: Number of vertical bars in the flag9. stripes: Number of horizontal stripes in the flag10. colours: Number of different colours in the flag11. red: 0 if red absent, 1 if red present in the flag12. green: same for green13. blue: same for blue14. gold: same for gold (also yellow)15. white: same for white16. black: same for black17. orange: same for orange (also brown)18. mainhue: predominant colour in the flag (tie-breaks decided by taking the topmost hue, if that fails then the most central hue, and if that fails the leftmost hue)19. circles: Number of circles in the flag20. crosses: Number of (upright) crosses21. saltires: Number of diagonal crosses22. quarters: Number of quartered sections23. sunstars: Number of sun or star symbols24. crescent: 1 if a crescent moon symbol present, else 025. triangle: 1 if any triangles present, 0 otherwise26. icon: 1 if an inanimate image present (e.g., a boat), otherwise 027. animate: 1 if an animate image (e.g., an eagle, a tree, a human hand) present, 0 otherwise28. text: 1 if any letters or writing on the flag (e.g., a motto or slogan), 0 otherwise29. topleft: colour in the top-left corner (moving right to decide tie-breaks)30. botright: Colour in the bottom-right corner (moving left to decide tie-breaks)```Exercise - read the help for `read_csv` and figure out how to load the data with the above variable names. One pitfall to note - with `header=None` pandas generated variable names starting from 0, but the above list starts from 1... Your assignment - pick a dataset and do something like the aboveThis is purposely open-ended - you can pick any data set you wish. It is highly advised you pick a dataset from UCI or a similar "clean" source.If you get that done and want to try more challenging or exotic things, go for it! Use documentation as illustrated above, and follow the 20-minute rule (that is - ask for help if you're stuck).If you have loaded a few traditional datasets, see the following section for suggested stretch goals.
###Code
# TODO your work here!
# And note you should write comments, descriptions, and add new
# code and text blocks as needed
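# --- Hedged sketch for the flag-naming exercise above (an added example, one
# possible approach rather than the official solution). It reuses `flag_data_url`
# and pandas from the lecture cells; the column names are transcribed from the codebook.
flag_columns = [
    "name", "landmass", "zone", "area", "population", "language", "religion",
    "bars", "stripes", "colours", "red", "green", "blue", "gold", "white",
    "black", "orange", "mainhue", "circles", "crosses", "saltires", "quarters",
    "sunstars", "crescent", "triangle", "icon", "animate", "text",
    "topleft", "botright",
]
flag_data_named = pd.read_csv(flag_data_url, header=None, names=flag_columns)
flag_data_named.head()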
import pandas as pd
import io
from google.colab import files
uploaded = files.upload()
df_airlines = pd.read_csv(io.BytesIO(uploaded['airlines.csv']))
df_airlines.head()
from google.colab import files
uploaded = files.upload()
df_airport= pd.read_csv(io.BytesIO(uploaded['airports.csv']))
df_airport.head()
df_airport.copy(deep=True)
df_airlines.head()
df_airlines.copy(deep=True)
df_airlines[:3]
###Output
_____no_output_____ |
howtolens/chapter_1_introduction/tutorial_5_ray_tracing.ipynb | ###Markdown
Tutorial 5: Ray Tracing=======================In the last tutorial, our use of `Plane`'s was a bit clunky. We manually had to input `Grid`'s to trace them, and keep track of which `Grid`'s were the image-plane`s and which were the source planes. It was easy to make mistakes!Fortunately, in **PyAutoLens**, you won't actually spend much hands-on time with the `Plane` objects. Instead, you'll primarily use the `ray-tracing` module, which we'll cover in this example. Let's look at how easy it is to set up the same lens-plane + source-plane strong lens configuration as the previous tutorial, but with far fewer lines of code!
###Code
%matplotlib inline
from pyprojroot import here
workspace_path = str(here())
%cd $workspace_path
print(f"Working Directory has been set to `{workspace_path}`")
import autolens as al
import autolens.plot as aplt
###Output
_____no_output_____
###Markdown
Let's use the same `Grid` we've all grown to know and love by now!
###Code
image_plane_grid = al.Grid.uniform(shape_2d=(100, 100), pixel_scales=0.05, sub_size=2)
###Output
_____no_output_____
###Markdown
For our lens galaxy, we'll use the same SIS `MassProfile` as before.
###Code
sis_mass_profile = al.mp.SphericalIsothermal(centre=(0.0, 0.0), einstein_radius=1.6)
lens_galaxy = al.Galaxy(redshift=0.5, mass=sis_mass_profile)
print(lens_galaxy)
###Output
_____no_output_____
###Markdown
And for our source galaxy, the same `SphericalSersic` `LightProfile`
###Code
sersic_light_profile = al.lp.SphericalSersic(
centre=(0.0, 0.0), intensity=1.0, effective_radius=1.0, sersic_index=1.0
)
source_galaxy = al.Galaxy(redshift=1.0, light=sersic_light_profile)
print(source_galaxy)
###Output
_____no_output_____
###Markdown
Now, let's use the lens and source galaxies to ray-trace our `Grid`, using a `Tracer` from the ray-tracing module. When we pass our galaxies into the `Tracer` below, the following happens:1) The galaxies are ordered in ascending redshift.2) Planes are created at every one of these redshifts, with the galaxies at those redshifts associated with those planes.
###Code
tracer = al.Tracer.from_galaxies(galaxies=[lens_galaxy, source_galaxy])
###Output
_____no_output_____
###Markdown
This `Tracer` is composed of a list of planes, in this case two `Plane`'s (the image and source plane).
###Code
print(tracer.planes)
###Output
_____no_output_____
###Markdown
We can access these using the `image_plane` and `source_plane` attributes.
###Code
print("Image Plane:")
print(tracer.planes[0])
print(tracer.image_plane)
print()
print("Source Plane:")
print(tracer.planes[1])
print(tracer.source_plane)
###Output
_____no_output_____
###Markdown
The most convenient part of the `Tracer` is that we can use it to create fully `ray-traced` images, without manually setting up the `Plane`'s to do this. The function below does the following:1) Using the lens-total mass distribution, the deflection angle of every image-plane `Grid` coordinate is computed.2) These deflection angles are used to trace every image-plane coordinate to a source-plane coordinate.3) The light of each traced source-plane coordinate is evaluated using the source-plane `Galaxy`'s `LightProfile`.
###Code
traced_image = tracer.image_from_grid(grid=image_plane_grid)
print("traced image pixel 1")
print(traced_image.in_2d[0, 0])
print("traced image pixel 2")
print(traced_image.in_2d[0, 1])
print("traced image pixel 3")
print(traced_image.in_2d[0, 2])
###Output
_____no_output_____
###Markdown
This image appears as the Einstein ring we saw in the previous tutorial.
###Code
aplt.Tracer.image(tracer=tracer, grid=image_plane_grid)
###Output
_____no_output_____
###Markdown
We can also use the `Tracer` to compute the traced `Grid` of every plane, instead of getting the traced image itself:
###Code
traced_grids = tracer.traced_grids_of_planes_from_grid(grid=image_plane_grid)
###Output
_____no_output_____
###Markdown
And the source-plane`s `Grid` has been deflected.
###Code
print("grid source-plane coordinate 1")
print(traced_grids[1].in_2d[0, 0])
print("grid source-plane coordinate 2")
print(traced_grids[1].in_2d[0, 1])
print("grid source-plane coordinate 3")
print(traced_grids[1].in_2d[0, 2])
###Output
_____no_output_____
###Markdown
We can use the plane_plotter to plot these grids, like we did before.
###Code
plotter = aplt.Plotter(labels=aplt.Labels(title="Image-plane Grid"))
aplt.Plane.plane_grid(plane=tracer.image_plane, grid=traced_grids[0], plotter=plotter)
plotter = aplt.Plotter(labels=aplt.Labels(title="Source-plane Grid"))
aplt.Plane.plane_grid(plane=tracer.source_plane, grid=traced_grids[1], plotter=plotter)
aplt.Plane.plane_grid(
plane=tracer.source_plane,
grid=traced_grids[1],
axis_limits=[-0.1, 0.1, -0.1, 0.1],
plotter=plotter,
)
###Output
_____no_output_____
###Markdown
**PyAutoLens** has tools for plotting a `Tracer`. A ray-tracing subplot plots the following:1) The image, computed by tracing the source-`Galaxy`'s light `forwards` through the `Tracer`.2) The source-plane image, showing the source-`Galaxy`'s true appearance (i.e. if it were not lensed).3) The image-plane convergence, computed using the lens `Galaxy`'s total mass distribution.4) The image-plane gravitational potential, computed using the lens `Galaxy`'s total mass distribution.5) The image-plane deflection angles, computed using the lens `Galaxy`'s total mass distribution.
###Code
aplt.Tracer.subplot_tracer(tracer=tracer, grid=image_plane_grid)
###Output
_____no_output_____
###Markdown
Just like for a plane, these quantities and attributes can be computed by passing a `Grid` (converted to 2D ndarrays with the same dimensions as our input grid!).
###Code
convergence = tracer.convergence_from_grid(grid=image_plane_grid)
print("Tracer - Convergence - `Grid` coordinate 1:")
print(convergence.in_2d[0, 0])
print("Tracer - Convergence - `Grid` coordinate 2:")
print(convergence.in_2d[0, 1])
print("Tracer - Convergence - `Grid` coordinate 3:")
print(convergence.in_2d[0, 2])
print("Tracer - Convergence - `Grid` coordinate 101:")
print(convergence.in_2d[1, 0])
###Output
_____no_output_____
###Markdown
Of course, these convergences are identical to the image-plane convergences, as it`s only the lens galaxy that contributes to the overall mass of the ray-tracing system.
###Code
image_plane_convergence = tracer.image_plane.convergence_from_grid(
grid=image_plane_grid
)
print("Image-Plane - Convergence - `Grid` coordinate 1:")
print(image_plane_convergence.in_2d[0, 0])
print("Image-Plane - Convergence - `Grid` coordinate 2:")
print(image_plane_convergence.in_2d[0, 1])
print("Image-Plane - Convergence - `Grid` coordinate 3:")
print(image_plane_convergence.in_2d[0, 2])
print("Image-Plane - Convergence - `Grid` coordinate 101:")
print(image_plane_convergence.in_2d[1, 0])
###Output
_____no_output_____
###Markdown
I've left the rest below commented to avoid too many print statements, but if you're feeling adventurous go ahead and uncomment the lines below!
###Code
# print(`Potential:`)
# print(tracer.potential_from_grid(grid=image_plane_grid))
# print(tracer.image_plane.potential_from_grid(grid=image_plane_grid))
# print(`Deflections:`)
# print(tracer.deflections_from_grid(grid=image_plane_grid))
# print(tracer.deflections_from_grid(grid=image_plane_grid))
# print(tracer.image_plane.deflections_from_grid(grid=image_plane_grid))
# print(tracer.image_plane.deflections_from_grid(grid=image_plane_grid))
###Output
_____no_output_____
###Markdown
You can also plot the above attributes on individual figures, using the appropriate ray-tracing `Plotter` (I've left most commented out again for convenience).
###Code
aplt.Tracer.convergence(tracer=tracer, grid=image_plane_grid)
# aplt.Tracer.potential(tracer=tracer, grid=image_plane_grid)
# aplt.Tracer.deflections_y(tracer=tracer, grid=image_plane_grid)
# aplt.Tracer.deflections_x(tracer=tracer, grid=image_plane_grid)
# aplt.Tracer.image(tracer=tracer, grid=image_plane_grid)
###Output
_____no_output_____
###Markdown
Tutorial 5: Ray Tracing=======================In the last tutorial, our use of `Plane`'s was a bit clunky. We manually had to input `Grid`'s to trace them, and keep track of which `Grid`'s were the image-plane's and which were the source-plane's. It was easy to make mistakes!Fortunately, in **PyAutoLens**, you won't actually spend much hands-on time with the `Plane` objects. Instead, you'll primarily use the `ray-tracing` module, which we'll cover in this example. Let's look at how easy it is to set up the same lens-plane + source-plane strong lens configuration as the previous tutorial, but with far fewer lines of code!
###Code
%matplotlib inline
from pyprojroot import here
workspace_path = str(here())
%cd $workspace_path
print(f"Working Directory has been set to `{workspace_path}`")
import autolens as al
import autolens.plot as aplt
###Output
_____no_output_____
###Markdown
Let's use the same `Grid` we've all grown to know and love by now!
###Code
image_plane_grid = al.Grid.uniform(shape_2d=(100, 100), pixel_scales=0.05, sub_size=2)
###Output
_____no_output_____
###Markdown
For our lens galaxy, we'll use the same SIS `MassProfile` as before.
###Code
sis_mass_profile = al.mp.SphericalIsothermal(centre=(0.0, 0.0), einstein_radius=1.6)
lens_galaxy = al.Galaxy(redshift=0.5, mass=sis_mass_profile)
print(lens_galaxy)
###Output
_____no_output_____
###Markdown
And for our source galaxy, the same `SphericalSersic` `LightProfile`.
###Code
sersic_light_profile = al.lp.SphericalSersic(
centre=(0.0, 0.0), intensity=1.0, effective_radius=1.0, sersic_index=1.0
)
source_galaxy = al.Galaxy(redshift=1.0, light=sersic_light_profile)
print(source_galaxy)
###Output
_____no_output_____
###Markdown
Now, let's use the lens and source galaxies to ray-trace our `Grid`, using a `Tracer` from the ray-tracing module. When we pass our galaxies into the `Tracer` below, the following happens: 1) The galaxies are ordered in ascending redshift. 2) Planes are created at every one of these redshifts, with the galaxies at those redshifts associated with those planes.
###Code
tracer = al.Tracer.from_galaxies(galaxies=[lens_galaxy, source_galaxy])
###Output
_____no_output_____
###Markdown
This `Tracer` is composed of a list of planes, in this case two `Plane`'s (the image and source plane).
###Code
print(tracer.planes)
###Output
_____no_output_____
###Markdown
We can access these using the `image_plane` and `source_plane` attributes.
###Code
print("Image Plane:")
print(tracer.planes[0])
print(tracer.image_plane)
print()
print("Source Plane:")
print(tracer.planes[1])
print(tracer.source_plane)
###Output
_____no_output_____
###Markdown
The most convenient part of the `Tracer` is that we can use it to create fully `ray-traced` images, without manually setting up the `Plane`'s to do this. The function below does the following: 1) Using the lens galaxy's total mass distribution, the deflection angle of every image-plane `Grid` coordinate is computed. 2) These deflection angles are used to trace every image-plane coordinate to a source-plane coordinate. 3) The light of each traced source-plane coordinate is evaluated using the source-plane `Galaxy`'s `LightProfile`.
###Code
traced_image = tracer.image_from_grid(grid=image_plane_grid)
print("traced image pixel 1")
print(traced_image.in_2d[0, 0])
print("traced image pixel 2")
print(traced_image.in_2d[0, 1])
print("traced image pixel 3")
print(traced_image.in_2d[0, 2])
###Output
_____no_output_____
###Markdown
This image appears as the Einstein ring we saw in the previous tutorial.
###Code
tracer_plotter = aplt.TracerPlotter(tracer=tracer, grid=image_plane_grid)
tracer_plotter.figures(image=True)
###Output
_____no_output_____
###Markdown
We can also use the `Tracer` to compute the traced `Grid` of every plane, instead of getting the traced image itself:
###Code
traced_grids = tracer.traced_grids_of_planes_from_grid(grid=image_plane_grid)
###Output
_____no_output_____
###Markdown
And the source-plane's `Grid` has been deflected.
###Code
print("grid source-plane coordinate 1")
print(traced_grids[1].in_2d[0, 0])
print("grid source-plane coordinate 2")
print(traced_grids[1].in_2d[0, 1])
print("grid source-plane coordinate 3")
print(traced_grids[1].in_2d[0, 2])
###Output
_____no_output_____
###Markdown
We can use the TracerPlotter to plot these planes and grids.
###Code
include_2d = aplt.Include2D(grid=True)
tracer_plotter = aplt.TracerPlotter(
tracer=tracer, grid=image_plane_grid, include_2d=include_2d
)
tracer_plotter.figures_of_planes(plane_image=True, plane_grid=True, plane_index=0)
tracer_plotter.figures_of_planes(plane_image=True, plane_grid=True, plane_index=1)
###Output
_____no_output_____
###Markdown
**PyAutoLens** has tools for plotting a `Tracer`. A ray-tracing subplot plots the following:1) The image, computed by tracing the source-`Galaxy`'s light `forwards` through the `Tracer`.2) The source-plane image, showing the source-`Galaxy`'s true appearance (i.e. if it were not lensed).3) The image-plane convergence, computed using the lens `Galaxy`'s total mass distribution.4) The image-plane gravitational potential, computed using the lens `Galaxy`'s total mass distribution.5) The image-plane deflection angles, computed using the lens `Galaxy`'s total mass distribution.
###Code
tracer_plotter.subplot_tracer()
###Output
_____no_output_____
###Markdown
Just like for a plane, these attributes can be computed by passing a `Grid` (and they are converted to 2D ndarrays with the same dimensions as our input grid!).
###Code
convergence = tracer.convergence_from_grid(grid=image_plane_grid)
print("Tracer - Convergence - `Grid` coordinate 1:")
print(convergence.in_2d[0, 0])
print("Tracer - Convergence - `Grid` coordinate 2:")
print(convergence.in_2d[0, 1])
print("Tracer - Convergence - `Grid` coordinate 3:")
print(convergence.in_2d[0, 2])
print("Tracer - Convergence - `Grid` coordinate 101:")
print(convergence.in_2d[1, 0])
###Output
_____no_output_____
###Markdown
Of course, these convergences are identical to the image-plane convergences, as it's only the lens galaxy that contributes to the overall mass of the ray-tracing system.
###Code
image_plane_convergence = tracer.image_plane.convergence_from_grid(
grid=image_plane_grid
)
print("Image-Plane - Convergence - `Grid` coordinate 1:")
print(image_plane_convergence.in_2d[0, 0])
print("Image-Plane - Convergence - `Grid` coordinate 2:")
print(image_plane_convergence.in_2d[0, 1])
print("Image-Plane - Convergence - `Grid` coordinate 3:")
print(image_plane_convergence.in_2d[0, 2])
print("Image-Plane - Convergence - `Grid` coordinate 101:")
print(image_plane_convergence.in_2d[1, 0])
###Output
_____no_output_____
###Markdown
I've left the rest below commented to avoid too many print statements, but if you're feeling adventurous go ahead and uncomment the lines below!
###Code
# print("Potential:")
# print(tracer.potential_from_grid(grid=image_plane_grid))
# print(tracer.image_plane.potential_from_grid(grid=image_plane_grid))
# print("Deflections:")
# print(tracer.deflections_from_grid(grid=image_plane_grid))
# print(tracer.image_plane.deflections_from_grid(grid=image_plane_grid))
###Output
_____no_output_____
###Markdown
You can also plot the above attributes on individual figures, using the appropriate ray-tracing `Plotter` (I've left most commented out again for convenience).
###Code
tracer_plotter = aplt.TracerPlotter(tracer=tracer, grid=image_plane_grid)
tracer_plotter.figures(
image=True,
convergence=True,
potential=False,
deflections_y=False,
deflections_x=False,
)
###Output
_____no_output_____
###Markdown
In the previous tutorial, we plotted the critical curves on the convergence map of a `MassProfile`. We now introduce the 'caustic', which is a critical curve mapped to the source-plane. This is computed by calculating the deflection angles of the `Tracer` at the critical curves and ray-tracing them to the source plane. As discussed in the previous tutorial, critical curves mark regions of infinite magnification. Thus, if a source appears near a caustic in the source plane it will appear significantly brighter than its true luminosity. We can plot both the critical curve and caustic using an `Include2D` object. Note how the critical curve appears only for the image-plane grid, whereas the caustic only appears in the source plane.NOTE: Again, numerical issues make the caustic appear 'jagged' when it should be smooth.
###Code
include_2d = aplt.Include2D(critical_curves=True, caustics=True)
tracer_plotter = aplt.TracerPlotter(tracer=tracer, grid=image_plane_grid, include_2d=include_2d)
tracer_plotter.figures_of_planes(plane_grid=True, plane_index=0)
tracer_plotter.figures_of_planes(plane_grid=True, plane_index=1)
###Output
_____no_output_____
###Markdown
We can also plot the caustic on the source-plane image.
###Code
tracer_plotter.figures_of_planes(plane_image=True, plane_index=1)
###Output
_____no_output_____
###Markdown
Caustics also mark the regions in the source-plane where the multiplicity of the strong lens changes. That is, if a source crosses a caustic, it goes from 2 images to 1 image. Try and show this yourself by changing the (y,x) centre of the source-plane galaxy's light profile!
###Code
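# (Added hint) For example, try an off-centre source such as centre=(0.0, 0.3) below (an
# illustrative value, not from the original tutorial) and re-run to see the multiplicity change.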
sersic_light_profile = al.lp.SphericalSersic(
centre=(0.0, 0.0), intensity=1.0, effective_radius=1.0, sersic_index=1.0
)
source_galaxy = al.Galaxy(redshift=1.0, light=sersic_light_profile)
tracer = al.Tracer.from_galaxies(galaxies=[lens_galaxy, source_galaxy])
tracer_plotter = aplt.TracerPlotter(tracer=tracer, grid=image_plane_grid)
tracer_plotter.figures_of_planes(plane_image=True, plane_index=1)
###Output
_____no_output_____ |
notebooks/version_02_1/1_intro_to_automl.ipynb | ###Markdown
AutoML solution vs single model. FEDOT version = 0.2.1. Below is an example of running an AutoML solution for a classification problem. Description of the task and dataset
###Code
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
# Input data from csv files
train_data_path = '../data/scoring_train.csv'
test_data_path = '../data/scoring_test.csv'
df = pd.read_csv(train_data_path)
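# (Added, purely exploratory) print the number of rows and columns before previewing the table
print(df.shape)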
df.head(5)
###Output
_____no_output_____
###Markdown
Baseline model. Let's use the API features to solve the classification problem. First, we create a chain from a single model, "xgboost". To do this, we will substitute the appropriate name in the predefined_model field.
###Code
from fedot.api.main import Fedot
#task selection, initialisation of the framework
baseline_model = Fedot(problem='classification')
#fit model without optimisation - single XGBoost node is used
baseline_model.fit(features=train_data_path, target='target', predefined_model='xgboost')
#evaluate the prediction with test data
baseline_model.predict_proba(features=test_data_path)
#evaluate quality metric for the test sample
baseline_metrics = baseline_model.get_metrics()
print(baseline_metrics)
###Output
{'roc_auc': 0.827, 'f1': 0.32508833922261476}
###Markdown
FEDOT AutoML for classification. We can identify the model using an evolutionary algorithm built into the core of the FEDOT framework.
###Code
# new instance to be used as AutoML tool
auto_model = Fedot(problem='classification', seed = 42, verbose_level=4)
#run of the AutoML-based model generation
pipeline = auto_model.fit(features=train_data_path, target='target')
prediction = auto_model.predict_proba(features=test_data_path)
auto_metrics = auto_model.get_metrics()
print(auto_metrics)
#comparison with the manual pipeline
print('Baseline', round(baseline_metrics['roc_auc'], 3))
print('AutoML solution', round(auto_metrics['roc_auc'], 3))
###Output
Baseline 0.827
AutoML solution 0.849
###Markdown
AutoML solution vs single model FEDOT version = 0.2.1
###Code
pip install fedot==0.2.1
###Output
_____no_output_____
###Markdown
Below is an example of running an AutoML solution for a classification problem. Description of the task and dataset
###Code
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
# Input data from csv files
train_data_path = '../data/scoring_train.csv'
test_data_path = '../data/scoring_test.csv'
df = pd.read_csv(train_data_path)
df.head(5)
###Output
_____no_output_____
###Markdown
Baseline model. Let's use the API features to solve the classification problem. First, we create a chain from a single model, "xgboost". To do this, we will substitute the appropriate name in the predefined_model field.
###Code
from fedot.api.main import Fedot
#task selection, initialisation of the framework
baseline_model = Fedot(problem='classification')
#fit model without optimisation - single XGBoost node is used
baseline_model.fit(features=train_data_path, target='target', predefined_model='xgboost')
#evaluate the prediction with test data
baseline_model.predict_proba(features=test_data_path)
#evaluate quality metric for the test sample
baseline_metrics = baseline_model.get_metrics()
print(baseline_metrics)
###Output
{'roc_auc': 0.827, 'f1': 0.32508833922261476}
###Markdown
FEDOT AutoML for classification. We can identify the model using an evolutionary algorithm built into the core of the FEDOT framework.
###Code
# new instance to be used as AutoML tool
auto_model = Fedot(problem='classification', seed = 42, verbose_level=4)
#run of the AutoML-based model generation
pipeline = auto_model.fit(features=train_data_path, target='target')
prediction = auto_model.predict_proba(features=test_data_path)
auto_metrics = auto_model.get_metrics()
print(auto_metrics)
#comparison with the manual pipeline
print('Baseline', round(baseline_metrics['roc_auc'], 3))
print('AutoML solution', round(auto_metrics['roc_auc'], 3))
###Output
Baseline 0.827
AutoML solution 0.849
|
intro-to-python/intro-to-python-workbook.ipynb | ###Markdown
Intro to Python Variables in Python. Variables in programming languages hold values.* In Python a single `=` (equals sign) assigns the value on the right to the name of the variable on the left.* Variables are created when a value is assigned to them.* In the code block below, Python assigns the numerical value `42` to a variable called `helpful_articles` Creating variables
###Code
helpful_articles = 42
library = "Cushing/Whitney Medical Library"
###Output
_____no_output_____
###Markdown
Rules for creating variables* Variable names can only contain letters, digits, and underscores `_`* Variable names cannot start with a digit* Variable names are case sensitive (`library`, `Library`, and `LIBRARY`) are three different variables Use `print` to display values* `print` is a Python function used to read-out, display, or "print out" the value stored within a variable name.* Function in python look like this, `print()`, where the function name is followed by a set of parentheses. * You will provide values to the function within the parentheses. These values can also be called "arguments".* In the case of print, the values that are fed into the parentheses are the items you want to display, or print.* You can print the values of variables, strings, or digits.* You can also print multiple values in one print statement
###Code
print(helpful_articles)
print(100 + 2 / 3 * 3)
print("String is another word for text")
# print(String is another word for text)  # error: without quotation marks this is not a string, so this line will not run
print("The", library, "is my favorite library")
###Output
The Cushing/Whitney Medical Library is my favorite library
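###Markdown
A quick illustration of the naming rules listed above (the variable names and values here are invented for this example):
###Code
library_count_2 = 5       # valid: letters, digits, and underscores are allowed
# 2nd_library_count = 5   # invalid: a variable name cannot start with a digit
Librarian = "Kaitlin"
librarian = "Caitlin"
print(Librarian, librarian)  # case sensitive: these are two different variables
###Output
_____no_output_____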
###Markdown
Using variables within calculations
###Code
helpful_articles = helpful_articles + 3
print(helpful_articles)
###Output
48
###Markdown
Use an index to get a single character from a string* The characters within a string are ordered. For example, the string `AB` is not the same as the string `BA`. Updating Variables* Python operates from top to bottom.* The value held in second will not be updated to reflect that `first = 2`, because we are not reassigning the variable
###Code
first = 1
second = 5 * first
first = 2
print('first is:', first, 'and second is:', second)
###Output
first is: 2 and second is: 5
###Markdown
Break here for 10 minutes to complete exercises. Data Types in Python* Every value in a program has a specific type.* Integer (`int`): represents positive or negative whole numbers like 3 or -15.* Floating point (`float`): represents real numbers (i.e., numbers with a decimal point) like 3.14159 or -2.5.* Character string (`str`): text. * Written in either single quotation marks or double quotation marks (`""` or `''`) * The quotation marks are not printed when the string is displayed Use the `type` function to find the type of a value* `type` works on values and the values of variables
###Code
print(type(52))
print(type("some words"))
message = "penny for your thoughts"
print(message)
print(type(message))
print(type("100"))
###Output
<class 'str'>
###Markdown
Data type conversions
###Code
excel_cell = "299182"
print(type(excel_cell))
excel_cell = int(excel_cell)
print(type(excel_cell))
number = 100
print(str(number) + "ish")
###Output
100ish
###Markdown
Strings have length and an index* The built-in function `len` counts the number of characters in a string* The characters (individual letters, numbers, spaces, etc.) within a string are ordered. For example, the strings "AB" and "BA" are not the same.* Each position in the string is given a number called an index. In Python, counting starts at zero: the first character is at index `0`, the second character is at index `1`, and so on.
###Code
print(library) # let's refer to a string variable we created previously
print(len(library))
###Output
Cushing/Whitney Medical Library
31
###Markdown
Of the 31 characters that make up our string, `C` should be the first one. We can check this. Like many C-based programming languages, Python starts index counting from 0.
###Code
print(library[0])
###Output
C
###Markdown
Use slicing to parse out a substring* A part of a string is called a substring. A substring can be as short as a single character.* An item in a list is called an element. Whenever we treat a string as a list, the string's elements are its individual characters. * We take a slice of a string or list using `[start:stop]`, where `start` is replaced with the index of the first element we want and `stop` is replaced by the index of the element just after the last element we want.* Slicing does not change the contents of the original string. The slice is a copy of part of the original string.
###Code
print(library)
print(library[16:23])
print(library[24:31])
###Output
Cushing/Whitney Medical Library
Medical
Library
###Markdown
Break here for 10 minutes to complete exercises. Functions and Finding Help in Python* Different functions may take 0 or 1, or many arguments.* Functions are likely to be specific about the data type they need.* Functions may have default values for some arguments.* You can learn about functions using a `help` function.* Python has built-in functions, as well as many other functions that are associated with external packages. * You can create your own functions in Python. Functions might be able to take multiple items
###Code
print(max(2039, 39228, 3948, 10029))
print(min(2039, 39228, 3948, 10029))
###Output
39228
2039
###Markdown
Functions might have default settings
###Code
print(round(3.712))
print(round(3.712, 1))
###Output
4
3.7
###Markdown
Finding more information about a function
###Code
help(round)
###Output
Help on built-in function round in module builtins:
round(number, ndigits=None)
Round a number to a given precision in decimal digits.
The return value is an integer if ndigits is omitted or None. Otherwise
the return value has the same type as the number. ndigits may be negative.
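###Markdown
This section noted earlier that you can also create your own functions. A minimal sketch (the function name and values below are made up for illustration):
###Code
def fahrenheit_to_celsius(temp_f):
    """Convert a temperature in degrees Fahrenheit to degrees Celsius."""
    return (temp_f - 32) * 5 / 9
print(fahrenheit_to_celsius(98.6))
###Output
_____no_output_____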
###Markdown
Access more functions by importing external libraries into a project. We will discuss libraries further in a later section, but this is how you add or import a library (i.e., package) into a Python project. * Libraries are installed once per machine, but they need to be imported into each project you would like to use them in. * In order to use functions from a specific package, you need to indicate which package the function is coming from using the syntax: library_name.function_name() * E.g., statistics.mode()
###Code
import statistics
n = [1, 1, 2, 3, 3, 3, 3]
s = statistics.mode(n)
print(s)
###Output
3
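###Markdown
The same `library_name.function_name()` pattern works for any other function in the library, for example (added purely for illustration):
###Code
print(statistics.mean(n))
print(statistics.median(n))
###Output
_____no_output_____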
###Markdown
Lists* Doing calculations with a hundred variables called `patient_001`, `patient_002`, `patient_003`, etc., would be very slow and tedious. However, if all of these patients were in a list, you can perform calculations across each item in the list in an automated way.* Lists store multiple values.* Items in a list are stored between hard brackets `[]`.* Values in lists are separated by commas `,`. Creating and returning list contents
###Code
weights = [157, 180, 166, 150, 183, 160]
print("Weights in list:", weights)
print("Length of weights list:", len(weights))
###Output
Weights in list: [157, 180, 166, 150, 183, 160]
Length of weights list: 6
###Markdown
Use an index to return a specific element from a list
###Code
print('First item in list:', weights[0])
###Output
First item in list: 157
###Markdown
Use an index to replace an item in a list
###Code
weights[0] = 156
print('Weights list is now:', weights)
###Output
Weights list is now: [156, 180, 166, 150, 183, 160]
###Markdown
Adding (i.e. appending) items to a list* `append` is a "method" of list. Methods are like functions, but tied to a particular object.* Use `object_name.method_name()` to call methods.* You can find the methods that objects have associated with them by running the `help` function on the object name (e.g., `help(list)`)
###Code
names = ["Elo", "Molly", "Charlie", "Riley"]
print("Original list:", names)
names.append("Ben")
names.append("Charolette")
print("List after append", names)
###Output
Original list: ['Elo', 'Molly', 'Charlie', 'Riley']
List after append ['Elo', 'Molly', 'Charlie', 'Riley', 'Ben', 'Charolette']
###Markdown
Combining lists together* You can combine lists together with another list method called `extend`
###Code
names_1 = ["Elo", "Molly", "Charlie", "Riley"]
names_2 = ["Ben", "Charolette"]
names_1.extend(names_2)
print(names_1)
###Output
['Elo', 'Molly', 'Charlie', 'Riley', 'Ben', 'Charolette']
###Markdown
Create an empty list and append items to it
###Code
empty_list = []
print(empty_list)
empty_list.append("this is a single string")
print(empty_list)
empty_list.append(["this", "is", "a", "few", "strings"])
print(empty_list)
###Output
[]
['this is a single string']
['this is a single string', ['this', 'is', 'a', 'few', 'strings']]
###Markdown
Break here for 10 minutes to complete the exercises in Part 3: Functions and Lists. For Loops* For loops allow you to drill down into a data structure. * Operate on sentence in a paragraph, each word in a sentence, or each character in a word. * Operate on each table in a database, each column in a spreadsheet, or each cell in a column. * Operate on each item in a list.* A for loop executes commands once for each element in a set.
###Code
for number in [2, 3, 5]:
print(number) #indentations in python are important!
###Output
2
3
5
###Markdown
You can also return items in a list that has been stored as a variable
###Code
for name in names_1:
print('First name:', name)
###Output
First name: Elo
First name: Molly
First name: Charlie
First name: Riley
First name: Ben
First name: Charolette
###Markdown
The body of a loop can contain many statements
###Code
prime_numbers = [2, 3, 5]
for p in prime_numbers:
first_equation = p + 100
second_equation = p * -1
print(p, first_equation, second_equation)
###Output
2 102 -2
3 103 -3
5 105 -5
###Markdown
Break here for 10 minutes to complete the exercises in Part 4: For Loops. Python Libraries * A "library" in python is a collection of files (called modules) that contain functions for use by other programs.* A Python program must import a library in order to use it. You will use `import` to do this.* Refer to items from specific libraries as library_name.item_name. * Python uses `.` to mean "part of"* Use the `help` function to learn about the contents of a library module. Pandas (a Python Library) and Data Frames* Pandas is a widely-used Python library for statistics, particularly on tabular data.* Pandas borrows many features from R’s dataframes. * A 2-dimensional table whose columns have names and potentially have different data types.* Load it with import pandas as pd. The alias pd is commonly used for Pandas. Shortning pandas to pd this saves typing time. Import the Pandas library to this project
###Code
import pandas as pd
###Output
_____no_output_____
###Markdown
Read a Comma Separated Values (CSV) data file with pd.read_csv.* This imports the CSV into your project and saves it as a variable called data.* As you work with `data`, you are not altering the original CSV file.
###Code
data = pd.read_csv("data/newly_hiv_infected_number_all_ages.csv", index_col = "country")
print(data)
data.info()
###Output
<class 'pandas.core.frame.DataFrame'>
Index: 143 entries, Afghanistan to Zimbabwe
Data columns (total 22 columns):
1990 68 non-null float64
1991 68 non-null float64
1992 68 non-null float64
1993 68 non-null float64
1994 68 non-null float64
1995 68 non-null float64
1996 68 non-null float64
1997 68 non-null float64
1998 68 non-null float64
1999 68 non-null float64
2000 68 non-null float64
2001 68 non-null float64
2002 68 non-null float64
2003 68 non-null float64
2004 68 non-null float64
2005 68 non-null float64
2006 68 non-null float64
2007 68 non-null float64
2008 68 non-null float64
2009 68 non-null float64
2010 49 non-null float64
2011 132 non-null float64
dtypes: float64(22)
memory usage: 25.7+ KB
###Markdown
Use `DataFrame.loc[... , ...]` to select values by their index labels.* The position before the comma indicates the row, and the position after the comma indicates the column returned. * Fill in both spaces before and after the comma to return a single cell (sample of 1 country during one year). * Indicate only the first position (with a `:` in the second position) to return an entire row (country) from the data frame.* Indicate only the second position (with a `:` in the first position) to return an entire column (year) from the data frame.
###Code
print(data.loc["France","1995"])
print(data.loc["France",:])
print(data.loc[:,"1995"])
###Output
country
Afghanistan NaN
Angola 13000.0
Argentina 6700.0
Armenia 120.0
Australia NaN
Austria NaN
Azerbaijan NaN
Bahamas 750.0
Bangladesh 160.0
Barbados 160.0
Belarus 60.0
Belgium NaN
Belize 350.0
Benin 7500.0
Bhutan NaN
Bolivia NaN
Botswana 36000.0
Brazil NaN
Bulgaria NaN
Burkina Faso 14000.0
Burundi 27000.0
Cambodia 11000.0
Cameroon 51000.0
Canada NaN
Central African Republic 25000.0
Chile NaN
Colombia NaN
Congo, Rep. 6000.0
Costa Rica NaN
Cote d'Ivoire 89000.0
...
Slovak Republic NaN
Slovenia NaN
Somalia NaN
South Africa 430000.0
South Korea NaN
South Sudan NaN
Spain NaN
Sri Lanka 160.0
Sudan NaN
Suriname 1100.0
Swaziland 13000.0
Sweden NaN
Switzerland NaN
Tajikistan 350.0
Tanzania 170000.0
Thailand 49000.0
Togo 13000.0
Trinidad and Tobago 1300.0
Tunisia NaN
Turkey NaN
Uganda 92000.0
Ukraine NaN
United Kingdom NaN
United States 51000.0
Uruguay NaN
Venezuela NaN
Vietnam NaN
Yemen NaN
Zambia 84000.0
Zimbabwe 250000.0
Name: 1995, Length: 143, dtype: float64
###Markdown
Use `DataFrame.loc[... , ...]` to select multiple columns or rows
###Code
print(data.loc["Angola":"Cameroon", "2005":"2011"])
###Output
2005 2006 2007 2008 2009 2010 2011
country
Angola 23000.0 24000.0 24000.0 24000.0 24000.0 24000.0 23000.0
Argentina 7700.0 7700.0 7600.0 7500.0 7500.0 NaN 5600.0
Armenia 120.0 120.0 120.0 300.0 300.0 NaN 350.0
Australia NaN NaN NaN NaN NaN NaN 1100.0
Austria NaN NaN NaN NaN NaN NaN 1200.0
Azerbaijan NaN NaN NaN NaN NaN NaN 750.0
Bahamas 750.0 750.0 350.0 350.0 350.0 350.0 350.0
Bangladesh 750.0 750.0 750.0 750.0 750.0 1100.0 1300.0
Barbados 60.0 60.0 60.0 60.0 60.0 60.0 60.0
Belarus 2400.0 2100.0 1900.0 1800.0 1800.0 1800.0 1900.0
Belgium NaN NaN NaN NaN NaN NaN 1300.0
Belize 350.0 350.0 350.0 350.0 350.0 350.0 350.0
Benin 5100.0 4800.0 4800.0 4900.0 4900.0 4900.0 4900.0
Bhutan NaN NaN NaN NaN NaN NaN 350.0
Bolivia NaN NaN NaN NaN NaN NaN 1300.0
Botswana 17000.0 16000.0 16000.0 15000.0 14000.0 NaN NaN
Brazil NaN NaN NaN NaN NaN NaN 18000.0
Bulgaria NaN NaN NaN NaN NaN NaN 350.0
Burkina Faso 7300.0 6900.0 6900.0 7000.0 8000.0 NaN NaN
Burundi 6900.0 6000.0 5100.0 4400.0 3700.0 3200.0 3000.0
Cambodia 3200.0 2500.0 2100.0 1900.0 1400.0 1300.0 1100.0
Cameroon 54000.0 52000.0 50000.0 47000.0 46000.0 43000.0 43000.0
###Markdown
Use `DataFrame.loc[... , ...]` to call a customized subset
###Code
north_america = ["Canada", "United States", "Mexico"]
every_five = ["1990", "1995", "2000", "2005", "2010"]
subset = data.loc[north_america,every_five]
print(subset)
###Output
1990 1995 2000 2005 2010
country
Canada NaN NaN NaN NaN NaN
United States 88000.0 51000.0 52000.0 49000.0 49000.0
Mexico 11000.0 12000.0 13000.0 12000.0 10000.0
###Markdown
Summarizing data subsets
###Code
print(subset.describe())
###Output
1990 1995 2000 2005 2010
count 2.000000 2.000000 2.000000 2.000000 2.000000
mean 49500.000000 31500.000000 32500.000000 30500.000000 29500.000000
std 54447.222151 27577.164466 27577.164466 26162.950904 27577.164466
min 11000.000000 12000.000000 13000.000000 12000.000000 10000.000000
25% 30250.000000 21750.000000 22750.000000 21250.000000 19750.000000
50% 49500.000000 31500.000000 32500.000000 30500.000000 29500.000000
75% 68750.000000 41250.000000 42250.000000 39750.000000 39250.000000
max 88000.000000 51000.000000 52000.000000 49000.000000 49000.000000
###Markdown
Finding elements where rates per year are higher than that year's average. The comparison returns a DataFrame of `True`/`False` (boolean) values.
###Code
print(data > data.mean())
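# note (added): the name `filter` below shadows Python's built-in filter() function; a name like `mask` would avoid that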
filter = data > data.mean()
print(data[filter])
###Output
1990 1991 1992 1993 1994 \
country
Afghanistan NaN NaN NaN NaN NaN
Angola NaN NaN NaN NaN NaN
Argentina NaN NaN NaN NaN NaN
Armenia NaN NaN NaN NaN NaN
Australia NaN NaN NaN NaN NaN
Austria NaN NaN NaN NaN NaN
Azerbaijan NaN NaN NaN NaN NaN
Bahamas NaN NaN NaN NaN NaN
Bangladesh NaN NaN NaN NaN NaN
Barbados NaN NaN NaN NaN NaN
Belarus NaN NaN NaN NaN NaN
Belgium NaN NaN NaN NaN NaN
Belize NaN NaN NaN NaN NaN
Benin NaN NaN NaN NaN NaN
Bhutan NaN NaN NaN NaN NaN
Bolivia NaN NaN NaN NaN NaN
Botswana NaN NaN NaN NaN NaN
Brazil NaN NaN NaN NaN NaN
Bulgaria NaN NaN NaN NaN NaN
Burkina Faso 26000.0 NaN NaN NaN NaN
Burundi NaN NaN NaN NaN NaN
Cambodia NaN NaN NaN NaN NaN
Cameroon NaN NaN 33000.0 40000.0 46000.0
Canada NaN NaN NaN NaN NaN
Central African Republic NaN NaN NaN NaN NaN
Chile NaN NaN NaN NaN NaN
Colombia NaN NaN NaN NaN NaN
Congo, Rep. NaN NaN NaN NaN NaN
Costa Rica NaN NaN NaN NaN NaN
Cote d'Ivoire 52000.0 72000.0 90000.0 99000.0 98000.0
... ... ... ... ... ...
Slovak Republic NaN NaN NaN NaN NaN
Slovenia NaN NaN NaN NaN NaN
Somalia NaN NaN NaN NaN NaN
South Africa 44000.0 73000.0 120000.0 190000.0 300000.0
South Korea NaN NaN NaN NaN NaN
South Sudan NaN NaN NaN NaN NaN
Spain NaN NaN NaN NaN NaN
Sri Lanka NaN NaN NaN NaN NaN
Sudan NaN NaN NaN NaN NaN
Suriname NaN NaN NaN NaN NaN
Swaziland NaN NaN NaN NaN NaN
Sweden NaN NaN NaN NaN NaN
Switzerland NaN NaN NaN NaN NaN
Tajikistan NaN NaN NaN NaN NaN
Tanzania 180000.0 200000.0 200000.0 200000.0 190000.0
Thailand 140000.0 150000.0 120000.0 90000.0 64000.0
Togo NaN NaN NaN NaN NaN
Trinidad and Tobago NaN NaN NaN NaN NaN
Tunisia NaN NaN NaN NaN NaN
Turkey NaN NaN NaN NaN NaN
Uganda 130000.0 120000.0 110000.0 110000.0 98000.0
Ukraine NaN NaN NaN NaN NaN
United Kingdom NaN NaN NaN NaN NaN
United States 88000.0 52000.0 50000.0 52000.0 49000.0
Uruguay NaN NaN NaN NaN NaN
Venezuela NaN NaN NaN NaN NaN
Vietnam NaN NaN NaN NaN NaN
Yemen NaN NaN NaN NaN NaN
Zambia 85000.0 87000.0 87000.0 86000.0 85000.0
Zimbabwe 180000.0 200000.0 210000.0 240000.0 260000.0
1995 1996 1997 1998 1999 \
country
Afghanistan NaN NaN NaN NaN NaN
Angola NaN NaN NaN NaN NaN
Argentina NaN NaN NaN NaN NaN
Armenia NaN NaN NaN NaN NaN
Australia NaN NaN NaN NaN NaN
Austria NaN NaN NaN NaN NaN
Azerbaijan NaN NaN NaN NaN NaN
Bahamas NaN NaN NaN NaN NaN
Bangladesh NaN NaN NaN NaN NaN
Barbados NaN NaN NaN NaN NaN
Belarus NaN NaN NaN NaN NaN
Belgium NaN NaN NaN NaN NaN
Belize NaN NaN NaN NaN NaN
Benin NaN NaN NaN NaN NaN
Bhutan NaN NaN NaN NaN NaN
Bolivia NaN NaN NaN NaN NaN
Botswana NaN NaN NaN NaN NaN
Brazil NaN NaN NaN NaN NaN
Bulgaria NaN NaN NaN NaN NaN
Burkina Faso NaN NaN NaN NaN NaN
Burundi NaN NaN NaN NaN NaN
Cambodia NaN NaN NaN NaN NaN
Cameroon 51000.0 56000.0 59000.0 61000.0 61000.0
Canada NaN NaN NaN NaN NaN
Central African Republic NaN NaN NaN NaN NaN
Chile NaN NaN NaN NaN NaN
Colombia NaN NaN NaN NaN NaN
Congo, Rep. NaN NaN NaN NaN NaN
Costa Rica NaN NaN NaN NaN NaN
Cote d'Ivoire 89000.0 78000.0 68000.0 61000.0 55000.0
... ... ... ... ... ...
Slovak Republic NaN NaN NaN NaN NaN
Slovenia NaN NaN NaN NaN NaN
Somalia NaN NaN NaN NaN NaN
South Africa 430000.0 570000.0 680000.0 720000.0 710000.0
South Korea NaN NaN NaN NaN NaN
South Sudan NaN NaN NaN NaN NaN
Spain NaN NaN NaN NaN NaN
Sri Lanka NaN NaN NaN NaN NaN
Sudan NaN NaN NaN NaN NaN
Suriname NaN NaN NaN NaN NaN
Swaziland NaN NaN NaN NaN NaN
Sweden NaN NaN NaN NaN NaN
Switzerland NaN NaN NaN NaN NaN
Tajikistan NaN NaN NaN NaN NaN
Tanzania 170000.0 160000.0 150000.0 140000.0 140000.0
Thailand 49000.0 NaN NaN NaN NaN
Togo NaN NaN NaN NaN NaN
Trinidad and Tobago NaN NaN NaN NaN NaN
Tunisia NaN NaN NaN NaN NaN
Turkey NaN NaN NaN NaN NaN
Uganda 92000.0 88000.0 87000.0 87000.0 89000.0
Ukraine NaN NaN NaN NaN NaN
United Kingdom NaN NaN NaN NaN NaN
United States 51000.0 66000.0 68000.0 67000.0 60000.0
Uruguay NaN NaN NaN NaN NaN
Venezuela NaN NaN NaN NaN NaN
Vietnam NaN NaN NaN NaN NaN
Yemen NaN NaN NaN NaN NaN
Zambia 84000.0 84000.0 85000.0 86000.0 88000.0
Zimbabwe 250000.0 240000.0 230000.0 220000.0 190000.0
... 2002 2003 2004 2005 \
country ...
Afghanistan ... NaN NaN NaN NaN
Angola ... NaN NaN NaN NaN
Argentina ... NaN NaN NaN NaN
Armenia ... NaN NaN NaN NaN
Australia ... NaN NaN NaN NaN
Austria ... NaN NaN NaN NaN
Azerbaijan ... NaN NaN NaN NaN
Bahamas ... NaN NaN NaN NaN
Bangladesh ... NaN NaN NaN NaN
Barbados ... NaN NaN NaN NaN
Belarus ... NaN NaN NaN NaN
Belgium ... NaN NaN NaN NaN
Belize ... NaN NaN NaN NaN
Benin ... NaN NaN NaN NaN
Bhutan ... NaN NaN NaN NaN
Bolivia ... NaN NaN NaN NaN
Botswana ... NaN NaN NaN NaN
Brazil ... NaN NaN NaN NaN
Bulgaria ... NaN NaN NaN NaN
Burkina Faso ... NaN NaN NaN NaN
Burundi ... NaN NaN NaN NaN
Cambodia ... NaN NaN NaN NaN
Cameroon ... 58000.0 58000.0 56000.0 54000.0
Canada ... NaN NaN NaN NaN
Central African Republic ... NaN NaN NaN NaN
Chile ... NaN NaN NaN NaN
Colombia ... NaN NaN NaN NaN
Congo, Rep. ... NaN NaN NaN NaN
Costa Rica ... NaN NaN NaN NaN
Cote d'Ivoire ... 42000.0 38000.0 NaN NaN
... ... ... ... ... ...
Slovak Republic ... NaN NaN NaN NaN
Slovenia ... NaN NaN NaN NaN
Somalia ... NaN NaN NaN NaN
South Africa ... 560000.0 520000.0 500000.0 480000.0
South Korea ... NaN NaN NaN NaN
South Sudan ... NaN NaN NaN NaN
Spain ... NaN NaN NaN NaN
Sri Lanka ... NaN NaN NaN NaN
Sudan ... NaN NaN NaN NaN
Suriname ... NaN NaN NaN NaN
Swaziland ... NaN NaN NaN NaN
Sweden ... NaN NaN NaN NaN
Switzerland ... NaN NaN NaN NaN
Tajikistan ... NaN NaN NaN NaN
Tanzania ... 140000.0 140000.0 140000.0 140000.0
Thailand ... NaN 38000.0 NaN NaN
Togo ... NaN NaN NaN NaN
Trinidad and Tobago ... NaN NaN NaN NaN
Tunisia ... NaN NaN NaN NaN
Turkey ... NaN NaN NaN NaN
Uganda ... 100000.0 110000.0 120000.0 120000.0
Ukraine ... NaN NaN NaN NaN
United Kingdom ... NaN NaN NaN NaN
United States ... 48000.0 49000.0 49000.0 49000.0
Uruguay ... NaN NaN NaN NaN
Venezuela ... NaN NaN NaN NaN
Vietnam ... NaN NaN NaN NaN
Yemen ... NaN NaN NaN NaN
Zambia ... 94000.0 96000.0 96000.0 93000.0
Zimbabwe ... 130000.0 110000.0 110000.0 110000.0
2006 2007 2008 2009 2010 \
country
Afghanistan NaN NaN NaN NaN NaN
Angola NaN NaN NaN NaN NaN
Argentina NaN NaN NaN NaN NaN
Armenia NaN NaN NaN NaN NaN
Australia NaN NaN NaN NaN NaN
Austria NaN NaN NaN NaN NaN
Azerbaijan NaN NaN NaN NaN NaN
Bahamas NaN NaN NaN NaN NaN
Bangladesh NaN NaN NaN NaN NaN
Barbados NaN NaN NaN NaN NaN
Belarus NaN NaN NaN NaN NaN
Belgium NaN NaN NaN NaN NaN
Belize NaN NaN NaN NaN NaN
Benin NaN NaN NaN NaN NaN
Bhutan NaN NaN NaN NaN NaN
Bolivia NaN NaN NaN NaN NaN
Botswana NaN NaN NaN NaN NaN
Brazil NaN NaN NaN NaN NaN
Bulgaria NaN NaN NaN NaN NaN
Burkina Faso NaN NaN NaN NaN NaN
Burundi NaN NaN NaN NaN NaN
Cambodia NaN NaN NaN NaN NaN
Cameroon 52000.0 50000.0 47000.0 46000.0 43000.0
Canada NaN NaN NaN NaN NaN
Central African Republic NaN NaN NaN NaN NaN
Chile NaN NaN NaN NaN NaN
Colombia NaN NaN NaN NaN NaN
Congo, Rep. NaN NaN NaN NaN NaN
Costa Rica NaN NaN NaN NaN NaN
Cote d'Ivoire NaN NaN NaN NaN NaN
... ... ... ... ... ...
Slovak Republic NaN NaN NaN NaN NaN
Slovenia NaN NaN NaN NaN NaN
Somalia NaN NaN NaN NaN NaN
South Africa 470000.0 460000.0 450000.0 430000.0 390000.0
South Korea NaN NaN NaN NaN NaN
South Sudan NaN NaN NaN NaN NaN
Spain NaN NaN NaN NaN NaN
Sri Lanka NaN NaN NaN NaN NaN
Sudan NaN NaN NaN NaN NaN
Suriname NaN NaN NaN NaN NaN
Swaziland NaN NaN NaN NaN NaN
Sweden NaN NaN NaN NaN NaN
Switzerland NaN NaN NaN NaN NaN
Tajikistan NaN NaN NaN NaN NaN
Tanzania 140000.0 140000.0 140000.0 140000.0 140000.0
Thailand NaN NaN NaN NaN NaN
Togo NaN NaN NaN NaN NaN
Trinidad and Tobago NaN NaN NaN NaN NaN
Tunisia NaN NaN NaN NaN NaN
Turkey NaN NaN NaN NaN NaN
Uganda 130000.0 140000.0 150000.0 150000.0 150000.0
Ukraine NaN NaN NaN NaN NaN
United Kingdom NaN NaN NaN NaN NaN
United States 49000.0 49000.0 49000.0 49000.0 49000.0
Uruguay NaN NaN NaN NaN NaN
Venezuela NaN NaN NaN NaN NaN
Vietnam NaN NaN NaN NaN NaN
Yemen NaN NaN NaN NaN NaN
Zambia 88000.0 81000.0 72000.0 76000.0 NaN
Zimbabwe 100000.0 100000.0 100000.0 95000.0 87000.0
2011
country
Afghanistan NaN
Angola 23000.0
Argentina NaN
Armenia NaN
Australia NaN
Austria NaN
Azerbaijan NaN
Bahamas NaN
Bangladesh NaN
Barbados NaN
Belarus NaN
Belgium NaN
Belize NaN
Benin NaN
Bhutan NaN
Bolivia NaN
Botswana NaN
Brazil 18000.0
Bulgaria NaN
Burkina Faso NaN
Burundi NaN
Cambodia NaN
Cameroon 43000.0
Canada NaN
Central African Republic NaN
Chile NaN
Colombia NaN
Congo, Rep. NaN
Costa Rica NaN
Cote d'Ivoire NaN
... ...
Slovak Republic NaN
Slovenia NaN
Somalia NaN
South Africa 380000.0
South Korea NaN
South Sudan 16000.0
Spain NaN
Sri Lanka NaN
Sudan NaN
Suriname NaN
Swaziland NaN
Sweden NaN
Switzerland NaN
Tajikistan NaN
Tanzania 150000.0
Thailand NaN
Togo NaN
Trinidad and Tobago NaN
Tunisia NaN
Turkey NaN
Uganda 150000.0
Ukraine NaN
United Kingdom NaN
United States 49000.0
Uruguay NaN
Venezuela NaN
Vietnam 21000.0
Yemen NaN
Zambia NaN
Zimbabwe 74000.0
[143 rows x 22 columns]
###Markdown
Return countries that have reported numbers that are higher than average every year.
###Code
higher_than_average = data[filter]
always_higher_than_average = higher_than_average.dropna()  # drops rows with 1 or more NaN values
print(always_higher_than_average)
print(always_higher_than_average.describe())
###Output
1990 1991 1992 1993 1994 1995 \
country
Kenya 120000.0 170000.0 240000.0 280000.0 280000.0 250000.0
Malawi 79000.0 85000.0 88000.0 89000.0 88000.0 90000.0
Nigeria 96000.0 140000.0 180000.0 230000.0 280000.0 320000.0
South Africa 44000.0 73000.0 120000.0 190000.0 300000.0 430000.0
Tanzania 180000.0 200000.0 200000.0 200000.0 190000.0 170000.0
Uganda 130000.0 120000.0 110000.0 110000.0 98000.0 92000.0
United States 88000.0 52000.0 50000.0 52000.0 49000.0 51000.0
Zimbabwe 180000.0 200000.0 210000.0 240000.0 260000.0 250000.0
1996 1997 1998 1999 ... 2002 \
country ...
Kenya 210000.0 170000.0 150000.0 140000.0 ... 130000.0
Malawi 96000.0 100000.0 100000.0 100000.0 ... 100000.0
Nigeria 340000.0 340000.0 350000.0 350000.0 ... 300000.0
South Africa 570000.0 680000.0 720000.0 710000.0 ... 560000.0
Tanzania 160000.0 150000.0 140000.0 140000.0 ... 140000.0
Uganda 88000.0 87000.0 87000.0 89000.0 ... 100000.0
United States 66000.0 68000.0 67000.0 60000.0 ... 48000.0
Zimbabwe 240000.0 230000.0 220000.0 190000.0 ... 130000.0
2003 2004 2005 2006 2007 2008 \
country
Kenya 130000.0 130000.0 120000.0 120000.0 130000.0 120000.0
Malawi 100000.0 96000.0 90000.0 82000.0 73000.0 63000.0
Nigeria 290000.0 290000.0 300000.0 320000.0 330000.0 340000.0
South Africa 520000.0 500000.0 480000.0 470000.0 460000.0 450000.0
Tanzania 140000.0 140000.0 140000.0 140000.0 140000.0 140000.0
Uganda 110000.0 120000.0 120000.0 130000.0 140000.0 150000.0
United States 49000.0 49000.0 49000.0 49000.0 49000.0 49000.0
Zimbabwe 110000.0 110000.0 110000.0 100000.0 100000.0 100000.0
2009 2010 2011
country
Kenya 120000.0 110000.0 100000.0
Malawi 58000.0 51000.0 46000.0
Nigeria 360000.0 380000.0 340000.0
South Africa 430000.0 390000.0 380000.0
Tanzania 140000.0 140000.0 150000.0
Uganda 150000.0 150000.0 150000.0
United States 49000.0 49000.0 49000.0
Zimbabwe 95000.0 87000.0 74000.0
[8 rows x 22 columns]
1990 1991 1992 1993 \
count 8.000000 8.000000 8.000000 8.00000
mean 114625.000000 130000.000000 149750.000000 173875.00000
std 47996.837693 57268.789805 67021.851447 80945.19751
min 44000.000000 52000.000000 50000.000000 52000.00000
25% 85750.000000 82000.000000 104500.000000 104750.00000
50% 108000.000000 130000.000000 150000.000000 195000.00000
75% 142500.000000 177500.000000 202500.000000 232500.00000
max 180000.000000 200000.000000 240000.000000 280000.00000
1994 1995 1996 1997 \
count 8.000000 8.000000 8.000000 8.000000
mean 193125.000000 206625.000000 221250.000000 228125.000000
std 101327.527638 130282.042946 167972.574292 202895.073164
min 49000.000000 51000.000000 66000.000000 68000.000000
25% 95500.000000 91500.000000 94000.000000 96750.000000
50% 225000.000000 210000.000000 185000.000000 160000.000000
75% 280000.000000 267500.000000 265000.000000 257500.000000
max 300000.000000 430000.000000 570000.000000 680000.000000
1998 1999 ... 2002 \
count 8.000000 8.000000 ... 8.000000
mean 229250.000000 222375.000000 ... 188500.000000
std 218113.175079 216404.342114 ... 166924.277101
min 67000.000000 60000.000000 ... 48000.000000
25% 96750.000000 97250.000000 ... 100000.000000
50% 145000.000000 140000.000000 ... 130000.000000
75% 252500.000000 230000.000000 ... 180000.000000
max 720000.000000 710000.000000 ... 560000.000000
2003 2004 2005 2006 \
count 8.00000 8.000000 8.000000 8.000000
mean 181125.00000 179375.000000 176125.000000 176375.000000
std 153600.07208 147024.718524 143048.880257 143656.273793
min 49000.00000 49000.000000 49000.000000 49000.000000
25% 107500.00000 106500.000000 105000.000000 95500.000000
50% 120000.00000 125000.000000 120000.000000 125000.000000
75% 177500.00000 177500.000000 180000.000000 185000.000000
max 520000.00000 500000.000000 480000.000000 470000.000000
2007 2008 2009 2010 \
count 8.000000 8.000000 8.000000 8.00000
mean 177750.000000 176500.000000 175250.000000 169625.00000
std 142211.864082 142303.498602 141466.553341 137887.05264
min 49000.000000 49000.000000 49000.000000 49000.00000
25% 93250.000000 90750.000000 85750.000000 78000.00000
50% 135000.000000 130000.000000 130000.000000 125000.00000
75% 187500.000000 197500.000000 202500.000000 207500.00000
max 460000.000000 450000.000000 430000.000000 390000.00000
2011
count 8.000000
mean 161125.000000
std 129450.969759
min 46000.000000
25% 67750.000000
50% 125000.000000
75% 197500.000000
max 380000.000000
[8 rows x 22 columns]
###Markdown
Use .iloc to subset the dataframe
###Code
subset = data.iloc[88:92,:5] #.iloc uses numerical indexes instead of row or column names like .loc does
print(subset)
###Output
1990 1991 1992 1993 1994
country
Mozambique 25000.0 33000.0 43000.0 55000.0 67000.0
Myanmar 14000.0 13000.0 13000.0 16000.0 17000.0
Namibia 3800.0 5200.0 7200.0 9800.0 13000.0
Nepal 350.0 350.0 750.0 1000.0 1500.0
###Markdown
Perform calculations over columns
###Code
print("1990:\n", subset.loc[:, "1990"],"\n1994: \n", subset.loc[:, "1994"])
print("diff: \n", subset.loc[:, "1994"] - subset.loc[:, "1990"])
###Output
1990:
country
Mozambique 25000.0
Myanmar 14000.0
Namibia 3800.0
Nepal 350.0
Name: 1990, dtype: float64
1994:
country
Mozambique 67000.0
Myanmar 17000.0
Namibia 13000.0
Nepal 1500.0
Name: 1994, dtype: float64
diff:
country
Mozambique 42000.0
Myanmar 3000.0
Namibia 9200.0
Nepal 1150.0
dtype: float64
###Markdown
Break here for 10 minutes to complete the exercises in Part 5: Pandas and Data Frames. Conditional Statements* An `if` statement (more properly called a conditional statement) controls whether a block of code is executed or not. * The structure of an `if` statement is similar to that of a `for` loop: * The first line opens with `if` and ends with a colon `:` * The body containing one or more statements is indented (by 4 spaces or a tab)
###Code
result = 42
if result == 42:
print(result, "is the answer to the meaning of life and the universe")
result = 10
if result < 42:
print(result, "is not the answer to the meaning of life")
###Output
42 is the answer to the meaning of life and the universe
10 is not the answer to the meaning of life
###Markdown
Conditionals are often used within `for` loops
###Code
results = [10, 20, 12, 43, 50, 42]
for result in results:
if result > statistics.mean(results):
print(result, "is larger than average")
###Output
43 is larger than average
50 is larger than average
42 is larger than average
###Markdown
Use `else` within a conditional to execute a block of code when an `if` condition is *not* true
###Code
for result in results:
if result > statistics.mean(results):
print(result, "is larger than average")
else:
print(result, "is smaller than average")
###Output
10 is smaller than average
20 is smaller than average
12 is smaller than average
43 is larger than average
50 is larger than average
42 is larger than average
###Markdown
Use `elif` to add additional tests
###Code
for result in results:
if result > 42:
print(result, "is too large")
elif result < 42:
print(result, "is too small")
else:
print(result, "is the answer to the meaning of life and the universe")
###Output
10 is too small
20 is too small
12 is too small
43 is too large
50 is too large
42 is the answer to the meaning of life and the universe
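###Markdown
A short recap (added for illustration) that combines lists, `for` loops, and conditionals from this workbook; the `labels` list and threshold are invented purely for this example:
###Code
labels = []
for result in results:
    if result >= 42:
        labels.append("high")
    else:
        labels.append("low")
print(labels)
###Output
_____no_output_____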
|
notebooks/Dissertation/data_gen/explicit_problems_5d.ipynb | ###Markdown
Explicit 5D Benchmarks. This file demonstrates how to generate, plot, and output data for 5D benchmarks. Choose from: Korns_01, Korns_02, Korns_03, Korns_04, Korns_05, Korns_06, Korns_07, Korns_08, Korns_09, Korns_10, Korns_11, Korns_12, Korns_13, Korns_14, Korns_15. Imports
###Code
from pypge.benchmarks import explicit
import numpy as np
# visualization libraries
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import gridspec
# plot the visuals in ipython
%matplotlib inline
###Output
_____no_output_____
###Markdown
Generate the data with noise
###Code
# Set your output directories
img_dir = "../img/benchmarks/explicit/"
data_dir = "../data/benchmarks/explicit/"
# used for plotting
manual_scale = True
ymin = -2000
ymax = 2000
do_enable = False
xs_params = [
(-3.14,3.14),
(-3.14,3.14),
(0.001,1000),
(-3.14,3.14),
(-3.14,3.14)
]
# choose your problem here
prob = explicit.Korns_15(noise=1.0, npts=4000, xs_params=xs_params)
# you can also specify the following params as keyword arguments
#
# params = {
# 'name': "Koza_01",
# 'xs_str': ["x"],
# 'eqn_str': "x**4 + x**3 + x**2 + x",
# 'xs_params': [ (-4.0,4.0) ],
# 'npts': 200,
# 'noise': 1.0
# }
# or make your own with the following
#
# explicit.Explicit_1D(params):
###Output
{ 'eqn_str': '12 - 6*(tan(x)/exp(y))*(ln(z)-tan(v))',
'name': 'Korns_15',
'noise': 1.0,
'npts': 4000,
'xs': [x, y, z, v, w],
'xs_params': [ (-3.14, 3.14),
(-3.14, 3.14),
(0.001, 1000),
(-3.14, 3.14),
(-3.14, 3.14)],
'xs_str': ['x', 'y', 'z', 'v', 'w']}
###Markdown
Plot inline and save image
###Code
print prob['name'], prob['eqn']
print prob['xpts'].shape
xs = prob['xpts'][0]
ys = prob['xpts'][1]
zs = prob['xpts'][2]
vs = prob['xpts'][3]
ws = prob['xpts'][4]
Ys = prob['ypure']
fig = plt.figure()
fig.set_size_inches(16, 20)
gs = gridspec.GridSpec(5, 2)
fig.suptitle(prob['name'] + " Clean", fontsize=36)
ax0 = fig.add_subplot(gs[0,:])
ax0.scatter(xs, Ys, marker='.')
ax0.set_xlabel('X')
ax0.set_ylabel('OUT')
if manual_scale:
plt.autoscale(enable=do_enable)
plt.ylim(ymin,ymax)
ax1 = fig.add_subplot(gs[1,:])
ax1.scatter(ys, Ys, marker='.')
ax1.set_xlabel('Y')
ax1.set_ylabel('OUT')
if manual_scale:
plt.autoscale(enable=do_enable)
plt.ylim(ymin,ymax)
ax2 = fig.add_subplot(gs[2,:])
ax2.scatter(zs, Ys, marker='.')
ax2.set_xlabel('Z')
ax2.set_ylabel('OUT')
if manual_scale:
plt.autoscale(enable=do_enable)
plt.ylim(ymin,ymax)
ax3 = fig.add_subplot(gs[3,:])
ax3.scatter(vs, Ys, marker='.')
ax3.set_xlabel('V')
ax3.set_ylabel('OUT')
if manual_scale:
plt.autoscale(enable=do_enable)
plt.ylim(ymin,ymax)
ax4 = fig.add_subplot(gs[4,:])
ax4.scatter(ws, Ys, marker='.')
ax4.set_xlabel('W')
ax4.set_ylabel('OUT')
if manual_scale:
plt.autoscale(enable=do_enable)
plt.ylim(ymin,ymax)
plt.savefig(img_dir + prob['name'].lower() + "_clean.png", dpi=200)
plt.show()
Ys = prob['ypts']
fig = plt.figure()
fig.set_size_inches(16, 20)
gs = gridspec.GridSpec(5, 2)
fig.suptitle(prob['name'] + " Noisy", fontsize=36)
ax0 = fig.add_subplot(gs[0,:])
ax0.scatter(xs, Ys, marker='.')
ax0.set_xlabel('X')
ax0.set_ylabel('OUT')
if manual_scale:
plt.autoscale(enable=do_enable)
plt.ylim(ymin,ymax)
ax1 = fig.add_subplot(gs[1,:])
ax1.scatter(ys, Ys, marker='.')
ax1.set_xlabel('Y')
ax1.set_ylabel('OUT')
if manual_scale:
plt.autoscale(enable=do_enable)
plt.ylim(ymin,ymax)
ax2 = fig.add_subplot(gs[2,:])
ax2.scatter(zs, Ys, marker='.')
ax2.set_xlabel('Z')
ax2.set_ylabel('OUT')
if manual_scale:
plt.autoscale(enable=do_enable)
plt.ylim(ymin,ymax)
ax3 = fig.add_subplot(gs[3,:])
ax3.scatter(vs, Ys, marker='.')
ax3.set_xlabel('V')
ax3.set_ylabel('OUT')
if manual_scale:
plt.autoscale(enable=do_enable)
plt.ylim(ymin,ymax)
ax4 = fig.add_subplot(gs[4,:])
ax4.scatter(ws, Ys, marker='.')
ax4.set_xlabel('W')
ax4.set_ylabel('OUT')
if manual_scale:
plt.autoscale(enable=do_enable)
plt.ylim(ymin,ymax)
plt.savefig(img_dir + prob['name'].lower() + "_noisy.png", dpi=200)
plt.show()
###Output
Korns_15 -6*(log(z) - tan(v))*exp(-y)*tan(x) + 12
(5, 4000)
###Markdown
Output json and csv data
###Code
data = np.array([prob['xpts'][0],prob['xpts'][1],prob['xpts'][2],prob['xpts'][3],prob['xpts'][4], prob['ypts']]).T
print data.shape
cols = [['x', 'y', 'z', 'v', 'w', 'out']]
out_data = cols + data.tolist()
import json
json_out = json.dumps( out_data, indent=4)
# print json_out
f_json = open(data_dir + prob['name'].lower() + ".json", 'w')
f_json.write(json_out)
f_json.close()
f_csv = open(data_dir + prob['name'].lower() + ".csv", 'w')
for row in out_data:
line = ", ".join([str(col) for col in row]) + "\n"
f_csv.write(line)
f_csv.close()
###Output
(4000, 6)
###Markdown
Output *clean* json and csv data
###Code
data = np.array([prob['xpts'][0],prob['xpts'][1],prob['xpts'][2],prob['xpts'][3],prob['xpts'][4], prob['ypure']]).T
print data.shape
cols = [['x', 'y', 'z', 'v', 'w', 'out']]
out_data = cols + data.tolist()
import json
json_out = json.dumps( out_data, indent=4)
# print json_out
f_json = open(data_dir + prob['name'].lower() + "_clean.json", 'w')
f_json.write(json_out)
f_json.close()
f_csv = open(data_dir + prob['name'].lower() + "_clean.csv", 'w')
for row in out_data:
line = ", ".join([str(col) for col in row]) + "\n"
f_csv.write(line)
f_csv.close()
###Output
(4000, 6)
|
Python Tutorial Tensorflow/15_Data_augmentation.ipynb | ###Markdown
Rotate, zoom, transform, change contrast to get new data
###Code
import os
import PIL
import cv2
import pathlib
import numpy as np
import pandas as pd
import seaborn as sn
import tensorflow as tf
from tensorflow import keras
from matplotlib import pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
%matplotlib inline
# load data dir
dataset_url = "https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz"
data_dir = tf.keras.utils.get_file('flower_photos', origin=dataset_url, cache_dir='.', untar=True)
data_dir = pathlib.Path(data_dir)
print(f"Numbers of images: {len(list(data_dir.glob('*/*.jpg')))}")
roses = list(data_dir.glob("roses/*.jpg"))
PIL.Image.open(str(roses[1]))
flowers_images_dict = {
'roses': list(data_dir.glob('roses/*')),
'daisy': list(data_dir.glob('daisy/*')),
'dandelion': list(data_dir.glob('dandelion/*')),
'sunflowers': list(data_dir.glob('sunflowers/*')),
'tulips': list(data_dir.glob('tulips/*')),
}
flowers_labels_dict = {
'roses': 0,
'daisy': 1,
'dandelion': 2,
'sunflowers': 3,
'tulips': 4,
}
# get X, y data sets using a loop
X, y = [], []
for name, imgs in flowers_images_dict.items():
for img in imgs:
img = cv2.imread(str(img))
resized_img = cv2.resize(img, (180, 180))
X.append(resized_img)
y.append(flowers_labels_dict[name])
X, y = np.array(X), np.array(y)
X.shape, y.shape
# split train test sets and scale them
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
X_train, X_test = X_train / 255, X_test / 255
# define the model
model = keras.Sequential([
keras.layers.Conv2D(filters=16, kernel_size=(3, 3), padding="same", activation="relu"),
keras.layers.MaxPooling2D(),
keras.layers.Conv2D(filters=32, kernel_size=(3, 3), padding="same", activation="relu"),
keras.layers.MaxPooling2D(),
keras.layers.Conv2D(filters=64, kernel_size=(3, 3), padding="same", activation="relu"),
keras.layers.MaxPooling2D(),
keras.layers.Flatten(),
keras.layers.Dense(128, activation="relu"),
keras.layers.Dense(5, activation="linear"),
])
model.compile(optimizer="adam", loss=tf.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=["accuracy"])
# train model
model.fit(X_train, y_train, epochs=30)
# this model is overfitting
y_pred = model.predict(X_test)
y_pred_class = [np.argmax(x) for x in y_pred]
print("Classification Report: \n", classification_report(y_test, y_pred_class))
# Augment Data
data_augmentation = keras.Sequential([
keras.layers.experimental.preprocessing.RandomFlip("horizontal"),
keras.layers.experimental.preprocessing.RandomRotation(0.2),
keras.layers.experimental.preprocessing.RandomZoom(0.2),
keras.layers.experimental.preprocessing.RandomContrast(0.3),
])
plt.axis("off")
plt.imshow(X[0])
plt.axis("off")
plt.imshow(data_augmentation(X)[0])
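# (Added illustration) Preview several random augmentations of a single image to see what the
# RandomFlip/RandomRotation/RandomZoom/RandomContrast layers above do. training=True forces the
# random transforms to be applied when the layers are called directly, and the clip/uint8 cast is
# only so matplotlib displays the float output of the augmentation layers correctly.
plt.figure(figsize=(6, 6))
for i in range(9):
    plt.subplot(3, 3, i + 1)
    augmented = data_augmentation(X[:1], training=True)[0].numpy()
    plt.imshow(np.clip(augmented, 0, 255).astype("uint8"))
    plt.axis("off")
plt.show()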
model = keras.Sequential([
data_augmentation,
keras.layers.Conv2D(filters=16, kernel_size=(3, 3), padding="same", activation="relu"),
keras.layers.MaxPooling2D(),
keras.layers.Conv2D(filters=32, kernel_size=(3, 3), padding="same", activation="relu"),
keras.layers.MaxPooling2D(),
keras.layers.Conv2D(filters=64, kernel_size=(3, 3), padding="same", activation="relu"),
keras.layers.MaxPooling2D(),
keras.layers.Conv2D(filters=128, kernel_size=(3, 3), padding="same", activation="relu"),
keras.layers.MaxPooling2D(),
keras.layers.Flatten(),
keras.layers.Dense(128, activation="relu"),
keras.layers.Dense(5, activation="linear"),
])
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy'])
model.fit(X_train, y_train, epochs=50)
# Accuracy increased from 68 percent to 77 percent
y_pred = model.predict(X_test)
y_pred_class = [np.argmax(x) for x in y_pred]
print("Classification Report: \n", classification_report(y_test, y_pred_class))
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import Dropout
from keras.layers import BatchNormalization
def better_model():
model = Sequential([data_augmentation])
model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same', input_shape=(180, 180, 3)))
model.add(BatchNormalization())
model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))
model.add(BatchNormalization())
model.add(MaxPooling2D((2, 2)))
model.add(Dropout(0.2))
model.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))
model.add(BatchNormalization())
model.add(MaxPooling2D((2, 2)))
model.add(Dropout(0.3))
model.add(Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))
model.add(BatchNormalization())
model.add(MaxPooling2D((2, 2)))
model.add(Dropout(0.4))
model.add(Flatten())
model.add(Dense(128, activation='relu', kernel_initializer='he_uniform'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(5, activation='softmax'))
# compile model
model.compile(optimizer="SGD", loss='sparse_categorical_crossentropy', metrics=['accuracy'])
return model
better_model = better_model()
better_model.fit(X_train, y_train, epochs=30)
# actually not better
y_pred = better_model.predict(X_test)
y_pred_class = [np.argmax(x) for x in y_pred]
print("Classification Report: \n", classification_report(y_test, y_pred_class))
###Output
Classification Report:
precision recall f1-score support
0 0.53 0.33 0.41 176
1 0.62 0.42 0.50 154
2 0.34 0.94 0.50 226
3 0.56 0.10 0.17 150
4 0.87 0.19 0.31 212
accuracy 0.42 918
macro avg 0.58 0.40 0.38 918
weighted avg 0.58 0.42 0.38 918
|
docs/examples/1_Getting_Started.ipynb | ###Markdown
Hello, welcome to our `boomdiff` package tutorial series. `boomdiff` is a package implementing forward-mode automatic differentiation and gradient-based optimization of user-specified or pre-set objective functions. The organization of the `boomdiff` package is highly modularized, with three major modules: 1. The `boomdiff.AD` class, the core functionality of the `boomdiff` package, provides the interface to create and operate on variables and track their gradients. Such functionality is realized through `AD` instances or `AD` instances array data structures. We will walk through this in tutorial section 1. 2. The `boomdiff.optimize` module includes optimization algorithms based on the gradients of loss functions, user-defined or pre-set. We will illustrate the usage in tutorial section 2. 3. The `boomdiff.loss_function` module includes some pre-set loss functions, like mean squared error (MSE), for users' convenience. We will illustrate the usage in tutorial section 2. Following the basic tutorials (if you are proficient at programming, you can probably skip the basic tutorials), three general pedagogical examples will be given: - A linear regression model in tutorial section 3 - A logistic regression model in tutorial section 4 - A simple neural network model in section 5. (Yes! We are proud that `boomdiff` can be used as a deep learning framework! Although there are still lots of things to do regarding performance.) These examples will include most features, show you the usability and power of the `boomdiff` package, and help you understand the basic logic of using `boomdiff`: $$\text{Create variables} \to \text{Construct models and define target functions} \to \text{Optimization}$$ Then, you can construct your own models with `boomdiff` and solve real-life optimization problems! 1.1 Create an AD instance as a scalar variable To start, make sure you have followed the [installation tutorial](https://github.com/team-boomeraang/cs107-FinalProject/blob/master/README.md#installation-of-boomdiff) and successfully installed the `boomdiff` package. If this is the case, we can import the package:
###Code
from boomdiff import AD
###Output
_____no_output_____
###Markdown
The instantiation of a single variable with AD is quite intuitive; we can simply call:
###Code
a = AD(10., {'a': 1.0})
###Output
_____no_output_____
###Markdown
Then a variable called `a` is created; it is an AD class instance. Two arguments should be passed in such an instantiation: a value and a partial derivative dictionary. These two properties can be accessed via the attributes `func_val` and `partial_dict`:
###Code
print(a.func_val)
print(a.partial_dict)
###Output
10.0
{'a': 1.0}
###Markdown
Here the value is `10`, which means the variable `a` itself is at 10. The partial derivative dictionary `{'a': 1.0}` means the variable's partial derivative with respect to the name `'a'` is 1.0. The **name string** is a key property used to track the gradient in the multi-variable case. Since we haven't applied any operations to this variable yet, you can simply view the string `'a'` as the name of the variable; its derivative with respect to itself should usually be 1 (you can set it to other values as a seed vector). Based on this motivation, we also support creating a variable in a simpler manner:
###Code
b = AD(7,'b')
print(b.func_val)
print(b.partial_dict)
###Output
7
{'b': 1.0}
###Markdown
As above, you can give just the value and a name string; the partial derivative with respect to itself will be set to 1.0 by default. Now you have another variable `b`. > Note: When dealing with multi-variable cases, make sure the name strings of your different variables are different! The best practice is to keep the variable names and their name strings consistent, i.e. do `a = AD(10, 'a')` instead of `a = AD(10, 'b')`. And if you are super lazy and will only work with a single variable, so that the name string is not very meaningful for you, we also support the following syntax:
###Code
x1 = AD(5.0)
print(x1.func_val)
print(x1.partial_dict)
###Output
5.0
{'x1': 1}
###Markdown
As shown, you can put just a value, and the name string is set to `'x1'` by default. Now you have another variable `x1`. You will see the power of the name string in the following operations. 1.2 Apply operations to AD instances and track gradient With the three variables `a`, `b`, `x1` we created above, we can do some operations. Let's start with a simple case: `f = 2*a + 3*b - 4*x1`. For this case, we can simply calculate by hand that: $$f=21,\quad \frac{\partial f}{\partial a} = 2, \quad \frac{\partial f}{\partial b} = 3, \quad \frac{\partial f}{\partial x1} = -4$$ This calculation can also be done quite intuitively in `boomdiff`:
###Code
f = 2*a + 3*b - 4*x1
print("Function value: ", f.func_val)
print("Partial derivatives: ", f.partial_dict)
###Output
Function value: 21.0
Partial derivatives: {'a': 2.0, 'b': 3.0, 'x1': -4}
###Markdown
The object `f` is still an AD instance. Besides the function value, the name strings in its `partial_dict` attribute clearly show the gradient relations. Now you can see why the name string is important. Furthermore, since `f` is still an AD instance, you can continue to apply operations on it and extend the computational graph; the gradient tape will still hold:
###Code
f2 = f**2 + AD.sin(a)/AD.exp(b) + AD.log(x1)
print("Function value: ", f2.func_val)
print("Partial derivatives: ", f2.partial_dict)
###Output
Function value: 442.6089418293942
Partial derivatives: {'a': 83.99923486580482, 'b': 126.0004960830399, 'x1': -167.8}
###Markdown
If, like me, you are annoyed by the long float expressions, you can control the number of decimal places with the helper method `round`:
###Code
print("Rounded Function value: ", f2.round(1).func_val)
print("Rounded Partial derivatives: ", f2.round(1).partial_dict)
###Output
Rounded Function value: 442.6
Rounded Partial derivatives: {'a': 84.0, 'b': 126.0, 'x1': -167.8}
###Markdown
Currently, `boomdiff` already supports a large number of basic operations, functions and helper methods. For a complete API list and descriptions, see [AD API](https://github.com/team-boomeraang/cs107-FinalProject/blob/master/docs/documentation.md#autodiff). If you are done with the tracked operations and only want the function values, we provide a simple method called `value` to detach; it returns plain number values:
###Code
f2.round(4).value()
###Output
_____no_output_____
###Markdown
1.3 Create AD instances arrays as variable arrays Above, we demonstrated how to create and operate scalar varaibles in `boomdiff`. However, sometimes the number of parameters in a real-life model is quite large, it might be exhausting to create them one by one. Based on such motivation, we develop the following tools to create AD instances arrays as variable arrays. You can create a bunch of parameters with few lines. At the moment, `numpy>=1.19` is needed to support all desired features, we import `numpy`:
###Code
import numpy as np
np.random.seed(14) # For reproducibility
###Output
_____no_output_____
###Markdown
Now let's say we want a `2*2` parameter matrix `w1`; we can make it in two lines: 1. Create an array with `numpy`, with size equal to `2*2`
###Code
w1_np = np.array([[1.,2.],[3.,4.]])
###Output
_____no_output_____
###Markdown
2. Convert it to an AD instances array with the `AD.from_array()` method:
###Code
w1 = AD.from_array(w1_np, prefix='w1')
print(w1)
###Output
[[1.0 ({'w1_0_0': 1.0}) 2.0 ({'w1_0_1': 1.0})]
[3.0 ({'w1_1_0': 1.0}) 4.0 ({'w1_1_1': 1.0})]]
###Markdown
Now we have the parameter matrix `w1`. You can see that the object `w1` is an array whose elements are all AD instances. The values of the elements are determined by the `w1_np` array, and the name strings follow the pattern `prefix_i_j`, where `i` and `j` are the row and column indices in the matrix, so each element has a different name string. All derivatives here are set to 1.0 by default. You can simply convert `w1` back with the `AD.to_array()` method:
###Code
AD.to_array(w1)
###Output
_____no_output_____
###Markdown
1.4 Apply operations to AD instances array and track gradient All operations mentioned in section 1.2 will still work for AD instances arrays, either element-wise or broadcast. For example:
###Code
w1**2
w1/w1
AD.log(w1)
AD.tanh(w1)
###Output
_____no_output_____
###Markdown
---Besides, there are some array-specific operations, like `AD.sum()`, `AD.mean()`, `AD.dot()`. For example, we can compute the squared Frobenius norm of `w1` in one line, and the gradients are tracked:
###Code
AD.sum(w1**2)
###Output
_____no_output_____
###Markdown
---We support matrix operations between a `numpy` array and an AD instances array:
###Code
A = np.random.randint(0,5,size=[2,1])
A
AD.dot(w1,A)
w1@A
B = np.random.randint(0,5,size=[2,2])
B
w1+B
###Output
_____no_output_____
###Markdown
---We support matrix operations between two AD instances arrays:
###Code
w2 = AD.from_array(np.random.randint(0,5,size=[2,1]), prefix="w2")
w2
w1@w2
w3 = AD.from_array(np.random.randint(0,5,size=[2,2]), prefix="w3")
w3
w1-w3
###Output
_____no_output_____
###Markdown
---We support operations between an AD instance and an AD instances array, in a broadcast manner:
###Code
a
w1+a
###Output
_____no_output_____ |
old/districts-cities.ipynb | ###Markdown
Get congressional district shapefiles FTP down from ftp2.census.gov
###Code
# s = time.time()
# print('getting congressional district shapefiles from Census FTP...')
# os.chdir(shapefiledir+'CD/')
# ftp = FTP('ftp2.census.gov')
# ftp.login()
# #print(ftp.getwelcome())
# ftp.cwd('geo/tiger/TIGER{0:.0f}/CD/'.format(thisyear))
# #print(ftp.nlst())
# thefilename = 'tl_{0:.0f}_us_cd116.zip'.format(thisyear)
# #print(thefilename)
# with open(thefilename, 'wb') as f:
# ftp.retrbinary('RETR {0:}'.format(thefilename), f.write)
# ftp.quit()
# #print('ok')
# print('unzipping...')
# thezipfile = zipfile.ZipFile(shapefiledir+'CD/tl_{0:.0f}_us_cd116.zip'.format(thisyear))
# thezipfile.extractall()
# thezipfile.close()
# os.remove(shapefiledir+'CD/tl_2018_us_cd116.zip')
# #os.listdir()
# e = time.time()
# g = g + (e-s)
# print('Got 1 file in {0:,.0f} seconds!'.format(e-s))
# #os.listdir()
###Output
_____no_output_____
###Markdown
Load congressional district shapefiles into a GeoDataFrame
###Code
s = time.time()
print('reading congressional districts...')
cd_gdf = geopandas.read_file(shapefiledir+'CD/tl_2018_us_cd116.shp')
cd_gdf.loc[:, 'GEOID'] = cd_gdf['GEOID'].apply(lambda x: '50000US'+str(x))
#cd_gdf = cd_gdf.set_index('GEOID')
print('reading helpful files...')
geo_summary_levels_df = pandas.read_csv(extras_dir+'geo_summary_levels.csv', index_col='SUMLEVEL')
statecodes_df = pandas.read_csv(extras_dir+'statecodes.csv', index_col='STATE')
print('converting to numeric and adding state names and setting index...')
for x in ['STATEFP', 'CD116FP']:
cd_gdf.loc[:, x] = pandas.to_numeric(cd_gdf[x], errors='coerce', downcast='integer')
for x in ['CDSESSN', 'ALAND', 'AWATER', 'INTPTLAT', 'INTPTLON']:
cd_gdf.loc[:, x] = pandas.to_numeric(cd_gdf[x], errors='coerce')
cd_gdf = cd_gdf.merge(statecodes_df.reset_index(), how='left', left_on='STATEFP', right_on='STATE').set_index('GEOID')
#cd_gdf = cd_gdf.set_index('GEOID')
e = time.time()
g = g + (e-s)
print('Read {0:,.0f} districts in {1:,.1f} seconds.'.format(len(cd_gdf), e-s))
#statecodes_df
#cd_gdf.apply(lambda row: '50000US{0:02d}{1:02d}'.format(int(row['STATEFP']), int(row['CD116FP'])), axis=1)
#cd_gdf[['CD116FP','NAMELSAD','LSAD','CDSESSN','STATE','STUSAB','STATE_NAME','STATENS']]
###Output
reading congressional districts...
reading helpful files...
converting to numeric and adding state names and setting index...
Read 444 districts in 2.0 seconds.
###Markdown
Get core-based statistical areas (CBSAs)
###Code
s = time.time()
print('getting core-based statistical areas...')
cbsa_gdf = geopandas.read_file(shapefiledir+'CBSA/tl_2018_us_cbsa.shp')
print('removing CBSAs in Puerto Rico...')
cbsa_gdf = cbsa_gdf[cbsa_gdf['NAME'].apply(lambda x: ', PR' in x) == False]
cbsa_gdf = cbsa_gdf.set_index('GEOID')
cbsa_gdf = cbsa_gdf.sort_index()
print('reading OMB data for core-based statistical areas...')
cbsa_data_df = pandas.read_excel(extras_dir+'cbsa_list1_2020.xls', header=2)
cbsa_principal_cities_df = pandas.read_excel(extras_dir+'cbsa_list2_2020.xls', header=2)
print('chopping off non-data from bottom of dataframes...')
cbsa_data_df = cbsa_data_df.head(-4)
cbsa_principal_cities_df = cbsa_principal_cities_df.head(-4)
#cbsa_principal_cities_df = cbsa_principal_cities_df.rename(columns={'CBSA Code': 'GEOID'})
#cbsa_principal_cities_df = cbsa_principal_cities_df.set_index('GEOID')
e = time.time()
g = g + (e-s)
print('\nRead {0:,.0f} core-based statistical areas in {1:,.0f} seconds!'.format(len(cbsa_gdf), e-s))
#metro_areas_df.sample(1)
###Output
getting core-based statistical areas...
removing CBSAs in Puerto Rico...
reading OMB data for core-based statistical areas...
chopping off non-data from bottom of dataframes...
Read 933 core-based statistical areas in 2 seconds!
###Markdown
Identify metropolitan statistical areas (MSAs)
###Code
metro_areas_gdf = cbsa_gdf.join(
cbsa_principal_cities_df[
(cbsa_principal_cities_df['Metropolitan/Micropolitan Statistical Area'] == 'Metropolitan Statistical Area')
& (cbsa_principal_cities_df['CBSA Title'].apply(lambda x: x[-4:] != ', PR'))
][['CBSA Code', 'CBSA Title', 'Metropolitan/Micropolitan Statistical Area']].drop_duplicates().set_index('CBSA Code')
)
metro_areas_gdf = metro_areas_gdf[metro_areas_gdf['Metropolitan/Micropolitan Statistical Area'].notnull()]
metro_areas_gdf = metro_areas_gdf[[x for x in metro_areas_gdf.columns.tolist() if x != 'geometry'] + ['geometry']]
print('\nFound {0:,.0f} MSAs in {1:,.0f} seconds!'.format(len(metro_areas_gdf), e-s))
###Output
Found 381 MSAs in 2 seconds!
###Markdown
Load places shapefiles
###Code
s = time.time()
place_gdf = geopandas.GeoDataFrame()
place_file_list = [shapefiledir+'PLACE/'+x for x in os.listdir(shapefiledir+'PLACE/') if x[-4:] == '.shp']
for i in range(0, len(place_file_list)):
if (debug >= 2):
if ((np.mod(i,10) == 0) | (i == len(place_file_list)-1)):
print('\tReading file {0:,.0f} of {1:,.0f}...'.format(i+1, len(place_file_list)))
place_gdf_i = geopandas.read_file(place_file_list[i])
place_gdf = pandas.concat((place_gdf, place_gdf_i), axis=0, sort=False)
print('converting to numeric...')
place_gdf.loc[:, 'STATEFP'] = pandas.to_numeric(place_gdf['STATEFP'], errors='coerce')
place_gdf.loc[:, 'PLACEFP'] = pandas.to_numeric(place_gdf['PLACEFP'], errors='coerce')
print('setting GEOID as index...')
place_gdf = place_gdf.set_index("GEOID")
place_gdf = place_gdf.sort_index()
e = time.time()
g = g + (e-s)
print('Read {0:,.0f} places in {1:,.0f} seconds!'.format(len(place_gdf), e-s))
###Output
Reading file 1 of 51...
Reading file 11 of 51...
Reading file 21 of 51...
Reading file 31 of 51...
Reading file 41 of 51...
Reading file 51 of 51...
converting to numeric...
setting GEOID as index...
Read 29,321 places in 20 seconds!
###Markdown
Identify principal cities for each metro area Metro areas with one principal city
###Code
s = time.time()
print('finding MSAs with only one principal city...')
value_counts_s = cbsa_principal_cities_df[
cbsa_principal_cities_df['CBSA Code'].isin(metro_areas_gdf.index)
]['CBSA Code'].value_counts()
single_principal_city_list = value_counts_s[value_counts_s == 1].sort_index().index.tolist()
metro_areas_gdf.loc[single_principal_city_list]
metro_areas_gdf = metro_areas_gdf.assign(principal_city_placeid = np.nan)
metro_areas_gdf = metro_areas_gdf.assign(principal_cities_geometry = np.nan)
#metro_areas_gdf
metro_areas_gdf.loc[
cbsa_principal_cities_df[
cbsa_principal_cities_df['CBSA Code'].isin(single_principal_city_list)
]['CBSA Code'].tolist(), 'principal_city_placeid'] = cbsa_principal_cities_df[
cbsa_principal_cities_df['CBSA Code'].isin(single_principal_city_list)
][
['FIPS State Code', 'FIPS Place Code']
].apply(lambda row: '{0:02d}{1:05d}'.format(int(row['FIPS State Code']), int(row['FIPS Place Code'])), axis=1).values
metro_areas_gdf.loc[
metro_areas_gdf[metro_areas_gdf['principal_city_placeid'].notnull()].index
, 'principal_cities_geometry'] = place_gdf.loc[
metro_areas_gdf[metro_areas_gdf['principal_city_placeid'].notnull()]['principal_city_placeid']
].geometry.values
e = time.time()
g = g + (e-s)
print('added principal city geometries for {0:,.0f} MSAs with a single principal city in {1:,.1f} seconds!'.format(len(metro_areas_gdf[metro_areas_gdf['principal_city_placeid'].notnull()]), e-s))
###Output
finding MSAs with only one principal city...
added principal city geometries for 244 MSAs with a single principal city in 0.1 seconds!
###Markdown
Metro areas with multiple principal cities
###Code
s = time.time()
print('finding MSAs with multiple principal cities...')
need_these_metro_areas_list = metro_areas_gdf[metro_areas_gdf['principal_city_placeid'].isnull()].index.tolist()
newgdf = geopandas.GeoDataFrame(data=None, columns=['geometry'], crs=metro_areas_gdf.crs, geometry='geometry')
cnt = 0
for this_metro_area in need_these_metro_areas_list:
cnt = cnt + 1
if ((np.mod(cnt,20) == 0) | (cnt == len(need_these_metro_areas_list))):
print('Checking metro area {0:,.0f} of {1:,.0f}...'.format(cnt, len(need_these_metro_areas_list)))
the_principal_city_place_id_list = cbsa_principal_cities_df[
cbsa_principal_cities_df['CBSA Code'] == this_metro_area].apply(lambda row:
'{0:02d}{1:05d}'.format(int(row['FIPS State Code']), int(row['FIPS Place Code'])), axis=1).tolist()
this_metro_area_principal_city_geolist = []
for this_principal_city_id in the_principal_city_place_id_list:
if (place_gdf.loc[this_principal_city_id].geometry.type == 'Polygon'):
this_metro_area_principal_city_geolist.append(place_gdf.loc[this_principal_city_id].geometry)
else:
for x in place_gdf.loc[this_principal_city_id].geometry:
if (x.type == 'Polygon'):
this_metro_area_principal_city_geolist.append(x)
else:
print(x.type)
combined_geo = unary_union(this_metro_area_principal_city_geolist)
newgdf.loc[this_metro_area, 'geometry'] = combined_geo
#newgdf.index.name = 'GEOID'
metro_areas_gdf.loc[newgdf.index, 'principal_cities_geometry'] = newgdf.geometry
metro_areas_gdf = metro_areas_gdf.drop('principal_city_placeid', axis=1)
# print('backing up...')
# cd_gdf_bk = cd_gdf
# metro_areas_gdf_bk = metro_areas_gdf
e = time.time()
g = g + (e-s)
print('Done in {0:,.0f} seconds!'.format(e-s))
#print('ok')
s = time.time()
print('keeping only the 435 voting members...')
cd_gdf = cd_gdf[
(cd_gdf['NAMELSAD'].apply(lambda x: ('Congressional District' in x) == True))
& (cd_gdf['CD116FP'].notnull())
]
cd_gdf = cd_gdf.assign(pct_metro_area_overlap = np.nan)
cd_gdf = cd_gdf.assign(pct_city_area_overlap = np.nan)
cd_gdf = cd_gdf.assign(type = np.nan)
print('\n')
print('calculating overlap between congressional districts and metro areas...')
print('Considering {0:,.0f} congressional districts...'.format(len(cd_gdf)))
i = 0
for ix, thisrow in cd_gdf.sort_values(by=['STATE_NAME', 'CD116FP']).iterrows():
i = i + 1
if ((np.mod(i,50) == 0) | (i == 435)):
print('Reading district {0:,.0f} of 435...'.format(i))
# print('{0:}-{1:.0f}...'.format(
# thisrow['STATE_NAME'],
# thisrow['CD116FP']
# ))
total_metro_area_overlap = 0
for jx, thatrow in metro_areas_gdf[metro_areas_gdf.geometry.apply(lambda x: x.intersects(thisrow.geometry))].iterrows():
intersector = thisrow.geometry.intersection(thatrow.geometry)
intersector_area_sq_m = geopandas.GeoSeries(intersector, crs=cd_gdf.crs).to_crs(equal_area_crs).geometry.values[0].area
if (intersector_area_sq_m >= overlap_area_metro_tol):
total_metro_area_overlap = total_metro_area_overlap + intersector_area_sq_m
cd_gdf.loc[ix, 'pct_metro_area_overlap'] = total_metro_area_overlap / (thisrow['ALAND'] + thisrow['AWATER'])
# metro_areas_gdf.loc[ix, 'pct_metro_area'] = intersector_area_sq_m / (thisrow['ALAND'] + thisrow['AWATER'])
#metro_areas_gdf
print('\n')
print('calculating overlap between congressional districts and principal cities...')
i = 0
metro_areas_gdf = metro_areas_gdf.set_geometry('principal_cities_geometry')
for ix, thisrow in cd_gdf.iterrows():
i = i + 1
if ((np.mod(i,50) == 0) | (i == 435)):
print('Reading district {0:,.0f} of 435...'.format(i))
# print('{0:}-{1:.0f}...'.format(
# thisrow['STATE_NAME'],
# thisrow['CD116FP']
# ))
total_city_overlap = 0
for jx, thatrow in metro_areas_gdf[metro_areas_gdf.geometry.apply(lambda x: x.intersects(thisrow.geometry))].iterrows():
city_intersector = thisrow.geometry.intersection(thatrow.geometry)
city_intersector_area_sq_m = geopandas.GeoSeries(city_intersector, crs=cd_gdf.crs).to_crs(equal_area_crs).geometry.values[0].area
if (city_intersector_area_sq_m >= overlap_area_city_tol):
total_city_overlap = total_city_overlap + city_intersector_area_sq_m
cd_gdf.loc[ix, 'pct_city_area_overlap'] = total_city_overlap / (thisrow['ALAND'] + thisrow['AWATER'])
#cd_gdf = cd_gdf['pct_metro_area_overlap'].fillna(0)
#cd_gdf = cd_gdf['pct_city_overlap'].fillna(0)
metro_areas_gdf = metro_areas_gdf.set_geometry('geometry')
print('\n')
print('identifying urban/suburban/rural based on pct_metro_area_overlap, pct_city_area_overlap...')
cd_gdf = cd_gdf.assign(type = np.nan)
cd_gdf.loc[cd_gdf['pct_city_area_overlap'] > .95, 'type'] = 'Urban'
cd_gdf.loc[cd_gdf['pct_metro_area_overlap'] <= .5, 'type'] = 'Rural'
cd_gdf.loc[
(cd_gdf['pct_city_area_overlap'] <= .95)
& (cd_gdf['pct_metro_area_overlap'] > .5)
, 'type'] = 'Suburban'
e = time.time()
g = g + (e-s)
print('Got district urban/suburban/rural for {0:,.0f} districts in {1:,.0f} minutes {2:,.0f} seconds!'.format(len(cd_gdf), np.floor((e-s)/60), np.floor((e-s)%60)))
print(cd_gdf.groupby('type').size())
cd_gdf[cd_gdf['type'] == 'Rural'][['STATE_NAME', 'CD116FP']
].sort_values(['STATE_NAME', 'CD116FP'])[125:]
print('Saving districts...')
cd_gdf.reset_index().to_file(outdir+'cd116_with_areas_and_types_435.shp')
print('DONE! Total time: {0:,.0f} minutes {1:,.0f} seconds!'.format(np.floor(g/60), np.floor(g%60)))
z = geopandas.read_file(outdir+'cd116_with_areas_and_types_435.shp')
z
###Output
_____no_output_____ |
ai-platform-unified/notebooks/unofficial/migration/UJ13 unified Data Labeling task.ipynb | ###Markdown
AI Platform (Unified) SDK: Data Labeling InstallationInstall the latest (preview) version of AI Platform (Unified) SDK.
###Code
! pip3 install -U google-cloud-aiplatform --user
###Output
_____no_output_____
###Markdown
Install the Google *cloud-storage* library as well.
###Code
! pip3 install google-cloud-storage
###Output
_____no_output_____
###Markdown
Restart the Kernel Once you've installed the AI Platform (Unified) SDK and Google *cloud-storage*, you need to restart the notebook kernel so it can find the packages.
###Code
import os
if not os.getenv("AUTORUN"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
###Output
_____no_output_____
###Markdown
Before you begin GPU run-time*Make sure you're running this notebook in a GPU runtime if you have that option. In Colab, select* **Runtime > Change Runtime Type > GPU** Set up your GCP project**The following steps are required, regardless of your notebook environment.**1. [Select or create a GCP project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs.2. [Make sure that billing is enabled for your project.](https://cloud.google.com/billing/docs/how-to/modify-project)3. [Enable the AI Platform APIs and Compute Engine APIs.](https://console.cloud.google.com/flows/enableapi?apiid=ml.googleapis.com,compute_component)4. [Google Cloud SDK](https://cloud.google.com/sdk) is already installed in AI Platform Notebooks.5. Enter your project ID in the cell below. Then run the cell to make sure theCloud SDK uses the right project for all the commands in this notebook.**Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands.
###Code
PROJECT_ID = "[your-project-id]" #@param {type:"string"}
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
# Get your GCP project id from gcloud
shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)
! gcloud config set project $PROJECT_ID
###Output
_____no_output_____
###Markdown
Region You can also change the `REGION` variable, which is used for operations throughout the rest of this notebook. Below are regions supported for AI Platform (Unified). We recommend, when possible, choosing the region closest to you.- Americas: `us-central1`- Europe: `europe-west4`- Asia Pacific: `asia-east1` You cannot use a Multi-Regional Storage bucket for training with AI Platform. Not all regions provide support for all AI Platform services. For the latest support per region, see [Region support for AI Platform (Unified) services](https://cloud.google.com/ai-platform-unified/docs/general/locations)
###Code
REGION = 'us-central1' #@param {type: "string"}
###Output
_____no_output_____
###Markdown
Timestamp If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on the resources created, you create a timestamp for each session and append it onto the names of the resources created in this tutorial.
###Code
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
###Output
_____no_output_____
###Markdown
Authenticate your GCP account **If you are using AI Platform Notebooks**, your environment is already authenticated. Skip this step. *Note: If you are on an AI Platform notebook and run the cell, the cell knows to skip executing the authentication steps.*
###Code
import os
import sys
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your Google Cloud account. This provides access
# to your Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
# If on AI Platform, then don't execute this code
if not os.path.exists('/opt/deeplearning/metadata/env_version'):
if 'google.colab' in sys.modules:
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this tutorial in a notebook locally, replace the string
# below with the path to your service account key and run this cell to
# authenticate your Google Cloud account.
else:
%env GOOGLE_APPLICATION_CREDENTIALS your_path_to_credentials.json
# Log in to your account on Google Cloud
! gcloud auth login
###Output
_____no_output_____
###Markdown
Create a Cloud Storage bucket**The following steps are required, regardless of your notebook environment.**This tutorial is designed to use training data that is in a public Cloud Storage bucket and a local Cloud Storage bucket for your batch predictions. You may alternatively use your own training data that you have stored in a local Cloud Storage bucket.Set the name of your Cloud Storage bucket below. It must be unique across all Cloud Storage buckets.
###Code
BUCKET_NAME = "[your-bucket-name]" #@param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "[your-bucket-name]":
BUCKET_NAME = PROJECT_ID + "aip-" + TIMESTAMP
###Output
_____no_output_____
###Markdown
**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
###Code
! gsutil mb -l $REGION gs://$BUCKET_NAME
###Output
_____no_output_____
###Markdown
Finally, validate access to your Cloud Storage bucket by examining its contents:
###Code
! gsutil ls -al gs://$BUCKET_NAME
###Output
_____no_output_____
###Markdown
Set up variables Next, set up some variables used throughout the tutorial. Import libraries and define constants Import AI Platform (Unified) SDK Import the AI Platform (Unified) SDK into our Python environment.
###Code
import os
import sys
import time
from google.cloud.aiplatform import gapic as aip
from google.protobuf import json_format
from google.protobuf.struct_pb2 import Value
from google.protobuf.struct_pb2 import Struct
from google.protobuf.json_format import MessageToJson
from google.protobuf.json_format import ParseDict
###Output
_____no_output_____
###Markdown
AI Platform (Unified) constants Set up the following constants for AI Platform (Unified):- `API_ENDPOINT`: The AI Platform (Unified) API service endpoint for dataset, model, job, pipeline and endpoint services.- `API_PREDICT_ENDPOINT`: The AI Platform (Unified) API service endpoint for prediction.- `PARENT`: The AI Platform (Unified) location root path for dataset, model and endpoint resources.
###Code
# API Endpoint
API_ENDPOINT = "{0}-aiplatform.googleapis.com".format(REGION)
# AI Platform (Unified) location root path for your dataset, model and endpoint resources
PARENT = "projects/" + PROJECT_ID + "/locations/" + REGION
###Output
_____no_output_____
###Markdown
AutoML constants Next, set up constants unique to AutoML image classification datasets and training:- Dataset Schemas: Tell the managed dataset service which type of dataset it is.- Data Labeling (Annotations) Schemas: Tell the managed dataset service how the data is labeled (annotated).- Dataset Training Schemas: Tell the managed pipelines service the task (e.g., classification) to train the model for.
###Code
# Image Dataset type
IMAGE_SCHEMA = "google-cloud-aiplatform/schema/dataset/metadata/image_1.0.0.yaml"
# Image Labeling type
IMPORT_SCHEMA_IMAGE_CLASSIFICATION = "gs://google-cloud-aiplatform/schema/dataset/ioformat/image_classification_single_label_io_format_1.0.0.yaml"
# Image labeling task
LABELING_SCHEMA_IMAGE = "gs://google-cloud-aiplatform/schema/datalabelingjob/inputs/image_classification_1.0.0.yaml"
###Output
_____no_output_____
###Markdown
Clients The AI Platform (Unified) SDK works as a client/server model. On your side (the Python script) you will create a client that sends requests and receives responses from the server (AI Platform). You will use several clients in this tutorial, so set them all up upfront.- Dataset Service for managed datasets.- Job Service for batch jobs and custom training.
###Code
# client options same for all services
client_options = {"api_endpoint": API_ENDPOINT}
def create_dataset_client():
client = aip.DatasetServiceClient(
client_options=client_options
)
return client
def create_job_client():
client = aip.JobServiceClient(
client_options=client_options
)
return client
clients = {}
clients["dataset"] = create_dataset_client()
clients["job"] = create_job_client()
for client in clients.items():
print(client)
import tensorflow as tf
LABELING_FILES = [
"https://raw.githubusercontent.com/googleapis/python-aiplatform/master/samples/snippets/resources/daisy.jpg"
]
IMPORT_FILE = "gs://" + BUCKET_NAME + '/labeling.csv'
with tf.io.gfile.GFile(IMPORT_FILE, 'w') as f:
for lf in LABELING_FILES:
! wget {lf} | gsutil cp {lf.split("/")[-1]} gs://{BUCKET_NAME}
f.write("gs://" + BUCKET_NAME + "/" + lf.split("/")[-1] + "\n")
! gsutil cat $IMPORT_FILE
###Output
_____no_output_____
###Markdown
*Example output*:```gs://migration-ucaip-trainingaip-20210303215432/daisy.jpg``` Create a dataset [projects.locations.datasets.create](https://cloud.google.com/ai-platform-unified/docs/reference/rest/v1beta1/projects.locations.datasets/create) Request
###Code
DATA_SCHEMA = IMAGE_SCHEMA
dataset = {
"display_name": "labeling_" + TIMESTAMP,
"metadata_schema_uri": "gs://" + DATA_SCHEMA
}
print(MessageToJson(
aip.CreateDatasetRequest(
parent=PARENT,
dataset=dataset
).__dict__["_pb"])
)
###Output
_____no_output_____
###Markdown
*Example output*:```{ "parent": "projects/migration-ucaip-training/locations/us-central1", "dataset": { "displayName": "labeling_20210303215432", "metadataSchemaUri": "gs://google-cloud-aiplatform/schema/dataset/metadata/image_1.0.0.yaml" }}``` Call
###Code
request = clients["dataset"].create_dataset(
parent=PARENT,
dataset=dataset
)
###Output
_____no_output_____
###Markdown
Response
###Code
result = request.result()
print(MessageToJson(result.__dict__["_pb"]))
###Output
_____no_output_____
###Markdown
*Example output*:```{ "name": "projects/116273516712/locations/us-central1/datasets/1165112889535627264", "displayName": "labeling_20210303215432", "metadataSchemaUri": "gs://google-cloud-aiplatform/schema/dataset/metadata/image_1.0.0.yaml", "labels": { "aiplatform.googleapis.com/dataset_metadata_schema": "IMAGE" }, "metadata": { "dataItemSchemaUri": "gs://google-cloud-aiplatform/schema/dataset/dataitem/image_1.0.0.yaml" }}```
###Code
# The full unique ID for the dataset
dataset_id = result.name
# The short numeric ID for the dataset
dataset_short_id = dataset_id.split('/')[-1]
print(dataset_id)
###Output
_____no_output_____
###Markdown
[projects.locations.datasets.import](https://cloud.google.com/ai-platform-unified/docs/reference/rest/v1beta1/projects.locations.datasets/import) Request
###Code
LABEL_SCHEMA = IMPORT_SCHEMA_IMAGE_CLASSIFICATION
import_config = {
"gcs_source": {
"uris": [IMPORT_FILE]
},
"import_schema_uri": LABEL_SCHEMA
}
print(MessageToJson(
aip.ImportDataRequest(
name=dataset_short_id,
import_configs=[import_config]
).__dict__["_pb"])
)
###Output
_____no_output_____
###Markdown
*Example output*:```{ "name": "1165112889535627264", "importConfigs": [ { "gcsSource": { "uris": [ "gs://migration-ucaip-trainingaip-20210303215432/labeling.csv" ] }, "importSchemaUri": "gs://google-cloud-aiplatform/schema/dataset/ioformat/image_classification_single_label_io_format_1.0.0.yaml" } ]}``` Call
###Code
request = clients["dataset"].import_data(
name=dataset_id,
import_configs=[import_config]
)
###Output
_____no_output_____
###Markdown
Response
###Code
result = request.result()
print(MessageToJson(result.__dict__["_pb"]))
###Output
_____no_output_____
###Markdown
*Example output*:```{}``` Create data labeling specialist pool In case you do not have access to labeling services execute this section.
###Code
# add client for specialist pool
clients["specialist_pool"] = aip.SpecialistPoolServiceClient(
client_options=client_options
)
###Output
_____no_output_____
###Markdown
[projects.locations.specialistPools.create](https://cloud.google.com/ai-platform-unified/docs/reference/rest/v1beta1/projects.locations.specialistPools/createe) RequestIn this part, you will replace [your-email-address] with your email address. This makes you the specialist and recipient of the labeling request.
###Code
EMAIL = "[your-email-address]"
specialist_pool = {
"name": "labeling_" + TIMESTAMP, #he resource name of the SpecialistPool.
"display_name": "labeling_" + TIMESTAMP, # user-defined name of the SpecialistPool
"specialist_manager_emails": [EMAIL]
}
print(MessageToJson(
aip.CreateSpecialistPoolRequest(
parent=PARENT,
specialist_pool=specialist_pool
).__dict__["_pb"])
)
###Output
_____no_output_____
###Markdown
*Example output*:```{ "parent": "projects/migration-ucaip-training/locations/us-central1", "specialistPool": { "name": "labeling_20210303215432", "displayName": "labeling_20210303215432", "specialistManagerEmails": [ "[email protected]" ] }}``` Call
###Code
request = clients["specialist_pool"].create_specialist_pool(
parent=PARENT,
specialist_pool=specialist_pool
)
###Output
_____no_output_____
###Markdown
Response
###Code
result = request.result()
print(MessageToJson(result.__dict__["_pb"]))
###Output
_____no_output_____
###Markdown
*Example output*:```{ "name": "projects/116273516712/locations/us-central1/specialistPools/1167839678372511744"}```
###Code
specialist_name = result.name
specialist_id = specialist_name.split("/")[-1]
print(specialist_name)
###Output
_____no_output_____
###Markdown
Create data labeling job [projects.locations.dataLabelingJobs.create](https://cloud.google.com/ai-platform-unified/docs/reference/rest/v1beta1/projects.locations.dataLabelingJobs/create)
###Code
# create a placeholder file to stand in for a valid PDF instruction file for data labeling
! echo "this is instruction" >> instruction.txt | gsutil cp instruction.txt gs://$BUCKET_NAME
###Output
_____no_output_____
###Markdown
Request
###Code
LABELING_SCHEMA = LABELING_SCHEMA_IMAGE
INSTRUCTION_FILE = "gs://" + BUCKET_NAME + "/instruction.txt"
inputs = json_format.ParseDict({"annotation_specs": ["rose"]}, Value())
data_labeling_job = {
"display_name": "labeling_" + TIMESTAMP,
"datasets": [dataset_id],
"labeler_count": 1,
"instruction_uri": INSTRUCTION_FILE,
"inputs_schema_uri": LABLEING_SCHEMA,
"inputs": inputs,
"annotation_labels": {
"aiplatform.googleapis.com/annotation_set_name": "data_labeling_job_specialist_pool"
},
"specialist_pools": [specialist_name]
}
print(MessageToJson(
aip.CreateDataLabelingJobRequest(
parent=PARENT,
data_labeling_job=data_labeling_job
).__dict__["_pb"])
)
###Output
_____no_output_____
###Markdown
*Example output*:```{ "parent": "projects/migration-ucaip-training/locations/us-central1", "dataLabelingJob": { "displayName": "labeling_20210303215432", "datasets": [ "projects/116273516712/locations/us-central1/datasets/1165112889535627264" ], "labelerCount": 1, "instructionUri": "gs://migration-ucaip-trainingaip-20210303215432/instruction.txt", "inputsSchemaUri": "gs://google-cloud-aiplatform/schema/datalabelingjob/inputs/image_classification_1.0.0.yaml", "inputs": { "annotation_specs": [ "rose" ] }, "annotationLabels": { "aiplatform.googleapis.com/annotation_set_name": "data_labeling_job_specialist_pool" }, "specialistPools": [ "projects/116273516712/locations/us-central1/specialistPools/1167839678372511744" ] }``` Call
###Code
request = clients["job"].create_data_labeling_job(
parent=PARENT,
data_labeling_job=data_labeling_job
)
###Output
_____no_output_____
###Markdown
Response
###Code
print(MessageToJson(request.__dict__["_pb"]))
###Output
_____no_output_____
###Markdown
*Example output*:```{ "name": "projects/116273516712/locations/us-central1/dataLabelingJobs/3830883229125050368", "displayName": "labeling_20210303215432", "datasets": [ "projects/116273516712/locations/us-central1/datasets/1165112889535627264" ], "labelerCount": 1, "instructionUri": "gs://migration-ucaip-trainingaip-20210303215432/instruction.txt", "inputsSchemaUri": "gs://google-cloud-aiplatform/schema/datalabelingjob/inputs/image_classification_1.0.0.yaml", "inputs": { "annotationSpecs": [ "rose" ] }, "state": "JOB_STATE_PENDING", "createTime": "2021-03-03T21:55:31.239049Z", "updateTime": "2021-03-03T21:55:31.239049Z"}```
###Code
labeling_task_name = request.name
print(labeling_task_name)
###Output
_____no_output_____
###Markdown
[projects.locations.dataLabelingJobs.get](https://cloud.google.com/ai-platform-unified/docs/reference/rest/v1beta1/projects.locations.dataLabelingJobs/get) Call
###Code
request = clients["job"].get_data_labeling_job(
name=labeling_task_name
)
###Output
_____no_output_____
###Markdown
Response
###Code
print(MessageToJson(request.__dict__["_pb"]))
###Output
_____no_output_____
###Markdown
*Example output*:```{ "name": "projects/116273516712/locations/us-central1/dataLabelingJobs/3830883229125050368", "displayName": "labeling_20210303215432", "datasets": [ "projects/116273516712/locations/us-central1/datasets/1165112889535627264" ], "labelerCount": 1, "instructionUri": "gs://migration-ucaip-trainingaip-20210303215432/instruction.txt", "inputsSchemaUri": "gs://google-cloud-aiplatform/schema/datalabelingjob/inputs/image_classification_1.0.0.yaml", "inputs": { "annotationSpecs": [ "rose" ] }, "state": "JOB_STATE_PENDING", "createTime": "2021-03-03T21:55:31.239049Z", "updateTime": "2021-03-03T21:55:31.239049Z", "specialistPools": [ "projects/116273516712/locations/us-central1/specialistPools/1167839678372511744" ]}``` [projects.locations.dataLabelingJobs.cancel](https://cloud.google.com/ai-platform-unified/docs/reference/rest/v1beta1/projects.locations.dataLabelingJobs/cancel) Call
###Code
request = clients["job"].cancel_data_labeling_job(
name=labeling_task_name
)
###Output
_____no_output_____
###Markdown
Response
###Code
print(request)
###Output
_____no_output_____
###Markdown
*Example output*:```None```
###Code
while True:
response = clients["job"].get_data_labeling_job(name=labeling_task_name)
if response.state == aip.JobState.JOB_STATE_CANCELLED:
print("Labeling job CANCELED")
break
else:
print("Canceling labeling job:", response.state)
time.sleep(60)
###Output
_____no_output_____
###Markdown
Cleaning up To clean up all GCP resources used in this project, you can [delete the GCP project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial. Otherwise, you can delete the individual resources you created in this tutorial.
###Code
delete_dataset = True
delete_job = True
delete_specialist_pool = True
delete_bucket = True
# Delete the dataset using the AI Platform (Unified) fully qualified identifier for the dataset
try:
if delete_dataset:
clients['dataset'].delete_dataset(name=dataset_id)
except Exception as e:
print(e)
# Delete the labeling job using the AI Platform (Unified) fully qualified identifier for the dataset
try:
if delete_job:
request = clients["job"].delete_data_labeling_job(name=labeling_task_name)
except Exception as e:
print(e)
# Delete the specialist pool using the AI Platform (Unified) fully qualified identifier for the dataset
try:
if delete_specialist_pool:
clients["specialist_pool"].delete_specialist_pool(name=specialist_name)
except Exception as e:
print(e)
if delete_bucket and 'BUCKET_NAME' in globals():
! gsutil rm -r gs://$BUCKET_NAME
###Output
_____no_output_____ |
quora/notebooks/EDA.ipynb | ###Markdown
Exploration of Quora dataset
###Code
import sys
sys.path.append("..")
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use("dark_background") # comment out if using light Jupyter theme
dtypes = {"qid": str, "question_text": str, "target": int}
train = pd.read_csv("../data/train.csv", dtype=dtypes)
test = pd.read_csv("../data/test.csv", dtype=dtypes)
###Output
_____no_output_____
###Markdown
1. A first glance
###Code
train.head()
print("There are {} questions in train and {} in test".format(train.shape[0], test.shape[0]))
print("Target value is binary (values: {})".format(set(train["target"].unique())))
print("Number of toxic questions in training data is {} (proportion: {}).".format(train["target"].sum(), train["target"].mean()))
###Output
_____no_output_____
###Markdown
2. A closer look at the questions 2.1 Question length (characters)
###Code
train["text_length"] = train["question_text"].str.len()
train["text_length"].describe()
###Output
_____no_output_____
###Markdown
Most questions are relatively short, i.e., less than 100 characters. There are some exceptions, however, with a maximum of more than a thousand. Let's see how many characters we should consider.
###Code
for length in [100, 150, 200, 250, 300, 350, 500]:
num = np.sum(train["text_length"] > length)
print("There are {} questions ({}%) with more than {} characters."
.format(num, np.round(num / len(train) * 100, 2), length))
###Output
_____no_output_____
###Markdown
The number of questions with more than 250 characters is already small, and the number with more than 300 is negligible. We could cap the questions at 300 characters or simply drop the longer ones (a minimal sketch of the cap is shown below). Would there be a difference between the length of toxic and sincere questions?
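As a side note, here is a minimal sketch of how such a cap could be applied; the 300-character threshold is just the cutoff discussed above, and we don't actually apply this in the rest of the notebook:
```
# Minimal sketch: cap question text at 300 characters
train_capped = train.copy()
train_capped["question_text"] = train_capped["question_text"].str.slice(0, 300)

# Or simply drop the handful of very long questions
train_filtered = train[train["text_length"] <= 300]
```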
###Code
def split_on_target(data):
toxic = data[data["target"] == 1]
sincere = data[data["target"] == 0]
return sincere, toxic
sincere, toxic = split_on_target(train)
def plot_density_plots(sincere_data, toxic_data, column, xlim=(0, 300), bin_size=5):
fig, axes = plt.subplots(1, 2, figsize=(12, 5))
axes[0] = sns.distplot(sincere_data[column], ax=axes[0], bins=np.arange(xlim[0], xlim[1], bin_size))
axes[0].set_title("Sincere questions")
axes[1] = sns.distplot(toxic_data[column], ax=axes[1], bins=np.arange(xlim[0], xlim[1], bin_size))
axes[1].set_title("Toxic questions")
if xlim is not None:
for ax in axes:
ax.set_xlim(xlim[0], xlim[1])
plt.suptitle("Comparison of {} between sincere and toxic questions".format(column))
plt.show()
plot_density_plots(sincere, toxic, "text_length")
###Output
_____no_output_____
###Markdown
Toxic questions seem to have a higher chance of having somewhat more characters, although the medians seem to be more or less the same. The numbers confirm:
###Code
pd.concat([sincere["text_length"].describe(), toxic["text_length"].describe()], axis=1)
###Output
_____no_output_____
###Markdown
2.2 Question length (words)A similar analysis can be done based on the number of _words_ per question, rather than the number of characters. To do this properly, we should probably first remove symbols and punctuation, but let's take a quick look.
###Code
train["words"] = train["question_text"].apply(lambda x: len(x.split(" ")))
sincere, toxic = split_on_target(train)
plot_density_plots(sincere, toxic, "words", xlim=(0, 60), bin_size=2)
###Output
_____no_output_____
###Markdown
The same conclusion seems to hold for the number of words. It is, thus, useful to include the question size as a feature in our models. Also, it seems that there are not many questions with more than 50 or 60 words:
###Code
for n in [50, 55, 60]:
print("{} questions with more than {} words.".format(np.sum(train["words"] > n), n))
###Output
_____no_output_____ |
Cross Language Information Retrieval.ipynb | ###Markdown
Cross Language Information Retrieval Overview The aim of this project is to build a cross language information retrieval system (CLIR) which, given a query in German, will be capable of searching text documents written in English and displaying the results in German. We're going to use machine translation and information retrieval using a vector space model, and then assess the performance of the system using IR evaluation techniques. Parts of the project are explained as we progress. Data Used- bitext.(en,de): A sentence aligned, parallel German-English corpus, sourced from the Europarl corpus (which is a collection of debates held in the EU parliament over a number of years). We'll use this to develop word-alignment tools, and build a translation probability table. - newstest.(en,de): A separate, smaller parallel corpus for evaluation of the translation system.- devel.(docs,queries,qrel): A set of documents in English (sourced from Wikipedia), queries in German, and relevance judgement scores for each query-document pair. The files are available to check out in the data/clir directory of the repo. Housekeeping: File encodings and tokenisation Since the data files we use are utf-8 encoded text, we need to convert the strings into ASCII by escaping the special symbols. We also import some libraries in this step.
###Code
from nltk.tokenize import word_tokenize
from __future__ import division #To properly handle floating point divisions.
import math
#Function to tokenise string/sentences.
def tokenize(line, tokenizer=word_tokenize):
utf_line = line.decode('utf-8').lower()
return [token.encode('ascii', 'backslashreplace') for token in tokenizer(utf_line)]
###Output
_____no_output_____
###Markdown
Now we can test out our tokenize function. Notice how it converts the word Über.
###Code
tokenize("Seit damals ist er auf über 10.000 Punkte gestiegen.")
###Output
_____no_output_____
###Markdown
Let's store the path of the data files as easily identifiable variables for future access.
###Code
DEVELOPMENT_DOCS = 'data/clir/devel.docs' #Data file for IR engine development
DEVELOPMENT_QUERIES = 'data/clir/devel.queries' #Data file containing queries in German
DEVELOPMENT_QREL = 'data/clir/devel.qrel' #Data file containing a relevance score or query-doc pairs
BITEXT_ENG = 'data/clir/bitext.en' #Bitext data file in English for translation engine and language model development
BITEXT_DE = 'data/clir/bitext.de' #Bitext data file in German
NEWSTEST_ENG = 'data/clir/newstest.en' #File for testing language model
###Output
_____no_output_____
###Markdown
With that out of the way, let's get to the meat of the project. As mentioned earlier, we're going to build a CLIR engine consisting of information retrieval and translation components, and then evaluate its accuracy. The CLIR system will:- **translate queries** from German into English (because our searchable corpus is in English), using word-based translation, a rather simplistic approach as opposed to the sophistication you might see in, say, *Google Translate*.- **search over the document corpus** using the Okapi BM25 IR ranking model, a variation of the traditional TF-IDF model.- **evaluate the quality** of ranked retrieval results using the query relevance judgements. Information Retrieval using [Okapi BM25](https://en.wikipedia.org/wiki/Okapi_BM25) We'll start by building an IR system, and give it a test run with some English queries. Here's an overview of the tasks involved:- Loading the data files, and tokenizing the input.- Preprocessing the lexicon by stemming and removing stopwords.- Calculating the TF-IDF representation for all documents in our Wikipedia corpus.- Storing an inverted index to efficiently retrieve documents, given a query term.- Implementing querying with BM25.- Test runs. So for our first task, we'll load the devel.docs file, extract and tokenize the terms, and store them in a Python dictionary with the document ids as keys.
###Code
import nltk
import re
stopwords = set(nltk.corpus.stopwords.words('english')) #converting stopwords to a set for faster processing in the future.
stemmer = nltk.stem.PorterStemmer()
#Function to extract and tokenize terms from a document
def extract_and_tokenize_terms(doc):
terms = []
for token in tokenize(doc):
if token not in stopwords: # 'in' and 'not in' operations are faster over sets than lists
if not re.search(r'\d',token) and not re.search(r'[^A-Za-z-]',token): #Removing numbers and punctuations
#(excluding hyphenated words)
terms.append(stemmer.stem(token.lower()))
return terms
documents = {} #Dictionary to store documents with ids as keys.
#Reading each line in the file and storing it documents dictionary
f = open(DEVELOPMENT_DOCS)
for line in f:
doc = line.split("\t")
terms = extract_and_tokenize_terms(doc[1])
documents[doc[0]] = terms
f.close()
###Output
_____no_output_____
###Markdown
To check that everything is working so far, let's access a document from the dictionary with the id '290'.
###Code
documents['290'][:20] #To keep things short, we're only going to check out 20 tokens.
###Output
_____no_output_____
###Markdown
Now we'll build an inverted index for the documents, so that we can quickly access documents for the terms we need.
###Code
#Building an inverted index for the documents
from collections import defaultdict
inverted_index = defaultdict(set)
for docid, terms in documents.items():
for term in terms:
inverted_index[term].add(docid)
###Output
_____no_output_____
###Markdown
To test it out, the list of documents containing the word 'pizza':
###Code
inverted_index['pizza']
###Output
_____no_output_____
###Markdown
On to the BM25 TF-IDF representation: we'll create the tf-idf matrix for term-document pairs, first without the query component. The query component depends on the terms in our query, so we'll calculate that separately and multiply it into the overall score when we want to retrieve documents for a particular query.
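Concretely, the scoring function implemented in the cells below can be written as $$\text{score}(d, q) = \sum_{t \in q} \log\left(\frac{N - f_t + 0.5}{f_t + 0.5}\right) \cdot \frac{(k_1 + 1)\, f_{d,t}}{k_1\left((1-b) + b\,\frac{L_d}{L_{avg}}\right) + f_{d,t}} \cdot \frac{(k_3 + 1)\, f_{q,t}}{k_3 + f_{q,t}}$$ where $N$ is the number of documents, $f_t$ the number of documents containing term $t$, $f_{d,t}$ and $f_{q,t}$ the frequencies of $t$ in document $d$ and in the query respectively, $L_d$ the length of document $d$, and $L_{avg}$ the average document length. The first two factors form the `tf_idf` matrix computed below; the query-dependent third factor is multiplied in at query time.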
###Code
#Building a TF-IDF representation using BM25
NO_DOCS = len(documents) #Number of documents
AVG_LEN_DOC = sum([len(doc) for doc in documents.values()])/len(documents) #Average length of documents
#The function below takes the documentid, and the term, to calculate scores for the tf and idf
#components, and multiplies them together.
def tf_idf_score(k1,b,term,docid):
ft = len(inverted_index[term])
term = stemmer.stem(term.lower())
fdt = documents[docid].count(term)
idf_comp = math.log((NO_DOCS - ft + 0.5)/(ft+0.5))
tf_comp = ((k1 + 1)*fdt)/(k1*((1-b) + b*(len(documents[docid])/AVG_LEN_DOC))+fdt)
return idf_comp * tf_comp
#Function to create tf_idf matrix without the query component
def create_tf_idf(k1,b):
tf_idf = defaultdict(dict)
for term in set(inverted_index.keys()):
for docid in inverted_index[term]:
tf_idf[term][docid] = tf_idf_score(k1,b,term,docid)
return tf_idf
#Creating tf_idf matrix with said parameter values: k1 and b for all documents.
tf_idf = create_tf_idf(1.5,0.5)
###Output
_____no_output_____
###Markdown
We took the default values for k1 and b (1.5 and 0.5), which seemed to give good results, although these parameters may be altered depending on the type of data being dealt with (in the IR literature, k1 is typically set between 1.2 and 2.0, with b around 0.75). Now we create a method to compute the query component, and another method that uses the previous ones to retrieve the relevant documents for a query, sorted by their scores.
###Code
#Function to retrieve query component
def get_qtf_comp(k3,term,fqt):
return ((k3+1)*fqt[term])/(k3 + fqt[term])
#Function to retrieve documents || Returns a set of documents and their relevance scores.
def retr_docs(query,result_count):
q_terms = [stemmer.stem(term.lower()) for term in query.split() if term not in stopwords] #Removing stopwords from queries
fqt = {}
for term in q_terms:
fqt[term] = fqt.get(term,0) + 1
scores = {}
for word in fqt.keys():
#print word + ': '+ str(inverted_index[word])
for document in inverted_index[word]:
scores[document] = scores.get(document,0) + (tf_idf[word][document]*get_qtf_comp(0,word,fqt)) #k3 chosen as 0 (default)
return sorted(scores.items(),key = lambda x : x[1] , reverse=True)[:result_count]
###Output
_____no_output_____
###Markdown
Let's try and retrieve a document for a query.
###Code
retr_docs("Manchester United",5)
###Output
_____no_output_____
###Markdown
Checking out the terms in the top-ranked document...
###Code
documents['19961'][:30]
###Output
_____no_output_____
###Markdown
The information retrieval engine has worked quite well in this case. The top-ranked document for the query is a snippet of the Wikipedia article for Manchester United Football Club. On further inspection, we can see that the documents ranked lower are, for example, for The University of Manchester, or even just articles with the words 'Manchester' or 'United' in them. Now we can begin translating the German queries to English. Query Translation: For translation, we'll implement a simple word-based translation model in a noisy channel setting. This means that we'll use both a language model over English and a translation model. We'll use a unigram language model for decoding/translation, but also create a trigram model to test the improvement in performance. Our aim is to find the string $\vec{e}$ which maximises $p(\vec{e}) p(\vec{g} | \vec{e})$, given English output string $\vec{e}$ and German input string $\vec{g}$. Language Model: [From Wikipedia](https://en.wikipedia.org/wiki/Language_model): A statistical language model is a probability distribution over sequences of words. Given such a sequence, say of length m, it assigns a probability P(w1,....,wm) to the whole sequence. The models will be trained on the 'bitext.en' file, and tested on 'newstest.en'. Since we train and test on different files, we'll inevitably run into words (unigrams) and trigrams that we haven't seen in the file we trained the model on. To account for this unknown information, we'll use add-k or [Laplace smoothing](https://en.wikipedia.org/wiki/Additive_smoothing) for the unigram model and [Katz-Backoff smoothing](https://en.wikipedia.org/wiki/Katz%27s_back-off_model) for the trigram model. Let's start by calculating the unigram, bigram and trigram counts (we need the bigram counts for trigram smoothing). The sentences are also converted appropriately by adding sentinels at the start and end of sentences.
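For reference, the decoding objective stated above can be written as $$\hat{e} = \underset{\vec{e}}{\arg\max}\; p(\vec{e})\, p(\vec{g} \mid \vec{e})$$ and the add-k (Laplace) smoothed unigram probability we'll implement further below is $$P_{\text{add-}k}(w) = \frac{\text{count}(w) + k}{N + k\,|V|}$$ where $N$ is the total number of training tokens and $|V|$ is the vocabulary size (including the UNK and sentinel tokens).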
###Code
#Calculating the unigram, bigram and trigram counts.
f = open(BITEXT_ENG)
train_sentences = []
for line in f:
train_sentences.append(tokenize(line))
f.close()
#Function to mark the first occurence of words as unknown, for training.
def check_for_unk_train(word,unigram_counts):
if word in unigram_counts:
return word
else:
unigram_counts[word] = 0
return "UNK"
#Function to convert sentences for training the language model.
def convert_sentence_train(sentence,unigram_counts):
#<s1> and <s2> are sentinel tokens added to the start and end, for handling tri/bigrams at the start of a sentence.
return ["<s1>"] + ["<s2>"] + [check_for_unk_train(token.lower(),unigram_counts) for token in sentence] + ["</s2>"]+ ["</s1>"]
#Function to obtain unigram, bigram and trigram counts.
def get_counts(sentences):
trigram_counts = defaultdict(lambda: defaultdict(dict))
bigram_counts = defaultdict(dict)
unigram_counts = {}
for sentence in sentences:
sentence = convert_sentence_train(sentence, unigram_counts)
for i in range(len(sentence) - 2):
trigram_counts[sentence[i]][sentence[i+1]][sentence[i+2]] = trigram_counts[sentence[i]][sentence[i+1]].get(sentence[i+2],0) + 1
bigram_counts[sentence[i]][sentence[i+1]] = bigram_counts[sentence[i]].get(sentence[i+1],0) + 1
unigram_counts[sentence[i]] = unigram_counts.get(sentence[i],0) + 1
unigram_counts["</s1>"] = unigram_counts["<s1>"]
unigram_counts["</s2>"] = unigram_counts["<s2>"]
bigram_counts["</s2>"]["</s1>"] = bigram_counts["<s1>"]["<s2>"]
return unigram_counts, bigram_counts, trigram_counts
unigram_counts, bigram_counts,trigram_counts = get_counts(train_sentences)
###Output
_____no_output_____
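###Markdown
To make the counting scheme concrete, here is a minimal standalone sketch (the toy corpus and variable names below are illustrative, not part of the pipeline) that applies the same sentinel convention to two tiny sentences.
###Code
#Illustrative sketch: n-gram counting with <s1>/<s2> sentinels on a toy corpus.
from collections import defaultdict

toy_sentences = [["the", "cat", "sat"], ["the", "cat", "ran"]] #hypothetical toy corpus

toy_tri = defaultdict(lambda: defaultdict(dict))
toy_bi = defaultdict(dict)
toy_uni = {}
for sent in toy_sentences:
    sent = ["<s1>", "<s2>"] + sent + ["</s2>", "</s1>"]
    for i in range(len(sent) - 2):
        toy_tri[sent[i]][sent[i+1]][sent[i+2]] = toy_tri[sent[i]][sent[i+1]].get(sent[i+2], 0) + 1
        toy_bi[sent[i]][sent[i+1]] = toy_bi[sent[i]].get(sent[i+1], 0) + 1
        toy_uni[sent[i]] = toy_uni.get(sent[i], 0) + 1

print(toy_uni.get("cat", 0)) #2: 'cat' occurs twice
print(toy_bi["the"].get("cat", 0)) #2: the bigram ('the', 'cat') occurs twice
print(toy_tri["the"]["cat"].get("sat", 0)) #1: the trigram ('the', 'cat', 'sat') occurs once
###Output
_____no_output_____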
###Markdown
We can calculate the [perplexity](https://en.wikipedia.org/wiki/Perplexity) of our language models to see how well they predict a sentence.
###Code
#Constructing unigram model with 'add-k' smoothing
token_count = sum(unigram_counts.values())
#Function to convert unknown words for testing.
#Words that don't appear in the training corpus (even if they are in the test corpus) are marked as UNK.
def check_for_unk_test(word,unigram_counts):
if word in unigram_counts and unigram_counts[word] > 0:
return word
else:
return "UNK"
def convert_sentence_test(sentence,unigram_counts):
return ["<s1>"] + ["<s2>"] + [check_for_unk_test(word.lower(),unigram_counts) for word in sentence] + ["</s2>"] + ["</s1>"]
#Returns the log probability of a unigram, with add-k smoothing. We're taking logs to avoid probability underflow.
def get_log_prob_addk(word,unigram_counts,k):
return math.log((unigram_counts[word] + k)/ \
(token_count + k*len(unigram_counts)))
#Returns the log probability of a sentence.
def get_sent_log_prob_addk(sentence, unigram_counts,k):
sentence = convert_sentence_test(sentence, unigram_counts)
return sum([get_log_prob_addk(word, unigram_counts,k) for word in sentence])
def calculate_perplexity_uni(sentences,unigram_counts, token_count, k):
total_log_prob = 0
test_token_count = 0
for sentence in sentences:
test_token_count += len(sentence) + 2 # have to consider the end token
total_log_prob += get_sent_log_prob_addk(sentence,unigram_counts,k)
return math.exp(-total_log_prob/test_token_count)
f = open(NEWSTEST_ENG)
test_sents = []
for line in f:
test_sents.append(tokenize(line))
f.close()
###Output
_____no_output_____
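###Markdown
As a quick sanity check of the add-k formula, here is a self-contained sketch on toy counts (the names and numbers below are made up for illustration) that computes smoothed unigram probabilities and the perplexity of a tiny test sentence.
###Code
#Illustrative sketch: add-k smoothed unigram probabilities and perplexity on toy counts.
import math

toy_counts = {"the": 3, "cat": 1, "sat": 1, "UNK": 0} #hypothetical unigram counts
toy_total = sum(toy_counts.values()) #5 observed tokens
k = 0.1

def toy_prob_addk(word):
    #P(word) = (count(word) + k) / (N + k * |V|)
    return (toy_counts.get(word, 0) + k) / (toy_total + k * len(toy_counts))

test_sentence = ["the", "cat", "sat"]
log_prob = sum([math.log(toy_prob_addk(w)) for w in test_sentence])
print(math.exp(-log_prob / len(test_sentence))) #perplexity: lower means the model is less surprised
###Output
_____no_output_____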
###Markdown
Now we'll calculate the [perplexity](https://en.wikipedia.org/wiki/Perplexity) of the model as a measure of performance, i.e. how well it predicts a sentence. To find the optimum value of k, we can simply calculate the perplexity multiple times with different values of k.
###Code
#Calculating the perplexity for different ks
ks = [0.0001,0.01,0.1,1,10]
for k in ks:
print str(k) +": " + str(calculate_perplexity_uni(test_sents,unigram_counts,token_count,k))
###Output
0.0001: 613.918691403
0.01: 614.027477551
0.1: 615.06903252
1: 628.823994251
10: 823.302441447
###Markdown
Using add-k smoothing, the perplexity of the unigram model increases as k increases, so 0.0001 is the best choice for k among the values tried. Moving on to trigrams.
###Code
#Calculating the N1/N paramaters for Trigrams/Bigrams/Unigrams in Katz-Backoff Smoothing
TRI_ONES = 0 #N1 for Trigrams
TRI_TOTAL = 0 #N for Trigrams
for twod in trigram_counts.values():
for oned in twod.values():
for val in oned.values():
if val==1:
TRI_ONES+=1 #Count of trigram seen once
TRI_TOTAL += 1 #Count of all trigrams seen
BI_ONES = 0 #N1 for Bigrams
BI_TOTAL = 0 #N for Bigrams
for oned in bigram_counts.values():
for val in oned.values():
if val==1:
BI_ONES += 1 #Count of bigram seen once
BI_TOTAL += 1 #Count of all bigrams seen
UNI_ONES = unigram_counts.values().count(1)
UNI_TOTAL = len(unigram_counts)
#Constructing trigram model with backoff smoothing
TRI_ALPHA = TRI_ONES/TRI_TOTAL #Alpha parameter for trigram counts
BI_ALPHA = BI_ONES/BI_TOTAL #Alpha parameter for bigram counts
UNI_ALPHA = UNI_ONES/UNI_TOTAL
def get_log_prob_back(sentence,i,unigram_counts,bigram_counts,trigram_counts,token_count):
if trigram_counts[sentence[i-2]][sentence[i-1]].get(sentence[i],0) > 0:
return math.log((1-TRI_ALPHA)*trigram_counts[sentence[i-2]][sentence[i-1]].get(sentence[i])/bigram_counts[sentence[i-2]][sentence[i-1]])
else:
if bigram_counts[sentence[i-1]].get(sentence[i],0)>0:
return math.log(TRI_ALPHA*((1-BI_ALPHA)*bigram_counts[sentence[i-1]][sentence[i]]/unigram_counts[sentence[i-1]]))
else:
return math.log(TRI_ALPHA*BI_ALPHA*(1-UNI_ALPHA)*((unigram_counts[sentence[i]]+0.0001)/(token_count+(0.0001)*len(unigram_counts))))
def get_sent_log_prob_back(sentence, unigram_counts, bigram_counts,trigram_counts, token_count):
sentence = convert_sentence_test(sentence, unigram_counts)
return sum([get_log_prob_back(sentence,i, unigram_counts,bigram_counts,trigram_counts,token_count) for i in range(2,len(sentence))])
def calculate_perplexity_tri(sentences,unigram_counts,bigram_counts,trigram_counts, token_count):
total_log_prob = 0
test_token_count = 0
for sentence in sentences:
test_token_count += len(sentence) + 2 # have to consider the end token
total_log_prob += get_sent_log_prob_back(sentence,unigram_counts,bigram_counts,trigram_counts,token_count)
return math.exp(-total_log_prob/test_token_count)
#Calculating the perplexity
calculate_perplexity_tri(test_sents,unigram_counts,bigram_counts,trigram_counts,token_count)
###Output
_____no_output_____
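###Markdown
To make the backoff chain above easier to follow, here is a standalone sketch of the same idea on toy counts (the counts and the 0.4 weight are made-up placeholders, not the fitted TRI_ALPHA/BI_ALPHA values): use the discounted trigram estimate if the trigram was seen, otherwise back off to the bigram, and finally to the unigram.
###Code
#Illustrative sketch: the Katz-style backoff decision chain on toy counts.
import math

tri = {("the", "cat", "sat"): 1} #toy trigram counts
bi = {("the", "cat"): 2, ("cat", "sat"): 1, ("cat", "ran"): 1} #toy bigram counts
uni = {"the": 2, "cat": 2, "sat": 1, "ran": 1} #toy unigram counts
ALPHA = 0.4 #hypothetical backoff weight (the notebook estimates these from the N1/N statistics)

def toy_backoff_logprob(w1, w2, w3):
    if tri.get((w1, w2, w3), 0) > 0:
        return math.log((1 - ALPHA) * tri[(w1, w2, w3)] / float(bi[(w1, w2)]))
    elif bi.get((w2, w3), 0) > 0:
        return math.log(ALPHA * (1 - ALPHA) * bi[(w2, w3)] / float(uni[w2]))
    else:
        return math.log(ALPHA * ALPHA * uni.get(w3, 1) / float(sum(uni.values()))) #crude unigram floor

print(toy_backoff_logprob("the", "cat", "sat")) #trigram seen: discounted trigram estimate
print(toy_backoff_logprob("the", "cat", "ran")) #unseen trigram: backs off to the bigram ('cat', 'ran')
###Output
_____no_output_____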
###Markdown
For the unigram language model, the perplexity for different values of k was as follows:

| k | Perplexity |
| --- | --- |
| 0.0001 | 613.92 |
| 0.01 | 614.03 |
| 0.1 | 615.07 |
| 1 | 628.82 |
| 10 | 823.30 |

For the trigram model, Katz-Backoff smoothing was chosen as it takes a discounted probability for things only seen once, and backs off to a lower-level n-gram for unencountered n-grams. Comparing the two models, the perplexities were as follows:

| Model | Perplexity |
| --- | --- |
| Unigram (best k = 0.0001) | 613.92 |
| Trigram (Katz-Backoff) | 461.65 |

As can be seen, the trigram model with Katz-Backoff smoothing performs better than the best unigram model (with k = 0.0001). Thus we can say that this model is better at predicting the sequence of a sentence than the unigram model, which makes sense given the extra context the trigram conditions on. Translation model Next, we'll estimate translation model probabilities. For this, we'll use IBM1 from the NLTK library. IBM1 learns word-based translation probabilities using expectation maximisation. We'll use both the 'bitext.de' and 'bitext.en' files for this purpose: extract the sentences from each, and then use IBM1 to build the translation tables.
###Code
#Creating lists of English and German sentences from bitext.
from nltk.translate import IBMModel1
from nltk.translate import AlignedSent, Alignment
eng_sents = []
de_sents = []
f = open(BITEXT_ENG)
for line in f:
terms = tokenize(line)
eng_sents.append(terms)
f.close()
f = open(BITEXT_DE)
for line in f:
terms = tokenize(line)
de_sents.append(terms)
f.close()
#Zipping together the bitexts for easier access
paral_sents = zip(eng_sents,de_sents)
#Building English to German translation table for words (Backward alignment)
eng_de_bt = [AlignedSent(E,G) for E,G in paral_sents]
eng_de_m = IBMModel1(eng_de_bt, 5)
#Building German to English translation table for words (Backward alignment)
de_eng_bt = [AlignedSent(G,E) for E,G in paral_sents]
de_eng_m = IBMModel1(de_eng_bt, 5)
###Output
_____no_output_____
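###Markdown
To get a feel for what IBM Model 1 learns, here is a tiny standalone example on a made-up three-sentence bitext (the sentences below are illustrative only): after EM training, the model exposes a translation_table of word translation probabilities, and each AlignedSent gets an alignment attribute, which is what the next cell intersects.
###Code
#Illustrative sketch: training IBM Model 1 on a toy bitext (not the project's data).
from nltk.translate import AlignedSent, IBMModel1

toy_bitext = [
    AlignedSent(['das', 'haus', 'ist', 'klein'], ['the', 'house', 'is', 'small']),
    AlignedSent(['das', 'haus', 'ist', 'gross'], ['the', 'house', 'is', 'big']),
    AlignedSent(['das', 'buch', 'ist', 'klein'], ['the', 'book', 'is', 'small']),
]
toy_model = IBMModel1(toy_bitext, 5) #5 EM iterations, as in the cell above

print(toy_model.translation_table['haus']['house']) #p('haus' | 'house') should dominate its competitors
print(toy_bitext[0].alignment) #the word alignment learned for the first sentence pair
###Output
_____no_output_____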
###Markdown
We can take the intersection of the dual alignments to obtain a combined alignment for each sentence in the bitext.
###Code
#Script below to combine alignments using set intersections
combined_align = []
for i in range(len(eng_de_bt)):
forward = {x for x in eng_de_bt[i].alignment}
back_reversed = {x[::-1] for x in de_eng_bt[i].alignment}
combined_align.append(forward.intersection(back_reversed))
###Output
_____no_output_____
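###Markdown
The set-intersection step can be illustrated with a pair of made-up alignments for a single sentence: reversing the backward alignment puts both directions into the same (English position, German position) order, and the intersection keeps only the links that both directions agree on.
###Code
#Illustrative sketch: symmetrising two toy word alignments by intersection.
forward_align = {(0, 0), (1, 2), (2, 1), (3, 3)} #toy English->German links as (eng_pos, de_pos)
backward_align = {(0, 0), (2, 1), (3, 3), (2, 0)} #toy German->English links as (de_pos, eng_pos)

back_reversed = {pair[::-1] for pair in backward_align} #flip to (eng_pos, de_pos)
print(sorted(forward_align.intersection(back_reversed))) #only links supported by both directions survive
###Output
_____no_output_____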
###Markdown
Now we can create translation dictionaries in both English-to-German and German-to-English directions. Creating dictionaries for occurrence counts first.
###Code
#Creating German to English dictionary with occurrence count of word pairs
de_eng_count = defaultdict(dict)
for i in range(len(de_eng_bt)):
for item in combined_align[i]:
de_eng_count[de_eng_bt[i].words[item[1]]][de_eng_bt[i].mots[item[0]]] = de_eng_count[de_eng_bt[i].words[item[1]]].get(de_eng_bt[i].mots[item[0]],0) + 1
#Creating an English to German dict with occurrence count of word pairs
eng_de_count = defaultdict(dict)
for i in range(len(eng_de_bt)):
for item in combined_align[i]:
eng_de_count[eng_de_bt[i].words[item[0]]][eng_de_bt[i].mots[item[1]]] = eng_de_count[eng_de_bt[i].words[item[0]]].get(eng_de_bt[i].mots[item[1]],0) + 1
###Output
_____no_output_____
###Markdown
Creating dictionaries for translation probabilities.
###Code
#Creating German to English table with word translation probabilities
de_eng_prob = defaultdict(dict)
for de in de_eng_count.keys():
for eng in de_eng_count[de].keys():
de_eng_prob[de][eng] = de_eng_count[de][eng]/sum(de_eng_count[de].values())
#Creating English to German dict with word translation probabilities
eng_de_prob = defaultdict(dict)
for eng in eng_de_count.keys():
for de in eng_de_count[eng].keys():
eng_de_prob[eng][de] = eng_de_count[eng][de]/sum(eng_de_count[eng].values())
###Output
_____no_output_____
###Markdown
Let's look at some examples of translating individual words from German to English.
###Code
#Examples of translating individual words from German to English
print de_eng_prob['frage']
print de_eng_prob['handlung']
print de_eng_prob['haus']
###Output
{'question': 0.970873786407767, 'issue': 0.019417475728155338, 'matter': 0.009708737864077669}
{'rush': 1.0}
{'begins': 0.058823529411764705, 'house': 0.9411764705882353}
###Markdown
Building the noisy channel translation model, which uses the English-to-German translation dictionary and the unigram language model to add the "noise".
###Code
#Building noisy channel translation model
def de_eng_noisy(german):
noisy={}
for eng in de_eng_prob[german].keys():
noisy[eng] = eng_de_prob[eng][german]+ get_log_prob_addk(eng,unigram_counts,0.0001)
return noisy
###Output
_____no_output_____
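###Markdown
As a standalone illustration of the noisy channel idea (toy numbers only, and working entirely in log space, which is one common way to combine the two scores), each candidate English word is scored by combining the channel probability p(german | english) with the language model probability p(english), and the candidate with the highest combined score wins.
###Code
#Illustrative sketch: ranking candidate translations with a noisy-channel style score.
import math

channel = {'house': 0.94, 'begins': 0.06} #hypothetical p(german word 'haus' | english candidate)
lm = {'house': 0.001, 'begins': 0.0005} #hypothetical unigram p(english candidate)

scores = {}
for eng in channel:
    scores[eng] = math.log(channel[eng]) + math.log(lm[eng]) #log p(g|e) + log p(e)
print(scores)
print(max(scores, key=scores.get)) #'house' wins: a likely translation that is also a common English word
###Output
_____no_output_____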
###Markdown
Let's check out the translations using the noisy channel approach.
###Code
#Test block to check alignments
print de_eng_noisy('vater')
print de_eng_noisy('haus')
print de_eng_noisy('das')
print de_eng_noisy('entschuldigung')
###Output
{'father': -8.798834996562721}
{'begins': -10.2208672198799, 'house': -8.163007778647888}
{'this': -5.214590799418497, 'the': -3.071527829335362, 'that': -4.664995720177421}
{'excuse': -11.870404868087332, 'apology': -12.39683538573032, 'comprehend': -11.89683538573032}
###Markdown
Translations for 'vater', 'haus' and 'das' seem to be pretty good, with the maximum score going to the best translation. For the word 'entschuldigung', the highest-scoring translation is 'excuse', with 'comprehend' close behind. But in real-world use, the most common translation for 'entschuldigung' is 'sorry'. Checking the reverse translation for 'sorry':
###Code
eng_de_prob['sorry']
###Output
_____no_output_____
###Markdown
The reverse table gives the word 'bereue', which Google translates as 'regret'. This is one example of a 'bad' alignment. Let's try translating some queries now.
###Code
#Translating first 5 queries into English
#Function for direct translation
def de_eng_direct(query):
query_english = []
query_tokens = tokenize(query)
for token in query_tokens:
try:
query_english.append(max(de_eng_prob[token], key=de_eng_prob[token].get))
except:
query_english.append(token) #Returning the token itself when it cannot be found in the translation table.
#query_english.append("NA")
return " ".join(query_english)
#Function for noisy channel translation
def de_eng_noisy_translate(query):
query_english = []
query_tokens = tokenize(query)
for token in query_tokens:
try:
query_english.append(max(de_eng_noisy(token), key=de_eng_noisy(token).get))
except:
query_english.append(token) #Returning the token itself when it cannot be found in the translation table.
#query_english.append("NA")
return " ".join(query_english)
f = open(DEVELOPMENT_QUERIES)
lno = 0
plno = 0
#Also building a dictionary of query ids and query content (only for the first 100 queries)
german_qs = {}
test_query_trans_sents = [] #Building a list for perplexity checks.
for line in f:
lno+=1
query_id = line.split('\t')[0]
query_german = line.split('\t')[1]
german_qs[query_id] = query_german.strip()
translation = str(de_eng_noisy_translate(query_german))
if plno<5:
print query_id + "\n" + "German: " + str(query_german) + "\n" + "English: " + translation +"\n\n"
plno+=1
test_query_trans_sents.append(translation)
if lno==100:
break
f.close()
###Output
82
German: der ( von engl . action : tat , handlung , bewegung ) ist ein filmgenre des unterhaltungskinos , in welchem der fortgang der äußeren handlung von zumeist spektakulär inszenierten kampf - und gewaltszenen vorangetrieben und illustriert wird .
English: the ( , guises . action : indeed , rush , movement ) is a filmgenre the unterhaltungskinos , in much the fortgang the external rush , zumeist spektakul\xe4r inszenierten fight - and gewaltszenen pushed and illustriert will .
116
German: die ( einheitenzeichen : u für unified atomic mass unit , veraltet amu für atomic mass unit ) ist eine maßeinheit der masse .
English: the ( einheitenzeichen : u for unified atomic mass unit , obsolete amu for atomic mass unit ) is a befuddled the mass .
240
German: der von lateinisch actualis , " wirklich " , auch aktualitätsprinzip , uniformitäts - oder gleichförmigkeitsprinzip , englisch uniformitarianism , ist die grundlegende wissenschaftliche methode in der .
English: the , lateinisch actualis , `` really `` , , aktualit\xe4tsprinzip , uniformit\xe4ts - or gleichf\xf6rmigkeitsprinzip , english uniformitarianism , is the fundamental scientific method in the .
320
German: die ( griechisch el , von altgriechisch grc , - " zusammen - " , " anbinden " , gemeint ist " die herzbeutel angehängte " ) , ist ein blutgefäß , welches das blut vom herz wegführt .
English: the ( griechisch el , , altgriechisch grc , - `` together - `` , `` anbinden `` , meant is `` the herzbeutel angeh\xe4ngte `` ) , is a blutgef\xe4\xdf , welches the blood vom heart wegf\xfchrt .
540
German: unter der bezeichnung fasst man die drei im nördlichen alpenvorland liegenden gewässereinheiten obersee , untersee und seerhein zusammen .
English: under the bezeichnung summarizes one the three , northern alpenvorland liegenden gew\xe4ssereinheiten obersee , untersee and seerhein together .
###Markdown
The translations of the first 5 queries according to Google Translate are as follows: 82 of ( . Of eng action : act, action , movement, ) is a film genre of entertainment cinema , in which the continued transition of the external action of mostly spectacularly staged battle - and violent scenes is advanced and illustrated .116 ( unit sign : u for unified atomic mass unit , amu outdated for atomic mass unit ) is a unit of measure of mass .240 of actualis from Latin , "real" , even actuality principle , uniformity - or gleichförmigkeitsprinzip , English uniformitarianism , is the basic scientific method in .320 (Greek el , from Ancient Greek grc , - " together - " , " tie " , is meant " the heart bag attached" ) is a blood vessel that leads away the blood from the heart .540 under the designation one summarizes the three lying in the northern waters alpenvorland units obersee , subsea and Seerhein together .---Translations obtained through Google Translate are obviously better. It's interesting to note that our own translation engine works well if a 'word-word' translation is considered, and if the word-pair has been encountered enough times in the bi-lingual corpora. Google Translate also seems to perform better as it's considering phrase-based translation, which is more sophisticated and accurate than word-word translation. Our engine also seems to work better for function words than for content words, as those are the ones encountered most often in the bi-corpora and so are better aligned. The alignments were combined by taking the intersection of the forward and reverse alignments in this case. Combining the two alignments improved things in the sense that the intersection got rid of all the extra 'noise' in the alignments, so that the most likely ones remained (that existed both in the forward and reverse direction). Combining, and Evaluation For the final bit, we'll create a function that translates a query, and retrieves the relevant documents for it. Then, to evaluate the results of our CLIR engine, we'll use the [Mean Average Precision](https://www.youtube.com/watch?v=pM6DJ0ZZee0) to judge the performance of the CLIR system. MAP is a standard evaluation metric used in IR.
###Code
#Building a dictionary for queryids and relevant document ids
qrel = defaultdict(list)
f = open(DEVELOPMENT_QREL)
for line in f:
item = line.split('\t')
qrel[item[0]].append(item[2])
f.close()
#Single function to retrieve documents for a German query
def trans_retr_docs(german_query,no_of_results,translation_function):
trans_query = " ".join(extract_and_tokenize_terms(translation_function(german_query)))
return [item[0] for item in retr_docs(trans_query,no_of_results)] #Retrieving the top no_of_results documents
#Calculating the map score
def calc_map(no_of_results,translation_function):
average_precision = []
for gq in german_qs.keys():
relevant_docs = qrel[gq]
incremental_precision = []
resulting_docs = trans_retr_docs(german_qs[gq],no_of_results,translation_function)
total_counter = 0
true_positive_counter = 0
for doc in resulting_docs:
total_counter+=1
if doc in relevant_docs:
true_positive_counter += 1
incremental_precision.append(true_positive_counter/total_counter)
#For no relevant retrievals, the average precision will be considered 0.
try:
average_precision.append(sum(incremental_precision)/len(incremental_precision))
except:
average_precision.append(0)
return (sum(average_precision)/len(average_precision))
###Output
_____no_output_____
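###Markdown
Before running the full evaluation, here is a tiny self-contained sketch of the average precision computation for a single made-up query (hypothetical document ids and judgements); calc_map averages this quantity over all queries.
###Code
#Illustrative sketch: average precision for one toy ranked result list.
retrieved = ['d3', 'd1', 'd7', 'd2', 'd9'] #ranked results for a hypothetical query
relevant = {'d1', 'd2', 'd5'} #hypothetical relevance judgements

precisions = []
hits = 0
for rank, doc in enumerate(retrieved, start=1):
    if doc in relevant:
        hits += 1
        precisions.append(float(hits) / rank) #precision at each rank where a relevant document appears
average_precision = sum(precisions) / len(precisions) if precisions else 0.0
print(average_precision) #(1/2 + 2/4) / 2 = 0.5
###Output
_____no_output_____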
###Markdown
To keep runtime at a minimum, we'll only consider the top 100 returned results (documents) when calculating the MAP score.
###Code
#Printing the map score for direct translations
print calc_map(100,de_eng_direct)
#Printing the map score for noisy channel translations
print calc_map(100,de_eng_noisy_translate)
###Output
0.364795198505
###Markdown
Cross Language Information Retrieval Overview The aim of this project is to build a cross-language information retrieval (CLIR) system which, given a query in German, will be capable of searching text documents written in English and displaying the results in German. We're going to use machine translation and information retrieval with a vector space model, and then assess the performance of the system using IR evaluation techniques. Parts of the project are explained as we progress. Data Used - bitext.(en,de): A sentence-aligned, parallel German-English corpus, sourced from the Europarl corpus (which is a collection of debates held in the EU parliament over a number of years). We'll use this to develop word-alignment tools, and build a translation probability table. - newstest.(en,de): A separate, smaller parallel corpus for evaluation of the translation system. - devel.(docs,queries,qrel): A set of documents in English (sourced from Wikipedia), queries in German, and relevance judgement scores for each query-document pair. The files are available to check out in the data/clir directory of the repo. Housekeeping: File encodings and tokenisation Since the data files we use are utf-8 encoded text, we need to convert the strings into ASCII by escaping the special symbols. We also import some libraries in this step.
###Code
from nltk.tokenize import word_tokenize
from __future__ import division #To properly handle floating point divisions.
import math
#Function to tokenise string/sentences.
def tokenize(line, tokenizer=word_tokenize):
utf_line = line.decode('utf-8').lower()
return [token.encode('ascii', 'backslashreplace') for token in tokenizer(utf_line)]
###Output
_____no_output_____
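###Markdown
To see exactly what the 'backslashreplace' step does to a non-ASCII character, here is a one-off illustration on a single word (Python 2 string handling, as used throughout this notebook).
###Code
#Illustrative sketch: escaping a non-ASCII character with 'backslashreplace'.
word = "über" #a utf-8 encoded byte string in Python 2
print(word.decode('utf-8').encode('ascii', 'backslashreplace')) #the umlaut becomes the escape sequence \xfc
###Output
_____no_output_____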
###Markdown
Now we can test out our tokenize function. Notice how it converts the word Über.
###Code
tokenize("Seit damals ist er auf über 10.000 Punkte gestiegen.")
###Output
_____no_output_____
###Markdown
Let's store the path of the data files as easily identifiable variables for future access.
###Code
DEVELOPMENT_DOCS = 'data/clir/devel.docs' #Data file for IR engine development
DEVELOPMENT_QUERIES = 'data/clir/devel.queries' #Data file containing queries in German
DEVELOPMENT_QREL = 'data/clir/devel.qrel' #Data file containing relevance scores for query-doc pairs
BITEXT_ENG = 'data/clir/bitext.en' #Bitext data file in English for translation engine and language model development
BITEXT_DE = 'data/clir/bitext.de' #Bitext data file in German
NEWSTEST_ENG = 'data/clir/newstest.en' #File for testing language model
###Output
_____no_output_____
###Markdown
With that out of the way, let's get to the meat of the project. As mentioned earlier, we're going to build a CLIR engine consisting of information retrieval and translation components, and then evaluate its accuracy. The CLIR system will:- **translate queries** from German into English (because our searchable corpus is in English), using word-based translation, a rather simplistic approach as opposed to the sophistication you might see in, say, *Google Translate*.- **search over the document corpus** using the Okapi BM25 IR ranking model, a variation of the traditional TF-IDF model.- **evaluate the quality** of ranked retrieval results using the query relevance judgements. Information Retrieval using [Okapi BM25](https://en.wikipedia.org/wiki/Okapi_BM25) We'll start by building an IR system, and give it a test run with some English queries. Here's an overview of the tasks involved:- Loading the data files, and tokenizing the input.- Preprocessing the lexicon by stemming and removing stopwords.- Calculating the TF/IDF representation for all documents in our Wikipedia corpus.- Storing an inverted index to efficiently retrieve documents, given a query term.- Implementing querying with BM25.- Test runs. So for our first task, we'll load the devel.docs file, extract and tokenize the terms, and store them in a Python dictionary with the document ids as keys.
###Code
import nltk
import re
stopwords = set(nltk.corpus.stopwords.words('english')) #converting stopwords to a set for faster processing in the future.
stemmer = nltk.stem.PorterStemmer()
#Function to extract and tokenize terms from a document
def extract_and_tokenize_terms(doc):
terms = []
for token in tokenize(doc):
if token not in stopwords: # 'in' and 'not in' operations are faster over sets than lists
if not re.search(r'\d',token) and not re.search(r'[^A-Za-z-]',token): #Removing numbers and punctuations
#(excluding hyphenated words)
terms.append(stemmer.stem(token.lower()))
return terms
documents = {} #Dictionary to store documents with ids as keys.
#Reading each line in the file and storing it documents dictionary
f = open(DEVELOPMENT_DOCS)
for line in f:
doc = line.split("\t")
terms = extract_and_tokenize_terms(doc[1])
documents[doc[0]] = terms
f.close()
###Output
_____no_output_____
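###Markdown
As a quick illustration of what this preprocessing does to raw text, here is a small standalone example of stopword removal and Porter stemming with NLTK on a made-up sentence.
###Code
#Illustrative sketch: stopword removal and Porter stemming on a sample sentence.
import nltk

sample = "the players are running towards the goalposts"
stop = set(nltk.corpus.stopwords.words('english'))
porter = nltk.stem.PorterStemmer()

print([porter.stem(tok) for tok in sample.split() if tok not in stop]) #e.g. 'players' -> 'player', 'running' -> 'run'; 'the' and 'are' are dropped
###Output
_____no_output_____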
###Markdown
To check that everything is working so far, let's access a document from the dictionary with the id '290'.
###Code
documents['290'][:20] #To keep things short, we're only going to check out 20 tokens.
###Output
_____no_output_____
###Markdown
Now we'll build an inverted index for the documents, so that we can quickly access documents for the terms we need.
###Code
#Building an inverted index for the documents
from collections import defaultdict
inverted_index = defaultdict(set)
for docid, terms in documents.items():
for term in terms:
inverted_index[term].add(docid)
###Output
_____no_output_____
###Markdown
To test it out, here's the list of documents containing the word 'pizza':
###Code
inverted_index['pizza']
###Output
_____no_output_____
###Markdown
Moving on to the BM25 TF-IDF representation, we'll create the tf-idf matrix for term-document pairs, first without the query component. The query component depends on the terms in our query, so we'll calculate it separately and multiply it into the overall score when we want to retrieve documents for a particular query.
###Code
#Building a TF-IDF representation using BM25
NO_DOCS = len(documents) #Number of documents
AVG_LEN_DOC = sum([len(doc) for doc in documents.values()])/len(documents) #Average length of documents
#The function below takes the documentid, and the term, to calculate scores for the tf and idf
#components, and multiplies them together.
def tf_idf_score(k1,b,term,docid):
ft = len(inverted_index[term])
term = stemmer.stem(term.lower())
fdt = documents[docid].count(term)
idf_comp = math.log((NO_DOCS - ft + 0.5)/(ft+0.5))
tf_comp = ((k1 + 1)*fdt)/(k1*((1-b) + b*(len(documents[docid])/AVG_LEN_DOC))+fdt)
return idf_comp * tf_comp
#Function to create tf_idf matrix without the query component
def create_tf_idf(k1,b):
tf_idf = defaultdict(dict)
for term in set(inverted_index.keys()):
for docid in inverted_index[term]:
tf_idf[term][docid] = tf_idf_score(k1,b,term,docid)
return tf_idf
#Creating tf_idf matrix with said parameter values: k1 and b for all documents.
tf_idf = create_tf_idf(1.5,0.5)
###Output
_____no_output_____
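###Markdown
To build some intuition for the BM25 weighting defined above, here is a self-contained sketch that applies the same formula to made-up collection statistics, showing how the length normalisation controlled by b penalises longer documents and how k1 saturates repeated occurrences of a term.
###Code
#Illustrative sketch: the BM25 term weight on toy collection statistics.
import math

N = 1000 #toy number of documents in the collection
ft = 50 #toy number of documents containing the term
avg_len = 100.0 #toy average document length

def toy_bm25(fdt, doc_len, k1=1.5, b=0.5):
    idf = math.log((N - ft + 0.5) / (ft + 0.5))
    tf = ((k1 + 1) * fdt) / (k1 * ((1 - b) + b * (doc_len / avg_len)) + fdt)
    return idf * tf

print(toy_bm25(fdt=3, doc_len=80)) #a short document scores higher...
print(toy_bm25(fdt=3, doc_len=300)) #...than a longer one with the same term frequency
print(toy_bm25(fdt=30, doc_len=80)) #10x the term frequency gives far less than 10x the score (saturation)
###Output
_____no_output_____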
###Markdown
We took the default values for k1 and b (1.5 and 0.5), which seemed to give good results, although these parameters may be tuned depending on the type of data being dealt with. Now we create a method to retrieve the query component, and another method that uses the previous ones to retrieve the relevant documents for a query, sorted by their relevance scores.
###Code
#Function to retrieve query component
def get_qtf_comp(k3,term,fqt):
return ((k3+1)*fqt[term])/(k3 + fqt[term])
#Function to retrieve documents || Returns a set of documents and their relevance scores.
def retr_docs(query,result_count):
q_terms = [stemmer.stem(term.lower()) for term in query.split() if term not in stopwords] #Removing stopwords from queries
fqt = {}
for term in q_terms:
fqt[term] = fqt.get(term,0) + 1
scores = {}
for word in fqt.keys():
#print word + ': '+ str(inverted_index[word])
for document in inverted_index[word]:
scores[document] = scores.get(document,0) + (tf_idf[word][document]*get_qtf_comp(0,word,fqt)) #k3 chosen as 0 (default)
return sorted(scores.items(),key = lambda x : x[1] , reverse=True)[:result_count]
###Output
_____no_output_____
###Markdown
Let's try and retrieve a document for a query.
###Code
retr_docs("Manchester United",5)
###Output
_____no_output_____
###Markdown
Checking out the terms in the top-ranked document...
###Code
documents['19961'][:30]
###Output
_____no_output_____
###Markdown
The information retrieval engine has worked quite well in this case. The top-ranked document for the query is a snippet of the Wikipedia article for Manchester United Football Club. On further inspection, we can see that the documents ranked lower are, for example, for The University of Manchester, or even just articles with the words 'Manchester' or 'United' in them. Now we can begin translating the German queries to English. Query Translation: For translation, we'll implement a simple word-based translation model in a noisy channel setting. This means that we'll use both a language model over English, and a translation model. We'll use a unigram language model for decoding/translation, but we'll also create a trigram model to test the improvement in performance. Our aim is to find the string $\vec{e}$ which maximises $p(\vec{e}) p(\vec{g} | \vec{e})$, given English output string $\vec{e}$ and German input string $\vec{g}$. Language Model: [From Wikipedia](https://en.wikipedia.org/wiki/Language_model): A statistical language model is a probability distribution over sequences of words. Given such a sequence, say of length m, it assigns a probability P(w1,....,wm) to the whole sequence. The models will be trained on the 'bitext.en' file, and tested on 'newstest.en'. As we'll test the models on a different file from the one they were trained on, we'll inevitably run into words (unigrams) and trigrams that weren't seen during training. To account for this unknown information, we'll use add-k or [Laplace smoothing](https://en.wikipedia.org/wiki/Additive_smoothing) for the unigram model and [Katz-Backoff smoothing](https://en.wikipedia.org/wiki/Katz%27s_back-off_model) for the trigram model. Let's start by calculating the unigram, bigram and trigram counts (we need the bigram counts for trigram smoothing). The sentences are also converted appropriately by adding sentinels at the start and end of sentences.
###Code
#Calculating the unigram, bigram and trigram counts.
f = open(BITEXT_ENG)
train_sentences = []
for line in f:
train_sentences.append(tokenize(line))
f.close()
#Function to mark the first occurence of words as unknown, for training.
def check_for_unk_train(word,unigram_counts):
if word in unigram_counts:
return word
else:
unigram_counts[word] = 0
return "UNK"
#Function to convert sentences for training the language model.
def convert_sentence_train(sentence,unigram_counts):
#<s1> and <s2> are sentinel tokens added to the start and end, for handling tri/bigrams at the start of a sentence.
return ["<s1>"] + ["<s2>"] + [check_for_unk_train(token.lower(),unigram_counts) for token in sentence] + ["</s2>"]+ ["</s1>"]
#Function to obtain unigram, bigram and trigram counts.
def get_counts(sentences):
trigram_counts = defaultdict(lambda: defaultdict(dict))
bigram_counts = defaultdict(dict)
unigram_counts = {}
for sentence in sentences:
sentence = convert_sentence_train(sentence, unigram_counts)
for i in range(len(sentence) - 2):
trigram_counts[sentence[i]][sentence[i+1]][sentence[i+2]] = trigram_counts[sentence[i]][sentence[i+1]].get(sentence[i+2],0) + 1
bigram_counts[sentence[i]][sentence[i+1]] = bigram_counts[sentence[i]].get(sentence[i+1],0) + 1
unigram_counts[sentence[i]] = unigram_counts.get(sentence[i],0) + 1
unigram_counts["</s1>"] = unigram_counts["<s1>"]
unigram_counts["</s2>"] = unigram_counts["<s2>"]
bigram_counts["</s2>"]["</s1>"] = bigram_counts["<s1>"]["<s2>"]
return unigram_counts, bigram_counts, trigram_counts
unigram_counts, bigram_counts,trigram_counts = get_counts(train_sentences)
###Output
_____no_output_____
###Markdown
We can calculate the [perplexity](https://en.wikipedia.org/wiki/Perplexity) of our language models to see how well they predict a sentence.
###Code
#Constructing unigram model with 'add-k' smoothing
token_count = sum(unigram_counts.values())
#Function to convert unknown words for testing.
#Words that don't appear in the training corpus (even if they are in the test corpus) are marked as UNK.
def check_for_unk_test(word,unigram_counts):
if word in unigram_counts and unigram_counts[word] > 0:
return word
else:
return "UNK"
def convert_sentence_test(sentence,unigram_counts):
return ["<s1>"] + ["<s2>"] + [check_for_unk_test(word.lower(),unigram_counts) for word in sentence] + ["</s2>"] + ["</s1>"]
#Returns the log probability of a unigram, with add-k smoothing. We're taking logs to avoid probability underflow.
def get_log_prob_addk(word,unigram_counts,k):
return math.log((unigram_counts[word] + k)/ \
(token_count + k*len(unigram_counts)))
#Returns the log probability of a sentence.
def get_sent_log_prob_addk(sentence, unigram_counts,k):
sentence = convert_sentence_test(sentence, unigram_counts)
return sum([get_log_prob_addk(word, unigram_counts,k) for word in sentence])
def calculate_perplexity_uni(sentences,unigram_counts, token_count, k):
total_log_prob = 0
test_token_count = 0
for sentence in sentences:
test_token_count += len(sentence) + 2 # have to consider the end token
total_log_prob += get_sent_log_prob_addk(sentence,unigram_counts,k)
return math.exp(-total_log_prob/test_token_count)
f = open(NEWSTEST_ENG)
test_sents = []
for line in f:
test_sents.append(tokenize(line))
f.close()
###Output
_____no_output_____
###Markdown
Now we'll calculate the [perplexity](https://en.wikipedia.org/wiki/Perplexity) of the model as a measure of performance, i.e. how well it predicts a sentence. To find the optimum value of k, we can simply calculate the perplexity multiple times with different values of k.
###Code
#Calculating the perplexity for different ks
ks = [0.0001,0.01,0.1,1,10]
for k in ks:
print str(k) +": " + str(calculate_perplexity_uni(test_sents,unigram_counts,token_count,k))
###Output
0.0001: 613.918691403
0.01: 614.027477551
0.1: 615.06903252
1: 628.823994251
10: 823.302441447
###Markdown
Using add-k smoothing, the perplexity of the unigram model increases as k increases, so 0.0001 is the best choice for k among the values tried. Moving on to trigrams.
###Code
#Calculating the N1/N paramaters for Trigrams/Bigrams/Unigrams in Katz-Backoff Smoothing
TRI_ONES = 0 #N1 for Trigrams
TRI_TOTAL = 0 #N for Trigrams
for twod in trigram_counts.values():
for oned in twod.values():
for val in oned.values():
if val==1:
TRI_ONES+=1 #Count of trigram seen once
TRI_TOTAL += 1 #Count of all trigrams seen
BI_ONES = 0 #N1 for Bigrams
BI_TOTAL = 0 #N for Bigrams
for oned in bigram_counts.values():
for val in oned.values():
if val==1:
BI_ONES += 1 #Count of bigram seen once
BI_TOTAL += 1 #Count of all bigrams seen
UNI_ONES = unigram_counts.values().count(1)
UNI_TOTAL = len(unigram_counts)
#Constructing trigram model with backoff smoothing
TRI_ALPHA = TRI_ONES/TRI_TOTAL #Alpha parameter for trigram counts
BI_ALPHA = BI_ONES/BI_TOTAL #Alpha parameter for bigram counts
UNI_ALPHA = UNI_ONES/UNI_TOTAL
def get_log_prob_back(sentence,i,unigram_counts,bigram_counts,trigram_counts,token_count):
if trigram_counts[sentence[i-2]][sentence[i-1]].get(sentence[i],0) > 0:
return math.log((1-TRI_ALPHA)*trigram_counts[sentence[i-2]][sentence[i-1]].get(sentence[i])/bigram_counts[sentence[i-2]][sentence[i-1]])
else:
if bigram_counts[sentence[i-1]].get(sentence[i],0)>0:
return math.log(TRI_ALPHA*((1-BI_ALPHA)*bigram_counts[sentence[i-1]][sentence[i]]/unigram_counts[sentence[i-1]]))
else:
return math.log(TRI_ALPHA*BI_ALPHA*(1-UNI_ALPHA)*((unigram_counts[sentence[i]]+0.0001)/(token_count+(0.0001)*len(unigram_counts))))
def get_sent_log_prob_back(sentence, unigram_counts, bigram_counts,trigram_counts, token_count):
sentence = convert_sentence_test(sentence, unigram_counts)
return sum([get_log_prob_back(sentence,i, unigram_counts,bigram_counts,trigram_counts,token_count) for i in range(2,len(sentence))])
def calculate_perplexity_tri(sentences,unigram_counts,bigram_counts,trigram_counts, token_count):
total_log_prob = 0
test_token_count = 0
for sentence in sentences:
test_token_count += len(sentence) + 2 # have to consider the end token
total_log_prob += get_sent_log_prob_back(sentence,unigram_counts,bigram_counts,trigram_counts,token_count)
return math.exp(-total_log_prob/test_token_count)
#Calculating the perplexity
calculate_perplexity_tri(test_sents,unigram_counts,bigram_counts,trigram_counts,token_count)
###Output
_____no_output_____
###Markdown
For the unigram language model, the perplexity for different values of k was as follows:

| k | Perplexity |
| --- | --- |
| 0.0001 | 613.92 |
| 0.01 | 614.03 |
| 0.1 | 615.07 |
| 1 | 628.82 |
| 10 | 823.30 |

For the trigram model, Katz-Backoff smoothing was chosen as it takes a discounted probability for things only seen once, and backs off to a lower-level n-gram for unencountered n-grams. Comparing the two models, the perplexities were as follows:

| Model | Perplexity |
| --- | --- |
| Unigram (best k = 0.0001) | 613.92 |
| Trigram (Katz-Backoff) | 461.65 |

As can be seen, the trigram model with Katz-Backoff smoothing performs better than the best unigram model (with k = 0.0001). Thus we can say that this model is better at predicting the sequence of a sentence than the unigram model, which makes sense given the extra context the trigram conditions on. Translation model Next, we'll estimate translation model probabilities. For this, we'll use IBM1 from the NLTK library. IBM1 learns word-based translation probabilities using expectation maximisation. We'll use both the 'bitext.de' and 'bitext.en' files for this purpose: extract the sentences from each, and then use IBM1 to build the translation tables.
###Code
#Creating lists of English and German sentences from bitext.
from nltk.translate import IBMModel1
from nltk.translate import AlignedSent, Alignment
eng_sents = []
de_sents = []
f = open(BITEXT_ENG)
for line in f:
terms = tokenize(line)
eng_sents.append(terms)
f.close()
f = open(BITEXT_DE)
for line in f:
terms = tokenize(line)
de_sents.append(terms)
f.close()
#Zipping together the bitexts for easier access
paral_sents = zip(eng_sents,de_sents)
#Building English to German translation table for words (Backward alignment)
eng_de_bt = [AlignedSent(E,G) for E,G in paral_sents]
eng_de_m = IBMModel1(eng_de_bt, 5)
#Building German to English translation table for words (Backward alignment)
de_eng_bt = [AlignedSent(G,E) for E,G in paral_sents]
de_eng_m = IBMModel1(de_eng_bt, 5)
###Output
_____no_output_____
###Markdown
We can take the intersection of the dual alignments to obtain a combined alignment for each sentence in the bitext.
###Code
#Script below to combine alignments using set intersections
combined_align = []
for i in range(len(eng_de_bt)):
forward = {x for x in eng_de_bt[i].alignment}
back_reversed = {x[::-1] for x in de_eng_bt[i].alignment}
combined_align.append(forward.intersection(back_reversed))
###Output
_____no_output_____
###Markdown
Now we can create translation dictionaries in both English-to-German and German-to-English directions. Creating dictionaries for occurrence counts first.
###Code
#Creating German to English dictionary with occurrence count of word pairs
de_eng_count = defaultdict(dict)
for i in range(len(de_eng_bt)):
for item in combined_align[i]:
de_eng_count[de_eng_bt[i].words[item[1]]][de_eng_bt[i].mots[item[0]]] = de_eng_count[de_eng_bt[i].words[item[1]]].get(de_eng_bt[i].mots[item[0]],0) + 1
#Creating an English to German dict with occurrence count of word pairs
eng_de_count = defaultdict(dict)
for i in range(len(eng_de_bt)):
for item in combined_align[i]:
eng_de_count[eng_de_bt[i].words[item[0]]][eng_de_bt[i].mots[item[1]]] = eng_de_count[eng_de_bt[i].words[item[0]]].get(eng_de_bt[i].mots[item[1]],0) + 1
###Output
_____no_output_____
###Markdown
Creating dictionaries for translation probabilities.
###Code
#Creating German to English table with word translation probabilities
de_eng_prob = defaultdict(dict)
for de in de_eng_count.keys():
for eng in de_eng_count[de].keys():
de_eng_prob[de][eng] = de_eng_count[de][eng]/sum(de_eng_count[de].values())
#Creating English to German dict with word translation probabilities
eng_de_prob = defaultdict(dict)
for eng in eng_de_count.keys():
for de in eng_de_count[eng].keys():
eng_de_prob[eng][de] = eng_de_count[eng][de]/sum(eng_de_count[eng].values())
###Output
_____no_output_____
###Markdown
Let's look at some examples of translating individual words from German to English.
###Code
#Examples of translating individual words from German to English
print de_eng_prob['frage']
print de_eng_prob['handlung']
print de_eng_prob['haus']
###Output
{'question': 0.970873786407767, 'issue': 0.019417475728155338, 'matter': 0.009708737864077669}
{'rush': 1.0}
{'begins': 0.058823529411764705, 'house': 0.9411764705882353}
###Markdown
Building the noisy channel translation model, which uses the English to German translation dictionary and the unigram language model to add "noise".
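As a reminder of the objective stated earlier, for a German word $g$ we want the English word

$$\hat{e} = \arg\max_{e}\; p(e)\,p(g \mid e) = \arg\max_{e}\;\big[\log p(e) + \log p(g \mid e)\big],$$

where, in the function below, the add-k unigram model supplies the language model term $p(e)$ and the English to German table supplies the channel term $p(g \mid e)$.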
###Code
#Building noisy channel translation model
def de_eng_noisy(german):
noisy={}
for eng in de_eng_prob[german].keys():
noisy[eng] = eng_de_prob[eng][german]+ get_log_prob_addk(eng,unigram_counts,0.0001)
return noisy
###Output
_____no_output_____
###Markdown
Let's check out some translations using the noisy channel approach.
###Code
#Test block to check some noisy channel translations
print de_eng_noisy('vater')
print de_eng_noisy('haus')
print de_eng_noisy('das')
print de_eng_noisy('entschuldigung')
###Output
{'father': -8.798834996562721}
{'begins': -10.2208672198799, 'house': -8.163007778647888}
{'this': -5.214590799418497, 'the': -3.071527829335362, 'that': -4.664995720177421}
{'excuse': -11.870404868087332, 'apology': -12.39683538573032, 'comprehend': -11.89683538573032}
###Markdown
Translations for 'vater', 'haus', and 'das' seem to be pretty good, with the highest score going to the best translation. For the word 'entschuldigung', the best-scoring candidate is 'excuse', with 'comprehend' close behind. But in real-world use, the most common translation for 'entschuldigung' is 'sorry'. Checking the reverse translation for 'sorry':
###Code
eng_de_prob['sorry']
###Output
_____no_output_____
###Markdown
This gives us the word 'bereue', which Google translates as 'regret'. This is one example of a 'bad' alignment. Let's try translating some queries now.
###Code
#Translating the first 100 queries into English (and printing the first 5)
#Function for direct translation
def de_eng_direct(query):
query_english = []
query_tokens = tokenize(query)
for token in query_tokens:
try:
query_english.append(max(de_eng_prob[token], key=de_eng_prob[token].get))
except:
query_english.append(token) #Returning the token itself when it cannot be found in the translation table.
#query_english.append("NA")
return " ".join(query_english)
#Function for noisy channel translation
def de_eng_noisy_translate(query):
query_english = []
query_tokens = tokenize(query)
for token in query_tokens:
try:
query_english.append(max(de_eng_noisy(token), key=de_eng_noisy(token).get))
except:
query_english.append(token) #Returning the token itself when it cannot be found in the translation table.
#query_english.append("NA")
return " ".join(query_english)
f = open(DEVELOPMENT_QUERIES)
lno = 0
plno = 0
#Also building a dictionary of query ids and query content (only for the first 100 queries)
german_qs = {}
test_query_trans_sents = [] #Building a list for perplexity checks.
for line in f:
lno+=1
query_id = line.split('\t')[0]
query_german = line.split('\t')[1]
german_qs[query_id] = query_german.strip()
translation = str(de_eng_noisy_translate(query_german))
if plno<5:
print query_id + "\n" + "German: " + str(query_german) + "\n" + "English: " + translation +"\n\n"
plno+=1
test_query_trans_sents.append(translation)
if lno==100:
break
f.close()
###Output
82
German: der ( von engl . action : tat , handlung , bewegung ) ist ein filmgenre des unterhaltungskinos , in welchem der fortgang der äußeren handlung von zumeist spektakulär inszenierten kampf - und gewaltszenen vorangetrieben und illustriert wird .
English: the ( , guises . action : indeed , rush , movement ) is a filmgenre the unterhaltungskinos , in much the fortgang the external rush , zumeist spektakul\xe4r inszenierten fight - and gewaltszenen pushed and illustriert will .
116
German: die ( einheitenzeichen : u für unified atomic mass unit , veraltet amu für atomic mass unit ) ist eine maßeinheit der masse .
English: the ( einheitenzeichen : u for unified atomic mass unit , obsolete amu for atomic mass unit ) is a befuddled the mass .
240
German: der von lateinisch actualis , " wirklich " , auch aktualitätsprinzip , uniformitäts - oder gleichförmigkeitsprinzip , englisch uniformitarianism , ist die grundlegende wissenschaftliche methode in der .
English: the , lateinisch actualis , `` really `` , , aktualit\xe4tsprinzip , uniformit\xe4ts - or gleichf\xf6rmigkeitsprinzip , english uniformitarianism , is the fundamental scientific method in the .
320
German: die ( griechisch el , von altgriechisch grc , - " zusammen - " , " anbinden " , gemeint ist " die herzbeutel angehängte " ) , ist ein blutgefäß , welches das blut vom herz wegführt .
English: the ( griechisch el , , altgriechisch grc , - `` together - `` , `` anbinden `` , meant is `` the herzbeutel angeh\xe4ngte `` ) , is a blutgef\xe4\xdf , welches the blood vom heart wegf\xfchrt .
540
German: unter der bezeichnung fasst man die drei im nördlichen alpenvorland liegenden gewässereinheiten obersee , untersee und seerhein zusammen .
English: under the bezeichnung summarizes one the three , northern alpenvorland liegenden gew\xe4ssereinheiten obersee , untersee and seerhein together .
###Markdown
The translations of the first 5 queries according to Google Translate are as follows:

82: of ( . Of eng action : act, action , movement, ) is a film genre of entertainment cinema , in which the continued transition of the external action of mostly spectacularly staged battle - and violent scenes is advanced and illustrated .

116: ( unit sign : u for unified atomic mass unit , amu outdated for atomic mass unit ) is a unit of measure of mass .

240: of actualis from Latin , "real" , even actuality principle , uniformity - or gleichförmigkeitsprinzip , English uniformitarianism , is the basic scientific method in .

320: (Greek el , from Ancient Greek grc , - " together - " , " tie " , is meant " the heart bag attached" ) is a blood vessel that leads away the blood from the heart .

540: under the designation one summarizes the three lying in the northern waters alpenvorland units obersee , subsea and Seerhein together .

---

Translations obtained through Google Translate are obviously better. It's interesting to note that our own translation engine works well when a word-for-word translation is adequate, and when the word pair has been encountered enough times in the bilingual corpora. Google Translate also performs better because it uses phrase-based translation, which is more sophisticated and accurate than word-for-word translation. Our engine also works better on function words than on content words, as function words appear far more often in the bitext and are therefore better aligned.

The alignments were combined by taking the intersection of the forward and reverse alignments. Combining the two alignments improved things in the sense that the intersection got rid of the extra 'noise' in the alignments, so that only the most likely pairs remained (those present in both the forward and reverse directions).

Combining, and Evaluation

For the final bit, we'll create a function that translates a query and retrieves the relevant documents for it. Then, to evaluate the results of our CLIR engine, we'll use the [Mean Average Precision](https://www.youtube.com/watch?v=pM6DJ0ZZee0) (MAP) to judge the performance of the CLIR system. MAP is a standard evaluation metric used in IR.
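To make the metric concrete, here is a small, made-up example of the incremental-precision averaging used in the evaluation code below (the document ids and the relevance set are invented purely for illustration).
###Code
#Toy illustration (made-up data) of averaging precision over the retrieved ranks,
#mirroring the scheme used in calc_map below.
toy_relevant = {'d1', 'd3'} #pretend these are the relevant documents for some query
toy_ranking = ['d1', 'd2', 'd3', 'd4'] #pretend this is the ranked retrieval result
true_positives = 0
toy_incremental_precision = []
for rank, doc in enumerate(toy_ranking, start=1):
    if doc in toy_relevant:
        true_positives += 1
    toy_incremental_precision.append(float(true_positives)/rank) #precision after each retrieved doc
toy_average_precision = sum(toy_incremental_precision)/len(toy_incremental_precision)
#(1.0 + 0.5 + 0.6667 + 0.5)/4, i.e. roughly 0.67 for this toy ranking
###Output
_____no_output_____
###Markdown
Now let's set up the actual evaluation.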
###Code
#Building a dictionary for queryids and relevant document ids
qrel = defaultdict(list)
f = open(DEVELOPMENT_QREL)
for line in f:
item = line.split('\t')
qrel[item[0]].append(item[2])
f.close()
#Single function to retrieve documents for a German query
def trans_retr_docs(german_query,no_of_results,translation_function):
trans_query = " ".join(extract_and_tokenize_terms(translation_function(german_query)))
return [item[0] for item in retr_docs(trans_query,no_of_results)] #Retriving 100 documents
#Calculating the MAP score
def calc_map(no_of_results,translation_function):
average_precision = []
for gq in german_qs.keys():
relevant_docs = qrel[gq]
incremental_precision = []
resulting_docs = trans_retr_docs(german_qs[gq],no_of_results,translation_function)
total_counter = 0
true_positive_counter = 0
for doc in resulting_docs:
total_counter+=1
if doc in relevant_docs:
true_positive_counter += 1
incremental_precision.append(true_positive_counter/total_counter)
        #For no relevant retrievals, the average precision will be considered 0.
try:
average_precision.append(sum(incremental_precision)/len(incremental_precision))
except:
average_precision.append(0)
return (sum(average_precision)/len(average_precision))
###Output
_____no_output_____
###Markdown
To keep runtime at a minimum, we'll only consider the top 100 returned results (documents) when calculating the MAP scores for the two translation approaches.
###Code
#Printing the map score for direct translations
print calc_map(100,de_eng_direct)
#Printing the map score for noisy channel translations
print calc_map(100,de_eng_noisy_translate)
###Output
0.364795198505
Cross Language Information Retrieval OverviewThe aim of this project is to build a cross language information retrieval system (CLIR) which, given a query in German, will be capable of searching text documents written in English and displaying the results in German.We're going to use machine translation, information retrieval using a vector space model, and then assess the performance of the system using IR evaluation techniques.Parts of the project are explained as we progress. Data Used- bitext.(en,de): A sentence aligned, parallel German-English corpus, sourced from the Europarl corpus (which is a collection of debates held in the EU parliament over a number of years). We'll use this to develop word-alignment tools, and build a translation probability table. - newstest.(en,de): A separate, smaller parallel corpus for evaulation of the translation system.- devel.(docs,queries,qrel): A set of documents in English (sourced from Wikipedia), queries in German, and relevance judgement scores for each query-document pair. The files are available to check out in the data/clir directory of the repo. Housekeeping: File encodings and tokenisationSince the data files we use is utf-8 encoded text, we need to convert the strings into ASCII by escaping the special symbols. We also import some libraries in this step as well.
###Code
from nltk.tokenize import word_tokenize
from __future__ import division #To properly handle floating point divisions.
import math
#Function to tokenise string/sentences.
def tokenize(line, tokenizer=word_tokenize):
utf_line = line.decode('utf-8').lower()
return [token.encode('ascii', 'backslashreplace') for token in tokenizer(utf_line)]
###Output
_____no_output_____
###Markdown
Now we can test out our tokenize function. Notice how it converts the word Über.
###Code
tokenize("Seit damals ist er auf über 10.000 Punkte gestiegen.")
###Output
_____no_output_____
###Markdown
Let's store the path of the data files as easily identifiable variables for future access.
###Code
DEVELOPMENT_DOCS = 'data/clir/devel.docs' #Data file for IR engine development
DEVELOPMENT_QUERIES = 'data/clir/devel.queries' #Data file containing queries in German
DEVELOPMENT_QREL = 'data/clir/devel.qrel' #Data file containing a relevance score or query-doc pairs
BITEXT_ENG = 'data/clir/bitext.en' #Bitext data file in English for translation engine and language model development
BITEXT_DE = 'data/clir/bitext.de' #Bitext data file in German
NEWSTEST_ENG = 'data/clir/newstest.en' #File for testing language model
###Output
_____no_output_____
###Markdown
With that out of the way, lets get to the meat of the project. As mentioned earlier, we're going to build a CLIR engine consisting of information retrieval and translation components, and then evaluate its accuracy.The CLIR system will:- **translate queries** from German into English (because our searcheable corpus is in English), using word-based translation, a rather simplistic approach as opposed to the sophistication you might see in, say, *Google Translate*.- **search over the document corpus** using the Okapi BM25 IR ranking model, a variation of the traditional TF-IDF model.- **evaluate the quality** of ranked retrieval results using the query relevance judgements. Information Retrieval using [Okapi BM25](https://en.wikipedia.org/wiki/Okapi_BM25)We'll start by building an IR system, and give it a test run with some English queries. Here's an overview of the tasks involved:- Loading the data files, and tokenizing the input.- Preprocessing the lexicon by stemming, removing stopwords.- Calculating the TF/IDF representation for all documents in our wikipedia corpus.- Storing an inverted index to efficiently documents, given a query term.- Implementing querying with BM25.- Test runs.So for our first task, we'll load the devel.docs file, extract and tokenize the terms, and store them in a python dictionary with the document ids as keys.
###Code
import nltk
import re
stopwords = set(nltk.corpus.stopwords.words('english')) #converting stopwords to a set for faster processing in the future.
stemmer = nltk.stem.PorterStemmer()
#Function to extract and tokenize terms from a document
def extract_and_tokenize_terms(doc):
terms = []
for token in tokenize(doc):
if token not in stopwords: # 'in' and 'not in' operations are faster over sets than lists
if not re.search(r'\d',token) and not re.search(r'[^A-Za-z-]',token): #Removing numbers and punctuations
#(excluding hyphenated words)
terms.append(stemmer.stem(token.lower()))
return terms
documents = {} #Dictionary to store documents with ids as keys.
#Reading each line in the file and storing it documents dictionary
f = open(DEVELOPMENT_DOCS)
for line in f:
doc = line.split("\t")
terms = extract_and_tokenize_terms(doc[1])
documents[doc[0]] = terms
f.close()
###Output
_____no_output_____
###Markdown
To check if everything is working till now, let's access a document from the dictionary, with the id '290'.
###Code
documents['290'][:20] #To keep things short, we're only going to check out 20 tokens.
###Output
_____no_output_____
###Markdown
Now we'll build an inverted index for the documents, so that we can quickly access documents for the terms we need.
###Code
#Building an inverted index for the documents
from collections import defaultdict
inverted_index = defaultdict(set)
for docid, terms in documents.items():
for term in terms:
inverted_index[term].add(docid)
###Output
_____no_output_____
###Markdown
To test it out, the list of documents containing the word 'pizza':
###Code
inverted_index['pizza']
###Output
_____no_output_____
###Markdown
On to the BM25 TF-IDF representation, we'll create the td-idf matrix for terms-documents, first without the query component. The query component is dependent on the terms in our query. So we'll just calculate that, and multiply it with the overall score when we want to retreive documents for a particular query.
###Code
#Building a TF-IDF representation using BM25
NO_DOCS = len(documents) #Number of documents
AVG_LEN_DOC = sum([len(doc) for doc in documents.values()])/len(documents) #Average length of documents
#The function below takes the documentid, and the term, to calculate scores for the tf and idf
#components, and multiplies them together.
def tf_idf_score(k1,b,term,docid):
ft = len(inverted_index[term])
term = stemmer.stem(term.lower())
fdt = documents[docid].count(term)
idf_comp = math.log((NO_DOCS - ft + 0.5)/(ft+0.5))
tf_comp = ((k1 + 1)*fdt)/(k1*((1-b) + b*(len(documents[docid])/AVG_LEN_DOC))+fdt)
return idf_comp * tf_comp
#Function to create tf_idf matrix without the query component
def create_tf_idf(k1,b):
tf_idf = defaultdict(dict)
for term in set(inverted_index.keys()):
for docid in inverted_index[term]:
tf_idf[term][docid] = tf_idf_score(k1,b,term,docid)
return tf_idf
#Creating tf_idf matrix with said parameter values: k1 and b for all documents.
tf_idf = create_tf_idf(1.5,0.5)
###Output
_____no_output_____
###Markdown
We took the default values for k1 and b (1.5 and 0.5), which seemed to give good results. Although these parameters may be altered depending on the type of data being dealth with. Now we create a method to retrieve the query component, and another method that will use the previous ones and retrieve the relevant documents for a query, sorted on the basis of their ranks.
###Code
#Function to retrieve query component
def get_qtf_comp(k3,term,fqt):
return ((k3+1)*fqt[term])/(k3 + fqt[term])
#Function to retrieve documents || Returns a set of documents and their relevance scores.
def retr_docs(query,result_count):
q_terms = [stemmer.stem(term.lower()) for term in query.split() if term not in stopwords] #Removing stopwords from queries
fqt = {}
for term in q_terms:
fqt[term] = fqt.get(term,0) + 1
scores = {}
for word in fqt.keys():
#print word + ': '+ str(inverted_index[word])
for document in inverted_index[word]:
scores[document] = scores.get(document,0) + (tf_idf[word][document]*get_qtf_comp(0,word,fqt)) #k3 chosen as 0 (default)
return sorted(scores.items(),key = lambda x : x[1] , reverse=True)[:result_count]
###Output
_____no_output_____
###Markdown
Let's try and retrieve a document for a query.
###Code
retr_docs("Manchester United",5)
###Output
_____no_output_____
###Markdown
Checking out the terms in the top ranked document..
###Code
documents['19961'][:30]
###Output
_____no_output_____
###Markdown
The information retrieval engine has worked quite well in this case. The top ranked document for the query is a snippet of the wikipedia article for Manchester United Football Club. On further inspection, we can see that the documents ranked lower are, for example, for The University of Manchester, or even just articles with the words 'Manchester' or 'United' in them.Now we can begin translating the German queries to English. Query Translation: For translation, we'll implement a simple word-based translation model in a noisy channel setting. This means that we'll use both a language model over English, and a translation model.We'll use a unigram language model for decoding/translation, but also create a model with trigram to test the improvement in performace). Our aim is to find the string, $\vec{e}$ which maximises $p(\vec{e}) p(\vec{g} | \vec{e})$, given English output string $\vec{e}$ and German input string $\vec{g}$. Language Model:[From Wikipedia](https://en.wikipedia.org/wiki/Language_model): A statistical language model is a probability distribution over sequences of words. Given such a sequence, say of length m, it assigns a probability P(w1,....,wm) to the whole sequence. The models will be trained on the 'bitext.en' file, and tested on 'newstest.en'.As we'll train the model on different files, it's obvious that we'll run into words (unigrams) and trigrams what we hadn't seen in the file we trained the model on. To account for these unknown information, we'll use add-k or [laplace smoothing](https://en.wikipedia.org/wiki/Additive_smoothing) for the unigram and [Katz-Backoff smoothing](https://en.wikipedia.org/wiki/Katz%27s_back-off_model) for the trigram model.Let's start with calculating the unigram, bigram and trigram counts (we need the bigram counts for trigram smoothing). The sentences are also converted appropriately by adding sentinels at the start and end of sentences.
###Code
#Calculating the unigram, bigram and trigram counts.
f = open(BITEXT_ENG)
train_sentences = []
for line in f:
train_sentences.append(tokenize(line))
f.close()
#Function to mark the first occurence of words as unknown, for training.
def check_for_unk_train(word,unigram_counts):
if word in unigram_counts:
return word
else:
unigram_counts[word] = 0
return "UNK"
#Function to convert sentences for training the language model.
def convert_sentence_train(sentence,unigram_counts):
#<s1> and <s2> are sentinel tokens added to the start and end, for handling tri/bigrams at the start of a sentence.
return ["<s1>"] + ["<s2>"] + [check_for_unk_train(token.lower(),unigram_counts) for token in sentence] + ["</s2>"]+ ["</s1>"]
#Function to obtain unigram, bigram and trigram counts.
def get_counts(sentences):
trigram_counts = defaultdict(lambda: defaultdict(dict))
bigram_counts = defaultdict(dict)
unigram_counts = {}
for sentence in sentences:
sentence = convert_sentence_train(sentence, unigram_counts)
for i in range(len(sentence) - 2):
trigram_counts[sentence[i]][sentence[i+1]][sentence[i+2]] = trigram_counts[sentence[i]][sentence[i+1]].get(sentence[i+2],0) + 1
bigram_counts[sentence[i]][sentence[i+1]] = bigram_counts[sentence[i]].get(sentence[i+1],0) + 1
unigram_counts[sentence[i]] = unigram_counts.get(sentence[i],0) + 1
unigram_counts["</s1>"] = unigram_counts["<s1>"]
unigram_counts["</s2>"] = unigram_counts["<s2>"]
bigram_counts["</s2>"]["</s1>"] = bigram_counts["<s1>"]["<s2>"]
return unigram_counts, bigram_counts, trigram_counts
unigram_counts, bigram_counts,trigram_counts = get_counts(train_sentences)
###Output
_____no_output_____
###Markdown
We can calculate the [perplexity](https://en.wikipedia.org/wiki/Perplexity) of our language models to see how well they predict a sentence.
###Code
#Constructing unigram model with 'add-k' smoothing
token_count = sum(unigram_counts.values())
#Function to convert unknown words for testing.
#Words that don't appear in the training corpus (even if they are in the test corpus) are marked as UNK.
def check_for_unk_test(word,unigram_counts):
if word in unigram_counts and unigram_counts[word] > 0:
return word
else:
return "UNK"
def convert_sentence_test(sentence,unigram_counts):
return ["<s1>"] + ["<s2>"] + [check_for_unk_test(word.lower(),unigram_counts) for word in sentence] + ["</s2>"] + ["</s1>"]
#Returns the log probability of a unigram, with add-k smoothing. We're taking logs to avoid probability underflow.
def get_log_prob_addk(word,unigram_counts,k):
return math.log((unigram_counts[word] + k)/ \
(token_count + k*len(unigram_counts)))
#Returns the log probability of a sentence.
def get_sent_log_prob_addk(sentence, unigram_counts,k):
sentence = convert_sentence_test(sentence, unigram_counts)
return sum([get_log_prob_addk(word, unigram_counts,k) for word in sentence])
def calculate_perplexity_uni(sentences,unigram_counts, token_count, k):
total_log_prob = 0
test_token_count = 0
for sentence in sentences:
test_token_count += len(sentence) + 2 # have to consider the end token
total_log_prob += get_sent_log_prob_addk(sentence,unigram_counts,k)
return math.exp(-total_log_prob/test_token_count)
f = open(NEWSTEST_ENG)
test_sents = []
for line in f:
test_sents.append(tokenize(line))
f.close()
###Output
_____no_output_____
###Markdown
Now we'll calculate the [perplexity](https://en.wikipedia.org/wiki/Perplexity) for the model, as a measure of performance i.e. how well they predict a sentence. To find the optimum value of k, we can just calculate the perplexity multiple times with different k(s).
###Code
#Calculating the perplexity for different ks
ks = [0.0001,0.01,0.1,1,10]
for k in ks:
print str(k) +": " + str(calculate_perplexity_uni(test_sents,unigram_counts,token_count,k))
###Output
0.0001: 613.918691403
0.01: 614.027477551
0.1: 615.06903252
1: 628.823994251
10: 823.302441447
###Markdown
Using add-k smoothing, perplexity for the unigram model increases with the increase in k. So 0.0001 is the best choice for k.Moving on to tri-grams.
###Code
#Calculating the N1/N paramaters for Trigrams/Bigrams/Unigrams in Katz-Backoff Smoothing
TRI_ONES = 0 #N1 for Trigrams
TRI_TOTAL = 0 #N for Trigrams
for twod in trigram_counts.values():
for oned in twod.values():
for val in oned.values():
if val==1:
TRI_ONES+=1 #Count of trigram seen once
TRI_TOTAL += 1 #Count of all trigrams seen
BI_ONES = 0 #N1 for Bigrams
BI_TOTAL = 0 #N for Bigrams
for oned in bigram_counts.values():
for val in oned.values():
if val==1:
BI_ONES += 1 #Count of bigram seen once
BI_TOTAL += 1 #Count of all bigrams seen
UNI_ONES = unigram_counts.values().count(1)
UNI_TOTAL = len(unigram_counts)
#Constructing trigram model with backoff smoothing
TRI_ALPHA = TRI_ONES/TRI_TOTAL #Alpha parameter for trigram counts
BI_ALPHA = BI_ONES/BI_TOTAL #Alpha parameter for bigram counts
UNI_ALPHA = UNI_ONES/UNI_TOTAL
def get_log_prob_back(sentence,i,unigram_counts,bigram_counts,trigram_counts,token_count):
if trigram_counts[sentence[i-2]][sentence[i-1]].get(sentence[i],0) > 0:
return math.log((1-TRI_ALPHA)*trigram_counts[sentence[i-2]][sentence[i-1]].get(sentence[i])/bigram_counts[sentence[i-2]][sentence[i-1]])
else:
if bigram_counts[sentence[i-1]].get(sentence[i],0)>0:
return math.log(TRI_ALPHA*((1-BI_ALPHA)*bigram_counts[sentence[i-1]][sentence[i]]/unigram_counts[sentence[i-1]]))
else:
return math.log(TRI_ALPHA*BI_ALPHA*(1-UNI_ALPHA)*((unigram_counts[sentence[i]]+0.0001)/(token_count+(0.0001)*len(unigram_counts))))
def get_sent_log_prob_back(sentence, unigram_counts, bigram_counts,trigram_counts, token_count):
sentence = convert_sentence_test(sentence, unigram_counts)
return sum([get_log_prob_back(sentence,i, unigram_counts,bigram_counts,trigram_counts,token_count) for i in range(2,len(sentence))])
def calculate_perplexity_tri(sentences,unigram_counts,bigram_counts,trigram_counts, token_count):
total_log_prob = 0
test_token_count = 0
for sentence in sentences:
test_token_count += len(sentence) + 2 # have to consider the end token
total_log_prob += get_sent_log_prob_back(sentence,unigram_counts,bigram_counts,trigram_counts,token_count)
return math.exp(-total_log_prob/test_token_count)
#Calculating the perplexity
calculate_perplexity_tri(test_sents,unigram_counts,bigram_counts,trigram_counts,token_count)
###Output
_____no_output_____
###Markdown
For unigram language model, the perplexity for different values of k were as follow:kPerplexity0.0001613.920.01614.030.1628.821823.302For tri-gram model, Katz-Backoff smoothing was chosen as it takes a discounted probability for things only seen once, and backs off to a lower level n-gram for unencountered n-grams.Compared with the trigram model, the perplexity was as follows:ModelPerplexityUnigram (Best K)613.92Trigram (Katz Backoff)461.65As can be seen, the trigram model with 'Katz Backoff' smoothing seems to perform better than the best unigram model (with k = 0.0001). Thus we can say that this model is better for predicting the sequence of a sentence than unigram, which should is obvious if you think about it. Translation modelNext, we'll estimate translation model probabilities. For this, we'll use IBM1 from the NLTK library. IBM1 learns word based translation probabilities using expectation maximisation. We'll use both 'bitext.de' and 'bitext.en' files for this purpose; extract the sentences from each, and then use IBM1 to build the translation tables.
###Code
#Creating lists of English and German sentences from bitext.
from nltk.translate import IBMModel1
from nltk.translate import AlignedSent, Alignment
eng_sents = []
de_sents = []
f = open(BITEXT_ENG)
for line in f:
terms = tokenize(line)
eng_sents.append(terms)
f.close()
f = open(BITEXT_DE)
for line in f:
terms = tokenize(line)
de_sents.append(terms)
f.close()
#Zipping together the bitexts for easier access
paral_sents = zip(eng_sents,de_sents)
#Building English to German translation table for words (Backward alignment)
eng_de_bt = [AlignedSent(E,G) for E,G in paral_sents]
eng_de_m = IBMModel1(eng_de_bt, 5)
#Building German to English translation table for words (Backward alignment)
de_eng_bt = [AlignedSent(G,E) for E,G in paral_sents]
de_eng_m = IBMModel1(de_eng_bt, 5)
###Output
_____no_output_____
###Markdown
We can take the intersection of the dual alignments to obtain a combined alignment for each sentence in the bitext.
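As a tiny illustration of what the intersection does, here is a toy example (the alignment pairs are made up for illustration, not taken from the bitext):

```python
# Each pair is (english_index, german_index); values below are invented for illustration.
forward = {(0, 0), (1, 2), (2, 1), (3, 3)}            # English -> German alignment links
backward_reversed = {(0, 0), (2, 1), (3, 3), (4, 3)}  # German -> English links, flipped to (eng, de)
print(forward & backward_reversed)  # {(0, 0), (2, 1), (3, 3)} -- only links both models agree on survive
```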
###Code
#Script below to combine alignments using set intersections
combined_align = []
for i in range(len(eng_de_bt)):
forward = {x for x in eng_de_bt[i].alignment}
back_reversed = {x[::-1] for x in de_eng_bt[i].alignment}
combined_align.append(forward.intersection(back_reversed))
###Output
_____no_output_____
###Markdown
Now we can create translation dictionaries in both the English-to-German and German-to-English directions, creating dictionaries of occurrence counts first.
###Code
#Creating German to English dictionary with occurrence counts of word pairs
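# combined_align holds (english_index, german_index) pairs taken from the forward
# (English -> German) alignment, so item[0] indexes the English word and item[1] the German word.
# For de_eng_bt, .words is the German sentence and .mots is the English sentence.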
de_eng_count = defaultdict(dict)
for i in range(len(de_eng_bt)):
for item in combined_align[i]:
de_eng_count[de_eng_bt[i].words[item[1]]][de_eng_bt[i].mots[item[0]]] = de_eng_count[de_eng_bt[i].words[item[1]]].get(de_eng_bt[i].mots[item[0]],0) + 1
#Creating an English to German dict with occurrence counts of word pairs
eng_de_count = defaultdict(dict)
for i in range(len(eng_de_bt)):
for item in combined_align[i]:
eng_de_count[eng_de_bt[i].words[item[0]]][eng_de_bt[i].mots[item[1]]] = eng_de_count[eng_de_bt[i].words[item[0]]].get(eng_de_bt[i].mots[item[1]],0) + 1
###Output
_____no_output_____
###Markdown
Creating dictionaries for translation probabilities.
###Code
#Creating German to English table with word translation probabilities
de_eng_prob = defaultdict(dict)
for de in de_eng_count.keys():
for eng in de_eng_count[de].keys():
de_eng_prob[de][eng] = de_eng_count[de][eng]/sum(de_eng_count[de].values())
#Creating English to German dict with word translation probabilities
eng_de_prob = defaultdict(dict)
for eng in eng_de_count.keys():
for de in eng_de_count[eng].keys():
eng_de_prob[eng][de] = eng_de_count[eng][de]/sum(eng_de_count[eng].values())
###Output
_____no_output_____
###Markdown
Let's look at some examples of translating individual words from German to English.
###Code
#Examples of translating individual words from German to English
print de_eng_prob['frage']
print de_eng_prob['handlung']
print de_eng_prob['haus']
###Output
{'question': 0.970873786407767, 'issue': 0.019417475728155338, 'matter': 0.009708737864077669}
{'rush': 1.0}
{'begins': 0.058823529411764705, 'house': 0.9411764705882353}
###Markdown
Building the noisy channel translation model, which combines the English-to-German translation dictionary with the unigram language model (the "noise" component).
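For reference, the noisy-channel decision rule that motivates this scoring is

$$\hat{e} = \arg\max_{e} P(g \mid e)\,P(e),$$

where $P(e)$ comes from the smoothed unigram language model and $P(g \mid e)$ from the English-to-German translation table; the function below combines these two components into a single score for each candidate English word.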
###Code
#Building noisy channel translation model
def de_eng_noisy(german):
noisy={}
for eng in de_eng_prob[german].keys():
noisy[eng] = eng_de_prob[eng][german]+ get_log_prob_addk(eng,unigram_counts,0.0001)
return noisy
###Output
_____no_output_____
###Markdown
Let's check out the translation using the noise channel approach.
###Code
#Test block to check alignments
print de_eng_noisy('vater')
print de_eng_noisy('haus')
print de_eng_noisy('das')
print de_eng_noisy('entschuldigung')
###Output
{'father': -8.798834996562721}
{'begins': -10.2208672198799, 'house': -8.163007778647888}
{'this': -5.214590799418497, 'the': -3.071527829335362, 'that': -4.664995720177421}
{'excuse': -11.870404868087332, 'apology': -12.39683538573032, 'comprehend': -11.89683538573032}
###Markdown
Translations for 'vater', 'haus', 'das' seem to be pretty good, with the highest score going to the best translation. For the word 'entschuldigung', the best-scoring translation is 'excuse', with 'comprehend' close behind. But in real-world use, the most common translation for 'entschuldigung' is 'sorry'. Checking the reverse translation for 'sorry':
###Code
eng_de_prob['sorry']
###Output
_____no_output_____
###Markdown
The reverse lookup gives the word 'bereue', which Google translates as 'regret'. This is one example of a 'bad' alignment. Let's try translating some queries now.
###Code
#Translating first 5 queries into English
#Function for direct translation
def de_eng_direct(query):
query_english = []
query_tokens = tokenize(query)
for token in query_tokens:
try:
query_english.append(max(de_eng_prob[token], key=de_eng_prob[token].get))
except:
query_english.append(token) #Returning the token itself when it cannot be found in the translation table.
#query_english.append("NA")
return " ".join(query_english)
#Function for noisy channel translation
def de_eng_noisy_translate(query):
query_english = []
query_tokens = tokenize(query)
for token in query_tokens:
try:
query_english.append(max(de_eng_noisy(token), key=de_eng_noisy(token).get))
except:
query_english.append(token) #Returning the token itself when it cannot be found in the translation table.
#query_english.append("NA")
return " ".join(query_english)
f = open(DEVELOPMENT_QUERIES)
lno = 0
plno = 0
#Also building a dictionary of query ids and query content (only for the first 100 queries)
german_qs = {}
test_query_trans_sents = [] #Building a list for perplexity checks.
for line in f:
lno+=1
query_id = line.split('\t')[0]
query_german = line.split('\t')[1]
german_qs[query_id] = query_german.strip()
translation = str(de_eng_noisy_translate(query_german))
if plno<5:
print query_id + "\n" + "German: " + str(query_german) + "\n" + "English: " + translation +"\n\n"
plno+=1
test_query_trans_sents.append(translation)
if lno==100:
break
f.close()
###Output
82
German: der ( von engl . action : tat , handlung , bewegung ) ist ein filmgenre des unterhaltungskinos , in welchem der fortgang der äußeren handlung von zumeist spektakulär inszenierten kampf - und gewaltszenen vorangetrieben und illustriert wird .
English: the ( , guises . action : indeed , rush , movement ) is a filmgenre the unterhaltungskinos , in much the fortgang the external rush , zumeist spektakul\xe4r inszenierten fight - and gewaltszenen pushed and illustriert will .
116
German: die ( einheitenzeichen : u für unified atomic mass unit , veraltet amu für atomic mass unit ) ist eine maßeinheit der masse .
English: the ( einheitenzeichen : u for unified atomic mass unit , obsolete amu for atomic mass unit ) is a befuddled the mass .
240
German: der von lateinisch actualis , " wirklich " , auch aktualitätsprinzip , uniformitäts - oder gleichförmigkeitsprinzip , englisch uniformitarianism , ist die grundlegende wissenschaftliche methode in der .
English: the , lateinisch actualis , `` really `` , , aktualit\xe4tsprinzip , uniformit\xe4ts - or gleichf\xf6rmigkeitsprinzip , english uniformitarianism , is the fundamental scientific method in the .
320
German: die ( griechisch el , von altgriechisch grc , - " zusammen - " , " anbinden " , gemeint ist " die herzbeutel angehängte " ) , ist ein blutgefäß , welches das blut vom herz wegführt .
English: the ( griechisch el , , altgriechisch grc , - `` together - `` , `` anbinden `` , meant is `` the herzbeutel angeh\xe4ngte `` ) , is a blutgef\xe4\xdf , welches the blood vom heart wegf\xfchrt .
540
German: unter der bezeichnung fasst man die drei im nördlichen alpenvorland liegenden gewässereinheiten obersee , untersee und seerhein zusammen .
English: under the bezeichnung summarizes one the three , northern alpenvorland liegenden gew\xe4ssereinheiten obersee , untersee and seerhein together .
###Markdown
The translations of the first 5 queries according to Google Translate are as follows:

* 82: of ( . Of eng action : act, action , movement, ) is a film genre of entertainment cinema , in which the continued transition of the external action of mostly spectacularly staged battle - and violent scenes is advanced and illustrated .
* 116: ( unit sign : u for unified atomic mass unit , amu outdated for atomic mass unit ) is a unit of measure of mass .
* 240: of actualis from Latin , "real" , even actuality principle , uniformity - or gleichförmigkeitsprinzip , English uniformitarianism , is the basic scientific method in .
* 320: (Greek el , from Ancient Greek grc , - " together - " , " tie " , is meant " the heart bag attached" ) is a blood vessel that leads away the blood from the heart .
* 540: under the designation one summarizes the three lying in the northern waters alpenvorland units obersee , subsea and Seerhein together .

---

Translations obtained through Google Translate are obviously better. It's interesting to note that our own translation engine works well if a word-for-word translation is acceptable and the word pair has been encountered enough times in the bilingual corpora. Google Translate also performs better because it uses phrase-based translation, which is more sophisticated and accurate than word-for-word translation. Our engine also seems to work better for function words than for content words, since function words are encountered far more often in the bi-corpora and are therefore better aligned. The alignments were combined by taking the intersection of the forward and reverse alignments. Combining the two alignments improved things in the sense that the intersection got rid of the extra 'noise' in the alignments, so that only the most likely links remained (those that existed in both the forward and reverse directions).

Combining, and Evaluation

For the final bit, we'll create a function that translates a query and retrieves the relevant documents for it. Then, to evaluate the results of our CLIR engine, we'll use the [Mean Average Precision](https://www.youtube.com/watch?v=pM6DJ0ZZee0) (MAP) to judge the performance of the CLIR system. MAP is a standard evaluation metric used in IR.
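As a quick illustration of the metric, here is a minimal, self-contained sketch of average precision for a single query; the function name and document IDs below are made up for illustration and are not part of the CLIR engine:

```python
# Average precision for one query: precision is measured at the rank of each relevant
# document that is retrieved, then averaged over the total number of relevant documents.
def average_precision(ranked_docs, relevant_docs):
    hits = 0
    precisions = []
    for rank, doc in enumerate(ranked_docs, start=1):
        if doc in relevant_docs:
            hits += 1
            precisions.append(hits / float(rank))
    return sum(precisions) / len(relevant_docs) if relevant_docs else 0.0

# Toy example: relevant documents found at ranks 1 and 3 -> AP = (1/1 + 2/3) / 2 ~= 0.83
print(average_precision(['d1', 'd5', 'd2', 'd9'], {'d1', 'd2'}))
```

MAP is then simply this per-query value averaged over all queries.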
###Code
#Building a dictionary for queryids and relevant document ids
qrel = defaultdict(list)
f = open(DEVELOPMENT_QREL)
for line in f:
item = line.split('\t')
qrel[item[0]].append(item[2])
f.close()
#Single function to retrieve documents for a German query
def trans_retr_docs(german_query,no_of_results,translation_function):
trans_query = " ".join(extract_and_tokenize_terms(translation_function(german_query)))
    return [item[0] for item in retr_docs(trans_query,no_of_results)] #Retrieving the top no_of_results documents
#Calculating the map score
def calc_map(no_of_results,translation_function):
average_precision = []
for gq in german_qs.keys():
relevant_docs = qrel[gq]
incremental_precision = []
resulting_docs = trans_retr_docs(german_qs[gq],no_of_results,translation_function)
total_counter = 0
true_positive_counter = 0
for doc in resulting_docs:
total_counter+=1
if doc in relevant_docs:
true_positive_counter += 1
incremental_precision.append(true_positive_counter/total_counter)
        #For no relevant retrievals, the average precision will be considered 0.
try:
average_precision.append(sum(incremental_precision)/len(incremental_precision))
except:
average_precision.append(0)
return (sum(average_precision)/len(average_precision))
###Output
_____no_output_____
###Markdown
To keep runtime at a minimum, we'll only consider the top 100 returned results (documents) when calculating the MAP score.
###Code
#Printing the map score for direct translations
print calc_map(100,de_eng_direct)
#Printing the map score for noisy channel translations
print calc_map(100,de_eng_noisy_translate)
###Output
0.364795198505
|
charts/Yang,Wenxin_final.ipynb | ###Markdown
Code for the final project of MUSA620, 2019. Wenxin Yang | 2019.05 1 Preparation 1.1 Load data and packages 1.1.1 Import packages
###Code
import geopandas as gpd
import json
import pandas as pd
import requests
from io import StringIO
from shapely.geometry import Point
import matplotlib.pyplot as plt
from census_area import Census
###Output
_____no_output_____
###Markdown
1.1.2 load car crash data
###Code
url = 'https://data.boston.gov/dataset/7b29c1b2-7ec2-4023-8292-c24f5d8f0905/resource/e4bfe397-6bfc-49c5-9367-c879fac7401d/download/crash_april_2019.csv.csv'
r = requests.get(url)
df = pd.read_csv(StringIO(r.text))
df['coord'] = list(zip(df.long, df.lat))
df['coord'] = df['coord'].apply(Point)
crash = gpd.GeoDataFrame(df, geometry = 'coord')
crash.crs = ({'init':'epsg:26986'})
crash.head()
# convert time of car crash data
crash['dispatch_ts'] = pd.to_datetime(crash['dispatch_ts'], format='%Y-%m-%d %H:%M:%S')
crash['year'] = crash['dispatch_ts'].dt.year
crash['month'] = crash['dispatch_ts'].dt.month
crash['week'] = crash['dispatch_ts'].dt.week
###Output
_____no_output_____
###Markdown
1.1.3 load census data
###Code
my_api_key = '' # get api key for census data api
api_key = my_api_key
c = Census(key=api_key)
ma_code = 25
boston_code = '07000'
#from https://api.census.gov/data/2017/acs/acs5/variables.html
#B19013_001E is the code for median household income
variables = ('NAME', 'B19013_001E')
result = c.acs5.state_place(variables, ma_code,
boston_code, year=2017)
inc_tracts = c.acs5.state_place_tract(variables, ma_code,
boston_code,
return_geometry=True)
crs = {'init':'epsg:26986'}
inc_df = gpd.GeoDataFrame.from_features(inc_tracts, crs=crs)
len(inc_df)
inc_df = inc_df.loc[inc_df['B19013_001E']>0]
len(inc_df)
inc_df.head()
###Output
_____no_output_____
###Markdown
2 Visualizations 2.1 Visualizations created with folium
###Code
import folium
from folium.plugins import HeatMap
coordcrash = crash[['lat','long']].values
m1 = folium.Map(
location=[42.31, -71.10],
tiles='cartodbpositron',
zoom_start=11
)
HeatMap(coordcrash).add_to(m1)
m1
###Output
_____no_output_____
###Markdown
2.1.1 Heatmap with time
###Code
def generateBaseMap(default_location=[42.31, -71.10], default_zoom_start=11):
base_map = folium.Map(location=default_location, control_scale=True, zoom_start=default_zoom_start,tiles='stamentoner')
return base_map
df_year_list = []
for year in range(2015,2020):
for month in range(1,13):
df_year_list.append(crash.loc[(crash['year']==year) & (crash['month']==month)].groupby(['lat','long']).sum().reset_index().values.tolist())
len(df_year_list)
from folium.plugins import HeatMap, HeatMapWithTime  # HeatMapWithTime is needed for the time-series heatmap below
basemap = generateBaseMap(default_location=[42.31, -71.16],default_zoom_start=11)
HeatMapWithTime(df_year_list,radius=15,gradient={0.2:'blue',0.4:'lime',0.6:'orange',1:'red'},min_opacity=0.6,max_opacity=0.9,use_local_extrema=True).add_to(basemap)
basemap.save('final_heatmapwithtime.html')
ct2 = inc_df[['tract','OBJECTID','B19013_001E','STGEOMETRY.AREA','STGEOMETRY.LEN','geometry']]
ct2.to_file('ct2.geojson',driver='GeoJSON')
import branca
import json
import os
import folium
from folium.plugins import MarkerCluster
MarkerCluster()
colorscale = branca.colormap.linear.YlGnBu_09.scale(10000,220000)
def col(feature):
inc = feature['properties']['B19013_001E']
return {
'fillOpacity': 0.5,
'weight': 0,
        'fillColor': 'black' if inc is None else colorscale(inc)
}
ctgjson = json.load(open('ct2.geojson'))
###Output
_____no_output_____
###Markdown
2.1.2 Choropleth map of median income level & cluster of car crashes
###Code
m = folium.Map(
location=[42.31, -71.10],
tiles='stamentoner',
zoom_start=11
)
folium.GeoJson(
ctgjson,
name = 'Median Household Income Level',
style_function = col
).add_to(m)
marker_cluster = MarkerCluster(
name = 'Cluster of Car Crashes'
).add_to(m)
for point in range(len(crash)):
folium.Marker(coordcrash[point],
tooltip = 'Time of crash: '+str(crash['dispatch_ts'][point]),
icon = folium.Icon(
color = 'red',
icon_color = 'white',
icon = 'car',
angle = 0,
prefix = 'fa'
)).add_to(marker_cluster)
folium.LayerControl().add_to(m)
m
m.save('final_cluster_and_choro.html')
###Output
_____no_output_____
###Markdown
2.2 Visualizations created with altair 2.2.1 Chart of car crashes vs. transit mode & location type
###Code
crash.location_type.unique()
joined = gpd.sjoin(crash, inc_df, op='within', how='left')
# spatial join
joined1 = gpd.sjoin(crash, inc_df, op='within', how='left').groupby(['year','mode_type','location_type']).size().reset_index()
joined1.columns = ['year','mode_type','location_type','count']
joined1.head()
import altair as alt
pink_blue = alt.Scale(domain=('ped', 'mv','bike'),
range=["steelblue", "salmon","orange"])
slider = alt.binding_range(min=2015, max=2019, step=1)
select_year = alt.selection_single(name = 'select',fields=['year'],
bind=slider)
chart1 = alt.Chart(joined1).mark_bar().encode(
x=alt.X('mode_type:N', title=None),
y=alt.Y('count:Q', scale=alt.Scale(domain=(0, 2000)),title = 'Number of Crashes'),
color=alt.Color('mode_type:N', scale=pink_blue),
column='location_type:N'
).properties(
width=150
).add_selection(
select_year
).transform_filter(
select_year
)
chart1.save('final_count_crash_150.json')
###Output
_____no_output_____
###Markdown
2.2.2 Charts of car crashes vs. transit modes / location types over time
###Code
crash['date'] = pd.to_datetime(crash['year'].astype(str)+'-'+crash['month'].astype(str))
crash.head()
chart2df = crash.groupby(['date','mode_type']).size().reset_index()
chart3df = crash.groupby(['date','location_type']).size().reset_index()
chart2df.columns = ['date','mode_type','count']
chart3df.columns = ['date','location_type','count']
from datetime import datetime
nearest = alt.selection(type='single', nearest=True, on='mouseover',
fields=['date'], empty='none')
# The basic line
line = alt.Chart().mark_line().encode(
alt.X('date:T', axis=alt.Axis(title='')),
alt.Y('count:Q', axis=alt.Axis(title='',format='f')),
color='mode_type:N'
)
# Transparent selectors across the chart. This is what tells us
# the x-value of the cursor
selectors = alt.Chart().mark_point().encode(
x='date:T',
opacity=alt.value(0),
).add_selection(
nearest
)
# Draw points on the line, and highlight based on selection
points = line.mark_point().encode(
opacity=alt.condition(nearest, alt.value(1), alt.value(0))
)
# Draw text labels near the points, and highlight based on selection
text = line.mark_text(align='left', dx=5, dy=-5).encode(
text=alt.condition(nearest, 'count:Q', alt.value(' '))
)
# Draw a rule at the location of the selection
rules = alt.Chart().mark_rule(color='gray').encode(
x='date:T',
).transform_filter(
nearest
)
# Put the five layers into a chart and bind the data
stockChart = alt.layer(line, selectors, points, rules, text,
data=chart2df,
width=500, height=300,title='Car Crashes by Mode Type')
stockChart.save('final_car_crash_mode_time_400.json')
nearest = alt.selection(type='single', nearest=True, on='mouseover',
fields=['date'], empty='none')
# The basic line
line = alt.Chart().mark_line().encode(
alt.X('date:T', axis=alt.Axis(title='')),
alt.Y('count:Q', axis=alt.Axis(title='',format='f')),
color='location_type:N'
)
# Transparent selectors across the chart. This is what tells us
# the x-value of the cursor
selectors = alt.Chart().mark_point().encode(
x='date:T',
opacity=alt.value(0),
).add_selection(
nearest
)
# Draw points on the line, and highlight based on selection
points = line.mark_point().encode(
opacity=alt.condition(nearest, alt.value(1), alt.value(0))
)
# Draw text labels near the points, and highlight based on selection
text = line.mark_text(align='left', dx=5, dy=-5).encode(
text=alt.condition(nearest, 'count:Q', alt.value(' '))
)
# Draw a rule at the location of the selection
rules = alt.Chart().mark_rule(color='gray').encode(
x='date:T',
).transform_filter(
nearest
)
# Put the five layers into a chart and bind the data
loc_type = alt.layer(line, selectors, points, rules, text,
data=chart3df,
width=500, height=300,title='Car Crashes by Location Type')
loc_type.save('final_car_crash_location_time_400.json')
###Output
_____no_output_____ |
(t2) Deep Learning Computations/Shalaka_DL_DLComputations/Shalaka_DL_DLComputations_HPTClassification.ipynb | ###Markdown
Hyper-parameter Tuning of Machine Learning (ML) Models: Code for Classification Problems

`Dataset Used:` MNIST dataset

`Machine Learning Algorithms Used:`
* Random Forest (RF)
* Support Vector Machine (SVM)
* K-Nearest Neighbor (KNN)
* Artificial Neural Network (ANN)

`Hyper-parameter Tuning Algorithms Used:`
* Grid Search
* Random Search
* Bayesian Optimization with Gaussian Processes (BO-GP)
* Bayesian Optimization with Tree-structured Parzen Estimator (BO-TPE)

---
###Code
# Importing required libraries
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
import scipy.stats as stats
from sklearn import datasets
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report,confusion_matrix,accuracy_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
###Output
_____no_output_____
###Markdown
Loading the MNIST Dataset The Modified National Institute of Standards and Technology (MNIST) database is a large database of handwritten digits that is commonly used by people who want to try learning techniques and pattern recognition methods on real-world data while spending minimal effort on preprocessing and formatting. The full database has a training set of 60,000 examples and a test set of 10,000 examples, and is itself a subset of a larger set available from NIST. The digits have been size-normalized and centered in a fixed-size image. The scikit-learn `load_digits` version used in this notebook has 1,797 records and 64 columns (one 8×8 image per record). For more details about the dataset click here: [Details-1](http://yann.lecun.com/exdb/mnist/), [Details-2](https://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_digits.html#sklearn.datasets.load_digits)
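As a quick sanity check of the dimensions described above, the following minimal sketch (mirroring the loading cell below) prints the shapes returned by the scikit-learn loader:

```python
from sklearn import datasets

X, y = datasets.load_digits(return_X_y=True)
print(X.shape)  # (1797, 64): 1797 images, each flattened to 8x8 = 64 pixel intensities
print(y.shape)  # (1797,): one digit label (0-9) per image
```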
###Code
# Loading the dataset
X, y = datasets.load_digits(return_X_y=True)
datasets.load_digits()
###Output
_____no_output_____
###Markdown
Baseline Machine Learning Models: Classifier with default Hyper-parameters `Random Forest`
###Code
# Random Forest (RF) with 3-fold cross validation
RF_clf = RandomForestClassifier()
RF_clf.fit(X,y)
RF_scores = cross_val_score(RF_clf, X, y, cv = 3,scoring = 'accuracy')
print("Accuracy (RF): "+ str(RF_scores.mean()))
###Output
Accuracy (RF): 0.9365609348914857
###Markdown
`Support Vector Machine`
###Code
# Support Vector Machine (SVM)
SVM_clf = SVC(gamma='scale')
SVM_clf.fit(X,y)
SVM_scores = cross_val_score(SVM_clf, X, y, cv = 3,scoring = 'accuracy')
print("Accuracy (SVM): "+ str(SVM_scores.mean()))
###Output
Accuracy (SVM): 0.9699499165275459
###Markdown
`K-Nearest Neighbor`
###Code
# K-Nearest Neighbor (KNN)
KNN_clf = KNeighborsClassifier()
KNN_clf.fit(X,y)
KNN_scores = cross_val_score(KNN_clf, X, y, cv = 3,scoring='accuracy')
print("Accuracy (KNN):"+ str(KNN_scores.mean()))
###Output
Accuracy (KNN):0.9627156371730662
###Markdown
`Artificial Neural Network`
###Code
# Artificial Neural Network (ANN)
from keras.models import Sequential, Model
from keras.layers import Dense, Input
from keras.wrappers.scikit_learn import KerasClassifier
from keras.callbacks import EarlyStopping
def ann_model(optimizer = 'sgd',neurons = 32,batch_size = 32,epochs = 50,activation = 'relu',patience = 5,loss = 'categorical_crossentropy'):
model = Sequential()
model.add(Dense(neurons, input_shape = (X.shape[1],), activation = activation))
model.add(Dense(neurons, activation = activation))
model.add(Dense(10,activation='softmax'))
model.compile(optimizer = optimizer, loss = loss)
early_stopping = EarlyStopping(monitor = "loss", patience = patience)
history = model.fit(X, pd.get_dummies(y).values, batch_size = batch_size, epochs=epochs, callbacks = [early_stopping], verbose=0)
return model
ANN_clf = KerasClassifier(build_fn = ann_model, verbose = 0)
ANN_scores = cross_val_score(ANN_clf, X, y, cv = 3,scoring = 'accuracy')
print("Accuracy (ANN):"+ str(ANN_scores.mean()))
###Output
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/wrappers/scikit_learn.py:241: Sequential.predict_classes (from tensorflow.python.keras.engine.sequential) is deprecated and will be removed after 2021-01-01.
Instructions for updating:
Please use instead:* `np.argmax(model.predict(x), axis=-1)`, if your model does multi-class classification (e.g. if it uses a `softmax` last-layer activation).* `(model.predict(x) > 0.5).astype("int32")`, if your model does binary classification (e.g. if it uses a `sigmoid` last-layer activation).
Accuracy (ANN):0.9988870339454646
###Markdown
Hyper-parameter Tuning Algorithms `1] Grid Search`
###Code
from sklearn.model_selection import GridSearchCV
###Output
_____no_output_____
###Markdown
`Random Forest`
###Code
# Random Forest (RF)
RF_params = {
'n_estimators': [10, 20, 30],
'max_depth': [15,20,25,30,50],
"criterion":['gini','entropy']
}
RF_clf = RandomForestClassifier(random_state = 1)
RF_grid = GridSearchCV(RF_clf, RF_params, cv = 3, scoring = 'accuracy')
RF_grid.fit(X, y)
print(RF_grid.best_params_)
print("Accuracy (RF): "+ str(RF_grid.best_score_))
###Output
{'criterion': 'entropy', 'max_depth': 15, 'n_estimators': 30}
Accuracy (RF): 0.9343350027824151
###Markdown
`Support Vector Machine`
###Code
# Support Vector Machine (SVM)
SVM_params = {
'C': [1, 10, 20, 50, 100],
"kernel":['linear','poly','rbf','sigmoid']
}
SVM_clf = SVC(gamma='scale')
SVM_grid = GridSearchCV(SVM_clf, SVM_params, cv = 3, scoring = 'accuracy')
SVM_grid.fit(X, y)
print(SVM_grid.best_params_)
print("Accuracy:"+ str(SVM_grid.best_score_))
###Output
{'C': 10, 'kernel': 'rbf'}
Accuracy:0.9738452977184195
###Markdown
`K-Nearest Neighbor`
###Code
#K-Nearest Neighbor (KNN)
KNN_params = { 'n_neighbors': [2, 4, 6, 8] }
KNN_clf = KNeighborsClassifier()
KNN_grid = GridSearchCV(KNN_clf, KNN_params, cv = 3, scoring = 'accuracy')
KNN_grid.fit(X, y)
print(KNN_grid.best_params_)
print("Accuracy:"+ str(KNN_grid.best_score_))
###Output
{'n_neighbors': 4}
Accuracy:0.9638286032276016
###Markdown
`Artificial Neural Network`
###Code
# Artificial Neural Network (ANN)
ANN_params = {
'optimizer': ['adam','sgd'],
'activation': ['relu','tanh'],
'batch_size': [16,32],
'neurons':[16,32],
'epochs':[30,50],
'patience':[3,5]
}
ANN_clf = KerasClassifier(build_fn = ann_model, verbose = 0)
ANN_grid = GridSearchCV(ANN_clf, ANN_params, cv = 3,scoring = 'accuracy')
ANN_grid.fit(X, y)
print(ANN_grid.best_params_)
print("Accuracy (ANN): "+ str(ANN_grid.best_score_))
###Output
{'activation': 'relu', 'batch_size': 16, 'epochs': 50, 'neurons': 32, 'optimizer': 'adam', 'patience': 5}
Accuracy (ANN): 0.9994435169727324
###Markdown
`2] Random Search`
###Code
from sklearn.model_selection import RandomizedSearchCV
from random import randrange as sp_randrange
from scipy.stats import randint as sp_randint
###Output
_____no_output_____
###Markdown
`Random Forest`
###Code
# Random Forest (RF)
RF_params = {
'n_estimators': sp_randint(10,100),
'max_depth': sp_randint(5,50),
"criterion":['gini','entropy']
}
RF_clf = RandomForestClassifier(random_state = 1)
RF_Random = RandomizedSearchCV(RF_clf, param_distributions = RF_params, n_iter = 20,cv = 3,scoring = 'accuracy')
RF_Random.fit(X, y)
print(RF_Random.best_params_)
print("Accuracy (RF):"+ str(RF_Random.best_score_))
###Output
{'criterion': 'gini', 'max_depth': 10, 'n_estimators': 86}
Accuracy (RF):0.9476905954368391
###Markdown
`Support Vector Machine`
###Code
# Support Vector Machine(SVM)
SVM_params = {
'C': stats.uniform(1,50),
"kernel":['poly','rbf']
}
SVM_clf = SVC(gamma='scale')
SVM_Random = RandomizedSearchCV(SVM_clf, param_distributions = SVM_params, n_iter = 20,cv = 3,scoring = 'accuracy')
SVM_Random.fit(X, y)
print(SVM_Random.best_params_)
print("Accuracy (SVM): "+ str(SVM_Random.best_score_))
###Output
{'C': 33.97410441400006, 'kernel': 'rbf'}
Accuracy (SVM): 0.9738452977184195
###Markdown
`K-Nearest Neighbor`
###Code
# K-Nearest Neighbor (KNN)
KNN_params = {'n_neighbors': range(1,20)}
KNN_clf = KNeighborsClassifier()
KNN_Random = RandomizedSearchCV(KNN_clf, param_distributions = KNN_params,n_iter = 10,cv = 3,scoring = 'accuracy')
KNN_Random.fit(X, y)
print(KNN_Random.best_params_)
print("Accuracy (KNN): "+ str(KNN_Random.best_score_))
###Output
{'n_neighbors': 3}
Accuracy (KNN): 0.9682804674457429
###Markdown
`Artificial Neural Network`
###Code
# Artificial Neural Network (ANN)
ANN_params = {
'optimizer': ['adam','sgd'],
'activation': ['relu','tanh'],
'batch_size': [16,32],
'neurons':sp_randint(10,100),
'epochs':[30,50],
'patience':sp_randint(5,20)
}
ANN_clf = KerasClassifier(build_fn = ann_model, verbose = 0)
ANN_Random = RandomizedSearchCV(ANN_clf, param_distributions = ANN_params, n_iter = 10,cv = 3,scoring = 'accuracy')
ANN_Random.fit(X, y)
print(ANN_Random.best_params_)
print("Accuracy (ANN): "+ str(ANN_Random.best_score_))
###Output
{'activation': 'relu', 'batch_size': 16, 'epochs': 30, 'neurons': 89, 'optimizer': 'adam', 'patience': 5}
Accuracy (ANN): 1.0
###Markdown
`3] Bayesian Optimization with Gaussian Process (BO-GP)`
###Code
from skopt import Optimizer
from skopt import BayesSearchCV
from skopt.space import Real, Categorical, Integer
###Output
_____no_output_____
###Markdown
`Random Forest`
###Code
#Random Forest (RF)
RF_params = {
'n_estimators': Integer(10,100),
'max_depth': Integer(5,50),
"criterion":['gini','entropy']
}
RF_clf = RandomForestClassifier(random_state = 1)
RF_Bayes = BayesSearchCV(RF_clf, RF_params,cv = 3,n_iter = 20, n_jobs = -1,scoring = 'accuracy')
RF_Bayes.fit(X, y)
print(RF_Bayes.best_params_)
print("Accuracy (RF): "+ str(RF_Bayes.best_score_))
###Output
OrderedDict([('criterion', 'gini'), ('max_depth', 29), ('n_estimators', 81)])
Accuracy (RF): 0.9449081803005008
###Markdown
`Support Vector Machine`
###Code
# Support Vector Machine (SVM)
SVM_params = {
'C': Real(1,50),
"kernel":['poly','rbf']
}
SVM_clf = SVC(gamma = 'scale')
SVM_Bayes = BayesSearchCV(SVM_clf, SVM_params,cv = 3,n_iter = 20, n_jobs = -1,scoring = 'accuracy')
SVM_Bayes.fit(X, y)
print(SVM_Bayes.best_params_)
print("Accuracy (SVM): "+ str(SVM_Bayes.best_score_))
###Output
/usr/local/lib/python3.6/dist-packages/skopt/optimizer/optimizer.py:449: UserWarning: The objective has been evaluated at this point before.
warnings.warn("The objective has been evaluated "
/usr/local/lib/python3.6/dist-packages/skopt/optimizer/optimizer.py:449: UserWarning: The objective has been evaluated at this point before.
warnings.warn("The objective has been evaluated "
###Markdown
`K-Nearest Neighbor`
###Code
# K-Nearest Neighbor (KNN)
KNN_params = {'n_neighbors': Integer(1,20),}
KNN_clf = KNeighborsClassifier()
KNN_Bayes = BayesSearchCV(KNN_clf, KNN_params,cv = 3,n_iter = 10, n_jobs = -1,scoring = 'accuracy')
KNN_Bayes.fit(X, y)
print(KNN_Bayes.best_params_)
print("Accuracy (KNN): "+ str(KNN_Bayes.best_score_))
###Output
OrderedDict([('n_neighbors', 4)])
Accuracy (KNN): 0.9638286032276016
###Markdown
`Artificial Neural Network`
###Code
# Artificial Neural Network (ANN)
ANN_params = {
'optimizer': ['adam','sgd'],
'activation': ['relu','tanh'],
'batch_size': [16,32],
'neurons':Integer(10,100),
'epochs':[30,50],
'patience':Integer(5,20)
}
ANN_clf = KerasClassifier(build_fn = ann_model, verbose = 0)
ANN_Bayes = BayesSearchCV(ANN_clf, ANN_params,cv = 3,n_iter = 10, scoring = 'accuracy')
ANN_Bayes.fit(X, y)
print(ANN_Bayes.best_params_)
print("Accuracy (ANN): "+ str(ANN_Bayes.best_score_))
###Output
OrderedDict([('activation', 'relu'), ('batch_size', 22), ('epochs', 35), ('neurons', 83), ('optimizer', 'sgd'), ('patience', 13)])
Accuracy (ANN): 0.9994435169727324
###Markdown
`4] Bayesian Optimization with Tree-structured Parzen Estimator (BO-TPE)`
###Code
from sklearn.model_selection import StratifiedKFold
from hyperopt import hp, fmin, tpe, STATUS_OK, Trials
###Output
_____no_output_____
###Markdown
`Random Forest`
###Code
# Random Forest (RF)
def RF_fun(params):
params = {
'n_estimators': int(params['n_estimators']),
'max_features': int(params['max_features']),
"criterion":str(params['criterion'])
}
RF_clf = RandomForestClassifier(**params)
RF_score = cross_val_score(RF_clf, X, y, cv = StratifiedKFold(n_splits = 3),scoring = 'accuracy').mean()
return {'loss':-RF_score, 'status': STATUS_OK }
RF_space = {
'n_estimators': hp.quniform('n_estimators', 10, 100, 1),
"max_features":hp.quniform('max_features', 1, 32, 1),
"criterion":hp.choice('criterion',['gini','entropy'])
}
RF_best = fmin(fn = RF_fun, space = RF_space, algo = tpe.suggest, max_evals = 20)
print("Estimated optimum (RF): " +str(RF_best))
###Output
100%|██████████| 20/20 [00:12<00:00, 1.62it/s, best loss: -0.9410127991096271]
Estimated optimum (RF): {'criterion': 1, 'max_features': 4.0, 'n_estimators': 84.0}
###Markdown
`Support Vector Machine`
###Code
# Support Vector Machine (SVM)
def SVM_fun(params):
params = {
'C': abs(float(params['C'])),
"kernel":str(params['kernel'])
}
SVM_clf = SVC(gamma ='scale', **params)
SVM_score = cross_val_score(SVM_clf, X, y, cv = StratifiedKFold(n_splits = 3), scoring ='accuracy').mean()
return {'loss':-SVM_score, 'status': STATUS_OK }
SVM_space = {
'C': hp.normal('C', 0, 50),
"kernel":hp.choice('kernel',['poly','rbf'])
}
SVM_best = fmin(fn = SVM_fun, space = SVM_space, algo = tpe.suggest, max_evals = 20)
print("Estimated optimum (SVM): "+str(SVM_best))
###Output
100%|██████████| 20/20 [00:05<00:00, 3.91it/s, best loss: -0.9749582637729549]
Estimated optimum (SVM): {'C': 2.5830277799962245, 'kernel': 1}
###Markdown
`K-Nearest Neighbor`
###Code
# K-Nearest Neighbor (KNN)
def KNN_fun(params):
params = {'n_neighbors': abs(int(params['n_neighbors'])) }
KNN_clf = KNeighborsClassifier(**params)
KNN_score = cross_val_score(KNN_clf, X, y, cv = StratifiedKFold(n_splits=3), scoring='accuracy').mean()
return {'loss':-KNN_score, 'status': STATUS_OK }
KNN_space = {'n_neighbors': hp.quniform('n_neighbors', 1, 20, 1)}
KNN_best = fmin(fn = KNN_fun, space = KNN_space, algo = tpe.suggest, max_evals = 10)
print("Estimated optimum (KNN): "+str(KNN_best))
###Output
100%|██████████| 10/10 [00:03<00:00, 2.87it/s, best loss: -0.9638286032276016]
Estimated optimum (KNN): {'n_neighbors': 4.0}
###Markdown
`Artificial Neural Network`
###Code
# Artificial Neural Network (ANN)
def ANN_fun(params):
params = {
"optimizer":str(params['optimizer']),
"activation":str(params['activation']),
'batch_size': abs(int(params['batch_size'])),
'neurons': abs(int(params['neurons'])),
'epochs': abs(int(params['epochs'])),
'patience': abs(int(params['patience']))
}
ANN_clf = KerasClassifier(build_fn = ann_model,**params, verbose = 0)
ANN_score = -np.mean(cross_val_score(ANN_clf, X, y, cv=3, scoring = "accuracy"))
return {'loss':ANN_score, 'status': STATUS_OK }
ANN_space = {
"optimizer":hp.choice('optimizer',['adam','rmsprop','sgd']),
"activation":hp.choice('activation',['relu','tanh']),
'batch_size': hp.quniform('batch_size', 16, 32, 16),
'neurons': hp.quniform('neurons', 10, 100, 10),
'epochs': hp.quniform('epochs', 30, 50, 10),
'patience': hp.quniform('patience', 5, 20, 5),
}
ANN_best = fmin(fn = ANN_fun, space = ANN_space, algo = tpe.suggest, max_evals = 10)
print("Estimated optimum (ANN): "+str(ANN_best))
###Output
100%|██████████| 10/10 [03:07<00:00, 18.70s/it, best loss: -1.0]
Estimated optimum (ANN): {'activation': 0, 'batch_size': 16.0, 'epochs': 50.0, 'neurons': 80.0, 'optimizer': 0, 'patience': 10.0}
|
LoopingandListsandStuff.ipynb | ###Markdown
Looping and Lists and Stuff Section 1: Working with loops Read the instructions, then _add one line of code_ to complete the functions below. Function 1 The function `letters_one` takes in a `string` as an argument, and then `prints` out each letter on a new line. **Note:** do not `return` anything, only `print`. Do not change our code. Only add *ONE LINE* of code.
###Code
def letters_one(word):
for i in word:
print(i)
#Add your line of code below here#
#Add your line of code above here#
letters_one('Explore')
letters_one('2w3vc78u')
###Output
2
w
3
v
c
7
8
u
###Markdown
Function 2 The function `letters_two` takes in a `string` as an argument, and then `prints` out each letter on a new line. **Note:** do not `return` anything, only `print`. Do not change our code. Only add *ONE LINE* of code.
###Code
def letters_two(word):
for i in range(len(word)):
#Add your line of code below here#
print(word[i])
#Add your line of code above here#
letters_two('Erolpxe')
letters_two('u87cv3w2')
###Output
u
8
7
c
v
3
w
2
###Markdown
Function 3 The function `items_one` takes in a `List` as an argument, and then `prints` out each item on a new line. **Note:** do not `return` anything, only `print`. Do not change our code. Only add *ONE LINE* of code.
###Code
def items_one(a_list):
for i in a_list:
#Add your line of code below here#
print(i)
#Add your line of code above here#
items_one(['E','x','p','l','o','r','e'])
items_one(['2','w','3','v','c','7','8','u'])
###Output
2
w
3
v
c
7
8
u
###Markdown
Function 4 The function `items_two` takes in a `List` as an argument, and then `prints` out each item on a new line. **Note:** do not `return` anything, only `print`. Do not change our code. Only add *ONE LINE* of code.
###Code
def items_two(a_list):
for i in range(len(a_list)):
#Add your line of code below here#
print(a_list[i])
#Add your line of code above here#
items_two(['E','r','o','l','p','x','e'])
items_two(['u','8','7','c','v','3','w','2'])
###Output
u
8
7
c
v
3
w
2
###Markdown
Section 2: Manipulating Lists Read the instructions, then complete the functions. Function 1 The function `string_to_list` takes in a `String` as a parameter and `returns` a list where each item is a character in the `String`. If we pass ```Hello``` to the function, then it must `return` ```['H','e','l','l','o']```. **Note:** the function **MUST** `return`. It must **not** `print`.
###Code
def string_to_list(string_par):
#Add your code below here#
return list(string_par)
#Add your code above here#
string_to_list('Explore')
string_to_list('2w3vc78u')
###Output
_____no_output_____
###Markdown
Function 2 The function `string_to_int_list` takes in a `String` of characters and digits as a parameter and `returns` a list called `digits_list` where each item is the integer version of a digit in the string. If we pass ```'1ee7'``` to the function, then it must `return` ```[1,7]``` (**NOT** ```['1','7']```). **Note:** the function **MUST not** `print`. **Hint:** you can test if a character is a digit using `isdigit()`, e.g. `'2'.isdigit()` returns `True` but `'w'.isdigit()` returns `False`.
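A quick demonstration of the hint (the strings below are made up for illustration):

```python
print('2'.isdigit())   # True
print('w'.isdigit())   # False
print('42'.isdigit())  # True -- isdigit() checks every character of the string
```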
###Code
def string_to_int_list(mixed_string):
digits_list = []
#Add your code below here#
mixed_string = list(mixed_string)
for item in mixed_string:
if item.isdigit():
digits_list.append(int(item))
#Add your line of code above here#
return digits_list
string_to_int_list('2w3vc78u')
string_to_int_list('1337')
###Output
_____no_output_____ |
Reducer/min_max_reducer.ipynb | ###Markdown
View source on GitHub Notebook Viewer Run in binder Run in Google Colab Install Earth Engine APIInstall the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geehydro](https://github.com/giswqs/geehydro). The **geehydro** Python package builds on the [folium](https://github.com/python-visualization/folium) package and implements several methods for displaying Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, `Map.centerObject()`, and `Map.setOptions()`.The magic command `%%capture` can be used to hide output from a specific cell. Uncomment these lines if you are running this notebook for the first time.
###Code
# %%capture
# !pip install earthengine-api
# !pip install geehydro
###Output
_____no_output_____
###Markdown
Import libraries
###Code
import ee
import folium
import geehydro
###Output
_____no_output_____
###Markdown
Authenticate and initialize Earth Engine API. You only need to authenticate the Earth Engine API once. Uncomment the line `ee.Authenticate()` if you are running this notebook for the first time or if you are getting an authentication error.
###Code
# ee.Authenticate()
ee.Initialize()
###Output
_____no_output_____
###Markdown
Create an interactive map This step creates an interactive map using [folium](https://github.com/python-visualization/folium). The default basemap is the OpenStreetMap. Additional basemaps can be added using the `Map.setOptions()` function. The optional basemaps can be `ROADMAP`, `SATELLITE`, `HYBRID`, `TERRAIN`, or `ESRI`.
###Code
Map = folium.Map(location=[40, -100], zoom_start=4)
Map.setOptions('HYBRID')
###Output
_____no_output_____
###Markdown
Add Earth Engine Python script
###Code
# Load and filter the Sentinel-2 image collection.
collection = ee.ImageCollection('COPERNICUS/S2') \
.filterDate('2016-01-01', '2016-12-31') \
.filterBounds(ee.Geometry.Point([-81.31, 29.90]))
# Reduce the collection.
extrema = collection.reduce(ee.Reducer.minMax())
# print(extrema.getInfo())
min_image = extrema.select(0)
max_image = extrema.select(1)
Map.setCenter(-81.31, 29.90, 10)
Map.addLayer(min_image, {}, 'Min image')
Map.addLayer(max_image, {}, 'Max image')
###Output
_____no_output_____
###Markdown
Display Earth Engine data layers
###Code
Map.setControlVisibility(layerControl=True, fullscreenControl=True, latLngPopup=True)
Map
###Output
_____no_output_____
###Markdown
View source on GitHub Notebook Viewer Run in Google Colab Install Earth Engine API and geemapInstall the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://github.com/giswqs/geemap). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`.The following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemapdependencies), including earthengine-api, folium, and ipyleaflet.**Important note**: A key difference between folium and ipyleaflet is that ipyleaflet is built upon ipywidgets and allows bidirectional communication between the front-end and the backend enabling the use of the map to capture user input, while folium is meant for displaying static data only ([source](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a)). Note that [Google Colab](https://colab.research.google.com/) currently does not support ipyleaflet ([source](https://github.com/googlecolab/colabtools/issues/60issuecomment-596225619)). Therefore, if you are using geemap with Google Colab, you should use [`import geemap.eefolium`](https://github.com/giswqs/geemap/blob/master/geemap/eefolium.py). If you are using geemap with [binder](https://mybinder.org/) or a local Jupyter notebook server, you can use [`import geemap`](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py), which provides more functionalities for capturing user input (e.g., mouse-clicking and moving).
###Code
# Installs geemap package
import subprocess
try:
import geemap
except ImportError:
print('geemap package not installed. Installing ...')
subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap'])
# Checks whether this notebook is running on Google Colab
try:
import google.colab
import geemap.eefolium as emap
except:
import geemap as emap
# Authenticates and initializes Earth Engine
import ee
try:
ee.Initialize()
except Exception as e:
ee.Authenticate()
ee.Initialize()
###Output
_____no_output_____
###Markdown
Create an interactive map The default basemap is `Google Satellite`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/geemap.pyL13) can be added using the `Map.add_basemap()` function.
###Code
Map = emap.Map(center=[40,-100], zoom=4)
Map.add_basemap('ROADMAP') # Add Google Map
Map
###Output
_____no_output_____
###Markdown
Add Earth Engine Python script
###Code
# Add Earth Engine dataset
# Load and filter the Sentinel-2 image collection.
collection = ee.ImageCollection('COPERNICUS/S2') \
.filterDate('2016-01-01', '2016-12-31') \
.filterBounds(ee.Geometry.Point([-81.31, 29.90]))
# Reduce the collection.
extrema = collection.reduce(ee.Reducer.minMax())
# print(extrema.getInfo())
min_image = extrema.select(0)
max_image = extrema.select(1)
Map.setCenter(-81.31, 29.90, 10)
Map.addLayer(min_image, {}, 'Min image')
Map.addLayer(max_image, {}, 'Max image')
###Output
_____no_output_____
###Markdown
Display Earth Engine data layers
###Code
Map.addLayerControl() # This line is not needed for ipyleaflet-based Map.
Map
###Output
_____no_output_____
###Markdown
View source on GitHub Notebook Viewer Run in Google Colab Install Earth Engine API and geemapInstall the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://geemap.org). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`.The following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemapdependencies), including earthengine-api, folium, and ipyleaflet.
###Code
# Installs geemap package
import subprocess
try:
import geemap
except ImportError:
print('Installing geemap ...')
subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap'])
import ee
import geemap
###Output
_____no_output_____
###Markdown
Create an interactive map The default basemap is `Google Maps`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/basemaps.py) can be added using the `Map.add_basemap()` function.
###Code
Map = geemap.Map(center=[40,-100], zoom=4)
Map
###Output
_____no_output_____
###Markdown
Add Earth Engine Python script
###Code
# Add Earth Engine dataset
# Load and filter the Sentinel-2 image collection.
collection = ee.ImageCollection('COPERNICUS/S2') \
.filterDate('2016-01-01', '2016-12-31') \
.filterBounds(ee.Geometry.Point([-81.31, 29.90]))
# Reduce the collection.
extrema = collection.reduce(ee.Reducer.minMax())
# print(extrema.getInfo())
min_image = extrema.select(0)
max_image = extrema.select(1)
Map.setCenter(-81.31, 29.90, 10)
Map.addLayer(min_image, {}, 'Min image')
Map.addLayer(max_image, {}, 'Max image')
###Output
_____no_output_____
###Markdown
Display Earth Engine data layers
###Code
Map.addLayerControl() # This line is not needed for ipyleaflet-based Map.
Map
###Output
_____no_output_____
###Markdown
View source on GitHub Notebook Viewer Run in binder Run in Google Colab Install Earth Engine APIInstall the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geehydro](https://github.com/giswqs/geehydro). The **geehydro** Python package builds on the [folium](https://github.com/python-visualization/folium) package and implements several methods for displaying Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, `Map.centerObject()`, and `Map.setOptions()`.The following script checks if the geehydro package has been installed. If not, it will install geehydro, which automatically install its dependencies, including earthengine-api and folium.
###Code
import subprocess
try:
import geehydro
except ImportError:
print('geehydro package not installed. Installing ...')
subprocess.check_call(["python", '-m', 'pip', 'install', 'geehydro'])
###Output
_____no_output_____
###Markdown
Import libraries
###Code
import ee
import folium
import geehydro
###Output
_____no_output_____
###Markdown
Authenticate and initialize Earth Engine API. You only need to authenticate the Earth Engine API once.
###Code
try:
ee.Initialize()
except Exception as e:
ee.Authenticate()
ee.Initialize()
###Output
_____no_output_____
###Markdown
Create an interactive map This step creates an interactive map using [folium](https://github.com/python-visualization/folium). The default basemap is the OpenStreetMap. Additional basemaps can be added using the `Map.setOptions()` function. The optional basemaps can be `ROADMAP`, `SATELLITE`, `HYBRID`, `TERRAIN`, or `ESRI`.
###Code
Map = folium.Map(location=[40, -100], zoom_start=4)
Map.setOptions('HYBRID')
###Output
_____no_output_____
###Markdown
Add Earth Engine Python script
###Code
# Load and filter the Sentinel-2 image collection.
collection = ee.ImageCollection('COPERNICUS/S2') \
.filterDate('2016-01-01', '2016-12-31') \
.filterBounds(ee.Geometry.Point([-81.31, 29.90]))
# Reduce the collection.
extrema = collection.reduce(ee.Reducer.minMax())
# print(extrema.getInfo())
min_image = extrema.select(0)
max_image = extrema.select(1)
Map.setCenter(-81.31, 29.90, 10)
Map.addLayer(min_image, {}, 'Min image')
Map.addLayer(max_image, {}, 'Max image')
###Output
_____no_output_____
###Markdown
Display Earth Engine data layers
###Code
Map.setControlVisibility(layerControl=True, fullscreenControl=True, latLngPopup=True)
Map
###Output
_____no_output_____
###Markdown
View source on GitHub Notebook Viewer Run in Google Colab Install Earth Engine API and geemapInstall the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://github.com/giswqs/geemap). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`.The following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemapdependencies), including earthengine-api, folium, and ipyleaflet.**Important note**: A key difference between folium and ipyleaflet is that ipyleaflet is built upon ipywidgets and allows bidirectional communication between the front-end and the backend enabling the use of the map to capture user input, while folium is meant for displaying static data only ([source](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a)). Note that [Google Colab](https://colab.research.google.com/) currently does not support ipyleaflet ([source](https://github.com/googlecolab/colabtools/issues/60issuecomment-596225619)). Therefore, if you are using geemap with Google Colab, you should use [`import geemap.eefolium`](https://github.com/giswqs/geemap/blob/master/geemap/eefolium.py). If you are using geemap with [binder](https://mybinder.org/) or a local Jupyter notebook server, you can use [`import geemap`](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py), which provides more functionalities for capturing user input (e.g., mouse-clicking and moving).
###Code
# Installs geemap package
import subprocess
try:
import geemap
except ImportError:
print('geemap package not installed. Installing ...')
subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap'])
# Checks whether this notebook is running on Google Colab
try:
import google.colab
import geemap.eefolium as geemap
except:
import geemap
# Authenticates and initializes Earth Engine
import ee
try:
ee.Initialize()
except Exception as e:
ee.Authenticate()
ee.Initialize()
###Output
_____no_output_____
###Markdown
Create an interactive map The default basemap is `Google Maps`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/basemaps.py) can be added using the `Map.add_basemap()` function.
###Code
Map = geemap.Map(center=[40,-100], zoom=4)
Map
###Output
_____no_output_____
###Markdown
Add Earth Engine Python script
###Code
# Add Earth Engine dataset
# Load and filter the Sentinel-2 image collection.
collection = ee.ImageCollection('COPERNICUS/S2') \
.filterDate('2016-01-01', '2016-12-31') \
.filterBounds(ee.Geometry.Point([-81.31, 29.90]))
# Reduce the collection.
extrema = collection.reduce(ee.Reducer.minMax())
# print(extrema.getInfo())
min_image = extrema.select(0)
max_image = extrema.select(1)
Map.setCenter(-81.31, 29.90, 10)
Map.addLayer(min_image, {}, 'Min image')
Map.addLayer(max_image, {}, 'Max image')
###Output
_____no_output_____
###Markdown
Display Earth Engine data layers
###Code
Map.addLayerControl() # This line is not needed for ipyleaflet-based Map.
Map
###Output
_____no_output_____
###Markdown
View source on GitHub Notebook Viewer Run in binder Run in Google Colab Install Earth Engine APIInstall the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geehydro](https://github.com/giswqs/geehydro). The **geehydro** Python package builds on the [folium](https://github.com/python-visualization/folium) package and implements several methods for displaying Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, `Map.centerObject()`, and `Map.setOptions()`.The magic command `%%capture` can be used to hide output from a specific cell.
###Code
# %%capture
# !pip install earthengine-api
# !pip install geehydro
###Output
_____no_output_____
###Markdown
Import libraries
###Code
import ee
import folium
import geehydro
###Output
_____no_output_____
###Markdown
Authenticate and initialize Earth Engine API. You only need to authenticate the Earth Engine API once. Uncomment the line `ee.Authenticate()` if you are running this notebook for the first time or if you are getting an authentication error.
###Code
# ee.Authenticate()
ee.Initialize()
###Output
_____no_output_____
###Markdown
Create an interactive map This step creates an interactive map using [folium](https://github.com/python-visualization/folium). The default basemap is the OpenStreetMap. Additional basemaps can be added using the `Map.setOptions()` function. The optional basemaps can be `ROADMAP`, `SATELLITE`, `HYBRID`, `TERRAIN`, or `ESRI`.
###Code
Map = folium.Map(location=[40, -100], zoom_start=4)
Map.setOptions('HYBRID')
###Output
_____no_output_____
###Markdown
Add Earth Engine Python script
###Code
# Load and filter the Sentinel-2 image collection.
collection = ee.ImageCollection('COPERNICUS/S2') \
.filterDate('2016-01-01', '2016-12-31') \
.filterBounds(ee.Geometry.Point([-81.31, 29.90]))
# Reduce the collection.
extrema = collection.reduce(ee.Reducer.minMax())
# print(extrema.getInfo())
min_image = extrema.select(0)
max_image = extrema.select(1)
Map.setCenter(-81.31, 29.90, 10)
Map.addLayer(min_image, {}, 'Min image')
Map.addLayer(max_image, {}, 'Max image')
###Output
_____no_output_____
###Markdown
Display Earth Engine data layers
###Code
Map.setControlVisibility(layerControl=True, fullscreenControl=True, latLngPopup=True)
Map
###Output
_____no_output_____
###Markdown
Pydeck Earth Engine IntroductionThis is an introduction to using [Pydeck](https://pydeck.gl) and [Deck.gl](https://deck.gl) with [Google Earth Engine](https://earthengine.google.com/) in Jupyter Notebooks. If you wish to run this locally, you'll need to install some dependencies. Installing into a new Conda environment is recommended. To create and enter the environment, run:```conda create -n pydeck-ee -c conda-forge python jupyter notebook pydeck earthengine-api requests -ysource activate pydeck-eejupyter nbextension install --sys-prefix --symlink --overwrite --py pydeckjupyter nbextension enable --sys-prefix --py pydeck```then open Jupyter Notebook with `jupyter notebook`. Now in a Python Jupyter Notebook, let's first import required packages:
###Code
from pydeck_earthengine_layers import EarthEngineLayer
import pydeck as pdk
import requests
import ee
###Output
_____no_output_____
###Markdown
AuthenticationUsing Earth Engine requires authentication. If you don't have a Google account approved for use with Earth Engine, you'll need to request access. For more information and to sign up, go to https://signup.earthengine.google.com/. If you haven't used Earth Engine in Python before, you'll need to run the following authentication command. If you've previously authenticated in Python or the command line, you can skip the next line.Note that this creates a prompt which waits for user input. If you don't see a prompt, you may need to authenticate on the command line with `earthengine authenticate` and then return here, skipping the Python authentication.
###Code
try:
ee.Initialize()
except Exception as e:
ee.Authenticate()
ee.Initialize()
###Output
_____no_output_____
###Markdown
Create MapNext it's time to create a map. Here we create an `ee.Image` object
###Code
# Initialize objects
ee_layers = []
view_state = pdk.ViewState(latitude=37.7749295, longitude=-122.4194155, zoom=10, bearing=0, pitch=45)
# %%
# Add Earth Engine dataset
# Load and filter the Sentinel-2 image collection.
collection = ee.ImageCollection('COPERNICUS/S2') \
.filterDate('2016-01-01', '2016-12-31') \
.filterBounds(ee.Geometry.Point([-81.31, 29.90]))
# Reduce the collection.
extrema = collection.reduce(ee.Reducer.minMax())
# print(extrema.getInfo())
min_image = extrema.select(0)
max_image = extrema.select(1)
view_state = pdk.ViewState(longitude=-81.31, latitude=29.90, zoom=10)
ee_layers.append(EarthEngineLayer(ee_object=min_image, vis_params={}))
ee_layers.append(EarthEngineLayer(ee_object=max_image, vis_params={}))
###Output
_____no_output_____
###Markdown
Then just pass these layers to a `pydeck.Deck` instance, and call `.show()` to create a map:
###Code
r = pdk.Deck(layers=ee_layers, initial_view_state=view_state)
r.show()
###Output
_____no_output_____ |
Chapter01/TensorFlow Estimator API.ipynb | ###Markdown
Tensorflow Estimator API **Getting ready...**
###Code
install.packages("tfestimators")
library(tfestimators)
# training data
x_data_df <- as.data.frame( matrix(rnorm(1000*784), nrow = 1000, ncol = 784))
y_data_df <- as.data.frame(matrix(rnorm(1000), nrow = 1000, ncol = 1))
colnames(y_data_df)<- c("target")
dummy_data_estimator <- cbind(x_data_df,y_data_df)
###Output
_____no_output_____
###Markdown
**How to do it...**
###Code
# feature columns
features_set <- setdiff(names(dummy_data_estimator), "target")
# construct feature columns
feature_cols <- feature_columns(
column_numeric(features_set)
)
# construct input function
estimator_input_fn <- function(data_,num_epochs = 1) {
input_fn(data_, features = features_set, response = "target",num_epochs = num_epochs )
}
# construct regressor
regressor <- dnn_regressor(
feature_columns = feature_cols,
hidden_units = c(5, 10, 8),
label_dimension = 1L,
activation_fn = "relu"
)
# train regressor with training dataset
train(regressor, input_fn = estimator_input_fn(data_ = dummy_data_estimator))
# test data
x_data_test_df <- as.data.frame( matrix(rnorm(100*784), nrow = 100, ncol = 784))
y_data_test_df <- as.data.frame(matrix(rnorm(100), nrow = 100, ncol = 1))
colnames(y_data_test_df)<- c("target")
dummy_data_test_df <- cbind(x_data_test_df,y_data_test_df)
# predict with test dataset
predictions <- predict(regressor, input_fn = estimator_input_fn(dummy_data_test_df), predict_keys = c("predictions"))
# evaluate with test dataset
evaluation <- evaluate(regressor, input_fn = estimator_input_fn(dummy_data_test_df))
evaluation
###Output
_____no_output_____
###Markdown
**There is more...**
###Code
training_history <- train(regressor,
input_fn = estimator_input_fn(data_ = dummy_data_estimator),
hooks = list(hook_history_saver(every_n_step = 2))
)
###Output
_____no_output_____ |
docs/source/examples/geochem/mineral_endmembers.ipynb | ###Markdown
Mineral Endmember Decomposition=================================A common task when working with mineral chemistry data is to take measured compositions and decompose these into relative proportions of mineral endmember compositions. pyrolite includes some utilities to achieve this and a limited mineral database for looking up endmember compositions. This part of the package is being actively developed, so expect expansions and improvements soon.
###Code
import pandas as pd
import numpy as np
from pyrolite.mineral.mindb import get_mineral
from pyrolite.mineral.normative import endmember_decompose
###Output
_____no_output_____
###Markdown
First we'll start with a composition of an unknown olivine:
###Code
comp = pd.Series({"MgO": 42.06, "SiO2": 39.19, "FeO": 18.75})
###Output
_____no_output_____
###Markdown
We can break this down into olivine endmembers using the :func:`~pyrolite.mineral.transform.endmember_decompose` function:
###Code
ed = endmember_decompose(
pd.DataFrame(comp).T, endmembers="olivine", ord=1, molecular=True
)
ed
###Output
_____no_output_____
###Markdown
Equally, if you knew the likely endmembers beforehand, you could specify a list of endmembers:
###Code
ed = endmember_decompose(
pd.DataFrame(comp).T, endmembers=["forsterite", "fayalite"], ord=1, molecular=True
)
ed
###Output
_____no_output_____
###Markdown
We can check this by recombining the components with these proportions. We can first look up the compositions for our endmembers:
###Code
em = pd.DataFrame([get_mineral("forsterite"), get_mineral("fayalite")])
em.loc[:, ~(em == 0).all(axis=0)] # columns not full of zeros
###Output
_____no_output_____
###Markdown
First we have to convert these element-based compositions to oxide-based compositions:
###Code
emvalues = (
em.loc[:, ["Mg", "Si", "Fe"]]
.pyrochem.to_molecular()
.fillna(0)
.pyrochem.convert_chemistry(to=["MgO", "SiO2", "FeO"], molecular=True)
.fillna(0)
.pyrocomp.renormalise(scale=1)
)
emvalues
###Output
_____no_output_____
###Markdown
These can now be used with our endmember proportions to regenerate a composition:
###Code
recombined = pd.DataFrame(ed.values.flatten() @ emvalues).T.pyrochem.to_weight()
recombined
###Output
_____no_output_____
###Markdown
To make sure these compositions are within 0.01 percent:
###Code
assert np.allclose(recombined.values, comp.values, rtol=10 ** -4)
###Output
_____no_output_____ |
ukpsummarizer-be/cplex/python/examples/mp/jupyter/incremental_modeling.ipynb | ###Markdown
Incremental modeling with decision optimizationThis tutorial includes everything you need to set up decision optimization engines, build a mathematical programming model, then incrementally modify it.You will learn how to:- change coefficients in an expression- add terms in an expression- modify constraints and variables bounds- remove/add constraints- play with relaxationsWhen you finish this tutorial, you'll have a foundational knowledge of _Prescriptive Analytics_.>This notebook is part of the **[Prescriptive Analytics for Python](https://rawgit.com/IBMDecisionOptimization/docplex-doc/master/docs/index.html)**>It requires a valid subscription to **Decision Optimization on the Cloud** or a **local installation of CPLEX Optimizers**. Discover us [here](https://developer.ibm.com/docloud)Table of contents:- [Describe the business problem](Describe-the-business-problem:--Games-Scheduling-in-the-National-Football-League)* [How decision optimization (prescriptive analytics) can help](How--decision-optimization-can-help)* [Use decision optimization](Use-decision-optimization) * [Step 1: Download the library](Step-1:-Download-the-library) * [Step 2: Set up the engines](Step-2:-Set-up-the-prescriptive-engine) * [Step 3: Set up the prescriptive model](Step-3:-Set-up-the-prescriptive-model) * [Step 4: Modify the model](Step-4:-Modify-the-model)* [Summary](Summary)**** Describe the business problem: Telephone productionA possible descriptive model of the telephone production problem is as follows:* Decision variables: * Number of desk phones produced (DeskProduction) * Number of cellular phones produced (CellProduction)Objective: Maximize profit* Constraints: * The DeskProduction should be greater than or equal to 100. * The CellProduction should be greater than or equal to 100. * The assembly time for DeskProduction plus the assembly time for CellProduction should not exceed 400 hours. * The painting time for DeskProduction plus the painting time for CellProduction should not exceed 490 hours.This is a type of discrete optimization problem that can be solved by using either **Integer Programming** (IP) or **Constraint Programming** (CP). > **Integer Programming** is the class of problems defined as the optimization of a linear function, subject to linear constraints over integer variables. > **Constraint Programming** problems generally have discrete decision variables, but the constraints can be logical, and the arithmetic expressions are not restricted to being linear. For the purposes of this tutorial, we will illustrate a solution with mathematical programming (MP). How decision optimization can help* Prescriptive analytics (decision optimization) technology recommends actions that are based on desired outcomes. It takes into account specific scenarios, resources, and knowledge of past and current events. With this insight, your organization can make better decisions and have greater control of business outcomes. * Prescriptive analytics is the next step on the path to insight-based actions. It creates value through synergy with predictive analytics, which analyzes data to predict future outcomes. * Prescriptive analytics takes that insight to the next level by suggesting the optimal way to handle that future situation. Organizations that can act fast in dynamic conditions and make superior decisions in uncertain environments gain a strong competitive advantage. 
With prescriptive analytics, you can: * Automate the complex decisions and trade-offs to better manage your limited resources.* Take advantage of a future opportunity or mitigate a future risk.* Proactively update recommendations based on changing events.* Meet operational goals, increase customer loyalty, prevent threats and fraud, and optimize business processes. Use decision optimization Step 1: Download the libraryRun the following code to install Decision Optimization CPLEX Modeling library. The *DOcplex* library contains the two modeling packages, Mathematical Programming and Constraint Programming, referred to earlier.
###Code
import docplex
check = (docplex.__version__ >= '2.1')
if check is False:
!conda install -y -c ibmdecisionoptimization docplex
###Output
_____no_output_____
###Markdown
A restart of the kernel might be needed. Step 2: Set up the prescriptive engine* Subscribe to the [Decision Optimization on Cloud solve service](https://developer.ibm.com/docloud).* Get the service URL and your personal API key.
###Code
from docplex.mp.model import *
SVC_URL = "ENTER YOUR URL HERE"
SVC_KEY = "ENTER YOUR KEY HERE"
###Output
_____no_output_____
###Markdown
Step 3: Set up the prescriptive model Writing a mathematical model Convert the descriptive model into a mathematical model:* Use the two decision variables DeskProduction and CellProduction* Use the data given in the problem description (remember to convert minutes to hours where appropriate)* Write the objective as a mathematical expression* Write the constraints as mathematical expressions (use “<=”, “>=”, and “=”, and name the constraints to describe their purpose)* Define the domain for the decision variables Telephone production: a mathematical model To express the last two constraints, we model assembly time and painting time as linear combinations of the two productions, resulting in the following mathematical model: maximize: 12 desk_production + 20 cell_production subject to: desk_production >= 100 cell_production >= 100 0.2 desk_production + 0.4 cell_production <= 400 0.5 desk_production + 0.4 cell_production <= 490
###Code
# first import the Model class from docplex.mp
from docplex.mp.model import Model
# create one model instance, with a name
m = Model(name='telephone_production')
###Output
_____no_output_____
###Markdown
The integer variable desk represents the production of desk telephones. The integer variable cell represents the production of cell phones.
###Code
# by default, all variables in Docplex have a lower bound of 0 and infinite upper bound
desk = m.integer_var(name='desk')
cell = m.integer_var(name='cell')
m.maximize(12 * desk + 20 * cell)
# write constraints
# constraint #1: desk production is greater than 100
m.add_constraint(desk >= 100, "desk")
# constraint #2: cell production is greater than 100
m.add_constraint(cell >= 100, "cell")
# constraint #3: assembly time limit
ct_assembly = m.add_constraint( 0.2 * desk + 0.4 * cell <= 400, "assembly_limit")
# constraint #4: painting time limit
ct_painting = m.add_constraint( 0.5 * desk + 0.4 * cell <= 490, "painting_limit")
###Output
_____no_output_____
###Markdown
Solve with Decision Optimization solve service If url and key are None, the Modeling layer will look for a local runtime, otherwise it will use the credentials. Look at the documentation for a good understanding of the various solving/generation modes. If you're using a Community Edition of CPLEX runtimes, depending on the size of the problem, the solve stage may fail and will need a paying subscription or product installation. You will get the best solution found after ***n*** seconds, thanks to a time limit parameter.
###Code
m.print_information()
msol = m.solve(url=SVC_URL, key=SVC_KEY)
assert msol is not None, "model can't solve"
m.print_solution()
###Output
objective: 20600
desk=300
cell=850
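###Markdown
The cell above solves without an explicit time limit, even though one was mentioned. A minimal sketch of setting one, assuming the standard docplex CPLEX parameter interface (the 30-second value is an arbitrary choice):
###Code
# Sketch: cap subsequent solves at 30 seconds via the CPLEX time limit parameter;
# any following m.solve(...) call will then respect this limit.
m.parameters.timelimit = 30
###Output
_____no_output_____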
###Markdown
Step 4: Modify the model Modify constraints and variables bounds The model object provides getters to retrieve variables and constraints by name:* get_var_by_name* get_constraint_by_name The variable and constraint objects both provide properties to access the right hand side (rhs) and left hand side (lhs). When you modify a rhs or lhs of a variable, you of course need to give a number. When you modify a rhs or lhs of a constraint, you can give a number or an expression based on variables. Let's say we want to build 2000 cells and 1000 desks maximum. And let's say we want to increase the production of both of them from 100 to 350
###Code
# Access by name
m.get_var_by_name("desk").ub = 2000
# acess via the object
cell.ub = 1000
m.get_constraint_by_name("desk").rhs = 350
m.get_constraint_by_name("cell").rhs = 350
msol = m.solve(url=SVC_URL, key=SVC_KEY)
assert msol is not None, "model can't solve"
m.print_solution()
###Output
objective: 19940
desk=350
cell=787
###Markdown
The production plan has been updated according to our small changes. Modify expressions We now want to introduce a new type of product: the "hybrid" telephone.
###Code
hybrid = m.integer_var(name='hybrid')
###Output
_____no_output_____
###Markdown
We need to:- introduce it in the objective- introduce it in the existing painting and assembly time constraints - add a new constraint for its production to produce at least 350 of them.
###Code
m.add_constraint(hybrid >= 350)
;
###Output
_____no_output_____
###Markdown
The objective will move from maximize: 12 desk_production + 20 cell_production to maximize: 12 desk_production + 20 cell_production + 10 hybrid_production
###Code
m.get_objective_expr().add_term(hybrid, 10)
;
###Output
_____no_output_____
###Markdown
The time constraints will be updated from 0.2 desk_production + 0.4 cell_production <= 400 and 0.5 desk_production + 0.4 cell_production <= 490 to 0.2 desk_production + 0.4 cell_production + 0.2 hybrid_production <= 400 and 0.5 desk_production + 0.4 cell_production + 0.2 hybrid_production <= 490. When you add a constraint to a model, its object is returned to you by the method add_constraint. If you don't have it, you can access it via its name.
###Code
m.get_constraint_by_name("assembly_limit").lhs.add_term(hybrid, 0.2)
ct_painting.lhs.add_term(hybrid, 0.2)
;
###Output
_____no_output_____
###Markdown
We can now compute the new production plan for our 3 products
###Code
msol = m.solve(url=SVC_URL, key=SVC_KEY)
assert msol is not None, "model can't solve"
m.print_solution()
###Output
objective: 19950
desk=350
cell=612
hybrid=351
###Markdown
Let's now say we improved our painting process: the distribution of the coefficients in the painting limit is no longer [0.5, 0.4, 0.2] but [0.1, 0.1, 0.1]. When you have a handle on an expression, you can modify its coefficients variable by variable with set_coefficient, or via a list of (variable, coeff) pairs with set_coefficients
###Code
ct_painting.lhs.set_coefficients([(desk, 0.1), (cell, 0.1), (hybrid, 0.1)])
msol = m.solve(url=SVC_URL, key=SVC_KEY)
assert msol is not None, "model can't solve"
m.print_solution()
###Output
objective: 21900
desk=950
cell=350
hybrid=350
###Markdown
Relaxations Let's now introduce a new constraint: polishing time limit.
###Code
# constraint: polishing time limit
ct_polishing = m.add_constraint( 0.6 * desk + 0.6 * cell + 0.3 * hybrid <= 290, "polishing_limit")
msol = m.solve(url=SVC_URL, key=SVC_KEY)
if msol is None:
print("model can't solve")
###Output
model can't solve
###Markdown
The model is now infeasible. We need to handle it and dig into the infeasibilities. You can now use the Relaxer object. You can control the way it will relax the constraints, or you can use one of the various automatic modes:- 'all' relaxes all constraints using a MEDIUM priority; this is the default.- 'named' relaxes all constraints with a user name but not the others.- 'match' looks for priority names within constraint names; unnamed constraints are not relaxed. We will use the 'match' mode. The polishing constraint is mandatory. The painting constraint is a nice-to-have. The assembly constraint has low priority.
###Code
ct_polishing.name = "high_"+ct_polishing.name
ct_assembly.name = "low_"+ct_assembly.name
ct_painting.name = "medium_"+ct_painting.name
# if a name contains "low", it has priority LOW
# if a ct name contains "medium" it has priority MEDIUM
# same for HIGH
# if a constraint has no name or does not match any, it is not relaxable.
from docplex.mp.relaxer import Relaxer
relaxer = Relaxer(prioritizer='match', verbose=True)
relaxed_sol = relaxer.relax(m, url=SVC_URL, key=SVC_KEY)
relaxed_ok = relaxed_sol is not None
assert relaxed_ok, "relaxation failed"
relaxer.print_information()
m.print_solution()
ct_polishing_relax = relaxer.get_relaxation(ct_polishing)
print("* found slack of {0} for polish ct".format(ct_polishing_relax))
ct_polishing.rhs+= ct_polishing_relax
m.solve(url=SVC_URL, key=SVC_KEY)
m.report()
m.print_solution()
###Output
* found slack of 235.0 for polish ct
* model telephone_production solved with objective = 14700
objective: 14700
desk=350
cell=350
hybrid=350
|
course2/session1/kadenze_mir_c2_s1_3_random_sample_generation.ipynb | ###Markdown
Random sample generation In this notebook we sketch how to generate random samples from a discrete probability distribution using its cumulative distribution. Even if our probabilistic model is significantly more complicated, we can generate random data samples using similar ideas.
###Code
import numpy as np
values = np.int64([1, 2, 3])
probability_distribution = [1/6., 3/6., 2/6.]
cummulative_distribution = np.cumsum(probability_distribution)
print(cummulative_distribution)
samples = []
for n in np.arange(0,10):
# generate a random number uniformly distributed between 0.0 and 1.0
r = np.random.uniform()
print("Random number %f"% r)
if r < cummulative_distribution[0]:
samples.append(1)
print('Sample value: 1')
elif r < cummulative_distribution[1]:
print('Sample value: 2')
samples.append(2)
else:
print('Sample value: 3')
samples.append(3)
print(samples)
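# A more general sketch of the same idea: vectorised inverse-CDF sampling.
# For each uniform draw, np.searchsorted returns the first index whose cumulative
# probability is >= the draw, which generalises the if/elif chain above.
r_vec = np.random.uniform(size=10)
idx = np.minimum(np.searchsorted(cummulative_distribution, r_vec), len(values) - 1)
print(values[idx])
# np.random.choice(values, size=10, p=probability_distribution) is the equivalent built-in.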
###Output
_____no_output_____ |
example/custom_layers_and_models.ipynb | ###Markdown
Copyright 2019 The TensorFlow Authors. This notebook was run on a tensorflow-1.14 VM on Google Cloud.
###Code
import tensorflow as tf
tf.enable_eager_execution(
config=None,
device_policy=None,
execution_mode=None
)
###Output
/usr/local/lib/python3.5/dist-packages/tensorflow/python/framework/dtypes.py:516: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint8 = np.dtype([("qint8", np.int8, 1)])
/usr/local/lib/python3.5/dist-packages/tensorflow/python/framework/dtypes.py:517: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_quint8 = np.dtype([("quint8", np.uint8, 1)])
/usr/local/lib/python3.5/dist-packages/tensorflow/python/framework/dtypes.py:518: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint16 = np.dtype([("qint16", np.int16, 1)])
/usr/local/lib/python3.5/dist-packages/tensorflow/python/framework/dtypes.py:519: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_quint16 = np.dtype([("quint16", np.uint16, 1)])
/usr/local/lib/python3.5/dist-packages/tensorflow/python/framework/dtypes.py:520: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint32 = np.dtype([("qint32", np.int32, 1)])
/usr/local/lib/python3.5/dist-packages/tensorflow/python/framework/dtypes.py:525: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
np_resource = np.dtype([("resource", np.ubyte, 1)])
/usr/local/lib/python3.5/dist-packages/tensorboard/compat/tensorflow_stub/dtypes.py:541: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint8 = np.dtype([("qint8", np.int8, 1)])
/usr/local/lib/python3.5/dist-packages/tensorboard/compat/tensorflow_stub/dtypes.py:542: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_quint8 = np.dtype([("quint8", np.uint8, 1)])
/usr/local/lib/python3.5/dist-packages/tensorboard/compat/tensorflow_stub/dtypes.py:543: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint16 = np.dtype([("qint16", np.int16, 1)])
/usr/local/lib/python3.5/dist-packages/tensorboard/compat/tensorflow_stub/dtypes.py:544: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_quint16 = np.dtype([("quint16", np.uint16, 1)])
/usr/local/lib/python3.5/dist-packages/tensorboard/compat/tensorflow_stub/dtypes.py:545: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint32 = np.dtype([("qint32", np.int32, 1)])
/usr/local/lib/python3.5/dist-packages/tensorboard/compat/tensorflow_stub/dtypes.py:550: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
np_resource = np.dtype([("resource", np.ubyte, 1)])
###Markdown
Writing custom layers and models with Keras View on TensorFlow.org Run in Google Colab View source on GitHub Download notebook Setup The Layer class Layers encapsulate a state (weights) and some computationThe main data structure you'll work with is the `Layer`.A layer encapsulates both a state (the layer's "weights")and a transformation from inputs to outputs (a "call", the layer'sforward pass).Here's a densely-connected layer. It has a state: the variables `w` and `b`.
###Code
from tensorflow.keras import layers
class Linear(layers.Layer):
def __init__(self, units=32, input_dim=32):
super(Linear, self).__init__()
w_init = tf.random_normal_initializer()
self.w = tf.Variable(initial_value=w_init(shape=(input_dim, units),
dtype='float32'),
trainable=True)
b_init = tf.zeros_initializer()
self.b = tf.Variable(initial_value=b_init(shape=(units,),
dtype='float32'),
trainable=True)
def call(self, inputs):
return tf.matmul(inputs, self.w) + self.b
x = tf.ones((2, 2))
linear_layer = Linear(4, 2)
y = linear_layer(x)
print(y)
###Output
tf.Tensor(
[[ 1.2631972 -1.1278888 0.7652812 -0.10996719]
[ 1.2631972 -1.1278888 0.7652812 -0.10996719]], shape=(2, 4), dtype=float32)
###Markdown
Note that the weights `w` and `b` are automatically tracked by the layer uponbeing set as layer attributes:
###Code
assert linear_layer.weights == [linear_layer.w, linear_layer.b]
###Output
_____no_output_____
###Markdown
Note you also have access to a quicker shortcut for adding weight to a layer: the `add_weight` method:
###Code
class Linear(layers.Layer):
def __init__(self, units=32, input_dim=32):
super(Linear, self).__init__()
self.w = self.add_weight(shape=(input_dim, units),
initializer='random_normal',
trainable=True)
self.b = self.add_weight(shape=(units,),
initializer='zeros',
trainable=True)
def call(self, inputs):
return tf.matmul(inputs, self.w) + self.b
x = tf.ones((2, 2))
linear_layer = Linear(4, 2)
y = linear_layer(x)
print(y)
###Output
tf.Tensor(
[[-0.05017339 -0.09985163 -0.04027011 0.01080477]
[-0.05017339 -0.09985163 -0.04027011 0.01080477]], shape=(2, 4), dtype=float32)
###Markdown
Layers can have non-trainable weightsBesides trainable weights, you can add non-trainable weights to a layer as well.Such weights are meant not to be taken into account during backpropagation,when you are training the layer.Here's how to add and use a non-trainable weight:
###Code
class ComputeSum(layers.Layer):
def __init__(self, input_dim):
super(ComputeSum, self).__init__()
self.total = tf.Variable(initial_value=tf.zeros((input_dim,)),
trainable=False)
def call(self, inputs):
self.total.assign_add(tf.reduce_sum(inputs, axis=0))
return self.total
x = tf.ones((2, 2))
my_sum = ComputeSum(2)
y = my_sum(x)
print(y.numpy())
y = my_sum(x)
print(y.numpy())
###Output
[2. 2.]
[4. 4.]
###Markdown
It's part of `layer.weights`, but it gets categorized as a non-trainable weight:
###Code
print('weights:', len(my_sum.weights))
print('non-trainable weights:', len(my_sum.non_trainable_weights))
# It's not included in the trainable weights:
print('trainable_weights:', my_sum.trainable_weights)
###Output
weights: 1
non-trainable weights: 1
trainable_weights: []
###Markdown
Best practice: deferring weight creation until the shape of the inputs is knownIn the logistic regression example above, our `Linear` layer took an `input_dim` argumentthat was used to compute the shape of the weights `w` and `b` in `__init__`:
###Code
class Linear(layers.Layer):
def __init__(self, units=32, input_dim=32):
super(Linear, self).__init__()
self.w = self.add_weight(shape=(input_dim, units),
initializer='random_normal',
trainable=True)
self.b = self.add_weight(shape=(units,),
initializer='zeros',
trainable=True)
###Output
_____no_output_____
###Markdown
In many cases, you may not know in advance the size of your inputs, and you wouldlike to lazily create weights when that value becomes known,some time after instantiating the layer.In the Keras API, we recommend creating layer weights in the `build(inputs_shape)` method of your layer.Like this:
###Code
class Linear(layers.Layer):
def __init__(self, units=32):
super(Linear, self).__init__()
self.units = units
def build(self, input_shape):
self.w = self.add_weight(shape=(input_shape[-1], self.units),
initializer='random_normal',
trainable=True)
self.b = self.add_weight(shape=(self.units,),
initializer='random_normal',
trainable=True)
def call(self, inputs):
return tf.matmul(inputs, self.w) + self.b
###Output
_____no_output_____
###Markdown
The `__call__` method of your layer will automatically run `build` the first time it is called.You now have a layer that's lazy and easy to use:
###Code
linear_layer = Linear(32) # At instantiation, we don't know on what inputs this is going to get called
y = linear_layer(x) # The layer's weights are created dynamically the first time the layer is called
###Output
_____no_output_____
###Markdown
Layers are recursively composableIf you assign a Layer instance as attribute of another Layer,the outer layer will start tracking the weights of the inner layer.We recommend creating such sublayers in the `__init__` method (since the sublayers will typically have a `build` method, they will be built when the outer layer gets built).
###Code
# Let's assume we are reusing the Linear class
# with a `build` method that we defined above.
class MLPBlock(layers.Layer):
def __init__(self):
super(MLPBlock, self).__init__()
self.linear_1 = Linear(32)
self.linear_2 = Linear(32)
self.linear_3 = Linear(1)
def call(self, inputs):
x = self.linear_1(inputs)
x = tf.nn.relu(x)
x = self.linear_2(x)
x = tf.nn.relu(x)
return self.linear_3(x)
mlp = MLPBlock()
y = mlp(tf.ones(shape=(3, 64))) # The first call to the `mlp` will create the weights
print('weights:', len(mlp.weights))
print('trainable weights:', len(mlp.trainable_weights))
###Output
weights: 6
trainable weights: 6
###Markdown
Layers recursively collect losses created during the forward passWhen writing the `call` method of a layer, you can create loss tensors that you will want to use later, when writing your training loop. This is doable by calling `self.add_loss(value)`:
###Code
# A layer that creates an activity regularization loss
class ActivityRegularizationLayer(layers.Layer):
def __init__(self, rate=1e-2):
super(ActivityRegularizationLayer, self).__init__()
self.rate = rate
def call(self, inputs):
self.add_loss(self.rate * tf.reduce_sum(inputs))
return inputs
###Output
_____no_output_____
###Markdown
These losses (including those created by any inner layer) can be retrieved via `layer.losses`.This property is reset at the start of every `__call__` to the top-level layer, so that `layer.losses` always contains the loss values created during the last forward pass.
###Code
class OuterLayer(layers.Layer):
def __init__(self):
super(OuterLayer, self).__init__()
self.activity_reg = ActivityRegularizationLayer(1e-2)
def call(self, inputs):
return self.activity_reg(inputs)
layer = OuterLayer()
assert len(layer.losses) == 0 # No losses yet since the layer has never been called
_ = layer(tf.zeros(1, 1))
assert len(layer.losses) == 1 # We created one loss value
# `layer.losses` gets reset at the start of each __call__
_ = layer(tf.zeros(1, 1))
assert len(layer.losses) == 1 # This is the loss created during the call above
###Output
_____no_output_____
###Markdown
In addition, the `loss` property also contains regularization losses created for the weights of any inner layer:
###Code
class OuterLayer(layers.Layer):
def __init__(self):
super(OuterLayer, self).__init__()
self.dense = layers.Dense(32, kernel_regularizer=tf.keras.regularizers.l2(1e-3))
def call(self, inputs):
return self.dense(inputs)
layer = OuterLayer()
_ = layer(tf.zeros((1, 1)))
# This is `1e-3 * sum(layer.dense.kernel ** 2)`,
# created by the `kernel_regularizer` above.
print(layer.losses)
###Output
[<tf.Tensor: id=270, shape=(), dtype=float32, numpy=0.001828252>]
###Markdown
These losses are meant to be taken into account when writing training loops, like this:```python Instantiate an optimizer.optimizer = tf.keras.optimizers.SGD(learning_rate=1e-3)loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True) Iterate over the batches of a dataset.for x_batch_train, y_batch_train in train_dataset: with tf.GradientTape() as tape: logits = layer(x_batch_train) Logits for this minibatch Loss value for this minibatch loss_value = loss_fn(y_batch_train, logits) Add extra losses created during this forward pass: loss_value += sum(model.losses) grads = tape.gradient(loss_value, model.trainable_weights) optimizer.apply_gradients(zip(grads, model.trainable_weights))```For a detailed guide about writing training loops, see the second section of the [guide to training and evaluation](./train_and_evaluate.ipynb). You can optionally enable serialization on your layersIf you need your custom layers to be serializable as part of a [Functional model](./functional.ipynb), you can optionally implement a `get_config` method:
###Code
class Linear(layers.Layer):
def __init__(self, units=32):
super(Linear, self).__init__()
self.units = units
def build(self, input_shape):
self.w = self.add_weight(shape=(input_shape[-1], self.units),
initializer='random_normal',
trainable=True)
self.b = self.add_weight(shape=(self.units,),
initializer='random_normal',
trainable=True)
def call(self, inputs):
return tf.matmul(inputs, self.w) + self.b
def get_config(self):
return {'units': self.units}
# Now you can recreate the layer from its config:
layer = Linear(64)
config = layer.get_config()
print(config)
new_layer = Linear.from_config(config)
###Output
{'units': 64}
###Markdown
Note that the `__init__` method of the base `Layer` class takes some keyword arguments, in particular a `name` and a `dtype`. It's good practice to pass these arguments to the parent class in `__init__` and to include them in the layer config:
###Code
class Linear(layers.Layer):
def __init__(self, units=32, **kwargs):
super(Linear, self).__init__(**kwargs)
self.units = units
def build(self, input_shape):
self.w = self.add_weight(shape=(input_shape[-1], self.units),
initializer='random_normal',
trainable=True)
self.b = self.add_weight(shape=(self.units,),
initializer='random_normal',
trainable=True)
def call(self, inputs):
return tf.matmul(inputs, self.w) + self.b
def get_config(self):
config = super(Linear, self).get_config()
config.update({'units': self.units})
return config
layer = Linear(64)
config = layer.get_config()
print(config)
new_layer = Linear.from_config(config)
###Output
{'name': 'linear_8', 'dtype': None, 'trainable': True, 'units': 64}
###Markdown
If you need more flexibility when deserializing the layer from its config, you can also override the `from_config` class method. This is the base implementation of `from_config`:```pythondef from_config(cls, config): return cls(**config)```To learn more about serialization and saving, see the complete [Guide to Saving and Serializing Models](./save_and_serialize.ipynb). Privileged `training` argument in the `call` methodSome layers, in particular the `BatchNormalization` layer and the `Dropout` layer, have different behaviors during training and inference. For such layers, it is standard practice to expose a `training` (boolean) argument in the `call` method.By exposing this argument in `call`, you enable the built-in training and evaluation loops (e.g. `fit`) to correctly use the layer in training and inference.
###Code
class CustomDropout(layers.Layer):
def __init__(self, rate, **kwargs):
super(CustomDropout, self).__init__(**kwargs)
self.rate = rate
def call(self, inputs, training=None):
if training:
return tf.nn.dropout(inputs, rate=self.rate)
return inputs
###Output
_____no_output_____
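###Markdown
A small usage sketch with the layer defined above: outside of the built-in loops (e.g. `fit()`), the `training` flag is passed explicitly when calling the layer.
###Code
dropout_layer = CustomDropout(0.5)
x_demo = tf.ones((2, 4))
print(dropout_layer(x_demo, training=True))   # entries randomly zeroed, the rest rescaled
print(dropout_layer(x_demo, training=False))  # inputs returned unchanged
###Output
_____no_output_____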
###Markdown
Building Models The Model classIn general, you will use the `Layer` class to define inner computation blocks,and will use the `Model` class to define the outer model -- the object you will train.For instance, in a ResNet50 model, you would have several ResNet blocks subclassing `Layer`,and a single `Model` encompassing the entire ResNet50 network.The `Model` class has the same API as `Layer`, with the following differences:- It exposes built-in training, evaluation, and prediction loops (`model.fit()`, `model.evaluate()`, `model.predict()`).- It exposes the list of its inner layers, via the `model.layers` property.- It exposes saving and serialization APIs.Effectively, the "Layer" class corresponds to what we refer to in the literatureas a "layer" (as in "convolution layer" or "recurrent layer") or as a "block" (as in "ResNet block" or "Inception block").Meanwhile, the "Model" class corresponds to what is referred to in the literatureas a "model" (as in "deep learning model") or as a "network" (as in "deep neural network").For instance, we could take our mini-resnet example above, and use it to build a `Model` that we couldtrain with `fit()`, and that we could save with `save_weights`:```pythonclass ResNet(tf.keras.Model): def __init__(self): super(ResNet, self).__init__() self.block_1 = ResNetBlock() self.block_2 = ResNetBlock() self.global_pool = layers.GlobalAveragePooling2D() self.classifier = Dense(num_classes) def call(self, inputs): x = self.block_1(inputs) x = self.block_2(x) x = self.global_pool(x) return self.classifier(x)resnet = ResNet()dataset = ...resnet.fit(dataset, epochs=10)resnet.save_weights(filepath)``` Putting it all together: an end-to-end exampleHere's what you've learned so far:- A `Layer` encapsulate a state (created in `__init__` or `build`) and some computation (in `call`).- Layers can be recursively nested to create new, bigger computation blocks.- Layers can create and track losses (typically regularization losses).- The outer container, the thing you want to train, is a `Model`. A `Model` is just like a `Layer`, but with added training and serialization utilities.Let's put all of these things together into an end-to-end example: we're going to implement a Variational AutoEncoder (VAE). We'll train it on MNIST digits.Our VAE will be a subclass of `Model`, built as a nested composition of layers that subclass `Layer`. It will feature a regularization loss (KL divergence).
###Code
class Sampling(layers.Layer):
"""Uses (z_mean, z_log_var) to sample z, the vector encoding a digit."""
def call(self, inputs):
z_mean, z_log_var = inputs
batch = tf.shape(z_mean)[0]
dim = tf.shape(z_mean)[1]
epsilon = tf.keras.backend.random_normal(shape=(batch, dim))
return z_mean + tf.exp(0.5 * z_log_var) * epsilon
class Encoder(layers.Layer):
"""Maps MNIST digits to a triplet (z_mean, z_log_var, z)."""
def __init__(self,
latent_dim=32,
intermediate_dim=64,
name='encoder',
**kwargs):
super(Encoder, self).__init__(name=name, **kwargs)
self.dense_proj = layers.Dense(intermediate_dim, activation='relu')
self.dense_mean = layers.Dense(latent_dim)
self.dense_log_var = layers.Dense(latent_dim)
self.sampling = Sampling()
def call(self, inputs):
x = self.dense_proj(inputs)
z_mean = self.dense_mean(x)
z_log_var = self.dense_log_var(x)
z = self.sampling((z_mean, z_log_var))
return z_mean, z_log_var, z
class Decoder(layers.Layer):
"""Converts z, the encoded digit vector, back into a readable digit."""
def __init__(self,
original_dim,
intermediate_dim=64,
name='decoder',
**kwargs):
super(Decoder, self).__init__(name=name, **kwargs)
self.dense_proj = layers.Dense(intermediate_dim, activation='relu')
self.dense_output = layers.Dense(original_dim, activation='sigmoid')
def call(self, inputs):
x = self.dense_proj(inputs)
return self.dense_output(x)
class VariationalAutoEncoder(tf.keras.Model):
"""Combines the encoder and decoder into an end-to-end model for training."""
def __init__(self,
original_dim,
intermediate_dim=64,
latent_dim=32,
name='autoencoder',
**kwargs):
super(VariationalAutoEncoder, self).__init__(name=name, **kwargs)
self.original_dim = original_dim
self.encoder = Encoder(latent_dim=latent_dim,
intermediate_dim=intermediate_dim)
self.decoder = Decoder(original_dim, intermediate_dim=intermediate_dim)
def call(self, inputs):
z_mean, z_log_var, z = self.encoder(inputs)
reconstructed = self.decoder(z)
# Add KL divergence regularization loss.
kl_loss = - 0.5 * tf.reduce_mean(
z_log_var - tf.square(z_mean) - tf.exp(z_log_var) + 1)
self.add_loss(kl_loss)
return reconstructed
original_dim = 784
vae = VariationalAutoEncoder(original_dim, 64, 32)
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)
mse_loss_fn = tf.keras.losses.MeanSquaredError()
loss_metric = tf.keras.metrics.Mean()
(x_train, _), _ = tf.keras.datasets.mnist.load_data()
x_train = x_train.reshape(60000, 784).astype('float32') / 255
train_dataset = tf.data.Dataset.from_tensor_slices(x_train)
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)
# Iterate over epochs.
for epoch in range(3):
print('Start of epoch %d' % (epoch,))
# Iterate over the batches of the dataset.
for step, x_batch_train in enumerate(train_dataset):
with tf.GradientTape() as tape:
reconstructed = vae(x_batch_train)
# Compute reconstruction loss
loss = mse_loss_fn(x_batch_train, reconstructed)
loss += sum(vae.losses) # Add KLD regularization loss
grads = tape.gradient(loss, vae.trainable_weights)
optimizer.apply_gradients(zip(grads, vae.trainable_weights))
loss_metric(loss)
if step % 100 == 0:
print('step %s: mean loss = %s' % (step, loss_metric.result()))
###Output
Start of epoch 0
step 0: mean loss = tf.Tensor(0.333594, shape=(), dtype=float32)
step 100: mean loss = tf.Tensor(0.12490749, shape=(), dtype=float32)
step 200: mean loss = tf.Tensor(0.09895289, shape=(), dtype=float32)
step 300: mean loss = tf.Tensor(0.08900405, shape=(), dtype=float32)
step 400: mean loss = tf.Tensor(0.08410553, shape=(), dtype=float32)
step 500: mean loss = tf.Tensor(0.08077162, shape=(), dtype=float32)
step 600: mean loss = tf.Tensor(0.07866916, shape=(), dtype=float32)
step 700: mean loss = tf.Tensor(0.07706785, shape=(), dtype=float32)
step 800: mean loss = tf.Tensor(0.07590426, shape=(), dtype=float32)
step 900: mean loss = tf.Tensor(0.07490685, shape=(), dtype=float32)
Start of epoch 1
step 0: mean loss = tf.Tensor(0.07461874, shape=(), dtype=float32)
step 100: mean loss = tf.Tensor(0.07394995, shape=(), dtype=float32)
step 200: mean loss = tf.Tensor(0.07347135, shape=(), dtype=float32)
step 300: mean loss = tf.Tensor(0.07299441, shape=(), dtype=float32)
step 400: mean loss = tf.Tensor(0.07266846, shape=(), dtype=float32)
step 500: mean loss = tf.Tensor(0.07226782, shape=(), dtype=float32)
step 600: mean loss = tf.Tensor(0.07197859, shape=(), dtype=float32)
step 700: mean loss = tf.Tensor(0.07168568, shape=(), dtype=float32)
step 800: mean loss = tf.Tensor(0.07144765, shape=(), dtype=float32)
step 900: mean loss = tf.Tensor(0.07119144, shape=(), dtype=float32)
Start of epoch 2
step 0: mean loss = tf.Tensor(0.07111951, shape=(), dtype=float32)
step 100: mean loss = tf.Tensor(0.07093658, shape=(), dtype=float32)
step 200: mean loss = tf.Tensor(0.07081041, shape=(), dtype=float32)
step 300: mean loss = tf.Tensor(0.070655316, shape=(), dtype=float32)
step 400: mean loss = tf.Tensor(0.07056138, shape=(), dtype=float32)
step 500: mean loss = tf.Tensor(0.070404164, shape=(), dtype=float32)
step 600: mean loss = tf.Tensor(0.07029703, shape=(), dtype=float32)
step 700: mean loss = tf.Tensor(0.07017324, shape=(), dtype=float32)
step 800: mean loss = tf.Tensor(0.07007284, shape=(), dtype=float32)
step 900: mean loss = tf.Tensor(0.06995187, shape=(), dtype=float32)
###Markdown
Note that since the VAE is subclassing `Model`, it features built-in training loops. So you could also have trained it like this:
###Code
vae = VariationalAutoEncoder(784, 64, 32)
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)
vae.compile(optimizer, loss=tf.keras.losses.MeanSquaredError())
vae.fit(x_train, x_train, epochs=3, batch_size=64)
###Output
Epoch 1/3
60000/60000 [==============================] - 4s 60us/sample - loss: 0.0746
Epoch 2/3
60000/60000 [==============================] - 3s 55us/sample - loss: 0.0676
Epoch 3/3
60000/60000 [==============================] - 3s 55us/sample - loss: 0.0676
###Markdown
Beyond object-oriented development: the Functional APIWas this example too much object-oriented development for you? You can also build models using [the Functional API](./functional.ipynb). Importantly, choosing one style or another does not prevent you from leveraging components written in the other style: you can always mix-and-match.For instance, the Functional API example below reuses the same `Sampling` layer we defined in the example above.
###Code
original_dim = 784
intermediate_dim = 64
latent_dim = 32
# Define encoder model.
original_inputs = tf.keras.Input(shape=(original_dim,), name='encoder_input')
x = layers.Dense(intermediate_dim, activation='relu')(original_inputs)
z_mean = layers.Dense(latent_dim, name='z_mean')(x)
z_log_var = layers.Dense(latent_dim, name='z_log_var')(x)
z = Sampling()((z_mean, z_log_var))
encoder = tf.keras.Model(inputs=original_inputs, outputs=z, name='encoder')
# Define decoder model.
latent_inputs = tf.keras.Input(shape=(latent_dim,), name='z_sampling')
x = layers.Dense(intermediate_dim, activation='relu')(latent_inputs)
outputs = layers.Dense(original_dim, activation='sigmoid')(x)
decoder = tf.keras.Model(inputs=latent_inputs, outputs=outputs, name='decoder')
# Define VAE model.
outputs = decoder(z)
vae = tf.keras.Model(inputs=original_inputs, outputs=outputs, name='vae')
# Add KL divergence regularization loss.
kl_loss = - 0.5 * tf.reduce_mean(
z_log_var - tf.square(z_mean) - tf.exp(z_log_var) + 1)
vae.add_loss(kl_loss)
# Train.
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)
vae.compile(optimizer, loss=tf.keras.losses.MeanSquaredError())
vae.fit(x_train, x_train, epochs=3, batch_size=64)
tf.__version__
###Output
_____no_output_____ |
tutorials/Certification_Trainings/Public/databricks_notebooks/2.6/6.Playground_DataFrames_v2.6.3.ipynb | ###Markdown
 6. Spark DataFrames Playground v.2.6.3
###Code
import sparknlp
from sparknlp.base import *
from sparknlp.annotator import *
from pyspark.ml import Pipeline
print("Spark NLP version", sparknlp.version())
spark = sparknlp.start()
print("Apache Spark version:", spark.version)
spark
document = DocumentAssembler().setInputCol('text').setOutputCol('document')
tokenizer = Tokenizer().setInputCols('document').setOutputCol('token')
pos = PerceptronModel.pretrained().setInputCols('document', 'token').setOutputCol('pos')
pipeline = Pipeline().setStages([document, tokenizer, pos])
!wget https://raw.githubusercontent.com/JohnSnowLabs/spark-nlp-workshop/master/jupyter/annotation/english/spark-nlp-basics/sample-sentences-en.txt
dbutils.fs.cp("file:/databricks/driver/sample-sentences-en.txt", "dbfs:/")
%fs ls "file:/databricks/driver"
data = spark.read.text('./sample-sentences-en.txt').toDF('text')
data.show(5)
model = pipeline.fit(data)
result = model.transform(data)
result.show(5)
stored = result\
.select('text', 'pos.begin', 'pos.end', 'pos.result', 'pos.metadata')\
.toDF('text', 'pos_begin', 'pos_end', 'pos_result', 'pos_meta')\
.cache()
stored.printSchema()
stored.show(5)
###Output
_____no_output_____
###Markdown
--------- Spark SQL Functions
###Code
from pyspark.sql.functions import *
stored.filter(array_contains('pos_result', 'VBD')).show(5)
stored.withColumn('token_count', size('pos_result')).select('pos_result', 'token_count').show(5)
stored.select('text', array_max('pos_end')).show(5)
stored.withColumn('unique_pos', array_distinct('pos_result')).select('pos_result', 'unique_pos').show(5)
stored.groupBy(array_sort(array_distinct('pos_result'))).count().show(10)
###Output
_____no_output_____
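###Markdown
Another handy pattern (a sketch, assuming Spark >= 2.4 for `arrays_zip`) is to zip the parallel annotation arrays and explode them into one row per token:
###Code
# Zip the parallel arrays into an array of structs, then explode to one row per token.
exploded = stored.select('text', explode(arrays_zip('pos_begin', 'pos_end', 'pos_result')).alias('token_pos'))
exploded.show(5, truncate=50)
###Output
_____no_output_____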
###Markdown
---------------- SQL Functions with `col`
###Code
from pyspark.sql.functions import col
stored.select(col('pos_meta').getItem(0).getItem('word')).show(5)
###Output
_____no_output_____
###Markdown
------------- Spark NLP Annotation UDFs
###Code
result.select('pos').show(1, truncate=False)
def nn_tokens(annotations):
nn_annotations = list(
filter(lambda annotation: annotation.result == 'NN', annotations)
)
return list(
map(lambda nn_annotation: nn_annotation.metadata['word'], nn_annotations)
)
from sparknlp.functions import *
from pyspark.sql.types import ArrayType, StringType
result.select(map_annotations(nn_tokens, ArrayType(StringType()))('pos').alias('nn_tokens')).show(truncate=False)
###Output
_____no_output_____ |
notebook/MS_Nature_Rowitch_snRNAseq.ipynb | ###Markdown
Human Brain samples - MS Nature 2019 Rowitch dataset reprocessed Please download the input data before proceeding. Please extract the tarball to the current working directory; the input data will be in **./data**. **Download link: https://bit.ly/2F6o5n7**
###Code
import scanpy as sc
import numpy as np
import scipy as sp
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import rcParams
from matplotlib import colors
import seaborn as sb
import glob
import rpy2.rinterface_lib.callbacks
import logging
from rpy2.robjects import pandas2ri
import anndata2ri
from scipy.sparse.csc import csc_matrix
# Ignore R warning messages
#Note: this can be commented out to get more verbose R output
rpy2.rinterface_lib.callbacks.logger.setLevel(logging.ERROR)
# Automatically convert rpy2 outputs to pandas dataframes
pandas2ri.activate()
anndata2ri.activate()
%load_ext rpy2.ipython
plt.rcParams['figure.figsize']=(8,8) #rescale figures
sc.settings.verbosity = 3
#sc.set_figure_params(dpi=200, dpi_save=300)
sc.logging.print_versions()
results_file = './write/ms_nature_2019_rowitch_pp.h5ad'
###Output
scanpy==1.5.1 anndata==0.7.1 umap==0.4.3 numpy==1.18.4 scipy==1.4.1 pandas==1.0.3 scikit-learn==0.22.1 statsmodels==0.10.1 python-igraph==0.7.1 louvain==0.6.1
###Markdown
Load human brain snRNAseq samples Here we load the pre-processed datasets (which have been annotated) and the raw matrices (which won't be filtered on the gene level). Raw data
###Code
wpath = "./data/"
metafile = "all_samples.txt"
meta = pd.read_csv( wpath + "/" + metafile, header = 0)
meta
# design
# Set up data loading
file_base = './data/'
adatas_raw = []
# Loop to load data
for i in range(len(meta['library_id'])):
#Parse filenames
sample = meta['library_id'][i]
h5_file = file_base+sample+'/outs/filtered_feature_bc_matrix.h5'
#Load data
adata_tmp = sc.read_10x_h5(h5_file)
adata_tmp.X = csc_matrix(adata_tmp.X)
#Annotate data
sampleID = sample.split('-rxn')[0]
adata_tmp.obs['sample'] = ['MSsnRNAseq2019_'+sample]*adata_tmp.n_obs
# adata_tmp.obs['study'] = ['MS_Nature_2019_Rowitch_snRNAseq']*adata_tmp.n_obs
# adata_tmp.obs['chemistry'] = ['v2_10X']*adata_tmp.n_obs
# adata_tmp.obs['tissue'] = ['Brain']*adata_tmp.n_obs
# adata_tmp.obs['species'] = ['Human']*adata_tmp.n_obs
# adata_tmp.obs['data_type'] = ['UMI']*adata_tmp.n_obs
# adata_tmp.obs
adata_tmp.var_names_make_unique()
#Append to main adata object
adatas_raw.append(adata_tmp)
meta['sample_id'] = meta['library_id'].copy()
meta['sample_id'] = meta['sample_id'].str.replace("_3PEE_ref", "")
meta
meta.shape
# Concatenate to unique adata object
adata_raw = adatas_raw[0].concatenate(adatas_raw[1:], batch_key='sample_ID',
batch_categories=meta['sample_id'])
adata_raw.obs['sample'] = adata_raw.obs['sample'].str.replace("_3PEE_ref", "")
adata_raw.obs.head()
adata_raw.obs.drop(columns=['sample_ID'], inplace=True)
adata_raw.obs.head()
adata_raw.obs.index.rename('barcode', inplace=True)
adata_raw.obs.head()
adata_raw.shape
type(adata_raw.X)
# adata_raw.X = csc_matrix(adata_raw.X)
# Save merged object
adata_raw.write(results_file)
###Output
... storing 'sample' as categorical
... storing 'feature_types' as categorical
... storing 'genome' as categorical
###Markdown
1. Pre-processing and visualization 1.1 Quality control
###Code
adata_raw_copy = adata_raw.copy()
sc.pp.calculate_qc_metrics(adata_raw, inplace=True)
# Quality control - calculate QC covariates
adata_raw.obs['n_counts'] = adata_raw.X.sum(1)
adata_raw.obs['log_counts'] = np.log(adata_raw.obs['n_counts'])
adata_raw.obs['n_genes'] = (adata_raw.X > 0).sum(1)
# mt_gene_mask = [gene.startswith('MT-') for gene in adata_raw.var_names]
# adata_raw.obs['mt_frac'] = adata_raw.X[:, mt_gene_mask].sum(1)/adata_raw.obs['n_counts']
mito_genes = adata_raw.var_names.str.startswith('MT-')
adata_raw.obs['mt_frac'] = np.sum(adata_raw[:, mito_genes].X, axis=1) / np.sum(adata_raw.X, axis=1)
# Quality control - plot QC metrics
sc.pl.violin(adata_raw, ['n_genes', 'n_counts', 'mt_frac'],groupby='sample',
jitter=0.4, multi_panel=False)
sc.pl.scatter(adata_raw, x='n_counts', y='mt_frac')
sc.pl.scatter(adata_raw, x='n_counts', y='n_genes', color='mt_frac')
sc.pl.scatter(adata_raw[adata_raw.obs['n_counts'] < 20000], x='n_counts', y='n_genes', color='mt_frac')
#Thresholding decision: counts
p3 = sb.distplot(adata_raw.obs['n_counts'], kde=False, bins=200)
plt.show()
p4 = sb.distplot(adata_raw.obs['n_counts'][adata_raw.obs['n_counts']<4000], kde=False,bins=200)
plt.show()
p5 = sb.distplot(adata_raw.obs['n_counts'][adata_raw.obs['n_counts']>25000], kde=False, bins=60)
plt.show()
###Output
_____no_output_____
###Markdown
Zoomed-in histograms of the number of counts per cell show a group of cells with n_counts < 3500; filtering at that threshold would remove ~47k of the 65k cells. The paper used a cutoff of 1000 reads, so we stick with a 1000-count threshold. On the upper end of the distribution, the high peak centered around 5000 counts extends to roughly 40000 counts.
###Code
# Filter cells according to identified QC thresholds:
print('Total number of cells: {:d}'.format(adata_raw.n_obs))
sc.pp.filter_cells(adata_raw, min_counts = 1000)
print('Number of cells after min count filter: {:d}'.format(adata_raw.n_obs))
sc.pp.filter_cells(adata_raw, max_counts = 40000)
print('Number of cells after max count filter: {:d}'.format(adata_raw.n_obs))
adata_raw = adata_raw[adata_raw.obs['mt_frac'] < 0.2]
print('Number of cells after MT filter: {:d}'.format(adata_raw.n_obs))
# look at the effect of thresholding
sc.pl.scatter(adata_raw, x='n_counts', y='n_genes', color='mt_frac')
#Thresholding decision: genes
p6 = sb.distplot(adata_raw.obs['n_genes'], kde=False, bins=60)
plt.show()
p7 = sb.distplot(adata_raw.obs['n_genes'][adata_raw.obs['n_genes']<1500], kde=False, bins=60)
plt.show()
###Output
_____no_output_____
###Markdown
From the histograms of the number of genes per cell, we can see that there is still a small population with n_genes < 600 that should be filtered out. The paper, however, used a threshold of 500.
###Code
# Thresholding on number of genes
print('Total number of cells: {:d}'.format(adata_raw.n_obs))
sc.pp.filter_cells(adata_raw, min_genes = 600)
print('Number of cells after gene filter: {:d}'.format(adata_raw.n_obs))
#Filter genes:
print('Total number of genes: {:d}'.format(adata_raw.n_vars))
# Min 20 cells - filters out 0 count genes
sc.pp.filter_genes(adata_raw, min_cells=20)
print('Number of genes after cell filter: {:d}'.format(adata_raw.n_vars))
# Save merged object
adata_raw.write('./write/ms_nature_2019_rowitch_done_QC_filter_46kcell_25kgene.h5ad')
###Output
_____no_output_____
###Markdown
Normalization
###Code
adata_raw = sc.read_h5ad('./write/ms_nature_2019_rowitch_done_QC_filter_46kcell_25kgene.h5ad')
sc.pp.normalize_per_cell(adata_raw, counts_per_cell_after=1e6)
sc.pp.log1p(adata_raw)
# sc.pp.pca(adata_pp, n_comps=15, svd_solver='arpack')
# sc.pp.neighbors(adata_pp)
# sc.tl.louvain(adata_pp, key_added='groups', resolution=0.5)
adata_raw.write('./write/ms_nature_2019_rowitch_filtered_normalized_log1p_non_scaled.h5ad')
import gc
gc.collect()
infile = './write/ms_nature_2019_rowitch_filtered_normalized_log1p_non_scaled.h5ad'
adata_raw = sc.read_h5ad(infile)
def mod_index(meta):
meta['index'] = meta['index'].str.replace("_3PEE_ref", "")
return meta
# attach existing harmony and liger coordinates
# harmony
adata_harmony = sc.read_h5ad("./data/harmony_clustered.h5ad")
adata_harmony.obs.index = adata_harmony.obs.index.str.replace("_3PEE_ref", "")
adata_harmony.obs
# subset adata_raw to match same cells
cells = list(set(adata_raw.obs.index) & set(adata_harmony.obs.index))
adata_raw = adata_raw[cells]
xpca = pd.DataFrame(adata_harmony.obsm['X_pca']).set_index(adata_harmony.obs.index)
xtsne = pd.DataFrame(adata_harmony.obsm['X_tsne']).set_index(adata_harmony.obs.index)
xumap = pd.DataFrame(adata_harmony.obsm['X_umap']).set_index(adata_harmony.obs.index)
adata_raw.obsm['X_pca_harmony'] = np.array(xpca.loc[adata_raw.obs.index])
adata_raw.obsm['X_tsne_harmony'] = np.array(xtsne.loc[adata_raw.obs.index])
adata_raw.obsm['X_umap_harmony'] = np.array(xumap.loc[adata_raw.obs.index])
adata_raw.obs['louvain_harmony'] = adata_harmony.obs['louvain'].loc[adata_raw.obs.index]
adata_raw.obs = adata_raw.obs.astype({'louvain_harmony':'category'})
# liger
xtsne = pd.read_csv("./data/liger_runumap.tsne.coords.txt", sep='\t', encoding='utf-8')
xumap = pd.read_csv("./data/liger_runumap.umap.coords.txt", sep='\t', encoding='utf-8')
xlouvain = pd.read_csv("./data/liger_clusterID.txt", sep='\t', encoding='utf-8')
xtsne = mod_index(xtsne)
xumap = mod_index(xumap)
xlouvain['index'] = xlouvain['barcode']
xlouvain = mod_index(xlouvain)
xumap.set_index('index', inplace=True)
xtsne.set_index('index', inplace=True)
xlouvain.set_index('index', inplace=True)
adata_raw.obsm['X_tsne_liger'] = np.array(xtsne.loc[adata_raw.obs.index])
adata_raw.obsm['X_umap_liger'] = np.array(xumap.loc[adata_raw.obs.index])
adata_raw.obs['louvain_liger'] = np.array(xlouvain.loc[adata_raw.obs.index]['clusterID'])
adata_raw.obs = adata_raw.obs.astype({'louvain_liger':'category'})
outfile = infile
outfile = outfile.replace(".h5ad","")
adata_raw.write_h5ad(outfile+"_with_embedings.h5ad")
import gc
gc.collect()
###Output
_____no_output_____
###Markdown
Attach metadata from the paper
###Code
xmeta = pd.read_csv("./data/meta.tsv", sep='\t', encoding='utf-8')
xmeta.index = xmeta['cell'].str.replace("_.*_.*","")+"-"+xmeta['sample']+"_10x"
xmeta
xmeta.loc[set(set(xmeta.index) & set(adata_raw.obs.index))][['Capbatch','Seqbatch','cell_type','diagnosis','region','sample','sex','stage']]
features = ['Capbatch','Seqbatch','cell_type','diagnosis','region','sample','sex','stage']
bcodes = set(set(xmeta.index) & set(adata_raw.obs.index))
for f in features:
adata_raw.obs[f] = 'nan'
adata_raw.obs[f].loc[bcodes] = xmeta[f].loc[bcodes]
set(adata_raw.obs['cell_type'])
adata_raw.obs['>Description'] = ['Human brain snRNAseq 46k cells (MS Nature 2019 Schirmer et al.); data - normalized, log transformed UMI; platform - 10X v2 chemistry | embedding by umap_harmony; color by cell_type']*adata_raw.n_obs
outfile = infile
outfile = outfile.replace(".h5ad","")
adata_raw.write_h5ad(outfile+"_with_embedings_and_labels.h5ad")
###Output
... storing 'sample' as categorical
... storing 'Capbatch' as categorical
... storing 'Seqbatch' as categorical
... storing 'cell_type' as categorical
... storing 'diagnosis' as categorical
... storing 'region' as categorical
... storing 'sex' as categorical
... storing 'stage' as categorical
... storing '>Description' as categorical
|
6.deployment_eia.ipynb | ###Markdown
Module 6. Amazon SageMaker Deployment for EIA (Elastic Inference Accelerator)
---
***[Note] This module can only deploy models that were trained with PyTorch EIA 1.3.1. If the code does not run correctly, please match the framework version accordingly.***

In this module we deploy a model using an Elastic Inference Accelerator (EIA).

Elastic Inference Accelerator
Unlike training instances, real-time inference instances are often kept running around the clock, so using GPU instances for low latency in deep-learning applications can be very costly. Amazon Elastic Inference is a service that attaches inexpensive, low-memory GPU-based accelerators to Amazon EC2, Amazon ECS, and Amazon SageMaker; the accelerator is provisioned on and attached to a CPU instance. With EIA you can get performance close to a GPU instance while cutting instance running costs by up to 75%. It supports every Amazon SageMaker instance type, EC2 instance type, and Amazon ECS task, as well as most deep-learning frameworks. The supported framework versions can be checked with the AWS CLI.
```bash
$ aws ecr list-images --repository-name tensorflow-inference-eia --registry-id 763104351884
$ aws ecr list-images --repository-name pytorch-inference-eia --registry-id 763104351884
$ aws ecr list-images --repository-name mxnet-inference-eia --registry-id 763104351884
```
Reference: https://aws.amazon.com/ko/blogs/korea/amazon-elastic-inference-gpu-powered-deep-learning-inference-acceleration/

1. Inference script
---
The code cell below saves `inference_eia.py`, the SageMaker inference script, to the `src` directory. It is mostly identical to the code from Module 5, but note that the implementation of the `model_fn()` method is different.
###Code
import os
import time
import sagemaker
from sagemaker.pytorch.model import PyTorchModel
role = sagemaker.get_execution_role()
%%writefile ./src/inference_eia.py
from __future__ import absolute_import
import argparse
import json
import logging
import os
import sys
import time
import random
from os.path import join
import numpy as np
import io
import tarfile
import boto3
from PIL import Image
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import lr_scheduler
import torch.optim as optim
import torchvision
import copy
import torch.utils.data
import torch.utils.data.distributed
from torchvision import datasets, transforms, models
from torch import topk
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler(sys.stdout))
JSON_CONTENT_TYPE = 'application/json'
def model_fn(model_dir):
logger.info("==> model_dir : {}".format(model_dir))
traced_model = torch.jit.load(os.path.join(model_dir, 'model_eia.pth'))
return traced_model
# Deserialize the request body
def input_fn(request_body, request_content_type='application/x-image'):
print('An input_fn that loads a image tensor')
print(request_content_type)
if request_content_type == 'application/x-image':
img = np.array(Image.open(io.BytesIO(request_body)))
elif request_content_type == 'application/x-npy':
img = np.frombuffer(request_body, dtype='uint8').reshape(137, 236)
else:
raise ValueError(
'Requested unsupported ContentType in content_type : ' + request_content_type)
img = 255 - img
img = img[:,:,np.newaxis]
img = np.repeat(img, 3, axis=2)
test_transforms = transforms.Compose([
transforms.ToTensor()
])
img_tensor = test_transforms(img)
return img_tensor
# Predicts on the deserialized object with the model from model_fn()
def predict_fn(input_data, model):
logger.info('Entering the predict_fn function')
start_time = time.time()
input_data = input_data.unsqueeze(0)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.to(device)
model.eval()
input_data = input_data.to(device)
result = {}
with torch.no_grad():
logits = model(input_data)
pred_probs = F.softmax(logits, dim=1).data.squeeze()
outputs = topk(pred_probs, 5)
result['score'] = outputs[0].detach().cpu().numpy()
result['class'] = outputs[1].detach().cpu().numpy()
print("--- Elapsed time: %s secs ---" % (time.time() - start_time))
return result
# Serialize the prediction result into the response content type
def output_fn(pred_output, accept=JSON_CONTENT_TYPE):
return json.dumps({'score': pred_output['score'].tolist(),
'class': pred_output['class'].tolist()}), accept
###Output
Overwriting ./src/inference_eia.py
###Markdown
2. TorchScript Compile (Tracing)
---
To use EI with the PyTorch framework, the model must be compiled to [TorchScript](https://pytorch.org/docs/1.3.1/jit.html); as of August 2020, PyTorch 1.3.1 is supported. TorchScript compiles PyTorch code into a serializable and optimizable model, and because it is independent of the Python interpreter's global interpreter lock (GIL), it can be loaded from languages other than Python and is easier to optimize. There are two ways to convert a model to TorchScript, **tracing** and **scripting**; this hands-on uses tracing. For reference, tracing feeds sample input data through the model and records the flow (feedforward) of that input, while scripting compiles the model by directly analyzing the model code.

Install dependencies
###Code
import sys
!{sys.executable} -m pip install --upgrade pip --trusted-host pypi.org --trusted-host files.pythonhosted.org
!{sys.executable} -m pip install https://download.pytorch.org/whl/cpu/torchvision-0.4.2%2Bcpu-cp36-cp36m-linux_x86_64.whl
!{sys.executable} -m pip install https://s3.amazonaws.com/amazonei-pytorch/torch_eia-1.3.1-cp36-cp36m-manylinux1_x86_64.whl
!{sys.executable} -m pip install graphviz==0.13.2
!{sys.executable} -m pip install mxnet-model-server==1.0.8
!{sys.executable} -m pip install pillow==7.1.0
!{sys.executable} -m pip install sagemaker_containers
!{sys.executable} -m pip install -U sagemaker
###Output
_____no_output_____
###Markdown
Compile
Tracing records the operations that run while a specific input is passed through the model, so we need to feed the model random input data with the same shape as the images.
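For intuition only (not part of the original hands-on), a minimal toy sketch contrasting the two conversion paths; the `Toy` module and shapes are made up for illustration:

```python
import torch

class Toy(torch.nn.Module):
    def forward(self, x):
        return torch.relu(x) + 1.0

m = Toy().eval()
example = torch.rand(1, 3)

traced = torch.jit.trace(m, example)   # records the ops executed for this example input
scripted = torch.jit.script(m)         # compiles the module by analyzing its Python source
print(traced(example).shape, scripted(example).shape)
```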
###Code
import torch, os
from torchvision import models
model_dir = './model'
print("==> model_dir : {}".format(model_dir))
model = models.resnet18(pretrained=True)
last_hidden_units = model.fc.in_features
model.fc = torch.nn.Linear(last_hidden_units, 186)
model.load_state_dict(torch.load(os.path.join(model_dir, 'model.pth')))
import torch
data = torch.rand(1,3,137,236)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.to(device)
input_data = data.to(device)
with torch.jit.optimized_execution(True, {'target_device': 'eia:0'}):
traced_model = torch.jit.trace(model, input_data)
###Output
/local/p4clients/pkgbuild-kCleo/workspace/build/PyTorchECL/PyTorchECL-1.x.548.0/AL2012/DEV.STD.PTHREAD/build/private/src/torch/csrc/jit/eia/eia_interface.h:52: UserWarning: Notice - No last error found
/local/p4clients/pkgbuild-kCleo/workspace/build/PyTorchECL/PyTorchECL-1.x.548.0/AL2012/DEV.STD.PTHREAD/build/private/src/torch/csrc/jit/eia/eia_interface.h:52: UserWarning: Notice - No last error found
###Markdown
Let's run inference in the local environment with the compiled model.
###Code
from src.inference_eia import model_fn, input_fn, predict_fn, output_fn
from PIL import Image
import numpy as np
import json
file_path = 'test_imgs/test_0.jpg'
with open(file_path, mode='rb') as file:
img_byte = bytearray(file.read())
data = input_fn(img_byte)
result = predict_fn(data, traced_model)
print(result)
###Output
An input_fn that loads a image tensor
application/x-image
Entering the predict_fn function
--- Elapsed time: 0.023025035858154297 secs ---
{'score': array([0.62198836, 0.2314413 , 0.04159953, 0.02067479, 0.01897352],
dtype=float32), 'class': array([ 3, 2, 169, 168, 70])}
###Markdown
Serialize the TorchScript model to a file, then compress it as `tar.gz` and copy the file to S3.
###Code
torch.jit.save(traced_model, './model/model_eia.pth')
tar_filename = 'model_eia.tar.gz'
!cd model/ && tar -czvf $tar_filename model_eia.pth
artifacts_dir = 's3://sagemaker-us-east-1-143656149352/pytorch-training-2020-08-16-04-47-36-618/output/'
!aws s3 cp model/$tar_filename $artifacts_dir
###Output
upload: model/model_eia.tar.gz to s3://sagemaker-us-east-1-143656149352/pytorch-training-2020-08-16-04-47-36-618/output/model_eia.tar.gz
###Markdown
3. SageMaker Hosted Endpoint Inference
---
Provisioning the SageMaker-managed deployment cluster takes time, so starting the inference service takes roughly 5-10 minutes.
###Code
import boto3
client = boto3.client('sagemaker')
runtime_client = boto3.client('sagemaker-runtime')
def get_model_path(sm_client, max_results=1, name_contains='pytorch'):
training_job = sm_client.list_training_jobs(MaxResults=max_results,
NameContains=name_contains,
SortBy='CreationTime',
SortOrder='Descending')
training_job_name = training_job['TrainingJobSummaries'][0]['TrainingJobName']
training_job_description = sm_client.describe_training_job(TrainingJobName=training_job_name)
model_path = training_job_description['ModelArtifacts']['S3ModelArtifacts']
return model_path
#model_path = get_model_path(client, max_results=3)
model_path = os.path.join(artifacts_dir, tar_filename)
print(model_path)
endpoint_name = "endpoint-bangali-classifier-eia-{}".format(int(time.time()))
pytorch_model = PyTorchModel(model_data=model_path,
role=role,
entry_point='./src/inference_eia.py',
framework_version='1.3.1',
py_version='py3')
predictor = pytorch_model.deploy(instance_type='ml.c5.large',
initial_instance_count=1,
accelerator_type='ml.eia2.large',
endpoint_name=endpoint_name,
wait=False)
# client = boto3.client('sagemaker')
# waiter = client.get_waiter('endpoint_in_service')
# waiter.wait(EndpointName=endpoint_name)
import boto3
client = boto3.client('sagemaker')
runtime_client = boto3.client('sagemaker-runtime')
endpoint_name = pytorch_model.endpoint_name
client.describe_endpoint(EndpointName = endpoint_name)
###Output
_____no_output_____
###Markdown
Run inference (`ContentType='application/x-image'`).
###Code
with open(file_path, mode='rb') as file:
img_byte = bytearray(file.read())
response = runtime_client.invoke_endpoint(
EndpointName=endpoint_name,
ContentType='application/x-image',
Accept='application/json',
Body=img_byte
)
print(response['Body'].read().decode())
%timeit runtime_client.invoke_endpoint(EndpointName=endpoint_name, ContentType='application/x-image', Accept='application/json', Body=img_byte)
###Output
94.1 ms ± 6.12 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
###Markdown
SageMaker Hosted Endpoint Clean-up
If you are not going to keep using the endpoint, delete it to avoid unnecessary charges. With the SageMaker SDK it can be removed simply via the `delete_endpoint()` method, and it can also be deleted easily from the UI.
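A minimal SDK-level sketch (assuming the `predictor` object created above is still in scope), as an alternative to the boto3 helper defined in the next cell:

```python
# Hedged sketch: delete the endpoint directly through the SageMaker SDK object.
predictor.delete_endpoint()
```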
###Code
def delete_endpoint(client, endpoint_name):
response = client.describe_endpoint_config(EndpointConfigName=endpoint_name)
model_name = response['ProductionVariants'][0]['ModelName']
client.delete_model(ModelName=model_name)
client.delete_endpoint(EndpointName=endpoint_name)
client.delete_endpoint_config(EndpointConfigName=endpoint_name)
print(f'--- Deleted model: {model_name}')
print(f'--- Deleted endpoint: {endpoint_name}')
print(f'--- Deleted endpoint_config: {endpoint_name}')
delete_endpoint(client, endpoint_name)
###Output
_____no_output_____ |
notebooks/ols_baseline.ipynb | ###Markdown
OLS regressions - baseline for Capstone analysis
In this notebook, I perform OLS regressions using systemwide CaBi trips as the dependent variable.
###Code
from util_functions import *
import numpy as np
import pandas as pd
import statsmodels.formula.api as smf
import matplotlib.pyplot as plt
import seaborn as sns; sns.set_style('darkgrid')
import statsmodels.graphics.gofplots as gofplots
%matplotlib inline
set_env_path()
conn, cur = aws_connect()
query = """
SELECT *,
CASE day_of_week WHEN 5 THEN 1 WHEN 6 THEN 1 ELSE 0 END AS weekend_dummy
from final_db"""
df = pd.read_sql(query, con=conn)
df.shape
###Output
_____no_output_____
###Markdown
First specification attempt - theory based
A lot of the variation in daily CaBi rides can be explained by weather. I decided on the following specification based on trial and error and intuition. For our ML analysis, we will want to look into ways to perform feature selection algorithmically (I'm looking into this right now). That said, the variables I've chosen are fairly arbitrary and could probably be improved, but we shouldn't spend a huge amount of time on baseline stuff. I made sure to avoid multicollinearity: for example, high and low temperature, population and date, and all of the CaBi measures are highly correlated with each other.
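As a quick, hedged check on such collinearity concerns, a variance inflation factor (VIF) sketch with statsmodels (it assumes `df` contains these columns, as in the regressions below, with no missing values):

```python
# Hedged VIF sketch; rule of thumb: VIF above ~5-10 flags problematic collinearity.
import statsmodels.api as sm
from statsmodels.stats.outliers_influence import variance_inflation_factor

X = sm.add_constant(df[['daylight_hours', 'apparenttemperaturehigh', 'rain', 'snow', 'nats_games']])
vifs = {col: variance_inflation_factor(X.values, i) for i, col in enumerate(X.columns)}
print(vifs)
```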
###Code
def fitOLS(equation, cov='nonrobust'):
'''
This function uses statsmodels.ols to estimate OLS regressions using R/patsy-style syntax.
Args:
equation (str): A patsy-style regression equation.
e.g. 'cabi_trips ~ apparenttemperaturehigh + daylight_hours + rain'
cov (str): A specific covariance matrix type. Default is 'nonrobust'.
HC0-HC3 available for heteroskedasticity-robust standard errors.
Returns:
results: A RegressionResults object which summarizes the fit of a linear regression model.
'''
model = smf.ols('{}'.format(equation), df)
results = model.fit(cov_type='{}'.format(cov), use_t=True)
return results
# Using the new weekend_dummy for demonstrative purposes
results = fitOLS('cabi_trips ~ year + daylight_hours + '
'apparenttemperaturehigh + rain + snow + '
'nats_games + weekend_dummy', cov='HC0')
results.summary()
# Fit the model and print results
# I wanted to use dc_pop instead of year (they're highly correlated)
# But there are 0s in dc_pop that throw off the analysis
results = fitOLS('cabi_trips ~ year + daylight_hours + '
'apparenttemperaturehigh + rain + snow + '
'nats_games + C(day_of_week)', cov='HC0')
results.summary()
###Output
_____no_output_____
###Markdown
Our results look good. The R-squared tells us that about 74% of the variance in cabi_trips is explained by the variance in the explanatory variables. The low p-values indicate that the results we found are all statistically significant. Each of the coefficient estimates indicates the average change in daily CaBi trips associated with a one-unit increase in the explanatory variable, all else held equal. For dummy variables, this can be interpreted as an on-off switch, so on days when it snows, we should expect 1550 fewer rides. There are other things to worry about, though. Statistical programming packages often include diagnostic plots by default, but statsmodels doesn't. I explain three of these plots below.
###Code
'''Homoskedasticity is when the variance/scatter/spread of the residuals is
constant for all values of the fitted values. It is an assumption under OLS.
Heteroskedasticity is when the variance of the residuals changes as the fitted values change.
If not addressed, it can lead to biased estimators.
If our residuals were heteroskedastic, we would expect a scatter plot to form a funnel shape,
and a regression line to have a slope.
'''
# Regplot fits a regression line to a scatterplot
plt.title('Residuals vs Fitted Values')
sns.regplot(results.fittedvalues, results.resid)
plt.xlabel('Y-hat')
plt.ylabel('Residuals')
plt.show()
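
# A hedged addition (not in the original notebook): complement the visual check
# above with a formal Breusch-Pagan test for heteroskedasticity.
from statsmodels.stats.diagnostic import het_breuschpagan
bp_lm, bp_lm_pval, bp_f, bp_f_pval = het_breuschpagan(results.resid, results.model.exog)
print('Breusch-Pagan LM p-value:', bp_lm_pval)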
###Output
_____no_output_____
###Markdown
It doesn't look like there's heteroskedasticity, and the regression line is flat. However, given our sample size and the significance of our variables, it couldn't hurt to specify heteroskedasticity-robust standard errors (the cov='HC0' argument in fitOLS). In practice I rarely see standard errors that aren't robust to either heteroskedasticity or clustering. (If we wanted to cluster, we would have to choose variables to cluster on, and I haven't looked into that for our data.)
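For reference, a hedged sketch of how cluster-robust standard errors could be requested in statsmodels if we did pick a clustering variable (clustering on `year` here is an arbitrary illustration, not a recommendation):

```python
# Hypothetical clustering on year, purely to illustrate the statsmodels API.
clustered = smf.ols('cabi_trips ~ daylight_hours + apparenttemperaturehigh + rain + snow', df) \
               .fit(cov_type='cluster', cov_kwds={'groups': df['year']}, use_t=True)
print(clustered.bse)
```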
###Code
'''Normality of the residuals with mean 0 is another assumption under OLS.
If residuals are nonnormal and not approximately centered at 0, the model is probably misspecified.
The first chart is a kernel density estimation and the second is a Q-Q plot.
Q-Q plots compare two datasets to see whether or not they come from the same distribution.
If they do, the points should form a straight line.
Here, we have a Normal Q-Q plot, where our residuals are being compared against a normal distribution.
'''
# How are our residuals distributed?
plt.title('Density Plot of Residuals')
sns.kdeplot(results.resid)
plt.show()
# How close are our residuals to normal?
fig = gofplots.qqplot(results.resid, line='s')
plt.title("Normal Q-Q plot")
plt.show()
###Output
_____no_output_____
###Markdown
The residuals appear to be approximately centered around 0. The third chart shows that our residuals are close to normal, but at the extreme ends of the distribution we move farther from normality.

Second specification attempt - dockless?
Next, I add dless_trips_all to the specification to see if there's any effect.
###Code
results = fitOLS('cabi_trips ~ year + daylight_hours +'
'apparenttemperaturehigh + rain + snow + '
'nats_games + C(day_of_week) + dless_trips_all', cov='HC0')
results.summary()
###Output
_____no_output_____
###Markdown
R squared is slightly higher. dless_trips_all is statistically significant, but its coefficient is small. An increase of 100 dockless trips is associated with 33 fewer CaBi trips. Its upper bound is also fairly close to 0. For the sake of brevity I don't include the diagnostic plots here because they don't change much after adding just one independent variable.

Third specification attempt - transformations
Next, I try taking the natural log of certain variables. When you include a logged variable, its interpretation changes to percentage change instead of unit change. I get into specifics in the cell after the regression results. Logging variables is also very good for dealing with outliers. OLS is sensitive to outliers - we saw this demonstrated in class when we removed one observation from the IQ ~ TVhours regression. Logging a variable with a long right tail will often make it approximately normal, which is better for OLS.
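Since log(0) is undefined, the next cell drops the four zero-count days. A hedged alternative sketch would use log1p, which maps 0 to 0 and keeps those observations (shown only for comparison, not used below):

```python
# Hypothetical alternative: np.log1p handles zero-count days without dropping them.
results_log1p = fitOLS('np.log1p(cabi_trips) ~ year + daylight_hours + '
                       'np.log1p(apparenttemperaturehigh) + rain + snow + '
                       'nats_games + C(day_of_week) + dless_trips_all', cov='HC0')
```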
###Code
# I ran into errors trying to log cabi_trips because the log of 0 is undefined.
# Ended up having to drop the four observations where cabi_trips==0
df = df[df.cabi_trips != 0]
df.shape
results = fitOLS('np.log(cabi_trips) ~ year + daylight_hours + '
'np.log(apparenttemperaturehigh) + rain + snow + nats_games + C(day_of_week) + '
'dless_trips_all', cov='HC0')
results.summary()
###Output
_____no_output_____
###Markdown
Since we have some logged variables, the interpretation of the coefficients changes. Before, the interpretation of apparenttemperaturehigh's effect on cabi_trips was basically "Holding all else equal, how many more CaBi rides should we see if the feels-like temperature is one degree (F) higher?" Now that both are logged, the coefficient of 0.8136 means "Holding all else equal, if feels-like temperature rises by 1%, we expect there to be a 0.81% increase in CaBi rides." I explain the interpretation of the dummy coefficients below.
###Code
# When you have a logged dependent variable, be careful with dummies
# The effect is asymmetrical!
# more: https://davegiles.blogspot.com/2011/03/dummies-for-dummies.html
print('If rain switches from 0 to 1, the % impact on cabi_trips is ', 100*(np.exp(-0.2168) - 1))
print('If rain switches from 1 to 0, the % impact on cabi_trips is ', 100*(np.exp(0.2168) - 1))
print('If snow switches from 0 to 1, the % impact on cabi_trips is ', 100*(np.exp(-0.3684) - 1))
print('If snow switches from 1 to 0, the % impact on cabi_trips is ', 100*(np.exp(0.3684) - 1))
###Output
If rain switches from 0 to 1, the % impact on cabi_trips is -19.490902860146488
If rain switches from 1 to 0, the % impact on cabi_trips is 24.209565816256216
If snow switches from 0 to 1, the % impact on cabi_trips is -30.815960982195236
If snow switches from 1 to 0, the % impact on cabi_trips is 44.542009139224504
###Markdown
All in all, this third specification isn't that appealing. nats_games is no longer significant, the R squared is lower, and the dummy variables don't make as much intuitive sense. Looking at the charts below you can see that things look worse than before. This particular specification is no good.
###Code
# Heteroskedasticity?
plt.title('Residuals vs Fitted Values')
sns.regplot(results.fittedvalues, results.resid)
plt.xlabel('Y-hat')
plt.ylabel('Residuals')
plt.show()
# How are our residuals distributed?
plt.title('Density Plot of Residuals')
sns.kdeplot(results.resid)
plt.show()
# How close are our residuals to normality?
fig = gofplots.qqplot(results.resid, line='s')
plt.title("Normal Q-Q plot")
plt.show()
###Output
_____no_output_____ |
FINAL_Scrapping.ipynb | ###Markdown
Development
###Code
url = 'https://www.conestogac.on.ca/fulltime/3d-computer-animation'
driver = get_driver(url)
df = pd.read_csv("dataset/all_program_names.csv")
count = 3
availabilities = {}
def loop(availabilities, count):
try:
url_list = df.ProgramLink
for url in url_list[count-3:]:
driver.get(url)
time.sleep(2)
print(count, ")", url)
availability_temp = get_availability(driver)
if(availability_temp):
availabilities[url] = availability_temp
count += 1
else:
return [availabilities, count]
except:
print("----")
return [availabilities, count]
for i in range(246):
availabilities, count = loop(availabilities, count)
df_avaibility = pd.DataFrame(columns=["URL", "DOMESTIC", "INTERNATIONAL"])
for url, availability in availabilities.items():
row = {"URL": url}
domestic = []
for intake, campus, status in zip(availability["domestic"]["intake"], availability["domestic"]["campus"], availability["domestic"]["status"]):
domestic.append(", ".join([intake, campus, status]))
row["DOMESTIC"] = " | ".join(domestic)
international = []
for intake, campus, status in zip(availability["international"]["intake"], availability["international"]["campus"], availability["international"]["status"]):
international.append(", ".join([intake, campus, status]))
row["INTERNATIONAL"] = " | ".join(international)
df_avaibility = df_avaibility.append(row, ignore_index=True)
df_avaibility
df_avaibility.to_csv("dataset/fasnshwe_careers.csv", index=False)
!shutdown -s
for progress in range(10):
sys.stdout.write("Download progress: %d%% \r" % (progress))
time.sleep(0.8)
sys.stdout.flush()
url = 'https://www.conestogac.on.ca/fulltime/autism-and-behavioural-science'
driver.get(url)
def get_availability(driver):
availability = {"domestic": {"intake": [], "campus": [], "status": []}, "international": {"intake": [], "campus": [], "status": []}}
button = find_element_n(driver, xpath_="//button[@class='btn bg-primary-dropdown text-uppercase']")
if(button):
button.click()
time.sleep(1.5)
div = find_element_n(driver, xpath_="//div[@class='px-0 dropdown-menu bg-primary-dropdown dropdown-menu-right mt-0 show']")
children = None
if(div):
children = find_elements_n(div, xpath_='./*')
a_list = None
if(children):
a_list = children[:-1]
test = []
if(a_list):
for a in a_list:
div = find_element_n(a, xpath_="//div[@class='row mx-auto']")
if(div):
div_list = find_elements_n(div, class_='row')
if(div_list):
intake_campus = div_list[0].text.strip()
intake = intake_campus.split("|")[0].strip()
campus = intake_campus.split("|")[1].strip()
status = div_list[1].text.strip()
domestic_status = status.split("\n")[0].strip()
international_status = None
if("\n" in status):
international_status = status.split("\n")[1].strip()
availability["domestic"]["intake"].append(intake)
availability["domestic"]["campus"].append(campus)
availability["domestic"]["status"].append(domestic_status)
if(international_status):
availability["international"]["intake"].append(intake)
availability["international"]["campus"].append(campus)
availability["international"]["status"].append(international_status)
return availability
get_availability(driver)
###Output
_____no_output_____
###Markdown
import
###Code
import sqlite3
import sys
from selenium import webdriver
import time
from os import path
from tqdm import tqdm
import os
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.service import Service
import time
from tqdm import tqdm
import pandas as pd
import numpy as np
import string
import random
from bs4 import BeautifulSoup
###Output
_____no_output_____
###Markdown
Browser setup
###Code
def chrome():
service = Service("../drivers/chromedriver")
driver = webdriver.Chrome(service = service)
return driver
def opera():
# need to change, srd
DRIVER_PATH = "../drivers/operadriver"
driver = webdriver.Opera(executable_path=DRIVER_PATH)
return driver
def firefox():
# need to change, srd
DRIVER_PATH = "../drivers/geckodriver"
driver = webdriver.Firefox(executable_path=DRIVER_PATH)
return driver
def get_driver(url):
driver = chrome()
# driver = opera()
# driver = firefox()
driver.get(url)
return driver
###Output
_____no_output_____
###Markdown
Element Search
###Code
def find_element_n(root, xpath_=None, tag_=None, id_=None, class_=None, exception=False):
tag = None
try:
if(xpath_):
tag = root.find_element(by=By.XPATH, value=xpath_)
elif(tag_):
tag = root.find_element(by=By.TAG_NAME, value=tag_)
elif(id_):
tag = root.find_element(by=By.ID, value=id_)
elif(class_):
tag = root.find_element(by=By.CLASS_NAME, value=class_)
return tag
except:
pass
if(exception):
raise Exception("\n\n\n>>> Element not found <<<\n\n\n")
else:
return False
def find_elements_n(root, xpath_=None, tag_=None, id_=None, class_=None, exception=False):
tags = None
try:
if(xpath_):
tags = root.find_elements(by=By.XPATH, value=xpath_)
elif(tag_):
tags = root.find_elements(by=By.TAG_NAME, value=tag_)
elif(id_):
tags = root.find_elements(by=By.ID, value=id_)
elif(class_):
tags = root.find_elements(by=By.CLASS_NAME, value=class_)
return tags
except:
pass
if(exception):
raise Exception("\n\n\n>>> Element not found <<<\n\n\n")
else:
return False
def find_element(root, xpath_=None, tag_=None, id_=None, class_=None, init_wait = 0, wait = 1, limit=3.5, exception=False):
"""
{init_wait} : sleep for given time before element existence check
{wait} : sleep every time when through except except block
{exception} : whether to through error{True} or return False{False}
"""
time.sleep(init_wait)
while(limit > 0):
tag = None
try:
if(xpath_):
tag = root.find_element(by=By.XPATH, value=xpath_)
elif(tag_):
tag = root.find_element(by=By.TAG_NAME, value=tag_)
elif(id_):
tag = root.find_element(by=By.ID, value=id_)
elif(class_):
tag = root.find_element(by=By.CLASS_NAME, value=class_)
return tag
except:
print("----", limit, "----")
time.sleep(wait)
limit -= wait
if(exception):
raise Exception("\n\n\n>>> Element not found <<<\n\n\n")
else:
return False
def find_elements(root, xpath_=None, tag_=None, id_=None, class_=None, init_wait = 0, wait = 1, limit=3.5, exception=False):
"""
{init_wait} : sleep for given time before element existence check
{wait} : sleep every time when through except except block
{exception} : whether to through error{True} or return False{False}
"""
time.sleep(init_wait)
while(limit > 0):
tags = None
try:
if(xpath_):
tags = root.find_elements(by=By.XPATH, value=xpath_)
elif(tag_):
tags = root.find_elements(by=By.TAG_NAME, value=tag_)
elif(id_):
tags = root.find_elements(by=By.ID, value=id_)
elif(class_):
tags = root.find_elements(by=By.CLASS_NAME, value=class_)
return tags
except:
print("----", limit, "----")
time.sleep(wait)
limit -= wait
if(exception):
raise Exception("\n\n\n>>> Element not found <<<\n\n\n")
else:
return False
###Output
_____no_output_____
###Markdown
All Program Search
###Code
url = "https://www.conestogac.on.ca/fulltime"
driver = get_driver(url)
def get_all_program_names_list(driver):
ul = find_element_n(driver, xpath_="//ul[@class='list-unstyled mt-md-5']")
li_list = find_elements_n(ul, tag_='li')
program_names = []
program_links = []
for li in tqdm(li_list):
a = find_element(li, tag_='a')
link = a.get_attribute("href").strip()
span = find_element(a, tag_='span')
name = span.text.strip()
program_names.append(name)
program_links.append(link)
    if(program_names or program_links):
programs = {'ProgramName': program_names, 'ProgramLink': program_links}
return programs
else:
raise Exception("---ERROR IN FETCHING PROGRAM NAME---")
return False
programs = get_all_program_names_list(driver)
df = pd.DataFrame(programs)
df.to_csv('dataset/all_program_names.csv')
###Output
_____no_output_____
###Markdown
Functions

Program Name
###Code
def get_program_name(driver):
div = find_element_n(driver, xpath_="//div[@class='col-12 text-white text-shadow']")
h1 = find_element_n(div, tag_='h1')
name = ""
if(h1):
name = h1.text.strip()
return name
get_program_name(driver)
###Output
_____no_output_____
###Markdown
overview
###Code
def get_overview(driver):
div = find_element_n(driver, xpath_="//div[@class='pr-lg-3']")
children = find_elements_n(div, xpath_='./*')
p = None
for tag in children:
if(tag.tag_name == "p"):
p = tag
break
overview = ""
if(p):
overview = p.text.strip()
return overview
get_overview(driver)
###Output
_____no_output_____
###Markdown
category
###Code
def get_category(driver):
div = find_element_n(driver, xpath_="//div[@class='my-5 border pl-3 pr-3']")
children = find_elements_n(div, xpath_='./*')
p = None
for tag in children:
if(tag.tag_name == "p"):
p = tag
break
category = ""
if(p):
a = find_element_n(p, tag_='a')
if(a):
category = a.text.strip()
return category
get_category(driver)
###Output
_____no_output_____
###Markdown
code
###Code
def get_code(driver):
div_main = find_element_n(driver, xpath_="//div[@class='container mt-5 datasection bg-white']")
div = find_element_n(div_main, tag_='div')
sub_div = find_element_n(div, class_='col-lg-5')
sub_div_1 = find_element_n(sub_div, tag_='div')
sub_div_2 = find_element_n(sub_div_1, class_='row')
div_list = find_elements_n(sub_div_2, tag_='div')
code = ""
div_credential = div_list[0]
if(div_credential):
strong = find_element_n(div_credential, tag_='strong')
if(strong):
strong_len = len(strong.text)
code = div_credential.text[strong_len:].strip()
return code
get_code(driver)
###Output
_____no_output_____
###Markdown
availability (srd working)
###Code
def get_availability(driver):
availability = {"domestic": {"intake": [], "campus": [], "status": []}, "international": {"intake": [], "campus": [], "status": []}}
button = find_element_n(driver, xpath_="//button[@class='btn bg-primary-dropdown text-uppercase']")
if(button):
button.click()
time.sleep(1.5)
div = find_element_n(driver, xpath_="//div[@class='px-0 dropdown-menu bg-primary-dropdown dropdown-menu-right mt-0 show']")
print(div)
children = None
if(div):
children = find_elements_n(div, xpath_='./*')
a_list = None
if(children):
a_list = children[:-1]
test = []
if(a_list):
for a in a_list:
div = find_element_n(a, xpath_="//div[@class='row mx-auto']")
if(div):
div_list = find_elements_n(div, class_='row')
if(div_list):
intake_campus = div_list[0].text.strip()
intake = intake_campus.split("|")[0].strip()
campus = intake_campus.split("|")[1].strip()
status = div_list[1].text.strip()
domestic_status = status.split("\n")[0].strip()
international_status = status.split("\n")[1].strip()
availability["domestic"]["intake"].append(intake)
availability["domestic"]["campus"].append(campus)
availability["domestic"]["status"].append(domestic_status)
availability["international"]["intake"].append(intake)
availability["international"]["campus"].append(campus)
availability["international"]["status"].append(international_status)
return availability
get_availability(driver)
###Output
<selenium.webdriver.remote.webelement.WebElement (session="58af2cd73cc3bc55f9a10472352d7473", element="6b6e3bd8-d35f-4140-816b-c68c4379a33a")>
###Markdown
credential
###Code
def get_credential(driver):
div_main = find_element_n(driver, xpath_="//div[@class='container mt-5 datasection bg-white']")
div = find_element_n(div_main, tag_='div')
sub_div = find_element_n(div, class_='col-lg-5')
sub_div_1 = find_element_n(sub_div, tag_='div')
sub_div_2 = find_element_n(sub_div_1, class_='row')
div_list = find_elements_n(sub_div_2, tag_='div')
credential = ""
div_credential = div_list[5]
if(div_credential):
strong = find_element_n(div_credential, tag_='strong')
if(strong):
strong_len = len(strong.text)
credential = div_credential.text[strong_len:].strip()
return credential
get_credential(driver)
###Output
_____no_output_____
###Markdown
Program Coordinator
###Code
def get_coordinator(driver):
div = find_element_n(driver, xpath_="//div[@class='my-5 border pl-3 pr-3']")
children = find_elements_n(div, xpath_='./*')
ul = None
for tag in children:
if(tag.tag_name == "ul"):
ul = tag
break
li = None
if(ul):
li = find_element_n(ul, tag_='li')
span = None
if(li):
span = find_element_n(li, tag_='span')
coordinator = ""
if(span):
coordinator = span.text.strip()
return coordinator
get_coordinator(driver)
###Output
_____no_output_____
###Markdown
delivery
###Code
def get_delivery(driver):
div_main = find_element_n(driver, xpath_="//div[@class='container mt-5 datasection bg-white']")
div = find_element_n(div_main, tag_='div')
sub_div = find_element_n(div, class_='col-lg-5')
sub_div_1 = find_element_n(sub_div, tag_='div')
sub_div_2 = find_element_n(sub_div_1, class_='row')
div_list = find_elements_n(sub_div_2, tag_='div')
delivery = ""
div_delivery = div_list[1]
if(div_delivery):
strong = find_element_n(div_delivery, tag_='strong')
if(strong):
strong_len = len(strong.text)
delivery = div_delivery.text[strong_len:].strip()
return delivery
get_delivery(driver)
###Output
_____no_output_____
###Markdown
Tuition Fees (srd working)
###Code
def get_tution_fees(driver):
fees = {"domestic_fees": "", "international_fees": ""}
div = find_element_n(driver, class_='fees-block', exception=False)
div_domestic = find_element_n(div, class_='canadian-cost-block', exception=False)
div_international = find_element_n(div, class_='international-cost-block', exception=False)
span_domestic = find_element_n(div_domestic, class_='fees-cost-dollar', exception=False)
span_international = find_element_n(div_international, class_='fees-cost-dollar', exception=False)
if(span_domestic):
fees["domestic_fees"] = span_domestic.text.strip()
if(span_international):
fees["international_fees"] = span_international.text.strip()
return fees
get_tution_fees(driver)
###Output
_____no_output_____
###Markdown
Related Programs (srd working)
###Code
def get_related_programs(driver):
related_programs = {}
main_div = find_element_n(driver, class_='field--name-field-related-programs', exception=False)
div = find_element_n(main_div, class_='field__items', exception=False)
if(div):
sub_divs = find_elements_n(div, class_='field__item')
if(sub_divs):
for sub_div in sub_divs:
a = sub_div.find_element(By.TAG_NAME, 'a')
if(a):
name = a.text.strip()
link = a.get_attribute("href").strip()
related_programs[name] = link
return related_programs
get_related_programs(driver)
###Output
_____no_output_____
###Markdown
Other Functions url_creater_for_subjects
###Code
def url_creater_for_subjects(url):
url_list = url.split("/")
url_list = url_list[:-1]
url_list.append("courses-next")
url = "/".join(url_list)
return url
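# Hedged usage example (illustrative URL): the helper swaps the last path
# segment for "courses-next".
print(url_creater_for_subjects('https://www.conestogac.on.ca/fulltime/3d-computer-animation'))
# -> https://www.conestogac.on.ca/fulltime/courses-next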
###Output
_____no_output_____
###Markdown
Link joiner
###Code
def link_joiner(url):
base_url = "https://www.fanshawec.ca" + url
return base_url
###Output
_____no_output_____
###Markdown
Data Scraping
###Code
program_details = {
"URL": None,
"CollegeName": None,
"ProgramName": None,
"Overview": None,
"Category": None,
"Code" : None,
"Availability" : None,
"Credential" : None,
"RelatedPrograms" : None,
"ProgramCoordinator" : None,
"Delivery" : None,
"TuitionFees" : None,
}
def get_program_details(program_details, college_name, url):
driver.get(url)
time.sleep(0.3)
program_details["URL"] = url.strip()
program_details["CollegeName"] = college_name.strip()
program_details["ProgramName"] = get_program_name(driver)
program_details["Overview"] = get_overview(driver)
program_details["Category"] = get_category(driver)
program_details["Code"] = get_code(driver)
program_details["Availability"] = get_availability(driver)
program_details["Credential"] = get_credential(driver)
program_details["RelatedPrograms"] = get_related_programs(driver)
program_details["ProgramCoordinator"] = get_coordinator(driver)
program_details["Delivery"] = get_delivery(driver)
program_details["TuitionFees"] = get_tution_fees(driver)
return program_details
df_all_programs = pd.read_csv('./dataset/all_program_names.csv')
count = 3
programs = {}
for url in df_all_programs.ProgramLink[count-3:]:
print(str(count) + ') '+ url)
programs[url] = get_program_details(program_details.copy(), "Fanshawe", url)
count += 1
###Output
235) https://www.fanshawec.ca/programs/smm1-sport-and-event-marketing/next
236) https://www.fanshawec.ca/programs/scm2-supply-chain-management-logistics-co-op/next
237) https://www.fanshawec.ca/programs/tes2-teaching-english-speakers-other-languages-and-intercultural-competence/next
238) https://www.fanshawec.ca/programs/tss2-technical-systems-analysis/next
239) https://www.fanshawec.ca/programs/tdm2s-tool-and-die-maker-block-and-day-release-apprenticeship/next
240) https://www.fanshawec.ca/programs/ttc6-tourism-travel/next
241) https://www.fanshawec.ca/programs/tts1-tourism-travel-studies/next
242) https://www.fanshawec.ca/programs/tct4-truck-and-coach-technician-block-release-apprenticeship/next
243) https://www.fanshawec.ca/programs/tct3-truck-and-coach-technician-day-release-apprenticeship/next
244) https://www.fanshawec.ca/programs/vee1-visual-effects-and-editing-contemporary-media/next
245) https://www.fanshawec.ca/programs/iwd2-web-development-and-internet-applications/next
246) https://www.fanshawec.ca/programs/wft1-welding-and-fabrication-technician-co-op/next
247) https://www.fanshawec.ca/programs/wtq1j-welding-techniques/next
###Markdown
Store to DataFrame
###Code
columns = ["ID", "URL", "College", "Program", "Overview", "Category", "Code", "Domestic Availability", "International Availability", "Credential", "Related Programs", "Program Coordinator", "Delivery", "Domestic Fees", "International Fees"]
columns
L = len(programs.keys())
program_id_list = {}
letters = string.ascii_uppercase + string.digits
for key in programs.keys():
while(True):
temp_id = "FAN"+''.join(random.choice(letters) for i in range(4))
if(temp_id not in program_id_list):
program_id_list[temp_id] = key
break
def get_value_from_string(string):
if(string):
return string
return np.NaN
def make_string_availability(availability):
intake = availability["intake"]
campus = availability["campus"]
status = availability["status"]
temp = [intake_ + ", " + campus_ + ", " + status_ for intake_, campus_, status_ in zip(intake, campus, status)]
availability = " | ".join(temp)
return availability
df = pd.DataFrame(columns=columns)
for id_, url in program_id_list.items():
row = {}
row = {"ID": id_, "URL": url}
program = programs[url]
row["College"] = program["CollegeName"]
row["Program"] = get_value_from_string(program["ProgramName"])
row["Overview"] = get_value_from_string(program["Overview"])
row["Category"] = get_value_from_string(program["Category"])
row["Code"] = get_value_from_string(program["Code"])
availability_domestic_temp = program["Availability"]["domestic"]
availability_domestic_temp = make_string_availability(availability_domestic_temp)
row["Domestic Availability"] = get_value_from_string(availability_domestic_temp)
availability_international_temp = program["Availability"]["international"]
availability_international_temp = make_string_availability(availability_international_temp)
row["International Availability"] = get_value_from_string(availability_international_temp)
row["Credential"] = get_value_from_string(program["Credential"])
related_temp = program["RelatedPrograms"].keys()
related_temp = ", ".join(related_temp)
row["Related Programs"] = get_value_from_string(related_temp)
row["Program Coordinator"] = get_value_from_string(program["ProgramCoordinator"])
delivery_temp = program["Delivery"]
delivery_temp = ", ".join(delivery_temp.split("\n"))
row["Delivery"] = get_value_from_string(delivery_temp)
row["Domestic Fees"] = get_value_from_string(program["TuitionFees"]["domestic_fees"])
row["International Fees"] = get_value_from_string(program["TuitionFees"]["international_fees"])
df = df.append(row, ignore_index=True)
df
###Output
_____no_output_____
###Markdown
Create CSV file
###Code
df.to_csv("dataset//fanshawe_dataset.csv", index=False)
###Output
_____no_output_____
###Markdown
Careers
###Code
def get_careers(driver):
a = driver.find_element(by=By.CLASS_NAME, value='details-title')
if(a):
a.click()
time.sleep(2)
source = driver.page_source
soup = BeautifulSoup(source, 'lxml')
div = soup.find('details', class_='collapse-processed')
p_list = div.find_all("p")
careers = []
for p in p_list:
career = p.find("strong")
if(career):
careers.append(career.text.strip())
return careers
get_careers(driver)
count = 3
career_list = {}
key_values = list(program_id_list.items())
for id_, url in key_values[count-3:]:
driver.get(url)
time.sleep(0.3)
print(count, ")", url)
careers = get_careers(driver)
career_list[id_] = ", ".join(careers)
count += 1
def loop(career_list, count):
try:
key_values = list(program_id_list.items())
for id_, url in key_values[count-3:]:
driver.get(url)
time.sleep(2)
print(count, ")", url)
careers = get_careers(driver)
career_list[id_] = ", ".join(careers)
count += 1
except:
print("----")
return [career_list, count]
for i in range(260):
career_list, count = loop(career_list, count)
df_careers = pd.DataFrame(columns=["ID", "Careers"])
for id_, careers in career_list.items():
row = {"ID": id_, "Careers": careers.strip(' ,')}
df_careers = df_careers.append(row, ignore_index=True)
df_careers.to_csv("dataset/fasnshwe_careers.csv", index=False)
###Output
_____no_output_____ |
Project 2 - Online Shoppers Intent/Online Shoppers Intention Exercise.ipynb | ###Markdown
Online shopping intention analysis
###Code
import numpy as np # linear algebra
import pandas as pd # data processing
import matplotlib.pyplot as plt #PLOTTING
import seaborn as sns #plotting
import plotly as py #plotting
import plotly.graph_objs as go #plotting
###loading data
base_url = 'https://raw.githubusercontent.com/onur-duman/OnlineShoppersIntention-EDA-Classification-Clustering/main/online_shoppers_intention.csv'
df = pd.read_csv(base_url)
df.head()
## looking at missing values
missing = df.isnull().sum()
print(missing)
## since no value is empty we can skip this part df.fillna(0, inplace = True)
### lets take the product related bounce rate
x = df.iloc[:,[5,6]].values
x.shape
# let's apply elbow method to check the number of clusters
from sklearn.cluster import KMeans
wcss = []
for i in range(1, 11):
km = KMeans(n_clusters = i,
init = 'k-means++',
max_iter = 300,
n_init = 10,
random_state = 0,
algorithm = 'full',
tol = 0.001)
km.fit(x)
labels = km.labels_
wcss.append(km.inertia_)
plt.rcParams['figure.figsize'] = (13, 7)
plt.plot(range(1, 11), wcss)
plt.grid()
plt.tight_layout()
plt.title('The Elbow Method', fontsize = 20)
plt.xlabel('No. of Clusters')
plt.ylabel('wcss')
plt.show()
km = KMeans(n_clusters = 2, init = 'k-means++', max_iter = 300, n_init = 10, random_state = 0)
# get predicted cluster index for each sample: 0, 1, 2
y_means = km.fit_predict(x)
plt.scatter(x[y_means == 0, 0], x[y_means == 0, 1], s = 50, c = 'yellow', label = 'Uninterested Customers')
plt.scatter(x[y_means == 1, 0], x[y_means == 1, 1], s = 50, c = 'pink', label = 'Target Customers')
plt.scatter(km.cluster_centers_[:,0], km.cluster_centers_[:, 1], s = 50, c = 'blue' , label = 'centeroid')
plt.title('ProductRelated Duration vs Bounce Rate', fontsize = 20)
plt.grid()
plt.xlabel('ProductRelated Duration')
plt.ylabel('Bounce Rates')
plt.legend()
plt.show()
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
labels_true = le.fit_transform(df['Revenue'])
# get predicted clustering result label
labels_pred = y_means
# print adjusted rand index, which measures the similarity of the two assignments
from sklearn import metrics
score = metrics.adjusted_rand_score(labels_true, labels_pred)
print("Adjusted rand index: ")
print(score)
# print confusion matrix
#cm = metrics.plot_confusion_matrix(None, labels_true, labels_pred)
#print(cm)
import scikitplot as skplt
plt_1 = skplt.metrics.plot_confusion_matrix(labels_true, labels_pred, normalize=False)
plt_2 = skplt.metrics.plot_confusion_matrix(labels_true, labels_pred, normalize=True)
###Output
Adjusted rand index:
0.08344649929017146
|
runge_kutta_mv.ipynb | ###Markdown
Create a notebook to perform Runge-Kutta integration for multiple coupled variables
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
###Output
_____no_output_____
###Markdown
Define our coupled derivatives to integrate
###Code
def dydx(x,y):
# set the derivatives
# our equation is d^2x/dx^2 = -y
# so we can write
# dydx = z
# dzdx = -y
# we will set y = y[0] (the function y)
# we will set z = y[1] (the function x)
# declare an array
y_derivs = np.zeros(2) # array of functions
# set dydx = z
y_derivs[0] = y[1] # dydx we put in y_derivs[1]
# set dydx = -y
y_derivs[1] = -1*y[0]
# here we have to return an array
return y_derivs
###Output
_____no_output_____
###Markdown
Define the 4th order RK method
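For reference, the update implemented by the core routine below is the standard fourth-order Runge-Kutta step:

$$k_1 = h\,f(x_i, y_i),\qquad k_2 = h\,f\!\left(x_i + \tfrac{h}{2},\; y_i + \tfrac{k_1}{2}\right),$$
$$k_3 = h\,f\!\left(x_i + \tfrac{h}{2},\; y_i + \tfrac{k_2}{2}\right),\qquad k_4 = h\,f\!\left(x_i + h,\; y_i + k_3\right),$$
$$y_{i+1} = y_i + \tfrac{1}{6}\left(k_1 + 2k_2 + 2k_3 + k_4\right).$$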
###Code
# now we'll operate on all the elements at the same time
def rk4_mv_core(dydx,xi,yi,nv,h): #dydx is function we just wrote,
# xi is value of x at step i
# xi changes by value of h
# nv is number of variables
# yi is the array
# declare k? arrays
k1 = np.zeros(nv) # each k is array of two emelents one that
k2 = np.zeros(nv) # corresponds to y and one that corresponds to z
k3 = np.zeros(nv)
k4 = np.zeros(nv)
# define x at 1/2 step
x_ipoh = xi + 0.5*h
# define x at 1 step
x_ipo = xi + h
# declare a temp y array, will contain estimated values of y and z
# ad different points of steps
y_temp = np.zeros(nv)
# get k1 values
y_derivs = dydx(xi,yi)
k1[:] = h*y_derivs[:]
# get k2 values
y_temp[:] = yi[:] + 0.5*k1[:] # initial values on left side + .5 of k1, we get to midpoint
y_derivs = dydx(x_ipoh,y_temp) # estimate of y and z at half step and
k2[:] = h*y_derivs[:] # recompute derivatives at half step
# get k3 values
y_temp[:] = yi[:] + 0.5*k2[:] # different step, with different derivative
y_derivs = dydx(x_ipoh,y_temp) # retaking half step and recomputing derivatives
k3[:] = h*y_derivs[:] #
# get k4 values
    y_temp[:] = yi[:] + k3[:] # taking full step for k4 using recomputed
    y_derivs = dydx(x_ipo,y_temp) # derivatives from above, evaluated at the full step
k4[:] = h*y_derivs[:]
# advance y by a step h
yipo = yi + (k1 + 2*k2 + 2*k3 + k4)/6.
return yipo
###Output
_____no_output_____
###Markdown
Define an adaptive step size driver for RK4
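The driver estimates the local error by comparing one full step of size $h$ against two half steps, and shrinks the step whenever the largest error component exceeds the tolerance, roughly

$$\Delta = \left|\,y^{(h)} - y^{(h/2,\,h/2)}\right|,\qquad h_{\text{new}} = S\,h\left(\frac{\Delta_{\max}}{\mathrm{tol}}\right)^{-1/4}\quad\text{when shrinking},$$

with safety factor $S = 0.9$, matching the code below.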
###Code
# dydx functions that take drivatives
# x_i value of x at step i
# y_i values of items in array at i
# h is step size
# tol tolerance
# this function
def rk4_mv_ad(dydx,x_i,y_i,nv,h,tol):
#define safety scale
SAFETY = 0.9
H_NEW_FAC = 2.0
# set a maximum number of iterations
imax = 10000
# set an iteration variable
i = 0
# create an error
Delta = np.full(nv,2*tol) # array that contains error estimates
# remember the step
h_step = h
# adjust steps until error is in our tolerance
while(Delta.max()/tol > 1.0):
#print(Delta.max(),h,x_i,y_i,nv,h_step)
# estimate our error by taking one step of size h
# vs. two steps of size h/2
y_2 = rk4_mv_core(dydx,x_i,y_i,nv,h_step) # copmaring to one full step
y_1 = rk4_mv_core(dydx,x_i,y_i,nv,0.5*h_step) # copmaring to two steps of h/2
y_11 = rk4_mv_core(dydx,x_i+0.5*h_step,y_1,nv,0.5*h_step) # using y_1 estimate here which is
# estimate for y and z from previous step
# that used half step
# compute an error
Delta = np.fabs(y_2 - y_11)
# if the error is too large, take a smaller step
# Delta.max() is the biggest element in Delta
# if this value is > 1 after divided by tol, our step was too big
if(Delta.max()/tol > 1.0):
# our errors is too large, decrease the step
# multiplying by SAFETY to get smaller step
h_step *= SAFETY * (Delta.max()/tol)**(-0.25)
# check iteration
if(i >= imax):
print("Too many iterations in rk4_mv_ad()")
raise StopIteration("Ending after i = ", i)
# iterate
i+=1
# next time, try to take a bigger step, so estimate new h step with below
h_new = np.fmin(h_step * (Delta.max()/tol)**(-0.9), h_step*H_NEW_FAC)
# return the answer, a new step, and the step we actually took
return y_2, h_new, h_step
###Output
_____no_output_____
###Markdown
Define a wrapper for RK4
###Code
# this function, you pass in the derivatives you want to evolve
# the starting place, the ending place, and tolerance
# it will call the two functions we just wrote
# we don't know how many steps we'll take so we have initial conditions
# we just provide some tolerance and want for this wrapper to try to stay
# within the tolerance
def rk4_mv(dydx,a,b,y_a,tol):
# dydx is the derivate wrt x
# a is the lower bound
# b is the upper bound
# y_a are the boundary conditions
# tol is the tolerance for integrating y
# define our starting step
xi = a # current value of x
yi = y_a.copy() # current value of
# an initial step size == make very small
h = 1.0e-4 * (b-a)
# set a minimum number of iterations since we don't know how
# many iterations we'll need to take
imax = 10000
# set an iteration variable
i = 0
# set the number of coupled odes to the size of y_a
nv = len(y_a) # initial comditions of y_a
# set the initial conditions ( arrays that we'll be plotting )
x = np.full(1,a) # signle element array with value a
y = np.full((1,nv),y_a) # array with all values of x at all nv steps,
# and y and z at values of x (variable y is actually
# an array)
# set a flag
flag = 1
# loop until we reach the right side
while(flag):
# calculate y_i+1
yi_new, h_new, h_step = rk4_mv_ad(dydx,xi,yi,nv,h,tol)
# update the step
h = h_new
# prevent an overshoot, as we integrate along x, when we get close to
# edge, we don't want to cross it, so we retake step to get to the edge
if(xi+h_step>b):
# take a smaller step
h = b-xi
# recalculate y_i+1
yi_new, h_new, h_step = rk4_mv_ad(dydx,xi,yi,nv,h,tol)
# break
flag = 0
# update values in the arrays
xi += h_step
yi[:] = yi_new[:] # arrays
# add the step to the arrays
x = np.append(x,xi)
y_new = np.zeros((len(x),nv))
y_new[0:len(x)-1,:] = y
y_new[-1,:] = yi[:]
del y # frees memory of y
y = y_new
# prevent too many iterations
if(i>=imax):
print("Maximum iterations reached.")
raise StopIteration("Iteration number = ", i)
#iterate
i += 1
# output some information
# %3d is a format of the integer so here we want 3 places in the integer
# \t means to pritn out tab space on screen
# %9.8f print out floating number with 8 digets on right hand side of decimal
s = "i = %3d\tx = %9.8f\th = %9.8f\tb=%9.8f" % (i,xi,h_step,b)
print(s)
# break if new xi is == b
if(xi == b):
flag = 0
# return the answer
return x, y
###Output
_____no_output_____
###Markdown
Perform the integration
###Code
a = 0.0
b = 2.0 * np.pi
# initial conditions
y_0 = np.zeros(2) # array of size 2
y_0[0] = 0.0
y_0[1] = 1.0
nv = 2
tolerance = 1.0e-6
# perform the integration
x,y = rk4_mv(dydx,a,b,y_0,tolerance)
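
# A hedged plotting sketch (not in the original notebook): compare the numerical
# solution returned above with the analytic solution y = sin(x), dy/dx = cos(x).
plt.plot(x, y[:,0], 'o', label='y(x) numerical')
plt.plot(x, y[:,1], 'o', label='dydx(x) numerical')
xx = np.linspace(a, b, 1000)
plt.plot(xx, np.sin(xx), label='sin(x)')
plt.plot(xx, np.cos(xx), label='cos(x)')
plt.xlabel('x')
plt.ylabel('y, dy/dx')
plt.legend(frameon=False)
plt.show()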
###Output
_____no_output_____
###Markdown
Create a notebook to perform Runge-Kutta for multiple coupled variables
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
###Output
_____no_output_____
###Markdown
Define our coupled derivatives to integrate
###Code
def dydx(x,y):
#set the derivatives
#our eq is d^2y/dx^2 = -y
#so write
#dydx = z
#dzdx = -y
#we will set y = y[0]
#we will set z = y[1]
#declare an array, size 2
y_derivs = np.zeros(2)
#set dydx = z
y_derivs[0] = y[1]
#set dzdx = -1
y_derivs[1] = -1*y[0]
#return an array
return y_derivs
###Output
_____no_output_____
###Markdown
Def. the 4th order RK method
###Code
def rk4_mv_core(dydx, xi, yi, nv, h):
#declare k? arrays
#h = step size
#yi = array
#nv = number of variables
#each new k depends on the previous k's, K4 depends on k3 to k1
#nv in our case is 2, y and z
k1 = np.zeros(nv)
k2 = np.zeros(nv)
k3 = np.zeros(nv)
k4 = np.zeros(nv)
#define x at 1/2 step
x_ipoh = xi +0.5*h
#define x at 1 step
x_ipo = xi + h
#declare a temp y array
y_temp = np.zeros(nv)
#get k1 values
y_derivs = dydx(xi,yi)
k1[:] = h*y_derivs[:]
#get k2 values
y_temp[:] = yi[:] + 0.5*k1[:]
y_derivs = dydx(x_ipoh, y_temp)
k2[:] = h*y_derivs[:]
#get k3 values
y_temp[:] = yi[:] + 0.5*k2[:]
y_derivs = dydx(x_ipoh, y_temp)
k3[:] = h*y_derivs[:]
#get k4 values
    y_temp[:] = yi[:] + k3[:]
y_derivs = dydx(x_ipo, y_temp)
k4[:] = h*y_derivs[:]
#advance y by a step h
#weighted sum
yipo = yi + (k1 + 2*k2+ 2*k3 +k4)/6.
return yipo
###Output
_____no_output_____
###Markdown
Def an adaptive step size driver for RK4
###Code
def rk4_mv_ad(dydx, x_i, y_i, nv, h, tol):
#define safety scale
#used to estimate an error, 1 big step, 2 little steps
#if error is bigger than tol, reduce size of step
    #if error is smaller than tol, increase size of step, with a limit on the
    #increase of step size: at most 2x the previous step
SAFETY = 0.9
H_NEW_FAC = 2.0
#set a maximum number of iterations
imax = 10000
#set an iteration variable
i = 0
#create an error
Delta = np.full(nv, 2*tol)
#remember the step
h_step = h
#adjust step
while(Delta.max()/tol > 1.0):
#estimate our error by taking one step of size h
#vs. two steps of size h/2
y_2 = rk4_mv_core(dydx, x_i, y_i, nv, h_step)
y_1 = rk4_mv_core(dydx, x_i, y_i, nv, 0.5*h_step)
        y_11 = rk4_mv_core(dydx, x_i+0.5*h_step, y_1, nv, 0.5*h_step)
#compute error
Delta = np.fabs(y_2 - y_11)
        #if the error is too large, take smaller step
if(Delta.max()/tol > 1.0):
#our error is too large, decrease the step
h_step *= SAFETY * (Delta.max()/tol)**(-0.25)
#check iteration
if(i>imax):
print("Too many iterations in rk4_mv_ad()")
raise StopIteration("Ending after i = ", i)
#iterate
i += 1
#next time, try to take a bigger step
    #can only take a step that's 2x as big as the current step, due to H_NEW_FAC
    h_new = np.fmin(h_step * (Delta.max()/tol)**(-0.9), h_step*H_NEW_FAC)
#return the answer, a new step, and the step we actually took
return y_2, h_new, h_step
def rk4_mv(dfdx, a, b, y_a, tol):
#dfdx is the derivative wrt x
#a is the lower bound
#b is the upper bound
#y_a are the boundary conditions
    #tol is the tolerance for integrating y
#define our starting step
xi = a
yi = y_a.copy()
#an initial step size == make very small
h = 1.0e-4 *(b-a)
#set a max number of iterations
imax = 10000
#set an iteration variable
i = 0
#set the number of coupled odes to the
#size of y_a
nv = len(y_a)
#set the initial conditions
x = np.full(1,a)
y = np.full((1,nv), y_a)
#set a flag
flag = 1
###Output
_____no_output_____
###Markdown
Create a notebook to perform Runge-Kutta for multiple coupled variables
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
###Output
_____no_output_____
###Markdown
Define our coupled derivatives to integrate
###Code
def dydx(x,y):
#set the derivatives
#our eq is d^2y/dx^2 = -y
#so write
#dydx = z
#dzdx = -y
#we will set y = y[0]
#we will set z = y[1]
#declare an array, size 2
y_derivs = np.zeros(2)
#set dydx = z
y_derivs[0] = y[1]
    #set dzdx = -y
y_derivs[1] = -1*y[0]
#return an array
return y_derivs
###Output
_____no_output_____
###Markdown
Def. the 4th order RK method
###Code
def rk4_mv_core(dydx, xi, yi, nv, h):
#declare k? arrays
#h = step size
#yi = array
#nv = number of variables
#each new k depends on the previous k's, K4 depends on k3 to k1
#nv in our case is 2, y and z
k1 = np.zeros(nv)
k2 = np.zeros(nv)
k3 = np.zeros(nv)
k4 = np.zeros(nv)
#define x at 1/2 step
x_ipoh = xi +0.5*h
#define x at 1 step
x_ipo = xi + h
#declare a temp y array
y_temp = np.zeros(nv)
#get k1 values
y_derivs = dydx(xi,yi)
k1[:] = h*y_derivs[:]
#get k2 values
y_temp[:] = yi[:] + 0.5*k1[:]
y_derivs = dydx(x_ipoh, y_temp)
k2[:] = h*y_derivs[:]
#get k3 values
y_temp[:] = yi[:] + 0.5*k2[:]
y_derivs = dydx(x_ipoh, y_temp)
k3[:] = h*y_derivs[:]
#get k4 values
y_temp[:] = yi[:] + k3[:]
y_derivs = dydx(x_ipo, y_temp)
k4[:] = h*y_derivs[:]
#advance y by a step h
#weighted sum
yipo = yi + (k1 + 2*k2+ 2*k3 +k4)/6.
return yipo
###Output
_____no_output_____
###Markdown
Def an adaptive step size driver for RK4
###Code
def rk4_mv_ad(dydx, x_i, y_i, nv, h, tol):
#define safety scale
#used to estimate an error, 1 big step, 2 little steps
#if error is bigger than tol, reduce size of step
    #if error is smaller than tol, increase size of step, with a limit on the
    #increase of step size: at most 2x the previous step
SAFETY = 0.9
H_NEW_FAC = 2.0
#set a maximum number of iterations
imax = 10000
#set an iteration variable
i = 0
#create an error
Delta = np.full(nv, 2*tol)
#remember the step
h_step = h
#adjust step
while(Delta.max()/tol > 1.0):
#estimate our error by taking one step of size h
#vs. two steps of size h/2
y_2 = rk4_mv_core(dydx, x_i, y_i, nv, h_step)
y_1 = rk4_mv_core(dydx, x_i, y_i, nv, 0.5*h_step)
y_11 = rk4_mv_core(dydx, x_i+0.5*h_step, y_1, nv, 0.5*h_step)
#compute error, just an estimate
Delta = np.fabs(y_2 - y_11)
        #if the error is too large, take smaller step
if(Delta.max()/tol > 1.0):
#our error is too large, decrease the step
h_step *= SAFETY * (Delta.max()/tol)**(-0.25)
#check iteration
if(i>imax):
print("Too many iterations in rk4_mv_ad()")
raise StopIteration("Ending after i = ", i)
#iterate
i += 1
#next time, try to take a bigger step
    #can only take a step that's 2x as big as the current step, due to H_NEW_FAC
    h_new = np.fmin(h_step * (Delta.max()/tol)**(-0.9), h_step*H_NEW_FAC)
#return the answer, a new step, and the step we actually took
return y_2, h_new, h_step
def rk4_mv(dydx, a, b, y_a, tol):
    #dydx is the derivative wrt x
#a is the lower bound
#b is the upper bound
#y_a are the boundary conditions
    #tol is the tolerance for integrating y
#define our starting step
xi = a
yi = y_a.copy()
#an initial step size == make very small
h = 1.0e-4 *(b-a)
#set a max number of iterations
imax = 10000
#set an iteration variable
i = 0
#set the number of coupled odes to the
#size of y_a
nv = len(y_a)
#set the initial conditions
#single element array with a as that value
x = np.full(1,a)
y = np.full((1,nv), y_a)
#set a flag, loop until we reach right side
    #number of steps unknown
flag = 1
#loop until we reach the right side
while(flag):
#calculate y_i+1
yi_new, h_new, h_step = rk4_mv_ad(dydx, xi, yi, nv, h, tol)
#update the step
h = h_new
#prevent an overshoot
if(xi+h_step>b):
#take a smaller step
h = b-xi
#recalculate y_i+1
yi_new, h_new, h_step = rk4_mv_ad(dydx, xi, yi, nv, h, tol)
#break
flag = 0
#update values
xi += h_step
yi[:] = yi_new[:]
        #add the step to the arrays
        x = np.append(x, xi)
        #np.append takes an array and a value, appends xi, and returns a new array that overwrites x
        y_new = np.zeros((len(x),nv))
        y_new[0:len(x)-1,:] = y
        #len(x)-1 because we just added an extra element to x, and y must match the new length
y_new[-1,:] = yi[:]
del y
#erases last y matrix
y = y_new
#prevent too many iterations
if(i>=imax):
print("Maximum iterations reached.")
raise StopIteration("Iteration number= ", i)
#iterate
i += 1
#output some information
        #%3d means an integer printed in a field 3 characters wide
#d means integer
# \t means print tab followed by x =
# %9.8 means print out 9 total digits where 8 are on the RHS of decimal
s = "i = %3d\tx = %9.8f\th = %9.8f\tb=%9.8f" % (i, xi, h_step, b)
print(s)
#break if new xi is == b
#ends integration
if(xi == b):
flag = 0
#return answer
return x,y
###Output
_____no_output_____
###Markdown
Perform the integration
###Code
a = 0.0
b = 2.0 * np.pi
y_0 = np.zeros(2)
y_0[0] = 0.0
y_0[1] = 1.0
nv = 2
tolerance = 1.0e-6
#perform the integration
x, y = rk4_mv(dydx, a, b, y_0, tolerance)
###Output
_____no_output_____
###Markdown
Plot the result
###Code
plt.plot(x,y[:,0], 'o', label = 'y(x)')
plt.plot(x, y[:,1], 'o', label='dydx(x)')
xx = np.linspace(0, 2.0*np.pi, 1000)
plt.plot(xx, np.sin(xx), label='sin(x)')
plt.plot(xx, np.cos(xx), label= 'cos(x)')
plt.xlabel('x')
plt.ylabel('y, dy/dx')
plt.legend(frameon=False)
###Output
_____no_output_____
###Markdown
Plot error
###Code
sine = np.sin(x)
cosine = np.cos(x)
y_error = (y[:,0]-sine)
dydx_error = (y[:,1]-cosine)
plt.plot(x, y_error, label="y(x) Error")
plt.plot(x, dydx_error, label="y(x) Error")
plt.legend(frameon=False)
###Output
_____no_output_____
###Markdown
Create a notebook to perform Runge-Kutta integration for multiple coupled variables
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
###Output
_____no_output_____
###Markdown
Define our coupled variables to integrate
###Code
def dydx(x,y):
#set the derivatives
#our equation is d**2/dx**2 = -y
#so we can write
    #dy/dx = z
#dz/dx = -y
#we will set y=y[0]
#and z=y[1]
#declare an array
y_derivs = np.zeros(2)
    #set dydx = z
    y_derivs[0] = y[1]
    #set dzdx = -y
    y_derivs[1] = -1*y[0]
#return the derivatives
return y_derivs
###Output
_____no_output_____
###Markdown
Define 4th order RK scheme for multiple variables
###Code
def rk4_mv_core(dydx,xi,yi,nv,h):
#dydx = function of derivatives
#xi = val of x at step i
#yi = array of vars at step i
#nv = number of vars
#h = step size
#declare
k1 = np.zeros(nv)
k2 = np.zeros(nv)
k3 = np.zeros(nv)
k4 = np.zeros(nv)
    #half step
    x_ipoh = xi + 0.5*h
    #define x at 1 step
    x_ipo = xi + h
#declare a temp array
y_temp = np.zeros(nv)
#advance y[] by a step h
y_derivs = dydx(xi,yi)
k1[:] = h*y_derivs[:]
    #get k2, k3, k4 values following the scheme
    #k_1 = h*g(x_i, f_i)
    #k_2 = h*g(x_ipoh, f_i + 0.5*k_1)
    #k_3 = h*g(x_ipoh, f_i + 0.5*k_2)
    #k_4 = h*g(x_ipo, f_i + k_3)
    y_temp[:] = yi[:] + 0.5*k1[:]
    k2[:] = h*dydx(x_ipoh, y_temp)[:]
    y_temp[:] = yi[:] + 0.5*k2[:]
    k3[:] = h*dydx(x_ipoh, y_temp)[:]
    y_temp[:] = yi[:] + k3[:]
    k4[:] = h*dydx(x_ipo, y_temp)[:]
    #advance y by a step h (weighted sum)
    yipo = yi + (k1 + 2*k2 + 2*k3 + k4)/6.
    return yipo
###Output
_____no_output_____
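###Markdown
This notebook stops at the core stepper, so here is a minimal fixed-step sketch (not in the original) of how rk4_mv_core can be driven; the grid and array names (x_fix, y_fix) are just for illustration, and it assumes the dydx and rk4_mv_core cells above have been run.
###Code
# integrate d^2y/dx^2 = -y over one period with a fixed step h
nv = 2
h = 0.01
x_fix = np.arange(0.0, 2.0*np.pi, h)   # fixed grid (illustrative)
y_fix = np.zeros((len(x_fix), nv))
y_fix[0,1] = 1.0                       # y(0) = 0, dy/dx(0) = 1
for i in range(len(x_fix)-1):
    y_fix[i+1,:] = rk4_mv_core(dydx, x_fix[i], y_fix[i,:], nv, h)
# compare with the exact solution sin(x)
print("max |y - sin(x)| =", np.max(np.fabs(y_fix[:,0] - np.sin(x_fix))))
###Output
_____no_output_____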
###Markdown
Create a notebook to perform Runge-Kutta integration for multiple coupled variables
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
###Output
_____no_output_____
###Markdown
Define our coupled derivatives to integrate
###Code
def dydx(x,y):
# set the derivatives
    # our equation is d^2y/dx^2 = -y
# so we can write
# dydx = z
# dzdx = -y
# we will set y = y[0] (the function y)
    # we will set z = y[1] (the function z)
# declare an array
y_derivs = np.zeros(2) # array of functions
# set dydx = z
    y_derivs[0] = y[1] # dydx goes in y_derivs[0]
    # set dzdx = -y
y_derivs[1] = -1*y[0]
# here we have to return an array
return y_derivs
###Output
_____no_output_____
###Markdown
Define the 4th order RK method
###Code
# now we'll operate on all the elements at the same time
def rk4_mv_core(dydx,xi,yi,nv,h): #dydx is function we just wrote,
# xi is value of x at step i
# xi changes by value of h
# nv is number of variables
# yi is the array
# declare k? arrays
    k1 = np.zeros(nv) # each k is array of two elements one that
k2 = np.zeros(nv) # corresponds to y and one that corresponds to z
k3 = np.zeros(nv)
k4 = np.zeros(nv)
# define x at 1/2 step
x_ipoh = xi + 0.5*h
# define x at 1 step
x_ipo = xi + h
# declare a temp y array, will contain estimated values of y and z
    # at different points of the step
y_temp = np.zeros(nv)
# get k1 values
y_derivs = dydx(xi,yi)
k1[:] = h*y_derivs[:]
# get k2 values
y_temp[:] = yi[:] + 0.5*k1[:] # initial values on left side + .5 of k1, we get to midpoint
y_derivs = dydx(x_ipoh,y_temp) # estimate of y and z at half step and
k2[:] = h*y_derivs[:] # recompute derivatives at half step
# get k3 values
y_temp[:] = yi[:] + 0.5*k2[:] # different step, with different derivative
y_derivs = dydx(x_ipoh,y_temp) # retaking half step and recomputing derivatives
k3[:] = h*y_derivs[:] #
# get k4 values
    y_temp[:] = yi[:] + k3[:] # taking full step for k4 using recomputed
    y_derivs = dydx(x_ipo,y_temp) # derivatives from above
    k4[:] = h*y_derivs[:]
# advance y by a step h
yipo = yi + (k1 + 2*k2 + 2*k3 + k4)/6.
return yipo
###Output
_____no_output_____
###Markdown
Define an adaptive step size driver for RK4
###Code
# dydx function that takes the derivatives
# x_i value of x at step i
# y_i values of items in array at i
# h is step size
# tol tolerance
# this function adapts the step size until the error estimate is within tol
def rk4_mv_ad(dydx,x_i,y_i,nv,h,tol):
#define safety scale
SAFETY = 0.9
H_NEW_FAC = 2.0
# set a maximum number of iterations
imax = 1000
# set an iteration variable
i = 0
# create an error
Delta = np.full(nv,2*tol) # array that contains error estimates
# remember the step
h_step = h
# adjust steps until error is in our tolerance
while(Delta.max()/tol > 1.0):
# estimate our error by taking one step of size h
# vs. two steps of size h/2
        y_2 = rk4_mv_core(dydx,x_i,y_i,nv,h_step) # comparing to one full step
        y_1 = rk4_mv_core(dydx,x_i,y_i,nv,0.5*h_step) # comparing to two steps of h/2
        y_11 = rk4_mv_core(dydx,x_i+0.5*h_step,y_1,nv,0.5*h_step) # using y_1 estimate here which is
# estimate for y and z from previous step
# that used half step
# compute an error
Delta = np.fabs(y_2 - y_11)
# if the error is too large, take a smaller step
# Delta.max() is the biggest element in Delta
# if this value is > 1 after divided by tol, our step was too big
if(Delta.max()/tol > 1.0):
# our errors is too large, decrease the step
# multiplying by SAFETY to get smaller step
h_step *= SAFETY * (Delta.max()/tol)**(-0.25)
# check iteration
if(i >= imax):
print("Too many iterations in rk4_mv_ad()")
raise StopIteration("Ending after i = ", i)
# iterate
i+=1
# next time, try to take a bigger step, so estimate new h step with below
    h_new = np.fmin(h_step * (Delta.max()/tol)**(-0.9), h_step*H_NEW_FAC)
# return the answer, a new step, and the step we actually took
return y_2, h_new, h_step
###Output
_____no_output_____
###Markdown
Define a wrapper for RK4
###Code
# this function, you pass in the derivatives you want to evolve
# the starting place, the ending place, and tolerance
# it will call the two functions we just wrote
# we don't know how many steps we'll take so we have initial conditions
# we just provide some tolerance and want for this wrapper to try to stay
# within the tolerance
def rk4_mv(dydx,a,b,y_a,tol):
# dydx is the derivate wrt x
# a is the lower bound
# b is the upper bound
# y_a are the boundary conditions
# tol is the tolerance for integrating y
# define our starting step
xi = a
yi = y_a.copy()
# an initial step size == make very small
h = 1.0e-4 * (b-a)
    # set a maximum number of iterations since we don't know how
    # many iterations we'll need to take
imax = 10000
# set an iteration variable
i = 0
# set the number of coupled odes to the size of y_a
nv = len(y_a)
# set the initial conditions
x = np.full(1,a) # array
    y = np.full((1,nv),y_a) # 2D array with the values of the nv variables
                            # (y and z) at every step in x
# set a flag
flag = 1
# loop until we reach the right side
while(flag):
# calculate y_i+1
yi_new, h_new, h_step = rk4_mv_ad(dydx,xi,yi,nv,h,tol)
# update the step
h = h_new
# prevent an overshoot, as we integrate along x, when we get close to
# edge, we don't want to cross it, so we retake step to get to the edge
if(xi+h_step>b):
# take a smaller step
h = b-xi
# recalculate y_i+1
yi_new, h_new, h_step = rk4_mv_ad(dydx,xi,yi,nv,h,tol)
# break
flag = 0
# update values in the arrays
xi += h_step
yi[:] = yi_new[:] # arrays
# add the step to the arrays
        x = np.append(x,xi)
y_new = np.zeros((len(x),nv))
y_new[0:len(x)-1,:] = y
y_new[-1,:] = yi[:]
del y
y = y_new
# prevent too many iterations
if(i>=imax):
print("Maximum iterations reached.")
raise StopIteration("Iteration number = ", i)
#iterate
i += 1
        # output some information
        s = "i = %3d\tx = %9.8f\th = %9.8f\tb = %9.8f" % (i, xi, h_step, b)
        print(s)
        # break if new xi is == b
        if(xi == b):
            flag = 0
    # return the answer
    return x, y
###Output
_____no_output_____
###Markdown
Create a notebook to perform Runge-Kutta integration for multiple coupled variables.
###Code
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Define our coupled derivatives to integrate
###Code
def dydx(x,y):
#set the derivatives
#our equation is d^2y/dx^2 = -y
#so we can write
#dydx = z
#dzdx = -y
#we will set y = y[0]
#we will set z = y[1]
#declare an array
y_derivs = np.zeros(2)
#set dydx = z
y_derivs[0] = y[1]
#set dzdx = -y
y_derivs[1] = -1*y[0]
#here we have to return an array
return y_derivs
def rk4_mv_core(dydx,xi,yi,nv,h):
#declare k? arrays
k1 = np.zeros(nv)
k2 = np.zeros(nv)
k3 = np.zeros(nv)
k4 = np.zeros(nv)
#define x at 1/2 step
x_ipoh = xi + 0.5*h
#define x at 1 step
x_ipo = xi + h
#declare a temp y array
y_temp = np.zeros(nv)
#get k1 values
y_derivs = dydx(xi,yi)
k1[:] = h*y_derivs[:]
#get k2 values
y_temp[:] = yi[:] + 0.5*k1[:]
y_derivs = dydx(x_ipoh,y_temp)
k2[:] = h*y_derivs[:]
#get k3 values
y_temp[:] = yi[:] + 0.5*k2[:]
y_derivs = dydx(x_ipoh,y_temp)
k3[:] = h*y_derivs[:]
#get k4 values
y_temp[:] = yi[:] + 0.5*k3[:]
y_derivs = dydx(x_ipo,y_temp)
k4[:] = h*y_derivs[:]
#advance y by a step h
yipo = yi + (k1 + 2*k2 + 2*k3 + k4)/6
return yipo
def rk4_mv_ad(dydx,x_i,y_i,nv,h,tol):
#define safety scale
SAFETY = 0.9
H_NEW_FAC = 2.0
#set a maximum number of iterations
imax = 10000
#set an iteration variable
i = 0
#create a error
Delta = np.full(nv,2*tol)
#remember the step
h_step = h
    #adjust step
while(Delta.max()/tol > 1.0):
#estimate our error by taking one step of size h
#vs. two steps of size h/2
y_2 = rk4_mv_core(dydx,x_i,y_i,nv,h_step)
y_1 = rk4_mv_core(dydx,x_i,y_i,nv,0.5*h_step)
        y_11 = rk4_mv_core(dydx,x_i+0.5*h_step,y_1,nv,0.5*h_step)
#compute an error
Delta = np.fabs(y_2 - y_11)
#if the error is too large take a smaller step
if(Delta.max()/tol > 1.0):
#our error is too large, decrease the step
h_step *= SAFETY * (Delta.max()/tol)**(-0.25)
#check iteration
if(i>=imax):
print("Too many iteration in rk4_mv_ad()")
raise StopIteration("Ending after i = ",i)
#iterate
i+=1
    #next time, try to take a bigger step
    h_new = np.fmin(h_step * (Delta.max()/tol)**(-0.9), h_step*H_NEW_FAC)
#return the answer, a new step, and the step actually took
return y_2, h_new, h_step
def rk4_mv(dydx,a,b,y_a,tol):
    #dydx is the derivative wrt x
#a is the lower bound
#b is the upper bound
#y_a are the boundary conditions
#tol is the tolerance for integrating y
#define our starting step
xi = a
    yi = y_a.copy()
#an initial step size == make very small!
h = 1.04e-4 * (b-a)
#set a maximum number of iterations
imax=10000
#set an iteration variable
i=0
#set the number of coupled odes to the
#size of y_a
nv = len(y_a)
    #set the initial conditions
x = np.full(1,a)
y = np.full((1,nv),y_a)
#set a flag
flag = 1
#loop until we reach the right side
while(flag):
#calculate y_i+1
yi_new, h_new, h_step = rk4_mv_ad(dydx,xi,yi,nv,h,tol)
#update the step
h = h_new
#prevent an overshoot
if(xi+h_step>b):
#take a smaller step
h = b-xi
#recalculate y_i+1
            yi_new, h_new, h_step = rk4_mv_ad(dydx,xi,yi,nv,h,tol)
#break
flag = 0
#update values
xi+= h_step
yi[:] = yi_new[:]
        #add the step to the arrays
        x = np.append(x,xi)
        y_new = np.zeros((len(x),nv))
y_new[0:len(x)-1,:] = y
y_new[-1,:] = yi[:]
del y
y = y_new
#prevent too many iterations
if(i>=imax):
print("Maximum iterations reached.")
raise StopIteration("Iteration number = ",i)
#iterate
i+=1
#output some information
s = "i = %3d\tx = %9.8f\tb=%9.8f" %(i,xi, h_step, b)
print(s)
#break if new xi is == b
if(xi==b):
flag = 0
#return the answer
return x,y
a = 0.0
b = 2.0 * np.pi
y_0 = np.zeros(2)
y_0[0] = 0.0
y_0[1] = 1.0
nv = 2
tolerance = 1.0e-6
#perform the integration
x,y = rk4_mv(dydx,a,b,y_0,tolerance)
###Output
_____no_output_____
###Markdown
create a notebook to perform Runge-Kutta integration for multiple coupled variables
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
###Output
_____no_output_____
###Markdown
define coupled derivatives to integrate
###Code
def dydx(x,y):
#set derivatives, equation is d^2y/dx^2=-y
#dydx=z & dzdx=-y
#set y=y[0] and z=y[1]
#declare array
y_derivs = np.zeros(2)
#set dydx = z
y_derivs[0] = y[1]
#set dzdx=-y
y_derivs[1]=-1*y[0]
#here we have to return an array
return y_derivs
###Output
_____no_output_____
###Markdown
define 4th order rk method
###Code
def rk4_mv_core(dydx,xi,yi,nv,h):
#declare k?[wild card gives digits btwn 0-9] arrays
k1=np.zeros(nv)
k2=np.zeros(nv)
k3=np.zeros(nv)
k4=np.zeros(nv)
#define x at 1/2 step
x_ipoh = xi+0.5*h
#define x at 1 step
x_ipo = xi+h
#declare a tempy array
y_temp= np.zeros(nv)
#get k1 values
y_derivs = dydx(xi,yi)
k1[:]=h*y_derivs[:]
#get k2 values
y_temp[:]=yi[:]+0.5*k1[:]
y_derivs= dydx(x_ipoh,y_temp)
k2[:]=h*y_derivs[:]
#k3 values
y_temp[:]=yi[:]+0.5*k2[:]
y_derivs= dydx(x_ipoh,y_temp)
k3[:]=h*y_derivs[:]
#k4 values
y_temp[:]=yi[:]+k3[:]
y_derivs= dydx(x_ipo,y_temp)
k4[:]=h*y_derivs[:]
#advance y by step h
yipo=yi+(k1+2*k2+2*k3+k4)/6
return yipo
###Output
_____no_output_____
###Markdown
define adaptive step size driver for rk4
###Code
def rk4_mv_ad(dydx,x_i,y_i,nv,h,tol):
#define safety scale
SAFETY = 0.9
H_NEW_FAC = 2.0
#set maximum number of iterations
imax= 10000
#set an iteration variable
i=0
#create an error
Delta= np.full(nv,2*tol)
#remember the step
h_step=h
#adjust step
while(Delta.max()/tol>1.0):
#estimate error by taking 1 h step vs. 2 h/2 steps
y_2=rk4_mv_core(dydx,x_i,y_i,nv,h_step)
y_1=rk4_mv_core(dydx,x_i,y_i,nv,0.5*h_step)
        y_11=rk4_mv_core(dydx,x_i+0.5*h_step,y_1,nv,0.5*h_step)
#compute error
Delta=np.fabs(y_2-y_11)
#if error is too large take smaller step
if(Delta.max()/tol>1.0):
#error to large-> decrease step
h_step *= SAFETY*(Delta.max()/tol)**(-0.25)
#check iteration
if(i>=imax):
print("Too many iterations in rk4_mv_ad()")
raise StopIteration("ending after i = ",i)
#iterate
i+=1
#next time, try to take a bigger step
h_new= np.fmin(h_step*(Delta.max()/tol)**(-0.9),h_step*H_NEW_FAC)
    #return the answer, a new step, and the step we actually took
return y_2, h_new, h_step
###Output
_____no_output_____
###Markdown
define wrapper 4 rk4
###Code
def rk4_mv(dydx,a,b,y_a,tol):
#dydx is the derivative wrt x
#a is lower bound/ b is upper bound
#y_a is boundary conditions
#tol: tolerance for int y
#define starting step
xi = a
yi = y_a.copy()
#an initial step size == make very small
h= 1.0e-4*(b-a)
imax=10000
i=0
#set # of coupled ode's to size y_a
nv = len(y_a)
#set initial conditions
    x = np.full(1,a)
y = np.full((1,nv),y_a)
#set a flag
flag=1
#loop til we reach right side
while(flag):
#calculate y_i+1
yi_new, h_new, h_step = rk4_mv_ad(dydx,xi,yi,nv,h,tol)
#update new step
h = h_new
#prevent overshoot
if(xi+h_step>b):
#take smaller step
h = b-xi
#recalc y_i+1
yi_new, h_new, h_step = rk4_mv_ad(dydx,xi,yi,nv,h,tol)
#break
flag = 0
###Output
_____no_output_____
###Markdown
Create a notebook to perform Runge-Kutta integration for multiple coupled variables
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
###Output
_____no_output_____
###Markdown
Define our coupled derivatives to integrate
###Code
def dydx(x,y):
#set our derivatives
#our equation is d^2y/dx^2=-y
#so we can write
#dydx=z
#dzdx=-y
#we will set y=y[0]
#we will set z=y[1]
#declare an array
y_derivs=np.zeros(2)
#set dydx=z
y_derivs[0] = y[1]
#set dzdx=-y
y_derivs[1] = -1*y[0]
#here we have to return the array
return y_derivs
###Output
_____no_output_____
###Markdown
Define the 4th order RK method
###Code
def rk4_mv_core(dydx,xi,yi,nv,h):
#declare k? arrays
k1=np.zeros(nv)
k2=np.zeros(nv)
k3=np.zeros(nv)
k4=np.zeros(nv)
#define x at 1/2 step
x_ipoh=xi+0.5*h
#define x at 1 step
    x_ipo = xi+h
#declare a temp y array
y_temp=np.zeros(nv)
#get k1 values
y_derivs=dydx(xi,yi)
k1[:]=h*y_derivs[:]
    #get k2 values
    y_temp[:]=yi[:]+0.5*k1[:]
    y_derivs= dydx(x_ipoh,y_temp)
    k2[:]=h*y_derivs[:]
    #get k3 values
    y_temp[:]=yi[:]+0.5*k2[:]
    y_derivs= dydx(x_ipoh,y_temp)
    k3[:]=h*y_derivs[:]
    #get k4 values
    y_temp[:]=yi[:]+k3[:]
    y_derivs= dydx(x_ipo,y_temp)
k4[:]=h*y_derivs[:]
#advance y by a step h
yipo=yi+(k1+2*k2+2*k3+k4)/6.
return yipo
###Output
_____no_output_____
###Markdown
Define an adaptive step size driver for RK4
###Code
def rk4_mv_ad(dydx,x_i,y_i,nv,h,tol):
#define safety scale
SAFETY=0.9
H_NEW_FAC=2.0
#set a maximum number of iterations
imax=10000
#set an iteration variable
i=0
#create an error
Delta=np.full(nv,2*tol)
#remember the step
h_step=h
#adjust step
while(Delta.max()/tol>1.0):
#estimate our error by taking one step of size h vs. two stepsof size h/2
y_2=rk4_mv_core(dydx,x_i,y_i,nv,h_step)
y_1=rk4_mv_core(dydx,x_i,y_i,nv,0.5*h_step)
        y_11=rk4_mv_core(dydx,x_i+0.5*h_step,y_1,nv,0.5*h_step)
#compute the error
Delta=np.fabs(y_2-y_11)
#if the error is too large, take a smaller step
if(Delta.max()/tol>1.0):
#our error is too large, decrease the step
h_step*=SAFETY*(Delta.max()/tol)**(-0.25)
#check iteration
if(i>=imax):
print("Too many iterations in rk4_mv_ad()")
raise StopIteration("Ending after i=",i)
#iterate
i+=1
#next time, try to take a bigger step
h_new=np.fmin(h_step*(Delta.max()/tol)**(-0.9),h_step*H_NEW_FAC)
#return the answer, a new step, and the step we actually took
return y_2, h_new, h_step
###Output
_____no_output_____
###Markdown
Define a wrapper for RK4
###Code
def rk4_mv(dydx,a,b,y_a,tol):
    #dydx is the derivative wrt x
#a is the lower bound
#b is the upper bound
#y_a are the boundary conditions
#tol is the tolerance for integrating y
#define our starting step
xi=a
yi=y_a.copy()
#an initial step size == make very small!
h=1.0e-4*(b-a)
#set a max number of iterations
imax=10000
#set an iteration variable
i=0
#set the number of coupled odes to the size of y_a
nv=len(y_a)
#set the initial conditions
x=np.full(1,a)
y=np.full((1,nv),y_a)
#set a flag
flag=1
#loop until we reach the right side
while(flag):
        #calculate y_i+1
        yi_new, h_new, h_step=rk4_mv_ad(dydx,xi,yi,nv,h,tol)
        #update the step
        h=h_new
if(xi+h_step>b):
#take a smaller step
h=b-xi
#recalculate y_i+1
yi_new,h_new,h_step=rk4_mv_ad(dydx,xi,yi,nv,h,tol)
#break
flag=0
#update values
xi+=h_step
yi[:]=yi_new[:]
#add the step to the arrays
x=np.append(x,xi)
y_new=np.zeros((len(x),nv))
y_new[0:len(x)-1,:]=y
y_new[-1,:]=yi[:]
del y
y=y_new
        #prevent too many iterations
        if(i>=imax):
            print("Maximum iterations reached")
raise StopIteration("Iteration number=",i)
#iterate
i+=1
#output some information
s="i=%3d\tx=%9.8f\th=%9.8f\tb=%9.8f"%(i,xi,h_step,b)
print(s)
#break if new xi is == b
if(xi==b):
flag=0
return x,y
###Output
_____no_output_____
###Markdown
Perform the integration
###Code
a=0.0
b=2.0*np.pi
y_0=np.zeros(2)
y_0[0]=0.0
y_0[1]=1.0
nv=2
tolerance=1.0e-6
#perform the integration
x,y=rk4_mv(dydx,a,b,y_0,tolerance)
###Output
_____no_output_____
###Markdown
Plot the results
###Code
plt.plot(x,y[:,0],'o',label='y(x)')
plt.plot(x,y[:,1],'o',label='dydx(x)')
xx=np.linspace(0,2.0*np.pi,1000)
plt.plot(xx,np.sin(xx),label='sin(x)')
plt.plot(xx,np.cos(xx),label='cos(x)')
plt.xlabel('x')
plt.ylabel('y,dy/dx')
plt.legend(frameon=False)
###Output
_____no_output_____
###Markdown
Create a notebook to perform Runge-Kutta integration for multiple coupled variables
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
###Output
_____no_output_____
###Markdown
Define our coupled derivatives to integrate
###Code
def dydx(x,y):
#set the derivatives
#our equation is d^2y/dx^2 = -y
#so we can write
#dydx = z
#dzdx = -y
#we will set y = y[0]
#we will set z = y[1]
#declare an array
y_derivs = np.zeros(2)
#set dydx = z
y_derivs[0] = y[1]
#set dzdx = -y
y_derivs[1] = -1*y[0]
#here we have to return an array
return y_derivs
###Output
_____no_output_____
###Markdown
Define the 4th order RK method
###Code
def rk4_mv_core(dydx,xi,yi,nv,h):
#declare k? arrays
k1 = np.zeros(nv)
k2 = np.zeros(nv)
k3 = np.zeros(nv)
k4 = np.zeros(nv)
#define x at 1/2 step
x_ipoh = xi +0.5*h
#define x at 1 step
x_ipo = xi + h
#declare a temp y array
y_temp = np.zeros(nv)
#get k1 values
y_derivs = dydx(xi,yi)
k1[:] = h*y_derivs[:]
#get k2 values
y_temp[:] = yi[:] + 0.5*k1[:]
y_derivs = dydx(x_ipoh,y_temp)
k2[:] = h*y_derivs[:]
#get k3 values
y_temp[:] = yi[:] + 0.5*k2[:]
y_derivs = dydx(x_ipoh,y_temp)
k3[:] = h*y_derivs[:]
#get k4 values
y_temp[:] = yi[:] + k3[:]
y_derivs = dydx(x_ipo,y_temp)
k4[:] = h*y_derivs[:]
#advance y by a step h
yipo = yi + (k1 + 2*k2 + 2*k3 + k4)/6
return yipo
###Output
_____no_output_____
###Markdown
Define an adaptive step size driver for RK4
###Code
def rk4_mv_ad(dydx,x_i,y_i,nv,h,tol):
#define safety scale
SAFETY = 0.9
H_NEW_FAC = 2.0
#set a maximum number of iterations
imax = 10000
#set an iteration variable
i = 0
#create an error
Delta = np.full(nv,2*tol)
#remember the step
h_step = h
#adjust step
while(Delta.max()/tol > 1.0):
#estimate our error by taking one step of size h
#vs. two steps of size h/2
y_2 = rk4_mv_core(dydx,x_i,y_i,nv,h_step)
y_1 = rk4_mv_core(dydx,x_i,y_i,nv,0.5*h_step)
y_11 = rk4_mv_core(dydx,x_i+0.5*h_step,y_1,nv,0.5*h_step)
#compute an error
Delta = np.fabs(y_2 - y_11)
#if the error is too large, take a smaller step
if(Delta.max()/tol > 1.0):
#our error is too large, decrease the step
h_step *= SAFETY * (Delta.max()/tol)**(-0.25)
#check iteration
if(i>=imax):
print("Too many iterations in rk4_mv_ad()")
raise StopIteration("Ending after i = ",i)
#iterate
i+=1
#next time, try to take a bigger step
h_new = np.fmin(h_step * (Delta.max()/tol)**(-0.9), h_step*H_NEW_FAC)
#return the answer, a new step, and the step we actually took
return y_2, h_new, h_step
###Output
_____no_output_____
###Markdown
Define a wrapper for RK4
###Code
def rk4_mv(dydx,a,b,y_a,tol):
#dydx is the derivative with respect to x
#a is the lower bound
#b is the upper bound
#y_a are the boundary conditions
#tol is the tolerance for integrating y
#define our starting step
xi = a
yi = y_a.copy()
#an initial step size == make very small!
h = 1.0e-4 * (b-a)
#set a maximum number of iterations
imax = 10000
#set an iteration variable
i = 0
#set the number of coupled odes to the
#size of y_a
nv = len(y_a)
#set the initial conditions
x = np.full(1,a)
y = np.full((1,nv),y_a)
#set a flag
flag = 1
#loop until we reach the right side
while(flag):
#calculate y_i+1
yi_new, h_new, h_step = rk4_mv_ad(dydx,xi,yi,nv,h,tol)
#update the step
h = h_new
#prevent an overshoot
if(xi+h_step>b):
#take a smaller step
h = b-xi
#recalculate y_i+1
yi_new, h_new, h_step = rk4_mv_ad(dydx,xi,yi,nv,h,tol)
#break
flag = 0
#update values
xi += h_step
yi[:] = yi_new[:]
#add the step to the arrays
x = np.append(x,xi)
y_new = np.zeros((len(x),nv))
y_new[0:len(x)-1,:] = y
y_new[-1,:] = yi[:]
del y
y = y_new
#prevent too many iterations
if(i>imax):
print("Maximum iterations reached.")
raise StopIteration("Iteration number = ",i)
#iterate
i += 1
#output some information
s = "i = %3d\tx = %9.8f\th = %9.8f\tb = %9.8f" % (i, xi, h_step, b)
print(s)
#break if new xi is == b
if(xi==b):
flag = 0
#return the answer
return x,y
###Output
_____no_output_____
###Markdown
Perform the integration
###Code
a = 0.0
b = 2.0 * np.pi
y_0 = np.zeros(2)
y_0[0] = 0.0
y_0[1] = 1.0
nv = 2
tolerance = 1.0e-6
#perform the integration
x,y = rk4_mv(dydx,a,b,y_0,tolerance)
###Output
_____no_output_____
###Markdown
Plot the result
###Code
plt.plot(x,y[:,0],'o',label='y(x)')
plt.plot(x,y[:,1],'o',label='dydx(x)')
xx = np.linspace(0,2.0*np.pi,1000)
plt.plot(xx,np.sin(xx),label='sin(x)')
plt.plot(xx,np.cos(xx),label='cos(x)')
plt.xlabel('x')
plt.ylabel('y, dy/dx')
plt.legend(frameon=False)
###Output
_____no_output_____
###Markdown
Plot the error (the errors actually exceed the tolerance, since the tolerance bounds each step's local error estimate rather than the accumulated global error)
###Code
sine = np.sin(x)
cosine = np.cos(x)
y_error = (y[:,0]-sine)
dydx_error = (y[:,1]-cosine)
plt.plot(x, y_error, label="y(x) Error")
plt.plot(x, dydx_error, label="dydx(x) Error")
plt.legend(frameon=False)
###Output
_____no_output_____
###Markdown
Create a notebook to perform Runge-Kutta integration for multiple coupled variables
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
###Output
_____no_output_____
###Markdown
Define our coupled derivatives to integrate
###Code
def dydx(x,y):
#set the derivatives
#our equation is d^2y/dx^2 = -y
#so we can write
#dydx = z
#dzdx = -y
#we will set y = y[0]
#we will set z = y[1]
#declare an array
y_derivs = np.zeros(2)
#set dydx = z
y_derivs[0] = y[1]
#set dzdx = -y
y_derivs[1] = -1*y[0]
#here we have to return an array
return y_derivs
###Output
_____no_output_____
###Markdown
Define the 4th order RK method
###Code
def rk4_mv_core(dydx,xi,yi,nv,h):
#declare k? arrays
k1 = np.zeros(nv)
k2 = np.zeros(nv)
k3 = np.zeros(nv)
k4 = np.zeros(nv)
#define x at 1/2 step
x_ipoh = xi +0.5*h
#define x at 1 step
x_ipo = xi + h
#declare a temp y array
y_temp = np.zeros(nv)
#get k1 values
y_derivs = dydx(xi,yi)
k1[:] = h*y_derivs[:]
#get k2 values
y_temp[:] = yi[:] + 0.5*k1[:]
y_derivs = dydx(x_ipoh,y_temp)
k2[:] = h*y_derivs[:]
#get k3 values
y_temp[:] = yi[:] + 0.5*k2[:]
y_derivs = dydx(x_ipoh,y_temp)
k3[:] = h*y_derivs[:]
#get k4 values
y_temp[:] = yi[:] + k3[:]
y_derivs = dydx(x_ipo,y_temp)
k4[:] = h*y_derivs[:]
#advance y by a step h
yipo = yi + (k1 + 2*k2 + 2*k3 + k4)/6
return yipo
###Output
_____no_output_____
###Markdown
Define an adaptive step size driver for RK4
###Code
def rk4_mv_ad(dydx,x_i,y_i,nv,h,tol):
#define safety scale
SAFETY = 0.9
H_NEW_FAC = 2.0
#set a maximum number of iterations
imax = 10000
#set an iteration variable
i = 0
#create an error
Delta = np.full(nv,2*tol)
#remember the step
h_step = h
#adjust step
while(Delta.max()/tol > 1.0):
#estimate our error by taking one step of size h
#vs. two steps of size h/2
y_2 = rk4_mv_core(dydx,x_i,y_i,nv,h_step)
y_1 = rk4_mv_core(dydx,x_i,y_i,nv,0.5*h_step)
y_11 = rk4_mv_core(dydx,x_i+0.5*h_step,y_1,nv,0.5*h_step)
#compute an error
Delta = np.fabs(y_2 - y_11)
#if the error is too large, take a smaller step
if(Delta.max()/tol > 1.0):
#our error is too large, decrease the step
h_step *= SAFETY * (Delta.max()/tol)**(-0.25)
#check iteration
if(i>=imax):
print("Too many iterations in rk4_mv_ad()")
raise StopIteration("Ending after i = ",i)
#iterate
i+=1
#next time, try to take a bigger step
h_new = np.fmin(h_step * (Delta.max()/tol)**(-0.9), h_step*H_NEW_FAC)
#return the answer, a new step, and the step we actually took
return y_2, h_new, h_step
###Output
_____no_output_____
###Markdown
Define a wrapper for RK4
###Code
def rk4_mv(dydx,a,b,y_a,tol):
#dydx is the derivative with respect to x
#a is the lower bound
#b is the upper bound
#y_a are the boundary conditions
#tol is the tolerance for integrating y
#define our starting step
xi = a
yi = y_a.copy()
#an initial step size == make very small!
h = 1.0e-4 * (b-a)
#set a maximum number of iterations
imax = 10000
#set an iteration variable
i = 0
#set the number of coupled odes to the
#size of y_a
nv = len(y_a)
#set the initial conditions
x = np.full(1,a)
y = np.full((1,nv),y_a)
###Output
_____no_output_____
###Markdown
4th order Runge-Kutta with adaptive step size
- small time step to improve accuracy
- integration more efficient (partition)

A simple coupled ODE: d^2y/dx^2 = -y. For all x the second derivative of y is -y (a sin or cos curve)
- specify boundary conditions to determine which
- y(0) = 0 and dy/dx (x = 0) = 1 --> sin(x)

Rewrite as coupled ODEs to solve numerically (slide 8)
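
Written out, the reduction described above is

$$\frac{d^2y}{dx^2} = -y \quad\Longrightarrow\quad \frac{dy}{dx} = z,\qquad \frac{dz}{dx} = -y,\qquad y(0)=0,\; z(0)=1,$$

so the exact solution is $y(x) = \sin(x)$ and $z(x) = \cos(x)$, which is what the numerical arrays can be checked against.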
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
#define coupled derivatives to integrate
def dydx(x,y):
#y is a 2D array
#equation is d^2y/dx^2 = -y
#so: dydx = z, dz/dx = -y
#set y = y[0], z = y[1]
#declare array
y_derivs = np.zeros(2)
y_derivs[0] = y[1]
y_derivs[1] = -1*y[0]
return y_derivs
#can't evolve one without evolving the other, dependent variables
#define 4th order RK method
def rk4_mv_core(dydx,xi,yi,nv,h):
#nv = number of variables
# h = width
#declare k arrays
k1 = np.zeros(nv)
k2 = np.zeros(nv)
k3 = np.zeros(nv)
k4 = np.zeros(nv)
#define x at half step
x_ipoh = xi + 0.5*h
#define x at 1 step
x_ipo = xi + h
#declare a temp y array
y_temp = np.zeros(nv)
#find k1 values
y_derivs = dydx(xi,yi) #array of y derivatives
k1[:] = h*y_derivs[:] #taking diff euler steps for derivs
#get k2 values
y_temp[:] = yi[:] + 0.5*k1[:]
y_derivs = dydx(x_ipoh,y_temp)
k2[:] = h*y_derivs[:]
#get k3 values
    y_temp[:] = yi[:] + 0.5*k2[:]
y_derivs = dydx(x_ipoh,y_temp)
k3[:] = h*y_derivs[:]
    #get k4 values
    y_temp[:] = yi[:] + k3[:]
    y_derivs = dydx(x_ipo,y_temp)
    k4[:] = h*y_derivs[:]
#advance y by step h
yipo = yi + (k1 + 2*k2 + 2*k3 + k4)/6. #this is an array
return yipo
###Output
_____no_output_____
###Markdown
Before, we took a single step. Now we take two different versions of the same step, which can be used as a check on the previous technique: the difference should be within tolerance to be valid (if the steps are too big and outside of tolerance then they need to be smaller bebeh steps).
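
In symbols (with SAFETY = 0.9 and H_NEW_FAC = 2.0 as in the driver below), the per-step error estimate and step-size control are

$$\Delta = \bigl|\,y^{(h)} - y^{(h/2,\,h/2)}\,\bigr|,\qquad h \leftarrow 0.9\,h\left(\frac{\Delta_{\max}}{\mathrm{tol}}\right)^{-1/4}\ \text{while}\ \Delta_{\max} > \mathrm{tol},$$

and once a step is accepted the next trial step is $h_{\mathrm{new}} = \min\!\bigl(h\,(\Delta_{\max}/\mathrm{tol})^{-0.9},\,2h\bigr)$.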
###Code
#define adaptive step size for RK4
def rk4_mv_ad(dydx,x_i,y_i,nv,h,tol):
#define safety scale
SAFETY = 0.9
H_NEW_FAC = 2.0
#set max number of iterations
imax = 10000
#set iteration variable, num of iterations taken
i = 0
#create an error (array)
Delta = np.full(nv,2*tol) #twice the tol, if it exceeds tol
#steps need to be smoler
#remember step
h_step = h
#adjust step
while(Delta.max()/tol > 1.0): #while loop
#estimate error by taking one step of size h vs two steps of size h/2
y_2 = rk4_mv_core(dydx,x_i,y_i,nv,h_step)
y_1 = rk4_mv_core(dydx,x_i,y_i,nv,0.5*h_step)
        y_11 = rk4_mv_core(dydx,x_i+0.5*h_step,y_1,nv,0.5*h_step)
#compute error
        Delta = np.fabs(y_2 - y_11)
#if the error is too large
if(Delta.max()/tol > 1.0):
h_step *= SAFETY * (Delta.max()/tol)**(-0.25) #decreases h step size
#check iteration
if(i>=imax):
print("Too many iterations in rk4_mv_ad()")
raise StopIteration("Ending after i = ",i)
#iterate
i+=1
#leave while loop, to try bigger steps
    h_new = np.fmin(h_step * (Delta.max()/tol)**(-0.9), h_step*H_NEW_FAC)
#return the answer, the new step, and the step actually taken
return y_2, h_new, h_step
#wrapper function
def rk4_mv(dydx,a,b,y_a,tol):
#dydx = deriv wrt x
#a = lower bound
#b = upper bound
#y_a = boundary conditions (0,1)
#tol = tolerance for integrating y
#define starting step
xi = a
yi = y_a.copy()
#initial step size (smallllll)
h = 1.0e-4 * (b-a)
#max number of iterations
imax = 10000
#set iteration variable
i = 0
#set the number of coupled ODEs to the size of y_a
    nv = len(y_a)
#set initial conditions
    x = np.full(1,a)
y = np.full((1,nv),y_a) #2 dimensional array
#set flag
flag = 1
#loop until we reach the right side
while(flag):
###Output
_____no_output_____
###Markdown
Create a notebook to perform Runge-Kutta integration for multiple coupled variables.
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
###Output
_____no_output_____
###Markdown
This cell and the one following it are not a requirement, it is only for looks
###Code
#use colors.subclass(or command; e.g bold).colorname to print
#examples: print(colors.bold, colors.fg.blue, "this will be bold and blue")
#everything after this will have that format until the following command
#is given: print(colors.reset, "now, this text will be normal")
class colors:
reset='\033[0m' #reset all colors with colors.reset
bold='\033[01m'
underline='\033[04m'
strikethrough='\033[09m'
reverse='\033[07m'
class fg: #foreground subclass
black='\033[30m'
red='\033[31m'
green='\033[32m'
orange='\033[33m'
blue='\033[34m'
purple='\033[35m'
cyan='\033[36m'
lightgrey='\033[37m'
darkgrey='\033[90m'
lightred='\033[91m'
lightgreen='\033[92m'
yellow='\033[93m'
lightblue='\033[94m'
pink='\033[95m'
lightcyan='\033[96m'
class bg: #background subclass
black='\033[40m'
red='\033[41m'
green='\033[42m'
orange='\033[43m'
blue='\033[44m'
purple='\033[45m'
cyan='\033[46m'
lightgrey='\033[47m'
###Output
_____no_output_____
###Markdown
The above code was provided by https://www.geeksforgeeks.org/print-colors-python-terminal/
Define our coupled derivatives to integrate
###Code
def dydx(x,y):
#set the derivatives
#our equation is d^2y/dx^2 = -y
#so we can write
#dydx = z
#dzdx = -y
#we will set y = y[0]
#we will set z = y[1]
#declare an array
y_derivs = np.zeros(2)
#set dydx = z
y_derivs[0] = y[1]
#set dzdx = -y
y_derivs[1] = -1*y[0]
#here we have to return an array with dydx
return y_derivs
###Output
_____no_output_____
###Markdown
Define the 4th order RK method
###Code
def rk4_mv_core(dydx,xi,yi,nv,h):
#declare k? arrays; (? is a wildcard, used for k1,k2,...,kn)
k1=np.zeros(nv)
k2=np.zeros(nv)
k3=np.zeros(nv)
k4=np.zeros(nv)
#define x at 1/2 step
x_ipoh = xi + 0.5*h
#define x at 1 step
x_ipo = xi + h
#declare a temp y array
y_temp = np.zeros(nv)
#get k1 values
y_derivs = dydx(xi,yi)
k1[:] = h*y_derivs[:]
#get k2 values
y_temp[:] = yi[:] + 0.5*k1[:]
y_derivs = dydx(x_ipoh,y_temp)
k2[:] = h*y_derivs[:]
#get k3 values
y_temp[:] = yi[:] + 0.5*k2[:]
y_derivs = dydx(x_ipoh,y_temp)
k3[:] = h*y_derivs[:]
#get k4 values
y_temp[:] = yi[:] + k3[:]
y_derivs = dydx(x_ipo,y_temp)
k4[:] = h*y_derivs[:]
#advance y by a step h
yipo = yi + (k1 + 2*k2 + 2*k3 + k4)/6.
return yipo
###Output
_____no_output_____
###Markdown
Define an adaptive step size driver for RK4
###Code
def rk4_mv_ad(dydx,x_i,y_i,nv,h,tol):
#define safety scale
SAFETY = 0.9
H_NEW_FAC = 2.0
#set a max number of iterations
imax = 10000
#set an iteration variable
i = 0
#create an error
Delta = np.full(nv,2*tol)
#remember the step
h_step = h
#adjust step
while(Delta.max()/tol > 1.0):
#estimate our error by taking one step of size h
#vs. two steps of size h/2
y_2 = rk4_mv_core(dydx,x_i,y_i,nv,h_step)
y_1 = rk4_mv_core(dydx,x_i,y_i,nv,0.5*h_step)
y_11 = rk4_mv_core(dydx,x_i+0.5*h_step,y_1,nv,0.5*h_step)
#compute an error
Delta = np.fabs(y_2 - y_11)
#if the error is too large, take a smaller step
if(Delta.max()/tol > 1.0):
#our error is too large, decrease the step
h_step *= SAFETY * (Delta.max()/tol)**(-0.25)
#check iteration
if(i>=imax):
print("Too many iterations in rk4_mv_ad()")
raise StopIteration("Ending after i = ",i)
#iterate
i+=1
#next time, try to take a bigger step
h_new = np.fmin(h_step * (Delta.max()/tol)**(-0.9), h_step*H_NEW_FAC)
#return the answer, a new step, and the step we actually took
return y_2, h_new, h_step
###Output
_____no_output_____
###Markdown
Define a wrapper for RK4
###Code
def rk4_mv(dydx,a,b,y_a,tol):
    #dydx is derivative w.r.t. x
#a is lower bound
#b is upper bound
#y_a are boundary conditions
#tol is tolerance for integrating y
#define our starting step
xi = a
yi = y_a.copy()
#an initial step size == make very small
h = 1.0e-4 * (b-a)
#set max number of iterations
imax = 10000
#set an iteration variable
i = 0
#set the number of coupled ODEs to the size of y_a
nv = len(y_a)
#set the initial conditions
x = np.full(1,a)
y = np.full((1,nv),y_a)
#set a flag
flag = 1
#loop until we reach the right side
while(flag):
#calculate y_i+1
yi_new, h_new, h_step = rk4_mv_ad(dydx,xi,yi,nv,h,tol)
#update the step
h = h_new
#prevent an overshoot
if(xi+h_step>b):
#take a smaller step
h = b-xi
#recalculate y_i+1
yi_new, h_new, h_step = rk4_mv_ad(dydx,xi,yi,nv,h,tol)
#break
flag = 0
#update values
xi += h_step
yi[:] = yi_new[:]
#add the step to the arrays
x = np.append(x,xi)
y_new = np.zeros((len(x),nv))
y_new[0:len(x)-1,:] = y
y_new[-1,:] = yi[:]
del y
y = y_new
#prevent too many iterations
if(i>=imax):
print(colors.bold, colors.fg.red, colors.bg.black, "Maximum iterations reached.", colors.reset)
raise StopIteration("Iteration number = ",i)
#iterate
i += 1
#output some information
s = "i =%3d\tx = %9.8f\th = %9.8f\tb=%9.8f" % (i,xi,h_step,b)
print(s)
#break if new xi is == b
if(xi==b):
flag = 0
#return the answer
print(colors.bold, colors.fg.purple, "Iteration #", i, colors.reset)
return x,y
###Output
_____no_output_____
###Markdown
Perform the integration
###Code
a = 0.0
b = 2.0 * np.pi
y_0 = np.zeros(2)
y_0[0] = 0.0
y_0[1] = 1.0
nv = 2
tolerance = 1.0e-6
#perform the integration
x,y = rk4_mv(dydx,a,b,y_0,tolerance)
###Output
i = 1 x = 0.00000186 h = 0.00000186 b=6.28318531
i = 2 x = 0.00000384 h = 0.00000199 b=6.28318531
i = 3 x = 0.00000584 h = 0.00000200 b=6.28318531
i = 4 x = 0.00000784 h = 0.00000200 b=6.28318531
i = 5 x = 0.00000984 h = 0.00000200 b=6.28318531
i = 6 x = 0.00001184 h = 0.00000200 b=6.28318531
i = 7 x = 0.00001384 h = 0.00000200 b=6.28318531
i = 8 x = 0.00001584 h = 0.00000200 b=6.28318531
i = 9 x = 0.00001784 h = 0.00000200 b=6.28318531
i = 10 x = 0.00001984 h = 0.00000200 b=6.28318531
i = 11 x = 0.00002184 h = 0.00000200 b=6.28318531
i = 12 x = 0.00002384 h = 0.00000200 b=6.28318531
i = 13 x = 0.00002584 h = 0.00000200 b=6.28318531
i = 14 x = 0.00002784 h = 0.00000200 b=6.28318531
i = 15 x = 0.00002984 h = 0.00000200 b=6.28318531
i = 16 x = 0.00003184 h = 0.00000200 b=6.28318531
i = 17 x = 0.00003384 h = 0.00000200 b=6.28318531
i = 18 x = 0.00003584 h = 0.00000200 b=6.28318531
i = 19 x = 0.00003784 h = 0.00000200 b=6.28318531
i = 20 x = 0.00003984 h = 0.00000200 b=6.28318531
i = 21 x = 0.00004184 h = 0.00000200 b=6.28318531
i = 22 x = 0.00004384 h = 0.00000200 b=6.28318531
i = 23 x = 0.00004584 h = 0.00000200 b=6.28318531
i = 24 x = 0.00004784 h = 0.00000200 b=6.28318531
i = 25 x = 0.00004984 h = 0.00000200 b=6.28318531
i = 26 x = 0.00005184 h = 0.00000200 b=6.28318531
i = 27 x = 0.00005384 h = 0.00000200 b=6.28318531
i = 28 x = 0.00005584 h = 0.00000200 b=6.28318531
i = 29 x = 0.00005784 h = 0.00000200 b=6.28318531
i = 30 x = 0.00005984 h = 0.00000200 b=6.28318531
i = 31 x = 0.00006184 h = 0.00000200 b=6.28318531
i = 32 x = 0.00006384 h = 0.00000200 b=6.28318531
i = 33 x = 0.00006584 h = 0.00000200 b=6.28318531
i = 34 x = 0.00006784 h = 0.00000200 b=6.28318531
i = 35 x = 0.00006984 h = 0.00000200 b=6.28318531
i = 36 x = 0.00007184 h = 0.00000200 b=6.28318531
i = 37 x = 0.00007384 h = 0.00000200 b=6.28318531
i = 38 x = 0.00007584 h = 0.00000200 b=6.28318531
i = 39 x = 0.00007784 h = 0.00000200 b=6.28318531
i = 40 x = 0.00007984 h = 0.00000200 b=6.28318531
i = 41 x = 0.00008184 h = 0.00000200 b=6.28318531
i = 42 x = 0.00008384 h = 0.00000200 b=6.28318531
i = 43 x = 0.00008584 h = 0.00000200 b=6.28318531
i = 44 x = 0.00008784 h = 0.00000200 b=6.28318531
i = 45 x = 0.00008984 h = 0.00000200 b=6.28318531
i = 46 x = 0.00009184 h = 0.00000200 b=6.28318531
i = 47 x = 0.00009384 h = 0.00000200 b=6.28318531
i = 48 x = 0.00009584 h = 0.00000200 b=6.28318531
i = 49 x = 0.00009784 h = 0.00000200 b=6.28318531
i = 50 x = 0.00009984 h = 0.00000200 b=6.28318531
i = 51 x = 0.00010184 h = 0.00000200 b=6.28318531
i = 52 x = 0.00010384 h = 0.00000200 b=6.28318531
i = 53 x = 0.00010584 h = 0.00000200 b=6.28318531
i = 54 x = 0.00010784 h = 0.00000200 b=6.28318531
i = 55 x = 0.00010984 h = 0.00000200 b=6.28318531
i = 56 x = 0.00011184 h = 0.00000200 b=6.28318531
i = 57 x = 0.00011384 h = 0.00000200 b=6.28318531
i = 58 x = 0.00011584 h = 0.00000200 b=6.28318531
i = 59 x = 0.00011784 h = 0.00000200 b=6.28318531
i = 60 x = 0.00011984 h = 0.00000200 b=6.28318531
i = 61 x = 0.00012184 h = 0.00000200 b=6.28318531
i = 62 x = 0.00012384 h = 0.00000200 b=6.28318531
i = 63 x = 0.00012584 h = 0.00000200 b=6.28318531
i = 64 x = 0.00012784 h = 0.00000200 b=6.28318531
i = 65 x = 0.00012984 h = 0.00000200 b=6.28318531
i = 66 x = 0.00013184 h = 0.00000200 b=6.28318531
i = 67 x = 0.00013384 h = 0.00000200 b=6.28318531
i = 68 x = 0.00013584 h = 0.00000200 b=6.28318531
i = 69 x = 0.00013784 h = 0.00000200 b=6.28318531
i = 70 x = 0.00013984 h = 0.00000200 b=6.28318531
i = 71 x = 0.00014184 h = 0.00000200 b=6.28318531
i = 72 x = 0.00014384 h = 0.00000200 b=6.28318531
i = 73 x = 0.00014584 h = 0.00000200 b=6.28318531
i = 74 x = 0.00014784 h = 0.00000200 b=6.28318531
i = 75 x = 0.00014984 h = 0.00000200 b=6.28318531
i = 76 x = 0.00015184 h = 0.00000200 b=6.28318531
i = 77 x = 0.00015384 h = 0.00000200 b=6.28318531
i = 78 x = 0.00015584 h = 0.00000200 b=6.28318531
i = 79 x = 0.00015784 h = 0.00000200 b=6.28318531
i = 80 x = 0.00015984 h = 0.00000200 b=6.28318531
i = 81 x = 0.00016184 h = 0.00000200 b=6.28318531
i = 82 x = 0.00016384 h = 0.00000200 b=6.28318531
i = 83 x = 0.00016584 h = 0.00000200 b=6.28318531
i = 84 x = 0.00016784 h = 0.00000200 b=6.28318531
i = 85 x = 0.00016984 h = 0.00000200 b=6.28318531
i = 86 x = 0.00017184 h = 0.00000200 b=6.28318531
i = 87 x = 0.00017384 h = 0.00000200 b=6.28318531
i = 88 x = 0.00017584 h = 0.00000200 b=6.28318531
i = 89 x = 0.00017784 h = 0.00000200 b=6.28318531
i = 90 x = 0.00017984 h = 0.00000200 b=6.28318531
i = 91 x = 0.00018184 h = 0.00000200 b=6.28318531
i = 92 x = 0.00018384 h = 0.00000200 b=6.28318531
i = 93 x = 0.00018584 h = 0.00000200 b=6.28318531
i = 94 x = 0.00018784 h = 0.00000200 b=6.28318531
i = 95 x = 0.00018984 h = 0.00000200 b=6.28318531
i = 96 x = 0.00019184 h = 0.00000200 b=6.28318531
i = 97 x = 0.00019384 h = 0.00000200 b=6.28318531
i = 98 x = 0.00019584 h = 0.00000200 b=6.28318531
i = 99 x = 0.00019784 h = 0.00000200 b=6.28318531
i =100 x = 0.00019984 h = 0.00000200 b=6.28318531
i =101 x = 0.00020184 h = 0.00000200 b=6.28318531
i =102 x = 0.00020384 h = 0.00000200 b=6.28318531
i =103 x = 0.00020584 h = 0.00000200 b=6.28318531
i =104 x = 0.00020784 h = 0.00000200 b=6.28318531
i =105 x = 0.00020984 h = 0.00000200 b=6.28318531
i =106 x = 0.00021184 h = 0.00000200 b=6.28318531
i =107 x = 0.00021384 h = 0.00000200 b=6.28318531
i =108 x = 0.00021584 h = 0.00000200 b=6.28318531
i =109 x = 0.00021784 h = 0.00000200 b=6.28318531
i =110 x = 0.00021984 h = 0.00000200 b=6.28318531
i =111 x = 0.00022184 h = 0.00000200 b=6.28318531
i =112 x = 0.00022384 h = 0.00000200 b=6.28318531
i =113 x = 0.00022584 h = 0.00000200 b=6.28318531
i =114 x = 0.00022784 h = 0.00000200 b=6.28318531
i =115 x = 0.00022984 h = 0.00000200 b=6.28318531
i =116 x = 0.00023184 h = 0.00000200 b=6.28318531
i =117 x = 0.00023384 h = 0.00000200 b=6.28318531
i =118 x = 0.00023584 h = 0.00000200 b=6.28318531
i =119 x = 0.00023784 h = 0.00000200 b=6.28318531
i =120 x = 0.00023984 h = 0.00000200 b=6.28318531
i =121 x = 0.00024184 h = 0.00000200 b=6.28318531
i =122 x = 0.00024384 h = 0.00000200 b=6.28318531
i =123 x = 0.00024584 h = 0.00000200 b=6.28318531
i =124 x = 0.00024784 h = 0.00000200 b=6.28318531
i =125 x = 0.00024984 h = 0.00000200 b=6.28318531
i =126 x = 0.00025184 h = 0.00000200 b=6.28318531
i =127 x = 0.00025384 h = 0.00000200 b=6.28318531
i =128 x = 0.00025584 h = 0.00000200 b=6.28318531
i =129 x = 0.00025784 h = 0.00000200 b=6.28318531
i =130 x = 0.00025984 h = 0.00000200 b=6.28318531
i =131 x = 0.00026184 h = 0.00000200 b=6.28318531
i =132 x = 0.00026384 h = 0.00000200 b=6.28318531
i =133 x = 0.00026584 h = 0.00000200 b=6.28318531
i =134 x = 0.00026784 h = 0.00000200 b=6.28318531
i =135 x = 0.00026984 h = 0.00000200 b=6.28318531
i =136 x = 0.00027184 h = 0.00000200 b=6.28318531
i =137 x = 0.00027384 h = 0.00000200 b=6.28318531
i =138 x = 0.00027584 h = 0.00000200 b=6.28318531
i =139 x = 0.00027784 h = 0.00000200 b=6.28318531
i =140 x = 0.00027984 h = 0.00000200 b=6.28318531
i =141 x = 0.00028184 h = 0.00000200 b=6.28318531
i =142 x = 0.00028384 h = 0.00000200 b=6.28318531
i =143 x = 0.00028584 h = 0.00000200 b=6.28318531
i =144 x = 0.00028784 h = 0.00000200 b=6.28318531
i =145 x = 0.00028984 h = 0.00000200 b=6.28318531
i =146 x = 0.00029184 h = 0.00000200 b=6.28318531
i =147 x = 0.00029384 h = 0.00000200 b=6.28318531
i =148 x = 0.00029584 h = 0.00000200 b=6.28318531
i =149 x = 0.00029784 h = 0.00000200 b=6.28318531
i =150 x = 0.00029984 h = 0.00000200 b=6.28318531
i =151 x = 0.00030184 h = 0.00000200 b=6.28318531
i =152 x = 0.00030384 h = 0.00000200 b=6.28318531
i =153 x = 0.00030584 h = 0.00000200 b=6.28318531
i =154 x = 0.00030784 h = 0.00000200 b=6.28318531
i =155 x = 0.00030984 h = 0.00000200 b=6.28318531
i =156 x = 0.00031184 h = 0.00000200 b=6.28318531
i =157 x = 0.00031384 h = 0.00000200 b=6.28318531
i =158 x = 0.00031584 h = 0.00000200 b=6.28318531
i =159 x = 0.00031784 h = 0.00000200 b=6.28318531
i =160 x = 0.00031984 h = 0.00000200 b=6.28318531
i =161 x = 0.00032184 h = 0.00000200 b=6.28318531
i =162 x = 0.00032384 h = 0.00000200 b=6.28318531
i =163 x = 0.00032584 h = 0.00000200 b=6.28318531
i =164 x = 0.00032784 h = 0.00000200 b=6.28318531
i =165 x = 0.00032984 h = 0.00000200 b=6.28318531
i =166 x = 0.00033184 h = 0.00000200 b=6.28318531
i =167 x = 0.00033384 h = 0.00000200 b=6.28318531
i =168 x = 0.00033584 h = 0.00000200 b=6.28318531
i =169 x = 0.00033784 h = 0.00000200 b=6.28318531
i =170 x = 0.00033984 h = 0.00000200 b=6.28318531
i =171 x = 0.00034184 h = 0.00000200 b=6.28318531
i =172 x = 0.00034384 h = 0.00000200 b=6.28318531
i =173 x = 0.00034584 h = 0.00000200 b=6.28318531
i =174 x = 0.00034784 h = 0.00000200 b=6.28318531
i =175 x = 0.00034984 h = 0.00000200 b=6.28318531
i =176 x = 0.00035184 h = 0.00000200 b=6.28318531
i =177 x = 0.00035384 h = 0.00000200 b=6.28318531
i =178 x = 0.00035584 h = 0.00000200 b=6.28318531
i =179 x = 0.00035784 h = 0.00000200 b=6.28318531
i =180 x = 0.00035984 h = 0.00000200 b=6.28318531
i =181 x = 0.00036184 h = 0.00000200 b=6.28318531
i =182 x = 0.00036384 h = 0.00000200 b=6.28318531
i =183 x = 0.00036584 h = 0.00000200 b=6.28318531
i =184 x = 0.00036784 h = 0.00000200 b=6.28318531
i =185 x = 0.00036984 h = 0.00000200 b=6.28318531
i =186 x = 0.00037184 h = 0.00000200 b=6.28318531
i =187 x = 0.00037384 h = 0.00000200 b=6.28318531
i =188 x = 0.00037584 h = 0.00000200 b=6.28318531
i =189 x = 0.00037784 h = 0.00000200 b=6.28318531
i =190 x = 0.00037984 h = 0.00000200 b=6.28318531
i =191 x = 0.00038184 h = 0.00000200 b=6.28318531
i =192 x = 0.00038384 h = 0.00000200 b=6.28318531
i =193 x = 0.00038584 h = 0.00000200 b=6.28318531
i =194 x = 0.00038784 h = 0.00000200 b=6.28318531
i =195 x = 0.00038984 h = 0.00000200 b=6.28318531
i =196 x = 0.00039184 h = 0.00000200 b=6.28318531
i =197 x = 0.00039384 h = 0.00000200 b=6.28318531
i =198 x = 0.00039584 h = 0.00000200 b=6.28318531
i =199 x = 0.00039784 h = 0.00000200 b=6.28318531
i =200 x = 0.00039984 h = 0.00000200 b=6.28318531
i =201 x = 0.00040184 h = 0.00000200 b=6.28318531
i =202 x = 0.00040384 h = 0.00000200 b=6.28318531
i =203 x = 0.00040584 h = 0.00000200 b=6.28318531
i =204 x = 0.00040784 h = 0.00000200 b=6.28318531
i =205 x = 0.00040984 h = 0.00000200 b=6.28318531
i =206 x = 0.00041184 h = 0.00000200 b=6.28318531
i =207 x = 0.00041384 h = 0.00000200 b=6.28318531
i =208 x = 0.00041584 h = 0.00000200 b=6.28318531
i =209 x = 0.00041784 h = 0.00000200 b=6.28318531
i =210 x = 0.00041984 h = 0.00000200 b=6.28318531
i =211 x = 0.00042184 h = 0.00000200 b=6.28318531
i =212 x = 0.00042384 h = 0.00000200 b=6.28318531
[... output truncated: iterations i = 213 through 608 repeat with the same fixed step h = 0.00000200, x advancing by 0.000002 per line ...]
i =609 x = 0.00121784 h = 0.00000200 b=6.28318531
###Markdown
Define our coupled derivatives to integrate
###Code
def dydx(x,y):
#set the derivatives
#our equation is d^2y/dx^2 = -y
#dydx=z
#dzdx=-y
#set y=y[0]
#set z=y[1]
#declare an array
y_derivs = np.zeros(2)
#set dydx=z
y_derivs[0] = y[1]
#set dzdx=-y
y_derivs[1] = -1*y[0]
#here we have to return an array
return y_derivs
###Output
_____no_output_____
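###Markdown
Written out as equations, the reduction described in the comments above takes the second-order problem to a first-order system by introducing $z = dy/dx$:
$$\frac{d^2y}{dx^2} = -y \quad\Longrightarrow\quad \frac{dy}{dx} = z,\qquad \frac{dz}{dx} = -y.$$
With the initial conditions used later in this notebook, $y(0)=0$ and $z(0)=1$, the exact solution is $y(x)=\sin x$ and $z(x)=\cos x$, which is why the results are compared against $\sin$ and $\cos$.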
###Markdown
Define the 4th order RK method
###Code
def rk4_mv_core(dydx,xi,yi,nv,h):
#declare k? arrays
k1 = np.zeros(nv)
k2 = np.zeros(nv)
k3 = np.zeros(nv)
k4 = np.zeros(nv)
#define x at 1/2 step
x_ipoh = xi+0.5*h
#define x at 1 step
x_ipo=xi+h
#declare a temp y array
y_temp = np.zeros(nv)
#get k1 values
y_derivs = dydx(xi,yi)
k1[:]=h*y_derivs[:]
#get k2 values
y_temp[:]=yi[:]+0.5*k1[:]
y_derivs = dydx(x_ipoh,y_temp)
k2[:] = h*y_derivs[:]
#get k3 values
y_temp[:]=yi[:]+0.5*k2[:]
y_derivs = dydx(x_ipoh,y_temp)
k3[:] = h*y_derivs[:]
#get k4 values
y_temp[:]=yi[:]+k3[:]
y_derivs = dydx(x_ipo,y_temp)
k4[:] = h*y_derivs[:]
#advance y by a step h
yipo = yi + (k1 + 2*k2 + 2*k3 + k4)/6.
return yipo
###Output
_____no_output_____
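###Markdown
For reference, the update implemented in `rk4_mv_core` is the classical fourth-order Runge-Kutta step for a system $y' = f(x,y)$:
$$k_1 = h\,f(x_i, y_i),\quad k_2 = h\,f\!\left(x_i+\tfrac{h}{2},\, y_i+\tfrac{k_1}{2}\right),\quad k_3 = h\,f\!\left(x_i+\tfrac{h}{2},\, y_i+\tfrac{k_2}{2}\right),\quad k_4 = h\,f\!\left(x_i+h,\, y_i+k_3\right),$$
$$y_{i+1} = y_i + \tfrac{1}{6}\left(k_1 + 2k_2 + 2k_3 + k_4\right).$$
Its local truncation error per step is $O(h^5)$, so the method is globally fourth-order accurate.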
###Markdown
Define an adaptive step size driver for RK4
###Code
def rk4_mv_ad(dydx,x_i,y_i,nv,h,tol):
#define safety scale
SAFETY = 0.9
H_NEW_FAC = 2.0
#set a maximum number of iterations
imax = 10000
#set an iteration variable
i = 0
#create an error
Delta = np.full(nv,2*tol)
#remember the step
h_step = h
#adjust step
while(Delta.max()/tol > 1.0):
#estimate error by taking one step of size h vs. two steps of size h/2
y_2 = rk4_mv_core(dydx,x_i,y_i,nv,h_step)
y_1 = rk4_mv_core(dydx,x_i,y_i,nv,0.5*h_step)
y_11 = rk4_mv_core(dydx,x_i+0.5*h_step,y_1,nv,0.5*h_step)
#compute an error
Delta = np.fabs(y_2 - y_11)
#if the error is too large, take a smaller step
if(Delta.max()/tol>1.0):
#decrease the step
h_step *= SAFETY*(Delta.max()/tol)**(-0.25)
#check iteration
if(i>=imax):
print("Too many iterations in rk4_mv_ad()")
raise StopIteration("Ending after i = ",i)
#iterate
i+=1
#next time, try to take a bigger step
h_new = np.fmin(h_step * (Delta.max()/tol)**(-0.9),h_step*H_NEW_FAC)
#return the answer, a new step, and the step we actually took
return y_2, h_new, h_step
###Output
_____no_output_____
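###Markdown
A short sketch of the step-size logic above (the SAFETY factor and the specific exponents are this notebook's choices): the error is estimated by step doubling, comparing one step of size $h$ against two steps of size $h/2$,
$$\Delta = \left|\,y^{(h)} - y^{(h/2,\ 2\ \mathrm{steps})}\,\right|.$$
Since the one-step error of RK4 scales roughly as $h^5$, rescaling the step by a power of $\Delta_{\max}/\mathrm{tol}$ brings the estimate back under the tolerance: the step is shrunk by $\mathrm{SAFETY}\,(\Delta_{\max}/\mathrm{tol})^{-1/4}$ whenever the error is too large, while the next step is allowed to grow only up to twice the step just taken (`H_NEW_FAC = 2`).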
###Markdown
Define a wrapper for RK4
###Code
def rk4_mv(dfdx,a,b,y_a,tol):
#dfdx is the derivative wrt x
#a is the lower bound
#b is the upper bound
#y_a are the boundary conditions
#tol is the tolerance for integrating y
#define our starting step
xi=a
yi=y_a.copy()
#an initial step size == make very small!
h = 1.0e-4 * (b-a)
#set a maximum number of iterations
imax = 10000
#set an iteration variable
i = 0
#set the number of coupled ODEs to the size of y_a
nv=len(y_a)
#set initial conditions
x = np.full(1,a)
y = np.full((1,nv),y_a)
#set a flag
flag = 1
#loop until we reach the right side
while(flag):
#calculate y_i+1
yi_new, h_new, h_step = rk4_mv_ad(dydx,xi,yi,nv,h,tol)
#update the step
h = h_new
#prevent an overshoot
if(xi+h_step>b):
#take a smaller step
h = b-xi
#recalculate y_i+1
yi_new, h_new, h_step = rk4_mv_ad(dydx,xi,yi,nv,h,tol)
#break
flag=0
#update values
xi += h_step
yi[:] = yi_new[:]
#add the step to the arrays
x = np.append(x,xi)
y_new = np.zeros((len(x),nv))
y_new[0:len(x)-1,:] = y
y_new[-1,:] = yi[:]
del y
y=y_new
#prevent too many iterations
if(i>=imax):
print("Maximum iterations reached.")
raise StopIteration("Iteration number = ",i)
#iterate
i+=1
#output some information
s = "i = %3d\tx = %9.8f\th = %9.8f\tb =%9.8f" % (i,xi,h_step,b)
print(s)
#break if new xi is ==b
if(xi==b):
flag=0
#return the answer
return x, y
a=0.0
b=2.0*np.pi
y_0 = np.zeros(2)
y_0[0] = 0.0
y_0[1] =1.0
nv = 2
tolerance = 1.0e-6
#perform the integration
x,y = rk4_mv(dydx,a,b,y_0,tolerance)
plt.plot(x,y[:,0],'o',label='y(x)')
plt.plot(x,y[:,1],'o',label='dydx(x)')
xx = np.linspace(0,2.0*np.pi,1000)
plt.plot(xx,np.sin(xx),label='sin(x)')
plt.plot(xx,np.cos(xx),label='cos(x)')
plt.xlabel('x')
plt.ylabel('y, dy/dx')
plt.legend(frameon=False)
###Output
_____no_output_____
###Markdown
Plot the error
###Code
sine = np.sin(x)
cosine = np.cos(x)
y_error = (y[:,0]-sine)
dydx_error = (y[:,1]-cosine)
plt.plot(x,y_error,label="y(x) Error")
plt.plot(x,dydx_error,label="dydx(x) Error")
plt.legend(frameon=False)
###Output
_____no_output_____
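###Markdown
The curves above show the difference between the numerical solution and the exact one ($\sin x$ for $y$, $\cos x$ for $dy/dx$). Their size reflects the accumulation of the per-step errors that the adaptive driver keeps below the tolerance of $10^{-6}$, so the plotted error is expected to be of a similar order of magnitude as that tolerance.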
###Markdown
Create a notebook to perform Runge-Kutta integration for multiple coupled variables.
###Code
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Define our coupled derivatives to integrate
###Code
def dydx(x,y): #dydx needs to return an array
#set the derivatives
#our equation is d^2y/dx^2 = -y
#so we can write
#dydx = z
#dzdx = -y
#we will set y = y[0]
#we will set z = y[1]
#declare an array
y_derivs = np.zeros(2)
#set dydx = z
y_derivs[0] = y[1]
#set dzdx = -y
y_derivs[1] = -1*y[0]
#here we have to return an array
return y_derivs
###Output
_____no_output_____
###Markdown
Define the 4th order RK method
###Code
def rk4_mv_core(dydx, xi, yi, nv, h):
#declare k? arrays
k1 = np.zeros(nv)
k2 = np.zeros(nv)
k3 = np.zeros(nv)
k4 = np.zeros(nv)
#define x at 1/2 step
x_ipoh = xi + 0.5*h
#define x at 1 step
x_ipo = xi + h
#declare a temp y array
y_temp = np.zeros(nv)
#get k1 values
y_derivs = dydx(xi,yi)
k1[:] = h*y_derivs[:]
#get k2 values
y_temp[:] = yi[:] + 0.5*k1[:]
y_derivs = dydx(x_ipoh,y_temp)
k2[:] = h*y_derivs[:]
#get k3 values
y_temp[:] = yi[:] + 0.5*k2[:]
y_derivs = dydx(x_ipoh,y_temp)
k3[:] = h*y_derivs[:]
#get k4 values
y_temp[:] = yi[:] + k3[:]
y_derivs = dydx(x_ipo,y_temp)
k4[:] = h*y_derivs[:]
#advance y by a step h
yipo = yi + (k1 + 2*k2 + 2*k3 + k4)/6.
return yipo
###Output
_____no_output_____
###Markdown
Define an adaptive step size driver for RK4
###Code
def rk4_mv_ad(dydx, x_i, y_i, nv, h, tol):
#define safety scale
SAFETY = 0.9
H_NEW_FAC = 2.0
#set a maximum number of iterations
imax = 10000
#set an iteration variable
i = 0
#create an error
Delta = np.full(nv, 2*tol)
#remember the step
h_step = h
#adjust step
while(Delta.max()/tol > 1.0):
#estimate our error by taking one step of size h
#vs. two steps of size h/2
y_2 = rk4_mv_core(dydx, x_i, y_i, nv, h_step)
y_1 = rk4_mv_core(dydx, x_i, y_i, nv, 0.5*h_step)
y_11 = rk4_mv_core(dydx, x_i+0.5*h_step,y_1,nv,0.5*h_step)
#compute an error
Delta = np.fabs(y_2 - y_11)
#if the error is too large, take a smaller step
if(Delta.max()/tol > 1.0):
#our error is too large, decrease the step
h_step *= SAFETY * (Delta.max()/tol)**(-0.25)
#check iteration
if(i>=imax):
print("Too many iterations in rk4_mv_ad()")
raise StopIteration("Ending after i =",i)
#iterate
i+=1
#next time, try to take a bigger step
h_new = np.fmin(h_step * (Delta.max()/tol)**(-0.9), h_step*H_NEW_FAC)
#return the answer, a new step, and the step we actually took
return y_2, h_new, h_step
###Output
_____no_output_____
###Markdown
Define a wrapper for RK4
###Code
def rk4_mv(dfdx, a, b, y_a, tol):
#dfdx is the derivative wrt x
#a is the lower bound
#b is the upper bound
#y_a are the boundary conditions
#tol is the tolerance for integrating y
#define our starting step
xi = a
yi = y_a.copy()
#an initial step size == make very small!
h = 1.0e-4 * (b-a)
#set a maximum number of iterations
imax = 10000
#set an iteration variable
i = 0
#set the number of coupled odes to the
#size of y_a
nv = len(y_a)
#set the initial conditions
x = np.full(1,a)
y = np.full((1,nv), y_a)
#set a flag
flag = 1
#loop until we reach the right side
while(flag):
#calculate y_i+1
yi_new, h_new, h_step = rk4_mv_ad(dydx, xi, yi, nv, h, tol)
#update the step
h = h_new
#prevent an overshoot of b
if(xi+h_step>b):
h = b-xi
yi_new, h_new, h_step = rk4_mv_ad(dydx, xi, yi, nv, h, tol)
flag = 0
#update values and append the step to the arrays
xi += h_step
yi[:] = yi_new[:]
x = np.append(x, xi)
y_new = np.zeros((len(x), nv))
y_new[0:len(x)-1,:] = y
y_new[-1,:] = yi[:]
del y
y = y_new
#prevent too many iterations
if(i>=imax):
raise StopIteration("Iteration number = ", i)
i += 1
#stop once we reach b
if(xi==b):
flag = 0
#return the answer
return x, y
###Output
_____no_output_____
###Markdown
create a notebook to perform Runge-Kutta integration for multiple coupled variables
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
###Output
_____no_output_____
###Markdown
define coupled derivatives to integrate
###Code
def dydx(x,y):
#set derivatives, equation is d^2y/dx^2=-y
#dydx=z & dzdx=-y
#set y=y[0] and z=y[1]
#declare array
y_derivs = np.zeros(2)
#set dydx = z
y_derivs[0] = y[1]
#set dzdx=-y
y_derivs[1]=-1*y[0]
#here we have to return an array
return y_derivs
###Output
_____no_output_____
###Markdown
define 4th order rk method
###Code
def rk4_mv_core(dydx,xi,yi,nv,h):
#declare the k? arrays (? is a wildcard, here k1 through k4)
k1=np.zeros(nv)
k2=np.zeros(nv)
k3=np.zeros(nv)
k4=np.zeros(nv)
#define x at 1/2 step
x_ipoh = xi+0.5*h
#define x at 1 step
x_ipo = xi+h
#declare a temp y array
y_temp= np.zeros(nv)
#get k1 values
y_derivs = dydx(xi,yi)
k1[:]=h*y_derivs[:]
#get k2 values
y_temp[:]=yi[:]+0.5*k1[:]
y_derivs= dydx(x_ipoh,y_temp)
k2[:]=h*y_derivs[:]
#k3 values
y_temp[:]=yi[:]+0.5*k2[:]
y_derivs= dydx(x_ipoh,y_temp)
k3[:]=h*y_derivs[:]
#k4 values
y_temp[:]=yi[:]+k3[:]
y_derivs= dydx(x_ipo,y_temp)
k4[:]=h*y_derivs[:]
#advance y by step h
yipo=yi+(k1+2*k2+2*k3+k4)/6
return yipo
###Output
_____no_output_____
###Markdown
define adaptive step size driver for rk4
###Code
def rk4_mv_ad(dydx,x_i,y_i,nv,h,tol):
#define safety scale
SAFETY = 0.9
H_NEW_FAC = 2.0
#set maximum number of iterations
imax= 10000
#set an iteration variable
i=0
#create an error
Delta= np.full(nv,2*tol)
#remember the step
h_step=h
#adjust step
while(Delta.max()/tol>1.0):
#estimate error by taking 1 h step vs. 2 h/2 steps
y_2=rk4_mv_core(dydx,x_i,y_i,nv,h_step)
y_1=rk4_mv_core(dydx,x_i,y_i,nv,0.5*h_step)
y_11=rk4_mv_core(dydx,x_i+0.5*h_step,y_1,nv,0.5*h_step)
#compute error
Delta=np.fabs(y_2-y_11)
#if error is too large take smaller step
if(Delta.max()/tol>1.0):
#error too large -> decrease step
h_step *= SAFETY*(Delta.max()/tol)**(-0.25)
#check iteration
if(i>=imax):
print("Too many iterations in rk4_mv_ad()")
raise StopIteration("ending after i = ",i)
#iterate
i+=1
#next time, try to take a bigger step
h_new= np.fmin(h_step*(Delta.max()/tol)**(-0.9),h_step*H_NEW_FAC)
#return the answer, a new step, and the step we actually took
return y_2, h_new, h_step
###Output
_____no_output_____
###Markdown
define wrapper 4 rk4
###Code
def rk4_mv(dydx,a,b,y_a,tol):
#dydx is the derivative wrt x
#a is lower bound/ b is upper bound
#y_a is boundary conditions
#tol: tolerance for int y
#define starting step
xi = a
yi = y_a.copy()
#an initial step size == make very small
h= 1.0e-4*(b-a)
imax=10000
i=0
#set # of coupled ode's to size y_a
nv = len(y_a)
#set initial conditions
x = np.full(1,a)
y = np.full((1,nv),y_a)
#set a flag
flag=1
#loop til we reach right side
while(flag):
#calculate y_i+1
yi_new, h_new, h_step = rk4_mv_ad(dydx,xi,yi,nv,h,tol)
#update new step
h = h_new
#prevent overshoot
if(xi+h_step>b):
#take smaller step
h = b-xi
#recalc y_i+1
yi_new, h_new, h_step = rk4_mv_ad(dydx,xi,yi,nv,h,tol)
#break
flag = 0
#update values
xi += h_step
yi[:] = yi_new[:]
# add step to arrays
x = np.append(x,xi)
y_new = np.zeros((len(x),nv))
y_new[0:len(x)-1,:]=y
y_new[-1,:]=yi[:]
del y
y = y_new
#prevent too many iterations
if(i>imax):
print("Max iterations reached")
raise StopIteration("Iteration number = ",i)
#iterate
i += 1
#output some info
s = "i = %3d\tx = %9.8f\th = %9.8f\tb=%9.8f" % (i,xi,h_step, b)
print(s)
#break if new xi is ==b
if(xi==b):
flag = 0
#return answer
return x,y
###Output
_____no_output_____
###Markdown
perform integration
###Code
a = 0.0
b = 2.0*np.pi
y_0 = np.zeros(2)
y_0[0]=0.0
y_0[1]=1.0
nv=2
tolerance = 1.0e-6
#perform integration
x,y=rk4_mv(dydx,a,b,y_0,tolerance)
###Output
_____no_output_____
###Markdown
plot result
###Code
plt.plot(x,y[:,0],'o',label='y(x)')
plt.plot(x,y[:,1],'o',label='dydx(x)')
xx = np.linspace(0,2.0*np.pi,1000)
plt.plot(xx,np.sin(xx),label='sin(x)')
plt.plot(xx,np.cos(xx),label='cos(x)')
plt.xlabel('x')
plt.ylabel('y, dy/dx')
plt.legend(frameon=False)
sine = np.sin(x)
cosine = np.cos(x)
y_error = (y[:,0]-sine)
dydx_error = (y[:,1]-cosine)
plt.plot(x, y_error, label="y(x) Error")
plt.plot(x, dydx_error, label="dydx(x) Error")
plt.legend(frameon=False)
###Output
_____no_output_____
###Markdown
Create a notebook to perform Runge-Kutta integration for multiple coupled variables
###Code
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Define our coupled derivatives to integrate
###Code
def dydx(x,y):
# Set the derivatives
# Our equation is d^2y/dx^2 = -y
# So we can write
# dydx = z
# dzdx = -y
# We will set y = y[0]
# We will set z = y[1]
# Declare an array
y_derivs = np.zeros(2)
# Set dydx = z
y_derivs[0] = y[1]
# Set dzdx = -y
y_derivs[1] = -1*y[0]
# Here we have to return the array of derivatives
return y_derivs
###Output
_____no_output_____
###Markdown
Define the 4th order RK method
###Code
def rk4_mv_core(dydx,xi,yi,nv,h):
# Declare k? arrays
k1 = np.zeros(nv)
k2 = np.zeros(nv)
k3 = np.zeros(nv)
k4 = np.zeros(nv)
# Define x at 1/2 step
x_ipoh = xi + 0.5*h
# Define x at 1 step
x_ipo = xi + h
# Declare a temp y array
y_temp = np.zeros(nv)
# Get k1 values
y_derivs = dydx(xi,yi)
k1[:] = h*y_derivs[:]
# Get k2 values
y_temp[:] = yi[:] + 0.5*k1[:]
y_derivs = dydx(x_ipoh,y_temp)
k2[:] = h*y_derivs[:]
# Get k3 values
y_temp[:] = yi[:] + 0.5*k2[:]
y_derivs = dydx(x_ipoh,y_temp)
k3[:] = h*y_derivs[:]
# Get k4 values
y_temp[:] = yi[:] + k3[:]
y_derivs = dydx(x_ipo,y_temp)
k4[:] = h*y_derivs[:]
# Advance y by step h
yipo = yi + (k1 + 2*k2 + 2*k3 + k4)/6.
# THIS IS AN ARRAY
return yipo
###Output
_____no_output_____
###Markdown
Define an adaptive step size driver for RK4
###Code
def rk4_mv_ad(dydx,x_i,y_i,nv,h,tol):
# Define safety scale
SAFETY = 0.9
H_NEW_FAC = 2.0
# Set a maximum number of iterations
imax = 10000
# Set an iteration variable
i = 0
# Create an error
Delta = np.full(nv,2*tol)
# Remember the step
h_step = h
# Adjust the step
while(Delta.max()/tol > 1.0):
# Estimate our error by taking one step of size h
# vs. two steps of size h/2
y_2 = rk4_mv_core(dydx,x_i,y_i,nv,h_step)
y_1 = rk4_mv_core(dydx,x_i,y_i,nv,0.5*h_step)
y_11 = rk4_mv_core(dydx,x_i+0.5*h_step,y_1,nv,0.5*h_step)
# Compute an error
Delta = np.fabs(y_2 - y_11)
# If error is too large, take a smaller step
if(Delta.max()/tol > 1.0):
# Our error is too large, decrease the step
h_step *= SAFETY * (Delta.max()/tol)**(-0.25)
# Check the iteration
if(i>imax):
print("Too many iterations in rk4_mv_ad()")
raise StopIteration("Ending after i = ",i)
# Iterate
i += 1
# Next time, try to take a bigger step
h_new= np.fmin(h_step * (Delta.max()/tol)**(-0.9), h_step*H_NEW_FAC)
# Return the answer, a new step, and the step we actually took
return y_2, h_new, h_step
###Output
_____no_output_____
###Markdown
Define a wrapper for RK4
###Code
def rk4_mv(dydx,a,b,y_a,tol):
# dydx is the derivative wrt x
# a is the lower bound
# b is the upper bound
# y_a are the boundary conditions
# tol is the tolerance for integrating y
# Define our starting step
xi = a
yi = y_a.copy()
# An initial step size == make very small
h = 1.0e-4 * (b-a)
# Set a maximum number of iterations
imax = 10000
# Set an iteration variable
i = 0
# Set the number of coupled odes to the size of y_a
nv = len(y_a)
# Set the initial conditions
x = np.full(1,a)
y = np.full((1,nv),y_a)
# Set a flag
flag = 1
# Loop until we reach the right side
while(flag):
# Calculate y_i+1
yi_new, h_new, h_step = rk4_mv_ad(dydx,xi,yi,nv,h,tol)
#Break
flag = 0
###Output
_____no_output_____
###Markdown
Create a notebook to perform Runge-Kutta integration for multiple coupled variables
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
###Output
_____no_output_____
###Markdown
Define our coupled derivatives to integrate
###Code
def dydx(x,y):
#set our derivatives
#our equation is d^2y/dx^2=-y
#so we can write
#dydx=z
#dzdx=-y
#we will set y=y[0]
#we will set z=y[1]
#declare an array
y_derivs=np.zeros(2)
#set dydx=z
y_derivs[0]=y[1]
#set dzdx=-y
y_derivs[1]=-1*y[0]
#here we have to return the array
return y_derivs
###Output
_____no_output_____
###Markdown
Define the 4th order RK method
###Code
def rk4_mv_core(dydx,xi,yi,nv,h):
#declare k? arrays
k1=np.zeros(nv)
k2=np.zeros(nv)
k3=np.zeros(nv)
k4=np.zeros(nv)
#define x at 1/2 step
x_ipoh=xi+0.5*h
#define x at 1 step
x_ipo=xi+h
#declare a temp y array
y_temp=np.zeros(nv)
#get k1 values
y_derivs=dydx(xi,yi)
k1[:]=h*y_derivs[:]
#get k2 values
y_temp[:]=yi[:]+0.5*k1[:]
y_derivs=dydx(x_ipoh,y_temp)
k2[:]=h*y_derivs[:]
#get k3 values
y_temp[:]=yi[:]+0.5*k2[:]
y_derivs=dydx(x_ipoh,y_temp)
k3[:]=h*y_derivs[:]
#get k4 values
y_temp[:]=yi[:]+k3[:]
y_derivs=dydx(x_ipoh,y_temp)
k4[:]=h*y_derivs[:]
#advance y by a step h
yipo=yi+(k1+2*k2+2*k3+k4)/6.
return yipo
###Output
_____no_output_____
###Markdown
Define an adaptive step size driver for RK4
###Code
def rk4_mv_ad(dydx,x_i,y_i,nv,h,tol):
#define safety scale
SAFETY=0.9
H_NEW_FAC=2.0
#set a maximum number of iterations
imax=10000
#set an iteration variable
i=0
#create an error
Delta=np.full(nv,2*tol)
#remember the step
h_step=h
#adjust step
while(Delta.max()/tol>1.0):
#estimate our error by taking one step of size h vs. two stepsof size h/2
y_2=rk4_mv_core(dydx,x_i,y_i,nv,h_step)
y_1=rk4_mv_core(dydx,x_i,y_i,nv,0.5*h_step)
y_11=rk4_mv_core(dydx,x_i+0.5*h_step,y_1,nv,0.5*h_step)
#compute the error
Delta=np.fabs(y_2-y_11)
#if the error is too large, take a smaller step
if(Delta.max()/tol>1.0):
#our error is too large, decrease the step
h_step*=SAFETY*(Delta.max()/tol)**(-0.25)
#check iteration
if(i>=imax):
print("Too many iterations in rk4_mv_ad()")
raise StopIteration("Ending after i=",i)
#iterate
i+=1
#next time, try to take a bigger step
h_new=np.fmin(h_step*(Delta.max()/tol)**(-0.9),h_step*H_NEW_FAC)
#return the answer, a new step, and the step we actually took
return y_2, h_new, h_step
###Output
_____no_output_____
###Markdown
Define a wrapper for RK4
###Code
def rk4_mv(dfdx,a,b,y_a,tol):
#dfdx is the derivative wrt x
#a is the lower bound
#b is the upper bound
#y_a are the boundary conditions
#tol is the tolerance for integrating y
#define our starting step
xi=a
yi=y_a.copy()
h=1.0e-4*(b-a)
imax=10000
i=0
nv=len(y_a)
x=np.full(1,a)
y=np.full((1,nv),y_a)
flag=1
while(flag):
yi_new, h_new, h_step=rk4_mv_ad(dydx,xi,yi,nv,h,tol)
h=h_new
#prevent overshooting b
if(xi+h_step>b):
h=b-xi
yi_new, h_new, h_step=rk4_mv_ad(dydx,xi,yi,nv,h,tol)
flag=0
#update values and append the step to the arrays
xi+=h_step
yi[:]=yi_new[:]
x=np.append(x,xi)
y_new=np.zeros((len(x),nv))
y_new[0:len(x)-1,:]=y
y_new[-1,:]=yi[:]
del y
y=y_new
i+=1
if(xi==b or i>=imax):
flag=0
#return the answer
return x,y
###Output
_____no_output_____
###Markdown
Define our coupled derivatives to integrate
###Code
def dydx(x,y):
#set the derivatives
#our equation is d^2y/dx^2 = -y
#dydx=z
#dzdx=-y
#set y=y[0]
#set z=y[1]
#declare an array
y_derivs = np.zeros(2)
#set dydx=z
y_derivs[0] = y[1]
#set dzdx=-y
y_derivs[1] = -1*y[0]
#here we have to return an array
return y_derivs
###Output
_____no_output_____
###Markdown
Define the 4th order RK method
###Code
def rk4_mv_core(dydx,xi,yi,nv,h):
#declare k? arrays
k1 = np.zeros(nv)
k2 = np.zeros(nv)
k3 = np.zeros(nv)
k4 = np.zeros(nv)
#define x at 1/2 step
x_ipoh = xi+0.5*h
#define x at 1 step
x_ipo=xi+h
#declare a temp y array
y_temp = np.zeros(nv)
#get k1 values
y_derivs = dydx(xi,yi)
k1[:]=h*y_derivs[:]
#get k2 values
y_temp[:]=yi[:]+0.5*k1[:]
y_derivs = dydx(x_ipoh,y_temp)
k2[:] = h*y_derivs[:]
#get k3 values
y_temp[:]=yi[:]+0.5*k2[:]
y_derivs = dydx(x_ipoh,y_temp)
k3[:] = h*y_derivs[:]
#get k4 values
y_temp[:]=yi[:]+k3[:]
y_derivs = dydx(x_ipo,y_temp)
k4[:] = h*y_derivs[:]
#advance y by a step h
yipo = yi + (k1 + 2*k2 + 2*k3 + k4)/6.
return yipo
###Output
_____no_output_____
###Markdown
Define an adaptive step size driver for RK4
###Code
def rk4_mv_ad(dydx,x_i,y_i,nv,h,tol):
#define safety scale
SAFETY = 0.9
H_NEW_FAC = 2.0
#set a maximum number of iterations
imax = 10000
#set an iteration variable
i = 0
#create an error
Delta = np.full(nv,2*tol)
#remember the step
h_step = h
#adjust step
while(Delta.max()/tol > 1.0):
#estimate error by taking one step of size h vs. two steps of size h/2
y_2 = rk4_mv_core(dydx,x_i,y_i,nv,h_step)
y_1 = rk4_mv_core(dydx,x_i,y_i,nv,0.5*h_step)
y_11 = rk4_mv_core(dydx,x_i+0.5*h_step,y_1,nv,0.5*h_step)
#compute an error
Delta = np.fabs(y_2 - y_11)
#if the error is too large, take a smaller step
if(Delta.max()/tol>1.0):
#decrease the step
h_step *= SAFETY*(Delta.max()/tol)**(-0.25)
#check iteration
if(i>=imax):
print("Too many iterations in rk4_mv_ad()")
raise StopIteration("Ending after i = ",i)
#iterate
i+=1
#next time, try to take a bigger step
h_new = np.fmin(h_step * (Delta.max()/tol)**(-0.9),h_step*H_NEW_FAC)
#return the answer, a new step, and the step we actually took
return y_2, h_new, h_step
###Output
_____no_output_____
###Markdown
Define a wrapper for RK4
###Code
def rk4_mv(dfdx,a,b,y_a,tol):
#dfdx is the derivative wrt x
#a is the lower bound
#b is the upper bound
#y_a are the boundary conditions
#tol is the tolerance for integrating y
#define our starting step
xi=a
yi=y_a.copy()
#an initial step size == make very small!
h = 1.0e-4 * (b-a)
#set a maximum number of iterations
imax = 10000
#set an iteration variable
i = 0
#set the number of coupled ODEs to the size of y_a
nv=len(y_a)
#set initial conditions
x = np.full(1,a)
y = np.full((1,nv),y_a)
#set a flag
flag = 1
#loop until we reach the right side
while(flag):
#calculate y_i+1
yi_new, h_new, h_step = rk4_mv_ad(dydx,xi,yi,nv,h,tol)
#update the step
h = h_new
#prevent an overshoot
if(xi+h_step>b):
#take a smaller step
h = b-xi
#recalculate y_i+1
yi_new, h_new, h_step = rk4_mv_ad(dydx,xi,yi,nv,h,tol)
#break
flag=0
#update values
xi += h_step
yi[:] = yi_new[:]
#add the step to the arrays
x = np.append(x,xi)
y_new = np.zeros((len(x),nv))
y_new[0:len(x)-1,:] = y
y_new[-1,:] = yi[:]
del y
y=y_new
#prevent too many iterations
if(i>=imax):
print("Maximum iterations reached.")
raise StopIteration("Iteration number = ",i)
#iterate
i+=1
#output some information
s = "i = %3d\tx = %9.8f\th = %9.8f\tb =%9.8f" % (i,xi,h_step,b)
print(s)
#break if new xi is ==b
if(xi==b):
flag=0
#return the answer
return x, y
a=0.0
b=2.0*np.pi
y_0 = np.zeros(2)
y_0[0] = 0.0
y_0[1] =1.0
nv = 2
tolerance = 1.0e-6
#perform the integration
x,y = rk4_mv(dydx,a,b,y_0,tolerance)
###Output
i = 1 x = 0.00062832 h = 0.00062832 b =6.28318531
i = 2 x = 0.00188496 h = 0.00125664 b =6.28318531
i = 3 x = 0.00439823 h = 0.00251327 b =6.28318531
i = 4 x = 0.00886437 h = 0.00446614 b =6.28318531
i = 5 x = 0.01343975 h = 0.00457538 b =6.28318531
i = 6 x = 0.01797124 h = 0.00453149 b =6.28318531
i = 7 x = 0.02252043 h = 0.00454919 b =6.28318531
i = 8 x = 0.02706280 h = 0.00454237 b =6.28318531
i = 9 x = 0.03160823 h = 0.00454543 b =6.28318531
i = 10 x = 0.03615282 h = 0.00454460 b =6.28318531
i = 11 x = 0.04069821 h = 0.00454539 b =6.28318531
i = 12 x = 0.04524381 h = 0.00454560 b =6.28318531
i = 13 x = 0.04978991 h = 0.00454610 b =6.28318531
i = 14 x = 0.05433647 h = 0.00454656 b =6.28318531
i = 15 x = 0.05888357 h = 0.00454710 b =6.28318531
i = 16 x = 0.06343124 h = 0.00454767 b =6.28318531
i = 17 x = 0.06797954 h = 0.00454830 b =6.28318531
i = 18 x = 0.07252851 h = 0.00454897 b =6.28318531
i = 19 x = 0.07707820 h = 0.00454969 b =6.28318531
i = 20 x = 0.08162866 h = 0.00455046 b =6.28318531
i = 21 x = 0.08617994 h = 0.00455127 b =6.28318531
i = 22 x = 0.09073207 h = 0.00455214 b =6.28318531
i = 23 x = 0.09528512 h = 0.00455305 b =6.28318531
i = 24 x = 0.09983912 h = 0.00455401 b =6.28318531
i = 25 x = 0.10439413 h = 0.00455501 b =6.28318531
i = 26 x = 0.10895020 h = 0.00455607 b =6.28318531
i = 27 x = 0.11350737 h = 0.00455717 b =6.28318531
i = 28 x = 0.11806569 h = 0.00455832 b =6.28318531
i = 29 x = 0.12262521 h = 0.00455952 b =6.28318531
i = 30 x = 0.12718598 h = 0.00456077 b =6.28318531
i = 31 x = 0.13174805 h = 0.00456207 b =6.28318531
i = 32 x = 0.13631146 h = 0.00456341 b =6.28318531
i = 33 x = 0.14087627 h = 0.00456481 b =6.28318531
i = 34 x = 0.14544253 h = 0.00456625 b =6.28318531
i = 35 x = 0.15001028 h = 0.00456775 b =6.28318531
i = 36 x = 0.15457957 h = 0.00456929 b =6.28318531
i = 37 x = 0.15915046 h = 0.00457089 b =6.28318531
i = 38 x = 0.16372299 h = 0.00457253 b =6.28318531
i = 39 x = 0.16829721 h = 0.00457422 b =6.28318531
i = 40 x = 0.17287318 h = 0.00457597 b =6.28318531
i = 41 x = 0.17745094 h = 0.00457776 b =6.28318531
i = 42 x = 0.18203056 h = 0.00457961 b =6.28318531
i = 43 x = 0.18661206 h = 0.00458151 b =6.28318531
i = 44 x = 0.19119552 h = 0.00458346 b =6.28318531
i = 45 x = 0.19578097 h = 0.00458546 b =6.28318531
i = 46 x = 0.20036848 h = 0.00458751 b =6.28318531
i = 47 x = 0.20495809 h = 0.00458961 b =6.28318531
i = 48 x = 0.20954986 h = 0.00459177 b =6.28318531
i = 49 x = 0.21414383 h = 0.00459398 b =6.28318531
i = 50 x = 0.21874007 h = 0.00459624 b =6.28318531
i = 51 x = 0.22333862 h = 0.00459855 b =6.28318531
i = 52 x = 0.22793954 h = 0.00460092 b =6.28318531
i = 53 x = 0.23254288 h = 0.00460334 b =6.28318531
i = 54 x = 0.23714869 h = 0.00460581 b =6.28318531
i = 55 x = 0.24175704 h = 0.00460834 b =6.28318531
i = 56 x = 0.24636797 h = 0.00461093 b =6.28318531
i = 57 x = 0.25098153 h = 0.00461357 b =6.28318531
i = 58 x = 0.25559779 h = 0.00461626 b =6.28318531
i = 59 x = 0.26021681 h = 0.00461901 b =6.28318531
i = 60 x = 0.26483862 h = 0.00462182 b =6.28318531
i = 61 x = 0.26946330 h = 0.00462468 b =6.28318531
i = 62 x = 0.27409090 h = 0.00462760 b =6.28318531
i = 63 x = 0.27872147 h = 0.00463057 b =6.28318531
i = 64 x = 0.28335507 h = 0.00463361 b =6.28318531
i = 65 x = 0.28799177 h = 0.00463670 b =6.28318531
i = 66 x = 0.29263161 h = 0.00463984 b =6.28318531
i = 67 x = 0.29727467 h = 0.00464305 b =6.28318531
i = 68 x = 0.30192098 h = 0.00464632 b =6.28318531
i = 69 x = 0.30657063 h = 0.00464964 b =6.28318531
i = 70 x = 0.31122366 h = 0.00465303 b =6.28318531
i = 71 x = 0.31588014 h = 0.00465648 b =6.28318531
i = 72 x = 0.32054012 h = 0.00465998 b =6.28318531
i = 73 x = 0.32520367 h = 0.00466355 b =6.28318531
i = 74 x = 0.32987085 h = 0.00466718 b =6.28318531
i = 75 x = 0.33454172 h = 0.00467087 b =6.28318531
i = 76 x = 0.33921635 h = 0.00467463 b =6.28318531
i = 77 x = 0.34389480 h = 0.00467845 b =6.28318531
i = 78 x = 0.34857713 h = 0.00468233 b =6.28318531
i = 79 x = 0.35326340 h = 0.00468628 b =6.28318531
i = 80 x = 0.35795369 h = 0.00469029 b =6.28318531
i = 81 x = 0.36264805 h = 0.00469436 b =6.28318531
i = 82 x = 0.36734656 h = 0.00469851 b =6.28318531
i = 83 x = 0.37204928 h = 0.00470272 b =6.28318531
i = 84 x = 0.37675627 h = 0.00470699 b =6.28318531
i = 85 x = 0.38146761 h = 0.00471134 b =6.28318531
i = 86 x = 0.38618336 h = 0.00471575 b =6.28318531
i = 87 x = 0.39090359 h = 0.00472023 b =6.28318531
i = 88 x = 0.39562837 h = 0.00472478 b =6.28318531
i = 89 x = 0.40035778 h = 0.00472941 b =6.28318531
i = 90 x = 0.40509188 h = 0.00473410 b =6.28318531
i = 91 x = 0.40983075 h = 0.00473887 b =6.28318531
i = 92 x = 0.41457445 h = 0.00474370 b =6.28318531
i = 93 x = 0.41932307 h = 0.00474862 b =6.28318531
i = 94 x = 0.42407667 h = 0.00475360 b =6.28318531
i = 95 x = 0.42883533 h = 0.00475866 b =6.28318531
i = 96 x = 0.43359912 h = 0.00476380 b =6.28318531
i = 97 x = 0.43836813 h = 0.00476901 b =6.28318531
i = 98 x = 0.44314243 h = 0.00477430 b =6.28318531
i = 99 x = 0.44792210 h = 0.00477967 b =6.28318531
i = 100 x = 0.45270721 h = 0.00478511 b =6.28318531
i = 101 x = 0.45749785 h = 0.00479064 b =6.28318531
i = 102 x = 0.46229410 h = 0.00479625 b =6.28318531
i = 103 x = 0.46709603 h = 0.00480194 b =6.28318531
i = 104 x = 0.47190374 h = 0.00480771 b =6.28318531
i = 105 x = 0.47671730 h = 0.00481356 b =6.28318531
i = 106 x = 0.48153680 h = 0.00481950 b =6.28318531
i = 107 x = 0.48636233 h = 0.00482553 b =6.28318531
i = 108 x = 0.49119397 h = 0.00483164 b =6.28318531
i = 109 x = 0.49603181 h = 0.00483784 b =6.28318531
i = 110 x = 0.50087593 h = 0.00484413 b =6.28318531
i = 111 x = 0.50572644 h = 0.00485050 b =6.28318531
i = 112 x = 0.51058341 h = 0.00485697 b =6.28318531
i = 113 x = 0.51544695 h = 0.00486353 b =6.28318531
i = 114 x = 0.52031713 h = 0.00487019 b =6.28318531
i = 115 x = 0.52519407 h = 0.00487694 b =6.28318531
i = 116 x = 0.53007785 h = 0.00488378 b =6.28318531
i = 117 x = 0.53496858 h = 0.00489072 b =6.28318531
i = 118 x = 0.53986634 h = 0.00489776 b =6.28318531
i = 119 x = 0.54477124 h = 0.00490490 b =6.28318531
i = 120 x = 0.54968338 h = 0.00491214 b =6.28318531
i = 121 x = 0.55460287 h = 0.00491949 b =6.28318531
i = 122 x = 0.55952980 h = 0.00492693 b =6.28318531
i = 123 x = 0.56446429 h = 0.00493448 b =6.28318531
i = 124 x = 0.56940643 h = 0.00494214 b =6.28318531
i = 125 x = 0.57435634 h = 0.00494991 b =6.28318531
i = 126 x = 0.57931413 h = 0.00495779 b =6.28318531
i = 127 x = 0.58427990 h = 0.00496578 b =6.28318531
i = 128 x = 0.58925378 h = 0.00497388 b =6.28318531
i = 129 x = 0.59423587 h = 0.00498209 b =6.28318531
i = 130 x = 0.59922630 h = 0.00499043 b =6.28318531
i = 131 x = 0.60422518 h = 0.00499888 b =6.28318531
i = 132 x = 0.60923263 h = 0.00500745 b =6.28318531
i = 133 x = 0.61424877 h = 0.00501614 b =6.28318531
i = 134 x = 0.61927373 h = 0.00502496 b =6.28318531
i = 135 x = 0.62430764 h = 0.00503391 b =6.28318531
i = 136 x = 0.62935062 h = 0.00504298 b =6.28318531
i = 137 x = 0.63440280 h = 0.00505218 b =6.28318531
i = 138 x = 0.63946431 h = 0.00506151 b =6.28318531
i = 139 x = 0.64453529 h = 0.00507098 b =6.28318531
i = 140 x = 0.64961588 h = 0.00508059 b =6.28318531
i = 141 x = 0.65470622 h = 0.00509033 b =6.28318531
i = 142 x = 0.65980643 h = 0.00510022 b =6.28318531
i = 143 x = 0.66491668 h = 0.00511025 b =6.28318531
i = 144 x = 0.67003711 h = 0.00512042 b =6.28318531
i = 145 x = 0.67516786 h = 0.00513075 b =6.28318531
i = 146 x = 0.68030908 h = 0.00514123 b =6.28318531
i = 147 x = 0.68546094 h = 0.00515186 b =6.28318531
i = 148 x = 0.69062358 h = 0.00516264 b =6.28318531
i = 149 x = 0.69579718 h = 0.00517359 b =6.28318531
i = 150 x = 0.70098188 h = 0.00518470 b =6.28318531
i = 151 x = 0.70617786 h = 0.00519598 b =6.28318531
i = 152 x = 0.71138529 h = 0.00520743 b =6.28318531
i = 153 x = 0.71660434 h = 0.00521905 b =6.28318531
i = 154 x = 0.72183518 h = 0.00523084 b =6.28318531
i = 155 x = 0.72707799 h = 0.00524281 b =6.28318531
i = 156 x = 0.73233296 h = 0.00525497 b =6.28318531
i = 157 x = 0.73760027 h = 0.00526731 b =6.28318531
i = 158 x = 0.74288012 h = 0.00527985 b =6.28318531
i = 159 x = 0.74817269 h = 0.00529257 b =6.28318531
i = 160 x = 0.75347819 h = 0.00530550 b =6.28318531
i = 161 x = 0.75879681 h = 0.00531862 b =6.28318531
i = 162 x = 0.76412877 h = 0.00533196 b =6.28318531
i = 163 x = 0.76947427 h = 0.00534550 b =6.28318531
i = 164 x = 0.77483354 h = 0.00535926 b =6.28318531
i = 165 x = 0.78020678 h = 0.00537324 b =6.28318531
i = 166 x = 0.78559423 h = 0.00538745 b =6.28318531
i = 167 x = 0.79099141 h = 0.00539718 b =6.28318531
i = 168 x = 0.79638303 h = 0.00539162 b =6.28318531
i = 169 x = 0.80175680 h = 0.00537377 b =6.28318531
i = 170 x = 0.80711792 h = 0.00536112 b =6.28318531
i = 171 x = 0.81246464 h = 0.00534672 b =6.28318531
i = 172 x = 0.81779796 h = 0.00533333 b =6.28318531
i = 173 x = 0.82311779 h = 0.00531983 b =6.28318531
i = 174 x = 0.82842446 h = 0.00530667 b =6.28318531
i = 175 x = 0.83371812 h = 0.00529366 b =6.28318531
i = 176 x = 0.83899899 h = 0.00528087 b =6.28318531
i = 177 x = 0.84426726 h = 0.00526827 b =6.28318531
i = 178 x = 0.84952313 h = 0.00525587 b =6.28318531
i = 179 x = 0.85476678 h = 0.00524365 b =6.28318531
i = 180 x = 0.85999840 h = 0.00523162 b =6.28318531
i = 181 x = 0.86521817 h = 0.00521977 b =6.28318531
i = 182 x = 0.87042626 h = 0.00520809 b =6.28318531
i = 183 x = 0.87562286 h = 0.00519659 b =6.28318531
i = 184 x = 0.88080812 h = 0.00518527 b =6.28318531
i = 185 x = 0.88598223 h = 0.00517410 b =6.28318531
i = 186 x = 0.89114533 h = 0.00516311 b =6.28318531
i = 187 x = 0.89629761 h = 0.00515227 b =6.28318531
i = 188 x = 0.90143920 h = 0.00514160 b =6.28318531
i = 189 x = 0.90657028 h = 0.00513107 b =6.28318531
i = 190 x = 0.91169098 h = 0.00512071 b =6.28318531
i = 191 x = 0.91680147 h = 0.00511049 b =6.28318531
i = 192 x = 0.92190189 h = 0.00510042 b =6.28318531
i = 193 x = 0.92699238 h = 0.00509049 b =6.28318531
i = 194 x = 0.93207309 h = 0.00508071 b =6.28318531
i = 195 x = 0.93714416 h = 0.00507107 b =6.28318531
i = 196 x = 0.94220572 h = 0.00506156 b =6.28318531
i = 197 x = 0.94725791 h = 0.00505219 b =6.28318531
i = 198 x = 0.95230087 h = 0.00504296 b =6.28318531
i = 199 x = 0.95733472 h = 0.00503385 b =6.28318531
i = 200 x = 0.96235959 h = 0.00502487 b =6.28318531
i = 201 x = 0.96737561 h = 0.00501602 b =6.28318531
i = 202 x = 0.97238291 h = 0.00500730 b =6.28318531
i = 203 x = 0.97738161 h = 0.00499870 b =6.28318531
i = 204 x = 0.98237183 h = 0.00499022 b =6.28318531
i = 205 x = 0.98735368 h = 0.00498186 b =6.28318531
i = 206 x = 0.99232729 h = 0.00497361 b =6.28318531
i = 207 x = 0.99729278 h = 0.00496548 b =6.28318531
i = 208 x = 1.00225025 h = 0.00495747 b =6.28318531
i = 209 x = 1.00719981 h = 0.00494957 b =6.28318531
i = 210 x = 1.01214159 h = 0.00494177 b =6.28318531
i = 211 x = 1.01707568 h = 0.00493409 b =6.28318531
i = 212 x = 1.02200220 h = 0.00492652 b =6.28318531
i = 213 x = 1.02692124 h = 0.00491905 b =6.28318531
i = 214 x = 1.03183292 h = 0.00491168 b =6.28318531
i = 215 x = 1.03673734 h = 0.00490442 b =6.28318531
i = 216 x = 1.04163460 h = 0.00489726 b =6.28318531
i = 217 x = 1.04652480 h = 0.00489020 b =6.28318531
i = 218 x = 1.05140804 h = 0.00488324 b =6.28318531
i = 219 x = 1.05628441 h = 0.00487637 b =6.28318531
i = 220 x = 1.06115402 h = 0.00486961 b =6.28318531
i = 221 x = 1.06601695 h = 0.00486293 b =6.28318531
i = 222 x = 1.07087330 h = 0.00485635 b =6.28318531
i = 223 x = 1.07572317 h = 0.00484987 b =6.28318531
i = 224 x = 1.08056664 h = 0.00484347 b =6.28318531
i = 225 x = 1.08540381 h = 0.00483717 b =6.28318531
i = 226 x = 1.09023476 h = 0.00483095 b =6.28318531
i = 227 x = 1.09505959 h = 0.00482482 b =6.28318531
i = 228 x = 1.09987837 h = 0.00481878 b =6.28318531
i = 229 x = 1.10469120 h = 0.00481283 b =6.28318531
i = 230 x = 1.10949816 h = 0.00480696 b =6.28318531
i = 231 x = 1.11429933 h = 0.00480117 b =6.28318531
i = 232 x = 1.11909480 h = 0.00479547 b =6.28318531
i = 233 x = 1.12388465 h = 0.00478985 b =6.28318531
i = 234 x = 1.12866896 h = 0.00478431 b =6.28318531
i = 235 x = 1.13344781 h = 0.00477885 b =6.28318531
i = 236 x = 1.13822128 h = 0.00477347 b =6.28318531
i = 237 x = 1.14298944 h = 0.00476817 b =6.28318531
i = 238 x = 1.14775239 h = 0.00476294 b =6.28318531
i = 239 x = 1.15251018 h = 0.00475779 b =6.28318531
i = 240 x = 1.15726290 h = 0.00475272 b =6.28318531
i = 241 x = 1.16201063 h = 0.00474773 b =6.28318531
i = 242 x = 1.16675343 h = 0.00474280 b =6.28318531
i = 243 x = 1.17149139 h = 0.00473795 b =6.28318531
i = 244 x = 1.17622456 h = 0.00473318 b =6.28318531
i = 245 x = 1.18095304 h = 0.00472847 b =6.28318531
i = 246 x = 1.18567688 h = 0.00472384 b =6.28318531
i = 247 x = 1.19039616 h = 0.00471928 b =6.28318531
i = 248 x = 1.19511095 h = 0.00471479 b =6.28318531
i = 249 x = 1.19982131 h = 0.00471037 b =6.28318531
i = 250 x = 1.20452732 h = 0.00470601 b =6.28318531
i = 251 x = 1.20922905 h = 0.00470173 b =6.28318531
i = 252 x = 1.21392655 h = 0.00469751 b =6.28318531
i = 253 x = 1.21861991 h = 0.00469336 b =6.28318531
i = 254 x = 1.22330918 h = 0.00468927 b =6.28318531
i = 255 x = 1.22799443 h = 0.00468525 b =6.28318531
i = 256 x = 1.23267573 h = 0.00468130 b =6.28318531
i = 257 x = 1.23735313 h = 0.00467741 b =6.28318531
i = 258 x = 1.24202671 h = 0.00467358 b =6.28318531
i = 259 x = 1.24669653 h = 0.00466982 b =6.28318531
i = 260 x = 1.25136264 h = 0.00466612 b =6.28318531
i = 261 x = 1.25602512 h = 0.00466248 b =6.28318531
i = 262 x = 1.26068403 h = 0.00465890 b =6.28318531
i = 263 x = 1.26533942 h = 0.00465539 b =6.28318531
i = 264 x = 1.26999135 h = 0.00465194 b =6.28318531
i = 265 x = 1.27463990 h = 0.00464854 b =6.28318531
i = 266 x = 1.27928511 h = 0.00464521 b =6.28318531
i = 267 x = 1.28392705 h = 0.00464194 b =6.28318531
i = 268 x = 1.28856577 h = 0.00463872 b =6.28318531
i = 269 x = 1.29320134 h = 0.00463557 b =6.28318531
i = 270 x = 1.29783381 h = 0.00463247 b =6.28318531
i = 271 x = 1.30246324 h = 0.00462943 b =6.28318531
i = 272 x = 1.30708970 h = 0.00462645 b =6.28318531
i = 273 x = 1.31171322 h = 0.00462353 b =6.28318531
i = 274 x = 1.31633388 h = 0.00462066 b =6.28318531
i = 275 x = 1.32095173 h = 0.00461785 b =6.28318531
i = 276 x = 1.32556682 h = 0.00461509 b =6.28318531
i = 277 x = 1.33017921 h = 0.00461239 b =6.28318531
i = 278 x = 1.33478896 h = 0.00460975 b =6.28318531
i = 279 x = 1.33939612 h = 0.00460716 b =6.28318531
i = 280 x = 1.34400074 h = 0.00460462 b =6.28318531
i = 281 x = 1.34860289 h = 0.00460214 b =6.28318531
i = 282 x = 1.35320260 h = 0.00459972 b =6.28318531
i = 283 x = 1.35779995 h = 0.00459734 b =6.28318531
i = 284 x = 1.36239497 h = 0.00459502 b =6.28318531
i = 285 x = 1.36698773 h = 0.00459276 b =6.28318531
i = 286 x = 1.37157827 h = 0.00459054 b =6.28318531
i = 287 x = 1.37616666 h = 0.00458838 b =6.28318531
i = 288 x = 1.38075293 h = 0.00458628 b =6.28318531
i = 289 x = 1.38533715 h = 0.00458422 b =6.28318531
i = 290 x = 1.38991936 h = 0.00458221 b =6.28318531
i = 291 x = 1.39449962 h = 0.00458026 b =6.28318531
i = 292 x = 1.39907798 h = 0.00457836 b =6.28318531
i = 293 x = 1.40365449 h = 0.00457651 b =6.28318531
i = 294 x = 1.40822920 h = 0.00457471 b =6.28318531
i = 295 x = 1.41280216 h = 0.00457296 b =6.28318531
i = 296 x = 1.41737342 h = 0.00457126 b =6.28318531
i = 297 x = 1.42194303 h = 0.00456961 b =6.28318531
i = 298 x = 1.42651104 h = 0.00456801 b =6.28318531
i = 299 x = 1.43107751 h = 0.00456646 b =6.28318531
i = 300 x = 1.43564247 h = 0.00456497 b =6.28318531
i = 301 x = 1.44020599 h = 0.00456352 b =6.28318531
i = 302 x = 1.44476810 h = 0.00456212 b =6.28318531
i = 303 x = 1.44932887 h = 0.00456077 b =6.28318531
i = 304 x = 1.45388833 h = 0.00455946 b =6.28318531
i = 305 x = 1.45844654 h = 0.00455821 b =6.28318531
i = 306 x = 1.46300355 h = 0.00455701 b =6.28318531
i = 307 x = 1.46755940 h = 0.00455585 b =6.28318531
i = 308 x = 1.47211414 h = 0.00455474 b =6.28318531
i = 309 x = 1.47666782 h = 0.00455368 b =6.28318531
i = 310 x = 1.48122049 h = 0.00455267 b =6.28318531
i = 311 x = 1.48577220 h = 0.00455171 b =6.28318531
i = 312 x = 1.49032300 h = 0.00455079 b =6.28318531
i = 313 x = 1.49487292 h = 0.00454993 b =6.28318531
i = 314 x = 1.49942203 h = 0.00454911 b =6.28318531
i = 315 x = 1.50397037 h = 0.00454834 b =6.28318531
i = 316 x = 1.50851798 h = 0.00454761 b =6.28318531
i = 317 x = 1.51306491 h = 0.00454693 b =6.28318531
i = 318 x = 1.51761122 h = 0.00454631 b =6.28318531
i = 319 x = 1.52215694 h = 0.00454572 b =6.28318531
i = 320 x = 1.52670213 h = 0.00454519 b =6.28318531
i = 321 x = 1.53124683 h = 0.00454470 b =6.28318531
i = 322 x = 1.53579109 h = 0.00454426 b =6.28318531
i = 323 x = 1.54033496 h = 0.00454387 b =6.28318531
i = 324 x = 1.54487848 h = 0.00454352 b =6.28318531
i = 325 x = 1.54942170 h = 0.00454322 b =6.28318531
i = 326 x = 1.55396467 h = 0.00454297 b =6.28318531
i = 327 x = 1.55850743 h = 0.00454276 b =6.28318531
i = 328 x = 1.56305004 h = 0.00454260 b =6.28318531
i = 329 x = 1.56759253 h = 0.00454249 b =6.28318531
i = 330 x = 1.57213496 h = 0.00454243 b =6.28318531
i = 331 x = 1.57667737 h = 0.00454241 b =6.28318531
i = 332 x = 1.58121981 h = 0.00454244 b =6.28318531
i = 333 x = 1.58576232 h = 0.00454251 b =6.28318531
i = 334 x = 1.59030496 h = 0.00454264 b =6.28318531
i = 335 x = 1.59484776 h = 0.00454281 b =6.28318531
i = 336 x = 1.59939079 h = 0.00454302 b =6.28318531
i = 337 x = 1.60393407 h = 0.00454329 b =6.28318531
i = 338 x = 1.60847767 h = 0.00454360 b =6.28318531
i = 339 x = 1.61302163 h = 0.00454395 b =6.28318531
i = 340 x = 1.61756598 h = 0.00454436 b =6.28318531
i = 341 x = 1.62211079 h = 0.00454481 b =6.28318531
i = 342 x = 1.62665610 h = 0.00454531 b =6.28318531
i = 343 x = 1.63120196 h = 0.00454585 b =6.28318531
i = 344 x = 1.63574841 h = 0.00454645 b =6.28318531
i = 345 x = 1.64029549 h = 0.00454709 b =6.28318531
i = 346 x = 1.64484327 h = 0.00454778 b =6.28318531
i = 347 x = 1.64939178 h = 0.00454851 b =6.28318531
i = 348 x = 1.65394107 h = 0.00454929 b =6.28318531
i = 349 x = 1.65849120 h = 0.00455012 b =6.28318531
i = 350 x = 1.66304220 h = 0.00455100 b =6.28318531
i = 351 x = 1.66759413 h = 0.00455193 b =6.28318531
i = 352 x = 1.67214703 h = 0.00455290 b =6.28318531
i = 353 x = 1.67670095 h = 0.00455392 b =6.28318531
i = 354 x = 1.68125594 h = 0.00455499 b =6.28318531
i = 355 x = 1.68581206 h = 0.00455611 b =6.28318531
i = 356 x = 1.69036933 h = 0.00455728 b =6.28318531
i = 357 x = 1.69492783 h = 0.00455849 b =6.28318531
i = 358 x = 1.69948759 h = 0.00455976 b =6.28318531
i = 359 x = 1.70404866 h = 0.00456107 b =6.28318531
i = 360 x = 1.70861109 h = 0.00456243 b =6.28318531
i = 361 x = 1.71317493 h = 0.00456384 b =6.28318531
i = 362 x = 1.71774024 h = 0.00456530 b =6.28318531
i = 363 x = 1.72230705 h = 0.00456681 b =6.28318531
i = 364 x = 1.72687542 h = 0.00456837 b =6.28318531
i = 365 x = 1.73144541 h = 0.00456998 b =6.28318531
i = 366 x = 1.73601705 h = 0.00457164 b =6.28318531
i = 367 x = 1.74059040 h = 0.00457335 b =6.28318531
i = 368 x = 1.74516551 h = 0.00457511 b =6.28318531
i = 369 x = 1.74974243 h = 0.00457692 b =6.28318531
i = 370 x = 1.75432121 h = 0.00457878 b =6.28318531
i = 371 x = 1.75890191 h = 0.00458070 b =6.28318531
i = 372 x = 1.76348457 h = 0.00458266 b =6.28318531
i = 373 x = 1.76806925 h = 0.00458468 b =6.28318531
i = 374 x = 1.77265599 h = 0.00458674 b =6.28318531
i = 375 x = 1.77724485 h = 0.00458886 b =6.28318531
i = 376 x = 1.78183588 h = 0.00459103 b =6.28318531
|
docs/notebooks/03_cells_autoname_and_cache.ipynb | ###Markdown
CellProblem:In GDS format- each cell must have a unique name. Ideally the name is also consistent from different run times, in case you want to merge GDS files that were created at different times or on different computers.- two cells stored in the GDS file cannot have the same name. Ideally they will be references to the same Cell. See `References tutorial`. That way we only have to store that cell in memory once and all the references are just pointers to that cell.- GDS cells info: - `changed` used to create the cell - `default` in function signature - `full` full settings - name - function_name - module - child: (if any) - simulation, testing, data analysis, derived properties (for example path length of the bend) ...Solution: The decorator `@gf.cell` addresses all these issues:1. Gives the cell a unique name depending on the parameters that you pass to it.2. Creates a cache of cells where we use the cell name as the key. The first time the function runs, the cache stores the component, so the second time, you get the component directly from the cache, so you don't create the same cell twice.For creating new Components you need to create them inside a function, and to make sure that the component gets a good name you just need to add the `@cell` decorator. Let's see how it works
###Code
import gdsfactory as gf
@gf.cell
def wg(length=10, width=1):
print("BUILDING waveguide")
c = gf.Component()
c.add_polygon([(0, 0), (length, 0), (length, width), (0, width)], layer=(1, 0))
c.add_port(name="o1", midpoint=[0, width / 2], width=width, orientation=180)
c.add_port(name="o2", midpoint=[length, width / 2], width=width, orientation=0)
return c
###Output
_____no_output_____
###Markdown
See how the cells get the name from the parameters that you pass them
###Code
c = wg()
print(c)
# The second time you will get this cell from the cache
c = wg()
print(c)
# If you call the cell with different parameters, the cell will get a different name
c = wg(width=0.5)
print(c)
c.info.changed
c.info.full
c.info.default
c.pprint()
###Output
_____no_output_____
###Markdown
thanks to `gf.cell` you can also add any metadata `info` relevant to the cell
###Code
c = wg(length=3, info=dict(polarization="te", wavelength=1.55))
c.pprint()
print(c.info.wavelength)
###Output
_____no_output_____
###Markdown
MetadataTogether with the GDS files that you send to the foundry you can also store some metadata in YAML for each cell containing all the settings that we used to build the GDS. The metadata will consist of all the parameters that were passed to the component function as well as derived properties- info: includes all component metadata - derived properties - external metadata (test_protocol, docs, ...) - simulation_settings - function_name - name: for the component - name_long: for the component - full: full list of settings - changed: changed settings - default: includes the default signature of the component- ports: port name, width, orientation How can you add two different references to a cell with the same parameters?
###Code
import gdsfactory as gf
c = gf.Component("problem")
R1 = gf.components.rectangle(
size=(4, 2), layer=(0, 0)
) # Creates a rectangle (same Unique ID uid)
R2 = gf.components.rectangle(size=(4, 2), layer=(0, 0))
# Try Create a new rectangle that we want to change (but has the same name so we will get R1 from the cache)
r1r = c << R1 # Add the first rectangle to c
r2r = c << R2 # Add the second rectangle to c
r2r.move((4, 2))
c
print(R1 == R2)
print(R1)
print(R2)
# lets do it cleaner with references
import gdsfactory as gf
c = gf.Component("solution")
R = gf.components.rectangle(size=(4, 2), layer=(0, 0))
r1 = c << R # Add the first rectangle reference to c
r2 = c << R # Add the second rectangle reference to c
r2.rotate(45)
c
import gdsfactory as gf
c = gf.components.straight()
c.show()
c.plot()
###Output
_____no_output_____
###Markdown
We can even show ports of all references with `component.show(show_subports=True)`
###Code
c = gf.components.mzi_phase_shifter(length_x=50)
c
###Output
_____no_output_____
###Markdown
CacheTo make sure that two cells created with the same parameters are references of the same cell, the `cell` decorator has a cache: if a component has already been built, it returns that component from the cache
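Conceptually the cache behaves like a dictionary keyed by the generated cell name. The toy sketch below only illustrates the idea; it is not gdsfactory's actual implementation:

```python
from functools import wraps

# Toy illustration only: NOT gdsfactory's implementation, just the idea behind the cell cache.
# The cache is a dict keyed by a name derived from the function name and its arguments.
_cache = {}

def toy_cell(func):
    @wraps(func)
    def wrapper(**kwargs):
        name = "_".join([func.__name__] + [f"{k}{v}" for k, v in sorted(kwargs.items())])
        if name not in _cache:
            _cache[name] = func(**kwargs)  # first call: build and store
        return _cache[name]                # later calls: return the stored object
    return wrapper

@toy_cell
def rect(width=4, height=2):
    return {"name": f"rect_width{width}_height{height}"}

r1 = rect(width=4, height=2)
r2 = rect(width=4, height=2)
print(r1 is r2)  # True: both names point to the same cached object
```

The real decorator is demonstrated in the next cell, including `gf.clear_cache()` to reset the cache.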
###Code
@gf.cell
def wg(length=10, width=1):
c = gf.Component()
c.add_polygon([(0, 0), (length, 0), (length, width), (0, width)], layer=(1, 0))
print("BUILDING waveguide")
return c
gf.clear_cache()
wg1 = wg() # cell builds a straight
print(wg1)
wg2 = wg()
# cell returns the same straight as before without having to run the function
print(wg2) # notice that they have the same uuid (unique identifier)
wg2.plot()
from gdsfactory.cell import print_cache
###Output
_____no_output_____
###Markdown
Lets say that you change the code of the straight function in a jupyter notebook like this one. (I mostly use Vim/VsCode/Pycharm for creating new cells in python)
###Code
print_cache()
wg3 = wg()
wg4 = wg(length=11)
print_cache()
gf.clear_cache()
###Output
_____no_output_____
###Markdown
To enable nice notebook tutorials, every time we show a cell in Matplotlib or Klayout, you can clear the cache, in case you want to develop cells in jupyter notebooks or an IPython kernel
###Code
print_cache() # cache is now empty
###Output
_____no_output_____
###Markdown
Validate argument typesBy default, also `@cell` validates arguments based on their type annotations.To make sure you pass the correct arguments to the cell it runs a validator that checks the type annotations for the function.For example this will be correct```pythonimport gdsfactory as [email protected] straigth_waveguide(length:float): return gf.components.straight(length=length)component = straigth_waveguide(length=3)```While this will raise an error, because you are passing a length that is a string, so it cannot convert it to a float```pythonimport gdsfactory as [email protected] straigth_waveguide(length:float): return gf.components.straight(length=length)component = straigth_waveguide(length='long')```by default `@cell` validates all arguments using [pydantic](https://pydantic-docs.helpmanual.io/usage/validation_decorator/argument-types)
###Code
@gf.cell
def straigth_waveguide(length: float):
print(type(length))
return gf.components.straight(length=length)
# It will also convert an `int` to a `float`
straigth_waveguide(3)
###Output
_____no_output_____
###Markdown
Parametric CellsProblem:In GDS format- each component must have a unique name. Ideally the name is also consistent from different run times, in case you want to merge GDS files that were created at different times or on different computers.- two components stored in the GDS file cannot have the same name. Ideally they will be references to the same component. See `References tutorial`. That way we only have to store that component in memory once and all the references are just pointers to that component.Solution: The decorator `@gf.cell` addresses all these issues:1. Gives the component a unique name depending on the parameters that you pass to it.2. Creates a cache of components where we use the name as the key. The first time the function runs, the cache stores the component, so the second time, you get the component directly from the cache, so you don't create the same component twice.Also, thanks to the @cell decorator, GDS cells in gdsfactory include a `metadata` dictionary where you can access all component settings:- `changed` settings used to create the component- `default` settings in function signature- `full` full settings- name- function_name- module`@cell` comes from PCell `parametric cell` that returns a different Component depending on the input parameters.Make sure that your components get good names by adding the `@cell` decorator to each function that returns a component. Let's see how it works.
###Code
import gdsfactory as gf
# gf.CONF.plotter = 'holoviews'
@gf.cell
def wg(length=10, width=1, layer=(1, 0)):
print("BUILDING waveguide")
c = gf.Component()
c.add_polygon([(0, 0), (length, 0), (length, width), (0, width)], layer=layer)
c.add_port(
name="o1", midpoint=[0, width / 2], width=width, orientation=180, layer=layer
)
c.add_port(
name="o2", midpoint=[length, width / 2], width=width, orientation=0, layer=layer
)
return c
###Output
_____no_output_____
###Markdown
See how the cells get the name from the parameters that you pass them
###Code
c = wg()
print(c)
# The second time you will get this cell from the cache
c = wg()
print(c)
# If you call the cell with different parameters, the cell will get a different name
c = wg(width=0.5)
print(c)
###Output
_____no_output_____
###Markdown
Sometimes when you are changing the inside code of the function, you need to **ignore** the cache. You can pass `cache=False`
###Code
c = wg(cache=False)
c.metadata.changed
c.metadata.default
c.metadata.full
c.pprint()
###Output
_____no_output_____
###Markdown
thanks to `gf.cell` you can also add any metadata `info` relevant to the cell
###Code
c = wg(length=3, info=dict(polarization="te", wavelength=1.55))
c.pprint()
print(c.metadata.info.wavelength)
###Output
_____no_output_____
###Markdown
MetadataTogether with the GDS files that you send to the foundry you can also store some metadata in YAML for each cell containing all the settings that we used to build the GDS. The metadata will consist of all the parameters that were passed to the component function as well as derived properties- settings: includes all component metadata - derived properties - external metadata (test_protocol, docs, ...) - simulation_settings - function_name - name: for the component - name_long: for the component - full: full list of settings - changed: changed settings - default: includes the default signature of the component- ports: port name, width, orientation How can you add two different references to a cell with the same parameters?
###Code
import gdsfactory as gf
c = gf.Component("problem")
R1 = gf.components.rectangle(
size=(4, 2), layer=(2, 0)
) # Creates a rectangle (same Unique ID uid)
R2 = gf.components.rectangle(size=(4, 2), layer=(3, 0))
# Try Create a new rectangle that we want to change (but has the same name so we will get R1 from the cache)
r1r = c << R1 # Add the first rectangle to c
r2r = c << R2 # Add the second rectangle to c
r2r.move((4, 2))
c
print(R1 == R2)
print(R1)
print(R2)
# lets do it cleaner with references
import gdsfactory as gf
c = gf.Component("solution")
R = gf.components.rectangle(size=(4, 2), layer=(2, 0))
r1 = c << R # Add the first rectangle reference to c
r2 = c << R # Add the second rectangle reference to c
r2.rotate(45)
c
import gdsfactory as gf
c = gf.components.straight()
c.show()
c
###Output
_____no_output_____
###Markdown
We can even show ports of all references with `component.show(show_subports=True)`
###Code
c = gf.components.mzi_phase_shifter(length_x=50)
c
###Output
_____no_output_____
###Markdown
CacheTo make sure that two cells created with the same parameters are references of the same cell, the `cell` decorator has a cache: if a component has already been built, it returns that component from the cache
###Code
@gf.cell
def wg(length=10, width=1):
c = gf.Component()
c.add_polygon([(0, 0), (length, 0), (length, width), (0, width)], layer=(1, 0))
print("BUILDING waveguide")
return c
gf.clear_cache()
wg1 = wg() # cell builds a straight
print(wg1)
wg2 = wg()
# cell returns the same straight as before without having to run the function
print(wg2) # notice that they have the same uuid (unique identifier)
wg2
from gdsfactory.cell import print_cache
###Output
_____no_output_____
###Markdown
Lets say that you change the code of the straight function in a jupyter notebook like this one. (I mostly use Vim/VsCode/Pycharm for creating new cells in python)
###Code
print_cache()
wg3 = wg()
wg4 = wg(length=11)
print_cache()
gf.clear_cache()
###Output
_____no_output_____
###Markdown
To enable nice notebook tutorials, every time we show a cell in Matplotlib or Klayout, you can clear the cache, in case you want to develop cells in jupyter notebooks or an IPython kernel
###Code
print_cache() # cache is now empty
###Output
_____no_output_____
###Markdown
Validate argument typesBy default, also `@cell` validates arguments based on their type annotations.To make sure you pass the correct arguments to the cell function it runs a validator that checks the type annotations for the function.For example this will be correct```pythonimport gdsfactory as [email protected] straigth_waveguide(length:float): return gf.components.straight(length=length)component = straigth_waveguide(length=3)```While this will raise an error, because you are passing a length that is a string, so it cannot convert it to a float```pythoncomponent = straigth_waveguide(length='long')``````bashValidationError: 1 validation error for StraigthWaveguidelength value is not a valid float (type=type_error.float)```by default `@cell` validates all arguments using [pydantic](https://pydantic-docs.helpmanual.io/usage/validation_decorator/argument-types)
###Code
@gf.cell
def straigth_waveguide(length: float):
print(type(length))
return gf.components.straight(length=length)
# It will also convert an `int` to a `float`
c = straigth_waveguide(length=3)
###Output
_____no_output_____
###Markdown
CellProblem:In GDS format- each component must have a unique name. Ideally the name is also consistent from different run times, in case you want to merge GDS files that were created at different times or on different computers.- two components stored in the GDS file cannot have the same name. Ideally they will be references to the same component. See `References tutorial`. That way we only have to store that component in memory once and all the references are just pointers to that component.Solution: The decorator `@gf.cell` for Parametric cell functions:1. Gives the component a unique name depending on the parameters that you pass to it.2. Creates a cache of components where we use the name as the key. The first time the function runs, the cache stores the component, so the second time, you get the component directly from the cache, so you don't create the same component twice.Also, thanks to the `@cell` decorator, GDS cells in gdsfactory include a `metadata` dictionary where you can access all component settings:- `changed` settings used to create the component- `default` settings in function signature- `full` full settings- name- function_name- module`@cell` comes from PCell `parametric cell`, where the function returns a different Component depending on the input parameters.Make sure that your components get good names by adding the `@cell` decorator to each function that returns a Component. A decorator is a function that runs over a function, so when you do ```import gdsfactory as [email protected] mzi_with_bend() -> gf.Component: c = gf.Component() mzi = c << gf.components.mzi() bend = c << gf.components.bend_euler() return c ```it's equivalent to calling `gf.cell(mzi_with_bend)` on the undecorated function, as the code below shows. Let's see how it works.
###Code
import gdsfactory as gf
def mzi_with_bend(radius:float=10.) -> gf.Component:
c = gf.Component()
mzi = c << gf.components.mzi()
bend = c << gf.components.bend_euler(radius=radius)
bend.connect('o1', mzi.ports['o2'])
return c
c = mzi_with_bend()
print(f'this cell {c.name!r} does NOT get automatic name')
c
mzi_with_bend_decorated = gf.cell(mzi_with_bend)
c = mzi_with_bend_decorated(radius = 10)
print(f'this cell {c.name!r} gets automatic name thanks to the `cell` decorator')
c
@gf.cell
def mzi_with_bend(radius:float=10.) -> gf.Component:
c = gf.Component()
mzi = c << gf.components.mzi()
bend = c << gf.components.bend_euler(radius=radius)
bend.connect('o1', mzi.ports['o2'])
return c
print(f'this cell {c.name!r} gets automatic name thanks to the `cell` decorator')
c
import gdsfactory as gf
# gf.CONF.plotter = 'holoviews'
@gf.cell
def wg(length=10, width=1, layer=(1, 0)):
print("BUILDING waveguide")
c = gf.Component()
c.add_polygon([(0, 0), (length, 0), (length, width), (0, width)], layer=layer)
c.add_port(
name="o1", midpoint=[0, width / 2], width=width, orientation=180, layer=layer
)
c.add_port(
name="o2", midpoint=[length, width / 2], width=width, orientation=0, layer=layer
)
return c
###Output
_____no_output_____
###Markdown
See how the cells get the name from the parameters that you pass them
###Code
c = wg()
print(c)
# The second time you will get this cell from the cache
c = wg()
print(c)
# If you call the cell with different parameters, the cell gets a different name
c = wg(width=0.5)
print(c)
###Output
_____no_output_____
###Markdown
Sometimes when you are changing the inside code of the function, you need to use `cache=False` to **ignore** the cache.
###Code
c = wg(cache=False)
c.metadata.changed
c.metadata.default
c.metadata.full
c.pprint()
###Output
_____no_output_____
###Markdown
thanks to `gf.cell` you can also add any metadata `info` relevant to the cell
###Code
c = wg(length=3, info=dict(polarization="te", wavelength=1.55))
c.pprint()
print(c.metadata.info.wavelength)
###Output
_____no_output_____
###Markdown
MetadataTogether with the GDS file that you send to the foundry you can also store metadata in YAML for each cell containing all the settings that we used to build the GDS. The metadata will consist of all the parameters that were passed to the component function as well as derived properties- settings: includes all component metadata: - changed: changed settings. - child: child settings. - default: includes the default cell function settings. - full: full settings. - function_name: from the cell function. - info: metadata in Component.info dict. - module: python module where you can find the cell function. - name: for the component- ports: port name, width, orientation
###Code
dir(c.metadata)
###Output
_____no_output_____
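As a rough illustration of what the YAML sidecar described above could contain, the snippet below hand-writes a small `settings` dictionary in that shape and dumps it with PyYAML. All values are made up for illustration; a real export would read them from the Component itself.

```python
import yaml

# Illustrative metadata in the structure described above (hand-written values).
settings = {
    "name": "wg_length3",
    "function_name": "wg",
    "module": "__main__",
    "changed": {"length": 3},
    "default": {"length": 10, "width": 1, "layer": [1, 0]},
    "full": {"length": 3, "width": 1, "layer": [1, 0]},
    "info": {"polarization": "te", "wavelength": 1.55},
}
ports = {
    "o1": {"width": 1, "orientation": 180},
    "o2": {"width": 1, "orientation": 0},
}

yaml_text = yaml.safe_dump({"settings": settings, "ports": ports}, sort_keys=False)
print(yaml_text)

# The same text could be written next to the GDS file, e.g. wg_length3.yml
with open("wg_length3.yml", "w") as f:
    f.write(yaml_text)
```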
###Markdown
How can you add two different references to a cell with the same parameters?
###Code
import gdsfactory as gf
c = gf.Component("problem")
R1 = gf.components.rectangle(
size=(4, 2), layer=(2, 0)
) # Creates a rectangle (same Unique ID uid)
R2 = gf.components.rectangle(size=(4, 2), layer=(3, 0))
# Try Create a new rectangle that we want to change (but has the same name so we will get R1 from the cache)
r1r = c << R1 # Add the first rectangle to c
r2r = c << R2 # Add the second rectangle to c
r2r.move((4, 2))
c
print(R1 == R2)
print(R1)
print(R2)
# lets do it cleaner with references
import gdsfactory as gf
c = gf.Component("solution")
R = gf.components.rectangle(size=(4, 2), layer=(2, 0))
r1 = c << R # Add the first rectangle reference to c
r2 = c << R # Add the second rectangle reference to c
r2.rotate(45)
c
import gdsfactory as gf
c = gf.components.straight()
c.show()
c
###Output
_____no_output_____
###Markdown
We can even show ports of all references with `component.show(show_subports=True)`
###Code
c = gf.components.mzi_phase_shifter(length_x=50)
c
###Output
_____no_output_____
###Markdown
CacheTo make sure that two cells created with the same parameters are references of the same cell, the `cell` decorator has a cache: if a component has already been built, it returns that component from the cache
###Code
@gf.cell
def wg(length=10, width=1):
c = gf.Component()
c.add_polygon([(0, 0), (length, 0), (length, width), (0, width)], layer=(1, 0))
print("BUILDING waveguide")
return c
gf.clear_cache()
wg1 = wg() # cell builds a straight
print(wg1)
wg2 = wg()
# cell returns the same straight as before without having to run the function
print(wg2) # notice that they have the same uuid (unique identifier)
wg2
from gdsfactory.cell import print_cache
###Output
_____no_output_____
###Markdown
Lets say that you change the code of the straight function in a jupyter notebook like this one. (I mostly use Vim/VsCode/Pycharm for creating new cells in python)
###Code
print_cache()
wg3 = wg()
wg4 = wg(length=11)
print_cache()
gf.clear_cache()
###Output
_____no_output_____
###Markdown
To enable nice notebook tutorials, every time we show a cell in Matplotlib or Klayout, you can clear the cache, in case you want to develop cells in jupyter notebooks or an IPython kernel
###Code
print_cache() # cache is now empty
###Output
_____no_output_____
###Markdown
Validate argument typesBy default, also `@cell` validates arguments based on their type annotations.To make sure you pass the correct arguments to the cell function it runs a validator that checks the type annotations for the function.For example this will be correct```pythonimport gdsfactory as [email protected] straigth_waveguide(length:float): return gf.components.straight(length=length)component = straigth_waveguide(length=3)```While this will raise an error, because you are passing a length that is a string, so it cannot convert it to a float```pythoncomponent = straigth_waveguide(length='long')``````bashValidationError: 1 validation error for StraigthWaveguidelength value is not a valid float (type=type_error.float)```by default `@cell` validates all arguments using [pydantic](https://pydantic-docs.helpmanual.io/usage/validation_decorator/argument-types)
###Code
@gf.cell
def straigth_waveguide(length: float):
print(type(length))
return gf.components.straight(length=length)
# It will also convert an `int` to a `float`
c = straigth_waveguide(length=3)
###Output
_____no_output_____
###Markdown
CellProblem:In GDS format- each cell must have a unique name. Ideally the name is also consistent from different run times, in case you want to merge GDS files that were created at different times or on different computers.- two cells stored in the GDS file cannot have the same name. Ideally they will be references to the same Cell. See `References tutorial`. That way we only have to store that cell in memory once and all the references are just pointers to that cell.- GDS cells info: - `changed` used to create the cell - `default` in function signature - `full` full settings - name - function_name - module - child: (if any) - simulation, testing, data analysis, derived properties (for example path length of the bend) ...Solution: The decorator `@gf.cell` addresses all these issues:1. Gives the cell a unique name depending on the parameters that you pass to it.2. Creates a cache of cells where we use the cell name as the key. The first time the function runs, the cache stores the component, so the second time, you get the component directly from the cache, so you don't create the same cell twice.For creating new Components you need to create them inside a function, and to make sure that the component gets a good name you just need to add the `@cell` decorator. Let's see how it works
###Code
import gdsfactory as gf
@gf.cell
def wg(length=10, width=1):
print('BUILDING waveguide')
c = gf.Component()
c.add_polygon([(0, 0), (length, 0), (length, width), (0, width)], layer=(1, 0))
c.add_port(name="o1", midpoint=[0, width / 2], width=width, orientation=180)
c.add_port(name="o2", midpoint=[length, width / 2], width=width, orientation=0)
return c
###Output
_____no_output_____
###Markdown
See how the cells get the name from the parameters that you pass them
###Code
c = wg()
print(c)
# The second time you will get this cell from the cache
c = wg()
print(c)
# If you call the cell with different parameters, the cell will get a different name
c = wg(width=0.5)
print(c)
c.info.changed
c.info.full
c.info.default
c.pprint
###Output
_____no_output_____
###Markdown
thanks to `gf.cell` you can also add any metadata `info` relevant to the cell
###Code
c = wg(length=3, info=dict(polarization='te', wavelength=1.55))
c.pprint
print(c.info.wavelength)
###Output
_____no_output_____
###Markdown
MetadataTogether with the GDS files that you send to the foundry you can also store some metadata in YAML for each cell containing all the settings that we used to build the GDS. The metadata will consist of all the parameters that were passed to the component function as well as derived properties - info: includes all component metadata - derived properties - external metadata (test_protocol, docs, ...) - simulation_settings - function_name - name: for the component - name_long: for the component - full: full list of settings - changed: changed settings - default: includes the default signature of the component- ports: port name, width, orientation How can you add two different references to a cell with the same parameters?
###Code
import gdsfactory as gf
c = gf.Component("problem")
R1 = gf.components.rectangle(
size=(4, 2), layer=(0, 0)
) # Creates a rectangle (same Unique ID uid)
R2 = gf.components.rectangle(size=(4, 2), layer=(0, 0))
# Try Create a new rectangle that we want to change (but has the same name so we will get R1 from the cache)
r1r = c << R1 # Add the first rectangle to c
r2r = c << R2 # Add the second rectangle to c
r2r.move((4, 2))
c
print(R1 == R2)
print(R1)
print(R2)
# lets do it cleaner with references
import gdsfactory as gf
c = gf.Component("solution")
R = gf.components.rectangle(size=(4, 2), layer=(0, 0))
r1 = c << R # Add the first rectangle reference to c
r2 = c << R # Add the second rectangle reference to c
r2.rotate(45)
c
import gdsfactory as gf
c = gf.components.straight()
c.show()
c.plot()
###Output
_____no_output_____
###Markdown
We can even show ports of all references with `component.show(show_subports=True)`
###Code
c = gf.components.mzi_phase_shifter(length_x=50)
c
###Output
_____no_output_____
###Markdown
CacheTo make sure that two cells created with the same parameters are references of the same cell, the `cell` decorator has a cache: if a component has already been built, it returns that component from the cache
###Code
@gf.cell
def wg(length=10, width=1):
c = gf.Component()
c.add_polygon([(0, 0), (length, 0), (length, width), (0, width)], layer=(1, 0))
print("BUILDING waveguide")
return c
gf.clear_cache()
wg1 = wg() # cell builds a straight
print(wg1)
wg2 = wg()
# cell returns the same straight as before without having to run the function
print(wg2) # notice that they have the same uuid (unique identifier)
wg2.plot()
from gdsfactory.cell import print_cache
###Output
_____no_output_____
###Markdown
Lets say that you change the code of the straight function in a jupyter notebook like this one. (I mostly use Vim/VsCode/Pycharm for creating new cells in python)
###Code
print_cache()
wg3 = wg()
wg4 = wg(length=11)
print_cache()
gf.clear_cache()
###Output
_____no_output_____
###Markdown
To enable nice notebook tutorials, every time we show a cell in Matplotlib or Klayout, you can clear the cache, in case you want to develop cells in jupyter notebooks or an IPython kernel
###Code
print_cache() # cache is now empty
###Output
_____no_output_____
###Markdown
Validate argument typesBy default, also `@cell` validates and converts the argument types.To make sure you pass the correct arguments to the cell it runs a validator that checks the type annotations for the function.For example this will be correct```pythonimport gdsfactory as [email protected] straigth_waveguide(length:float): return gf.components.straight(length=length)component = straigth_waveguide(length=3)```While this will raise an error, because you are passing a length that is a string, so it cannot convert it to a float```pythonimport gdsfactory as [email protected] straigth_waveguide(length:float): return gf.components.straight(length=length)component = straigth_waveguide(length='long')```by default `@cell` validates all arguments using [pydantic](https://pydantic-docs.helpmanual.io/usage/validation_decorator/argument-types)
###Code
@gf.cell
def straigth_waveguide(length:float):
print(type(length))
return gf.components.straight(length=length)
# It will also convert an `int` to a `float`
straigth_waveguide(3)
###Output
_____no_output_____
###Markdown
CellProblem:In GDS format- each cell must have a unique name. Ideally the name is also consistent from different run times, in case you want to merge GDS files that were created at different times or on different computers.- two cells stored in the GDS file cannot have the same name. Ideally they will be references to the same Cell. See `References tutorial`. That way we only have to store that cell in memory once and all the references are just pointers to that cell.Solution: The decorator `@gf.cell` addresses all these issues:1. Gives the cell a unique name depending on the parameters that you pass to it.2. Creates a cache of cells where we use the cell name as the key. The first time the function runs, the cache stores the component, so the second time, you get the component directly from the cache, so you don't create the same cell twice.Also, thanks to the @cell decorator, GDS cells in gdsfactory include an `info` dictionary where you can access any metadata from the cell: - `changed` settings used to create the cell - `default` settings in function signature - `full` full settings - name - function_name - module - child: (if any) - simulation, testing, data analysis, derived properties (for example path length of the bend) ...For creating Components you can define them in a function, and to make sure that the component gets a good name you just need to add the `@cell` decorator to that function. Let's see how it works.
###Code
import gdsfactory as gf
gf.CONF.plotter = 'holoviews'
@gf.cell
def wg(length=10, width=1):
print("BUILDING waveguide")
c = gf.Component()
c.add_polygon([(0, 0), (length, 0), (length, width), (0, width)], layer=(1, 0))
c.add_port(name="o1", midpoint=[0, width / 2], width=width, orientation=180)
c.add_port(name="o2", midpoint=[length, width / 2], width=width, orientation=0)
return c
###Output
_____no_output_____
###Markdown
See how the cells get the name from the parameters that you pass them
###Code
c = wg()
print(c)
# The second time you will get this cell from the cache
c = wg()
print(c)
# If you call the cell with different parameters, the cell will get a different name
c = wg(width=0.5)
print(c)
c.metadata.changed
c.metadata.default
c.metadata.full
c.pprint()
###Output
_____no_output_____
###Markdown
thanks to `gf.cell` you can also add any metadata `info` relevant to the cell
###Code
c = wg(length=3, info=dict(polarization="te", wavelength=1.55))
c.pprint()
print(c.metadata.info.wavelength)
###Output
_____no_output_____
###Markdown
MetadataTogether with the GDS files that you send to the foundry you can also store some metadata in YAML for each cell containing all the settings that we used to build the GDS. The metadata will consist of all the parameters that were passed to the component function as well as derived properties- settings: includes all component metadata - derived properties - external metadata (test_protocol, docs, ...) - simulation_settings - function_name - name: for the component - name_long: for the component - full: full list of settings - changed: changed settings - default: includes the default signature of the component- ports: port name, width, orientation- cells: How can you add two different references to a cell with the same parameters?
###Code
import gdsfactory as gf
c = gf.Component("problem")
R1 = gf.components.rectangle(
size=(4, 2), layer=(2, 0)
) # Creates a rectangle (same Unique ID uid)
R2 = gf.components.rectangle(size=(4, 2), layer=(3, 0))
# Try Create a new rectangle that we want to change (but has the same name so we will get R1 from the cache)
r1r = c << R1 # Add the first rectangle to c
r2r = c << R2 # Add the second rectangle to c
r2r.move((4, 2))
c.plot()
print(R1 == R2)
print(R1)
print(R2)
# lets do it cleaner with references
import gdsfactory as gf
c = gf.Component("solution")
R = gf.components.rectangle(size=(4, 2), layer=(2, 0))
r1 = c << R # Add the first rectangle reference to c
r2 = c << R # Add the second rectangle reference to c
r2.rotate(45)
c.plot()
import gdsfactory as gf
c = gf.components.straight()
c.show()
c.plot()
###Output
_____no_output_____
###Markdown
We can even show ports of all references with `component.show(show_subports=True)`
###Code
c = gf.components.mzi_phase_shifter(length_x=50)
c.plot()
###Output
_____no_output_____
###Markdown
CacheTo make sure that two cells created with the same parameters are references of the same cell, the `cell` decorator has a cache: if a component has already been built, it returns that component from the cache
###Code
@gf.cell
def wg(length=10, width=1):
c = gf.Component()
c.add_polygon([(0, 0), (length, 0), (length, width), (0, width)], layer=(1, 0))
print("BUILDING waveguide")
return c
gf.clear_cache()
wg1 = wg() # cell builds a straight
print(wg1)
wg2 = wg()
# cell returns the same straight as before without having to run the function
print(wg2) # notice that they have the same uuid (unique identifier)
wg2.plot()
from gdsfactory.cell import print_cache
###Output
_____no_output_____
###Markdown
Lets say that you change the code of the straight function in a jupyter notebook like this one. (I mostly use Vim/VsCode/Pycharm for creating new cells in python)
###Code
print_cache()
wg3 = wg()
wg4 = wg(length=11)
print_cache()
gf.clear_cache()
###Output
_____no_output_____
###Markdown
To enable nice notebook tutorials, every time we show a cell in Matplotlib or Klayout, you can clear the cache, in case you want to develop cells in jupyter notebooks or an IPython kernel
###Code
print_cache() # cache is now empty
###Output
_____no_output_____
###Markdown
Validate argument typesBy default, also `@cell` validates arguments based on their type annotations.To make sure you pass the correct arguments to the cell it runs a validator that checks the type annotations for the function.For example this will be correct```pythonimport gdsfactory as [email protected] straigth_waveguide(length:float): return gf.components.straight(length=length)component = straigth_waveguide(length=3)```While this will raise an error, because you are passing a length that is a string, so it cannot convert it to a float```pythoncomponent = straigth_waveguide(length='long')``````bashValidationError: 1 validation error for StraigthWaveguidelength value is not a valid float (type=type_error.float)```by default `@cell` validates all arguments using [pydantic](https://pydantic-docs.helpmanual.io/usage/validation_decorator/argument-types)
###Code
@gf.cell
def straigth_waveguide(length: float):
print(type(length))
return gf.components.straight(length=length)
# It will also convert an `int` to a `float`
c = straigth_waveguide(length=3)
###Output
_____no_output_____ |
project_nb.ipynb | ###Markdown
Training Data Getting Data
###Code
%load_ext autoreload
%autoreload 2
# Getting the data
from glob import glob
imgs = glob('dataset/**/*.png', recursive=True)
cars = []
not_cars = []
for img in imgs:
if 'non-vehicles' in img:
not_cars.append(img)
else:
cars.append(img)
###Output
_____no_output_____
###Markdown
Getting Features
###Code
from tqdm import tqdm
import cv2
from skimage.feature import hog
import matplotlib.image as mpimg
import numpy as np
# colorwise histogram feature
def color_hist(img, nbins=32):
channel1 = np.histogram(img[:, :, 0], bins=nbins)
channel2 = np.histogram(img[:, :, 1], bins=nbins)
channel3 = np.histogram(img[:, :, 2], bins=nbins)
return np.concatenate((channel1[0], channel2[0], channel3[0]))
# spatial features
def bin_spatial(img, size=(32, 32)):
c1 = cv2.resize(img[:, :, 0], size).ravel()
c2 = cv2.resize(img[:, :, 1], size).ravel()
c3 = cv2.resize(img[:, :, 2], size).ravel()
return np.hstack((c1, c2, c3))
# convenience method for hog
def get_hog(img, orientation, pix_per_cell, cell_per_block, feature_vec=True):
return hog(img, orientations=orientation, pixels_per_cell=(pix_per_cell, pix_per_cell),
cells_per_block=(cell_per_block, cell_per_block), transform_sqrt=True,
visualise=True, feature_vector=feature_vec)
# used for training
def extract_features(imgs, spatial_size=(32, 32), hist_bins=32, orient=9,
pix_per_cell=8, cell_per_block=2):
# Create a list to append feature vectors to
features = []
# Iterate through the list of images
for file in tqdm(imgs):
file_features = []
# Read in each one by one
image = mpimg.imread(file)
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YCrCb)
spatial_features = bin_spatial(feature_image, size=spatial_size)
file_features.append(spatial_features)
hist_features = color_hist(feature_image, nbins=hist_bins)
file_features.append(hist_features)
hog_features = []
for channel in range(feature_image.shape[2]):
feat, img = get_hog(feature_image[:, :, channel],
orient, pix_per_cell, cell_per_block,
feature_vec=True)
hog_features.append(feat)
hog_features = np.ravel(hog_features)
# Append the new feature vector to the features list
file_features.append(hog_features)
features.append(np.concatenate(file_features))
# Return list of feature vectors
return features
car_features = extract_features(cars)
not_car_features = extract_features(not_cars)
###Output
0%| | 0/8968 [00:00<?, ?it/s]/Users/alex/miniconda3/envs/carnd-term1/lib/python3.5/site-packages/skimage/feature/_hog.py:119: skimage_deprecation: Default value of `block_norm`==`L1` is deprecated and will be changed to `L2-Hys` in v0.15
'be changed to `L2-Hys` in v0.15', skimage_deprecation)
100%|██████████| 8968/8968 [06:44<00:00, 17.77it/s]
###Markdown
Normalizing features
###Code
y = np.hstack((np.ones(len(car_features)), np.zeros(len(not_car_features))))
raw_X = np.vstack((car_features, not_car_features)).astype(np.float64)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X = scaler.fit_transform(raw_X)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, stratify=y)
###Output
_____no_output_____
###Markdown
SVC Training
###Code
from sklearn.svm import LinearSVC
from sklearn.externals import joblib
svc = LinearSVC()
%time svc.fit(X_train, y_train)
print('Test Score: ', svc.score(X_test, y_test))
# saving the data
joblib.dump(svc, 'svc_pickle.pkl')
joblib.dump(scaler, 'scaler.pkl')
###Output
_____no_output_____
###Markdown
Loading the SVC
###Code
from sklearn.svm import LinearSVC
from sklearn.externals import joblib
from sklearn.preprocessing import StandardScaler
from tqdm import tqdm
import cv2
from skimage.feature import hog
import matplotlib.image as mpimg
import numpy as np
import skvideo.io
from scipy.ndimage.measurements import label
loaded_svc = joblib.load('svc_pickle.pkl')
loaded_scaler = joblib.load('scaler.pkl')
# used for inference, takes an image & draws boxes around the detected cars
# reuse scaler for inference
# includes sliding window technique
def mark_cars(img, y_regions_and_scales, svc, scaler,
orient=9, pix_per_cell=8,
cell_per_block=2,
spatial_size=(32, 32),
hist_bins=32, display=False):
output_img = None
# 2d heatmap
heatmap = np.zeros(img.shape[:2])
if display:
output_img = np.copy(img)
img = img.astype(np.float32) / 255
for (y_region, scale) in y_regions_and_scales:
img_region = img[y_region[0]:y_region[1], :, :]
color_transformed = cv2.cvtColor(img_region, cv2.COLOR_RGB2YCrCb)
img_shape = color_transformed.shape
color_transformed = cv2.resize(color_transformed,
(int(img_shape[1]//scale),
int(img_shape[0]//scale)))
ch1 = color_transformed[:, :, 0]
ch2 = color_transformed[:, :, 1]
ch3 = color_transformed[:, :, 2]
num_x_blocks = (ch1.shape[1] // pix_per_cell) - 1
num_y_blocks = (ch1.shape[0] // pix_per_cell) - 1
pixels_per_window = 64
num_blocks_per_window = (pixels_per_window // pix_per_cell) - 1
cells_per_step = 2
num_xsteps = (num_x_blocks - num_blocks_per_window) // cells_per_step
num_ysteps = (num_y_blocks - num_blocks_per_window) // cells_per_step
# we cut out a section later, don't grab the whole vector yet
hog1, _ = get_hog(ch1, orient, pix_per_cell, cell_per_block, feature_vec=False)
hog2, _ = get_hog(ch2, orient, pix_per_cell, cell_per_block, feature_vec=False)
hog3, _ = get_hog(ch3, orient, pix_per_cell, cell_per_block, feature_vec=False)
# sliding window here
for xb in range(num_xsteps):
for yb in range(num_ysteps):
y_start = yb * cells_per_step
y_end = y_start + num_blocks_per_window
x_start = xb * cells_per_step
x_end = x_start + num_blocks_per_window
hog_feat1 = hog1[y_start:y_end, x_start:x_end].ravel()
hog_feat2 = hog2[y_start:y_end, x_start:x_end].ravel()
hog_feat3 = hog3[y_start:y_end, x_start:x_end].ravel()
hog_features = np.hstack((hog_feat1, hog_feat2, hog_feat3))
x_left = x_start * pix_per_cell
y_top = y_start * pix_per_cell
img_cut = cv2.resize(color_transformed[y_top:y_top + pixels_per_window,
x_left:x_left + pixels_per_window], (64, 64))
spatial_features = bin_spatial(img_cut, size=spatial_size)
hist_features = color_hist(img_cut, nbins=hist_bins)
all_features = scaler.transform(
np.hstack((spatial_features, hist_features, hog_features)).reshape(1, -1))
prediction = svc.predict(all_features)
if prediction == 1:
y_top_coord = np.int(y_top * scale)
win_len = np.int(pixels_per_window * scale)
x_top_left = np.int(x_left * scale)
y_top_left = y_top_coord + y_region[0]
x_bot_right = x_top_left + win_len
y_bot_right = y_top_left + win_len
if display:
# cut short & return 1 img only
cv2.rectangle(output_img,
(x_top_left, y_top_left),
(x_bot_right, y_bot_right),
(0, 0, 255), 6)
else:
heatmap[y_top_left:y_bot_right,
x_top_left:x_bot_right] += 1
if display:
return output_img
return heatmap
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
# 400 to 656, have exactly 8 blocks
region_and_scale = [((400, 700), 1.65)]
test_img = mpimg.imread('test_images/test5.jpg')
output = mark_cars(test_img, region_and_scale, loaded_svc, loaded_scaler, display=True)
plt.imshow(output)
###Output
/Users/alex/miniconda3/envs/carnd-term1/lib/python3.5/site-packages/skimage/feature/_hog.py:119: skimage_deprecation: Default value of `block_norm`==`L1` is deprecated and will be changed to `L2-Hys` in v0.15
'be changed to `L2-Hys` in v0.15', skimage_deprecation)
###Markdown
Multi-Scale Window
###Code
# EDIT CODE & NUMBERS!!
pix_per_cell = 8
orient = 9
cell_per_block = 2
def draw_scale_windows(img, y_start, y_stop, scale):
output_img = np.copy(img)
img = img.astype(np.float32) / 255
img_region = img[y_start:y_stop, :, :]
imshape = img_region.shape
img_region = cv2.resize(img_region, (np.int(imshape[1] / scale), np.int(imshape[0] / scale)))
num_xblocks = (img_region.shape[1] // pix_per_cell) - 1
num_yblocks = (img_region.shape[0] // pix_per_cell) - 1
window = 64
cells_per_step = 2
num_blocks_per_window = (window // pix_per_cell) - 1
num_xsteps = (num_xblocks - num_blocks_per_window) // cells_per_step
num_ysteps = (num_yblocks - num_blocks_per_window) // cells_per_step
rect_start = None
rect_end = None
for xb in range(num_xsteps+1):
for yb in range(num_ysteps+1):
ypos = yb * cells_per_step
xpos = xb * cells_per_step
xleft = xpos * pix_per_cell
ytop = ypos * pix_per_cell
x_box_left = np.int(xleft * scale)
y_top_draw = np.int(ytop * scale)
win_draw = np.int(window * scale)
rect_start = (x_box_left, y_top_draw + y_start)
rect_end = (x_box_left + win_draw, y_top_draw + win_draw + y_start)
cv2.rectangle(output_img, rect_start, rect_end, (0, 0, 255), 4)
return output_img
region_and_scales = [((380, 500), 1.0), ((380, 600), 1.5), ((400, 650), 2), ((400, 700), 2.5)]
plt.figure(figsize=(15,40))
plot_count=1
for (region, scale) in region_and_scales:
y_start, y_stop = region
plt.subplot(1,4, plot_count)
plt.imshow(draw_scale_windows(test_img, y_start, y_stop, scale))
plt.title('Region & Scale %s'% plot_count)
plt.xticks([])
plt.yticks([])
plot_count +=1
###Output
_____no_output_____
###Markdown
Video processing
###Code
from utils import HeatmapBuffer
test_path = 'test_video.mp4'
project_path = 'project_video.mp4'
output_path = 'output_video.mp4'
def gather_frames(path):
video = cv2.VideoCapture(path)
frames = []
while video.isOpened():
has_frame, frame = video.read()
if has_frame:
color_transformed = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frames.append(color_transformed)
else:
break
video.release()
return np.array(frames)
def frames_to_video(frames, path):
skvideo.io.vwrite(path, frames)
def draw_boxes(frames, heatmaps):
imgs_with_boxes = frames.copy()
color = (0, 0, 255)
thickness = 4
for i, heatmap in tqdm(enumerate(heatmaps)):
img_with_box = imgs_with_boxes[i]
labelled, num_cars = label(heatmap)
for car_idx in range(1, num_cars+1):
region_y, region_x = np.where(labelled == car_idx)
box_top_left = (np.min(region_x), np.min(region_y))
box_bottom_right = (np.max(region_x), np.max(region_y))
cv2.rectangle(img_with_box, box_top_left, box_bottom_right, color, thickness)
imgs_with_boxes[i] = img_with_box
return imgs_with_boxes
def threshold_heatmaps(heatmaps, threshold=3, buffer_size=8):
buffer = HeatmapBuffer((buffer_size,) + heatmaps[0].shape)
thresholded = list()
for heatmap in tqdm(heatmaps):
buffer.add_heatmap(heatmap)
mean_heatmap = buffer.mean_heatmap()
heatmap[mean_heatmap < threshold] = 0
thresholded.append(heatmap)
return thresholded
def pipeline(frames, svc, scaler, region_and_scales):
raw_heatmaps = [mark_cars(img, region_and_scales, svc, scaler) for img in tqdm(frames)]
thresholded_heatmaps = threshold_heatmaps(raw_heatmaps)
processed_frames = draw_boxes(frames, thresholded_heatmaps)
return processed_frames
%time frames = gather_frames(project_path)
region_and_scales = [((380, 500), 1.0), ((380, 600), 1.5), ((400, 650), 2), ((420, 700), 2.5)]
processed_frames = pipeline(frames, loaded_svc, loaded_scaler, region_and_scales)
%time frames_to_video(processed_frames, output_path)
frames2 = gather_frames(test_path)
x1 = mark_cars(frames2[0], region_and_scales, loaded_svc, loaded_scaler)
x2 = mark_cars(frames2[1], region_and_scales, loaded_svc, loaded_scaler)
x3 = mark_cars(frames2[2], region_and_scales, loaded_svc, loaded_scaler)
x4 = mark_cars(frames2[3], region_and_scales, loaded_svc, loaded_scaler)
y = threshold_heatmaps((x1,x2,x3,x4))
boxed = draw_boxes(frames2[0:4], y)
plt.imshow(boxed[3])
plt.imshow(x4)
z, _ = label(x4)
plt.imshow(z)
###Output
_____no_output_____ |
agents/manufacturer2/notebooks/03_connect_with_city.ipynb | ###Markdown
--- 1 – Initiate Manufacturer2 Agent 1.1 – Init ACA-PY agent controller
###Code
# Setup
agent_controller = AriesAgentController(admin_url,api_key)
print(f"Initialising a controller with admin api at {admin_url} and an api key of {api_key}")
###Output
Initialising a controller with admin api at http://manufacturer2-agent:3021 and an api key of adminApiKey
###Markdown
1.2 – Start Webhook Server to enable communication with other agents. @todo: is communication with other agents, or with other docker containers?
###Code
# Listen on webhook server
await agent_controller.init_webhook_server(webhook_host, webhook_port)
print(f"Listening for webhooks from agent at http://{webhook_host}:{webhook_port}")
###Output
Listening for webhooks from agent at http://0.0.0.0:3010
###Markdown
1.3 – Init ACM Credential Holder
###Code
# The CredentialHolder registers relevant webhook servers and event listeners
manufacturer2_agent = CredentialHolder(agent_controller)
# Verify if Manufacturer already has a VC
# (if there are manufacturer credentials, there is no need to execute the notebook)
manufacturer2_agent.get_credentials()
###Output
[1m[32mSuccessfully initiated AgentConnectionManager for a(n) Holder ACA-PY agent[0m
###Markdown
--- 2 – Establish a connection with the City agent 🏙️A connection with the credential issuer (i.e., the authority agent) must be established before a VC can be received. In this scenario, the Manufacturer2 requests a connection with the Authority to be certified as an official city agency. Thus, the Manufacturer2 agent sends an invitation to the Authority. In real life, the invitation can be shared via video call, phone call, or E-Mail. In this PoC, this is represented by copy and pasting the invitation into the manufacturers' notebooks. 2.1 Join invitation of City agent 🏙️Copy and paste the multi-use invitation of the city agent, and establish a connection with them.
###Code
# Variables
alias = "undisclosedM2"
auto_accept = True
# Receive connection invitation
connection_id = manufacturer2_agent.receive_connection_invitation(alias=alias, auto_accept=auto_accept)
###Output
[1m[35mPlease enter invitation received by external agent:[0m
###Markdown
Break Point 2 / 3 / 4🚛 ➡️ 🚗 / 🛵 / 🏙️ Please proceed to the remaining Manufacturers. If you have established a connection between the City and all Manufacturers, proceed to the City Notebook's Step 2.2--- 3 – Create Presentation to Send Proof Presentation 3.1 – Create presentation that satisfies requirements of proof requestBefore you can present a presentation, you must identify the presentation record which you wish to respond to with a presentation. To do so, the `prepare_presentation()` function runs through the following steps: 1. Get all proof requests that were sent through `connection_id`2. Get the most recent `presentation_exchange_id` and the corresponding `proof_request` from (1)3. Get the restrictions the City agent defined in `proof_request` from (2)4. Compare all VCs the Manufacturer2 agent has stored, and find (if available) a VC that satisfies the restrictions from (3)5. Return a presentation dictionary from a VC from (4) that satisfies all requirements. Generally, a presentation consists of three classes of attributes: a. `requested_attributes`: Attributes that were signed by an issuer and have been revealed in the presentation process b. `self_attested_attributes`: Attributes that the prover has self attested to in the presentation object. c. `requested_predicates` (predicate proofs): Attribute values that have been proven to meet some statement. (TODO: Show how you can parse this information)
###Code
presentation, presentation_exchange_id = manufacturer2_agent.prepare_presentation(connection_id)
###Output
[34m> Found proof_request with presentation_exchange_id 3b7d039d-82fb-4f63-aec4-d579a318a9e9[0m
[34m> Restrictions for a suitable proof: {'isManufacturer': {'requirements': {'schema_id': 'AkvQpXzutUhSeeiuZbVcbq:2:certify-manufacturer:0.0.1'}, 'request_attr_name': '0_isManufacturer_uuid'}}[0m
[34m> Attribute request for 'isManufacturer' can be satisfied by Credential with VC 'isManufacturer-VC-M2'[0m
[34m> Generate the proof presentation : [0m
{
'requested_attributes': {
'0_isManufacturer_uuid': {
'cred_id': 'isManufacturer-VC-M2',
'revealed': True,
},
},
'requested_predicates': {},
'self_attested_attributes': {},
}
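Since the presentation printed above is a plain dictionary, it can be inspected with ordinary Python, which also covers the TODO in the markdown cell above. A minimal sketch, assuming `presentation` has exactly the shape shown (the helper `summarize_presentation` is our own, not part of the ACM/ACA-Py API):

```python
# Minimal sketch: walk the presentation dict returned by prepare_presentation().
# Assumes the structure printed above; no additional ACA-Py calls are made.
def summarize_presentation(presentation: dict) -> None:
    for attr_name, spec in presentation.get("requested_attributes", {}).items():
        revealed = "revealed" if spec.get("revealed") else "not revealed"
        print(f"{attr_name}: backed by credential '{spec['cred_id']}' ({revealed})")
    for attr_name, value in presentation.get("self_attested_attributes", {}).items():
        print(f"{attr_name}: self-attested value {value!r}")
    for pred_name, spec in presentation.get("requested_predicates", {}).items():
        print(f"{pred_name}: predicate proof using credential '{spec.get('cred_id')}'")

summarize_presentation(presentation)
```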
###Markdown
3.2 – Send PresentationSend the presentation to the recipient of `presentation_exchange_id`
###Code
manufacturer2_agent.send_proof_presentation(presentation_exchange_id, presentation)
###Output
---------------------------------------------------------------------
[1mConnection Webhook Event Received: Present-Proof Handler[0m
Connection ID : beeebb5f-dfbd-46f6-bc64-e117ceae5af1
Presentation Exchange ID : 3b7d039d-82fb-4f63-aec4-d579a318a9e9
Protocol State : [34mpresentation_sent[0m
Agent Role : prover
Initiator : external
---------------------------------------------------------------------
---------------------------------------------------------------------
[1mConnection Webhook Event Received: Present-Proof Handler[0m
Connection ID : beeebb5f-dfbd-46f6-bc64-e117ceae5af1
Presentation Exchange ID : 3b7d039d-82fb-4f63-aec4-d579a318a9e9
Protocol State : [34mpresentation_acked[0m
Agent Role : prover
Initiator : external
---------------------------------------------------------------------
[1m[32m
Presentation Exchange ID: 3b7d039d-82fb-4f63-aec4-d579a318a9e9 is acknowledged by Relying Party[0m
###Markdown
Break Point 6 / 7 / 8🚛 ➡️ 🚗 / 🛵 / 🏙️ Please proceed to the remaining Manufacturers and run all cells between Steps 3 and 4.1 If you have sent proof presentations from all manufacturers, proceed to the City Notebook's Step 3.3 --- 4 – Do Data ScienceAssuming that the City agent will acknowledge the proofs and deem them to be correct, proceed by inviting the City agent to a Duet Connection. 4.1 – Establish a Duet Connection with City Agent: Send Duet invitationDuet is a package that allows you to exchange encrypted data and run privacy-preserving arithmetic operations on them (e.g., through homomorphic encryption or secure multiparty computation).
###Code
# Set up connection_id to use for duet connection
manufacturer2_agent._update_connection(connection_id=connection_id, is_duet_connection=True, reset_duet=True)
# Create duet invitation for city agent
duet = sy.launch_duet(credential_exchanger=manufacturer2_agent)
###Output
🎤 🎸 ♪♪♪ Starting Duet ♫♫♫ 🎻 🎹
♫♫♫ >[93m DISCLAIMER[0m: [1mDuet is an experimental feature currently in beta.
♫♫♫ > Use at your own risk.
[0m
[1m
> ❤️ [91mLove[0m [92mDuet[0m? [93mPlease[0m [94mconsider[0m [95msupporting[0m [91mour[0m [93mcommunity![0m
> https://github.com/sponsors/OpenMined[1m
♫♫♫ > Punching through firewall to OpenGrid Network Node at:
♫♫♫ > http://ec2-18-218-7-180.us-east-2.compute.amazonaws.com:5000
♫♫♫ >
♫♫♫ > ...waiting for response from OpenGrid Network...
♫♫♫ > [92mDONE![0m
♫♫♫ > [1mSTEP 1:[0m Sending Duet Token 4d07c4905d3548cf581e7faf06dee356
♫♫♫ > to Duet Partner City-Agency
♫♫♫ > via Connection ID beeebb5f-dfbd-46f6-bc64-e117ceae5af1
[1m[32m♫♫♫ > Done![0m
♫♫♫ > [1mSTEP 2:[0m Awaiting Duet Token from Duet Partner...
♫♫♫ > [1m[32mDONE![0m Partner's Duet Token: 27e39dabc65cdaf7f2f2d6cca2801889
♫♫♫ > Connecting...
♫♫♫ > [92mCONNECTED![0m
♫♫♫ > DUET LIVE STATUS - Objects: 0 Requests: 0 Messages: 1 Request Handlers: 0
###Markdown
4.2 - Load data to duet store
###Code
# Verify data store of duet
duet.store.pandas # There should only be an MPC session statement by the City agent
###Output
_____no_output_____
###Markdown
Process data before loading it to the duet store. We take a synthetically created dataset of CO2 emission per trip across the City Agent's City (in this case Berlin, Germany).
###Code
# Get zipcode data (zipcode data from https://daten.odis-berlin.de/de/dataset/plz/)
df_zipcode = pd.read_csv("data/geo/berlin_zipcodes.csv").rename(columns={"plz":"zipcode"})
valid_zipcodes = list(df_zipcode.zipcode)
df_zipcode.head()
# Get trip data
df_co2 = pd.read_csv("data/trips/data.csv", index_col=0)
df_co2 = df_co2[df_co2.zipcode.isin(valid_zipcodes)]
df_co2["hour"] = df_co2.timestamp.apply(lambda x: int(x[11:13]))
df_co2.head()
###Output
_____no_output_____
###Markdown
The trip data is then grouped by zipcode to sum the CO2 emission per hour per zipcode.
###Code
# Get hourly co2
df_hourly_co2 = df_co2[["zipcode", "hour","co2_grams"]].groupby(["zipcode", "hour"]).sum().reset_index()
df_hourly_co2 = df_hourly_co2.pivot(index=["zipcode"], columns=["hour"])["co2_grams"].replace(np.nan, 0)
# Build a zipcode x hour matrix (one row per valid zipcode, 24 hourly columns)
df_hourly_zipcode = df_zipcode.set_index("zipcode").reindex(columns=list(range(0,24))).replace(np.nan,0)#.reset_index()
# Merge dataframes together
df = df_hourly_zipcode.add(df_hourly_co2, fill_value=0)
print(df.shape)
df.head()
###Output
(194, 24)
###Markdown
Then, convert the dataset to a tensor, and upload the tensor with shape (194 x 24) to the duet data store
###Code
# Configure tensor
hourly_co2_torch = torch.tensor(df.values)
hourly_co2_torch = hourly_co2_torch.tag("hourly-co2-per-zip_2021-08-19")
hourly_co2_torch = hourly_co2_torch.describe("Total CO2 per Zipcode per Hour on August 19, 2021. Shape: zipcode (10115-14199) x hour (0-23) = 194 x 24")
# Load tensor to datastore
hourly_co2_torch_pointer = hourly_co2_torch.send(duet, pointable=True)
# Verify datastore
duet.store.pandas
###Output
_____no_output_____
###Markdown
4.3 – Authorize City agent to `.reconstruct()` the dataAuthorize the city agent to reconstruct the data once it is shared and joined with other manufacturers' data
###Code
duet.requests.add_handler(
#name="reconstruct",
action="accept"
)
###Output
[2021-11-27T15:57:16.560773+0000][CRITICAL][logger]][41] Path sympc.session.Session not present in the AST.
[2021-11-27T15:57:16.561993+0000][CRITICAL][logger]][41] Path sympc.session.Session not present in the AST.
ERROR:asyncio:Exception in callback AsyncIOEventEmitter._emit_run.<locals>._callback(<Task finishe...in the AST.')>) at /opt/conda/lib/python3.9/site-packages/pyee/_asyncio.py:57
handle: <Handle AsyncIOEventEmitter._emit_run.<locals>._callback(<Task finishe...in the AST.')>) at /opt/conda/lib/python3.9/site-packages/pyee/_asyncio.py:57>
Traceback (most recent call last):
File "/opt/conda/lib/python3.9/asyncio/events.py", line 80, in _run
self._context.run(self._callback, *self._args)
File "/opt/conda/lib/python3.9/site-packages/pyee/_asyncio.py", line 64, in _callback
self.emit("error", exc)
File "/opt/conda/lib/python3.9/site-packages/pyee/_base.py", line 118, in emit
self._emit_handle_potential_error(event, args[0] if args else None)
File "/opt/conda/lib/python3.9/site-packages/pyee/_base.py", line 88, in _emit_handle_potential_error
raise error
File "/opt/conda/lib/python3.9/asyncio/tasks.py", line 256, in __step
result = coro.send(None)
File "/opt/conda/lib/python3.9/site-packages/syft/grid/connections/webrtc.py", line 233, in on_message
await self.consumer(msg=message)
File "/opt/conda/lib/python3.9/site-packages/syft/grid/connections/webrtc.py", line 449, in consumer
traceback_and_raise(e)
File "/opt/conda/lib/python3.9/site-packages/syft/logger.py", line 61, in traceback_and_raise
raise e
File "/opt/conda/lib/python3.9/site-packages/syft/grid/connections/webrtc.py", line 417, in consumer
_msg = _deserialize(blob=msg, from_bytes=True)
File "/opt/conda/lib/python3.9/site-packages/syft/core/common/serde/deserialize.py", line 89, in _deserialize
res = _proto2object(proto=blob)
File "/opt/conda/lib/python3.9/site-packages/syft/core/common/message.py", line 183, in _proto2object
_deserialize(blob=proto.message, from_bytes=True), SyftMessage
File "/opt/conda/lib/python3.9/site-packages/syft/core/common/serde/deserialize.py", line 89, in _deserialize
res = _proto2object(proto=blob)
File "/opt/conda/lib/python3.9/site-packages/syft/core/node/common/action/run_class_method_action.py", line 289, in _proto2object
_self=_deserialize(blob=proto._self),
File "/opt/conda/lib/python3.9/site-packages/syft/core/common/serde/deserialize.py", line 89, in _deserialize
res = _proto2object(proto=blob)
File "/opt/conda/lib/python3.9/site-packages/syft/core/pointer/pointer.py", line 335, in _proto2object
points_to_type = sy.lib_ast.query(proto.points_to_object_with_path)
File "/opt/conda/lib/python3.9/site-packages/syft/ast/attribute.py", line 161, in query
traceback_and_raise(
File "/opt/conda/lib/python3.9/site-packages/syft/logger.py", line 61, in traceback_and_raise
raise e
ValueError: Path sympc.session.Session not present in the AST.
[2021-11-27T15:57:16.666951+0000][CRITICAL][logger]][41] Path sympc.session.Session not present in the AST.
[2021-11-27T15:57:16.668047+0000][CRITICAL][logger]][41] Path sympc.session.Session not present in the AST.
ERROR:asyncio:Exception in callback AsyncIOEventEmitter._emit_run.<locals>._callback(<Task finishe...in the AST.')>) at /opt/conda/lib/python3.9/site-packages/pyee/_asyncio.py:57
handle: <Handle AsyncIOEventEmitter._emit_run.<locals>._callback(<Task finishe...in the AST.')>) at /opt/conda/lib/python3.9/site-packages/pyee/_asyncio.py:57>
Traceback (most recent call last):
File "/opt/conda/lib/python3.9/asyncio/events.py", line 80, in _run
self._context.run(self._callback, *self._args)
File "/opt/conda/lib/python3.9/site-packages/pyee/_asyncio.py", line 64, in _callback
self.emit("error", exc)
File "/opt/conda/lib/python3.9/site-packages/pyee/_base.py", line 118, in emit
self._emit_handle_potential_error(event, args[0] if args else None)
File "/opt/conda/lib/python3.9/site-packages/pyee/_base.py", line 88, in _emit_handle_potential_error
raise error
File "/opt/conda/lib/python3.9/asyncio/tasks.py", line 256, in __step
result = coro.send(None)
File "/opt/conda/lib/python3.9/site-packages/syft/grid/connections/webrtc.py", line 233, in on_message
await self.consumer(msg=message)
File "/opt/conda/lib/python3.9/site-packages/syft/grid/connections/webrtc.py", line 449, in consumer
traceback_and_raise(e)
File "/opt/conda/lib/python3.9/site-packages/syft/logger.py", line 61, in traceback_and_raise
raise e
File "/opt/conda/lib/python3.9/site-packages/syft/grid/connections/webrtc.py", line 417, in consumer
_msg = _deserialize(blob=msg, from_bytes=True)
File "/opt/conda/lib/python3.9/site-packages/syft/core/common/serde/deserialize.py", line 89, in _deserialize
res = _proto2object(proto=blob)
File "/opt/conda/lib/python3.9/site-packages/syft/core/common/message.py", line 183, in _proto2object
_deserialize(blob=proto.message, from_bytes=True), SyftMessage
File "/opt/conda/lib/python3.9/site-packages/syft/core/common/serde/deserialize.py", line 89, in _deserialize
res = _proto2object(proto=blob)
File "/opt/conda/lib/python3.9/site-packages/syft/core/node/common/action/run_class_method_action.py", line 289, in _proto2object
_self=_deserialize(blob=proto._self),
File "/opt/conda/lib/python3.9/site-packages/syft/core/common/serde/deserialize.py", line 89, in _deserialize
res = _proto2object(proto=blob)
File "/opt/conda/lib/python3.9/site-packages/syft/core/pointer/pointer.py", line 335, in _proto2object
points_to_type = sy.lib_ast.query(proto.points_to_object_with_path)
File "/opt/conda/lib/python3.9/site-packages/syft/ast/attribute.py", line 161, in query
traceback_and_raise(
File "/opt/conda/lib/python3.9/site-packages/syft/logger.py", line 61, in traceback_and_raise
raise e
ValueError: Path sympc.session.Session not present in the AST.
[2021-11-27T15:57:16.697455+0000][CRITICAL][logger]][41] Path ReplicatedSharedTensorShareTensorUnion not present in the AST.
[2021-11-27T15:57:16.707379+0000][CRITICAL][logger]][41] Path ReplicatedSharedTensorShareTensorUnion not present in the AST.
ERROR:asyncio:Exception in callback AsyncIOEventEmitter._emit_run.<locals>._callback(<Task finishe...in the AST.')>) at /opt/conda/lib/python3.9/site-packages/pyee/_asyncio.py:57
handle: <Handle AsyncIOEventEmitter._emit_run.<locals>._callback(<Task finishe...in the AST.')>) at /opt/conda/lib/python3.9/site-packages/pyee/_asyncio.py:57>
Traceback (most recent call last):
File "/opt/conda/lib/python3.9/asyncio/events.py", line 80, in _run
self._context.run(self._callback, *self._args)
File "/opt/conda/lib/python3.9/site-packages/pyee/_asyncio.py", line 64, in _callback
self.emit("error", exc)
File "/opt/conda/lib/python3.9/site-packages/pyee/_base.py", line 118, in emit
self._emit_handle_potential_error(event, args[0] if args else None)
File "/opt/conda/lib/python3.9/site-packages/pyee/_base.py", line 88, in _emit_handle_potential_error
raise error
File "/opt/conda/lib/python3.9/asyncio/tasks.py", line 256, in __step
result = coro.send(None)
File "/opt/conda/lib/python3.9/site-packages/syft/grid/connections/webrtc.py", line 233, in on_message
await self.consumer(msg=message)
File "/opt/conda/lib/python3.9/site-packages/syft/grid/connections/webrtc.py", line 449, in consumer
traceback_and_raise(e)
File "/opt/conda/lib/python3.9/site-packages/syft/logger.py", line 61, in traceback_and_raise
raise e
File "/opt/conda/lib/python3.9/site-packages/syft/grid/connections/webrtc.py", line 417, in consumer
_msg = _deserialize(blob=msg, from_bytes=True)
File "/opt/conda/lib/python3.9/site-packages/syft/core/common/serde/deserialize.py", line 89, in _deserialize
res = _proto2object(proto=blob)
File "/opt/conda/lib/python3.9/site-packages/syft/core/common/message.py", line 183, in _proto2object
_deserialize(blob=proto.message, from_bytes=True), SyftMessage
File "/opt/conda/lib/python3.9/site-packages/syft/core/common/serde/deserialize.py", line 89, in _deserialize
res = _proto2object(proto=blob)
File "/opt/conda/lib/python3.9/site-packages/syft/core/node/common/action/run_class_method_action.py", line 289, in _proto2object
_self=_deserialize(blob=proto._self),
File "/opt/conda/lib/python3.9/site-packages/syft/core/common/serde/deserialize.py", line 89, in _deserialize
res = _proto2object(proto=blob)
File "/opt/conda/lib/python3.9/site-packages/syft/core/pointer/pointer.py", line 335, in _proto2object
points_to_type = sy.lib_ast.query(proto.points_to_object_with_path)
File "/opt/conda/lib/python3.9/site-packages/syft/ast/attribute.py", line 159, in query
return self.attrs[_path[0]].query(path=_path[1:])
File "/opt/conda/lib/python3.9/site-packages/syft/ast/attribute.py", line 159, in query
return self.attrs[_path[0]].query(path=_path[1:])
File "/opt/conda/lib/python3.9/site-packages/syft/ast/attribute.py", line 159, in query
return self.attrs[_path[0]].query(path=_path[1:])
[Previous line repeated 1 more time]
File "/opt/conda/lib/python3.9/site-packages/syft/ast/attribute.py", line 161, in query
traceback_and_raise(
File "/opt/conda/lib/python3.9/site-packages/syft/logger.py", line 61, in traceback_and_raise
raise e
ValueError: Path ReplicatedSharedTensorShareTensorUnion not present in the AST.
[2021-11-27T15:57:16.763512+0000][CRITICAL][logger]][41] <class 'syft.core.store.store_memory.MemoryStore'> __delitem__ error <UID: f75cd6b7f45d4e61ba403d4836eed90d>.
[2021-11-27T15:57:16.800770+0000][CRITICAL][logger]][41] Path sympc.session.Session not present in the AST.
[2021-11-27T15:57:16.821268+0000][CRITICAL][logger]][41] Path sympc.session.Session not present in the AST.
ERROR:asyncio:Exception in callback AsyncIOEventEmitter._emit_run.<locals>._callback(<Task finishe...in the AST.')>) at /opt/conda/lib/python3.9/site-packages/pyee/_asyncio.py:57
handle: <Handle AsyncIOEventEmitter._emit_run.<locals>._callback(<Task finishe...in the AST.')>) at /opt/conda/lib/python3.9/site-packages/pyee/_asyncio.py:57>
Traceback (most recent call last):
File "/opt/conda/lib/python3.9/asyncio/events.py", line 80, in _run
self._context.run(self._callback, *self._args)
File "/opt/conda/lib/python3.9/site-packages/pyee/_asyncio.py", line 64, in _callback
self.emit("error", exc)
File "/opt/conda/lib/python3.9/site-packages/pyee/_base.py", line 118, in emit
self._emit_handle_potential_error(event, args[0] if args else None)
File "/opt/conda/lib/python3.9/site-packages/pyee/_base.py", line 88, in _emit_handle_potential_error
raise error
File "/opt/conda/lib/python3.9/asyncio/tasks.py", line 256, in __step
result = coro.send(None)
File "/opt/conda/lib/python3.9/site-packages/syft/grid/connections/webrtc.py", line 233, in on_message
await self.consumer(msg=message)
File "/opt/conda/lib/python3.9/site-packages/syft/grid/connections/webrtc.py", line 449, in consumer
traceback_and_raise(e)
File "/opt/conda/lib/python3.9/site-packages/syft/logger.py", line 61, in traceback_and_raise
raise e
File "/opt/conda/lib/python3.9/site-packages/syft/grid/connections/webrtc.py", line 417, in consumer
_msg = _deserialize(blob=msg, from_bytes=True)
File "/opt/conda/lib/python3.9/site-packages/syft/core/common/serde/deserialize.py", line 89, in _deserialize
res = _proto2object(proto=blob)
File "/opt/conda/lib/python3.9/site-packages/syft/core/common/message.py", line 183, in _proto2object
_deserialize(blob=proto.message, from_bytes=True), SyftMessage
File "/opt/conda/lib/python3.9/site-packages/syft/core/common/serde/deserialize.py", line 89, in _deserialize
res = _proto2object(proto=blob)
File "/opt/conda/lib/python3.9/site-packages/syft/core/node/common/action/run_class_method_action.py", line 289, in _proto2object
_self=_deserialize(blob=proto._self),
File "/opt/conda/lib/python3.9/site-packages/syft/core/common/serde/deserialize.py", line 89, in _deserialize
res = _proto2object(proto=blob)
File "/opt/conda/lib/python3.9/site-packages/syft/core/pointer/pointer.py", line 335, in _proto2object
points_to_type = sy.lib_ast.query(proto.points_to_object_with_path)
File "/opt/conda/lib/python3.9/site-packages/syft/ast/attribute.py", line 161, in query
traceback_and_raise(
File "/opt/conda/lib/python3.9/site-packages/syft/logger.py", line 61, in traceback_and_raise
raise e
ValueError: Path sympc.session.Session not present in the AST.
[2021-11-27T15:57:52.402899+0000][CRITICAL][logger]][41] Path ReplicatedSharedTensorShareTensorUnion not present in the AST.
[2021-11-27T15:57:52.404386+0000][CRITICAL][logger]][41] Path ReplicatedSharedTensorShareTensorUnion not present in the AST.
ERROR:asyncio:Exception in callback AsyncIOEventEmitter._emit_run.<locals>._callback(<Task finishe...in the AST.')>) at /opt/conda/lib/python3.9/site-packages/pyee/_asyncio.py:57
handle: <Handle AsyncIOEventEmitter._emit_run.<locals>._callback(<Task finishe...in the AST.')>) at /opt/conda/lib/python3.9/site-packages/pyee/_asyncio.py:57>
Traceback (most recent call last):
File "/opt/conda/lib/python3.9/asyncio/events.py", line 80, in _run
self._context.run(self._callback, *self._args)
File "/opt/conda/lib/python3.9/site-packages/pyee/_asyncio.py", line 64, in _callback
self.emit("error", exc)
File "/opt/conda/lib/python3.9/site-packages/pyee/_base.py", line 118, in emit
self._emit_handle_potential_error(event, args[0] if args else None)
File "/opt/conda/lib/python3.9/site-packages/pyee/_base.py", line 88, in _emit_handle_potential_error
raise error
File "/opt/conda/lib/python3.9/asyncio/tasks.py", line 256, in __step
result = coro.send(None)
File "/opt/conda/lib/python3.9/site-packages/syft/grid/connections/webrtc.py", line 233, in on_message
await self.consumer(msg=message)
File "/opt/conda/lib/python3.9/site-packages/syft/grid/connections/webrtc.py", line 449, in consumer
traceback_and_raise(e)
File "/opt/conda/lib/python3.9/site-packages/syft/logger.py", line 61, in traceback_and_raise
raise e
File "/opt/conda/lib/python3.9/site-packages/syft/grid/connections/webrtc.py", line 417, in consumer
_msg = _deserialize(blob=msg, from_bytes=True)
File "/opt/conda/lib/python3.9/site-packages/syft/core/common/serde/deserialize.py", line 89, in _deserialize
res = _proto2object(proto=blob)
File "/opt/conda/lib/python3.9/site-packages/syft/core/common/message.py", line 183, in _proto2object
_deserialize(blob=proto.message, from_bytes=True), SyftMessage
File "/opt/conda/lib/python3.9/site-packages/syft/core/common/serde/deserialize.py", line 89, in _deserialize
res = _proto2object(proto=blob)
File "/opt/conda/lib/python3.9/site-packages/syft/core/node/common/action/run_class_method_action.py", line 289, in _proto2object
_self=_deserialize(blob=proto._self),
File "/opt/conda/lib/python3.9/site-packages/syft/core/common/serde/deserialize.py", line 89, in _deserialize
res = _proto2object(proto=blob)
File "/opt/conda/lib/python3.9/site-packages/syft/core/pointer/pointer.py", line 335, in _proto2object
points_to_type = sy.lib_ast.query(proto.points_to_object_with_path)
File "/opt/conda/lib/python3.9/site-packages/syft/ast/attribute.py", line 159, in query
return self.attrs[_path[0]].query(path=_path[1:])
File "/opt/conda/lib/python3.9/site-packages/syft/ast/attribute.py", line 159, in query
return self.attrs[_path[0]].query(path=_path[1:])
File "/opt/conda/lib/python3.9/site-packages/syft/ast/attribute.py", line 159, in query
return self.attrs[_path[0]].query(path=_path[1:])
[Previous line repeated 1 more time]
File "/opt/conda/lib/python3.9/site-packages/syft/ast/attribute.py", line 161, in query
traceback_and_raise(
File "/opt/conda/lib/python3.9/site-packages/syft/logger.py", line 61, in traceback_and_raise
raise e
ValueError: Path ReplicatedSharedTensorShareTensorUnion not present in the AST.
[2021-11-27T15:57:52.544584+0000][CRITICAL][logger]][41] Path ReplicatedSharedTensorShareTensorUnion not present in the AST.
[2021-11-27T15:57:52.547442+0000][CRITICAL][logger]][41] Path ReplicatedSharedTensorShareTensorUnion not present in the AST.
ERROR:asyncio:Exception in callback AsyncIOEventEmitter._emit_run.<locals>._callback(<Task finishe...in the AST.')>) at /opt/conda/lib/python3.9/site-packages/pyee/_asyncio.py:57
handle: <Handle AsyncIOEventEmitter._emit_run.<locals>._callback(<Task finishe...in the AST.')>) at /opt/conda/lib/python3.9/site-packages/pyee/_asyncio.py:57>
Traceback (most recent call last):
File "/opt/conda/lib/python3.9/asyncio/events.py", line 80, in _run
self._context.run(self._callback, *self._args)
File "/opt/conda/lib/python3.9/site-packages/pyee/_asyncio.py", line 64, in _callback
self.emit("error", exc)
File "/opt/conda/lib/python3.9/site-packages/pyee/_base.py", line 118, in emit
self._emit_handle_potential_error(event, args[0] if args else None)
File "/opt/conda/lib/python3.9/site-packages/pyee/_base.py", line 88, in _emit_handle_potential_error
raise error
File "/opt/conda/lib/python3.9/asyncio/tasks.py", line 256, in __step
result = coro.send(None)
File "/opt/conda/lib/python3.9/site-packages/syft/grid/connections/webrtc.py", line 233, in on_message
await self.consumer(msg=message)
File "/opt/conda/lib/python3.9/site-packages/syft/grid/connections/webrtc.py", line 449, in consumer
traceback_and_raise(e)
File "/opt/conda/lib/python3.9/site-packages/syft/logger.py", line 61, in traceback_and_raise
raise e
File "/opt/conda/lib/python3.9/site-packages/syft/grid/connections/webrtc.py", line 417, in consumer
_msg = _deserialize(blob=msg, from_bytes=True)
File "/opt/conda/lib/python3.9/site-packages/syft/core/common/serde/deserialize.py", line 89, in _deserialize
res = _proto2object(proto=blob)
File "/opt/conda/lib/python3.9/site-packages/syft/core/common/message.py", line 183, in _proto2object
_deserialize(blob=proto.message, from_bytes=True), SyftMessage
File "/opt/conda/lib/python3.9/site-packages/syft/core/common/serde/deserialize.py", line 89, in _deserialize
res = _proto2object(proto=blob)
File "/opt/conda/lib/python3.9/site-packages/syft/core/node/common/action/run_class_method_action.py", line 290, in _proto2object
args=list(map(lambda x: _deserialize(blob=x), proto.args)),
File "/opt/conda/lib/python3.9/site-packages/syft/core/node/common/action/run_class_method_action.py", line 290, in <lambda>
args=list(map(lambda x: _deserialize(blob=x), proto.args)),
File "/opt/conda/lib/python3.9/site-packages/syft/core/common/serde/deserialize.py", line 89, in _deserialize
res = _proto2object(proto=blob)
File "/opt/conda/lib/python3.9/site-packages/syft/core/pointer/pointer.py", line 335, in _proto2object
points_to_type = sy.lib_ast.query(proto.points_to_object_with_path)
File "/opt/conda/lib/python3.9/site-packages/syft/ast/attribute.py", line 159, in query
return self.attrs[_path[0]].query(path=_path[1:])
File "/opt/conda/lib/python3.9/site-packages/syft/ast/attribute.py", line 159, in query
return self.attrs[_path[0]].query(path=_path[1:])
File "/opt/conda/lib/python3.9/site-packages/syft/ast/attribute.py", line 159, in query
return self.attrs[_path[0]].query(path=_path[1:])
[Previous line repeated 1 more time]
File "/opt/conda/lib/python3.9/site-packages/syft/ast/attribute.py", line 161, in query
traceback_and_raise(
File "/opt/conda/lib/python3.9/site-packages/syft/logger.py", line 61, in traceback_and_raise
raise e
ValueError: Path ReplicatedSharedTensorShareTensorUnion not present in the AST.
[2021-11-27T15:57:52.555167+0000][CRITICAL][logger]][41] <class 'syft.core.store.store_memory.MemoryStore'> __delitem__ error <UID: 0fbbbaa35c76451e9b7b52c9aa6df455>.
[2021-11-27T15:57:52.566703+0000][CRITICAL][logger]][41] <class 'syft.core.store.store_memory.MemoryStore'> __getitem__ error <UID: afc3651963e14748bfc8e833b012439f> <UID: afc3651963e14748bfc8e833b012439f>
[2021-11-27T15:57:52.568993+0000][CRITICAL][logger]][41] <UID: afc3651963e14748bfc8e833b012439f>
[2021-11-27T15:57:52.574127+0000][CRITICAL][logger]][41] <UID: afc3651963e14748bfc8e833b012439f>
[2021-11-27T15:57:52.574886+0000][CRITICAL][logger]][41] <UID: afc3651963e14748bfc8e833b012439f>
ERROR:asyncio:Exception in callback AsyncIOEventEmitter._emit_run.<locals>._callback(<Task finishe...833b012439f>)>) at /opt/conda/lib/python3.9/site-packages/pyee/_asyncio.py:57
handle: <Handle AsyncIOEventEmitter._emit_run.<locals>._callback(<Task finishe...833b012439f>)>) at /opt/conda/lib/python3.9/site-packages/pyee/_asyncio.py:57>
Traceback (most recent call last):
File "/opt/conda/lib/python3.9/asyncio/events.py", line 80, in _run
self._context.run(self._callback, *self._args)
File "/opt/conda/lib/python3.9/site-packages/pyee/_asyncio.py", line 64, in _callback
self.emit("error", exc)
File "/opt/conda/lib/python3.9/site-packages/pyee/_base.py", line 118, in emit
self._emit_handle_potential_error(event, args[0] if args else None)
File "/opt/conda/lib/python3.9/site-packages/pyee/_base.py", line 88, in _emit_handle_potential_error
raise error
File "/opt/conda/lib/python3.9/asyncio/tasks.py", line 256, in __step
result = coro.send(None)
File "/opt/conda/lib/python3.9/site-packages/syft/grid/connections/webrtc.py", line 233, in on_message
await self.consumer(msg=message)
File "/opt/conda/lib/python3.9/site-packages/syft/grid/connections/webrtc.py", line 449, in consumer
traceback_and_raise(e)
File "/opt/conda/lib/python3.9/site-packages/syft/logger.py", line 61, in traceback_and_raise
raise e
File "/opt/conda/lib/python3.9/site-packages/syft/grid/connections/webrtc.py", line 434, in consumer
self.recv_immediate_msg_without_reply(msg=_msg)
File "/opt/conda/lib/python3.9/site-packages/syft/grid/connections/webrtc.py", line 490, in recv_immediate_msg_without_reply
traceback_and_raise(e)
File "/opt/conda/lib/python3.9/site-packages/syft/logger.py", line 61, in traceback_and_raise
raise e
File "/opt/conda/lib/python3.9/site-packages/syft/grid/connections/webrtc.py", line 485, in recv_immediate_msg_without_reply
self.node.recv_immediate_msg_without_reply(msg=msg)
File "/opt/conda/lib/python3.9/site-packages/syft/core/node/common/node.py", line 399, in recv_immediate_msg_without_reply
self.process_message(msg=msg, router=self.immediate_msg_without_reply_router)
File "/opt/conda/lib/python3.9/site-packages/syft/core/node/common/node.py", line 481, in process_message
result = service.process(
File "/opt/conda/lib/python3.9/site-packages/syft/core/node/domain/service/request_message.py", line 222, in process
msg.object_tags.extend(node.store[msg.object_id]._tags)
File "/opt/conda/lib/python3.9/site-packages/syft/core/store/store_memory.py", line 66, in __getitem__
traceback_and_raise(e)
File "/opt/conda/lib/python3.9/site-packages/syft/logger.py", line 61, in traceback_and_raise
raise e
File "/opt/conda/lib/python3.9/site-packages/syft/core/store/store_memory.py", line 63, in __getitem__
return self._objects[key]
KeyError: <UID: afc3651963e14748bfc8e833b012439f>
[2021-11-27T15:57:52.661545+0000][CRITICAL][logger]][41] <class 'syft.core.store.store_memory.MemoryStore'> __getitem__ error <UID: afc3651963e14748bfc8e833b012439f> <UID: afc3651963e14748bfc8e833b012439f>
[2021-11-27T15:57:52.662557+0000][CRITICAL][logger]][41] <UID: afc3651963e14748bfc8e833b012439f>
[2021-11-27T15:57:52.663229+0000][CRITICAL][logger]][41] Unable to Get Object with ID <UID: afc3651963e14748bfc8e833b012439f> from store. Possible dangling Pointer. <UID: afc3651963e14748bfc8e833b012439f>
[2021-11-27T15:57:52.672784+0000][CRITICAL][logger]][41] Unable to Get Object with ID <UID: afc3651963e14748bfc8e833b012439f> from store. Possible dangling Pointer. <UID: afc3651963e14748bfc8e833b012439f>
ERROR:asyncio:Exception in callback Transaction.__retry()
handle: <TimerHandle when=22498.439884 Transaction.__retry()>
Traceback (most recent call last):
File "/opt/conda/lib/python3.9/asyncio/events.py", line 80, in _run
self._context.run(self._callback, *self._args)
File "/opt/conda/lib/python3.9/site-packages/aioice/stun.py", line 306, in __retry
self.__future.set_exception(TransactionTimeout())
File "/opt/conda/lib/python3.9/asyncio/futures.py", line 270, in set_exception
raise exceptions.InvalidStateError(f'{self._state}: {self!r}')
asyncio.exceptions.InvalidStateError: FINISHED: <Future finished result=(Message(messa...\xcc\x14\xea'), ('172.25.0.5', 45515))>
ERROR:asyncio:Exception in callback Transaction.__retry()
handle: <TimerHandle when=64373.296573857 Transaction.__retry()>
Traceback (most recent call last):
File "/opt/conda/lib/python3.9/asyncio/events.py", line 80, in _run
self._context.run(self._callback, *self._args)
File "/opt/conda/lib/python3.9/site-packages/aioice/stun.py", line 306, in __retry
self.__future.set_exception(TransactionTimeout())
File "/opt/conda/lib/python3.9/asyncio/futures.py", line 270, in set_exception
raise exceptions.InvalidStateError(f'{self._state}: {self!r}')
asyncio.exceptions.InvalidStateError: FINISHED: <Future finished result=(Message(messa...xa2\x91"\xe1'), ('172.25.0.5', 45515))>
ERROR:asyncio:Exception in callback Transaction.__retry()
handle: <TimerHandle when=64718.658408357 Transaction.__retry()>
Traceback (most recent call last):
File "/opt/conda/lib/python3.9/asyncio/events.py", line 80, in _run
self._context.run(self._callback, *self._args)
File "/opt/conda/lib/python3.9/site-packages/aioice/stun.py", line 306, in __retry
self.__future.set_exception(TransactionTimeout())
File "/opt/conda/lib/python3.9/asyncio/futures.py", line 270, in set_exception
raise exceptions.InvalidStateError(f'{self._state}: {self!r}')
asyncio.exceptions.InvalidStateError: FINISHED: <Future finished result=(Message(messa...xa9\x8d\x84#'), ('172.25.0.5', 45515))>
###Markdown
--- 5 – Terminate Controller. Whenever you have finished with this notebook, be sure to terminate the controller. This is especially important if your business logic runs across multiple notebooks. (Note: terminating the controller will not terminate the Duet session.)
###Code
await agent_controller.terminate()
###Output
_____no_output_____
###Markdown
PETs/TETs – Hyperledger Aries / PySyft – Manufacturer 2 (Holder) 🚛
###Code
%%javascript
document.title = '🚛 Manufacturer2'
###Output
_____no_output_____
###Markdown
PART 3: Connect with City to Analyze Data**What:** Share encrypted data with City agent in a trust- and privacy-preserving manner**Why:** Share data with City agent (e.g., to obtain funds)**How:** 1. [Initiate Manufacturer's AgentCommunicationManager (ACM)](1)2. [Connect anonymously with the City agent via a multi-use SSI invitation](2)3. [Prove Manufacturer2 Agent is a certified manufacturer via VCs](3)4. [Establish anonymous Duet Connection to share encrypted data](4)**Accompanying Agents and Notebooks:*** City 🏙️️: `03_connect_with_manufacturer.ipynb`* Optional – Manufacturer1 🚗: `03_connect_with_city.ipynb`* Optional – Manufacturer3 🛵: `03_connect_with_city.ipynb` --- 0 - Setup 0.1 - Imports
###Code
import os
import numpy as np
import pandas as pd
import syft as sy
import torch
from aries_cloudcontroller import AriesAgentController
from libs.agent_connection_manager import CredentialHolder
###Output
_____no_output_____
###Markdown
0.2 – Variables
###Code
# Get relevant details from .env file
api_key = os.getenv("ACAPY_ADMIN_API_KEY")
admin_url = os.getenv("ADMIN_URL")
webhook_port = int(os.getenv("WEBHOOK_PORT"))
webhook_host = "0.0.0.0"
###Output
_____no_output_____ |
week03/.ipynb_checkpoints/prep_notebook_fishData_redo_week03-checkpoint.ipynb | ###Markdown
Week 03: More analysis with the fish datasetSee lecture slides for more info about how this dataset was collected. Topics: 1. Further data exploration and some beginning Stats Functions 1. Example 1: Croatian Imports 1. Example 2: Plotting by time & dataframes in R Resize plots:
###Code
require(repr)
options(repr.plot.width=10, repr.plot.height=4)
###Output
Loading required package: repr
###Markdown
1. Read in fish data Read in fish data:
###Code
fishdata = read.csv("undata_fish_2020.csv")
###Output
_____no_output_____
###Markdown
Make sure this is stored somewhere you can remember! You can put it in the same directory as this file (or whatever R-script you are working from) or you can specify a location. For example, on my Mac I can specify the default `Downloads` folder as the location with:```rfishdata = read.csv("~/Downloads/undata_fish_2020.csv")``` Let's make some vectors out of this data - you can use the data as a dataframe (which we'll get to later) but since many folks have a Python background, we might be more used to doing things with vectors:
###Code
# make some vectors, first country:
country = fishdata[,1]
# how about year of data?
year = fishdata[,2]
# how about type of fish
type = fishdata[,3]
# how about transaction type? (import, export, re-export/import)
transaction = fishdata[,4]
# how about the cash amount of the transaction?
trade_usd = fishdata[,5]
# how about the weight of the fish in kg?
weight = fishdata[,6]
# how about the quantity name?
quant_name = fishdata[,7] # some of the "quantity" measures are weight, number of items, or nothing
###Output
_____no_output_____
###Markdown
2. Exploring the fish data The first step here is to explore our dataset - let's look at one vector at a time.Country of each case:
###Code
barplot(table(country)) # note: if you stretch the plot window in RStudio you see more/less data
###Output
_____no_output_____
###Markdown
What are the different countries?
###Code
print(levels(country))
###Output
[1] "Afghanistan" "Albania"
[3] "Algeria" "Andorra"
[5] "Angola" "Anguilla"
[7] "Antigua and Barbuda" "Argentina"
[9] "Armenia" "Aruba"
[11] "Australia" "Austria"
[13] "Azerbaijan" "Bahamas"
[15] "Bahrain" "Bangladesh"
[17] "Barbados" "Belarus"
[19] "Belgium" "Belgium-Luxembourg"
[21] "Belize" "Benin"
[23] "Bermuda" "Bhutan"
[25] "Bolivia (Plurinational State of)" "Bosnia Herzegovina"
[27] "Botswana" "Brazil"
[29] "Brunei Darussalam" "Bulgaria"
[31] "Burkina Faso" "Burundi"
[33] "Cabo Verde" "Cambodia"
[35] "Cameroon" "Canada"
[37] "Central African Rep." "Chad"
[39] "Chile" "China"
[41] "China, Hong Kong SAR" "China, Macao SAR"
[43] "Colombia" "Comoros"
[45] "Congo" "Cook Isds"
[47] "Costa Rica" "Croatia"
[49] "Cuba" "Cyprus"
[51] "Czech Rep." "Côte d'Ivoire"
[53] "Denmark" "Djibouti"
[55] "Dominica" "Dominican Rep."
[57] "Ecuador" "Egypt"
[59] "El Salvador"
###Markdown
How about year of data?
###Code
barplot(table(year))
###Output
_____no_output_____
###Markdown
How about type of fish?
###Code
barplot(table(type))
###Output
_____no_output_____
###Markdown
Since it's hard to see the labels, maybe we want to print out the levels by hand:
###Code
levels(type)
###Output
_____no_output_____
###Markdown
So, all-in-all we see we have about 87 different types of fish import/export in this dataset. How about transaction type? (import, export, re-export/import)
###Code
barplot(table(transaction))
###Output
_____no_output_____
###Markdown
How about the cash amount of the transaction?
###Code
hist(trade_usd) # numerical data
###Output
_____no_output_____
###Markdown
We can see here that this histogram tells us very little.Why? Well let's print out the values of `trade_usd` and take a look:
###Code
head(trade_usd, n=10)
###Output
_____no_output_____
###Markdown
These numbers are very large overall. One option is that we can divide by something like $1000 and see what that looks like:
###Code
hist(trade_usd/1000., xlab='Trade in $1000 USD')
###Output
_____no_output_____
###Markdown
Well that didn't do too much good! Why is that? Let's look at some summary stats for this variable:
###Code
print(summary(trade_usd))
###Output
Min. 1st Qu. Median Mean 3rd Qu. Max.
0.000e+00 5.347e+03 7.026e+04 8.734e+06 9.152e+05 4.368e+09
###Markdown
So, the min is essentially $0 and the max is about $4.4 billion! You can see that the median & mean are very different, and the IQR extends from roughly $5,000 to just under $1 million. When we have data over such a huge range that we want to take a look at, one good idea is to take the log and plot that. Recall log10 means "log base 10". What about a log scale plot?
###Code
hist(log10(trade_usd))
###Output
_____no_output_____
###Markdown
Now we can see a lot more detail - what this plot means is that the peak of the distribution is log10 ~ 5 or at 0.1 million dollars ($10^5$ dollars). How about the weight of the fish in kg?
###Code
hist(weight)
###Output
_____no_output_____
###Markdown
Hard to see - let's look at a summary again:
###Code
print(summary(weight))
###Output
Min. 1st Qu. Median Mean 3rd Qu. Max. NA's
0.000e+00 9.410e+02 1.644e+04 2.506e+06 2.328e+05 1.484e+09 879
###Markdown
So again, min & max have a wide range, and a large spread in quartiles. Let's try a log plot again:
###Code
hist(log10(weight))
###Output
_____no_output_____
###Markdown
That this looks similar to the trade_usd histogram makes sense inuitivley - more weight of fish probably corresponds to more money flowing. How about the quantity name?
###Code
levels(quant_name)
###Output
_____no_output_____
###Markdown
Looks like some of the "quantity" measures are weight, number of items, or nothing. Since this is non-numeric, with only 3 values, let's just look at the table:
###Code
table(quant_name)
###Output
_____no_output_____
###Markdown
It looks like most entries are in kg, and only a few are in number of items. A few specify `No Quantity` - we might want to be careful that we are comparing "apples to apples" - i.e. "like things to like things" and make sure we are not comparing measurements in kg to measurements in number of items. 3. Further data exploration and some beginning Stats Functions I'm going to show a few stats functions that we'll use later in class. We'll go over them in a lot of detail later, but for right now, I'm just showing an example of how one might use R to explore a dataset and try to learn stuff about it. I'll say a lot of "this will be quantified later" and it will! So don't get frustrated if it's weird or vague at this point! 3A - Example 1: Croatian Imports. Let's start by grabbing a subset of our data. We can do this by "masking" out our dataset and only looking at these specific values using "boolean" operators. Let's say I want *only* Croatian data:
###Code
mask = country=="Croatia" # Feel feel to pick your own country! recall: print(levels(country)) to take a look
###Output
_____no_output_____
###Markdown
I can then make a subset of this dataset to work with, for example, to plot the total amount of trade in USD from Croatia:
###Code
trade_usd_croatia = subset(trade_usd,mask)
###Output
_____no_output_____
###Markdown
Here, I use the "subset" function to grab only data with a country code of "croatia".What does the histogram look like?
###Code
hist(trade_usd_croatia)
###Output
_____no_output_____
###Markdown
Again, we probably want to do a log10-based histogram:
###Code
hist(log10(trade_usd_croatia))
###Output
_____no_output_____
###Markdown
We can make more complex masks, for example, remember how we only wanted to compare "like to like" in terms of how things are weighted/counted?Let's also "mask out" only Croatian data that is measured in kg:
###Code
mask = (country=="Croatia") & (quant_name == "Weight in kilograms")
trade_usd_croatia_kg = subset(trade_usd,mask)
###Output
_____no_output_____
###Markdown
Let's overplot this new dataset on our old histogram:
###Code
hist(log10(trade_usd_croatia))
hist(log10(trade_usd_croatia_kg),col=rgb(1,0,0),add=T) # add=T to include on old plot
###Output
_____no_output_____
###Markdown
It turns out this was an important addition to our mask - it changes how things look at the lowest trade in USD values.Finally, we can further subset and look at only how much import trade they are doing:
###Code
mask = (country=="Croatia") & (quant_name == "Weight in kilograms") & (transaction == "Import")
trade_usd_croatia_kg_import = subset(trade_usd,mask)
options(repr.plot.width=10, repr.plot.height=6)
hist(log10(trade_usd_croatia))
hist(log10(trade_usd_croatia_kg),col=rgb(1,0,0),add=T)
hist(log10(trade_usd_croatia_kg_import),col=rgb(0,0,1),add=T)
# and of course, no plot is complete without a legend!
legend("topleft",c("Croatian Trade","Croatian Trade (kg)", "Croatian Imports (kg)"),
fill=c(rgb(1,1,1),rgb(1,0,0),rgb(0,0,1)))
###Output
_____no_output_____
###Markdown
This subsetting is also useful for looking at summary statistics of this dataset:
###Code
print(summary(trade_usd_croatia))
print(summary(trade_usd_croatia_kg))
###Output
Min. 1st Qu. Median Mean 3rd Qu. Max.
1 8118 74396 1375985 538727 77440578
Min. 1st Qu. Median Mean 3rd Qu. Max.
7 12107 94297 1468056 619837 77440578
###Markdown
With the difference between these two, we can already see that if we select for things weighted in kg we find a slightly higher mean/median, etc.This sort of lines up with what we expect from looking at the histograms.Let's also finally compare the imports to the exports from Croatia:
###Code
mask = (country=="Croatia") & (quant_name == "Weight in kilograms") & (transaction == "Export")
trade_usd_croatia_kg_export = subset(trade_usd,mask)
hist(log10(trade_usd_croatia))
hist(log10(trade_usd_croatia_kg),col=rgb(1,0,0),add=T)
hist(log10(trade_usd_croatia_kg_import),col=rgb(0,0,1),add=T)
hist(log10(trade_usd_croatia_kg_export),col=rgb(0,1,0),add=T)
# and, obviously, update our legend:
legend("topleft",c("Croatian Trade","Croatian Trade (kg)",
"Croatian Imports (kg)", "Croatian Exports (kg)"),
fill=c(rgb(1,1,1),rgb(1,0,0),rgb(0,0,1),rgb(0,1,0)))
###Output
_____no_output_____
###Markdown
By eye it looks like the means/medians might be different, but let's use summary to check:
###Code
print('IMPORTS')
print(summary(trade_usd_croatia_kg_import))
print('EXPORTS')
print(summary(trade_usd_croatia_kg_export))
###Output
[1] "IMPORTS"
Min. 1st Qu. Median Mean 3rd Qu. Max.
49 12907 105273 1115864 686450 35298642
[1] "EXPORTS"
Min. 1st Qu. Median Mean 3rd Qu. Max.
7 11477 85244 1882598 547928 77440578
###Markdown
Again, our histogram seems to be accurate - the export median < import, though note this is not true of the mean.This makes sense because if we look at the STDDEV of each:
###Code
print(sd(trade_usd_croatia_kg_import))
print(sd(trade_usd_croatia_kg_export))
###Output
[1] 3053592
[1] 7065054
###Markdown
The sd of the exports > the sd of the imports, meaning there is a larger spread of trade in USD in the export dataset, so it makes sense that the mean might differ from the median. **Practice Question: what is the skewness of each histogram?** **Practice Question: Can we accurately say for sure that the medians between these are different? Can we quantify how sure we are these means or medians are different?**$\rightarrow$ more on these concepts later in class. 3B - Example 2: Plotting by time & dataframes in R. We can also check out relationships between the data in other ways, like how things change over time. To make sure we are comparing like-to-like, we should also apply whatever mask we are using to our time variable. Let's say we want to see how Croatian imports change with time:
###Code
mask = (country=="Croatia") & (quant_name == "Weight in kilograms") & (transaction == "Import")
year_croatia_import_kg = subset(year, mask)
###Output
_____no_output_____
###Markdown
Now we can plot the imports into Croatia as a function of time:
###Code
plot(year_croatia_import_kg,trade_usd_croatia_kg_import, xlab="Year",ylab="Import Trade in USD in Croatia")
###Output
_____no_output_____
###Markdown
So this has multiple values - what are they? They are the values for each type of fish throughout the years. If we want to sum along each year there are plenty of fancy ways to do this. One thing that is nice about R is its use of dataframes. We'll work more with this later, but as an intro, we could either use our original dataframe, or create a new dataframe out of our subset data. Let's try the latter option. First, let's take a look at our original dataframe:
###Code
head(fishdata)
###Output
_____no_output_____
###Markdown
Also try: `fishdata$` and see what autocompletes in RStudio. Let's subset into a new frame based on the masks we used before:
###Code
mask = (fishdata$Country.or.Area == "Croatia") & (fishdata$Quantity.Name == "Weight in kilograms") & (fishdata$Flow == "Import")
croatianImports = subset(fishdata,mask)
head(croatianImports)
###Output
_____no_output_____
###Markdown
So you can see from the above that we get the same type of dataframe, or data list, except now if we do `croatianImports$Country.or.Area` in RStudio it's only Croatia:
###Code
head(croatianImports$Country.or.Area)
###Output
_____no_output_____
###Markdown
We'll talk more about functions later, so don't worry if this doesn't make sense now, but we can use something called the `aggregate` function to aggregate the "Trade USD" variable in our dataframe by year:
###Code
tradeUSD_by_year = aggregate(Trade..USD. ~ Year, data=croatianImports, sum)
###Output
_____no_output_____
###Markdown
What does that ~ mean?? In this case it means "aggregate Trade USD by Year". But in other functions it means different things! We'll look at this later in class as well when we start thinking about linear regression.
###Code
plot(tradeUSD_by_year$Year, tradeUSD_by_year$Trade..USD.)
###Output
_____no_output_____
###Markdown
We could do fancier aggregates with our base data, but for now, this was just a taste. EXTRA:
###Code
myfit = lm(tradeUSD_by_year$Trade..USD. ~ tradeUSD_by_year$Year)
plot(tradeUSD_by_year$Year, tradeUSD_by_year$Trade..USD.)
abline(myfit, col='blue')
###Output
_____no_output_____ |
category4.ipynb | ###Markdown
###Code
import json
import tensorflow as tf
import numpy as np
import urllib
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
class myCallback(tf.keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs={}):
if(logs.get('accuracy')>0.99):
print("\nReached 99.9% accuracy so cancelling training!")
self.model.stop_training = True
callbacks = myCallback()
def solution_model():
url = 'https://storage.googleapis.com/download.tensorflow.org/data/sarcasm.json'
urllib.request.urlretrieve(url, 'sarcasm.json')
# DO NOT CHANGE THIS CODE OR THE TESTS MAY NOT WORK
vocab_size = 1000
embedding_dim = 16
max_length = 120
trunc_type='post'
padding_type='post'
oov_tok = "<OOV>"
training_size = 20000
sentences = []
labels = []
with open("sarcasm.json", 'r') as f:
datastore = json.load(f)
# YOUR CODE HERE
for item in datastore:
sentences.append(item['headline'])
labels.append(item['is_sarcastic'])
training_sentences = sentences[0:training_size]
testing_sentences = sentences[training_size:]
training_labels = labels[0:training_size]
testing_labels = labels[training_size:]
tokenizer = Tokenizer(num_words=vocab_size, oov_token=oov_tok)
tokenizer.fit_on_texts(training_sentences)
word_index = tokenizer.word_index
training_sequences = tokenizer.texts_to_sequences(training_sentences)
training_padded = pad_sequences(training_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)
testing_sequences = tokenizer.texts_to_sequences(testing_sentences)
testing_padded = pad_sequences(testing_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)
training_padded = np.array(training_padded)
training_labels = np.array(training_labels)
testing_padded = np.array(testing_padded)
testing_labels = np.array(testing_labels)
model = tf.keras.Sequential([
# YOUR CODE HERE. KEEP THIS OUTPUT LAYER INTACT OR TESTS MAY FAIL
tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),
tf.keras.layers.GlobalAveragePooling1D(),
tf.keras.layers.Dense(24, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid')
])
model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])
num_epochs = 350
history = model.fit(training_padded, training_labels, epochs=num_epochs, validation_data=(testing_padded, testing_labels), verbose=2, callbacks=[callbacks])
return model
model = solution_model()
model.save("mymodel2.h5")
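# --- Illustrative sketch (added, not part of the graded solution) -------------
# Reusing the saved model for inference means re-applying the same tokenizer
# settings. Note that model.save() does not persist the tokenizer, so it would
# have to be refitted or saved separately (e.g. with pickle); `fitted_tokenizer`
# below is a placeholder name for the tokenizer fitted inside solution_model().
# loaded = tf.keras.models.load_model("mymodel2.h5")
# seqs = fitted_tokenizer.texts_to_sequences(["example headline to score"])
# padded = pad_sequences(seqs, maxlen=120, padding='post', truncating='post')
# print(loaded.predict(padded))  # value close to 1.0 -> predicted sarcastic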
###Output
_____no_output_____ |
Example3/0-Workflow.ipynb | ###Markdown
Use Altair to Generate SPLOM Chart Note: Student name removed. Submitted, Fall 2019. Table of Contents1 Introduction1.1 Visualization Technique1.2 Visualization Library2 Demonstration2.1 Basic SPLOM Chart2.2 Advanced SPLOM Chart3 Conclusion Introduction Visualization Technique This tutorial demonstrates the use of a SPLOM chart. SPLOM stands for **S**catter **PLO**t **M**atrix. As the name suggests, it comprises a number of scatter plots arranged in a square matrix. Below is an illustration of a SPLOM from the **Seaborn** library using the iris data set. We can see it neatly arranges the 4 quantitative variables of the iris data set (*sepal length, sepal width, petal length and petal width*) into the rows and columns of the matrix. Each pair of variables is plotted in a scatter plot to show their correlation (if one exists). The charts along the diagonal, where each variable is plotted against itself, are drawn as histograms to show the frequencies of each variable.
###Code
import seaborn as sns
import matplotlib.pyplot as plt
iris = sns.load_dataset('iris')
sns.pairplot(iris)
plt.show()
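# --- Preview (added for illustration, not from the original cell) -------------
# The Altair equivalent of the Seaborn SPLOM above can be built with the repeat
# operator; this sketch assumes the `altair` package is installed and reuses the
# `iris` dataframe loaded above.
# import altair as alt
# cols = ["sepal_length", "sepal_width", "petal_length", "petal_width"]
# alt.Chart(iris).mark_point(size=10).encode(
#     x=alt.X(alt.repeat("column"), type="quantitative"),
#     y=alt.Y(alt.repeat("row"), type="quantitative"),
#     color="species:N",
# ).properties(width=120, height=120).repeat(row=cols, column=cols)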
###Output
_____no_output_____ |
_archived/sstats/sstats-v2.0.ipynb | ###Markdown
Soccerstats Predictions v2.0 The changelog from v1.x:* Implement data cleaning pipeline for model predictions.* Load saved model from disk.* Use model to predict data points. A. Data Preparation 1. Read csv file
###Code
# load csv data to predict
stat_df = sqlContext.read\
.format("com.databricks.spark.csv")\
.options(header = True)\
.load("data/predFixture.csv")
###Output
_____no_output_____
###Markdown
2. Filter-out column values
###Code
from pyspark.sql.functions import udf
from pyspark.sql.types import StringType
# replace "-" values with null: HTS_teamAvgOpponentPPG, ATS_teamAvgOpponentPPG
nullify_hyphen_cols = udf(
lambda row_value: None if row_value == "-" else row_value,
StringType()
)
stat_df = (stat_df.withColumn("HTS_teamAvgOpponentPPG", nullify_hyphen_cols(stat_df.HTS_teamAvgOpponentPPG))
.withColumn("ATS_teamAvgOpponentPPG", nullify_hyphen_cols(stat_df.ATS_teamAvgOpponentPPG))
)
# drop Null values
stat_df = stat_df.dropna()
###Output
_____no_output_____
###Markdown
B. Deep Learning 1. Clean data
###Code
# drop unnecessary columns
ml_df = stat_df.drop(
"gameID", "gamePlayDate", "gamePlayTime", "gameHomeTeamName",
"gameAwayTeamName", "gameHomeTeamID","gameAwayTeamID", "leagueName",
"leagueDivisionName", "gameFtScore"
)
# separate col types: double & string
# double type features
dtype_features = [
"leagueCompletion", "HTS_teamPosition", "HTS_teamGamesPlayed", "HTS_teamGamesWon",
"HTS_teamGamesDraw", "HTS_teamGamesLost", "HTS_teamGoalsScored", "HTS_teamGoalsConceded",
"HTS_teamPoints", "HTS_teamPointsPerGame", "HTS_teamPPGlast8", "HTS_homeGamesWon",
"HTS_homeGamesDraw", "HTS_homeGamesLost", "HTS_homeGamesPlayed", "HTS_awayGamesWon",
"HTS_awayGamesDraw", "HTS_awayGamesLost", "HTS_awayGamesPlayed", "HTS_teamPPGHome",
"HTS_teamPPGAway", "HTS_teamAvgOpponentPPG", "HTS_homeGoalMargin_by1_wins",
"HTS_homeGoalMargin_by1_losses", "HTS_homeGoalMargin_by2_wins", "HTS_homeGoalMargin_by2_losses",
"HTS_homeGoalMargin_by3_wins", "HTS_homeGoalMargin_by3_losses", "HTS_homeGoalMargin_by4p_wins",
"HTS_homeGoalMargin_by4p_losses", "HTS_awayGoalMargin_by1_wins", "HTS_awayGoalMargin_by1_losses",
"HTS_awayGoalMargin_by2_wins", "HTS_awayGoalMargin_by2_losses", "HTS_awayGoalMargin_by3_wins",
"HTS_awayGoalMargin_by3_losses", "HTS_awayGoalMargin_by4p_wins", "HTS_awayGoalMargin_by4p_losses",
"HTS_totalGoalMargin_by1_wins", "HTS_totalGoalMargin_by1_losses", "HTS_totalGoalMargin_by2_wins",
"HTS_totalGoalMargin_by2_losses", "HTS_totalGoalMargin_by3_wins", "HTS_totalGoalMargin_by3_losses",
"HTS_totalGoalMargin_by4p_wins", "HTS_totalGoalMargin_by4p_losses", "HTS_homeGoalsScored",
"HTS_homeGoalsConceded", "HTS_homeGoalsScoredPerMatch", "HTS_homeGoalsConcededPerMatch",
"HTS_homeScored_ConcededPerMatch", "HTS_awayGoalsScored", "HTS_awayGoalsConceded",
"HTS_awayGoalsScoredPerMatch", "HTS_awayGoalsConcededPerMatch", "HTS_awayScored_ConcededPerMatch",
"ATS_teamPosition", "ATS_teamGamesPlayed", "ATS_teamGamesWon", "ATS_teamGamesDraw", "ATS_teamGamesLost",
"ATS_teamGoalsScored", "ATS_teamGoalsConceded", "ATS_teamPoints", "ATS_teamPointsPerGame",
"ATS_teamPPGlast8", "ATS_homeGamesWon", "ATS_homeGamesDraw", "ATS_homeGamesLost",
"ATS_homeGamesPlayed", "ATS_awayGamesWon", "ATS_awayGamesDraw", "ATS_awayGamesLost",
"ATS_awayGamesPlayed", "ATS_teamPPGHome", "ATS_teamPPGAway", "ATS_teamAvgOpponentPPG",
"ATS_homeGoalMargin_by1_wins", "ATS_homeGoalMargin_by1_losses", "ATS_homeGoalMargin_by2_wins",
"ATS_homeGoalMargin_by2_losses", "ATS_homeGoalMargin_by3_wins", "ATS_homeGoalMargin_by3_losses",
"ATS_homeGoalMargin_by4p_wins", "ATS_homeGoalMargin_by4p_losses", "ATS_awayGoalMargin_by1_wins",
"ATS_awayGoalMargin_by1_losses", "ATS_awayGoalMargin_by2_wins", "ATS_awayGoalMargin_by2_losses",
"ATS_awayGoalMargin_by3_wins", "ATS_awayGoalMargin_by3_losses", "ATS_awayGoalMargin_by4p_wins",
"ATS_awayGoalMargin_by4p_losses", "ATS_totalGoalMargin_by1_wins", "ATS_totalGoalMargin_by1_losses",
"ATS_totalGoalMargin_by2_wins", "ATS_totalGoalMargin_by2_losses", "ATS_totalGoalMargin_by3_wins",
"ATS_totalGoalMargin_by3_losses", "ATS_totalGoalMargin_by4p_wins", "ATS_totalGoalMargin_by4p_losses",
"ATS_homeGoalsScored", "ATS_homeGoalsConceded", "ATS_homeGoalsScoredPerMatch", "ATS_homeGoalsConcededPerMatch",
"ATS_homeScored_ConcededPerMatch", "ATS_awayGoalsScored", "ATS_awayGoalsConceded", "ATS_awayGoalsScoredPerMatch",
"ATS_awayGoalsConcededPerMatch", "ATS_awayScored_ConcededPerMatch"
]
# string type features
stype_features = [
"HTS_teamCleanSheetPercent", "HTS_homeOver1_5GoalsPercent",
"HTS_homeOver2_5GoalsPercent", "HTS_homeOver3_5GoalsPercent", "HTS_homeOver4_5GoalsPercent",
"HTS_awayOver1_5GoalsPercent", "HTS_awayOver2_5GoalsPercent", "HTS_awayOver3_5GoalsPercent",
"HTS_awayOver4_5GoalsPercent", "HTS_homeCleanSheets", "HTS_homeWonToNil", "HTS_homeBothTeamsScored",
"HTS_homeFailedToScore", "HTS_homeLostToNil", "HTS_awayCleanSheets", "HTS_awayWonToNil",
"HTS_awayBothTeamsScored", "HTS_awayFailedToScore", "HTS_awayLostToNil", "HTS_homeScored_ConcededBy_0",
"HTS_homeScored_ConcededBy_1", "HTS_homeScored_ConcededBy_2", "HTS_homeScored_ConcededBy_3",
"HTS_homeScored_ConcededBy_4", "HTS_homeScored_ConcededBy_5p", "HTS_homeScored_ConcededBy_0_or_1",
"HTS_homeScored_ConcededBy_2_or_3", "HTS_homeScored_ConcededBy_4p", "HTS_awayScored_ConcededBy_0",
"HTS_awayScored_ConcededBy_1", "HTS_awayScored_ConcededBy_2", "HTS_awayScored_ConcededBy_3",
"HTS_awayScored_ConcededBy_4", "HTS_awayScored_ConcededBy_5p", "HTS_awayScored_ConcededBy_0_or_1",
"HTS_awayScored_ConcededBy_2_or_3", "HTS_awayScored_ConcededBy_4p",
"ATS_teamCleanSheetPercent", "ATS_homeOver1_5GoalsPercent", "ATS_homeOver2_5GoalsPercent",
"ATS_homeOver3_5GoalsPercent", "ATS_homeOver4_5GoalsPercent", "ATS_awayOver1_5GoalsPercent",
"ATS_awayOver2_5GoalsPercent", "ATS_awayOver3_5GoalsPercent", "ATS_awayOver4_5GoalsPercent",
"ATS_homeCleanSheets", "ATS_homeWonToNil", "ATS_homeBothTeamsScored", "ATS_homeFailedToScore",
"ATS_homeLostToNil", "ATS_awayCleanSheets", "ATS_awayWonToNil", "ATS_awayBothTeamsScored",
"ATS_awayFailedToScore", "ATS_awayLostToNil", "ATS_homeScored_ConcededBy_0", "ATS_homeScored_ConcededBy_1",
"ATS_homeScored_ConcededBy_2", "ATS_homeScored_ConcededBy_3", "ATS_homeScored_ConcededBy_4",
"ATS_homeScored_ConcededBy_5p", "ATS_homeScored_ConcededBy_0_or_1", "ATS_homeScored_ConcededBy_2_or_3",
"ATS_homeScored_ConcededBy_4p", "ATS_awayScored_ConcededBy_0", "ATS_awayScored_ConcededBy_1",
"ATS_awayScored_ConcededBy_2", "ATS_awayScored_ConcededBy_3", "ATS_awayScored_ConcededBy_4",
"ATS_awayScored_ConcededBy_5p", "ATS_awayScored_ConcededBy_0_or_1", "ATS_awayScored_ConcededBy_2_or_3",
"ATS_awayScored_ConcededBy_4p"
]
# integer type features
itype_features = ["HTS_teamGoalsDifference", "ATS_teamGoalsDifference"]
from pyspark.sql.types import DoubleType, IntegerType
from pyspark.sql.functions import col
# cast types to columns: doubles
ml_df = ml_df.select(*[col(c).cast("double").alias(c) for c in dtype_features] + stype_features + itype_features)
# convert "HTS_teamGoalsDifference" & "ATS_teamGoalsDifference" to integer
int_udf = udf(
lambda r: int(r),
IntegerType()
)
# cast types to columns: integers
ml_df = ml_df.select(*[int_udf(col(col_name)).name(col_name) for col_name in itype_features] + stype_features + dtype_features)
# convert percent cols to float
percent_udf = udf(
lambda r: float(r.split("%")[0])/100,
DoubleType()
)
# cast types to columns: strings
ml_df = ml_df.select(*[percent_udf(col(col_name)).name(col_name) for col_name in stype_features] + itype_features + dtype_features)
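# (Added for illustration.) A quick sanity check that the casts above worked as
# intended is to inspect the resulting schema and a few of the converted columns:
# ml_df.printSchema()
# ml_df.select(stype_features[:3] + itype_features).show(3)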
###Output
_____no_output_____
###Markdown
2. Some featurization
###Code
import numpy as np
feature_cols = dtype_features + stype_features + itype_features
ml_df = ml_df[feature_cols]
# convert dataframe to ndarray
X_new = np.array(ml_df.select(feature_cols).collect())
print("New features shape: '{}'".format(X_new.shape))
###Output
New features shape: '(2, 187)'
###Markdown
3. Restore model from disk
###Code
from keras.models import model_from_json
# model version to restore
MODEL_VERSION = 1.6
# load json and create model
json_file = open('models/model_({}).json'.format(MODEL_VERSION), 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("models/model_({}).h5".format(MODEL_VERSION))
print("Loaded model version '{}' from disk!".format(MODEL_VERSION))
###Output
/Users/gilbert/Envs/pyspark/lib/python2.7/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
from ._conv import register_converters as _register_converters
Using TensorFlow backend.
###Markdown
4. Model predictions
###Code
import numpy as np
from prettytable import PrettyTable
# evaluate loaded model on test data
loaded_model.compile(
loss='binary_crossentropy',
optimizer='adagrad',
metrics=['accuracy'])
# make prediction: class prediction
y_new_class = loaded_model.predict_classes(X_new)
# make prediction: probability prediction
y_new_prob = loaded_model.predict_proba(X_new)
# create predictions table
predictions = PrettyTable()
predictions.field_names = [
"gamePlayDate", "gameHomeTeamName", "gameAwayTeamName", "leagueName",
"leagueDivisionName", "predClass", "predProb", "predOutcome"
]
# populate prediction table
for val in range(len(X_new)):
if y_new_class[val] == 0:
pred = "Under 3.5"
else:
pred = "Over 3.5"
# append values to predictions table
predictions.add_row([
"{}".format(stat_df.collect()[val]["gamePlayDate"]),
"{}".format(stat_df.collect()[val]["gameHomeTeamName"]),
"{}".format(stat_df.collect()[val]["gameAwayTeamName"]),
"{}".format(stat_df.collect()[val]["leagueName"]),
"{}".format(stat_df.collect()[val]["leagueDivisionName"]),
"{}".format(y_new_class[val]),
"{}".format(y_new_prob[val]),
"{}".format(pred)
])
print(predictions)
###Output
+--------------+------------------+------------------+------------+---------------------+-----------+-------------+-------------+
| gamePlayDate | gameHomeTeamName | gameAwayTeamName | leagueName | leagueDivisionName | predClass | predProb | predOutcome |
+--------------+------------------+------------------+------------+---------------------+-----------+-------------+-------------+
| 2018-04-02 | Guayaquil City | Aucas | Ecuador | Serie A - 2nd stage | [0] | [0.0189554] | Under 3.5 |
| 2018-03-10 | Southern Utd | Hamilton Wand. | NewZealand | Premiership | [1] | [0.5632164] | Over 3.5 |
+--------------+------------------+------------------+------------+---------------------+-----------+-------------+-------------+
|
iwd_2020.ipynb | ###Markdown
Tips:* Enable a GPU in Colab before running this notebook. *Edit -> Notebook settings -> Hardware accelerator -> GPU.* * Should you need to reset your environment to a clean state, you can use *Runtime -> Factory reset runtime*. IWD 2020: Training Neural Networks with TensorFlowWelcome! Today, you'll gain hands-on experience training neural networks with TensorFlow. This notebook contains several tutorials and exercises. Your instructor will guide you through the sections you'll explore today. If you're new to Deep Learning, this is a *lot* of material to cover in a short workshop. Our goals are to dive in and get started. You'll find educational resources for you to continue learning at the end, and you can complete the sections we don't finish today at home. Here's an outline of what we'll cover.1. You'll train a Deep Neural Network to classify handwritten digits. This is the "hello world" of computer vision, and a great place to begin if you're new to the subject. As an exercise, you'll use a different dataset, and modify the network.1. Next, you'll train a Convolutional Neural Network to classify images of cats and dogs, using a real-world dataset you read off disk. As an exercise, you'll use data augmentation and dropout to reduce overfitting.1. If time remains, your instructor will walk you through DeepDream. This is an advanced example that lets you visualize some of the features learned by a CNN.Okay, let's get started! Tutorial: MNISTTraining an image classifier on the MNIST dataset of handwritten digits is considered the "hello world" of computer vision. In this tutorial, you will download the dataset, then train a linear model, a neural network, and a deep neural network to classify it. **Key point:** Deep Learning is "code light, but concept heavy". You'll be able to implement a Deep Neural Network in about five lines of code, but the underlying concepts (cross-entropy, softmax, dense layers, etc) normally take a few months to learn. You do need to understand these all today to dive in. Import TensorFlow Let's import TensorFlow. At the time of writing, Colab has TensorFlow version 1.x installed by default. TensorFlow 2.x is much easier to use, so let's start with that. To switch to 2.x we'll use the magic command below. Note, you can also [install](http://tensorflow.org/install) TensorFlow by using `pip`, but in Colab, the magic command is faster.
###Code
%tensorflow_version 2.x
import tensorflow as tf
print("You are using TensorFlow version", tf.__version__)
if len(tf.config.list_physical_devices('GPU')) > 0:
print("You have a GPU enabled.")
else:
print("Enable a GPU before running this notebook.")
###Output
_____no_output_____
###Markdown
Colab has a variety of GPU types available (each new instance is assigned one randomly, depending on availability). To see which type of GPU you have, you can run ```!nvidia-smi``` in a code cell. Some are quite fast!
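For example, running the following in a code cell prints the GPU model and its memory (this is just the shell command mentioned above):
```
!nvidia-smi
```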
###Code
# In this notebook, we'll use Keras: TensorFlow's user-friendly API to
# define neural networks. Let's import Keras now.
from tensorflow import keras
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Download the MNIST datasetMNIST contains 70,000 grayscale images in 10 categories. The images are low resolution (28 by 28 pixels). An important skill in Deep Learning is exploring your dataset, and understanding the format. Let's download MNIST, and explore it now.
###Code
dataset = keras.datasets.mnist
(train_images, train_labels), (test_images, test_labels) = dataset.load_data()
###Output
_____no_output_____
###Markdown
There are 60,000 images in the training set:
###Code
print(train_images.shape)
###Output
_____no_output_____
###Markdown
And 10,000 in the testing set:
###Code
print(test_images.shape)
###Output
_____no_output_____
###Markdown
Each label is an integer between 0-9:
###Code
print(train_labels)
###Output
_____no_output_____
###Markdown
Preprocess the dataThe pixel values in the images range between 0 and 255. Let's normalize the values 0 and 1 by dividing all the images by 255. It's important that the training set and the testing set are preprocessed in the same way.
###Code
train_images = train_images / 255.0
test_images = test_images / 255.0
###Output
_____no_output_____
###Markdown
Let's display the first 25 images from the training set, and display the label below each image.
###Code
plt.figure(figsize=(10,10))
for i in range(25):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(train_images[i], cmap=plt.cm.binary)
plt.xlabel(train_labels[i])
plt.show()
###Output
_____no_output_____
###Markdown
Create the layersNeural networks are made up of layers. Here, you'll define the layers, and assemble them into a model. We will start with a single Dense layer. What does a layer do?The basic building block of a neural network is the layer. Layers extract representations from the data fed into them. For example:- The first layer in a network might receive the pixel values as input. From these, it learns to detect edges (combinations of pixels). - The next layer in the network receives edges as input, and may learn to detect lines (combinations of edges). - If you added another layer, it might learn to detect shapes (combinations of lines).The "Deep" in "Deep Learning" refers to the depth of the network. Deeper networks can learn increasingly abstract patterns. Roughly, the width of a layer (in terms of the number of neurons) refers to the number of patterns it can learn of each type.Most of deep learning consists of chaining together simple layers. Most layers, such as [tf.keras.layers.Dense](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Dense), have parameters that are initialized randomly, then tuned (or learned) during training by gradient descent.
###Code
# A linear model
model = keras.Sequential([
keras.layers.Flatten(input_shape=(28, 28)),
keras.layers.Dense(10, activation='softmax')
])
###Output
_____no_output_____
###Markdown
The first layer in this network, [tf.keras.layers.Flatten](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Flatten), transforms the format of the images from a two-dimensional array (of 28 by 28 pixels) to a one-dimensional array (of 28 * 28 = 784 pixels). Think of this layer as unstacking rows of pixels in the image and lining them up. This layer has no parameters to learn; it only reformats the data. This is necessary since Dense layers require one-dimensional arrays as input.After the pixels are flattened, this model consists of a single Dense layer. This is a densely connected, or fully connected, neural layer. The Dense layer has 10 neurons with softmax activation. This returns an array of 10 probability scores that sum to 1. After classifying an image, each neuron will contain a score that indicates the probability that the current image belongs to one of the 10 classes. Compile the modelBefore the model is ready for training, it needs a few more settings. These are added during the model's compile step:*Loss function* — This measures how accurate the model is during training. You want to minimize this function to "steer" the model in the right direction.*Optimizer* — This is how the model is updated based on the data it sees and its loss function.*Metrics* — Used to monitor the training and testing steps. The following example uses accuracy, the fraction of the images that are correctly classified.
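As an aside, the softmax mentioned above is the function $\mathrm{softmax}(z)_i = \frac{e^{z_i}}{\sum_j e^{z_j}}$, which turns the layer's 10 raw scores into positive numbers that sum to 1. For example, scores of $[2.0, 1.0, 0.1]$ map to roughly $[0.66, 0.24, 0.10]$.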
###Code
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
###Output
_____no_output_____
###Markdown
Train the modelTraining the neural network model requires the following steps:1. Feed the training data to the model. In this example, the training data is in the ```train_images``` and ```train_labels``` arrays.1. The model learns to associate images and labels.1. You ask the model to make predictions about a test set—in this example, the ```test_images``` array.1. Verify that the predictions match the labels from the ```test_labels``` array.To begin training, call the ```model.fit``` method — so called because it "fits" the model to the training data:
###Code
EPOCHS=10
model.fit(train_images, train_labels, epochs=EPOCHS)
###Output
_____no_output_____
###Markdown
As the model trains, the loss and accuracy metrics are displayed. This model reaches an accuracy of about 0.90 (or 90%) on the training data. Accuracy may be slightly different each time you run this code, since the parameters inside the Dense layer are randomly initialized. Evaluate accuracyNext, compare how the model performs on the test dataset:
###Code
test_loss, test_acc = model.evaluate(test_images, test_labels)
print('\nTest accuracy:', test_acc)
###Output
_____no_output_____
###Markdown
It turns out that the accuracy on the test dataset is a little less than the accuracy on the training dataset. This gap between training accuracy and test accuracy represents overfitting. Overfitting is when a machine learning model performs worse on new, previously unseen inputs than on the training data. An overfitted model "memorizes" the training data—with less accuracy on testing data. Make predictionsWith the model trained, you can use it to make predictions about some images.
###Code
predictions = model.predict(test_images)
###Output
_____no_output_____
###Markdown
Here, the model has predicted the label for each image in the testing set. Let's take a look at the first prediction:
###Code
print(predictions[0])
###Output
_____no_output_____
###Markdown
A prediction is an array of 10 numbers. They represent the model's "confidence" that the image corresponds to each of the 10 digits. You can see which label has the highest confidence value:
###Code
print(tf.argmax(predictions[0]))
###Output
_____no_output_____
###Markdown
Exercise: Fashion MNISTIn the above tutorial, you trained a linear model (a single Dense layer) on the MNIST dataset. As an exercise, let's modify your code above to:- Use a new dataset (Fashion MNIST)- Train a neural network (with two Dense layers, instead of just one)- Create plots to observe overfitting and underfitting InstructionsYou will need to make three changes in the code above.**1) Import the Fashion MNIST dataset** To do so, change the line```dataset = keras.datasets.mnist``` to ```dataset = keras.datasets.fashion_mnist```**2) Modify the model definition to create a neural network**To do so, change the lines from:```model = keras.Sequential([ keras.layers.Flatten(input_shape=(28, 28)), keras.layers.Dense(10, activation='softmax')])```to```model = keras.Sequential([ keras.layers.Flatten(input_shape=(28, 28)), keras.layers.Dense(128, activation='relu'), keras.layers.Dense(10, activation='softmax')])```This will define a neural network with a single hidden layer. If you like, you can experiment by adding a third Dense layer, which will create a deep neural network. For example:```model = keras.Sequential([ keras.layers.Flatten(input_shape=(28, 28)), keras.layers.Dense(128, activation='relu'), keras.layers.Dense(128, activation='relu'), keras.layers.Dense(10, activation='softmax')])```After making the above changes, on the Colab menu select *Edit -> Clear all outputs* and *Runtime -> Restart runtime* to restore this notebook to a clean state. Run the cells in the tutorial above to train your neural network on Fashion MNIST.**3) Add plots to observe overfitting**If trained for too long, a NN may begin to memorize the training data (rather than learning patterns that generalize to unseen data). This is called overfitting. Of all the hyperparameters in the design of your network (the number and width of layers, the optimizer, etc) - the most important to set properly is ```epochs```. You will learn more about this in exercise two.To create plots to observe overfitting, modify your training loop as follows.Change:```model.fit(train_images, train_labels, epochs=EPOCHS)```to:```history = model.fit(train_images, train_labels, validation_data=(test_images, test_labels), epochs=EPOCHS)```This will capture the accuracy and loss on the training and validation data after each epoch. To plot the results, create a new code cell, and add the following code:```acc = history.history['accuracy']val_acc = history.history['val_accuracy']loss = history.history['loss']val_loss = history.history['val_loss']epochs_range = range(EPOCHS)plt.figure(figsize=(8, 8))plt.subplot(1, 2, 1)plt.plot(epochs_range, acc, label='Training Accuracy')plt.plot(epochs_range, val_acc, label='Validation Accuracy')plt.legend(loc='lower right')plt.title('Training and Validation Accuracy')plt.subplot(1, 2, 2)plt.plot(epochs_range, loss, label='Training Loss')plt.plot(epochs_range, val_loss, label='Validation Loss')plt.legend(loc='upper right')plt.title('Training and Validation Loss')plt.show()``` Game break: Teachable MachineIf you'd like, now would be a good time to take a break from coding and try: https://teachablemachine.withgoogle.com/ Tutorial: Cats and DogsYour instructor will walk you through this section (please follow along and ask questions as you have them!). You'll train a CNN to classify images of cats and dogs using a real-world dataset you will download from the web. 
Download and explore the datasetAlthough you are downloading large files, you are doing so in Colab through Google Cloud Platform (instead of over your local WiFi connection). This means that downloads will usually be fast, regardless of your internet connection.
###Code
import os
# Our dataset is a zip on the web
origin = 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip'
path_to_zip = tf.keras.utils.get_file('cats_and_dogs.zip', origin=origin, extract=True)
path_to_folder = os.path.join(os.path.dirname(path_to_zip), 'cats_and_dogs_filtered')
###Output
_____no_output_____
###Markdown
The unzipped dataset has the following directory structure:cats_and_dogs_filtered|__ train |______ cats: [cat.0.jpg, cat.1.jpg, cat.2.jpg ....] |______ dogs: [dog.0.jpg, dog.1.jpg, dog.2.jpg ...]|__ validation |______ cats: [cat.2000.jpg, cat.2001.jpg, cat.2002.jpg ....] |______ dogs: [dog.2000.jpg, dog.2001.jpg, dog.2002.jpg ...] The dataset is divided into train and validation splits. Let's create variables that point to each of these directories.
###Code
train_dir = os.path.join(path_to_folder, 'train')
validation_dir = os.path.join(path_to_folder, 'validation')
train_cats_dir = os.path.join(train_dir, 'cats')
train_dogs_dir = os.path.join(train_dir, 'dogs')
validation_cats_dir = os.path.join(validation_dir, 'cats')
validation_dogs_dir = os.path.join(validation_dir, 'dogs')
###Output
_____no_output_____
###Markdown
Now let's count the number of images in each directory.
###Code
num_cats_tr = len(os.listdir(train_cats_dir))
num_dogs_tr = len(os.listdir(train_dogs_dir))
num_cats_val = len(os.listdir(validation_cats_dir))
num_dogs_val = len(os.listdir(validation_dogs_dir))
total_train = num_cats_tr + num_dogs_tr
total_val = num_cats_val + num_dogs_val
print('Total training cat images:', num_cats_tr)
print('Total training dog images:', num_dogs_tr)
print('Total validation cat images:', num_cats_val)
print('Total validation dog images:', num_dogs_val)
print('---')
print("Total training images:", total_train)
print("Total validation images:", total_val)
###Output
_____no_output_____
###Markdown
You should see that we have 3,000 total images (2,000 in train and 1,000 in validation). Note that this dataset is balanced (we have an equal number of cats and dogs). Tip: in addition to Python, you can run shell commands in Colab (for example, ```!ls $train_cats_dir```).
###Code
!ls $train_cats_dir
###Output
_____no_output_____
###Markdown
Let's display a couple images.
###Code
import matplotlib.pyplot as plt
_ = plt.imshow(plt.imread(os.path.join(train_cats_dir, "cat.0.jpg")))
_ = plt.imshow(plt.imread(os.path.join(train_cats_dir, "cat.1.jpg")))
###Output
_____no_output_____
###Markdown
Note that the images are different sizes. Before feeding them into a CNN, we'll need to reshape them all to the same dimensions. We'll take care of that in the next section. Data preprocessing Next, we will need a way to read these images off disk, and to preprocess them. Specifically, we will need to:- Read the image off disk.- Decode contents of these images and convert them into RGB arrays.- Convert the pixel values from integer to floating point types.- Rescale the pixel values from between 0 and 255 to between 0 and 1 (neural networks work better with small input values - under the hood, each input is multiplied by a weight, and large inputs could result in overflow).Fortunately, all of these tasks can be done with the `ImageDataGenerator` class provided by `tf.keras`. It can read images from disk and preprocess them into proper arrays.
###Code
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# Let's resize images to this size
IMG_HEIGHT = 150
IMG_WIDTH = 150
# Rescale the pixel values to range between 0 and 1
train_generator = ImageDataGenerator(rescale=1./255)
val_generator = ImageDataGenerator(rescale=1./255)
###Output
_____no_output_____
###Markdown
After defining the generators for training and validation images, the `flow_from_directory` method loads images from disk, applies rescaling, and resizes them to the required dimensions.
###Code
batch_size = 32 # Read a batch of 32 images at each step
train_data_gen = train_generator.flow_from_directory(batch_size=batch_size,
directory=train_dir,
shuffle=True,
target_size=(IMG_HEIGHT, IMG_WIDTH),
class_mode='binary')
val_data_gen = val_generator.flow_from_directory(batch_size=batch_size,
directory=validation_dir,
target_size=(IMG_HEIGHT, IMG_WIDTH),
class_mode='binary')
###Output
_____no_output_____
###Markdown
Use the generators to display a few images and their labelsNext, we will extract a batch of images from the training generator, then plot several of them with `matplotlib`. The `next` function returns a batch from the dataset, in the form `(x_train, y_train)`, where x_train contains the pixel values and y_train the labels.
###Code
image_batch, labels_batch = next(train_data_gen)
# The shape will be (32, 150, 150, 3)
# This means a list of 32 images, each of which is 150x150x3.
# The 3 at the end refers to the R,G,B color channels.
# A grayscale image would be (for example) 150x150x1
print(image_batch.shape)
# The shape (32,) means a list of 32 numbers
# each of these will either be 0 or 1
print(labels_batch.shape)
# This function will plot images returned by the generator
# in a grid with 1 row and 5 columns
def plot_images(images):
fig, axes = plt.subplots(1, 5, figsize=(10,10))
axes = axes.flatten()
for img, ax in zip(images, axes):
ax.imshow(img)
ax.axis('off')
plt.tight_layout()
plt.show()
plot_images(image_batch[:5])
###Output
_____no_output_____
###Markdown
Next, let's retrieve the labels. All images will be labeled either 0 or 1, since this is a binary classification problem.
###Code
# Here are the first 5 labels from the dataset
# that correspond to the images above
print(labels_batch[:5])
# Here, we can see that "0" maps to cat,
# and "1" maps to dog
print(train_data_gen.class_indices)
###Output
_____no_output_____
###Markdown
Create the modelYour model will consist of three convolutional blocks followed by max pooling. There's a fully connected layer with 256 units on top. This model will output class probabilities (between 0 and 1) based on the `sigmoid` activation function. If the output is closer to 1, the image will be classified as a dog, otherwise a cat.
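For reference, the sigmoid is $\sigma(x) = \frac{1}{1 + e^{-x}}$, which squashes any real-valued score into the range $(0, 1)$; for example, $\sigma(0) = 0.5$ and $\sigma(2) \approx 0.88$.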
###Code
from tensorflow.keras.layers import Conv2D, Dense, Flatten, MaxPooling2D
from tensorflow.keras.models import Sequential
model = Sequential([
Conv2D(32, 3, padding='same', activation='relu',
input_shape=(IMG_HEIGHT, IMG_WIDTH ,3)),
MaxPooling2D(),
Conv2D(32, 3, padding='same', activation='relu'),
MaxPooling2D(),
Conv2D(64, 3, padding='same', activation='relu'),
MaxPooling2D(),
Flatten(),
Dense(256, activation='relu'),
Dense(1, activation='sigmoid')
])
###Output
_____no_output_____
###Markdown
Compile the model, selecting the adam optimizer for gradient descent and binary cross entropy for the loss function (roughly, cross entropy is a way to measure the distance between the prediction we wanted the network to make, and the prediction it made).
###Code
model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy'])
###Output
_____no_output_____
###Markdown
Let's look at a diagram of all the layers of the network using the `summary` method:
###Code
model.summary()
###Output
_____no_output_____
###Markdown
This model has about 5M parameters (or weights) to learn. Our model is ready to go, and next we can train it using the data generators we created earlier. Train the model Use the `fit` method to train the network. You will train the model for 15 epochs (an epoch is one "sweep" over the training set, where each image is used once to perform a round of gradient descent, and update the model's parameters). This will take one to two minutes, so let's start it now:
###Code
epochs = 15
history = model.fit(
train_data_gen,
epochs=epochs,
validation_data=val_data_gen,
)
###Output
_____no_output_____
###Markdown
Inside `model.fit`, TensorFlow uses gradient descent to find useful values for all the weights in the model. When you create the model, the weights are initialized randomly, then gradually improved over time. The data generator is used to load batches of data off disk. Then, for each batch:- The model performs a forward pass (the images are classified by the network).- Then, the model performs a backward pass (the error is computed, then each weight is slightly adjusted using gradient descent to improve the accuracy on the next iteration).Gradient descent is an iterative process. The longer you train the model, the more accurate it will become on the training set. But, the more likely it is to overfit! Meaning, the model will begin to memorize the training images, rather than learn patterns that enable it to generalize to new images not included in the training set. - We can see whether overfitting is present by comparing the accuracy on the training and validation data.If you look at the accuracy figures reported above, you should see that training accuracy is over 90%, while validation accuracy is only around 70%. Create plots to check for overfittingAccuracy on the validation data is important: it helps you estimate how well your model is likely to work on new, unseen data in the future. To see how much overfitting is present (and when it occurs), we will create two plots, one for accuracy, and another for loss. Roughly, loss (or error) is the inverse of accuracy (lower is better). Unlike accuracy, loss takes the confidence of a prediction into account (a confidently wrong prediction has a higher loss than one that is only slightly wrong).
###Code
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs_range = range(epochs)
plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
###Output
_____no_output_____
###Markdown
Overfitting occurs when the validation loss stops decreasing. In this case, that occurs around epoch 5 (give or take). Your results may be slightly different each time you run this code (since the weights are initialized randomly).Why does overfitting happen? When there are only a "small" number of training examples, the model sometimes learns noise or unwanted details, to an extent that it negatively impacts the performance of the model on new examples. It means that the model will have a difficult time "generalizing" on a new dataset (making accurate predictions on images that weren't included in the training set). Game break: Quick, Draw!If you'd like, now would be a good time to take a break from coding and try: https://quickdraw.withgoogle.com/ Exercise: Reduce overfitting InstructionsIn this exercise, you will use data augmentation and dropout to improve your model. Follow along by reading and running the code below. There are two **TODOs** for you to complete, and a solution is given below. Data augmentationOverfitting occurs when there are a "small" number of training examples. One way to fix this problem is to increase the size of the training set, by gathering more data (the larger and more diverse the dataset, the better!)We can also use a technique called "data augmentation" to increase the size of the training set, by generating new examples from existing ones by applying random transformations (for example, rotation) that yield believable-looking images. This is especially effective when working with images. For example, our training set may only contain images of cats that are right side up. If our validation set contains images of cats that are upside down, our model may have trouble classifying them correctly. To help teach it that cats can appear in any orientation, we will randomly rotate images from our training set during training. This helps expose the model to more aspects of the data, and can lead to better generalization.Data augmentation is built into the ImageDataGenerator. You can specify different transformations, and it will take care of applying them during training.
###Code
# Let's create new data generators, this time with
# data augmentation enabled
train_generator = ImageDataGenerator(
rescale=1./255,
rotation_range=45,
width_shift_range=.15,
height_shift_range=.15,
horizontal_flip=True,
zoom_range=0.5
)
train_data_gen = train_generator.flow_from_directory(batch_size=32,
directory=train_dir,
shuffle=True,
target_size=(IMG_HEIGHT, IMG_WIDTH),
class_mode='binary')
###Output
_____no_output_____
###Markdown
The next cell will show how the same training image appears when used with five different types of data augmentation.
###Code
augmented_images = [train_data_gen[0][0][0] for i in range(5)]
plot_images(augmented_images)
###Output
_____no_output_____
###Markdown
We only apply data augmentation to the training examples, so our validation generator looks the same as before.
###Code
val_generator = ImageDataGenerator(rescale=1./255)
val_data_gen = val_generator.flow_from_directory(batch_size=32,
directory=validation_dir,
target_size=(IMG_HEIGHT, IMG_WIDTH),
class_mode='binary')
###Output
_____no_output_____
###Markdown
Dropout Another technique to reduce overfitting is to introduce dropout to the network. Dropout is a form of regularization that makes it more difficult for the network to memorize rare details (instead, it is forced to learn more general patterns).When you apply dropout to a layer, it randomly drops out (sets to zero) a number of activations during training. Dropout takes a fractional number as its input value, such as 0.1, 0.2, or 0.4. This means dropping out 10%, 20% or 40% of the output units randomly from the applied layer.When applying 0.1 dropout to a certain layer, it randomly deactivates 10% of that layer's output units at each training step.Create a new model using Dropout. You'll reuse the model definition from above, and add a Dropout layer.
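Before doing that, here is a tiny, optional illustration of what a Dropout layer does to its inputs (a sketch you can run in a scratch cell; which positions get zeroed is random each run):
```
import tensorflow as tf

# A Dropout layer only has an effect while training:
layer = tf.keras.layers.Dropout(0.2)
x = tf.ones((1, 10))
print(layer(x, training=True))   # roughly 20% of the values are zeroed, the rest are scaled up by 1/0.8
print(layer(x, training=False))  # at inference time the input passes through unchanged
```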
###Code
from tensorflow.keras.layers import Dropout
# TODO: Your code here
# Create a new CNN that takes advantage of Dropout.
# 1) Reuse the model declared in tutorial above.
# 2) Add a new line that says "Dropout(0.2)," immediately
# before the line that says "Flatten()".
###Output
_____no_output_____
###Markdown
Solution
###Code
#@title
model = Sequential([
Conv2D(32, 3, padding='same', activation='relu',
input_shape=(IMG_HEIGHT, IMG_WIDTH ,3)),
MaxPooling2D(),
Conv2D(32, 3, padding='same', activation='relu'),
MaxPooling2D(),
Conv2D(64, 3, padding='same', activation='relu'),
MaxPooling2D(),
Dropout(0.2),
Flatten(),
Dense(256, activation='relu'),
Dense(1, activation='sigmoid')
])
###Output
_____no_output_____
###Markdown
After introducing dropout to the network, compile your model and view the layers summary. You should see a Dropout layer right before flatten.
###Code
model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy'])
model.summary()
###Output
_____no_output_____
###Markdown
Train your new modelAdd code to train your new model. Previously, we trained for 15 epochs. You will need to train this new model for more epochs, as data augmentation and dropout make it more difficult for a CNN to memorize the training data (this is what we want!).Here, you'll train this model for 25 epochs. This may take a few minutes, and you may need to train it for longer to reach peak accuracy. If you like, you can continue experimenting with that at home.
###Code
epochs = 25
# TODO: your code here
# Add code to call model.fit, using your new
# data generators with image augmentation
# For reference, see the "Train the model"
# section above
###Output
_____no_output_____
###Markdown
Solution
###Code
#@title
history = model.fit(
train_data_gen,
epochs=epochs,
validation_data=val_data_gen,
)
###Output
_____no_output_____
###Markdown
Evaluate your new modelFinally, let's again create plots of accuracy and loss (we use these plots often in practice!) Now, compare the loss and accuracy curves for the training and validation data. Were you able to achieve a higher validation accuracy than before? Note that even this model will eventually overfit. To prevent that, we use a technique called early stopping (we stop training when the validation loss is no longer decreasing).
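As a side note, the early stopping mentioned above is available as a built-in Keras callback; a minimal sketch (the patience value here is illustrative, not tuned):
```
import tensorflow as tf

early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                              patience=3,
                                              restore_best_weights=True)
# Passing callbacks=[early_stop] to model.fit(...) stops training once the
# validation loss has not improved for `patience` epochs in a row.
```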
###Code
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs_range = range(epochs)
plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
###Output
_____no_output_____
###Markdown
Game break: Sketch-RNNIf you'd like, now would be a good time to take a break from coding and try: https://magenta.tensorflow.org/assets/sketch_rnn_demo/index.html Exercise: FlowersIn this exercise, you write a CNN and use it to classify five different types of flowers (sunflowers, tulips, etc). The dataset contains 1000 images in the training set, and 500 in the validation set.You will download the dataset, read and preprocess the images using ImageDataGenerator, then create, train and evaluate a model. A code outline is written for you, and there are several sections for you to complete, using the same pattern as the tutorial above. Download the dataset
###Code
origin = 'https://storage.googleapis.com/tensorflow-blog/datasets/mini_flowers.zip'
path_to_zip = tf.keras.utils.get_file('mini_flowers.zip', origin=origin, extract=True)
path_to_folder = os.path.join(os.path.dirname(path_to_zip))
train_dir = os.path.join(path_to_folder, "train/")
val_dir = os.path.join(path_to_folder, "val/")
###Output
_____no_output_____
###Markdown
Read the images off disk
###Code
train_image_generator = ImageDataGenerator(rescale=1./255)
train_data_gen = train_image_generator.flow_from_directory(batch_size=32,
directory=train_dir,
shuffle=True,
target_size=(IMG_HEIGHT, IMG_WIDTH),
class_mode='categorical')
###Output
_____no_output_____
###Markdown
Plot images and their labels
###Code
image_batch, labels_batch = next(train_data_gen)
plt.figure(figsize=(10,10))
for i in range(25):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(image_batch[i])
plt.xlabel(str(labels_batch[i]))
plt.show()
###Output
_____no_output_____
###Markdown
Understanding one-hot labels Notice the labels are in one-hot format. Let's add some code to display the class names.
###Code
print(train_data_gen.class_indices)
class_names = {v:k for k,v in train_data_gen.class_indices.items()}
plt.figure(figsize=(10,10))
for i in range(25):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(image_batch[i])
plt.xlabel(class_names[tf.argmax(labels_batch[i]).numpy()])
plt.show()
###Output
_____no_output_____
###Markdown
Read the validation images
###Code
# Above, you created a ImageDataGenerator for the training set
# Next, create one to read the validation images
# For example:
# validation_image_generator = ImageDataGenerator ...
# val_data_gen = validation_image_generator.flow_from_directory ...
###Output
_____no_output_____
###Markdown
Create a CNNNow, it's time to define your model. You can create a similar model to the CNN used in the tutorial above.The only difference is that the final Dense layer of your model (which classifies the data based on the features provided by the convolutional base) must use softmax activation and have five output classes:```model.add(Dense(5, activation='softmax'))```This is because we now have five different types of flowers, instead of just cats and dogs.
###Code
# TODO: your code here
# Define a CNN using code similar to the above
# For example
# model = Sequential()
# model.add ...
# ...
# The last line of your model should be:
# model.add(Dense(5, activation='softmax'))
###Output
_____no_output_____
###Markdown
After you have defined your model, compile it by uncommenting and running this code. Important: notice that the loss has changed to ```categorical_crossentropy```. This is necessary because the labels are in one-hot format. Finally, although these loss functions sound complicated, there are only a handful for you to learn.
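As a quick illustration of why the loss changes with the label format: the two losses compute the same quantity and differ only in how the true label is encoded (the numbers below are made up):
```
import tensorflow as tf

probs = tf.constant([[0.1, 0.1, 0.1, 0.6, 0.1]])  # one prediction over 5 classes
sparse = tf.keras.losses.sparse_categorical_crossentropy([3], probs)              # integer label
onehot = tf.keras.losses.categorical_crossentropy([[0., 0., 0., 1., 0.]], probs)  # one-hot label
print(float(sparse[0]), float(onehot[0]))  # both print -log(0.6), about 0.51
```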
###Code
#model.compile(optimizer='adam',
# loss='categorical_crossentropy',
# metrics=['accuracy'])
###Output
_____no_output_____
###Markdown
Now train your model for 10 epochs using ```model.fit```. If you like, you can try to create plots of the training and validation accuracy and loss.
###Code
# TODO: your code here
# For example
# model.fit ...
###Output
_____no_output_____
###Markdown
If all has gone well, your model should be about 90% accurate on the training data. Solution``` Read the validation imagesvalidation_image_generator = ImageDataGenerator(rescale=1./255)val_data_gen = validation_image_generator.flow_from_directory(batch_size=32, directory=val_dir, shuffle=True, target_size=(IMG_HEIGHT, IMG_WIDTH), class_mode='categorical')`````` Define a modelmodel = Sequential()model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(IMG_HEIGHT, IMG_WIDTH, 3)))model.add(MaxPooling2D())model.add(Conv2D(32, (3, 3), activation='relu'))model.add(MaxPooling2D())model.add(Conv2D(32, (3, 3), activation='relu'))model.add(MaxPooling2D())model.add(Flatten())model.add(Dense(128, activation='relu'))model.add(Dense(5, activation='softmax'))`````` Train the modelhistory = model.fit( train_data_gen, epochs=10, validation_data=val_data_gen,)``` An advanced example: DeepDreamIf time remains, in this tutorial your instructor will walk you through a minimal version of DeepDream, an experiment to visualize some of the features a convolutional neural network has learned to detect. DeepDream is an advanced tutorial, and our goal is to introduce you to some of the fascinating (and unexpected) things you can explore with Deep Learning. Normally, when training a model we use gradient descent to minimize classification loss. In a CNN, this means we adjust the weights in the filters. In DeepDream, we start with a large, pretrained CNN (and leave the filters fixed!) We then use gradient descent to modify the input image to increasingly activate the filters. For example, if there is a filter that recognizes a certain kind of texture, we can progressively modify the image to contain more and more examples of that texture.
###Code
import numpy as np
from IPython.display import clear_output
###Output
_____no_output_____
###Markdown
Download and display an image
###Code
url = 'https://storage.googleapis.com/download.tensorflow.org/example_images/YellowLabradorLooking_new.jpg'
def download(url, target_size=None):
name = url.split('/')[-1]
image_path = tf.keras.utils.get_file(name, origin=url)
return tf.keras.preprocessing.image.load_img(image_path, target_size)
def show(img):
plt.figure(figsize=(8,8))
plt.grid(False)
plt.axis('off')
plt.imshow(img)
plt.show()
original_img = download(url, target_size=[225, 375])
original_img = np.array(original_img)
show(original_img)
###Output
_____no_output_____
###Markdown
Rescale the pixel values
###Code
def preprocess(img):
""" Convert RGB values from [0, 255] to [-1, 1] """
img = tf.cast(img, tf.float32)
img /= 128.0
img -= 1.
return img
def unprocess(img):
""" Undo the preprocessing above """
img = 255 * (img + 1.0) / 2.0
return tf.cast(img, tf.uint8)
###Output
_____no_output_____
###Markdown
Import a large, pretrained CNNThis model has been trained on ImageNet, a dataset with about 1M images in about 1K classes
###Code
conv_base = tf.keras.applications.InceptionV3(weights='imagenet',
include_top=False)
###Output
_____no_output_____
###Markdown
Choose layers to activateNormally, when you train a neural network, you use gradient descent to adjust the weights to minimize loss, in order to accurately classify images. In DeepDream, the trick is to use gradient descent to adjust the **image**, in order to increasingly activate certain layers from the network. You can explore different layers and see how this affects the results. You can find all the layer names using ```model.summary()```.
###Code
names = ['mixed2', 'mixed3', 'mixed4', 'mixed5']
layers = [conv_base.get_layer(name).output for name in names]
model = tf.keras.Model(inputs=conv_base.input, outputs=layers)
###Output
_____no_output_____
###Markdown
Custom loss functionNormally, we would use cross-entropy loss (for classification), or mean squared error (for regression). Here, we'll write a loss function that describes how activated our layers were by the image.
###Code
def calc_loss(img):
img_batch = tf.expand_dims(img, axis=0)
layer_activations = model(img_batch)
losses = [tf.math.reduce_mean(act) for act in layer_activations]
return tf.reduce_sum(losses)
###Output
_____no_output_____
###Markdown
Use gradient ascent to progressively activate the layersNormally, when training a model you use gradient *descent* to adjust the weights to reduce the loss. In DeepDream, you will use gradient *ascent* to maximize the activation of the layers you selected by modifying the image, while leaving the weights of the network fixed.
###Code
@tf.function
def step(img, lr=0.001):
with tf.GradientTape() as tape:
loss = calc_loss(img)
gradients = tape.gradient(loss, img)
gradients /= tf.math.reduce_std(gradients) + 1e-8
# Because the gradients are in the same shape
# as the image, we can directly add them to it!
img.assign_add(gradients * lr)
img.assign(tf.clip_by_value(img, -1, 1))
img = tf.Variable(preprocess(original_img))
steps = 1000
for i in range(steps):
step(img)
if i % 200 == 0:
clear_output(wait=True)
print ("Step {}".format(i))
show(unprocess(img.numpy()))
clear_output(wait=True)
show(unprocess(img.numpy()))
###Output
_____no_output_____ |
Learn Python/04. Assign Variables.ipynb | ###Markdown
Assign variables No need to tell the type
###Code
my_age = 42
my_age
type(my_age)
?type
pi = 3.14159
type(pi)
π = 3.14159
π
🌵 = 1  # note: emoji are not valid Python identifiers, so this line raises a SyntaxError
###Output
_____no_output_____
###Markdown
We can reassign a value of a different type.
###Code
pi = 3.14
pi = 3
###Output
_____no_output_____ |
sst2_models/ML/SST2_Gensim_Pretrainded_Word2Vec.ipynb | ###Markdown
Introduction
In this notebook we will use a pre-trained Word2Vec model from Gensim to extract the word embeddings that ML algorithms will use as features to learn how to predict sentiment polarity in English movie-review sentences from the SST-2 dataset. Import packages
###Code
!pip install -U -q Unidecode
import sys
import unidecode
import re
import numpy as np
from nltk.tokenize import word_tokenize
from nltk import pos_tag
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from sklearn.preprocessing import LabelEncoder
from collections import defaultdict
from nltk.corpus import wordnet as wn
import nltk
nltk.download('stopwords')
nltk.download('wordnet')
nltk.download('punkt')
from sklearn.feature_extraction.text import TfidfVectorizer
###Output
[nltk_data] Downloading package stopwords to /root/nltk_data...
[nltk_data] Unzipping corpora/stopwords.zip.
[nltk_data] Downloading package wordnet to /root/nltk_data...
[nltk_data] Unzipping corpora/wordnet.zip.
[nltk_data] Downloading package punkt to /root/nltk_data...
[nltk_data] Unzipping tokenizers/punkt.zip.
###Markdown
Load the SST2 data
Clone the GitHub repository
###Code
# Clone the repository and all the dependencies
!git clone https://github.com/Huertas97/Sentiment_Analysis.git
###Output
Cloning into 'Sentiment_Analysis'...
remote: Enumerating objects: 30, done.[K
remote: Counting objects: 100% (30/30), done.[K
remote: Compressing objects: 100% (24/24), done.[K
remote: Total 30 (delta 8), reused 23 (delta 4), pack-reused 0[K
Unpacking objects: 100% (30/30), done.
###Markdown
Extract the SST2 train set
###Code
import io
import pandas as pd
# Load the data from SST2
def loadFile(fpath):
sst_data = {'X': [], 'y': []}
with io.open(fpath, 'r', encoding='utf-8') as f:
for line in f:
sample = line.strip().split('\t')
sst_data['y'].append(int(sample[1]))
sst_data['X'].append(sample[0])
assert max(sst_data['y']) == 2 - 1
return sst_data
sst2_train = loadFile("/content/Sentiment_Analysis/sst_2_data/sentiment-train")
sst2_df_train = pd.DataFrame( {"text": sst2_train["X"],
"labels": sst2_train["y"]} )
sst2_dev = loadFile("/content/Sentiment_Analysis/sst_2_data/sentiment-dev")
sst2_df_dev = pd.DataFrame( {"text": sst2_dev["X"],
"labels": sst2_dev["y"]} )
sst2_test = loadFile("/content/Sentiment_Analysis/sst_2_data/sentiment-test")
sst2_df_test = pd.DataFrame( {"text": sst2_test["X"],
"labels": sst2_test["y"]} )
###Output
_____no_output_____
###Markdown
Preprocess the text data
For TF-IDF and Word2Vec it is important to preprocess the text. This step matters because the quality of the sentence embedding depends on the words that belong to the sentence. If stopwords (e.g., but, and, so) are not removed, they add noise to the embedding, since these words do not properly represent our task.
###Code
def preprocessor(text, stoptext = "nltk", lemmatizer = "nltk"):
# sys.stdout.write('.')
# sys.stdout.flush()
# Text to unicode
text = unidecode.unidecode(text)
# Remove introduction words for sections
text = re.sub("[A-Z]{0,}\s[A-Z]+:", "", text)
# Lowercase and remove extra spaces
text = text.strip().lower()
# E mail
text = re.sub(r"e\s?-\s?mail", "email", text)
# Substitute p value
text = re.sub('p\s?[<=]\s?0?[.,]0[0-5]+', 'hppv', text) # Significant
text = re.sub('p\s?[>=]\s?[\d]+[.,]?\d*', 'lppv', text) # Non-significant
# Separate punctation to replace numbers for NUM better
from string import punctuation
punctuation_marks = set(punctuation)
punctuation_marks.update(chr(177))
for i in punctuation_marks:
element = "\\"+i # scape the character
sub_element = " "+i+" " # Example "=" --> " = "
text = re.sub(element, sub_element, text)
# Substitute irrelevant (isolated) numbers by NUM
text = re.sub( '[^A-Za-z][\-~]?[0-9][0-9]*\s?[.,]?\s?[0-9]+[^A-Za-z]', " num ", text)
text = re.sub( "\s[0-9]+\s", " num ", text)
# Tokenize the text
tokenized_text = nltk.word_tokenize(text)
# Delete Punctuation
tokenized_text = [i for i in tokenized_text if i not in punctuation_marks]
# Delete stop words
if stoptext == "spacy":
stop_words = sorted(spacy_stopwords)
if stoptext == "nltk":
nltk_stopwords = nltk.corpus.stopwords.words('english')
stop_words = sorted(nltk_stopwords)
if stoptext == "clinical":
stop_words = sorted(clinical_stopwords)
if stoptext == "long":
stop_words = sorted(long_stopwords)
tokenized_text = [i for i in tokenized_text if i not in stop_words]
# Lemmanization
if lemmatizer == "nltk":
lemmatizer = WordNetLemmatizer().lemmatize
lemmatized_text = [lemmatizer(word) for word in tokenized_text]
if lemmatizer == "spacy":
nlp = spacy.load('en', disable=['parser', 'ner'])
doc = nlp(" ".join(tokenized_text))
lemmatized_text = [token.lemma_ for token in doc]
# Join all the text
full_text = " ".join(lemmatized_text)
return full_text
from tqdm.auto import tqdm
clean_sst2_train = [preprocessor(text, stoptext="nltk", lemmatizer="nltk") for text in tqdm(sst2_df_train.text.to_list(), desc = "Train cleaning")]
clean_sst2_dev = [preprocessor(text, stoptext="nltk", lemmatizer="nltk") for text in tqdm(sst2_df_dev.text.to_list(), desc = "Dev cleaning")]
clean_sst2_test = [preprocessor(text, stoptext="nltk", lemmatizer="nltk") for text in tqdm(sst2_df_test.text.to_list(), desc = "Test cleaning")]
###Output
_____no_output_____
###Markdown
Create Word2Vec model
###Code
import gensim # pip install gensim
from gensim.models.word2vec import Word2Vec # word2vec model gensim class
TaggedDocument = gensim.models.doc2vec.TaggedDocument
from sklearn.model_selection import train_test_split
tokenized_tr = [ nltk.word_tokenize(text) for text in tqdm(clean_sst2_train, desc= "Tokenize train")]
print(tokenized_tr[0])
tokenized_dev = [ nltk.word_tokenize(text) for text in tqdm(clean_sst2_dev, "Tokenize dev")]
tokenized_te = [ nltk.word_tokenize(text) for text in tqdm(clean_sst2_test, "Tokenize test")]
###Output
_____no_output_____
###Markdown
TF-IDF embeddings
TF-IDF has several parameters: `max_features` (words that will be used as features; this is the vocabulary extracted for computing TF-IDF), `min_df` and `max_df` (words below or above these thresholds will be omitted when building the vocabulary), and `ngram_range` (whether to consider unigrams, bigrams, trigrams...).
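For reference, with `TfidfVectorizer`'s default `smooth_idf=True` (used here), the IDF values printed below follow $\mathrm{idf}(t) = \ln\frac{1 + n}{1 + \mathrm{df}(t)} + 1$, where $n$ is the number of training sentences and $\mathrm{df}(t)$ is the number of sentences containing term $t$, so rarer terms receive larger weights.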
###Code
from sklearn.feature_extraction.text import TfidfVectorizer
print('building tf-idf matrix ...')
max_features = 5000
vectorizer = TfidfVectorizer(max_features=max_features, min_df=0, max_df=0.8,
strip_accents='unicode', ngram_range=(1, 3))
vectorizer.fit(clean_sst2_train)
IDFs = dict(zip(vectorizer.get_feature_names(), vectorizer.idf_))
print('size of vocabulary obtained with TfidfVectorizer:', len(IDFs))
# print('size of vocabulary obtained with word2vec:', len(w2v.wv.vocab))
print("Some idfs:")
aux = list(IDFs.items())
for i in list(range(3))+list(range(1000,1005)):
print(" ", aux[i])
###Output
building tf-idf matrix ...
size of vocabulary obtained with TfidfVectorizer: 5000
Some idfs:
('19th', 9.284444837782294)
('19th century', 9.284444837782294)
('20th', 9.026615728480195)
('debt', 9.227286423942346)
('debut', 7.183184248707819)
('decade', 7.760949355148918)
('decent', 7.313637137105253)
('decent performance', 9.227286423942346)
###Markdown
Combine TF-IDF and Word2Vec
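The `Text2Vec` function below builds one vector per sentence as an IDF-weighted average of the Word2Vec vectors, $v = \frac{1}{k} \sum_{w} \mathrm{idf}(w)\, \vec{w}$, where the sum runs over the $k$ words of the sentence that appear in both the Word2Vec vocabulary and the TF-IDF vocabulary (all other words are skipped).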
###Code
def Text2Vec(tokens, size):
vec = np.zeros(size).reshape((1, size))
count = 0.
for word in tokens:
try:
            vec += glove_vectors.wv[word].reshape((1, size)) * IDFs[word] # weight the word embedding by its IDF
count += 1.
except KeyError: # handling the case where the token is not
# in the corpus. useful for testing.
continue
if count != 0:
vec /= count
return vec
# Download the pre-trained model from Gensim
import gensim.downloader
glove_vectors = gensim.downloader.load('word2vec-google-news-300')
vec_dim = 300
vecs_train = np.zeros((len(tokenized_tr), vec_dim))
for i,x in tqdm(enumerate(tokenized_tr), total=len(tokenized_tr), desc="Train vecs"):
vecs_train[i] = Text2Vec(x, vec_dim)
vecs_dev = np.zeros((len(tokenized_dev), vec_dim))
for i,x in tqdm(enumerate(tokenized_dev), total=len(tokenized_dev), desc="Dev vecs"):
vecs_dev[i] = Text2Vec(x, vec_dim)
vecs_test = np.zeros((len(tokenized_te), vec_dim))
for i,x in tqdm(enumerate(tokenized_te), total=len(tokenized_te), desc="Test vecs"):
vecs_test[i] = Text2Vec(x, vec_dim)
###Output
_____no_output_____
###Markdown
ML models
###Code
from sklearn import model_selection, naive_bayes, svm
from sklearn import metrics
from sklearn.preprocessing import LabelEncoder
from sklearn import model_selection, naive_bayes, svm
from sklearn.metrics import accuracy_score, matthews_corrcoef
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
!pip install wandb -qq
###Output
_____no_output_____
###Markdown
Naive Bayes
###Code
y_tr = sst2_df_train.labels.to_list()
y_te = sst2_df_test.labels.to_list()
from sklearn.naive_bayes import GaussianNB
import wandb
wandb.init(project="sklearn-sst2-gensim")
y_tr = sst2_df_train.labels.to_list()
y_te = sst2_df_test.labels.to_list()
Naive = naive_bayes.GaussianNB()
Naive.fit(vecs_train, y_tr)
# predict the labels on validation dataset
predictions_NB = Naive.predict(vecs_test)
# Use accuracy_score function to get the accuracy
print("Naive Bayes Accuracy Score -> ",accuracy_score(y_te, predictions_NB)*100)
# Print the precision and recall, among other metrics
print(metrics.classification_report(y_te, predictions_NB, digits=3))
# Print the confusion matrix
print(metrics.confusion_matrix(y_te, predictions_NB))
print("MCC", matthews_corrcoef(y_te, predictions_NB))
# Visualize all classifier plots
wandb.sklearn.plot_classifier(Naive, vecs_train, vecs_test, y_tr, y_te, predictions_NB, y_probas=Naive.predict_proba(vecs_test),
labels= ["Negative", "Positive"],
model_name='Naive Bayes',
feature_names= None)
wandb.finish()
###Output
_____no_output_____
###Markdown
Logistic Regression
###Code
from sklearn.linear_model import LogisticRegression
model = LogisticRegression(fit_intercept=True, random_state=0, max_iter=1000,
penalty='l1', solver = "liblinear")
model.fit(vecs_train, y_tr)
# predict the labels on validation dataset
predictions_LR = model.predict(vecs_test)
# Use accuracy_score function to get the accuracy
print("Logistic Regression Accuracy Score -> ",accuracy_score(y_te, predictions_LR)*100)
print(metrics.classification_report(y_te, predictions_LR))
###Output
_____no_output_____
###Markdown
L1
###Code
from sklearn.metrics import make_scorer
# Set the parameters by cross-validation
tuned_parameters = [{'C': np.logspace(-3, 1, 6), "max_iter": [1000]}]
scores = ["accuracy"]
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
knn = GridSearchCV(
LogisticRegression(penalty='l1', solver = "liblinear"),
cv=5,
param_grid=tuned_parameters,
scoring=make_scorer(accuracy_score),
n_jobs = 2
)
knn.fit(vecs_train, y_tr)
print("Best parameters set found on development set:")
print()
print(knn.best_params_)
print()
print("Grid scores on development set:")
print()
means = knn.cv_results_['mean_test_score']
stds = knn.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, knn.cv_results_['params']):
print("%0.3f (+/-%0.03f) for %r"
% (mean, std * 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = y_te, knn.predict(vecs_test)
print("Accuracy Score -> ",accuracy_score(y_true, y_pred)*100)
print("MCC", matthews_corrcoef(y_true, y_pred))
print(metrics.classification_report(y_true, y_pred))
print()
import wandb
wandb.init(project="sklearn-sst2-gensim")
log_l1 = LogisticRegression(penalty='l1', solver = "liblinear", max_iter = 1000,
C=0.039810717055349734)
log_l1.fit(vecs_train, y_tr)
y_true, y_pred = y_te, log_l1.predict(vecs_test)
print("Accuracy Score -> ",accuracy_score(y_true, y_pred)*100)
# predict the labels on validation dataset
predictions_LR = log_l1.predict(vecs_test)
# Visualize all classifier plots
wandb.sklearn.plot_classifier(log_l1, vecs_train, vecs_test, y_tr, y_te, predictions_LR, y_probas=log_l1.predict_proba(vecs_test),
labels= ["Negative", "Positive"],
model_name='Log Reg L1',
feature_names= None)
wandb.finish()
###Output
_____no_output_____
###Markdown
KNN
###Code
from sklearn.metrics import make_scorer
from sklearn.neighbors import KNeighborsClassifier
# Set the parameters by cross-validation
tuned_parameters = [{'n_neighbors':[1, 3, 5, 7, 11, 15, 20, 25, 30, 50, 100, 150, 200]}]
scores = ["accuracy"]
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
knn = GridSearchCV(
KNeighborsClassifier(), tuned_parameters, scoring=make_scorer(accuracy_score), cv = 5
)
knn.fit(vecs_train, y_tr)
print("Best parameters set found on development set:")
print()
print(knn.best_params_)
print()
print("Grid scores on development set:")
print()
means = knn.cv_results_['mean_test_score']
stds = knn.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, knn.cv_results_['params']):
print("%0.3f (+/-%0.03f) for %r"
% (mean, std * 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = y_te, knn.predict(vecs_test)
print("Accuracy Score -> ",accuracy_score(y_true, y_pred)*100)
print("MCC", matthews_corrcoef(y_true, y_pred))
print(metrics.classification_report(y_true, y_pred))
print()
###Output
# Tuning hyper-parameters for accuracy
Best parameters set found on development set:
{'n_neighbors': 1}
Grid scores on development set:
0.879 (+/-0.010) for {'n_neighbors': 1}
0.864 (+/-0.005) for {'n_neighbors': 3}
0.850 (+/-0.005) for {'n_neighbors': 5}
0.839 (+/-0.007) for {'n_neighbors': 7}
0.823 (+/-0.009) for {'n_neighbors': 11}
0.814 (+/-0.008) for {'n_neighbors': 15}
0.810 (+/-0.007) for {'n_neighbors': 20}
0.800 (+/-0.005) for {'n_neighbors': 25}
0.800 (+/-0.004) for {'n_neighbors': 30}
0.790 (+/-0.004) for {'n_neighbors': 50}
0.778 (+/-0.003) for {'n_neighbors': 100}
0.773 (+/-0.004) for {'n_neighbors': 150}
0.771 (+/-0.004) for {'n_neighbors': 200}
Detailed classification report:
The model is trained on the full development set.
The scores are computed on the full evaluation set.
Accuracy Score -> 65.40362438220758
MCC 0.3166991832162829
precision recall f1-score support
0 0.70 0.54 0.61 912
1 0.62 0.77 0.69 909
accuracy 0.65 1821
macro avg 0.66 0.65 0.65 1821
weighted avg 0.66 0.65 0.65 1821
###Markdown
RF
###Code
from sklearn.metrics import make_scorer
from sklearn.ensemble import RandomForestClassifier
# Set the parameters by cross-validation
tuned_parameters = [{'n_estimators':[50, 100, 150, 200, 300, 400, 500],
'max_depth': [10, 20, 30, 40]}]
scores = ["accuracy"]
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
knn = GridSearchCV(
RandomForestClassifier( max_depth=3, random_state=0), tuned_parameters, scoring=make_scorer(accuracy_score), cv = 2
)
knn.fit( vecs_train, y_tr)
print("Best parameters set found on development set:")
print()
print(knn.best_params_)
print()
print("Grid scores on development set:")
print()
means = knn.cv_results_['mean_test_score']
stds = knn.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, knn.cv_results_['params']):
print("%0.3f (+/-%0.03f) for %r"
% (mean, std * 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = y_te, knn.predict(vecs_test)
print("Accuracy Score -> ",accuracy_score(y_true, y_pred)*100)
print("MCC", matthews_corrcoef(y_true, y_pred))
print(metrics.classification_report(y_true, y_pred))
print()
###Output
# Tuning hyper-parameters for accuracy
Best parameters set found on development set:
{'max_depth': 30, 'n_estimators': 400}
Grid scores on development set:
0.823 (+/-0.002) for {'max_depth': 10, 'n_estimators': 50}
0.828 (+/-0.005) for {'max_depth': 10, 'n_estimators': 100}
0.830 (+/-0.006) for {'max_depth': 10, 'n_estimators': 150}
0.831 (+/-0.005) for {'max_depth': 10, 'n_estimators': 200}
0.831 (+/-0.004) for {'max_depth': 10, 'n_estimators': 300}
0.832 (+/-0.004) for {'max_depth': 10, 'n_estimators': 400}
0.833 (+/-0.003) for {'max_depth': 10, 'n_estimators': 500}
0.849 (+/-0.003) for {'max_depth': 20, 'n_estimators': 50}
0.854 (+/-0.003) for {'max_depth': 20, 'n_estimators': 100}
0.856 (+/-0.004) for {'max_depth': 20, 'n_estimators': 150}
0.857 (+/-0.004) for {'max_depth': 20, 'n_estimators': 200}
0.859 (+/-0.004) for {'max_depth': 20, 'n_estimators': 300}
0.859 (+/-0.003) for {'max_depth': 20, 'n_estimators': 400}
0.860 (+/-0.003) for {'max_depth': 20, 'n_estimators': 500}
0.850 (+/-0.005) for {'max_depth': 30, 'n_estimators': 50}
0.854 (+/-0.004) for {'max_depth': 30, 'n_estimators': 100}
0.858 (+/-0.004) for {'max_depth': 30, 'n_estimators': 150}
0.859 (+/-0.002) for {'max_depth': 30, 'n_estimators': 200}
0.860 (+/-0.003) for {'max_depth': 30, 'n_estimators': 300}
0.861 (+/-0.003) for {'max_depth': 30, 'n_estimators': 400}
0.861 (+/-0.004) for {'max_depth': 30, 'n_estimators': 500}
0.850 (+/-0.006) for {'max_depth': 40, 'n_estimators': 50}
0.855 (+/-0.004) for {'max_depth': 40, 'n_estimators': 100}
0.858 (+/-0.004) for {'max_depth': 40, 'n_estimators': 150}
0.859 (+/-0.003) for {'max_depth': 40, 'n_estimators': 200}
0.860 (+/-0.003) for {'max_depth': 40, 'n_estimators': 300}
0.861 (+/-0.003) for {'max_depth': 40, 'n_estimators': 400}
0.861 (+/-0.003) for {'max_depth': 40, 'n_estimators': 500}
Detailed classification report:
The model is trained on the full development set.
The scores are computed on the full evaluation set.
Accuracy Score -> 75.39813289401428
MCC 0.5281323492859495
precision recall f1-score support
0 0.85 0.62 0.72 912
1 0.70 0.89 0.78 909
accuracy 0.75 1821
macro avg 0.77 0.75 0.75 1821
weighted avg 0.77 0.75 0.75 1821
|
cursos_alura/corretor_ortografico_aplicando_tecnicas_de_nlp/code/corretor.ipynb | ###Markdown
Creating my word bank
###Code
len(artigo)
len('olá')
len('olá ')
texto_exemplo = 'Olá, tudo bem?'
tokens = texto_exemplo.split()
print(tokens)
print(len(tokens))
!pip install nltk
import nltk
nltk.download('punkt')
palavras_separadas = nltk.tokenize.word_tokenize(texto_exemplo)
print(palavras_separadas)
len(palavras_separadas)
'palavra'.isalpha()  # isalpha() returns True when the string contains only alphabetic characters
'palavra1'.isalpha()
def separa_palavras(lista_tokens):
    lista_palavras = []
    for token in lista_tokens:
        if token.isalpha():
            lista_palavras.append(token)
    return lista_palavras
separa_palavras(palavras_separadas)
lista_tokens = nltk.tokenize.word_tokenize(artigo)
lista_palavras = separa_palavras(lista_tokens)
print('O número de palavras é: {}'.format(len(lista_palavras)))
print(lista_palavras[:5])
def normalizacao(lista_palavras):
    lista_normalizada = []
    for palavra in lista_palavras:
        lista_normalizada.append(palavra.lower())
    return lista_normalizada
lista_normalizada = normalizacao(lista_palavras)
print(lista_normalizada[:5])
set([1,2,3,3,3,4,5,6])
len(set(lista_normalizada))
###Output
_____no_output_____
###Markdown
Creating my word generator
###Code
palavra_exemplo = 'lgica'
def insere_letras(fatias):
    novas_palavras = []
    letras = 'abcdefghijklmnopqrstuvwxyzàáâãèéêìíîòóôõùúûç'
    for E, D in fatias:
        for letra in letras:
            novas_palavras.append(E + letra + D)
    return novas_palavras

def gerador_palavras(palavra):
    fatias = []
    for i in range(len(palavra)+1):
        fatias.append((palavra[:i], palavra[i:]))
    palavras_geradas = insere_letras(fatias)
    return palavras_geradas
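# Illustrative note (not from the original notebook): for the word 'lo',
# gerador_palavras builds the slices ('', 'lo'), ('l', 'o'), ('lo', ''),
# and insere_letras inserts every letter of `letras` at each split point,
# producing len(letras) * (len(palavra) + 1) candidate words.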
palavras_geradas = gerador_palavras(palavra_exemplo)
print(palavras_geradas)
###Output
['algica', 'blgica', 'clgica', 'dlgica', 'elgica', 'flgica', 'glgica', 'hlgica', 'ilgica', 'jlgica', 'klgica', 'llgica', 'mlgica', 'nlgica', 'olgica', 'plgica', 'qlgica', 'rlgica', 'slgica', 'tlgica', 'ulgica', 'vlgica', 'wlgica', 'xlgica', 'ylgica', 'zlgica', 'àlgica', 'álgica', 'âlgica', 'ãlgica', 'èlgica', 'élgica', 'êlgica', 'ìlgica', 'ílgica', 'îlgica', 'òlgica', 'ólgica', 'ôlgica', 'õlgica', 'ùlgica', 'úlgica', 'ûlgica', 'çlgica', 'lagica', 'lbgica', 'lcgica', 'ldgica', 'legica', 'lfgica', 'lggica', 'lhgica', 'ligica', 'ljgica', 'lkgica', 'llgica', 'lmgica', 'lngica', 'logica', 'lpgica', 'lqgica', 'lrgica', 'lsgica', 'ltgica', 'lugica', 'lvgica', 'lwgica', 'lxgica', 'lygica', 'lzgica', 'làgica', 'lágica', 'lâgica', 'lãgica', 'lègica', 'légica', 'lêgica', 'lìgica', 'lígica', 'lîgica', 'lògica', 'lógica', 'lôgica', 'lõgica', 'lùgica', 'lúgica', 'lûgica', 'lçgica', 'lgaica', 'lgbica', 'lgcica', 'lgdica', 'lgeica', 'lgfica', 'lggica', 'lghica', 'lgiica', 'lgjica', 'lgkica', 'lglica', 'lgmica', 'lgnica', 'lgoica', 'lgpica', 'lgqica', 'lgrica', 'lgsica', 'lgtica', 'lguica', 'lgvica', 'lgwica', 'lgxica', 'lgyica', 'lgzica', 'lgàica', 'lgáica', 'lgâica', 'lgãica', 'lgèica', 'lgéica', 'lgêica', 'lgìica', 'lgíica', 'lgîica', 'lgòica', 'lgóica', 'lgôica', 'lgõica', 'lgùica', 'lgúica', 'lgûica', 'lgçica', 'lgiaca', 'lgibca', 'lgicca', 'lgidca', 'lgieca', 'lgifca', 'lgigca', 'lgihca', 'lgiica', 'lgijca', 'lgikca', 'lgilca', 'lgimca', 'lginca', 'lgioca', 'lgipca', 'lgiqca', 'lgirca', 'lgisca', 'lgitca', 'lgiuca', 'lgivca', 'lgiwca', 'lgixca', 'lgiyca', 'lgizca', 'lgiàca', 'lgiáca', 'lgiâca', 'lgiãca', 'lgièca', 'lgiéca', 'lgiêca', 'lgiìca', 'lgiíca', 'lgiîca', 'lgiòca', 'lgióca', 'lgiôca', 'lgiõca', 'lgiùca', 'lgiúca', 'lgiûca', 'lgiçca', 'lgicaa', 'lgicba', 'lgicca', 'lgicda', 'lgicea', 'lgicfa', 'lgicga', 'lgicha', 'lgicia', 'lgicja', 'lgicka', 'lgicla', 'lgicma', 'lgicna', 'lgicoa', 'lgicpa', 'lgicqa', 'lgicra', 'lgicsa', 'lgicta', 'lgicua', 'lgicva', 'lgicwa', 'lgicxa', 'lgicya', 'lgicza', 'lgicàa', 'lgicáa', 'lgicâa', 'lgicãa', 'lgicèa', 'lgicéa', 'lgicêa', 'lgicìa', 'lgicía', 'lgicîa', 'lgicòa', 'lgicóa', 'lgicôa', 'lgicõa', 'lgicùa', 'lgicúa', 'lgicûa', 'lgicça', 'lgicaa', 'lgicab', 'lgicac', 'lgicad', 'lgicae', 'lgicaf', 'lgicag', 'lgicah', 'lgicai', 'lgicaj', 'lgicak', 'lgical', 'lgicam', 'lgican', 'lgicao', 'lgicap', 'lgicaq', 'lgicar', 'lgicas', 'lgicat', 'lgicau', 'lgicav', 'lgicaw', 'lgicax', 'lgicay', 'lgicaz', 'lgicaà', 'lgicaá', 'lgicaâ', 'lgicaã', 'lgicaè', 'lgicaé', 'lgicaê', 'lgicaì', 'lgicaí', 'lgicaî', 'lgicaò', 'lgicaó', 'lgicaô', 'lgicaõ', 'lgicaù', 'lgicaú', 'lgicaû', 'lgicaç']
###Markdown
Creating the evaluation of which word is the correct one
###Code
total_palavras = len(lista_normalizada)
frequencia = nltk.FreqDist(lista_normalizada)
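# nltk.FreqDist counts how often each normalized word occurs in the corpus;
# probabilidade below converts those counts into relative frequencies.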
def probabilidade(palavra_gerada):
    return frequencia[palavra_gerada] / total_palavras

def corretor(palavra):
    palavras_geradas = gerador_palavras(palavra)
    palavra_correta = max(palavras_geradas, key=probabilidade)
    return palavra_correta
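# The corrector scores every generated candidate by its relative frequency in the
# corpus (max with key=probabilidade) and returns the most frequent one.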
corretor(palavra_exemplo)
def cria_dados_teste(nome_arquivo):
    lista_palavras_teste = []
    f = open(nome_arquivo, 'r', encoding='utf8')
    for linha in f:
        correta, errada = linha.split()
        lista_palavras_teste.append((correta, errada))
    f.close()
    return lista_palavras_teste
lista_teste = cria_dados_teste('../sample_data/for_modeling/palavras.txt')
lista_teste
def avaliador(testes):
    numero_palavras = len(testes)
    acertou = 0
    for correta, errada in testes:
        palavra_corrigida = corretor(errada)
        if palavra_corrigida == correta:
            acertou += 1
    taxa_acerto = acertou / numero_palavras
    print("Taxa de acerto: {:.2%} de {} palavras".format(taxa_acerto, numero_palavras))
###Output
_____no_output_____ |
notebooks/DNNRegression.ipynb | ###Markdown
Barebones example of DNNRegressor in TensorFlowIn this notebook a DNNRegressor is used through [TensorFlow](https://www.tensorflow.org/)'s tf.contrib.learn library. The example shows how to generate the feature_columns and feed the input using the input_fn argument.
###Code
# Used to clear up the workspace.
%reset -f
import numpy as np
import pickle
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.estimators import estimator
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
# Load the data.
data = pickle.load(open('../data/data-ant.pkl', 'rb'))
observations = data['observations']
actions = data['actions']
# We will only look at the first label column, since multiple regression is not supported for some reason...
actions = actions[:, 0]
# Split the data.
X_train, X_test, y_train, y_test = train_test_split(observations, actions, test_size=10, random_state=42)
num_train = X_train.shape[0]
num_test = X_test.shape[0]
###Output
_____no_output_____
###Markdown
The *pred_fn* and *input_fn* functions take lists or NumPy arrays as input and generate feature columns or labels. The feature columns take the form of a dictionary with column names as keys and per-column tf.constant tensors as values, while the label is simply a tf.constant of the labels. np.newaxis is added in order to address TensorFlow's warning that the input should be a two- rather than one-dimensional tensor.
###Code
def pred_fn(X):
    return {str("my_col" + str(k)): tf.constant(X[:, k][:, np.newaxis]) for k in range(X.shape[1])}
def input_fn(X, y):
    feature_cols = pred_fn(X)
    label = tf.constant(y)
    return feature_cols, label
feature_cols = [tf.contrib.layers.real_valued_column(str("my_col") + str(i)) for i in range(X_train.shape[1])]
# This does not work for some reason.
#feature_cols = tf.contrib.learn.infer_real_valued_columns_from_input(X_train)
regressor = tf.contrib.learn.DNNRegressor(feature_columns=feature_cols, hidden_units=[100, 100])
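# hidden_units=[100, 100] gives two fully connected hidden layers of 100 units each;
# fit() below trains for 1000 steps, feeding data through input_fn.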
regressor.fit(input_fn=lambda: input_fn(X_train, y_train), steps=1000);
pred = list(regressor.predict_scores(input_fn=lambda: pred_fn(X_test)))
print(pred)
print(y_test)
print(mean_squared_error(pred, y_test))
###Output
[0.39723414, -0.027354294, -0.061233871, -0.017296148, -0.37245646, 0.1132348, 0.1976911, -0.1596929, 0.38804257, 0.0017217866]
[ 0.50300872 0.04458803 -0.07244712 0.00861396 -0.49456769 -0.03319729
0.18001977 -0.25375277 0.25746021 -0.05760179]
0.00832451
|
squad20/preprocess.ipynb | ###Markdown
Word, token lengths
###Code
%%time
col = "qc_length"
df[col] = df["question"].str.len() + df["context"].str.len()
df[col] = df[col].astype(np.int32)
%%time
col = "a_length"
df[col] = df["answer_text"].str.len()
df[col] = df[col].astype(np.int32)
def word_length(cols: Iterable) -> Callable:
    def f(row) -> int:
        res = 0
        for col in cols:
            res += len(row[col].split())
        return res
    return f
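# word_length(["question", "context"]) returns a row-wise function that counts
# whitespace-separated tokens across the listed columns; it is used with
# DataFrame.apply below.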
%%time
col = "qc_word_length"
df[col] = df.progress_apply(word_length(["question", "context"]), axis=1)
df[col] = df[col].astype(np.int32)
%%time
col = "a_word_length"
df[col] = df.progress_apply(word_length(["answer_text"]), axis=1)
df[col] = df[col].astype(np.int32)
#pretrained_dir = "../pretrained/google/electra-small-discriminator"
#tokenizer = AutoTokenizer.from_pretrained(pretrained_dir, model_max_length=512)
#print(f"{repr(tokenizer)}\n{tokenizer.model_input_names}")
#pretrained_dir = "../pretrained/albert-base-v2"
#sp_tokenizer = AutoTokenizer.from_pretrained(pretrained_dir, model_max_length=512)
#print(f"{repr(sp_tokenizer)}\n{sp_tokenizer.model_input_names}")
#pretrained_dir = "../pretrained/distilroberta-base"
#bpe_tokenizer = AutoTokenizer.from_pretrained(pretrained_dir, model_max_length=512)
#print(f"{repr(bpe_tokenizer)}\n{bpe_tokenizer.model_input_names}")
#%%time
#x = tokenizer(questions, contexts)
#print(f"{repr(x.keys())}\nlen={len(x['input_ids'])}")
#col = "qc_wp_length"
#df[col] = [len(v) for v in x["input_ids"]]
#df[col] = df[col].astype(np.int16)
#%%time
#x = sp_tokenizer(questions, contexts)
#print(f"{repr(x.keys())}\nlen={len(x['input_ids'])}")
#col = "qc_sp_length"
#df[col] = [len(v) for v in x["input_ids"]]
#df[col] = df[col].astype(np.int16)
#%%time
#x = bpe_tokenizer(questions, contexts)
#print(f"{repr(x.keys())}\nlen={len(x['input_ids'])}")
#col = "qc_bpe_length"
#df[col] = [len(v) for v in x["input_ids"]]
#df[col] = df[col].astype(np.int16)
#cols = ["qc_length", "a_length", "qc_word_length", "a_word_length",
# "qc_wp_length", "qc_sp_length", "qc_bpe_length"]
#df[cols].describe(percentiles=percentiles)
df.info()
%%time
df.to_parquet("output/train.parquet", index=False)
###Output
Wall time: 323 ms
|
figure_making/Fig_3_SLA.ipynb | ###Markdown
Setup
###Code
# import packages
%run ../global_packages.py
# get the global parameters
%run ../global_pars.py
# import your local functions
sys.path.insert(1, '../')
from global_functions import *
# make sure the figures plot inline rather than at the end
%matplotlib inline
###Output
_____no_output_____
###Markdown
Paths
###Code
figpath = '../figures/'
###Output
_____no_output_____
###Markdown
Get Data
###Code
# SLA
ds_SLA = xr.open_dataset('../data_processing/2_SLA/sla_processed.nc')
ds_SLA
mon_sla = ds_SLA['mon_sla']
mon_sla_mon_anom = ds_SLA['mon_sla_mon_anom']
mon_sla_mon_clim = ds_SLA['mon_sla_mon_clim']
lat = mon_sla.lat.values
lon = mon_sla.lon.values
###Output
_____no_output_____
###Markdown
Get DMI
###Code
# load DMI data
ds_DMI= xr.open_dataset('../data_processing/3_DMI/dmi_processed.nc')
posIODyears = list(np.array(ds_DMI.pos_IOD_years))
negIODyears = list(np.array(ds_DMI.neg_IOD_years))
neuIODyears = list(np.array(ds_DMI.neu_IOD_years))
sposIODyears = list(np.array(ds_DMI.spos_IOD_years))
snegIODyears = list(np.array(ds_DMI.sneg_IOD_years))
wposIODyears = list(np.array(ds_DMI.wpos_IOD_years))
wnegIODyears = list(np.array(ds_DMI.wneg_IOD_years))
ds_DMI
###Output
_____no_output_____
###Markdown
Group Anomalies Into IOD Phases
###Code
var = mon_sla_mon_anom
# ------------------------------------------------------------#
# Anomaly
# ------------------------------------------------------------#
# average over the positive IOD years -------------------------------------------#
posIOD_mon_sla_mon_anom,_ = IOD_year_group_grid(var,IODyear_begin,IODyear_end,posIODyears)
# average over the negative IOD years -------------------------------------------#
negIOD_mon_sla_mon_anom,_ = IOD_year_group_grid(var,IODyear_begin,IODyear_end,negIODyears)
# average over the neutral IOD years -------------------------------------------#
neuIOD_mon_sla_mon_anom,_ = IOD_year_group_grid(var,IODyear_begin,IODyear_end,neuIODyears)
# ------------------------------------------------------------#
# Strong Anomaly
# ------------------------------------------------------------#
# average over the strong positive IOD years -------------------------------------------#
sposIOD_mon_sla_mon_anom,_ = IOD_year_group_grid(var,IODyear_begin,IODyear_end,sposIODyears)
# average over the strong negative IOD years -------------------------------------------#
snegIOD_mon_sla_mon_anom,_ = IOD_year_group_grid(var,IODyear_begin,IODyear_end,snegIODyears)
# ------------------------------------------------------------#
# Weak Anomaly
# ------------------------------------------------------------#
# average over the weak positive IOD years -------------------------------------------#
wposIOD_mon_sla_mon_anom,_ = IOD_year_group_grid(var,IODyear_begin,IODyear_end,wposIODyears)
# average over the weak negative IOD years -------------------------------------------#
wnegIOD_mon_sla_mon_anom,_ = IOD_year_group_grid(var,IODyear_begin,IODyear_end,wnegIODyears)
# ------------------------------------------------------------#
# Annual Cycle
# ------------------------------------------------------------#
mon_sla_mon_clim = mon_sla_mon_clim.roll(month=-5,roll_coords = False)
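# roll(month=-5, roll_coords=False) shifts the climatology so the first entry is June
# (Jun-May order, matching the IOD-year convention), while the month coordinate stays 1-12.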
###Output
/home/jennap/anaconda3/lib/python3.7/site-packages/xarray/core/nanops.py:161: RuntimeWarning: Mean of empty slice
return np.nanmean(a, axis=axis, dtype=dtype)
###Markdown
Hovmueller Diagrams
###Code
# collect the fields to plot: the monthly climatology plus the positive, negative, and strong-positive IOD anomaly composites
IODphases = list([mon_sla_mon_clim,posIOD_mon_sla_mon_anom,
negIOD_mon_sla_mon_anom,sposIOD_mon_sla_mon_anom])
titles = ['Monthly\nClimatology','Interannual Anomaly\nPositive IOD Phases',
'Interannual Anomaly\nNegative IOD Phases','Interannual Anomaly\nStrong Positive Phases']
# plt.rcParams.update({'font.size': 20})
fig = plt.figure(figsize=(17.8/2.54, 3), dpi = 200)
cmin = -0.15
cmax = 0.15
letters = ['a','b','c','d','e','f']
params = {'legend.fontsize': 6,
'axes.labelsize': 8,
'axes.titlesize': 8,
'xtick.labelsize':6.15,
'ytick.labelsize':7.5,
'hatch.linewidth':0.5,
'hatch.color':'#3A3B3C',
'axes.linewidth':0.35,
'xtick.major.width':0,
'xtick.major.size':1.5,
'ytick.major.width':0.75,
'ytick.major.size':1.5}
pylab.rcParams.update(params)
#########################
for ii,phase in enumerate(IODphases):
    # Get times and make array of datetime objects
    vtimes = phase.month
    data = np.zeros([vtimes.shape[0],ds_SLA.sta_loninds.shape[0]])
    ac = np.zeros([vtimes.shape[0],ds_SLA.sta_loninds.shape[0]])
    for jj in range(ds_SLA.sta_loninds.shape[0]):
        data[:,jj] = phase[:,ds_SLA.sta_latinds[jj],ds_SLA.sta_loninds[jj]]
        ac[:,jj] = mon_sla_mon_clim[:,ds_SLA.sta_latinds[jj],ds_SLA.sta_loninds[jj]]
    # colorbar limits
    levels = np.round(np.linspace(cmin, cmax, 10),2)
    # Specify longitude values for chosen domain
    sta = np.arange(len(ds_SLA.sta_loninds))
    ax = fig.add_subplot(1,4,ii+1)
    # Plot of chosen variable averaged over latitude and slightly smoothed
    # cf = ax.contourf(sta,vtimes,data,levels = levels,cmap=plt.cm.PuOr_r, extend="both")
    cf = ax.contourf(sta,vtimes,data,levels = levels,cmap=plt.cm.PuOr_r, extend="both")
    if ii > 0:
        ss = np.ma.array(data, mask= np.sign(ac) * np.sign(data)>=0) # maintains opposite condition
        css = ax.contourf(sta,vtimes,ss,levels = levels,cmap=plt.cm.PuOr_r,
                          extend="both", hatches=['//////'], alpha=0.5)
    for loc in ds_SLA.loc_list:
        plt.axvline(x=loc,color = 'k', linewidth = 0.5)
    plt.plot(ds_SLA.loc_list,np.full(ds_SLA.loc_list.shape,1), markersize = 2,
             markerfacecolor = 'k', marker = 's', color = 'g',markeredgecolor = 'k',
             clip_on=False)
    # plt.xlabel('Station')
    # if ii == 0:
    # plt.ylabel('Month')
    plt.title(titles[ii])
    ax2 = ax.twinx()
    # if ii == 0:
    # ax.set_yticklabels(['','','','','summer/fall','','','','', '', '','winter/spring'])
    # else:
    # ax.set_yticklabels([])
    # plt.yticks(rotation=90)
    # if ii == 3:
    # ax2.set_yticks(np.arange(1,13))
    # ax2.set_yticklabels(['Jun','Jul','Aug','Sep','Oct','Nov','Dec','Jan','Feb','Mar', 'Apr', 'May'])
    # else:
    # ax2.set_yticklabels([])
    if ii == 3:
        ax2.set_yticks(np.arange(0,12))
        ax2.set_yticklabels(['Jun','Jul','Aug','Sep','Oct','Nov','Dec','Jan','Feb','Mar', 'Apr', 'May'],
                            fontsize = 6)
    else:
        ax2.set_yticks(np.arange(0,12))
        ax2.set_yticklabels([])
    if ii == 0:
        ax.set_yticks(np.arange(1,13))
        ax.set_yticklabels(['',' ',' ','','','Summer/Fall ','','','', '', '','Winter/Spring '], rotation = 90)
    else:
        ax.set_yticks(np.arange(1,13))
        ax.set_yticklabels([])
    ax.axhline(y = 6.5, color = 'dimgray',linestyle = '--', linewidth = 1)
    xticks = (np.array(ds_SLA.loc_list[:-1]) + np.array(ds_SLA.loc_list[1:]))*0.5
    ax.set_xticks(xticks)
    ax.set_xticklabels(['EQ', 'EBoB','WBoB', 'EAS','WAS'])
    ax.tick_params(axis='x', which='major', pad=5)
    # ax.set_yticks(list(np.arange(0,12)))
    cf.set_clim(cmin, cmax)  # reset lims because contourf does weird things.
    add_letter(ax, letters[ii], x = 0.01,y=0.95, fontsize = 7)
# add coastal waveguide inset
left, bottom, width, height = [0.845, 0.715, 0.11, 0.155]
axi = fig.add_axes([left, bottom, width, height],projection= ccrs.PlateCarree())
axi.scatter(ds_SLA.sta_lon,ds_SLA.sta_lat,s=7,marker = '.',c='g',
edgecolor = 'none',transform=ccrs.PlateCarree(), zorder = 3)
axi.scatter(ds_SLA.sta_lon[ds_SLA.loc_list],ds_SLA.sta_lat[ds_SLA.loc_list],s=7,
edgecolor = 'none',marker = 's',c='k',transform=ccrs.PlateCarree(), zorder = 3)
g = add_land(axi, bounds = [49,104,-5,30], lcolor = 'dimgray')
g.xlocator = mticker.FixedLocator([])
g.ylocator = mticker.FixedLocator([])
add_text(axi, 'WAS', x = 0.1,y=0.5, fontsize = 6, color = 'k', weight = 'bold')
add_text(axi, 'EAS', x = 0.3,y=0.71, fontsize = 6, color = 'k', weight = 'bold')
add_text(axi, 'WBoB', x = 0.5,y=0.46, fontsize = 6, color = 'k', weight = 'bold')
add_text(axi, 'EBoB', x = 0.67,y=0.85, fontsize = 6, color = 'k', weight = 'bold')
add_text(axi, 'EQ', x = 0.3,y=0.1, fontsize = 6, color = 'k', weight = 'bold')
# cbar_ax = fig.add_axes([0.91, 0.125, 0.015, 0.75])
cbar_ax = fig.add_axes([0.045, 0.14, 0.9, 0.025])
cbar = fig.colorbar(cf,cax=cbar_ax, pad=0.04, orientation = 'horizontal')
cbar.set_label('Sea-level Anomaly ($m$)', size = 7)
plt.subplots_adjust(wspace = 0.12, bottom = 0.265, left = 0.03, right = 0.96)
plt.savefig(figpath + 'Fig_3_sla.pdf', format='pdf', dpi = 400)
###Output
_____no_output_____ |