path (string, length 7–265) | concatenated_notebook (string, length 46–17M)
---|---|
site/en-snapshot/probability/examples/Probabilistic_Layers_VAE.ipynb | ###Markdown
Make things Fast! Before we dive in, let's make sure we're using a GPU for this demo. To do this, select "Runtime" -> "Change runtime type" -> "Hardware accelerator" -> "GPU". The following snippet will verify that we have access to a GPU.
###Code
if tf.test.gpu_device_name() != '/device:GPU:0':
print('WARNING: GPU device not found.')
else:
print('SUCCESS: Found GPU: {}'.format(tf.test.gpu_device_name()))
###Output
SUCCESS: Found GPU: /device:GPU:0
###Markdown
Note: if for some reason you cannot access a GPU, this Colab will still work. (Training will just take longer.) Load Dataset
###Code
datasets, datasets_info = tfds.load(name='mnist',
with_info=True,
as_supervised=False)
def _preprocess(sample):
image = tf.cast(sample['image'], tf.float32) / 255. # Scale to unit interval.
image = image < tf.random.uniform(tf.shape(image)) # Randomly binarize.
return image, image
train_dataset = (datasets['train']
.map(_preprocess)
.batch(256)
.prefetch(tf.data.experimental.AUTOTUNE)
.shuffle(int(10e3)))
eval_dataset = (datasets['test']
.map(_preprocess)
.batch(256)
.prefetch(tf.data.experimental.AUTOTUNE))
###Output
_____no_output_____
###Markdown
Note that `_preprocess()` above returns `image, image` rather than just `image` because Keras is set up for discriminative models with an (example, label) input format, i.e. $p_\theta(y|x)$. Since the goal of the VAE is to recover the input $x$ from $x$ itself (i.e. $p_\theta(x|x)$), the data pair is (example, example); a quick check of this pairing is sketched below. VAE Code Golf Specify model.
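A minimal sanity check (a sketch, assuming the `train_dataset` built above): each element should be a pair of identical binarized batches.

```python
# Pull one batch and confirm the (example, example) pairing.
x, y = next(iter(train_dataset))
print(x.shape, y.shape)                      # e.g. (256, 28, 28, 1) for both
print(bool(tf.reduce_all(tf.equal(x, y))))   # True: input and target are the same images
```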
###Code
input_shape = datasets_info.features['image'].shape
encoded_size = 16
base_depth = 32
prior = tfd.Independent(tfd.Normal(loc=tf.zeros(encoded_size), scale=1),
reinterpreted_batch_ndims=1)
encoder = tfk.Sequential([
tfkl.InputLayer(input_shape=input_shape),
tfkl.Lambda(lambda x: tf.cast(x, tf.float32) - 0.5),
tfkl.Conv2D(base_depth, 5, strides=1,
padding='same', activation=tf.nn.leaky_relu),
tfkl.Conv2D(base_depth, 5, strides=2,
padding='same', activation=tf.nn.leaky_relu),
tfkl.Conv2D(2 * base_depth, 5, strides=1,
padding='same', activation=tf.nn.leaky_relu),
tfkl.Conv2D(2 * base_depth, 5, strides=2,
padding='same', activation=tf.nn.leaky_relu),
tfkl.Conv2D(4 * encoded_size, 7, strides=1,
padding='valid', activation=tf.nn.leaky_relu),
tfkl.Flatten(),
tfkl.Dense(tfpl.MultivariateNormalTriL.params_size(encoded_size),
activation=None),
tfpl.MultivariateNormalTriL(
encoded_size,
activity_regularizer=tfpl.KLDivergenceRegularizer(prior)),
])
decoder = tfk.Sequential([
tfkl.InputLayer(input_shape=[encoded_size]),
tfkl.Reshape([1, 1, encoded_size]),
tfkl.Conv2DTranspose(2 * base_depth, 7, strides=1,
padding='valid', activation=tf.nn.leaky_relu),
tfkl.Conv2DTranspose(2 * base_depth, 5, strides=1,
padding='same', activation=tf.nn.leaky_relu),
tfkl.Conv2DTranspose(2 * base_depth, 5, strides=2,
padding='same', activation=tf.nn.leaky_relu),
tfkl.Conv2DTranspose(base_depth, 5, strides=1,
padding='same', activation=tf.nn.leaky_relu),
tfkl.Conv2DTranspose(base_depth, 5, strides=2,
padding='same', activation=tf.nn.leaky_relu),
tfkl.Conv2DTranspose(base_depth, 5, strides=1,
padding='same', activation=tf.nn.leaky_relu),
tfkl.Conv2D(filters=1, kernel_size=5, strides=1,
padding='same', activation=None),
tfkl.Flatten(),
tfpl.IndependentBernoulli(input_shape, tfd.Bernoulli.logits),
])
vae = tfk.Model(inputs=encoder.inputs,
outputs=decoder(encoder.outputs[0]))
###Output
_____no_output_____
###Markdown
Do inference.
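For reference, `negloglik` below supplies only the reconstruction term of the objective, while the `KLDivergenceRegularizer` attached to the encoder's output layer adds the KL term, so the quantity being minimized is (up to a single-sample Monte Carlo approximation of the expectation) the negative ELBO:

$$-\text{ELBO}(x) = -\,\mathbb{E}_{z \sim q_\phi(z|x)}\left[\log p_\theta(x|z)\right] + \mathrm{KL}\left(q_\phi(z|x)\,\|\,p(z)\right)$$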
###Code
negloglik = lambda x, rv_x: -rv_x.log_prob(x)
vae.compile(optimizer=tf.optimizers.Adam(learning_rate=1e-3),
loss=negloglik)
_ = vae.fit(train_dataset,
epochs=15,
validation_data=eval_dataset)
###Output
Epoch 1/15
235/235 [==============================] - 14s 61ms/step - loss: 206.5541 - val_loss: 163.1924
Epoch 2/15
235/235 [==============================] - 14s 59ms/step - loss: 151.1891 - val_loss: 143.6748
Epoch 3/15
235/235 [==============================] - 14s 58ms/step - loss: 141.3275 - val_loss: 137.9188
Epoch 4/15
235/235 [==============================] - 14s 58ms/step - loss: 136.7453 - val_loss: 133.2726
Epoch 5/15
235/235 [==============================] - 14s 58ms/step - loss: 132.3803 - val_loss: 131.8343
Epoch 6/15
235/235 [==============================] - 14s 58ms/step - loss: 129.2451 - val_loss: 127.1935
Epoch 7/15
235/235 [==============================] - 14s 59ms/step - loss: 126.0975 - val_loss: 123.6789
Epoch 8/15
235/235 [==============================] - 14s 58ms/step - loss: 124.0565 - val_loss: 122.5058
Epoch 9/15
235/235 [==============================] - 14s 58ms/step - loss: 122.9974 - val_loss: 121.9544
Epoch 10/15
235/235 [==============================] - 14s 58ms/step - loss: 121.7349 - val_loss: 120.8735
Epoch 11/15
235/235 [==============================] - 14s 58ms/step - loss: 121.0856 - val_loss: 120.1340
Epoch 12/15
235/235 [==============================] - 14s 58ms/step - loss: 120.2232 - val_loss: 121.3554
Epoch 13/15
235/235 [==============================] - 14s 58ms/step - loss: 119.8123 - val_loss: 119.2351
Epoch 14/15
235/235 [==============================] - 14s 58ms/step - loss: 119.2685 - val_loss: 118.2133
Epoch 15/15
235/235 [==============================] - 14s 59ms/step - loss: 118.8895 - val_loss: 119.4771
###Markdown
Look Ma, No ~~Hands~~Tensors!
###Code
# We'll just examine ten random digits.
x = next(iter(eval_dataset))[0][:10]
xhat = vae(x)
assert isinstance(xhat, tfd.Distribution)
#@title Image Plot Util
import matplotlib.pyplot as plt
def display_imgs(x, y=None):
if not isinstance(x, (np.ndarray, np.generic)):
x = np.array(x)
plt.ioff()
n = x.shape[0]
fig, axs = plt.subplots(1, n, figsize=(n, 1))
if y is not None:
fig.suptitle(np.argmax(y, axis=1))
for i in range(n):
axs.flat[i].imshow(x[i].squeeze(), interpolation='none', cmap='gray')
axs.flat[i].axis('off')
plt.show()
plt.close()
plt.ion()
print('Originals:')
display_imgs(x)
print('Decoded Random Samples:')
display_imgs(xhat.sample())
print('Decoded Modes:')
display_imgs(xhat.mode())
print('Decoded Means:')
display_imgs(xhat.mean())
# Now, let's generate ten never-before-seen digits.
z = prior.sample(10)
xtilde = decoder(z)
assert isinstance(xtilde, tfd.Distribution)
print('Randomly Generated Samples:')
display_imgs(xtilde.sample())
print('Randomly Generated Modes:')
display_imgs(xtilde.mode())
print('Randomly Generated Means:')
display_imgs(xtilde.mean())
###Output
Randomly Generated Samples:
###Markdown
Copyright 2019 The TensorFlow Probability Authors. Licensed under the Apache License, Version 2.0 (the "License");
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" }
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
TFP Probabilistic Layers: Variational Auto Encoder. In this example we show how to fit a Variational Autoencoder using TFP's "probabilistic layers." Dependencies & Prerequisites
###Code
#@title Import { display-mode: "form" }
import numpy as np
import tensorflow.compat.v2 as tf
tf.enable_v2_behavior()
import tensorflow_datasets as tfds
import tensorflow_probability as tfp
tfk = tf.keras
tfkl = tf.keras.layers
tfpl = tfp.layers
tfd = tfp.distributions
###Output
_____no_output_____
|
trial_notebooks/LR_Range_Test_DSResNet.ipynb | ###Markdown
Download the Dataset. Download the dataset from this link: https://www.kaggle.com/shanwizard/modest-museum-dataset Dataset Description. A description of the contents of the dataset can be found here: https://shan18.github.io/MODEST-Museum-Dataset Mount Google Drive (works only on Google Colab). To run the notebook on Google Colab, upload the dataset into your Google Drive and execute the two cells below.
###Code
from google.colab import drive
drive.mount('/content/gdrive')
###Output
Go to this URL in a browser: https://accounts.google.com/o/oauth2/auth?client_id=947318989803-6bn6qk8qdgf4n4g3pfee6491hc0brc4i.apps.googleusercontent.com&redirect_uri=urn%3aietf%3awg%3aoauth%3a2.0%3aoob&response_type=code&scope=email%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdocs.test%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive.photos.readonly%20https%3a%2f%2fwww.googleapis.com%2fauth%2fpeopleapi.readonly
Enter your authorization code:
··········
Mounted at /content/gdrive
###Markdown
Unzip the data from Google Drive into Colab
###Code
!unzip -qq '/content/gdrive/My Drive/modest_museum_dataset.zip' -d .
###Output
_____no_output_____
###Markdown
Check GPU
###Code
!nvidia-smi
###Output
Mon May 25 03:45:06 2020
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 440.82 Driver Version: 418.67 CUDA Version: 10.1 |
|-------------------------------+----------------------+----------------------+
| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
|===============================+======================+======================|
| 0 Tesla P100-PCIE... Off | 00000000:00:04.0 Off | 0 |
| N/A 42C P0 27W / 250W | 0MiB / 16280MiB | 0% Default |
+-------------------------------+----------------------+----------------------+
+-----------------------------------------------------------------------------+
| Processes: GPU Memory |
| GPU PID Type Process name Usage |
|=============================================================================|
| No running processes found |
+-----------------------------------------------------------------------------+
###Markdown
Install Packages
###Code
!pip install -r requirements.txt
###Output
_____no_output_____
###Markdown
Import Packages
###Code
%matplotlib inline
import random
import matplotlib.pyplot as plt
import torch
from tensornet.data import MODESTMuseum
from tensornet.utils import initialize_cuda, plot_metric
from tensornet.model import DSResNet
from tensornet.model.optimizer import sgd
from tensornet.engine import LRFinder
from tensornet.engine.ops import ModelCheckpoint, TensorBoard
from tensornet.engine.ops.lr_scheduler import reduce_lr_on_plateau
from loss import RmseBceDiceLoss, SsimDiceLoss
from learner import ModelLearner
###Output
_____no_output_____
###Markdown
Set Seed and Get GPU Availability
###Code
# Initialize CUDA and set random seed
cuda, device = initialize_cuda(1)
###Output
GPU Available? True
###Markdown
Data Fetch
###Code
DATASET_PATH = 'modest_museum_dataset'
# Common parameter values for the dataset
dataset_params = dict(
cuda=cuda,
num_workers=16,
path=DATASET_PATH,
hue_saturation_prob=0.25,
contrast_prob=0.25,
)
%%time
# Create dataset
dataset = MODESTMuseum(
train_batch_size=256,
val_batch_size=256,
resize=(96, 96),
**dataset_params
)
# Create train data loader
train_loader = dataset.loader(train=True)
# Create val data loader
val_loader = dataset.loader(train=False)
###Output
CPU times: user 8 s, sys: 578 ms, total: 8.58 s
Wall time: 8.58 s
###Markdown
Model Architecture and Summary
###Code
%%time
model = DSResNet().to(device)
model.summary({
k: v for k, v in dataset.image_size.items() if k in ['bg', 'bg_fg']
})
###Output
----------------------------------------------------------------
Layer (type) Output Shape Param #
================================================================
Conv2d-1 [-1, 16, 224, 224] 64
Conv2d-2 [-1, 16, 224, 224] 448
ReLU-3 [-1, 16, 224, 224] 0
BatchNorm2d-4 [-1, 16, 224, 224] 32
Conv2d-5 [-1, 16, 224, 224] 2,320
ReLU-6 [-1, 16, 224, 224] 0
BatchNorm2d-7 [-1, 16, 224, 224] 32
DoubleConvBlock-8 [-1, 16, 224, 224] 0
MaxPool2d-9 [-1, 16, 112, 112] 0
ResEncoderBlock-10 [[-1, 16, 112, 112], [-1, 16, 224, 224]] 0
Conv2d-11 [-1, 32, 112, 112] 544
Conv2d-12 [-1, 32, 112, 112] 4,640
ReLU-13 [-1, 32, 112, 112] 0
BatchNorm2d-14 [-1, 32, 112, 112] 64
Conv2d-15 [-1, 32, 112, 112] 9,248
ReLU-16 [-1, 32, 112, 112] 0
BatchNorm2d-17 [-1, 32, 112, 112] 64
DoubleConvBlock-18 [-1, 32, 112, 112] 0
MaxPool2d-19 [-1, 32, 56, 56] 0
ResEncoderBlock-20 [[-1, 32, 56, 56], [-1, 32, 112, 112]] 0
Conv2d-21 [-1, 16, 224, 224] 64
Conv2d-22 [-1, 16, 224, 224] 448
ReLU-23 [-1, 16, 224, 224] 0
BatchNorm2d-24 [-1, 16, 224, 224] 32
Conv2d-25 [-1, 16, 224, 224] 2,320
ReLU-26 [-1, 16, 224, 224] 0
BatchNorm2d-27 [-1, 16, 224, 224] 32
DoubleConvBlock-28 [-1, 16, 224, 224] 0
MaxPool2d-29 [-1, 16, 112, 112] 0
ResEncoderBlock-30 [[-1, 16, 112, 112], [-1, 16, 224, 224]] 0
Conv2d-31 [-1, 32, 112, 112] 544
Conv2d-32 [-1, 32, 112, 112] 4,640
ReLU-33 [-1, 32, 112, 112] 0
BatchNorm2d-34 [-1, 32, 112, 112] 64
Conv2d-35 [-1, 32, 112, 112] 9,248
ReLU-36 [-1, 32, 112, 112] 0
BatchNorm2d-37 [-1, 32, 112, 112] 64
DoubleConvBlock-38 [-1, 32, 112, 112] 0
MaxPool2d-39 [-1, 32, 56, 56] 0
ResEncoderBlock-40 [[-1, 32, 56, 56], [-1, 32, 112, 112]] 0
Conv2d-41 [-1, 32, 56, 56] 2,080
Conv2d-42 [-1, 64, 56, 56] 2,112
Conv2d-43 [-1, 64, 56, 56] 18,496
ReLU-44 [-1, 64, 56, 56] 0
BatchNorm2d-45 [-1, 64, 56, 56] 128
Conv2d-46 [-1, 64, 56, 56] 36,928
ReLU-47 [-1, 64, 56, 56] 0
BatchNorm2d-48 [-1, 64, 56, 56] 128
DoubleConvBlock-49 [-1, 64, 56, 56] 0
MaxPool2d-50 [-1, 64, 28, 28] 0
ResEncoderBlock-51 [[-1, 64, 28, 28], [-1, 64, 56, 56]] 0
Conv2d-52 [-1, 128, 28, 28] 8,320
Conv2d-53 [-1, 128, 28, 28] 73,856
ReLU-54 [-1, 128, 28, 28] 0
BatchNorm2d-55 [-1, 128, 28, 28] 256
Conv2d-56 [-1, 128, 28, 28] 147,584
ReLU-57 [-1, 128, 28, 28] 0
BatchNorm2d-58 [-1, 128, 28, 28] 256
DoubleConvBlock-59 [-1, 128, 28, 28] 0
MaxPool2d-60 [-1, 128, 14, 14] 0
ResEncoderBlock-61 [[-1, 128, 14, 14], [-1, 128, 28, 28]] 0
Conv2d-62 [-1, 256, 14, 14] 33,024
Conv2d-63 [-1, 256, 14, 14] 295,168
ReLU-64 [-1, 256, 14, 14] 0
BatchNorm2d-65 [-1, 256, 14, 14] 512
Conv2d-66 [-1, 256, 14, 14] 590,080
ReLU-67 [-1, 256, 14, 14] 0
BatchNorm2d-68 [-1, 256, 14, 14] 512
DoubleConvBlock-69 [-1, 256, 14, 14] 0
MaxPool2d-70 [-1, 256, 7, 7] 0
ResEncoderBlock-71 [[-1, 256, 7, 7], [-1, 256, 14, 14]] 0
Conv2d-72 [-1, 512, 7, 7] 131,584
Conv2d-73 [-1, 512, 7, 7] 1,180,160
ReLU-74 [-1, 512, 7, 7] 0
BatchNorm2d-75 [-1, 512, 7, 7] 1,024
Conv2d-76 [-1, 512, 7, 7] 2,359,808
ReLU-77 [-1, 512, 7, 7] 0
BatchNorm2d-78 [-1, 512, 7, 7] 1,024
DoubleConvBlock-79 [-1, 512, 7, 7] 0
MaxPool2d-80 [-1, 512, 3, 3] 0
ResEncoderBlock-81 [[-1, 512, 3, 3], [-1, 512, 7, 7]] 0
Conv2d-82 [-1, 256, 7, 7] 131,328
Conv2d-83 [-1, 256, 14, 14] 131,328
Conv2d-84 [-1, 256, 14, 14] 1,179,904
ReLU-85 [-1, 256, 14, 14] 0
BatchNorm2d-86 [-1, 256, 14, 14] 512
Conv2d-87 [-1, 256, 14, 14] 590,080
ReLU-88 [-1, 256, 14, 14] 0
BatchNorm2d-89 [-1, 256, 14, 14] 512
DoubleConvBlock-90 [-1, 256, 14, 14] 0
ResDecoderBlock-91 [-1, 256, 14, 14] 0
Conv2d-92 [-1, 128, 14, 14] 32,896
Conv2d-93 [-1, 128, 28, 28] 32,896
Conv2d-94 [-1, 128, 28, 28] 295,040
ReLU-95 [-1, 128, 28, 28] 0
BatchNorm2d-96 [-1, 128, 28, 28] 256
Conv2d-97 [-1, 128, 28, 28] 147,584
ReLU-98 [-1, 128, 28, 28] 0
BatchNorm2d-99 [-1, 128, 28, 28] 256
DoubleConvBlock-100 [-1, 128, 28, 28] 0
ResDecoderBlock-101 [-1, 128, 28, 28] 0
Conv2d-102 [-1, 64, 28, 28] 8,256
Conv2d-103 [-1, 64, 56, 56] 8,256
Conv2d-104 [-1, 64, 56, 56] 73,792
ReLU-105 [-1, 64, 56, 56] 0
BatchNorm2d-106 [-1, 64, 56, 56] 128
Conv2d-107 [-1, 64, 56, 56] 36,928
ReLU-108 [-1, 64, 56, 56] 0
BatchNorm2d-109 [-1, 64, 56, 56] 128
DoubleConvBlock-110 [-1, 64, 56, 56] 0
ResDecoderBlock-111 [-1, 64, 56, 56] 0
Conv2d-112 [-1, 32, 56, 56] 2,080
Conv2d-113 [-1, 32, 112, 112] 2,080
Conv2d-114 [-1, 32, 112, 112] 2,080
Conv2d-115 [-1, 32, 112, 112] 18,464
ReLU-116 [-1, 32, 112, 112] 0
BatchNorm2d-117 [-1, 32, 112, 112] 64
Conv2d-118 [-1, 32, 112, 112] 9,248
ReLU-119 [-1, 32, 112, 112] 0
BatchNorm2d-120 [-1, 32, 112, 112] 64
DoubleConvBlock-121 [-1, 32, 112, 112] 0
ResDecoderBlock-122 [-1, 32, 112, 112] 0
Conv2d-123 [-1, 16, 112, 112] 528
Conv2d-124 [-1, 16, 224, 224] 528
Conv2d-125 [-1, 16, 224, 224] 528
Conv2d-126 [-1, 16, 224, 224] 4,624
ReLU-127 [-1, 16, 224, 224] 0
BatchNorm2d-128 [-1, 16, 224, 224] 32
Conv2d-129 [-1, 16, 224, 224] 2,320
ReLU-130 [-1, 16, 224, 224] 0
BatchNorm2d-131 [-1, 16, 224, 224] 32
DoubleConvBlock-132 [-1, 16, 224, 224] 0
ResDecoderBlock-133 [-1, 16, 224, 224] 0
Conv2d-134 [-1, 1, 224, 224] 17
Conv2d-135 [-1, 256, 7, 7] 131,328
Conv2d-136 [-1, 256, 14, 14] 131,328
Conv2d-137 [-1, 256, 14, 14] 1,179,904
ReLU-138 [-1, 256, 14, 14] 0
BatchNorm2d-139 [-1, 256, 14, 14] 512
Conv2d-140 [-1, 256, 14, 14] 590,080
ReLU-141 [-1, 256, 14, 14] 0
BatchNorm2d-142 [-1, 256, 14, 14] 512
DoubleConvBlock-143 [-1, 256, 14, 14] 0
ResDecoderBlock-144 [-1, 256, 14, 14] 0
Conv2d-145 [-1, 128, 14, 14] 32,896
Conv2d-146 [-1, 128, 28, 28] 32,896
Conv2d-147 [-1, 128, 28, 28] 295,040
ReLU-148 [-1, 128, 28, 28] 0
BatchNorm2d-149 [-1, 128, 28, 28] 256
Conv2d-150 [-1, 128, 28, 28] 147,584
ReLU-151 [-1, 128, 28, 28] 0
BatchNorm2d-152 [-1, 128, 28, 28] 256
DoubleConvBlock-153 [-1, 128, 28, 28] 0
ResDecoderBlock-154 [-1, 128, 28, 28] 0
Conv2d-155 [-1, 64, 28, 28] 8,256
Conv2d-156 [-1, 64, 56, 56] 8,256
Conv2d-157 [-1, 64, 56, 56] 73,792
ReLU-158 [-1, 64, 56, 56] 0
BatchNorm2d-159 [-1, 64, 56, 56] 128
Conv2d-160 [-1, 64, 56, 56] 36,928
ReLU-161 [-1, 64, 56, 56] 0
BatchNorm2d-162 [-1, 64, 56, 56] 128
DoubleConvBlock-163 [-1, 64, 56, 56] 0
ResDecoderBlock-164 [-1, 64, 56, 56] 0
Conv2d-165 [-1, 32, 56, 56] 2,080
Conv2d-166 [-1, 32, 112, 112] 2,080
Conv2d-167 [-1, 32, 112, 112] 2,080
Conv2d-168 [-1, 32, 112, 112] 18,464
ReLU-169 [-1, 32, 112, 112] 0
BatchNorm2d-170 [-1, 32, 112, 112] 64
Conv2d-171 [-1, 32, 112, 112] 9,248
ReLU-172 [-1, 32, 112, 112] 0
BatchNorm2d-173 [-1, 32, 112, 112] 64
DoubleConvBlock-174 [-1, 32, 112, 112] 0
ResDecoderBlock-175 [-1, 32, 112, 112] 0
Conv2d-176 [-1, 16, 112, 112] 528
Conv2d-177 [-1, 16, 224, 224] 528
Conv2d-178 [-1, 16, 224, 224] 528
Conv2d-179 [-1, 16, 224, 224] 4,624
ReLU-180 [-1, 16, 224, 224] 0
BatchNorm2d-181 [-1, 16, 224, 224] 32
Conv2d-182 [-1, 16, 224, 224] 2,320
ReLU-183 [-1, 16, 224, 224] 0
BatchNorm2d-184 [-1, 16, 224, 224] 32
DoubleConvBlock-185 [-1, 16, 224, 224] 0
ResDecoderBlock-186 [-1, 16, 224, 224] 0
Conv2d-187 [-1, 1, 224, 224] 17
DSResNetv1-188 [[-1, 1, 224, 224], [-1, 1, 224, 224]] 0
================================================================
Total params: 10,343,490
Trainable params: 10,343,490
Non-trainable params: 0
----------------------------------------------------------------
Input size (MB): 86436.00
Forward/backward pass size (MB): 3193797.28
Params size (MB): 39.46
Estimated Total Size (MB): 3280272.75
----------------------------------------------------------------
CPU times: user 2.46 s, sys: 1.03 s, total: 3.49 s
Wall time: 3.49 s
###Markdown
Find Initial Learning Rate. Multiple LR range tests are run on the model to find the best initial learning rate. Range Test 1
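For intuition, the sketch below shows the usual LR range-test recipe (a simplified stand-in, not the `tensornet` implementation, and it assumes a loader that yields plain `(inputs, targets)` pairs): sweep the learning rate exponentially from `start_lr` to `end_lr`, record the loss at each step, and pick a rate just below where the loss starts to diverge.

```python
import torch

def lr_range_test(model, optimizer, criterion, loader, device,
                  start_lr=1e-7, end_lr=5.0, num_iter=400):
    """Exponentially sweep the learning rate and record (lr, loss) pairs."""
    gamma = (end_lr / start_lr) ** (1.0 / num_iter)   # per-step LR multiplier
    lr, history, data_iter = start_lr, [], iter(loader)
    model.train()
    for _ in range(num_iter):
        try:
            inputs, targets = next(data_iter)
        except StopIteration:                  # restart the loader if it runs out
            data_iter = iter(loader)
            inputs, targets = next(data_iter)
        inputs, targets = inputs.to(device), targets.to(device)
        for group in optimizer.param_groups:   # apply the current learning rate
            group['lr'] = lr
        optimizer.zero_grad()
        loss = criterion(model(inputs), targets)
        loss.backward()
        optimizer.step()
        history.append((lr, loss.item()))
        lr *= gamma                            # exponential increase
    return history  # plot loss vs. lr; the best initial lr sits just before divergence
```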
###Code
model = DSResNet().to(device) # Create model
optimizer = sgd(model, 1e-7, 0.9) # Create optimizer
criterion = RmseBceDiceLoss() # Create loss function
# Find learning rate
lr_finder = LRFinder(model, optimizer, criterion, device=device)
lr_finder.range_test(train_loader, 400, learner=ModelLearner, start_lr=1e-7, end_lr=5, step_mode='exp')
# Get best initial learning rate
initial_lr = lr_finder.best_lr
# Print learning rate and loss
print('Learning Rate:', initial_lr)
print('Loss:', lr_finder.best_metric)
# Plot learning rate vs loss
lr_finder.plot()
# Reset graph
lr_finder.reset()
###Output
Learning Rate: 0.2804706226423061
Loss: 0.39172855019569397
###Markdown
Range Test 2
###Code
model = DSResNet().to(device) # Create model
optimizer = sgd(model, 1e-5, 0.9) # Create optimizer
criterion = RmseBceDiceLoss() # Create loss function
# Find learning rate
lr_finder = LRFinder(model, optimizer, criterion, device=device)
lr_finder.range_test(train_loader, 400, learner=ModelLearner, start_lr=1e-5, end_lr=1, step_mode='exp')
# Get best initial learning rate
initial_lr = lr_finder.best_lr
# Print learning rate and loss
print('Learning Rate:', initial_lr)
print('Loss:', lr_finder.best_metric)
# Plot learning rate vs loss
lr_finder.plot()
# Reset graph
lr_finder.reset()
###Output
Learning Rate: 0.10901844923851275
Loss: 0.29762303829193115
###Markdown
Range Test 3
###Code
model = DSResNet().to(device) # Create model
optimizer = sgd(model, 1e-4, 0.9) # Create optimizer
criterion = RmseBceDiceLoss() # Create loss function
# Find learning rate
lr_finder = LRFinder(model, optimizer, criterion, device=device)
lr_finder.range_test(train_loader, 200, learner=ModelLearner, start_lr=1e-4, end_lr=10, step_mode='exp')
# Get best initial learning rate
initial_lr = lr_finder.best_lr
# Print learning rate and loss
print('Learning Rate:', initial_lr)
print('Loss:', lr_finder.best_metric)
# Plot learning rate vs loss
lr_finder.plot()
# Reset graph
lr_finder.reset()
###Output
Learning Rate: 0.4466835921509631
Loss: 0.3652718663215637
###Markdown
Range Test 4
###Code
model = DSResNet().to(device) # Create model
optimizer = sgd(model, 1e-5, 0.9) # Create optimizer
criterion = RmseBceDiceLoss() # Create loss function
# Find learning rate
lr_finder = LRFinder(model, optimizer, criterion, device=device)
lr_finder.range_test(train_loader, 100, learner=ModelLearner, start_lr=1e-5, end_lr=2, step_mode='exp')
# Get best initial learning rate
initial_lr = lr_finder.best_lr
# Print learning rate and loss
print('Learning Rate:', initial_lr)
print('Loss:', lr_finder.best_metric)
# Plot learning rate vs loss
lr_finder.plot()
# Reset graph
lr_finder.reset()
###Output
Learning Rate: 0.2511047316821864
Loss: 0.8588053584098816
###Markdown
Range Test 5
###Code
model = DSResNet().to(device) # Create model
optimizer = sgd(model, 1e-7, 0.9) # Create optimizer
criterion = RmseBceDiceLoss() # Create loss function
# Find learning rate
lr_finder = LRFinder(model, optimizer, criterion, device=device)
lr_finder.range_test(train_loader, 400, learner=ModelLearner, start_lr=1e-7, end_lr=10, step_mode='exp')
# Get best initial learning rate
initial_lr = lr_finder.best_lr
# Print learning rate and loss
print('Learning Rate:', initial_lr)
print('Loss:', lr_finder.best_metric)
# Plot learning rate vs loss
lr_finder.plot()
# Reset graph
lr_finder.reset()
###Output
_____no_output_____ |
6_pipeline_prototyping_2.ipynb | ###Markdown
Initialization
###Code
cm = np.load('serialize/camera_matrix.npy')
dc = np.load('serialize/dist_coefs.npy')
CANVAS_SZ = (500, 1500)
OFFSET_X = 100
OFFSET_Y = 0
straight_images_files = ('test_images/straight_lines1.jpg', 'test_images/straight_lines2.jpg')
straight_images = [lanelines.open_image(f) for f in straight_images_files]
straight_images_undist = [cv2.undistort(im, cm, dc) for im in straight_images]
warp_src = roadplane.define_flat_plane_on_road(straight_images_undist, x_offset=0)
warp_src[1, 0] += 8 # <- a hack
warp_dst = lanelines.get_rectangle_corners_in_image(CANVAS_SZ, offset_x=OFFSET_X, offset_y=OFFSET_Y)
M = cv2.getPerspectiveTransform(warp_src, warp_dst)
Minv = cv2.getPerspectiveTransform(warp_dst, warp_src)
test_images = [lanelines.open_image(f) for f in glob('test_images/*.jpg')]
test_images_undist = [cv2.undistort(im, cm, dc) for im in test_images]
warped_images = [cv2.warpPerspective(im, M, CANVAS_SZ, flags=cv2.INTER_LINEAR) for im in test_images_undist]
###Output
_____no_output_____
###Markdown
Functions
###Code
def convert_to_HLS(im):
return cv2.cvtColor(im, cv2.COLOR_RGB2HLS)
def weighted_sum_images(images, weights):
assert len(weights) == len(images)
nonzero_indices = np.nonzero(weights)[0]
if len(nonzero_indices) < 2:
raise Exception('At least 2 non-zero weights are required')
first, second = nonzero_indices[:2]
res = cv2.addWeighted(images[first], weights[first], images[second], weights[second], 0)
if len(nonzero_indices) == 2:
return res
for i in nonzero_indices[2:]:
res = cv2.addWeighted(res, 1., images[i], weights[i], 0)
return res
def bitwise_or(images):
assert len(images) > 0
if len(images) == 1:
return images[0]
res = cv2.bitwise_or(images[0], images[1])
if len(images) == 2:
return res
for im in images[2:]:
res = cv2.bitwise_or(res, im)
return res
def weighted_HLS(H, L, S, weights):
return weighted_sum_images([H, L, S], weights)
def add_contrast(im, gain):
gained = gain * im
return lanelines.scale_image_255(gained)
def sobel_combo(im):
sobelx = lanelines.sobel_x(im)
sobely = lanelines.sobel_y(im)
magnitude = lanelines.sobel_magnitude(sobelx, sobely)
direction = lanelines.sobel_direction(sobelx, sobely)
return lanelines.scale_image_255(magnitude), lanelines.scale_image_255(direction)
def scaled_sobel_x(im):
return lanelines.scale_image_255( lanelines.sobel_x(im) )
def morphological_close(im, kernel=(3, 3)):
return cv2.morphologyEx(im, cv2.MORPH_CLOSE, kernel)
def get_hls_channels(im):
hls = convert_to_HLS(im)
H = hls[:,:,0]
L = hls[:,:,1]
S = hls[:,:,2]
return H, L, S
def gray(im):
return cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
def gather_thresholded_images(*images):
return images
###Output
_____no_output_____
###Markdown
Pipeline
###Code
func_dict = {
'gray': gray,
'get_HLS': get_hls_channels,
'weighted_HLS_sum': weighted_HLS,
'threshold_gray': lanelines.mask_threashold_range,
'threshold_S': lanelines.mask_threashold_range,
'threshold_H': lanelines.mask_threashold_range,
'threshold_wHLS': lanelines.mask_threashold_range,
'apply_sobel_x_to_S': scaled_sobel_x,
'threshold_S_sobel_x': lanelines.mask_threashold_range,
'median_blur_tssx': cv2.medianBlur,
'close_thresholded_S': morphological_close,
'gather_thresholded_images_for_ws': gather_thresholded_images,
'gather_thresholded_images': gather_thresholded_images,
'combine_thresholds_weighted': weighted_sum_images,
'combine_thresholds_bitwise_or': bitwise_or,
}
func_io = {
'gray': ('image', 'image_gray'),
'get_HLS': ('image', ('H', 'L', 'S')),
'weighted_HLS_sum': (('H', 'L', 'S', 'HLS_weights'), 'weighted_HLS'),
'threshold_gray': (('image_gray', 'gray_from', 'gray_to'), 'thresholded_gray'),
'threshold_S': (('S', 'S_from', 'S_to'), 'thresholded_S'),
'threshold_H': (('H', 'H_from', 'H_to'), 'thresholded_H'),
'threshold_wHLS': (('weighted_HLS', 'wHLS_from', 'wHLS_to'), 'thresholded_wHLS'),
'apply_sobel_x_to_S': ('S', 'S_sobel_x'),
'threshold_S_sobel_x': (('S_sobel_x', 'S_sobel_x_from', 'S_sobel_x_to'), 'thresholded_S_sobel_x'),
'median_blur_tssx': (('thresholded_S_sobel_x', 'tssx_median_kernel'), 'tssx_median'),
'close_thresholded_S': (('thresholded_S', 'close_kernel_for_tS'), 'ts_closed'),
'gather_thresholded_images_for_ws': (
('ts_closed', 'thresholded_S_sobel_x', 'thresholded_S', 'thresholded_wHLS', 'thresholded_gray'),
'thresholded_images_for_ws'
),
'gather_thresholded_images' : (
('thresholded_S', 'thresholded_wHLS', 'thresholded_S_sobel_x', 'tssx_median', 'ts_closed', 'thresholded_gray'),
'thresholded_images'
),
'combine_thresholds_weighted': (('thresholded_images_for_ws', 'thresholded_images_weights'), 'mega_image'),
'combine_thresholds_bitwise_or': ('thresholded_images', 'all_thresholds')
}
cg = CompGraph(func_dict, func_io)
params = {
'HLS_weights': [0, 0.4, 1.],
'gray_from': 210,
'gray_to': 255,
'S_from': 180,
'S_to': 255,
'H_from': 95,
'H_to': 100,
'wHLS_from': 180, # 200
'wHLS_to': 255,
'S_sobel_x_from': 20,
'S_sobel_x_to': 240, # 100
'tssx_median_kernel': 5,
'close_kernel_for_tS': (3, 3),
'thresholded_images_weights': [
#'ts_closed', 'thresholded_S_sobel_x', 'thresholded_S', 'thresholded_wHLS', thresholded_gray
1.,
1.,
0.9,
0.8,
0.9
]
}
runner = CompGraphRunner(cg, frozen_tokens=params)
runner.run(image=warped_images[1])
nxpd.draw(runner.token_manager.to_networkx())
###Output
_____no_output_____
###Markdown
Experiments
###Code
def show_intermediate_images(im, runner):
tokens = (
'thresholded_gray',
'thresholded_S',
'thresholded_H',
'thresholded_wHLS',
'thresholded_S_sobel_x',
'tssx_median',
'ts_closed'
)
runner.run(image=im)
plt.figure(figsize=(15, 15))
for i, tk in enumerate(tokens):
plt.subplot(1, len(tokens), i+1)
plt.imshow( runner[tk] )
_ = plt.axis('off')
plt.title(tk)
plt.tight_layout()
plt.figure(figsize=(20, 5))
for i, im in enumerate(warped_images):
runner.run(image=im)
plt.subplot(1, 8, i+1)
plt.imshow( runner['mega_image'])
_ = plt.axis('off')
plt.figure(figsize=(20, 5))
for i, im in enumerate(warped_images):
runner.run(image=im)
plt.subplot(1, 8, i+1)
plt.imshow( runner['all_thresholds'])
_ = plt.axis('off')
def lane_cells(im, nx, ny, threshold=20):
cells = divide_image_to_cells(im, nx, ny)
res = []
for i in range(ny):
idx_from = i * nx
idx_to = i * nx + nx
rowcells = cells[idx_from:idx_to]
sums = np.array([np.sum(cell) for cell in rowcells])
max_j = np.argmax(sums)
if sums[max_j] > threshold:
res.append( (i, max_j) )
return np.array(res)
def lane_cells_real_coords(lanecells, im, nx, ny):
rows, cols= im.shape[:2]
cell_sz_x = cols // nx
cell_sz_y = rows // ny
points = np.zeros_like(lanecells)
for i in range(len(lanecells)):
idx_row, idx_col = lanecells[i, :]
x = idx_col * cell_sz_x + cell_sz_x / 2
y = idx_row * cell_sz_y + cell_sz_y / 2
points[i, :] = (x, y)
return points
def divide_image_to_cells(im, nx, ny):
rows, cols= im.shape[:2]
assert rows % ny == 0
assert cols % nx == 0
offset_x = cols // nx
offset_y = rows // ny
cells = []
for j in range(ny):
for i in range(nx):
x_from = i * offset_x
x_to = x_from + offset_x
y_from = j * offset_y
y_to = y_from + offset_y
cell = im[y_from:y_to, x_from:x_to]
cells.append(cell)
return cells
def show_cells(cells, nx, ny):
for i, cell in enumerate(cells):
plt.subplot(ny, nx, i+1)
plt.axis('off')
plt.imshow(cell)
def split_image_lr(im):
cols = im.shape[1]
middle = cols // 2
return im[:, :middle], im[:, middle:]
def split_image_lr_and_show(im):
left, right = split_image_lr(im)
plt.figure()
plt.subplot(1, 2, 1)
plt.axis('off')
plt.imshow(left)
plt.subplot(1, 2, 2)
plt.axis('off')
plt.imshow(right)
def get_polynomial_2(coefs):
a, b, c = coefs
def f(y):
return a * (y**2) + b * y + c
return f
def do_lane_detection(im, starting_token='all_thresholds'):
nx = 50
ny = 100
runner.run(image=im)
left, right = split_image_lr( runner[starting_token] )
target_cells_left = lane_cells(left, nx, ny, threshold=70)
target_cells_coords_left = lane_cells_real_coords(target_cells_left, left, nx, ny)
p_coefs_left = np.polyfit(target_cells_coords_left[:, 1], target_cells_coords_left[:, 0], 2)
target_cells_right = lane_cells(right, nx, ny, threshold=70)
target_cells_coords_right = lane_cells_real_coords(target_cells_right, right, nx, ny)
target_cells_coords_right[:, 0] += left.shape[1]
p_coefs_right = np.polyfit(target_cells_coords_right[:, 1], target_cells_coords_right[:, 0], 2)
# PLOTTING
poly_left = get_polynomial_2(p_coefs_left)
poly_right = get_polynomial_2(p_coefs_right)
plt.imshow( cv2.cvtColor(im, cv2.COLOR_BGR2RGB) )
plt.plot(target_cells_coords_left[:, 0], target_cells_coords_left[:, 1], 'yo')
plt.plot(target_cells_coords_right[:, 0], target_cells_coords_right[:, 1], 'yo')
poly_y = np.linspace(0, im.shape[0])
plt.plot(poly_left(poly_y), poly_y)
plt.plot(poly_right(poly_y), poly_y)
###Output
_____no_output_____
###Markdown
```python
common_a = np.mean([p_coefs_left[0], p_coefs_right[0]])
common_b = np.mean([p_coefs_left[1], p_coefs_right[1]])
poly_left = get_polynomial_2([common_a, common_b, p_coefs_left[-1]])
poly_right = get_polynomial_2([common_a, common_b, p_coefs_right[-1]])
```
###Code
plt.figure(figsize=(20, 5))
for i, im in enumerate(warped_images):
plt.subplot(1, 8, i+1)
_ = plt.axis('off')
do_lane_detection(im, 'all_thresholds')
plt.figure(figsize=(20, 5))
for i, im in enumerate(warped_images):
plt.subplot(1, 8, i+1)
_ = plt.axis('off')
do_lane_detection(im, 'mega_image')
###Output
_____no_output_____ |
module2/Follow_LS_DS10_232.ipynb | ###Markdown
Lambda School Data Science*Unit 2, Sprint 3, Module 2*--- Wrangle ML datasets 🍌 In today's lesson, we’ll work with a dataset of [3 Million Instacart Orders, Open Sourced](https://tech.instacart.com/3-million-instacart-orders-open-sourced-d40d29ead6f2)! Setup
###Code
# Download data
import requests
def download(url):
filename = url.split('/')[-1]
print(f'Downloading {url}')
r = requests.get(url)
with open(filename, 'wb') as f:
f.write(r.content)
print(f'Downloaded {filename}')
download('https://s3.amazonaws.com/instacart-datasets/instacart_online_grocery_shopping_2017_05_01.tar.gz')
# Uncompress data
import tarfile
tarfile.open('instacart_online_grocery_shopping_2017_05_01.tar.gz').extractall()
# Change directory to where the data was uncompressed
%cd instacart_2017_05_01
# Print the csv filenames
from glob import glob
for filename in glob('*.csv'):
print(filename)
###Output
departments.csv
products.csv
order_products__train.csv
aisles.csv
order_products__prior.csv
orders.csv
###Markdown
For each csv file, look at its shape & head
###Code
import pandas as pd
from IPython.display import display
def preview():
for filename in glob('*.csv'):
df = pd.read_csv(filename)
print(filename, df.shape)
display(df.head())
print('\n')
preview()
###Output
departments.csv (21, 2)
###Markdown
The original task was complex ... [The Kaggle competition said,](https://www.kaggle.com/c/instacart-market-basket-analysis/data):
> The dataset for this competition is a relational set of files describing customers' orders over time. The goal of the competition is to predict which products will be in a user's next order.
> orders.csv: This file tells to which set (prior, train, test) an order belongs. You are predicting reordered items only for the test set orders. Each row in the submission is an order_id from the test set, followed by product_id(s) predicted to be reordered.
> sample_submission.csv:
```
order_id,products
17,39276 29259
34,39276 29259
137,39276 29259
182,39276 29259
257,39276 29259
```
... but we can simplify! Simplify the question, from "Which products will be reordered?" (Multi-class, [multi-label](https://en.wikipedia.org/wiki/Multi-label_classification) classification) to **"Will customers reorder this one product?"** (Binary classification). Which product? How about **the most frequently ordered product?** Questions:
- What is the most frequently ordered product?
- How often is this product included in a customer's next order?
- Which customers have ordered this product before?
- How can we get a subset of data, just for these customers?
- What features can we engineer? We want to predict, will these customers reorder this product on their next order?
What was the most frequently ordered product?
###Code
prior = pd.read_csv('order_products__prior.csv')
prior['product_id'].mode()
prior['product_id'].value_counts()
train = pd.read_csv('order_products__train.csv')
train['product_id'].mode()
train['product_id'].value_counts()
products = pd.read_csv('products.csv')
products[products['product_id']==24852]
prior = pd.merge(prior, products, on='product_id')
###Output
_____no_output_____
###Markdown
How often are bananas included in a customer's next order?

There are [three sets of data](https://gist.github.com/jeremystan/c3b39d947d9b88b3ccff3147dbcf6c6b):

> "prior": orders prior to that users most recent order (3.2m orders)
> "train": training data supplied to participants (131k orders)
> "test": test data reserved for machine learning competitions (75k orders)

Customers' next orders are in the "train" and "test" sets. (The "prior" set has the orders prior to the most recent orders.)

We can't use the "test" set here, because we don't have its labels (only Kaggle & Instacart have them), so we don't know what products were bought in the "test" set orders.

So, we'll use the "train" set. It currently has one row per product_id and multiple rows per order_id. But we don't want that. Instead we want one row per order_id, with a binary column: "Did the order include bananas?"

Let's wrangle!

Technique 1
###Code
df = train.head(16).copy()
df['bananas'] = df['product_id'] == 24852
df.groupby('order_id')['bananas'].any()
train['bananas'] = train['product_id'] == 24852
train.groupby('order_id')['bananas'].any()
train_wrangled = train.groupby('order_id')['bananas'].any().reset_index()
target = 'bananas'
train_wrangled[target].value_counts(normalize=True)
###Output
_____no_output_____
###Markdown
Technique 2
###Code
df
# Group by order_id, get a list of product_ids for that order
df.groupby('order_id')['product_id'].apply(list)
# Group by order_id, get a list of product_ids for that order, check if that list includes bananas
def includes_bananas(product_ids):
return 24852 in list(product_ids)
df.groupby('order_id')['product_id'].apply(includes_bananas)
train = (train
.groupby('order_id')
.agg({'product_id': includes_bananas})
.reset_index()
.rename(columns={'product_id': 'bananas'}))
target = 'bananas'
train[target].value_counts(normalize=True)
###Output
_____no_output_____
###Markdown
Which customers have ordered this product before?

- Customers are identified by `user_id`
- Products are identified by `product_id`

Do we have a table with both these id's? (If not, how can we combine this information?)
###Code
preview()
###Output
departments.csv (21, 2)
###Markdown
Answer: No, we don't have a table with both these id's. But:

- `orders.csv` has `user_id` and `order_id`
- `order_products__prior.csv` has `order_id` and `product_id`
- `order_products__train.csv` has `order_id` and `product_id` too
###Code
# In the order_products__prior table, which orders included bananas?
BANANAS = 24852
prior[prior.product_id==BANANAS]
banana_prior_order_ids = prior[prior.product_id==BANANAS].order_id
# Look at the orders table, which orders included bananas?
orders = pd.read_csv('orders.csv')
orders.sample(n=5)
# In the orders table, which orders included bananas?
orders[orders.order_id.isin(banana_prior_order_ids)]
# Check this order id, confirm that yes it includes bananas
prior[prior.order_id==738281]
banana_orders = orders[orders.order_id.isin(banana_prior_order_ids)]
# In the orders table, which users have bought bananas?
banana_user_ids = banana_orders.user_id.unique()
###Output
_____no_output_____
###Markdown
How can we get a subset of data, just for these customers?

We want *all* the orders from customers who have *ever* bought bananas. (And *none* of the orders from customers who have *never* bought bananas.)
###Code
# orders table, shape before getting subset
orders.shape
# orders table, shape after getting subset
orders = orders[orders.user_id.isin(banana_user_ids)]
orders.shape
# IDs of *all* the orders from customers who have *ever* bought bananas
subset_order_ids = orders.order_id.unique()
# order_products__prior table, shape before getting subset
prior.shape
# order_products__prior table, shape after getting subset
prior = prior[prior.order_id.isin(subset_order_ids)]
prior.shape
# order_products__train table, shape before getting subset
train.shape
# order_products__train table, shape after getting subset
train = train[train.order_id.isin(subset_order_ids)]
train.shape
# In this subset, how often were bananas reordered in the customer's most recent order?
train[target].value_counts(normalize=True)
###Output
_____no_output_____
###Markdown
What features can we engineer? We want to predict, will these customers reorder bananas on their next order?

- Other fruit they buy
- Time between banana orders
- Frequency of banana orders by a customer
- Organic or not
- Time of day
###Code
preview()
train.shape
train.head()
# Merge user_id, order_number, order_dow, order_hour_of_day, and days_since_prior_order
# with the training data
train = pd.merge(train, orders)
train.head()
###Output
_____no_output_____
###Markdown
- Frequency of banana orders
  - % of orders
  - Every n days on average
  - Total orders
- Recency of banana orders
  - n of orders
  - n days

(A sketch of these per-user aggregations follows below.)
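A rough sketch of how the frequency features could be computed per user. The column names here are illustrative, and it assumes `prior` has already been joined with `orders` to add `user_id`, which happens in the next cell:

```python
# Illustrative sketch only: per-user banana-frequency features.
user_features = (
    prior
    .assign(bananas=prior['product_id'] == 24852)
    .groupby('user_id')
    .agg(total_orders=('order_id', 'nunique'),
         banana_orders=('bananas', 'sum'))
)
user_features['banana_order_rate'] = (
    user_features['banana_orders'] / user_features['total_orders']
)
user_features.head()
```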
###Code
USER = 61911
prior = pd.merge(prior, orders[['order_id', 'user_id']])
prior['bananas'] = prior.product_id == BANANAS
# This user has ordered 196 products,
df = prior[prior.user_id==USER]
df
# This person has ordered bananas six times
df['bananas'].sum()
df[df['bananas']]
# How many unique orders for this user?
df['order_id'].nunique()
df['bananas'].sum() / df['order_id'].nunique()
###Output
_____no_output_____ |
MorePatterns.ipynb | ###Markdown
More patterns This is a notebook for playing around with other images.
###Code
#Preliminaries
%pylab inline
import numpy as np
import matplotlib.pyplot as plt
import requests
from io import BytesIO
#Import the StitchIt library
import StitchIt as st
#Define the inputs
imgurl = "http://www.symmetrymagazine.org/sites/default/files/styles/2015_hero/public/images/standard/NeutrinoExperiments.jpg?itok=b4ajCWpe" #image file to pattern
pattern_name = "Neutrinos" #Name for pattern
aidacolor = "white" #cloth color
aidasize = 14 #number of stitches per inch of aida cloth
reduct = 25 #Reduce the image size to this percent of the original
numcol = 24 #number of colors to reduce image to
#Retrieve image file
response = requests.get(imgurl)
before = plt.imread(BytesIO(response.content),format="jpg")
plt.imshow(before);
x,y = st.aida_size(before,aidasize,verbosity=1)
#Reduce the size of the image
smaller = st.resize(before,reduct)
st.plot_before_after(before,smaller,"Resized")
x,y = st.aida_size(smaller,aidasize,verbosity=1)
#Reduce the number of colors in the image
colors, counts, after = st.reduce_colors(smaller, numcol)
st.plot_before_after(smaller,after,"Color reduced")
x,y = st.aida_size(after,aidasize,verbosity=1)
###Output
_____no_output_____ |
7 QUORA INSINCERE QUESTIONN/text-modelling-in-pytorch.ipynb | ###Markdown
General information

This kernel is a fork of my Keras kernel, but this one will use PyTorch. I'll gradually introduce more complex architectures.
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
from nltk.tokenize import TweetTokenizer
import datetime
import lightgbm as lgb
from scipy import stats
from scipy.sparse import hstack, csr_matrix
from sklearn.model_selection import train_test_split, cross_val_score
from wordcloud import WordCloud
from collections import Counter
from nltk.corpus import stopwords
from nltk.util import ngrams
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC
from sklearn.multiclass import OneVsRestClassifier
import time
pd.set_option('max_colwidth',400)
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from sklearn.preprocessing import OneHotEncoder
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from torch.autograd import Variable
import torch.utils.data
import random
import warnings
warnings.filterwarnings("ignore", message="F-score is ill-defined and being set to 0.0 due to no predicted samples.")
import re
from torch.optim.lr_scheduler import StepLR, ReduceLROnPlateau, CosineAnnealingLR
def seed_torch(seed=1029):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
train = pd.read_csv("../input/train.csv")
test = pd.read_csv("../input/test.csv")
sub = pd.read_csv('../input/sample_submission.csv')
###Output
_____no_output_____
###Markdown
Data overview

This is a kernel competition, where we can't use external data. As a result we can use only the train and test datasets, as well as the embeddings provided by the organizers.
###Code
import os
print('Available embeddings:', os.listdir("../input/embeddings/"))
train["target"].value_counts()
###Output
_____no_output_____
###Markdown
We have a serious class imbalance - only ~6% of the data is positive. No wonder the metric for the competition is F1-score.
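To see why accuracy would be a misleading metric here, consider a quick illustrative check (the numbers below just mimic the ~94/6 split; they are not taken from the dataset):

```python
import numpy as np
from sklearn.metrics import accuracy_score, f1_score

# A fake 94%/6% split and a "model" that always predicts the majority class.
y_true = np.array([0] * 94 + [1] * 6)
y_all_negative = np.zeros_like(y_true)

print(accuracy_score(y_true, y_all_negative))  # 0.94 -- looks deceptively good
print(f1_score(y_true, y_all_negative))        # 0.0  -- finds no insincere questions at all
```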
###Code
train.head()
###Output
_____no_output_____
###Markdown
In the dataset we have only texts of questions.
###Code
print('Average word length of questions in train is {0:.0f}.'.format(np.mean(train['question_text'].apply(lambda x: len(x.split())))))
print('Average word length of questions in test is {0:.0f}.'.format(np.mean(test['question_text'].apply(lambda x: len(x.split())))))
print('Max word length of questions in train is {0:.0f}.'.format(np.max(train['question_text'].apply(lambda x: len(x.split())))))
print('Max word length of questions in test is {0:.0f}.'.format(np.max(test['question_text'].apply(lambda x: len(x.split())))))
print('Average character length of questions in train is {0:.0f}.'.format(np.mean(train['question_text'].apply(lambda x: len(x)))))
print('Average character length of questions in test is {0:.0f}.'.format(np.mean(test['question_text'].apply(lambda x: len(x)))))
###Output
_____no_output_____
###Markdown
As we can see, on average the questions in the train and test datasets are similar, but there are some quite long questions in the train dataset.
###Code
puncts = [',', '.', '"', ':', ')', '(', '-', '!', '?', '|', ';', "'", '$', '&', '/', '[', ']', '>', '%', '=', '#', '*', '+', '\\', '•', '~', '@', '£',
'·', '_', '{', '}', '©', '^', '®', '`', '<', '→', '°', '€', '™', '›', '♥', '←', '×', '§', '″', '′', 'Â', '█', '½', 'à', '…',
'“', '★', '”', '–', '●', 'â', '►', '−', '¢', '²', '¬', '░', '¶', '↑', '±', '¿', '▾', '═', '¦', '║', '―', '¥', '▓', '—', '‹', '─',
'▒', ':', '¼', '⊕', '▼', '▪', '†', '■', '’', '▀', '¨', '▄', '♫', '☆', 'é', '¯', '♦', '¤', '▲', 'è', '¸', '¾', 'Ã', '⋅', '‘', '∞',
'∙', ')', '↓', '、', '│', '(', '»', ',', '♪', '╩', '╚', '³', '・', '╦', '╣', '╔', '╗', '▬', '❤', 'ï', 'Ø', '¹', '≤', '‡', '√', ]
def clean_text(x):
x = str(x)
for punct in puncts:
x = x.replace(punct, f' {punct} ')
return x
def clean_numbers(x):
x = re.sub('[0-9]{5,}', '#####', x)
x = re.sub('[0-9]{4}', '####', x)
x = re.sub('[0-9]{3}', '###', x)
x = re.sub('[0-9]{2}', '##', x)
return x
mispell_dict = {"aren't" : "are not",
"can't" : "cannot",
"couldn't" : "could not",
"didn't" : "did not",
"doesn't" : "does not",
"don't" : "do not",
"hadn't" : "had not",
"hasn't" : "has not",
"haven't" : "have not",
"he'd" : "he would",
"he'll" : "he will",
"he's" : "he is",
"i'd" : "I would",
"i'd" : "I had",
"i'll" : "I will",
"i'm" : "I am",
"isn't" : "is not",
"it's" : "it is",
"it'll":"it will",
"i've" : "I have",
"let's" : "let us",
"mightn't" : "might not",
"mustn't" : "must not",
"shan't" : "shall not",
"she'd" : "she would",
"she'll" : "she will",
"she's" : "she is",
"shouldn't" : "should not",
"that's" : "that is",
"there's" : "there is",
"they'd" : "they would",
"they'll" : "they will",
"they're" : "they are",
"they've" : "they have",
"we'd" : "we would",
"we're" : "we are",
"weren't" : "were not",
"we've" : "we have",
"what'll" : "what will",
"what're" : "what are",
"what's" : "what is",
"what've" : "what have",
"where's" : "where is",
"who'd" : "who would",
"who'll" : "who will",
"who're" : "who are",
"who's" : "who is",
"who've" : "who have",
"won't" : "will not",
"wouldn't" : "would not",
"you'd" : "you would",
"you'll" : "you will",
"you're" : "you are",
"you've" : "you have",
"'re": " are",
"wasn't": "was not",
"we'll":" will",
"didn't": "did not",
"tryin'":"trying"}
def _get_mispell(mispell_dict):
mispell_re = re.compile('(%s)' % '|'.join(mispell_dict.keys()))
return mispell_dict, mispell_re
mispellings, mispellings_re = _get_mispell(mispell_dict)
def replace_typical_misspell(text):
def replace(match):
return mispellings[match.group(0)]
return mispellings_re.sub(replace, text)
# Clean the text
train["question_text"] = train["question_text"].apply(lambda x: clean_text(x.lower()))
test["question_text"] = test["question_text"].apply(lambda x: clean_text(x.lower()))
# Clean numbers
train["question_text"] = train["question_text"].apply(lambda x: clean_numbers(x))
test["question_text"] = test["question_text"].apply(lambda x: clean_numbers(x))
# Clean spellings
train["question_text"] = train["question_text"].apply(lambda x: replace_typical_misspell(x))
test["question_text"] = test["question_text"].apply(lambda x: replace_typical_misspell(x))
max_features = 120000
tk = Tokenizer(lower = True, filters='', num_words=max_features)
full_text = list(train['question_text'].values) + list(test['question_text'].values)
tk.fit_on_texts(full_text)
train_tokenized = tk.texts_to_sequences(train['question_text'].fillna('missing'))
test_tokenized = tk.texts_to_sequences(test['question_text'].fillna('missing'))
train['question_text'].apply(lambda x: len(x.split())).plot(kind='hist');
plt.yscale('log');
plt.title('Distribution of question text length in characters');
###Output
_____no_output_____
###Markdown
We can see that most of the questions are 40 words long or shorter. Let's try a sequence length of 72 for now.
###Code
max_len = 72
maxlen = 72
X_train = pad_sequences(train_tokenized, maxlen = max_len)
X_test = pad_sequences(test_tokenized, maxlen = max_len)
###Output
_____no_output_____
###Markdown
Preparing data for PyTorch

One of the main differences from Keras is preparing the data. PyTorch requires special dataloaders built around `Dataset` objects, so I'll write a class for it. At first I'll append the padded texts to the original DF.
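As a minimal sketch of such a wrapper class (the cells below end up using the built-in `torch.utils.data.TensorDataset` instead, so treat this as purely illustrative):

```python
import torch
from torch.utils.data import Dataset, DataLoader

class QuestionsDataset(Dataset):
    """Minimal Dataset over padded token ids and binary targets."""
    def __init__(self, sequences, targets):
        self.sequences = torch.tensor(sequences, dtype=torch.long)
        self.targets = torch.tensor(targets, dtype=torch.float32)

    def __len__(self):
        return len(self.sequences)

    def __getitem__(self, idx):
        return self.sequences[idx], self.targets[idx]

# Example usage with the arrays built above:
# loader = DataLoader(QuestionsDataset(X_train, y_train), batch_size=512, shuffle=True)
```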
###Code
y_train = train['target'].values
def sigmoid(x):
return 1 / (1 + np.exp(-x))
from sklearn.model_selection import StratifiedKFold
splits = list(StratifiedKFold(n_splits=4, shuffle=True, random_state=10).split(X_train, y_train))
embed_size = 300
embedding_path = "../input/embeddings/glove.840B.300d/glove.840B.300d.txt"
def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32')
embedding_index = dict(get_coefs(*o.split(" ")) for o in open(embedding_path, encoding='utf-8', errors='ignore'))
# all_embs = np.stack(embedding_index.values())
# emb_mean,emb_std = all_embs.mean(), all_embs.std()
emb_mean,emb_std = -0.005838499, 0.48782197
word_index = tk.word_index
nb_words = min(max_features, len(word_index))
embedding_matrix = np.random.normal(emb_mean, emb_std, (nb_words + 1, embed_size))
for word, i in word_index.items():
if i >= max_features: continue
embedding_vector = embedding_index.get(word)
if embedding_vector is not None: embedding_matrix[i] = embedding_vector
embedding_path = "../input/embeddings/paragram_300_sl999/paragram_300_sl999.txt"
def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32')
embedding_index = dict(get_coefs(*o.split(" ")) for o in open(embedding_path, encoding='utf-8', errors='ignore') if len(o)>100)
# all_embs = np.stack(embedding_index.values())
# emb_mean,emb_std = all_embs.mean(), all_embs.std()
emb_mean,emb_std = -0.0053247833, 0.49346462
embedding_matrix1 = np.random.normal(emb_mean, emb_std, (nb_words + 1, embed_size))
for word, i in word_index.items():
if i >= max_features: continue
embedding_vector = embedding_index.get(word)
if embedding_vector is not None: embedding_matrix1[i] = embedding_vector
embedding_matrix = np.mean([embedding_matrix, embedding_matrix1], axis=0)
del embedding_matrix1
###Output
_____no_output_____
###Markdown
Model
###Code
class Attention(nn.Module):
def __init__(self, feature_dim, step_dim, bias=True, **kwargs):
super(Attention, self).__init__(**kwargs)
self.supports_masking = True
self.bias = bias
self.feature_dim = feature_dim
self.step_dim = step_dim
self.features_dim = 0
weight = torch.zeros(feature_dim, 1)
nn.init.xavier_uniform_(weight)
self.weight = nn.Parameter(weight)
if bias:
self.b = nn.Parameter(torch.zeros(step_dim))
def forward(self, x, mask=None):
feature_dim = self.feature_dim
step_dim = self.step_dim
eij = torch.mm(
x.contiguous().view(-1, feature_dim),
self.weight
).view(-1, step_dim)
if self.bias:
eij = eij + self.b
eij = torch.tanh(eij)
a = torch.exp(eij)
if mask is not None:
a = a * mask
a = a / torch.sum(a, 1, keepdim=True) + 1e-10
weighted_input = x * torch.unsqueeze(a, -1)
return torch.sum(weighted_input, 1)
class NeuralNet(nn.Module):
def __init__(self):
super(NeuralNet, self).__init__()
hidden_size = 128
self.embedding = nn.Embedding(max_features, embed_size)
self.embedding.weight = nn.Parameter(torch.tensor(embedding_matrix, dtype=torch.float32))
self.embedding.weight.requires_grad = False
self.embedding_dropout = nn.Dropout2d(0.1)
self.lstm = nn.LSTM(embed_size, hidden_size, bidirectional=True, batch_first=True)
self.gru = nn.GRU(hidden_size*2, hidden_size, bidirectional=True, batch_first=True)
self.lstm_attention = Attention(hidden_size*2, maxlen)
self.gru_attention = Attention(hidden_size*2, maxlen)
self.linear = nn.Linear(1024, 16)
self.relu = nn.ReLU()
self.dropout = nn.Dropout(0.1)
self.out = nn.Linear(16, 1)
def forward(self, x):
h_embedding = self.embedding(x)
h_embedding = torch.squeeze(self.embedding_dropout(torch.unsqueeze(h_embedding, 0)))
h_lstm, _ = self.lstm(h_embedding)
h_gru, _ = self.gru(h_lstm)
h_lstm_atten = self.lstm_attention(h_lstm)
h_gru_atten = self.gru_attention(h_gru)
avg_pool = torch.mean(h_gru, 1)
max_pool, _ = torch.max(h_gru, 1)
conc = torch.cat((h_lstm_atten, h_gru_atten, avg_pool, max_pool), 1)
conc = self.relu(self.linear(conc))
conc = self.dropout(conc)
out = self.out(conc)
return out
m = NeuralNet()
def train_model(model, x_train, y_train, x_val, y_val, validate=True):
optimizer = torch.optim.Adam(model.parameters())
# scheduler = CosineAnnealingLR(optimizer, T_max=5)
# scheduler = StepLR(optimizer, step_size=3, gamma=0.1)
train = torch.utils.data.TensorDataset(x_train, y_train)
valid = torch.utils.data.TensorDataset(x_val, y_val)
train_loader = torch.utils.data.DataLoader(train, batch_size=batch_size, shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid, batch_size=batch_size, shuffle=False)
loss_fn = torch.nn.BCEWithLogitsLoss(reduction='mean').cuda()
best_score = -np.inf
for epoch in range(n_epochs):
start_time = time.time()
model.train()
avg_loss = 0.
for x_batch, y_batch in tqdm(train_loader, disable=True):
y_pred = model(x_batch)
loss = loss_fn(y_pred, y_batch)
optimizer.zero_grad()
loss.backward()
optimizer.step()
avg_loss += loss.item() / len(train_loader)
model.eval()
valid_preds = np.zeros((x_val_fold.size(0)))
if validate:
avg_val_loss = 0.
for i, (x_batch, y_batch) in enumerate(valid_loader):
y_pred = model(x_batch).detach()
avg_val_loss += loss_fn(y_pred, y_batch).item() / len(valid_loader)
valid_preds[i * batch_size:(i+1) * batch_size] = sigmoid(y_pred.cpu().numpy())[:, 0]
search_result = threshold_search(y_val.cpu().numpy(), valid_preds)
val_f1, val_threshold = search_result['f1'], search_result['threshold']
elapsed_time = time.time() - start_time
print('Epoch {}/{} \t loss={:.4f} \t val_loss={:.4f} \t val_f1={:.4f} best_t={:.2f} \t time={:.2f}s'.format(
epoch + 1, n_epochs, avg_loss, avg_val_loss, val_f1, val_threshold, elapsed_time))
else:
elapsed_time = time.time() - start_time
print('Epoch {}/{} \t loss={:.4f} \t time={:.2f}s'.format(
epoch + 1, n_epochs, avg_loss, elapsed_time))
valid_preds = np.zeros((x_val_fold.size(0)))
avg_val_loss = 0.
for i, (x_batch, y_batch) in enumerate(valid_loader):
y_pred = model(x_batch).detach()
avg_val_loss += loss_fn(y_pred, y_batch).item() / len(valid_loader)
valid_preds[i * batch_size:(i+1) * batch_size] = sigmoid(y_pred.cpu().numpy())[:, 0]
print('Validation loss: ', avg_val_loss)
test_preds = np.zeros((len(test_loader.dataset)))
for i, (x_batch,) in enumerate(test_loader):
y_pred = model(x_batch).detach()
test_preds[i * batch_size:(i+1) * batch_size] = sigmoid(y_pred.cpu().numpy())[:, 0]
# scheduler.step()
return valid_preds, test_preds#, test_preds_local
x_test_cuda = torch.tensor(X_test, dtype=torch.long).cuda()
test = torch.utils.data.TensorDataset(x_test_cuda)
batch_size = 512
test_loader = torch.utils.data.DataLoader(test, batch_size=batch_size, shuffle=False)
seed=1029
def threshold_search(y_true, y_proba):
best_threshold = 0
best_score = 0
for threshold in tqdm([i * 0.01 for i in range(100)], disable=True):
score = f1_score(y_true=y_true, y_pred=y_proba > threshold)
if score > best_score:
best_threshold = threshold
best_score = score
search_result = {'threshold': best_threshold, 'f1': best_score}
return search_result
def seed_everything(seed=1234):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
seed_everything()
train_preds = np.zeros(len(train))
test_preds = np.zeros((len(test), len(splits)))
n_epochs = 5
from tqdm import tqdm
from sklearn.metrics import f1_score
for i, (train_idx, valid_idx) in enumerate(splits):
x_train_fold = torch.tensor(X_train[train_idx], dtype=torch.long).cuda()
y_train_fold = torch.tensor(y_train[train_idx, np.newaxis], dtype=torch.float32).cuda()
x_val_fold = torch.tensor(X_train[valid_idx], dtype=torch.long).cuda()
y_val_fold = torch.tensor(y_train[valid_idx, np.newaxis], dtype=torch.float32).cuda()
train = torch.utils.data.TensorDataset(x_train_fold, y_train_fold)
valid = torch.utils.data.TensorDataset(x_val_fold, y_val_fold)
train_loader = torch.utils.data.DataLoader(train, batch_size=batch_size, shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid, batch_size=batch_size, shuffle=False)
print(f'Fold {i + 1}')
seed_everything(seed + i)
model = NeuralNet()
model.cuda()
valid_preds_fold, test_preds_fold = train_model(model,
x_train_fold,
y_train_fold,
x_val_fold,
y_val_fold, validate=False)
train_preds[valid_idx] = valid_preds_fold
test_preds[:, i] = test_preds_fold
search_result = threshold_search(y_train, train_preds)
sub['prediction'] = test_preds.mean(1) > search_result['threshold']
sub.to_csv("submission.csv", index=False)
###Output
_____no_output_____ |
clustering/KMeans.ipynb | ###Markdown
KMeans Clustering

- https://ko.wikipedia.org/wiki/K-%ED%8F%89%EA%B7%A0_%EC%95%8C%EA%B3%A0%EB%A6%AC%EC%A6%98
- https://en.wikipedia.org/wiki/Cluster_analysis
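For reference, k-means looks for centroids $\mu_1, \dots, \mu_k$ that minimize the within-cluster sum of squared distances:

$$J = \sum_{i=1}^{k} \sum_{x \in S_i} \lVert x - \mu_i \rVert^2$$

The TensorFlow code below implements the usual two-step iteration: assign each point to its nearest centroid, then move each centroid to the mean of the points assigned to it.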
###Code
# Generate 2,000 points
import numpy as np
num_puntos = 2000
conjunto_puntos = []
for i in range(num_puntos):
if np.random.random() > 0.5:
conjunto_puntos.append([np.random.normal(0.0, 0.9), np.random.normal(0.0, 0.9)])
else:
conjunto_puntos.append([np.random.normal(3.0, 0.5), np.random.normal(1.0, 0.5)])
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.DataFrame({"x": [v[0] for v in conjunto_puntos],
"y": [v[1] for v in conjunto_puntos]})
sns.lmplot("x","y",data=df, fit_reg=False, size=6)
plt.show()
import numpy as np
import tensorflow as tf
vectors = tf.constant(conjunto_puntos)
k = 4
centroids = tf.Variable(tf.slice(tf.random_shuffle(vectors),[0,0],[k,-1]))
expanded_vectors = tf.expand_dims(vectors, 0)
expanded_centroids = tf.expand_dims(centroids, 1)
print(expanded_vectors.get_shape())
print(expanded_centroids.get_shape())
distances = tf.reduce_sum(
tf.square(tf.subtract(expanded_vectors, expanded_centroids)), 2)
assignments = tf.argmin(distances, 0)
means = tf.concat([ tf.reduce_mean( tf.gather(vectors, tf.reshape( tf.where( tf.equal(assignments, c) ),[1,-1]) ),reduction_indices=[1]) for c in range(k)],0)
update_centroids = tf.assign(centroids, means)
init_op = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init_op)
for step in range(100):
_, centroid_values, assignment_values = sess.run([update_centroids,
centroids,
assignments])
print("centroids")
print(centroid_values)
data = {"x": [], "y": [], "cluster": []}
for i in range(len(assignment_values)):
data["x"].append(conjunto_puntos[i][0])
data["y"].append(conjunto_puntos[i][1])
data["cluster"].append(assignment_values[i])
df = pd.DataFrame(data)
sns.lmplot("x", "y", data=df, fit_reg=False, size=6, hue="cluster", legend=False)
plt.show()
###Output
/home/smprc/.virtualenvs/python36/lib/python3.6/site-packages/matplotlib/font_manager.py:1297: UserWarning: findfont: Font family ['sans-serif'] not found. Falling back to DejaVu Sans
(prop.get_family(), self.defaultFamily[fontext]))
###Markdown
Loading useful libraries

More details about them can be found in `readme.md`.
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import load_iris
###Output
_____no_output_____
###Markdown
Loading dataset and converting it to dataframe using Pandas
###Code
data = load_iris()
df = pd.DataFrame(data.data, columns=data.feature_names)
df.head()
###Output
_____no_output_____
###Markdown
Simple EDA
###Code
cols_to_delete = df.columns[df.isna().sum()/len(df) >= .999]
cols_to_delete
# example of how to count unique values in a column
print(pd.crosstab(index=df["sepal length (cm)"], columns="count") )
count_sepal_length = (df['sepal length (cm)'] == 5.0).sum()
print(count_sepal_length)
# plotting works a lot better with np arrays, so we convert dimensions to nparrays to plot
x1 = np.array(df['sepal length (cm)'])
x2 = np.array(df['sepal width (cm)'])
plt.plot()
plt.title('Sepal Length vs. Sepal Width Scatter Plot')
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.scatter(x1, x2)
plt.show()
###Output
_____no_output_____
###Markdown
Implementing K-Means Elbow method to determine optimal K value
###Code
x = df.iloc[:, [0,1,2,3]].values
Error =[]
for i in range(1, 11):
kmeans = KMeans(n_clusters = i).fit(x)
kmeans.fit(x)
Error.append(kmeans.inertia_)
import matplotlib.pyplot as plt
plt.plot(range(1, 11), Error, marker = 'o')
plt.title('Elbow method')
plt.xlabel('No of clusters')
plt.ylabel('Error')
plt.show()
###Output
_____no_output_____
###Markdown
The plot above shows us that the optimal number of clusters is 3. We will use that to train our model
###Code
kmeans = KMeans(n_clusters=3).fit(x)
y_kmeans = kmeans.fit_predict(x)
y_kmeans
kmeans.cluster_centers_
x = np.array(list(zip(x1, x2)))
colors = ['b', 'g', 'r']
markers = ['x', 'o', 'v']
plt.ylabel('Length')
plt.scatter(kmeans.cluster_centers_[:,0], kmeans.cluster_centers_[:, 1], s = 200, c= 'yellow', label = 'Centroids')
for i,j in enumerate(kmeans.labels_):
plt.plot(x1[i], x2[i], color=colors[j], marker=markers[j])
plt.xlabel('Width')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Other plot options
###Code
plt.scatter(x[:, 0], x[:, 1], c=y_kmeans, cmap='rainbow')
###Output
_____no_output_____
###Markdown
Example Cluster Prediction
###Code
kmeans.predict(np.array([5.3, 2.9, 1.5, 2.3]).reshape(1,-1) )[0]
###Output
_____no_output_____
###Markdown
K-Means Clustering Example Let's make some fake data that includes people clustered by income and age, randomly:
###Code
from numpy import random, array
#Create fake income/age clusters for N people in k clusters
def createClusteredData(N, k):
random.seed(10)
pointsPerCluster = float(N)/k
X = []
for i in range (k):
incomeCentroid = random.uniform(20000.0, 200000.0)
ageCentroid = random.uniform(20.0, 70.0)
for j in range(int(pointsPerCluster)):
X.append([random.normal(incomeCentroid, 10000.0), random.normal(ageCentroid, 2.0)])
X = array(X)
return X
createClusteredData(1000, 3)
###Output
_____no_output_____
###Markdown
We'll use k-means to rediscover these clusters in unsupervised learning:
###Code
%matplotlib inline
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
from sklearn.preprocessing import scale
from numpy import random, float
data = createClusteredData(100, 5)
model = KMeans(n_clusters=5)
# Note I'm scaling the data to normalize it! Important for good results.
model = model.fit(scale(data))
data.shape
%matplotlib inline
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
from sklearn.preprocessing import scale
from numpy import random, float
data = createClusteredData(100, 5)
model = KMeans(n_clusters=5)
# Note I'm scaling the data to normalize it! Important for good results.
model = model.fit(scale(data))
# We can look at the clusters each data point was assigned to
print(model.labels_)
# And we'll visualize it:
plt.figure(figsize=(8, 6))
plt.scatter(data[:,0], data[:,1], c=model.labels_.astype(float))
plt.show()
###Output
[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 2 3 3 3 3 3 3 3 3 3 3 3 3 3 3
3 3 3 3 3 3 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2]
|
test/benchmark/toy_cnf.ipynb | ###Markdown
Model
###Code
from torch.distributions import MultivariateNormal, Uniform, TransformedDistribution, SigmoidTransform, Categorical
prior = MultivariateNormal(torch.zeros(2).to(device), torch.eye(2).to(device))
f = nn.Sequential(
nn.Linear(2, 32),
nn.Softplus(),
DataControl(),
nn.Linear(32+2, 2)
)
# cnf wraps the net as with other energy models
noise_dist = MultivariateNormal(torch.zeros(2).to(device), torch.eye(2).to(device))
cnf = nn.Sequential(CNF(f))
nde = NeuralDE(cnf, solver='dopri5', s_span=torch.linspace(0, 1, 2), atol=1e-6, rtol=1e-6, sensitivity='adjoint')
model = nn.Sequential(Augmenter(augment_idx=1, augment_dims=1),
nde).to(device)
###Output
_____no_output_____
###Markdown
Learner
###Code
def cnf_density(model):
with torch.no_grad():
npts = 200
side = np.linspace(-2., 2., npts)
xx, yy = np.meshgrid(side, side)
memory= 100
x = np.hstack([xx.reshape(-1, 1), yy.reshape(-1, 1)])
x = torch.from_numpy(x).type(torch.float32).to(device)
z, delta_logp = [], []
inds = torch.arange(0, x.shape[0]).to(torch.int64)
for ii in torch.split(inds, int(memory**2)):
z_full = model(x[ii]).cpu().detach()
z_, delta_logp_ = z_full[:, 1:], z_full[:, 0]
z.append(z_)
delta_logp.append(delta_logp_)
z = torch.cat(z, 0)
delta_logp = torch.cat(delta_logp, 0)
logpz = prior.log_prob(z.cuda()).cpu() # logp(z)
logpx = logpz - delta_logp
px = np.exp(logpx.cpu().numpy()).reshape(npts, npts)
plt.imshow(px);
plt.xlabel([])
plt.ylabel([])
class Learner(pl.LightningModule):
def __init__(self, model:nn.Module):
super().__init__()
self.model = model
self.lr = 1e-3
def forward(self, x):
return self.model(x)
def training_step(self, batch, batch_idx):
# plot logging
if batch_idx == 0:
cnf_density(self.model)
self.logger.experiment.log({"chart": plt})
plt.close()
nde.nfe = 0
x, _ = batch
x += 1e-2*torch.randn_like(x).to(x)
xtrJ = self.model(x)
logprob = prior.log_prob(xtrJ[:,1:]).to(x) - xtrJ[:,0]
loss = -torch.mean(logprob)
nfe = nde.nfe
nde.nfe = 0
metrics = {'loss': loss, 'nfe':nfe}
self.logger.experiment.log(metrics)
return {'loss': loss}
def configure_optimizers(self):
return torch.optim.AdamW(self.model.parameters(), lr=self.lr, weight_decay=1e-5)
def train_dataloader(self):
self.loader_l = len(trainloader)
return trainloader
logger = WandbLogger(project='torchdyn-toy_cnf-bench')
learn = Learner(model)
trainer = pl.Trainer(min_steps=45000, max_steps=45000, gpus=1, logger=logger)
trainer.fit(learn);
sample = prior.sample(torch.Size([1<<15]))
# integrating from 1 to 0, 8 steps of rk4
model[1].s_span = torch.linspace(0, 1, 2)
new_x = model(sample).cpu().detach()
cnf_density(model)
plt.figure(figsize=(12, 4))
plt.subplot(121)
plt.scatter(new_x[:,1], new_x[:,2], s=0.3, c='blue')
#plt.scatter(boh[:,0], boh[:,1], s=0.3, c='black')
plt.subplot(122)
plt.scatter(X[:,0], X[:,1], s=0.3, c='red')
def cnf_density(model):
with torch.no_grad():
npts = 200
side = np.linspace(-2., 2., npts)
xx, yy = np.meshgrid(side, side)
memory= 100
x = np.hstack([xx.reshape(-1, 1), yy.reshape(-1, 1)])
x = torch.from_numpy(x).type(torch.float32).to(device)
z, delta_logp = [], []
inds = torch.arange(0, x.shape[0]).to(torch.int64)
for ii in torch.split(inds, int(memory**2)):
z_full = model(x[ii]).cpu().detach()
z_, delta_logp_ = z_full[:, 1:], z_full[:, 0]
z.append(z_)
delta_logp.append(delta_logp_)
z = torch.cat(z, 0)
delta_logp = torch.cat(delta_logp, 0)
logpz = prior.log_prob(z.cuda()).cpu() # logp(z)
logpx = logpz - delta_logp
px = np.exp(logpx.cpu().numpy()).reshape(npts, npts)
plt.imshow(px, cmap='inferno', vmax=px.mean());
a = cnf_density(model)
###Output
_____no_output_____ |
notebooks/community/ml_ops/stage2/get_started_vertex_training_pytorch.ipynb | ###Markdown
E2E ML on GCP: MLOps stage 2 : experimentation: get started with Vertex Training for Pytorch

Overview

This tutorial demonstrates how to use Vertex AI for E2E MLOps on Google Cloud in production. This tutorial covers stage 2 : experimentation: get started with Vertex Training for Pytorch.

Dataset

The dataset used for this tutorial is the [CIFAR10 dataset](https://pytorch.org/vision/stable/datasets.html#cifar) from [Pytorch Datasets](https://pytorch.org/vision/stable/datasets.html). The version of the dataset you will use is the one packaged with torchvision. The trained model predicts which type of class an image is from ten classes: airplane, automobile, bird, cat, deer, dog, frog, horse, ship, or truck.

Objective

In this tutorial, you learn how to use `Vertex AI Training` for training a Pytorch custom model.

This tutorial uses the following Google Cloud ML services:

* `Vertex AI Training`
* `Vertex AI Model` resource

The steps performed include:

- Single node training using a Python package.
- Report accuracy when hyperparameter tuning.
- Save the model artifacts to Cloud Storage using GCSFuse.
- Create a `Vertex AI Model` resource.

Costs

This tutorial uses billable components of Google Cloud:

* Vertex AI
* Cloud Storage

Learn about [Vertex AI pricing](https://cloud.google.com/vertex-ai/pricing) and [Cloud Storage pricing](https://cloud.google.com/storage/pricing), and use the [Pricing Calculator](https://cloud.google.com/products/calculator/) to generate a cost estimate based on your projected usage.

Set up your local development environment

**If you are using Colab or Google Cloud Notebooks**, your environment already meets all the requirements to run this notebook. You can skip this step.

**Otherwise**, make sure your environment meets this notebook's requirements. You need the following:

* The Google Cloud SDK
* Git
* Python 3
* virtualenv
* Jupyter notebook running in a virtual environment with Python 3

The Google Cloud guide to [Setting up a Python development environment](https://cloud.google.com/python/setup) and the [Jupyter installation guide](https://jupyter.org/install) provide detailed instructions for meeting these requirements. The following steps provide a condensed set of instructions:

1. [Install and initialize the Cloud SDK.](https://cloud.google.com/sdk/docs/)
1. [Install Python 3.](https://cloud.google.com/python/setup#installing_python)
1. [Install virtualenv](https://cloud.google.com/python/setup#installing_and_using_virtualenv) and create a virtual environment that uses Python 3. Activate the virtual environment.
1. To install Jupyter, run `pip3 install jupyter` on the command-line in a terminal shell.
1. To launch Jupyter, run `jupyter notebook` on the command-line in a terminal shell.
1. Open this notebook in the Jupyter Notebook Dashboard.

Installations

Install *one time* the packages for executing the MLOps notebooks.
###Code
! pip3 install --upgrade google-cloud-aiplatform[tensorboard] $USER_FLAG
! pip3 install --upgrade google-cloud-pipeline-components $USER_FLAG
! pip3 install --upgrade cloudml-hypertune $USER_FLAG
! pip3 install --upgrade torchvision $USER_FLAG
###Output
_____no_output_____
###Markdown
Restart the kernel

Once you've installed the additional packages, you need to restart the notebook kernel so it can find the packages.
###Code
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
###Output
_____no_output_____
###Markdown
Set your project ID

**If you don't know your project ID**, you may be able to get your project ID using `gcloud`.
###Code
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
# Get your GCP project id from gcloud
shell_output = ! gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)
! gcloud config set project $PROJECT_ID
###Output
_____no_output_____
###Markdown
Region

You can also change the `REGION` variable, which is used for operations throughout the rest of this notebook. Below are regions supported for Vertex AI. We recommend that you choose the region closest to you.

- Americas: `us-central1`
- Europe: `europe-west4`
- Asia Pacific: `asia-east1`

You may not use a multi-regional bucket for training with Vertex AI. Not all regions provide support for all Vertex AI services. Learn more about [Vertex AI regions](https://cloud.google.com/vertex-ai/docs/general/locations).
###Code
REGION = "us-central1" # @param {type: "string"}
###Output
_____no_output_____
###Markdown
Timestamp

If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append the timestamp onto the name of resources you create in this tutorial.
###Code
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
###Output
_____no_output_____
###Markdown
Create a Cloud Storage bucket

**The following steps are required, regardless of your notebook environment.**

When you initialize the Vertex SDK for Python, you specify a Cloud Storage staging bucket. The staging bucket is where all the data associated with your dataset and model resources are retained across sessions.

Set the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization.
###Code
BUCKET_URI = "gs://[your-bucket-name]" # @param {type:"string"}
if BUCKET_URI == "" or BUCKET_URI is None or BUCKET_URI == "gs://[your-bucket-name]":
BUCKET_URI = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
###Output
_____no_output_____
###Markdown
**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
###Code
! gsutil mb -l $REGION $BUCKET_URI
###Output
_____no_output_____
###Markdown
Finally, validate access to your Cloud Storage bucket by examining its contents:
###Code
! gsutil ls -al $BUCKET_URI
###Output
_____no_output_____
###Markdown
Set up variables

Next, set up some variables used throughout the tutorial.

Import libraries and define constants
###Code
import google.cloud.aiplatform as aiplatform
###Output
_____no_output_____
###Markdown
Initialize Vertex AI SDK for Python

Initialize the Vertex AI SDK for Python for your project and corresponding bucket.
###Code
aiplatform.init(project=PROJECT_ID, staging_bucket=BUCKET_URI)
###Output
_____no_output_____
###Markdown
Set hardware accelerators

You can set hardware accelerators for training. Set the variable `TRAIN_GPU/TRAIN_NGPU` to use a container image supporting a GPU and the number of GPUs allocated to the virtual machine (VM) instance. For example, to use a GPU container image with 4 Nvidia Tesla K80 GPUs allocated to each VM, you would specify:

    (aip.AcceleratorType.NVIDIA_TESLA_K80, 4)

Otherwise specify `(None, None)` to use a container image to run on a CPU.

Learn more about [hardware accelerator support for your region](https://cloud.google.com/vertex-ai/docs/general/locations#accelerators).
###Code
import os
if os.getenv("IS_TESTING_TRAIN_GPU"):
TRAIN_GPU, TRAIN_NGPU = (
aiplatform.gapic.AcceleratorType.NVIDIA_TESLA_K80,
int(os.getenv("IS_TESTING_TRAIN_GPU")),
)
else:
TRAIN_GPU, TRAIN_NGPU = (aiplatform.gapic.AcceleratorType.NVIDIA_TESLA_K80, 1)
###Output
_____no_output_____
###Markdown
Set pre-built containers

Set the pre-built Docker container image for training.

- Set the variable `TRAIN_VERSION` to the PyTorch version of the container image. For example, `pytorch-gpu.1-9` is the GPU image for PyTorch 1.9, and `pytorch-xla.1-9` is the CPU (XLA) image for PyTorch 1.9.

For the latest list, see [Pre-built containers for training](https://cloud.google.com/ai-platform-unified/docs/training/pre-built-containers).
###Code
if TRAIN_GPU:
TRAIN_VERSION = "pytorch-gpu.1-9"
else:
TRAIN_VERSION = "pytorch-xla.1-9"
TRAIN_IMAGE = "{}-docker.pkg.dev/vertex-ai/training/{}:latest".format(
REGION.split("-")[0], TRAIN_VERSION
)
###Output
_____no_output_____
###Markdown
Set machine type

Next, set the machine type to use for training.

- Set the variable `TRAIN_COMPUTE` to configure the compute resources for the VMs you will use for training.
  - `machine type`
    - `n1-standard`: 3.75GB of memory per vCPU.
    - `n1-highmem`: 6.5GB of memory per vCPU
    - `n1-highcpu`: 0.9 GB of memory per vCPU
  - `vCPUs`: number of \[2, 4, 8, 16, 32, 64, 96 \]

*Note: The following is not supported for training:*

- `standard`: 2 vCPUs
- `highcpu`: 2, 4 and 8 vCPUs

*Note: You may also use n2 and e2 machine types for training and deployment, but they do not support GPUs*.
###Code
if os.getenv("IS_TESTING_TRAIN_MACHINE"):
MACHINE_TYPE = os.getenv("IS_TESTING_TRAIN_MACHINE")
else:
MACHINE_TYPE = "n1-standard"
VCPU = "4"
TRAIN_COMPUTE = MACHINE_TYPE + "-" + VCPU
print("Train machine type", TRAIN_COMPUTE)
###Output
_____no_output_____
###Markdown
Introduction to Pytorch training

The Pytorch package supports both single node and distributed model training.

Once you have trained a Pytorch model, you will want to save it at a Cloud Storage location, so it can subsequently be uploaded to a `Vertex AI Model` resource. The Pytorch package does not have support to save the model directly to a Cloud Storage location. Instead, you will do the following steps to save to a Cloud Storage location.

1. Save the in-memory model to the local filesystem (e.g., model.pth).
2. Copy the local copy to the specified Cloud Storage location (this tutorial's training script writes through the GCS FUSE mount, which accomplishes the same thing as a `gsutil` copy). A sketch of this two-step save appears below.

*Note*: You can do hyperparameter tuning with a Pytorch model.

Examine the training package

Package layout

Before you start the training, you will look at how a Python package is assembled for a custom training job. When unarchived, the package contains the following directory/file layout.

- PKG-INFO
- README.md
- setup.cfg
- setup.py
- trainer
  - \_\_init\_\_.py
  - task.py

The files `setup.cfg` and `setup.py` are the instructions for installing the package into the operating environment of the Docker image.

The file `trainer/task.py` is the Python script for executing the custom training job. *Note*, when we referred to it in the worker pool specification, we replace the directory slash with a dot (`trainer.task`) and dropped the file suffix (`.py`).

Package Assembly

In the following cells, you will assemble the training package.
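Before assembling the package, here is a rough sketch of the two-step save described above. The bucket and blob names are placeholders, and it uses the `google-cloud-storage` client rather than `gsutil`; the training script in this tutorial instead writes through the GCS FUSE mount:

```python
import torch
from google.cloud import storage

def save_model_to_gcs(model, bucket_name, blob_path, local_path="model.pth"):
    # Step 1: save the state dict to the local filesystem.
    torch.save(model.state_dict(), local_path)
    # Step 2: copy the local file to the Cloud Storage location.
    client = storage.Client()
    client.bucket(bucket_name).blob(blob_path).upload_from_filename(local_path)

# Example with placeholder names:
# save_model_to_gcs(model, "my-bucket", "cifar10/model.pth")
```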
###Code
# Make folder for Python training script
! rm -rf custom
! mkdir custom
# Add package information
! touch custom/README.md
# Instructions for installing package into environment of the docker image
setup_cfg = "[egg_info]\n\ntag_build =\n\ntag_date = 0"
! echo "$setup_cfg" > custom/setup.cfg
setup_py = "import setuptools\n\nsetuptools.setup(\n\n install_requires=[\n\n 'cloudml-hypertune',\n\n ],\n\n packages=setuptools.find_packages())"
! echo "$setup_py" > custom/setup.py
pkg_info = "Metadata-Version: 1.0\n\nName: CIFAR10 image classification\n\nVersion: 0.0.0\n\nSummary: Demostration training script\n\nHome-page: www.google.com\n\nAuthor: Google\n\nAuthor-email: [email protected]\n\nLicense: Public\n\nDescription: Demo\n\nPlatform: Vertex"
! echo "$pkg_info" > custom/PKG-INFO
# Make the training subfolder
! mkdir custom/trainer
! touch custom/trainer/__init__.py
###Output
_____no_output_____
###Markdown
Create the task script for the Python training package

Next, you create the `task.py` script for driving the training package. Some notable steps include:

- Command-line arguments:
  - `model-dir`: The location to save the trained model. When using Vertex AI custom training, the location will be specified in the environment variable `AIP_MODEL_DIR`.
  - `batch_size`/`lr`: Hyperparameter tuning variables.
  - `distribute`: single node or distributed training.
- Data preprocessing (`get_data()`):
  - Download the dataset and split into training and test.
- Model architecture (`get_model()`):
  - Get or build the model architecture.
- Training (`train_model()`):
  - Trains the model.
- Evaluation (`evaluate()`):
  - Evaluates the model.
  - If hyperparameter tuning, reports the metric for accuracy.
- Model artifact saving
  - Saves the model artifacts to the Cloud Storage location specified by `model-dir`.
###Code
%%writefile custom/trainer/task.py
import sys
import os
import argparse
import logging
import hypertune
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parallel import DistributedDataParallel
#import torch.backends.cudnn as cudnn
import torch.distributed as distributed
#import torch.optim
#import torch.multiprocessing as mp
import torch.utils.data as data
#import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('--model-dir', dest='model_dir',
default=os.getenv('AIP_MODEL_DIR'), type=str, help='Model dir.')
parser.add_argument('--batch_size', dest='batch_size',
type=int, default=16, help='Batch size')
parser.add_argument('--epochs', dest='epochs',
type=int, default=20, help='Number of epochs')
parser.add_argument('--lr', dest='lr',
                    type=float, default=0.001, help='Learning rate')
parser.add_argument('--distribute', default="single",
type=str, help='Distributed training strategy')
parser.add_argument('--checkpoints', default=False,
type=bool, help='Whether to save checkpoints')
args = parser.parse_args()
logging.getLogger().setLevel(logging.INFO)
def distributed_is_initialized():
if args.distribute == "mirror":
if distributed.is_available() and distributed.is_initialized():
return True
return False
def get_data():
normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
transform = transforms.Compose(
[transforms.ToTensor(), normalize,]
)
train_dataset = datasets.CIFAR10(root="./train", transform=transform, train=True, download=True)
logging.info(train_dataset)
if distributed_is_initialized():
sampler = data.DistributedSampler(train_dataset)
else:
sampler = None
train_loader = data.DataLoader(
train_dataset,
batch_size=args.batch_size,
shuffle=(sampler is None),
sampler=sampler,
)
test_dataset = datasets.CIFAR10(root="./test", transform=transform, train=False, download=True)
logging.info(test_dataset)
sampler = None
test_loader = data.DataLoader(
test_dataset,
batch_size=args.batch_size,
shuffle=False,
sampler=sampler,
)
return train_loader, test_loader
def get_model():
class Cifar10Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 16, 3)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(16, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = torch.flatten(x, 1) # flatten all dimensions except batch
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
logging.info("Get model architecture")
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
gpu_id = "0" if torch.cuda.is_available() else None
logging.info(f"Device: {device}")
model = Cifar10Model()
model.to(device)
if distributed_is_initialized():
model = DistributedDataParallel(model)
    loss = nn.CrossEntropyLoss().to(device)  # move the loss to the same device as the model (works on CPU and GPU)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
return model, loss, optimizer, device
def train_model(model, loss_func, optimizer, train_loader, test_loader, is_chief, device):
class Average(object):
def __init__(self):
self.sum = 0
self.count = 0
def __str__(self):
return '{:.6f}'.format(self.average)
@property
def average(self):
return self.sum / self.count
def update(self, value, number):
self.sum += value * number
self.count += number
class Accuracy(object):
def __init__(self):
self.correct = 0
self.count = 0
def __str__(self):
return '{:.2f}%'.format(self.accuracy * 100)
@property
def accuracy(self):
return self.correct / self.count
@torch.no_grad()
def update(self, output, target):
pred = output.argmax(dim=1)
correct = pred.eq(target).sum().item()
self.correct += correct
self.count += output.size(0)
def train():
model.train()
train_loss = Average()
train_acc = Accuracy()
for data, target in train_loader:
data = data.to(device)
target = target.to(device)
output = model(data)
loss = loss_func(output, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
train_loss.update(loss.item(), data.size(0))
train_acc.update(output, target)
return train_loss, train_acc
@torch.no_grad()
def evaluate(epoch):
model.eval()
test_loss = Average()
test_acc = Accuracy()
for data, target in test_loader:
data = data.to(device)
target = target.to(device)
output = model(data)
loss = loss_func(output, target)
test_loss.update(loss.item(), data.size(0))
test_acc.update(output, target)
# report metric for hyperparameter tuning
hpt = hypertune.HyperTune()
hpt.report_hyperparameter_tuning_metric(
hyperparameter_metric_tag='accuracy',
metric_value=test_acc.accuracy,
global_step=epoch
)
return test_loss, test_acc
for epoch in range(1, args.epochs + 1):
logging.info('Epoch: {}, Training ...'.format(epoch))
train_loss, train_acc = train()
if is_chief:
test_loss, test_acc = evaluate(epoch)
if args.checkpoints:
torch.save(model.state_dict(), args.model_dir + f"/{epoch}.chkpt")
logging.info('Epoch: {}/{},'.format(epoch, args.epochs))
logging.info('train loss: {}, train acc: {},'.format(train_loss, train_acc))
logging.info('test loss: {}, test acc: {}.'.format(test_loss, test_acc))
return model
train_dataset, test_dataset = get_data()
model, loss, optimizer, device = get_model()
train_model(model, loss, optimizer, train_dataset, test_dataset, True, device)
logging.info('start saving')
# export model to gcs using GCSFuse
logging.info("Exporting model artifacts ...")
gs_prefix = 'gs://'
gcsfuse_prefix = '/gcs/'
if args.model_dir.startswith(gs_prefix):
args.model_dir = args.model_dir.replace(gs_prefix, gcsfuse_prefix)
dirpath = os.path.split(args.model_dir)[0]
if not os.path.isdir(dirpath):
os.makedirs(dirpath)
gcs_model_path = os.path.join(os.path.join(args.model_dir, 'model.pth'))
torch.save(model.state_dict(), gcs_model_path)
logging.info(f'Model is saved to {args.model_dir}')
###Output
_____no_output_____
###Markdown
Test training package locally

Next, test your completed training package locally with just a few epochs.
###Code
! python3 custom/trainer/task.py --model-dir=custom --distribute=mirror --checkpoints=True --epochs=2
###Output
_____no_output_____
###Markdown
Store training script on your Cloud Storage bucket

Next, you package the training folder into a compressed tar ball, and then store it in your Cloud Storage bucket.
###Code
! rm -f custom.tar custom.tar.gz
! tar cvf custom.tar custom
! gzip custom.tar
! gsutil cp custom.tar.gz $BUCKET_URI/trainer_cifar10.tar.gz
###Output
_____no_output_____
###Markdown
Make Pytorch container for prediction

Currently, Vertex AI does not have a predefined container for making predictions with a deployed Pytorch model. No problem, you can assemble your own custom container. Typically, one would base the container on `TorchServe`. For demonstration purposes, you build a placeholder container (not complete) that includes the latest `TorchServe` image, and push it to the `Container Registry`.
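For context, a complete TorchServe-based image would also need a model archive (`.mar`) built with `torch-model-archiver` and copied into the model store. A rough sketch only; the model-definition file name and handler choice are placeholders, not part of this tutorial's code:

```python
# Sketch: package trained weights into a TorchServe model archive (.mar).
# `cifar10_model.py` (the model class definition) and `model.pth` are placeholder file names.
! pip3 install torch-model-archiver -q
! torch-model-archiver --model-name cifar10 --version 1.0 --model-file cifar10_model.py --serialized-file model.pth --handler image_classifier --export-path ./model-store
```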
###Code
%%writefile Dockerfile
FROM pytorch/torchserve:latest-cpu
# run Torchserve HTTP serve to respond to prediction requests
CMD ["torchserve", \n "--start", \n "--ts-config=/home/model-server/config.properties", \n "--models", \n "$APP_NAME=$APP_NAME.mar", \n "--model-store", \n "/home/model-server/model-store"]
APP_NAME = "cifar10"
DEPLOY_IMAGE = f"gcr.io/{PROJECT_ID}/pytorch_predict_{APP_NAME}"
print(DEPLOY_IMAGE)
! docker build --tag=$DEPLOY_IMAGE ./
! docker push $DEPLOY_IMAGE
###Output
_____no_output_____
###Markdown
Create and run custom training job

To train a custom model, you perform two steps: 1) create a custom training job, and 2) run the job.

Create custom training job

A custom training job is created with the `CustomPythonPackageTrainingJob` class, with the following parameters:

- `display_name`: The human readable name for the custom training job.
- `container_uri`: The training container image.
- `python_package_gcs_uri`: The location of the Python training package as a tarball.
- `python_module_name`: The relative path to the training script in the Python package.
- `model_serving_container_image_uri`: The container image for deploying the model.

*Note:* There is no requirements parameter. You specify any requirements in the `setup.py` script in your Python package.
###Code
DISPLAY_NAME = "cifar10_" + TIMESTAMP
job = aiplatform.CustomPythonPackageTrainingJob(
display_name=DISPLAY_NAME,
python_package_gcs_uri=f"{BUCKET_URI}/trainer_cifar10.tar.gz",
python_module_name="trainer.task",
container_uri=TRAIN_IMAGE,
model_serving_container_image_uri=DEPLOY_IMAGE,
project=PROJECT_ID,
)
###Output
_____no_output_____
###Markdown
Prepare your command-line argumentsNow define the command-line arguments for your custom training container:- `args`: The command-line arguments to pass to the executable that is set as the entry point into the container. - `--model-dir`: For our demonstrations, we use this command-line argument to specify where to store the model artifacts. - direct: You pass the Cloud Storage location as a command line argument to your training script (set variable `DIRECT = True`), or - indirect: The service passes the Cloud Storage location as the environment variable `AIP_MODEL_DIR` to your training script (set variable `DIRECT = False`). In this case, you tell the service the model artifact location in the job specification.
###Code
MODEL_DIR = "{}/{}".format(BUCKET_URI, TIMESTAMP)
DIRECT = False
if DIRECT:
CMDARGS = ["--model_dir=" + MODEL_DIR]
else:
CMDARGS = []
###Output
_____no_output_____
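###Markdown
With `DIRECT = False`, the artifact location never appears in `CMDARGS`; instead the service injects it as `AIP_MODEL_DIR` (derived from `base_output_dir`), which `task.py` already uses as the default for `--model-dir`. A minimal sketch of what the script effectively resolves (the fallback to `MODEL_DIR` is only for illustration outside Vertex AI):
###Code
# Sketch: in indirect mode, Vertex AI sets AIP_MODEL_DIR inside the training container.
# Outside Vertex AI the variable is unset, so fall back to MODEL_DIR for illustration.
import os

effective_model_dir = os.getenv("AIP_MODEL_DIR", MODEL_DIR)
print("Model artifacts will be written to:", effective_model_dir)
###Output
_____no_output_____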
###Markdown
Run the custom training jobNext, you run the custom job to start the training job by invoking the method `run`, with the following parameters:- `model_display_name`: The human readable name for the `Model` resource.- `args`: The command-line arguments to pass to the training script.- `replica_count`: The number of compute instances for training (replica_count = 1 is single node training).- `machine_type`: The machine type for the compute instances.- `accelerator_type`: The hardware accelerator type.- `accelerator_count`: The number of accelerators to attach to a worker replica.- `base_output_dir`: The Cloud Storage location to write the model artifacts to.- `sync`: Whether to block until completion of the job.
###Code
if TRAIN_GPU:
model = job.run(
model_display_name="cifar10_" + TIMESTAMP,
args=CMDARGS,
replica_count=1,
machine_type=TRAIN_COMPUTE,
accelerator_type=TRAIN_GPU.name,
accelerator_count=TRAIN_NGPU,
base_output_dir=MODEL_DIR,
sync=False,
)
else:
model = job.run(
model_display_name="cifar10_" + TIMESTAMP,
args=CMDARGS,
replica_count=1,
machine_type=TRAIN_COMPUTE,
base_output_dir=MODEL_DIR,
sync=False,
)
model_path_to_deploy = MODEL_DIR
###Output
_____no_output_____
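###Markdown
Because `task.py` reports its test accuracy through `cloudml-hypertune` under the tag `accuracy`, the same Python package can also drive a Vertex AI hyperparameter tuning job. The cell below is a hedged sketch only, not a step in this tutorial: the parameter ranges, trial counts, and display name are illustrative assumptions.
###Code
# Sketch: reuse the training package in a hyperparameter tuning job.
# The 'accuracy' metric matches the tag reported by hypertune in task.py,
# and the 'lr' / 'batch_size' parameters match its command-line arguments.
from google.cloud.aiplatform import hyperparameter_tuning as hpt

worker_pool_specs = [
    {
        "machine_spec": {"machine_type": TRAIN_COMPUTE},
        "replica_count": 1,
        "python_package_spec": {
            "executor_image_uri": TRAIN_IMAGE,
            "package_uris": [f"{BUCKET_URI}/trainer_cifar10.tar.gz"],
            "python_module": "trainer.task",
            "args": [],
        },
    }
]

custom_job = aiplatform.CustomJob(
    display_name="cifar10_hpt_" + TIMESTAMP,
    worker_pool_specs=worker_pool_specs,
    staging_bucket=BUCKET_URI,
)

hpt_job = aiplatform.HyperparameterTuningJob(
    display_name="cifar10_hpt_" + TIMESTAMP,
    custom_job=custom_job,
    metric_spec={"accuracy": "maximize"},
    parameter_spec={
        "lr": hpt.DoubleParameterSpec(min=1e-4, max=1e-1, scale="log"),
        "batch_size": hpt.DiscreteParameterSpec(values=[16, 32, 64], scale=None),
    },
    max_trial_count=4,
    parallel_trial_count=2,
)

# hpt_job.run()  # uncomment to actually launch the tuning sweep
###Output
_____no_output_____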
###Markdown
List a custom training job
###Code
_job = job.list(filter=f"display_name={DISPLAY_NAME}")
print(_job)
###Output
_____no_output_____
###Markdown
Wait for completion of custom training jobNext, wait for the custom training job to complete. Alternatively, one can set the parameter `sync` to `True` in the `run()` method to block until the custom training job is completed.
###Code
model.wait()
###Output
_____no_output_____
###Markdown
Delete a custom training jobAfter a training job is completed, you can delete the training job with the method `delete()`. Prior to completion, a training job can be cancelled with the method `cancel()`.
###Code
job.delete()
###Output
_____no_output_____
###Markdown
Cleaning upTo clean up all Google Cloud resources used in this project, you can [delete the Google Cloudproject](https://cloud.google.com/resource-manager/docs/creating-managing-projectsshutting_down_projects) you used for the tutorial.Otherwise, you can delete the individual resources you created in this tutorial:- Model- Custom Job (Custom job deleted in previous cell)- Cloud Storage Bucket
###Code
# Delete the model using the Vertex model object
model.delete()
if os.getenv("IS_TESTING"):
! gsutil rm -r $BUCKET_URI
###Output
_____no_output_____
###Markdown
E2E ML on GCP: MLOps stage 2 : experimentation: get started with Vertex Training for Pytorch Run in Colab View on GitHub Open in Vertex AI Workbench OverviewThis tutorial demonstrates how to use Vertex AI for E2E MLOps on Google Cloud in production. This tutorial covers stage 2 : experimentation: get started with Vertex Training for Pytorch. DatasetThe dataset used for this tutorial is the [CIFAR10 dataset](https://pytorch.org/vision/stable/datasets.htmlcifar) from [Pytorch Datasets](https://pytorch.org/vision/stable/datasets.html). The version of the dataset you will use is built into TensorFlow. The trained model predicts which type of class an image is from ten classes: airplane, automobile, bird, cat, deer, dog, frog, horse, ship, or truck. ObjectiveIn this tutorial, you learn how to use `Vertex AI Training` for training a Pytorch custom model.This tutorial uses the following Google Cloud ML services:* `Vertex AI Training`* `Vertex AI Model` resourceThe steps performed include:- Single node training using a Python package.- Report accuracy when hyperparameter tuning.- Save the model artifacts to Cloud Storage using GCSFuse.- Create a `Vertex AI Model` resource. Costs This tutorial uses billable components of Google Cloud:* Vertex AI* Cloud StorageLearn about [Vertex AIpricing](https://cloud.google.com/vertex-ai/pricing) and [Cloud Storagepricing](https://cloud.google.com/storage/pricing), and use the [PricingCalculator](https://cloud.google.com/products/calculator/)to generate a cost estimate based on your projected usage. Set up your local development environment**If you are using Colab or Google Cloud Notebooks**, your environment already meetsall the requirements to run this notebook. You can skip this step. **Otherwise**, make sure your environment meets this notebook's requirements.You need the following:* The Google Cloud SDK* Git* Python 3* virtualenv* Jupyter notebook running in a virtual environment with Python 3The Google Cloud guide to [Setting up a Python developmentenvironment](https://cloud.google.com/python/setup) and the [Jupyterinstallation guide](https://jupyter.org/install) provide detailed instructionsfor meeting these requirements. The following steps provide a condensed set ofinstructions:1. [Install and initialize the Cloud SDK.](https://cloud.google.com/sdk/docs/)1. [Install Python 3.](https://cloud.google.com/python/setupinstalling_python)1. [Install virtualenv](https://cloud.google.com/python/setupinstalling_and_using_virtualenv) and create a virtual environment that uses Python 3. Activate the virtual environment.1. To install Jupyter, run `pip3 install jupyter` on thecommand-line in a terminal shell.1. To launch Jupyter, run `jupyter notebook` on the command-line in a terminal shell.1. Open this notebook in the Jupyter Notebook Dashboard. InstallationsInstall the following packages to execute this notebook.
###Code
import os
# The Vertex AI Workbench Notebook product has specific requirements
IS_WORKBENCH_NOTEBOOK = os.getenv("DL_ANACONDA_HOME") and not os.getenv("VIRTUAL_ENV")
IS_USER_MANAGED_WORKBENCH_NOTEBOOK = os.path.exists(
"/opt/deeplearning/metadata/env_version"
)
# Vertex AI Notebook requires dependencies to be installed with '--user'
USER_FLAG = ""
if IS_WORKBENCH_NOTEBOOK:
USER_FLAG = "--user"
! pip3 install --upgrade google-cloud-aiplatform $USER_FLAG -q
! pip3 install --upgrade cloudml-hypertune $USER_FLAG -q
! pip3 install --upgrade torchvision $USER_FLAG -q
###Output
_____no_output_____
###Markdown
Restart the kernelOnce you've installed the additional packages, you need to restart the notebook kernel so it can find the packages.
###Code
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
###Output
_____no_output_____
###Markdown
Before you begin Set up your Google Cloud project**The following steps are required, regardless of your notebook environment.**1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs.1. [Make sure that billing is enabled for your project](https://cloud.google.com/billing/docs/how-to/modify-project).1. [Enable the Vertex AI, BigQuery, Compute Engine and Cloud Storage APIs](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com,bigquery,compute_component,storage_component).1. If you are running this notebook locally, you need to install the [Cloud SDK](https://cloud.google.com/sdk).1. Enter your project ID in the cell below. Then run the cell to make sure theCloud SDK uses the right project for all the commands in this notebook.**Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands. Set your project ID**If you don't know your project ID**, you may be able to get your project ID using `gcloud`.
###Code
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
# Get your GCP project id from gcloud
shell_output = ! gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)
! gcloud config set project $PROJECT_ID
###Output
_____no_output_____
###Markdown
RegionYou can also change the `REGION` variable, which is used for operationsthroughout the rest of this notebook. Below are regions supported for Vertex AI. We recommend that you choose the region closest to you.- Americas: `us-central1`- Europe: `europe-west4`- Asia Pacific: `asia-east1`You may not use a multi-regional bucket for training with Vertex AI. Not all regions provide support for all Vertex AI services.Learn more about [Vertex AI regions](https://cloud.google.com/vertex-ai/docs/general/locations).
###Code
REGION = "[your-region]" # @param {type: "string"}
if REGION == "[your-region]":
REGION = "us-central1"
###Output
_____no_output_____
###Markdown
TimestampIf you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append the timestamp onto the name of resources you create in this tutorial.
###Code
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
###Output
_____no_output_____
###Markdown
Authenticate your Google Cloud account**If you are using Vertex AI Workbench Notebooks**, your environment is already authenticated. Skip this step.**If you are using Colab**, run the cell below and follow the instructions when prompted to authenticate your account via oAuth.**Otherwise**, follow these steps:In the Cloud Console, go to the [Create service account key](https://console.cloud.google.com/apis/credentials/serviceaccountkey) page.1. **Click Create service account**.2. In the **Service account name** field, enter a name, and click **Create**.3. In the **Grant this service account access to project** section, click the Role drop-down list. Type "Vertex AI" into the filter box, and select **Vertex AI Administrator**. Type "Storage Object Admin" into the filter box, and select **Storage Object Admin**.4. Click Create. A JSON file that contains your key downloads to your local environment.5. Enter the path to your service account key as the GOOGLE_APPLICATION_CREDENTIALS variable in the cell below and run the cell.
###Code
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
import os
import sys
# If on Vertex AI Workbench, then don't execute this code
IS_COLAB = False
if not os.path.exists("/opt/deeplearning/metadata/env_version") and not os.getenv(
"DL_ANACONDA_HOME"
):
if "google.colab" in sys.modules:
IS_COLAB = True
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
elif not os.getenv("IS_TESTING"):
%env GOOGLE_APPLICATION_CREDENTIALS ''
###Output
_____no_output_____
###Markdown
Create a Cloud Storage bucket**The following steps are required, regardless of your notebook environment.**When you initialize the Vertex SDK for Python, you specify a Cloud Storage staging bucket. The staging bucket is where all the data associated with your dataset and model resources are retained across sessions.Set the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization.
###Code
BUCKET_URI = "gs://[your-bucket-name]" # @param {type:"string"}
if BUCKET_URI == "" or BUCKET_URI is None or BUCKET_URI == "gs://[your-bucket-name]":
BUCKET_URI = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
###Output
_____no_output_____
###Markdown
**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
###Code
! gsutil mb -l $REGION $BUCKET_URI
###Output
_____no_output_____
###Markdown
Finally, validate access to your Cloud Storage bucket by examining its contents:
###Code
! gsutil ls -al $BUCKET_URI
###Output
_____no_output_____
###Markdown
Set up variablesNext, set up some variables used throughout the tutorial. Import libraries and define constants
###Code
import google.cloud.aiplatform as aiplatform
###Output
_____no_output_____
###Markdown
Initialize Vertex AI SDK for PythonInitialize the Vertex AI SDK for Python for your project and corresponding bucket.
###Code
aiplatform.init(project=PROJECT_ID, staging_bucket=BUCKET_URI)
###Output
_____no_output_____
###Markdown
Set hardware acceleratorsYou can set hardware accelerators for training.Set the variable `TRAIN_GPU/TRAIN_NGPU` to use a container image supporting a GPU and the number of GPUs allocated to the virtual machine (VM) instance. For example, to use a GPU container image with 4 Nvidia Tesla K80 GPUs allocated to each VM, you would specify: (aip.AcceleratorType.NVIDIA_TESLA_K80, 4)Otherwise specify `(None, None)` to use a container image to run on a CPU.Learn more [here](https://cloud.google.com/vertex-ai/docs/general/locationsaccelerators) about hardware accelerator support for your region.
###Code
import os
if os.getenv("IS_TESTING_TRAIN_GPU"):
TRAIN_GPU, TRAIN_NGPU = (
aiplatform.gapic.AcceleratorType.NVIDIA_TESLA_K80,
int(os.getenv("IS_TESTING_TRAIN_GPU")),
)
else:
TRAIN_GPU, TRAIN_NGPU = (aiplatform.gapic.AcceleratorType.NVIDIA_TESLA_K80, 1)
###Output
_____no_output_____
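###Markdown
If you prefer to run the container image on a CPU, clear both values as noted above; the job-submission cell later in this notebook already branches on whether `TRAIN_GPU` is set. The flag below is illustrative, not part of the original flow.
###Code
# Optional: set to (None, None) to run training on CPU only.
USE_CPU_ONLY = False  # illustrative flag

if USE_CPU_ONLY:
    TRAIN_GPU, TRAIN_NGPU = (None, None)
###Output
_____no_output_____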
###Markdown
Set pre-built containersSet the pre-built Docker container image for training.- Set the variable `TRAIN_VERSION` to the PyTorch version of the container image. For example, `1-9` would be version 1.9.For the latest list, see [Pre-built containers for training](https://cloud.google.com/ai-platform-unified/docs/training/pre-built-containers).
###Code
if TRAIN_GPU:
TRAIN_VERSION = "pytorch-gpu.1-9"
else:
TRAIN_VERSION = "pytorch-xla.1-9"
TRAIN_IMAGE = "{}-docker.pkg.dev/vertex-ai/training/{}:latest".format(
REGION.split("-")[0], TRAIN_VERSION
)
###Output
_____no_output_____
###Markdown
Set machine typeNext, set the machine type to use for training.- Set the variable `TRAIN_COMPUTE` to configure the compute resources for the VMs you will use for training. - `machine type` - `n1-standard`: 3.75GB of memory per vCPU. - `n1-highmem`: 6.5GB of memory per vCPU - `n1-highcpu`: 0.9 GB of memory per vCPU - `vCPUs`: number of \[2, 4, 8, 16, 32, 64, 96 \]*Note: The following is not supported for training:* - `standard`: 2 vCPUs - `highcpu`: 2, 4 and 8 vCPUs*Note: You may also use n2 and e2 machine types for training and deployment, but they do not support GPUs*.
###Code
if os.getenv("IS_TESTING_TRAIN_MACHINE"):
MACHINE_TYPE = os.getenv("IS_TESTING_TRAIN_MACHINE")
else:
MACHINE_TYPE = "n1-standard"
VCPU = "4"
TRAIN_COMPUTE = MACHINE_TYPE + "-" + VCPU
print("Train machine type", TRAIN_COMPUTE)
###Output
_____no_output_____
###Markdown
Introduction to Pytorch trainingThe Pytorch package supports both single node and distributed model training.Once you have trained a Pytorch model, you will want to save it at a Cloud Storage location, so it can subsequently be uploaded to a `Vertex AI Model` resource.The Pytorch package does not have support to save the model to a Cloud Storage location. Instead, you will do the following steps to save to a Cloud Storage location.1. Save the in-memory model to the local filesystem (e.g., model.pth).2. Use gsutil to copy the local copy to the specified Cloud Storage location.*Note*: You can do hyperparameter tuning with a Pytorch model. Examine the training package Package layoutBefore you start the training, you will look at how a Python package is assembled for a custom training job. When unarchived, the package contains the following directory/file layout.- PKG-INFO- README.md- setup.cfg- setup.py- trainer - \_\_init\_\_.py - task.pyThe files `setup.cfg` and `setup.py` are the instructions for installing the package into the operating environment of the Docker image.The file `trainer/task.py` is the Python script for executing the custom training job. *Note*, when we referred to it in the worker pool specification, we replace the directory slash with a dot (`trainer.task`) and dropped the file suffix (`.py`). Package AssemblyIn the following cells, you will assemble the training package.
###Code
# Make folder for Python training script
! rm -rf custom
! mkdir custom
# Add package information
! touch custom/README.md
# Instructions for installing package into environment of the docker image
setup_cfg = "[egg_info]\n\ntag_build =\n\ntag_date = 0"
! echo "$setup_cfg" > custom/setup.cfg
setup_py = "import setuptools\n\nsetuptools.setup(\n\n install_requires=[\n\n 'cloudml-hypertune',\n\n ],\n\n packages=setuptools.find_packages())"
! echo "$setup_py" > custom/setup.py
pkg_info = "Metadata-Version: 1.0\n\nName: CIFAR10 image classification\n\nVersion: 0.0.0\n\nSummary: Demostration training script\n\nHome-page: www.google.com\n\nAuthor: Google\n\nAuthor-email: [email protected]\n\nLicense: Public\n\nDescription: Demo\n\nPlatform: Vertex"
! echo "$pkg_info" > custom/PKG-INFO
# Make the training subfolder
! mkdir custom/trainer
! touch custom/trainer/__init__.py
###Output
_____no_output_____
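###Markdown
As described in the introduction above, the generic pattern for getting a Pytorch model into Cloud Storage is to save it to the local filesystem and then copy it across. The sketch below shows those two steps with a stand-in model (the destination path is illustrative); the `task.py` script written next uses the GCSFuse mount instead.
###Code
# Sketch: save a state_dict locally (step 1), then copy it to Cloud Storage (step 2).
import subprocess

import torch
import torch.nn as nn

toy_model = nn.Linear(4, 2)                      # stand-in for a trained model
torch.save(toy_model.state_dict(), "model.pth")  # step 1: save locally
subprocess.run(                                  # step 2: gsutil cp to the bucket
    ["gsutil", "cp", "model.pth", f"{BUCKET_URI}/scratch/model.pth"],
    check=True,
)
###Output
_____no_output_____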
###Markdown
Create the task script for the Python training packageNext, you create the `task.py` script for driving the training package. Some notable steps include:- Command-line arguments: - `model-dir`: The location to save the trained model. When using Vertex AI custom training, the location will be specified in the environment variable: `AIP_MODEL_DIR`, - `batch_size`/`lr` : Hyperparameter tuning variables - `distribute`: single node or distributed training.- Data preprocessing (`get_data()`): - Download the dataset and split into training and test.- Model architecture (`get_model()`): - Get or build the model architecture.- Training (`train_model()`): - Trains the model- Evaluation (`evaluate()`): - Evaluates the model. - If hyperparameter tuning, reports the metric for accuracy.- Model artifact saving - Saves the model artifacts and evaluation metrics to the Cloud Storage location specified by `model-dir`.
###Code
%%writefile custom/trainer/task.py
import sys
import os
import argparse
import logging
import hypertune
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parallel import DistributedDataParallel
#import torch.backends.cudnn as cudnn
import torch.distributed as distributed
#import torch.optim
#import torch.multiprocessing as mp
import torch.utils.data as data
#import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('--model-dir', dest='model_dir',
default=os.getenv('AIP_MODEL_DIR'), type=str, help='Model dir.')
parser.add_argument('--batch_size', dest='batch_size',
type=int, default=16, help='Batch size')
parser.add_argument('--epochs', dest='epochs',
type=int, default=20, help='Number of epochs')
parser.add_argument('--lr', dest='lr',
type=float, default=0.001, help='Learning rate')
parser.add_argument('--distribute', default="single",
type=str, help='Distributed training strategy')
parser.add_argument('--checkpoints', default=False,
type=bool, help='Whether to save checkpoints')
args = parser.parse_args()
logging.getLogger().setLevel(logging.INFO)
def distributed_is_initialized():
if args.distribute == "mirror":
if distributed.is_available() and distributed.is_initialized():
return True
return False
def get_data():
normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
transform = transforms.Compose(
[transforms.ToTensor(), normalize,]
)
train_dataset = datasets.CIFAR10(root="./train", transform=transform, train=True, download=True)
logging.info(train_dataset)
if distributed_is_initialized():
sampler = data.DistributedSampler(train_dataset)
else:
sampler = None
train_loader = data.DataLoader(
train_dataset,
batch_size=args.batch_size,
shuffle=(sampler is None),
sampler=sampler,
)
test_dataset = datasets.CIFAR10(root="./test", transform=transform, train=False, download=True)
logging.info(test_dataset)
sampler = None
test_loader = data.DataLoader(
test_dataset,
batch_size=args.batch_size,
shuffle=False,
sampler=sampler,
)
return train_loader, test_loader
def get_model():
class Cifar10Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 16, 3)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(16, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = torch.flatten(x, 1) # flatten all dimensions except batch
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
logging.info("Get model architecture")
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
gpu_id = "0" if torch.cuda.is_available() else None
logging.info(f"Device: {device}")
model = Cifar10Model()
model.to(device)
if distributed_is_initialized():
model = DistributedDataParallel(model)
loss = nn.CrossEntropyLoss().cuda(gpu_id)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
return model, loss, optimizer, device
def train_model(model, loss_func, optimizer, train_loader, test_loader, is_chief, device):
class Average(object):
def __init__(self):
self.sum = 0
self.count = 0
def __str__(self):
return '{:.6f}'.format(self.average)
@property
def average(self):
return self.sum / self.count
def update(self, value, number):
self.sum += value * number
self.count += number
class Accuracy(object):
def __init__(self):
self.correct = 0
self.count = 0
def __str__(self):
return '{:.2f}%'.format(self.accuracy * 100)
@property
def accuracy(self):
return self.correct / self.count
@torch.no_grad()
def update(self, output, target):
pred = output.argmax(dim=1)
correct = pred.eq(target).sum().item()
self.correct += correct
self.count += output.size(0)
def train():
model.train()
train_loss = Average()
train_acc = Accuracy()
for data, target in train_loader:
data = data.to(device)
target = target.to(device)
output = model(data)
loss = loss_func(output, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
train_loss.update(loss.item(), data.size(0))
train_acc.update(output, target)
return train_loss, train_acc
@torch.no_grad()
def evaluate(epoch):
model.eval()
test_loss = Average()
test_acc = Accuracy()
for data, target in test_loader:
data = data.to(device)
target = target.to(device)
output = model(data)
loss = loss_func(output, target)
test_loss.update(loss.item(), data.size(0))
test_acc.update(output, target)
# report metric for hyperparameter tuning
hpt = hypertune.HyperTune()
hpt.report_hyperparameter_tuning_metric(
hyperparameter_metric_tag='accuracy',
metric_value=test_acc.accuracy,
global_step=epoch
)
return test_loss, test_acc
for epoch in range(1, args.epochs + 1):
logging.info('Epoch: {}, Training ...'.format(epoch))
train_loss, train_acc = train()
if is_chief:
test_loss, test_acc = evaluate(epoch)
if args.checkpoints:
torch.save(model.state_dict(), args.model_dir + f"/{epoch}.chkpt")
logging.info('Epoch: {}/{},'.format(epoch, args.epochs))
logging.info('train loss: {}, train acc: {},'.format(train_loss, train_acc))
logging.info('test loss: {}, test acc: {}.'.format(test_loss, test_acc))
return model
train_dataset, test_dataset = get_data()
model, loss, optimizer, device = get_model()
train_model(model, loss, optimizer, train_dataset, test_dataset, True, device)
logging.info('start saving')
# export model to gcs using GCSFuse
logging.info("Exporting model artifacts ...")
gs_prefix = 'gs://'
gcsfuse_prefix = '/gcs/'
if args.model_dir.startswith(gs_prefix):
args.model_dir = args.model_dir.replace(gs_prefix, gcsfuse_prefix)
dirpath = os.path.split(args.model_dir)[0]
if not os.path.isdir(dirpath):
os.makedirs(dirpath)
gcs_model_path = os.path.join(args.model_dir, 'model.pth')
torch.save(model.state_dict(), gcs_model_path)
logging.info(f'Model is saved to {args.model_dir}')
###Output
_____no_output_____
###Markdown
Test training package locallyNext, test your completed training package locally with just a few epochs.
###Code
! python3 custom/trainer/task.py --model-dir=custom --distribute=mirror --checkpoints=True
###Output
_____no_output_____
###Markdown
Store training script on your Cloud Storage bucketNext, you package the training folder into a compressed tar ball, and then store it in your Cloud Storage bucket.
###Code
! rm -f custom.tar custom.tar.gz
! tar cvf custom.tar custom
! gzip custom.tar
! gsutil cp custom.tar.gz $BUCKET_URI/trainer_cifar10.tar.gz
###Output
_____no_output_____
###Markdown
Make Pytorch container for predictionCurrently, Vertex AI does not have a predefined container for making predictions with a deployed Pytorch model. No problem, you can assemble your own custom container. Typically, one would base the container on `TorchServe`. For demonstration purposes, you build a placeholder container (not complete) that includes the latest `TorchServe` image, and push it to the `Container Registry`.
###Code
%%writefile Dockerfile
FROM pytorch/torchserve:latest-cpu
# run TorchServe HTTP server to respond to prediction requests
CMD ["torchserve", "--start", "--ts-config=/home/model-server/config.properties", "--models", "$APP_NAME=$APP_NAME.mar", "--model-store", "/home/model-server/model-store"]
APP_NAME = "cifar10"
DEPLOY_IMAGE = f"gcr.io/{PROJECT_ID}/pytorch_predict_{APP_NAME}"
print(DEPLOY_IMAGE)
if not IS_COLAB:
! docker build --tag=$DEPLOY_IMAGE ./
! docker push $DEPLOY_IMAGE
else:
# install docker daemon
! apt-get -qq install docker.io
###Output
_____no_output_____
###Markdown
*Executes in Colab*
###Code
%%bash -s $IS_COLAB $DEPLOY_IMAGE
if [ $1 == "False" ]; then
exit 0
fi
set -x
dockerd -b none --iptables=0 -l warn &
for i in $(seq 5); do [ ! -S "/var/run/docker.sock" ] && sleep 2 || break; done
docker build --tag=$2 ./
docker push $2
kill $(jobs -p)
###Output
_____no_output_____
###Markdown
Create and run custom training jobTo train a custom model, you perform two steps: 1) create a custom training job, and 2) run the job. Create custom training jobA custom training job is created with the `CustomPythonPackageTrainingJob` class, with the following parameters:- `display_name`: The human readable name for the custom training job.- `container_uri`: The training container image.- `python_package_gcs_uri`: The location of the Python training package as a tarball.- `python_module_name`: The relative path to the training script in the Python package.- `model_serving_container_image_uri`: The container image for deploying the model.*Note:* There is no requirements parameter. You specify any requirements in the `setup.py` script in your Python package.
###Code
DISPLAY_NAME = "cifar10_" + TIMESTAMP
job = aiplatform.CustomPythonPackageTrainingJob(
display_name=DISPLAY_NAME,
python_package_gcs_uri=f"{BUCKET_URI}/trainer_cifar10.tar.gz",
python_module_name="trainer.task",
container_uri=TRAIN_IMAGE,
model_serving_container_image_uri=DEPLOY_IMAGE,
project=PROJECT_ID,
)
###Output
_____no_output_____
###Markdown
Prepare your command-line argumentsNow define the command-line arguments for your custom training container:- `args`: The command-line arguments to pass to the executable that is set as the entry point into the container. - `--model-dir`: For our demonstrations, we use this command-line argument to specify where to store the model artifacts. - direct: You pass the Cloud Storage location as a command line argument to your training script (set variable `DIRECT = True`), or - indirect: The service passes the Cloud Storage location as the environment variable `AIP_MODEL_DIR` to your training script (set variable `DIRECT = False`). In this case, you tell the service the model artifact location in the job specification.
###Code
MODEL_DIR = "{}/{}".format(BUCKET_URI, TIMESTAMP)
DIRECT = False
if DIRECT:
CMDARGS = ["--model_dir=" + MODEL_DIR]
else:
CMDARGS = []
###Output
_____no_output_____
###Markdown
Run the custom training jobNext, you run the custom job to start the training job by invoking the method `run`, with the following parameters:- `model_display_name`: The human readable name for the `Model` resource.- `args`: The command-line arguments to pass to the training script.- `replica_count`: The number of compute instances for training (replica_count = 1 is single node training).- `machine_type`: The machine type for the compute instances.- `accelerator_type`: The hardware accelerator type.- `accelerator_count`: The number of accelerators to attach to a worker replica.- `base_output_dir`: The Cloud Storage location to write the model artifacts to.- `sync`: Whether to block until completion of the job.
###Code
if TRAIN_GPU:
model = job.run(
model_display_name="cifar10_" + TIMESTAMP,
args=CMDARGS,
replica_count=1,
machine_type=TRAIN_COMPUTE,
accelerator_type=TRAIN_GPU.name,
accelerator_count=TRAIN_NGPU,
base_output_dir=MODEL_DIR,
sync=False,
)
else:
model = job.run(
model_display_name="cifar10_" + TIMESTAMP,
args=CMDARGS,
replica_count=1,
machine_type=TRAIN_COMPUTE,
base_output_dir=MODEL_DIR,
sync=False,
)
model_path_to_deploy = MODEL_DIR
###Output
_____no_output_____
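###Markdown
The `run()` call above already registers a `Vertex AI Model` resource because `model_serving_container_image_uri` was set on the job. If you ever need to register the artifacts yourself (for example, after training outside this job), a hedged sketch using the serving image built earlier looks like this; the display name is illustrative, and the call assumes training has finished writing to `MODEL_DIR`.
###Code
# Sketch: explicitly upload the trained artifacts as a Vertex AI Model resource.
uploaded_model = aiplatform.Model.upload(
    display_name="cifar10_upload_" + TIMESTAMP,
    artifact_uri=model_path_to_deploy,
    serving_container_image_uri=DEPLOY_IMAGE,
)
print(uploaded_model.resource_name)
###Output
_____no_output_____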
###Markdown
List a custom training job
###Code
_job = job.list(filter=f"display_name={DISPLAY_NAME}")
print(_job)
###Output
_____no_output_____
###Markdown
Wait for completion of custom training jobNext, wait for the custom training job to complete. Alternatively, one can set the parameter `sync` to `True` in the `run()` method to block until the custom training job is completed.
###Code
model.wait()
###Output
_____no_output_____
###Markdown
Delete a custom training jobAfter a training job is completed, you can delete the training job with the method `delete()`. Prior to completion, a training job can be cancelled with the method `cancel()`.
###Code
job.delete()
###Output
_____no_output_____
###Markdown
Cleaning upTo clean up all Google Cloud resources used in this project, you can [delete the Google Cloudproject](https://cloud.google.com/resource-manager/docs/creating-managing-projectsshutting_down_projects) you used for the tutorial.Otherwise, you can delete the individual resources you created in this tutorial:- Model- Cloud Storage Bucket
###Code
# Delete the model using the Vertex model object
model.delete()
delete_bucket = False
if delete_bucket or os.getenv("IS_TESTING"):
! gsutil rm -r $BUCKET_URI
###Output
_____no_output_____
###Markdown
E2E ML on GCP: MLOps stage 2 : experimentation: get started with Vertex Training for Pytorch Run in Colab View on GitHub Open in Vertex AI Workbench OverviewThis tutorial demonstrates how to use Vertex AI for E2E MLOps on Google Cloud in production. This tutorial covers stage 2 : experimentation: get started with Vertex Training for Pytorch. DatasetThe dataset used for this tutorial is the [CIFAR10 dataset](https://pytorch.org/vision/stable/datasets.htmlcifar) from [Pytorch Datasets](https://pytorch.org/vision/stable/datasets.html). The version of the dataset you will use is built into TensorFlow. The trained model predicts which type of class an image is from ten classes: airplane, automobile, bird, cat, deer, dog, frog, horse, ship, or truck. ObjectiveIn this tutorial, you learn how to use `Vertex AI Training` for training a Pytorch custom model.This tutorial uses the following Google Cloud ML services:* `Vertex AI Training`* `Vertex AI Model` resourceThe steps performed include:- Single node training using a Python package.- Report accuracy when hyperparameter tuning.- Save the model artifacts to Cloud Storage using GCSFuse.- Create a `Vertex AI Model` resource. Costs This tutorial uses billable components of Google Cloud:* Vertex AI* Cloud StorageLearn about [Vertex AIpricing](https://cloud.google.com/vertex-ai/pricing) and [Cloud Storagepricing](https://cloud.google.com/storage/pricing), and use the [PricingCalculator](https://cloud.google.com/products/calculator/)to generate a cost estimate based on your projected usage. Set up your local development environment**If you are using Colab or Google Cloud Notebooks**, your environment already meetsall the requirements to run this notebook. You can skip this step. **Otherwise**, make sure your environment meets this notebook's requirements.You need the following:* The Google Cloud SDK* Git* Python 3* virtualenv* Jupyter notebook running in a virtual environment with Python 3The Google Cloud guide to [Setting up a Python developmentenvironment](https://cloud.google.com/python/setup) and the [Jupyterinstallation guide](https://jupyter.org/install) provide detailed instructionsfor meeting these requirements. The following steps provide a condensed set ofinstructions:1. [Install and initialize the Cloud SDK.](https://cloud.google.com/sdk/docs/)1. [Install Python 3.](https://cloud.google.com/python/setupinstalling_python)1. [Install virtualenv](https://cloud.google.com/python/setupinstalling_and_using_virtualenv) and create a virtual environment that uses Python 3. Activate the virtual environment.1. To install Jupyter, run `pip3 install jupyter` on thecommand-line in a terminal shell.1. To launch Jupyter, run `jupyter notebook` on the command-line in a terminal shell.1. Open this notebook in the Jupyter Notebook Dashboard. InstallationsInstall the following packages to execute this notebook.
###Code
import os
# The Vertex AI Workbench Notebook product has specific requirements
IS_WORKBENCH_NOTEBOOK = os.getenv("DL_ANACONDA_HOME")
IS_USER_MANAGED_WORKBENCH_NOTEBOOK = os.path.exists(
"/opt/deeplearning/metadata/env_version"
)
# Vertex AI Notebook requires dependencies to be installed with '--user'
USER_FLAG = ""
if IS_WORKBENCH_NOTEBOOK:
USER_FLAG = "--user"
! pip3 install --upgrade google-cloud-aiplatform $USER_FLAG -q
! pip3 install --upgrade cloudml-hypertune $USER_FLAG -q
! pip3 install --upgrade torchvision $USER_FLAG -q
###Output
_____no_output_____
###Markdown
Restart the kernelOnce you've installed the additional packages, you need to restart the notebook kernel so it can find the packages.
###Code
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
###Output
_____no_output_____
###Markdown
Before you begin Set up your Google Cloud project**The following steps are required, regardless of your notebook environment.**1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs.1. [Make sure that billing is enabled for your project](https://cloud.google.com/billing/docs/how-to/modify-project).1. [Enable the Vertex AI, BigQuery, Compute Engine and Cloud Storage APIs](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com,bigquery,compute_component,storage_component).1. If you are running this notebook locally, you need to install the [Cloud SDK](https://cloud.google.com/sdk).1. Enter your project ID in the cell below. Then run the cell to make sure theCloud SDK uses the right project for all the commands in this notebook.**Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands. Set your project ID**If you don't know your project ID**, you may be able to get your project ID using `gcloud`.
###Code
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
# Get your GCP project id from gcloud
shell_output = ! gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)
! gcloud config set project $PROJECT_ID
###Output
_____no_output_____
###Markdown
RegionYou can also change the `REGION` variable, which is used for operationsthroughout the rest of this notebook. Below are regions supported for Vertex AI. We recommend that you choose the region closest to you.- Americas: `us-central1`- Europe: `europe-west4`- Asia Pacific: `asia-east1`You may not use a multi-regional bucket for training with Vertex AI. Not all regions provide support for all Vertex AI services.Learn more about [Vertex AI regions](https://cloud.google.com/vertex-ai/docs/general/locations).
###Code
REGION = "[your-region]" # @param {type: "string"}
if REGION == "[your-region]":
REGION = "us-central1"
###Output
_____no_output_____
###Markdown
TimestampIf you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append the timestamp onto the name of resources you create in this tutorial.
###Code
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
###Output
_____no_output_____
###Markdown
Authenticate your Google Cloud account**If you are using Vertex AI Workbench Notebooks**, your environment is already authenticated. Skip this step.**If you are using Colab**, run the cell below and follow the instructions when prompted to authenticate your account via oAuth.**Otherwise**, follow these steps:In the Cloud Console, go to the [Create service account key](https://console.cloud.google.com/apis/credentials/serviceaccountkey) page.1. **Click Create service account**.2. In the **Service account name** field, enter a name, and click **Create**.3. In the **Grant this service account access to project** section, click the Role drop-down list. Type "Vertex AI" into the filter box, and select **Vertex AI Administrator**. Type "Storage Object Admin" into the filter box, and select **Storage Object Admin**.4. Click Create. A JSON file that contains your key downloads to your local environment.5. Enter the path to your service account key as the GOOGLE_APPLICATION_CREDENTIALS variable in the cell below and run the cell.
###Code
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
import os
import sys
# If on Vertex AI Workbench, then don't execute this code
IS_COLAB = False
if not os.path.exists("/opt/deeplearning/metadata/env_version") and not os.getenv(
"DL_ANACONDA_HOME"
):
if "google.colab" in sys.modules:
IS_COLAB = True
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
elif not os.getenv("IS_TESTING"):
%env GOOGLE_APPLICATION_CREDENTIALS ''
###Output
_____no_output_____
###Markdown
Create a Cloud Storage bucket**The following steps are required, regardless of your notebook environment.**When you initialize the Vertex SDK for Python, you specify a Cloud Storage staging bucket. The staging bucket is where all the data associated with your dataset and model resources are retained across sessions.Set the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization.
###Code
BUCKET_URI = "gs://[your-bucket-name]" # @param {type:"string"}
if BUCKET_URI == "" or BUCKET_URI is None or BUCKET_URI == "gs://[your-bucket-name]":
BUCKET_URI = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
###Output
_____no_output_____
###Markdown
**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
###Code
! gsutil mb -l $REGION $BUCKET_URI
###Output
_____no_output_____
###Markdown
Finally, validate access to your Cloud Storage bucket by examining its contents:
###Code
! gsutil ls -al $BUCKET_URI
###Output
_____no_output_____
###Markdown
Set up variablesNext, set up some variables used throughout the tutorial. Import libraries and define constants
###Code
import google.cloud.aiplatform as aiplatform
###Output
_____no_output_____
###Markdown
Initialize Vertex AI SDK for PythonInitialize the Vertex AI SDK for Python for your project and corresponding bucket.
###Code
aiplatform.init(project=PROJECT_ID, staging_bucket=BUCKET_URI)
###Output
_____no_output_____
###Markdown
Set hardware acceleratorsYou can set hardware accelerators for training.Set the variable `TRAIN_GPU/TRAIN_NGPU` to use a container image supporting a GPU and the number of GPUs allocated to the virtual machine (VM) instance. For example, to use a GPU container image with 4 Nvidia Tesla K80 GPUs allocated to each VM, you would specify: (aip.AcceleratorType.NVIDIA_TESLA_K80, 4)Otherwise specify `(None, None)` to use a container image to run on a CPU.Learn more [here](https://cloud.google.com/vertex-ai/docs/general/locationsaccelerators) about hardware accelerator support for your region.
###Code
import os
if os.getenv("IS_TESTING_TRAIN_GPU"):
TRAIN_GPU, TRAIN_NGPU = (
aiplatform.gapic.AcceleratorType.NVIDIA_TESLA_K80,
int(os.getenv("IS_TESTING_TRAIN_GPU")),
)
else:
TRAIN_GPU, TRAIN_NGPU = (aiplatform.gapic.AcceleratorType.NVIDIA_TESLA_K80, 1)
###Output
_____no_output_____
###Markdown
Set pre-built containersSet the pre-built Docker container image for training.- Set the variable `TRAIN_VERSION` to the PyTorch version of the container image. For example, `1-9` would be version 1.9.For the latest list, see [Pre-built containers for training](https://cloud.google.com/ai-platform-unified/docs/training/pre-built-containers).
###Code
if TRAIN_GPU:
TRAIN_VERSION = "pytorch-gpu.1-9"
else:
TRAIN_VERSION = "pytorch-xla.1-9"
TRAIN_IMAGE = "{}-docker.pkg.dev/vertex-ai/training/{}:latest".format(
REGION.split("-")[0], TRAIN_VERSION
)
###Output
_____no_output_____
###Markdown
Set machine typeNext, set the machine type to use for training.- Set the variable `TRAIN_COMPUTE` to configure the compute resources for the VMs you will use for training. - `machine type` - `n1-standard`: 3.75GB of memory per vCPU. - `n1-highmem`: 6.5GB of memory per vCPU - `n1-highcpu`: 0.9 GB of memory per vCPU - `vCPUs`: number of \[2, 4, 8, 16, 32, 64, 96 \]*Note: The following is not supported for training:* - `standard`: 2 vCPUs - `highcpu`: 2, 4 and 8 vCPUs*Note: You may also use n2 and e2 machine types for training and deployment, but they do not support GPUs*.
###Code
if os.getenv("IS_TESTING_TRAIN_MACHINE"):
MACHINE_TYPE = os.getenv("IS_TESTING_TRAIN_MACHINE")
else:
MACHINE_TYPE = "n1-standard"
VCPU = "4"
TRAIN_COMPUTE = MACHINE_TYPE + "-" + VCPU
print("Train machine type", TRAIN_COMPUTE)
###Output
_____no_output_____
###Markdown
Introduction to Pytorch trainingThe Pytorch package supports both single node and distributed model training.Once you have trained a Pytorch model, you will want to save it at a Cloud Storage location, so it can subsequently be uploaded to a `Vertex AI Model` resource.The Pytorch package does not have support to save the model to a Cloud Storage location. Instead, you will do the following steps to save to a Cloud Storage location.1. Save the in-memory model to the local filesystem (e.g., model.pth).2. Use gsutil to copy the local copy to the specified Cloud Storage location.*Note*: You can do hyperparameter tuning with a Pytorch model. Examine the training package Package layoutBefore you start the training, you will look at how a Python package is assembled for a custom training job. When unarchived, the package contains the following directory/file layout.- PKG-INFO- README.md- setup.cfg- setup.py- trainer - \_\_init\_\_.py - task.pyThe files `setup.cfg` and `setup.py` are the instructions for installing the package into the operating environment of the Docker image.The file `trainer/task.py` is the Python script for executing the custom training job. *Note*, when we referred to it in the worker pool specification, we replace the directory slash with a dot (`trainer.task`) and dropped the file suffix (`.py`). Package AssemblyIn the following cells, you will assemble the training package.
###Code
# Make folder for Python training script
! rm -rf custom
! mkdir custom
# Add package information
! touch custom/README.md
# Instructions for installing package into environment of the docker image
setup_cfg = "[egg_info]\n\ntag_build =\n\ntag_date = 0"
! echo "$setup_cfg" > custom/setup.cfg
setup_py = "import setuptools\n\nsetuptools.setup(\n\n install_requires=[\n\n 'cloudml-hypertune',\n\n ],\n\n packages=setuptools.find_packages())"
! echo "$setup_py" > custom/setup.py
pkg_info = "Metadata-Version: 1.0\n\nName: CIFAR10 image classification\n\nVersion: 0.0.0\n\nSummary: Demostration training script\n\nHome-page: www.google.com\n\nAuthor: Google\n\nAuthor-email: [email protected]\n\nLicense: Public\n\nDescription: Demo\n\nPlatform: Vertex"
! echo "$pkg_info" > custom/PKG-INFO
# Make the training subfolder
! mkdir custom/trainer
! touch custom/trainer/__init__.py
###Output
_____no_output_____
###Markdown
Create the task script for the Python training packageNext, you create the `task.py` script for driving the training package. Some notable steps include:- Command-line arguments: - `model-dir`: The location to save the trained model. When using Vertex AI custom training, the location will be specified in the environment variable: `AIP_MODEL_DIR`, - `batch_size`/`lr` : Hyperparameter tuning variables - `distribute`: single node or distributed training.- Data preprocessing (`get_data()`): - Download the dataset and split into training and test.- Model architecture (`get_model()`): - Get or build the model architecture.- Training (`train_model()`): - Trains the model- Evaluation (`evaluate()`): - Evaluates the model. - If hyperparameter tuning, reports the metric for accuracy.- Model artifact saving - Saves the model artifacts and evaluation metrics to the Cloud Storage location specified by `model-dir`.
###Code
%%writefile custom/trainer/task.py
import sys
import os
import argparse
import logging
import hypertune
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parallel import DistributedDataParallel
#import torch.backends.cudnn as cudnn
import torch.distributed as distributed
#import torch.optim
#import torch.multiprocessing as mp
import torch.utils.data as data
#import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('--model-dir', dest='model_dir',
default=os.getenv('AIP_MODEL_DIR'), type=str, help='Model dir.')
parser.add_argument('--batch_size', dest='batch_size',
type=int, default=16, help='Batch size')
parser.add_argument('--epochs', dest='epochs',
type=int, default=20, help='Number of epochs')
parser.add_argument('--lr', dest='lr',
type=float, default=0.001, help='Learning rate')
parser.add_argument('--distribute', default="single",
type=str, help='Distributed training strategy')
parser.add_argument('--checkpoints', default=False,
type=bool, help='Whether to save checkpoints')
args = parser.parse_args()
logging.getLogger().setLevel(logging.INFO)
def distributed_is_initialized():
if args.distribute == "mirror":
if distributed.is_available() and distributed.is_initialized():
return True
return False
def get_data():
normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
transform = transforms.Compose(
[transforms.ToTensor(), normalize,]
)
train_dataset = datasets.CIFAR10(root="./train", transform=transform, train=True, download=True)
logging.info(train_dataset)
if distributed_is_initialized():
sampler = data.DistributedSampler(train_dataset)
else:
sampler = None
train_loader = data.DataLoader(
train_dataset,
batch_size=args.batch_size,
shuffle=(sampler is None),
sampler=sampler,
)
test_dataset = datasets.CIFAR10(root="./test", transform=transform, train=False, download=True)
logging.info(test_dataset)
sampler = None
test_loader = data.DataLoader(
test_dataset,
batch_size=args.batch_size,
shuffle=False,
sampler=sampler,
)
return train_loader, test_loader
def get_model():
class Cifar10Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 16, 3)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(16, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = torch.flatten(x, 1) # flatten all dimensions except batch
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
logging.info("Get model architecture")
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
gpu_id = "0" if torch.cuda.is_available() else None
logging.info(f"Device: {device}")
model = Cifar10Model()
model.to(device)
if distributed_is_initialized():
model = DistributedDataParallel(model)
loss = nn.CrossEntropyLoss().cuda(gpu_id)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
return model, loss, optimizer, device
def train_model(model, loss_func, optimizer, train_loader, test_loader, is_chief, device):
class Average(object):
def __init__(self):
self.sum = 0
self.count = 0
def __str__(self):
return '{:.6f}'.format(self.average)
@property
def average(self):
return self.sum / self.count
def update(self, value, number):
self.sum += value * number
self.count += number
class Accuracy(object):
def __init__(self):
self.correct = 0
self.count = 0
def __str__(self):
return '{:.2f}%'.format(self.accuracy * 100)
@property
def accuracy(self):
return self.correct / self.count
@torch.no_grad()
def update(self, output, target):
pred = output.argmax(dim=1)
correct = pred.eq(target).sum().item()
self.correct += correct
self.count += output.size(0)
def train():
model.train()
train_loss = Average()
train_acc = Accuracy()
for data, target in train_loader:
data = data.to(device)
target = target.to(device)
output = model(data)
loss = loss_func(output, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
train_loss.update(loss.item(), data.size(0))
train_acc.update(output, target)
return train_loss, train_acc
@torch.no_grad()
def evaluate(epoch):
model.eval()
test_loss = Average()
test_acc = Accuracy()
for data, target in test_loader:
data = data.to(device)
target = target.to(device)
output = model(data)
loss = loss_func(output, target)
test_loss.update(loss.item(), data.size(0))
test_acc.update(output, target)
# report metric for hyperparameter tuning
hpt = hypertune.HyperTune()
hpt.report_hyperparameter_tuning_metric(
hyperparameter_metric_tag='accuracy',
metric_value=test_acc.accuracy,
global_step=epoch
)
return test_loss, test_acc
for epoch in range(1, args.epochs + 1):
logging.info('Epoch: {}, Training ...'.format(epoch))
train_loss, train_acc = train()
if is_chief:
test_loss, test_acc = evaluate(epoch)
if args.checkpoints:
torch.save(model.state_dict(), args.model_dir + f"/{epoch}.chkpt")
logging.info('Epoch: {}/{},'.format(epoch, args.epochs))
logging.info('train loss: {}, train acc: {},'.format(train_loss, train_acc))
logging.info('test loss: {}, test acc: {}.'.format(test_loss, test_acc))
return model
train_dataset, test_dataset = get_data()
model, loss, optimizer, device = get_model()
train_model(model, loss, optimizer, train_dataset, test_dataset, True, device)
logging.info('start saving')
# export model to gcs using GCSFuse
logging.info("Exporting model artifacts ...")
gs_prefix = 'gs://'
gcsfuse_prefix = '/gcs/'
if args.model_dir.startswith(gs_prefix):
args.model_dir = args.model_dir.replace(gs_prefix, gcsfuse_prefix)
dirpath = os.path.split(args.model_dir)[0]
if not os.path.isdir(dirpath):
os.makedirs(dirpath)
gcs_model_path = os.path.join(args.model_dir, 'model.pth')
torch.save(model.state_dict(), gcs_model_path)
logging.info(f'Model is saved to {args.model_dir}')
###Output
_____no_output_____
###Markdown
Test training package locallyNext, test your completed training package locally with just a few epochs.
###Code
! python3 custom/trainer/task.py --model-dir=custom --distribute=mirror --checkpoints=True
###Output
_____no_output_____
###Markdown
Store training script on your Cloud Storage bucketNext, you package the training folder into a compressed tar ball, and then store it in your Cloud Storage bucket.
###Code
! rm -f custom.tar custom.tar.gz
! tar cvf custom.tar custom
! gzip custom.tar
! gsutil cp custom.tar.gz $BUCKET_URI/trainer_cifar10.tar.gz
###Output
_____no_output_____
###Markdown
Make Pytorch container for predictionCurrently, Vertex AI does not have a predefined container for making predictions with a deployed Pytorch model. No problem, you can assemble your own custom container. Typically, one would base the container on `TorchServe`. For demonstration purposes, you build a placeholder container (not complete) that includes the latest `TorchServe` image, and push it to the `Container Registry`.
###Code
%%writefile Dockerfile
FROM pytorch/torchserve:latest-cpu
# run TorchServe HTTP server to respond to prediction requests
CMD ["torchserve", "--start", "--ts-config=/home/model-server/config.properties", "--models", "$APP_NAME=$APP_NAME.mar", "--model-store", "/home/model-server/model-store"]
APP_NAME = "cifar10"
DEPLOY_IMAGE = f"gcr.io/{PROJECT_ID}/pytorch_predict_{APP_NAME}"
print(DEPLOY_IMAGE)
if not IS_COLAB:
! docker build --tag=$DEPLOY_IMAGE ./
! docker push $DEPLOY_IMAGE
else:
# install docker daemon
! apt-get -qq install docker.io
###Output
_____no_output_____
###Markdown
*Executes in Colab*
###Code
%%bash -s $IS_COLAB $DEPLOY_IMAGE
if [ $1 == "False" ]; then
exit 0
fi
set -x
dockerd -b none --iptables=0 -l warn &
for i in $(seq 5); do [ ! -S "/var/run/docker.sock" ] && sleep 2 || break; done
docker build --tag=$2 ./
docker push $2
kill $(jobs -p)
###Output
_____no_output_____
###Markdown
Create and run custom training jobTo train a custom model, you perform two steps: 1) create a custom training job, and 2) run the job. Create custom training jobA custom training job is created with the `CustomPythonPackageTrainingJob` class, with the following parameters:- `display_name`: The human readable name for the custom training job.- `container_uri`: The training container image.- `python_package_gcs_uri`: The location of the Python training package as a tarball.- `python_module_name`: The relative path to the training script in the Python package.- `model_serving_container_image_uri`: The container image for deploying the model.*Note:* There is no requirements parameter. You specify any requirements in the `setup.py` script in your Python package.
###Code
DISPLAY_NAME = "cifar10_" + TIMESTAMP
job = aiplatform.CustomPythonPackageTrainingJob(
display_name=DISPLAY_NAME,
python_package_gcs_uri=f"{BUCKET_URI}/trainer_cifar10.tar.gz",
python_module_name="trainer.task",
container_uri=TRAIN_IMAGE,
model_serving_container_image_uri=DEPLOY_IMAGE,
project=PROJECT_ID,
)
###Output
_____no_output_____
###Markdown
Prepare your command-line argumentsNow define the command-line arguments for your custom training container:- `args`: The command-line arguments to pass to the executable that is set as the entry point into the container. - `--model-dir`: For our demonstrations, we use this command-line argument to specify where to store the model artifacts. - direct: You pass the Cloud Storage location as a command line argument to your training script (set variable `DIRECT = True`), or - indirect: The service passes the Cloud Storage location as the environment variable `AIP_MODEL_DIR` to your training script (set variable `DIRECT = False`). In this case, you tell the service the model artifact location in the job specification.
###Code
MODEL_DIR = "{}/{}".format(BUCKET_URI, TIMESTAMP)
DIRECT = False
if DIRECT:
    CMDARGS = ["--model-dir=" + MODEL_DIR]
else:
CMDARGS = []
###Output
_____no_output_____
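###Markdown
In the indirect case, the training code reads the output location from the `AIP_MODEL_DIR` environment variable, which Vertex AI sets inside the training container based on `base_output_dir`. The cell below is purely illustrative; outside the training container the variable is not set.
###Code
import os
# Inside the training container this prints the Cloud Storage output path;
# here it simply shows that the variable is unset locally.
print(os.getenv("AIP_MODEL_DIR", "(AIP_MODEL_DIR is not set outside the training container)"))
###Output
_____no_output_____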
###Markdown
Run the custom training jobNext, you run the custom job to start the training job by invoking the method `run`, with the following parameters:- `model_display_name`: The human readable name for the `Model` resource.- `args`: The command-line arguments to pass to the training script.- `replica_count`: The number of compute instances for training (replica_count = 1 is single node training).- `machine_type`: The machine type for the compute instances.- `accelerator_type`: The hardware accelerator type.- `accelerator_count`: The number of accelerators to attach to a worker replica.- `base_output_dir`: The Cloud Storage location to write the model artifacts to.- `sync`: Whether to block until completion of the job.
###Code
if TRAIN_GPU:
model = job.run(
model_display_name="cifar10_" + TIMESTAMP,
args=CMDARGS,
replica_count=1,
machine_type=TRAIN_COMPUTE,
accelerator_type=TRAIN_GPU.name,
accelerator_count=TRAIN_NGPU,
base_output_dir=MODEL_DIR,
sync=False,
)
else:
model = job.run(
model_display_name="cifar10_" + TIMESTAMP,
args=CMDARGS,
replica_count=1,
machine_type=TRAIN_COMPUTE,
base_output_dir=MODEL_DIR,
sync=False,
)
model_path_to_deploy = MODEL_DIR
###Output
_____no_output_____
###Markdown
List a custom training job
###Code
_job = job.list(filter=f"display_name={DISPLAY_NAME}")
print(_job)
###Output
_____no_output_____
###Markdown
Wait for completion of custom training jobNext, wait for the custom training job to complete. Alternatively, one can set the parameter `sync` to `True` in the `run()` method to block until the custom training job is completed.
###Code
model.wait()
###Output
_____no_output_____
###Markdown
Delete a custom training jobAfter a training job is completed, you can delete the training job with the method `delete()`. Prior to completion, a training job can be cancelled with the method `cancel()`.
###Code
job.delete()
###Output
_____no_output_____
###Markdown
Cleaning upTo clean up all Google Cloud resources used in this project, you can [delete the Google Cloud project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial.Otherwise, you can delete the individual resources you created in this tutorial:- Model- Cloud Storage Bucket
###Code
# Delete the model using the Vertex model object
model.delete()
delete_bucket = False
if delete_bucket or os.getenv("IS_TESTING"):
! gsutil rm -r $BUCKET_URI
###Output
_____no_output_____
###Markdown
E2E ML on GCP: MLOps stage 2 : experimentation: get started with Vertex Training for Pytorch View on GitHub Open in Vertex AI Workbench OverviewThis tutorial demonstrates how to use Vertex AI for E2E MLOps on Google Cloud in production. This tutorial covers stage 2 : experimentation: get started with Vertex Training for Pytorch. DatasetThe dataset used for this tutorial is the [CIFAR10 dataset](https://pytorch.org/vision/stable/datasets.html#cifar) from [Pytorch Datasets](https://pytorch.org/vision/stable/datasets.html). The version of the dataset you will use is the one built into the `torchvision` datasets package. The trained model predicts which of ten classes an image belongs to: airplane, automobile, bird, cat, deer, dog, frog, horse, ship, or truck. ObjectiveIn this tutorial, you learn how to use `Vertex AI Training` for training a Pytorch custom model.This tutorial uses the following Google Cloud ML services:- `Vertex AI Training`- `Vertex AI Model` resourceThe steps performed include:- Single node training using a Python package.- Report accuracy when hyperparameter tuning.- Save the model artifacts to Cloud Storage using GCSFuse.- Create a `Vertex AI Model` resource. InstallationsInstall the packages required for executing the MLOps notebooks (you only need to do this once).
###Code
ONCE_ONLY = False
if ONCE_ONLY:
! pip3 install -U tensorflow==2.5 $USER_FLAG
! pip3 install -U tensorflow-data-validation==1.2 $USER_FLAG
! pip3 install -U tensorflow-transform==1.2 $USER_FLAG
! pip3 install -U tensorflow-io==0.18 $USER_FLAG
! pip3 install --upgrade google-cloud-aiplatform[tensorboard] $USER_FLAG
! pip3 install --upgrade google-cloud-pipeline-components $USER_FLAG
! pip3 install --upgrade google-cloud-bigquery $USER_FLAG
! pip3 install --upgrade google-cloud-logging $USER_FLAG
! pip3 install --upgrade apache-beam[gcp] $USER_FLAG
! pip3 install --upgrade pyarrow $USER_FLAG
! pip3 install --upgrade cloudml-hypertune $USER_FLAG
! pip3 install --upgrade kfp $USER_FLAG
! pip3 install --upgrade torchvision $USER_FLAG
! pip3 install --upgrade rpy2 $USER_FLAG
###Output
_____no_output_____
###Markdown
Restart the kernelOnce you've installed the additional packages, you need to restart the notebook kernel so it can find the packages.
###Code
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
###Output
_____no_output_____
###Markdown
Set your project ID**If you don't know your project ID**, you may be able to get your project ID using `gcloud`.
###Code
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
# Get your GCP project id from gcloud
shell_output = ! gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)
! gcloud config set project $PROJECT_ID
###Output
_____no_output_____
###Markdown
RegionYou can also change the `REGION` variable, which is used for operationsthroughout the rest of this notebook. Below are regions supported for Vertex AI. We recommend that you choose the region closest to you.- Americas: `us-central1`- Europe: `europe-west4`- Asia Pacific: `asia-east1`You may not use a multi-regional bucket for training with Vertex AI. Not all regions provide support for all Vertex AI services.Learn more about [Vertex AI regions](https://cloud.google.com/vertex-ai/docs/general/locations).
###Code
REGION = "us-central1" # @param {type: "string"}
###Output
_____no_output_____
###Markdown
TimestampIf you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append the timestamp onto the name of resources you create in this tutorial.
###Code
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
###Output
_____no_output_____
###Markdown
Create a Cloud Storage bucket**The following steps are required, regardless of your notebook environment.**When you initialize the Vertex SDK for Python, you specify a Cloud Storage staging bucket. The staging bucket is where all the data associated with your dataset and model resources are retained across sessions.Set the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization.
###Code
BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
    BUCKET_NAME = "gs://" + PROJECT_ID + "-aip-" + TIMESTAMP
###Output
_____no_output_____
###Markdown
**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
###Code
! gsutil mb -l $REGION $BUCKET_NAME
###Output
_____no_output_____
###Markdown
Finally, validate access to your Cloud Storage bucket by examining its contents:
###Code
! gsutil ls -al $BUCKET_NAME
###Output
_____no_output_____
###Markdown
Set up variablesNext, set up some variables used throughout the tutorial. Import libraries and define constants
###Code
import google.cloud.aiplatform as aip
###Output
_____no_output_____
###Markdown
Initialize Vertex AI SDK for PythonInitialize the Vertex AI SDK for Python for your project and corresponding bucket.
###Code
aip.init(project=PROJECT_ID, staging_bucket=BUCKET_NAME)
###Output
_____no_output_____
###Markdown
Set hardware acceleratorsYou can set hardware accelerators for training.Set the variable `TRAIN_GPU/TRAIN_NGPU` to use a container image supporting a GPU and the number of GPUs allocated to the virtual machine (VM) instance. For example, to use a GPU container image with 4 NVIDIA Tesla K80 GPUs allocated to each VM, you would specify: (aip.AcceleratorType.NVIDIA_TESLA_K80, 4)Otherwise specify `(None, None)` to use a container image to run on a CPU.Learn more about [hardware accelerator support for your region](https://cloud.google.com/vertex-ai/docs/general/locations#accelerators).
###Code
if os.getenv("IS_TESTING_TRAIN_GPU"):
TRAIN_GPU, TRAIN_NGPU = (
aip.gapic.AcceleratorType.NVIDIA_TESLA_K80,
int(os.getenv("IS_TESTING_TRAIN_GPU")),
)
else:
TRAIN_GPU, TRAIN_NGPU = (aip.gapic.AcceleratorType.NVIDIA_TESLA_K80, 1)
###Output
_____no_output_____
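###Markdown
To train on a CPU-only container image instead, set the pair to `(None, None)` as described above:
###Code
# Uncomment to disable GPU training:
# TRAIN_GPU, TRAIN_NGPU = (None, None)
###Output
_____no_output_____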
###Markdown
Set pre-built containersSet the pre-built Docker container image for training.- Set the variable `TRAIN_VERSION` to the Pytorch version of the container image. For example, `pytorch-gpu.1-9` is the GPU image for Pytorch 1.9, and `pytorch-xla.1-9` is the XLA (non-GPU) image used here when no GPU is requested.For the latest list, see [Pre-built containers for training](https://cloud.google.com/ai-platform-unified/docs/training/pre-built-containers).
###Code
if TRAIN_GPU:
TRAIN_VERSION = "pytorch-gpu.1-9"
else:
TRAIN_VERSION = "pytorch-xla.1-9"
TRAIN_IMAGE = "{}-docker.pkg.dev/vertex-ai/training/{}:latest".format(
REGION.split("-")[0], TRAIN_VERSION
)
###Output
_____no_output_____
###Markdown
Set machine typeNext, set the machine type to use for training.- Set the variable `TRAIN_COMPUTE` to configure the compute resources for the VMs you will use for training. - `machine type` - `n1-standard`: 3.75GB of memory per vCPU. - `n1-highmem`: 6.5GB of memory per vCPU - `n1-highcpu`: 0.9 GB of memory per vCPU - `vCPUs`: number of \[2, 4, 8, 16, 32, 64, 96 \]*Note: The following is not supported for training:* - `standard`: 2 vCPUs - `highcpu`: 2, 4 and 8 vCPUs*Note: You may also use n2 and e2 machine types for training and deployment, but they do not support GPUs*.
###Code
if os.getenv("IS_TESTING_TRAIN_MACHINE"):
MACHINE_TYPE = os.getenv("IS_TESTING_TRAIN_MACHINE")
else:
MACHINE_TYPE = "n1-standard"
VCPU = "4"
TRAIN_COMPUTE = MACHINE_TYPE + "-" + VCPU
print("Train machine type", TRAIN_COMPUTE)
###Output
_____no_output_____
###Markdown
Introduction to Pytorch trainingThe Pytorch package supports both single node and distributed model training.Once you have trained a Pytorch model, you will want to save it at a Cloud Storage location, so it can subsequently be uploaded to a `Vertex AI Model` resource.The Pytorch package does not have built-in support for saving a model directly to a Cloud Storage location. Instead, you will do the following steps to save to a Cloud Storage location.1. Save the in-memory model to the local filesystem (e.g., model.pth).2. Copy the local file to the specified Cloud Storage location, either with gsutil or by writing through the GCSFuse mount (`/gcs/...`); the training script below uses the GCSFuse approach.*Note*: You can do hyperparameter tuning with a Pytorch model. Examine the training package Package layoutBefore you start the training, you will look at how a Python package is assembled for a custom training job. When unarchived, the package contains the following directory/file layout.- PKG-INFO- README.md- setup.cfg- setup.py- trainer - \_\_init\_\_.py - task.pyThe files `setup.cfg` and `setup.py` are the instructions for installing the package into the operating environment of the Docker image.The file `trainer/task.py` is the Python script for executing the custom training job. *Note*: when referring to it in the worker pool specification, you replace the directory slash with a dot (`trainer.task`) and drop the file suffix (`.py`). Package AssemblyIn the following cells, you will assemble the training package.
###Code
# Make folder for Python training script
! rm -rf custom
! mkdir custom
# Add package information
! touch custom/README.md
setup_cfg = "[egg_info]\n\ntag_build =\n\ntag_date = 0"
! echo "$setup_cfg" > custom/setup.cfg
setup_py = "import setuptools\n\nsetuptools.setup(\n\n install_requires=[\n\n 'cloudml-hypertune',\n\n ],\n\n packages=setuptools.find_packages())"
! echo "$setup_py" > custom/setup.py
pkg_info = "Metadata-Version: 1.0\n\nName: CIFAR10 image classification\n\nVersion: 0.0.0\n\nSummary: Demonstration training script\n\nHome-page: www.google.com\n\nAuthor: Google\n\nAuthor-email: [email protected]\n\nLicense: Public\n\nDescription: Demo\n\nPlatform: Vertex"
! echo "$pkg_info" > custom/PKG-INFO
# Make the training subfolder
! mkdir custom/trainer
! touch custom/trainer/__init__.py
###Output
_____no_output_____
###Markdown
Create the task script for the Python training packageNext, you create the `task.py` script for driving the training package. Some notable steps include:- Command-line arguments: - `model-dir`: The location to save the trained model. When using Vertex AI custom training, the location will be specified in the environment variable `AIP_MODEL_DIR`, - `batch_size`/`lr`: Hyperparameter tuning variables - `distribute`: single node or distributed training.- Data preprocessing (`get_data()`): - Downloads the dataset and splits it into training and test.- Model architecture (`get_model()`): - Gets or builds the model architecture.- Training (`train_model()`): - Trains the model.- Evaluation (`evaluate()`): - Evaluates the model. - If hyperparameter tuning, reports the metric for accuracy.- Model artifact saving - Saves the model artifacts to the Cloud Storage location specified by `model-dir`.
###Code
%%writefile custom/trainer/task.py
import sys
import os
import argparse
import logging
import hypertune
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parallel import DistributedDataParallel
#import torch.backends.cudnn as cudnn
import torch.distributed as distributed
#import torch.optim
#import torch.multiprocessing as mp
import torch.utils.data as data
#import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
parser.add_argument('--model-dir', dest='model_dir',
default=os.getenv('AIP_MODEL_DIR'), type=str, help='Model dir.')
parser.add_argument('--batch_size', dest='batch_size',
type=int, default=16, help='Batch size')
parser.add_argument('--epochs', dest='epochs',
type=int, default=20, help='Number of epochs')
parser.add_argument('--lr', dest='lr',
                    type=float, default=0.001, help='Learning rate')
parser.add_argument('--distribute', default="single",
type=str, help='Distributed training strategy')
parser.add_argument('--checkpoints', default=False,
type=bool, help='Whether to save checkpoints')
args = parser.parse_args()
logging.getLogger().setLevel(logging.INFO)
def distributed_is_initialized():
if args.distribute == "mirror":
if distributed.is_available() and distributed.is_initialized():
return True
return False
def get_data():
normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
transform = transforms.Compose(
[transforms.ToTensor(), normalize,]
)
train_dataset = datasets.CIFAR10(root="./train", transform=transform, train=True, download=True)
logging.info(train_dataset)
if distributed_is_initialized():
sampler = data.DistributedSampler(train_dataset)
else:
sampler = None
train_loader = data.DataLoader(
train_dataset,
batch_size=args.batch_size,
shuffle=(sampler is None),
sampler=sampler,
)
test_dataset = datasets.CIFAR10(root="./test", transform=transform, train=False, download=True)
logging.info(test_dataset)
sampler = None
test_loader = data.DataLoader(
test_dataset,
batch_size=args.batch_size,
shuffle=False,
sampler=sampler,
)
return train_loader, test_loader
def get_model():
class Cifar10Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 16, 3)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(16, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = torch.flatten(x, 1) # flatten all dimensions except batch
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
logging.info("Get model architecture")
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
gpu_id = "0" if torch.cuda.is_available() else None
logging.info(f"Device: {device}")
model = Cifar10Model()
model.to(device)
if distributed_is_initialized():
model = DistributedDataParallel(model)
    loss = nn.CrossEntropyLoss().to(device)  # keep the loss module on the same device as the model
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
return model, loss, optimizer, device
def train_model(model, loss_func, optimizer, train_loader, test_loader, is_chief, device):
class Average(object):
def __init__(self):
self.sum = 0
self.count = 0
def __str__(self):
return '{:.6f}'.format(self.average)
@property
def average(self):
return self.sum / self.count
def update(self, value, number):
self.sum += value * number
self.count += number
class Accuracy(object):
def __init__(self):
self.correct = 0
self.count = 0
def __str__(self):
return '{:.2f}%'.format(self.accuracy * 100)
@property
def accuracy(self):
return self.correct / self.count
@torch.no_grad()
def update(self, output, target):
pred = output.argmax(dim=1)
correct = pred.eq(target).sum().item()
self.correct += correct
self.count += output.size(0)
def train():
model.train()
train_loss = Average()
train_acc = Accuracy()
for data, target in train_loader:
data = data.to(device)
target = target.to(device)
output = model(data)
loss = loss_func(output, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
train_loss.update(loss.item(), data.size(0))
train_acc.update(output, target)
return train_loss, train_acc
@torch.no_grad()
def evaluate(epoch):
model.eval()
test_loss = Average()
test_acc = Accuracy()
for data, target in test_loader:
data = data.to(device)
target = target.to(device)
output = model(data)
loss = loss_func(output, target)
test_loss.update(loss.item(), data.size(0))
test_acc.update(output, target)
# report metric for hyperparameter tuning
hpt = hypertune.HyperTune()
hpt.report_hyperparameter_tuning_metric(
hyperparameter_metric_tag='accuracy',
metric_value=test_acc.accuracy,
global_step=epoch
)
return test_loss, test_acc
for epoch in range(1, args.epochs + 1):
logging.info('Epoch: {}, Training ...'.format(epoch))
train_loss, train_acc = train()
if is_chief:
test_loss, test_acc = evaluate(epoch)
if args.checkpoints:
torch.save(model.state_dict(), args.model_dir + f"/{epoch}.chkpt")
logging.info('Epoch: {}/{},'.format(epoch, args.epochs))
logging.info('train loss: {}, train acc: {},'.format(train_loss, train_acc))
logging.info('test loss: {}, test acc: {}.'.format(test_loss, test_acc))
return model
train_dataset, test_dataset = get_data()
model, loss, optimizer, device = get_model()
train_model(model, loss, optimizer, train_dataset, test_dataset, True, device)
logging.info('start saving')
# export model to gcs using GCSFuse
logging.info("Exporting model artifacts ...")
gs_prefix = 'gs://'
gcsfuse_prefix = '/gcs/'
if args.model_dir.startswith(gs_prefix):
args.model_dir = args.model_dir.replace(gs_prefix, gcsfuse_prefix)
if not os.path.isdir(args.model_dir):
    os.makedirs(args.model_dir)
gcs_model_path = os.path.join(args.model_dir, 'model.pth')
torch.save(model.state_dict(), gcs_model_path)
logging.info(f'Model is saved to {args.model_dir}')
###Output
_____no_output_____
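###Markdown
The script above writes the model artifacts through the GCSFuse mount (a `gs://` path rewritten to `/gcs/`). As described in the introduction, an alternative is to save the model locally and then copy it with `gsutil`. The cell below is a minimal, illustrative sketch of that alternative; the helper `save_model_to_gcs` is not used anywhere else in this notebook.
###Code
import subprocess
import torch
def save_model_to_gcs(model, gcs_dir, local_path="/tmp/model.pth"):
    """Save a model locally, then copy it to a gs:// location with gsutil."""
    # 1. Save the in-memory model to the local filesystem.
    torch.save(model.state_dict(), local_path)
    # 2. Copy the local file to the specified Cloud Storage location.
    subprocess.run(["gsutil", "cp", local_path, f"{gcs_dir}/model.pth"], check=True)
###Output
_____no_output_____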
###Markdown
Test training package locallyNext, test your completed training package locally with just a few epochs.
###Code
! python3 custom/trainer/task.py --model-dir=custom --distribute=mirror --checkpoints=True
###Output
_____no_output_____
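###Markdown
The defaults train for 20 epochs, so for a quick local smoke test you may prefer to override the hyperparameters explicitly, for example:
###Code
! python3 custom/trainer/task.py --model-dir=custom --epochs=2 --batch_size=64 --lr=0.001
###Output
_____no_output_____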
###Markdown
Store training script on your Cloud Storage bucketNext, you package the training folder into a compressed tarball and then store it in your Cloud Storage bucket.
###Code
! rm -f custom.tar custom.tar.gz
! tar cvf custom.tar custom
! gzip custom.tar
! gsutil cp custom.tar.gz $BUCKET_NAME/trainer_cifar10.tar.gz
###Output
_____no_output_____
###Markdown
Make Pytorch container for predictionCurrently, Vertex AI does not have a predefined container for making predictions with a deployed Pytorch model. No problem, you can assemble your own custom container. Typically, one would base the container on `TorchServe`. For demonstration purposes, you build a placeholder container (not complete) that includes the latest `TorchServe` image, and push it to the `Container Registry`.
###Code
%%writefile Dockerfile
FROM pytorch/torchserve:latest-cpu
# Run the TorchServe HTTP server to respond to prediction requests
CMD ["torchserve", "--start", "--ts-config=/home/model-server/config.properties", "--models", "$APP_NAME=$APP_NAME.mar", "--model-store", "/home/model-server/model-store"]
APP_NAME = "cifar10"
DEPLOY_IMAGE = f"gcr.io/{PROJECT_ID}/pytorch_predict_{APP_NAME}"
print(DEPLOY_IMAGE)
! docker build --tag=$DEPLOY_IMAGE ./
! docker push $DEPLOY_IMAGE
###Output
_____no_output_____
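###Markdown
For reference, a more complete serving image would typically also bundle a model archive and a TorchServe configuration file. The sketch below is illustrative only; the files `cifar10.mar` and `config.properties` are assumed to exist and are not produced anywhere in this notebook, and the sketch is written to a separate file so it does not overwrite the placeholder `Dockerfile` above.
###Code
%%writefile Dockerfile.complete
FROM pytorch/torchserve:latest-cpu
# Copy a previously built model archive and the server configuration into the image.
COPY cifar10.mar /home/model-server/model-store/
COPY config.properties /home/model-server/
# Run the TorchServe HTTP server to respond to prediction requests.
CMD ["torchserve", "--start", "--ts-config=/home/model-server/config.properties", "--models", "cifar10=cifar10.mar", "--model-store", "/home/model-server/model-store"]
###Output
_____no_output_____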
###Markdown
Create and run custom training jobTo train a custom model, you perform two steps: 1) create a custom training job, and 2) run the job. Create custom training jobA custom training job is created with the `CustomPythonPackageTrainingJob` class, with the following parameters:- `display_name`: The human readable name for the custom training job.- `container_uri`: The training container image.- `python_package_gcs_uri`: The location of the Python training package as a tarball.- `python_module_name`: The relative path to the training script in the Python package.- `model_serving_container_image_uri`: The container image for deploying the model.*Note:* There is no requirements parameter. You specify any requirements in the `setup.py` script in your Python package.
###Code
DISPLAY_NAME = "cifar10_" + TIMESTAMP
job = aip.CustomPythonPackageTrainingJob(
display_name=DISPLAY_NAME,
python_package_gcs_uri=f"{BUCKET_NAME}/trainer_cifar10.tar.gz",
python_module_name="trainer.task",
container_uri=TRAIN_IMAGE,
model_serving_container_image_uri=DEPLOY_IMAGE,
project=PROJECT_ID,
)
###Output
_____no_output_____
###Markdown
Prepare your command-line argumentsNow define the command-line arguments for your custom training container:- `args`: The command-line arguments to pass to the executable that is set as the entry point into the container. - `--model-dir`: For our demonstrations, we use this command-line argument to specify where to store the model artifacts. - direct: You pass the Cloud Storage location as a command-line argument to your training script (set variable `DIRECT = True`), or - indirect: The service passes the Cloud Storage location as the environment variable `AIP_MODEL_DIR` to your training script (set variable `DIRECT = False`). In this case, you tell the service the model artifact location in the job specification.
###Code
MODEL_DIR = "{}/{}".format(BUCKET_NAME, TIMESTAMP)
DIRECT = False
if DIRECT:
    CMDARGS = ["--model-dir=" + MODEL_DIR]
else:
CMDARGS = []
###Output
_____no_output_____
###Markdown
Run the custom training jobNext, you run the custom job to start the training job by invoking the method `run`, with the following parameters:- `model_display_name`: The human readable name for the `Model` resource.- `args`: The command-line arguments to pass to the training script.- `replica_count`: The number of compute instances for training (replica_count = 1 is single node training).- `machine_type`: The machine type for the compute instances.- `accelerator_type`: The hardware accelerator type.- `accelerator_count`: The number of accelerators to attach to a worker replica.- `base_output_dir`: The Cloud Storage location to write the model artifacts to.- `sync`: Whether to block until completion of the job.
###Code
if TRAIN_GPU:
model = job.run(
model_display_name="cifar10_" + TIMESTAMP,
args=CMDARGS,
replica_count=1,
machine_type=TRAIN_COMPUTE,
accelerator_type=TRAIN_GPU.name,
accelerator_count=TRAIN_NGPU,
base_output_dir=MODEL_DIR,
sync=False,
)
else:
model = job.run(
model_display_name="cifar10_" + TIMESTAMP,
args=CMDARGS,
replica_count=1,
machine_type=TRAIN_COMPUTE,
base_output_dir=MODEL_DIR,
sync=False,
)
model_path_to_deploy = MODEL_DIR
###Output
_____no_output_____
###Markdown
List a custom training job
###Code
_job = job.list(filter=f"display_name={DISPLAY_NAME}")
print(_job)
###Output
_____no_output_____
###Markdown
Wait for completion of custom training jobNext, wait for the custom training job to complete. Alternatively, one can set the parameter `sync` to `True` in the `run()` method to block until the custom training job is completed.
###Code
model.wait()
###Output
_____no_output_____
###Markdown
Delete a custom training jobAfter a training job is completed, you can delete the training job with the method `delete()`. Prior to completion, a training job can be canceled with the method `cancel()`.
###Code
job.delete()
###Output
_____no_output_____
###Markdown
Cleaning upTo clean up all Google Cloud resources used in this project, you can [delete the Google Cloud project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial.Otherwise, you can delete the individual resources you created in this tutorial:- Dataset- Pipeline- Model- Endpoint- AutoML Training Job- Batch Job- Custom Job- Hyperparameter Tuning Job- Cloud Storage Bucket
###Code
delete_all = True
if delete_all:
# Delete the dataset using the Vertex dataset object
try:
if "dataset" in globals():
dataset.delete()
except Exception as e:
print(e)
# Delete the model using the Vertex model object
try:
if "model" in globals():
model.delete()
except Exception as e:
print(e)
# Delete the endpoint using the Vertex endpoint object
try:
if "endpoint" in globals():
endpoint.undeploy_all()
endpoint.delete()
except Exception as e:
print(e)
# Delete the AutoML or Pipeline training job
try:
if "dag" in globals():
dag.delete()
except Exception as e:
print(e)
# Delete the custom training job
try:
if "job" in globals():
job.delete()
except Exception as e:
print(e)
# Delete the batch prediction job using the Vertex batch prediction object
try:
if "batch_predict_job" in globals():
batch_predict_job.delete()
except Exception as e:
print(e)
# Delete the hyperparameter tuning job using the Vertex hyperparameter tuning object
try:
if "hpt_job" in globals():
hpt_job.delete()
except Exception as e:
print(e)
if "BUCKET_NAME" in globals():
! gsutil rm -r $BUCKET_NAME
###Output
_____no_output_____ |
default.ipynb | ###Markdown
Python 2/3 compatibility
###Code
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import sys
if sys.version_info[0] == 2:
range = xrange
else:
basestring = str
print('`__future__` imports for Python 2/3 compatibility')
print('`range` defined as a generator in Python 2')
print('Defined `basestring=str` for Python 3')
###Output
_____no_output_____
###Markdown
Basic IPython utilities
###Code
from IPython.display import display
%matplotlib inline
print('Imported advanced `IPython` display')
print('Activated `matplotlib inline`')
###Output
_____no_output_____
###Markdown
Usual modules
###Code
import numpy as np
from matplotlib import pyplot as plt
# To get nicer-looking plots (according to me anyway).
# Get this one from https://github.com/cristobal-sifon/plottools
try:
from plottools.plotutils import update_rcParams
update_rcParams()
print('Updated `rcParams` using `plottools`')
except ImportError:
pass
print("""
Imported modules:
`sys`
`numpy` as `np`
`matplotlib.pyplot` as `plt`
""")
###Output
_____no_output_____ |
Aula Numpy 02 - Verificando propriedades dos arrays.ipynb | ###Markdown
Checking the properties of an ndarray
###Code
# Import NumPy
import numpy as np
# Create a 1-D ndarray with 10 random integers between 0 and 9
a1D = np.random.randint(0, 10, 10)
a1D
a2D = np.random.randint(1, 101, (4,5)) # Create a 4-row x 5-column ndarray with elements between 1 and 100
a2D
a3D = np.random.random((3,4,5)) # Create a 3-page x 4-row x 5-column ndarray with elements between 0 and 1
a3D
###Output
_____no_output_____
###Markdown
Checking the shape and number of dimensions of an array - shape and ndim
###Code
# The shape property returns the shape of the array
a1D.shape
a2D.shape
a3D.shape
# The ndim property returns the number of dimensions of the array
a1D.ndim
a2D.ndim
a3D.ndim
###Output
_____no_output_____
###Markdown
Checking the size of an array's dimensions - len
###Code
len(a1D) # Returns the size of the 1st dimension (number of columns)
len(a2D) # Returns the size of the 1st dimension (number of rows)
len(a2D[0]) # Returns the size of the 2nd dimension (number of columns)
len(a3D) # Returns the size of the 1st dimension (number of pages)
len(a3D[0]) # Returns the size of the 2nd dimension (number of rows)
len(a3D[0][0]) # Returns the size of the 3rd dimension (number of columns)
###Output
_____no_output_____
###Markdown
Checking the number of elements in an array - size
###Code
a1D.size
a2D.size
a3D.size
###Output
_____no_output_____
###Markdown
Checking the type of the elements of an array - dtype
###Code
a1D.dtype
a1D.dtype.name
a2D.dtype
a2D.dtype.name
a3D.dtype
a3D.dtype.name
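# The dtype also determines the memory footprint of the array (extra illustrative checks):
a3D.itemsize # bytes per element
a3D.nbytes # total bytes = size * itemsize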
###Output
_____no_output_____ |
content/homeworks/hw08/notebook/cs109b_hw8_v3.ipynb | ###Markdown
CS109B Data Science 2: Advanced Topics in Data Science Homework 8: Reinforcement Learning [100 pts]**Harvard University****Spring 2020****Instructors**: Pavlos Protopapas, Mark Glickman and Chris Tanner**DISCLAIMER**: No public reproduction of this homework nor its solution is allowed without the explicit consent of their authors.---
###Code
#PLEASE RUN THIS CELL
import requests
from IPython.core.display import HTML
styles = requests.get("https://raw.githubusercontent.com/Harvard-IACS/2018-CS109A/master/content/styles/cs109.css").text
HTML(styles)
###Output
_____no_output_____
###Markdown
INSTRUCTIONS- To submit your assignment follow the instructions given in Canvas.- Restart the kernel and run the whole notebook again before you submit.- Do not submit a notebook that is excessively long because output was not suppressed or otherwise limited.
###Code
# Numpy and plotting libraries
import numpy as np
import matplotlib.pyplot as plt
import time
%matplotlib inline
###Output
_____no_output_____
###Markdown
Overview The objective of this homework assignment is to get a taste of implementing a planning algorithm in a very simple setting. Markov Decision Process [100 points] We have a hallway consisting of 5 blocks (states 0-4). There are two actions, which deterministically move the agent to the left or the right. More explicitly: Performing action “left” in state 0 keeps you in state 0, moves you from state 1 to state 0, from state 2 to state 1, state 3 to state 2, and state 4 to state 3. Performing action “right” in state 4 keeps you in state 4, moves you from state 3 to state 4, from state 2 to state 3, from state 1 to state 2, and from state 0 to state 1. The agent receives a reward of -1.0 if it starts any iteration in state 0, state 1, state 2, or state 3. The agent receives a reward of +10.0 if it starts in state 4. Let the discount factor γ = 0.75.We provide class MDP that instantiates an object representing a Markov decision process and verifies shapes.**1.1** MDP problem [10 pts]: Build an MDP representing the hallway setting described above, by completing the function `build_hallway_mdp()`. You need to specify the array T that encodes the transitions from states and actions into next states, and a reward vector R that specifies the reward for being at a certain state.**1.2** Policy Evaluation [20 pts]: Initialize a policy “left” for every state (a 1D numpy array). Implement policy evaluation as described in lecture (also in Chapter 4 of [Sutton and Barto](http://incompleteideas.net/book/RLbook2018.pdf)). That is, for each possible starting state, what is the expected sum of future rewards for this policy? Using an iterative approach, how many iterations did it take for the value of the policy to converge to a precision of $10^{-5}$? **1.3** Q-function Computation [20 pts]: Compute the Q-function for the `always_left` policy above. Do you see any opportunities for policy improvement?**1.4** Policy Iteration [20 pts]: Using your solutions to questions 1.2 and 1.3 above, implement policy iteration. Report the sequence of policies you find starting with the policy “left” in every state. How many rounds of policy iteration are required to converge to the optimal policy? **1.5** [10 pts] What are the effects of different choices of the discount factor on the convergence of policy evaluation? Run policy evaluation for discount factor $\gamma \in [ 10^{-12}, 10^{-3}, 0.1, 0.33, 0.67, 0.9, 0.95, 0.99]$.**1.6** [20 pts] What happens if the transitions are stochastic? Recode the MDP so that with probability 0.1 the agent moves in the direction opposite to the chosen action. What are the values now when following the optimal policy?
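For reference, the standard definitions from Sutton and Barto, written in the notation of this assignment (where the reward depends only on the starting state): iterative policy evaluation repeatedly applies the Bellman backup $$V_{k+1}(s) = R(s) + \gamma \sum_{s'} T(s, \pi(s), s')\, V_k(s'),$$ and the Q-function of a policy $\pi$ is $$Q^{\pi}(s, a) = R(s) + \gamma \sum_{s'} T(s, a, s')\, V^{\pi}(s'),$$ which are the quantities that questions 1.2 and 1.3 ask you to compute.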
###Code
class MDP(object):
"""Wrapper for a discrete Markov decision process that makes shape checks"""
def __init__(self, T, R, discount):
"""Initialize the Markov Decision Process.
- `T` should be a 3D array whose dimensions represent initial states,
actions, and next states, respectively, and whose values represent
transition probabilities.
- `R` should be a 1D array describing rewards for beginning each
timestep in a particular state (or a 3D array like `T`). It will be
transformed into the appropriate 3D shape.
- `discount` should be a value in [0,1) controlling the decay of future
rewards."""
Ds, Da, _ = T.shape
if T.shape not in [(Ds, Da, Ds)]:
raise ValueError("T should be in R^|S|x|A|x|S|")
if R.shape not in [(Ds, Da, Ds), (Ds,)]:
raise ValueError("R should be in R^|S| or like T")
if discount < 0 or discount >= 1:
raise ValueError("discount should be in [0,1)")
if R.shape == (Ds,): # Expand R if necessary
R = np.array([[[R[s1] for s2 in range(Ds)] for a in range(Da)] for s1 in range(Ds)])
self.T = T
self.R = R
self.discount = discount
self.num_states = Ds
self.num_actions = Da
self.states = np.arange(Ds)
self.actions = np.arange(Da)
###Output
_____no_output_____
###Markdown
**1.1** MDP problem [10 pts]: Build an MDP representing the hallway setting described above by completing the function `build_hallway_mdp()`. You need to specify the array T that encodes the transitions from states and actions into next states, and a reward vector R that specifies the reward for being in a certain state.
###Code
def build_hallway_mdp(discount_factor=0.75):
"""Build an MDP representing the hallway setting described."""
# your code here
#We have five states 0-4
# we have used the code demo in the lab session
states = np.array([0,1,2,3,4])
#we have two actions, left & right
actions = np.array([0,1])
def next_state_probs(s,a):
transition = np.zeros_like(states)
next_state = max(s-1,0) if a==0 else min(s+1,4)
transition[next_state]=1.0
return transition
T= np.array([[next_state_probs(s,a) for a in actions] for s in states])
# The agent receives a reward of -1.0 if it starts any iteration in state 0, state 1, state 2, or state 3.
# The agent receives a reward of +10.0 if it starts in state 4
R= np.array([-1,-1,-1,-1,10])
# end of your code here
return MDP(T, R, discount_factor)
# Run for sanity check
mdp = build_hallway_mdp()
plt.figure(figsize=(5,2))
plt.subplot(121, title='Left transitions')
plt.imshow(mdp.T[:,0,:])
plt.ylabel("Initial state"); plt.xlabel('Next state')
plt.subplot(122, title='Right transitions')
plt.imshow(mdp.T[:,1,:])
plt.ylabel("Initial state"); plt.xlabel('Next state')
plt.show()
###Output
_____no_output_____
###Markdown
**1.2** Policy Evaluation [20 pts]: Initialize a policy “left” for every state (a 1D numpy array). Implement policy evaluation as described in lecture (also in Chapter 4 of [Sutton and Barto](http://incompleteideas.net/book/RLbook2018.pdf)). That is, for each possible starting state, what is the expected sum of future rewards for this policy? Using an iterative approach, how many iterations did it take for the value of the policy to converge to a precision of $10^{-5}$?
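For reference, the iterative update implemented in the cell below is the standard Bellman expectation backup (Chapter 4 of Sutton and Barto), written here for a deterministic policy $\pi$ and the state-based rewards of this problem:

$$V_{k+1}(s) \leftarrow \sum_{s'} T\big(s, \pi(s), s'\big)\,\big[R(s) + \gamma\, V_k(s')\big]$$

Iteration stops once the largest per-state change falls below the requested tolerance.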
###Code
def build_always_left_policy():
"""Build a policy representing the action "left" in every state."""
# your code here
action = np.zeros(shape=(5,), dtype=int)
return action
print(build_always_left_policy())
def iterative_value_estimation(mdp, policy, tol=1e-5):
"""Value estimation algorithm from page 75, Sutton and Barto. Returns an
estimate of the value of a given policy under the MDP (with the number of
iterations required to reach specified tolerance)."""
V = np.zeros(mdp.num_states)
num_iters = 0
# your code here
while True:
num_iters = num_iters + 1
delta = 0
#Loop for each state
for s in range(mdp.num_states):
v = V[s]
a = policy[s]
r = mdp.R[s,a]
V[s] = (mdp.T[s,a]*(r + mdp.discount*V)).sum()
# Update max update difference in this iteration
delta = max(delta, abs(V[s]-v))
# Terminate when delta is below tolerance
if delta < tol:
break
# end of your code here
return V, num_iters
# Run for sanity check
always_left = build_always_left_policy()
values, iters = iterative_value_estimation(mdp, always_left)
print('Policy value was:')
print(values.round(4))
tols = np.logspace(0,-8,9)
iters = [iterative_value_estimation(mdp, always_left, tol=tol)[1] for tol in tols]
plt.plot(tols, iters, marker='o')
plt.xscale('log')
plt.xlabel("Tolerance")
plt.ylabel("Iterations to converge to within tolerance")
plt.show()
###Output
Policy value was:
[-4. -4. -4. -4. 7.]
###Markdown
**1.3** Q-function Computation [20 pts]: Compute the Q-function for the `always_left` policy above. Do you see any opportunities for policy improvement?
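For reference, the quantity computed in the cell below follows Equation 4.6 of Sutton and Barto, specialized to this problem's state-based rewards:

$$Q^{\pi}(s, a) = \sum_{s'} T(s, a, s')\,\big[R(s) + \gamma\, V^{\pi}(s')\big]$$

Comparing $Q^{\pi}(s, a)$ across actions at each state shows where the current policy can be improved.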
###Code
# 1.3
def Q_function(mdp, policy, tol=1e-5):
"""Q function from Equation 4.6, Sutton and Barto. For each state and
action, returns the value of performing the action at that state, then
following the policy thereafter."""
# your code here
V, iters = iterative_value_estimation(mdp, policy, tol)
Q = np.zeros(shape=(mdp.num_states, mdp.num_actions))
for s in range(mdp.num_states):
for a in range(mdp.num_actions):
            for next_state in range(mdp.num_states):
                # Transition probability times (immediate reward + discounted value of next state).
                Q[s, a] += mdp.T[s, a, next_state] * (
                    mdp.R[s, a, next_state] + mdp.discount * V[next_state])
# end of your code here
assert Q.shape == (mdp.num_states, mdp.num_actions)
return Q
# Run for sanity check
Q = Q_function(mdp, always_left)
print('Q function was:')
print(Q.round(4))
###Output
Q function was:
[[-4.   -4.  ]
 [-4.   -4.  ]
 [-4.   -4.  ]
 [-4.    4.25]
 [ 7.   15.25]]
###Markdown
*Your answer here* **1.4** Policy Iteration [20 pts]: Using your solutions to questions 1.2 and 1.3 above, implement policy iteration. Report the sequence of policies you find starting with the policy “left” in every state. How many rounds of policy iteration are required to converge to the optimal policy?
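For reference, each round of policy iteration in the cell below alternates policy evaluation with the greedy improvement step

$$\pi'(s) \leftarrow \arg\max_{a} \sum_{s'} T(s, a, s')\,\big[R(s) + \gamma\, V^{\pi}(s')\big],$$

and stops once the policy no longer changes.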
###Code
# 1.4
def policy_iteration(mdp, init_policy=None, tol=1e-5):
"""Policy iteration algorithm from page 80, Sutton and Barto.
Iteratively transform the initial policy to become optimal.
Return the full path."""
    # your code here
    if init_policy is None:
        # Default to the all-"left" policy when no initial policy is given.
        init_policy = np.zeros(mdp.num_states, dtype=int)
    policies = [init_policy.copy()]
while True:
#Policy Evaluation
V, iters = iterative_value_estimation(mdp, init_policy, tol)
policy_stable = True
#Policy improvement
#Loop for each state
for s in range(mdp.num_states):
old_a = init_policy[s]
# Compute best action
best_a = None
for a in range(mdp.num_actions):
r = mdp.R[s, a]
v = (mdp.T[s, a]*(r + mdp.discount*V)).sum()
if best_a is None or best_v < v:
best_a = a
best_v = v
init_policy[s] = best_a
# Check for policy change
if best_a != old_a:
policy_stable = False
policies.append(init_policy.copy())
if policy_stable:
break
# end of your code here
return policies
# Sanity check
policy_iters = policy_iteration(mdp, always_left)
policy_iters
###Output
_____no_output_____
###Markdown
*Your answer here* **1.5** [10 pts] What are the effects of different choices of the discount factor on the convergence of policy evaluation? Run policy evaluation for discount factor $\gamma \in [ 10^{-12}, 10^{-3}, 0.1, 0.33, 0.67, 0.9, 0.95, 0.99]$.
###Code
# 1.5
# your code here
discount_factors = [1e-12, 1e-3, 0.1, 0.33, 0.67, 0.9, 0.95, 0.99]
iters_by_factor = []
for discount in discount_factors:
mdp = build_hallway_mdp(discount)
always_left = build_always_left_policy()
values, iters = iterative_value_estimation(mdp, always_left)
iters_by_factor.append(iters)
plt.plot(discount_factors, iters_by_factor, marker='o')
plt.xlabel(r'Discount factor $\gamma$')
plt.ylabel('Iterations for value estimate to converge')
plt.title(r"Convergence of value estimate by $\gamma$")
plt.show()
###Output
_____no_output_____
###Markdown
*Your answer here* **1.6** [20 pts] What happens if the transitions are stochastic? Recode the MDP so that the agent switches to the opposite action with probability 0.1. What are the values now when following the optimal policy?
###Code
# 1.6
# your code here
mdp.T[:,0,:]
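# A hedged sketch of one way to answer 1.6 (not an official solution): rebuild
# the MDP so that, with probability 0.1, the agent's action is flipped to the
# opposite one, then rerun policy iteration and evaluate the optimal policy.
def build_stochastic_hallway_mdp(discount_factor=0.75, p_flip=0.1):
    states = np.arange(5)
    actions = np.array([0, 1])
    def next_state_probs(s, a):
        transition = np.zeros(len(states))  # float transition probabilities
        intended = max(s - 1, 0) if a == 0 else min(s + 1, 4)
        opposite = min(s + 1, 4) if a == 0 else max(s - 1, 0)
        transition[intended] += 1.0 - p_flip
        transition[opposite] += p_flip
        return transition
    T = np.array([[next_state_probs(s, a) for a in actions] for s in states])
    R = np.array([-1, -1, -1, -1, 10])
    return MDP(T, R, discount_factor)

stochastic_mdp = build_stochastic_hallway_mdp()
optimal_policy = policy_iteration(stochastic_mdp, build_always_left_policy())[-1]
optimal_values, _ = iterative_value_estimation(stochastic_mdp, optimal_policy)
print('Optimal policy:', optimal_policy)
print('Values under the optimal policy:', optimal_values.round(4))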
###Output
_____no_output_____ |
site/ja/guide/migrate.ipynb | ###Markdown
Copyright 2018 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
TensorFlow 1 のコードを TensorFlow 2 に移行する TensorFlow.org で表示 Google Colab で実行 GitHub でソースを表示 ノートブックをダウンロード 本ドキュメントは、低レベル TensorFlow API のユーザーを対象としています。高レベル API(`tf.keras`)をご使用の場合は、コードを TensorFlow 2.x と完全互換にするためのアクションはほとんどまたはまったくありません。- [オプティマイザのデフォルトの学習率](keras_optimizer_lr)を確認してください。- メトリクスが記録される「名前」が[変更されている可能性がある](keras_metric_names)ことに注意してください。 TensorFlow 2.x で 1.X のコードを未修正で実行することは、([contrib を除き](https://github.com/tensorflow/community/blob/master/rfcs/20180907-contrib-sunset.md))依然として可能です。```pythonimport tensorflow.compat.v1 as tf tf.disable_v2_behavior()```しかし、これでは TensorFlow 2.0 で追加された改善の多くを活用できません。このガイドでは、コードのアップグレード、さらなる単純化、パフォーマンス向上、そしてより容易なメンテナンスについて説明します。 自動変換スクリプトこのドキュメントで説明される変更を実装する前に行うべき最初のステップは、[アップグレードスクリプト](./upgrade.md)を実行してみることです。これはコードを TensorFlow 2.x にアップグレードする際の初期パスとしては十分ですが、v2 特有のコードに変換するわけではありません。コードは依然として `tf.compat.v1` エンドポイントを使用して、プレースホルダー、セッション、コレクション、その他 1.x スタイルの機能へのアクセスが可能です。 トップレベルの動作の変更`tf.compat.v1.disable_v2_behavior()` を使用することで TensorFlow 2.x でコードが機能する場合でも、対処すべきグローバルな動作の変更があります。主な変更点は次のとおりです。 - *Eager execution、`v1.enable_eager_execution()`*: 暗黙的に `tf.Graph` を使用するコードは失敗します。このコードは必ず `with tf.Graph().as_default()` コンテキストでラップしてください。- *リソース変数、`v1.enable_resource_variables()`*: 一部のコードは、TensorFlow 参照変数によって有効化される非決定的な動作に依存する場合があります。 リソース変数は書き込み中にロックされるため、より直感的な一貫性を保証します。 - これによりエッジケースでの動作が変わる場合があります。 - これにより余分なコピーが作成されるため、メモリ使用量が増える可能性があります。 - これを無効にするには、`use_resource=False` を `tf.Variable` コンストラクタに渡します。- *テンソルの形状、`v1.enable_v2_tensorshape()`*: TensorFlow 2.x は、テンソルの形状の動作を簡略化されており、`t.shape[0].value` の代わりに `t.shape[0]` とすることができます。簡単な変更なので、すぐに修正しておくことをお勧めします。例については [TensorShape](tensorshape) をご覧ください。- *制御フロー、`v1.enable_control_flow_v2()`*: TensorFlow 2.x 制御フローの実装が簡略化されたため、さまざまなグラフ表現を生成します。問題が生じた場合には、[バグを報告](https://github.com/tensorflow/tensorflow/issues)してください。 TensorFlow 2.x のコードを作成するこのガイドでは、TensorFlow 1.x のコードを TensorFlow 2.x に変換するいくつかの例を確認します。これらの変更によって、コードがパフォーマンスの最適化および簡略化された API 呼び出しを活用できるようになります。それぞれのケースのパターンは次のとおりです。 1. `v1.Session.run` 呼び出しを置き換えるすべての `v1.Session.run` 呼び出しは、Python 関数で置き換える必要があります。- `feed_dict`および`v1.placeholder`は関数の引数になります。- `fetch` は関数の戻り値になります。- Eager execution では、`pdb` などの標準的な Python ツールを使用して、変換中に簡単にデバッグできます。次に、`tf.function` デコレータを追加して、グラフで効率的に実行できるようにします。 この機能についての詳細は、[AutoGraph ガイド](function.ipynb)をご覧ください。注意点:- `v1.Session.run` とは異なり、`tf.function` は固定のリターンシグネチャを持ち、常にすべての出力を返します。これによってパフォーマンスの問題が生じる場合は、2 つの個別の関数を作成します。- `tf.control_dependencies` または同様の演算は必要ありません。`tf.function` は、記述された順序で実行されたかのように動作します。たとえば、`tf.Variable` 割り当てと `tf.assert` は自動的に実行されます。[「モデルを変換する」セクション](converting_models)には、この変換プロセスの実際の例が含まれています。 2. Python オブジェクトを変数と損失の追跡に使用するTensorFlow 2.x では、いかなる名前ベースの変数追跡もまったく推奨されていません。 変数の追跡には Python オブジェクトを使用します。`v1.get_variable` の代わりに `tf.Variable` を使用してください。すべての`v1.variable_scope`は Python オブジェクトに変換が可能です。通常は次のうちの 1 つになります。- `tf.keras.layers.Layer`- `tf.keras.Model`- `tf.Module``tf.Graph.get_collection(tf.GraphKeys.VARIABLES)` などの変数のリストを集める必要がある場合には、`Layer` および `Model` オブジェクトの `.variables` と `.trainable_variables` 属性を使用します。これら `Layer` クラスと `Model` クラスは、グローバルコレクションの必要性を除去した別のプロパティを幾つか実装します。`.losses` プロパティは、`tf.GraphKeys.LOSSES` コレクション使用の置き換えとなります。詳細は [Keras ガイド](keras.ipynb)をご覧ください。警告 : 多くの `tf.compat.v1` シンボルはグローバルコレクションを暗黙的に使用しています。 3. トレーニングループをアップグレードするご利用のユースケースで動作する最高レベルの API を使用してください。独自のトレーニングループを構築するよりも `tf.keras.Model.fit` の選択を推奨します。これらの高レベル関数は、独自のトレーニングループを書く場合に見落とされやすい多くの低レベル詳細を管理します。例えば、それらは自動的に正則化損失を集めて、モデルを呼び出す時に`training=True`引数を設定します。 4. 
データ入力パイプラインをアップグレードするデータ入力には `tf.data` データセットを使用してください。それらのオブジェクトは効率的で、表現力があり、TensorFlow とうまく統合します。次のように、`tf.keras.Model.fit` メソッドに直接渡すことができます。```pythonmodel.fit(dataset, epochs=5)```また、標準的な Python で直接にイテレートすることもできます。```pythonfor example_batch, label_batch in dataset: break``` 5. `compat.v1`シンボルを移行する`tf.compat.v1`モジュールには、元のセマンティクスを持つ完全な TensorFlow 1.x API が含まれています。[TensorFlow 2 アップグレードスクリプト](upgrade.ipynb)は、変換が安全な場合、つまり v2 バージョンの動作が完全に同等であると判断できる場合は、シンボルを 2.0 と同等のものに変換します。(たとえば、これらは同じ関数なので、`v1.arg_max` の名前を `tf.argmax` に変更します。)コードの一部を使用してアップグレードスクリプトを実行した後に、`compat.v1` が頻出する可能性があります。 コードを調べ、それらを手動で同等の v2 のコードに変換する価値はあります。(該当するものがある場合には、ログに表示されているはずです。) モデルを変換する 低レベル変数 & 演算子実行低レベル API の使用例を以下に示します。- 変数スコープを使用して再利用を制御する。- `v1.get_variable`で変数を作成する。- コレクションに明示的にアクセスする。- 次のようなメソッドでコレクションに暗黙的にアクセスする。 - `v1.global_variables` - `v1.losses.get_regularization_loss`- `v1.placeholder` を使用してグラフ入力のセットアップをする。- `Session.run`でグラフを実行する。- 変数を手動で初期化する。 変換前TensorFlow 1.x を使用したコードでは、これらのパターンは以下のように表示されます。
###Code
import tensorflow as tf
import tensorflow.compat.v1 as v1
import tensorflow_datasets as tfds
g = v1.Graph()
with g.as_default():
in_a = v1.placeholder(dtype=v1.float32, shape=(2))
in_b = v1.placeholder(dtype=v1.float32, shape=(2))
def forward(x):
with v1.variable_scope("matmul", reuse=v1.AUTO_REUSE):
W = v1.get_variable("W", initializer=v1.ones(shape=(2,2)),
regularizer=lambda x:tf.reduce_mean(x**2))
b = v1.get_variable("b", initializer=v1.zeros(shape=(2)))
return W * x + b
out_a = forward(in_a)
out_b = forward(in_b)
reg_loss=v1.losses.get_regularization_loss(scope="matmul")
with v1.Session(graph=g) as sess:
sess.run(v1.global_variables_initializer())
outs = sess.run([out_a, out_b, reg_loss],
feed_dict={in_a: [1, 0], in_b: [0, 1]})
print(outs[0])
print()
print(outs[1])
print()
print(outs[2])
###Output
_____no_output_____
###Markdown
変換後 変換されたコードでは :- 変数はローカル Python オブジェクトです。- `forward`関数は依然として計算を定義します。- `Session.run`呼び出しは`forward`への呼び出しに置き換えられます。- パフォーマンス向上のためにオプションで`tf.function`デコレータを追加可能です。- どのグローバルコレクションも参照せず、正則化は手動で計算されます。- セッションやプレースホルダーはありません。
###Code
W = tf.Variable(tf.ones(shape=(2,2)), name="W")
b = tf.Variable(tf.zeros(shape=(2)), name="b")
@tf.function
def forward(x):
return W * x + b
out_a = forward([1,0])
print(out_a)
out_b = forward([0,1])
regularizer = tf.keras.regularizers.l2(0.04)
reg_loss=regularizer(W)
###Output
_____no_output_____
###Markdown
`tf.layers`ベースのモデル `v1.layers`モジュールは、変数を定義および再利用する`v1.variable_scope`に依存するレイヤー関数を含めるために使用されます。 変換前
###Code
def model(x, training, scope='model'):
with v1.variable_scope(scope, reuse=v1.AUTO_REUSE):
x = v1.layers.conv2d(x, 32, 3, activation=v1.nn.relu,
kernel_regularizer=lambda x:0.004*tf.reduce_mean(x**2))
x = v1.layers.max_pooling2d(x, (2, 2), 1)
x = v1.layers.flatten(x)
x = v1.layers.dropout(x, 0.1, training=training)
x = v1.layers.dense(x, 64, activation=v1.nn.relu)
x = v1.layers.batch_normalization(x, training=training)
x = v1.layers.dense(x, 10)
return x
train_data = tf.ones(shape=(1, 28, 28, 1))
test_data = tf.ones(shape=(1, 28, 28, 1))
train_out = model(train_data, training=True)
test_out = model(test_data, training=False)
print(train_out)
print()
print(test_out)
###Output
_____no_output_____
###Markdown
変換後 - レイヤーの単純なスタックが `tf.keras.Sequential`にぴったり収まります。(より複雑なモデルについては[カスタムレイヤーとモデル](keras/custom_layers_and_models.ipynb)および[ Functional API ](keras/functional.ipynb)をご覧ください。)- モデルが変数と正則化損失を追跡します。- `v1.layers`から`tf.keras.layers`への直接的なマッピングがあるため、変換は一対一対応でした。ほとんどの引数はそのままです。しかし、以下の点は異なります。- `training`引数は、それが実行される時点でモデルによって各レイヤーに渡されます。- 元の`model`関数への最初の引数(入力 `x`)はなくなりました。これはオブジェクトレイヤーがモデルの呼び出しからモデルの構築を分離するためです。また以下にも注意してください。- `tf.contrib`からの初期化子の正則化子を使用している場合は、他よりも多くの引数変更があります。- コードはコレクションに書き込みを行わないため、`v1.losses.get_regularization_loss`などの関数はそれらの値を返さなくなり、トレーニングループが壊れる可能性があります。
###Code
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(32, 3, activation='relu',
kernel_regularizer=tf.keras.regularizers.l2(0.04),
input_shape=(28, 28, 1)),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Flatten(),
tf.keras.layers.Dropout(0.1),
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Dense(10)
])
train_data = tf.ones(shape=(1, 28, 28, 1))
test_data = tf.ones(shape=(1, 28, 28, 1))
train_out = model(train_data, training=True)
print(train_out)
test_out = model(test_data, training=False)
print(test_out)
# Here are all the trainable variables.
len(model.trainable_variables)
# Here is the regularization loss.
model.losses
###Output
_____no_output_____
###Markdown
変数と`v1.layers`の混在 既存のコードは低レベルの TensorFlow 1.x 変数と演算子に高レベルの`v1.layers`が混ざっていることがよくあります。 変換前
###Code
def model(x, training, scope='model'):
with v1.variable_scope(scope, reuse=v1.AUTO_REUSE):
W = v1.get_variable(
"W", dtype=v1.float32,
initializer=v1.ones(shape=x.shape),
regularizer=lambda x:0.004*tf.reduce_mean(x**2),
trainable=True)
if training:
x = x + W
else:
x = x + W * 0.5
x = v1.layers.conv2d(x, 32, 3, activation=tf.nn.relu)
x = v1.layers.max_pooling2d(x, (2, 2), 1)
x = v1.layers.flatten(x)
return x
train_out = model(train_data, training=True)
test_out = model(test_data, training=False)
###Output
_____no_output_____
###Markdown
変換後 このコードを変換するには、前の例で示したレイヤーからレイヤーへのマッピングのパターンに従います。一般的なパターンは次の通りです。- `__init__`でレイヤーパラメータを収集する。- `build`で変数を構築する。- `call`で計算を実行し、結果を返す。`v1.variable_scope`は事実上それ自身のレイヤーです。従って`tf.keras.layers.Layer`として書き直します。詳細は[ガイド](keras/custom_layers_and_models.ipynb)をご覧ください。
###Code
# Create a custom layer for part of the model
class CustomLayer(tf.keras.layers.Layer):
def __init__(self, *args, **kwargs):
super(CustomLayer, self).__init__(*args, **kwargs)
def build(self, input_shape):
self.w = self.add_weight(
shape=input_shape[1:],
dtype=tf.float32,
initializer=tf.keras.initializers.ones(),
regularizer=tf.keras.regularizers.l2(0.02),
trainable=True)
# Call method will sometimes get used in graph mode,
# training will get turned into a tensor
@tf.function
def call(self, inputs, training=None):
if training:
return inputs + self.w
else:
return inputs + self.w * 0.5
custom_layer = CustomLayer()
print(custom_layer([1]).numpy())
print(custom_layer([1], training=True).numpy())
train_data = tf.ones(shape=(1, 28, 28, 1))
test_data = tf.ones(shape=(1, 28, 28, 1))
# Build the model including the custom layer
model = tf.keras.Sequential([
CustomLayer(input_shape=(28, 28, 1)),
tf.keras.layers.Conv2D(32, 3, activation='relu'),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Flatten(),
])
train_out = model(train_data, training=True)
test_out = model(test_data, training=False)
###Output
_____no_output_____
###Markdown
注意点:- サブクラス化された Keras モデルとレイヤーは v1 グラフ(自動制御依存性なし)と eager モードの両方で実行される必要があります。 - `call()`を`tf.function()`にラップして、AutoGraph と自動制御依存性を得るようにします。- `training`引数を受け取って`call`することを忘れないようにしてください。 - それは`tf.Tensor`である場合があります。 - それは Python ブール型である場合があります。- `self.add_weight()`を使用して、コンストラクタまたは`Model.build`でモデル変数を作成します。 - `Model.build`では、入力形状にアクセスできるため、適合する形状で重みを作成できます。 - `tf.keras.layers.Layer.add_weight`を使用すると、Keras が変数と正則化損失を追跡できるようになります。- オブジェクトに`tf.Tensors`を保持してはいけません。 - それらは`tf.function`または eager コンテキスト内のいずれかで作成される可能性がありますが、それらのテンソルは異なる振る舞いをします。 - 状態には`tf.Variable`を使用してください。これは常に両方のコンテキストから使用可能です。 - `tf.Tensors`は中間値専用です。 Slim & contrib.layers に関する注意古い TensorFlow 1.x コードの大部分は [Slim](https://ai.googleblog.com/2016/08/tf-slim-high-level-library-to-define.html) ライブラリを使用しており、これは`tf.contrib.layers`として TensorFlow 1.x でパッケージ化されていました。 `contrib`モジュールに関しては、TensorFlow 2.x では`tf.compat.v1`内でも、あっても利用できなくなりました。Slim を使用したコードの TensorFlow 2.x への変換は、`v1.layers`を使用したレポジトリの変換よりも複雑です。現実的には、まず最初に Slim コードを`v1.layers`に変換してから Keras に変換するほうが賢明かもしれません。- `arg_scopes`を除去します。すべての引数は明示的である必要があります。- それらを使用する場合、 `normalizer_fn`と`activation_fn`をそれら自身のレイヤーに分割します。- 分離可能な畳み込みレイヤーは 1 つまたはそれ以上の異なる Keras レイヤー(深さ的な、ポイント的な、分離可能な Keras レイヤー)にマップします。- Slim と`v1.layers`には異なる引数名とデフォルト値があります。- 一部の引数には異なるスケールがあります。- Slim 事前トレーニング済みモデルを使用する場合は、`tf.keras.applications`から Keras 事前トレーニング済みモデル、または元の Slim コードからエクスポートされた [TensorFlow ハブ](https://tfhub.dev/s?q=slim%20tf2)の TensorFlow 2 SavedModel をお試しください。一部の`tf.contrib`レイヤーはコアの TensorFlow に移動されていない可能性がありますが、代わりに [TensorFlow アドオンパッケージ](https://github.com/tensorflow/addons)に移動されています。 トレーニング `tf.keras`モデルにデータを供給する方法は沢山あります。それらは Python ジェネレータと Numpy 配列を入力として受け取ります。モデルへのデータ供給方法として推奨するのは、データ操作用の高パフォーマンスクラスのコレクションを含む`tf.data`パッケージの使用です。依然として`tf.queue`を使用している場合、これらは入力パイプラインとしてではなく、データ構造としてのみサポートされます。 データセットを使用する [TensorFlow Dataset](https://tensorflow.org/datasets) パッケージ(`tfds`)には、事前定義されたデータセットを`tf.data.Dataset`オブジェクトとして読み込むためのユーティリティが含まれています。この例として、`tfds`を使用して MNISTdataset を読み込んでみましょう。
###Code
datasets, info = tfds.load(name='mnist', with_info=True, as_supervised=True)
mnist_train, mnist_test = datasets['train'], datasets['test']
###Output
_____no_output_____
###Markdown
次に、トレーニング用のデータを準備します。- 各画像をリスケールする。- 例の順序をシャッフルする。- 画像とラベルのバッチを集める。
###Code
BUFFER_SIZE = 10 # Use a much larger value for real code.
BATCH_SIZE = 64
NUM_EPOCHS = 5
def scale(image, label):
image = tf.cast(image, tf.float32)
image /= 255
return image, label
###Output
_____no_output_____
###Markdown
例を短く保つために、データセットをトリミングして 5 バッチのみを返すようにします。
###Code
train_data = mnist_train.map(scale).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
test_data = mnist_test.map(scale).batch(BATCH_SIZE)
STEPS_PER_EPOCH = 5
train_data = train_data.take(STEPS_PER_EPOCH)
test_data = test_data.take(STEPS_PER_EPOCH)
image_batch, label_batch = next(iter(train_data))
###Output
_____no_output_____
###Markdown
Keras トレーニングループを使用するトレーニングプロセスの低レベル制御が不要な場合は、Keras 組み込みの`fit`、`evaluate`、`predict`メソッドの使用が推奨されます。これらのメソッドは(シーケンシャル、関数型、またはサブクラス化)実装を問わず、モデルをトレーニングするための統一インターフェースを提供します。これらのメソッドには次のような優位点があります。- Numpy 配列、Python ジェネレータ、`tf.data.Datasets`を受け取ります。- 正則化と活性化損失を自動的に適用します。- [マルチデバイストレーニングのために](distributed_training.ipynb)`tf.distribute`をサポートします。- 任意の callable は損失とメトリクスとしてサポートします。- `tf.keras.callbacks.TensorBoard`のようなコールバックとカスタムコールバックをサポートします。- 自動的に TensorFlow グラフを使用し、高性能です。ここに`Dataset`を使用したモデルのトレーニング例を示します。(この機能ついての詳細は[チュートリアル](../tutorials)をご覧ください。)
###Code
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(32, 3, activation='relu',
kernel_regularizer=tf.keras.regularizers.l2(0.02),
input_shape=(28, 28, 1)),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Flatten(),
tf.keras.layers.Dropout(0.1),
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Dense(10)
])
# Model is the full model w/o custom layers
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
model.fit(train_data, epochs=NUM_EPOCHS)
loss, acc = model.evaluate(test_data)
print("Loss {}, Accuracy {}".format(loss, acc))
###Output
_____no_output_____
###Markdown
ループを自分で書くKeras モデルのトレーニングステップは動作していても、そのステップの外でより制御が必要な場合は、データ イテレーション ループで`tf.keras.Model.train_on_batch`メソッドの使用を検討してみてください。`tf.keras.callbacks.Callback`として、多くのものが実装可能であることに留意してください。このメソッドには前のセクションで言及したメソッドの優位点の多くがありますが、外側のループのユーザー制御も与えます。`tf.keras.Model.test_on_batch`または`tf.keras.Model.evaluate`を使用して、トレーニング中のパフォーマンスをチェックすることも可能です。注意: `train_on_batch`と`test_on_batch`は、デフォルトで単一バッチの損失とメトリクスを返します。`reset_metrics=False`を渡すと累積メトリックを返しますが、必ずメトリックアキュムレータを適切にリセットすることを忘れないようにしてくだい。また、`AUC`のような一部のメトリクスは正しく計算するために`reset_metrics=False`が必要なことも覚えておいてください。上のモデルのトレーニングを続けます。
###Code
# Model is the full model w/o custom layers
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
for epoch in range(NUM_EPOCHS):
#Reset the metric accumulators
model.reset_metrics()
for image_batch, label_batch in train_data:
result = model.train_on_batch(image_batch, label_batch)
metrics_names = model.metrics_names
print("train: ",
"{}: {:.3f}".format(metrics_names[0], result[0]),
"{}: {:.3f}".format(metrics_names[1], result[1]))
for image_batch, label_batch in test_data:
result = model.test_on_batch(image_batch, label_batch,
# return accumulated metrics
reset_metrics=False)
metrics_names = model.metrics_names
print("\neval: ",
"{}: {:.3f}".format(metrics_names[0], result[0]),
"{}: {:.3f}".format(metrics_names[1], result[1]))
###Output
_____no_output_____
###Markdown
トレーニングステップをカスタマイズするより多くの柔軟性と制御を必要とする場合、独自のトレーニングループを実装することでそれが可能になります。以下の 3 つのステップを踏みます。1. Python ジェネレータか`tf.data.Dataset`をイテレートして例のバッチを作成します。2. `tf.GradientTape`を使用して勾配を集めます。3. `tf.keras.optimizers`の 1 つを使用して、モデルの変数に重み更新を適用します。留意点:- サブクラス化されたレイヤーとモデルの`call`メソッドには、常に`training`引数を含めます。- `training`引数を確実に正しくセットしてモデルを呼び出します。- 使用方法によっては、モデルがデータのバッチ上で実行されるまでモデル変数は存在しないかもしれません。- モデルの正則化損失などを手動で処理する必要があります。v1 と比べて簡略化されている点に注意してください :- 変数初期化子を実行する必要はありません。作成時に変数は初期化されます。- たとえ`tf.function`演算が eager モードで振る舞う場合でも、手動の制御依存性を追加する必要はありません。
###Code
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(32, 3, activation='relu',
kernel_regularizer=tf.keras.regularizers.l2(0.02),
input_shape=(28, 28, 1)),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Flatten(),
tf.keras.layers.Dropout(0.1),
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Dense(10)
])
optimizer = tf.keras.optimizers.Adam(0.001)
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
@tf.function
def train_step(inputs, labels):
with tf.GradientTape() as tape:
predictions = model(inputs, training=True)
regularization_loss=tf.math.add_n(model.losses)
pred_loss=loss_fn(labels, predictions)
total_loss=pred_loss + regularization_loss
gradients = tape.gradient(total_loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
for epoch in range(NUM_EPOCHS):
for inputs, labels in train_data:
train_step(inputs, labels)
print("Finished epoch", epoch)
###Output
_____no_output_____
###Markdown
新しいスタイルのメトリクスと損失TensorFlow 2.x では、メトリクスと損失はオブジェクトです。Eager で実行的に`tf.function`内で動作します。損失オブジェクトは呼び出し可能で、(y_true, y_pred) を引数として期待します。
###Code
cce = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
cce([[1, 0]], [[-1.0,3.0]]).numpy()
###Output
_____no_output_____
###Markdown
メトリックオブジェクトには次のメソッドがあります 。- `Metric.update_state()` — 新しい観測を追加する- `Metric.result()` — 観測値が与えられたとき、メトリックの現在の結果を得る- `Metric.reset_states()` — すべての観測をクリアするオブジェクト自体は呼び出し可能です。呼び出しは`update_state`と同様に新しい観測の状態を更新し、メトリクスの新しい結果を返します。メトリックの変数を手動で初期化する必要はありません。また、TensorFlow 2.x は自動制御依存性を持つため、それらについても気にする必要はありません。次のコードは、メトリックを使用してカスタムトレーニングループ内で観測される平均損失を追跡します。
###Code
# Create the metrics
loss_metric = tf.keras.metrics.Mean(name='train_loss')
accuracy_metric = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')
@tf.function
def train_step(inputs, labels):
with tf.GradientTape() as tape:
predictions = model(inputs, training=True)
regularization_loss=tf.math.add_n(model.losses)
pred_loss=loss_fn(labels, predictions)
total_loss=pred_loss + regularization_loss
gradients = tape.gradient(total_loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
# Update the metrics
loss_metric.update_state(total_loss)
accuracy_metric.update_state(labels, predictions)
for epoch in range(NUM_EPOCHS):
# Reset the metrics
loss_metric.reset_states()
accuracy_metric.reset_states()
for inputs, labels in train_data:
train_step(inputs, labels)
# Get the metric results
mean_loss=loss_metric.result()
mean_accuracy = accuracy_metric.result()
print('Epoch: ', epoch)
print(' loss: {:.3f}'.format(mean_loss))
print(' accuracy: {:.3f}'.format(mean_accuracy))
###Output
_____no_output_____
###Markdown
Keras メトリック名 TensorFlow 2.x では、Keras モデルはメトリクス名の処理に関してより一貫性があります。メトリクスリストで文字列を渡すと、*まさにその*文字列がメトリクスの`name`として使用されます。これらの名前は`model.fit`によって返される履歴オブジェクトと、`keras.callbacks`に渡されるログに表示されます。これはメトリクスリストで渡した文字列に設定されています。
###Code
model.compile(
optimizer = tf.keras.optimizers.Adam(0.001),
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics = ['acc', 'accuracy', tf.keras.metrics.SparseCategoricalAccuracy(name="my_accuracy")])
history = model.fit(train_data)
history.history.keys()
###Output
_____no_output_____
###Markdown
これは`metrics=["accuracy"]`を渡すと`dict_keys(['loss', 'acc'])`になっていた、以前のバージョンとは異なります。 Keras オプティマイザ `v1.train.AdamOptimizer`や`v1.train.GradientDescentOptimizer`などの`v1.train`内のオプティマイザは、`tf.keras.optimizers`内に同等のものを持ちます。 `v1.train`を`keras.optimizers`に変換するオプティマイザを変換する際の注意事項を次に示します。- オプティマイザをアップグレードすると、[古いチェックポイントとの互換性がなくなる可能性があります](checkpoints)。- epsilon のデフォルトはすべて`1e-8`ではなく`1e-7`になりました。(これはほとんどのユースケースで無視できます。)- `v1.train.GradientDescentOptimizer`は`tf.keras.optimizers.SGD`で直接置き換えが可能です。- `v1.train.MomentumOptimizer`はモメンタム引数(`tf.keras.optimizers.SGD(..., momentum=...)`)を使用して`SGD`オプティマイザで直接置き換えが可能です。- `v1.train.AdamOptimizer`を変換して`tf.keras.optimizers.Adam`を使用することが可能です。beta1引数と`beta2`引数の名前は、`beta_1`と`beta_2`に変更されています。- `v1.train.RMSPropOptimizer`は`tf.keras.optimizers.RMSprop`に変換可能です。 `decay`引数の名前は`rho`に変更されています。- `v1.train.AdadeltaOptimizer`は`tf.keras.optimizers.Adadelta`に直接変換が可能です。- `tf.train.AdagradOptimizer`は `tf.keras.optimizers.Adagrad`に直接変換が可能です。- `tf.train.FtrlOptimizer`は`tf.keras.optimizers.Ftrl`に直接変換が可能です。`accum_name`および`linear_name`引数は削除されています。- `tf.contrib.AdamaxOptimizer`と`tf.contrib.NadamOptimizer`は `tf.keras.optimizers.Adamax`と`tf.keras.optimizers.Nadam`に直接変換が可能です。`beta1`引数と`beta2`引数の名前は、`beta_1`と`beta_2`に変更されています。 一部の`tf.keras.optimizers`の新しいデフォルト警告: モデルの収束挙動に変化が見られる場合には、デフォルトの学習率を確認してください。`optimizers.SGD`、`optimizers.Adam`、または`optimizers.RMSprop`に変更はありません。次のデフォルトの学習率が変更されました。- `optimizers.Adagrad` 0.01 から 0.001 へ- `optimizers.Adadelta` 1.0 から 0.001 へ- `optimizers.Adamax` 0.002 から 0.001 へ- `optimizers.Nadam` 0.002 から 0.001 へ TensorBoard TensorFlow 2 には、TensorBoard で視覚化するための要約データを記述するために使用される`tf.summary` API の大幅な変更が含まれています。新しい`tf.summary`の概要については、TensorFlow 2 API を使用した[複数のチュートリアル](https://www.tensorflow.org/tensorboard/get_started)があります。これには、[TensorBoard TensorFlow 2 移行ガイド](https://www.tensorflow.org/tensorboard/migrate)も含まれています。 保存と読み込み チェックポイントの互換性TensorFlow 2.x は[オブジェクトベースのチェックポイント](checkpoint.ipynb)を使用します。古いスタイルの名前ベースのチェックポイントは、注意を払えば依然として読み込むことができます。コード変換プロセスは変数名変更という結果になるかもしれませんが、回避方法はあります。最も単純なアプローチは、チェックポイント内の名前と新しいモデルの名前を揃えて並べることです。- 変数にはすべて依然として設定が可能な`name`引数があります。- Keras モデルはまた `name`引数を取り、それらの変数のためのプレフィックスとして設定されます。- `v1.name_scope`関数は、変数名のプレフィックスの設定に使用できます。これは`tf.variable_scope`とは大きく異なります。これは名前だけに影響するもので、変数と再利用の追跡はしません。ご利用のユースケースで動作しない場合は、`v1.train.init_from_checkpoint`を試してみてください。これは`assignment_map`引数を取り、古い名前から新しい名前へのマッピングを指定します。注意 : [読み込みを遅延](checkpoint.ipynbloading_mechanics)できるオブジェクトベースのチェックポイントとは異なり、名前ベースのチェックポイントは関数が呼び出される時に全ての変数が構築されていることを要求します。一部のモデルは、`build`を呼び出すかデータのバッチでモデルを実行するまで変数の構築を遅延します。[TensorFlow Estimatorリポジトリ](https://github.com/tensorflow/estimator/blob/master/tensorflow_estimator/python/estimator/tools/checkpoint_converter.py)には事前作成された Estimator のチェックポイントを TensorFlow 1.X から 2.0 にアップグレードするための[変換ツール](checkpoint_converter)が含まれています。これは、同様のユースケースのツールを構築する方法の例として有用な場合があります。 保存されたモデルの互換性保存されたモデルには、互換性に関する重要な考慮事項はありません。- TensorFlow 1.x saved_models は TensorFlow 2.x で動作します。- TensorFlow 2.x saved_models は全ての演算がサポートされていれば TensorFlow 1.x で動作します。 Graph.pb または Graph.pbtxt 未加工の`Graph.pb`ファイルを TensorFlow 2.x にアップグレードする簡単な方法はありません。確実な方法は、ファイルを生成したコードをアップグレードすることです。ただし、「凍結グラフ」(変数が定数に変換された`tf.Graph`)がある場合、`v1.wrap_function`を使用して[`concrete_function`](https://tensorflow.org/guide/concrete_function)への変換が可能です。
###Code
def wrap_frozen_graph(graph_def, inputs, outputs):
def _imports_graph_def():
tf.compat.v1.import_graph_def(graph_def, name="")
wrapped_import = tf.compat.v1.wrap_function(_imports_graph_def, [])
import_graph = wrapped_import.graph
return wrapped_import.prune(
tf.nest.map_structure(import_graph.as_graph_element, inputs),
tf.nest.map_structure(import_graph.as_graph_element, outputs))
###Output
_____no_output_____
###Markdown
たとえば、次のような凍結された Inception v1 グラフ(2016 年)があります。
###Code
path = tf.keras.utils.get_file(
'inception_v1_2016_08_28_frozen.pb',
'http://storage.googleapis.com/download.tensorflow.org/models/inception_v1_2016_08_28_frozen.pb.tar.gz',
untar=True)
###Output
_____no_output_____
###Markdown
`tf.GraphDef`を読み込みます。
###Code
graph_def = tf.compat.v1.GraphDef()
loaded = graph_def.ParseFromString(open(path,'rb').read())
###Output
_____no_output_____
###Markdown
これを`concrete_function`にラップします。
###Code
inception_func = wrap_frozen_graph(
graph_def, inputs='input:0',
outputs='InceptionV1/InceptionV1/Mixed_3b/Branch_1/Conv2d_0a_1x1/Relu:0')
###Output
_____no_output_____
###Markdown
入力としてテンソルを渡します。
###Code
input_img = tf.ones([1,224,224,3], dtype=tf.float32)
inception_func(input_img).shape
###Output
_____no_output_____
###Markdown
Estimator Estimator でトレーニングするEstimator は TensorFlow 2.0 でサポートされています。Estimator を使用する際には、TensorFlow 1.x. からの`input_fn()`、`tf.estimator.TrainSpec`、`tf.estimator.EvalSpec`を使用できます。ここに train と evaluate specs を伴う `input_fn` を使用する例があります。 input_fn と train/eval specs を作成する
###Code
# Define the estimator's input_fn
def input_fn():
datasets, info = tfds.load(name='mnist', with_info=True, as_supervised=True)
mnist_train, mnist_test = datasets['train'], datasets['test']
BUFFER_SIZE = 10000
BATCH_SIZE = 64
def scale(image, label):
image = tf.cast(image, tf.float32)
image /= 255
return image, label[..., tf.newaxis]
train_data = mnist_train.map(scale).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
return train_data.repeat()
# Define train & eval specs
train_spec = tf.estimator.TrainSpec(input_fn=input_fn,
max_steps=STEPS_PER_EPOCH * NUM_EPOCHS)
eval_spec = tf.estimator.EvalSpec(input_fn=input_fn,
steps=STEPS_PER_EPOCH)
###Output
_____no_output_____
###Markdown
Keras モデル定義を使用する TensorFlow 2.x で Estimator を構築する方法には、いくつかの違いがあります。モデルは Keras を使用して定義することを推奨します。次に`tf.keras.estimator.model_to_estimator`ユーティリティを使用して、モデルを Estimator に変更します。次のコードは Estimator を作成してトレーニングする際に、このユーティリティをどのように使用するかを示します。
###Code
def make_model():
return tf.keras.Sequential([
tf.keras.layers.Conv2D(32, 3, activation='relu',
kernel_regularizer=tf.keras.regularizers.l2(0.02),
input_shape=(28, 28, 1)),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Flatten(),
tf.keras.layers.Dropout(0.1),
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Dense(10)
])
model = make_model()
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
estimator = tf.keras.estimator.model_to_estimator(
keras_model = model
)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
###Output
_____no_output_____
###Markdown
注意 : Keras で重み付きメトリクスを作成し、`model_to_estimator`を使用してそれらを Estimator API で重み付きメトリクスを変換することはサポートされません。それらのメトリクスは、`add_metrics`関数を使用して Estimator 仕様で直接作成する必要があります。 カスタム `model_fn` を使用する保守する必要がある既存のカスタム Estimator `model_fn` を持つ場合には、`model_fn`を変換して Keras モデルを使用できるようにすることが可能です。しかしながら、互換性の理由から、カスタム`model_fn`は依然として1.x スタイルのグラフモードで動作します。これは eager execution はなく自動制御依存性もないことも意味します。注意: 長期的には、特にカスタムの `model_fn` を使って、`tf.estimator` から移行することを計画する必要があります。代替の API は `tf.keras` と `tf.distribute` です。トレーニングの一部に `Estimator` を使用する必要がある場合は、`tf.keras.estimator.model_to_estimator` コンバータを使用して `keras.Model` から Estimator を作成する必要があります。 最小限の変更で model_fn をカスタマイズするTensorFlow 2.0 でカスタム`model_fn`を動作させるには、既存のコードの変更を最小限に留めたい場合、`optimizers`や`metrics`などの`tf.compat.v1`シンボルを使用することができます。カスタム`model_fn`で Keras モデルを使用することは、それをカスタムトレーニングループで使用することに類似しています。- `mode`引数を基に、`training`段階を適切に設定します。- モデルの`trainable_variables`をオプティマイザに明示的に渡します。しかし、[カスタムループ](custom_loop)と比較して、重要な違いがあります。- `Model.losses`を使用する代わりに`Model.get_losses_for`を使用して損失を抽出します。- `Model.get_updates_for`を使用してモデルの更新を抽出します。注意 : 「更新」は各バッチの後にモデルに適用される必要がある変更です。例えば、`layers.BatchNormalization`レイヤーの平均と分散の移動平均などです。次のコードはカスタム`model_fn`から Estimator を作成し、これらの懸念事項をすべて示しています。
###Code
def my_model_fn(features, labels, mode):
model = make_model()
optimizer = tf.compat.v1.train.AdamOptimizer()
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
training = (mode == tf.estimator.ModeKeys.TRAIN)
predictions = model(features, training=training)
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
reg_losses = model.get_losses_for(None) + model.get_losses_for(features)
total_loss=loss_fn(labels, predictions) + tf.math.add_n(reg_losses)
accuracy = tf.compat.v1.metrics.accuracy(labels=labels,
predictions=tf.math.argmax(predictions, axis=1),
name='acc_op')
update_ops = model.get_updates_for(None) + model.get_updates_for(features)
minimize_op = optimizer.minimize(
total_loss,
var_list=model.trainable_variables,
global_step=tf.compat.v1.train.get_or_create_global_step())
train_op = tf.group(minimize_op, update_ops)
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
loss=total_loss,
train_op=train_op, eval_metric_ops={'accuracy': accuracy})
# Create the Estimator & Train
estimator = tf.estimator.Estimator(model_fn=my_model_fn)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
###Output
_____no_output_____
###Markdown
TensorFlow 2.x シンボルで`model_fn`をカスタマイズするTensorFlow 1.x シンボルをすべて削除し、カスタム`model_fn` をネイティブの TensorFlow 2.x にアップグレードする場合は、オプティマイザとメトリクスを`tf.keras.optimizers`と`tf.keras.metrics`にアップグレードする必要があります。カスタム`model_fn`では、上記の[変更](minimal_changes)に加えて、さらにアップグレードを行う必要があります。- `v1.train.Optimizer` の代わりに `tf.keras.optimizers` を使用します。- 損失が呼び出し可能(関数など)な場合は、`Optimizer.minimize()`を使用して`train_op/minimize_op`を取得します。- `train_op/minimize_op`を計算するには、 - 損失がスカラー損失`Tensor`(呼び出し不可)の場合は、`Optimizer.get_updates()`を使用します。返されるリストの最初の要素は目的とする`train_op/minimize_op`です。 - 損失が呼び出し可能(関数など)な場合は、`Optimizer.minimize()`を使用して`train_op/minimize_op`を取得します。- 評価には`tf.compat.v1.metrics`の代わりに[`tf.keras.metrics`](https://www.tensorflow.org/api_docs/python/tf/keras/metrics)を使用します。上記の`my_model_fn`の例では、2.0 シンボルの移行されたコードは次のように表示されます。
###Code
def my_model_fn(features, labels, mode):
model = make_model()
training = (mode == tf.estimator.ModeKeys.TRAIN)
loss_obj = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
predictions = model(features, training=training)
# Get both the unconditional losses (the None part)
# and the input-conditional losses (the features part).
reg_losses = model.get_losses_for(None) + model.get_losses_for(features)
total_loss=loss_obj(labels, predictions) + tf.math.add_n(reg_losses)
# Upgrade to tf.keras.metrics.
accuracy_obj = tf.keras.metrics.Accuracy(name='acc_obj')
accuracy = accuracy_obj.update_state(
y_true=labels, y_pred=tf.math.argmax(predictions, axis=1))
train_op = None
if training:
# Upgrade to tf.keras.optimizers.
optimizer = tf.keras.optimizers.Adam()
# Manually assign tf.compat.v1.global_step variable to optimizer.iterations
# to make tf.compat.v1.train.global_step increased correctly.
# This assignment is a must for any `tf.train.SessionRunHook` specified in
# estimator, as SessionRunHooks rely on global step.
optimizer.iterations = tf.compat.v1.train.get_or_create_global_step()
# Get both the unconditional updates (the None part)
# and the input-conditional updates (the features part).
update_ops = model.get_updates_for(None) + model.get_updates_for(features)
# Compute the minimize_op.
minimize_op = optimizer.get_updates(
total_loss,
model.trainable_variables)[0]
train_op = tf.group(minimize_op, *update_ops)
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
loss=total_loss,
train_op=train_op,
eval_metric_ops={'Accuracy': accuracy_obj})
# Create the Estimator & Train.
estimator = tf.estimator.Estimator(model_fn=my_model_fn)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
###Output
_____no_output_____
###Markdown
事前作成された Estimator`tf.estimator.DNN*`、`tf.estimator.Linear*`、 `tf.estimator.DNNLinearCombined*`のファミリーに含まれる[事前作成された Estimator](https://www.tensorflow.org/guide/premade_estimators) は、依然として TensorFlow 2.0 API でもサポートされていますが、一部の引数が変更されています。1. `input_layer_partitioner`: v2 で削除されました。2. `loss_reduction`: `tf.compat.v1.losses.Reduction`の代わりに`tf.keras.losses.Reduction`に更新されました。デフォルト値も`tf.compat.v1.losses.Reduction.SUM`から`tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE`に変更されています。3. `optimizer`、`dnn_optimizer`、`linear_optimizer`: これらの引数は`tf.compat.v1.train.Optimizer`の代わりに`tf.keras.optimizers`に更新されています。上記の変更を移行するには :1. TensorFlow 2.x では[`配布戦略`](https://www.tensorflow.org/guide/distributed_training)が自動的に処理するため、`input_layer_partitioner`の移行は必要ありません。2. `loss_reduction`については[`tf.keras.losses.Reduction`](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/losses/Reduction)でサポートされるオプションを確認してください。3. `optimizer` 引数の場合: - 1) `optimizer`、`dnn_optimizer`、または `linear_optimizer` 引数を渡さない場合、または 2) `optimizer` 引数を `string` としてコードに指定しない場合、デフォルトで `tf.keras.optimizers` が使用されるため、何も変更する必要はありません。 - `optimizer`引数については、`optimizer`、`dnn_optimizer`、`linear_optimizer`引数を渡さない場合、または`optimizer`引数をコード内の内の`string`として指定する場合は、何も変更する必要はありません。デフォルトで`tf.keras.optimizers`を使用します。それ以外の場合は、`tf.compat.v1.train.Optimizer`から対応する[`tf.keras.optimizers`](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/optimizers)に更新する必要があります。 チェックポイントコンバータ`tf.keras.optimizers`は異なる変数セットを生成してチェックポイントに保存するするため、`keras.optimizers`への移行は TensorFlow 1.x を使用して保存されたチェックポイントを壊してしまいます。TensorFlow 2.x への移行後に古いチェックポイントを再利用できるようにするには、[チェックポイントコンバータツール](https://github.com/tensorflow/estimator/blob/master/tensorflow_estimator/python/estimator/tools/checkpoint_converter.py)をお試しください。
###Code
! curl -O https://raw.githubusercontent.com/tensorflow/estimator/master/tensorflow_estimator/python/estimator/tools/checkpoint_converter.py
###Output
_____no_output_____
###Markdown
ツールにはヘルプが組み込まれています。
###Code
! python checkpoint_converter.py -h
###Output
_____no_output_____
###Markdown
TensorShapeこのクラスは`tf.compat.v1.Dimension`オブジェクトの代わりに`int`を保持することにより単純化されました。従って、`.value()`を呼び出して`int`を取得する必要はありません。個々の`tf.compat.v1.Dimension`オブジェクトは依然として`tf.TensorShape.dims`からアクセス可能です。 以下に TensorFlow 1.x と TensorFlow 2.x 間の違いを示します。
###Code
# Create a shape and choose an index
i = 0
shape = tf.TensorShape([16, None, 256])
shape
###Output
_____no_output_____
###Markdown
TensorFlow 1.x で次を使っていた場合:```pythonvalue = shape[i].value```TensorFlow 2.x では次のようにします:
###Code
value = shape[i]
value
###Output
_____no_output_____
###Markdown
TensorFlow 1.x で次を使っていた場合:```pythonfor dim in shape: value = dim.value print(value)```TensorFlow 2.0 では次のようにします:
###Code
for value in shape:
print(value)
###Output
_____no_output_____
###Markdown
TensorFlow 1.x で次を使っていた場合(またはその他の次元のメソッドを使用していた場合):```pythondim = shape[i] dim.assert_is_compatible_with(other_dim)```TensorFlow 2.0 では次のようにします:
###Code
other_dim = 16
Dimension = tf.compat.v1.Dimension
if shape.rank is None:
dim = Dimension(None)
else:
dim = shape.dims[i]
dim.is_compatible_with(other_dim) # or any other dimension method
shape = tf.TensorShape(None)
if shape:
dim = shape.dims[i]
dim.is_compatible_with(other_dim) # or any other dimension method
###Output
_____no_output_____
###Markdown
`tf.TensorShape` のブール型の値は、階数がわかっている場合は `True`で、そうでない場合は`False`です。
###Code
print(bool(tf.TensorShape([]))) # Scalar
print(bool(tf.TensorShape([0]))) # 0-length vector
print(bool(tf.TensorShape([1]))) # 1-length vector
print(bool(tf.TensorShape([None]))) # Unknown-length vector
print(bool(tf.TensorShape([1, 10, 100]))) # 3D tensor
print(bool(tf.TensorShape([None, None, None]))) # 3D tensor with no known dimensions
print()
print(bool(tf.TensorShape(None))) # A tensor with unknown rank.
###Output
_____no_output_____
###Markdown
])
optimizer = tf.keras.optimizers.Adam(0.001)
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
@tf.function
def train_step(inputs, labels):
with tf.GradientTape() as tape:
predictions = model(inputs, training=True)
regularization_loss=tf.math.add_n(model.losses)
pred_loss=loss_fn(labels, predictions)
total_loss=pred_loss + regularization_loss
gradients = tape.gradient(total_loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
for epoch in range(NUM_EPOCHS):
for inputs, labels in train_data:
train_step(inputs, labels)
print("Finished epoch", epoch)
###Output
_____no_output_____
###Markdown
新しいスタイルのメトリクスと損失TensorFlow 2.x では、メトリクスと損失はオブジェクトです。これらは Eager に実行され、`tf.function`内でも動作します。損失オブジェクトは呼び出し可能で、(y_true, y_pred) を引数として期待します。
###Code
cce = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
cce([[1, 0]], [[-1.0,3.0]]).numpy()
###Output
_____no_output_____
###Markdown
メトリックオブジェクトには次のメソッドがあります 。- `Metric.update_state()` — 新しい観測を追加する- `Metric.result()` — 観測値が与えられたとき、メトリックの現在の結果を得る- `Metric.reset_states()` — すべての観測をクリアするオブジェクト自体は呼び出し可能です。呼び出しは`update_state`と同様に新しい観測の状態を更新し、メトリクスの新しい結果を返します。メトリックの変数を手動で初期化する必要はありません。また、TensorFlow 2.x は自動制御依存性を持つため、それらについても気にする必要はありません。次のコードは、メトリックを使用してカスタムトレーニングループ内で観測される平均損失を追跡します。
###Code
# Create the metrics
loss_metric = tf.keras.metrics.Mean(name='train_loss')
accuracy_metric = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')
@tf.function
def train_step(inputs, labels):
with tf.GradientTape() as tape:
predictions = model(inputs, training=True)
regularization_loss=tf.math.add_n(model.losses)
pred_loss=loss_fn(labels, predictions)
total_loss=pred_loss + regularization_loss
gradients = tape.gradient(total_loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
# Update the metrics
loss_metric.update_state(total_loss)
accuracy_metric.update_state(labels, predictions)
for epoch in range(NUM_EPOCHS):
# Reset the metrics
loss_metric.reset_states()
accuracy_metric.reset_states()
for inputs, labels in train_data:
train_step(inputs, labels)
# Get the metric results
mean_loss=loss_metric.result()
mean_accuracy = accuracy_metric.result()
print('Epoch: ', epoch)
print(' loss: {:.3f}'.format(mean_loss))
print(' accuracy: {:.3f}'.format(mean_accuracy))
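# Supplementary sketch (not in the original guide): the three Metric methods
# described above, used on a standalone metric. Calling the metric object
# updates its state and returns the new result, like update_state + result.
m = tf.keras.metrics.Mean()
m.update_state([1.0, 2.0, 3.0])
print(m.result().numpy())     # 2.0
m.reset_states()
print(m([4.0, 6.0]).numpy())  # 5.0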
###Output
_____no_output_____
###Markdown
Keras メトリック名 TensorFlow 2.x では、Keras モデルはメトリクス名の処理に関してより一貫性があります。メトリクスリストで文字列を渡すと、*まさにその*文字列がメトリクスの`name`として使用されます。これらの名前は`model.fit`によって返される履歴オブジェクトと、`keras.callbacks`に渡されるログに表示されます。これはメトリクスリストで渡した文字列に設定されています。
###Code
model.compile(
optimizer = tf.keras.optimizers.Adam(0.001),
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics = ['acc', 'accuracy', tf.keras.metrics.SparseCategoricalAccuracy(name="my_accuracy")])
history = model.fit(train_data)
history.history.keys()
###Output
_____no_output_____
###Markdown
これは`metrics=["accuracy"]`を渡すと`dict_keys(['loss', 'acc'])`になっていた、以前のバージョンとは異なります。 Keras オプティマイザ `v1.train.AdamOptimizer`や`v1.train.GradientDescentOptimizer`などの`v1.train`内のオプティマイザは、`tf.keras.optimizers`内に同等のものを持ちます。 `v1.train`を`keras.optimizers`に変換するオプティマイザを変換する際の注意事項を次に示します。- オプティマイザをアップグレードすると、[古いチェックポイントとの互換性がなくなる可能性があります](checkpoints)。- epsilon のデフォルトはすべて`1e-8`ではなく`1e-7`になりました。(これはほとんどのユースケースで無視できます。)- `v1.train.GradientDescentOptimizer`は`tf.keras.optimizers.SGD`で直接置き換えが可能です。- `v1.train.MomentumOptimizer`はモメンタム引数(`tf.keras.optimizers.SGD(..., momentum=...)`)を使用して`SGD`オプティマイザで直接置き換えが可能です。- `v1.train.AdamOptimizer`を変換して`tf.keras.optimizers.Adam`を使用することが可能です。beta1引数と`beta2`引数の名前は、`beta_1`と`beta_2`に変更されています。- `v1.train.RMSPropOptimizer`は`tf.keras.optimizers.RMSprop`に変換可能です。 `decay`引数の名前は`rho`に変更されています。- `v1.train.AdadeltaOptimizer`は`tf.keras.optimizers.Adadelta`に直接変換が可能です。- `tf.train.AdagradOptimizer`は `tf.keras.optimizers.Adagrad`に直接変換が可能です。- `tf.train.FtrlOptimizer`は`tf.keras.optimizers.Ftrl`に直接変換が可能です。`accum_name`および`linear_name`引数は削除されています。- `tf.contrib.AdamaxOptimizer`と`tf.contrib.NadamOptimizer`は `tf.keras.optimizers.Adamax`と`tf.keras.optimizers.Nadam`に直接変換が可能です。`beta1`引数と`beta2`引数の名前は、`beta_1`と`beta_2`に変更されています。 一部の`tf.keras.optimizers`の新しいデフォルト警告: モデルの収束挙動に変化が見られる場合には、デフォルトの学習率を確認してください。`optimizers.SGD`、`optimizers.Adam`、または`optimizers.RMSprop`に変更はありません。次のデフォルトの学習率が変更されました。- `optimizers.Adagrad` 0.01 から 0.001 へ- `optimizers.Adadelta` 1.0 から 0.001 へ- `optimizers.Adamax` 0.002 から 0.001 へ- `optimizers.Nadam` 0.002 から 0.001 へ TensorBoard TensorFlow 2 には、TensorBoard で視覚化するための要約データを記述するために使用される`tf.summary` API の大幅な変更が含まれています。新しい`tf.summary`の概要については、TensorFlow 2 API を使用した[複数のチュートリアル](https://www.tensorflow.org/tensorboard/get_started)があります。これには、[TensorBoard TensorFlow 2 移行ガイド](https://www.tensorflow.org/tensorboard/migrate)も含まれています。 保存と読み込み チェックポイントの互換性TensorFlow 2.x は[オブジェクトベースのチェックポイント](checkpoint.ipynb)を使用します。古いスタイルの名前ベースのチェックポイントは、注意を払えば依然として読み込むことができます。コード変換プロセスは変数名変更という結果になるかもしれませんが、回避方法はあります。最も単純なアプローチは、チェックポイント内の名前と新しいモデルの名前を揃えて並べることです。- 変数にはすべて依然として設定が可能な`name`引数があります。- Keras モデルはまた `name`引数を取り、それらの変数のためのプレフィックスとして設定されます。- `v1.name_scope`関数は、変数名のプレフィックスの設定に使用できます。これは`tf.variable_scope`とは大きく異なります。これは名前だけに影響するもので、変数と再利用の追跡はしません。ご利用のユースケースで動作しない場合は、`v1.train.init_from_checkpoint`を試してみてください。これは`assignment_map`引数を取り、古い名前から新しい名前へのマッピングを指定します。注意 : [読み込みを遅延](checkpoint.ipynbloading_mechanics)できるオブジェクトベースのチェックポイントとは異なり、名前ベースのチェックポイントは関数が呼び出される時に全ての変数が構築されていることを要求します。一部のモデルは、`build`を呼び出すかデータのバッチでモデルを実行するまで変数の構築を遅延します。[TensorFlow Estimatorリポジトリ](https://github.com/tensorflow/estimator/blob/master/tensorflow_estimator/python/estimator/tools/checkpoint_converter.py)には事前作成された Estimator のチェックポイントを TensorFlow 1.X から 2.0 にアップグレードするための[変換ツール](checkpoint_converter)が含まれています。これは、同様のユースケースのツールを構築する方法の例として有用な場合があります。 保存されたモデルの互換性保存されたモデルには、互換性に関する重要な考慮事項はありません。- TensorFlow 1.x saved_models は TensorFlow 2.x で動作します。- TensorFlow 2.x saved_models は全ての演算がサポートされていれば TensorFlow 1.x で動作します。 Graph.pb または Graph.pbtxt 未加工の`Graph.pb`ファイルを TensorFlow 2.x にアップグレードする簡単な方法はありません。確実な方法は、ファイルを生成したコードをアップグレードすることです。ただし、「凍結グラフ」(変数が定数に変換された`tf.Graph`)がある場合、`v1.wrap_function`を使用して[`concrete_function`](https://tensorflow.org/guide/concrete_function)への変換が可能です。
###Code
def wrap_frozen_graph(graph_def, inputs, outputs):
def _imports_graph_def():
tf.compat.v1.import_graph_def(graph_def, name="")
wrapped_import = tf.compat.v1.wrap_function(_imports_graph_def, [])
import_graph = wrapped_import.graph
return wrapped_import.prune(
tf.nest.map_structure(import_graph.as_graph_element, inputs),
tf.nest.map_structure(import_graph.as_graph_element, outputs))
###Output
_____no_output_____
###Markdown
たとえば、次のような凍結された Inception v1 グラフ(2016 年)があります。
###Code
path = tf.keras.utils.get_file(
'inception_v1_2016_08_28_frozen.pb',
'http://storage.googleapis.com/download.tensorflow.org/models/inception_v1_2016_08_28_frozen.pb.tar.gz',
untar=True)
###Output
_____no_output_____
###Markdown
`tf.GraphDef`を読み込みます。
###Code
graph_def = tf.compat.v1.GraphDef()
loaded = graph_def.ParseFromString(open(path,'rb').read())
###Output
_____no_output_____
###Markdown
これを`concrete_function`にラップします。
###Code
inception_func = wrap_frozen_graph(
graph_def, inputs='input:0',
outputs='InceptionV1/InceptionV1/Mixed_3b/Branch_1/Conv2d_0a_1x1/Relu:0')
###Output
_____no_output_____
###Markdown
入力としてテンソルを渡します。
###Code
input_img = tf.ones([1,224,224,3], dtype=tf.float32)
inception_func(input_img).shape
###Output
_____no_output_____
###Markdown
Estimator Estimator でトレーニングするEstimator は TensorFlow 2.0 でサポートされています。Estimator を使用する際には、TensorFlow 1.x. からの`input_fn()`、`tf.estimator.TrainSpec`、`tf.estimator.EvalSpec`を使用できます。ここに train と evaluate specs を伴う `input_fn` を使用する例があります。 input_fn と train/eval specs を作成する
###Code
# Define the estimator's input_fn
def input_fn():
datasets, info = tfds.load(name='mnist', with_info=True, as_supervised=True)
mnist_train, mnist_test = datasets['train'], datasets['test']
BUFFER_SIZE = 10000
BATCH_SIZE = 64
def scale(image, label):
image = tf.cast(image, tf.float32)
image /= 255
return image, label[..., tf.newaxis]
train_data = mnist_train.map(scale).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
return train_data.repeat()
# Define train & eval specs
train_spec = tf.estimator.TrainSpec(input_fn=input_fn,
max_steps=STEPS_PER_EPOCH * NUM_EPOCHS)
eval_spec = tf.estimator.EvalSpec(input_fn=input_fn,
steps=STEPS_PER_EPOCH)
###Output
_____no_output_____
###Markdown
Keras モデル定義を使用する TensorFlow 2.x で Estimator を構築する方法には、いくつかの違いがあります。モデルは Keras を使用して定義することを推奨します。次に`tf.keras.estimator.model_to_estimator`ユーティリティを使用して、モデルを Estimator に変更します。次のコードは Estimator を作成してトレーニングする際に、このユーティリティをどのように使用するかを示します。
###Code
def make_model():
return tf.keras.Sequential([
tf.keras.layers.Conv2D(32, 3, activation='relu',
kernel_regularizer=tf.keras.regularizers.l2(0.02),
input_shape=(28, 28, 1)),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Flatten(),
tf.keras.layers.Dropout(0.1),
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Dense(10)
])
model = make_model()
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
estimator = tf.keras.estimator.model_to_estimator(
keras_model = model
)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
###Output
_____no_output_____
###Markdown
注意 : Keras で重み付きメトリクスを作成し、`model_to_estimator`を使用してそれらを Estimator API で重み付きメトリクスを変換することはサポートされません。それらのメトリクスは、`add_metrics`関数を使用して Estimator 仕様で直接作成する必要があります。 カスタム `model_fn` を使用する保守する必要がある既存のカスタム Estimator `model_fn` を持つ場合には、`model_fn`を変換して Keras モデルを使用できるようにすることが可能です。しかしながら、互換性の理由から、カスタム`model_fn`は依然として1.x スタイルのグラフモードで動作します。これは eager execution はなく自動制御依存性もないことも意味します。注意: 長期的には、特にカスタムの `model_fn` を使って、`tf.estimator` から移行することを計画する必要があります。代替の API は `tf.keras` と `tf.distribute` です。トレーニングの一部に `Estimator` を使用する必要がある場合は、`tf.keras.estimator.model_to_estimator` コンバータを使用して `keras.Model` から Estimator を作成する必要があります。 最小限の変更で model_fn をカスタマイズするTensorFlow 2.0 でカスタム`model_fn`を動作させるには、既存のコードの変更を最小限に留めたい場合、`optimizers`や`metrics`などの`tf.compat.v1`シンボルを使用することができます。カスタム`model_fn`で Keras モデルを使用することは、それをカスタムトレーニングループで使用することに類似しています。- `mode`引数を基に、`training`段階を適切に設定します。- モデルの`trainable_variables`をオプティマイザに明示的に渡します。しかし、[カスタムループ](custom_loop)と比較して、重要な違いがあります。- `Model.losses`を使用する代わりに`Model.get_losses_for`を使用して損失を抽出します。- `Model.get_updates_for`を使用してモデルの更新を抽出します。注意 : 「更新」は各バッチの後にモデルに適用される必要がある変更です。例えば、`layers.BatchNormalization`レイヤーの平均と分散の移動平均などです。次のコードはカスタム`model_fn`から Estimator を作成し、これらの懸念事項をすべて示しています。
###Code
def my_model_fn(features, labels, mode):
model = make_model()
optimizer = tf.compat.v1.train.AdamOptimizer()
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
training = (mode == tf.estimator.ModeKeys.TRAIN)
predictions = model(features, training=training)
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
reg_losses = model.get_losses_for(None) + model.get_losses_for(features)
total_loss=loss_fn(labels, predictions) + tf.math.add_n(reg_losses)
accuracy = tf.compat.v1.metrics.accuracy(labels=labels,
predictions=tf.math.argmax(predictions, axis=1),
name='acc_op')
update_ops = model.get_updates_for(None) + model.get_updates_for(features)
minimize_op = optimizer.minimize(
total_loss,
var_list=model.trainable_variables,
global_step=tf.compat.v1.train.get_or_create_global_step())
train_op = tf.group(minimize_op, update_ops)
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
loss=total_loss,
train_op=train_op, eval_metric_ops={'accuracy': accuracy})
# Create the Estimator & Train
estimator = tf.estimator.Estimator(model_fn=my_model_fn)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
###Output
_____no_output_____
###Markdown
TensorFlow 2.x シンボルで`model_fn`をカスタマイズするTensorFlow 1.x シンボルをすべて削除し、カスタム`model_fn` をネイティブの TensorFlow 2.x にアップグレードする場合は、オプティマイザとメトリクスを`tf.keras.optimizers`と`tf.keras.metrics`にアップグレードする必要があります。カスタム`model_fn`では、上記の[変更](minimal_changes)に加えて、さらにアップグレードを行う必要があります。- `v1.train.Optimizer` の代わりに `tf.keras.optimizers` を使用します。- モデルの`trainable_variables`をオプティマイザに明示的に渡します。- `train_op/minimize_op`を計算するには、 - 損失がスカラー損失`Tensor`(呼び出し不可)の場合は、`Optimizer.get_updates()`を使用します。返されるリストの最初の要素は目的とする`train_op/minimize_op`です。 - 損失が呼び出し可能(関数など)な場合は、`Optimizer.minimize()`を使用して`train_op/minimize_op`を取得します。- 評価には`tf.compat.v1.metrics`の代わりに[`tf.keras.metrics`](https://www.tensorflow.org/api_docs/python/tf/keras/metrics)を使用します。上記の`my_model_fn`の例では、2.0 シンボルの移行されたコードは次のように表示されます。
###Code
def my_model_fn(features, labels, mode):
model = make_model()
training = (mode == tf.estimator.ModeKeys.TRAIN)
loss_obj = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
predictions = model(features, training=training)
# Get both the unconditional losses (the None part)
# and the input-conditional losses (the features part).
reg_losses = model.get_losses_for(None) + model.get_losses_for(features)
total_loss=loss_obj(labels, predictions) + tf.math.add_n(reg_losses)
# Upgrade to tf.keras.metrics.
accuracy_obj = tf.keras.metrics.Accuracy(name='acc_obj')
accuracy = accuracy_obj.update_state(
y_true=labels, y_pred=tf.math.argmax(predictions, axis=1))
train_op = None
if training:
# Upgrade to tf.keras.optimizers.
optimizer = tf.keras.optimizers.Adam()
# Manually assign tf.compat.v1.global_step variable to optimizer.iterations
# to make tf.compat.v1.train.global_step increased correctly.
# This assignment is a must for any `tf.train.SessionRunHook` specified in
# estimator, as SessionRunHooks rely on global step.
optimizer.iterations = tf.compat.v1.train.get_or_create_global_step()
# Get both the unconditional updates (the None part)
# and the input-conditional updates (the features part).
update_ops = model.get_updates_for(None) + model.get_updates_for(features)
# Compute the minimize_op.
minimize_op = optimizer.get_updates(
total_loss,
model.trainable_variables)[0]
train_op = tf.group(minimize_op, *update_ops)
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
loss=total_loss,
train_op=train_op,
eval_metric_ops={'Accuracy': accuracy_obj})
# Create the Estimator & Train.
estimator = tf.estimator.Estimator(model_fn=my_model_fn)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
###Output
_____no_output_____
###Markdown
事前作成された Estimator`tf.estimator.DNN*`、`tf.estimator.Linear*`、 `tf.estimator.DNNLinearCombined*`のファミリーに含まれる[事前作成された Estimator](https://www.tensorflow.org/guide/premade_estimators) は、依然として TensorFlow 2.0 API でもサポートされていますが、一部の引数が変更されています。1. `input_layer_partitioner`: v2 で削除されました。2. `loss_reduction`: `tf.compat.v1.losses.Reduction`の代わりに`tf.keras.losses.Reduction`に更新されました。デフォルト値も`tf.compat.v1.losses.Reduction.SUM`から`tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE`に変更されています。3. `optimizer`、`dnn_optimizer`、`linear_optimizer`: これらの引数は`tf.compat.v1.train.Optimizer`の代わりに`tf.keras.optimizers`に更新されています。上記の変更を移行するには :1. TensorFlow 2.x では[`配布戦略`](https://www.tensorflow.org/guide/distributed_training)が自動的に処理するため、`input_layer_partitioner`の移行は必要ありません。2. `loss_reduction`については[`tf.keras.losses.Reduction`](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/losses/Reduction)でサポートされるオプションを確認してください。3. `optimizer` 引数の場合: - 1) `optimizer`、`dnn_optimizer`、または `linear_optimizer` 引数を渡さない場合、または 2) `optimizer` 引数を `string` としてコードに指定しない場合、デフォルトで `tf.keras.optimizers` が使用されるため、何も変更する必要はありません。 - `optimizer`引数については、`optimizer`、`dnn_optimizer`、`linear_optimizer`引数を渡さない場合、または`optimizer`引数をコード内の内の`string`として指定する場合は、何も変更する必要はありません。デフォルトで`tf.keras.optimizers`を使用します。それ以外の場合は、`tf.compat.v1.train.Optimizer`から対応する[`tf.keras.optimizers`](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/optimizers)に更新する必要があります。 チェックポイントコンバータ`tf.keras.optimizers`は異なる変数セットを生成してチェックポイントに保存するするため、`keras.optimizers`への移行は TensorFlow 1.x を使用して保存されたチェックポイントを壊してしまいます。TensorFlow 2.x への移行後に古いチェックポイントを再利用できるようにするには、[チェックポイントコンバータツール](https://github.com/tensorflow/estimator/blob/master/tensorflow_estimator/python/estimator/tools/checkpoint_converter.py)をお試しください。
###Code
! curl -O https://raw.githubusercontent.com/tensorflow/estimator/master/tensorflow_estimator/python/estimator/tools/checkpoint_converter.py
###Output
_____no_output_____
###Markdown
ツールにはヘルプが組み込まれています。
###Code
! python checkpoint_converter.py -h
###Output
_____no_output_____
###Markdown
TensorShapeこのクラスは`tf.compat.v1.Dimension`オブジェクトの代わりに`int`を保持することにより単純化されました。従って、`.value()`を呼び出して`int`を取得する必要はありません。個々の`tf.compat.v1.Dimension`オブジェクトは依然として`tf.TensorShape.dims`からアクセス可能です。 以下に TensorFlow 1.x と TensorFlow 2.x 間の違いを示します。
###Code
# Create a shape and choose an index
i = 0
shape = tf.TensorShape([16, None, 256])
shape
###Output
_____no_output_____
###Markdown
TensorFlow 1.x で次を使っていた場合:```pythonvalue = shape[i].value```TensorFlow 2.x では次のようにします:
###Code
value = shape[i]
value
###Output
_____no_output_____
###Markdown
TensorFlow 1.x で次を使っていた場合:```pythonfor dim in shape: value = dim.value print(value)```TensorFlow 2.0 では次のようにします:
###Code
for value in shape:
print(value)
###Output
_____no_output_____
###Markdown
TensorFlow 1.x で次を使っていた場合(またはその他の次元のメソッドを使用していた場合):```pythondim = shape[i] dim.assert_is_compatible_with(other_dim)```TensorFlow 2.0 では次のようにします:
###Code
other_dim = 16
Dimension = tf.compat.v1.Dimension
if shape.rank is None:
dim = Dimension(None)
else:
dim = shape.dims[i]
dim.is_compatible_with(other_dim) # or any other dimension method
shape = tf.TensorShape(None)
if shape:
dim = shape.dims[i]
dim.is_compatible_with(other_dim) # or any other dimension method
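# Supplementary sketch (not in the original guide): if you only need the raw
# dimension value, TensorFlow 2.x lets you work with plain ints (or None for
# unknown dimensions) without going through Dimension at all.
shape = tf.TensorShape([16, None, 256])
dim = shape[i]                      # a plain int here (16)
print(dim is None or dim == other_dim)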
###Output
_____no_output_____
###Markdown
`tf.TensorShape` のブール型の値は、階数がわかっている場合は `True`で、そうでない場合は`False`です。
###Code
print(bool(tf.TensorShape([]))) # Scalar
print(bool(tf.TensorShape([0]))) # 0-length vector
print(bool(tf.TensorShape([1]))) # 1-length vector
print(bool(tf.TensorShape([None]))) # Unknown-length vector
print(bool(tf.TensorShape([1, 10, 100]))) # 3D tensor
print(bool(tf.TensorShape([None, None, None]))) # 3D tensor with no known dimensions
print()
print(bool(tf.TensorShape(None))) # A tensor with unknown rank.
###Output
_____no_output_____
###Markdown
Copyright 2018 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
TensorFlow 1 のコードを TensorFlow 2 に移行する TensorFlow.org で表示 Google Colab で実行 GitHub でソースを表示 ノートブックをダウンロード 本ドキュメントは、低レベル TensorFlow API のユーザーを対象としています。高レベル API(`tf.keras`)をご使用の場合は、コードをTensorFlow 2.0 と完全互換にするためのアクションは殆どまたは全く必要ありません。- [オプティマイザのデフォルトの学習率](keras_optimizer_lr)を確認してください。- メトリクスが記録される「名前」が[変更されている可能性がある](keras_metric_names)ことに注意してください。 TensorFlow 2.0 で 1.X のコードを未修正で実行することは、([contrib を除き](https://github.com/tensorflow/community/blob/master/rfcs/20180907-contrib-sunset.md))依然として可能です。```import tensorflow.compat.v1 as tf tf.disable_v2_behavior()```しかし、これでは TensorFlow 2.0 で追加された改善の多くを活用できません。このガイドでは、コードのアップグレード、さらなる単純化、パフォーマンス向上、そしてより容易なメンテナンスについて説明します。 自動変換スクリプトこのドキュメントで説明される変更を実装する前に行うべき最初のステップは、[アップグレードスクリプト](./upgrade.md)を実行してみることです。これはコードを TensorFlow 2.0 にアップグレードする際の初期パスとしては十分ですが、2.0 特有のコードに変換するわけではありません。コードは依然として`tf.compat.v1`エンドポイントを使用して、プレースホルダー、セッション、コレクション、その他 1.x- スタイルの機能へのアクセスが可能です。 トップレベルの動作の変更`tf.compat.v1.disable_v2_behavior()`を使用することで TensorFlow 2.0 でコードが機能する場合でも、対処すべきグローバルな動作の変更があります。主な変更点は次のとおりです。 - *Eager execution、`v1.enable_eager_execution()`* : 暗黙的に`tf.Graph`を使用するコードは失敗します。このコードは必ず`with tf.Graph().as_default()`コンテキストでラップしてください。- *リソース変数、`v1.enable_resource_variables()`*: 一部のコードは、TensorFlow 参照変数によって有効化される非決定的な動作に依存する場合があります。 リソース変数は書き込み中にロックされるため、より直感的な一貫性を保証します。 - これによりエッジケースでの動作が変わる場合があります。 - これにより余分なコピーが作成されるため、メモリ使用量が増える可能性があります。 - これを無効にするには、`use_resource=False`を`tf.Variable`コンストラクタに渡します。- *テンソルの形状、`v1.enable_v2_tensorshape()`*: TensorFlow 2.0 は、テンソルの形状の動作を簡略化されており、`t.shape[0].value`の代わりに`t.shape[0]`とすることができます。簡単な変更なので、すぐに修正しておくことをお勧めします。例については [TensorShape](tensorshape) をご覧ください。- *制御フロー、`v1.enable_control_flow_v2()`*: TensorFlow 2.0 制御フローの実装が簡略化されたため、さまざまなグラフ表現を生成します。問題が生じた場合には、[バグを報告](https://github.com/tensorflow/tensorflow/issues)してください。 コードを 2.0 ネイティブにするこのガイドでは、TensorFlow 1.x のコードを TensorFlow 2.0 に変換する幾つかの例をウォークスルーします。これらの変更によって、コードがパフォーマンスの最適化および簡略化された API 呼び出しを活用できるようになります。それぞれのケースのパターンは次のとおりです。 1. `v1.Session.run`呼び出しを置き換えるすべての`v1.Session.run`呼び出しは、Python 関数で置き換える必要があります。- `feed_dict`および`v1.placeholder`は関数の引数になります。- `fetches`は関数の戻り値になります。- eager execution では、`pdb`などの標準的な Python ツールを使用して、変換中に簡単にデバッグできます。次に、`tf.function`デコレータを追加して、グラフで効率的に実行できるようにします。 この機能についての詳細は、[AutoGraph ガイド](function.ipynb)をご覧ください。注意点:- `v1.Session.run`とは異なり、`tf.function`は固定のリターンシグネチャを持ち、常にすべての出力を返します。これによってパフォーマンスの問題が生じる場合は、2 つの個別の関数を作成します。- `tf.control_dependencies`または同様の演算は必要ありません。`tf.function`は、記述された順序で実行されたかのように動作します。例えば、`tf.Variable`割り当てと`tf.assert`は自動的に実行されます。[変換モデルセクション{/ a0}には、この変換プロセスの実際の例が含まれています。](converting_models) 2. Python オブジェクトを変数と損失の追跡に使用するTensorFlow 2.0 では、いかなる名前ベースの変数追跡も全く推奨されていません。 変数の追跡には Python オブジェクトを使用します。`v1.get_variable`の代わりに`tf.Variable`を使用してください。すべての`v1.variable_scope`は Python オブジェクトに変換が可能です。通常は次のうちの 1 つになります。- `tf.keras.layers.Layer`- `tf.keras.Model`- `tf.Module``tf.Graph.get_collection(tf.GraphKeys.VARIABLES)`などの変数のリストを集める必要がある場合には、`Layer`および`Model`オブジェクトの`.variables`と`.trainable_variables`属性を使用します。これら`Layer`クラスと`Model`クラスは、グローバルコレクションの必要性を除去した別のプロパティを幾つか実装します。`.losses`プロパティは、`tf.GraphKeys.LOSSES`コレクション使用の置き換えとなります。詳細は [Keras ガイド](keras.ipynb)をご覧ください。警告 : 多くの`tf.compat.v1`シンボルはグローバルコレクションを暗黙的に使用しています。 3. トレーニングループをアップグレードするご利用のユースケースで動作する最高レベルの API を使用してください。独自のトレーニングループを構築するよりも `tf.keras.Model.fit` の選択を推奨します。これらの高レベル関数は、独自のトレーニングループを書く場合に見落とされやすい多くの低レベル詳細を管理します。例えば、それらは自動的に正則化損失を集めて、モデルを呼び出す時に`training=True`引数を設定します。 4. 
データ入力パイプラインをアップグレードするデータ入力には`tf.data`データセットを使用してください。それらのオブジェクトは効率的で、表現力があり、TensorFlow とうまく統合します。次のように、`tf.keras.Model.fit`メソッドに直接渡すことができます。```model.fit(dataset, epochs=5)```また、標準的な Python で直接にイテレートすることもできます。```for example_batch, label_batch in dataset: break``` 5. `compat.v1`シンボルを移行する`tf.compat.v1`モジュールには、元のセマンティクスを持つ完全な TensorFlow 1.x API が含まれています。[TensorFlow 2 アップグレードスクリプト](upgrade.ipynb)は、変換が安全な場合、つまり 2.0 バージョンの動作が完全に同等であると判断できる場合は、シンボルを 2.0 と同等のものに変換します。(例えば、これらは同じ関数なので、`v1.arg_max`の名前を`tf.argmax`に変更します。)コードの一部を使用してアップグレードスクリプトを実行した後に、`compat.v1`が頻出する可能性があります。 コードを調べ、それらを手動で同等の 2.0 のコードに変換する価値はあります。(該当するものがある場合には、ログに表示されているはずです。) モデルを変換する 低レベル変数 & 演算子実行低レベル API の使用例を以下に示します。- 変数スコープを使用して再利用を制御する。- `v1.get_variable`で変数を作成する。- コレクションに明示的にアクセスする。- 次のようなメソッドでコレクションに暗黙的にアクセスする。 - `v1.global_variables` - `v1.losses.get_regularization_loss`- `v1.placeholder` を使用してグラフ入力のセットアップをする。- `Session.run`でグラフを実行する。- 変数を手動で初期化する。 変換前TensorFlow 1.x を使用したコードでは、これらのパターンは以下のように表示されます。
###Code
import tensorflow as tf
import tensorflow.compat.v1 as v1
import tensorflow_datasets as tfds
g = v1.Graph()
with g.as_default():
in_a = v1.placeholder(dtype=v1.float32, shape=(2))
in_b = v1.placeholder(dtype=v1.float32, shape=(2))
def forward(x):
with v1.variable_scope("matmul", reuse=v1.AUTO_REUSE):
W = v1.get_variable("W", initializer=v1.ones(shape=(2,2)),
regularizer=lambda x:tf.reduce_mean(x**2))
b = v1.get_variable("b", initializer=v1.zeros(shape=(2)))
return W * x + b
out_a = forward(in_a)
out_b = forward(in_b)
reg_loss=v1.losses.get_regularization_loss(scope="matmul")
with v1.Session(graph=g) as sess:
sess.run(v1.global_variables_initializer())
outs = sess.run([out_a, out_b, reg_loss],
feed_dict={in_a: [1, 0], in_b: [0, 1]})
print(outs[0])
print()
print(outs[1])
print()
print(outs[2])
###Output
_____no_output_____
###Markdown
変換後 変換されたコードでは :- 変数はローカル Python オブジェクトです。- `forward`関数は依然として計算を定義します。- `Session.run`呼び出しは`forward`への呼び出しに置き換えられます。- パフォーマンス向上のためにオプションで`tf.function`デコレータを追加可能です。- どのグローバルコレクションも参照せず、正則化は手動で計算されます。- **セッションやプレースホルダーはありません。**
###Code
W = tf.Variable(tf.ones(shape=(2,2)), name="W")
b = tf.Variable(tf.zeros(shape=(2)), name="b")
@tf.function
def forward(x):
return W * x + b
out_a = forward([1,0])
print(out_a)
out_b = forward([0,1])
regularizer = tf.keras.regularizers.l2(0.04)
reg_loss=regularizer(W)
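# Supplementary check (not in the original guide): print the remaining values
# so this cell shows the same three quantities the v1 version printed
# (out_a, out_b and the regularization loss).
print(out_b)
print(reg_loss)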
###Output
_____no_output_____
###Markdown
`tf.layers`ベースのモデル `v1.layers`モジュールは、変数を定義および再利用する`v1.variable_scope`に依存するレイヤー関数を含めるために使用されます。 変換前
###Code
def model(x, training, scope='model'):
with v1.variable_scope(scope, reuse=v1.AUTO_REUSE):
x = v1.layers.conv2d(x, 32, 3, activation=v1.nn.relu,
kernel_regularizer=lambda x:0.004*tf.reduce_mean(x**2))
x = v1.layers.max_pooling2d(x, (2, 2), 1)
x = v1.layers.flatten(x)
x = v1.layers.dropout(x, 0.1, training=training)
x = v1.layers.dense(x, 64, activation=v1.nn.relu)
x = v1.layers.batch_normalization(x, training=training)
x = v1.layers.dense(x, 10)
return x
train_data = tf.ones(shape=(1, 28, 28, 1))
test_data = tf.ones(shape=(1, 28, 28, 1))
train_out = model(train_data, training=True)
test_out = model(test_data, training=False)
print(train_out)
print()
print(test_out)
###Output
_____no_output_____
###Markdown
変換後 - レイヤーの単純なスタックが `tf.keras.Sequential`にぴったり収まります。(より複雑なモデルについては[カスタムレイヤーとモデル](keras/custom_layers_and_models.ipynb)および[ Functional API ](keras/functional.ipynb)をご覧ください。)- モデルが変数と正則化損失を追跡します。- `v1.layers`から`tf.keras.layers`への直接的なマッピングがあるため、変換は一対一対応でした。ほとんどの引数はそのままです。しかし、以下の点は異なります。- `training`引数は、それが実行される時点でモデルによって各レイヤーに渡されます。- 元の`model`関数への最初の引数(入力 `x`)はなくなりました。これはオブジェクトレイヤーがモデルの呼び出しからモデルの構築を分離するためです。また以下にも注意してください。- `tf.contrib`からの初期化子の正則化子を使用している場合は、他よりも多くの引数変更があります。- コードはコレクションに書き込みを行わないため、`v1.losses.get_regularization_loss`などの関数はそれらの値を返さなくなり、トレーニングループが壊れる可能性があります。
###Code
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(32, 3, activation='relu',
kernel_regularizer=tf.keras.regularizers.l2(0.04),
input_shape=(28, 28, 1)),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Flatten(),
tf.keras.layers.Dropout(0.1),
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Dense(10)
])
train_data = tf.ones(shape=(1, 28, 28, 1))
test_data = tf.ones(shape=(1, 28, 28, 1))
train_out = model(train_data, training=True)
print(train_out)
test_out = model(test_data, training=False)
print(test_out)
# Here are all the trainable variables.
len(model.trainable_variables)
# Here is the regularization loss.
model.losses
###Output
_____no_output_____
###Markdown
変数と`v1.layers`の混在 既存のコードは低レベルの TensorFlow 1.x 変数と演算子に高レベルの`v1.layers`が混ざっていることがよくあります。 変換前
###Code
def model(x, training, scope='model'):
with v1.variable_scope(scope, reuse=v1.AUTO_REUSE):
W = v1.get_variable(
"W", dtype=v1.float32,
initializer=v1.ones(shape=x.shape),
regularizer=lambda x:0.004*tf.reduce_mean(x**2),
trainable=True)
if training:
x = x + W
else:
x = x + W * 0.5
x = v1.layers.conv2d(x, 32, 3, activation=tf.nn.relu)
x = v1.layers.max_pooling2d(x, (2, 2), 1)
x = v1.layers.flatten(x)
return x
train_out = model(train_data, training=True)
test_out = model(test_data, training=False)
###Output
_____no_output_____
###Markdown
変換後 このコードを変換するには、前の例で示したレイヤーからレイヤーへのマッピングのパターンに従います。一般的なパターンは次の通りです。- `__init__`でレイヤーパラメータを収集する。- `build`で変数を構築する。- `call`で計算を実行し、結果を返す。`v1.variable_scope`は事実上それ自身のレイヤーです。従って`tf.keras.layers.Layer`として書き直します。詳細は[ガイド](keras/custom_layers_and_models.ipynb)をご覧ください。
###Code
# Create a custom layer for part of the model
class CustomLayer(tf.keras.layers.Layer):
def __init__(self, *args, **kwargs):
super(CustomLayer, self).__init__(*args, **kwargs)
def build(self, input_shape):
self.w = self.add_weight(
shape=input_shape[1:],
dtype=tf.float32,
initializer=tf.keras.initializers.ones(),
regularizer=tf.keras.regularizers.l2(0.02),
trainable=True)
# Call method will sometimes get used in graph mode,
# training will get turned into a tensor
@tf.function
def call(self, inputs, training=None):
if training:
return inputs + self.w
else:
return inputs + self.w * 0.5
custom_layer = CustomLayer()
print(custom_layer([1]).numpy())
print(custom_layer([1], training=True).numpy())
train_data = tf.ones(shape=(1, 28, 28, 1))
test_data = tf.ones(shape=(1, 28, 28, 1))
# Build the model including the custom layer
model = tf.keras.Sequential([
CustomLayer(input_shape=(28, 28, 1)),
tf.keras.layers.Conv2D(32, 3, activation='relu'),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Flatten(),
])
train_out = model(train_data, training=True)
test_out = model(test_data, training=False)
###Output
_____no_output_____
###Markdown
注意点:- サブクラス化された Keras モデルとレイヤーは v1 グラフ(自動制御依存性なし)と eager モードの両方で実行される必要があります。 - `call()`を`tf.function()`にラップして、AutoGraph と自動制御依存性を得るようにします。- `training`引数を受け取って`call`することを忘れないようにしてください。 - それは`tf.Tensor`である場合があります。 - それは Python ブール型である場合があります。- `self.add_weight()`を使用して、コンストラクタまたは`Model.build`でモデル変数を作成します。 - `Model.build`では、入力形状にアクセスできるため、適合する形状で重みを作成できます。 - `tf.keras.layers.Layer.add_weight`を使用すると、Keras が変数と正則化損失を追跡できるようになります。- オブジェクトに`tf.Tensors`を保持してはいけません。 - それらは`tf.function`または eager コンテキスト内のいずれかで作成される可能性がありますが、それらのテンソルは異なる振る舞いをします。 - 状態には`tf.Variable`を使用してください。これは常に両方のコンテキストから使用可能です。 - `tf.Tensors`は中間値専用です。 Slim & contrib.layers に関する注意古い TensorFlow 1.x コードの大部分は [Slim](https://ai.googleblog.com/2016/08/tf-slim-high-level-library-to-define.html) ライブラリを使用しており、これは`tf.contrib.layers`として TensorFlow 1.x でパッケージ化されていました。 `contrib`モジュールに関しては、TensorFlow 2.0 では`tf.compat.v1`内でも、あっても利用できなくなりました。Slim を使用したコードの TensorFlow 2.0 への変換は、`v1.layers`を使用したレポジトリの変換よりも複雑です。現実的には、まず最初に Slim コードを`v1.layers`に変換してから Keras に変換するほうが賢明かもしれません。- `arg_scopes`を除去します。すべての引数は明示的である必要があります。- それらを使用する場合、 `normalizer_fn`と`activation_fn`をそれら自身のレイヤーに分割します。- 分離可能な畳み込みレイヤーは 1 つまたはそれ以上の異なる Keras レイヤー(深さ的な、ポイント的な、分離可能な Keras レイヤー)にマップします。- Slim と`v1.layers`には異なる引数名とデフォルト値があります。- 一部の引数には異なるスケールがあります。- Slim 事前トレーニング済みモデルを使用する場合は、`tf.keras.applications`から Keras 事前トレーニング済みモデル、または元の Slim コードからエクスポートされた [TensorFlow ハブ](https://tfhub.dev/s?q=slim%20tf2)の TensorFlow 2 SavedModel をお試しください。一部の`tf.contrib`レイヤーはコアの TensorFlow に移動されていない可能性がありますが、代わりに [TensorFlow アドオンパッケージ](https://github.com/tensorflow/addons)に移動されています。 トレーニング `tf.keras`モデルにデータを供給する方法は沢山あります。それらは Python ジェネレータと Numpy 配列を入力として受け取ります。モデルへのデータ供給方法として推奨するのは、データ操作用の高パフォーマンスクラスのコレクションを含む`tf.data`パッケージの使用です。依然として`tf.queue`を使用している場合、これらは入力パイプラインとしてではなく、データ構造としてのみサポートされます。 データセットを使用する [TensorFlow Dataset](https://tensorflow.org/datasets) パッケージ(`tfds`)には、事前定義されたデータセットを`tf.data.Dataset`オブジェクトとして読み込むためのユーティリティが含まれています。この例として、`tfds`を使用して MNISTdataset を読み込んでみましょう。
###Code
datasets, info = tfds.load(name='mnist', with_info=True, as_supervised=True)
mnist_train, mnist_test = datasets['train'], datasets['test']
###Output
_____no_output_____
###Markdown
次に、トレーニング用のデータを準備します。- 各画像をリスケールする。- 例の順序をシャッフルする。- 画像とラベルのバッチを集める。
###Code
BUFFER_SIZE = 10 # Use a much larger value for real code.
BATCH_SIZE = 64
NUM_EPOCHS = 5
def scale(image, label):
image = tf.cast(image, tf.float32)
image /= 255
return image, label
###Output
_____no_output_____
###Markdown
例を短く保つために、データセットをトリミングして 5 バッチのみを返すようにします。
###Code
train_data = mnist_train.map(scale).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
test_data = mnist_test.map(scale).batch(BATCH_SIZE)
STEPS_PER_EPOCH = 5
train_data = train_data.take(STEPS_PER_EPOCH)
test_data = test_data.take(STEPS_PER_EPOCH)
image_batch, label_batch = next(iter(train_data))
###Output
_____no_output_____
###Markdown
Keras トレーニングループを使用するトレーニングプロセスの低レベル制御が不要な場合は、Keras 組み込みの`fit`、`evaluate`、`predict`メソッドの使用が推奨されます。これらのメソッドは(シーケンシャル、関数型、またはサブクラス化)実装を問わず、モデルをトレーニングするための統一インターフェースを提供します。これらのメソッドには次のような優位点があります。- Numpy 配列、Python ジェネレータ、`tf.data.Datasets`を受け取ります。- 正則化と活性化損失を自動的に適用します。- [マルチデバイストレーニングのために](distributed_training.ipynb)`tf.distribute`をサポートします。- 任意の callable は損失とメトリクスとしてサポートします。- `tf.keras.callbacks.TensorBoard`のようなコールバックとカスタムコールバックをサポートします。- 自動的に TensorFlow グラフを使用し、高性能です。ここに`Dataset`を使用したモデルのトレーニング例を示します。(この機能ついての詳細は[チュートリアル](../tutorials)をご覧ください。)
###Code
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(32, 3, activation='relu',
kernel_regularizer=tf.keras.regularizers.l2(0.02),
input_shape=(28, 28, 1)),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Flatten(),
tf.keras.layers.Dropout(0.1),
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Dense(10)
])
# Model is the full model w/o custom layers
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
model.fit(train_data, epochs=NUM_EPOCHS)
loss, acc = model.evaluate(test_data)
print("Loss {}, Accuracy {}".format(loss, acc))
###Output
_____no_output_____
###Markdown
ループを自分で書くKeras モデルのトレーニングステップは動作していても、そのステップの外でより制御が必要な場合は、データ イテレーション ループで`tf.keras.Model.train_on_batch`メソッドの使用を検討してみてください。`tf.keras.callbacks.Callback`として、多くのものが実装可能であることに留意してください。このメソッドには前のセクションで言及したメソッドの優位点の多くがありますが、外側のループのユーザ制御も与えます。`tf.keras.Model.test_on_batch`または`tf.keras.Model.evaluate`を使用して、トレーニング中のパフォーマンスをチェックすることも可能です。注意 : `train_on_batch`と`test_on_batch`は、デフォルトで単一バッチの損失とメトリクスを返します。`reset_metrics=False`を渡すと累積メトリックを返しますが、必ずメトリックアキュムレータを適切にリセットすることを忘れないようにしてくだい。また、`AUC`のような一部のメトリクスは正しく計算するために`reset_metrics=False`が必要なことも覚えておいてください。上のモデルのトレーニングを続けます。
###Code
# Model is the full model w/o custom layers
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
for epoch in range(NUM_EPOCHS):
#Reset the metric accumulators
model.reset_metrics()
for image_batch, label_batch in train_data:
result = model.train_on_batch(image_batch, label_batch)
metrics_names = model.metrics_names
print("train: ",
"{}: {:.3f}".format(metrics_names[0], result[0]),
"{}: {:.3f}".format(metrics_names[1], result[1]))
for image_batch, label_batch in test_data:
result = model.test_on_batch(image_batch, label_batch,
# return accumulated metrics
reset_metrics=False)
metrics_names = model.metrics_names
print("\neval: ",
"{}: {:.3f}".format(metrics_names[0], result[0]),
"{}: {:.3f}".format(metrics_names[1], result[1]))
###Output
_____no_output_____
###Markdown
トレーニングステップをカスタマイズするより多くの柔軟性と制御を必要とする場合、独自のトレーニングループを実装することでそれが可能になります。以下の 3 つのステップを踏みます。1. Python ジェネレータか`tf.data.Dataset`をイテレートして例のバッチを作成します。2. `tf.GradientTape`を使用して勾配を集めます。3. `tf.keras.optimizers`の 1 つを使用して、モデルの変数に重み更新を適用します。留意点:- サブクラス化されたレイヤーとモデルの`call`メソッドには、常に`training`引数を含めます。- `training`引数を確実に正しくセットしてモデルを呼び出します。- 使用方法によっては、モデルがデータのバッチ上で実行されるまでモデル変数は存在しないかもしれません。- モデルの正則化損失などを手動で処理する必要があります。v1 と比べて簡略化されている点に注意してください :- 変数初期化子を実行する必要はありません。作成時に変数は初期化されます。- たとえ`tf.function`演算が eager モードで振る舞う場合でも、手動の制御依存性を追加する必要はありません。
###Code
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(32, 3, activation='relu',
kernel_regularizer=tf.keras.regularizers.l2(0.02),
input_shape=(28, 28, 1)),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Flatten(),
tf.keras.layers.Dropout(0.1),
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Dense(10)
])
optimizer = tf.keras.optimizers.Adam(0.001)
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
@tf.function
def train_step(inputs, labels):
with tf.GradientTape() as tape:
predictions = model(inputs, training=True)
regularization_loss=tf.math.add_n(model.losses)
pred_loss=loss_fn(labels, predictions)
total_loss=pred_loss + regularization_loss
gradients = tape.gradient(total_loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
for epoch in range(NUM_EPOCHS):
for inputs, labels in train_data:
train_step(inputs, labels)
print("Finished epoch", epoch)
###Output
_____no_output_____
###Markdown
新しいスタイルのメトリクスと損失TensorFlow 2.0 では、メトリクスと損失はオブジェクトです。これらは Eager に実行され、`tf.function`内でも動作します。損失オブジェクトは呼び出し可能で、(y_true, y_pred) を引数として期待します。
###Code
cce = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
cce([[1, 0]], [[-1.0,3.0]]).numpy()
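# Supplementary check (not in the original guide): the same value computed by
# hand from the softmax of the logits for the true class.
probs = tf.nn.softmax([[-1.0, 3.0]])
print(-tf.math.log(probs[0, 0]).numpy())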
###Output
_____no_output_____
###Markdown
メトリックオブジェクトには次のメソッドがあります 。- `Metric.update_state()` — 新しい観測を追加する- `Metric.result()` — 観測値が与えられたとき、メトリックの現在の結果を得る- `Metric.reset_states()` — すべての観測をクリアするオブジェクト自体は呼び出し可能です。呼び出しは`update_state`と同様に新しい観測の状態を更新し、メトリクスの新しい結果を返します。メトリックの変数を手動で初期化する必要はありません。また、TensorFlow 2.0 は自動制御依存性を持つため、それらについても心配不要です。次のコードは、メトリックを使用してカスタムトレーニングループ内で観測される平均損失を追跡します。
###Code
# Create the metrics
loss_metric = tf.keras.metrics.Mean(name='train_loss')
accuracy_metric = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')
@tf.function
def train_step(inputs, labels):
with tf.GradientTape() as tape:
predictions = model(inputs, training=True)
regularization_loss=tf.math.add_n(model.losses)
pred_loss=loss_fn(labels, predictions)
total_loss=pred_loss + regularization_loss
gradients = tape.gradient(total_loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
# Update the metrics
loss_metric.update_state(total_loss)
accuracy_metric.update_state(labels, predictions)
for epoch in range(NUM_EPOCHS):
# Reset the metrics
loss_metric.reset_states()
accuracy_metric.reset_states()
for inputs, labels in train_data:
train_step(inputs, labels)
# Get the metric results
mean_loss=loss_metric.result()
mean_accuracy = accuracy_metric.result()
print('Epoch: ', epoch)
print(' loss: {:.3f}'.format(mean_loss))
print(' accuracy: {:.3f}'.format(mean_accuracy))
###Output
_____no_output_____
###Markdown
Keras メトリック名 TensorFlow 2.0では、Keras モデルはメトリック名の処理に関してより一貫性があります。メトリックリストで文字列を渡すと、*まさにその*文字列がメトリックの`name`として使用されます。これらの名前は`model.fit`によって返される履歴オブジェクトと、`keras.callbacks`に渡されるログに表示されます。これはメトリックリストで渡した文字列に設定されています。
###Code
model.compile(
optimizer = tf.keras.optimizers.Adam(0.001),
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics = ['acc', 'accuracy', tf.keras.metrics.SparseCategoricalAccuracy(name="my_accuracy")])
history = model.fit(train_data)
history.history.keys()
###Output
_____no_output_____
###Markdown
これは`metrics=["accuracy"]`を渡すと`dict_keys(['loss', 'acc'])`になっていた、以前のバージョンとは異なります。 Keras オプティマイザ `v1.train.AdamOptimizer`や`v1.train.GradientDescentOptimizer`などの`v1.train`内のオプティマイザは、`tf.keras.optimizers`内に同等のものを持ちます。 `v1.train`を`keras.optimizers`に変換するオプティマイザを変換する際の注意事項を次に示します。- オプティマイザをアップグレードすると、[古いチェックポイントとの互換性がなくなる可能性があります](checkpoints)。- epsilon のデフォルトは全て`1e-8`ではなく`1e-7`になりました。(これはほとんどのユースケースで無視できます。)- `v1.train.GradientDescentOptimizer`は`tf.keras.optimizers.SGD`で直接置き換えが可能です。- `v1.train.MomentumOptimizer`はモメンタム引数(`tf.keras.optimizers.SGD(..., momentum=...)`)を使用して`SGD`オプティマイザで直接置き換えが可能です。- `v1.train.AdamOptimizer`を変換して`tf.keras.optimizers.Adam`を使用することが可能です。beta1引数と`beta2`引数の名前は、`beta_1`と`beta_2`に変更されています。- `v1.train.RMSPropOptimizer`は`tf.keras.optimizers.RMSprop`に変換可能です。 `decay`引数の名前は`rho`に変更されています。- `v1.train.AdadeltaOptimizer`は`tf.keras.optimizers.Adadelta`に直接変換が可能です。- `tf.train.AdagradOptimizer`は `tf.keras.optimizers.Adagrad`に直接変換が可能です。- `tf.train.FtrlOptimizer`は`tf.keras.optimizers.Ftrl`に直接変換が可能です。`accum_name`および`linear_name`引数は削除されています。- `tf.contrib.AdamaxOptimizer`と`tf.contrib.NadamOptimizer`は `tf.keras.optimizers.Adamax`と`tf.keras.optimizers.Nadam`に直接変換が可能です。`beta1`引数と`beta2`引数の名前は、`beta_1`と`beta_2`に変更されています。 一部の`tf.keras.optimizers`の新しいデフォルト警告: モデルの収束挙動に変化が見られる場合には、デフォルトの学習率を確認してください。`optimizers.SGD`、`optimizers.Adam`、または`optimizers.RMSprop`に変更はありません。次のデフォルトの学習率が変更されました。- `optimizers.Adagrad` 0.01 から 0.001 へ- `optimizers.Adadelta` 1.0 から 0.001 へ- `optimizers.Adamax` 0.002 から 0.001 へ- `optimizers.Nadam` 0.002 から 0.001 へ TensorBoard TensorFlow 2 には、TensorBoard で視覚化するための要約データを記述するために使用される`tf.summary` API の大幅な変更が含まれています。新しい`tf.summary`の概要については、TensorFlow 2 API を使用した[複数のチュートリアル](https://www.tensorflow.org/tensorboard/get_started)があります。これには、[TensorBoard TensorFlow 2 移行ガイド](https://www.tensorflow.org/tensorboard/migrate)も含まれています。 保存 & 読み込み チェックポイントの互換性TensorFlow 2.0 は[オブジェクトベースのチェックポイント](checkpoint.ipynb)を使用します。古いスタイルの名前ベースのチェックポイントは、注意を払えば依然として読み込むことができます。コード変換プロセスは変数名変更という結果になるかもしれませんが、回避方法はあります。最も単純なアプローチは、チェックポイント内の名前と新しいモデルの名前を揃えて並べることです。- 変数にはすべて依然として設定が可能な`name`引数があります。- Keras モデルはまた `name`引数を取り、それらの変数のためのプレフィックスとして設定されます。- `v1.name_scope`関数は、変数名のプレフィックスの設定に使用できます。これは`tf.variable_scope`とは大きく異なります。これは名前だけに影響するもので、変数と再利用の追跡はしません。ご利用のユースケースで動作しない場合は、`v1.train.init_from_checkpoint`を試してみてください。これは`assignment_map`引数を取り、古い名前から新しい名前へのマッピングを指定します。注意 : [読み込みを遅延](checkpoint.ipynbloading_mechanics)できるオブジェクトベースのチェックポイントとは異なり、名前ベースのチェックポイントは関数が呼び出される時に全ての変数が構築されていることを要求します。一部のモデルは、`build`を呼び出すかデータのバッチでモデルを実行するまで変数の構築を遅延します。[TensorFlow Estimatorリポジトリ](https://github.com/tensorflow/estimator/blob/master/tensorflow_estimator/python/estimator/tools/checkpoint_converter.py)には事前作成された Estimator のチェックポイントを TensorFlow 1.X から 2.0 にアップグレードするための[変換ツール](checkpoint_converter)が含まれています。これは、同様のユースケースのツールを構築する方法の例として有用な場合があります。 保存されたモデルの互換性保存されたモデルには、互換性に関する重要な考慮事項はありません。- TensorFlow 1.x saved_models は TensorFlow 2.x で動作します。- TensorFlow 2.x saved_models は全ての演算がサポートされていれば TensorFlow 1.x で動作します。 Graph.pb または Graph.pbtxt 未加工の`Graph.pb`ファイルを TensorFlow 2.0 にアップグレードする簡単な方法はありません。確実な方法は、ファイルを生成したコードをアップグレードすることです。ただし、「凍結グラフ」(変数が定数に変換された`tf.Graph`)がある場合、`v1.wrap_function`を使用して[`concrete_function`](https://tensorflow.org/guide/concrete_function)への変換が可能です。
###Code
def wrap_frozen_graph(graph_def, inputs, outputs):
def _imports_graph_def():
tf.compat.v1.import_graph_def(graph_def, name="")
wrapped_import = tf.compat.v1.wrap_function(_imports_graph_def, [])
import_graph = wrapped_import.graph
return wrapped_import.prune(
tf.nest.map_structure(import_graph.as_graph_element, inputs),
tf.nest.map_structure(import_graph.as_graph_element, outputs))
###Output
_____no_output_____
###Markdown
例えば、次のような凍結された Inception v1 グラフ(2016 年)があります。
###Code
path = tf.keras.utils.get_file(
'inception_v1_2016_08_28_frozen.pb',
'http://storage.googleapis.com/download.tensorflow.org/models/inception_v1_2016_08_28_frozen.pb.tar.gz',
untar=True)
###Output
_____no_output_____
###Markdown
`tf.GraphDef`を読み込みます。
###Code
graph_def = tf.compat.v1.GraphDef()
loaded = graph_def.ParseFromString(open(path,'rb').read())
###Output
_____no_output_____
###Markdown
これを`concrete_function`にラップします。
###Code
inception_func = wrap_frozen_graph(
graph_def, inputs='input:0',
outputs='InceptionV1/InceptionV1/Mixed_3b/Branch_1/Conv2d_0a_1x1/Relu:0')
###Output
_____no_output_____
###Markdown
入力としてテンソルを渡します。
###Code
input_img = tf.ones([1,224,224,3], dtype=tf.float32)
inception_func(input_img).shape
###Output
_____no_output_____
###Markdown
Estimator Estimator でトレーニングするEstimator は TensorFlow 2.0 でサポートされています。Estimator を使用する際には、TensorFlow 1.x. からの`input_fn()`、`tf.estimator.TrainSpec`、`tf.estimator.EvalSpec`を使用できます。ここに train と evaluate specs を伴う `input_fn` を使用する例があります。 input_fn と train/eval specs を作成する
###Code
# Define the estimator's input_fn
def input_fn():
datasets, info = tfds.load(name='mnist', with_info=True, as_supervised=True)
mnist_train, mnist_test = datasets['train'], datasets['test']
BUFFER_SIZE = 10000
BATCH_SIZE = 64
def scale(image, label):
image = tf.cast(image, tf.float32)
image /= 255
return image, label[..., tf.newaxis]
train_data = mnist_train.map(scale).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
return train_data.repeat()
# Define train & eval specs
train_spec = tf.estimator.TrainSpec(input_fn=input_fn,
max_steps=STEPS_PER_EPOCH * NUM_EPOCHS)
eval_spec = tf.estimator.EvalSpec(input_fn=input_fn,
steps=STEPS_PER_EPOCH)
###Output
_____no_output_____
###Markdown
Keras モデル定義を使用する TensorFlow 2.0 で Estimator を構築する方法には、いくつかの違いがあります。モデルは Keras を使用して定義することを推奨します。次に`tf.keras.estimator.model_to_estimator`ユーティリティを使用して、モデルを Estimator に変更します。次のコードは Estimator を作成してトレーニングする際に、このユーティリティをどのように使用するかを示します。
###Code
def make_model():
return tf.keras.Sequential([
tf.keras.layers.Conv2D(32, 3, activation='relu',
kernel_regularizer=tf.keras.regularizers.l2(0.02),
input_shape=(28, 28, 1)),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Flatten(),
tf.keras.layers.Dropout(0.1),
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Dense(10)
])
model = make_model()
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
estimator = tf.keras.estimator.model_to_estimator(
keras_model = model
)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
###Output
_____no_output_____
###Markdown
注意 : Keras で重み付きメトリクスを作成し、`model_to_estimator`を使用してそれらを Estimator API で重み付きメトリクスを変換することはサポートされません。それらのメトリクスは、`add_metrics`関数を使用して Estimator 仕様で直接作成する必要があります。 カスタム `model_fn` を使用する保守する必要がある既存のカスタム Estimator `model_fn` を持つ場合には、`model_fn`を変換して Keras モデルを使用できるようにすることが可能です。しかしながら、互換性の理由から、カスタム`model_fn`は依然として1.x スタイルのグラフモードで動作します。これは eager execution はなく自動制御依存性もないことも意味します。 最小限の変更で model_fn をカスタマイズするTensorFlow 2.0 でカスタム`model_fn`を動作させるには、既存のコードの変更を最小限に留めたい場合、`optimizers`や`metrics`などの`tf.compat.v1`シンボルを使用することができます。カスタム`model_fn`で Keras モデルを使用することは、それをカスタムトレーニングループで使用することに類似しています。- `mode`引数を基に、`training`段階を適切に設定します。- モデルの`trainable_variables`をオプティマイザに明示的に渡します。しかし、[カスタムループ](custom_loop)と比較して、重要な違いがあります。- `Model.losses`を使用する代わりに`Model.get_losses_for`を使用して損失を抽出します。- `Model.get_updates_for`を使用してモデルの更新を抽出します。注意 : 「更新」は各バッチの後にモデルに適用される必要がある変更です。例えば、`layers.BatchNormalization`レイヤーの平均と分散の移動平均などです。次のコードはカスタム`model_fn`から Estimator を作成し、これらの懸念事項を全て示しています。
###Code
def my_model_fn(features, labels, mode):
model = make_model()
optimizer = tf.compat.v1.train.AdamOptimizer()
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
training = (mode == tf.estimator.ModeKeys.TRAIN)
predictions = model(features, training=training)
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
reg_losses = model.get_losses_for(None) + model.get_losses_for(features)
total_loss=loss_fn(labels, predictions) + tf.math.add_n(reg_losses)
accuracy = tf.compat.v1.metrics.accuracy(labels=labels,
predictions=tf.math.argmax(predictions, axis=1),
name='acc_op')
update_ops = model.get_updates_for(None) + model.get_updates_for(features)
minimize_op = optimizer.minimize(
total_loss,
var_list=model.trainable_variables,
global_step=tf.compat.v1.train.get_or_create_global_step())
train_op = tf.group(minimize_op, update_ops)
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
loss=total_loss,
train_op=train_op, eval_metric_ops={'accuracy': accuracy})
# Create the Estimator & Train
estimator = tf.estimator.Estimator(model_fn=my_model_fn)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
###Output
_____no_output_____
###Markdown
TensorFlow 2.0 シンボルで`model_fn`をカスタマイズするTensorFlow 1.x シンボルを全て削除し、カスタム`model_fn` をネイティブの TensorFlow 2.0 にアップグレードする場合は、オプティマイザとメトリクスを`tf.keras.optimizers`と`tf.keras.metrics`にアップグレードする必要があります。カスタム`model_fn`では、上記の[変更](minimal_changes)に加えて、さらにアップグレードを行う必要があります。- `v1.train.Optimizer`の代わりに[`tf.keras.optimizers`](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/optimizers)を使用します。- モデルの`trainable_variables`を`tf.keras.optimizers`に明示的に渡します。- `train_op/minimize_op`を計算するには、 - 損失がスカラー損失`Tensor`(呼び出し不可)の場合は、`Optimizer.get_updates()`を使用します。返されるリストの最初の要素は目的とする`train_op/minimize_op`です。 - 損失が呼び出し可能(関数など)な場合は、`Optimizer.minimize()`を使用して`train_op/minimize_op`を取得します。- 評価には`tf.compat.v1.metrics`の代わりに[`tf.keras.metrics`](https://www.tensorflow.org/api_docs/python/tf/keras/metrics)を使用します。上記の`my_model_fn`の例では、2.0 シンボルの移行されたコードは次のように表示されます。
###Code
def my_model_fn(features, labels, mode):
model = make_model()
training = (mode == tf.estimator.ModeKeys.TRAIN)
loss_obj = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
predictions = model(features, training=training)
# Get both the unconditional losses (the None part)
# and the input-conditional losses (the features part).
reg_losses = model.get_losses_for(None) + model.get_losses_for(features)
total_loss=loss_obj(labels, predictions) + tf.math.add_n(reg_losses)
# Upgrade to tf.keras.metrics.
accuracy_obj = tf.keras.metrics.Accuracy(name='acc_obj')
accuracy = accuracy_obj.update_state(
y_true=labels, y_pred=tf.math.argmax(predictions, axis=1))
train_op = None
if training:
# Upgrade to tf.keras.optimizers.
optimizer = tf.keras.optimizers.Adam()
# Manually assign tf.compat.v1.global_step variable to optimizer.iterations
# to make tf.compat.v1.train.global_step increased correctly.
# This assignment is a must for any `tf.train.SessionRunHook` specified in
# estimator, as SessionRunHooks rely on global step.
optimizer.iterations = tf.compat.v1.train.get_or_create_global_step()
# Get both the unconditional updates (the None part)
# and the input-conditional updates (the features part).
update_ops = model.get_updates_for(None) + model.get_updates_for(features)
# Compute the minimize_op.
minimize_op = optimizer.get_updates(
total_loss,
model.trainable_variables)[0]
train_op = tf.group(minimize_op, *update_ops)
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
loss=total_loss,
train_op=train_op,
eval_metric_ops={'Accuracy': accuracy_obj})
# Create the Estimator & Train.
estimator = tf.estimator.Estimator(model_fn=my_model_fn)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
###Output
_____no_output_____
###Markdown
事前作成された Estimator`tf.estimator.DNN*`、`tf.estimator.Linear*`、 `tf.estimator.DNNLinearCombined*`のファミリーに含まれる[事前作成された Estimator](https://www.tensorflow.org/guide/premade_estimators) は、依然として TensorFlow 2.0 API でもサポートされていますが、一部の引数が変更されています。1. `input_layer_partitioner`: 2.0 で削除されました。2. `loss_reduction`: `tf.compat.v1.losses.Reduction`の代わりに`tf.keras.losses.Reduction`に更新されました。デフォルト値も`tf.compat.v1.losses.Reduction.SUM`から`tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE`に変更されています。3. `optimizer`、`dnn_optimizer`、`linear_optimizer`: これらの引数は`tf.compat.v1.train.Optimizer`の代わりに`tf.keras.optimizers`に更新されています。上記の変更を移行するには :1. TF 2.0 では[`配布戦略`](https://www.tensorflow.org/guide/distributed_training)が自動的に処理するため、`input_layer_partitioner`の移行は必要ありません。2. `loss_reduction`については[`tf.keras.losses.Reduction`](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/losses/Reduction)でサポートされるオプションを確認してください。3. `optimizer`引数については、`optimizer`、`dnn_optimizer`、`linear_optimizer`引数を渡さない場合、または`optimizer`引数をコード内の内の`string`として指定する場合は、何も変更する必要はありません。デフォルトで`tf.keras.optimizers`を使用します。それ以外の場合は、`tf.compat.v1.train.Optimizer`から対応する[`tf.keras.optimizers`](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/optimizers)に更新する必要があります。 チェックポイントコンバータ`tf.keras.optimizers`は異なる変数セットを生成してチェックポイントに保存するするため、`keras.optimizers`への移行は TensorFlow 1.x を使用して保存されたチェックポイントを壊してしまいます。TensorFlow 2.0 への移行後に古いチェックポイントを再利用できるようにするには、[チェックポイントコンバータツール](https://github.com/tensorflow/estimator/blob/master/tensorflow_estimator/python/estimator/tools/checkpoint_converter.py)をお試しください。
###Code
! curl -O https://raw.githubusercontent.com/tensorflow/estimator/master/tensorflow_estimator/python/estimator/tools/checkpoint_converter.py
###Output
_____no_output_____
###Markdown
ツールにはヘルプが組み込まれています。
###Code
! python checkpoint_converter.py -h
###Output
_____no_output_____
###Markdown
TensorShapeこのクラスは`tf.compat.v1.Dimension`オブジェクトの代わりに`int`を保持することにより単純化されました。従って、`.value()`を呼び出して`int`を取得する必要はありません。個々の`tf.compat.v1.Dimension`オブジェクトは依然として`tf.TensorShape.dims`からアクセス可能です。 以下に TensorFlow 1.x と TensorFlow 2.0 間の違いを示します。
###Code
# Create a shape and choose an index
i = 0
shape = tf.TensorShape([16, None, 256])
shape
###Output
_____no_output_____
###Markdown
もし TensorFlow 1.x で次を使っていた場合:```pythonvalue = shape[i].value```TensorFlow 2.0 では次のようにします:
###Code
value = shape[i]
value
###Output
_____no_output_____
###Markdown
もし TensorFlow 1.x で次を使っていた場合:```pythonfor dim in shape: value = dim.value print(value)```TensorFlow 2.0 では次のようにします:
###Code
for value in shape:
print(value)
###Output
_____no_output_____
###Markdown
もし TensorFlow 1.x で次を使っていた場合(あるいは任意の他の次元のメソッドを使用したのであれば):```pythondim = shape[i] dim.assert_is_compatible_with(other_dim)```TensorFlow 2.0 では次のようにします:
###Code
other_dim = 16
Dimension = tf.compat.v1.Dimension
if shape.rank is None:
dim = Dimension(None)
else:
dim = shape.dims[i]
dim.is_compatible_with(other_dim) # or any other dimension method
shape = tf.TensorShape(None)
if shape:
dim = shape.dims[i]
dim.is_compatible_with(other_dim) # or any other dimension method
###Output
_____no_output_____
###Markdown
`tf.TensorShape`のブール型の値は、階数が既知の場合は`True`で、そうでない場合は`False`です。
###Code
print(bool(tf.TensorShape([]))) # Scalar
print(bool(tf.TensorShape([0]))) # 0-length vector
print(bool(tf.TensorShape([1]))) # 1-length vector
print(bool(tf.TensorShape([None]))) # Unknown-length vector
print(bool(tf.TensorShape([1, 10, 100]))) # 3D tensor
print(bool(tf.TensorShape([None, None, None]))) # 3D tensor with no known dimensions
print()
print(bool(tf.TensorShape(None))) # A tensor with unknown rank.
###Output
_____no_output_____ |
Optimization/KLDivergence/KL_divergence_optimization.ipynb | ###Markdown
Minimizing KL Divergence Let's see how we can minimize the KL divergence between two probability distributions using gradient descent. We will fix a target Gaussian distribution $p$ with a known mean and variance, parameterize a second distribution $q$ by a trainable mean and variance, and update those parameters so that $KL(P\|Q)$ decreases.
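For reference, the quantity minimized below is the grid-based (discrete) form of the KL divergence, where the sum runs over the grid points on which both distributions are evaluated: $$KL(P \| Q) = \sum_i p_i \log \frac{p_i}{q_i}$$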
###Code
import os
import warnings
warnings.filterwarnings('ignore')
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = (4,4) # Make the figures a bit bigger
plt.style.use('fivethirtyeight')
import numpy as np
from scipy.stats import norm
import tensorflow as tf
import seaborn as sns
sns.set()
import math
from tqdm import tqdm
np.random.seed(7)
###Output
_____no_output_____
###Markdown
 To begin, we set up the support grid and the parametric distribution $q$: its mean `mu` and variance `varq` are TensorFlow variables initialized away from the target, while the target $p$ will be fed in later through a placeholder.
###Code
x = np.arange(-10, 10, 0.1)
x.shape[0]
tf_pdf_shape=(1, x.shape[0])
p = tf.placeholder(tf.float64, shape=tf_pdf_shape)#p_pdf.shape
#mu = tf.Variable(np.zeros(1))
#mu = tf.Variable(tf.truncated_normal((1,), stddev=3.0))
mu = tf.Variable(np.ones(1)*5)
print(mu.dtype)
varq = tf.Variable(np.eye(1))
print(varq.dtype)
normal = tf.exp(-tf.square(x - mu) / (2 * varq))
q = normal / tf.reduce_sum(normal)
learning_rate = 0.01
nb_epochs = 500*2
###Output
_____no_output_____
###Markdown
We define a function to compute the KL divergence that excludes probabilities equal to zero.
###Code
kl_divergence = tf.reduce_sum( p * tf.log(p / q))
kl_divergence = tf.reduce_sum(
tf.where(p == 0, tf.zeros(tf_pdf_shape, tf.float64), p * tf.log(p / q))
)
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(kl_divergence)
init = tf.global_variables_initializer()
sess = tf.compat.v1.InteractiveSession()
sess.run(init)
history = []
means = []
variances = []
###Output
_____no_output_____
###Markdown
 Just a quick test: check that scipy's `norm.pdf` matches the explicit Gaussian formula.
###Code
m1 = 0
var1 = 2
p_pdf0 = norm.pdf(x, m1, np.sqrt(var1))
p_pdf1 = 1.0 / np.sqrt(var1) / np.sqrt(2 * math.pi) * np.exp(-np.square(x - m1) / (2 * var1))
import matplotlib
plt.plot(p_pdf0)
plt.plot(p_pdf1, marker=",")
###Output
_____no_output_____
###Markdown
 KL(P||Q): here $p$ is given (the target) and $q$ holds the variables to learn. Generating values for $p$:
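 As an added sanity check (not in the original notebook): for two univariate Gaussians the KL divergence has a closed form, so we know what the optimizer should achieve. Assuming, as in the cells below, a target with mean 0 and variance 7 and an initial $q$ with mean 5 and variance 1 (the notebook actually works with discretized, normalized pdfs, so the numbers differ slightly, but the optimum is the same):
```python
# Added sketch: closed-form KL(N(mu1, var1) || N(mu2, var2)).
import numpy as np

def kl_gauss(mu1, var1, mu2, var2):
    return 0.5 * (np.log(var2 / var1) + (var1 + (mu1 - mu2) ** 2) / var2 - 1.0)

print(kl_gauss(0.0, 7.0, 5.0, 1.0))   # KL from the target p to the initial q: large
print(kl_gauss(0.0, 7.0, 0.0, 7.0))   # 0.0 once q matches p exactly
```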
###Code
m_truth = 0
var_truth = 7
p_pdf0 = norm.pdf(x, m_truth, np.sqrt(var_truth))
p_pdf0 = 1.0 / np.sqrt(var_truth) / np.sqrt(2 * math.pi) * np.exp(-np.square(x - m_truth) / (2 * var_truth))
p_pdf = p_pdf0.reshape(1, -1)
for i in tqdm(range(nb_epochs)):
sess.run(optimizer, { p: p_pdf })
history.append(sess.run(kl_divergence, { p: p_pdf }))
means.append(sess.run(mu)[0])
variances.append(sess.run(varq)[0][0])
if i % 100 == 10:
print(sess.run(mu)[0], sess.run(varq)[0][0])
###Output
11%|█ | 107/1000 [00:00<00:01, 514.85it/s]
###Markdown
Plot the results
###Code
len1 = np.shape(means)[0]
alphas = np.linspace(0.1, 1, len1)
rgba_colors = np.zeros((len1,4))
# for red the first column needs to be one
rgba_colors[:,0] = 1.0
# the fourth column needs to be your alphas
rgba_colors[:, 3] = alphas
print(rgba_colors.shape)
grange = range(len1)
print(np.shape(grange))
for mean, variance, g in zip(means, variances, grange):
if g%5 ==0:
q_pdf = norm.pdf(x, mean, np.sqrt(variance))
plt.plot(x, q_pdf.reshape(-1, 1), color=rgba_colors[g])
plt.title('KL(P||Q) = %1.3f' % history[-1])
plt.plot(x, p_pdf.reshape(-1, 1), linewidth=3)
plt.show()
#target
plt.plot(x, p_pdf.reshape(-1, 1), linewidth=5)
#initial
q_pdf = norm.pdf(x, means[0] , np.sqrt(variances[0]))
plt.plot(x, q_pdf.reshape(-1, 1))
#final
q_pdf = norm.pdf(x, means[-1] , np.sqrt(variances[-1]))
plt.plot(x, q_pdf.reshape(-1, 1), color='r')
plt.plot(means)
plt.xlabel('epoch')
plt.ylabel('mean')
plt.plot(variances)
plt.xlabel('epoch')
plt.ylabel('variances')
plt.plot(history)
plt.title('history')
plt.show()
#sess.close()
###Output
_____no_output_____ |
custom_scripts/double_linked_enrichment_ana.ipynb | ###Markdown
General investigation of A vs B calls here. Load data
###Code
import pandas as pd
import pickle
f = open('/home/ndh0004/Documents/keggPthCor/gene_dictv2.pckl','rb')
ab_dict = pickle.load(f)
f.close()
linked_genes = '/home/ndh0004/Documents/cor_exp/SS7/double_linked/non_redundant_double_linkedJan17.csv'
df = pd.read_csv(linked_genes,sep=",")
df.columns.values
to_write = []
not_ann = []
b_gene = []
for index,row in df.iterrows() :
row_to_write = [row['Agene'],
row['Asum'],
row['AAgene'],
row['AAsum'],
row['Bgene'],
row['Bsum'],
row['Bmax']]
if row['Bgene'] in ab_dict:
row_to_write += ab_dict[row['Bgene']]
to_write.append(row_to_write)
elif row['Agene'] in ab_dict:
row_to_write += ab_dict[row['Agene']]
to_write.append(row_to_write)
else:
not_ann.append(row_to_write)
b_gene.append(row['Bgene'])
ecorToSita_h = '/home/ndh0004/Documents/keggPthCor/Ecor2Sita.list'
ecorToSita_o = open(ecorToSita_h)
ecorToSita = ecorToSita_o.read().rstrip('\n').split('\n')
counter = 0
found = {}
for line in ecorToSita:
ecor, sita = line.split(' ')
if ecor in b_gene:
counter += 1
found[ecor] = ['b','manual',sita]
print(counter, len(b_gene))
for gene in b_gene :
if gene not in found:
print('missing: '+gene)
###Output
96
###Markdown
 Need to come back to these; they may be nothing, but worth checking nonetheless. Now we are going to annotate the 96 genes that have good matches to Setaria.
###Code
from bioservices import KEGG
s = KEGG()
convDb = s.conv('sita','ncbi-proteinid')
convDb['ncbi-proteinid:YP_008815800']
counter = 0
no_joy_for_sita = []
annotated = []
for gene in list(found.keys()):
sita = found[gene][-1]
    sita_q = 'ncbi-proteinid:{g}'.format(g=sita[:-2])
    print(sita, sita_q)
if sita_q in convDb:
print( 'found' )
found[gene].append(convDb[sita_q])
counter += 1
annotated.append(gene)
else:
no_joy_for_sita.append(sita)
print('No joy: ', len(no_joy_for_sita))
print(counter)
###Output
84
###Markdown
 Okay, we have 84 annotated and 14 not: 12 are not in the conversion database and 2 have no best BLAST hit.
###Code
counter = 0
for gene in annotated:
counter += 1
if (len(found[gene])) == 4:
call, conf, sita, kSita = found[gene]
keggObj = s.get(kSita)
keggParse = s.parse(keggObj)
ko = []
if 'ORTHOLOGY' in keggParse.keys():
ko = list(keggParse['ORTHOLOGY'].keys())
else:
ko = ['None']
        assert len(ko) == 1, 'ko:{ko}\ngene:{g}'.format(ko=ko, g=gene)
found[gene].append(ko[0])
if (counter % 10 ) == 0:
print('Finshed:{c}'.format(c=counter))
for X in found:
print(found[X])
print(len(found))
final_missing = []
for index,row in df.iterrows() :
row_to_write = [row['Agene'],
row['Asum'],
row['AAgene'],
row['AAsum'],
row['Bgene'],
row['Bsum'],
row['Bmax']]
if row['Bgene'] in found:
if len(found[row['Bgene']]) == 2:
row_to_write += found[row['Bgene']] + ['None','None']
else:
row_to_write += found[row['Bgene']]
to_write.append(row_to_write)
elif row['Bgene'] not in ab_dict and row['Agene'] not in ab_dict:
not_ann.append(row_to_write)
final_missing.append(row['Bgene'])
print(len(final_missing))
print(len(to_write))
print(len(df))
for X in to_write[80:100]:
print(X)
print(len(X))
fout = open('/home/ndh0004/Documents/cor_exp/SS7/double_linked/b_gene_annontated_double_linked.tsv', 'w')
fout.write('Agene\tAsum\tAAgene\tAAsum\tBgene\tBsum\tBmax\tbcall\tbconf\tSitaProt\tSitaRef\tKegg_pth\n')
for line in to_write:
if len(line)<12:
while len(line) < 12:
line += ['None']
fout.write('\t'.join([str(x) for x in line]))
fout.write('\n')
fout.close()
###Output
_____no_output_____ |
python02.ipynb | ###Markdown
 **The list data structure** So far we have worked with variables that store a single value: edad=12, altura=1.79, nombre="juan". Python has a type of variable that stores a collection of data, which can then be accessed through an index (similar to strings). Creating a list by assignment: to create a list by assignment we write its elements enclosed in square brackets and separated by commas. lista1=[10, 5, 3] is a list of ints; lista2=[1.78, 2.66, 1.55, 89.4] is a list of floats; lista3=["lunes", "martes", "miercoles"] is a list of strings; lista4=["juan", 45, 1.92] is a list with elements of different types. If we want to know the number of elements in a list we can call the len function: lista1=[10, 5, 3]; print(len(lista1)) prints 3. **Problem 1:** Define a list that stores 5 integers. Add up all its elements and display the sum.
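 A short illustrative snippet (added for this translation; the values are made up) summarizing the ideas above before tackling Problem 1:
```python
# Creating lists by assignment and asking for their length.
lista1 = [10, 5, 3]                # list of ints
lista3 = ["lunes", "martes"]       # list of strings
lista4 = ["juan", 45, 1.92]        # mixed types are allowed
print(len(lista1))                 # 3
print(lista4[0])                   # first element: "juan"
```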
###Code
lista = [10,7,3,7,2]
#print(lista)
a = lista[0]
b = lista[1]
c = lista[2]
d = lista[3]
e = lista[4]
suma = a + b + c + d + e
print(len(lista))
lista=[10,7,3,7,2]
suma=0
x=0
while x < len(lista):
suma = suma + lista[x]
x=x+1
print("Los elementos de la lista son")
print(lista)
print("La suma de todos sus elementos es")
print(suma)
lista=[10,7,3,7,2]
suma=0
limite = len(lista)
for i in range(limite):
suma = suma + lista[i]
print(suma)
###Output
29
###Markdown
 **Problem 2:** Define a list by assignment that stores the names of the first four months of the year. Display only the first and last elements of the list.
###Code
meses=["enero", "febrero", "marzo", "abril"]
print(meses[0]) # se muestra enero
print(meses[3]) # se muestra abril
###Output
enero
abril
###Markdown
 If we call print and pass just the name of the list, all of its elements are shown:
###Code
print(meses) # se muestra ["enero", "febrero", "marzo", "abril"]
###Output
_____no_output_____
###Markdown
 **Problem 3:** Define a list by assignment that stores a student's name in the first component and their two grades in the next two. Then print the name and the average of the two grades.
###Code
lista = ["juan", 4, 7]
print("Nombre del estudiante: ")
print(lista[0])
promedio = (lista[1] + lista[2])/2
print("Promedio de sus dos notas: ")
print(promedio)
###Output
Nombre del estudiante:
juan
Promedio de sus dos notas:
5.5
###Markdown
 ***Proposed problems*** 1. Define by assignment a list with 8 integer elements. Count how many of those values are greater than 100. 2. Define a list by assignment with 5 integers. Display on screen only the elements whose value is greater than or equal to 7.
###Code
lista=[10,3,9,1,20]
limite = len(lista)
for i in range(limite):
if lista[i] >= 7:
print(lista[i])
###Output
_____no_output_____
###Markdown
 **Lists: loading elements from the keyboard** A list in Python is a mutable structure (that is, it can change during program execution). We have seen that we can define a list by assignment, writing the values to store between square brackets: lista=[10, 20, 40]. Once a list is defined we can add new elements to the collection. The first way we will see to grow our list is to use the list's append method, passing the new element as a parameter:
###Code
lista=[10, 20, 30]
print(len(lista)) # imprime un 3
lista.append(100)
print(len(lista)) # imprime un 4
print(lista[0]) # imprime un 10
print(lista[3]) # imprime un 100
lista = [] #lista vacia
lista.append(10)
lista.append(20)
lista.append(30)
lista.append(40)
lista.append(50)
print(lista)
print(len(lista))
###Output
[10, 20, 30, 40, 50]
5
###Markdown
 **Problem 4:** Define an empty list, then ask for 5 integers from the keyboard and append them to the list. Print the resulting list.
###Code
#definimos una lista vacia
lista=[]
#disponemos un ciclo de 5 vueltas
for x in range(5):
valor=int(input("Ingrese un valor entero:"))
lista.append(valor)
#imprimimos la lista
print(lista)
#definimos una lista vacia
lista=[]
numero = int(input("Cuantos valores enteros? "))
for x in range(numero):
valor=int(input("Ingrese un valor entero:"))
lista.append(valor)
#imprimimos la lista
print(lista)
###Output
_____no_output_____
###Markdown
 **Problem 5:** Read integer values from the keyboard and store them in a list. Stop reading integers when zero is entered. Finally, show the size of the list.
###Code
lista=[]
valor=int(input("Ingresar valor (0 para finalizar):"))
while valor != 0:
lista.append(valor)
valor=int(input("Ingresar valor (0 para finalizar):"))
print("Tamano de la lista:")
print(len(lista))
print("la lista:")
print(lista)
###Output
_____no_output_____
###Markdown
 **Proposed problems** 1. Store in a list the salaries (float values) of 5 workers. Print the list and the average salary. 2. Read from the keyboard and store in a list the heights of 5 people (float values). Compute their average. Count how many people are taller than the average and how many are shorter. 3. A company has two shifts (morning and afternoon) with 8 employees (4 in the morning and 4 in the afternoon). Write a program that stores the employees' salaries grouped into two lists. Print both salary lists.
###Code
###Output
_____no_output_____
###Markdown
 **Lists: largest and smallest element** Finding the largest and smallest element of a list is a very common task. The list must contain values of the same type, for example integers. The elements can also be strings, in which case we look for the alphabetically largest or smallest, but we cannot look for the largest or smallest if the list mixes integers and strings. (An idiomatic shortcut is sketched right below.) **Problem 6:** Create and load a list with 5 integers. Implement an algorithm that finds the largest value in the list.
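 For reference, the idiomatic shortcut mentioned above (an added note; the exercise below still builds the logic by hand, which is the point):
```python
# Python's built-ins solve the same task directly (example values are made up).
lista = [10, 3, 9, 1, 20]
print(max(lista))                # largest value: 20
print(min(lista))                # smallest value: 1
print(lista.index(max(lista)))   # position of the largest value: 4
```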
###Code
lista=[]
for x in range(5):
valor=int(input("Ingrese valor:"))
lista.append(valor)
mayor=lista[0]
for x in range(1,5):
if lista[x]>mayor:
mayor=lista[x]
print("Lista completa")
print(lista)
print("Mayor de la lista")
print(mayor)
###Output
_____no_output_____
###Markdown
 **Problem 7:** Create and load a list with 5 integers from the keyboard. Implement an algorithm that finds the smallest value in the list and the position where it is located.
###Code
lista=[]
for x in range(5):
valor=int(input("Ingrese valor:"))
lista.append(valor)
menor=lista[0]
posicion=0
for x in range(1,5):
if lista[x]<menor:
menor=lista[x]
posicion=x
print("Lista completa")
print(lista)
print("Menor de la lista")
print(menor)
print("Posicion del menor en la lista")
print(posicion)
###Output
_____no_output_____
###Markdown
 **Proposed problem** Load a list with 5 integer elements. Print the largest one, and a message if it is repeated within the list (that is, if that value appears in 2 or more positions of the list).
###Code
lista=[]
for x in range(5):
valor=int(input("Ingrese valor:"))
lista.append(valor)
mayor=lista[0]
for x in range(1,5):
if lista[x]>mayor:
mayor=lista[x]
contador = 0
for x in range(1,5):
if mayor == lista[x]:
contador = contador + 1
print(lista)
print("el numero mayor es: " + str(mayor))
if contador > 1:
print("el numero mayor se repite")
###Output
_____no_output_____
###Markdown
 **Parallel lists** We say two lists are parallel when there is a relationship between the components with the same index (same position) in one list and the other. Suppose we have two lists, each initialized with 5 elements: one stores people's names, the other those people's ages. We say the list nombres is parallel to the list edades if component 0 of each list stores information related to the same person (Juan - 12 years old); that is, there is a relationship between each pair of components of the two lists. Only the programmer knows this relationship; it is used to make it easier to develop algorithms that process the data stored in these structures. (A zip-based idiom is sketched right below.) **Problem 8:** Write a program that loads 5 people's names and their respective ages. After reading all the data from the keyboard, print the names of the people who are of legal age (18 or older).
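 The zip-based idiom mentioned above (an added note); the solution below uses explicit indices, which is what the lesson practices:
```python
# Iterating two parallel lists together with zip (illustrative values).
nombres = ["juan", "ana", "luis"]
edades = [12, 20, 18]
for nombre, edad in zip(nombres, edades):
    if edad >= 18:
        print(nombre)            # prints ana and luis
```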
###Code
nombres=[]
edades=[]
for x in range(5):
nom=input("Ingrese el nombre de la persona:")
nombres.append(nom)
ed=int(input("Ingrese la edad de dicha persona:"))
edades.append(ed)
print("Nombre de las personas mayores de edad:")
for x in range(5):
if edades[x]>=18:
print(nombres[x])
###Output
Ingrese el nombre de la persona:ana
Ingrese la edad de dicha persona:18
Ingrese el nombre de la persona:maria
Ingrese la edad de dicha persona:19
Ingrese el nombre de la persona:juan
Ingrese la edad de dicha persona:17
Ingrese el nombre de la persona:alvaro
Ingrese la edad de dicha persona:21
Ingrese el nombre de la persona:alex
Ingrese la edad de dicha persona:22
Nombre de las personas mayores de edad:
ana
maria
alvaro
alex
###Markdown
 **Proposed problems** 1. Create and load two lists, one with the names of 5 products and the other with their respective prices. Define two parallel lists. Show how many products have a price higher than the first product entered. 2. In a class of 4 students the exam grades were recorded and must be processed as follows: a) Enter each student's name and grade (store the data in two parallel lists); b) Produce a listing that shows the names, grades and each student's status: "Muy Bueno" if the grade is 8 or higher, "Bueno" if the grade is between 4 and 7, and "Insuficiente" if the grade is below 4; c) Print how many students have the label "Muy Bueno". 3. Write a program that asks for two integer lists of 4 elements each. Generate a third list from the sum of the elements at the same position in each list. Show this third list.
###Code
lista1=[]
lista2=[]
for x in range(5):
producto=str(input("Ingrese el producto:"))
valor=int(input("Ingrese valor:"))
lista1.append(producto)
lista2.append(valor)
cantidad=0
for x in range(5):
if lista2[0]<lista2[x]:
cantidad=cantidad+1
print(lista1[x])
print("cantidad de productos mayor que el 1ro")
print(cantidad)
alumno=[]
nota=[]
for x in range(4):
nom=input("nombre del alumno:")
alumno.append(nom)
ed=int(input("nota del examen:"))
nota.append(ed)
contador=0
for x in range(4):
if nota[x] >= 8:
print("nombre:",alumno[x],"nota:",nota[x],)
print("muy bueno")
contador = contador + 1
else:
if nota[x] >= 4:
print("nombre:",alumno[x],"nota:",nota[x],)
print("bueno")
else:
print("nombre:",alumno[x],"nota:",nota[x])
listaA=[]
listaB=[]
listaC=[]
# Leer los valores de la primer lista
print("lista #1")
for x in range(4):
num=int(input("Ingrese un mumero: "))
listaA.append(num)
# Leer los valores de la segunda lista
print("lista #2")
for y in range(4):
num2=int(input("ingrese un numero: "))
listaB.append(num2)
for z in range(4):
listaC.append(listaA[z]+listaB[z])
print("Lista #3")
print(listaC)
###Output
_____no_output_____
###Markdown
 **Lists: sorting their elements** Another very common algorithm that a programmer must know and understand is sorting a list of data. Sorting a list is achieved by swapping components so that lista[0] <= lista[1] <= lista[2], etc., i.e. the content of component lista[0] is less than or equal to the content of component lista[1], and so on. If this holds, we say the list is sorted from smallest to largest; we can likewise sort a list from largest to smallest. Keep in mind that the list data structure in Python is mutable, which means we can replace its elements with others. We can sort lists whose components are of type int, float or string; in the last case the ordering is alphabetical. (Python's built-in sort is sketched right below for reference.) **Problem 9:** Create and load a list that stores 5 salaries. Move the largest value of the list to the last position. This first step toward fully sorting a list in the next problem aims to analyze the element swaps within the list and leave the largest in the last position. The algorithm compares whether the first component is greater than the second; if the condition is true, we swap the contents of the components. Suppose the following values are entered from the keyboard: 1200, 750, 820, 550, 490. In this example: is 1200 greater than 750? Yes, so we swap the content of component 0 with that of component 1. Then we compare the content of component 1 with that of component 2: is 1200 greater than 820? Yes, so we swap. If there are 5 components, 4 comparisons must be made, which is why the for loop repeats 4 times. Generalizing: if the list has N components, N-1 comparisons are needed. We can see how the largest value in the list sinks down to the last component. We use an auxiliary variable (aux) for the swapping process:
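 For comparison, the built-in sort mentioned above (an added note); the exercise below implements the swapping logic by hand:
```python
# Built-in sorting for reference, using the example salaries from the text.
sueldos = [1200, 750, 820, 550, 490]
print(sorted(sueldos))           # new list, ascending
sueldos.sort(reverse=True)       # in-place, descending
print(sueldos)
```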
###Code
sueldos=[]
for x in range(8):
valor=int(input("Ingrese sueldo:"))
sueldos.append(valor)
print("Lista sin ordenar")
print(sueldos)
for x in range(7):
if sueldos[x]>sueldos[x+1]:
aux=sueldos[x]
sueldos[x]=sueldos[x+1]
sueldos[x+1]=aux
print(sueldos)
else:
print(sueldos)
print("Lista con el último elemento ordenado")
print(sueldos)
###Output
_____no_output_____
###Markdown
 **SORTING ALGORITHMS** In computing and mathematics, a sorting algorithm is an algorithm that puts the elements of a list or vector into a sequence given by an order relation; that is, the output must be a permutation (or reordering) of the input that satisfies the given order relation. The most common order relations are numeric order and lexicographic order. Efficient sorting is important for optimizing the use of other algorithms (such as search and merge algorithms) that require sorted lists to run quickly. It is also useful for putting data into canonical form and for producing human-readable output. https://es.wikipedia.org/wiki/Algoritmo_de_ordenamiento#Clasificaci%C3%B3n **Bubble sort** Bubble sort is a simple sorting algorithm. It works by comparing each element of the list to be sorted with the next one, swapping their positions if they are in the wrong order. The whole list must be traversed several times until no more swaps are needed, which means the list is sorted. The algorithm gets its name from the way elements rise through the list during the swaps, like little "bubbles". It is also known as the direct exchange method. (A runnable sketch follows below.) **Insertion sort** Initially there is a single element, which is obviously a sorted set. Then, when there are **k** elements sorted from smallest to largest, element **k + 1** is taken and compared with all the already sorted elements, stopping when a smaller element is found (all larger elements have been shifted one position to the right) or when no more elements remain (all elements were shifted and this one is the smallest). At that point element **k + 1** is inserted, and the remaining elements must be shifted.
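 Since the code cell below implements insertion sort only, here is the minimal bubble sort sketch promised above (added for illustration):
```python
# Bubble sort: repeatedly swap adjacent elements that are out of order.
def bubble_sort(lista):
    n = len(lista)
    for i in range(n - 1):
        for j in range(n - 1 - i):          # the tail is already in place
            if lista[j] > lista[j + 1]:
                lista[j], lista[j + 1] = lista[j + 1], lista[j]
    return lista

print(bubble_sort([5, 1, 4, 2, 8]))         # [1, 2, 4, 5, 8]
```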
###Code
unaLista = []
for x in range(10):
valor=int(input("Ingrese valor:"))
unaLista.append(valor)
print("lista original ..",unaLista)
for indice in range(1,len(unaLista)):
valorActual = unaLista[indice]
posicion = indice
while posicion>0 and unaLista[posicion-1]>valorActual:
unaLista[posicion]=unaLista[posicion-1]
posicion = posicion-1
unaLista[posicion]=valorActual
print(unaLista, " ", indice)
###Output
_____no_output_____
###Markdown
 Exercise 1: email registration
###Code
#random() 方法返回随机生成的一个实数,它在[0,1)范围内。
import random
class Regist (object):
def zc(self):
zc= input('注册免费邮箱or注册VIP邮箱')
print(zc)
def address(self):
address= input('邮件地址')
print("您输入的邮箱是:%s" % address,"@163.com")
self.mma()
def mma(self):
for _ in range(2):
password_1=input('密码')
password_2=input('确认密码')
if password_1==password_2:
print( )
#self.yzm()
break
else:
print('两次密码不一致请从新输入!!!')
else:
print('你可能是一个机器人')
#连接到注册对话
self.zc()
def yzm(self):
for i in range(4):
number=random.randrange(1000,9999)
print('验证码是: %d'%number)
number_2=input('输入验证码:')
if number == int(number_2):
print('注册成功')
break
#self.phone()
else:
print('验证码错误')
else:
print('机器人')
self.zc()
def phone(self):
pass
ss=Regist()
ss.zc()
ss.address()
ss.mma()
ss.yzm()
###Output
_____no_output_____
###Markdown
 Honor of Kings (王者荣耀) with pictures added
###Code
#import cv
import time
import numpy as np
class wz(object):
def __init__(self,entry):
self.entry=entry
def jm(self):
self.entry= input('对战模式:人机对战or多人对战')
print(self.entry)
def rw(self):
figure= input('请选择人物:典韦,赵云,鲁班')
# im = cv2.imread(luban.jpg)
if figure== '典韦':
print(figure,":战力--1500,防御--1647")
# layout.addWidget(QLabel(self,pixmap=QPixmap("C:/Users/13947/Pictures/Saved Pictures/luban.jpg")))
elif figure == '赵云':
print(figure,":战力--1700,防御--1541")
else:
print(figure,":战力--253,防御--876")
def sj(self):
res =np.random.choice(['典韦','赵云','鲁班'])
if res== '典韦':
print(res,":战力--1500,防御--1647")
elif res == '赵云':
print (res,":战力--1700,防御--1541")
else:
print(res,":战力--253,防御--876")
def start(self):
b=input("请输入开始")
print('进入加载.......')
def s(self):
for i in range(1,3):
time.sleep(1)
print('%s%d%%\r'%('#'*i,i),end="",flush='true')
WZ=wz('人机')
WZ.jm()
WZ.rw()
WZ.sj()
WZ.start()
WZ.s()
print('加载失败,不建议玩,学习吧')
###Output
对战模式:人机对战or多人对战ee
ee
请选择人物:典韦,赵云,鲁班ee
ee :战力--253,防御--876
赵云 :战力--1700,防御--1541
请输入开始ee
进入加载.......
加载失败,不建议玩,学习吧
###Markdown
 Homework 1. (Rectangle class) Design a class named Rectangle to represent a rectangle
###Code
class Rectangle(object):
def __init__(self,width,height):
self.width=width
self.height=height
def getArea(self):
Area=self.height*self.width
print("这个矩形的面积为:",Area)
def getPerimeter(self):
Perimeter=2*self.height+2*self.width
print("这个矩形的周长为:",Perimeter)
A= Rectangle(11,10)
A.getArea()
A.getPerimeter()
###Output
这个矩形的面积为: 110
这个矩形的周长为: 42
###Markdown
 2. (Account class) Design a class named Account
###Code
class Account(object):
def __init__(self):
self.InterestRate=0
self.annuallnterestRate=100
self.Interest=0
def information(self,id,yu_e):
self.id=id
self.yu_e=yu_e
def getMonthlyInterestRate(self,InterestRate):
self.InterestRate=InterestRate
def getMonthlyInterest(self):
A=self.annuallnterestRate*self.InterestRate
self.Interest=A
def withdraw(self):
print("请输入取钱金额")
res = input("输入")
self.annuallnterestRate = self.annuallnterestRate - int(res)
print("您成功取出",res,"元")
def deposit(self):
print("请输入存钱金额")
res1=input("输入")
self.annuallnterestRate=self.annuallnterestRate+int(res1)
print("您成功存入",res1,"元")
print(self.id,"您账户余额为:",self.annuallnterestRate,"利率为:",self.InterestRate,"利息为",self.Interest)
E = Account()
E.information(1122,20000)
E.getMonthlyInterestRate(0.045)
E.getMonthlyInterest()
E.withdraw()
E.deposit()
###Output
请输入取钱金额
输入2500
您成功取出 2500 元
请输入存钱金额
输入3000
您成功存入 3000 元
1122 您账户余额为: 600 利率为: 0.045 利息为 4.5
###Markdown
 3. (Fan class) Design a class named Fan to represent a fan
###Code
class Fan(object):
    def fan(self, speed=2, radius=5.0, color='blue', on=False):  # order matches the calls and output below
self.speed=speed
self.color=color
self.radius=radius
self.on=on
def function(self):
if self.speed==1:
speed_="SLOW"
elif self.speed==2:
speed_="MEDIUM"
else:
speed_="FAST"
print(speed_,self.radius,self.color,self.on)
e = Fan()
e.fan(3,10.0,"yellow",True)
e.function()
e.fan(2,5.0,"blue",False)
e.function()
###Output
FAST 10.0 yellow True
MEDIUM 5.0 blue False
###Markdown
 4. (Geometry: regular n-sided polygon) Design a class named RegularPolygon
###Code
import math
class RegularPolygon(object):
def __init__(self,n,side,x,y):
self.n=n
self.side=side
self.x=x
self.y=y
def getPerimenter(self):
print(self.n*self.side)
def getArea(self):
        Area = self.n*self.side**2/(4*math.tan(math.pi/self.n))  # regular polygon area: n*s^2 / (4*tan(pi/n))
print(Area)
e = RegularPolygon(10,4,5.6,7.8)
e.getPerimenter()
e.getArea()
###Output
40
30.776835371752536
###Markdown
 5. (Algebra: 2x2 linear equations) Design a class named LinearEquation
###Code
class LinearEquation(object):
def __init__(self,a,b,c,d,e,f):
self.__a=a
self.__b=b
self.__c=c
self.__d=d
self.__e=e
self.__f=f
def isSolvable(self):
z=self.__a*self.__d-self.__b*self.__c
if z != 0:
self.z=True
else:
self.z=False
print('这个方程无解')
def get(self):
self.x=(self.__e*self.__d-self.__b*self.__f)/(self.__a*self.__d-self.__b*self.__c)
self.y=(self.__a*self.__f-self.__e*self.__c)/(self.__a*self.__d-self.__b*self.__c)
def getX(self):
self.isSolvable()
if self.z == True:
self.get()
print(self.x)
def getY(self):
self.isSolvable()
if self.z == True:
self.get()
print(self.y)
e=LinearEquation(2,2,3,33,5,6)
e.getX()
e.getY()
###Output
2.55
-0.05
###Markdown
 6. (Geometry: intersecting lines)
###Code
class LinearEquation(object):
    """Problem 6: intersection of the line through (x1, y1)-(x2, y2)
    and the line through (x3, y3)-(x4, y4)."""
    def __init__(self, x1, y1, x2, y2, x3, y3, x4, y4):
        self.a1 = y2 - y1
        self.b1 = x1 - x2
        self.c1 = self.a1 * x1 + self.b1 * y1
        self.a2 = y4 - y3
        self.b2 = x3 - x4
        self.c2 = self.a2 * x3 + self.b2 * y3
    def jd(self):
        det = self.a1 * self.b2 - self.a2 * self.b1
        if det == 0:
            print('The two lines are parallel')
            return None
        x = (self.c1 * self.b2 - self.c2 * self.b1) / det
        y = (self.a1 * self.c2 - self.a2 * self.c1) / det
        print('Intersection point:', (x, y))
        return x, y
# lines through (2, 2)-(0, 0) and (0, 2)-(2, 0); they cross at (1, 1)
a = LinearEquation(2, 2, 0, 0, 0, 2, 2, 0)
a.jd()
###Output
_____no_output_____
###Markdown
 7. (Algebra: 2x2 linear equations) Design a class named LinearEquation
###Code
class LinearEquation(object):
def __init__(self,a,b,c,d,e,f):
self.__a=a
self.__b=b
self.__c=c
self.__d=d
self.__e=e
self.__f=f
def isSolvable(self):
z=self.__a*self.__d-self.__b*self.__c
if z != 0:
self.z=True
else:
self.z=False
print('这个方程无解')
def get(self):
self.x=(self.__e*self.__d-self.__b*self.__f)/(self.__a*self.__d-self.__b*self.__c)
self.y=(self.__a*self.__f-self.__e*self.__c)/(self.__a*self.__d-self.__b*self.__c)
def getX(self):
self.isSolvable()
if self.z == True:
self.get()
print(self.x)
def getY(self):
self.isSolvable()
if self.z == True:
self.get()
print(self.y)
e=LinearEquation(2,2,3,3,5,6)
e.getX()
e.getY()
###Output
这个方程无解
这个方程无解
###Markdown
 Encapsulate 4 features with a class, using wxpy to play with WeChat from Python: 1. auto-reply to a specific friend with text and a picture; 2. wrap up a statistic of WeChat friends by gender: the total count and the proportions of men and women; 3. count which province each of your friends belongs to and draw a histogram (a hedged sketch of this one is added below).
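 Feature 3 (the province histogram) is not implemented in the cell below, so a hedged sketch is added here. It assumes that wxpy `Friend` objects expose a `province` attribute and that `bot` is the logged-in `Bot` instance; treat both as assumptions:
```python
# Hedged sketch of feature 3: count friends per province and draw a bar chart.
from collections import Counter
import matplotlib.pyplot as plt

def plot_provinces(bot):
    provinces = [f.province or 'unknown' for f in bot.friends()]  # assumed attribute
    counts = Counter(provinces)
    plt.bar(range(len(counts)), list(counts.values()))
    plt.xticks(range(len(counts)), list(counts.keys()), rotation=90)
    plt.ylabel('number of friends')
    plt.show()
```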
###Code
from wxpy import *
bot = Bot()
import requests
class wefriend(object):
def get_msg(self):
url = 'C:/Users/13947/Pictures/Camera Roll/love.jpeg'
self.url=url
def Special(self):
my_friend = bot.friends().search('小猪猪')[0] #找到好友
my_friend.send('这个男人帅呆了') #给好友发送消息
print('发送成功!')
self.get_msg()
my_friend.send_image(self.url)
print('ok')
def tongji(self):
my_friend = bot.friends()
print(my_friend)
print('共有',len(my_friend),'个人')
my_friend_man = bot.friends().search(sex=1)
print('男:',len(my_friend_man),'人')
my_friend_woman = bot.friends().search(sex=2)
print('女:',len(my_friend_woman),'人')
my_friend_weizhi = len(my_friend)-len(my_friend_man)-len(my_friend_woman)
print('人妖:',my_friend_weizhi)
print('男性比例:',len(my_friend_man)/len(my_friend))
print('女性比例:',len(my_friend_woman)/len(my_friend))
s=wefriend()
s.get_msg()
s.Special()
s.tongji()
###Output
发送成功!
ok
[<Friend: Q🌸>, <Friend: 娘亲>, <Friend: 罗轩美妆(长年招收学生)招美容师>, <Friend: 大舅>, <Friend: 李妍จุ๊บ>, <Friend: 门口早餐>, <Friend: 张虹姐>, <Friend: 老舅妈>, <Friend: 创一电商客服>, <Friend: 海拉尔丽涛化妆品>, <Friend: 化学补课班>, <Friend: 高中常瑞>, <Friend: 高中陈曦>, <Friend: 大学代行强>, <Friend: 假日高巍>, <Friend: 假日郭晓玲>, <Friend: 高中韩壮>, <Friend: 大学贺娜>, <Friend: 胡旺>, <Friend: 高中马慧>, <Friend: 初中戚慧杰>, <Friend: 大学王影>, <Friend: 高中王子博>, <Friend: 高中杨梁旭>, <Friend: 大学要志敏>, <Friend: 高中叶波>, <Friend: 大学张诗童 >, <Friend: 大学张晨照>, <Friend: 初中赵佳>, <Friend: 大学李媛媛>, <Friend: 大学薛东晶>, <Friend: 大学郭志颍>, <Friend: 大学张佳彤>, <Friend: 丁利军>, <Friend: 大学梁云鹏>, <Friend: 初中高晶>, <Friend: 大学秦聪妮>, <Friend: 初中卜佳琪>, <Friend: 高中王伟男>, <Friend: 大学刘旭哲>, <Friend: 初中晓荣>, <Friend: 大学刘静 >, <Friend: 大学王金红>, <Friend: 高中程武飞>, <Friend: 张娟老师>, <Friend: 大学曹阳>, <Friend: 大学丛婷婷>, <Friend: 一刻达快递代取>, <Friend: 大学邢显昭>, <Friend: 科二教练>, <Friend: 大学冯小敏>, <Friend: 老师王忠媛>, <Friend: 假日阿根>, <Friend: 大学罗广峰>, <Friend: 大学袁婷>, <Friend: 老师范安荣>, <Friend: 初中赵婧姝>, <Friend: 老舅>, <Friend: 大学张龙宇>, <Friend: 初中张雨>, <Friend: 婷婷姐>, <Friend: 老舅妈>, <Friend: 大学王涵>, <Friend: 大学田成>, <Friend: 高中郑思宇>, <Friend: 大学王春宇>, <Friend: 邢正如>, <Friend: 巨头女孩<span class="emoji emoji1f646"></span> >, <Friend: 初中大王悦>, <Friend: 大学赵美娟>, <Friend: 大学张晨照>, <Friend: 小可爱<span class="emoji emoji1f338"></span> <span class="emoji emoji1f60d"></span> >, <Friend: 小超超<span class="emoji emoji1f467"></span> >, <Friend: 小垃圾<span class="emoji emoji1f47f"></span> >, <Friend: 大学赵昀>, <Friend: 姥姥>, <Friend: 大学李东升>, <Friend: 大学金新华>, <Friend: 大学孟祥伟>, <Friend: 刘琳>, <Friend: 张丽芳>, <Friend: 父皇大大>, <Friend: 高中杨丹>, <Friend: 小舅>, <Friend: 中公教育邱蓉蓉>, <Friend: 明媚温莎莎>, <Friend: 大学梅林>, <Friend: 高中姜琪>, <Friend: 大学秋艺霖儿>, <Friend: 刘玉莹>, <Friend: 高中萨姆>, <Friend: 大学王润泽>, <Friend: 大学杨倩文>, <Friend: 小猪猪>, <Friend: 专享彩妆顾问13sdj>, <Friend: 大学腻.>, <Friend: 中公马老师>, <Friend: 大学张雅倩>, <Friend: 季心如>, <Friend: 高中赵静怡>, <Friend: 高中范含金>, <Friend: 驾校王冀真>, <Friend: 大学杨楠>, <Friend: 大学张芮>, <Friend: 高中高天皓 >, <Friend: 大学张颖>, <Friend: 徳👁邦18004701937>, <Friend: 假日葛爽>, <Friend: 高中张宇>, <Friend: 大学周轩扬>]
共有 110 个人
男: 34 人
女: 72 人
人妖: 4
男性比例: 0.3090909090909091
女性比例: 0.6545454545454545
|
notebooks/wythoff_exp64.ipynb | ###Markdown
Analysis - exp64- Control for opt calculations.
###Code
import os
import csv
import numpy as np
import torch as th
import pandas as pd
from glob import glob
from pprint import pprint
import matplotlib
import matplotlib.pyplot as plt
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import seaborn as sns
sns.set(font_scale=1.5)
sns.set_style('ticks')
matplotlib.rcParams.update({'font.size': 16})
matplotlib.rc('axes', titlesize=16)
from notebook_helpers import load_params
from notebook_helpers import load_monitored
from notebook_helpers import join_monitored
from notebook_helpers import score_summary
def load_data(path, model, run_index=None):
runs = range(run_index[0], run_index[1]+1)
exps = []
for r in runs:
file = os.path.join(path, f"run_{model}_{r}_monitor.csv".format(int(r)))
try:
mon = load_monitored(file)
except FileNotFoundError:
mon = None
exps.append(mon)
return exps
def load_hp(name):
return pd.read_csv(name, index_col=False)
def find_best(hp, data, window, score="score"):
scores = []
    for r, mon in enumerate(data):  # iterate over the runs passed in, not the module-level exp_62
if mon is not None:
full = mon[score]
# print(len(full))
selected = full[window[0]:window[1]]
# print(selected)
x = np.mean(selected)
# print(x)
scores.append(x)
else:
scores.append(np.nan)
# print(scores)
best = np.nanargmax(scores)
# print(best)
return hp[best:best+1]
###Output
_____no_output_____
###Markdown
Load data
###Code
path = "/Users/qualia/Code/azad/data/wythoff/exp64/"
hp_64 = load_hp(os.path.join(path,"grid.csv"))
models = ["DQN_xy4"]
index = (0, 50)
hp_64[0:1]
###Output
_____no_output_____
###Markdown
 Plots: all-parameter summary. How does it look overall? Timecourse
###Code
for model in models:
exp_64 = load_data(path, model, run_index=index)
plt.figure(figsize=(6, 3))
for r, mon in enumerate(exp_64):
if mon is not None:
_ = plt.plot(mon['episode'], mon['score'], color='black', alpha=0.05)
# _ = plt.ylim(0, 1)
_ = plt.title(model)
_ = plt.ylabel("Optimal score")
_ = plt.xlabel("Episode")
sns.despine()
###Output
_____no_output_____
###Markdown
Initial timecourse
###Code
stop = 100 # Plot episodes up until this value
for model in models:
exp_62 = load_data(path, model, run_index=index)
plt.figure(figsize=(6, 3))
for r, mon in enumerate(exp_62):
if mon is not None:
t = np.asarray(mon['episode'])
x = np.asarray(mon['score'])
avg = np.mean(x)
m = t <= stop
_ = plt.plot(t[m], x[m], color='black', alpha=0.05)
_ = plt.title(model)
_ = plt.ylabel("Optimal score")
_ = plt.xlabel("Episode")
sns.despine()
###Output
_____no_output_____
###Markdown
 - There is real progress this time, for the first time with DQN on Wythoff's. But why does it stop at around 0.3 or so of optimal? - If it gets this far, it should be able to reach the best. - Code problem? - Player interaction problem? Find the best HP
###Code
for model in models:
exp_64 = load_data(path, model, run_index=index)
best_hp = find_best(hp_64, exp_64, (450,500))
print(f"{model}:\n{best_hp}\n---")
###Output
DQN_xy4:
row_code device_code epsilon learning_rate
3 3 3 0.1 0.233333
---
|
_doc/notebooks/progf/recursive_reducers.ipynb | ###Markdown
 Recursive reducers I like to use a piece of terminology I came across at Microsoft to illustrate a way of writing the same computation that affects how easily it can be distributed: use counts rather than means.
###Code
from jyquickhelper import add_notebook_menu
add_notebook_menu()
###Output
_____no_output_____
###Markdown
 The notebook uses functions developed to illustrate the concepts; they are clearer than they are efficient. Stream: MapReduce is applied to very large datasets. From a mathematical point of view, we write algorithms that apply to infinite datasets, or rather datasets whose size is not known. To distinguish them from ordinary datasets, they are called *streams*. As an aside, since they are written to be parallelized, these jobs have the particularity of not preserving the order in which the data is processed. This is especially true when the dataset is split across several hard drives: it is impossible to pick which chunk comes first. Mapper: a *mapper* applies the same processing to each observation of the *stream* independently.
###Code
ens = [('a', 1), ('b', 4), ('a', 6), ('a', 3)]
from sparkouille.fctmr import mapper
stream1 = mapper(lambda el: (el[0], el[1]+1), ens)
stream1
###Output
_____no_output_____
###Markdown
 The result does not exist until we explicitly ask for the computation to be done. We have to iterate over the result.
###Code
list(stream1)
###Output
_____no_output_____
###Markdown
 And it can only be iterated over once:
###Code
list(stream1)
###Output
_____no_output_____
###Markdown
 Cost of the first element: when there is an infinite number of elements to process, it is important to be able to look at what a job produces on the first few elements. With a mapper, this corresponds to the cost of a single map.
###Code
from sparkouille.fctmr import take
first = lambda it: take(it, count=1)
big_ens = ens * 100
%timeit -n 1000 list(mapper(lambda el: (el[0], el[1]+1), big_ens))
%timeit -n 1000 first(mapper(lambda el: (el[0], el[1]+1), big_ens))
###Output
2.46 µs ± 451 ns per loop (mean ± std. dev. of 7 runs, 1000 loops each)
###Markdown
 Reducer: a true *reducer* reduces the elements of a set; it does not redistribute the data. In practice, we rarely reduce a set that has not first been distributed, as with a *groupby*. Nor do we always reduce a set to a single row. Streaming operations are stacked, and evaluation is likewise postponed. The distribution is done according to a key that is hashed (see [Hash et distribution](http://www.xavierdupre.fr/app/ensae_teaching_cs/helpsphinx/notebooks/hash_distribution.html)). The first lambda function describes what that key is, in this case the first element of the pair.
###Code
from sparkouille.fctmr import reducer
stream1 = mapper(lambda el: (el[0], el[1]+1), ens)
stream2 = reducer(lambda el: el[0], stream1, asiter=False)
stream2
list(stream2)
###Output
_____no_output_____
###Markdown
 In this example, the *reducer* reduces each group to a single result, which is the set of its elements. What is the cost of the first element...
###Code
def test2(ens, one=False):
stream1 = mapper(lambda el: (el[0], el[1]+1), ens)
stream2 = reducer(lambda el: el[0], stream1, asiter=False)
return list(stream2) if one else first(stream2)
%timeit -n 1000 test2(big_ens)
%timeit -n 1000 test2(big_ens, one=True)
###Output
720 µs ± 31.4 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
###Markdown
 It is shorter, but not significantly shorter. It corresponds to the cost of sorting all the observations plus the cost of building the first group. Reducer and sorting: a stream is infinite in theory. In practice it is finite, but we do not know whether one or several whole groups would fit in memory. One approach is to limit the data held in memory to a single group, and for that the data must first be sorted by key. This is not strictly necessary, but in the worst case it is a good option. We could have a stream like the following:
###Code
pas_cool = [(chr(int(c) + 96), i) for i, c in enumerate(str(11111111 ** 2))]
pas_cool
###Output
_____no_output_____
###Markdown
 Group *a* appears both at the beginning and at the end; if we group in memory, the group associated with *a* has to stay in memory from start to finish. We never know whether a group might reappear later. By sorting, we are sure. Another map: we add a final map that sums the elements of each group.
###Code
def sum_gr(key_gr):
key, gr = key_gr
return key, sum(e[1] for e in gr)
stream1 = mapper(lambda el: (el[0], el[1]+1), ens)
stream2 = reducer(lambda el: el[0], stream1)
stream3 = map(sum_gr, stream2)
stream3
list(stream3)
###Output
_____no_output_____
###Markdown
 Combiner or join: a *combiner* or *join* merges two datasets that share a common key.
###Code
from sparkouille.fctmr import combiner
stream1 = mapper(lambda el: (el[0], el[1]+1), ens)
stream2 = reducer(lambda el: el[0], stream1)
stream3 = map(sum_gr, stream2)
stream4 = mapper(lambda el: (el[0], el[1]+10), pas_cool)
comb = combiner(lambda el: el[0], stream3, lambda el: el[0], stream4)
comb
list(comb)
###Output
_____no_output_____
###Markdown
 The cost of the first element is a bit harder to infer; it depends a lot on the data.
###Code
def job(ens, ens2, one=False, sens=True):
stream1 = mapper(lambda el: (el[0], el[1]+1), ens)
stream2 = reducer(lambda el: el[0], stream1)
stream3 = map(sum_gr, stream2)
stream4 = mapper(lambda el: (el[0], el[1]+10), ens2)
if sens:
comb = combiner(lambda el: el[0], stream3, lambda el: el[0], stream4)
else:
comb = combiner(lambda el: el[0], stream4, lambda el: el[0], stream3)
return list(comb) if one else first(comb)
%timeit -n 1000 job(big_ens, pas_cool)
%timeit -n 1000 job(big_ens, pas_cool, sens=False)
%timeit -n 1000 job(big_ens, pas_cool, one=True)
%timeit -n 1000 job(big_ens, pas_cool, one=True, sens=False)
###Output
389 µs ± 10.7 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
###Markdown
 There are different ways to code a *combiner*; one of them is to reduce each of the two streams and then take the cross product of each assembled group. Recursive reducers: the name is close to an abuse of language; let's say it reduces the dependence on sorting. An example.
###Code
def sum_gr(key_gr):
key, gr = key_gr
return key, sum(e[1] for e in gr)
def job_recursif(ens):
stream2 = reducer(lambda el: el[0], ens)
stream3 = map(sum_gr, stream2)
return list(stream3)
job_recursif(ens)
###Output
_____no_output_____
###Markdown
 And now, we split it in two:
###Code
n = len(ens) // 2
job_recursif(ens[:n])
job_recursif(ens[n:])
###Output
_____no_output_____
###Markdown
 And now:
###Code
job_recursif( job_recursif(ens[:n]) + job_recursif(ens[n:]))
###Output
_____no_output_____
###Markdown
 The job written this way is associative, in a sense. This leaves more freedom for distribution, because identical keys can now be spread across different machines and the *reducer* re-applied to the results of the first pass. It is all the more effective when the *reducer* shrinks the data a lot. It remains to look at the case of a **non-recursive** *reducer*.
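 The cells below show why a plain mean breaks this property; the standard fix, hinted at in the introduction, is to carry (sum, count) pairs instead of means. A sketch is added here for reference, reusing `reducer`, `ens` and `n` from the cells above:
```python
# Added sketch: carry (sum, count) pairs so the mean reduction stays associative.
def sum_count_gr(key_gr):
    key, gr = key_gr
    s, c = 0.0, 0
    for e in gr:
        v = e[1]
        if isinstance(v, tuple):      # element already reduced once: (sum, count)
            s += v[0]
            c += v[1]
        else:                         # raw element: a plain number
            s += v
            c += 1
    return key, (s, c)

def job_mean_recursif(ens):
    stream2 = reducer(lambda el: el[0], ens)
    return list(map(sum_count_gr, stream2))

partial = job_mean_recursif(ens[:n]) + job_mean_recursif(ens[n:])
combined = job_mean_recursif(partial)
print([(k, s / c) for k, (s, c) in combined])   # same means, computed recursively
```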
###Code
def mean(ens):
s = 0.
for i, e in enumerate(ens):
s += e
return s / (i + 1)
def mean_gr(key_gr):
key, gr = key_gr
return key, mean(e[1] for e in gr)
def job_non_recursif(ens):
stream2 = reducer(lambda el: el[0], ens)
stream3 = map(mean_gr, stream2)
return list(stream3)
job_non_recursif(ens)
n = len(ens) // 2
job_non_recursif(ens[:n])
job_non_recursif(ens[n:])
job_non_recursif( job_non_recursif(ens[:n]) + job_non_recursif(ens[n:]))
###Output
_____no_output_____ |
PY0101EN_4_1_ReadFile.ipynb | ###Markdown
 Reading Files in Python Welcome! This notebook will teach you how to read text files in the Python programming language. By the end of this lab, you'll know how to read text files. Table of Contents: Download Data; Reading Text Files; A Better Way to Open a File. Estimated time needed: 40 min. Download Data
###Code
# Download Example file
!mkdir -p /resources/data
!wget -O /resources/data/Example1.txt https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/labs/example1.txt
###Output
--2019-07-16 13:19:57-- https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/labs/example1.txt
Resolving s3-api.us-geo.objectstorage.softlayer.net (s3-api.us-geo.objectstorage.softlayer.net)... 67.228.254.193
Connecting to s3-api.us-geo.objectstorage.softlayer.net (s3-api.us-geo.objectstorage.softlayer.net)|67.228.254.193|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 45 [text/plain]
Saving to: ‘/resources/data/Example1.txt’
/resource 0%[ ] 0 --.-KB/s
/resources/data/Exa 100%[===================>] 45 --.-KB/s in 0s
2019-07-16 13:19:57 (7.64 MB/s) - ‘/resources/data/Example1.txt’ saved [45/45]
###Markdown
 Reading Text Files One way to read or write a file in Python is to use the built-in open function. The open function provides a File object that contains the methods and attributes you need in order to read, save, and manipulate the file. In this notebook, we will only cover .txt files. The first parameter you need is the file path and the file name. The mode argument is optional and the default value is r. In this notebook we only cover two modes: r (read mode, for reading files) and w (write mode, for writing files). For the next example, we will use the text file Example1.txt. We read the file:
###Code
# Read the Example1.txt
example1 = "/resources/data/Example1.txt"
file1 = open(example1, "r")
###Output
_____no_output_____
###Markdown
We can view the attributes of the file. The name of the file:
###Code
# Print the path of file
file1.name
###Output
_____no_output_____
###Markdown
The mode the file object is in:
###Code
# Print the mode of file, either 'r' or 'w'
file1.mode
###Output
_____no_output_____
###Markdown
We can read the file and assign it to a variable :
###Code
# Read the file
FileContent = file1.read()
FileContent
###Output
_____no_output_____
###Markdown
 The \n means that there is a new line. We can print the file:
###Code
# Print the file with '\n' as a new line
print(FileContent)
###Output
This is line 1
This is line 2
This is line 3
###Markdown
The file is of type string:
###Code
# Type of file content
type(FileContent)
###Output
_____no_output_____
###Markdown
We must close the file object:
###Code
# Close file after finish
file1.close()
###Output
_____no_output_____
###Markdown
A Better Way to Open a File Using the with statement is better practice, it automatically closes the file even if the code encounters an exception. The code will run everything in the indent block then close the file object.
###Code
# Open file using with
with open(example1, "r") as file1:
FileContent = file1.read()
print(FileContent)
###Output
This is line 1
This is line 2
This is line 3
###Markdown
The file object is closed, you can verify it by running the following cell:
###Code
# Verify if the file is closed
file1.closed
###Output
_____no_output_____
###Markdown
We can see the info in the file:
###Code
# See the content of file
print(FileContent)
###Output
This is line 1
This is line 2
This is line 3
###Markdown
 The syntax is a little confusing as the file object comes after the as statement. We also don't explicitly close the file. We don't have to read the entire file; for example, we can read the first 4 characters by passing four as a parameter to the method **.read()**:
###Code
# Read first four characters
with open(example1, "r") as file1:
print(file1.read(4))
###Output
This
###Markdown
 Once the method .read(4) is called, the first 4 characters are read. If we call the method again, the next 4 characters are read. The output of the following cell demonstrates the process for different inputs to the method read():
###Code
# Read certain amount of characters
with open(example1, "r") as file1:
print(file1.read(4))
print(file1.read(4))
print(file1.read(7))
print(file1.read(15))
###Output
This
is
line 1
This is line 2
###Markdown
The process is illustrated in the below figure, and each color represents the part of the file read after the method read() is called: Here is an example using the same file, but instead we read 16, 5, and then 9 characters at a time:
###Code
# Read certain amount of characters
with open(example1, "r") as file1:
print(file1.read(16))
print(file1.read(5))
print(file1.read(9))
###Output
This is line 1
This
is line 2
###Markdown
We can also read one line of the file at a time using the method readline():
###Code
# Read one line
with open(example1, "r") as file1:
print("first line: " + file1.readline())
###Output
first line: This is line 1
###Markdown
We can use a loop to iterate through each line:
###Code
# Iterate through the lines
with open(example1,"r") as file1:
i = 0;
for line in file1:
print("Iteration", str(i), ": ", line)
i = i + 1;
###Output
Iteration 0 : This is line 1
Iteration 1 : This is line 2
Iteration 2 : This is line 3
###Markdown
We can use the method readlines() to save the text file to a list:
###Code
# Read all lines and save as a list
with open(example1, "r") as file1:
FileasList = file1.readlines()
###Output
_____no_output_____
###Markdown
Each element of the list corresponds to a line of text:
###Code
# Print the first line
FileasList[0]
# Print the second line
FileasList[1]
# Print the third line
FileasList[2]
###Output
_____no_output_____ |
jupyter_notebooks/pandas/mastering_data_analysis/11. Visualization/05. Plotting with pandas.ipynb | ###Markdown
 Plotting with pandas. In this chapter, we learn how to plot directly from pandas DataFrames or Series. Internally, pandas uses matplotlib to do all of its plotting. Let's begin by reading in the stocks dataset.
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
stocks = pd.read_csv('../data/stocks/stocks10.csv', index_col='date', parse_dates=['date'])
stocks.head(3)
###Output
_____no_output_____
###Markdown
 Plotting a Series: pandas uses the Series index as the x-values and the values as y-values. By default, pandas creates a line plot. Let's plot Amazon's closing price for the last 5 years.
###Code
amzn = stocks['AMZN']
amzn.head(3)
amzn.plot();
###Output
_____no_output_____
###Markdown
Get four years of data from Apple, Facebook, Schlumberger and Tesla beginning in 2014. Plot many Series one at a timeAll calls to plot that happen in the same cell will be drawn on the same Axes unless otherwise specified. Let's plot several Series at the same time.
###Code
stocks['AMZN'].plot()
stocks['AAPL'].plot()
stocks['FB'].plot()
stocks['SLB'].plot()
stocks['TSLA'].plot();
###Output
_____no_output_____
###Markdown
 Plot all at once from the DataFrame: instead of individually plotting Series, we can plot each column in the DataFrame at once with its `plot` method.
###Code
stocks.plot();
###Output
_____no_output_____
###Markdown
 Plotting in pandas is column-based. The most important thing to know about plotting in pandas is that it is **column based**. pandas plots each column, one at a time. It uses the index as the x-values for each column and the values of each column as the y-values. The column names are put in the **legend**. Choosing other types of plots: pandas directly uses Matplotlib for all of its plotting and does not have any plotting capabilities on its own. pandas is simply calling Matplotlib's plotting functions and supplying the arguments for you. pandas provides a small subset of the total available types of plots that matplotlib offers. Use the `kind` parameter to choose one of the following types of plots: `line` (line plot, the default), `bar` (vertical bar plot), `barh` (horizontal bar plot), `hist` (histogram), `box` (boxplot), `kde` (kernel density estimation plot; `density` is an alias), `area` (area plot), `pie` (pie plot), `scatter` (does not plot all columns; you must choose x and y). Histogram of the closing prices of Apple: set the `kind` parameter to the string 'hist' to plot a histogram of closing prices.
###Code
aapl = stocks['AAPL']
aapl.plot(kind='hist');
###Output
_____no_output_____
###Markdown
 Kernel Density Estimate: very similar to a histogram, a kernel density estimate plot (use the string 'kde') estimates the probability density function.
###Code
aapl.plot(kind='kde');
###Output
_____no_output_____
###Markdown
 Additional plotting parameters: to modify plots to your liking, pandas provides several of the same parameters found in matplotlib plotting functions. The most common are listed below: `linestyle` or `ls` (pass one of the strings '--', '-.', '-', ':'); `color` or `c` (a named color string, a hexadecimal string, or an rgb tuple with each number between 0 and 1); `linewidth` or `lw` (controls the thickness of the line, default 1); `alpha` (controls opacity with a number between 0 and 1); `figsize` (a tuple used to control the size of the plot, as (width, height)); `legend` (boolean controlling whether or not to show the legend).
###Code
# Use several of the additional plotting arguemnts
aapl.plot(color="darkblue",
linestyle='--',
figsize=(10, 4),
linewidth=3,
alpha=.7,
legend=True,
title="AAPL Stock Price - Last 5 Years");
###Output
_____no_output_____
###Markdown
 Diamonds dataset: let's read in the diamonds dataset and begin making plots with it.
###Code
diamonds = pd.read_csv('../data/diamonds.csv')
diamonds.head(3)
###Output
_____no_output_____
###Markdown
 Changing the defaults for a scatterplot: the default plot is a line plot and uses the index as the x-axis. Each column of the frame becomes the y-values. This worked well for stock price data where the date was in the index and ordered. For many datasets, you will have to explicitly set the x and y axis variables. Below is a scatterplot comparison of carat vs price.
###Code
diamonds.plot(x='carat', y='price', kind='scatter', figsize=(8, 4));
diamonds.shape
###Output
_____no_output_____
###Markdown
 Sample the data when there are too many points: when an abundance of data is present, sampling a fraction of the data can result in a more readable plot. Here, we sample five percent of the data and change the size of each point with the `s` parameter.
###Code
dia_sample = diamonds.sample(frac=.05)
dia_sample.plot('carat', 'price', kind='scatter', figsize=(8, 4), s=2);
###Output
_____no_output_____
###Markdown
 If you have tidy data, use `groupby`/`pivot_table`, then make a bar plot. If your data is tidy like this diamonds dataset, you will likely need to aggregate it with either a `groupby` or a `pivot_table` to make it work with a bar plot. The index becomes the tick labels for string indexes: pandas nicely integrates the index into plotting by using it as the tick mark labels for many plots.
###Code
cut_count = diamonds['cut'].value_counts()
cut_count
cut_count.plot(kind='bar');
###Output
_____no_output_____
###Markdown
 More than one grouping column in the index: it's possible to make plots with a Series that has a MultiIndex.
###Code
cut_color_count = diamonds.groupby(['cut', 'color']).size()
cut_color_count.head(10)
cut_color_count.plot(kind='bar');
###Output
_____no_output_____
###Markdown
 That's quite ugly. Let's reshape and plot again.
###Code
cut_color_pivot = diamonds.pivot_table(index='cut', columns='color', aggfunc='size')
cut_color_pivot
###Output
_____no_output_____
###Markdown
Plot the whole DataFrame. The index always goes on the x-axis. Each column value is the y-value and the column names are used as labels in the legend.
###Code
cut_color_pivot.plot(kind='bar', figsize=(10, 4));
###Output
_____no_output_____
###Markdown
 Pandas plots return matplotlib objects. After making a plot with pandas, you will see some text output immediately under the cell that was just executed. Pandas is returning the matplotlib Axes object to us. You can assign the result of the `plot` method to a variable.
###Code
ax = cut_color_pivot.plot(kind='bar');
###Output
_____no_output_____
###Markdown
Verify that we have a matplotlib Axes object.
###Code
type(ax)
###Output
_____no_output_____
###Markdown
Get the figure as an attribute of the Axes
###Code
fig = ax.figure
type(fig)
###Output
_____no_output_____
###Markdown
 We can use the figure and axes as normal. Let's set a new title for the Axes and change the size of the Figure.
###Code
ax.set_title('My new title on a Pandas plot')
fig.set_size_inches(10, 4)
fig
###Output
_____no_output_____ |
tutorials/spark-da-cse255/009_6_analyzing_residuals.ipynb | ###Markdown
 Reconstruction using top eigen-vectors, for measurement = {{meas}}. Load the required libraries.
###Code
# Enable automiatic reload of libraries
#%load_ext autoreload
#%autoreload 2 # means that all modules are reloaded before every command
%pylab inline
import numpy as np
import findspark
findspark.init()
import sys
sys.path.append('./lib')
from numpy_pack import packArray,unpackArray
from Eigen_decomp import Eigen_decomp
from YearPlotter import YearPlotter
from recon_plot import recon_plot
from import_modules import import_modules,modules
import_modules(modules)
from ipywidgets import interactive,widgets
from pyspark import SparkContext
#sc.stop()
sc = SparkContext(master="local[3]",pyFiles=['lib/numpy_pack.py','lib/spark_PCA.py','lib/computeStats.py','lib/recon_plot.py','lib/Eigen_decomp.py'])
from pyspark import SparkContext
from pyspark.sql import *
sqlContext = SQLContext(sc)
###Output
_____no_output_____
###Markdown
Read Statistics File
###Code
from pickle import load
#read statistics
filename=data_dir+'/STAT_%s.pickle'%file_index
STAT,STAT_Descriptions = load(open(filename,'rb'))
measurements=STAT.keys()
print 'keys from STAT=',measurements
###Output
keys from STAT= ['TMIN', 'TOBS', 'TMAX', 'SNOW', 'SNWD', 'PRCP']
###Markdown
Read the data file into a Spark DataFrame. We focus on the records for the selected measurement, because the eigen-vectors for them make sense.
###Code
#read data
filename=data_dir+'/decon_%s_%s.parquet'%(file_index,meas)
df_in=sqlContext.read.parquet(filename)
#filter in
df=df_in.filter(df_in.measurement==meas)
df.show(5)
###Output
+-------------------+-------------------+-------------------+---------+--------+--------+---------+-----------+------------------+------------------+------------------+------------------+-----------+---------+------+--------------------+------+
| coeff_1| coeff_2| coeff_3|elevation| label|latitude|longitude|measurement| res_1| res_2| res_3| res_mean| station|total_var|undefs| vector| year|
+-------------------+-------------------+-------------------+---------+--------+--------+---------+-----------+------------------+------------------+------------------+------------------+-----------+---------+------+--------------------+------+
| 46.09987594453083| 295.4098694166302| 49.08482003847539| 405.4|BBSBSBSB| 46.6803| -92.9542| PRCP| 0.996390186885152|0.9280267093587372|0.9232288716146848|0.8414779903375119|USC00219173|1480672.0| 8|[00 00 00 00 00 0...|2004.0|
| 104.42293954739986| -43.2683061107506| -243.8461952513991| 399.3|BBSBSBSB| 47.2436| -93.4975| PRCP|0.9884039497099895|0.9864130102787491|0.9231790724012362|0.8681122693301904|USC00213303|1083193.0| 0|[00 42 00 00 00 0...|1945.0|
|-113.68557343276933|-273.35089027570245|-121.24008389172191| 413.0|BBSBSBSB| 47.8947| -92.5336| PRCP| 0.98733784036864|0.9347167776031898|0.9229950278075122|0.8593586299476422|USC00211771|1463741.0| 34|[00 00 00 00 00 4...|2001.0|
| -99.39182104660928| 173.11891760725112| 172.89791333779644| 343.0|BBSBSBSB| 48.77| -92.62| PRCP|0.9890747826048196|0.9559297959750147|0.9228693813605475|0.8384501250125471|CA006025203|1078435.0| 0|[00 00 00 45 00 4...|1974.0|
| 211.17649421930383| -93.15366008476138| -13.14722565620616| 350.0|BBSBSBSB| 48.68| -93.83| PRCP|0.9330313660996218|0.9229356880698476|0.9228527745783914| 0.851237288986212|CA00602K300| 843468.0| 44|[00 00 00 00 00 0...|2002.0|
+-------------------+-------------------+-------------------+---------+--------+--------+---------+-----------+------------------+------------------+------------------+------------------+-----------+---------+------+--------------------+------+
only showing top 5 rows
###Markdown
Plot Mean and Eigenvecs
###Code
m=meas
fig,axes=plt.subplots(2,1, sharex='col', sharey='row',figsize=(10,6));
k=3
EigVec=np.matrix(STAT[m]['eigvec'][:,:k])
Mean=STAT[m]['Mean']
YearPlotter().plot(Mean,fig,axes[0],label='Mean',title=m+' Mean')
YearPlotter().plot(EigVec,fig,axes[1],title=m+' Eigs',labels=['eig'+str(i+1) for i in range(k)])
###Output
_____no_output_____
###Markdown
plot the percent of residual variance on average
###Code
# x=0 in the graphs below correspond to the fraction of the variance explained by the mean alone
# x=1,2,3,... are the residuals for eig1, eig1+eig2, eig1+eig2+eig3 ...
fig,ax=plt.subplots(1,1);
eigvals=STAT[m]['eigval']; eigvals/=sum(eigvals); cumvar=cumsum(eigvals); cumvar=100*np.insert(cumvar,0,0)
ax.plot(cumvar[:10]);
ax.grid();
ax.set_ylabel('Percent of variance explained')
ax.set_xlabel('number of eigenvectors')
ax.set_title('Percent of variance explained');
###Output
_____no_output_____
###Markdown
How well-explained are the vectors in this collection? To answer this question we extract all of the values of `res_3`, which is the residual variance after the Mean and the first two eigen-vectors have been subtracted out. We rely here on the fact that `df` is already sorted according to `res_3`.
###Code
# A function for plotting the CDF of a given feature
def plot_CDF(df,feat):
rows=df.select(feat).sort(feat).collect()
vals=[r[feat] for r in rows]
P=np.arange(0,1,1./(len(vals)))
while len(vals)< len(P):
vals=[vals[0]]+vals
plot(vals,P)
title('cumulative distribution of '+feat)
ylabel('fraction of instances')
xlabel(feat)
grid()
plot_CDF(df,'res_3')
rows=df.rdd.map(lambda row:(row.station,row.year,unpackArray(row['vector'],np.float16))).collect()
rows[0][:2]
days=set([r[1] for r in rows])
miny=min(days)
maxy=max(days)
record_len=int((maxy-miny+1)*365)
record_len
## combine the measurements for each station into a single long array with an entry for each day of each year
All={} # a dictionary with one numpy array per station, covering every day of the record
i=0
for station,day,vector in rows:
i+=1;
# if i%1000==0: print i,len(All)
if not station in All:
a=np.zeros(record_len)
a.fill(np.nan)
All[station]=a
loc = int((day-miny)*365)
All[station][loc:loc+365]=vector
from datetime import date
d = date(int(miny), month=1, day=1)
start=d.toordinal()
dates=[date.fromordinal(i) for i in range(start,start+record_len)]
for station in All:
print station, np.count_nonzero(~np.isnan(All[station]))
Stations=sorted(All.keys())
A=[]
for station in Stations:
A.append(All[station])
day_station_table=np.hstack([A])
print shape(day_station_table)
def RMS(Mat):
return np.sqrt(np.nanmean(Mat**2))
mean_by_day=np.nanmean(day_station_table,axis=0)
mean_by_station=np.nanmean(day_station_table,axis=1)
tbl_minus_day = day_station_table-mean_by_day
tbl_minus_station = (day_station_table.transpose()-mean_by_station).transpose()
print 'total RMS = ',RMS(day_station_table)
print 'RMS removing mean-by-station= ',RMS(tbl_minus_station)
print 'RMS removing mean-by-day = ',RMS(tbl_minus_day)
RT=day_station_table
F=RT.flatten()
NN=F[~np.isnan(F)]
NN.sort()
P=np.arange(0.,1.,1./len(NN))
plot(NN,P)
grid()
title('CDF of daily rainfall')
xlabel('daily rainfall')
ylabel('cumulative probability')
###Output
_____no_output_____
###Markdown
Conclusions. It is likely to be hard to find correlations between the **amount** of rain on the same day in different stations, because amounts of rain vary a lot even between close locations. It is more reasonable to compare whether or not it rained on the same day in different stations. As we see from the graph above, in our region it rains on about one third of the days. Measuring statistical significance. We want to find a statistical test for rejecting the null hypothesis that says that the rainfall in the two locations is independent. Using the inner product is too noisy, because you multiply the rainfall on the same day in two locations and that product can be very large, leading to a large variance and poor ability to discriminate. An alternative is to ignore the amount of rain, and just ask whether it rained in both locations. We can then compute the probability associated with the number of overlaps under the null hypothesis. Fix two stations. We restrict our attention to the days for which we have measurements for both stations, and define the following notation:* $m$ : the total number of days (for which we have measurements for both stations).* $n_1$ : the number of days that it rained on station 1* $n_2$ : the number of days that it rained on station 2* $l$ : the number of days that it rained on both stations.We want to calculate the probability that the number of overlap days is $l$ given $m,n_1,n_2$. The answer is:$$P = {m \choose l,n_1-l,n_2-l,m-n_1-n_2+l} /{m \choose n_1}{m \choose n_2}$$where$${m \choose l,n_1-l,n_2-l,m-n_1-n_2+l} = \frac{m!}{l! (n_1-l)! (n_2-l)! (m-n_1-n_2+l)!}$$We use the fact that $\Gamma(n+1) = n!$ and denote $G(n) \doteq \log \Gamma(n+1)$, so that$$\log P = \left[G(m) - G(l) -G(n_1-l) -G(n_2-l) -G(m-n_1-n_2+l) \right] - \left[G(m)-G(n_1)-G(m-n_1)\right] - \left[G(m)-G(n_2)-G(m-n_2)\right]$$which slightly simplifies to$$\log P = -G(l) -G(n_1-l) -G(n_2-l) -G(m-n_1-n_2+l) - G(m)+G(n_1)+G(m-n_1) +G(n_2)+G(m-n_2)$$The log probability scales with $m$, the length of the overlap, so to get a per-day significance we consider $\frac{1}{m} \log P$.
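As an added sanity check (not part of the original analysis), the probabilities defined above should sum to 1 over all feasible values of $l$, and they coincide with the hypergeometric distribution. The helper below re-implements the formula independently of the notebook's `LogProb`; the small values of m, n1, n2 are arbitrary:

```python
import numpy as np
from scipy.special import gammaln
from scipy.stats import hypergeom

def log_p(m, l, n1, n2):
    # log P exactly as in the simplified expression above (G(n) = log Gamma(n+1) = log n!)
    G = lambda n: gammaln(n + 1)
    return (-G(l) - G(n1 - l) - G(n2 - l) - G(m - n1 - n2 + l)
            - G(m) + G(n1) + G(m - n1) + G(n2) + G(m - n2))

m, n1, n2 = 30, 12, 9
ls = np.arange(max(0, n1 + n2 - m), min(n1, n2) + 1)
probs = np.exp([log_p(m, l, n1, n2) for l in ls])
print(probs.sum())                                        # ~1.0
print(np.allclose(probs, hypergeom.pmf(ls, m, n1, n2)))   # True: same distribution
```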
###Code
from scipy.special import gammaln,factorial
#for i in range(10):
# print exp(gammaln(i+1))-factorial(i)
def G(n):
return gammaln(n+1)
def LogProb(m,l,n1,n2):
logP=-G(l)-G(n1-l)-G(n2-l)-G(m-n1-n2+l)-G(m)+G(n1)+G(m-n1)+G(n2)+G(m-n2)
return logP/m
exp(LogProb(1000,0,500,500))
#USC00193270 21482
#USC00193702 28237
X=copy(All['USC00193270'])
Y=copy(All['USC00193702'])
print sum(~np.isnan(X))
print sum(~np.isnan(Y))
X[np.isnan(Y)]=np.nan
Y[np.isnan(X)]=np.nan
print sum(~np.isnan(X))
print sum(~np.isnan(Y))
def computeLogProb(X,Y):
X[np.isnan(Y)]=np.nan
Y[np.isnan(X)]=np.nan
G=~isnan(X)
m=sum(G)
XG=X[G]>0
YG=Y[G]>0
n1=sum(XG)
n2=sum(YG)
l=sum(XG*YG)
logprob=LogProb(m,l,n1,n2)
# print 'm=%d,l=%d,n1=%d,n2=%d,LogPval=%f'%(m,l,n1,n2,logprob)
return logprob,m
print computeLogProb(X,Y)
###Output
_____no_output_____
###Markdown
calculate the normalized log probability for each pair of stations.
###Code
L=len(Stations)
Pvals=np.zeros([L,L])
Length=np.zeros([L,L])
P_norm=np.zeros([L,L])
for i in range(L):
print i,
for j in range(L):
if i==j:
P_norm[i,j]=-0.4
continue
X=copy(All[Stations[i]])
Y=copy(All[Stations[j]])
P_norm[i,j],Length[i,j]=computeLogProb(X,Y)
if Length[i,j]<200:
P_norm[i,j]=np.nan
print Pvals[:2,:2]
print Length[:2,:2]
print P_norm[:2,:2]
A=P_norm.flatten();
B=A[~isnan(A)]
print A.shape,B.shape
hist(-B,bins=100);
xlabel('significance')
def showmat(mat):
fig,axes=plt.subplots(1,1,figsize=(10,10))
axes.imshow(mat, cmap=plt.cm.gray)
showmat(P_norm)
###Output
_____no_output_____
###Markdown
Finding structure in the dependency matrix. The matrix above shows, for each pair of stations, the normalized log probability that the overlap in rain days is random. We see immediately that the first 8 stations are highly correlated with each other. To find more correlations we use SVD (the term PCA is reserved for decomposition of the covariance matrix). As we shall see, the top 10 eigenvectors explain about 80% of the square magnitude of the matrix.
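As an added cross-check on the decomposition used in the next cell, the same explained-variance spectrum can be computed directly with numpy's SVD of the column-centered matrix; `P_norm` is the matrix built above:

```python
import numpy as np

M = np.nan_to_num(P_norm)              # replace the NaN entries, as the next cell does
M = M - M.mean(axis=0)                 # sklearn's PCA centers the columns first
U, s, Vt = np.linalg.svd(M, full_matrices=False)
explained = s**2 / np.sum(s**2)        # same values as PCA's explained_variance_ratio_
print(explained[:10])
```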
###Code
print 'A group of very correlated stations is:',All.keys()[:8]
from sklearn.decomposition import PCA
P_norm0 = np.nan_to_num(P_norm)
n_comp=10
pca = PCA(n_components=n_comp, svd_solver='full')
pca.fit(P_norm0)
#print(pca.explained_variance_)
Var_explained=pca.explained_variance_ratio_
plot(insert(cumsum(Var_explained),0,0))
grid()
# we will look only at the top 4 eigenvectors.
n_comp=4
pca = PCA(n_components=n_comp, svd_solver='full')
pca.fit(P_norm0)
fig,axes=plt.subplots(1,4,figsize=(20,5),sharey='row');
L=list(pca.components_.transpose())
for i in range(4):
X=sorted(L,key=lambda x:x[i])
axes[i].plot(X);
def re_order_matrix(M,order):
M_reord=M[order,:]
M_reord=M_reord[:,order]
return M_reord
fig,axes=plt.subplots(2,2,figsize=(15,15),sharex='col',sharey='row');
i=0
for r in range(2):
for c in range(2):
order=np.argsort(pca.components_[i,:])
P_norm_reord=re_order_matrix(P_norm0,order)
axes[r,c].matshow(P_norm_reord)
i+=1
###Output
_____no_output_____
###Markdown
Explanation and possible extensions. When we reorder the rows and columns of the matrix using one of the eigenvectors, the grouping of the stations becomes more evident. For example, consider the upper-left corner of the second figure (the upper-left panel). The stations at positions 0-22 are clearly strongly correlated with each other, even though there are some stations, in positions 15-18 or so, which are more related to each other than to the rest of this block. This type of organization is called **Block Diagonal** and it typically reveals important structure such as grouping or clustering. You might want to extract the sets of stations that form blocks for your region, and then plot them on the map to see their spatial relationship; a sketch of pulling out one such block follows.
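A minimal sketch of that extension (an addition; the panel index and the cut-off of 22 stations are illustrative, taken from the discussion above):

```python
# Reorder the stations by the eigenvector used for the upper-left panel
order = np.argsort(pca.components_[0, :])
block_stations = [Stations[i] for i in order[:22]]   # the block at positions 0-22
print(block_stations)
```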
###Code
from pickle import dump
with open(data_dir+'/PRCP_residuals_PCA.pickle','wb') as file:
dump({'stations':All.keys(),
'eigen-vecs':pca.components_},
file)
###Output
_____no_output_____ |
notebooks/WV12 - DCT preprocessing.ipynb | ###Markdown
This Jupyter notebook provides the code for classifying signals using the Discrete Cosine Transform (type-2). To get some more background information, please have a look at the accompanying blog post: http://ataspinar.com/2018/12/21/a-guide-for-using-the-wavelet-transform-in-machine-learning/
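Before working with the accelerometer signals, here is a small added illustration of why keeping only the first DCT coefficients can work: for a smooth signal, most of the energy is concentrated in the low-order coefficients, so a truncated set still reconstructs the signal well.

```python
import numpy as np
from scipy.fftpack import dct, idct

t = np.linspace(0, 1, 128)
signal = np.exp(-((t - 0.5) ** 2) / 0.02)        # a smooth toy signal

coeffs = dct(signal, norm='ortho')               # type-2 DCT
truncated = np.zeros_like(coeffs)
truncated[:16] = coeffs[:16]                     # keep only the first 16 coefficients
reconstruction = idct(truncated, norm='ortho')

energy_kept = np.sum(coeffs[:16] ** 2) / np.sum(coeffs ** 2)
print(energy_kept)                               # close to 1
print(np.max(np.abs(signal - reconstruction)))   # small reconstruction error
```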
###Code
import os
from time import perf_counter
import numpy as np
from scipy.fftpack import dct
import matplotlib.pyplot as plt
from collections import defaultdict, Counter
import keras
from keras.layers import Conv1D, BatchNormalization, Dense, Flatten, Activation
from tensorflow.keras.layers.experimental import preprocessing
from keras.models import Sequential
from keras.callbacks import History, EarlyStopping
history = History()
###Output
_____no_output_____
###Markdown
1. Loading the UCI HAR dataset. Download the dataset from https://archive.ics.uci.edu/ml/datasets/Human+Activity+Recognition+Using+Smartphones
###Code
activities_description = {
1: 'walking',
2: 'walking upstairs',
3: 'walking downstairs',
4: 'sitting',
5: 'standing',
6: 'laying'
}
def read_signals(filename):
with open(filename, 'r') as fp:
data = fp.read().splitlines()
data = map(lambda x: x.rstrip().lstrip().split(), data)
data = [list(map(float, line)) for line in data]
return data
def read_labels(filename):
with open(filename, 'r') as fp:
activities = fp.read().splitlines()
activities = list(map(lambda x: int(x)-1, activities))
return activities
def randomize(dataset, labels):
permutation = np.random.permutation(labels.shape[0])
shuffled_dataset = dataset[permutation, :, :]
shuffled_labels = labels[permutation]
return shuffled_dataset, shuffled_labels
DATA_FOLDER = '../datasets/UCI HAR Dataset/'
INPUT_FOLDER_TRAIN = DATA_FOLDER+'train/Inertial Signals/'
INPUT_FOLDER_TEST = DATA_FOLDER+'test/Inertial Signals/'
INPUT_FILES_TRAIN = ['body_acc_x_train.txt', 'body_acc_y_train.txt', 'body_acc_z_train.txt',
'body_gyro_x_train.txt', 'body_gyro_y_train.txt', 'body_gyro_z_train.txt',
'total_acc_x_train.txt', 'total_acc_y_train.txt', 'total_acc_z_train.txt']
INPUT_FILES_TEST = ['body_acc_x_test.txt', 'body_acc_y_test.txt', 'body_acc_z_test.txt',
'body_gyro_x_test.txt', 'body_gyro_y_test.txt', 'body_gyro_z_test.txt',
'total_acc_x_test.txt', 'total_acc_y_test.txt', 'total_acc_z_test.txt']
LABELFILE_TRAIN = DATA_FOLDER+'train/y_train.txt'
LABELFILE_TEST = DATA_FOLDER+'test/y_test.txt'
train_signals, test_signals = [], []
for input_file in INPUT_FILES_TRAIN:
signal = read_signals(INPUT_FOLDER_TRAIN + input_file)
train_signals.append(signal)
train_signals = np.transpose(np.array(train_signals), (1, 2, 0))
for input_file in INPUT_FILES_TEST:
signal = read_signals(INPUT_FOLDER_TEST + input_file)
test_signals.append(signal)
test_signals = np.transpose(np.array(test_signals), (1, 2, 0))
train_labels = read_labels(LABELFILE_TRAIN)
test_labels = read_labels(LABELFILE_TEST)
[no_signals_train, no_steps_train, no_components_train] = np.shape(train_signals)
[no_signals_test, no_steps_test, no_components_test] = np.shape(train_signals)
no_labels = len(np.unique(train_labels[:]))
print("The train dataset contains {} signals, each one of length {} and {} components ".format(no_signals_train, no_steps_train, no_components_train))
print("The test dataset contains {} signals, each one of length {} and {} components ".format(no_signals_test, no_steps_test, no_components_test))
print("The train dataset contains {} labels, with the following distribution:\n {}".format(np.shape(train_labels)[0], Counter(train_labels[:])))
print("The test dataset contains {} labels, with the following distribution:\n {}".format(np.shape(test_labels)[0], Counter(test_labels[:])))
uci_har_signals_train, uci_har_labels_train = randomize(train_signals, np.array(train_labels))
uci_har_signals_test, uci_har_labels_test = randomize(test_signals, np.array(test_labels))
###Output
The train dataset contains 7352 signals, each one of length 128 and 9 components
The test dataset contains 7352 signals, each one of length 128 and 9 components
The train dataset contains 7352 labels, with the following distribution:
Counter({5: 1407, 4: 1374, 3: 1286, 0: 1226, 1: 1073, 2: 986})
The test dataset contains 2947 labels, with the following distribution:
Counter({5: 537, 4: 532, 0: 496, 3: 491, 1: 471, 2: 420})
###Markdown
2. Generating DCT features for the UCI-HAR dataset
###Code
def get_uci_har_dct_features(dataset, labels, truncated_len=None):
uci_har_features = []
    # Take the DCT of each component and concatenate them end-to-end
for signal_no in range(0, len(dataset)):
features = []
for signal_comp in range(0,dataset.shape[2]):
signal = dataset[signal_no, :, signal_comp]
coeff = dct(signal)[:truncated_len]
features.append(list(coeff))
uci_har_features.append(features)
print(np.shape(uci_har_features))
X = np.array(uci_har_features).transpose(0,2,1)
Y = labels
return X, Y
truncate_len = 128
t_start = perf_counter()
x_train, y_train = get_uci_har_dct_features(uci_har_signals_train, uci_har_labels_train, \
truncated_len=truncate_len)
x_test, y_test = get_uci_har_dct_features(uci_har_signals_test, uci_har_labels_test, \
truncated_len=truncate_len)
t_stop = perf_counter()
t_diff = t_stop-t_start
print ('Time for DCT preprocessing {} seconds'.format(t_diff))
print ('Training data shape: {}'.format(x_train.shape))
print ('Test data shape: {}'.format(x_test.shape))
###Output
(7352, 9, 128)
(2947, 9, 128)
Time for DCT preprocessing 8.428480695999998 seconds
Training data shape: (7352, 128, 9)
Test data shape: (2947, 128, 9)
###Markdown
3. Classifying the train and test sets
###Code
num_classes = 6
batch_size = 8
epochs = 128
input_shape = np.shape(x_train)[1:]
print('input_shape: {}'.format(input_shape))
# convert the data to the right type
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices - this is for use in the
# categorical_crossentropy loss below
y_train = list(y_train)
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = list(y_test)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
normalizer = preprocessing.Normalization()
normalizer.adapt(x_train)
model.add(keras.Input(shape=input_shape))
model.add(normalizer)
model.add(Conv1D(16, kernel_size=3, strides=2))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv1D(32, kernel_size=3))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dense(64, kernel_regularizer=keras.regularizers.l1_l2(l1=5e-5,l2=5e-5)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dense(96, kernel_regularizer=keras.regularizers.l1_l2(l1=5e-4,l2=5e-4)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dense(64, kernel_regularizer=keras.regularizers.l1_l2(l1=5e-5,l2=5e-5)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dense(16, kernel_regularizer=keras.regularizers.l1_l2(l1=1e-5,l2=1e-5)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Flatten())
model.add(Dense(num_classes, kernel_regularizer=keras.regularizers.l1_l2(l1=5e-5,l2=5e-5),\
activation='softmax'))
model.summary()
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adam(),
metrics=['accuracy'])
es = EarlyStopping(monitor='val_loss', verbose=0, patience=8)
t_start = perf_counter()
model.fit(x_train, y_train, batch_size=batch_size,
epochs=epochs, verbose=0,
validation_data=(x_test, y_test),
callbacks=[history,es])
t_stop = perf_counter()
t_diff = t_stop-t_start
print ('Time to train the network {} seconds'.format(t_diff))
train_score = model.evaluate(x_train, y_train, verbose=0)
print('Train loss: {}, Train accuracy: {}'.format(train_score[0], train_score[1]))
test_score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss: {}, Test accuracy: {}'.format(test_score[0], test_score[1]))
fig, axarr = plt.subplots(figsize=(14,7), ncols=2)
axarr[0].plot(history.history['accuracy'], label='train accuracy')
axarr[0].plot(history.history['val_accuracy'], label='test accuracy')
axarr[0].set_xlabel('Number of Epochs', fontsize=18)
axarr[0].set_ylabel('Accuracy', fontsize=18)
axarr[0].set_ylim([0.5,1])
axarr[0].legend()
axarr[1].plot(history.history['loss'], label='train loss')
axarr[1].plot(history.history['val_loss'], label='test loss')
axarr[1].set_xlabel('Number of Epochs', fontsize=18)
axarr[1].set_ylabel('Loss', fontsize=18)
axarr[1].legend()
plt.show()
###Output
_____no_output_____ |
assignments/2020/assignment1/features.ipynb | ###Markdown
Image features exercise. *Complete and hand in this completed worksheet (including its outputs and any supporting code outside of the worksheet) with your assignment submission. For more details see the [assignments page](http://vision.stanford.edu/teaching/cs231n/assignments.html) on the course website.* We have seen that we can achieve reasonable performance on an image classification task by training a linear classifier on the pixels of the input image. In this exercise we will show that we can improve our classification performance by training linear classifiers not on raw pixels but on features that are computed from the raw pixels. All of your work for this exercise will be done in this notebook.
###Code
import random
import numpy as np
from cs231n.data_utils import load_CIFAR10
import matplotlib.pyplot as plt
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
###Output
_____no_output_____
###Markdown
Load data. Similar to previous exercises, we will load CIFAR-10 data from disk.
###Code
from cs231n.features import color_histogram_hsv, hog_feature
def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000):
# Load the raw CIFAR-10 data
cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
# Cleaning up variables to prevent loading data multiple times (which may cause memory issue)
try:
del X_train, y_train
del X_test, y_test
print('Clear previously loaded data.')
except:
pass
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
# Subsample the data
mask = list(range(num_training, num_training + num_validation))
X_val = X_train[mask]
y_val = y_train[mask]
mask = list(range(num_training))
X_train = X_train[mask]
y_train = y_train[mask]
mask = list(range(num_test))
X_test = X_test[mask]
y_test = y_test[mask]
return X_train, y_train, X_val, y_val, X_test, y_test
X_train, y_train, X_val, y_val, X_test, y_test = get_CIFAR10_data()
###Output
_____no_output_____
###Markdown
Extract Features. For each image we will compute a Histogram of Oriented Gradients (HOG) as well as a color histogram using the hue channel in HSV color space. We form our final feature vector for each image by concatenating the HOG and color histogram feature vectors. Roughly speaking, HOG should capture the texture of the image while ignoring color information, and the color histogram represents the color of the input image while ignoring texture. As a result, we expect that using both together ought to work better than using either alone. Verifying this assumption would be a good thing to try for your own interest. The `hog_feature` and `color_histogram_hsv` functions both operate on a single image and return a feature vector for that image. The extract_features function takes a set of images and a list of feature functions and evaluates each feature function on each image, storing the results in a matrix where each column is the concatenation of all feature vectors for a single image.
###Code
from cs231n.features import *
num_color_bins = 10 # Number of bins in the color histogram
feature_fns = [hog_feature, lambda img: color_histogram_hsv(img, nbin=num_color_bins)]
X_train_feats = extract_features(X_train, feature_fns, verbose=True)
X_val_feats = extract_features(X_val, feature_fns)
X_test_feats = extract_features(X_test, feature_fns)
# Preprocessing: Subtract the mean feature
mean_feat = np.mean(X_train_feats, axis=0, keepdims=True)
X_train_feats -= mean_feat
X_val_feats -= mean_feat
X_test_feats -= mean_feat
# Preprocessing: Divide by standard deviation. This ensures that each feature
# has roughly the same scale.
std_feat = np.std(X_train_feats, axis=0, keepdims=True)
X_train_feats /= std_feat
X_val_feats /= std_feat
X_test_feats /= std_feat
# Preprocessing: Add a bias dimension
X_train_feats = np.hstack([X_train_feats, np.ones((X_train_feats.shape[0], 1))])
X_val_feats = np.hstack([X_val_feats, np.ones((X_val_feats.shape[0], 1))])
X_test_feats = np.hstack([X_test_feats, np.ones((X_test_feats.shape[0], 1))])
###Output
_____no_output_____
###Markdown
Train SVM on features. Using the multiclass SVM code developed earlier in the assignment, train SVMs on top of the features extracted above; this should achieve better results than training SVMs directly on top of raw pixels.
###Code
# Use the validation set to tune the learning rate and regularization strength
from cs231n.classifiers.linear_classifier import LinearSVM
learning_rates = [1e-9, 1e-8, 1e-7]
regularization_strengths = [5e4, 5e5, 5e6]
results = {}
best_val = -1
best_svm = None
################################################################################
# TODO: #
# Use the validation set to set the learning rate and regularization strength. #
# This should be identical to the validation that you did for the SVM; save #
# the best trained classifer in best_svm. You might also want to play #
# with different numbers of bins in the color histogram. If you are careful #
# you should be able to get accuracy of near 0.44 on the validation set. #
################################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
pass
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
# Print out results.
for lr, reg in sorted(results):
train_accuracy, val_accuracy = results[(lr, reg)]
print('lr %e reg %e train accuracy: %f val accuracy: %f' % (
lr, reg, train_accuracy, val_accuracy))
print('best validation accuracy achieved during cross-validation: %f' % best_val)
# Evaluate your trained SVM on the test set: you should be able to get at least 0.40
y_test_pred = best_svm.predict(X_test_feats)
test_accuracy = np.mean(y_test == y_test_pred)
print(test_accuracy)
# An important way to gain intuition about how an algorithm works is to
# visualize the mistakes that it makes. In this visualization, we show examples
# of images that are misclassified by our current system. The first column
# shows images that our system labeled as "plane" but whose true label is
# something other than "plane".
examples_per_class = 8
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
for cls, cls_name in enumerate(classes):
idxs = np.where((y_test != cls) & (y_test_pred == cls))[0]
idxs = np.random.choice(idxs, examples_per_class, replace=False)
for i, idx in enumerate(idxs):
plt.subplot(examples_per_class, len(classes), i * len(classes) + cls + 1)
plt.imshow(X_test[idx].astype('uint8'))
plt.axis('off')
if i == 0:
plt.title(cls_name)
plt.show()
###Output
_____no_output_____
###Markdown
Inline question 1: Describe the misclassification results that you see. Do they make sense?$\color{blue}{\textit Your Answer:}$ Neural Network on image features. Earlier in this assignment we saw that training a two-layer neural network on raw pixels achieved better classification performance than linear classifiers on raw pixels. In this notebook we have seen that linear classifiers on image features outperform linear classifiers on raw pixels. For completeness, we should also try training a neural network on image features. This approach should outperform all previous approaches: you should easily be able to achieve over 55% classification accuracy on the test set; our best model achieves about 60% classification accuracy.
###Code
# Preprocessing: Remove the bias dimension
# Make sure to run this cell only ONCE
print(X_train_feats.shape)
X_train_feats = X_train_feats[:, :-1]
X_val_feats = X_val_feats[:, :-1]
X_test_feats = X_test_feats[:, :-1]
print(X_train_feats.shape)
from cs231n.classifiers.neural_net import TwoLayerNet
input_dim = X_train_feats.shape[1]
hidden_dim = 500
num_classes = 10
net = TwoLayerNet(input_dim, hidden_dim, num_classes)
best_net = None
################################################################################
# TODO: Train a two-layer neural network on image features. You may want to #
# cross-validate various parameters as in previous sections. Store your best #
# model in the best_net variable. #
################################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
pass
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
# Run your best neural net classifier on the test set. You should be able
# to get more than 55% accuracy.
test_acc = (best_net.predict(X_test_feats) == y_test).mean()
print(test_acc)
###Output
_____no_output_____
###Markdown
Image features exercise. *Complete and hand in this completed worksheet (including its outputs and any supporting code outside of the worksheet) with your assignment submission. For more details see the [assignments page](http://vision.stanford.edu/teaching/cs231n/assignments.html) on the course website.* We have seen that we can achieve reasonable performance on an image classification task by training a linear classifier on the pixels of the input image. In this exercise we will show that we can improve our classification performance by training linear classifiers not on raw pixels but on features that are computed from the raw pixels. All of your work for this exercise will be done in this notebook.
###Code
import random
import numpy as np
from cs231n.data_utils import load_CIFAR10
import matplotlib.pyplot as plt
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
###Output
_____no_output_____
###Markdown
Load data. Similar to previous exercises, we will load CIFAR-10 data from disk.
###Code
from cs231n.features import color_histogram_hsv, hog_feature
def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000):
# Load the raw CIFAR-10 data
cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
# Cleaning up variables to prevent loading data multiple times (which may cause memory issue)
try:
del X_train, y_train
del X_test, y_test
print('Clear previously loaded data.')
except:
pass
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
# Subsample the data
mask = list(range(num_training, num_training + num_validation))
X_val = X_train[mask]
y_val = y_train[mask]
mask = list(range(num_training))
X_train = X_train[mask]
y_train = y_train[mask]
mask = list(range(num_test))
X_test = X_test[mask]
y_test = y_test[mask]
return X_train, y_train, X_val, y_val, X_test, y_test
X_train, y_train, X_val, y_val, X_test, y_test = get_CIFAR10_data()
###Output
_____no_output_____
###Markdown
Extract Features. For each image we will compute a Histogram of Oriented Gradients (HOG) as well as a color histogram using the hue channel in HSV color space. We form our final feature vector for each image by concatenating the HOG and color histogram feature vectors. Roughly speaking, HOG should capture the texture of the image while ignoring color information, and the color histogram represents the color of the input image while ignoring texture. As a result, we expect that using both together ought to work better than using either alone. Verifying this assumption would be a good thing to try for your own interest. The `hog_feature` and `color_histogram_hsv` functions both operate on a single image and return a feature vector for that image. The extract_features function takes a set of images and a list of feature functions and evaluates each feature function on each image, storing the results in a matrix where each column is the concatenation of all feature vectors for a single image.
###Code
from cs231n.features import *
num_color_bins = 10 # Number of bins in the color histogram
feature_fns = [hog_feature, lambda img: color_histogram_hsv(img, nbin=num_color_bins)]
X_train_feats = extract_features(X_train, feature_fns, verbose=True)
X_val_feats = extract_features(X_val, feature_fns)
X_test_feats = extract_features(X_test, feature_fns)
# Preprocessing: Subtract the mean feature
mean_feat = np.mean(X_train_feats, axis=0, keepdims=True)
X_train_feats -= mean_feat
X_val_feats -= mean_feat
X_test_feats -= mean_feat
# Preprocessing: Divide by standard deviation. This ensures that each feature
# has roughly the same scale.
std_feat = np.std(X_train_feats, axis=0, keepdims=True)
X_train_feats /= std_feat
X_val_feats /= std_feat
X_test_feats /= std_feat
# Preprocessing: Add a bias dimension
X_train_feats = np.hstack([X_train_feats, np.ones((X_train_feats.shape[0], 1))])
X_val_feats = np.hstack([X_val_feats, np.ones((X_val_feats.shape[0], 1))])
X_test_feats = np.hstack([X_test_feats, np.ones((X_test_feats.shape[0], 1))])
###Output
Done extracting features for 1000 / 49000 images
Done extracting features for 2000 / 49000 images
Done extracting features for 3000 / 49000 images
Done extracting features for 4000 / 49000 images
Done extracting features for 5000 / 49000 images
Done extracting features for 6000 / 49000 images
Done extracting features for 7000 / 49000 images
Done extracting features for 8000 / 49000 images
Done extracting features for 9000 / 49000 images
Done extracting features for 10000 / 49000 images
Done extracting features for 11000 / 49000 images
Done extracting features for 12000 / 49000 images
Done extracting features for 13000 / 49000 images
Done extracting features for 14000 / 49000 images
Done extracting features for 15000 / 49000 images
Done extracting features for 16000 / 49000 images
Done extracting features for 17000 / 49000 images
Done extracting features for 18000 / 49000 images
Done extracting features for 19000 / 49000 images
Done extracting features for 20000 / 49000 images
Done extracting features for 21000 / 49000 images
Done extracting features for 22000 / 49000 images
Done extracting features for 23000 / 49000 images
Done extracting features for 24000 / 49000 images
Done extracting features for 25000 / 49000 images
Done extracting features for 26000 / 49000 images
Done extracting features for 27000 / 49000 images
Done extracting features for 28000 / 49000 images
Done extracting features for 29000 / 49000 images
Done extracting features for 30000 / 49000 images
Done extracting features for 31000 / 49000 images
Done extracting features for 32000 / 49000 images
Done extracting features for 33000 / 49000 images
Done extracting features for 34000 / 49000 images
Done extracting features for 35000 / 49000 images
Done extracting features for 36000 / 49000 images
Done extracting features for 37000 / 49000 images
Done extracting features for 38000 / 49000 images
Done extracting features for 39000 / 49000 images
Done extracting features for 40000 / 49000 images
Done extracting features for 41000 / 49000 images
Done extracting features for 42000 / 49000 images
Done extracting features for 43000 / 49000 images
Done extracting features for 44000 / 49000 images
Done extracting features for 45000 / 49000 images
Done extracting features for 46000 / 49000 images
Done extracting features for 47000 / 49000 images
Done extracting features for 48000 / 49000 images
Done extracting features for 49000 / 49000 images
###Markdown
Train SVM on features. Using the multiclass SVM code developed earlier in the assignment, train SVMs on top of the features extracted above; this should achieve better results than training SVMs directly on top of raw pixels.
###Code
# Use the validation set to tune the learning rate and regularization strength
from cs231n.classifiers.linear_classifier import LinearSVM
learning_rates = [1e-9, 1e-8, 1e-7]
regularization_strengths = [5e4, 5e5, 5e6]
results = {}
best_val = -1
best_svm = None
################################################################################
# TODO: #
# Use the validation set to set the learning rate and regularization strength. #
# This should be identical to the validation that you did for the SVM; save #
# the best trained classifer in best_svm. You might also want to play #
# with different numbers of bins in the color histogram. If you are careful #
# you should be able to get accuracy of near 0.44 on the validation set. #
################################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
for lr in learning_rates:
for reg in regularization_strengths:
svm = LinearSVM()
svm.train(X_train_feats, y_train, learning_rate=lr, reg=reg, num_iters=800)
y_pred_train = svm.predict(X_train_feats)
y_pred_valid = svm.predict(X_val_feats)
accuracy_train = np.mean(y_pred_train == y_train)
accuracy_valid = np.mean(y_pred_valid == y_val)
results[(lr, reg)] = (accuracy_train, accuracy_valid)
if accuracy_valid > best_val:
best_val = accuracy_valid
best_svm = svm
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
# Print out results.
for lr, reg in sorted(results):
train_accuracy, val_accuracy = results[(lr, reg)]
print('lr %e reg %e train accuracy: %f val accuracy: %f' % (
lr, reg, train_accuracy, val_accuracy))
print('best validation accuracy achieved during cross-validation: %f' % best_val)
# Evaluate your trained SVM on the test set: you should be able to get at least 0.40
y_test_pred = best_svm.predict(X_test_feats)
test_accuracy = np.mean(y_test == y_test_pred)
print(test_accuracy)
# An important way to gain intuition about how an algorithm works is to
# visualize the mistakes that it makes. In this visualization, we show examples
# of images that are misclassified by our current system. The first column
# shows images that our system labeled as "plane" but whose true label is
# something other than "plane".
examples_per_class = 8
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
for cls, cls_name in enumerate(classes):
idxs = np.where((y_test != cls) & (y_test_pred == cls))[0]
idxs = np.random.choice(idxs, examples_per_class, replace=False)
for i, idx in enumerate(idxs):
plt.subplot(examples_per_class, len(classes), i * len(classes) + cls + 1)
plt.imshow(X_test[idx].astype('uint8'))
plt.axis('off')
if i == 0:
plt.title(cls_name)
plt.show()
###Output
_____no_output_____
###Markdown
Inline question 1: Describe the misclassification results that you see. Do they make sense?$\color{blue}{\textit Your Answer:}$ Neural Network on image features. Earlier in this assignment we saw that training a two-layer neural network on raw pixels achieved better classification performance than linear classifiers on raw pixels. In this notebook we have seen that linear classifiers on image features outperform linear classifiers on raw pixels. For completeness, we should also try training a neural network on image features. This approach should outperform all previous approaches: you should easily be able to achieve over 55% classification accuracy on the test set; our best model achieves about 60% classification accuracy.
###Code
# Preprocessing: Remove the bias dimension
# Make sure to run this cell only ONCE
print(X_train_feats.shape)
X_train_feats = X_train_feats[:, :-1]
X_val_feats = X_val_feats[:, :-1]
X_test_feats = X_test_feats[:, :-1]
print(X_train_feats.shape)
from cs231n.classifiers.neural_net import TwoLayerNet
input_dim = X_train_feats.shape[1]
hidden_dim = 500
num_classes = 10
net = TwoLayerNet(input_dim, hidden_dim, num_classes)
best_net = None
################################################################################
# TODO: Train a two-layer neural network on image features. You may want to #
# cross-validate various parameters as in previous sections. Store your best #
# model in the best_net variable. #
################################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
learning_rates = [1e-2 ,1e-1, 5e-1, 1, 5]
regularization_strengths = [1e-3, 5e-3, 1e-2, 1e-1, 0.5, 1]
best_acc = 0
best_lr = None
best_reg = None
for lr in learning_rates:
for reg in regularization_strengths:
net = TwoLayerNet(input_dim, hidden_dim, num_classes)
stats = net.train(X_train_feats, y_train, X_val_feats, y_val,
num_iters=1000, batch_size=200,
learning_rate=lr, learning_rate_decay=0.95,
reg=reg, verbose=True)
val_acc = (net.predict(X_val_feats) == y_val).mean()
print("val_acc: ", val_acc, ", learning rate: ", lr, ", reg: ", reg)
if val_acc > best_acc:
best_net = net
best_acc = val_acc
best_lr = lr
best_reg = reg
print("best acc: ", best_acc, ", learning rate: ", best_lr, ", reg: ", best_reg)
print("best acc: ", best_acc, ", learning rate: ", best_lr, ", reg: ", best_reg)
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
# Run your best neural net classifier on the test set. You should be able
# to get more than 55% accuracy.
test_acc = (best_net.predict(X_test_feats) == y_test).mean()
print(test_acc)
###Output
0.556
|
Covid_19_dataset_3.ipynb | ###Markdown
Exponential Modeling of COVID-19 Confirmed Cases. This notebook explores modeling the spread of COVID-19 confirmed cases as an exponential function. While this is not a good model for long or even medium-term predictions, it is able to fit initial outbreaks quite well. Defining our parameters and loading the data. Here we are looking at the confirmed and fatal cases for Italy through March 17. To apply the model to other countries or dates, just change the code below.
###Code
ESTIMATE_DAYS = 3
data_key = 'IT'
date_limit = '2020-03-17'
###Output
_____no_output_____
###Markdown
Seaborn is a Python data visualization library based on matplotlib. It provides a high-level interface for drawing attractive and informative statistical graphics. (We use it here instead of calling matplotlib directly.)
###Code
import pandas as pd
import seaborn as sns
sns.set()
df = pd.read_csv(f'https://storage.googleapis.com/covid19-open-data/v2/{data_key}/main.csv').set_index('date')
###Output
_____no_output_____
###Markdown
Looking at the outbreak. There are months of data, but we only care about when the number of cases started to grow. We define *outbreak* as whenever the number of cases exceeds a certain threshold; in this case, we are using 10.
###Code
def get_outbreak_mask(data: pd.DataFrame, threshold: int = 10):
''' Returns a mask for > N confirmed cases '''
return data['total_confirmed'] > threshold
cols = ['total_confirmed', 'total_deceased']
# Get data only for the columns we care about
df = df[cols]
# Get data only for the selected dates
df = df[df.index <= date_limit]
# Get data only after the outbreak begun
df = df[get_outbreak_mask(df)]
###Output
_____no_output_____
###Markdown
Plotting the data. Let's take a first look at the data. A visual inspection will typically give us a lot of information.
###Code
df.plot(kind='bar', figsize=(16, 8));
###Output
_____no_output_____
###Markdown
Modeling the data. The data appears to follow an exponential curve; it looks straight out of a middle school math textbook cover. Let's see if we can model it using some parameter fitting. ***SciPy*** is a Python library used for scientific and technical computing. *SciPy* contains modules for optimization, linear algebra, integration, interpolation, and special functions.
###Code
from scipy import optimize
def exponential_function(x: float, a: float, b: float, c: float):
''' a * (b ^ x) + c '''
return a * (b ** x) + c
X, y = list(range(len(df))), df['total_confirmed'].tolist()
params, _ = optimize.curve_fit(exponential_function, X, y)
print('Estimated function: {0:.3f} * ({1:.3f} ^ X) + {2:.3f}'.format(*params))
confirmed = df[['total_confirmed']].rename(columns={'total_confirmed': 'Ground Truth'})
ax = confirmed.plot(kind='bar', figsize=(16, 8))
estimate = [exponential_function(x, *params) for x in X]
ax.plot(df.index, estimate, color='red', label='Estimate')
ax.legend();
###Output
_____no_output_____
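One convenient quantity to read off the fitted parameters (an added aside, reusing `params` from the cell above): if cases grow as a * (b ^ x) with x measured in days, the doubling time is ln(2)/ln(b).

```python
import numpy as np

a, b, c = params
doubling_time = np.log(2) / np.log(b)
print('Estimated doubling time: {:.1f} days'.format(doubling_time))
```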
###Markdown
Validating the model. That curve looks like a very good fit! Even though proper epidemiology models are fundamentally different (because diseases can't grow exponentially indefinitely), the exponential model should be good for short-term predictions. To validate our model, let's try to fit it again without looking at the last 3 days of data. Then, we can estimate the missing days using our model, and verify if the results still hold by comparing what the model thought was going to happen with the actual data.
###Code
params_validate, _ = optimize.curve_fit(exponential_function, X[:-ESTIMATE_DAYS], y[:-ESTIMATE_DAYS])
# Project zero for all values except for the last ESTIMATE_DAYS
projected = [0] * len(X[:-ESTIMATE_DAYS]) + [exponential_function(x, *params_validate) for x in X[-ESTIMATE_DAYS:]]
projected = pd.Series(projected, index=df.index, name='Projected')
confirmed = pd.DataFrame({'Ground Truth': df['total_confirmed'], 'Projected': projected})
ax = confirmed.plot(kind='bar', figsize=(16, 8))
estimate = [exponential_function(x, *params_validate) for x in X]
ax.plot(df.index, estimate, color='red', label='Estimate')
ax.legend();
###Output
_____no_output_____
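To put a number on how close the projection is (an added check, reusing the `confirmed` DataFrame built above), we can look at the relative error over the projected days:

```python
# Relative error of the 3 projected days vs. the ground truth
tail = confirmed.iloc[-ESTIMATE_DAYS:]
relative_error = (tail['Projected'] - tail['Ground Truth']) / tail['Ground Truth']
print(relative_error)
```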
###Markdown
Projecting future data. It looks like our exponential model slightly overestimates the confirmed cases. That's a good sign! It means that the disease is slowing down a bit. The numbers are close enough that a 3-day projection is probably an accurate enough estimate. Now, let's use the model we fitted earlier, which used all the data, and try to predict what the next 3 days will look like.
###Code
import datetime
# Append N new days to our indices
date_format = '%Y-%m-%d'
date_range = [datetime.datetime.strptime(date, date_format) for date in df.index]
for _ in range(ESTIMATE_DAYS): date_range.append(date_range[-1] + datetime.timedelta(days=1))
date_range = [datetime.datetime.strftime(date, date_format) for date in date_range]
# Perform projection with the previously estimated parameters
projected = [0] * len(X) + [exponential_function(x, *params) for x in range(len(X), len(X) + ESTIMATE_DAYS)]
projected = pd.Series(projected, index=date_range, name='Projected')
df_ = pd.DataFrame({'Confirmed': df['total_confirmed'], 'Projected': projected})
ax = df_.plot(kind='bar', figsize=(16, 8))
estimate = [exponential_function(x, *params) for x in range(len(date_range))]
ax.plot(date_range, estimate, color='red', label='Estimate')
ax.legend();
###Output
_____no_output_____ |
simulations/notebooks_sim_bin/4_sim_binary_hypothesis_testing.ipynb | ###Markdown
Hypothesis testing based on bootstrap for the selected simulation scenario
###Code
dir = '/panfs/panfs1.ucsd.edu/panscratch/lij014/Stability_2020/sim_data/'
load(paste0(dir, 'binary_update/boot_toe_RF_binary.RData'))
load(paste0(dir, 'binary_update/boot_block_RF_binary.RData'))
load(paste0(dir, 'binary_update/boot_toe_genCompLasso_binary.RData'))
load(paste0(dir, 'binary_update/boot_block_genCompLasso_binary.RData'))
###Output
_____no_output_____
###Markdown
hypothesis testing based on Stability
###Code
table = as.data.frame(cbind(toe_rf$stab_index, toe_genCompLasso$stab_index, block_rf$stab_index, block_genCompLasso$stab_index))
colnames(table) = c('toe_rf', 'toe_genCompLasso', 'block_rf', 'block_genCompLasso')
head(table)
mean(table$toe_genCompLasso)
mean(table$toe_rf)
diff_toe = (table$toe_genCompLasso - table$toe_rf)
mean(diff_toe)
quantile(diff_toe, probs = c(0.025, 0.975))
mean(table$block_genCompLasso)
mean(table$block_rf)
diff_block = (table$block_genCompLasso - table$block_rf)
mean(diff_block)
quantile(diff_block, probs = c(0.025, 0.975))
###Output
_____no_output_____
###Markdown
hypothesis testing based on ROC block 0.5, p = 1000, n = 100
###Code
i = 46
load(paste0(dir, '/binary_update/block_RF_binary_', i, '.RData'))
results_block_rf[c('n', 'p', 'rou')]
block_rf_rocs = results_block_rf$ROC.list
i = 46
load(paste0(dir, '/binary_update/block_GenCompLasso_binary_', i, '.RData'))
results_block_GenCompLasso[c('n', 'p', 'rou')]
block_GenCompLasso_rocs = results_block_GenCompLasso$ROC.list
diff_block = (block_GenCompLasso_rocs - block_rf_rocs)
mean(diff_block)
quantile(diff_block, probs = c(0.025, 0.975))
table(diff_block)
###Output
_____no_output_____
###Markdown
Toeplitz 0.5
###Code
i = 46
load(paste0(dir, '/binary_update/toe_RF_binary_', i, '.RData'))
results_toe_rf[c('n', 'p', 'rou')]
toe_rf_rocs = results_toe_rf$ROC.list
i = 46
load(paste0(dir, '/binary_update/toe_GenCompLasso_binary_', i, '.RData'))
results_toe_GenCompLasso[c('n', 'p', 'rou')]
toe_GenCompLasso_rocs = results_toe_GenCompLasso$ROC.list
diff_toe = (toe_GenCompLasso_rocs - toe_rf_rocs)
mean(diff_toe)
quantile(diff_toe, probs = c(0.025, 0.975))
table(diff_toe)
###Output
_____no_output_____ |
Artin Sinani _ Regression Sprint Challenge.ipynb | ###Markdown
_Lambda School Data Science_ Regression Sprint Challenge. For this Sprint Challenge, you'll predict the price of used cars. The dataset is real-world. It was collected from advertisements of cars for sale in the Ukraine in 2016. The following import statements have been provided for you, and should be sufficient. But you may not need to use every import. And you are permitted to make additional imports.
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import sklearn
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
import statsmodels.api as sm
from statsmodels.stats.outliers_influence import variance_inflation_factor
###Output
_____no_output_____
###Markdown
[The dataset](https://raw.githubusercontent.com/ryanleeallred/datasets/master/car_regression.csv) contains 8,495 rows and 9 variables:- make: manufacturer brand- price: seller’s price in advertisement (in USD)- body: car body type- mileage: as mentioned in advertisement (‘000 Km)- engV: rounded engine volume (‘000 cubic cm)- engType: type of fuel- registration: whether car registered in Ukraine or not- year: year of production- drive: drive type. Run this cell to read the data:
###Code
df = pd.read_csv('https://raw.githubusercontent.com/ryanleeallred/datasets/master/car_regression.csv')
print(df.shape)
df.sample(10)
df.isna().sum()
df.describe()
###Output
_____no_output_____
###Markdown
Predictive Modeling with Linear Regression 1.1 Split the data into an X matrix and y vector (`price` is the target we want to predict).
###Code
features = ['drive','make', 'body', 'mileage', 'engV', 'engType', 'registration','year']
target = ['price']
X=df.copy()[features]
y=df.copy()[target]
###Output
_____no_output_____
###Markdown
1.2 Split the data into test and train sets, using `train_test_split`. You may use a train size of 80% and a test size of 20%.
###Code
Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, train_size=0.80, test_size=0.20, random_state=50)
Xtrain.shape, Xtest.shape, ytrain.shape, ytest.shape
###Output
_____no_output_____
###Markdown
1.3 Use scikit-learn to fit a multiple regression model, using your training data. Use `year` and one or more features of your choice. You will not be evaluated on which features you choose. You may choose to use all features.
###Code
lin_reg= LinearRegression()
lin_reg.fit(Xtrain, ytrain)
###Output
_____no_output_____
###Markdown
1.4 Report the Intercept and Coefficients for the fitted model.
###Code
beta_0 = lin_reg.intercept_
beta_i = lin_reg.coef_
print("Slope Coefficients: ", beta_i)
print("\nIntercept Value: ", beta_0)
###Output
Slope Coefficients: [[ 8586.72417861 -37.65517874 -1614.94662551 -43.64298967
309.15889188 -1140.34780676 4398.17638681 1148.18153219]]
Intercept Value: [-2284036.93230084]
###Markdown
1.5 Use the test data to make predictions.
###Code
# Make predictions for y
y_predictions = lin_reg.predict(Xtest)
###Output
_____no_output_____
###Markdown
1.6 Use the test data to get both the Root Mean Square Error and $R^2$ for the model. You will not be evaluated on how high or low your scores are.
###Code
rmse = np.sqrt(mean_squared_error(ytest, y_predictions))
r2 = r2_score(ytest, y_predictions)
print('Root Mean Square Error: ' , rmse)
print('R Squared: ' , r2)
###Output
Root Mean Square Error: 21140.393258484244
R Squared: 0.281987425279152
###Markdown
1.7 How should we interpret the coefficient corresponding to the `year` feature? One sentence can be sufficient. The coefficient corresponding to 'year' indicates the slope of the regression line with respect to the 'year' feature (the m in y = mx + b form), i.e., the expected change in price for each additional year of production, holding the other features constant. 1.8 How should we interpret the Root Mean Square Error? One sentence can be sufficient. The RMSE measures how well our model performed by comparing the predicted values with the actual values; our RMSE is approximately 21140, which is relatively high, indicating poor predictions. 1.9 How should we interpret the $R^2$? One sentence can be sufficient. The $R^2$ is relatively low at 0.28, indicating that the regression model explains only about 28% of the variance in price given the features (independent variables) provided, so it is a low-accuracy regression model. Log-Linear and Polynomial Regression 2.1 Engineer a new variable by taking the log of the price variable.
###Code
df['ln_price'] = np.log(df['price'])
plt.hist(df['ln_price'], bins=30)
###Output
_____no_output_____
###Markdown
2.2 Visualize scatterplots of the relationship between each feature versus the log of price, to look for non-linearly distributed features. You may use any plotting tools and techniques.
###Code
df.columns
features = ['make','body', 'mileage', 'engV', 'engType', 'registration',
'year', 'drive']
sns.lmplot('make', 'ln_price', data=df, scatter_kws=dict(alpha=0.3))
plt.show()
sns.lmplot('body', 'ln_price', data=df, scatter_kws=dict(alpha=0.3))
plt.show()
sns.lmplot('mileage', 'ln_price', data=df, scatter_kws=dict(alpha=0.3))
plt.show()
sns.lmplot('engV', 'ln_price', data=df, scatter_kws=dict(alpha=0.3))
plt.show()
sns.lmplot('engType', 'ln_price', data=df, scatter_kws=dict(alpha=0.3))
plt.show()
sns.lmplot('registration', 'ln_price', data=df, scatter_kws=dict(alpha=0.3))
plt.show()
sns.lmplot('year', 'ln_price', data=df, scatter_kws=dict(alpha=0.3))
plt.show()
sns.lmplot('drive', 'ln_price', data=df, scatter_kws=dict(alpha=0.3))
plt.show()
###Output
_____no_output_____
###Markdown
2.3 Create polynomial feature(s). You will not be evaluated on which feature(s) you choose. But try to choose appropriate features.
###Code
df['engV_sq'] = df['engV']**2
df['mileage_sq'] = df['mileage']**2
###Output
_____no_output_____
###Markdown
2.4 Use the new log-transformed y variable and your x variables (including any new polynomial features) to fit a new linear regression model. Then report the: intercept, coefficients, RMSE, and $R^2$.
###Code
# Polynomial Features
polynomial_feat = ['engV_sq','mileage_sq']
# Set feature and target
target = df['ln_price']
feature = df[polynomial_feat]
#Train/Test Split
xtrain, xtest, ytrain, ytest = train_test_split(feature, target, test_size = .2)
#Set/Fit Model
linreg = LinearRegression()
linreg.fit(feature, target)
# Intercept/Coefficient
beta_0 = linreg.intercept_
beta_1 = linreg.coef_
print("Slope Coefficients: ", beta_1)
print("Intercept Value: ", beta_0)
# RMSE
print('Root Mean Squared Error', np.sqrt(mean_squared_error(target, linreg.predict(feature))))
# R^2
print('R-Squared Value', linreg.score(feature, target))
###Output
Slope Coefficients: [-7.93188169e-09 -2.18532836e-12]
Intercept Value: 9.19573250433777
Root Mean Squared Error 0.9593264209467586
R-Squared Value 0.005077839984796051
###Markdown
2.5 How do we interpret coefficients in Log-Linear Regression (differently than Ordinary Least Squares Regression)? One sentence can be sufficient. The coefficients indicate that a one-unit increase in X produces an expected change of β units in log(Y), which corresponds to multiplying Y by roughly exp(β), i.e., an approximate 100·β percent change for small β. Decision Trees 3.1 Use scikit-learn to fit a decision tree regression model, using your training data. Use one or more features of your choice. You will not be evaluated on which features you choose. You may choose to use all features. You may use the log-transformed target or the original un-transformed target. You will not be evaluated on which you choose.
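Referring back to 2.5, a tiny added numeric illustration of that interpretation (the coefficient value here is made up):

```python
import numpy as np

beta = 0.05                      # hypothetical log-linear coefficient
print(np.exp(beta) - 1)          # ~0.051: a one-unit increase in X raises Y by about 5.1%
```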
###Code
# Train/Test Split
X = df.drop(['price','ln_price'], axis=1)
y = df['ln_price']
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.80, test_size=0.20, random_state=42)
# Make Tree Model and Fit
tree = DecisionTreeRegressor(max_depth=10)
tree.fit(X_train,y_train)
###Output
_____no_output_____
###Markdown
3.2 Use the test data to get the $R^2$ for the model. You will not be evaluated on how high or low your scores are.
###Code
print('R^2 Score: ', tree.score(X_test, y_test))
###Output
R^2 Score: 0.8839889650792476
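###Markdown
To see what drives the tree's predictions, the fitted model's feature importances can be inspected (a quick sketch using the `tree` and `X` defined above, assuming pandas is imported as `pd` as elsewhere in this notebook).
###Code
# Rank features by how much they contribute to the tree's splits
importances = pd.Series(tree.feature_importances_, index=X.columns).sort_values(ascending=False)
print(importances)
###Output
_____no_output_____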
###Markdown
Regression Diagnostics 4.1 Use statsmodels to run a log-linear or log-polynomial linear regression with robust standard errors.
###Code
# OLS Model Summary
model = sm.OLS(y, sm.add_constant(X))
results = model.fit(cov_type='HC3')
print(results.summary())
###Output
OLS Regression Results
==============================================================================
Dep. Variable: ln_price R-squared: 0.657
Model: OLS Adj. R-squared: 0.657
Method: Least Squares F-statistic: 1442.
Date: Fri, 03 May 2019 Prob (F-statistic): 0.00
Time: 10:22:59 Log-Likelihood: -7173.5
No. Observations: 8495 AIC: 1.437e+04
Df Residuals: 8484 BIC: 1.445e+04
Df Model: 10
Covariance Type: HC3
================================================================================
coef std err z P>|z| [0.025 0.975]
--------------------------------------------------------------------------------
const -186.6376 3.483 -53.582 0.000 -193.465 -179.811
make -0.0015 0.000 -5.330 0.000 -0.002 -0.001
body -0.1004 0.004 -24.708 0.000 -0.108 -0.092
mileage 9.892e-07 3.32e-07 2.976 0.003 3.38e-07 1.64e-06
engV 0.0005 0.000 2.844 0.004 0.000 0.001
engType -0.0525 0.005 -10.907 0.000 -0.062 -0.043
registration 0.7382 0.020 36.972 0.000 0.699 0.777
year 0.0973 0.002 56.138 0.000 0.094 0.101
drive 0.3932 0.010 38.791 0.000 0.373 0.413
engV_sq -5.684e-08 1.91e-08 -2.972 0.003 -9.43e-08 -1.94e-08
mileage_sq -1.152e-12 6.22e-13 -1.851 0.064 -2.37e-12 6.77e-14
==============================================================================
Omnibus: 471.374 Durbin-Watson: 1.910
Prob(Omnibus): 0.000 Jarque-Bera (JB): 1775.213
Skew: 0.139 Prob(JB): 0.00
Kurtosis: 5.222 Cond. No. 8.95e+12
==============================================================================
Warnings:
[1] Standard Errors are heteroscedasticity robust (HC3)
[2] The condition number is large, 8.95e+12. This might indicate that there are
strong multicollinearity or other numerical problems.
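###Markdown
To connect this output back to question 2.5, the `year` coefficient from the robust log-linear fit above can be read as an approximate percent effect on price (a small illustration; 0.0973 is copied from the summary table).
###Code
# Convert the log-linear 'year' coefficient into an implied percent change in price
beta_year = 0.0973
print('Each additional model year is associated with roughly a '
      '{:.1f}% higher price'.format((np.exp(beta_year) - 1) * 100))
###Output
_____no_output_____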
###Markdown
4.2 Calculate the Variance Inflation Factor (VIF) of our X variables. Do we have multicollinearity problems?One sentence can be sufficient
###Code
# VIF
X = sm.add_constant(X)
vif = [variance_inflation_factor(X.values, i) for i in range(len(X.columns))]
pd.Series(vif, X.columns)
# There is a big multicollinearity problem here. We will ignore engV, engV_sq.
###Output
_____no_output_____ |
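###Markdown
One way to act on the comment above (an assumption here: dropping the engineered squared terms, which are nearly collinear with their base features) is to recompute the VIFs on the reduced design matrix.
###Code
# Recompute VIF after dropping the squared terms
X_reduced = X.drop(columns=['engV_sq', 'mileage_sq'])
vif_reduced = [variance_inflation_factor(X_reduced.values, i)
               for i in range(len(X_reduced.columns))]
print(pd.Series(vif_reduced, X_reduced.columns))
###Output
_____no_output_____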
Week4/SGNS.ipynb | ###Markdown
**Neural Word Embedding**> **Word2Vec, Continuous Bag of Words (CBOW)**> **Word2Vec, Skip-gram with negative sampling (SGNS)**> **Main key point: Distributional Hypothesis**> Goal: Predict the context words from a given word **How to implement SGNS Algorithm:**1. Data preprocessing2. Hyperparameters3. Training Data4. Model Fitting5. Inference/prediction on the test samples **Main Class**
###Code
from collections import defaultdict
import numpy as np
class word2vec():
def __init__(self):
self.n = hyperparameters['n']
self.learningrate = hyperparameters['learning_rate']
self.epochs = hyperparameters['epochs']
self.windowsize = hyperparameters['window_size']
def word2onehot(self, word):
word_vector = np.zeros(self.vocabulary_count)
word_index = self.word_index[word]
word_vector[word_index] = 1
return word_vector
def generate_training_data(self, setting, corpus):
word_counts = defaultdict(int)
# print(word_counts)
for row in corpus:
for token in row:
word_counts[token] +=1
#print(word_counts)
self.vocabulary_count = len(word_counts.keys())
#print(self.vocabulary_count)
self.words_list = list(word_counts.keys())
#print(self.words_list)
self.word_index = dict((word, i) for i, word in enumerate(self.words_list))
#print(self.word_index)
self.index_word = dict((i, word) for i, word in enumerate(self.words_list))
#print(self.index_word)
training_data = []
for sentence in corpus:
sentence_length = len(sentence)
for i , word in enumerate(sentence):
word_target = self.word2onehot(sentence[i])
#print(word_target)
word_context = []
for j in range(i - self.windowsize, i + self.windowsize + 1):
if j !=i and j <= sentence_length - 1 and j >= 0:
word_context.append(self.word2onehot(sentence[j]))
# print(word_context)
training_data.append([word_target, word_context])
return np.array(training_data)
    def model_training(self, training_data):
        self.w1 = np.random.uniform(-1, 1, (self.vocabulary_count, self.n))
        self.w2 = np.random.uniform(-1, 1, (self.n, self.vocabulary_count))
        for i in range(0, self.epochs):
            # self.loss = 0
            for word_target, word_context in training_data:
                h, u, y_pred = self.forward_pass(word_target)
                # print(y_pred)
                # Sum the prediction error over all context words, then update the weights
                error = np.sum([np.subtract(y_pred, word) for word in word_context], axis=0)
                self.backprop(error, h, word_target)
    def backprop(self, error, h, x):
        # Standard skip-gram gradient step for both weight matrices
        dl_dw2 = np.outer(h, error)
        dl_dw1 = np.outer(x, np.dot(self.w2, error))
        self.w1 = self.w1 - (self.learningrate * dl_dw1)
        self.w2 = self.w2 - (self.learningrate * dl_dw2)
def forward_pass(self, x):
h = np.dot(self.w1.T, x)
u = np.dot(self.w2.T, h)
y_pred= self.softmax(u)
return h, u, y_pred
def softmax(self, x):
e = np.exp(x - np.max(x))
return e / e.sum(axis=0)
def word_vector(self, word):
word_index = self.word_index[word]
word_vector = self.w1[word_index]
return word_vector
def similar_vectors(self, word, n):
vw1 = self.word_vector(word)
word_similar={}
for i in range(self.vocabulary_count):
vw2 = self.w1[i]
theta_nom= np.dot(vw1, vw2)
theta_denom = np.linalg.norm(vw1) * np.linalg.norm(vw2)
theta = theta_nom / theta_denom
# print(theta)
word = self.index_word[i]
word_similar[word] = theta
# {k: v for k, v in sorted(x.items(), key=lambda item: item[1])}
words_sorted = sorted(word_similar.items(), key=lambda ss: ss[1], reverse=True)
for word, similar in words_sorted[:n]:
print(word, similar)
###Output
_____no_output_____
###Markdown
**1.Data PreProcessing**
###Code
# Define the mini corpus
document = "A combination of Machine Learning and Natural Language Processing works well"
# Tokenizing and build a vocabulary
corpus = [[]]
for token in document.split():
corpus[0].append(token.lower())
print(corpus)
###Output
[['a', 'combination', 'of', 'machine', 'learning', 'and', 'natural', 'language', 'processing', 'works', 'well']]
###Markdown
**2. Hyperparameters**
###Code
hyperparameters = {
'window_size': 2, #it covers two words left and two words right
'n': 11, # dimension of word embedding
'epochs': 40, # number of training epochs
'learning_rate': 0.01, # a coefficient for updating weights
}
###Output
_____no_output_____
###Markdown
**3. Generate Training Data**
###Code
# we need to create one-hot vector based on our given corpus
# 1 [target(a)], [context(combination, of)] == [10000000000],[01000000000][00100000000]
# instance
w2v = word2vec()
training_data = w2v.generate_training_data(hyperparameters, corpus)
# print(training_data)
###Output
_____no_output_____
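###Markdown
To check that the encoding matches the one-hot example sketched in the comments above, the first (target, context) pair can be printed (a quick inspection; `target_vec`/`context_vecs` are just illustrative names).
###Code
# Inspect the first training example: one one-hot target and its list of context vectors
target_vec, context_vecs = training_data[0]
print('target :', target_vec)
print('context:', np.array(context_vecs))
###Output
_____no_output_____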
###Markdown
**4. Model Training**
###Code
w2v.model_training(training_data)
###Output
[0.08779638 0.07481873 0.02720081 0.07741055 0.00744272 0.16597957
0.02244375 0.03065301 0.24696449 0.05698785 0.20230215]
[0.05535867 0.00701134 0.03824704 0.10045965 0.56325283 0.01725363
0.0265237 0.05966504 0.09830109 0.0269263 0.00700071]
[0.04181769 0.01066172 0.11196532 0.21611837 0.09291857 0.06353542
0.12186928 0.09201719 0.00719568 0.10265939 0.13924137]
[0.01979228 0.67167761 0.0380796 0.00334896 0.01721451 0.03192899
0.10938238 0.05316565 0.02946898 0.01481168 0.01112936]
[0.08594459 0.01955307 0.03806679 0.20510115 0.00741567 0.1290254
0.00654433 0.01746104 0.087872 0.22842949 0.17458647]
[0.09563497 0.0609889 0.12708249 0.11587498 0.02070406 0.07517313
0.07438113 0.10863157 0.08416487 0.03121457 0.20614931]
[0.05032016 0.23525726 0.16200512 0.01933368 0.09044005 0.02026146
0.06624078 0.18744993 0.0542594 0.08477761 0.02965456]
[0.09318229 0.04413759 0.24420036 0.10517933 0.12382943 0.06460056
0.0371188 0.0105303 0.0077964 0.15646752 0.11295743]
[0.0451643 0.10487824 0.08784491 0.03077638 0.04817766 0.0241796
0.07871515 0.36046298 0.03539558 0.05103012 0.13337506]
[0.03536469 0.38921382 0.07153202 0.01173604 0.02046491 0.14331057
0.04427569 0.01477941 0.02839212 0.10029694 0.14063378]
[0.03581558 0.11027609 0.07132603 0.0326665 0.0393842 0.05157923
0.29849825 0.26142471 0.03815447 0.02041243 0.0404625 ]
[0.08779638 0.07481873 0.02720081 0.07741055 0.00744272 0.16597957
0.02244375 0.03065301 0.24696449 0.05698785 0.20230215]
[0.05535867 0.00701134 0.03824704 0.10045965 0.56325283 0.01725363
0.0265237 0.05966504 0.09830109 0.0269263 0.00700071]
[0.04181769 0.01066172 0.11196532 0.21611837 0.09291857 0.06353542
0.12186928 0.09201719 0.00719568 0.10265939 0.13924137]
[0.01979228 0.67167761 0.0380796 0.00334896 0.01721451 0.03192899
0.10938238 0.05316565 0.02946898 0.01481168 0.01112936]
[0.08594459 0.01955307 0.03806679 0.20510115 0.00741567 0.1290254
0.00654433 0.01746104 0.087872 0.22842949 0.17458647]
[0.09563497 0.0609889 0.12708249 0.11587498 0.02070406 0.07517313
0.07438113 0.10863157 0.08416487 0.03121457 0.20614931]
[0.05032016 0.23525726 0.16200512 0.01933368 0.09044005 0.02026146
0.06624078 0.18744993 0.0542594 0.08477761 0.02965456]
[0.09318229 0.04413759 0.24420036 0.10517933 0.12382943 0.06460056
0.0371188 0.0105303 0.0077964 0.15646752 0.11295743]
[0.0451643 0.10487824 0.08784491 0.03077638 0.04817766 0.0241796
0.07871515 0.36046298 0.03539558 0.05103012 0.13337506]
[0.03536469 0.38921382 0.07153202 0.01173604 0.02046491 0.14331057
0.04427569 0.01477941 0.02839212 0.10029694 0.14063378]
[0.03581558 0.11027609 0.07132603 0.0326665 0.0393842 0.05157923
0.29849825 0.26142471 0.03815447 0.02041243 0.0404625 ]
###Markdown
**5. Model Prediction**
###Code
vector = w2v.word_vector("works")
print(vector)
###Output
[-0.5965974 0.59358364 0.49175356 0.59782454 -0.10149338 0.5909372
-0.4941789 0.73069452 -0.13549471 -0.7486393 0.16786503]
###Markdown
**Finding Similar Words**
###Code
w2v.similar_vectors("works", 5)
###Output
works 1.0
language 0.34217254302544925
machine 0.20539544566784484
natural 0.16382679527923805
a 0.13091314242232238
|
docs/estimation_tutorial.ipynb | ###Markdown
Estimation TutorialIn this section, we dive into the topic of model estimation using **pydsge**. Note that for this tutorial we will assume a folder set-up of the form```analysis/├── README.md├── src/ │ ├── estimation.py or .ipynb │ └── model.yaml ├── data/│ └── example_data└── output/```This is because the estimation creates (intermediate) output results, which we will want to store.
###Code
# Just for the tutorial: Setting up example structure
import tempfile
import os
import shutil # For clean-up of temporary directory
from pathlib import Path # For Windows/Unix compatibility
# Temporary output folder
output_path = Path(tempfile.gettempdir(), 'output')
os.makedirs(output_path)
###Output
_____no_output_____
###Markdown
Parsing and loading the modelLet us first load the relevant packages. Besides the DSGE class we already know from [*getting started*](https://pydsge.readthedocs.io/en/latest/getting_started.html), we also want to import the `emcee` package. This will allow us to later specify the desired updating algorithms for sampling from the posterior distribution - we explain this in more detail below.
###Code
import pandas as pd
import numpy as np
import emcee # For specifying updating moves
from pydsge import DSGE, example
###Output
_____no_output_____
###Markdown
In this tutorial, we continue to use the example provided in `pydsge`. Like before, we specify the file paths of the model and the data. Please feel free to check-out both files, but from the previous tutorial you might remember that we're dealing with a five equations New Keynesian model and US quarterly data from 1995 to 2018.
###Code
yaml_file, data_file = example
###Output
_____no_output_____
###Markdown
We again parse the model and load-in the data. What is important is that we also specify a location where the (intermediate) output is stored. Here we assign the output folder, as discussed at the beginning. Note also that we can name the model and write a short description, which is very useful when working with several models.
###Code
# Parse the model
mod = DSGE.read(yaml_file)
# Give it a name
mod.name = 'Rank_tutorial'
mod.description = 'RANK, estimation tutorial'
# Storage location for output
mod.path = output_path
# Load data
df = pd.read_csv(data_file, parse_dates=['date'], index_col=['date'])
df.index.freq = 'Q' # let pandas know that this is quarterly data
###Output
_____no_output_____
###Markdown
Remember that since the Great Recession, the Federal Funds Rate has been below the ZLB. That is why, like in [*getting started*](https://pydsge.readthedocs.io/en/latest/getting_started.html), we adjust the observed interest rate, so that the data is within reach of our model.
###Code
# adjust elb
zlb = mod.get_par('elb_level')
rate = df['FFR']
df['FFR'] = np.maximum(rate,zlb)
mod.load_data(df, start='1998Q1')
###Output
_____no_output_____
###Markdown
Preparing the estimation After importing the packages and loading the data, we still need to tell pydsge how to carry out the estimation of our model. The "prep_estim" method can be used to accomplish this. It can be called without any arguments and sets-up a non-linear model by default. However, to showcase some of this functionality, we decide to specify several arguments here.To perform the estimation, `pydsge` uses a Transposed-Ensemble Kalman Filter (TEnKF). For general information on its implementation, see the [EconSieve documentation](https://econsieve.readthedocs.io/en/latest/) , and for more details on running the filter in `pydsge` check-out the [*getting started tutorial*](https://pydsge.readthedocs.io/en/latest/getting_started.html). Again, the default filter is non-linear, but we can opt for a linear one by setting the argument `Linear` to `True`. To choose a custom number of ensemble members for the TEnKF, set `N` to a particular number (default is 300). We can also set a specific `seed`, the default seed is `0`. To get additional information on the estimation process, we can set `verbose` to `True`. Conveniently, this information includes an overview of the parameters’ distribution, their means and standard deviations. Moreover, if we already specified the covariance matrix of the measurement errors or want to reuse a previous result, we can load it into the `prep_estim` method by setting `Load.R` to `True`. Finally, we can turn parallelization on or off with the debug argument, which can be helpful in case any issues should arise.
###Code
mod.prep_estim(N=350, seed=0, verbose=True)
###Output
_____no_output_____
###Markdown
After finishing our set-up, the only thing left to prepare is to filter our observed FFR for hidden states. We can simply identify the variable through `index` and, given the present context, set the measurement error to a very small value.
###Code
mod.filter.R = mod.create_obs_cov(1e-1)
ind = mod.observables.index('FFR')
mod.filter.R[ind,ind] /= 1e1
###Output
_____no_output_____
###Markdown
Running the estimation Now that we have all the variables and have defined the type of estimation to perform, we can turn to estimating the model. To be able to deal with very high-dimensional models, `pydsge` uses *Markov Chain Monte Carlo* (MCMC) Integration to sample from the posterior distribution. For further information on MCMC, please refer to the `emcee` [website](https://emcee.readthedocs.io/en/stable/) and the additional resources provided there. We recommend running a **Tempered Ensemble MCMC** first, by using the `tmcmc` method. Doing this is particularly valuable for high-dimensional problems, since defining the initial states of the walkers in the parameter space in this way is a powerful tool to improve sampling. However, due to its computational efficiency, we also use it for small models such as the one we are dealing with here. For our ensemble sampling, we can specify a variety of options. Note, `tmcmc` always requires the specification of the first four arguments, which are the i) number of steps, ii) number of walks, iii) number of temperatures, and iv) a temperature target! Here we do not want to set a target and, in turn, set `fmax = None`. Moreover, we have the option to set different "moves", i.e. coordinate updating algorithms for the walkers. As a wrapper for a lot of `emcee` functionality, `tmcmc` can work with many different "moves" - for a list and implementation details please consult the `emcee` documentation. For using them here, specify them as a list of tuples, containing the type of move and its "weight". If no move is specified, "StretchMove" is used. For seed setting of the log probability, the user can choose between three options; here we use the seed specified in `prep_estim`. Finally, the states are saved in the `p0` object as a numpy array in order to later pass them to our main sampling process.
###Code
fmax = None
moves = [(emcee.moves.DEMove(), 0.8),
(emcee.moves.DESnookerMove(), 0.2),]
p0 = mod.tmcmc(200, 200, 0, fmax, moves=moves, update_freq=100, lprob_seed='set')
mod.save()
###Output
_____no_output_____
###Markdown
As we can see, the output provides us with various important details. In particular, we learn that `mod.save()` saved the meta data of our model in the directory which we specified earlier in `mod.path`. This information is stored as an `.npz` file so that it is available even in the event of a crash and can be loaded anytime using `numpy.load()`. We now use the initial states derived above to conduct our full Bayesian estimation. Still, initial states do not have to be specified and, unless `mcmc` can identify previous runs or estimations, the initial values of the "prior" section in the `*.yaml` are used. The default number of sampling steps is 3000, so it makes sense to allow this to run in parallel. Again, if you want to avoid this, simply set `debug` to `True`. With `tune` we can determine the size of the Markov Chain we wish to retain. It is important to not confuse this with the updating frequency, which only affects the number of summary statements `pydsge` reports during the estimation. Note that, like in the `tmcmc`, we choose to continue using the seed specified earlier. Lastly, the option `append` lets us store all intermediate results. We pickle and store the meta information of this object in the path specified earlier.
###Code
mod.mcmc(p0,
moves=moves,
nsteps=3000,
tune=500,
update_freq=500,
lprob_seed='set',
append=True,
debug=True,
)
mod.save()
###Output
_____no_output_____
###Markdown
So where are our estimates? Remember that, so far, we have only drawn samples from our posterior distribution. Our converged (burnt-in) MCMC samples are currently stored in the `rank_test_sampler.h5` file created by `mcmc`. To get our parameter estimates, we now still need to draw a sample from the MCMC object.
###Code
pars = mod.get_par('posterior', nsamples=250, full=True)
###Output
_____no_output_____
###Markdown
Now, let's have a look at the estimated shocks. We can do this by using `extract()`, which gives us the smoothed shocks. This method takes a variety of arguments, all of which have sensible default values. For example, here we set the number of parameter draws in each verification sample to 1. Note that this method also takes an optional seed argument, but we here continue to use the default seed 0. It is important to emphasise that `pydsge` seeks to separate the model's set-up (meta) data and its results. To store the Markov Chains, shocks and parameter estimates we use the `save_rdict()` method, below.
###Code
epsdf = mod.extract(pars, nsamples=1)
mod.save_rdict(epsdf)
###Output
_____no_output_____
###Markdown
Finally, we take a closer look at the MCMC estimation results. In particular, `mcmc_summary()` summarises the convergence behaviour of our draws from the posterior distribution.
###Code
mod.mcmc_summary()
###Output
_____no_output_____
###Markdown
---
###Code
# Just for the tutorial: Cleaning the temporary directory
shutil.rmtree(output_path)
###Output
_____no_output_____
###Markdown
Estimation TutorialIn this section, we dive into the topic of model estimation using **pydsge**. Let us, just for the sake of this tutorial, set up a temporary directory structure:
###Code
# Just for the tutorial: Setting up example structure
import tempfile
import os
import shutil # For clean-up of temporary directory
from pathlib import Path # For Windows/Unix compatibility
# Temporary output folder
output_path = Path(tempfile.gettempdir(), 'output')
if not os.path.isdir(output_path):
os.makedirs(output_path)
###Output
_____no_output_____
###Markdown
Parsing and loading the modelLet us first load the relevant packages. Besides the DSGE class we already know from [*getting started*](https://pydsge.readthedocs.io/en/latest/getting_started.html), we also want to import the `emcee` package. This will allow us to later specify the desired updating algorithms for sampling from the posterior distribution - we explain this in more detail below.
###Code
import pandas as pd
import numpy as np
import emcee # For specifying updating moves
from pydsge import DSGE, example
###Output
_____no_output_____
###Markdown
In this tutorial, we continue to use the example provided in `pydsge`. Like before, we specify the file paths of the model and the data. Please feel free to check-out both files, but from the previous tutorial you might remember that we're dealing with a five equations New Keynesian model and US quarterly data from 1995 to 2018.
###Code
yaml_file, data_file = example
###Output
_____no_output_____
###Markdown
We again parse the model and load-in the data. What is important is that we also specify a location where the (intermediate) output is stored. Here we assign the output folder, as discussed at the beginning. Note also that we can name the model and write a short description, which is very useful when working with several models.
###Code
# Parse the model
mod = DSGE.read(yaml_file)
# Give it a name
mod.name = 'Rank_tutorial'
mod.description = 'RANK, estimation tutorial'
# Storage location for output
mod.path = output_path
# Load data
df = pd.read_csv(data_file, parse_dates=['date'], index_col=['date'])
df.index.freq = 'Q' # let pandas know that this is quarterly data
###Output
_____no_output_____
###Markdown
Remember that since the Great Recession, the Federal Funds Rate has been below the ZLB. That is why, like in [*getting started*](https://pydsge.readthedocs.io/en/latest/getting_started.html), we adjust the observed interest rate, so that the data is "within reach" of our model.
###Code
# adjust elb
zlb = mod.get_par('elb_level')
rate = df['FFR']
df['FFR'] = np.maximum(rate,zlb)
mod.load_data(df, start='1998Q1')
###Output
_____no_output_____
###Markdown
Preparing the estimation After importing the packages and loading the data, we still need to tell pydsge how to carry out the estimation of our model. The "prep_estim" method can be used to accomplish this. It can be called without any arguments and sets up a non-linear model by default. However, not all defaults are always a good choice, and to showcase some of this functionality, we decide to specify several arguments here.To perform the estimation, `pydsge` uses a Transposed-Ensemble Kalman Filter (TEnKF). For general information on its implementation, see the [EconSieve documentation](https://econsieve.readthedocs.io/en/latest/) , and for more details on running the filter in `pydsge` check-out the [*getting started tutorial*](https://pydsge.readthedocs.io/en/latest/getting_started.html). Again, the default filter is non-linear, but we can opt for a linear one by setting the argument `linear` to `True`. To choose a custom number of ensemble members for the TEnKF, set `N` to a particular number (default is 300; for e.g. a medium scale model 400-500 is a good choice). We can also set a specific random seed with the argument `seed` (the default seed is `0`). To get additional information on the estimation process, we can set `verbose` to `True`. Conveniently, this information includes an overview of the parameters’ distribution, their means and standard deviations. Finally, if we already specified the covariance matrix of the measurement errors or want to reuse a previous result, we can load it into the `prep_estim` method by setting `Load.R` to `True`. If you run into problems you can turn parallelization off by setting `debug=True`.
###Code
mod.prep_estim(N=350, seed=0, verbose=True)
###Output
[estimation:] Model operational. 12 states, 3 observables, 3 shocks, 81 data points.
Adding parameters to the prior distribution...
- theta as beta (0.5, 0.1). Init @ 0.7813, with bounds (0.2, 0.95)
- sigma as normal (1.5, 0.375). Init @ 1.2312, with bounds (0.25, 3)
- phi_pi as normal (1.5, 0.25). Init @ 1.7985, with bounds (1.0, 3)
- phi_y as normal (0.125, 0.05). Init @ 0.0893, with bounds (0.001, 0.5)
- rho_u as beta (0.5, 0.2). Init @ 0.7, with bounds (0.01, 0.9999)
- rho_r as beta (0.5, 0.2). Init @ 0.7, with bounds (0.01, 0.9999)
- rho_z as beta (0.5, 0.2). Init @ 0.7, with bounds (0.01, 0.9999)
- rho as beta (0.75, 0.1). Init @ 0.8, with bounds (0.5, 0.975)
- sig_u as inv_gamma_dynare (0.1, 2). Init @ 0.5, with bounds (0.025, 5)
- sig_r as inv_gamma_dynare (0.1, 2). Init @ 0.5, with bounds (0.01, 3)
- sig_z as inv_gamma_dynare (0.1, 2). Init @ 0.5, with bounds (0.01, 3)
[estimation:] 11 priors detected. Adding parameters to the prior distribution.
###Markdown
As in the filtering tutorial, we set the covariance of measurement errors to correspond to the variances of the data. Additionally, we adjust the measurement errors of the Federal Funds rate since it is perfectly observable.
###Code
mod.filter.R = mod.create_obs_cov(1e-1)
ind = mod.observables.index('FFR')
mod.filter.R[ind,ind] /= 1e1
###Output
_____no_output_____
###Markdown
Running the estimation Let's turn to the actual estimation. For a variety of pretty good reasons, `pydsge` uses *Ensemble Markov Chain Monte Carlo* (Ensemble-MCMC) integration to sample from the posterior distribution. For further information on Ensemble-MCMC, please refer to the `emcee` [website](https://emcee.readthedocs.io/en/stable/) and the additional resources provided there. We first require an initial ensemble, which is provided by `tmcmc`. `tmcmc` is a very sophisticated function with many options, but right now, all we are interested in is to obtain a sample that represents the prior distribution:
###Code
p0 = mod.prior_sampler(50, verbose=True) # rule of thumb: number_of_parameters times 4
###Output
100%|██████████| 50/50 [00:01<00:00, 46.38it/s]
###Markdown
The parameter draws are saved in the object `p0` as a numpy array in order to later pass them to our main sampling process.
###Code
mod.save()
###Output
[save_meta:] Metadata saved as '/tmp/output/Rank_tutorial_meta'
###Markdown
`mod.save()` saved the meta data of our model in the directory which we specified earlier in `mod.path`. This information is stored as an `.npz` file so that it is available even in the event of a crash and can be loaded anytime using `numpy.load()`. For posterior sampling using `mcmc` we have the option to set different "moves", i.e. coordinate updating algorithms for the walkers. As a wrapper for a lot of `emcee` functionality, `mcmc` can work with many different "moves" - for a list and implementation details please consult the `emcee` documentation. For using them here, specify them as a list of tuples, containing the type of move and its "weight". If no move is specified, `StretchMove` is used.
###Code
moves = [(emcee.moves.DEMove(), 0.8),
(emcee.moves.DESnookerMove(), 0.2),]
###Output
_____no_output_____
###Markdown
We now use the initial states derived above to conduct our full Bayesian estimation using `mcmc`. Note that, instead of using the specified initial ensemble, `mcmc` can identify previous runs or estimations, or the initial values of the "prior" section in the `*.yaml` can be used. The default number of sampling steps is 3000, which is parallelized by default. With `tune` we can determine the size of the Markov Chain we wish to retain to represent the posterior, i.e. after burn-in. This is not to be confused with the updating frequency, which only affects the number of summary statements `pydsge` reports during the estimation. With the option `lprob_seed` the user can choose how to set the random seed of the likelihood evaluation - here we use the seed specified in `prep_estim`.
###Code
mod.mcmc(p0,
moves=moves,
nsteps=3000,
tune=500,
update_freq=500,
) # this may take some time. Better run on a machine with MANY cores...
mod.save() # be sure to save the internal state!
###Output
_____no_output_____
###Markdown
Great. So where are our estimates? Our (hopefully) converged MCMC samples are currently stored in the `rank_test_sampler.h5` file created by `mcmc`. You can load and use this data using the methods introduced in the [*processing estimation results tutorial*](https://pydsge.readthedocs.io/en/latest/getting_started.html).
###Code
# Just for the tutorial: Cleaning the temporary directory
shutil.rmtree(output_path)
###Output
_____no_output_____
###Markdown
Estimation TutorialIn this section, we dive into the topic of model estimation using **pydsge**. Note that for this tutorial we will assume a folder set-up of the form```analysis/├── README.md├── src/ │ ├── estimation.py or .ipynb │ └── model.yaml ├── data/│ └── example_data└── output/```This is because the estimation creates (intermediate) output results, which we will want to store.
###Code
# Just for the tutorial: Setting up example structure
import tempfile
import os
import shutil # For clean-up of temporary directory
from pathlib import Path # For Windows/Unix compatibility
# Temporary output folder
output_path = Path(tempfile.gettempdir(), 'output')
os.makedirs(output_path)
###Output
_____no_output_____
###Markdown
Parsing and loading the modelLet us first load the relevant packages. Besides the DSGE class we already know from [*getting started*](https://pydsge.readthedocs.io/en/latest/getting_started.html), we also want to import the `emcee` package. This will allow us to later specify the desired updating algorithms for sampling from the posterior distribution - we explain this in more detail below.
###Code
import pandas as pd
import numpy as np
import emcee # For specifying updating moves
from pydsge import DSGE, example
###Output
_____no_output_____
###Markdown
In this tutorial, we continue to use the example provided in `pydsge`. Like before, we specify the file paths of the model and the data. Please feel free to check-out both files, but from the previous tutorial you might remember that we're dealing with a five equations New Keynesian model and US quarterly data from 1995 to 2018.
###Code
yaml_file, data_file = example
###Output
_____no_output_____
###Markdown
We again parse the model and load-in the data. What is important is that we also specify a location where the (intermediate) output is stored. Here we assign the output folder, as discussed at the beginning. Note also that we can name the model and write a short description, which is very useful when working with several models.
###Code
# Parse the model
mod = DSGE.read(yaml_file)
# Give it a name
mod.name = 'Rank_tutorial'
mod.description = 'RANK, estimation tutorial'
# Storage location for output
mod.path = output_path
# Load data
df = pd.read_csv(data_file, parse_dates=['date'], index_col=['date'])
df.index.freq = 'Q' # let pandas know that this is quarterly data
###Output
_____no_output_____
###Markdown
Remember that since the Great Recession, the Federal Funds Rate has been below the ZLB. That is why, like in [*getting started*](https://pydsge.readthedocs.io/en/latest/getting_started.html), we adjust the observed interest rate, so that the data is within reach of our model.
###Code
# adjust elb
zlb = mod.get_par('elb_level')
rate = df['FFR']
df['FFR'] = np.maximum(rate,zlb)
mod.load_data(df, start='1998Q1')
###Output
_____no_output_____
###Markdown
Preparing the estimation After importing the packages and loading the data, we still need to tell pydsge how to carry out the estimation of our model. The "prep_estim" method can be used to accomplish this. It can be called without any arguments and sets-up a non-linear model by default. However, to showcase some of this functionality, we decide to specify several arguments here.To perform the estimation, `pydsge` uses a Transposed-Ensemble Kalman Filter (TEnKF). For general information on its implementation, see the [EconSieve documentation](https://econsieve.readthedocs.io/en/latest/) , and for more details on running the filter in `pydsge` check-out the [*getting started tutorial*](https://pydsge.readthedocs.io/en/latest/getting_started.html). Again, the default filter is non-linear, but we can opt for a linear one by setting the argument `Linear` to `True`. To choose a custom number of ensemble members for the TEnKF, set `N` to a particular number (default is 300). We can also set a specific `seed`, the default seed is `0`. To get additional information on the estimation process, we can set `verbose` to `True`. Conveniently, this information includes an overview of the parameters’ distribution, their means and standard deviations. Moreover, if we already specified the covariance matrix of the measurement errors or want to reuse a previous result, we can load it into the `prep_estim` method by setting `Load.R` to `True`. Finally, we can turn parallelization on or off with the debug argument, which can be helpful in case any issues should arise.
###Code
mod.prep_estim(N=350, seed=0, verbose=True)
###Output
_____no_output_____
###Markdown
After finishing our set-up, the only thing left to prepare is to filter our observed FFR for hidden states. We can simply identify the variable through `index` and, given the present context, set the measurement error to a very small value.
###Code
mod.filter.R = mod.create_obs_cov(1e-1)
ind = mod.observables.index('FFR')
mod.filter.R[ind,ind] /= 1e1
###Output
_____no_output_____
###Markdown
Running the estimation Now that we have all the variables and have defined the type of estimation to perform, we can turn to estimating the model. To be able to deal with very high-dimensional models, `pydsge` uses *Markov Chain Monte Carlo* (MCMC) Integration to sample from the posterior distribution. For further information on MCMC, please refer to the `emcee` [website](https://emcee.readthedocs.io/en/stable/) and the additional resources provided there. We recommend running a **Tempered Ensemble MCMC** first, by using the `tmcmc` method. Doing this is particularly valuable for high-dimensional problems, since defining the initial states of the walkers in the parameter space in this way is a powerful tool to improve sampling. However, due to its computational efficiency, we also use it for small models such as the one we are dealing with here. For our ensemble sampling, we can specify a variety of options. Note, `tmcmc` always requires the specification of the first four arguments, which are the i) number of steps, ii) number of walks, iii) number of temperatures, and iv) a temperature target! Here we do not want to set a target and, in turn, set `fmax = None`. Moreover, we have the option to set different "moves", i.e. coordinate updating algorithms for the walkers. As a wrapper for a lot of `emcee` functionality, `tmcmc` can work with many different "moves" - for a list and implementation details please consult the `emcee` documentation. For using them here, specify them as a list of tuples, containing the type of move and its "weight". If no move is specified, "StretchMove" is used. For seed setting of the log probability, the user can choose between three options; here we use the seed specified in `prep_estim`. Finally, the states are saved in the `p0` object as a numpy array in order to later pass them to our main sampling process.
###Code
fmax = None
moves = [(emcee.moves.DEMove(), 0.8),
(emcee.moves.DESnookerMove(), 0.2),]
p0 = mod.tmcmc(200, 200, 0, fmax, moves=moves, update_freq=100, lprob_seed='set')
mod.save()
###Output
_____no_output_____
###Markdown
As we can see, the output provides us with various important details. In particular, we learn that `mod.save()` saved the meta data of our model in the directory which we specified earlier in `mod.path`. This information is stored as an `.npz` file so that it is available even in the event of a crash and can be loaded anytime using `numpy.load()`. We now use the initial states derived above to conduct our full Bayesian estimation. Still, initial states do not have to be specified and, unless `mcmc` can identify previous runs or estimations, the initial values of the "prior" section in the `*.yaml` are used. The default number of sampling steps is 3000, so it makes sense to allow this to run in parallel. Again, if you want to avoid this, simply set `debug` to `True`. With `tune` we can determine the size of the Markov Chain we wish to retain. It is important to not confuse this with the updating frequency, which only affects the number of summary statements `pydsge` reports during the estimation. Note that, like in the `tmcmc`, we choose to continue using the seed specified earlier. Lastly, the option `append` lets us store all intermediate results. We pickle and store the meta information of this object in the path specified earlier.
###Code
mod.mcmc(p0,
moves=moves,
nsteps=3000,
tune=500,
update_freq=500,
lprob_seed='set',
append=True,
debug=True,
)
mod.save()
###Output
_____no_output_____
###Markdown
So where are our estimates? Remember that, so far, we have only drawn samples from our posterior distribution. Our converged (burnt-in) MCMC samples are currently stored in the `rank_test_sampler.h5` file created by `mcmc`. To get our parameter estimates, we now still need to draw a sample from the MCMC object.
###Code
pars = mod.get_par('posterior', nsamples=250, full=True)
###Output
_____no_output_____
###Markdown
Now, let's have a look at the estimated shocks. We can do this by using `extract()`, which gives us the smoothed shocks. This method takes a variety of arguments, all of which have sensible default values. For example, here we set the number of parameter draws in each verification sample to 1. Note that this method also takes an optional seed argument, but we here continue to use the default seed 0. It is important to emphasise that `pydsge` seeks to separate the model's set-up (meta) data and its results. To store the Markov Chains, shocks and parameter estimates we use the `save_rdict()` method, below.
###Code
epsdf = mod.extract(pars, nsamples=1)
mod.save_rdict(epsdf)
###Output
_____no_output_____
###Markdown
Finally, we take a closer look at the MCMC estimation results. In particular, `mcmc_summary()` summarises the convergence behaviour of our draws from the posterior distribution.
###Code
mod.mcmc_summary()
###Output
_____no_output_____
###Markdown
---
###Code
# Just for the tutorial: Cleaning the temporary directory
shutil.rmtree(output_path)
###Output
_____no_output_____
###Markdown
Estimation TutorialIn this section, we dive into the topic of model estimation using **pydsge**. Let us, just for the sake of this tutorial, set up a temporary directory structure:
###Code
# Just for the tutorial: Setting up example structure
import tempfile
import os
import shutil # For clean-up of temporary directory
from pathlib import Path # For Windows/Unix compatibility
# Temporary output folder
output_path = Path(tempfile.gettempdir(), 'output')
if not os.path.isdir(output_path):
os.makedirs(output_path)
###Output
_____no_output_____
###Markdown
Parsing and loading the modelLet us first load the relevant packages. Besides the DSGE class we already know from [*getting started*](https://pydsge.readthedocs.io/en/latest/getting_started.html), we also want to import the `emcee` package. This will allow us to later specify the desired updating algorithms for sampling from the posterior distribution - we explain this in more detail below.
###Code
import pandas as pd
import numpy as np
import emcee # For specifying updating moves
from pydsge import DSGE, example
###Output
_____no_output_____
###Markdown
In this tutorial, we continue to use the example provided in `pydsge`. Like before, we specify the file paths of the model and the data. Please feel free to check-out both files, but from the previous tutorial you might remember that we're dealing with a five equations New Keynesian model and US quarterly data from 1995 to 2018.
###Code
yaml_file, data_file = example
###Output
_____no_output_____
###Markdown
We again parse the model and load-in the data. What is important is that we also specify a location where the (intermediate) output is stored. Here we assign the output folder, as discussed at the beginning. Note also that we can name the model and write a short description, which is very useful when working with several models.
###Code
# Parse the model
mod = DSGE.read(yaml_file)
# Give it a name
mod.name = 'Rank_tutorial'
mod.description = 'RANK, estimation tutorial'
# Storage location for output
mod.path = output_path
# Load data
df = pd.read_csv(data_file, parse_dates=['date'], index_col=['date'])
df.index.freq = 'Q' # let pandas know that this is quarterly data
###Output
_____no_output_____
###Markdown
Remember that since the Great Recession, the Federal Funds Rate has been below the ZLB. That is why, like in [*getting started*](https://pydsge.readthedocs.io/en/latest/getting_started.html), we adjust the observed interest rate, so that the data is "within reach" of our model.
###Code
# adjust elb
zlb = mod.get_par('elb_level')
rate = df['FFR']
df['FFR'] = np.maximum(rate,zlb)
mod.load_data(df, start='1998Q1')
###Output
_____no_output_____
###Markdown
Preparing the estimation After importing the packages and loading the data, we still need to tell pydsge how to carry out the estimation of our model. The "prep_estim" method can be used to accomplish this. It can be called without any arguments and sets up a non-linear model by default. However, not all defaults are always a good choice, and to showcase some of this functionality, we decide to specify several arguments here.To perform the estimation, `pydsge` uses a Transposed-Ensemble Kalman Filter (TEnKF). For general information on its implementation, see the [EconSieve documentation](https://econsieve.readthedocs.io/en/latest/) , and for more details on running the filter in `pydsge` check-out the [*getting started tutorial*](https://pydsge.readthedocs.io/en/latest/getting_started.html). Again, the default filter is non-linear, but we can opt for a linear one by setting the argument `linear` to `True`. To choose a custom number of ensemble members for the TEnKF, set `N` to a particular number (default is 300; for e.g. a medium scale model 400-500 is a good choice). We can also set a specific random seed with the argument `seed` (the default seed is `0`). To get additional information on the estimation process, we can set `verbose` to `True`. Conveniently, this information includes an overview of the parameters’ distribution, their means and standard deviations. Finally, if we already specified the covariance matrix of the measurement errors or want to reuse a previous result, we can load it into the `prep_estim` method by setting `Load.R` to `True`. If you run into problems you can turn parallelization off by setting `debug=True`.
###Code
mod.prep_estim(N=350, seed=0, verbose=True)
###Output
[estimation:] Model operational. 12 states, 3 observables, 3 shocks, 81 data points.
Adding parameters to the prior distribution...
parameter theta as beta (0.5, 0.1). Init @ 0.7813, with bounds (0.2, 0.95)...
parameter sigma as normal (1.5, 0.375). Init @ 1.2312, with bounds (0.25, 3)...
parameter phi_pi as normal (1.5, 0.25). Init @ 1.7985, with bounds (1.0, 3)...
parameter phi_y as normal (0.125, 0.05). Init @ 0.0893, with bounds (0.001, 0.5)...
parameter rho_u as beta (0.5, 0.2). Init @ 0.7, with bounds (0.01, 0.9999)...
parameter rho_r as beta (0.5, 0.2). Init @ 0.7, with bounds (0.01, 0.9999)...
parameter rho_z as beta (0.5, 0.2). Init @ 0.7, with bounds (0.01, 0.9999)...
parameter rho as beta (0.75, 0.1). Init @ 0.8, with bounds (0.5, 0.975)...
parameter sig_u as inv_gamma_dynare (0.1, 2). Init @ 0.5, with bounds (0.025, 5)...
parameter sig_r as inv_gamma_dynare (0.1, 2). Init @ 0.5, with bounds (0.01, 3)...
parameter sig_z as inv_gamma_dynare (0.1, 2). Init @ 0.5, with bounds (0.01, 3)...
[estimation:] 11 priors detected. Adding parameters to the prior distribution.
###Markdown
As in the filtering tutorial, we set the covariance of measurement errors to correspond to the variances of the data. Additionally, we adjust the measurement errors of the Federal Funds rate since it is perfectly observable.
###Code
mod.filter.R = mod.create_obs_cov(1e-1)
ind = mod.observables.index('FFR')
mod.filter.R[ind,ind] /= 1e1
###Output
_____no_output_____
###Markdown
Running the estimation Let's turn to the actual estimation. For a variety of pretty good reasons, `pydsge` uses *Ensemble Markov Chain Monte Carlo* (Ensemble-MCMC) integration to sample from the posterior distribution. For further information on Ensemble-MCMC, please refer to the `emcee` [website](https://emcee.readthedocs.io/en/stable/) and the additional resources provided there. We first require an initial ensemble, which is provided by `tmcmc`. `tmcmc` is a very sophisticated function with many options, but right now, all we are interested in is to obtain a sample that represents the prior distribution:
###Code
p0 = mod.tmcmc(200) # 200 is a bit of an overkill for this small model... rule of thumb: number_of_parameters times 4
###Output
100%|██████████| 200/200 [00:07<00:00, 27.00it/s]
###Markdown
The parameter draws are saved in the object `p0` as a numpy array in order to later pass them to our main sampling process.
###Code
mod.save()
###Output
[save_meta:] Metadata saved as '/tmp/output/Rank_tutorial_meta'
###Markdown
`mod.save()` saved the meta data of our model in the directory which we specified earlier in `mod.path`. This information is stored as an `.npz` file so that it is available even in the event of a crash and can be loaded anytime using `numpy.load()`. For posterior sampling using `mcmc` we have the option to set different "moves", i.e. coordinate updating algorithms for the walkers. As a wrapper for a lot of `emcee` functionality, `mcmc` can work with many different "moves" - for a list and implementation details please consult the `emcee` documentation. For using them here, specify them as a list of tuples, containing the type of move and its "weight". If no move is specified, `StretchMove` is used.
###Code
moves = [(emcee.moves.DEMove(), 0.8),
(emcee.moves.DESnookerMove(), 0.2),]
###Output
_____no_output_____
###Markdown
We now use the initial states derived above to conduct our full Bayesian estimation using `mcmc`. Note that, instead of using the specified initial ensemble, `mcmc` can identify previous runs or estimations, or the initial values of the "prior" section in the `*.yaml` can be used. The default number of sampling steps is 3000, which is parallelized by default. With `tune` we can determine the size of the Markov Chain we wish to retain to represent the posterior, i.e. after burn-in. This is not to be confused with the updating frequency, which only affects the number of summary statements `pydsge` reports during the estimation. With the option `lprob_seed` the user can choose how to set the random seed of the likelihood evaluation - here we use the seed specified in `prep_estim`.
###Code
mod.mcmc(p0,
moves=moves,
nsteps=3000,
tune=500,
update_freq=500,
lprob_seed='set'
) # this may take some time...
mod.save() # be sure to save the internal state!
###Output
[mcmc:] HDF backend at /tmp/output/Rank_tutorial_sampler.h5 already exists. Deleting...
###Markdown
Great. So where are our estimates? Our (hopefully) converged MCMC samples are currently stored in the `rank_test_sampler.h5` file created by `mcmc`. You can load and use this data using the methods introduced in the [*processing estimation results tutorial*](https://pydsge.readthedocs.io/en/latest/getting_started.html).
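For a quick first look without leaving this notebook, the helper methods used in the earlier part of this document can be applied to the stored chain (a sketch; the two calls mirror `mod.mcmc_summary()` and `mod.get_par('posterior', ...)` as they appear above).
###Code
# Summarize convergence and draw a sample of parameter vectors from the posterior
mod.mcmc_summary()
pars = mod.get_par('posterior', nsamples=250, full=True)
###Output
_____no_output_____
###Markdown
The cell below just removes the temporary folder created for this tutorial.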
###Code
# Just for the tutorial: Cleaning the temporary directory
shutil.rmtree(output_path)
###Output
_____no_output_____ |
2016winter/assignment1/features.ipynb | ###Markdown
Image features exercise*Complete and hand in this completed worksheet (including its outputs and any supporting code outside of the worksheet) with your assignment submission. For more details see the [assignments page](http://vision.stanford.edu/teaching/cs231n/assignments.html) on the course website.*We have seen that we can achieve reasonable performance on an image classification task by training a linear classifier on the pixels of the input image. In this exercise we will show that we can improve our classification performance by training linear classifiers not on raw pixels but on features that are computed from the raw pixels.All of your work for this exercise will be done in this notebook.
###Code
import random
import numpy as np
from cs231n.data_utils import load_CIFAR10
import matplotlib.pyplot as plt
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
###Output
_____no_output_____
###Markdown
Load dataSimilar to previous exercises, we will load CIFAR-10 data from disk.
###Code
from cs231n.features import color_histogram_hsv, hog_feature
def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000):
# Load the raw CIFAR-10 data
cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
# Subsample the data
mask = range(num_training, num_training + num_validation)
X_val = X_train[mask]
y_val = y_train[mask]
mask = range(num_training)
X_train = X_train[mask]
y_train = y_train[mask]
mask = range(num_test)
X_test = X_test[mask]
y_test = y_test[mask]
return X_train, y_train, X_val, y_val, X_test, y_test
X_train, y_train, X_val, y_val, X_test, y_test = get_CIFAR10_data()
###Output
_____no_output_____
###Markdown
Extract FeaturesFor each image we will compute a Histogram of OrientedGradients (HOG) as well as a color histogram using the hue channel in HSVcolor space. We form our final feature vector for each image by concatenatingthe HOG and color histogram feature vectors.Roughly speaking, HOG should capture the texture of the image while ignoringcolor information, and the color histogram represents the color of the inputimage while ignoring texture. As a result, we expect that using both togetherought to work better than using either alone. Verifying this assumption wouldbe a good thing to try for the bonus section.The `hog_feature` and `color_histogram_hsv` functions both operate on a singleimage and return a feature vector for that image. The extract_featuresfunction takes a set of images and a list of feature functions and evaluateseach feature function on each image, storing the results in a matrix whereeach column is the concatenation of all feature vectors for a single image.
###Code
from cs231n.features import *
num_color_bins = 10 # Number of bins in the color histogram
feature_fns = [hog_feature, lambda img: color_histogram_hsv(img, nbin=num_color_bins)]
X_train_feats = extract_features(X_train, feature_fns, verbose=True)
X_val_feats = extract_features(X_val, feature_fns)
X_test_feats = extract_features(X_test, feature_fns)
# Preprocessing: Subtract the mean feature
mean_feat = np.mean(X_train_feats, axis=0, keepdims=True)
X_train_feats -= mean_feat
X_val_feats -= mean_feat
X_test_feats -= mean_feat
# Preprocessing: Divide by standard deviation. This ensures that each feature
# has roughly the same scale.
std_feat = np.std(X_train_feats, axis=0, keepdims=True)
X_train_feats /= std_feat
X_val_feats /= std_feat
X_test_feats /= std_feat
# Preprocessing: Add a bias dimension
X_train_feats = np.hstack([X_train_feats, np.ones((X_train_feats.shape[0], 1))])
X_val_feats = np.hstack([X_val_feats, np.ones((X_val_feats.shape[0], 1))])
X_test_feats = np.hstack([X_test_feats, np.ones((X_test_feats.shape[0], 1))])
###Output
/home/anand/store/git/anandsaha/cs231n.assignments/2016winter/assignment1/cs231n/features.py:118: VisibleDeprecationWarning: using a non-integer number instead of an integer will result in an error in the future
orientation_histogram[:,:,i] = uniform_filter(temp_mag, size=(cx, cy))[cx/2::cx, cy/2::cy].T
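###Markdown
As a quick sanity check before training (a minimal sketch), the shapes of the extracted feature matrices can be printed; each row is one image's HOG plus color-histogram features plus a bias dimension.
###Code
# Confirm the dimensions of the train/val/test feature matrices
print('Train features:', X_train_feats.shape)
print('Val features:  ', X_val_feats.shape)
print('Test features: ', X_test_feats.shape)
###Output
_____no_output_____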
###Markdown
Train SVM on featuresUsing the multiclass SVM code developed earlier in the assignment, train SVMs on top of the features extracted above; this should achieve better results than training SVMs directly on top of raw pixels.
###Code
# Use the validation set to tune the learning rate and regularization strength
from cs231n.classifiers.linear_classifier import LinearSVM
learning_rates = [1e-9, 1e-8, 1e-7]
regularization_strengths = [1e3, 1e4, 1e5, 1e6, 1e7]
results = {}
best_val = -1
best_svm = None
pass
################################################################################
# TODO: #
# Use the validation set to set the learning rate and regularization strength. #
# This should be identical to the validation that you did for the SVM; save #
# the best trained classifer in best_svm. You might also want to play #
# with different numbers of bins in the color histogram. If you are careful #
# you should be able to get accuracy of near 0.44 on the validation set. #
################################################################################
for lr in learning_rates:
for rs in regularization_strengths:
svm = LinearSVM()
loss_hist = svm.train(X_train_feats, y_train, learning_rate=lr, reg=rs,
num_iters=5000, verbose=False)
p_val = svm.predict(X_val_feats)
val_acc = np.mean(y_val == p_val)
p_train = svm.predict(X_train_feats)
train_acc = np.mean(y_train == p_train)
results[(lr, rs)] = (train_acc, val_acc)
if val_acc > best_val:
best_val = val_acc
best_svm = svm
################################################################################
# END OF YOUR CODE #
################################################################################
# Print out results.
for lr, reg in sorted(results):
train_accuracy, val_accuracy = results[(lr, reg)]
print ('lr %e reg %e train accuracy: %f val accuracy: %f' % (
lr, reg, train_accuracy, val_accuracy))
print ('best validation accuracy achieved during cross-validation: %f' % best_val)
# Evaluate your trained SVM on the test set
y_test_pred = best_svm.predict(X_test_feats)
test_accuracy = np.mean(y_test == y_test_pred)
print (test_accuracy)
# An important way to gain intuition about how an algorithm works is to
# visualize the mistakes that it makes. In this visualization, we show examples
# of images that are misclassified by our current system. The first column
# shows images that our system labeled as "plane" but whose true label is
# something other than "plane".
examples_per_class = 8
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
for cls, cls_name in enumerate(classes):
idxs = np.where((y_test != cls) & (y_test_pred == cls))[0]
idxs = np.random.choice(idxs, examples_per_class, replace=False)
for i, idx in enumerate(idxs):
plt.subplot(examples_per_class, len(classes), i * len(classes) + cls + 1)
plt.imshow(X_test[idx].astype('uint8'))
plt.axis('off')
if i == 0:
plt.title(cls_name)
plt.show()
###Output
_____no_output_____
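###Markdown
Before answering the inline question, it can help to quantify where the SVM struggles; here is a small sketch computing per-class test accuracy from the predictions above.
###Code
# Per-class test accuracy, to complement the misclassification examples shown above
for cls, cls_name in enumerate(classes):
    cls_mask = (y_test == cls)
    print('%10s: %.3f' % (cls_name, np.mean(y_test_pred[cls_mask] == cls)))
###Output
_____no_output_____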
###Markdown
Inline question 1:Describe the misclassification results that you see. Do they make sense? Neural Network on image featuresEarlier in this assigment we saw that training a two-layer neural network on raw pixels achieved better classification performance than linear classifiers on raw pixels. In this notebook we have seen that linear classifiers on image features outperform linear classifiers on raw pixels. For completeness, we should also try training a neural network on image features. This approach should outperform all previous approaches: you should easily be able to achieve over 55% classification accuracy on the test set; our best model achieves about 60% classification accuracy.
###Code
print (X_train_feats.shape)
from cs231n.classifiers.neural_net import TwoLayerNet
input_dim = X_train_feats.shape[1]
hidden_dim = 500
num_classes = 10
net = TwoLayerNet(input_dim, hidden_dim, num_classes)
best_net = None
################################################################################
# TODO: Train a two-layer neural network on image features. You may want to #
# cross-validate various parameters as in previous sections. Store your best #
# model in the best_net variable. #
################################################################################
learning_rates = [1e-4, 1e-3, 1e-2]
regularization_factors = [10, 100, 1e3, 1e4, 1e5]
hidden_sizes = [50, 100, 150, 200]
epochs = [1000, 1500, 2000]
grid_search = [(lr, rf, hs, ep) for lr in learning_rates for rf in regularization_factors for hs in hidden_sizes for ep in epochs]
best_val_acc = -1
total_iter = len(grid_search)
idx = 1
for lr, rf, hs, ep in grid_search:
input_size = input_dim # 32 * 32 * 3
hidden_size = hs
num_classes = 10
net = TwoLayerNet(input_size, hidden_size, num_classes)
print(idx, '/', total_iter)
idx += 1
# Train the network
stats = net.train(X_train_feats, y_train, X_val_feats, y_val,
num_iters=ep, batch_size=200,
learning_rate=lr, learning_rate_decay=0.95,
reg=rf, verbose=False)
# Predict on the validation set
val_acc = (net.predict(X_val_feats) == y_val).mean()
if val_acc > best_val_acc:
best_net = net
best_val_acc = val_acc
print ('Found better validation accuracy: ', val_acc)
print(lr, rf, hs, ep)
################################################################################
# END OF YOUR CODE #
################################################################################
# Run your neural net classifier on the test set. You should be able to
# get more than 55% accuracy.
test_acc = (best_net.predict(X_test_feats) == y_test).mean()
print(test_acc)
###Output
_____no_output_____ |
Labs/Lab4/Lab4.ipynb | ###Markdown
*** Names: [Insert Your Names Here]*** Lab 4 - Plotting and Fitting with Hubble's Law
###Code
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
###Output
_____no_output_____
###Markdown
Exercise 1In the cell below, I have transcribed the data from Edwin Hubble's original 1929 paper "A relation between distance and radial velocity among extra-galactic nebulae", available [here](https://www.pnas.org/content/pnas/15/3/168.full.pdf).a. Open the original paper. Use it and your knowledge of Python code to decipher what each line in the next two code cells is doing. Add a comment at the top of each line stating what it is doing and/or where in the paper it came from. b. Create a scatter plot from Hubble's data. To make a scatterplot in python, you use the same plt.plot function that we used for line graphs last week except after the x and y arguments, you add a string describing the type of plotting symbol that you want. [Here](https://matplotlib.org/3.1.1/api/markers_api.html) is a list of plot symbols. Note that you can combine these with colors so, for example, 'go' is green circles and 'rx' is red xs. Give your plot a title and axis labels to match Hubble's original. c. Write code that will print each entry in the list obj_list on its own line (you will need this for exercise 2, below).
###Code
NGC_nos = [6822,598,221,224,5457,4736,5194,4449,4214,
3031,3627,4826,5236,1068,5055,7331,4258,
4151,4382,4472,4486,4649]
obj_list = ['SMC', 'LMC']
for i in np.arange(len(NGC_nos)):
obj_list.append('NGC '+str(NGC_nos[i]))
dists = np.array([0.032,0.034,0.214,0.263,0.275,0.275,0.45,0.5,0.5,0.63,0.8,0.9,0.9,
0.9,0.9,1.0,1.1,1.1,1.4,1.7,2.0,2.0,2.0,2.0])#Mpc
vels = np.array([170.,290,-130,-70,-185,-220,200,290,270,200,300,-30,650,150,500,920,450,500,500,960,500,850,800,1000]) #km/sec
#plot goes here
#loop to print names goes here
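# A possible sketch (not the official solution), assuming the dists, vels, and
# obj_list variables defined above in this cell.
plt.plot(dists, vels, 'ko')
plt.title('Velocity-Distance Relation Among Extra-Galactic Nebulae')
plt.xlabel('Distance (Mpc)')
plt.ylabel('Velocity (km/s)')
plt.show()
# print each object name on its own line (needed for the NED query in Exercise 2)
for name in obj_list:
    print(name)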
###Output
_____no_output_____
###Markdown
Exercise 2Now, let's pull modern data for Hubble's galaxies. Copy and paste the list from Exercise 1c into the query form [here](http://ned.ipac.caltech.edu/forms/gmd.html). ***Before you click "Submit Query"***, scroll to the check boxes at the bottom of the page and make sure to check ***only*** the following: * User Input Object Name * Redshift * Redshift Uncertainty And in the bottom right panel: * Metric Distance * Mean * Standard Deviation * Number of measurementsOpen the Macintosh application "TextEdit" and copy and paste the table into it. From the Format menu, select "make plain text" and then save it as cat.txt in the same folder as your Lab3 notebook.The code cells below will "read in" the data using a python package called Pandas that we will learn about in great detail in the coming weeks. For now, just execute the cell below, which will create python lists stored in variables with descriptive names from your cat.txt file. a)Describe in words at least two patterns that you note in the tabular data b) Make a histogram for each of the following quantities: redshift, redshift_uncert, dist, and dist_uncert. All your plots should have axis labels, and for the histograms you should play around with the number of bins until you can justify your choice for this value. Discuss and compare the shapes of the distributions for each of the quantities in general, qualitative terms. c) Plot the uncertainty in redshift as a function of redshift for these galaxies and the uncertainty in distance as a function of distance. What patterns do you notice, if any in the relationships between these quantities and their uncertainties?
###Code
import pandas
cols = ['Obj Name', 'Redshift', 'Redshift Uncert', 'Dist Mean (Mpc)', 'Dist Std Dev (Mpc)', 'Num Obs']
df = pandas.read_csv('cat.txt', delimiter ='|', skiprows=3, header = 0, names = cols, skipinitialspace=True)
redshift = df["Redshift"].tolist()
redshift_uncert = df["Redshift Uncert"].tolist()
dists2 = df["Dist Mean (Mpc)"].tolist()
dists2_uncert = df["Dist Std Dev (Mpc)"].tolist()
#display table (python "data frame" object)
df
###Output
_____no_output_____
###Markdown
***Answer to Part a***
###Code
#plots for part b - redshift
#plots for part b - redshift uncertainty
#plots for part b - distance
#plots for part b - distance uncertainty
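# A possible sketch for one of the part (b) histograms, assuming the redshift,
# redshift_uncert, dists2, and dists2_uncert lists built from cat.txt above;
# the bin count is just a starting point, and NaNs are dropped before plotting.
d = np.array(dists2, dtype=float)
plt.hist(d[np.isfinite(d)], bins=10)
plt.xlabel('Mean distance (Mpc)')
plt.ylabel('Number of galaxies')
plt.title('Distribution of modern distances')
plt.show()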
###Output
_____no_output_____
###Markdown
***Part B explanation***
###Code
#part c scatter plot 1
#part c scatter plot 2
###Output
_____no_output_____
###Markdown
***Part C explanation*** Exercise 3 The conversion between redshift (z) as provided in the database and recessional velocity as provided in Hubble's original paper is given by the formula below. $$z=\sqrt{\frac{1+\beta}{1-\beta}}-1$$where $\beta$=v/c. Solving for $\beta$, this formula can also be written as:$$\beta=\frac{(z+1)^2-1}{(z+1)^2+1}$$(a) Write a function with an appropriate docstring that applies this formula to an input array. Your function should return an array of velocities in km/sec. b) Apply your new function to your redshift and redshift uncertainty arrays here to translate them to "recessional velocities", as in Hubble's original plot \* Note that technically we should do some more complicated error propagation here, and we will discuss this later in this class. Luckily though, this formula is roughly equivalent to z = v/c, which means that errors in z and v can be directly translated.
###Code
#part a here
#part b here
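# A possible sketch (the function and variable names here are my own), assuming
# array-like inputs of redshifts; c is the speed of light in km/s.
def z_to_velocity(z):
    """Convert redshift(s) to recessional velocity in km/s via
    beta = ((z+1)**2 - 1) / ((z+1)**2 + 1), v = beta * c."""
    z = np.asarray(z, dtype=float)
    c = 2.998e5  # speed of light in km/s
    beta = ((z + 1)**2 - 1) / ((z + 1)**2 + 1)
    return beta * c

# part b: apply to the modern redshifts and (approximately) to their uncertainties
vels2 = z_to_velocity(redshift)
vels2_uncert = z_to_velocity(redshift_uncert)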
###Output
_____no_output_____
###Markdown
Exercise 4Make the following plots, with appropriate axis labels and titles. a) A plot of the new data similar to the one you made in exercise 1, only with error bars. Use the function plt.errorbar and inflate the errors in the modern recessional velocities by a factor of 10, because they are actually so small for these very nearby galaxies with today's measurement techniques that we can't even see them unless we inflate them. b) A plot showing both the new and old data overplotted, with different colors for each and a legend. c) A plot showing Hubble's distances vs. the new distances, with a "1 to 1" line overplotted d) A plot showing Hubble's recessional velocities vs. the new velocities, with a "1 to 1" line overplotted e) Discuss at least two trends that you see in the graphs and make a data-driven argument for how they might explain the discrepancy between the modern values and Hubble's. As always, your explanations need not be lengthy, but they should be ***clear and specific***.
###Code
#Plot a here
#Plot b here
#Plot c here
# Plot d here
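# A possible sketch for plot (a), assuming the vels2 and vels2_uncert arrays from
# the Exercise 3 sketch above; velocity errors are inflated 10x so they are visible.
plt.errorbar(dists2, vels2, yerr=10 * np.asarray(vels2_uncert, dtype=float),
             fmt='bo', ecolor='gray')
plt.xlabel('Distance (Mpc)')
plt.ylabel('Recessional velocity (km/s)')
plt.title('Modern data with (inflated) velocity error bars')
plt.show()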
###Output
_____no_output_____
###Markdown
***Part e explanations here*** ***We will do the exercise below in class next week and you should not attempt it now. However, it builds directly on this lab, so take some time with your lab mates to think about how you will approach it, since you will only have one 50min class period in which to answer it.*** In-Class Exercise for Next Week Time for fitting! Use the lecture notes on Model fitting as a guide to help you. a) Fit a linear model to Hubble's data and to the modern data. Make a plot showing both datasets and both fit lines. The plot should include a legend with both the points and the lines. The lines should be labeled in the legend with their equations. b) Now, let's fit a linear model to the modern data that takes the error bars in the recessional velocities into account in the fit. The problem here though is that the uncertainties in redshifts/recessional velocities are VERY small for these galaxies. So small in fact that when you overplot error bars on the data points you can't even see them (you can do this to verify). So to demonstrate differences between weighted and unweighted fits here, let's inflate them by a factor of 50. Overplot both the unweighted and weighted lines together with the modern data (with y error bars) and an appropriate legend. c) Discuss at least one trend or effect that you see in each graph. As always, your explanations need not be lengthy, but they should be ***clear, supported with references to the plot, and specific***. d) We won't do fitting with x and y error bars, but you can easily make a plot that shows errors in both quantities using plt.errorbar. Do this using the TRUE errors in velocity and distance (not the inflated values), and use your plot to make an argument about whether the "Hubble's Law" line is a good fit to the data.
###Code
#import relevant modules here
#define a linear model function here
#calculate the values for your two fits here and print their values (to label lines)
#plot 1 goes here
#weighted fit goes here
#plot with error bars goes here
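# A possible sketch of the unweighted vs. weighted linear fits, assuming the dists2,
# vels2, and vels2_uncert arrays from the sketches above (with the 50x inflation
# described in the prompt applied to the velocity uncertainties).
from scipy.optimize import curve_fit

def linear(x, m, b):
    return m * x + b

x = np.asarray(dists2, dtype=float)
y = np.asarray(vels2, dtype=float)
yerr = 50 * np.asarray(vels2_uncert, dtype=float)
good = np.isfinite(x) & np.isfinite(y) & np.isfinite(yerr) & (yerr > 0)
popt_unw, _ = curve_fit(linear, x[good], y[good])
popt_w, _ = curve_fit(linear, x[good], y[good], sigma=yerr[good], absolute_sigma=True)
print('unweighted slope, intercept:', popt_unw)
print('weighted   slope, intercept:', popt_w)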
###Output
_____no_output_____
###Markdown
***Discuss trends or effects seen here***
###Code
#plot with x AND y errors goes here
from IPython.core.display import HTML
def css_styling():
styles = open("../../custom.css", "r").read()
return HTML(styles)
css_styling()
###Output
_____no_output_____
###Markdown
Lab 4 - Using Python to Read and Display .fits Files
###Code
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
%matplotlib inline
###Output
_____no_output_____
###Markdown
The cell above imports all of the .fits file handling functions from the astropy python library. To call any function from the library, type fits.functionname. To see a list of available functions, click in the cell below, move your cursor to just after the period, and hit tab. You'll see a dropdown list of available functions.
###Code
## don't execute this cell. It's incomplete and won't do anything.
fits.
###Output
_____no_output_____
###Markdown
To start, let's read in a .fits file with fits.open
###Code
data = fits.open('M13-001_I.proc.fit')
###Output
_____no_output_____
###Markdown
We've created a python object called data above, but data is a special type of python object called an HDUlist, which is more or less just a list of python objects. The .fits file format can have many extensions besides just a data array and a header (other data arrays, other text files, etc.). In most cases in astronomy, and in probably all the cases for this class, we are only concerned with the first object in the list, which contains the main data array and header. This first object has index 0. One useful method for an HDUlist is .info, which will tell you about how many objects are in the list. If you execute the cell below, you can see that there's only one, called PRIMARY, and .info also lists its dimensions and the type of numbers stored in the array (64-bit floats)
###Code
data.info()
###Output
_____no_output_____
###Markdown
More useful may be displaying the header in python, just like we did in DS9. The header is associated with the Primary HDU, so you need to index data with [0] in order to see it.
###Code
data[0].header
###Output
_____no_output_____
###Markdown
To display a .fits image, we can use the function imshow, as in the cell below, but first we need to make an ordinary 2D array from the image data, which is what the first line is for.
###Code
image = data[0].data
plt.imshow(image)
###Output
_____no_output_____
###Markdown
You can probably tell that the image is of a globular cluster, but obviously the choice of scale, etc. is not ideal. If you want to know what the colorscale and its min and max look like, add the line plt.colorbar(), as below. At the same time, let's increase the size of the image so that we can see it better.
###Code
plt.figure(figsize=(15,7.5))
plt.imshow(image)
plt.colorbar()
###Output
_____no_output_____
###Markdown
Exercise 1Spend no more than 5 minutes playing with the imshow function's keywords cmap (for "color map", options below) and vmin and vmax, the minimum and maximum values for the colorbar. Stop when you think you can better see the stars in the image.
###Code
#plotting code goes here
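# A possible starting point, assuming the `image` array loaded above; the cmap and
# percentile-based vmin/vmax are just guesses to tweak until the stars stand out.
plt.figure(figsize=(15, 7.5))
plt.imshow(image, cmap='gray', vmin=np.percentile(image, 5),
           vmax=np.percentile(image, 99))
plt.colorbar()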
###Output
_____no_output_____
###Markdown
You can also zoom in on a region of the image by using the indices of the pixels you want. Note though, that the indices are in the form [ymin:ymax,xmin:xmax] AND that, if you look at the image above, python images display with the pixel 0,0 in the upper left corner rather than the lower left, as is more typical. Since most astronomical images are oriented for a lower left pixel origin (so that, for example, North is up and East is left in the image), you should generally use the option origin="lower" when displaying. For example, to zoom in on the center of the cluster:
###Code
plt.figure(figsize=(8,8))
plt.imshow(image[450:600,700:850], origin="lower")
plt.colorbar()
###Output
_____no_output_____
###Markdown
Exercise 2Create a nicely scaled image of the globular cluster that shows most of its stars and not a lot of empty space.
###Code
#code for image here
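# A possible sketch: zoom in on the cluster and stretch the color scale, assuming
# the `image` array from above; the pixel ranges and limits are reasonable guesses.
plt.figure(figsize=(8, 8))
plt.imshow(image[350:700, 600:950], origin='lower', cmap='gray',
           vmin=np.percentile(image, 5), vmax=np.percentile(image, 99.5))
plt.colorbar()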
###Output
_____no_output_____
###Markdown
Lab 4 - Practice with Advanced Data Structures and Pandas
###Code
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import pandas as pd
###Output
_____no_output_____
###Markdown
Exercise 1--------------Below is a list of information on 50 of the largest near-earth asteroids.(a) Given this list of asteroid information, find and list all asteroids with semi-major axis (a) within 0.2AU of earth, and with eccentricities (e) less than 0.5. (b) Note that the object below is a list (denoted with square brackets) of tuples (denoted with round brackets), and that the orbit class object is a dictionary. Create a dictionary where the name of each asteroid is the key, and the object stored under that key is a three element tuple (semi-major axis (AU), eccentricity, orbit class). (c) using the list (and not the dictionary), print the list of asteroids according to: (i) alphabetical by asteroid name (ii) in order of increasing semi-major axis (iii) in order of increasing eccentricity (iv) alphabetically by class (two-stage sorting) *hint: use the "sorted" function rather than object.sort, and check out the function "itemgetter" from the python module "operator"**Bonus points if you can get it to print with the columns lined up nicely!*
###Code
# Each element is (name, semi-major axis (AU), eccentricity, orbit class)
# source: http://ssd.jpl.nasa.gov/sbdb_query.cgi
Asteroids = [('Eros', 1.457916888347732, 0.2226769029627053, 'AMO'),
('Albert', 2.629584157344544, 0.551788195302116, 'AMO'),
('Alinda', 2.477642943521562, 0.5675993715753302, 'AMO'),
('Ganymed', 2.662242764279804, 0.5339300994578989, 'AMO'),
('Amor', 1.918987277620309, 0.4354863345648127, 'AMO'),
('Icarus', 1.077941311539208, 0.826950446001521, 'APO'),
('Betulia', 2.196489260519891, 0.4876246891992282, 'AMO'),
('Geographos', 1.245477192797457, 0.3355407124897842, 'APO'),
('Ivar', 1.862724540418448, 0.3968541470639658, 'AMO'),
('Toro', 1.367247622946547, 0.4358829575017499, 'APO'),
('Apollo', 1.470694262588244, 0.5598306817483757, 'APO'),
('Antinous', 2.258479598510079, 0.6070051516585434, 'APO'),
('Daedalus', 1.460912865705988, 0.6144629118218898, 'APO'),
('Cerberus', 1.079965807367047, 0.4668134997419173, 'APO'),
('Sisyphus', 1.893726635847921, 0.5383319204425762, 'APO'),
('Quetzalcoatl', 2.544270656955212, 0.5704591861565643, 'AMO'),
('Boreas', 2.271958775354725, 0.4499332278634067, 'AMO'),
('Cuyo', 2.150453953345012, 0.5041719257675564, 'AMO'),
('Anteros', 1.430262719980132, 0.2558054402785934, 'AMO'),
('Tezcatlipoca', 1.709753263222791, 0.3647772103513082, 'AMO'),
('Midas', 1.775954494579457, 0.6503697243919138, 'APO'),
('Baboquivari', 2.646202507670927, 0.5295611095751231, 'AMO'),
('Anza', 2.26415089613359, 0.5371603112900858, 'AMO'),
('Aten', 0.9668828078092987, 0.1827831025175614, 'ATE'),
('Bacchus', 1.078135348117527, 0.3495569270441645, 'APO'),
('Ra-Shalom', 0.8320425524852308, 0.4364726062545577, 'ATE'),
('Adonis', 1.874315684524321, 0.763949321566, 'APO'),
('Tantalus', 1.289997492877751, 0.2990853014998932, 'APO'),
('Aristaeus', 1.599511990737142, 0.5030618532252225, 'APO'),
('Oljato', 2.172056090036035, 0.7125729402616418, 'APO'),
('Pele', 2.291471988746353, 0.5115484924883255, 'AMO'),
('Hephaistos', 2.159619960333728, 0.8374146846143349, 'APO'),
('Orthos', 2.404988778495748, 0.6569133796135244, 'APO'),
('Hathor', 0.8442121506103012, 0.4498204013480316, 'ATE'),
('Beltrovata', 2.104690977122337, 0.413731105995413, 'AMO'),
('Seneca', 2.516402574514213, 0.5708728441169761, 'AMO'),
('Krok', 2.152545170235639, 0.4478259793515817, 'AMO'),
('Eger', 1.404478323548423, 0.3542971360331806, 'APO'),
('Florence', 1.768227407864309, 0.4227761019048867, 'AMO'),
('Nefertiti', 1.574493139339916, 0.283902719273878, 'AMO'),
('Phaethon', 1.271195939723604, 0.8898716672181355, 'APO'),
('Ul', 2.102493486378346, 0.3951143067760007, 'AMO'),
('Seleucus', 2.033331705805067, 0.4559159977082651, 'AMO'),
('McAuliffe', 1.878722427225527, 0.3691521497610656, 'AMO'),
('Syrinx', 2.469752836845105, 0.7441934504192601, 'APO'),
('Orpheus', 1.209727780883745, 0.3229034563257626, 'APO'),
('Khufu', 0.989473784873371, 0.468479627898914, 'ATE'),
('Verenia', 2.093231870619781, 0.4865133359612604, 'AMO'),
('Don Quixote', 4.221712367193639, 0.7130894892477316, 'AMO'),
('Mera', 1.644476057737928, 0.3201425983025733, 'AMO')]
orbit_class = {'AMO':'Amor', 'APO':'Apollo', 'ATE':'Aten'}
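# A possible sketch for parts (a)-(c), assuming the Asteroids list and orbit_class
# dict defined above; none of this is the unique "right" answer.
from operator import itemgetter

# (a) semi-major axis within 0.2 AU of Earth's (1 AU) and eccentricity below 0.5
near_earth = [ast for ast in Asteroids if abs(ast[1] - 1.0) < 0.2 and ast[2] < 0.5]
for name, a, e, cls in near_earth:
    print('%-12s a=%.3f AU  e=%.3f  %s' % (name, a, e, orbit_class[cls]))

# (b) dictionary keyed by asteroid name, storing (a, e, orbit class)
ast_dict = {name: (a, e, cls) for name, a, e, cls in Asteroids}

# (c) some of the requested orderings (itemgetter picks the tuple position to sort on)
by_name = sorted(Asteroids, key=itemgetter(0))
by_a = sorted(Asteroids, key=itemgetter(1))
by_ecc = sorted(Asteroids, key=itemgetter(2))
by_class_then_name = sorted(Asteroids, key=itemgetter(3, 0))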
###Output
_____no_output_____
###Markdown
Exercise 2 - Intro to the Exoplanet Database--------------For the second half of the semester, many or most of our in-class labs (and your second project) will revolve around a single dataset - the NASA Exoplanet Archive. We will explore this dataset in great detail and apply many of the statistics principles that were introduced in the first half of the course to it. Today you will begin just by exploring it. Your assignment is quite open ended. Simply explore the table for the rest of the class period and write/code up your results/investigations here. Find out basic information about the table and the types of entries in it. Compute descriptive statistics. Make plots. WORK WITH YOUR PARTNER to decide what to explore. Don't divvy up tasks.
###Code
#read in the data, skipping the first 73 rows of ancillary information
data=pd.read_csv('planets030619.csv', skiprows=72)
data.columns
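# Added sketch: a couple of quick, generic first looks at the table before plotting.
print(data.shape)
print(data.describe())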
from IPython.core.display import HTML
def css_styling():
styles = open("../../custom.css", "r").read()
return HTML(styles)
css_styling()
###Output
_____no_output_____
###Markdown
**Lab 4: Working with 'real' data**Getting the Data
###Code
# Added imports needed throughout this notebook (h5py, numpy, matplotlib, scipy.stats)
import h5py
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
hf = h5py.File('gammaray_lab4.h5', 'r')
hf.keys()
data = np.array(hf.get('data'))
data[:,0]
hf.close()
###Output
_____no_output_____
###Markdown
**Problem 1**We are looking at the data from a gamma-ray satellite orbiting in low Earth orbit. It takes a reading of the number of particles detected every 100 milliseconds, and is in an approximately 90 minute orbit. While it is looking for gamma-ray bursts, virtually all of the particles detected are background cosmic rays. **1)** Make a few plots, generally exploring your data and making sure you understand it. Give a high level description of the data features you see. Specifically comment on whether you see signal contamination in your data, and how you plan to build a background pdf().Data is in format| gps time | Solar Phase | Longitude | Particle Counts || ----------- | ----------- | ----------- | ----------- || ... | ... | ... | ... |
###Code
fig, (ax1,ax2,ax3) = plt.subplots(1,3)
ax1.hist2d(data[0],data[3], bins = [500,31])
ax2.hist2d(data[1],data[3], bins = [72,31])
ax3.hist2d(data[2],data[3], bins = [72,31])
ax1.title.set_text('Gps Time vs. Observation Counts')
ax2.title.set_text('Solar Phase vs. Observation Counts')
ax3.title.set_text('Longitude vs. Observation Counts')
plt.show()
a = data[2][:200000]>150
b = data[2][:200000]<300
fig, (ax1,ax2) = plt.subplots(1,2)
ax1.hist(data[3][:200000][np.logical_and(a,b)], bins = np.arange(0.5,20.5,1), density = True)
mu = np.mean(data[3][:200000][np.logical_and(a,b)])
ax1.axvline(mu, linewidth = 2, color = 'red')
ax1.title.set_text('histogram of data with longitude between 150 and 300 degrees, mean: '+ str(np.round(mu,3)))
x = range(0,21)
ax2.bar(x, stats.poisson.pmf(x,mu), width = 1)
plt.show()
###Output
_____no_output_____
###Markdown
The counts are roughly Poisson distributed, as seen in the 2d histograms above. When data is pulled from longitude values between 150 and 300 degrees, where the distribution looks relatively constant, it forms a Poisson distribution with a mean just greater than 6. Solar phase has no noticeable impact on the background distribution, but longitude does make a noticeable difference. The distribution is relatively constant from 150 degrees to around 300 degrees, but at about 320 degrees the mean jumps up and then slowly decreases as the longitude approaches 150 again. The discontinuity in the 2d histogram of longitude versus observation counts appears to be signal contamination. **2)** The background is not consistent across the dataset. Find and describe as accurately as you can how the background changes.
###Code
blocks = plt.hist2d(data[2],data[3], bins = [72,31])
mean = np.zeros(72)
for i in range(0,72):
mean[i] = sum(blocks[2][1:]*blocks[0][i,:]/sum(blocks[0][i,:]))
plt.plot(blocks[1][1:]-2.5,mean, linewidth = 2, color = 'black')
plt.title('Mean observations as a function of longitude overlaid on 2d histogram')
plt.colorbar()
plt.show()
fig,(ax1, ax2) = plt.subplots(1,2)
ax1.plot(np.append(blocks[1][1:],blocks[1][1:]+360),np.append(mean,mean), linewidth = 2, color = 'black')
ax1.title.set_text('Mean number of observations looped across two orbits')
ax1.set_xlim([315,675])
ax2.plot(np.append(blocks[1][1:],blocks[1][1:]+360),np.append(mean,mean), linewidth = 2, color = 'black')
ax2.title.set_text('logy plot')
ax2.set_yscale('log')
ax2.set_xlim([315,675])
plt.show()
###Output
_____no_output_____
###Markdown
As described in the previous part, the background has a longitudinal dependence. Above I have plotted the mean number of observations in each longitude bin, which gives an average for every 5 degree increment; this could be made more accurate by using more bins. The mean appears to decay roughly exponentially, which is made more obvious by looping the mean over two orbits. On a log-y scale, however, it is evident that the decay is only approximately exponential. **3)** Create a model for the background that includes time dependence, and explicitly compare your model to the data. How good is your model of the background?$$P_{k,\mathrm{obs}}(\phi) = \frac{\lambda(\phi)^k e^{-\lambda(\phi)}}{k!}$$where $\phi$ is the longitude. Since $\phi$ loops over time, an approach that relies on $\phi$ is also a time-dependent approach.
###Code
plt.rcParams["figure.figsize"] = (20,30)
fig, ((ax1,ax2),(ax3,ax4),(ax5,ax6),(ax7,ax8),(ax9,ax10),(ax11,ax12)) = plt.subplots(6,2)
x = range(0,25)
n = 0
ax1.bar(blocks[2][1:],blocks[0][n,:]/sum(blocks[0][n,:]), width = 1)
ax2.bar(x,stats.poisson.pmf(x,mean[n]),width = 1)
ax1.set_xlim([0,30])
ax1.axvline(mean[n], color = 'red', linewidth = 2)
ax2.set_xlim([0,30])
ax1.title.set_text('0 to 5 degree longitude')
ax2.title.set_text('Poisson with mean: ' + str(np.round(mean[n],4)))
n = 11
ax3.bar(blocks[2][1:],blocks[0][n,:]/sum(blocks[0][n,:]), width = 1)
ax4.bar(x,stats.poisson.pmf(x,mean[n]),width = 1)
ax3.axvline(mean[n], color = 'red', linewidth = 2)
ax3.set_xlim([0,30])
ax4.set_xlim([0,30])
ax3.title.set_text('55 to 60 degree longitude')
ax4.title.set_text('Poisson with mean: ' + str(np.round(mean[n],4)))
n = 23
ax5.bar(blocks[2][1:],blocks[0][n,:]/sum(blocks[0][n,:]), width = 1)
ax6.bar(x,stats.poisson.pmf(x,mean[n]),width = 1)
ax5.axvline(mean[n], color = 'red', linewidth = 2)
ax5.set_xlim([0,30])
ax6.set_xlim([0,30])
ax5.title.set_text('115 to 120 degree longitude')
ax6.title.set_text('Poisson with mean: ' + str(np.round(mean[n],4)))
n = 35
ax7.bar(blocks[2][1:],blocks[0][n,:]/sum(blocks[0][n,:]), width = 1)
ax8.bar(x,stats.poisson.pmf(x,mean[n]),width = 1)
ax7.axvline(mean[n], color = 'red', linewidth = 2)
ax7.set_xlim([0,30])
ax8.set_xlim([0,30])
ax7.title.set_text('175 to 180 degree longitude')
ax8.title.set_text('Poisson with mean: ' + str(np.round(mean[n],4)))
n = 47
ax9.bar(blocks[2][1:],blocks[0][n,:]/sum(blocks[0][n,:]), width = 1)
ax10.bar(x,stats.poisson.pmf(x,mean[n]),width = 1)
ax9.axvline(mean[n], color = 'red', linewidth = 2)
ax9.set_xlim([0,30])
ax10.set_xlim([0,30])
ax9.title.set_text('235 to 240 degree longitude')
ax10.title.set_text('Poisson with mean: ' + str(np.round(mean[n],4)))
n = 59
ax11.bar(blocks[2][1:],blocks[0][n,:]/sum(blocks[0][n,:]), width = 1)
ax12.bar(x,stats.poisson.pmf(x,mean[n]),width = 1)
ax11.axvline(mean[n], color = 'red', linewidth = 2)
ax11.set_xlim([0,30])
ax12.set_xlim([0,30])
ax11.title.set_text('295 to 300 degree longitude')
ax12.title.set_text('Poisson with mean: ' + str(np.round(mean[n],4)))
plt.show()
###Output
_____no_output_____
###Markdown
The plots above show slices from the 2d histogram of longitude versus number of observations, compared to a Poisson distribution with a mean corresponding to the mean of that slice. The general shape of the plots is close in all of the cases; the remaining variation could come from each slice representing a range of $5^{\circ}$. The line overlay is the mean of the corresponding Poisson from the background distribution model. **4)** Because the background varies, your discovery sensitivity threshold (how many particles you would need to see) also varies. What is the '5-sigma' threshold for a 100 millisecond GRB at different times?
###Code
prob_5sigma = stats.norm.cdf(5)
stats.poisson.ppf(prob_5sigma,mean[0])
print('Threshold for 5 sigma between 0 and 5 degrees longitude is:',stats.poisson.ppf(prob_5sigma,mean[0]), 'gamma ray bursts in 100 milliseconds')
print('Threshold for 5 sigma between 55 and 60 degrees longitude is:',stats.poisson.ppf(prob_5sigma,mean[11]), 'gamma ray bursts in 100 milliseconds')
print('Threshold for 5 sigma between 115 and 120 degrees longitude is:',stats.poisson.ppf(prob_5sigma,mean[23]), 'gamma ray bursts in 100 milliseconds')
print('Threshold for 5 sigma between 175 and 180 degrees longitude is:',stats.poisson.ppf(prob_5sigma,mean[35]), 'gamma ray bursts in 100 milliseconds')
print('Threshold for 5 sigma between 235 and 240 degrees longitude is:',stats.poisson.ppf(prob_5sigma,mean[47]), 'gamma ray bursts in 100 milliseconds')
print('Threshold for 5 sigma between 295 and 300 degrees longitude is:',stats.poisson.ppf(prob_5sigma,mean[59]), 'gamma ray bursts in 100 milliseconds')
print('Threshold for 5 sigma between 355 and 360 degrees longitude is:',stats.poisson.ppf(prob_5sigma,mean[71]), 'gamma ray bursts in 100 milliseconds')
###Output
Threshold for 5 sigma between 0 and 5 degrees longitude is: 29.0 gamma ray bursts in 100 milliseconds
Threshold for 5 sigma between 55 and 60 degrees longitude is: 26.0 gamma ray bursts in 100 milliseconds
Threshold for 5 sigma between 115 and 120 degrees longitude is: 24.0 gamma ray bursts in 100 milliseconds
Threshold for 5 sigma between 175 and 180 degrees longitude is: 24.0 gamma ray bursts in 100 milliseconds
Threshold for 5 sigma between 235 and 240 degrees longitude is: 23.0 gamma ray bursts in 100 milliseconds
Threshold for 5 sigma between 295 and 300 degrees longitude is: 23.0 gamma ray bursts in 100 milliseconds
Threshold for 5 sigma between 355 and 360 degrees longitude is: 29.0 gamma ray bursts in 100 milliseconds
###Markdown
**Problem 2**Looking for transient signal **1)** Download Data
###Code
hf = h5py.File('images.h5', 'r')
hf.keys()
images = np.array(hf.get('imagestack'))
image1 = np.array(hf.get('image1'))
hf.close()
###Output
_____no_output_____
###Markdown
**2)** Explore the data. Is there signal contamination? Is the background time dependent? Is it consistent spatially? Develop a plan to calculate your background pdf().
###Code
fig1, ax = plt.subplots(1,1)
fig2, ((ax1,ax2),(ax3,ax4),(ax5,ax6),(ax7,ax8),(ax9,ax10)) = plt.subplots(5,2)
np.shape(images)
mean = images.sum(axis = 2)/10
plt.rcParams["image.cmap"] = 'gray'
ax.imshow(images[:,:,1])
n = 0
ax1.hist(np.reshape(images[:,:,n]-mean,40000), bins = 50)
ax1.set_yscale('log')
n = 1
ax2.hist(np.reshape(images[:,:,n]-mean,40000), bins = 50)
ax2.set_yscale('log')
n = 2
ax3.hist(np.reshape(images[:,:,n]-mean,40000), bins = 50)
ax3.set_yscale('log')
n = 3
ax4.hist(np.reshape(images[:,:,n]-mean,40000), bins = 50)
ax4.set_yscale('log')
n = 4
ax5.hist(np.reshape(images[:,:,n]-mean,40000), bins = 50)
ax5.set_yscale('log')
n=5
ax6.hist(np.reshape(images[:,:,n]-mean,40000), bins = 50)
ax6.set_yscale('log')
n=6
ax7.hist(np.reshape(images[:,:,n]-mean,40000), bins = 50)
ax7.set_yscale('log')
n=7
ax8.hist(np.reshape(images[:,:,n]-mean,40000), bins = 50)
ax8.set_yscale('log')
n=8
ax9.hist(np.reshape(images[:,:,n]-mean,40000), bins = 50)
ax9.set_yscale('log')
n=9
ax10.hist(np.reshape(images[:,:,n]-mean,40000), bins = 50)
ax10.set_yscale('log')
plt.show()
st_dev = np.zeros(10)
means = np.zeros(10)
for k in range(0,10):
st_dev[k] = np.std(np.reshape(images[:,:,k]-mean,40000))
means[k] = np.mean(np.reshape(images[:,:,k]-mean,40000))
mean_std = np.mean(st_dev)
means = (np.mean(means))
print(means,mean_std)
###Output
_____no_output_____
###Markdown
Through data exploration, I found that the difference from the mean for each pixel is normally distributed. The distribution for each image is quite similar: the average mean is essentially zero and the standard deviation is about 0.532. So a normal distribution with those parameters should give a good representation of the background distribution of the difference from the mean. **3)** Using your background distribution, hunt for your signal (transient). Describe what you find.
###Code
plt.rcParams["figure.figsize"] = (20,15)
fig1, ax = plt.subplots(1,1)
detection = np.max(image1-mean)
print(np.max(image1-mean),np.argmax(image1-mean))
ax.hist(np.reshape(image1-mean,40000), bins = 50, density = True)
ax.set_yscale('log')
plt.show()
###Output
2.5467839559237793 15253
###Markdown
The histogram above shows the difference from the mean of the image we are looking for a transient signal in. The largest values that would show up in the regular distance from the mean histograms were only a bit more than 2 above the mean, but the maximum here is 2.55 above the mean. This outlier may well be our signal, but how significant would it be?
###Code
prob = stats.norm.cdf(detection,loc = means, scale = mean_std)
sigma = stats.norm.ppf(prob)
print(sigma)
###Output
4.787830846866464
###Markdown
Using the background distribution from the previous problem, the probability of the background producing such a signal corresponds to 4.8 sigma on the standard normal. This is not beyond our cutoff for discovery of 5 sigma. But what does this signal look like?
###Code
print(np.unravel_index(15253,(200,200)))
fig, ((ax1,ax2),(ax3,ax4)) = plt.subplots(2,2)
ax1.imshow(image1[66:86,43:63])
ax2.imshow(images[66:86,43:63,0])
ax3.imshow(images[66:86,43:63,1])
ax4.imshow(images[66:86,43:63,2])
ax1.title.set_text('Target Image')
plt.show()
###Output
(76, 53)
|
old_notebooks/market_movement_classification_full_and_cropped-Copy2.ipynb | ###Markdown
Setup Notebook
###Code
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:80% !important; }</style>"))
# Put these at the top of every notebook, to get automatic reloading and inline plotting
%reload_ext autoreload
%autoreload 2
%matplotlib inline
###Output
_____no_output_____
###Markdown
Predicting Price Movements of Cryptocurrencies - Using Convolutional Neural Networks to Classify 2D Images of Chart Data
###Code
# This file contains all the main external libs we'll use
from fastai.imports import *
from fastai.transforms import *
from fastai.conv_learner import *
from fastai.model import *
from fastai.dataset import *
from fastai.sgdr import *
from fastai.plots import *
# For downloading files
from IPython.display import FileLink, FileLinks
# For confusion matrix
from sklearn.metrics import confusion_matrix
PATH = 'data/btc/btcgraphs_cropped/'
!ls {PATH}
os.listdir(f'{PATH}train')
files = os.listdir(f'{PATH}train/DOWN')[:5]
files
img = plt.imread(f'{PATH}train/DOWN/{files[3]}')
print(f'{PATH}train/DOWN/{files[0]}')
print(f'{PATH}train/DOWN/{files[1]}')
plt.imshow(img)
FileLink(f'{PATH}train/DOWN/{files[3]}')
###Output
_____no_output_____
###Markdown
The Steps to Follow1. Enable data augmentation, and precompute=True1. Use `lr_find()` to find highest learning rate where loss is still clearly improving1. Train last layer from precomputed activations for 1-2 epochs1. Train last layer with data augmentation (i.e. precompute=False) for 2-3 epochs with cycle_len=11. Unfreeze all layers1. Set earlier layers to 3x-10x lower learning rate than next higher layer1. Use `lr_find()` again1. Train full network with cycle_mult=2 until over-fitting 0. Setup
###Code
arch = resnet34
sz = 480
batch_size = int(64)
###Output
_____no_output_____
###Markdown
1. Data Augmentation**Not using data augmentation this time** Starting without using data augmentation because I don't think it makes sense for these graphs; we don't need to generalize to slightly different angles. All plots will always be straight on and square in the frame.
###Code
tfms = tfms_from_model(arch, sz)
data = ImageClassifierData.from_paths(PATH, bs=batch_size, tfms=tfms,
trn_name='train', val_name='valid')#, test_name='test')
###Output
_____no_output_____
###Markdown
2. Choose a Learning Rate
###Code
learn = ConvLearner.pretrained(arch, data, precompute=True)
learn.save('00_pretrained_480')
# learn.precompute = True
learn.load('00_pretrained_480')
lrf = learn.lr_find()
learn.sched.plot_lr()
learn.sched.plot()
learn.save('01_lr_found_480')
###Output
_____no_output_____
###Markdown
3. Train Last Layer
###Code
# learn.precompute = True
learn.load('01_lr_found_480')
learn.fit(1e-4, 1, cycle_save_name='01_weights')
learn.save("02_trained_once_480")
###Output
_____no_output_____
###Markdown
Accuracy TODO: Do some tests on the accuracy of training on a single epoch. 4. Train Last Layer with Data Augmentation**Not actually using any augmentation, this is just a few more rounds of training**
###Code
# learn.precompute = True
learn.load("02_trained_once_480")
learn.precompute=False #I don't think this makes a difference without data augmentation
learn.fit(1e-4, 3, cycle_len=1, best_save_name="02_best_model", cycle_save_name='02_weights')
learn.save("03_trained_2x_480")
learn.load("trained_2_market_movement")
###Output
_____no_output_____
###Markdown
More accuracy test...
###Code
learn.unfreeze()
###Output
_____no_output_____
###Markdown
Using a relatively large learning rate to train the previous layers because this data set is not very similar to ImageNet
###Code
lr = np.array([0.0001/9, 0.0001/3, 0.00001])
learn.fit(lr, 3, cycle_len=1, cycle_mult=2, \
best_save_name="03_best_model", cycle_save_name='03_weights')
learn.save("trained_3_market_movement")
learn.load("trained_3_market_movement")
###Output
_____no_output_____
###Markdown
Look at Results
###Code
data.val_y
data.classes
# this gives prediction for validation set. Predictions are in log scale
log_preds = learn.predict()
log_preds.shape
log_preds[:10]
preds = np.argmax(log_preds, axis=1) # from log probabilities to 0 or 1
probs = np.exp(log_preds[:,1]) # probability of class 1 ('UP')
probs
probs[1]
def rand_by_mask(mask):
return np.random.choice(np.where(mask)[0], 4, replace=False)
def rand_by_correct(is_correct):
return rand_by_mask((preds == data.val_y)==is_correct)
def plot_val_with_title(idxs, title):
imgs = np.stack([data.val_ds[x][0] for x in idxs])
title_probs = [probs[x] for x in idxs]
print(title)
return plots(data.val_ds.denorm(imgs), rows=1, titles=title_probs)
def plots(ims, figsize=(12,6), rows=1, titles=None):
f = plt.figure(figsize=figsize)
for i in range(len(ims)):
sp = f.add_subplot(rows, len(ims)//rows, i+1)
sp.axis('Off')
if titles is not None: sp.set_title(titles[i], fontsize=16)
plt.imshow(ims[i])
def load_img_id(ds, idx):
return np.array(PIL.Image.open(PATH+ds.fnames[idx]))
def plot_val_with_title(idxs, title):
imgs = [load_img_id(data.val_ds,x) for x in idxs]
title_probs = [probs[x] for x in idxs]
print(title)
return plots(imgs, rows=1, titles=title_probs, figsize=(16,8))
plot_val_with_title(rand_by_correct(True), "Correctly classified")
def most_by_mask(mask, mult):
idxs = np.where(mask)[0]
return idxs[np.argsort(mult * probs[idxs])[:4]]
def most_by_correct(y, is_correct):
mult = -1 if (y==1)==is_correct else 1
return most_by_mask(((preds == data.val_y)==is_correct) & (data.val_y == y), mult)
plot_val_with_title(most_by_correct(0, True), "Most correct DOWN")
plot_val_with_title(most_by_correct(1, True), "Most correct UP")
plot_val_with_title(most_by_correct(0, False), "Most incorrect DOWN")
###Output
_____no_output_____
###Markdown
Analyze Results
###Code
data.val_y
log_preds = learn.predict()
preds = np.argmax(log_preds, axis=1) # from log probabilities to 0 or 1
probs = np.exp(log_preds[:,1]) # probability of class 1 ('UP')
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(data.val_y, preds)
plot_confusion_matrix(cm, data.classes)
cm
(cm[0][0]+cm[1][1])/(np.sum(cm))
np.sum(cm)-(42313)
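# Added sketch: per-class precision and recall from the confusion matrix above
# (sklearn convention: rows are true classes, columns are predicted classes,
# ordered as in data.classes).
precision = np.diag(cm) / cm.sum(axis=0)
recall = np.diag(cm) / cm.sum(axis=1)
for cls_name, p, r in zip(data.classes, precision, recall):
    print('%s: precision=%.3f recall=%.3f' % (cls_name, p, r))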
###Output
_____no_output_____ |
Clean_And_Analyze_Employee_Exit_Surveys.ipynb | ###Markdown
Clean and Analyze Employee Exit SurveysIn this project, I'll clean and analyze exit surveys from employees of the [Department of Education, Training and Employment (DETE)](https://en.wikipedia.org/wiki/Department_of_Education_and_Training_(Queensland)}) and the Technical and Further Education (TAFE) body of the Queensland government in Australia. The TAFE exit survey can be found [here](https://data.gov.au/dataset/ds-qld-89970a3b-182b-41ea-aea2-6f9f17b5907e/details?q=exit%20survey) and the survey for the DETE can be found [here](https://data.gov.au/dataset/ds-qld-fe96ff30-d157-4a81-851d-215f2a0fe26d/details?q=exit%20survey).In this project, I'll play the role of data analyst and pretend stakeholders want to know the following: - Are employees who only worked for the institutes for a short period of time resigning due to some kind of dissatisfaction? What about employees who have been there longer? - Are younger employees resigning due to some kind of dissatisfaction? What about older employees?They want us to combine the results for *both* surveys to answer these questions. However, although both used the same survey template, one of them customized some of the answers.Below is a preview of a couple columns we'll work with from the `dete_survey.csv`: - `ID`: An id used to identify the participant of the survey - `SeparationType`: The reason why the person's employment ended - `Cease Date`: The year or month the person's employment ended - `DETE Start Date`: The year the person began employment with the DETEBelow is a preview of a couple columns we'll work with from the `tafe_survey.csv`: - `Record ID`: An id used to identify the participant of the survey - `Reason for ceasing employment`: The reason why the person's employment ended - `LengthofServiceOverall. Overall Length of Service at Institute (in years)`: The length of the person's employment (in years) IntroductionLet's start by reading the datasets into pandas and exploring them.
###Code
# Read in the data
import pandas as pd
import numpy as np
dete_survey = pd.read_csv('dete_survey.csv')
# Quick exploration of the data
pd.options.display.max_columns = 150 # to avoid truncated output
dete_survey.head()
dete_survey.info()
#Read in the data
tafe_survey = pd.read_csv("tafe_survey.csv")
#Quick exploration of the data
tafe_survey.head()
tafe_survey.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 702 entries, 0 to 701
Data columns (total 72 columns):
Record ID 702 non-null float64
Institute 702 non-null object
WorkArea 702 non-null object
CESSATION YEAR 695 non-null float64
Reason for ceasing employment 701 non-null object
Contributing Factors. Career Move - Public Sector 437 non-null object
Contributing Factors. Career Move - Private Sector 437 non-null object
Contributing Factors. Career Move - Self-employment 437 non-null object
Contributing Factors. Ill Health 437 non-null object
Contributing Factors. Maternity/Family 437 non-null object
Contributing Factors. Dissatisfaction 437 non-null object
Contributing Factors. Job Dissatisfaction 437 non-null object
Contributing Factors. Interpersonal Conflict 437 non-null object
Contributing Factors. Study 437 non-null object
Contributing Factors. Travel 437 non-null object
Contributing Factors. Other 437 non-null object
Contributing Factors. NONE 437 non-null object
Main Factor. Which of these was the main factor for leaving? 113 non-null object
InstituteViews. Topic:1. I feel the senior leadership had a clear vision and direction 608 non-null object
InstituteViews. Topic:2. I was given access to skills training to help me do my job better 613 non-null object
InstituteViews. Topic:3. I was given adequate opportunities for personal development 610 non-null object
InstituteViews. Topic:4. I was given adequate opportunities for promotion within %Institute]Q25LBL% 608 non-null object
InstituteViews. Topic:5. I felt the salary for the job was right for the responsibilities I had 615 non-null object
InstituteViews. Topic:6. The organisation recognised when staff did good work 607 non-null object
InstituteViews. Topic:7. Management was generally supportive of me 614 non-null object
InstituteViews. Topic:8. Management was generally supportive of my team 608 non-null object
InstituteViews. Topic:9. I was kept informed of the changes in the organisation which would affect me 610 non-null object
InstituteViews. Topic:10. Staff morale was positive within the Institute 602 non-null object
InstituteViews. Topic:11. If I had a workplace issue it was dealt with quickly 601 non-null object
InstituteViews. Topic:12. If I had a workplace issue it was dealt with efficiently 597 non-null object
InstituteViews. Topic:13. If I had a workplace issue it was dealt with discreetly 601 non-null object
WorkUnitViews. Topic:14. I was satisfied with the quality of the management and supervision within my work unit 609 non-null object
WorkUnitViews. Topic:15. I worked well with my colleagues 605 non-null object
WorkUnitViews. Topic:16. My job was challenging and interesting 607 non-null object
WorkUnitViews. Topic:17. I was encouraged to use my initiative in the course of my work 610 non-null object
WorkUnitViews. Topic:18. I had sufficient contact with other people in my job 613 non-null object
WorkUnitViews. Topic:19. I was given adequate support and co-operation by my peers to enable me to do my job 609 non-null object
WorkUnitViews. Topic:20. I was able to use the full range of my skills in my job 609 non-null object
WorkUnitViews. Topic:21. I was able to use the full range of my abilities in my job. ; Category:Level of Agreement; Question:YOUR VIEWS ABOUT YOUR WORK UNIT] 608 non-null object
WorkUnitViews. Topic:22. I was able to use the full range of my knowledge in my job 608 non-null object
WorkUnitViews. Topic:23. My job provided sufficient variety 611 non-null object
WorkUnitViews. Topic:24. I was able to cope with the level of stress and pressure in my job 610 non-null object
WorkUnitViews. Topic:25. My job allowed me to balance the demands of work and family to my satisfaction 611 non-null object
WorkUnitViews. Topic:26. My supervisor gave me adequate personal recognition and feedback on my performance 606 non-null object
WorkUnitViews. Topic:27. My working environment was satisfactory e.g. sufficient space, good lighting, suitable seating and working area 610 non-null object
WorkUnitViews. Topic:28. I was given the opportunity to mentor and coach others in order for me to pass on my skills and knowledge prior to my cessation date 609 non-null object
WorkUnitViews. Topic:29. There was adequate communication between staff in my unit 603 non-null object
WorkUnitViews. Topic:30. Staff morale was positive within my work unit 606 non-null object
Induction. Did you undertake Workplace Induction? 619 non-null object
InductionInfo. Topic:Did you undertake a Corporate Induction? 432 non-null object
InductionInfo. Topic:Did you undertake a Institute Induction? 483 non-null object
InductionInfo. Topic: Did you undertake Team Induction? 440 non-null object
InductionInfo. Face to Face Topic:Did you undertake a Corporate Induction; Category:How it was conducted? 555 non-null object
InductionInfo. On-line Topic:Did you undertake a Corporate Induction; Category:How it was conducted? 555 non-null object
InductionInfo. Induction Manual Topic:Did you undertake a Corporate Induction? 555 non-null object
InductionInfo. Face to Face Topic:Did you undertake a Institute Induction? 530 non-null object
InductionInfo. On-line Topic:Did you undertake a Institute Induction? 555 non-null object
InductionInfo. Induction Manual Topic:Did you undertake a Institute Induction? 553 non-null object
InductionInfo. Face to Face Topic: Did you undertake Team Induction; Category? 555 non-null object
InductionInfo. On-line Topic: Did you undertake Team Induction?process you undertook and how it was conducted.] 555 non-null object
InductionInfo. Induction Manual Topic: Did you undertake Team Induction? 555 non-null object
Workplace. Topic:Did you and your Manager develop a Performance and Professional Development Plan (PPDP)? 608 non-null object
Workplace. Topic:Does your workplace promote a work culture free from all forms of unlawful discrimination? 594 non-null object
Workplace. Topic:Does your workplace promote and practice the principles of employment equity? 587 non-null object
Workplace. Topic:Does your workplace value the diversity of its employees? 586 non-null object
Workplace. Topic:Would you recommend the Institute as an employer to others? 581 non-null object
Gender. What is your Gender? 596 non-null object
CurrentAge. Current Age 596 non-null object
Employment Type. Employment Type 596 non-null object
Classification. Classification 596 non-null object
LengthofServiceOverall. Overall Length of Service at Institute (in years) 596 non-null object
LengthofServiceCurrent. Length of Service at current workplace (in years) 596 non-null object
dtypes: float64(2), object(70)
memory usage: 395.0+ KB
###Markdown
It's possible to make the following observations based on the work above: - The `dete_survey` dataframe contains `'Not Stated'` values that indicate values are missing, but they aren't represented as `NaN`. - Both the `dete_survey` and `tafe_survey` contain many columns that we don't need to complete our analysis. - Each dataframe contains many of the same columns, but the column names are different. - There are multiple columns/answers that indicate an employee resigned because they were dissatisfied. Identify Missing Values and Drop Unnecessary ColumnsFirst, I'll correct the `Not Stated` values and drop some of the columns I don't need for the analysis.
###Code
# Read in the data again, but this time read `Not Stated` values as `NaN`
dete_survey = pd.read_csv('dete_survey.csv', na_values='Not Stated')
# Quick exploration of the data
dete_survey.head()
# Remove columns we don't need for our analysis
dete_survey_updated = dete_survey.drop(dete_survey.columns[28:49], axis=1)
tafe_survey_updated = tafe_survey.drop(tafe_survey.columns[17:66], axis=1)
# Check that the columns were dropped
print(dete_survey_updated.columns)
print(tafe_survey_updated.columns)
###Output
Index(['ID', 'SeparationType', 'Cease Date', 'DETE Start Date',
'Role Start Date', 'Position', 'Classification', 'Region',
'Business Unit', 'Employment Status', 'Career move to public sector',
'Career move to private sector', 'Interpersonal conflicts',
'Job dissatisfaction', 'Dissatisfaction with the department',
'Physical work environment', 'Lack of recognition',
'Lack of job security', 'Work location', 'Employment conditions',
'Maternity/family', 'Relocation', 'Study/Travel', 'Ill Health',
'Traumatic incident', 'Work life balance', 'Workload',
'None of the above', 'Gender', 'Age', 'Aboriginal', 'Torres Strait',
'South Sea', 'Disability', 'NESB'],
dtype='object')
Index(['Record ID', 'Institute', 'WorkArea', 'CESSATION YEAR',
'Reason for ceasing employment',
'Contributing Factors. Career Move - Public Sector ',
'Contributing Factors. Career Move - Private Sector ',
'Contributing Factors. Career Move - Self-employment',
'Contributing Factors. Ill Health',
'Contributing Factors. Maternity/Family',
'Contributing Factors. Dissatisfaction',
'Contributing Factors. Job Dissatisfaction',
'Contributing Factors. Interpersonal Conflict',
'Contributing Factors. Study', 'Contributing Factors. Travel',
'Contributing Factors. Other', 'Contributing Factors. NONE',
'Gender. What is your Gender?', 'CurrentAge. Current Age',
'Employment Type. Employment Type', 'Classification. Classification',
'LengthofServiceOverall. Overall Length of Service at Institute (in years)',
'LengthofServiceCurrent. Length of Service at current workplace (in years)'],
dtype='object')
###Markdown
Rename ColumnsNext, we'll standardize the names of the columns we want to work with, because we eventually want to combine the dataframes.
###Code
# Clean the column names
dete_survey_updated.columns = dete_survey_updated.columns.str.lower().str.strip().str.replace(' ', '_')
# Check that the column names were updated correctly
dete_survey_updated.columns
# Update column names to match the names in dete_survey_updated
mapping = {'Record ID': 'id', 'CESSATION YEAR': 'cease_date', 'Reason for ceasing employment': 'separationtype', 'Gender. What is your Gender?': 'gender', 'CurrentAge. Current Age': 'age',
'Employment Type. Employment Type': 'employment_status',
'Classification. Classification': 'position',
'LengthofServiceOverall. Overall Length of Service at Institute (in years)': 'institute_service',
'LengthofServiceCurrent. Length of Service at current workplace (in years)': 'role_service'}
tafe_survey_updated = tafe_survey_updated.rename(mapping, axis = 1)
# Check that the specified column names were updated correctly
tafe_survey_updated.columns
###Output
_____no_output_____
###Markdown
Filter the DataFor this project, I'll only analyze survey respondents who *resigned*, so I'll only select separation types containing the string `'Resignation'`.
###Code
# Check the unique values for the separationtype column
tafe_survey_updated['separationtype'].value_counts()
# Check the unique values for the separationtype column
dete_survey_updated['separationtype'].value_counts()
# Update all separation types containing the word "resignation" to 'Resignation'
dete_survey_updated['separationtype'] = dete_survey_updated['separationtype'].str.split('-').str[0]
# Check the values in the separationtype column were updated correctly
dete_survey_updated['separationtype'].value_counts()
# Select only the resignation separation types from each dataframe
dete_resignations = dete_survey_updated[dete_survey_updated['separationtype'] == 'Resignation'].copy()
tafe_resignations = tafe_survey_updated[tafe_survey_updated['separationtype'] == 'Resignation'].copy()
###Output
_____no_output_____
###Markdown
Verify the Data Below, I clean and explore the `cease_date` and `dete_start_date` columns to make sure all of the years make sense. I'll use the following criteria: - Since the `cease_date` is the last year of the person's employment and the `dete_start_date` is the person's first year of employment, it wouldn't make sense to have years after the current date. - Given that most people in this field start working in their 20s, it's also unlikely that the `dete_start_date` was before the year 1940.
###Code
# Check the unique values
dete_resignations['cease_date'].value_counts()
# Extract the years and convert them to a float type
dete_resignations['cease_date'] = dete_resignations['cease_date'].str.split('/').str[-1]
dete_resignations['cease_date'] = dete_resignations['cease_date'].astype("float")
# Check the values again and look for outliers
dete_resignations['cease_date'].value_counts()
# Check the unique values and look for outliers
dete_resignations['dete_start_date'].value_counts().sort_values()
# Check the unique values
tafe_resignations['cease_date'].value_counts().sort_values()
###Output
_____no_output_____
###Markdown
Below are my findings:- The years in both dataframes don't completely align. The `tafe_survey_updated` dataframe contains some cease dates in 2009, but the `dete_survey_updated` dataframe does not. The `tafe_survey_updated` dataframe also contains many more cease dates in 2010 than the `dete_survey_updated` dataframe. Since I'm not concerned with analyzing the results by year, I'll leave them as is. Create a New ColumnSince my end goal is to answer the question below, I need a column containing the length of time an employee spent in their workplace, or years of service, in both dataframes. - End goal: Are employees who have only worked for the institutes for a short period of time resigning due to some kind of dissatisfaction? What about employees who have been at the job longer?The `tafe_resignations` dataframe already contains a "service" column, which I renamed to `institute_service`.Below, I calculate the years of service in the `dete_survey_updated` dataframe by subtracting the `dete_start_date` from the `cease_date` and create a new column named `institute_service`.
###Code
# Calculate the length of time an employee spent in their respective workplace and create a new column
dete_resignations['institute_service'] = dete_resignations['cease_date'] - dete_resignations['dete_start_date']
# Quick check of the result
dete_resignations['institute_service'].head()
###Output
_____no_output_____
###Markdown
Identify Dissatisfied EmployeesNext, I'll identify any employees who resigned because they were dissatisfied. Below are the columns I'll use to categorize employees as "dissatisfied" from each dataframe: 1. tafe_survey_updated: - `Contributing Factors. Dissatisfaction` - `Contributing Factors. Job Dissatisfaction` 2. dete_survey_updated: - `job_dissatisfaction` - `dissatisfaction_with_the_department` - `physical_work_environment` - `lack_of_recognition` - `lack_of_job_security` - `work_location` - `employment_conditions` - `work_life_balance` - `workload` If the employee indicated any of the factors above caused them to resign, I'll mark them as `dissatisfied` in a new column. After changes, the new `dissatisfied` column will contain just the following values: - `True`: indicates a person resigned because they were dissatisfied in some way - `False`: indicates a person resigned because of a reason other than dissatisfaction with the job - `NaN`: indicates the value is missing
###Code
# Check the unique values
tafe_resignations['Contributing Factors. Dissatisfaction'].value_counts()
# Check the unique values
tafe_resignations['Contributing Factors. Job Dissatisfaction'].value_counts()
# Update the values in the contributing factors columns to be either True, False, or NaN
def update_vals(x):
if x == '-':
return False
elif pd.isnull(x):
return np.nan
else:
return True
tafe_resignations['dissatisfied'] = tafe_resignations[['Contributing Factors. Dissatisfaction', 'Contributing Factors. Job Dissatisfaction']].applymap(update_vals).any(1, skipna=False)
tafe_resignations_up = tafe_resignations.copy()
# Check the unique values after the updates
tafe_resignations_up['dissatisfied'].value_counts(dropna=False)
# Update the values in columns related to dissatisfaction to be either True, False, or NaN
dete_resignations['dissatisfied'] = dete_resignations[['job_dissatisfaction',
'dissatisfaction_with_the_department', 'physical_work_environment',
'lack_of_recognition', 'lack_of_job_security', 'work_location',
'employment_conditions', 'work_life_balance',
'workload']].any(1, skipna=False)
dete_resignations_up = dete_resignations.copy()
dete_resignations_up['dissatisfied'].value_counts(dropna=False)
###Output
_____no_output_____
###Markdown
Combining the DataBelow, I'll add an institute column so that I can differentiate the data from each survey after I combine them. Then, I'll combine the dataframes and drop any remaining columns I don't need.
###Code
# Add an institute column
dete_resignations_up['institute'] = 'DETE'
tafe_resignations_up['institute'] = 'TAFE'
# Combine the dataframes
combined = pd.concat([dete_resignations_up, tafe_resignations_up], ignore_index=True)
# Verify the number of non null values in each column
combined.notnull().sum().sort_values()
# Drop columns with less than 500 non null values
combined_updated = combined.dropna(thresh = 500, axis=1).copy()
###Output
_____no_output_____
###Markdown
Clean the Service Column Next, I'll clean the `institute_service` column and categorize employees according to the following definitions: - New: Less than 3 years in the workplace - Experienced: 3-6 years in the workplace - Established: 7-10 years in the workplace - Veteran: 11 or more years in the workplace The analysis is based on [this article](https://www.businesswire.com/news/home/20171108006002/en/Age-Number-Engage-Employees-Career-Stage), which makes the argument that understanding employees' needs according to career stage instead of age is more effective.
###Code
# Check the unique values
combined_updated['institute_service'].value_counts(dropna=False)
# Extract the years of service and convert the type to float
combined_updated['institute_service_up'] = combined_updated['institute_service'].astype('str').str.extract(r'(\d+)')
combined_updated['institute_service_up'] = combined_updated['institute_service_up'].astype('float')
# Check the years extracted are correct
combined_updated['institute_service_up'].value_counts()
# Convert years of service to categories
def transform_service(val):
if val >= 11:
return "Veteran"
elif 7 <= val < 11:
return "Established"
elif 3 <= val < 7:
return "Experienced"
elif pd.isnull(val):
return np.nan
else:
return "New"
combined_updated['service_cat'] = combined_updated['institute_service_up'].apply(transform_service)
# Quick check of the update
combined_updated['service_cat'].value_counts()
###Output
_____no_output_____
###Markdown
Perform Some Analysis Finally, I'll replace the missing values in the `dissatisfied` column with the most frequent value, `False`. Then, I'll calculate the percentage of employees who resigned due to dissatisfaction in each `service_cat` group and plot the results.
###Code
# Verify the unique values
combined_updated['dissatisfied'].value_counts(dropna=False)
# Replace missing values with the most frequent value, False
combined_updated['dissatisfied'] = combined_updated['dissatisfied'].fillna(False)
# Calculate the percentage of employees who resigned due to dissatisfaction in each category
dis_pct = combined_updated.pivot_table(index='service_cat', values='dissatisfied')
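# Cross-check (sketch): pivot_table defaults to aggfunc='mean', and the mean of a boolean
# column is the fraction of True values, so the same percentages can also be read from a groupby
combined_updated.groupby('service_cat')['dissatisfied'].mean()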
# Plot the results
%matplotlib inline
dis_pct.plot(kind='bar', rot=30)
###Output
_____no_output_____ |
fastai_scratch_with_tpu_mnist_4_experiment2.ipynb | ###Markdown
###Code
import os
assert os.environ['COLAB_TPU_ADDR'], 'Make sure to select TPU from Edit > Notebook settings > Hardware accelerator'
!curl https://course.fast.ai/setup/colab | bash
VERSION = "20200325" #@param ["1.5" , "20200325", "nightly"]
!curl https://raw.githubusercontent.com/pytorch/xla/master/contrib/scripts/env-setup.py -o pytorch-xla-env-setup.py
!python pytorch-xla-env-setup.py --version $VERSION
!pip freeze | grep torchvision
!pip install fastcore --upgrade
!pip install fastai2 --upgrade
pip install fastai --upgrade
from google.colab import drive
drive.mount('/content/drive')
%cd /content/drive/My\ Drive/course-v4/
!pwd
!pip install -r requirements.txt
%cd nbs
!pwd
###Output
/content/drive/My Drive/course-v4/nbs
###Markdown
Start of import libraries
###Code
from fastai2.vision.all import *
from utils import *
path = untar_data(URLs.MNIST_SAMPLE)
Path.BASE_PATH = path
path.ls()
###Output
_____no_output_____
###Markdown
Import torch xla libraries
###Code
import torch
import torch_xla
import torch_xla.core.xla_model as xm
###Output
_____no_output_____
###Markdown
define load data tensors in cpu
###Code
def load_tensors(dpath):
return torch.stack([tensor(Image.open(o))
for o in dpath.ls().sorted()]
).float()/255.
def count_images(dpath):
return len(dpath.ls())
train_x = torch.cat([load_tensors(path/'train'/'3'),
load_tensors(path/'train'/'7')]).view(-1,28*28)
valid_x = torch.cat([load_tensors(path/'valid'/'3'),
load_tensors(path/'valid'/'7')]).view(-1,28*28)
(train_x.device, valid_x.device)
(train_x.shape, valid_x.shape)
train_y = tensor([1]*count_images(path/'train'/'3') + [0]*count_images(path/'train'/'7')).unsqueeze(1)
valid_y = tensor([1]*count_images(path/'valid'/'3') + [0]*count_images(path/'valid'/'7')).unsqueeze(1)
(train_y.shape, valid_y.shape, train_y.device, valid_y.device)
train_dl = DataLoader(list(zip(train_x, train_y)),batch_size=256)
valid_dl = DataLoader(list(zip(valid_x, valid_y)), batch_size=256)
###Output
_____no_output_____
###Markdown
Get TPU Device
###Code
tpu_dev = xm.xla_device()
tpu_dev
###Output
_____no_output_____
###Markdown
Fix Model
###Code
torch.manual_seed(42)
np.random.seed(42)
###Output
_____no_output_____
###Markdown
Loss function
###Code
# define loss function using sigmoid to return a val between 0.0 and 1
def mnist_loss_sigmoid(qpreds, qtargs):
qqpreds = qpreds.sigmoid()
return torch.where(qtargs==1, 1.-qqpreds, qqpreds).mean()
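# Quick sanity check (sketch): confident and correct logits should give a loss close to 0.
# e.g. logits (2., -2.) for targets (1, 0) -> sigmoid gives (0.88, 0.12) -> loss is ~0.12
mnist_loss_sigmoid(tensor([2.0, -2.0]), tensor([1, 0]))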
###Output
_____no_output_____
###Markdown
Forward Pass + Back Propagation
###Code
# forward prop + back prop
def calc_grad(xb,yb,m):
qpreds = m(xb)
qloss = mnist_loss_sigmoid(qpreds,yb)
qloss.backward()
###Output
_____no_output_____
###Markdown
Basic Optimizer
###Code
class BasicOptimizer:
def __init__(self, params,lr): self.lr, self.params = lr,list(params)
def step(self, *args, **kwargs):
for p in self.params: p.data -= p.grad.data * self.lr
def zero_grad(self, *args, **kwargs):
for p in self.params: p.grad = None
###Output
_____no_output_____
###Markdown
Train Epoch
###Code
def train_epoch(qdl,qmodel,qopt, dev):
for xb,yb in qdl:
calc_grad(xb.to(dev),yb.to(dev),qmodel)
# qopt.step()
# replace optimizer step with xla device step computation
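        # barrier=True issues the XLA step barrier, forcing the lazily recorded graph to be
        # compiled and executed on every optimizer step; it is needed here because a plain
        # DataLoader is used (torch_xla's ParallelLoader would otherwise issue the barrier)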
xm.optimizer_step(qopt, barrier=True)
qopt.zero_grad()
###Output
_____no_output_____
###Markdown
Compute Metrics
###Code
def batch_accuracy(qpreds, qtargets):
qqpreds = qpreds.sigmoid()
correct = (qqpreds > 0.5) == qtargets
return correct.float().mean()
def validate_epoch(qmodel, qdl, dev):
accs = [batch_accuracy(qmodel(xb.to(dev)), yb.to(dev)) for xb,yb in qdl]
return round(torch.stack(accs).mean().item(),4)
def train_model(qtrain_dl, qvalid_dl, qmodel, qopt, epochs, dev):
for i in range(epochs):
train_epoch(qtrain_dl, qmodel, qopt, dev)
print(validate_epoch(qmodel, qvalid_dl, dev), end=' ')
###Output
_____no_output_____
###Markdown
Build and Train Model
###Code
model = nn.Linear(28*28,1).to(tpu_dev)
optim = BasicOptimizer(model.parameters(),0.5)
# use basic Optimizer
train_model(train_dl, valid_dl, model, optim, 50, tpu_dev)
train_model(train_dl, valid_dl, model, SGD(model.parameters(),0.1), 50, tpu_dev)
simple_net = nn.Sequential(
nn.Linear(28*28,30),
nn.ReLU(),
nn.Linear(30,1)
).to(tpu_dev)
sgd_optim1 = SGD(simple_net.parameters(),0.1)
train_model(train_dl, valid_dl, simple_net, sgd_optim1, 50, tpu_dev)
resnet18_model = resnet18(pretrained=True).to(tpu_dev)
sgd_optim18 = SGD(resnet18_model.parameters(), 1e-2)
train_model(train_dl, valid_dl, resnet18_model, sgd_optim18, 1, tpu_dev)
###Output
_____no_output_____ |
demo/tables.ipynb | ###Markdown
Table widgets in the napari viewerBefore we talk about tables and widgets in napari, let's create a viewer, a simple test image and a labels layer:
###Code
import numpy as np
import napari
import pandas
from napari_skimage_regionprops import regionprops, add_table, get_table
viewer = napari.Viewer()
viewer.add_image(np.asarray([[1,2],[2,2]]))
viewer.add_labels(np.asarray([[1,2],[3,3]]))
###Output
_____no_output_____
###Markdown
Now, let's perform a measurement of `size` and `intensity` of the labeled objects in the given image. A table with results will be automatically added to the viewer
###Code
regionprops(
viewer.layers[0],
viewer.layers[1],
viewer,
size=True,
intensity=True
)
napari.utils.nbscreenshot(viewer)
###Output
_____no_output_____
###Markdown
We can also get the widget representing the table:
###Code
# The table is associated with a given labels layer:
labels = viewer.layers[1]
table = get_table(labels, viewer)
table
###Output
_____no_output_____
###Markdown
You can also read the content from the table as a dictionary. It is recommended to convert it into a pandas `DataFrame`:
###Code
content = pandas.DataFrame(table.get_content())
content
###Output
_____no_output_____
###Markdown
The content of this table can be changed programmatically. This also changes the `properties` of the associated layer.
###Code
new_values = {'A': [1, 2, 3],
'B': [4, 5, 6]
}
table.set_content(new_values)
napari.utils.nbscreenshot(viewer)
###Output
_____no_output_____
###Markdown
You can also append data to an existing table through the `append_content()` function: Suppose you have another measurement for the labels in your image, i.e. the "double area":
###Code
table.set_content(content.to_dict('list'))
double_area = {'label': content['label'].to_numpy(),
'Double area': content['area'].to_numpy() * 2.0}
###Output
_____no_output_____
###Markdown
You can now append this as a new column to the existing table:
###Code
table.append_content(double_area)
napari.utils.nbscreenshot(viewer)
###Output
_____no_output_____
###Markdown
*Note*: If the added data has columns in common with the existing table (for instance, the labels columns), the tables will be merged on the commonly available columns. If no common columns exist, the data will simply be added to the table and the non-intersecting rows/columns will be filled with NaN:
###Code
tripple_area = {'Tripple area': content['area'].to_numpy() * 3.0}
table.append_content(tripple_area)
napari.utils.nbscreenshot(viewer)
###Output
_____no_output_____
###Markdown
Note: Changing the label's `properties` does not automatically update the table...
###Code
new_values = {'C': [6, 7, 8],
'D': [9, 10, 11]
}
labels.properties = new_values
napari.utils.nbscreenshot(viewer)
###Output
_____no_output_____
###Markdown
But you can refresh the content:
###Code
table.update_content()
napari.utils.nbscreenshot(viewer)
###Output
_____no_output_____
###Markdown
You can remove the table from the viewer like this:
###Code
viewer.window.remove_dock_widget(table)
napari.utils.nbscreenshot(viewer)
###Output
_____no_output_____
###Markdown
Afterwards, the `get_table` method will return None:
###Code
get_table(labels, viewer)
###Output
_____no_output_____
###Markdown
To add the table again, just call `add_table` again. Note that the content of the properties of the labels have not been changed.
###Code
add_table(labels, viewer)
napari.utils.nbscreenshot(viewer)
###Output
_____no_output_____
###Markdown
Table widgets in the napari viewerBefore we talk about tables and widgets in napari, let's create a viewer, a simple test image and a labels layer:
###Code
import numpy as np
import napari
import pandas
from napari_skimage_regionprops import regionprops_table, add_table, get_table
viewer = napari.Viewer()
viewer.add_image(np.asarray([[1,2],[2,2]]))
viewer.add_labels(np.asarray([[1,2],[3,3]]))
###Output
_____no_output_____
###Markdown
Now, let's perform a measurement of `size` and `intensity` of the labeled objects in the given image. A table with results will be automatically added to the viewer
###Code
regionprops_table(
viewer.layers[0].data,
viewer.layers[1].data,
viewer,
size=True,
intensity=True
)
napari.utils.nbscreenshot(viewer)
###Output
_____no_output_____
###Markdown
We can also get the widget representing the table:
###Code
# The table is associated with a given labels layer:
labels = viewer.layers[1]
table = get_table(labels, viewer)
table
###Output
_____no_output_____
###Markdown
You can also read the content from the table as a dictionary. It is recommended to convert it into a pandas `DataFrame`:
###Code
content = pandas.DataFrame(table.get_content())
content
###Output
_____no_output_____
###Markdown
The content of this table can be changed programmatically. This also changes the `properties` of the associated layer.
###Code
new_values = {'A': [1, 2, 3],
'B': [4, 5, 6]
}
table.set_content(new_values)
napari.utils.nbscreenshot(viewer)
###Output
_____no_output_____
###Markdown
You can also append data to an existing table through the `append_content()` function: Suppose you have another measurement for the labels in your image, i.e. the "double area":
###Code
table.set_content(content.to_dict('list'))
double_area = {'label': content['label'].to_numpy(),
'Double area': content['area'].to_numpy() * 2.0}
###Output
_____no_output_____
###Markdown
You can now append this as a new column to the existing table:
###Code
table.append_content(double_area)
napari.utils.nbscreenshot(viewer)
###Output
_____no_output_____
###Markdown
*Note*: If the added data has columns in common with the existing table (for instance, the labels columns), the tables will be merged on the commonly available columns. If no common columns exist, the data will simply be added to the table and the non-intersecting rows/columns will be filled with NaN:
###Code
tripple_area = {'Tripple area': content['area'].to_numpy() * 3.0}
table.append_content(tripple_area)
napari.utils.nbscreenshot(viewer)
###Output
_____no_output_____
###Markdown
Note: Changing the label's `properties` does not automatically update the table...
###Code
new_values = {'C': [6, 7, 8],
'D': [9, 10, 11]
}
labels.properties = new_values
napari.utils.nbscreenshot(viewer)
###Output
_____no_output_____
###Markdown
But you can refresh the content:
###Code
table.update_content()
napari.utils.nbscreenshot(viewer)
###Output
_____no_output_____
###Markdown
You can remove the table from the viewer like this:
###Code
viewer.window.remove_dock_widget(table)
napari.utils.nbscreenshot(viewer)
###Output
_____no_output_____
###Markdown
Afterwards, the `get_table` method will return None:
###Code
get_table(labels, viewer)
###Output
_____no_output_____
###Markdown
To add the table again, just call `add_table` again. Note that the content of the properties of the labels have not been changed.
###Code
add_table(labels, viewer)
napari.utils.nbscreenshot(viewer)
###Output
_____no_output_____ |
Utility/svm_aa_rcv1.ipynb | ###Markdown
Gabriele Calarota
###Code
from google.colab import drive
drive.mount('/content/drive')
NUM_AUTHORS = 10
N_DOCS = 100
N_DOCS_TEST_SET = 0
NORMALIZE_WORDS_IN_DOCS = None
N_THRESHOLD = None
USE_BOW=False
USE_TFIDF=True
USE_W2V=False
PROJECT_NAME = "RCV1"
DATASET_FILENAME = 'Reuteurs/RCV1/rcv1_ccat_parsed_renamed.csv'
USE_TEXT_DISTORTION = False
K_text_distortion = 10000
!pip install -q tpot
import os
import re
from io import StringIO
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import tensorflow as tf
import xgboost as xgb
%matplotlib inline
from sklearn.svm import SVC
from keras.models import Sequential
from keras.layers.recurrent import LSTM, GRU
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.embeddings import Embedding
from keras.layers.normalization import BatchNormalization
from keras.utils import np_utils, to_categorical
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.callbacks import EarlyStopping
from sklearn import preprocessing, decomposition, model_selection, metrics
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import LabelEncoder
from sklearn.decomposition import TruncatedSVD
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB, BernoulliNB
from sklearn.decomposition import KernelPCA, PCA
from keras.layers import GlobalAvgPool1D, Conv1D, MaxPooling1D, Flatten, Bidirectional, SpatialDropout1D, AveragePooling1D
from keras.preprocessing import sequence, text
from keras.callbacks import EarlyStopping
from keras import backend as K
import nltk
from nltk import word_tokenize
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.corpus import wordnet as wn
from nltk.corpus import stopwords
from nltk.stem.snowball import SnowballStemmer
import tarfile
import zipfile
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
from sklearn.feature_selection import SelectFwe, f_classif
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline, make_union
from sklearn.svm import LinearSVC
from tpot.builtins import StackingEstimator
from tpot.export_utils import set_param_recursive
from sklearn.preprocessing import FunctionTransformer
from copy import copy
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, precision_recall_curve, classification_report
import nltk
nltk.download('stopwords')
nltk.download('punkt')
nltk.download('wordnet')
base_dir = '/content/drive/Shared drives/Tesi_AuthorshipAttribution_Calarota/Dataset/'
#base_dir = ""
def print_stats_dataset(dataframe):
num_text_total = len(dataframe)
df_group_by_author = dataframe.groupby('author')
df_count = df_group_by_author['articles'].count()
num_of_authors = df_group_by_author.size().reset_index(name='counts').count()
num_text_per_author_mean=df_group_by_author['articles'].count().mean()
text_length_per_author=df_group_by_author['articles'].apply(lambda x: np.mean(x.str.len())).reset_index(name='mean_len_text')
# dataframe['number_of_words'] = dataframe['articles'].apply(lambda x: len([word.lower() for sent in nltk.sent_tokenize(x) for word in nltk.word_tokenize(sent)]))
# print(f"Total words: {dataframe['number_of_words'].sum()}. Mean per article: {dataframe['number_of_words'].mean()}")
print(f"Numero di testi totale: {num_text_total}")
print(f"Numero di testi per autore in media: {num_text_per_author_mean}")
print(f"Numero di autori: {num_of_authors['author']}")
print(f"Lunghezza media testo per autore in media: {text_length_per_author['mean_len_text'].mean()}")
dataset = pd.read_csv(os.path.join(base_dir, DATASET_FILENAME))
dataset.head()
print_stats_dataset(dataset)
def get_top_ten_authors(dataframe, number_prune=10):
df_group_by_author = dataframe.groupby('author')
df_count = df_group_by_author['articles'].count()
num_of_authors = df_group_by_author.size().reset_index(name='counts')
sorted_authors = num_of_authors.sort_values(by='counts', ascending=False)
id_of_author = sorted_authors['author'].to_list()[:number_prune]
return id_of_author
list_of_top_ten_authors = get_top_ten_authors(dataset, number_prune=NUM_AUTHORS)
dataset = dataset[dataset.author.isin(list_of_top_ten_authors)]
print_stats_dataset(dataset)
def get_only_n_docs_for_authors(dataframe, n_docs=200, threshold_document_length=600):
if threshold_document_length is not None:
dataframe = dataframe[dataframe.articles.str.len() > threshold_document_length]
if n_docs is not None:
dataframe = dataframe.groupby('author').head(n_docs).reset_index(drop=True)
return dataframe
dataset = get_only_n_docs_for_authors(dataset, n_docs=N_DOCS+N_DOCS_TEST_SET, threshold_document_length=N_THRESHOLD)
print_stats_dataset(dataset)
dataset.head()
dataset['author'].value_counts().plot()
###Output
_____no_output_____
###Markdown
Train and test data are similarly distributed. An article can be attributed to an author based on the topic and content of the article, the author's writing style, or a mix of both. In my basic approach, I will try to solve the problem by leveraging the frequency of words in the article, which represents the topic of an article. For this, I will construct a TF-IDF matrix. I am not going to rely on the default tokenizer provided by scikit-learn; I will create one myself. The custom tokenizer involves three steps:* Tokenize the article into sentences and the sentences into words* Filter the tokens by length (assuming shorter words don't really say anything about the topic), by whether a word is a stop word, and by whether the word is present in the dictionary* Stem the words I am also going to construct a raw counts matrix, as some models like MultinomialNB often perform better on raw counts
###Code
def get_wk_bnc(k=2000):
bnc_df = pd.read_csv(os.path.join(base_dir, 'bnc_lemma_parsed.csv'))
select_df = bnc_df.head(k)
return list(select_df['word'].values)
if USE_TEXT_DISTORTION:
WK = get_wk_bnc(k=K_text_distortion)
print(len(WK))
def tokenize_and_stem(text):
"""
Below function tokenizes and lemmatizes the texts. It also does some cleaning by removing non dictionary words
This can be used to replace default tokenizer provided by feature extraction api of sklearn.
:param text: str
:return: list
"""
stemmer = SnowballStemmer("english")
stop_words = stopwords.words("english")
tokens = [word.lower() for sent in nltk.sent_tokenize(text) for word in nltk.word_tokenize(sent)]
filtered_tokens = []
for token in tokens:
if re.search(r'[a-zA-Z-]{4,}', token) and token not in stop_words and len(wn.synsets(token)) > 0:
token.strip()
filtered_tokens.append(token)
filtered_tokens = [stemmer.stem(token) for token in filtered_tokens]
return filtered_tokens
def simple_tokenizer(text):
text = re.sub('"([^"]*)"', '', text)
tokens = [word.lower() for sent in nltk.sent_tokenize(text) for word in nltk.word_tokenize(sent)]
filtered_tokens = []
for token in tokens:
if len(wn.synsets(token)) > 0:
token.strip()
filtered_tokens.append(token)
return filtered_tokens
def only_remove_quoting_tokenizer(text):
text = re.sub('"([^"]*)"', '', text)
tokens = [word.lower() for sent in nltk.sent_tokenize(text) for word in nltk.word_tokenize(sent)]
return tokens
def only_remove_quoting_tokenizer_with_threshold(text):
text = re.sub('"([^"]{1,})"', '', text)
tokens = [word.lower() for sent in nltk.sent_tokenize(text) for word in nltk.word_tokenize(sent)]
return tokens
def text_distortion_tokenizer_DV_MA(text):
text = re.sub('"([^"]*)"', '', text)
tokens = [word.lower().strip() for sent in nltk.sent_tokenize(text) for word in nltk.word_tokenize(sent)]
filtered_tokens = []
# implementing DV_MA
for token in tokens:
if token.lower() not in WK:
# replacing all digits in token with #
token = re.sub('\d', '#', token)
# replacing each letter in t with *
token = re.sub('[a-zA-Z]', '*', token)
filtered_tokens.append(token)
return filtered_tokens
def text_distortion_tokenizer_DV_SA(text):
text = re.sub('"([^"]*)"', '', text)
tokens = [word.lower().strip() for sent in nltk.sent_tokenize(text) for word in nltk.word_tokenize(sent)]
filtered_tokens = []
# implementing DV_MA
for token in tokens:
if token.lower() not in WK:
# replacing all digits in token with #
token = re.sub("\d+", "#", token)
# replacing each letter in t with *
token = re.sub('[a-zA-Z]+', '*', token)
filtered_tokens.append(token)
return filtered_tokens
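# Illustration (sketch) of the two distortion views on a single out-of-vocabulary token:
# DV_MA masks character by character, DV_SA collapses whole runs of digits/letters.
example_token = 'risk2020'  # hypothetical token, not taken from the corpus
print(re.sub('[a-zA-Z]', '*', re.sub(r'\d', '#', example_token)))    # DV_MA-style -> ****####
print(re.sub('[a-zA-Z]+', '*', re.sub(r'\d+', '#', example_token)))  # DV_SA-style -> *#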
# custom_tokenizer = tokenize_and_stem
# custom_tokenizer = text_distortion_tokenizer_DV_MA
# custom_tokenizer = text_distortion_tokenizer_DV_SA
# custom_tokenizer = simple_tokenizer
# custom_tokenizer = None
# custom_tokenizer = only_remove_quoting_tokenizer
custom_tokenizer = only_remove_quoting_tokenizer_with_threshold
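# Note: with use_idf=False below, the "TF-IDF" features are effectively L2-normalised
# term frequencies (no inverse-document-frequency reweighting is applied).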
tfidf_vec = TfidfVectorizer(max_df=0.75, max_features=None,
min_df=0.02, use_idf=False, tokenizer=custom_tokenizer, ngram_range=(1, 4))
counter_vect = CountVectorizer(max_df=0.8, max_features=10000,
min_df=0.02, tokenizer=custom_tokenizer, ngram_range=(1, 2))
# df_clean_test_set = dataset.groupby('author').head(N_DOCS_TEST_SET).reset_index(drop=True)
# df_clean_test_set.head()
# print_stats_dataset(df_clean_test_set)
# dataset = pd.concat([dataset,df_clean_test_set]).drop_duplicates(keep=False)
# print_stats_dataset(dataset)
if NORMALIZE_WORDS_IN_DOCS:
# dataset['articles'] = dataset['articles'].apply(lambda x: " ".join([word.lower() for sent in nltk.sent_tokenize(x) for word in nltk.word_tokenize(sent)][:NORMALIZE_WORDS_IN_DOCS]))
dataset['articles'] = dataset['articles'].apply(lambda x: " ".join(x.split(' ')[:NORMALIZE_WORDS_IN_DOCS]))
print_stats_dataset(dataset)
# Even split 50 & 50 per author and document
# df_train, df_test = train_test_split(dataset, test_size=0.2, random_state=0)
df_train = dataset.groupby('author').head(N_DOCS/2).reset_index(drop=True)
df_train.head()
print_stats_dataset(df_train)
# get difference between dataset and df_train for df_test
df_test = pd.concat([dataset,df_train]).drop_duplicates(keep=False)
df_test.head()
print_stats_dataset(df_test)
###Output
Total number of texts: 500
Average number of texts per author: 50.0
Number of authors: 10
Average text length per author: 3116.6679999999997
###Markdown
=> Extracting features
###Code
def tfidf_fit_transform():
# df_train, df_test = train_test_split(dataset, test_size=0.5, random_state=0)
tfidf_train = tfidf_vec.fit_transform(df_train['articles'])
tfidf_test = tfidf_vec.transform(df_test['articles'])
return tfidf_train, tfidf_test
def counter_fit_transform():
counter_train = counter_vect.fit_transform(df_train['articles'])
counter_test = counter_vect.transform(df_test['articles'])
return counter_train, counter_test
le = LabelEncoder()
df_train['target'] = le.fit_transform(df_train['author'])
# df_test['target'] = le.fit_transform(df_test['author'])
#df_test['author'] = df_test['author'].map(lambda s: '<unknown>' if s not in le.classes_ else s)
# le.classes_ = np.append(le.classes_, '<unknown>')
df_test['target'] = le.transform(df_test['author'])
###Output
_____no_output_____
###Markdown
While the models above tried to classify the articles based on the words and their frequencies, I will try to build a sequence model that captures the writing style of an author. However, I am dubious about the effectiveness of these models considering the limited amount of data. I will change the tokenizer by removing stemming, as I am going to replace words with Glove embeddings, which provide relevant word vectors for all inflected forms of a word. Below are the changes that I will make: Remove words inside quotes, as they don't contribute to capturing the writing style of the author. Not stemming the words, so that the words can be replaced with the corresponding Glove vectors. Not removing stop words, as some authors whose articles aren't published online may not hesitate to use a lot of stop words.
###Code
def build_custom_w2v_model():
import numpy as np
import keras.backend as K
from keras.models import Sequential
from keras.layers import Dense, Embedding, Lambda
from keras.utils import np_utils
from keras.preprocessing import sequence
from keras.preprocessing.text import Tokenizer
import gensim
vectorize = Tokenizer()
vectorize.fit_on_texts(dataset['articles'])
data = vectorize.texts_to_sequences(dataset['articles'])
total_vocab = sum(len(s) for s in data)
word_count = len(vectorize.word_index) + 1
window_size = 2
print(f"total vocab: {total_vocab}")
print(f"word count: {word_count}")
def cbow_model(data, window_size, total_vocab):
total_length = window_size*2
for text in data:
text_len = len(text)
for idx, word in enumerate(text):
context_word = []
target = []
begin = idx - window_size
end = idx + window_size + 1
context_word.append([text[i] for i in range(begin, end) if 0 <= i < text_len and i != idx])
target.append(word)
contextual = sequence.pad_sequences(context_word, maxlen=total_length)
final_target = np_utils.to_categorical(target, total_vocab)
yield(contextual, final_target)
model = Sequential()
model.add(Embedding(input_dim=total_vocab, output_dim=100, input_length=window_size*2))
model.add(Lambda(lambda x: K.mean(x, axis=1), output_shape=(100,)))
model.add(Dense(total_vocab, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')
for i in range(10):
cost = 0
for contextual, final_target in cbow_model(data, window_size, total_vocab):
cost += model.train_on_batch(contextual, final_target)
print(i, cost)
dimensions=100
vect_file = open(os.path.join(os.path.dirname(os.path.join(base_dir, DATASET_FILENAME)), 'vectors.txt') ,'w')
vect_file.write('{} {}\n'.format(total_vocab,dimensions))
weights = model.get_weights()[0]
for text, i in vectorize.word_index.items():
final_vec = ' '.join(map(str, list(weights[i, :])))
vect_file.write('{} {}\n'.format(text, final_vec))
vect_file.close()
if USE_W2V and not os.path.exists(os.path.join(os.path.dirname(os.path.join(base_dir, DATASET_FILENAME)), 'vectors.txt')):
build_custom_w2v_model()
from gensim.scripts.glove2word2vec import glove2word2vec
from gensim.models import KeyedVectors
class Word2VecVectorizer:
def __init__(self, model):
print("Loading in word vectors...")
self.word_vectors = model
print("Finished loading in word vectors")
def fit(self, data):
pass
def transform(self, data):
# determine the dimensionality of vectors
v = self.word_vectors.get_vector('king')
self.D = v.shape[0]
X = np.zeros((len(data), self.D))
n = 0
emptycount = 0
for sentence in data:
tokens = sentence.split()
vecs = []
m = 0
for word in tokens:
try:
# throws KeyError if word not found
vec = self.word_vectors.get_vector(word)
vecs.append(vec)
m += 1
except KeyError:
pass
if len(vecs) > 0:
vecs = np.array(vecs)
X[n] = vecs.mean(axis=0)
else:
emptycount += 1
n += 1
print("Numer of samples with no words found: %s / %s" % (emptycount, len(data)))
return X
def fit_transform(self, data):
self.fit(data)
return self.transform(data)
def w2v_fit_transform():
USE_GLOVE = False
if USE_GLOVE:
glove_path = os.path.join(base_dir, 'glove.6B.50d.txt')
word2vec_output_file = glove_path+'.word2vec'
if not os.path.exists(word2vec_output_file):
glove2word2vec(glove_path, word2vec_output_file)
else:
word2vec_output_file = os.path.join(os.path.dirname(
os.path.join(base_dir, DATASET_FILENAME)), 'vectors.txt')
model = KeyedVectors.load_word2vec_format(word2vec_output_file, binary=False)
# Set a word vectorizer
vectorizer = Word2VecVectorizer(model)
# Get the sentence embeddings for the train dataset
w2v_train = vectorizer.fit_transform(df_train['articles'])
# Get the sentence embeddings for the test dataset
w2v_test = vectorizer.transform(df_test['articles'])
print(w2v_train.shape,w2v_test.shape)
return w2v_train, w2v_test
def teapot_search():
from tpot import TPOTClassifier, TPOTRegressor
#tpot_settings = dict(verbosity=2, random_state = 1234, scoring = 'accuracy', warm_start = True, config_dict='TPOT sparse')
#auto_reg = TPOTRegressor(generations=2, population_size=5, **tpot_settings)
#auto_reg.fit(tfidf_train, df_train['target'])
#print(auto_reg.score(tfidf_test, df_test['target']))
#auto_reg.export('tpot_exported_pipeline.py')
#pipeline_optimizer = TPOTClassifier()
pipeline_optimizer = TPOTClassifier(generations=5, population_size=20, cv=5,
random_state=42, verbosity=2, scoring='accuracy', config_dict='TPOT sparse')
pipeline_optimizer.fit(tfidf_train, df_train['target'])
print(pipeline_optimizer.score(tfidf_test, df_test['target']))
pipeline_optimizer.export('tpot_exported_pipeline.py')
def use_features(tfidf=False, bow=False, w2v=False):
if tfidf:
return tfidf_fit_transform()
elif bow:
return counter_fit_transform()
elif w2v:
return w2v_fit_transform()
else:
return tfidf_train, tfidf_test
training_features, testing_features = use_features(tfidf=USE_TFIDF, bow=USE_BOW, w2v=USE_W2V)
training_target = df_train['target']
testing_target = df_test['target']
def double_pipeline():
# Average CV score on the training set was: 0.9173333333333333
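    # (note) make_union concatenates two feature blocks: the untouched input matrix, passed
    # through FunctionTransformer(copy), and the subset of columns kept by the SelectFwe
    # univariate filter; the LinearSVC is then trained on that concatenation.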
exported_pipeline = make_pipeline(
make_union(
FunctionTransformer(copy),
SelectFwe(score_func=f_classif, alpha=0.004)
),
LinearSVC(C=10.0, dual=True, loss="squared_hinge", penalty="l2", tol=0.000001, max_iter=10)
)
# Fix random state for all the steps in exported pipeline
set_param_recursive(exported_pipeline.steps, 'random_state', 42)
return exported_pipeline
def single_pipeline():
# Average CV score on the training set was: 0.6912
exported_pipeline = LinearSVC(C=20.0, dual=True, loss="hinge", penalty="l2", tol=0.0001)
# Fix random state in exported estimator
if hasattr(exported_pipeline, 'random_state'):
setattr(exported_pipeline, 'random_state', 42)
return exported_pipeline
def onevsrestclassifier():
from sklearn.multiclass import OneVsRestClassifier
return OneVsRestClassifier(LinearSVC(C=10.0, dual=True, loss="squared_hinge", penalty="l2", tol=0.000001, multi_class='ovr', random_state=42, max_iter=10))
def linearsdg():
from sklearn.linear_model import SGDClassifier
return SGDClassifier(alpha=0.00001, penalty='elasticnet', max_iter=50, random_state=42)
exported_pipeline = double_pipeline()
exported_pipeline.fit(training_features, training_target)
predicted = exported_pipeline.predict(testing_features)
accuracy_result = accuracy_score(testing_target, predicted)
precision_result = precision_score(testing_target, predicted, average='macro')
recall_result = recall_score(testing_target, predicted, average='macro')
f1_result = f1_score(testing_target, predicted, average='macro')
print(f"Accuracy: {accuracy_result}\nPrecision: {precision_result}\nRecall: {recall_result}\nF1_macro: {f1_result}")
print(classification_report(testing_target, predicted))
###Output
precision recall f1-score support
0 0.83 0.88 0.85 50
1 0.91 1.00 0.95 50
2 0.85 0.92 0.88 50
3 0.87 0.82 0.85 50
4 0.98 1.00 0.99 50
5 1.00 0.94 0.97 50
6 1.00 0.92 0.96 50
7 0.91 1.00 0.95 50
8 0.96 1.00 0.98 50
9 0.93 0.74 0.82 50
accuracy 0.92 500
macro avg 0.92 0.92 0.92 500
weighted avg 0.92 0.92 0.92 500
###Markdown
REPEATED CROSS VALIDATION
###Code
# evaluate a logistic regression model using repeated k-fold cross-validation
from numpy import mean
from numpy import std
from sklearn.model_selection import RepeatedKFold
from sklearn.model_selection import cross_val_score
tfidf_dataset = tfidf_vec.fit_transform(dataset['articles'])
labels_dataset = le.fit_transform(dataset['author'])
X = tfidf_dataset
y = labels_dataset
from sklearn.model_selection import StratifiedKFold, KFold, StratifiedShuffleSplit
import numpy as np
skf = StratifiedKFold(n_splits=5)
for train, test in skf.split(X, y):
print('train - {} | test - {}'.format(
np.bincount(y[train]), np.bincount(y[test])))
# evaluate model
scores = cross_val_score(exported_pipeline, X, y, scoring='accuracy', cv=skf, n_jobs=-1)
# report performance
print('Accuracy: %.3f (%.3f)' % (mean(scores), std(scores)))
from sklearn.model_selection import StratifiedKFold, KFold, StratifiedShuffleSplit
import numpy as np
skf = StratifiedShuffleSplit(n_splits=10, test_size=0.5, random_state=42)
for train, test in skf.split(X, y):
print('train - {} | test - {}'.format(
np.bincount(y[train]), np.bincount(y[test])))
# evaluate model
scores = cross_val_score(exported_pipeline, X, y, scoring='accuracy', cv=skf, n_jobs=-1)
# report performance
print('Accuracy: %.3f (%.3f)' % (mean(scores), std(scores)))
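# Sketch of a genuinely repeated scheme: RepeatedKFold is imported above but never used, so a
# stratified repeated variant is shown here as an assumption (5 splits repeated 10 times).
from sklearn.model_selection import RepeatedStratifiedKFold
rskf = RepeatedStratifiedKFold(n_splits=5, n_repeats=10, random_state=42)
scores = cross_val_score(exported_pipeline, X, y, scoring='accuracy', cv=rskf, n_jobs=-1)
print('Repeated accuracy: %.3f (%.3f)' % (mean(scores), std(scores)))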
# Set the parameters by cross-validation
def cross_validate():
tuned_parameters = [{'loss': ['hinge', 'squared_hinge'],
'C': [1, 10, 20, 100, 1000],
'dual': [True,False],
'tol': [0.001, 0.0001, 0.00001, 0.000001],
'max_iter': [10, 50, 100, 1000, 10000]
}]
clf = GridSearchCV(
LinearSVC(), tuned_parameters, scoring='accuracy', cv=skf, verbose=2
)
clf.fit(training_features, df_train['target'])
print("Best parameters set found on development set:")
print()
print(clf.best_params_)
print()
print("Grid scores on development set:")
print()
means = clf.cv_results_['mean_test_score']
stds = clf.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, clf.cv_results_['params']):
print("%0.3f (+/-%0.03f) for %r"
% (mean, std * 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = df_test['target'], clf.predict(testing_features)
print(classification_report(y_true, y_pred))
print()
!pip install -q python-telegram-bot
from telegram import Bot
bot = Bot(token="627493222:AAE8dqAHnrx9JJ3AGxDwb-x2eiJqoXVBM8o")
bot.send_message(text="Finished training SVM {} on {} authors with {} docs and {} threshold".format(PROJECT_NAME, NUM_AUTHORS, N_DOCS, N_THRESHOLD), chat_id="141928344")
###Output
[K |████████████████████████████████| 430kB 5.0MB/s
[K |████████████████████████████████| 61kB 5.8MB/s
[K |████████████████████████████████| 2.6MB 9.4MB/s
[?25h |
examples/rinna-gpt2-train/utils/model_predict.ipynb | ###Markdown
Model TestPerform model inference in the local environment. Download & load the model files from the Run's outputs folder to use them.
###Code
from azureml.core import Workspace
ws = Workspace.from_config()
# Parameters
RUN_ID = ""
run_test = ws.get_run(RUN_ID)
run_test.download_files(prefix='outputs/models/', output_directory='./')
from transformers import T5Tokenizer, AutoModelForCausalLM
tokenizer = T5Tokenizer.from_pretrained("outputs/models/", do_lower_case=True)
model = AutoModelForCausalLM.from_pretrained("outputs/models/")
input = tokenizer.encode("こんにちは、", return_tensors="pt")
output = model.generate(input, do_sample=True, max_length=100, num_return_sequences=10)
print(tokenizer.batch_decode(output))
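# (assumption) special tokens such as </s> can be dropped from the decoded strings by passing
# skip_special_tokens=True, e.g. tokenizer.batch_decode(output, skip_special_tokens=True)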
###Output
_____no_output_____ |
Biblioteca spaCy.ipynb | ###Markdown
The spaCy Library * a Python library for industrial-strength text processing* faster than NLTK* support for more than 61 languages
###Code
pip install -U spacy #pacote base
###Output
Collecting spacy
Downloading spacy-3.1.3-cp38-cp38-win_amd64.whl (12.0 MB)
Requirement already satisfied: tqdm<5.0.0,>=4.38.0 in c:\programdata\anaconda3\lib\site-packages (from spacy) (4.59.0)
Requirement already satisfied: setuptools in c:\programdata\anaconda3\lib\site-packages (from spacy) (52.0.0.post20210125)
Requirement already satisfied: packaging>=20.0 in c:\programdata\anaconda3\lib\site-packages (from spacy) (20.9)
Collecting srsly<3.0.0,>=2.4.1
Downloading srsly-2.4.1-cp38-cp38-win_amd64.whl (451 kB)
Collecting pydantic!=1.8,!=1.8.1,<1.9.0,>=1.7.4
Using cached pydantic-1.8.2-cp38-cp38-win_amd64.whl (2.0 MB)
Collecting wasabi<1.1.0,>=0.8.1
Using cached wasabi-0.8.2-py3-none-any.whl (23 kB)
Collecting spacy-legacy<3.1.0,>=3.0.8
Downloading spacy_legacy-3.0.8-py2.py3-none-any.whl (14 kB)
Collecting blis<0.8.0,>=0.4.0
Using cached blis-0.7.4-cp38-cp38-win_amd64.whl (6.5 MB)
Requirement already satisfied: requests<3.0.0,>=2.13.0 in c:\programdata\anaconda3\lib\site-packages (from spacy) (2.25.1)
Collecting typer<0.5.0,>=0.3.0
Downloading typer-0.4.0-py3-none-any.whl (27 kB)
Requirement already satisfied: jinja2 in c:\programdata\anaconda3\lib\site-packages (from spacy) (2.11.3)
Requirement already satisfied: numpy>=1.15.0 in c:\programdata\anaconda3\lib\site-packages (from spacy) (1.20.1)
Collecting catalogue<2.1.0,>=2.0.6
Downloading catalogue-2.0.6-py3-none-any.whl (17 kB)
Collecting thinc<8.1.0,>=8.0.9
Downloading thinc-8.0.10-cp38-cp38-win_amd64.whl (1.0 MB)
Collecting murmurhash<1.1.0,>=0.28.0
Using cached murmurhash-1.0.5-cp38-cp38-win_amd64.whl (21 kB)
Collecting preshed<3.1.0,>=3.0.2
Using cached preshed-3.0.5-cp38-cp38-win_amd64.whl (112 kB)
Collecting cymem<2.1.0,>=2.0.2
Using cached cymem-2.0.5-cp38-cp38-win_amd64.whl (36 kB)
Collecting pathy>=0.3.5
Downloading pathy-0.6.0-py3-none-any.whl (42 kB)
Requirement already satisfied: pyparsing>=2.0.2 in c:\programdata\anaconda3\lib\site-packages (from packaging>=20.0->spacy) (2.4.7)
Collecting smart-open<6.0.0,>=5.0.0
Downloading smart_open-5.2.1-py3-none-any.whl (58 kB)
Requirement already satisfied: typing-extensions>=3.7.4.3 in c:\programdata\anaconda3\lib\site-packages (from pydantic!=1.8,!=1.8.1,<1.9.0,>=1.7.4->spacy) (3.7.4.3)
Requirement already satisfied: urllib3<1.27,>=1.21.1 in c:\programdata\anaconda3\lib\site-packages (from requests<3.0.0,>=2.13.0->spacy) (1.26.4)
Requirement already satisfied: idna<3,>=2.5 in c:\programdata\anaconda3\lib\site-packages (from requests<3.0.0,>=2.13.0->spacy) (2.10)
Requirement already satisfied: chardet<5,>=3.0.2 in c:\programdata\anaconda3\lib\site-packages (from requests<3.0.0,>=2.13.0->spacy) (4.0.0)
Requirement already satisfied: certifi>=2017.4.17 in c:\programdata\anaconda3\lib\site-packages (from requests<3.0.0,>=2.13.0->spacy) (2020.12.5)
Requirement already satisfied: click<9.0.0,>=7.1.1 in c:\programdata\anaconda3\lib\site-packages (from typer<0.5.0,>=0.3.0->spacy) (7.1.2)
Requirement already satisfied: MarkupSafe>=0.23 in c:\programdata\anaconda3\lib\site-packages (from jinja2->spacy) (1.1.1)
Installing collected packages: murmurhash, cymem, catalogue, wasabi, typer, srsly, smart-open, pydantic, preshed, blis, thinc, spacy-legacy, pathy, spacy
Successfully installed blis-0.7.4 catalogue-2.0.6 cymem-2.0.5 murmurhash-1.0.5 pathy-0.6.0 preshed-3.0.5 pydantic-1.8.2 smart-open-5.2.1 spacy-3.1.3 spacy-legacy-3.0.8 srsly-2.4.1 thinc-8.0.10 typer-0.4.0 wasabi-0.8.2
Note: you may need to restart the kernel to use updated packages.
###Markdown
* additional package for lemmatization
###Code
pip install -U spacy-lookups-data
###Output
Collecting spacy-lookups-data
Downloading spacy_lookups_data-1.0.3-py2.py3-none-any.whl (98.5 MB)
Requirement already satisfied: setuptools in c:\programdata\anaconda3\lib\site-packages (from spacy-lookups-data) (52.0.0.post20210125)
Installing collected packages: spacy-lookups-data
Successfully installed spacy-lookups-data-1.0.3
Note: you may need to restart the kernel to use updated packages.
###Markdown
* language models for Portuguese:1. python -m spacy download pt_core_news_sm2. python -m spacy download pt_core_news_md3. python -m spacy download pt_core_news_lg* the more data the model uses, the slower it gets, and the accuracy changes
###Code
!python -m spacy download pt_core_news_lg
import spacy
texto = "Lorem 2021 Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industrys standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged. It was popularised in the 1960s with the release of Letraset sheets containing Lorem Ipsum passages, and more recently with desktop publishing software like Aldus PageMaker including versions of Lorem Ipsum."
nlp = spacy.load("pt_core_news_lg") #carrega o modelo escolhido
doc = nlp(texto)
###Output
_____no_output_____
###Markdown
spaCy - Tokenization
###Code
tokens = [token for token in doc] # iterates token by token over the parsed text
tokens
tokens = [token.orth_ for token in doc] # now shown as strings
tokens
###Output
_____no_output_____
###Markdown
* for each token in doc, add it to the list only if it is alphabetic
###Code
alpha_tokens = [token.orth_ for token in doc if token.is_alpha]
print("Alpha tokens: %s" % (alpha_tokens))
digit_tokens = [token.orth_ for token in doc if token.is_digit] # if digit
print("Digit tokens: %s" % (digit_tokens))
punct_tokens = [token.orth_ for token in doc if token.is_punct] # if punctuation
print("Punct tokens: %s" % (punct_tokens))
corpus = open("corpus_teste.txt").read()
print(corpus)
import spacy
nlp = spacy.load("pt_core_news_lg") # processes the text into tokens; strings are easier to manipulate
doc = nlp(corpus)
tokens = [token.orth_ for token in doc]
tokens
alpha_tokens = [token.orth_ for token in doc if token.is_alpha]
print("Alpha tokens: %s" % (alpha_tokens))
digit_tokens = [token.orth_ for token in doc if token.is_digit]
print("Digit tokens: %s" % (digit_tokens))
punct_tokens = [token.orth_ for token in doc if token.is_punct]
print("Punct tokens: %s" % (punct_tokens))
###Output
Alpha tokens: ['Giants', 'batem', 'os', 'Patriots', 'no', 'Super', 'Bowl', 'XLII', 'Azarões', 'acabam', 'com', 'a', 'invencibilidade', 'de', 'New', 'England', 'e', 'ficam', 'com', 'o', 'da', 'temporada', 'm', 'Atualizado', 'em', 'm', 'Com', 'um', 'passe', 'de', 'Eli', 'Manning', 'para', 'Plaxico', 'Burress', 'a', 'segundos', 'do', 'fim', 'o', 'New', 'York', 'Giants', 'anotou', 'o', 'touchdown', 'decisivo', 'e', 'derrubou', 'o', 'favorito', 'New', 'England', 'Patriots', 'por', 'a', 'neste', 'domingo', 'em', 'Glendale', 'no', 'Super', 'Bowl', 'XLII', 'O', 'resultado', 'uma', 'das', 'maiores', 'zebras', 'da', 'do', 'Super', 'Bowl', 'acabou', 'com', 'a', 'temporada', 'perfeita', 'de', 'Tom', 'Brady', 'e', 'companhia', 'que', 'esperavam', 'fazer', 'ao', 'levantar', 'o', 'trofÃ', 'u', 'da', 'NFL', 'sem', 'sofrer', 'uma', 'derrota', 'no', 'ano', 'A', 'dos', 'Giants', 'porÃ', 'm', 'tambÃ', 'm', 'ficarÃ', 'para', 'a', 'Pela', 'primeira', 'vez', 'quarterbacks', 'triunfam', 'no', 'Super', 'Bowl', 'em', 'temporadas', 'consecutivas', 'No', 'ano', 'passado', 'Peyton', 'Manning', 'de', 'Eli', 'chegou', 'ao', 'da', 'NFL', 'pelo', 'Indianapolis', 'Colts', 'A', 'partida', 'Os', 'Giants', 'com', 'a', 'posse', 'de', 'bola', 'e', 'mostraram', 'logo', 'que', 'iriam', 'alongar', 'ao', 'suas', 'posses', 'de', 'bola', 'Misturando', 'corridas', 'com', 'Brandon', 'Jacobs', 'e', 'passes', 'curtos', 'o', 'time', 'de', 'Nova', 'York', 'chegou', 'Ã', 'red', 'zone', 'logo', 'na', 'primeira', 'campanha', 'O', 'no', 'entanto', 'parou', 'na', 'linha', 'de', 'jardas', 'e', 'Lawrence', 'Tynes', 'converteu', 'o', 'field', 'goal', 'de', 'jardas', 'para', 'abrir', 'o', 'placar', 'Eli', 'Manning', 'e', 'companhia', 'ficaram', 'com', 'a', 'bola', 'mas', 'o', 'ataque', 'dos', 'Patriots', 'entrou', 'em', 'campo', 'frio', 'Logo', 'no', 'retorno', 'do', 'kickoff', 'o', 'running', 'back', 'Laurence', 'Maroney', 'jardas', 'deixando', 'Tom', 'Brady', 'em', 'boa', 'Com', 'passes', 'curtos', 'os', 'Patriots', 'chegaram', 'Ã', 'linha', 'de', 'jardas', 'e', 'a', 'uma', 'penalidade', 'interferência', 'de', 'passe', 'do', 'linebacker', 'Antonio', 'Pierce', 'a', 'linha', 'de', 'uma', 'jarda', 'Maroney', 'pelo', 'e', 'anotou', 'o', 'primeiro', 'touchdown', 'do', 'jogo', 'Os', 'Giants', 'pareciam', 'rumo', 'Ã', 'virada', 'na', 'campanha', 'seguinte', 'Manning', 'achou', 'Amani', 'Toomer', 'para', 'um', 'de', 'jardas', 'e', 'o', 'time', 'de', 'Nova', 'York', 'entrou', 'novamente', 'na', 'red', 'zone', 'Com', 'a', 'bola', 'na', 'linha', 'de', 'jardas', 'dos', 'Patriots', 'os', 'Giants', 'sofreram', 'um', 'revÃ', 'Manning', 'passou', 'para', 'Steve', 'Smith', 'que', 'soltou', 'a', 'bola', 'Ellis', 'Hobbs', 'aproveitou', 'tomou', 'a', 'posse', 'para', 'os', 'Patriots', 'e', 'jardas', 'A', 'defesa', 'de', 'Nova', 'York', 'manteve', 'o', 'jogo', 'equilibrado', 'Com', 'dois', 'sacks', 'seguidos', 'os', 'Giants', 'o', 'punt', 'e', 'recuperaram', 'a', 'bola', 'Mas', 'a', 'campanha', 'seguinte', 'provou', 'ser', 'outra', 'para', 'Nova', 'York', 'O', 'time', 'chegou', 'Ã', 'linha', 'de', 'jardas', 'mas', 'Manning', 'sofreu', 'um', 'sack', 'e', 'cometeu', 'um', 'fumble', 'e', 'o', 'ataque', 'voltou', 'para', 'a', 'linha', 'de', 'jardas', 'conseguindo', 'pontuar', 'mais', 'uma', 'vez', 'Os', 'Patriots', 'tiveram', 'uma', 'última', 'chance', 'de', 'marcar', 'antes', 'do', 'intervalo', 'mas', 'a', 'segundos', 'do', 'fim', 'do', 'segundo', 'Brady', 'foi', 'novamente', 'sacado', 'Desta', 'vez', 'ele', 'cometeu', 'o', 'fumble', 'e', 'os', 'Giants', 'tomaram', 'a', 
'posse', 'de', 'bola', 'Manning', 'tentou', 'um', 'passe', 'longo', 'de', 'jardas', 'nos', 'últimos', 'segundos', 'mas', 'teve', 'sucesso', 'O', 'jogo', 'continuou', 'amarrado', 'no', 'terceiro', 'quarto', 'com', 'as', 'defesas', 'levando', 'a', 'melhor', 'sobre', 'os', 'ataques', 'A', 'única', 'chance', 'de', 'pontuar', 'do', 'foi', 'dos', 'Patriots', 'que', 'chegaram', 'Ã', 'linha', 'de', 'jardas', 'dos', 'Giants', 'O', 'tÃ', 'cnico', 'Bill', 'Bellichick', 'porÃ', 'm', 'optou', 'por', 'uma', 'quarta', 'descida', 'em', 'vez', 'de', 'um', 'field', 'goal', 'Brady', 'tentou', 'um', 'passe', 'para', 'Jabar', 'Gaffney', 'mas', 'conseguiu', 'completar', 'O', 'último', 'arrasador', 'para', 'os', 'Giants', 'na', 'primeira', 'jogada', 'Manning', 'achou', 'o', 'tight', 'end', 'Kevin', 'Boss', 'para', 'um', 'de', 'jardas', 'que', 'deixou', 'o', 'time', 'na', 'linha', 'de', 'dos', 'Patriots', 'Outro', 'desta', 'vez', 'para', 'Steve', 'Smith', 'marcou', 'o', 'atÃ', 'a', 'linha', 'de', 'jardas', 'Duas', 'jogadas', 'depois', 'David', 'Tyree', 'pegou', 'um', 'passe', 'de', 'cinco', 'jardas', 'na', 'end', 'zone', 'para', 'anotar', 'o', 'touchdown', 'e', 'virar', 'o', 'jogo', 'Na', 'hora', 'da', 'o', 'ataque', 'dos', 'Patriots', 'voltou', 'a', 'funcionar', 'Com', 'uma', 'sÃ', 'rie', 'de', 'passes', 'curtos', 'e', 'variados', 'Brady', 'achou', 'Wes', 'Welker', 'Randy', 'Moss', 'e', 'Kevin', 'Faulk', 'seguidas', 'vezes', 'atÃ', 'chegar', 'Ã', 'red', 'zone', 'A', 'do', 'fim', 'o', 'quarterback', 'conectou', 'mais', 'uma', 'vez', 'com', 'Moss', 'que', 'se', 'desmarcou', 'e', 'ficou', 'livre', 'na', 'lateral', 'direita', 'da', 'end', 'zone', 'Quando', 'os', 'de', 'New', 'England', 'jÃ', 'comemoravam', 'a', 'o', 'inesperado', 'aconteceu', 'Em', 'uma', 'jogada', 'Eli', 'Manning', 'se', 'soltou', 'de', 'dois', 'marcadores', 'que', 'o', 'seguravam', 'pela', 'camisa', 'e', 'na', 'corrida', 'para', 'Amani', 'Toomer', 'O', 'wide', 'receiver', 'bem', 'marcado', 'saltou', 'e', 'conseguiu', 'a', 'fazer', 'para', 'um', 'de', 'jardas', 'deixando', 'os', 'Giants', 'na', 'linha', 'de', 'de', 'New', 'England', 'Quatro', 'jogadas', 'depois', 'a', 'segundos', 'do', 'fim', 'Manning', 'achou', 'Plaxico', 'Burress', 'na', 'end', 'zone', 'para', 'conseguir', 'o', 'touchdown', 'do']
Digit tokens: ['39', '17', '14', '17', '32', '43', '17', '38', '14', '23', '25', '39', '22', '50', '31', '45', '35', '12', '32', '24', '39']
Punct tokens: ['-', '-', '-', ',', ',', ',', '.', ',', ',', ',', '.', ',', ',', '¡', '.', ',', '.', ',', ',', ',', '.', ',', '.', ',', '.', ',', ',', '.', ',', '.', ',', ',', '.', ',', ',', '(', ')', ',', '.', '.', '.', ',', '.', ',', ',', '.', ',', ',', '.', '.', ',', '.', '.', ',', ',', ',', '.', ',', ',', ',', '.', ',', '.', ',', ',', ',', '.', ',', '.', ',', '.', ',', ',', '.', ',', '.', '.', ',', ',', ',', '.', ',', ',', '.', ',', '.', ',', '.', ',', ',', '.', ',', ',', '.', '¡', ',', '.', ',', ',', ',', '.', ',', ',', ',', '.', ',', ',', '.']
###Markdown
spaCy - Stemming and Lemmatization * no default stemmer* lemmatization is not 100% accurate
###Code
lemmas = [token.lemma_ for token in doc if token.pos_ == 'VERB'] # returns only the verbs
lemmas
###Output
_____no_output_____
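###Markdown
Since spaCy ships no stemmer of its own, a minimal workaround (sketch, assuming NLTK and its Portuguese Snowball stemmer are available) is to stem the spaCy tokens with NLTK:
###Code
# Sketch: stem the spaCy tokens with NLTK's Portuguese Snowball stemmer
from nltk.stem.snowball import SnowballStemmer
stemmer = SnowballStemmer('portuguese')
stems = [stemmer.stem(token.orth_) for token in doc if token.is_alpha]
stems[:20]
###Output
_____no_output_____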
###Markdown
spaCy - POS Tagger* [('word', 'word class')]
###Code
pos = [(token.orth_,token.pos_) for token in doc]
pos
###Output
_____no_output_____
###Markdown
Morphological analysis
###Code
morfologica = [(token.orth_,token.morph) for token in doc]
morfologica
###Output
_____no_output_____
###Markdown
Named entities
###Code
entidades_nomeadas = list(doc.ents) # entity names
print(entidades_nomeadas)
detalhes_entidades = [(entidade, entidade.label_) for entidade in doc.ents] # entity name, entity type (e.g. organization)
detalhes_entidades
###Output
[Giants, Patriots, Super Bowl XLII, Azarões, New England, Eli Manning, Plaxico Burress, New York Giants, New England Patriots, Glendale, Super Bowl XLII, Super Bowl, Tom Brady, ©u, NFL, Giants, ¡, Super Bowl, Peyton Manning, irmão de Eli, NFL, Indianapolis Colts, Giants, Brandon Jacobs, Nova York, à red zone, Lawrence Tynes, Eli Manning, Patriots, Laurence Maroney, Tom Brady, Patriots, à , Antonio Pierce, Giants, à , Manning, Amani Toomer, Nova York, red zone, Patriots, Giants, s. Manning, Steve Smith, Ellis Hobbs, Patriots, Nova York, Giants, Nova York, à , Manning, Patriots, Brady, Giants, Manning, últimos, única, Patriots, à , Giants, Bill Bellichick, Brady, Jabar Gaffney, Giants, Manning, Kevin Boss, Patriots, Steve Smith, David Tyree, end zone, Patriots, Brady, Wes Welker, Randy Moss, Kevin Faulk, à red zone, Moss, end zone, New England, ¡, Eli Manning, Amani Toomer, Giants, New England, Manning, Plaxico Burress, end zone]
###Markdown
displaCy* graphical visualization
###Code
html = spacy.displacy.render(doc, style ="ent")
output_path = open('entidades_nomeadas.html','w',encoding="utf-8")
output_path.write(html)
output_path.close()
###Output
_____no_output_____
###Markdown
Syntactic analysis* relations between the tokens
###Code
sintaxe = [(token.orth_,token.dep_) for token in doc]
print(sintaxe)
###Output
[('Giants', 'nsubj'), ('batem', 'ROOT'), ('os', 'det'), ('Patriots', 'obj'), ('no', 'case'), ('Super', 'obl'), ('Bowl', 'flat:name'), ('XLII', 'flat:name'), ('\n', 'dep'), ('Azarões', 'flat:name'), ('acabam', 'conj'), ('com', 'case'), ('a', 'det'), ('invencibilidade', 'obl'), ('de', 'case'), ('New', 'nmod'), ('England', 'flat:name'), ('e', 'cc'), ('ficam', 'conj'), ('com', 'case'), ('o', 'det'), ('tÃ\xadtulo', 'obl'), ('da', 'case'), ('temporada', 'nmod'), ('\n', 'case'), ('04/02/2008', 'obl'), ('-', 'punct'), ('01h07', 'nmod'), ('m', 'obj'), ('-', 'punct'), ('Atualizado', 'acl'), ('em', 'case'), ('04/02/2008', 'obl'), ('-', 'punct'), ('09h49', 'nsubj'), ('m', 'punct'), ('\n\n', 'parataxis'), ('Com', 'case'), ('um', 'det'), ('passe', 'nmod'), ('de', 'case'), ('Eli', 'nmod'), ('Manning', 'flat:name'), ('para', 'case'), ('Plaxico', 'nmod'), ('Burress', 'flat:name'), ('a', 'case'), ('39', 'nummod'), ('segundos', 'nmod'), ('do', 'case'), ('fim', 'nmod'), (',', 'punct'), ('o', 'det'), ('New', 'nsubj'), ('York', 'flat:name'), ('Giants', 'flat:name'), ('anotou', 'ROOT'), ('o', 'det'), ('touchdown', 'obj'), ('decisivo', 'amod'), ('e', 'cc'), ('derrubou', 'conj'), ('o', 'det'), ('favorito', 'obj'), ('New', 'appos'), ('England', 'flat:name'), ('Patriots', 'flat:name'), ('por', 'case'), ('17', 'obl'), ('a', 'case'), ('14', 'nmod'), ('neste', 'case'), ('domingo', 'nmod'), (',', 'punct'), ('em', 'case'), ('Glendale', 'obl'), (',', 'punct'), ('no', 'case'), ('Super', 'obl'), ('Bowl', 'flat:name'), ('XLII', 'flat:name'), ('.', 'punct'), ('O', 'det'), ('resultado', 'nsubj'), (',', 'punct'), ('uma', 'appos'), ('das', 'case'), ('maiores', 'amod'), ('zebras', 'nmod'), ('da', 'case'), ('história', 'nmod'), ('do', 'case'), ('Super', 'nmod'), ('Bowl', 'flat:name'), (',', 'punct'), ('acabou', 'ROOT'), ('com', 'case'), ('a', 'det'), ('temporada', 'obl'), ('perfeita', 'amod'), ('de', 'case'), ('Tom', 'nmod'), ('Brady', 'flat:name'), ('e', 'cc'), ('companhia', 'conj'), (',', 'punct'), ('que', 'nsubj'), ('esperavam', 'acl:relcl'), ('fazer', 'xcomp'), ('história', 'obj'), ('ao', 'mark'), ('levantar', 'advcl'), ('o', 'det'), ('trofÃ', 'obj'), ('©', 'flat:name'), ('u', 'flat:name'), ('da', 'case'), ('NFL', 'nmod'), ('sem', 'mark'), ('sofrer', 'advcl'), ('uma', 'det'), ('derrota', 'obj'), ('no', 'case'), ('ano', 'nmod'), ('.', 'punct'), ('\n\n', 'ROOT'), ('A', 'det'), ('vitória', 'nsubj'), ('dos', 'case'), ('Giants', 'nmod'), (',', 'punct'), ('porÃ', 'case'), ('©', 'nmod'), ('m', 'punct'), (',', 'punct'), ('tambÃ', 'obl'), ('©', 'flat:name'), ('m', 'punct'), ('ficarÃ', 'ROOT'), ('¡', 'xcomp'), ('para', 'case'), ('a', 'det'), ('história', 'obl'), ('.', 'punct'), ('Pela', 'case'), ('primeira', 'case'), ('vez', 'nmod'), (',', 'punct'), ('irmãos', 'nsubj'), ('quarterbacks', 'nsubj'), ('triunfam', 'ROOT'), ('no', 'case'), ('Super', 'obl'), ('Bowl', 'flat:name'), ('em', 'case'), ('temporadas', 'obl'), ('consecutivas', 'amod'), ('.', 'punct'), ('No', 'case'), ('ano', 'obl'), ('passado', 'amod'), (',', 'punct'), ('Peyton', 'appos'), ('Manning', 'flat:name'), (',', 'punct'), ('irmão', 'ROOT'), ('de', 'case'), ('Eli', 'nmod'), (',', 'punct'), ('chegou', 'parataxis'), ('ao', 'case'), ('tÃ\xadtulo', 'obl'), ('máximo', 'xcomp'), ('da', 'case'), ('NFL', 'obl'), ('pelo', 'case'), ('Indianapolis', 'obl'), ('Colts', 'flat:name'), ('.', 'punct'), ('\n\n', 'ROOT'), ('A', 'det'), ('partida', 'nsubj'), ('\n\n', 'advmod'), ('Os', 'det'), ('Giants', 'nsubj'), ('começaram', 'ROOT'), ('com', 'case'), ('a', 'det'), ('posse', 'obl'), ('de', 
'case'), ('bola', 'nmod'), (',', 'punct'), ('e', 'cc'), ('mostraram', 'conj'), ('logo', 'advmod'), ('que', 'mark'), ('iriam', 'aux'), ('alongar', 'ccomp'), ('ao', 'obj'), ('máximo', 'xcomp'), ('suas', 'det'), ('posses', 'obj'), ('de', 'case'), ('bola', 'nmod'), ('.', 'punct'), ('Misturando', 'advcl'), ('corridas', 'obj'), ('com', 'case'), ('Brandon', 'nmod'), ('Jacobs', 'flat:name'), ('e', 'cc'), ('passes', 'conj'), ('curtos', 'amod'), (',', 'punct'), ('o', 'det'), ('time', 'nsubj'), ('de', 'case'), ('Nova', 'nmod'), ('York', 'flat:name'), ('chegou', 'ROOT'), ('Ã', 'xcomp'), ('\xa0 ', 'advmod'), ('red', 'amod'), ('zone', 'obl'), ('logo', 'advmod'), ('na', 'case'), ('primeira', 'amod'), ('campanha', 'obl'), ('.', 'punct'), ('O', 'det'), ('avanço', 'nsubj'), (',', 'punct'), ('no', 'cc'), ('entanto', 'fixed'), (',', 'punct'), ('parou', 'ROOT'), ('na', 'case'), ('linha', 'obl'), ('de', 'case'), ('17', 'nummod'), ('jardas', 'nmod'), ('e', 'cc'), ('Lawrence', 'conj'), ('Tynes', 'flat:name'), ('converteu', 'conj'), ('o', 'det'), ('field', 'obj'), ('goal', 'flat:name'), ('de', 'case'), ('32', 'nummod'), ('jardas', 'nmod'), ('para', 'mark'), ('abrir', 'advcl'), ('o', 'det'), ('placar', 'obj'), ('.', 'punct'), ('\n\n', 'advmod'), ('Eli', 'nsubj'), ('Manning', 'flat:name'), ('e', 'cc'), ('companhia', 'conj'), ('ficaram', 'ROOT'), ('9m54s', 'obj'), ('com', 'case'), ('a', 'det'), ('bola', 'nmod'), (',', 'punct'), ('mas', 'cc'), ('o', 'det'), ('ataque', 'nsubj'), ('dos', 'case'), ('Patriots', 'nmod'), ('não', 'flat:name'), ('entrou', 'conj'), ('em', 'case'), ('campo', 'obl'), ('frio', 'amod'), ('.', 'punct'), ('Logo', 'advmod'), ('no', 'case'), ('retorno', 'obl'), ('do', 'case'), ('kickoff', 'nmod'), (',', 'punct'), ('o', 'det'), ('running', 'nsubj'), ('back', 'flat:name'), ('Laurence', 'appos'), ('Maroney', 'flat:name'), ('avançou', 'ROOT'), ('43', 'nummod'), ('jardas', 'nsubj'), (',', 'punct'), ('deixando', 'advcl'), ('Tom', 'obj'), ('Brady', 'flat:name'), ('em', 'case'), ('boa', 'amod'), ('posição', 'nmod'), ('.', 'punct'), ('Com', 'case'), ('passes', 'obl'), ('curtos', 'amod'), (',', 'punct'), ('os', 'det'), ('Patriots', 'nsubj'), ('chegaram', 'ROOT'), ('Ã', 'xcomp'), ('\xa0 ', 'amod'), ('linha', 'obj'), ('de', 'case'), ('17', 'nummod'), ('jardas', 'nmod'), ('e', 'cc'), (',', 'punct'), ('graças', 'conj'), ('a', 'case'), ('uma', 'det'), ('penalidade', 'obj'), ('(', 'punct'), ('interferência', 'appos'), ('de', 'case'), ('passe', 'nmod'), (')', 'punct'), ('do', 'case'), ('linebacker', 'nmod'), ('Antonio', 'appos'), ('Pierce', 'flat:name'), (',', 'punct'), ('alcançaram', 'parataxis'), ('a', 'det'), ('linha', 'nsubj'), ('de', 'case'), ('uma', 'det'), ('jarda', 'nmod'), ('.', 'punct'), ('Maroney', 'nsubj'), ('avançou', 'ROOT'), ('pelo', 'case'), ('chão', 'obl'), ('e', 'cc'), ('anotou', 'conj'), ('o', 'det'), ('primeiro', 'amod'), ('touchdown', 'obj'), ('do', 'case'), ('jogo', 'nmod'), ('.', 'punct'), ('\n\n', 'ROOT'), ('Os', 'det'), ('Giants', 'nsubj'), ('pareciam', 'ROOT'), ('rumo', 'advmod'), ('Ã', 'cop'), ('\xa0 ', 'advmod'), ('virada', 'xcomp'), ('na', 'case'), ('campanha', 'nmod'), ('seguinte', 'amod'), ('.', 'punct'), ('Manning', 'nsubj'), ('achou', 'ROOT'), ('Amani', 'obj'), ('Toomer', 'flat:name'), ('para', 'case'), ('um', 'det'), ('avanço', 'obl'), ('de', 'case'), ('38', 'nummod'), ('jardas', 'nmod'), (',', 'punct'), ('e', 'cc'), ('o', 'det'), ('time', 'nsubj'), ('de', 'case'), ('Nova', 'nmod'), ('York', 'flat:name'), ('entrou', 'conj'), ('novamente', 'advmod'), ('na', 'case'), ('red', 'amod'), 
('zone', 'obl'), ('.', 'punct'), ('Com', 'case'), ('a', 'det'), ('bola', 'obl'), ('na', 'case'), ('linha', 'nmod'), ('de', 'case'), ('14', 'nummod'), ('jardas', 'nmod'), ('dos', 'case'), ('Patriots', 'nmod'), (',', 'punct'), ('os', 'det'), ('Giants', 'nsubj'), ('sofreram', 'ROOT'), ('um', 'det'), ('revÃ', 'obj'), ('©', 'flat:name'), ('s.', 'appos'), ('Manning', 'flat:name'), ('passou', 'conj'), ('para', 'case'), ('Steve', 'obl'), ('Smith', 'flat:name'), (',', 'punct'), ('que', 'nsubj'), ('soltou', 'acl:relcl'), ('a', 'det'), ('bola', 'obj'), ('.', 'punct'), ('Ellis', 'nsubj'), ('Hobbs', 'flat:name'), ('aproveitou', 'ROOT'), (',', 'punct'), ('tomou', 'conj'), ('a', 'det'), ('posse', 'obj'), ('para', 'case'), ('os', 'det'), ('Patriots', 'nmod'), (',', 'punct'), ('e', 'cc'), ('avançou', 'conj'), ('23', 'nummod'), ('jardas', 'nsubj'), ('.', 'punct'), ('\n\n', 'advmod'), ('A', 'det'), ('defesa', 'nsubj'), ('de', 'case'), ('Nova', 'nmod'), ('York', 'flat:name'), ('manteve', 'ROOT'), ('o', 'det'), ('jogo', 'obj'), ('equilibrado', 'amod'), ('.', 'punct'), ('Com', 'case'), ('dois', 'nummod'), ('sacks', 'obl'), ('seguidos', 'acl'), (',', 'punct'), ('os', 'det'), ('Giants', 'nsubj'), ('forçaram', 'ROOT'), ('o', 'det'), ('punt', 'nsubj'), ('e', 'cc'), ('recuperaram', 'conj'), ('a', 'det'), ('bola', 'obj'), ('.', 'punct'), ('Mas', 'cc'), ('a', 'det'), ('campanha', 'nsubj'), ('seguinte', 'amod'), ('provou', 'ROOT'), ('ser', 'cop'), ('outra', 'det'), ('decepção', 'xcomp'), ('para', 'case'), ('Nova', 'obl'), ('York', 'flat:name'), ('.', 'punct'), ('O', 'det'), ('time', 'nsubj'), ('chegou', 'ROOT'), ('Ã', 'xcomp'), ('\xa0 ', 'amod'), ('linha', 'obl'), ('de', 'case'), ('25', 'nummod'), ('jardas', 'nmod'), (',', 'punct'), ('mas', 'cc'), ('Manning', 'nsubj'), ('sofreu', 'conj'), ('um', 'det'), ('sack', 'obj'), ('e', 'cc'), ('cometeu', 'conj'), ('um', 'det'), ('fumble', 'obj'), (',', 'punct'), ('e', 'cc'), ('o', 'det'), ('ataque', 'nsubj'), ('voltou', 'conj'), ('para', 'case'), ('a', 'det'), ('linha', 'obl'), ('de', 'case'), ('39', 'nummod'), ('jardas', 'nmod'), (',', 'punct'), ('não', 'advmod'), ('conseguindo', 'advcl'), ('pontuar', 'xcomp'), ('mais', 'obl'), ('uma', 'case'), ('vez', 'obl'), ('.', 'punct'), ('\n\n', 'ROOT'), ('Os', 'det'), ('Patriots', 'nsubj'), ('tiveram', 'ROOT'), ('uma', 'det'), ('última', 'amod'), ('chance', 'obj'), ('de', 'mark'), ('marcar', 'acl'), ('antes', 'advmod'), ('do', 'case'), ('intervalo', 'obl'), (',', 'punct'), ('mas', 'conj'), (',', 'punct'), ('a', 'case'), ('22', 'nummod'), ('segundos', 'obl'), ('do', 'case'), ('fim', 'nmod'), ('do', 'case'), ('segundo', 'amod'), ('perÃ\xadodo', 'nmod'), (',', 'punct'), ('Brady', 'nsubj'), ('foi', 'aux:pass'), ('novamente', 'advmod'), ('sacado', 'conj'), ('.', 'punct'), ('Desta', 'case'), ('vez', 'obl'), (',', 'punct'), ('ele', 'nsubj'), ('cometeu', 'ROOT'), ('o', 'det'), ('fumble', 'obj'), ('e', 'cc'), ('os', 'det'), ('Giants', 'nsubj'), ('tomaram', 'conj'), ('a', 'det'), ('posse', 'obj'), ('de', 'case'), ('bola', 'nmod'), ('.', 'punct'), ('Manning', 'nsubj'), ('tentou', 'ROOT'), ('um', 'det'), ('passe', 'obj'), ('longo', 'amod'), (',', 'punct'), ('de', 'case'), ('50', 'nummod'), ('jardas', 'obl'), (',', 'punct'), ('nos', 'case'), ('últimos', 'amod'), ('segundos', 'obl'), (',', 'punct'), ('mas', 'cc'), ('não', 'advmod'), ('teve', 'conj'), ('sucesso', 'obj'), ('.', 'punct'), ('\n\n', 'advmod'), ('O', 'det'), ('jogo', 'nsubj'), ('continuou', 'ROOT'), ('amarrado', 'xcomp'), ('no', 'case'), ('terceiro', 'amod'), ('quarto', 'obl'), (',', 
'punct'), ('com', 'case'), ('as', 'det'), ('defesas', 'obl'), ('levando', 'acl'), ('a', 'det'), ('melhor', 'obj'), ('sobre', 'case'), ('os', 'det'), ('ataques', 'obl'), ('.', 'punct'), ('A', 'det'), ('única', 'amod'), ('chance', 'nsubj'), ('de', 'mark'), ('pontuar', 'acl'), ('do', 'case'), ('perÃ\xadodo', 'obl'), ('foi', 'cop'), ('dos', 'case'), ('Patriots', 'ROOT'), (',', 'punct'), ('que', 'nsubj'), ('chegaram', 'acl:relcl'), ('Ã', 'cop'), ('\xa0 ', 'amod'), ('linha', 'obl'), ('de', 'case'), ('31', 'nummod'), ('jardas', 'nmod'), ('dos', 'case'), ('Giants', 'nmod'), ('.', 'punct'), ('O', 'det'), ('tÃ', 'nsubj'), ('©', 'flat:name'), ('cnico', 'amod'), ('Bill', 'appos'), ('Bellichick', 'flat:name'), (',', 'punct'), ('porÃ', 'case'), ('©', 'nmod'), ('m', 'flat:name'), (',', 'punct'), ('optou', 'ROOT'), ('por', 'case'), ('uma', 'det'), ('quarta', 'amod'), ('descida', 'obl'), ('em', 'case'), ('vez', 'obl'), ('de', 'case'), ('um', 'det'), ('field', 'obj'), ('goal', 'amod'), ('.', 'punct'), ('Brady', 'nsubj'), ('tentou', 'ROOT'), ('um', 'det'), ('passe', 'obj'), ('para', 'case'), ('Jabar', 'obl'), ('Gaffney', 'flat:name'), (',', 'punct'), ('mas', 'cc'), ('não', 'nsubj'), ('conseguiu', 'conj'), ('completar', 'xcomp'), ('.', 'punct'), ('\n\n', 'advmod'), ('O', 'det'), ('último', 'amod'), ('perÃ\xadodo', 'nsubj'), ('começou', 'ROOT'), ('arrasador', 'obj'), ('para', 'case'), ('os', 'det'), ('Giants', 'obl'), ('.', 'punct'), ('na', 'case'), ('primeira', 'amod'), ('jogada', 'obl'), (',', 'punct'), ('Manning', 'nsubj'), ('achou', 'ROOT'), ('o', 'det'), ('tight', 'obj'), ('end', 'flat:name'), ('Kevin', 'appos'), ('Boss', 'flat:name'), (',', 'punct'), ('para', 'case'), ('um', 'det'), ('incrÃ\xadvel', 'amod'), ('avanço', 'obj'), ('de', 'case'), ('45', 'nummod'), ('jardas', 'nmod'), (',', 'punct'), ('que', 'nsubj'), ('deixou', 'acl:relcl'), ('o', 'det'), ('time', 'obj'), ('na', 'case'), ('linha', 'obl'), ('de', 'case'), ('35', 'nmod'), ('dos', 'case'), ('Patriots', 'nmod'), ('.', 'punct'), ('Outro', 'det'), ('lançamento', 'nsubj'), (',', 'punct'), ('desta', 'case'), ('vez', 'nmod'), ('para', 'case'), ('Steve', 'nmod'), ('Smith', 'flat:name'), (',', 'punct'), ('marcou', 'ROOT'), ('o', 'det'), ('avanço', 'obj'), ('atÃ', 'punct'), ('©', 'cop'), ('a', 'det'), ('linha', 'ROOT'), ('de', 'case'), ('12', 'nummod'), ('jardas', 'nmod'), ('.', 'punct'), ('Duas', 'nummod'), ('jogadas', 'obl'), ('depois', 'advmod'), (',', 'punct'), ('David', 'nsubj'), ('Tyree', 'flat:name'), ('pegou', 'ROOT'), ('um', 'det'), ('passe', 'obj'), ('de', 'case'), ('cinco', 'nummod'), ('jardas', 'nmod'), ('na', 'case'), ('end', 'nmod'), ('zone', 'flat:name'), ('para', 'mark'), ('anotar', 'advcl'), ('o', 'det'), ('touchdown', 'obj'), ('e', 'cc'), ('virar', 'conj'), ('o', 'det'), ('jogo', 'obj'), ('.', 'punct'), ('\n\n', 'advmod'), ('Na', 'case'), ('hora', 'obl'), ('da', 'case'), ('decisão', 'nmod'), (',', 'punct'), ('o', 'det'), ('ataque', 'nsubj'), ('dos', 'case'), ('Patriots', 'nmod'), ('voltou', 'ROOT'), ('a', 'mark'), ('funcionar', 'xcomp'), ('.', 'punct'), ('Com', 'case'), ('uma', 'obl'), ('sÃ', 'flat:name'), ('©', 'flat:name'), ('rie', 'appos'), ('de', 'case'), ('passes', 'nmod'), ('curtos', 'amod'), ('e', 'cc'), ('variados', 'conj'), (',', 'punct'), ('Brady', 'nsubj'), ('achou', 'ROOT'), ('Wes', 'obj'), ('Welker', 'flat:name'), (',', 'punct'), ('Randy', 'dep'), ('Moss', 'flat:name'), ('e', 'cc'), ('Kevin', 'conj'), ('Faulk', 'flat:name'), ('seguidas', 'xcomp'), ('vezes', 'obl'), ('atÃ', 'advmod'), ('©', 'aux'), ('chegar', 'ROOT'), ('Ã', 
'xcomp'), ('\xa0 ', 'advmod'), ('red', 'amod'), ('zone', 'obj'), ('.', 'punct'), ('A', 'case'), ('2m45s', 'obl'), ('do', 'case'), ('fim', 'nmod'), (',', 'punct'), ('o', 'det'), ('quarterback', 'nsubj'), ('conectou', 'ROOT'), ('mais', 'advmod'), ('uma', 'case'), ('vez', 'obl'), ('com', 'case'), ('Moss', 'obl'), (',', 'punct'), ('que', 'nsubj'), ('se', 'expl'), ('desmarcou', 'acl:relcl'), ('e', 'cc'), ('ficou', 'conj'), ('livre', 'xcomp'), ('na', 'case'), ('lateral', 'obl'), ('direita', 'amod'), ('da', 'case'), ('end', 'nmod'), ('zone', 'flat:name'), ('.', 'punct'), ('\n\n', 'advmod'), ('Quando', 'advmod'), ('os', 'det'), ('fãs', 'nsubj'), ('de', 'case'), ('New', 'nmod'), ('England', 'flat:name'), ('jÃ', 'flat:name'), ('¡', 'aux'), ('comemoravam', 'advcl'), ('a', 'det'), ('vitória', 'obj'), (',', 'punct'), ('o', 'det'), ('inesperado', 'nsubj'), ('aconteceu', 'ROOT'), ('.', 'punct'), ('Em', 'case'), ('uma', 'det'), ('jogada', 'obl'), ('incrÃ\xadvel', 'amod'), (',', 'punct'), ('Eli', 'nsubj'), ('Manning', 'flat:name'), ('se', 'expl'), ('soltou', 'ROOT'), ('de', 'case'), ('dois', 'nummod'), ('marcadores', 'obj'), ('que', 'nsubj'), ('o', 'obj'), ('seguravam', 'acl:relcl'), ('pela', 'case'), ('camisa', 'obl'), ('e', 'cc'), (',', 'punct'), ('na', 'case'), ('corrida', 'obl'), (',', 'punct'), ('lançou', 'conj'), ('para', 'case'), ('Amani', 'nmod'), ('Toomer', 'flat:name'), ('.', 'punct'), ('O', 'det'), ('wide', 'nsubj'), ('receiver', 'flat:name'), (',', 'punct'), ('bem', 'advmod'), ('marcado', 'acl'), (',', 'punct'), ('saltou', 'ROOT'), ('e', 'cc'), ('conseguiu', 'conj'), ('a', 'mark'), ('fazer', 'xcomp'), ('recepção', 'obj'), ('para', 'case'), ('um', 'det'), ('avanço', 'obl'), ('de', 'case'), ('32', 'nummod'), ('jardas', 'nmod'), (',', 'punct'), ('deixando', 'advcl'), ('os', 'det'), ('Giants', 'obj'), ('na', 'case'), ('linha', 'obl'), ('de', 'case'), ('24', 'nmod'), ('de', 'case'), ('New', 'nmod'), ('England', 'flat:name'), ('.', 'punct'), ('\n\n', 'advmod'), ('Quatro', 'nummod'), ('jogadas', 'obl'), ('depois', 'advmod'), (',', 'punct'), ('a', 'case'), ('39', 'nummod'), ('segundos', 'obl'), ('do', 'case'), ('fim', 'nmod'), (',', 'punct'), ('Manning', 'nsubj'), ('achou', 'ROOT'), ('Plaxico', 'obj'), ('Burress', 'flat:name'), ('na', 'case'), ('end', 'obl'), ('zone', 'flat:name'), ('para', 'mark'), ('conseguir', 'advcl'), ('o', 'det'), ('touchdown', 'obj'), ('do', 'case'), ('tÃ\xadtulo', 'nmod'), ('.', 'punct')]
###Markdown
Visualize the tree as a graph
###Code
# Render the dependency parse as SVG markup and save it to a file
visualizar_sintaxe = spacy.displacy.render(doc, style='dep')
output_path = open('analise_dependencia.svg','w',encoding="utf-8")
output_path.write(visualizar_sintaxe)
output_path.close()
###Output
_____no_output_____ |
sound_field_analysis/delay_and_sum_infinite.ipynb | ###Markdown
Delay-and-Sum Beamformer - Linear Array of Infinite Length *This Jupyter notebook is part of a [collection of notebooks](../index.ipynb) in the masters course Selected Topics in Audio Signal Processing, Communications Engineering, Universität Rostock. Please direct questions and suggestions to [[email protected]](mailto:[email protected]).* Beampattern In this example the beampattern of a delay-and-sum (DSB) beamformer for a linear array of infinite length is computed and plotted for various steering angles. For numerical evaluation the array of infinite length is approximated by a long array of finite length. First, two functions are defined for computation and illustration of the beampattern, respectively.
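The quantity evaluated numerically by the helper function below is the normalized beampattern, where the infinite sum is approximated by $N$ microphones spaced $\Delta x$ apart ($\theta$ is the steering angle, $\theta_\text{pw}$ the incidence angle of the plane wave, matching the symbols in the code):
$$\bar{P}(\theta, \theta_\text{pw}, \omega) = \frac{1}{N} \sum_{\mu=-N/2}^{N/2} \mathrm{e}^{-\mathrm{j}\,\frac{\omega}{c}\,\mu \Delta x\,(\cos\theta_\text{pw} - \cos\theta)}$$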
###Code
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
dx = 0.1 # spatial sampling interval (distance between microphones)
c = 343 # speed of sound
om = 2*np.pi * np.linspace(100, 8000, 1000) # angular frequencies
theta_pw = np.linspace(0, np.pi, 181) # angles of the incident plane waves
def compute_dsb_beampattern(theta, theta_pw, om, dx, nmic=5000):
"Compute beampattern of a delay-and-sub beamformer for given steering angle"
B = np.zeros(shape=(len(om), len(theta_pw)), dtype=complex)
for n in range(len(om)):
for mu in range(-nmic//2, nmic//2+1):
B[n, :] += np.exp(-1j * om[n]/c * mu*dx * (np.cos(theta_pw) - np.cos(theta)))
return B/nmic
def plot_dsb_beampattern(B, theta_pw, om):
"Plot beampattern of a delay-and-sub beamformer"
plt.figure(figsize=(10,10))
plt.imshow(20*np.log10(np.abs(B)), aspect='auto', vmin=-50, vmax=0, origin='lower', \
extent=[0, 180, om[0]/(2*np.pi), om[-1]/(2*np.pi)], cmap='viridis')
plt.xlabel(r'$\theta_{pw}$ in deg')
plt.ylabel('$f$ in Hz')
plt.title('Beampattern')
cb = plt.colorbar()
cb.set_label(r'$|\bar{P}(\theta, \theta_{pw}, \omega)|$ in dB')
###Output
_____no_output_____
###Markdown
Steering Angle $\theta = 90^\mathrm{o}$
###Code
B = compute_dsb_beampattern(np.pi/2, theta_pw, om, dx)
plot_dsb_beampattern(B, theta_pw, om)
###Output
_____no_output_____
###Markdown
Steering Angle $\theta = 45^\mathrm{o}$
###Code
B = compute_dsb_beampattern(np.pi/4, theta_pw, om, dx)
plot_dsb_beampattern(B, theta_pw, om)
###Output
_____no_output_____
###Markdown
Steering Angle $\theta = 0^\mathrm{o}$
###Code
B = compute_dsb_beampattern(0, theta_pw, om, dx)
plot_dsb_beampattern(B, theta_pw, om)
###Output
_____no_output_____ |
pytorch_NN_mechanics/.ipynb_checkpoints/04_Custom_BatchNorm_and_LSUV-checkpoint.ipynb | ###Markdown
Deep Dive into Normalization
###Code
from pathlib import Path
from IPython.core.debugger import set_trace
from fastai import datasets
import pickle, gzip, math, matplotlib as mpl
import matplotlib.pyplot as plt
import torch
from torch import nn, optim, tensor
from torch.nn import init
import torch.nn.functional as F
from torch.utils.data import DataLoader, SequentialSampler, RandomSampler
# Importing and setting seaborn for improved plots
#import seaborn as sns; sns.set(style='white')
# Importing partials module
from functools import partial
###Output
_____no_output_____
###Markdown
Initial Setup - Taking Previous NBs Into Account **This is just to illustrate the amount of work that goes into building a customized DL library for experimentation and model building.** **The best method is to utilize the auto-export script and import all the necessary classes and modules.** Exports of NB1
###Code
import operator
def test(a,b,cmp,cname=None):
if cname is None: cname=cmp.__name__
assert cmp(a,b),f"{cname}:\n{a}\n{b}"
def test_eq(a,b): test(a,b,operator.eq,'==')
from pathlib import Path
from IPython.core.debugger import set_trace
from fastai import datasets
import pickle, gzip, math, torch, matplotlib as mpl
import matplotlib.pyplot as plt
from torch import tensor
MNIST_URL='http://deeplearning.net/data/mnist/mnist.pkl'
def near(a,b): return torch.allclose(a, b, rtol=1e-3, atol=1e-5)
def test_near(a,b): test(a,b,near)
###Output
_____no_output_____
###Markdown
Exports of NB2
###Code
def get_data():
path = datasets.download_data(MNIST_URL, ext='.gz')
with gzip.open(path, 'rb') as f:
((x_train, y_train), (x_valid, y_valid), _) = pickle.load(f, encoding='latin-1')
return map(tensor, (x_train,y_train,x_valid,y_valid))
def normalize(x, m, s): return (x-m)/s
def test_near_zero(a,tol=1e-3): assert a.abs()<tol, f"Near zero: {a}"
from torch.nn import init
def mse(output, targ): return (output.squeeze(-1) - targ).pow(2).mean()
from torch import nn
###Output
_____no_output_____
###Markdown
Exports of NB3
###Code
import torch.nn.functional as F
def accuracy(out, yb): return (torch.argmax(out, dim=1)==yb).float().mean()
from torch import optim
class Dataset():
def __init__(self, x, y): self.x,self.y = x,y
def __len__(self): return len(self.x)
def __getitem__(self, i): return self.x[i],self.y[i]
from torch.utils.data import DataLoader, SequentialSampler, RandomSampler
def get_dls(train_ds, valid_ds, bs, **kwargs):
return (DataLoader(train_ds, batch_size=bs, shuffle=True, **kwargs),
DataLoader(valid_ds, batch_size=bs*2, **kwargs))
###Output
_____no_output_____
###Markdown
Exports of NB4
###Code
class DataBunch():
def __init__(self, train_dl, valid_dl, c=None):
self.train_dl,self.valid_dl,self.c = train_dl,valid_dl,c
@property
def train_ds(self): return self.train_dl.dataset
@property
def valid_ds(self): return self.valid_dl.dataset
def get_model(data, lr=0.5, nh=50):
m = data.train_ds.x.shape[1]
model = nn.Sequential(nn.Linear(m,nh), nn.ReLU(), nn.Linear(nh,data.c))
return model, optim.SGD(model.parameters(), lr=lr)
class Learner():
def __init__(self, model, opt, loss_func, data):
self.model,self.opt,self.loss_func,self.data = model,opt,loss_func,data
import re
_camel_re1 = re.compile('(.)([A-Z][a-z]+)')
_camel_re2 = re.compile('([a-z0-9])([A-Z])')
def camel2snake(name):
s1 = re.sub(_camel_re1, r'\1_\2', name)
return re.sub(_camel_re2, r'\1_\2', s1).lower()
class Callback():
_order=0
def set_runner(self, run): self.run=run
def __getattr__(self, k): return getattr(self.run, k)
@property
def name(self):
name = re.sub(r'Callback$', '', self.__class__.__name__)
return camel2snake(name or 'callback')
from typing import *
def listify(o):
if o is None: return []
if isinstance(o, list): return o
if isinstance(o, str): return [o]
if isinstance(o, Iterable): return list(o)
return [o]
class AvgStats():
def __init__(self, metrics, in_train): self.metrics,self.in_train = listify(metrics),in_train
def reset(self):
self.tot_loss,self.count = 0.,0
self.tot_mets = [0.] * len(self.metrics)
@property
def all_stats(self): return [self.tot_loss.item()] + self.tot_mets
@property
def avg_stats(self): return [o/self.count for o in self.all_stats]
def __repr__(self):
if not self.count: return ""
return f"{'train' if self.in_train else 'valid'}: {self.avg_stats}"
def accumulate(self, run):
bn = run.xb.shape[0]
self.tot_loss += run.loss * bn
self.count += bn
for i,m in enumerate(self.metrics):
self.tot_mets[i] += m(run.pred, run.yb) * bn
###Output
_____no_output_____
###Markdown
Exports of NB5
###Code
def create_learner(model_func, loss_func, data):
return Learner(*model_func(data), loss_func, data)
def get_model_func(lr=0.5): return partial(get_model, lr=lr)
def annealer(f):
def _inner(start, end): return partial(f, start, end)
return _inner
@annealer
def sched_lin(start, end, pos): return start + pos*(end-start)
@annealer
def sched_cos(start, end, pos): return start + (1 + math.cos(math.pi*(1-pos))) * (end-start) / 2
@annealer
def sched_no(start, end, pos): return start
@annealer
def sched_exp(start, end, pos): return start * (end/start) ** pos
#This monkey-patch is there to be able to plot tensors
torch.Tensor.ndim = property(lambda x: len(x.shape))
def combine_scheds(pcts, scheds):
assert sum(pcts) == 1.
pcts = tensor([0] + listify(pcts))
assert torch.all(pcts >= 0)
pcts = torch.cumsum(pcts, 0)
def _inner(pos):
idx = (pos >= pcts).nonzero().max()
actual_pos = (pos-pcts[idx]) / (pcts[idx+1]-pcts[idx])
return scheds[idx](actual_pos)
return _inner
class Recorder(Callback):
def begin_fit(self):
self.lrs = [[] for _ in self.opt.param_groups]
self.losses = []
def after_batch(self):
if not self.in_train: return
for pg,lr in zip(self.opt.param_groups,self.lrs): lr.append(pg['lr'])
self.losses.append(self.loss.detach().cpu())
def plot_lr (self, pgid=-1): plt.plot(self.lrs[pgid])
def plot_loss(self, skip_last=0): plt.plot(self.losses[:len(self.losses)-skip_last])
class ParamScheduler(Callback):
_order=1
def __init__(self, pname, sched_funcs): self.pname,self.sched_funcs = pname,sched_funcs
def begin_fit(self):
if not isinstance(self.sched_funcs, (list,tuple)):
self.sched_funcs = [self.sched_funcs] * len(self.opt.param_groups)
def set_param(self):
assert len(self.opt.param_groups)==len(self.sched_funcs)
for pg,f in zip(self.opt.param_groups,self.sched_funcs):
pg[self.pname] = f(self.n_epochs/self.epochs)
def begin_batch(self):
if self.in_train: self.set_param()
def pg_dicts(pgs): return [{'params':o} for o in pgs]
###Output
_____no_output_____
###Markdown
Exports of NB5b
###Code
class Callback():
_order=0
def set_runner(self, run): self.run=run
def __getattr__(self, k): return getattr(self.run, k)
@property
def name(self):
name = re.sub(r'Callback$', '', self.__class__.__name__)
return camel2snake(name or 'callback')
def __call__(self, cb_name):
f = getattr(self, cb_name, None)
if f and f(): return True
return False
class TrainEvalCallback(Callback):
def begin_fit(self):
self.run.n_epochs=0.
self.run.n_iter=0
def after_batch(self):
if not self.in_train: return
self.run.n_epochs += 1./self.iters
self.run.n_iter += 1
def begin_epoch(self):
self.run.n_epochs=self.epoch
self.model.train()
self.run.in_train=True
def begin_validate(self):
self.model.eval()
self.run.in_train=False
class CancelTrainException(Exception): pass
class CancelEpochException(Exception): pass
class CancelBatchException(Exception): pass
class Runner():
def __init__(self, cbs=None, cb_funcs=None):
self.in_train = False
cbs = listify(cbs)
for cbf in listify(cb_funcs):
cb = cbf()
setattr(self, cb.name, cb)
cbs.append(cb)
self.stop,self.cbs = False,[TrainEvalCallback()]+cbs
@property
def opt(self): return self.learn.opt
@property
def model(self): return self.learn.model
@property
def loss_func(self): return self.learn.loss_func
@property
def data(self): return self.learn.data
def one_batch(self, xb, yb):
try:
self.xb,self.yb = xb,yb
self('begin_batch')
self.pred = self.model(self.xb)
self('after_pred')
self.loss = self.loss_func(self.pred, self.yb)
self('after_loss')
if not self.in_train: return
self.loss.backward()
self('after_backward')
self.opt.step()
self('after_step')
self.opt.zero_grad()
except CancelBatchException: self('after_cancel_batch')
finally: self('after_batch')
def all_batches(self, dl):
self.iters = len(dl)
try:
for xb,yb in dl: self.one_batch(xb, yb)
except CancelEpochException: self('after_cancel_epoch')
def fit(self, epochs, learn):
self.epochs,self.learn,self.loss = epochs,learn,tensor(0.)
try:
for cb in self.cbs: cb.set_runner(self)
self('begin_fit')
for epoch in range(epochs):
self.epoch = epoch
if not self('begin_epoch'): self.all_batches(self.data.train_dl)
with torch.no_grad():
if not self('begin_validate'): self.all_batches(self.data.valid_dl)
self('after_epoch')
except CancelTrainException: self('after_cancel_train')
finally:
self('after_fit')
self.learn = None
def __call__(self, cb_name):
res = False
for cb in sorted(self.cbs, key=lambda x: x._order): res = cb(cb_name) and res
return res
class AvgStatsCallback(Callback):
def __init__(self, metrics):
self.train_stats,self.valid_stats = AvgStats(metrics,True),AvgStats(metrics,False)
def begin_epoch(self):
self.train_stats.reset()
self.valid_stats.reset()
def after_loss(self):
stats = self.train_stats if self.in_train else self.valid_stats
with torch.no_grad(): stats.accumulate(self.run)
def after_epoch(self):
print(self.train_stats)
print(self.valid_stats)
class Recorder(Callback):
def begin_fit(self):
self.lrs = [[] for _ in self.opt.param_groups]
self.losses = []
def after_batch(self):
if not self.in_train: return
for pg,lr in zip(self.opt.param_groups,self.lrs): lr.append(pg['lr'])
self.losses.append(self.loss.detach().cpu())
def plot_lr (self, pgid=-1): plt.plot(self.lrs[pgid])
def plot_loss(self, skip_last=0): plt.plot(self.losses[:len(self.losses)-skip_last])
def plot(self, skip_last=0, pgid=-1):
losses = [o.item() for o in self.losses]
lrs = self.lrs[pgid]
n = len(losses)-skip_last
plt.xscale('log')
plt.plot(lrs[:n], losses[:n])
class ParamScheduler(Callback):
_order=1
def __init__(self, pname, sched_funcs): self.pname,self.sched_funcs = pname,sched_funcs
def begin_fit(self):
if not isinstance(self.sched_funcs, (list,tuple)):
self.sched_funcs = [self.sched_funcs] * len(self.opt.param_groups)
def set_param(self):
assert len(self.opt.param_groups)==len(self.sched_funcs)
for pg,f in zip(self.opt.param_groups,self.sched_funcs):
pg[self.pname] = f(self.n_epochs/self.epochs)
def begin_batch(self):
if self.in_train: self.set_param()
class LR_Find(Callback):
_order=1
def __init__(self, max_iter=100, min_lr=1e-6, max_lr=10):
self.max_iter,self.min_lr,self.max_lr = max_iter,min_lr,max_lr
self.best_loss = 1e9
def begin_batch(self):
if not self.in_train: return
pos = self.n_iter/self.max_iter
lr = self.min_lr * (self.max_lr/self.min_lr) ** pos
for pg in self.opt.param_groups: pg['lr'] = lr
def after_step(self):
if self.n_iter>=self.max_iter or self.loss>self.best_loss*10:
raise CancelTrainException()
if self.loss < self.best_loss: self.best_loss = self.loss
###Output
_____no_output_____
###Markdown
Exports of NB6
###Code
# Enable CUDA
#device = torch.device('cuda', 0)
#torch.cuda.set_device(device)
torch.set_num_threads(2)
def normalize_to(train, valid):
m,s = train.mean(),train.std()
return normalize(train, m, s), normalize(valid, m, s)
class Lambda(nn.Module):
def __init__(self, func):
super().__init__()
self.func = func
def forward(self, x): return self.func(x)
def flatten(x): return x.view(x.shape[0], -1)
class CudaCallback(Callback):
def begin_fit(self): self.model.cuda()
def begin_batch(self): self.run.xb,self.run.yb = self.xb.cuda(),self.yb.cuda()
class BatchTransformXCallback(Callback):
_order=2
def __init__(self, tfm): self.tfm = tfm
def begin_batch(self): self.run.xb = self.tfm(self.xb)
def view_tfm(*size):
def _inner(x): return x.view(*((-1,)+size))
return _inner
def get_runner(model, data, lr=0.6, cbs=None, opt_func=None, loss_func = F.cross_entropy):
if opt_func is None: opt_func = optim.SGD
opt = opt_func(model.parameters(), lr=lr)
learn = Learner(model, opt, loss_func, data)
return learn, Runner(cb_funcs=listify(cbs))
def children(m): return list(m.children())
class Hook():
def __init__(self, m, f): self.hook = m.register_forward_hook(partial(f, self))
def remove(self): self.hook.remove()
def __del__(self): self.remove()
def append_stats(hook, mod, inp, outp):
if not hasattr(hook,'stats'): hook.stats = ([],[])
means,stds = hook.stats
if mod.training:
means.append(outp.data.mean())
stds .append(outp.data.std())
class ListContainer():
def __init__(self, items): self.items = listify(items)
def __getitem__(self, idx):
try: return self.items[idx]
except TypeError:
if isinstance(idx[0],bool):
assert len(idx)==len(self) # bool mask
return [o for m,o in zip(idx,self.items) if m]
return [self.items[i] for i in idx]
def __len__(self): return len(self.items)
def __iter__(self): return iter(self.items)
def __setitem__(self, i, o): self.items[i] = o
def __delitem__(self, i): del(self.items[i])
def __repr__(self):
res = f'{self.__class__.__name__} ({len(self)} items)\n{self.items[:10]}'
if len(self)>10: res = res[:-1]+ '...]'
return res
from torch.nn import init
class Hooks(ListContainer):
def __init__(self, ms, f): super().__init__([Hook(m, f) for m in ms])
def __enter__(self, *args): return self
def __exit__ (self, *args): self.remove()
def __del__(self): self.remove()
def __delitem__(self, i):
self[i].remove()
super().__delitem__(i)
def remove(self):
for h in self: h.remove()
def get_cnn_layers(data, nfs, layer, **kwargs):
nfs = [1] + nfs
return [layer(nfs[i], nfs[i+1], 5 if i==0 else 3, **kwargs)
for i in range(len(nfs)-1)] + [
nn.AdaptiveAvgPool2d(1), Lambda(flatten), nn.Linear(nfs[-1], data.c)]
def conv_layer(ni, nf, ks=3, stride=2, **kwargs):
return nn.Sequential(
nn.Conv2d(ni, nf, ks, padding=ks//2, stride=stride), GeneralRelu(**kwargs))
class GeneralRelu(nn.Module):
def __init__(self, leak=None, sub=None, maxv=None):
super().__init__()
self.leak,self.sub,self.maxv = leak,sub,maxv
def forward(self, x):
x = F.leaky_relu(x,self.leak) if self.leak is not None else F.relu(x)
if self.sub is not None: x.sub_(self.sub)
if self.maxv is not None: x.clamp_max_(self.maxv)
return x
def init_cnn(m, uniform=False):
f = init.kaiming_uniform_ if uniform else init.kaiming_normal_
for l in m:
if isinstance(l, nn.Sequential):
f(l[0].weight, a=0.1)
l[0].bias.data.zero_()
def get_cnn_model(data, nfs, layer, **kwargs):
return nn.Sequential(*get_cnn_layers(data, nfs, layer, **kwargs))
def get_learn_run(nfs, data, lr, layer, cbs=None, opt_func=None, uniform=False, **kwargs):
model = get_cnn_model(data, nfs, layer, **kwargs)
init_cnn(model, uniform=uniform)
return get_runner(model, data, lr=lr, cbs=cbs, opt_func=opt_func)
from IPython.display import display, Javascript
def nb_auto_export():
display(Javascript("""{
const ip = IPython.notebook
if (ip) {
ip.save_notebook()
console.log('a')
const s = `!python notebook2script.py ${ip.notebook_name}`
if (ip.kernel) { ip.kernel.execute(s) }
}
}"""))
###Output
_____no_output_____
###Markdown
Building the ConvNet
###Code
x_train, y_train, x_valid, y_valid = get_data()
# Normalizing
x_train, x_valid = normalize_to(x_train, x_valid)
# Build Datasets
train_ds, valid_ds = Dataset(x_train, y_train), Dataset(x_valid, y_valid)
# Arch
nh, bs = 50, 512
c = y_train.max().item()+1
loss_func = F.cross_entropy
# Create databunch
data = DataBunch(*get_dls(train_ds, valid_ds, bs), c)
# Transforming
mnist_view = view_tfm(1, 28, 28)
# Callbacks
cbfs = [Recorder,
partial(AvgStatsCallback, accuracy),
CudaCallback,
partial(BatchTransformXCallback, mnist_view)]
nfs = [8, 16, 32, 64, 64]
learn, run = get_learn_run(nfs, data, lr=0.4,
layer=conv_layer, cbs=cbfs)
%time run.fit(2, learn)
###Output
train: [1.6192525, tensor(0.4702, device='cuda:0')]
valid: [0.25496630859375, tensor(0.9239, device='cuda:0')]
train: [0.310026640625, tensor(0.9089, device='cuda:0')]
valid: [0.15269254150390624, tensor(0.9537, device='cuda:0')]
CPU times: user 3.1 s, sys: 307 ms, total: 3.4 s
Wall time: 2.87 s
###Markdown
Custom BatchNorm
###Code
class BatchNorm(nn.Module):
def __init__(self, nf, mom=0.1, eps=1e-5):
super().__init__()
# NB: pytorch bn mom is opposite of what you'd expect
self.mom, self.eps = mom, eps
self.mults = nn.Parameter(torch.ones (nf,1,1))
self.adds = nn.Parameter(torch.zeros(nf,1,1))
self.register_buffer('vars', torch.ones(1,nf,1,1))
self.register_buffer('means', torch.zeros(1,nf,1,1))
def update_stats(self, x):
m = x.mean((0,2,3), keepdim=True)
v = x.var ((0,2,3), keepdim=True)
self.means.lerp_(m, self.mom)
self.vars.lerp_(v, self.mom)
return m,v
def forward(self, x):
if self.training:
with torch.no_grad(): m,v = self.update_stats(x)
else:
m,v = self.means,self.vars
x = (x-m) / (v+self.eps).sqrt()
return x*self.mults + self.adds
def conv_layer(ni, nf, ks=3, stride=2, bn=True, **kwargs):
# No bias needed in case of BN
layers = [nn.Conv2d(ni, nf, ks, padding=ks//2, stride=stride, bias=not bn),
GeneralRelu(**kwargs)]
if bn: layers.append(BatchNorm(nf)) #Custom BN
return nn.Sequential(*layers)
# Initalize
def init_cnn_(m, f):
if isinstance(m, nn.Conv2d):
f(m.weight, a=0.1)
if getattr(m, 'bias', None) is not None: m.bias.data.zero_()
for l in m.children():
init_cnn_(l, f)
def init_cnn(m, uniform=False):
f = init.kaiming_uniform_ if uniform else init.kaiming_normal_
init_cnn_(m, f)
def get_learn_run(nfs, data, lr, layer, cbs=None,
opt_func=None, uniform=False, **kwargs):
model = get_cnn_model(data, nfs, layer, **kwargs)
init_cnn(model, uniform=uniform)
return get_runner(model, data, lr=lr, cbs=cbs, opt_func=opt_func)
###Output
_____no_output_____
###Markdown
Let's use this during training to observe how it helps keep the activation means at 0 and the standard deviations at 1.
###Code
learn, run = get_learn_run(nfs, data, lr=0.8, layer=conv_layer, cbs=cbfs)
with Hooks(learn.model, append_stats) as hooks:
run.fit(1, learn)
fig,(ax0,ax1) = plt.subplots(1,2, figsize=(10,4))
for h in hooks[:-1]:
ms,ss = h.stats
ax0.plot(ms[:10])
ax1.plot(ss[:10])
h.remove()
plt.legend(range(6));
fig,(ax0,ax1) = plt.subplots(1,2, figsize=(10,4))
for h in hooks[:-1]:
ms,ss = h.stats
ax0.plot(ms)
ax1.plot(ss)
###Output
train: [0.260148125, tensor(0.9219, device='cuda:0')]
valid: [0.203640283203125, tensor(0.9344, device='cuda:0')]
###Markdown
After applying BatchNorm using linear interpolation, we can see the improvement in our standard deviations and means.
###Code
learn, run = get_learn_run(nfs, data, 1.0, conv_layer, cbs=cbfs)
%time run.fit(5, learn)
###Output
train: [0.26607884765625, tensor(0.9183, device='cuda:0')]
valid: [0.2518082275390625, tensor(0.9188, device='cuda:0')]
train: [0.08462037109375, tensor(0.9741, device='cuda:0')]
valid: [0.07879784545898437, tensor(0.9751, device='cuda:0')]
train: [0.0603748388671875, tensor(0.9812, device='cuda:0')]
valid: [0.11693089599609376, tensor(0.9633, device='cuda:0')]
train: [0.0490041064453125, tensor(0.9846, device='cuda:0')]
valid: [0.06964392700195313, tensor(0.9784, device='cuda:0')]
train: [0.0403047900390625, tensor(0.9866, device='cuda:0')]
valid: [0.0707970458984375, tensor(0.9793, device='cuda:0')]
CPU times: user 5.89 s, sys: 35.9 ms, total: 5.92 s
Wall time: 4.93 s
###Markdown
Comparison to Built-in BatchNorm
###Code
def conv_layer(ni, nf, ks=3, stride=2, bn=True, **kwargs):
layers = [nn.Conv2d(ni, nf, ks, padding=ks//2, stride=stride, bias=not bn),
GeneralRelu(**kwargs)]
if bn: layers.append(nn.BatchNorm2d(nf, eps=1e-5, momentum=0.1))
return nn.Sequential(*layers)
learn, run = get_learn_run(nfs, data, 1.0, conv_layer, cbs=cbfs)
%time run.fit(5, learn)
###Output
train: [0.23452498046875, tensor(0.9251, device='cuda:0')]
valid: [0.08072618408203125, tensor(0.9748, device='cuda:0')]
train: [0.06257984375, tensor(0.9802, device='cuda:0')]
valid: [0.1142558837890625, tensor(0.9642, device='cuda:0')]
train: [0.0423348681640625, tensor(0.9868, device='cuda:0')]
valid: [0.07078927001953125, tensor(0.9797, device='cuda:0')]
train: [0.03153028076171875, tensor(0.9903, device='cuda:0')]
valid: [0.057710650634765624, tensor(0.9821, device='cuda:0')]
train: [0.02128148681640625, tensor(0.9938, device='cuda:0')]
valid: [0.05555340576171875, tensor(0.9835, device='cuda:0')]
CPU times: user 5.61 s, sys: 11.9 ms, total: 5.62 s
Wall time: 4.63 s
###Markdown
Adding the Scheduler Adding learning rate annealing:
###Code
sched = combine_scheds([0.3, 0.7], [sched_lin(0.6, 2.), sched_lin(2., 0.1)])
learn, run = get_learn_run(nfs, data, 0.9, conv_layer, cbs=cbfs +
[partial(ParamScheduler, 'lr', sched)])
run.fit(10, learn)
###Output
train: [0.242242890625, tensor(0.9275, device='cuda:0')]
valid: [0.1109781005859375, tensor(0.9654, device='cuda:0')]
train: [0.07245439453125, tensor(0.9776, device='cuda:0')]
valid: [0.07864197387695313, tensor(0.9756, device='cuda:0')]
train: [0.05993212890625, tensor(0.9809, device='cuda:0')]
valid: [0.10018829345703124, tensor(0.9698, device='cuda:0')]
train: [0.03866418701171875, tensor(0.9874, device='cuda:0')]
valid: [0.05641264038085937, tensor(0.9833, device='cuda:0')]
train: [0.02331113037109375, tensor(0.9927, device='cuda:0')]
valid: [0.05662333984375, tensor(0.9840, device='cuda:0')]
train: [0.01354578125, tensor(0.9962, device='cuda:0')]
valid: [0.048208865356445314, tensor(0.9855, device='cuda:0')]
train: [0.0076075390625, tensor(0.9980, device='cuda:0')]
valid: [0.04530145263671875, tensor(0.9874, device='cuda:0')]
train: [0.004427695922851562, tensor(0.9991, device='cuda:0')]
valid: [0.04266683349609375, tensor(0.9886, device='cuda:0')]
train: [0.0028356549072265625, tensor(0.9997, device='cuda:0')]
valid: [0.0436294677734375, tensor(0.9886, device='cuda:0')]
train: [0.0023242259216308594, tensor(0.9998, device='cuda:0')]
valid: [0.043790444946289066, tensor(0.9885, device='cuda:0')]
###Markdown
Additional Norms Layer Norm General equation for a norm layer with learnable affine: $$y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta$$ The key difference with BN is: 1. Don't keep moving averages. 2. Don't average over the batch dimension but over the hidden dimension. This makes it independent of batch size. 3. Instead of (0, 2, 3), we now have (1, 2, 3). 4. Not nearly as good as BN, but works well enough on RNNs since we can't use BN in that scenario.
###Code
# This is the code implementation of the equation above
class LayerNorm (nn.Module):
__constants__ = ['eps']
def __init__(self, eps=1e-5):
super().__init__()
self.eps = eps
self.mult = nn.Parameter(tensor(1.))
self.add = nn.Parameter(tensor(0.))
def forward(self, x):
m = x.mean((1, 2, 3), keepdim=True)
v = x.var((1, 2, 3), keepdim=True)
x = (x - m) / (v+self.eps).sqrt()
return x*self.mult + self.add
def conv_layer_norm(ni, nf, ks=3, stride=2, bn=True, **kwargs):
layers = [nn.Conv2d(ni, nf, ks, padding=ks//2, stride=stride, bias=True),
GeneralRelu(**kwargs)]
if bn: layers.append(LayerNorm())
return nn.Sequential(*layers)
learn, run = get_learn_run(nfs, data, 0.8, conv_layer_norm, cbs=cbfs)
%time run.fit(3, learn)
###Output
train: [nan, tensor(0.1380, device='cuda:0')]
valid: [nan, tensor(0.0991, device='cuda:0')]
train: [nan, tensor(0.0986, device='cuda:0')]
valid: [nan, tensor(0.0991, device='cuda:0')]
train: [nan, tensor(0.0986, device='cuda:0')]
valid: [nan, tensor(0.0991, device='cuda:0')]
CPU times: user 4.09 s, sys: 16.1 ms, total: 4.1 s
Wall time: 3.51 s
###Markdown
Instance Norm \begin{equation}\label{eq:bnorm} y_{tijk} = \frac{x_{tijk} - \mu_{i}}{\sqrt{\sigma_i^2 + \epsilon}}, \quad \mu_i = \frac{1}{HWT}\sum_{t=1}^T\sum_{l=1}^W \sum_{m=1}^H x_{tilm}, \quad \sigma_i^2 = \frac{1}{HWT}\sum_{t=1}^T\sum_{l=1}^W \sum_{m=1}^H (x_{tilm} - \mu_i)^2.\end{equation}In order to combine the effects of instance-specific normalization and batch normalization, the authors propose to replace the latter by the *instance normalization* (also known as *contrast normalization*) layer:\begin{equation}\label{eq:inorm} y_{tijk} = \frac{x_{tijk} - \mu_{ti}}{\sqrt{\sigma_{ti}^2 + \epsilon}}, \quad \mu_{ti} = \frac{1}{HW}\sum_{l=1}^W \sum_{m=1}^H x_{tilm}, \quad \sigma_{ti}^2 = \frac{1}{HW}\sum_{l=1}^W \sum_{m=1}^H (x_{tilm} - \mu_{ti})^2.\end{equation}This is used for style transfer and **NOT** for image classification. A graphical depiction of the different types of norms:
###Code
class InstanceNorm(nn.Module):
__constants__ = ['eps']
def __init__(self, nf, eps=1e-0):
super().__init__()
self.eps = eps
self.mults = nn.Parameter(torch.ones (nf,1,1))
self.adds = nn.Parameter(torch.zeros(nf,1,1))
def forward(self, x):
m = x.mean((2,3), keepdim=True)
v = x.var ((2,3), keepdim=True)
res = (x-m) / ((v+self.eps).sqrt())
return res*self.mults + self.adds
def conv_instance_norm(ni, nf, ks=3, stride=2, bn=True, **kwargs):
layers = [nn.Conv2d(ni, nf, ks, padding=ks//2, stride=stride, bias=True),
GeneralRelu(**kwargs)]
if bn: layers.append(InstanceNorm(nf))
return nn.Sequential(*layers)
learn, run = get_learn_run(nfs, data, 0.1, conv_instance_norm, cbs=cbfs)
%time run.fit(3, learn)
###Output
train: [nan, tensor(0.0986, device='cuda:0')]
valid: [nan, tensor(0.0991, device='cuda:0')]
train: [nan, tensor(0.0986, device='cuda:0')]
valid: [nan, tensor(0.0991, device='cuda:0')]
train: [nan, tensor(0.0986, device='cuda:0')]
valid: [nan, tensor(0.0991, device='cuda:0')]
CPU times: user 4.5 s, sys: 19.6 ms, total: 4.52 s
Wall time: 3.92 s
###Markdown
Addressing the Issue of Small Batch Sizes Problem: Computing the statistics, i.e. the mean and standard deviation, for a BatchNorm layer on a small batch size gives us a standard deviation very close to 0 due to the lack of a sufficient number of samples.
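A minimal illustration of the failure mode (hypothetical numbers, not taken from the model): the last conv layer in the model above ends up with a 1x1 feature map, so with `bs=2` its per-channel statistics are estimated from just two values.

```python
import torch

# two nearly identical activations for one channel of a 1x1 feature map: shape (2, 1, 1, 1)
x = torch.tensor([1.0001, 1.0002]).view(2, 1, 1, 1)
v = x.var((0, 2, 3), keepdim=True)
print(v)  # ~5e-09: essentially zero, and such a two-sample estimate swings wildly between real batches
# (x - mean) / (v + eps).sqrt() is then dominated by eps and by the noise in v,
# so the "normalised" activations are unreliable from batch to batch
```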
###Code
data = DataBunch(*get_dls(train_ds, valid_ds, 2), c)
def conv_layer(ni, nf, ks=3, stride=2 , bn=True, **kwargs):
layers = [nn.Conv2d(ni, nf, ks, padding=ks//2, stride=stride, bias=not bn),
GeneralRelu(**kwargs)]
if bn: layers.append(nn.BatchNorm2d(nf, eps=1e-5, momentum=0.1))
return nn.Sequential(*layers)
learn, run = get_learn_run(nfs, data, 0.4, conv_layer, cbs=cbfs)
%time run.fit(2, learn)
###Output
train: [2.3357021875, tensor(0.1712, device='cuda:0')]
valid: [288141.4912, tensor(0.1925, device='cuda:0')]
train: [2.32542328125, tensor(0.1792, device='cuda:0')]
valid: [38715509.9648, tensor(0.2931, device='cuda:0')]
CPU times: user 1min 54s, sys: 775 ms, total: 1min 55s
Wall time: 1min 54s
###Markdown
Extremely small batch sizes also add to the compute overhead! The performance is abysmal! Solution: Running Batch Norm As indicated in the lessons, the solution is to use Running BatchNorm, which employs smoother running estimates of the mean and variance instead of the noisy per-batch statistics.
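A sketch of the statistics the class below maintains ($\alpha$ is a batch-size-dependent momentum and $n$ the number of values per channel in the batch): it smooths running sums, sums of squares and an element count, then recovers the mean and variance as ratios (with an extra bias-correction division during the first steps):
$$s \leftarrow (1-\alpha)\,s + \alpha \sum x,\qquad q \leftarrow (1-\alpha)\,q + \alpha \sum x^2,\qquad c \leftarrow (1-\alpha)\,c + \alpha\, n$$
$$\hat{\mu} = \frac{s}{c},\qquad \hat{\sigma}^2 = \frac{q}{c} - \hat{\mu}^2$$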
###Code
class RunningBatchNorm(nn.Module):
def __init__(self, nf, mom=0.1, eps=1e-5):
super().__init__()
self.mom, self.eps = mom, eps
self.mults = nn.Parameter(torch.ones(nf, 1, 1))
self.adds = nn.Parameter(torch.ones(nf, 1, 1))
self.register_buffer('sums', torch.zeros(1, nf, 1, 1))
self.register_buffer('sqrs', torch.zeros(1, nf, 1, 1))
self.register_buffer('batch', torch.tensor(0.))
self.register_buffer('count', torch.tensor(0.))
self.register_buffer('step', torch.tensor(0.))
self.register_buffer('dbias', torch.tensor(0.))
def update_stats(self, x):
bs, nc, *_ = x.shape
self.sums.detach_()
self.sqrs.detach_()
dims = (0, 2, 3)
s = x.sum(dims, keepdim=True)
ss = (x*x).sum(dims, keepdim=True)
c = self.count.new_tensor(x.numel() / nc)
mom1 = 1 - (1-self.mom)/math.sqrt(bs-1)
self.mom1 = self.dbias.new_tensor(mom1)
self.sums.lerp_(s, self.mom1)
self.sqrs.lerp_(ss, self.mom1)
self.count.lerp_(c, self.mom1)
self.dbias = self.dbias*(1-self.mom1) + self.mom1
self.batch += bs
self.step += 1
def forward(self, x):
if self.training: self.update_stats(x)
sums = self.sums
sqrs = self.sqrs
c = self.count
if self.step < 100:
sums = sums / self.dbias
sqrs = sqrs / self.dbias
c = c / self.dbias
means = sums / c
vars = (sqrs / c).sub_(means*means)
if bool(self.batch < 20): vars.clamp_min_(0.01)
x = (x - means).div_((vars.add_(self.eps)).sqrt())
return x.mul_(self.mults).add_(self.adds)
# Lets apply the Running BatchNorm to a new Conv learner
def conv_running_bn(ni, nf, ks=3, stride=2, bn=True, **kwargs):
layers = [nn.Conv2d(ni, nf, ks, padding=ks//2, stride=stride,
bias=not bn), GeneralRelu(**kwargs)]
if bn: layers.append(RunningBatchNorm(nf))
return nn.Sequential(*layers)
learn, run = get_learn_run(nfs, data, 0.4, conv_running_bn, cbs=cbfs)
%time run.fit(1, learn)
###Output
train: [0.63196484375, tensor(0.8098, device='cuda:0')]
valid: [17.473784375, tensor(0.9446, device='cuda:0')]
CPU times: user 2min 18s, sys: 387 ms, total: 2min 19s
Wall time: 2min 18s
###Markdown
Maxing out the performance in a single epoch With a more reasonable batch size...
###Code
data = DataBunch(*get_dls(train_ds, valid_ds, 32), c)
learn, run = get_learn_run(nfs, data, 0.9, conv_running_bn, cbs=cbfs +
[partial(ParamScheduler, 'lr', sched_lin(1., 0.2))])
%time run.fit(1, learn)
# Changing batch size
data = DataBunch(*get_dls(train_ds, valid_ds, 128), c)
#cbfs.append(LR_Find)
learn, run = get_learn_run(nfs, data, 0.85, conv_running_bn, cbs=cbfs +
[partial(ParamScheduler, 'lr', sched_lin(0.9, 0.10))])
%time run.fit(1, learn)
###Output
train: [0.2676537109375, tensor(0.9299, device='cuda:0')]
valid: [0.09938456420898438, tensor(0.9758, device='cuda:0')]
CPU times: user 2.42 s, sys: 7.91 ms, total: 2.42 s
Wall time: 2.22 s
###Markdown
Layerwise Sequential Unit Variance (LSUV) Managing to keep the unit variances of our layer outputs in check as the model trains can prove to be quite a "fiddly" task, especially if we're adding dropout, or changing activation functions. These variations in outputs get exponentially worse as the model trains over multiple epochs. LSUV shifts this burden to the computer itself.
###Code
# Redefining our architecture
nh, bs = 50, 512
data = DataBunch(*get_dls(train_ds, valid_ds, bs), c)
# Recreating our ConvLayer class
class ConvLayer(nn.Module):
# Adding a subtraction hyper parameter
def __init__(self, ni, nf, ks=3, stride=2, sub=0., **kwargs):
super().__init__()
self.conv = nn.Conv2d(ni, nf, ks, padding=ks//2, stride=stride, bias=True)
self.relu = GeneralRelu(sub=sub, **kwargs)
def forward(self, x): return self.relu(self.conv(x))
@property
def bias(self): return -self.relu.sub
@bias.setter
def bias(self, v): self.relu.sub = -v
@property
def weight(self): return self.conv.weight
###Output
_____no_output_____
###Markdown
Create a learner and runner, without really worrying about how it initializes...
###Code
learn, run = get_learn_run(nfs, data, 0.6, ConvLayer, cbs=cbfs)
run.fit(2, learn)
###Output
train: [1.433776875, tensor(0.5219, device='cuda:0')]
valid: [0.226480224609375, tensor(0.9305, device='cuda:0')]
train: [0.30987103515625, tensor(0.9050, device='cuda:0')]
valid: [0.1582620849609375, tensor(0.9539, device='cuda:0')]
###Markdown
With the initial performance noted, let's recreate the model, this time with LSUV, and we will define a function which grabs a single mini-batch.
###Code
learn, run = get_learn_run(nfs, data, 0.6, ConvLayer, cbs=cbfs)
def get_batch(dl, run):
run.xb, run.yb = next(iter(dl))
for cb in run.cbs: cb.set_runner(run)
run('begin_batch')
return run.xb, run.yb
xb, yb = get_batch(data.train_dl, run)
###Output
_____no_output_____
###Markdown
Now that we have our mini-batch, we will use a function which (using recursion) gives us just the convolutional layers of the model.
###Code
def find_modules(m, cond):
if cond(m): return [m]
return sum([find_modules(o, cond) for o in m.children()], [])
def is_lin_layer(l):
    lin_layers = (nn.Conv1d, nn.Conv2d, nn.Conv3d, nn.Linear, nn.ReLU)
return isinstance(l, lin_layers)
mods = find_modules(learn.model, lambda o: isinstance(o, ConvLayer))
mods
###Output
_____no_output_____
###Markdown
Adding another helper function to grab the mean and std of the output of a hooked layer.
###Code
def append_stat(hook, mod, inp, outp):
d = outp.data
hook.mean, hook.std = d.mean().item(), d.std().item()
mdl = learn.model.cuda()
with Hooks(mods, append_stat) as hooks:
mdl(xb)
for hook in hooks: print(hook.mean, hook.std)
###Output
0.3897072970867157 0.6319916248321533
0.33694127202033997 0.5548509955406189
0.2168125957250595 0.3948383331298828
0.21095244586467743 0.3277290463447571
0.1627117097377777 0.2220781147480011
###Markdown
Here, our means are too high and the std. devs. are not close to 1. Therefore, we will adjust the bias terms to make the means 0 and then std. devs. must be adjusted to 1 (with a threshold of 1e-3).
###Code
def lsuv_module(m, xb):
h = Hook(m, append_stat)
# mdl(xb) is not None exists to pass xb through mdl while computing
# all activations in order to update the hooks.
while mdl(xb) is not None and abs(h.mean) > 1e-3: m.bias -= h.mean
while mdl(xb) is not None and abs(h.std-1)> 1e-3: m.weight.data /= h.std
h.remove()
return h.mean, h.std
###Output
_____no_output_____
###Markdown
Executing the initialization on all conv layers in order...
###Code
for m in mods: print(lsuv_module(m, xb))
###Output
(0.22692637145519257, 1.0000001192092896)
(0.11001376807689667, 1.0)
(0.15807662904262543, 0.9999999403953552)
(0.1515551060438156, 1.0000001192092896)
(0.2991049587726593, 1.0)
###Markdown
Now that our means and std. devs. are much more acceptable, the model will begin training on much better grounds.
###Code
%time run.fit(2, learn)
###Output
train: [0.4551003125, tensor(0.8555, device='cuda:0')]
valid: [0.1386651123046875, tensor(0.9577, device='cuda:0')]
train: [0.113353916015625, tensor(0.9646, device='cuda:0')]
valid: [0.10336754150390624, tensor(0.9667, device='cuda:0')]
CPU times: user 2.37 s, sys: 0 ns, total: 2.37 s
Wall time: 1.97 s
|
Returns_prediction_Time_Series.ipynb | ###Markdown
Based on the ACF and PACF we suspect two potential models: ARMA(4,4) and ARMA(6,6). We will choose between these two models based on the value of the Akaike Information Criterion (AIC).
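For reference, the criterion compared below trades off goodness of fit against model complexity:
$$\mathrm{AIC} = 2k - 2\ln(\hat{L}),$$
where $k$ is the number of estimated parameters and $\hat{L}$ the maximised likelihood; lower is better.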
###Code
ytest.shape
model=SARIMAX(ytrain,order=(4,0,4))
model_fit=model.fit()
model_fit.aic
model2=SARIMAX(ytrain,order=(6,0,6))
model_fit2=model2.fit()
model_fit2.aic
###Output
_____no_output_____
###Markdown
For reference, let's also train the white noise model, i.e. when p, d, q are all 0, and see its AIC.
###Code
model3=SARIMAX(ytrain,order=(0,0,0))
model_fit3=model3.fit()
model_fit3.aic
###Output
_____no_output_____
###Markdown
AIC is smallest when p & q are 4, so we stick to this model. Next we perform the forecasting: we manually extract the model coefficients and forecast by applying the direct definition of the ARMA(4,4) model. Extracting the parameters:
###Code
pars=model_fit.params
###Output
_____no_output_____
###Markdown
Defining the array needed to make predictions:
###Code
array_for_pred=np.array(ytrain[-4:])
array_for_pred=np.append(array_for_pred, ytest[:-1])
###Output
_____no_output_____
###Markdown
Defining white noise, let it for now be standard normal rv's.
###Code
random_norm=np.random.normal(0,1,size=array_for_pred.shape[0])
###Output
_____no_output_____
###Markdown
Now we apply the direct definition of ARMA(4,4) and build single-step forecasts from it
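For reference, the defining equation of a (zero-mean) ARMA(4,4) process with AR coefficients $\phi_i$, MA coefficients $\theta_j$ and white noise $\varepsilon_t$ is:
$$y_t = \sum_{i=1}^{4} \phi_i\, y_{t-i} + \varepsilon_t + \sum_{j=1}^{4} \theta_j\, \varepsilon_{t-j}$$
The loop below evaluates the lagged terms with the fitted coefficients (here the first eight entries of `pars` hold the four AR and four MA coefficients), plugging in the simulated noise for the $\varepsilon$ terms.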
###Code
predictions_norm=np.empty(shape=ytest.shape[0])
for i in range(0, ytest.shape[0]):
    predictions_norm[i] = (pars[0]*array_for_pred[i] + pars[1]*array_for_pred[i+1]
                           + pars[2]*array_for_pred[i+2] + pars[3]*array_for_pred[i+3]
                           + pars[4]*random_norm[i] + pars[5]*random_norm[i+1]
                           + pars[6]*random_norm[i+2] + pars[7]*random_norm[i+3])
###Output
_____no_output_____
###Markdown
Now let's plot the results
###Code
plt.plot(predictions_norm,color='red',label='predicted')
plt.plot(ytest.values,label='actual')
plt.legend()
###Output
_____no_output_____
###Markdown
Wow... the predicted series is far too volatile. The problem seems to be coming from the white noise: it dominates the AR terms and results in far too much variability in scale. Solution? Let's try another white noise, with a smaller standard deviation!
###Code
ytrain.std()
random_norm2=np.random.normal(0,0.02,size=array_for_pred.shape[0])
predictions_norm2=np.empty(shape=ytest.shape[0])
for i in range(0, ytest.shape[0]):
    predictions_norm2[i] = (pars[0]*array_for_pred[i] + pars[1]*array_for_pred[i+1]
                            + pars[2]*array_for_pred[i+2] + pars[3]*array_for_pred[i+3]
                            + pars[4]*random_norm2[i] + pars[5]*random_norm2[i+1]
                            + pars[6]*random_norm2[i+2] + pars[7]*random_norm2[i+3])
plt.plot(predictions_norm2,color='red',label='predicted')
plt.plot(ytest.values,label='actual')
plt.legend()
###Output
_____no_output_____
###Markdown
Now that is much better! Although we seem to underestimate some shocks...
###Code
mean_absolute_error(ytest,predictions_norm2)
###Output
_____no_output_____
###Markdown
The MAE is higher, though, than it was for the ML models... Computing and plotting prices from the returns...
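The reconstruction below simply compounds each predicted simple return onto the previous price, starting from the last observed price:
$$P_t = P_{t-1}\,(1 + \hat{r}_t)$$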
###Code
start_test=aapl2.loc['2018-11-28'].values
predicted_price=np.empty(ytest.shape[0])
predicted_price[0]=start_test
for i in range(1, predicted_price.shape[0]):
predicted_price[i]=predicted_price[i-1]*(1+predictions_norm2[i])
actual_price=aapl2.loc['2018-11-28':].values
plt.plot(actual_price)
plt.plot(predicted_price,color='red')
plt.xlabel('Time, days')
plt.ylabel('Price, USD')
plt.legend(['Actual price','Price predicted by ARMA(4,4) with Gaussian WN'])
###Output
_____no_output_____
###Markdown
Well, the computed price doesn't match the actual price at all as it is unable to capture even the general upward trend. Finally, let's try classification
###Code
pred_cl=(predictions_norm2>0).astype(int)
from sklearn.metrics import accuracy_score
print('Accuracy score Time Series:' ,accuracy_score(y_cl_test,pred_cl))
###Output
Accuracy score Time Series: 0.5274725274725275
|
GA Data Science Final Project - 6- NLP - KMeans Movie Titles.ipynb | ###Markdown
GA Data Science Final Project - 6- NLP - KMeans Movie Titles From: http://brandonrose.org/clustering
###Code
import numpy as np
import pandas as pd
import nltk
import re
import os
import codecs
from sklearn import feature_extraction
import mpld3
df = pd.read_csv('issue_comments_jupyter_copy.csv')
df['org'] = df['org'].astype('str')
df['repo'] = df['repo'].astype('str')
df['comments'] = df['comments'].astype('str')
df['user'] = df['user'].astype('str')
comments = df.comments
with open ('all_comments.txt',"wb") as fd:
all_comments = comments.str.cat(sep=' ')
fd.write (all_comments)
###Output
_____no_output_____
###Markdown
Stopwords, stemming, and tokenizing This section is focused on defining some functions to manipulate the synopses. First, I load NLTK's list of English stop words. Stop words are words like "a", "the", or "in" which don't convey significant meaning. I'm sure there are much better explanations of this out there.
###Code
# load nltk's English stopwords as variable called 'stopwords'
stopwords = nltk.corpus.stopwords.words('english')
print stopwords[:10]
###Output
_____no_output_____
###Markdown
Next I import the Snowball Stemmer which is actually part of NLTK. Stemming is just the process of breaking a word down into its root.
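For instance (a quick sanity check; the exact strings can differ slightly between NLTK versions, and stems are not always real words):

```python
from nltk.stem.snowball import SnowballStemmer

stemmer = SnowballStemmer("english")
print stemmer.stem("running")   # expected: 'run'
print stemmer.stem("movies")    # expected: 'movi'
```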
###Code
# load nltk's SnowballStemmer as variabled 'stemmer'
from nltk.stem.snowball import SnowballStemmer
stemmer = SnowballStemmer("english")
###Output
_____no_output_____
###Markdown
Below I define two functions: *tokenize_and_stem* tokenizes (splits the synopsis into a list of its respective words, or tokens) and also stems each token; *tokenize_only* tokenizes the synopsis only. I use both these functions to create a dictionary which becomes important in case I want to use stems for an algorithm, but later convert stems back to their full words for presentation purposes. Guess what, I do want to do that!
###Code
# here I define a tokenizer and stemmer which returns the set of stems in the text that it is passed
def tokenize_and_stem(text):
# first tokenize by sentence, then by word to ensure that punctuation is caught as it's own token
tokens = [word for sent in nltk.sent_tokenize(text) for word in nltk.word_tokenize(sent)]
filtered_tokens = []
# filter out any tokens not containing letters (e.g., numeric tokens, raw punctuation)
for token in tokens:
if re.search('[a-zA-Z]', token):
filtered_tokens.append(token)
stems = [stemmer.stem(t) for t in filtered_tokens]
return stems
def tokenize_only(text):
# first tokenize by sentence, then by word to ensure that punctuation is caught as it's own token
tokens = [word.lower() for sent in nltk.sent_tokenize(text) for word in nltk.word_tokenize(sent)]
filtered_tokens = []
# filter out any tokens not containing letters (e.g., numeric tokens, raw punctuation)
for token in tokens:
if re.search('[a-zA-Z]', token):
filtered_tokens.append(token)
return filtered_tokens
###Output
_____no_output_____
###Markdown
Below I use my stemming/tokenizing and tokenizing functions to iterate over the list of synopses to create two vocabularies: one stemmed and one only tokenized.
###Code
#not super pythonic, no, not at all.
#use extend so it's a big flat list of vocab
totalvocab_stemmed = []
totalvocab_tokenized = []
for i in comments:  # loop over each comment; iterating over the joined all_comments string would yield single characters
allwords_stemmed = tokenize_and_stem(i) #for each item in 'synopses', tokenize/stem
totalvocab_stemmed.extend(allwords_stemmed) #extend the 'totalvocab_stemmed' list
allwords_tokenized = tokenize_only(i)
totalvocab_tokenized.extend(allwords_tokenized)
###Output
_____no_output_____
###Markdown
Using these two lists, I create a pandas DataFrame with the stemmed vocabulary as the index and the tokenized words as the column. The benefit of this is it provides an efficient way to look up a stem and return a full token. The downside here is that stems to tokens are one to many: the stem 'run' could be associated with 'ran', 'runs', 'running', etc. For my purposes this is fine--I'm perfectly happy returning the first token associated with the stem I need to look up.
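Once `vocab_frame` (built in the next cell) exists, looking up a stem returns every surface form that was reduced to it. For example (hypothetical stem shown; any stem present in the corpus works):

```python
# rows are indexed by stem; the 'words' column holds the original tokens
print vocab_frame.ix['run'].head()   # e.g. rows for 'running', 'runs', ... (assumes 'run' occurs)
```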
###Code
vocab_frame = pd.DataFrame({'words': totalvocab_tokenized}, index = totalvocab_stemmed)
print 'there are ' + str(vocab_frame.shape[0]) + ' items in vocab_frame'
###Output
there are 4232455 items in vocab_frame
###Markdown
You'll notice there is clearly some repetition here. I could clean it up, but there are only 312209 items in the DataFrame which isn't huge overhead in looking up a stemmed word based on the stem-index. TF-IDF
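As a quick reminder of what the vectorizer below computes (scikit-learn actually uses a smoothed variant, so take this as the idea rather than the exact formula), each term $t$ in document $d$ is weighted by
$$\mathrm{tfidf}(t,d) = \mathrm{tf}(t,d)\cdot \log\frac{N}{\mathrm{df}(t)},$$
where $N$ is the number of documents and $\mathrm{df}(t)$ the number of documents containing $t$.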
###Code
from sklearn.feature_extraction.text import TfidfVectorizer
#define vectorizer parameters
tfidf_vectorizer = TfidfVectorizer(max_df=0.8, max_features=200000,
min_df=0.2, stop_words='english',
use_idf=True, tokenizer=tokenize_and_stem, ngram_range=(1,3))
%time tfidf_matrix = tfidf_vectorizer.fit_transform(comments) #fit the vectorizer to synopses
print(tfidf_matrix.shape)
terms = tfidf_vectorizer.get_feature_names()
from sklearn.metrics.pairwise import cosine_similarity
dist = 1 - cosine_similarity(tfidf_matrix)
print
print
###Output
###Markdown
K-means clustering Now onto the fun part. Using the tf-idf matrix, you can run a slew of clustering algorithms to better understand the hidden structure within the synopses. I first chose k-means. K-means initializes with a pre-determined number of clusters (I chose 5). Each observation is assigned to a cluster (cluster assignment) so as to minimize the within cluster sum of squares. Next, the mean of the clustered observations is calculated and used as the new cluster centroid. Then, observations are reassigned to clusters and centroids recalculated in an iterative process until the algorithm reaches convergence. I found it took several runs for the algorithm to converge to a global optimum as k-means is susceptible to reaching local optima.
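The objective that the assignment and centroid-update steps described above minimise is the within-cluster sum of squares:
$$\min_{C_1,\dots,C_k} \sum_{j=1}^{k} \sum_{x_i \in C_j} \lVert x_i - \mu_j \rVert^2,$$
where $\mu_j$ is the centroid (mean) of cluster $C_j$.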
###Code
from sklearn.cluster import KMeans
num_clusters = 5
km = KMeans(n_clusters=num_clusters)
%time km.fit(tfidf_matrix)
clusters = km.labels_.tolist()
from sklearn.externals import joblib
#uncomment the below to save your model
#since I've already run my model I am loading from the pickle
joblib.dump(km, 'doc_cluster.pkl')
km = joblib.load('doc_cluster.pkl')
clusters = km.labels_.tolist()
###Output
_____no_output_____
###Markdown
Here, I create a dictionary of titles, ranks, the synopsis, the cluster assignment, and the genre [rank and genre were scraped from IMDB]. I convert this dictionary to a Pandas DataFrame for easy access. I'm a huge fan of Pandas and recommend taking a look at some of its awesome functionality which I'll use below, but not describe in a ton of detail.
###Code
comments = { 'comments': comments, 'cluster': clusters }
frame = pd.DataFrame(comments, index = [clusters], columns = ['comments', 'cluster'])
frame['cluster'].value_counts() #number of comments per cluster (clusters from 0 to 4)
grouped = frame['comments'].groupby(frame['cluster']) #groupby cluster for aggregation purposes
grouped.mean() #aggregate per cluster (only meaningful for numeric columns, so not very informative for raw text)
###Output
_____no_output_____
###Markdown
Here is some fancy indexing and sorting on each cluster to identify which are the top n (I chose n=6) words that are nearest to the cluster centroid. This gives a good sense of the main topic of each cluster.
###Code
from __future__ import print_function
print("Top terms per cluster:")
print()
#sort cluster centers by proximity to centroid
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
for i in range(num_clusters):
print("Cluster %d words:" % i, end='')
for ind in order_centroids[i, :6]: #replace 6 with n words per cluster
print(' %s' % vocab_frame.ix[terms[ind].split(' ')].values.tolist()[0][0].encode('utf-8', 'ignore'), end=',')
print() #add whitespace
print() #add whitespace
print("Cluster %d titles:" % i, end='')
for title in frame.ix[i]['title'].values.tolist():
print(' %s,' % title, end='')
print() #add whitespace
print() #add whitespace
print()
print()
###Output
Top terms per cluster:
Cluster 0 words:
###Markdown
Visualizing document clustersIn this section, I demonstrate how you can visualize the document clustering output using matplotlib and mpld3 (a matplotlib wrapper for D3.js). First I define some dictionaries for going from cluster number to color and to cluster name. I based the cluster names off the words that were closest to each cluster centroid.
###Code
#set up colors per clusters using a dict
cluster_colors = {0: '#1b9e77', 1: '#d95f02', 2: '#7570b3', 3: '#e7298a', 4: '#66a61e'}
#set up cluster names using a dict
cluster_names = {0: 'Family, home, war',
1: 'Police, killed, murders',
2: 'Father, New York, brothers',
3: 'Dance, singing, love',
4: 'Killed, soldiers, captain'}
###Output
_____no_output_____
###Markdown
Next, I plot the labeled observations colored by cluster using matplotlib; the 2-D coordinates (xs, ys) come from a projection of the cosine-distance matrix (see the sketch at the top of the next cell). I won't get into too much detail about the matplotlib plot, but I tried to provide some helpful commenting.
###Code
#some ipython magic to show the matplotlib plots inline
%matplotlib inline
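# NOTE: xs, ys and titles are not defined in this notebook excerpt. A minimal sketch
# (assumption): project the cosine-distance matrix `dist` to 2-D with MDS and use the
# raw comments as point labels (can be slow for many documents).
from sklearn.manifold import MDS
mds = MDS(n_components=2, dissimilarity="precomputed", random_state=1)
pos = mds.fit_transform(dist)  # shape (n_documents, 2)
xs, ys = pos[:, 0], pos[:, 1]
titles = frame['comments'].tolist()  # hypothetical labels; film titles in the original tutorial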
#create data frame that has the result of the MDS plus the cluster numbers and titles
df = pd.DataFrame(dict(x=xs, y=ys, label=clusters, title=titles))
#group by cluster
groups = df.groupby('label')
# set up plot
fig, ax = plt.subplots(figsize=(17, 9)) # set size
ax.margins(0.05) # Optional, just adds 5% padding to the autoscaling
#iterate through groups to layer the plot
#note that I use the cluster_name and cluster_color dicts with the 'name' lookup to return the appropriate color/label
for name, group in groups:
ax.plot(group.x, group.y, marker='o', linestyle='', ms=12,
label=cluster_names[name], color=cluster_colors[name],
mec='none')
ax.set_aspect('auto')
ax.tick_params(\
axis= 'x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom='off', # ticks along the bottom edge are off
top='off', # ticks along the top edge are off
labelbottom='off')
ax.tick_params(\
axis= 'y', # changes apply to the y-axis
which='both', # both major and minor ticks are affected
left='off', # ticks along the bottom edge are off
top='off', # ticks along the top edge are off
labelleft='off')
ax.legend(numpoints=1) #show legend with only 1 point
#add label in x,y position with the label as the film title
for i in range(len(df)):
ax.text(df.ix[i]['x'], df.ix[i]['y'], df.ix[i]['title'], size=8)
plt.show() #show the plot
#uncomment the below to save the plot if need be
#plt.savefig('clusters_small_noaxes.png', dpi=200)
###Output
_____no_output_____ |
jupyter/pca/Group_Data_Analysis_PCA_4th_node_velocity.ipynb | ###Markdown
Group Data Analysis PCA 4th Trial - node velocity* Version: '0.0.4'* Date: 2021-05-03* Author: Jea Kwon* Description: Previously, the PCA analysis used avatar coordinates with the spine aligned on a plane; this time the spine is aligned on an axis.
###Code
from avatarpy import Avatar
import os
import glob
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import cufflinks as cf
from scipy.stats import zscore
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
cf.go_offline(connected=True)
root = r"C:\Users\Jay\Desktop\avatar_young_adult\data\best1_20210503"
avatars = dict(
wt=dict(
young=[],
adult=[],
),
ko=dict(
young=[],
adult=[],
)
)
for path, subdirs, files in os.walk(root):
for name in files:
if name.lower().endswith('.csv'):
csv_path = os.path.join(path, name)
age = os.path.basename(os.path.dirname(path))
genotype = os.path.basename(os.path.dirname(os.path.dirname(path)))
avatars[genotype][age].append(Avatar(csv_path=csv_path, ID=name))
###Output
_____no_output_____
###Markdown
Create walking event data Definition of walking- Moved more than 5 cm in 1 second (= 20 frames)- For more details, take a look at Group_Data_Analysis_PCA_1st_Trial Event Search function
###Code
def get_event_indices(boo, event_length):
"""Returns list of event indices.
ex) [(start 1, end 1), (start 2, end 2), (start 3, end 3), ..., (start N, end N)]
"""
indices = np.arange(len(boo))
condition = np.nonzero(boo[1:] != boo[:-1])[0] + 1
split_indices = np.split(indices, condition)
true_indices = split_indices[0::2] if boo[0] else split_indices[1::2]
event_indice_pair = [(idx[0]-event_length+1, idx[0]+1) for idx in true_indices]
return event_indice_pair
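# Example (sketch): get_event_indices(np.array([False, True, True, False, True]), 2)
# returns [(0, 2), (3, 5)]: one (start, end) index pair per run of True values, spanning
# the `event_length` frames that end at the first True frame of each run.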
###Output
_____no_output_____
###Markdown
Validation of event search - Take a look at Group_Data_Analysis_PCA_2nd_Trial Collecting Event velocity data
###Code
ava = avatars['wt']['young'][0]
ava.velocity
###Output
_____no_output_____
###Markdown
- Take a look at Group_Data_Analysis_PCA_2nd_Trial
###Code
wt_young_event_data = []
for avatar in avatars['wt']['young']:
boo = (avatar.distance['anus'].rolling(20).sum()>5).values # boolean array
event_indices = get_event_indices(boo, 20)
for i, idx in enumerate(event_indices):
x = avatar.velocity.loc[avatar.index[idx[0]:idx[1]]]
if x.shape[0]!=20:
continue
wt_young_event_data.append(x.values.flatten())
wt_young_event_data = np.stack(wt_young_event_data)
wt_adult_event_data = []
for avatar in avatars['wt']['adult']:
boo = (avatar.distance['anus'].rolling(20).sum()>5).values # boolean array
event_indices = get_event_indices(boo, 20)
event_data = []
for i, idx in enumerate(event_indices):
x = avatar.velocity.loc[avatar.index[idx[0]:idx[1]]]
if x.shape[0]!=20:
continue
wt_adult_event_data.append(x.values.flatten())
wt_adult_event_data = np.stack(wt_adult_event_data)
###Output
_____no_output_____
###Markdown
In total, 1857 events were acquired from 5 wt young mice across 5 sessions each, and 2248 events from 5 wt adult mice across 5 sessions each.
###Code
X = np.concatenate([wt_young_event_data, wt_adult_event_data])
X_ = StandardScaler().fit_transform(X)
pca = PCA(n_components=2)
pc = pca.fit_transform(X_)
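# sketch: check how much variance the first two PCs capture, e.g.
# print(pca.explained_variance_ratio_)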
y = np.concatenate([np.zeros(wt_young_event_data.shape[0]), np.ones(wt_adult_event_data.shape[0])])
pc_y = np.c_[pc,y]
df = pd.DataFrame(pc_y,columns=['PC1','PC2','age'])  # the label encodes young (0) vs adult (1); both groups are wt
sns.scatterplot(data=df,x='PC1',y='PC2',hue='age', alpha=0.2)
# plt.xlim(-10, 10)
# plt.ylim(-10, 10)
###Output
_____no_output_____ |
notebooks/Gmail API.ipynb | ###Markdown
Fetch all labels and tags on this account
###Code
try:
# Call the Gmail API
service = build('gmail', 'v1', credentials=creds)
results = service.users().labels().list(userId='me').execute()
labels = results.get('labels', [])
if not labels:
print('No labels found.')
else:
print('Labels:')
for label in labels:
print(label['name'])
except HttpError as error:
# TODO(developer) - Handle errors from gmail API.
print(f'An error occurred: {error}')
###Output
_____no_output_____
###Markdown
Get unread message IDs in InboxMessages are paginated, thus the iteration code
###Code
try:
service = build('gmail', 'v1', credentials=creds)
results = service.users().messages().list(userId='me', q="in:inbox is:unread").execute()
messages = []
if 'messages' in results:
messages.extend(results['messages'])
while 'nextPageToken' in results:
        page_token = results['nextPageToken']
        results = service.users().messages().list(userId='me', q="in:inbox is:unread", pageToken=page_token).execute()
if 'messages' in results:
messages.extend(results['messages'])
except HttpError as error:
# TODO(developer) - Handle errors from gmail API.
print(f'An error occurred: {error}')
###Output
_____no_output_____
###Markdown
and their countThis is all I want for my polybar notifier
###Code
len(messages)  # total across all pages, not just the last page held in `results`
###Output
_____no_output_____ |
Python Fundamentals/Exception Handling.ipynb | ###Markdown
Exception HandlingThe most common reason of an error in a Python program is when a certain statement is not in accordance with the prescribed usage. Such an error is called a syntax error. The Python interpreter immediately reports it, usually along with the reason.| Exception | Description || --- | --- || AssertionError | Raised when the assert statement fails.| AttributeError | Raised on the attribute assignment or reference fails.| EOFError | Raised when the input() function hits the end-of-file condition.| FloatingPointError | Raised when a floating point operation fails.| GeneratorExit | Raised when a generator's close() method is called.| ImportError | Raised when the imported module is not found.| IndexError | Raised when the index of a sequence is out of range.| KeyError | Raised when a key is not found in a dictionary.| KeyboardInterrupt | Raised when the user hits the interrupt key (Ctrl+c or delete).| MemoryError | Raised when an operation runs out of memory.| NameError | Raised when a variable is not found in the local or global scope.| NotImplementedError | Raised by abstract methods.| OSError | Raised when a system operation causes a system-related error.| OverflowError | Raised when the result of an arithmetic operation is too large to be represented.| ReferenceError | Raised when a weak reference proxy is used to access a garbage collected referent.| RuntimeError | Raised when an error does not fall under any other category.| StopIteration | Raised by the next() function to indicate that there is no further item to be returned by the iterator.| SyntaxError | Raised by the parser when a syntax error is encountered.| IndentationError | Raised when there is an incorrect indentation.| TabError | Raised when the indentation consists of inconsistent tabs and spaces.| SystemError | Raised when the interpreter detects internal error.| SystemExit | Raised by the sys.exit() function.| TypeError | Raised when a function or operation is applied to an object of an incorrect type.| UnboundLocalError | Raised when a reference is made to a local variable in a function or method, but no value has been bound to that variable.| UnicodeError | Raised when a Unicode-related encoding or decoding error occurs.| UnicodeEncodeError | Raised when a Unicode-related error occurs during encoding.| UnicodeDecodeError | Raised when a Unicode-related error occurs during decoding.| UnicodeTranslateError | Raised when a Unicode-related error occurs during translation.| ValueError | Raised when a function gets an argument of correct type but improper value.| ZeroDivisionError | Raised when the second operand of a division or module operation is zero.Python uses `try` and `except` keywords to handle exceptions. Both keywords are followed by indented blocks:``` pythontry: statement in try blockexcept: executed when error flagged in try block```Online resources: https://www.tutorialsteacher.com/python/exception-handling-in-python and https://www.tutorialsteacher.com/python/error-types-in-pythonNow, let's practice some **errors and exception handling*** transform all strings from list to upper, if the element is not string don't transform it* use try except block without use of 'if' statement
###Code
for x in ['today','i', 8, 2, 'eggs']:
try:
print(x.upper())
except AttributeError:
print(x)
###Output
TODAY
I
8
2
EGGS
###Markdown
**We have the function created below:**Luke Skywalker has family and friends. Help him remind them who is who. Given a string with a name, return the relation of that person to Luke.**Person --> Relation**- Darth Vader --> father- Leia --> sister- Han --> brother in law- R2D2 --> droid Examples> relation_to_luke("Darth Vader") ➞ "Luke, I am your father.">> relation_to_luke("Leia") ➞ "Luke, I am your sister.">> relation_to_luke("Han") ➞ "Luke, I am your brother in law."
###Code
def relation_to_luke(text):
_dict = []
_dict["Darth Vader"] = "father"
_dict["Leia"] = "sister"
_dict["Ham"] = "brother in law"
_dict["R2D2"] = "droid"
print(f"Luke, I am your {+ _dict[text]}")
###Output
_____no_output_____
###Markdown
Task IFix errors in the function above so we can run following code
###Code
def relation_to_luke(text):
_dict = {}
_dict["Darth Vader"] = "father"
_dict["Leia"] = "sister"
_dict["Han"] = "brother in law"
_dict["R2D2"] = "droid"
print(f"\"Luke, I am your {_dict[text]}\"")
relation_to_luke("Darth Vader")
relation_to_luke("Leia")
relation_to_luke("Han")
relation_to_luke("R2D2")
###Output
"Luke, I am your father"
"Luke, I am your sister"
"Luke, I am your brother in law"
"Luke, I am your droid"
###Markdown
Task IIUse exception handling so we can run the function with any string. In this case, the function will return following:**relation_to_luke("aaaa") ➞ "aaaa is not in the relation with Luke"**> Note> We **cannot** use **if** statement for this
###Code
string = input("Who do you want to check for Luke's relations? ")
try:
relation_to_luke(string)
except:
print(f"\"{string} is not in the relation with Luke\"")
###Output
Who do you want to check for Luke's relations? Leia
|
tutorials/BIDMach_parameter_tuning.ipynb | ###Markdown
BIDMach: parameter tuning In this notebook we'll explore automated parameter exploration by grid search.
###Code
import $exec.^.lib.bidmach_notebook_init
if (Mat.hasCUDA > 0) GPUmem
###Output
_____no_output_____
###Markdown
Dataset: Reuters RCV1 V2 The dataset is the widely used Reuters news article dataset RCV1 V2. This dataset and several others are loaded by running the script getdata.sh from the BIDMach/scripts directory. The data include both train and test subsets, and train and test labels (cats).
###Code
var dir = "../data/rcv1/" // adjust to point to the BIDMach/data/rcv1 directory
tic
val train = loadSMat(dir+"docs.smat.lz4")
val cats = loadFMat(dir+"cats.fmat.lz4")
val test = loadSMat(dir+"testdocs.smat.lz4")
val tcats = loadFMat(dir+"testcats.fmat.lz4")
toc
###Output
_____no_output_____
###Markdown
First let's enumerate some parameter combinations for learning rate and time exponent of the optimizer (texp)
###Code
val lrates = col(0.03f, 0.1f, 0.3f, 1f) // 4 values
val texps = col(0.3f, 0.4f, 0.5f, 0.6f, 0.7f) // 5 values
###Output
_____no_output_____
###Markdown
The next step is to enumerate all pairs of parameters. We can do this using the kron operator for now; this will eventually be a custom function:
###Code
val lrateparams = ones(texps.nrows, 1) ⊗ lrates
val texpparams = texps ⊗ ones(lrates.nrows,1)
lrateparams \ texpparams
###Output
_____no_output_____
###Markdown
Here's the learner again:
###Code
val (mm, opts) = GLM.learner(train, cats, GLM.logistic)
###Output
_____no_output_____
###Markdown
To keep things simple, we'll focus on just one category and train many models for it. The "targmap" option specifies a mapping from the actual base categories to the model categories. We'll map from category six to all our models:
###Code
val nparams = lrateparams.length
val targmap = zeros(nparams, 103)
targmap(?,6) = 1
opts.targmap = targmap
opts.lrate = lrateparams
opts.texp = texpparams
mm.train
val (pp, popts) = GLM.predictor(mm.model, test)
###Output
_____no_output_____
###Markdown
And invoke the predict method on the predictor:
###Code
pp.predict
val preds = FMat(pp.preds(0))
pp.model.asInstanceOf[GLM].mats.length
###Output
_____no_output_____
###Markdown
Although ll values are printed above, they are not meaningful (there is no target to compare the prediction with). We can now compare the accuracy of predictions (preds matrix) with ground truth (the tcats matrix).
###Code
val vcats = targmap * tcats // create some virtual cats
val lls = mean(ln(1e-7f + vcats ∘ preds + (1-vcats) ∘ (1-preds)),2) // actual logistic likelihood
mean(lls)
###Output
_____no_output_____
###Markdown
A more thorough measure is ROC area:
###Code
val rocs = roc2(preds, vcats, 1-vcats, 100) // Compute ROC curves for all categories
plot(rocs)
val aucs = mean(rocs)
###Output
_____no_output_____
###Markdown
The maxi2 function will find the max value and its index.
###Code
val (bestv, besti) = maxi2(aucs)
###Output
_____no_output_____
###Markdown
And using the best index we can find the optimal parameters:
###Code
texpparams(besti) \ lrateparams(besti)
###Output
_____no_output_____ |
homeworks/HW7/HW7-Final.ipynb | ###Markdown
Homework 7 Due Date: Wednesday, October 25th at 11:59 PM Problem 1: Linked List ClassWrite a linked list class called `LinkedList`. Remember, a singly linked list is made up of nodes each of which contain a value and a pointer. The first node is called the "head node".Here are the required methods:* `__init__(self, head)` where `head` is the value of the head node. You could make the head node an attribute.* `__len__(self)`: Returns the number of elements in the linked list.* `__getitem__(self, index)` returns the value of the node corresponding to `index`. Include checks to make sure that `index` is not out of range and that the user is not trying to index and empty list.* `__repr__(self)` returns `LinkedList(head_node)`.* `insert_front(self, element)` inserts a new node with value `element` at the beginning of the list.* `insert_back(self, element)` inserts a new node with value `element` at the end of the list.Note: An alternative implementation is to create a `Node` class. You are not required to make a `Node` class but you may if you prefer that implementation. Please don't steal that implementation from the online forums. I've seen those too.
###Code
class NodeLL():
def __init__(self,value,pointer):
self.value = value
self.next = pointer
def __repr__(self):
return str(self.value)
class LinkedList():
def __init__(self, head):
self.last_pointer = None
self.head_node = NodeLL(head,self.last_pointer)
self.len = 1
    def __len__(self):
        return self.len
def __repr__(self):
return "LinkedList({})".format(repr(self.head_node))
    def __getitem__(self,index):
        # valid indices run from 0 to len-1; anything else returns a message
        if 0 <= index < self.len:
            node = self.head_node
            for _ in range(index):
                node = node.next
            return node.value
        else:
            return "No element at this index."
def insert_front(self,element):
new_node = NodeLL(element,self.head_node)
self.head_node = new_node
self.len += 1
def insert_back(self,element):
node = self.head_node
while node.next != None:
node = node.next
node.next = NodeLL(element,None)
self.len += 1
#testing class
llist = LinkedList(1)
llist.insert_front(2)
llist.insert_front(3)
llist.__getitem__(4)
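# note: indexing syntax also works, e.g. llist[1] calls __getitem__ under the hood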
###Output
_____no_output_____
###Markdown
Problem 2: Binary Tree ClassA binary search tree is a binary tree with the invariant that for any particular node the left child is smaller and the right child is larger. Create the class `BinaryTree` with the following specifications:`__init__(self)`: Constructor takes no additional arguments`insert(self, val)`: This method will insert `val` into the tree(Optional) `remove(self, val)`: This will remove `val` from the tree.1. If the node to be deleted has no children then just remove it.2. If the node to be deleted has only one child, remove the node and replace it with its child.3. If the node to be deleted has two children, replace the node to be deleted with the maximum value in the left subtree. Finally, delete the node with the maximum value in the left-subtree.`getValues(self. depth)`: Return a list of the entire row of nodes at the specified depth with `None` at the index if there is no value in the tree. The length of the list should therefore be $2^{\text{depth}}$. Here is a sample output:```pythonbt = BinaryTree()arr = [20, 10, 17, 14, 3, 0]for i in arr: bt.insert(i)print("Height of binary tree is {}.\n".format(len(bt)))for i in range(len(bt)): print("Level {0} values: {1}".format(i, bt.getValues(i)))``````Height of binary tree is 4.Level 0 values: [20]Level 1 values: [10, None]Level 2 values: [3, 17, None, None]Level 3 values: [0, None, 14, None, None, None, None, None]```Note that you do not need to format your output in this way. Nor are you required to implement a `__len__` method to compute the height of the tree. I did this because it was convenient for illustration purposes. This example is simply meant to show you some output at each level of the tree.
###Code
class Node():
def __init__(self,value):
self.value = value
self.left_child = None
self.right_child = None
class BinaryTree():
def __init__(self):
self.tree = None
def insert(self,val):
if(self.tree == None):
self.tree = Node(val)
else:
self.add(val,self.tree)
def add(self,value,node):
if(value < node.value):
if(node.left_child == None):
node.left_child = Node(value)
else:
self.add(value,node.left_child)
else:
if(node.right_child == None):
node.right_child = Node(value)
else:
self.add(value,node.right_child)
def getValues(self,depth,node=None,vals=[]):
if(node == None):
if(self.tree == None):
return []
node = self.tree
vals = []
if (depth==0):
vals.append(node.value)
else:
if(node.left_child != None):
self.getValues(depth-1,node.left_child,vals)
else:
for i in range(int(2**(depth-1))):
vals.append(None)
if(node.right_child != None):
self.getValues(depth-1,node.right_child,vals)
else:
for i in range(int(2**(depth-1))):
vals.append(None)
return vals
def search(self,value, node,parent=None):
if(node != None):
if(value < node.value):
return self.search(value, node.left_child,node)
elif(value > node.value):
return self.search(value,node.right_child,node)
elif(node.value == value):
return parent,node
else:
raise Exception()
    def remove(self, val):
        parent, node = self.search(val,self.tree)
        if(parent == None):
            # removing the root: this simplified version discards the whole tree
            self.tree = None
            return
        if((node.left_child == None) and (node.right_child == None)):
            # case 1: leaf node, just unlink it from its parent
            replacing_node = None
        elif((node.left_child == None) or (node.right_child == None)):
            # case 2: one child, replace the node with that child
            replacing_node = node.left_child if node.left_child != None else node.right_child
        else:
            # case 3: two children, replace the node with its left subtree
            # (a simplification of the "max of the left subtree" rule in the spec)
            replacing_node = node.left_child
        if (parent.value > val):
            parent.left_child = replacing_node
        else:
            parent.right_child = replacing_node
#Testing insert() & getValues()
tree = BinaryTree()
tree.insert(2)
tree.insert(1)
tree.insert(3)
tree.insert(0)
tree.insert(4)
tree.insert(5)
tree.getValues(3)
#Testing remove()
t2 = BinaryTree()
t2.insert(2)
t2.insert(1)
t2.insert(4)
t2.insert(3)
t2.insert(5)
t2.remove(4)
t2.getValues(1)
###Output
_____no_output_____ |
Estudos/Python_Data_Science/Pandas/Curso_Pandas/Base_de_Dados.ipynb | ###Markdown
Analysis Report I Importing the Dataset
###Code
import pandas as pd
# importing
pd.read_csv('dados/aluguel.csv', sep=';')
dados = pd.read_csv('dados/aluguel.csv', sep=';')
dados
type(dados)
dados.info()
dados
dados.head(10)
###Output
_____no_output_____
###Markdown
General Information About the Dataset
###Code
dados.dtypes
tipos_de_dados = pd.DataFrame(dados.dtypes, columns = ['Data Types'])
tipos_de_dados.columns.name = 'Variables'
tipos_de_dados
dados.shape
print('The dataset has {} records (properties) and {} variables'.format(dados.shape[0], dados.shape[1]))
###Output
The dataset has 32960 records (properties) and 9 variables
|
YesaLab1Part1.ipynb | ###Markdown
Allen Daniel Yesa Aditya Subramanian Muralidaran
###Code
sales1 = c(12,14,16,29,30,45,19,20,16, 19, 34, 20)
sales2 = rpois(12,34) # random numbers, Poisson distribution, mean at 34, 12 numbers
par(bg="cornsilk")
plot(sales1, col="blue", type="o",pch=22, ylim=c(0,100), xlab="Month", ylab="Sales" )
title(main="Sales by Month")
lines(sales2, type="o", pch=22, lty=2, col="red")
grid(nx=NULL, ny=NULL)
legend("topright", inset=.05, c("Sales1","Sales2"), fill=c("blue","red"), horiz=TRUE)
sales<-read.table(file.choose(), header=T)
sales # to verify that data has been read
barplot(as.matrix(sales), main="Sales Data", ylab= "Total",beside=T, col=rainbow(5))
fn<-boxplot(sales,col=c("orange","green"))$stats
text(1.45, fn[3,2], paste("Median =", fn[3,2]), adj=0, cex=.7)
text(0.45, fn[3,1],paste("Median =", fn[3,1]), adj=0, cex=.7)
grid(nx=NA, ny=NULL)
fb1<-read.csv(file.choose())
aapl1<-read.csv(file.choose())
par(bg="cornsilk")
plot(aapl1$Adj.Close, col="blue", type="o", ylim=c(150,200), xlab="Days", ylab="Price" )
lines(fb1$Adj.Close, type="o", pch=2, lty=2, col="red")
legend("topright", inset=.05, c("Apple","Facebook"), fill=c("blue","red"), horiz=TRUE)
hist(aapl1$Adj.Close, col=rainbow(8))
attach(BOD)
head(BOD)
summary(BOD)
detach(BOD)
head(uspop)
plot(uspop)
library("ggmap")
library("maptools")
library(maps)
visited <- c("SFO", "Chennai,India", "London", "Melbourne", "Lima,peru", "Johannesburg,SA")
ll.visited <- geocode(visited)
visit.x <- ll.visited$lon
visit.y <- ll.visited$lat
map("world", fill=TRUE, col="white", bg="lightblue", ylim=c(-60, 90), mar=c(0,0,0,0))
points(visit.x,visit.y, col="red", pch=36)
library("ggmap")
library("maptools")
library(maps)
visited <- c("SFO", "New York", "Buffalo,Newyork", "Dallas,Texas")
ll.visited <- geocode(visited)
visit.x <- ll.visited$lon
visit.y <- ll.visited$lat
map("state", fill=TRUE, col="white", bg="lightblue", mar=c(0,0,0,0))
points(visit.x,visit.y, col="red", pch=36)
library(lattice)
splom(mtcars[c(1,3,4,5,6)], main="MTCARS Data")
splom(mtcars[c(1,3,4,6)], main="MTCARS Data")
splom(mtcars[c(1,3,4,6)], col=rainbow(5),main="MTCARS Data")
splom(rock[c(1,2,3,4)], main="ROCK Data")
smokes = c("Y","N","N","Y","N","Y","Y","Y","N","Y")
amount = c(1,2,2,3,3,1,2,1,3,2)
table(smokes,amount)
barplot(table(smokes,amount))
data1<-read.csv(url("http://stat.columbia.edu/~rachel/datasets/nyt1.csv"))
head(data1)
data1$agecat<-cut(data1$Age,c(-Inf,0,18,24,34,44,54,64,Inf))
summary(data1)
library("doBy")
siterange<-function(x){c(length(x),min(x),mean(x),max(x))}
summaryBy(Age~agecat, data=data1,FUN=siterange)
library(ggplot2)
ggplot(data1,aes(x=agecat,y=Impressions,fill=agecat))+geom_boxplot()
ggplot(subset(data1,Clicks>0),aes(x=Clicks/Impressions,colour=agecat))+geom_density()
ggplot(data1,aes(x=Impressions,fill=agecat))+geom_histogram(binwidth=1)
data1$scode[data1$Impressions==0]<-"NoImps"
data1$scode[data1$Impressions>0]<-"Imps"
data1$scode[data1$Clicks>0]<-"Clicks"
data1$scode<-factor(data1$scode)
head(data1)
data3<-subset(data1,scode=="NoImps")
head(data3)
clen<-function(x){c(length(x))}
etable<-summaryBy(Impressions~scode+Gender+agecat,data=data1,FUN=clen)
###Output
_____no_output_____ |
Filters/AudioFilters.ipynb | ###Markdown
COM418 - Computers and MusicPaolo Prandoni, LCAV, EPFLPractical filters for Audio Processing
###Code
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from IPython.display import Audio
from scipy import signal
import import_ipynb
from FilterUtils import *
plt.rcParams['figure.figsize'] = 14, 4
matplotlib.rcParams.update({'font.size': 14})
DEFAULT_SF = 16000
###Output
_____no_output_____
###Markdown
IntroductionIn this notebook we will explore a complete set of "recipes" to design second-order digital IIR filters. The transfer function of a generic second-order section (also known as a **biquad**) has the canonical form$$ H(z) = \frac{b_0 + b_1 z^{-1} + b_{2}z^{-2}}{1 + a_1 z^{-1} + a_{2}z^{-2}}$$The routines defined in the rest of this notebook will allow you to compute the values of the five biquad parameters in order to implement a variety of different filter prototypes according to the desired specifications. We will also explore how to cascade second-order sections to implement higher-order filters with improved characteristics. Common practicesAlthough ultimately we will design digital filters, in audio applications it is important to become familiar with the main ideas behind _analog_ filter design and analysis; indeed, audio recording and production techniques have been developed and fine-tuned well before the advent of DSP and, even in today's world of DAWs, the language and many of the practices in current use still reflect the analog conventions of yore.In particular: * filter specifications are almost always expressed in terms of real-world frequencies in Hz rather than as normalized frequencies over $[-\pi, \pi]$; this of course implies that the underlying sampling frequency is known * plots of the magnitude response will usually be shown on a log-log graph, generally using a decibel scale for the amplitde and a decade scale for frequencies; this mirrors the way in which human audio perception is approximately logarithmic both in frequency and in scale. The companion notebook ``FilterUtils.ipynb`` implements a set of plotting routines that take these conventions into account. For example, this is how the magnitude response of the same leaky integrator looks like in the typical representations used in communication systems vs. audio equalization:
###Code
lam = 0.9
filter_props([1 - lam], [1, -lam])
analog_response([1 - lam], [1, -lam], DEFAULT_SF, dB=-50)
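# A minimal sketch (not from the original notebook) of the difference equation implied by
# the canonical biquad transfer function above, in Direct Form I with a0 = 1; it expects
# length-3 coefficient arrays like the ones returned by the cookbook functions below.
def biquad_direct_form_1(b, a, x):
    """y[n] = b0*x[n] + b1*x[n-1] + b2*x[n-2] - a1*y[n-1] - a2*y[n-2]"""
    y = np.zeros(len(x))
    for n in range(len(x)):
        y[n] = (b[0] * x[n]
                + (b[1] * x[n - 1] if n >= 1 else 0) + (b[2] * x[n - 2] if n >= 2 else 0)
                - (a[1] * y[n - 1] if n >= 1 else 0) - (a[2] * y[n - 2] if n >= 2 else 0))
    return y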
###Output
_____no_output_____
###Markdown
The biquad design strategyThe next section provides a set of functions to compute the five biquad filter coefficients for a desired response (lowpass, bandpass, etc) and an associated set of specifications (cutoff, attenuation, etc). Rather than tackling the search for the coefficients as an abstract optimization problem, each recipe starts from a well-known _analog_ second-order filter with the desired characteristic, and then converts it to a discrete-time filter using a mapping called the _bilinear transform_ .The reason for this approach is that the classic analog filter prototypes (Butterworth, Chebyshev, etc.) (and the topologies used for their implementation) are extremely well understood and have proven extremely reliable over more than a century of research and practical experimentation. The analog prototypesHistorically, the development of electronic filters began with the design of **passive** analog filters, that is, filters using only resistors, capacitors and inductors; indeed, an RLC circuit, that is, a network containing a resistor, a capacitor and an inductor, can implement the prototypical analog second-order section. Since these filters have no active elements that can provide signal amplification, the power of their output is at most equal to (but, in practice, smaller than) the power of the input. This implicitly guarantees the stability of these systems although, at least in theory, strong resonances can appear in the frequency response.Analog filters work by exploiting the frequency-dependent reactance of capacitors and inductors. The input-output characteristic for linear circuits using these electronic components is described by linear differential equations, which implies the existence of some form of _feedback_ in the circuits themselves. As a consequence, when we convert the analog prototypes to digital realizations, we invariably end up with IIR filters. Passive filters operating in the frequency range of audio applications require the use of bulky inductors and therefore **active** filters are usually preferred in the analog domain. In the digital domain, on the other hand, we are in fact free to use arbitrary gain factors (it's just multiplications!) and so the resulting transfer functions can approximate either type of design. The bilinear transformThe cookbook recipes below are obtained by mapping second-order analog filters prototypes to equivalent digital realization via the _bilinear transform_ . We will not go into full details but, as a quick reference, here are the main ideas behind the method. An analog filter is described by a transfer function $H(s)$, with $s\in \mathbb{C}$, which is the Laplace transform of the filter's continuous-time impulse response. The key facts about $H(s)$ are: * filter stability requires that all the poles of $H(s)$ lie in the left half of the complex plane (i.e. their real part must be negative) * the filter's frequency response is given by $H(j\Omega)$, that is, by the values of $H(s)$ along the imaginary axisThe bilinear transform maps the complex $z$-plane (discrete time) to the complex $s$-plane (continuous time) as$$ s \leftarrow c \frac{1 - z^{-1}}{1 + z^{-1}} = \Phi_{c}(z)$$where $c$ is a real-valued constant. 
Given a stable analog filter $H(s)$, the transfer function of its discrete-time version is $H_d(z) = H(\Phi_{c}(z))$ and it is relatively easy to verify that * the inside of the unit circle on the $z$-plane is mapped to the left half of the $s$-plane, which preserves stability * the unit circle on the $z$-plane is mapped to the imaginary axis of the $s$-plane The last property allows us to determine the frequency response of the digital filter as $H_d(e^{j\omega}) = H(\Phi_{c}(e^{j\omega})) = H(j\,c\tan(\omega/2))$, or: $$ \Omega \leftarrow c\tan(\omega/2) %\omega \leftarrow 2\arctan(\Omega/c)$$we can see that $\omega=0$ is mapped to $\Omega=0$, $\omega=\pi/2$ is mapped to $\Omega=c$, and $\omega=\pi$ is mapped to $\Omega=\infty$, which reveals the high nonlinearity of the frequency mapping. We usually need to precisely control a least one notable point $H_d(e^{j\omega_0})$ in the frequency response of the discrete-time filter; for example, in a resonator, we need to place the magnitude peak at a specific frequency $\omega_0$. To achieve this, we design the analog filter so that $H(1j) = H_d(e^{j\omega_0})$ and then we set $c = 1/\tan(\omega_0/2)$ in the bilinear operator; this adjustment, called **pre-warping** , is used in the recipes below. To illustrate the principle, here is a simple transfer function $H(s)$ that provide a triangular response centered at $\Omega=1$; the default width is $1/2$ and the width can be optionally scaled.along the imaginary axis; we can parametrize the width of the bell and its center position is at by default:
###Code
if __name__ == '__main__':
def H(f, scale=1):
return np.maximum(1 - 4 * np.abs(np.imag(f) - 1) / scale, 0)
f = np.linspace(0, 3, 1000)
plt.plot(f, H(1j * f));
###Output
_____no_output_____
###Markdown
Using the bilinear transform with pre-warping, we can move the equivalent discrete-time frequency response over the $[0, \pi]$ interval.
###Code
if __name__ == '__main__':
def BL(z, c=1):
return c * (1 - 1/z) / (1 + 1/z)
if __name__ == '__main__':
w = np.linspace(0, np.pi, 1000)
center_freqs = np.pi * np.arange(0.1, 0.9, 0.15)
for w0 in center_freqs:
c = 1 / np.tan(w0 / 2)
plt.plot(w, H(BL(np.exp(1j * w), c=c)))
###Output
_____no_output_____
###Markdown
Note that the nonlinear mapping between frequency axes has two consequences on the the discrete-time frequency response: * at low and high digital frequencies the response becomes more narrow; this can be compensated for by scaling the analog prototype * as we move to higher frequencies, the response is less and less symmetric; this is much harder to compensate for because it would require a different analog design and it is therefore an accepted tradeoff. The following example tries to keep the width of the response uniform
###Code
if __name__ == '__main__':
for w0 in center_freqs:
c = 1 / np.tan(w0 / 2)
scaling_factor = (c * c + 1) / (2 * c)
plt.plot(w, H(BL(np.exp(1j * w), c=c), scale=scaling_factor))
###Output
_____no_output_____
###Markdown
**Exercise**: how was the scaling factor derived? Can you improve on it? Using the bilinear transform with pre-warping, we can move the equivalent discrete-time frequency response over the $[0, \pi]$ interval What about FIRs?FIR filters are a great tool in digital signal processing; as opposed to IIR (which can be seen as a digital adaptation of electronic filters) FIRs offer: * unconditional stability * the possibility of a linear phase response * a great design algorithm (Parks-McClellan) even for arbitrary responses The price for stability and linear phase is a much higher computational cost: for the same specifications, an FIR filter will require up to a hundred times more operations per sample with respect to an IIR implementation. Linear phase, however, is not terribly relevant in audio applications because of the limited phase sensitivity in the human auditory system. On the other hand, especially in real-time applications, the primary goal in audio processing is to minimize the overall processing delay; since linear phase FIRs have a symmetric impulse response, and since a well-performing filter will have a very long impulse response, the associated delay often makes FIRs difficult to use. Even if we give up linear phase and implement an asymmetric, minimum-phase FIR, the computational cost may be too high.There are countless audiophile blogs debating the merits and demerits of FIRs in audio applications. Some of the purported negatives that are often quoted include * FIRs sound "cold" * linear phase FIRs cause pre-echos (because of their symmetric impulse response) * minimum-phase FIRs exhibit excessive ringing in the impulse responseIt must be said that these artefacts, if they can be noticed at all, are anyway extremely subtle and unlikely to compromise overall sound quality in a significant way. The major obstacle to the use of FIRs remains their inherent processing delay. The cookbookIn the following, we define a set of functions that return the five biquad coefficients for the most common types of audio filtering applications. Many of the formulas have been adapted from Robert Bristow-Johnson's famous [cookbook](https://webaudio.github.io/Audio-EQ-Cookbook/audio-eq-cookbook.html). Each function returns ``b`` and ``a``, two arrays of three floats each containing the coefficients of the transfer function$$ H(z) = \frac{b_0 + b_1 z^{-1} + b_{2}z^{-2}}{1 + a_1 z^{-1} + a_{2}z^{-2}} \qquad (a_0 = 1)$$ LowpassA second-order lowpass filter section will have a passband with approximately unit gain (0 dB) and a monotonically decreasing stopband. It is defined by two parameters: 1. the "quality factor" $Q$, which determines the shape of the magnitude response; by default $Q = \sqrt{1/2}$, which yields a Butterworth characteristic (i.e. a monotonically decreasing response). 1. the _corner frequency_ $f_c$ (also called the _cutoff_ frequency); the magnitude response will be equal to the quality factor $Q$ at $f_c$ and will decrease monotonically afterwards. For $Q = \sqrt{1/2}$, the attenuation at $f_c$ is equal to $20\log_{10}(\sqrt{1/2}) \approx -3$ dB, which yields a Butterworth (maximally flat) characteristic.
###Code
def LPF(fc, sf, Q=(1/np.sqrt(2))):
"""Biquad lowpass filter"""
w = 2 * np.pi * fc / sf
alpha = np.sin(w) / (2 * Q)
c = np.cos(w)
a = np.array([1 + alpha, -2 * c, 1 - alpha])
b = np.array([(1 - c) / 2, 1 - c, (1 - c) / 2])
return b / a[0], a / a[0]
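# sketch: the (b, a) pair plugs straight into scipy's IIR routines, e.g.
# y = signal.lfilter(*LPF(1000, DEFAULT_SF), x)  # lowpass a signal x sampled at DEFAULT_SF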
if __name__ == '__main__':
CUTOFF = 1000
b, a = LPF(CUTOFF, DEFAULT_SF)
analog_response(b, a, DEFAULT_SF, dB=-50)
plt.axhline(y=-3, linewidth=0.5, color='r')
plt.axvline(x=CUTOFF, linewidth=0.5, color='r')
if __name__ == '__main__':
filter_props(b, a)
plt.gcf().get_axes()[0].axhline(y=np.sqrt(0.5), linewidth=0.5, color='r')
plt.gcf().get_axes()[0].axvline(x=(2 * np.pi * CUTOFF / DEFAULT_SF), linewidth=0.5, color='r')
###Output
_____no_output_____
###Markdown
When $Q = 1/\sqrt{2}$, as we said, the lowpass section corresponds to a Butterworth filter, that is, a filter with a maximally flat passband and a monotonically decreasing stopband. For higher $Q$ values the magnitude response exhibits a peak around $f_c$ which, in the time domain, corresponds to a damped oscillatory impulse response as shown in the following examples; for lower $Q$ values, the roll-off of the magnitude response will be less steep.While these $Q$ values are clearly not a good choice for a single-stage lowpass, values other than $1/\sqrt{2}$ become useful when cascading multiple sections, as we will see later.
###Code
if __name__ == '__main__':
_, (fr, ir) = plt.subplots(2, figsize=(16,9))
CUTOFF = 100
Q = [0.1, 0.5, 1/np.sqrt(2), 5, 20]
for n, q in enumerate(Q):
b, a = LPF(CUTOFF, DEFAULT_SF, Q=q)
analog_response(b, a, DEFAULT_SF, dB=-50, axis=fr, color=f'C{n}')
ir.plot(signal.lfilter(b, a, np.r_[1, np.zeros(2000)]))
###Output
_____no_output_____
###Markdown
HighpassA highpass filter is simply the complementary filter to a lowpass, with the same roles for $f_c$ and $Q$.
###Code
def HPF(fc, sf, Q=(1/np.sqrt(2))):
"""Biquad highpass filter"""
w = 2 * np.pi * fc / sf
alpha = np.sin(w) / (2 * Q)
c = np.cos(w)
a = np.array([1 + alpha, -2 * c, 1 - alpha])
b = np.array([(1 + c) / 2, -1 - c, (1 + c) / 2])
return b / a[0], a / a[0]
if __name__ == '__main__':
CUTOFF = 2500
b, a = HPF(CUTOFF, DEFAULT_SF)
analog_response(b, a, DEFAULT_SF, dB=-50)
plt.axhline(y=-3, linewidth=0.5, color='r')
plt.axvline(x=CUTOFF, linewidth=0.5, color='r')
if __name__ == '__main__':
filter_props(b, a)
plt.gcf().get_axes()[0].axhline(y=np.sqrt(0.5), linewidth=0.5, color='r')
plt.gcf().get_axes()[0].axvline(x=(2 * np.pi * CUTOFF / DEFAULT_SF), linewidth=0.5, color='r')
###Output
_____no_output_____
###Markdown
BandpassA second-order bandpass filter section will have approximately unit gain (0 dB) in the passband and will decrease monotonically to zero in the stopband. It is defined by two parameters: 1. the center frequency $f_c$, where the gain is unitary 1. the bandwidth $b = (f_+ - f_-)$, where $f_- < f_c < f_+$ are the first frequencies, left and right of $f_c$ where the attenuation reaches $-3$ dB. For the reasons explained above, note that the passband is almost but not exactly symmetric around $f_c$, with the asymmetry more pronounced towards the high end of the spectrum.
###Code
def BPF(fc, bw, sf):
"""Biquad bandpass filter"""
w = 2 * np.pi * fc / sf
alpha = np.tan(np. pi * bw / sf)
c = np.cos(w)
b = np.array([alpha, 0, -alpha])
a = np.array([1 + alpha, -2 * c, 1 - alpha])
return b / a[0], a / a[0]
if __name__ == '__main__':
CENTER, BANDWIDTH = 1000, 400
b, a = BPF(CENTER, BANDWIDTH, DEFAULT_SF)
analog_response(b, a, DEFAULT_SF, dB=-40)
plt.axhline(y=-3, linewidth=0.5, color='r')
plt.axvline(x=CENTER, linewidth=0.5, color='r')
plt.axvline(x=CENTER - BANDWIDTH / 2, linewidth=0.5, color='r')
plt.axvline(x=CENTER + BANDWIDTH / 2, linewidth=0.5, color='r')
if __name__ == '__main__':
filter_props(b, a)
plt.gcf().get_axes()[0].axhline(y=np.sqrt(0.5), linewidth=0.5, color='r')
plt.gcf().get_axes()[0].axvline(x=(2 * np.pi * CENTER / DEFAULT_SF), linewidth=0.5, color='r')
###Output
_____no_output_____
###Markdown
ResonatorWhen the bandwith is very small, the second order bandpass becomes a constant-gain resonator:
###Code
if __name__ == '__main__':
_, ax = plt.subplots()
BANDWIDTH = 10
FC = [100, 1000, 2000, 4000, 6000]
for n, fc in enumerate(FC):
b, a = BPF(fc, BANDWIDTH, DEFAULT_SF)
frequency_response(b, a, dB=-50, half=True, axis=ax)
###Output
_____no_output_____
###Markdown
NotchA notch filter is the complementary filter to a resonator; its attenuation reaches $-\infty$ at $f_c$ and its bandwidth is usually kept very small in order to selectively remove only a given frequency; this is achieved by placing a pair of complex-conjugate zeros _on_ the unit circle and by placing two poles very close to the zeros.
###Code
def notch(fc, bw, sf):
"""Biquad notch filter"""
w = 2 * np.pi * fc / sf
alpha = np.tan(np. pi * bw / sf)
c = np.cos(w)
b = np.array([1, -2 * c, 1])
a = np.array([1 + alpha, -2 * c, 1 - alpha])
return b / a[0], a / a[0]
if __name__ == '__main__':
CENTER, BANDWIDTH = 2000, 100
b, a = notch(CENTER, BANDWIDTH, DEFAULT_SF)
analog_response(b, a, DEFAULT_SF, dB=-40)
plt.axhline(y=-6, linewidth=0.5, color='r')
plt.axvline(x=CENTER, linewidth=0.5, color='r')
if __name__ == '__main__':
filter_props(b, a)
plt.gcf().get_axes()[0].axhline(y=np.sqrt(0.5), linewidth=0.5, color='r')
plt.gcf().get_axes()[0].axvline(x=(2 * np.pi * CENTER / DEFAULT_SF), linewidth=0.5, color='r')
###Output
_____no_output_____
###Markdown
ShelvesShelving filters are used to amplify either the low or the high end of a signal's spectrum. A high shelf, for instanance, provides an arbitrary gain for high frequencies and has approximately unit gain in the low end of the spectrum. Shelving filters, high or low, are defined by the following parameters: 1. the desired _shelf gain_ in dB 1. the midpoint frequency $f_c$, which corresponds to the frequency in the transition band where the gain reaches half its value. 1. the "quality factor" $Q$, which determines the steepnes off the transition band; as for lowpass filters, the default value $Q = 1/\sqrt{2}$ yields the steepest transition band while avoiding resonances. A common use case for shelving filters is in consumer audio appliances, where the standard "Bass" and "Treble" tone knobs control the gain of two complementary shelves with fixed midpoint frequency.
###Code
def LSH(fc, gain, sf, Q=(1/np.sqrt(2))):
"""Biquad low shelf"""
w = 2 * np.pi * fc / sf
A = 10 ** (gain / 40)
alpha = np.sin(w) / (2 * Q)
c = np.cos(w)
b = np.array([A * ((A + 1) - (A - 1) * c + 2 * np.sqrt(A) * alpha),
2 * A * ((A - 1) - (A + 1) * c),
A * ((A + 1) - (A - 1) * c - 2 * np.sqrt(A) * alpha)])
a = np.array([(A + 1) + (A - 1) * c + 2 * np.sqrt(A) * alpha,
-2 * ((A - 1) + (A + 1) * c),
(A + 1) + (A - 1) * c - 2 * np.sqrt(A) * alpha])
return b / a[0], a / a[0]
if __name__ == '__main__':
MIDPOINT, GAIN_DB = 200, 40
b, a = LSH(MIDPOINT, GAIN_DB, DEFAULT_SF)
analog_response(b, a, DEFAULT_SF, dB=-40)
plt.axhline(y=GAIN_DB / 2, linewidth=0.5, color='r')
plt.axvline(x=MIDPOINT, linewidth=0.5, color='r')
if __name__ == '__main__':
filter_props(b, a)
def HSH(fc, gain, sf, Q=(1/np.sqrt(2))):
"""Biquad high shelf"""
w = 2 * np.pi * fc / sf
A = 10 ** (gain / 40)
alpha = np.sin(w) / (2 * Q)
c = np.cos(w)
b = np.array([A * ((A + 1) + (A - 1) * c + 2 * np.sqrt(A) * alpha),
-2 * A * ((A - 1) + (A + 1) * c),
A * ((A + 1) + (A - 1) * c - 2 * np.sqrt(A) * alpha)])
a = np.array([(A + 1) - (A - 1) * c + 2 * np.sqrt(A) * alpha,
2 * ((A - 1) - (A + 1) * c),
(A + 1) - (A - 1) * c - 2 * np.sqrt(A) * alpha])
return b / a[0], a / a[0]
if __name__ == '__main__':
MIDPOINT, GAIN_DB = 2000, 40
b, a = HSH(MIDPOINT, GAIN_DB, DEFAULT_SF)
analog_response(b, a, DEFAULT_SF, dB=-40)
plt.axhline(y=GAIN_DB / 2, linewidth=0.5, color='r')
plt.axvline(x=MIDPOINT, linewidth=0.5, color='r')
if __name__ == '__main__':
filter_props(b, a)
###Output
_____no_output_____
###Markdown
Peaking EQA peaking equalizer filter is the fundamental ingrediend in multiband parametric equalization. Each filter provides an arbitrary boost or attenuation for a given frequency band centered around a peak freqency and flattens to unit gain elsewhere. The filter is defined by the following parameters: 1. the desired gain in dB (which can be negative) 1. the peak frequency $f_c$, where the desired gain is attained 1. the bandwidth of the filter, defined as the interval around $f_c$ where the gain is greater (or smaller, for attenuators) than half the desired gain in dB; for instance, if the desired gain is 40dB, all frequencies within the filter's bandwidth will be boosted by at least 20dB. Note that the bandwdidth is not exactly symmetrical around $f_c$
###Code
def PEQ(fc, bw, gain, sf):
"""Biquad bandpass filter """
w = 2 * np.pi * fc / sf
A = 10 ** (gain / 40)
alpha = np.tan(np. pi * bw / sf)
c = np.cos(w)
b = np.array([1 + alpha * A, -2 * c, 1 - alpha * A])
a = np.array([1 + alpha / A, -2 * c, 1 - alpha / A])
return b / a[0], a / a[0]
if __name__ == '__main__':
CENTER, BW, GAIN_DB = 800, 400, 40
b, a = PEQ(CENTER, BW, GAIN_DB, DEFAULT_SF)
analog_response(b, a, DEFAULT_SF, dB=-40)
plt.axhline(y=GAIN_DB / 2, linewidth=0.5, color='r')
plt.axvline(x=CENTER, linewidth=0.5, color='r')
if __name__ == '__main__':
filter_props(b, a)
###Output
_____no_output_____
###Markdown
Note that peaking EQ filters with opposite gains are perfectly complementary:
###Code
if __name__ == '__main__':
CENTER, BW, GAIN_DB = 800, 400, 40
b, a = PEQ(CENTER, BW, GAIN_DB, DEFAULT_SF)
y = signal.lfilter(b, a, np.r_[1, np.zeros(200)])
plt.plot(y)
b, a = PEQ(CENTER, BW, -GAIN_DB, DEFAULT_SF)
y = signal.lfilter(b, a, y)
plt.plot(y)
###Output
_____no_output_____
###Markdown
Cascades of biquadsThe performance of a single biquad filter may not be adequate for a given application: a second-order lowpass filter, for instance, may not provide a sufficent amount of rejection in the stopband because of its rather slow roll-off characteristic; or we may want to design an equalizer with multiple peaks and dips. In all cases, we usually want to implement the final design as a cascade of biquad sections, because of their inherent numerical robustness. Factorization of higher-order filtersThe first solution if a filter does not meet the requires specifications is to design a higher-order filter, possibly using different filter "recipes"; in the case of bandpass filters, for instance, we could try a Chebyshev or elliptic design. The resulting high-order transfer function can be then factored into a cascade of second-order sections (or, in the case of an odd-order filter, a cascade of second order-sections followed by a first-order filter):$$ H(z) = \frac{b_0 + b_1 z^{-1} + \ldots + b_{N-1}z^{-N+1}}{a_0 + a_1 z^{-1} + \ldots + a_{N-1}z^{-N+1}} = \prod_{k=0}^{N/2} \frac{b_{k,0} + b_{k,1} z^{-1} + b_{k,2}z^{-2}}{1 + a_{k,1} z^{-1} + a_{k,2}z^{-2}}$$The biquad elements returned by the factorization are not related to the "cookbook" prototypes of the previous section and therefore this method is simply an implementation strategy that focuses on second-order structures; the design algorithm, in other words, will be dependent on the particular type of filter. Nevertheless, both the design and the factorization are usually available in numerical packages such as Scipy, for instance and, in the following example, we illustrate the difference between a 6th-order elliptic lowpass and a single second-order butterworth. First we will use the full high-order realization and then we will show how a cascade of three second-order sections implements the same characteristic.Note that, when cascading transfer functions, the equivalent higher-order filter coefficients can be obtained simply by polynomial multiplication.
###Code
if __name__ == '__main__':
_, ax = plt.subplots()
CUTOFF = 1000
b, a = LPF(CUTOFF, DEFAULT_SF)
analog_response(b, a, DEFAULT_SF, dB=-60, axis=ax, color=f'C0')
eb, ea = signal.ellip(6, 1, 40, CUTOFF, fs=DEFAULT_SF)
analog_response(eb, ea, DEFAULT_SF, dB=-60, axis=ax, color=f'C3')
plt.axvline(x=CUTOFF, linewidth=0.5, color='r')
plt.axhline(y=-3, linewidth=0.5, color='r')
if __name__ == '__main__':
_, ax = plt.subplots()
CUTOFF = 1000
b, a = LPF(CUTOFF, DEFAULT_SF)
# this returns an array of second-order filter coefficients. Each row corresponds to a section,
# with the first three columns providing the numerator coefficients and the last three providing the denominator
soe = signal.ellip(6, 1, 40, CUTOFF, fs=DEFAULT_SF, output='sos')
cb, ca = [1], [1]
for n in range(0, 3):
b, a = soe[n][0:3], soe[n][3:6]
analog_response(b, a, DEFAULT_SF, dB=-60, axis=ax, color=f'C{n}:')
cb = np.polymul(b, cb)
ca = np.polymul(a, ca)
analog_response(cb, ca, DEFAULT_SF, dB=-60, axis=ax, color='C3')
###Output
_____no_output_____
###Markdown
Cascading lowpass and highpass biquadsA cascade of $N$ identical sections with transfer function $H(z)$ will yield the overall transfer function $H_c(z) = H^N(z)$ and thus the stopband attenuation in decibels will increase $N$-fold. For instance, the following example shows the cumulative magnitude responses obtained by cascading up to five identical second-order Butterworth lowpass sections:
###Code
if __name__ == '__main__':
_, ax = plt.subplots()
CUTOFF = 1000
b, a = LPF(CUTOFF, DEFAULT_SF)
cb, ca = b, a
for n in range(0, 5):
analog_response(cb, ca, DEFAULT_SF, dB=-60, axis=ax, color=f'C{n}')
ca = np.polymul(a, ca)
cb = np.polymul(b, cb)
plt.axvline(x=CUTOFF, linewidth=0.5, color='r')
plt.axhline(y=-3, linewidth=0.5, color='r')
plt.axhline(y=-15, linewidth=0.5, color='r')
###Output
_____no_output_____
###Markdown
As shown by the previous plot, a cascade of identical maximally flat lowpass sections yields a steeper roll-off and preserves the monotonicity of the response. However, since the passband of each filter is not perfectly flat, the $-3~\mathrm{dB}$ cutoff frequency of the cascade becomes smaller with each added section and the effective bandwidth of the filter is reduced. In the previous example, the original $-3~\mathrm{dB}$ cutoff frequency was $f_c = 1000~\mathrm{Hz}$ but the magnitude response of the cascade at $f_c$ is $-15~\mathrm{dB}$ whereas the actual $-3~\mathrm{dB}$ point has shifted close to $600~\mathrm{Hz}$.If our goal is to obtain a cascade with a maximally flat (Butterworth) response with a given $f_c$, an obvious approach is simply to factorize the transfer function of a high-order Butterworth as explained in the previous section. There is however a clever and simpler design strategy that is based on the geometric arrangement of the poles of an analog Butterworth filter of order $N$: * the $N$ complex-conjugate poles are equally spaced along a circular contour centered on the origin of the $s$-plane * the angle between poles is equal to $\pi/N$ With this, the pole angles in the upper $s$-plane are given by $$ \theta_n = \frac{\pi}{2N} + n\frac{\pi}{N} = \frac{(2n+1)\pi}{2N}, \qquad n = 0, \ldots, N/2$$
###Code
if __name__ == '__main__':
fig, sp = plt.subplots(1, 4, gridspec_kw={'wspace': 1})
for n in range(0, 4):
sp[n].plot(np.cos(np.linspace(0, 2 * np.pi, 100)), np.sin(np.linspace(0, 2 * np.pi, 100)), 'k:')
p = np.roots(signal.butter(2 * (n + 1), 1, analog=True)[1])
sp[n].plot(p.real, p.imag, 'C3x', ms=10, markeredgewidth=3.0)
sp[n].axis('square')
sp[n].set_xlim(-1.2, 1.2)
sp[n].set_ylim(-1.2, 1.2)
###Output
_____no_output_____
###Markdown
Now, a generic second-order analog filter will have a single pair of complex-conjugate poles at $p_{1,2} = \rho e^{\pm \theta}$ on the $s$-plane and, by cascading identical sections, we will only manage to increase the poles' multiplicity but we will not be able to change their position. In order to achieve a Butterworth pole configuration we will thus need to adjust the pole angle for each section; this is a simple task because it turns out that a second-order filter's quality factor $Q$ is related to the pole angle as $$ 1/Q = 2\cos \theta$$ which means that we can choose the suitable pole angle for each section simply by setting $Q_n = 1/(2\cos \theta_n)$. We can now design $N/2$ discrete-time biquads with the same $Q_n$ values to obtain the desired result.Below is the example for a cascade of five lowpass sections (i.e. a 10th-order filter) compared to a single biquad, both with cutoff $f_c = 1000~\mathrm{Hz}$; notice how the $-3~\mathrm{dB}$ point has not moved in spite of the much steeper rolloff.
###Code
if __name__ == '__main__':
_, ax = plt.subplots()
CUTOFF = 1000
b, a = LPF(CUTOFF, DEFAULT_SF)
analog_response(b, a, DEFAULT_SF, dB=-60, axis=ax, color='C0')
cb, ca, sections = [1], [1], 5
for n in range(0, sections):
iq = 2 * np.cos((2 * n + 1) * np.pi / (4 * sections))
b, a = LPF(CUTOFF, DEFAULT_SF, Q=1/iq)
ca = np.polymul(a, ca)
cb = np.polymul(b, cb)
analog_response(cb, ca, DEFAULT_SF, dB=-60, axis=ax, color='C1')
plt.axvline(x=CUTOFF, linewidth=0.5, color='r')
plt.axhline(y=-3, linewidth=0.5, color='r')
###Output
_____no_output_____
###Markdown
The resulting digital filter has its poles arranged on a circular contour centered in $z=1$ if the cutoff frequency is less than $\pi/2$ and centered on $z=-1$ otherwise.
###Code
if __name__ == '__main__':
filter_props(cb, ca, DEFAULT_SF, dB=-60)
###Output
_____no_output_____
###Markdown
Finally, the following plot shows the individual magnitude responses of the five sections. You can observe that the required $Q_n$ values lead to some biquad sections with a clear peak at the cutoff frequency, although the overall response is monotonic:
###Code
if __name__ == '__main__':
_, ax = plt.subplots()
CUTOFF = 1000
cb, ca, sections = [1], [1], 5
for n in range(0, sections):
iq = 2 * np.cos((2 * n + 1) * np.pi / (4 * sections))
b, a = LPF(CUTOFF, DEFAULT_SF, Q=1/iq)
analog_response(b, a, DEFAULT_SF, dB=-0, axis=ax, color=f'C{n+2}:')
ca = np.polymul(a, ca)
cb = np.polymul(b, cb)
analog_response(cb, ca, DEFAULT_SF, dB=-60, axis=ax, color='C1')
plt.axvline(x=CUTOFF, linewidth=0.5, color='r')
plt.axhline(y=-3, linewidth=0.5, color='r')
###Output
_____no_output_____
###Markdown
Combining shelving filters
Shelving filters may be combined to create filters that boost a particular frequency range.
###Code
if __name__ == '__main__':
cb, ca = LSH(1000, 20, DEFAULT_SF)
b, a = HSH(10, 20, DEFAULT_SF)
cb = np.polymul(b, cb)
ca = np.polymul(a, ca)
# normalize
analog_response(cb / 10, ca, DEFAULT_SF, dB=-50, points=10001)
###Output
_____no_output_____
###Markdown
Parametric equalization
Peaking equalizers with distinct bandwidths can be cascaded to obtain an arbitrary equalization curve over the entire range of input frequencies; indeed, this is the technique behind so-called _parametric equalizers_, where a bank of logarithmically spaced peaking EQs with independent gain controls allows the user to easily define a global equalization response.
###Code
if __name__ == '__main__':
cb, ca = np.ones(1), np.ones(1)
for n, g in enumerate([20, -10, 40]):
b, a = PEQ(10 ** (n+1), 10 ** (n + 1), g, DEFAULT_SF)
cb = np.polymul(b, cb)
ca = np.polymul(a, ca)
analog_response(cb, ca, DEFAULT_SF, dB=-50, points=10001)
###Output
_____no_output_____ |
14_RCNN/01_DenseDepth_DatasetCreation.ipynb | ###Markdown
Depth Project - Dataset Creation
###Code
from google.colab import drive
drive.mount('/content/gdrive')
cd gdrive/My\ Drive/DepthProject
!ls -l
###Output
total 2234578
drwx------ 2 root root 4096 May 3 15:54 DenseDepth
drwx------ 2 root root 4096 May 3 22:30 depth_dataset_cleaned
-rw------- 1 root root 1706380 May 3 15:22 depth_dataset_cleaned_raw.zip
-rw------- 1 root root 2286492537 May 3 14:31 depth_dataset_cleaned.zip
###Markdown
Note: I came to the conclusion that writing the images directly to Google Drive is very slow; instead, write them into a .zip file directly, which I did on my local machine. This notebook, however, shows the different methods I tried.
This is the write-the-images-to-folder approach. **PROCEED WITH UTMOST CAUTION**
###Code
!rm -r depth_dataset_cleaned/
###Output
^C
###Markdown
Count the number of processed folders, should be 100
###Code
!ls depth_dataset_cleaned/fg_bg/ | wc -l
###Output
100
###Markdown
Count the processed files in each folder; there should be 4000 in each
###Code
! find ./depth_dataset_cleaned/fg_bg/ -type d | awk '{print "echo -n \""$0" \";ls -l "$0" | grep -v total | wc -l" }' | sh
!ls depth_dataset_cleaned/fg_bg/
###Output
bg_000 bg_010 bg_020 bg_030 bg_040 bg_050 bg_060 bg_070 bg_080 bg_090
bg_001 bg_011 bg_021 bg_031 bg_041 bg_051 bg_061 bg_071 bg_081 bg_091
bg_002 bg_012 bg_022 bg_032 bg_042 bg_052 bg_062 bg_072 bg_082 bg_092
bg_003 bg_013 bg_023 bg_033 bg_043 bg_053 bg_063 bg_073 bg_083 bg_093
bg_004 bg_014 bg_024 bg_034 bg_044 bg_054 bg_064 bg_074 bg_084 bg_094
bg_005 bg_015 bg_025 bg_035 bg_045 bg_055 bg_065 bg_075 bg_085 bg_095
bg_006 bg_016 bg_026 bg_036 bg_046 bg_056 bg_066 bg_076 bg_086 bg_096
bg_007 bg_017 bg_027 bg_037 bg_047 bg_057 bg_067 bg_077 bg_087 bg_097
bg_008 bg_018 bg_028 bg_038 bg_048 bg_058 bg_068 bg_078 bg_088 bg_098
bg_009 bg_019 bg_029 bg_039 bg_049 bg_059 bg_069 bg_079 bg_089 bg_099
###Markdown
Unzip the Entire Dataset
This will take a long time (~3-4 hours); instead, use the partial dataset and then create your own.
###Code
!unzip -n depth_dataset_cleaned.zip
###Output
_____no_output_____
###Markdown
Unzip Partial, Create the Dataset later
###Code
!unzip depth_dataset_cleaned_raw.zip -d depth_dataset_cleaned/
###Output
_____no_output_____
###Markdown
Create the Dataset
###Code
import glob
import PIL
from PIL import Image
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm.auto import tqdm
from pathlib import Path
sns.set()
!ls depth_dataset_cleaned/
fgc_images = [f for f in glob.glob('depth_dataset_cleaned/fg/*.*')]
bgc_images = [f for f in glob.glob('depth_dataset_cleaned/bg/*.*')]
fgc_mask_images = [f for f in glob.glob('depth_dataset_cleaned/fg_mask/*.*')]
last_idx = 15
###Output
_____no_output_____
###Markdown
This was my attempt at creating the files directly in folders; it does work, but it is a tedious process.
###Code
idx = 0
for bidx, bg_image in enumerate(tqdm(bgc_images)):
if (bidx < last_idx):
continue
Path(f'depth_dataset_cleaned/labels/').mkdir(parents=True, exist_ok=True)
label_info = open(f"depth_dataset_cleaned/labels/bg_{bidx:03d}_label_info.txt","w+")
idx = 4000 * bidx
print(f'Processing BG {bidx}')
Path(f'depth_dataset_cleaned/fg_bg/bg_{bidx:03d}').mkdir(parents=True, exist_ok=True)
Path(f'depth_dataset_cleaned/fg_bg_mask/bg_{bidx:03d}').mkdir(parents=True, exist_ok=True)
for fidx, fg_image in enumerate(tqdm(fgc_images)):
# do the add fg to bg 20 times
for i in range(20):
# do this twice, one with flip once without
for should_flip in [True, False]:
background = Image.open(bg_image)
foreground = Image.open(fg_image)
fg_mask = Image.open(fgc_mask_images[fidx])
if should_flip:
foreground = foreground.transpose(PIL.Image.FLIP_LEFT_RIGHT)
fg_mask = fg_mask.transpose(PIL.Image.FLIP_LEFT_RIGHT)
b_width, b_height = background.size
f_width, f_height = foreground.size
max_y = b_height - f_height
max_x = b_width - f_width
pos_x = np.random.randint(low=0, high=max_x, size=1)[0]
pos_y = np.random.randint(low=0, high=max_y, size=1)[0]
background.paste(foreground, (pos_x, pos_y), foreground)
mask_bg = Image.new('L', background.size)
fg_mask = fg_mask.convert('L')
mask_bg.paste(fg_mask, (pos_x, pos_y), fg_mask)
background.save(f'depth_dataset_cleaned/fg_bg/bg_{bidx:03d}/fg_{fidx:03d}_bg_{bidx:03d}_{idx:06d}.jpg', optimize=True, quality=30)
mask_bg.save(f'depth_dataset_cleaned/fg_bg_mask/bg_{bidx:03d}/fg_{fidx:03d}_bg_{bidx:03d}_mask_{idx:06d}.jpg', optimize=True, quality=30)
label_info.write(f'fg_bg/bg_{bidx:03d}/fg_{fidx:03d}_bg_{bidx:03d}_{idx:06d}.jpg\tfg_bg_mask/bg_{bidx:03d}/fg_{fidx:03d}_bg_{bidx:03d}_mask_{idx:06d}.jpg\t{pos_x}\t{pos_y}\n')
idx = idx + 1
label_info.close()
last_idx = bidx
###Output
_____no_output_____
###Markdown
This is how I created the .zip file. The output is removed because the actual run was made on the local machine.
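As an aside (this is not the code I actually ran, just a minimal self-contained sketch with made-up file names), the temporary .jpg round-trip used below can be avoided entirely by serializing each PIL image into an in-memory buffer and writing the bytes straight into the archive:
```python
from io import BytesIO
from zipfile import ZipFile
from PIL import Image

# stand-in for a composited fg_bg image
img = Image.new('RGB', (8, 8))

# serialize the image into an in-memory buffer and write it into the archive
with ZipFile('sketch.zip', mode='a') as archive:
    buf = BytesIO()
    img.save(buf, format='JPEG', optimize=True, quality=30)
    archive.writestr('fg_bg/example.jpg', buf.getvalue())
```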
###Code
import zipfile
from zipfile import ZipFile

idx = 0
# for each background image
for bidx, bg_image in enumerate(tqdm(bgc_images)):
# output zip file, open in append mode
out_zip = ZipFile('fg_bg.zip', mode='a', compression=zipfile.ZIP_STORED)
    # labels file for the created images
    label_info = open('t_labels.txt', 'w+')
idx = 4000 * bidx
print(f'Processing BG {bidx}')
for fidx, fg_image in enumerate(tqdm(fgc_images)):
# do the add fg to bg 20 times
for i in range(20):
# do this twice, one with flip once without
for should_flip in [True, False]:
# open the bg and fg images
background = Image.open(bg_image)
foreground = Image.open(fg_image)
fg_mask = Image.open(fgc_mask_images[fidx])
# if the fg image should be flipped
if should_flip:
foreground = foreground.transpose(PIL.Image.FLIP_LEFT_RIGHT)
fg_mask = fg_mask.transpose(PIL.Image.FLIP_LEFT_RIGHT)
# choose a random point on the bg to paste the fg image
b_width, b_height = background.size
f_width, f_height = foreground.size
max_y = b_height - f_height
max_x = b_width - f_width
pos_x = np.random.randint(low=0, high=max_x, size=1)[0]
pos_y = np.random.randint(low=0, high=max_y, size=1)[0]
background.paste(foreground, (pos_x, pos_y), foreground)
mask_bg = Image.new('L', background.size)
fg_mask = fg_mask.convert('L')
mask_bg.paste(fg_mask, (pos_x, pos_y), fg_mask)
label_info.write(f'fg_bg/bg_{bidx:03d}/fg_{fidx:03d}_bg_{bidx:03d}_{idx:06d}.jpg\tfg_bg_mask/bg_{bidx:03d}/fg_{fidx:03d}_bg_{bidx:03d}_mask_{idx:06d}.jpg\t{pos_x}\t{pos_y}\n')
# save the background and the mask as temp .jpg files
background.save('b_temp.jpg', optimize=True, quality=30)
mask_bg.save('m_temp.jpg', optimize=True, quality=30)
# save the files to .zip file
out_zip.write('b_temp.jpg', f'depth_dataset_cleaned/fg_bg/bg_{bidx:03d}/fg_{fidx:03d}_bg_{bidx:03d}_{idx:06d}.jpg')
out_zip.write('m_temp.jpg', f'depth_dataset_cleaned/fg_bg_mask/bg_{bidx:03d}/fg_{fidx:03d}_bg_{bidx:03d}_mask_{idx:06d}.jpg')
idx = idx + 1
label_info.close()
# write the labels file to zip
out_zip.write('t_labels.txt', f'depth_dataset_cleaned/labels/bg_{bidx:03d}_label_info.txt')
# important: close the zip, else it gets corrupted
out_zip.close()
###Output
_____no_output_____ |
src_optimization/32_cg_several_coeff_01/e_plot.ipynb | ###Markdown
Plots
###Code
import matplotlib.pyplot as plt
from matplotlib import rcParams
import pandas as pd
import seaborn as sns
PHYSICAL_CORES=64
def plot(p_data, p_yId, p_xId, p_hueId, p_styleId, p_logScale=False, p_smt_marker=False, p_export_filename=None, p_xLabel=None, p_yLabel=None):
rcParams['figure.figsize'] = 12,8
rcParams['font.size'] = 12
rcParams['svg.fonttype'] = 'none'
plot = sns.lineplot(x=p_xId,
y=p_yId,
hue=p_hueId,
style=p_styleId,
data=p_data)
if p_logScale == True:
plot.set_yscale('log')
plot.set_xscale('log')
if p_xLabel != None:
plot.set(xlabel=p_xLabel)
else:
plot.set(xlabel=p_xId)
if p_yLabel != None:
plot.set(ylabel=p_yLabel)
else:
plot.set(ylabel=p_yId)
plt.grid(color='gainsboro')
plt.grid(True,which='minor', linestyle='--', linewidth=0.5, color='gainsboro')
if(p_smt_marker == True):
plt.axvline(PHYSICAL_CORES, linestyle='--', color='red', label='using SMT')
plt.legend()
if(p_export_filename != None):
plt.savefig(p_export_filename)
plt.show()
###Output
_____no_output_____
###Markdown
Gauss3 Efficiency by threads
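Here, absolute efficiency is taken to be $E_\mathrm{abs}(p) = T_\mathrm{ref}(1) / (p \cdot T(p))$, where $T_\mathrm{ref}(1)$ is the single-thread runtime of the `cg_nonconst_coeff_precalc` reference implementation and $T(p)$ is the runtime on $p$ threads (this matches the computation in the cell below).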
###Code
import pandas as pd
import seaborn as sns
# sns.set_theme()
# sns.set_style("ticks")
data_frame = pd.read_csv('./e_efficiency_by_threads.csv')
data_frame = data_frame[data_frame.region_id == 'apply']
data_frame['efficiency_type'] = 'relative'
#
# NOTE: calc absolute efficiency
#
data_frame_copy = data_frame.copy()
data_frame_copy['efficiency_type'] = 'absolute'
ref_runtime = data_frame_copy[(data_frame_copy.impl_id == '\Verb{cg_nonconst_coeff_precalc}') & (data_frame_copy.threads == 1)]['runtime'].values[0]
data_frame_copy['efficiency']=data_frame_copy.apply(lambda row: ref_runtime/(row['runtime'] * row['threads']), axis=1)
data_frame = pd.concat([data_frame_copy, data_frame])
data_frame = data_frame[data_frame.efficiency_type == 'absolute']
# display(data_frame)
plot(p_data=data_frame,
p_yId='runtime',
p_xId='threads',
p_hueId='impl_id',
p_styleId=None,
p_logScale=True,
p_smt_marker=True,
p_export_filename='runtime.svg',
p_xLabel="Threads",
p_yLabel="Runtime [s]")
plot(p_data=data_frame,
p_yId='efficiency',
p_xId='threads',
p_hueId='impl_id',
p_styleId=None,
p_logScale=True,
p_smt_marker=True,
p_export_filename='efficiency.svg',
p_xLabel="Threads",
p_yLabel="Absolute Efficiency")
# plot(p_data=data_frame,
# p_yId='iter',
# p_xId='threads',
# p_hueId='impl_id',
# p_styleId=None,
# p_logScale=False,
# p_core_marker=True)
###Output
/var/folders/zt/h71khkbd7ll9krscx1zncwlc0000gn/T/ipykernel_20324/181008364.py:16: UserWarning: Boolean Series key will be reindexed to match DataFrame index.
ref_runtime = data_frame_copy[data_frame.impl_id == '\Verb{cg_nonconst_coeff_precalc}'][data_frame.threads == 1]['runtime'].values[0]
|
tutorials/tensorflow_word2vec/TensorFlow Word2Vec.ipynb | ###Markdown
A TensorFlow Word2Vec Model for Word Similarity Prediction
###Code
import urllib.request
import collections
import math
import os
import random
import zipfile
import datetime as dt
import numpy as np
import tensorflow as tf
###Output
_____no_output_____
###Markdown
Background
Word2Vec is a model that was created by [Mikolov et al.](http://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf). It uses the concept of "word embeddings", a way to represent relationships between words using vectors, which makes it a useful tool for finding words that are similar to each other. (The TensorFlow tutorial illustrates this with an example embedding matrix; the figure is not reproduced here.)
Data
The data used here is a cleaned version of the first 10^9 bytes of an English Wikipedia dump performed on Mar. 3, 2006. See [this site](https://cs.fit.edu/~mmahoney/compression/textdata.html) for more information.
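As a toy illustration (the numbers below are made-up 3-dimensional "embeddings", not learned ones), similarity between embedding vectors is typically measured with the cosine of the angle between them:
```python
import numpy as np

# made-up 3-d embeddings, purely for illustration
king  = np.array([0.9, 0.8, 0.1])
queen = np.array([0.85, 0.75, 0.2])
apple = np.array([0.1, 0.2, 0.9])

def cosine(a, b):
    return a @ b / (np.linalg.norm(a) * np.linalg.norm(b))

print(cosine(king, queen))  # close to 1 -> "similar" words
print(cosine(king, apple))  # much smaller -> dissimilar words
```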
###Code
def maybe_download(filename, url, expected_bytes):
"""Download a file if not present, and make sure it's the right size."""
if not os.path.exists(filename):
filename, _ = urllib.request.urlretrieve(url + filename, filename)
statinfo = os.stat(filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', filename)
else:
print(statinfo.st_size)
raise Exception(
'Failed to verify ' + filename + '. Can you get to it with a browser?')
return filename
url = 'http://mattmahoney.net/dc/'
filename = maybe_download('text8.zip', url, 31344016)
# Read the data into a list of strings.
def read_data(filename):
"""Extract the first file enclosed in a zip file as a list of words."""
with zipfile.ZipFile(filename) as f:
data = tf.compat.as_str(f.read(f.namelist()[0])).split()
return data
vocabulary = read_data(filename)
print(vocabulary[:7])
# expected output: ['anarchism', 'originated', 'as', 'a', 'term', 'of', 'abuse']
def build_dataset(words, n_words):
"""Process raw inputs into a dataset."""
count = [['UNK', -1]]
count.extend(collections.Counter(words).most_common(n_words - 1))
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
if word in dictionary:
index = dictionary[word]
else:
index = 0 # dictionary['UNK']
unk_count += 1
data.append(index)
count[0][1] = unk_count
reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reversed_dictionary
def collect_data(vocabulary_size=10000):
"""Read data and create the dictionary"""
url = 'http://mattmahoney.net/dc/'
filename = maybe_download('text8.zip', url, 31344016)
vocabulary = read_data(filename)
print(vocabulary[:7])
data, count, dictionary, reverse_dictionary = build_dataset(vocabulary,
vocabulary_size)
del vocabulary # Hint to reduce memory.
return data, count, dictionary, reverse_dictionary
data_index = 0
def generate_batch(data, batch_size, num_skips, skip_window):
"""Generate batch data"""
global data_index
assert batch_size % num_skips == 0
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
context = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1 # [ skip_window input_word skip_window ]
buffer = collections.deque(maxlen=span)
for _ in range(span):
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
for i in range(batch_size // num_skips):
target = skip_window # input word at the center of the buffer
targets_to_avoid = [skip_window]
for j in range(num_skips):
while target in targets_to_avoid:
target = random.randint(0, span - 1)
targets_to_avoid.append(target)
batch[i * num_skips + j] = buffer[skip_window] # this is the input word
context[i * num_skips + j, 0] = buffer[target] # these are the context words
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
# Backtrack a little bit to avoid skipping words in the end of a batch
data_index = (data_index + len(data) - span) % len(data)
return batch, context
vocabulary_size = 10000
data, count, dictionary, reverse_dictionary = collect_data(vocabulary_size=vocabulary_size)
###Output
Found and verified text8.zip
['anarchism', 'originated', 'as', 'a', 'term', 'of', 'abuse']
###Markdown
TensorFlow Model
###Code
graph = tf.Graph()
batch_size = 128
embedding_size = 128 # Dimension of the embedding vector.
skip_window = 1 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a context.
# Rather than sampling a small random validation set (as in the original tutorial),
# we evaluate nearest neighbours for every word in the vocabulary.
valid_size = vocabulary_size  # Evaluate similarity on the whole vocabulary.
valid_window = 100  # Unused below; kept from the tutorial's random-sampling variant.
valid_examples = np.arange(valid_size)  # original: np.random.choice(valid_window, valid_size, replace=False)
num_sampled = 64 # Number of negative examples to sample.
###Output
_____no_output_____
###Markdown
Computing a full softmax over the whole vocabulary for every training pair is expensive. Noise Contrastive Estimation (NCE) is a faster scheme: instead of normalizing the probability of the true context word against all possible context words in the vocabulary, it randomly samples a small number of negative ("noise") words, typically 2-20, and evaluates the model only against those.
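A closely related objective (the negative-sampling simplification of NCE) makes the idea concrete: for an input word $w$ with true context word $c$ and $k$ sampled noise words $\tilde c_1, \ldots, \tilde c_k$, each training pair contributes roughly $$\log\sigma(v_c^\top v_w) + \sum_{i=1}^{k}\log\sigma(-v_{\tilde c_i}^\top v_w),$$ where $\sigma$ is the logistic sigmoid. In the code below, `tf.nn.nce_loss` takes care of drawing the noise samples and applying the NCE-specific corrections internally.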
###Code
with graph.as_default():
# Input data.
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_context = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Look up embeddings for inputs.
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
# Construct the variables for the NCE loss
nce_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
nce_loss = tf.reduce_mean(
tf.nn.nce_loss(weights=nce_weights,
biases=nce_biases,
labels=train_context,
inputs=embed,
num_sampled=num_sampled,
num_classes=vocabulary_size))
optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(nce_loss)
# Compute the cosine similarity between minibatch examples and all embeddings.
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(
normalized_embeddings, valid_dataset)
similarity = tf.matmul(
valid_embeddings, normalized_embeddings, transpose_b=True)
# Add variable initializer.
init = tf.global_variables_initializer()
###Output
_____no_output_____
###Markdown
Run the Model
###Code
def train(graph, num_steps):
with tf.Session(graph=graph) as session:
with session.as_default():
# We must initialize all variables before we use them.
init.run()
print('Initialized')
average_loss = 0
for step in range(num_steps):
batch_inputs, batch_context = generate_batch(data,
batch_size, num_skips, skip_window)
feed_dict = {train_inputs: batch_inputs, train_context: batch_context}
# We perform one update step by evaluating the optimizer op (including it
# in the list of returned values for session.run()
_, loss_val = session.run([optimizer, nce_loss], feed_dict=feed_dict)
average_loss += loss_val
if step % 1000 == 0:
if step > 0:
average_loss /= 2000
# The average loss is an estimate of the loss over the last 2000 batches.
print('Average loss at step ', step, ': ', average_loss)
average_loss = 0
# Note that this is expensive (~20% slowdown if computed every 500 steps)
if step % 1000 == 0:
sim = similarity.eval()
for i in range(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k + 1]
log_str = 'Nearest to %s:' % valid_word
for k in range(top_k):
close_word = reverse_dictionary[nearest[k]]
log_str = '%s %s,' % (log_str, close_word)
print(log_str)
final_embeddings = normalized_embeddings.eval()
saver = tf.train.Saver()
saver.save(session, os.path.join("model.ckpt"))
###Output
_____no_output_____
###Markdown
Training
###Code
num_steps = 10000
softmax_start_time = dt.datetime.now()
train(graph, num_steps=num_steps)
softmax_end_time = dt.datetime.now()
print("Training took {} minutes to run {} iterations".format(
(softmax_end_time-softmax_start_time).total_seconds()/60, str(num_steps)))
###Output
Initialized
Average loss at step 0 : 207.810714722
Nearest to gershwin: eminem, armored, disputes, acting, stands, authority, cantor, derivation,
Average loss at step 1000 : 31.9783105853
Nearest to gershwin: acting, authority, motor, rand, press, translation, certain, happy,
Average loss at step 2000 : 9.28103744006
Nearest to gershwin: disputes, dialects, authority, acting, motor, stands, don, elaborate,
Average loss at step 3000 : 4.79677247
Nearest to gershwin: disputes, dialects, authority, stands, acting, motor, feature, don,
Average loss at step 4000 : 3.50414316523
Nearest to gershwin: disputes, authority, dialects, stands, acting, motor, feature, don,
Average loss at step 5000 : 2.88645278847
Nearest to gershwin: disputes, authority, dialects, stands, acting, motor, feature, don,
Average loss at step 6000 : 2.80906002653
Nearest to gershwin: disputes, stands, authority, acting, dialects, feature, motor, translation,
Average loss at step 7000 : 2.6058074379
Nearest to gershwin: disputes, stands, authority, eminem, acting, dialects, feature, recurring,
Average loss at step 8000 : 2.51422900355
Nearest to gershwin: disputes, UNK, stands, authority, eminem, acting, dialects, feature,
Average loss at step 9000 : 2.43024307692
Nearest to gershwin: disputes, stands, authority, eminem, acting, dialects, feature, recurring,
Training took 1.3422461833333335 minutes to run 10000 iterations
###Markdown
Predict similarity
###Code
def predict_sim(input_word, model_path):
# Reinitialize things
with graph.as_default():
# Input data.
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_context = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Look up embeddings for inputs.
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
# Compute the cosine similarity between minibatch examples and all embeddings.
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(
normalized_embeddings, valid_dataset)
similarity = tf.matmul(
valid_embeddings, normalized_embeddings, transpose_b=True)
# Add variable initializer.
init = tf.global_variables_initializer()
with tf.Session(graph=graph) as session:
saver = tf.train.Saver()
saver.restore(session,
os.path.join(model_path, "model.ckpt"))
sim = similarity.eval()
if input_word in dictionary:
idx = dictionary[input_word]
valid_word = reverse_dictionary[idx]
top_k = 3 # number of nearest neighbors
nearest = (-sim[idx, :]).argsort()[1:top_k + 1]
log_str = 'Nearest to %s:' % valid_word
for k in range(top_k):
close_word = reverse_dictionary[nearest[k]]
log_str = '%s %s' % (log_str, close_word)
print(log_str)
else:
return 'Word not present in dictionary. Try a different one.'
###Output
_____no_output_____
###Markdown
Let's test the trained model and see if it can predict similar words.
###Code
# Define location of saved model
model_path = os.getcwd()
graph = tf.Graph()
predict_sim('science', model_path)
###Output
INFO:tensorflow:Restoring parameters from /Users/micheleenharris/Documents/bin/github/pybotframework/tutorials/tensorflow_word2vec/model.ckpt
Nearest to science: provides regardless vs
|
Assignment2/Task1.ipynb | ###Markdown
Filter 0<=HR<=0.2 or 0.8<=HR<=1.0
###Code
filtered_non_zero_helpful = []
for datum in non_zero_helpful:
if float(datum['helpful'][0])/datum['helpful'][1]<=0.2 and float(datum['helpful'][0])/datum['helpful'][1]>=0:
filtered_non_zero_helpful.append(datum)
if float(datum['helpful'][0])/datum['helpful'][1]<=1.0 and float(datum['helpful'][0])/datum['helpful'][1]>=0.8:
filtered_non_zero_helpful.append(datum)
OR = []
for datum in filtered_non_zero_helpful:
OR.append(datum['overall'])
HR = []
rate_1_HR=[]
rate_2_HR=[]
rate_3_HR=[]
rate_4_HR=[]
rate_5_HR=[]
for datum in filtered_non_zero_helpful:
HR.append(float(datum['helpful'][0])/datum['helpful'][1])
if datum['overall'] == 1.0:
rate_1_HR.append(float(datum['helpful'][0])/datum['helpful'][1])
if datum['overall'] == 2.0:
rate_2_HR.append(float(datum['helpful'][0])/datum['helpful'][1])
if datum['overall'] == 3.0:
rate_3_HR.append(float(datum['helpful'][0])/datum['helpful'][1])
if datum['overall'] == 4.0:
rate_4_HR.append(float(datum['helpful'][0])/datum['helpful'][1])
if datum['overall'] == 5.0:
rate_5_HR.append(float(datum['helpful'][0])/datum['helpful'][1])
plt.scatter(OR,HR,marker='o',color='b',alpha=0.5)
plt.xlabel('OR')
plt.ylabel('HR')
plt.title('HR versus OR for 0<=HR<=0.2 or 0.8<=HR<=1.0')
plt.savefig("HR_vs_OR")
plt.show()
plt.xticks(numpy.arange(0,6,1), ['0.0', '1.0', '2.0', '3.0', '4.0', '5.0'])
rating_ratio = numpy.array([rate_1_HR,rate_2_HR,rate_3_HR,rate_4_HR,rate_5_HR])
plt.violinplot(rating_ratio, vert=True, showmeans=True)
plt.xlabel('OR')
plt.ylabel('HR')
t = plt.title('HR versus OR for 0<=HR<=0.2 or 0.8<=HR<=1.0')
plt.savefig("HR_vs_OR")
plt.show()
filtered_relative_time_non_zero = []
for datum in filtered_non_zero_helpful:
filtered_relative_time_non_zero.append(float(datum['unixReviewTime']-min(item_time[datum['asin']]))/(3600*24*30))
filtered_test_length_non_zero = []
for datum in filtered_non_zero_helpful:
filtered_test_length_non_zero.append(len(datum['reviewText']))
n,bins,patches = plt.hist(filtered_relative_time_non_zero,50,facecolor='green')
plt.xlim([0,210])
plt.xlabel("TSFR / month")
plt.ylabel("number of reviews")
plt.title("Histogram of TSFR for reviews with 0<=HR<=0.2 or 0.8<=HR<=1.0")
plt.xticks(numpy.arange(0,220,20))
plt.savefig('filtered_non_zero_TSFR_histogram')
plt.show()
n,bins,patches = plt.hist(filtered_test_length_non_zero,500,facecolor='green')
plt.xlim([0,8000])
plt.xlabel("RL")
plt.ylabel("number of reviews")
plt.title("Histogram of RL for reviews with 0<=HR<=0.2 or 0.8<=HR<=1.0")
plt.xticks(numpy.arange(0,8000,1000))
plt.savefig('filtered_non_zero_RL_histogram')
plt.show()
for datum in all_data[:20]:
    print(datum)
reviewer = []
count = 0
for datum in all_data:
    count += 1
    if count % 1000 == 0:
        print(count)
    # note: a set would make this membership test much faster on large data
    if datum['reviewerID'] not in reviewer:
        reviewer.append(datum['reviewerID'])
print(len(reviewer))
###Output
93985
|
course/project_build_tf_sentiment_model/01_input_pipeline.ipynb | ###Markdown
Input Pipeline
As we're using TensorFlow, we can make use of the `tf.data.Dataset` object. First, we'll load in our Numpy binaries from file:
###Code
import numpy as np
with open("movie-xids.npy", "rb") as f:
Xids = np.load(f, allow_pickle=True)
with open("movie-xmask.npy", "rb") as f:
Xmask = np.load(f, allow_pickle=True)
with open("movie-labels.npy", "rb") as f:
labels = np.load(f, allow_pickle=True)
###Output
_____no_output_____
###Markdown
We can take these three arrays and create a TF dataset object with them using `from_tensor_slices` like so:
###Code
import tensorflow as tf
dataset = tf.data.Dataset.from_tensor_slices((Xids, Xmask, labels))
dataset.take(1)
###Output
_____no_output_____
###Markdown
Each sample in our dataset is a tuple containing a single `Xids`, `Xmask`, and `labels` tensor. However, when feeding data into our model we need a two-item tuple in the format **(\<inputs\>, \<outputs\>)**. Now, we have two tensors for our inputs, so what we do is pass our **\<inputs\>** item as a dictionary: ```{'input_ids': <ids tensor>, 'attention_mask': <mask tensor>}``` To rearrange the dataset format we can `map` a function that modifies the format like so:
###Code
def map_func(input_ids, masks, labels):
# we convert our three-item tuple into a two-item tuple where the input item is a dictionary
return {"input_ids": input_ids, "attention_mask": masks}, labels
# then we use the dataset map method to apply this transformation
dataset = dataset.map(map_func)
dataset.take(1)
###Output
_____no_output_____
###Markdown
Now we can see that our dataset sample format has been changed. Next, we need to shuffle our data, and batch it. We will take batch sizes of `16` and drop any samples that don't fit evenly into chunks of 16.
###Code
batch_size = 16
dataset = dataset.shuffle(10000).batch(batch_size, drop_remainder=True)
dataset.take(1)
###Output
_____no_output_____
###Markdown
Now our dataset samples are organized into batches of 16. The final step is to split our data into training and validation sets. For this we use the `take` and `skip` methods, creating a 90-10 split.
###Code
split = 0.9
# we need to calculate how many batches must be taken to create 90% training set
size = int((Xids.shape[0] / batch_size) * split)
size
train_ds = dataset.take(size)
val_ds = dataset.skip(size)
# free up memory
del dataset
###Output
_____no_output_____
###Markdown
Our two datasets are fully prepared for our model inputs. Now, we can save both to file using [`tf.data.experimental.save`](https://www.tensorflow.org/api_docs/python/tf/data/experimental/save).
###Code
tf.data.experimental.save(train_ds, "train")
tf.data.experimental.save(val_ds, "val")
###Output
_____no_output_____
###Markdown
In the next notebook we will be loading these files using `tf.data.experimental.load`. Which requires us to define the tensor `element_spec` - which describes the tensor shape. To find our dataset element spec we can write:
###Code
train_ds.element_spec
val_ds.element_spec == train_ds.element_spec
###Output
_____no_output_____
###Markdown
We will be using this tuple when loading our data in the next notebook.
###Code
ds = tf.data.experimental.load("train", element_spec=train_ds.element_spec)
###Output
_____no_output_____ |
analysis/cogsci2021/.ipynb_checkpoints/block_silhouette_cogsci2020_data_generator-checkpoint.ipynb | ###Markdown
Sanity Checks
###Code
# Ensure one to one gameID and workerId
# Should only happen if a repeat worker gets through
query = coll.find({"$and":[
{'workerId':{'$exists':True}},
{'condition':{'$ne':'practice'}},
{'eventType':'trial_end'},
{"$or":[{'iterationName':'pilot2'},
{'iterationName':'pilot3'},
{'iterationName':'pilot4'},
{'iterationName':'Exp2Pilot1'},
{'iterationName':'Exp2Pilot1_turk'},
{'iterationName':'Exp2Pilot1_turk'}]},
{'trialNum':0}]
})
df_trial_end_full = pd.DataFrame(list(query.sort('timeAbsolute')))
#df_trial_end_full[['workerId','gameID']]
assert (np.mean(df_trial_end_full['workerId'].value_counts()) == np.mean(df_trial_end_full['gameID'].value_counts()))
###Output
_____no_output_____
###Markdown
Trial Level Data
###Code
# Assuming that if trial 23 saves, then 0-22 have also saved
# get ids of people with trial 23 data
query = coll.find({"$and":[
{'condition':{'$ne':'practice'}},
{'eventType':'trial_end'},
{"$or":[{'iterationName':'Exp2Pilot3'},
{'iterationName':'Exp2Pilot3_batch2'}]},
#{'iterationName': iterationName}, #use this if one iteration name
{'trialNum': numTrials-1}]
})
complete_data_df = pd.DataFrame(query)
complete_data_ids = list(complete_data_df['workerId'])
# Filter for full datasets
query = coll.find({"$and":[
{'condition':{'$ne':'practice'}},
{'eventType':'trial_end'},
#{'iterationName': iterationName}, #use this if one iteration name
{"$or":[{'iterationName':'Exp2Pilot3'},
{'iterationName':'Exp2Pilot3_batch2'}]}]
})
df_trial_end_full = pd.DataFrame(list(query.sort('timeAbsolute')))
# filter dataframe for complete datasets
df_trial_end_full_filtered = df_trial_end_full[df_trial_end_full.workerId.isin(complete_data_ids)]
# reduce to crucial information
df_trial_end_reduced_filtered = df_trial_end_full_filtered[[
'gameID','trialNum','phase','condition','eventType','targetName','repetition','targetID', #trial identifiers
'nullScore','F1Score','normedScore','rawScoreDiscrete','nullScoreDiscrete','normedScoreDiscrete','scoreGapDiscrete', #scoring
'numBlocks','nPracticeAttempts','blockColor','blockColorID','blockFell','doNothingRepeats',#misc. trial info
'score','currBonus','timeBonus', #bonusing
'timeAbsolute','timeRelative','buildTime','buildStartTime','buildFinishTime','timeToBuild', #timing
'discreteWorld','allVertices', #world reconstruction
'browser','browserVersion','os','devMode', #developer info
#below here should be the same for every trial in a dataset
'iterationName',
'numTargets', 'prePostSetSize','numRepetitions', #pre-post info
'bonusThresholdLow','bonusThresholdMid','bonusThresholdHigh','timeThresholdYellow','timeThresholdRed', #bonus info
]]
#Fix error in data-saving- normedScoreDiscrete saved as rawScoreDiscrete
df_trial_end_reduced_filtered['normedScoreDiscrete'] = df_trial_end_reduced_filtered['rawScoreDiscrete']
df_trial_end_reduced_filtered.drop(['rawScoreDiscrete'], axis=1)
df = df_trial_end_reduced_filtered.sort_values(by=['gameID', 'timeAbsolute'])
###Output
/Users/will/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:32: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy
###Markdown
Compute Spatial Reconstruction Accuracy
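The helpers below score a reconstruction against its target silhouette on the binarized `discreteWorld` grids: precision $P = \mathrm{TP}/(\mathrm{TP}+\mathrm{FP})$, recall $R = \mathrm{TP}/(\mathrm{TP}+\mathrm{FN})$, their harmonic mean $F_1 = 2PR/(P+R)$, and the Jaccard index $\mathrm{TP}/(\mathrm{TP}+\mathrm{FP}+\mathrm{FN})$.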
###Code
targetMaps = {}
with open(os.path.join(csv_dir,'targetMaps.txt')) as json_file:
targetMaps = json.load(json_file)
def getPrecision(arr1,arr2):
    # precision = true positives / (true positives + false positives)
    prod = np.multiply(arr1,arr2)
    false_pos = np.subtract(arr2,prod)
    numerator = np.sum(prod)
    denominator = np.add(numerator,np.sum(false_pos))
    precision = numerator/denominator
    return precision
def getRecall(arr1,arr2):
prod = np.multiply(arr1,arr2)
false_neg = np.subtract(arr1,prod)
numerator = np.sum(prod)
denominator = np.add(np.sum(prod),np.sum(false_neg))
recall = numerator/denominator
return recall
def getF1Score(targetName, discreteWorld):
targetMap = targetMaps[targetName]
arr1 = 1*np.logical_not(np.array(targetMap))
arr2 = 1*np.logical_not(np.array(discreteWorld))
recall = getRecall(arr1, arr2)
precision = getPrecision(arr1, arr2)
numerator = np.multiply(precision, recall)
denominator = np.add(precision, recall)
if (denominator>0):
quotient = np.divide(numerator, denominator)
f1Score = np.multiply(2, quotient)
else:
f1Score = 0
# print('recall ' + str(recall))
# print('precision ' + str(precision))
return f1Score
def getF1ScoreLambda(row):
return(getF1Score(row['targetName'], row['discreteWorld']))
def getJaccard(targetName, discreteWorld):
targetMap = targetMaps[targetName]
arr1 = 1*np.logical_not(np.array(targetMap))
arr2 = 1*np.logical_not(np.array(discreteWorld))
prod = np.multiply(arr1,arr2)
true_pos = np.sum(prod)
false_pos = np.sum(np.subtract(arr2,prod))
false_neg = np.sum(np.subtract(arr1,prod))
# print(true_pos)
# print(false_pos)
# print(false_neg)
denomenator = np.add(false_neg,np.add(false_pos,true_pos))
jaccard = np.divide(true_pos,denomenator)
#print('recall ' + recall);
return jaccard
def getJaccardLambda(row):
return(getJaccard(row['targetName'], row['discreteWorld']))
# def getNullScore(targetName):
# targetMap = targetMaps[targetName]
# arr1 = 1*np.logical_not(np.array(targetMap))
# arr2 = 1*np.zeros(arr1.shape)
# recall = getRecall(arr1, arr2)
# precision = getPrecision(arr1, arr2)
# numerator = np.multiply(precision, recall)
# denominator = np.add(precision, recall)
# quotient = np.divide(numerator, denominator)
# f1Score = np.multiply(2, quotient)
# print('recall ', str(recall));
# print('precision ', str(precision));
# print('quotient ', str(quotient));
# return f1Score
df['rawF1DiscreteScore'] = df.apply(getF1ScoreLambda, axis=1)
df['jaccardDiscrete'] = df.apply(getJaccardLambda, axis=1)
# Make new column: phase_extended
# Same as phase but with 'repeated' split into 'repetition 1' and 'repetition 2'
phase_dict = {
'pre':0,
'repetition 1':1,
'repetition 2':2,
'post':3
}
ordered_phases = ['pre','repetition 1','repetition 2','post']
df['phase_extended'] = df['phase']
df.loc[(df.phase=='repeated') & (df.repetition==1),'phase_extended'] = 'repetition 1'
df.loc[(df.phase=='repeated') & (df.repetition==2),'phase_extended'] = 'repetition 2'
df['phase_number'] = df.phase_extended.astype("category").cat.set_categories(ordered_phases).cat.codes + 1
#Add useful variables for graphing
df['targetNumber'] = df['targetName'].apply(lambda x: x[-2:])
df['perfectScore'] = df.rawF1DiscreteScore == 1
df['gameID'].nunique()
###Output
_____no_output_____
###Markdown
Initial Block DataInitial block placements (before physics, after snapping, before falling)
###Code
query = coll.find({"$and":[
{'condition':{'$ne':'practice'}},
{'eventType':'initial'},
#{'iterationName': iterationName}, #use this if one iteration name
{"$or":[{'iterationName':'Exp2Pilot3'},
{'iterationName':'Exp2Pilot3_batch2'}]}]
})
df_initial_full = pd.DataFrame(list(query))
# filter dataframe for complete datasets
df_initial_full_filtered = df_initial_full[df_initial_full.workerId.isin(complete_data_ids)]
print('Loaded ' + str(df_initial_full_filtered.shape[0]) + ' complete sets of initial blocks')
# reduce to crucial information
df_initial_full_filtered.columns
df_initial_reduced_filtered = df_initial_full_filtered[[
'gameID','trialNum','phase','condition','eventType','targetName','repetition','targetID','blockNum', #trial identifiers
'nullScore','incrementalScore','normedIncrementalScore','rawScoreDiscrete','incrementalNormedScoreDiscretePrevious', #scoring
'score','currBonus', #bonusing
'timeAbsolute','timeRelative','timeBlockSelected','timeBlockPlaced','relativePlacementTime', #timing
'discreteWorld','vertices','blockKind','blockColorID','blockColor','blockCenterX', 'blockCenterY', #world reconstruction
'x_index','y_index','x_discrete','y_discrete','width_discrete','height_discrete'
]]
df_initial_reduced_filtered = df_initial_reduced_filtered.sort_values(by=['gameID', 'timeAbsolute'])
dfi = df_initial_reduced_filtered
dfi['phase_extended'] = dfi['phase']
dfi.loc[(dfi.phase=='repeated') & (dfi.repetition==1),'phase_extended'] = 'repetition 1'
dfi.loc[(dfi.phase=='repeated') & (dfi.repetition==2),'phase_extended'] = 'repetition 2'
# dfi['phase_number'] = dfi.phase_extended.astype("category",
# ordered=True,
# categories=ordered_phases).cat.codes
dfi['rawF1DiscreteScore'] = dfi.apply(getF1ScoreLambda, axis=1)
###Output
_____no_output_____
###Markdown
Settled Block DataBlock data after coming to rest (after physics)
###Code
query = coll.find({"$and":[
{'condition':{'$ne':'practice'}},
{'eventType':'settled'},
#{'iterationName': iterationName}, #use this if one iteration name
{"$or":[{'iterationName':'Exp2Pilot3'},
{'iterationName':'Exp2Pilot3_batch2'}]}]
})
df_settled_full = pd.DataFrame(list(query))
# filter dataframe for complete datasets
df_settled_full_filtered = df_settled_full[df_settled_full.workerId.isin(complete_data_ids)]
print('Loaded ' + str(df_settled_full_filtered.shape[0]) + ' complete sets of settled blocks')
# reduce to crucial information
df_settled_full_filtered.columns
df_settled_reduced_filtered = df_settled_full_filtered[[
'gameID','trialNum','phase','condition','eventType','targetName','repetition','targetID', #trial identifiers
'nullScore','incrementalScore','normedIncrementalScore','rawScoreDiscrete','incrementalNormedScoreDiscrete','numBlocks','blockFell', #scoring
'score','currBonus', #bonusing
'timeAbsolute','timeRelative',#timing
'discreteWorld','allVertices','blockKind','blockColorID','blockColor','blockCenterX', 'blockCenterY',#world reconstruction
'x_index','y_index','x_discrete','y_discrete'
]]
df_settled_reduced_filtered = df_settled_reduced_filtered.sort_values(by=['gameID', 'timeAbsolute'])
dfs = df_settled_reduced_filtered
dfs['rawF1DiscreteScore'] = dfs.apply(getF1ScoreLambda, axis=1)
###Output
_____no_output_____
###Markdown
Survey Data
###Code
query = coll.find({"$and":[
{'eventType':'survey_data'},
#{'iterationName': iterationName}, #use this if one iteration name
{"$or":[{'iterationName':'Exp2Pilot3'},
{'iterationName':'Exp2Pilot3_batch2'}]}]
})
df_survey = pd.DataFrame(list(query.sort('absoluteTime')))
df_survey[['gameID','age','comments','difficulty','fun','strategies','inputDevice','sex','score']]
###Output
_____no_output_____
###Markdown
Data Cleaning (bugs)
###Code
# Remove two block placements (potentially from refreshing?)
# These were recorded but don't seem to be a part of the final structure
# Believe they are from refreshing
dfi = dfi[~(((dfi.gameID == '4611-415301bd-3cd2-4751-9911-e530d1bce758') &
(dfi.trialNum==1) &
(dfi.blockNum == 1) &
(dfi.blockKind=='D')) |
((dfi.gameID == '2328-cf96d18d-a95b-4d1b-bc43-602ee1bf5835') &
(dfi.trialNum==0) &
(dfi.blockNum == 1) &
(dfi.blockKind=='E')))]
dfs = dfs[~(((dfs.gameID == '4611-415301bd-3cd2-4751-9911-e530d1bce758') &
             (dfs.trialNum==1) &
             (dfs.numBlocks == 1) &
             (dfs.blockKind=='D')) |
            ((dfs.gameID == '2328-cf96d18d-a95b-4d1b-bc43-602ee1bf5835') &
             (dfs.trialNum==0) &
             (dfs.numBlocks == 1) &
             (dfs.blockKind=='E')))]
# Mark a participant as buggy
df['buggy'] = False
dfs['buggy'] = False
dfi['buggy'] = False
df_survey['buggy'] = False
#Mark this participant as bugs found leading to >60s build time. Perhaps a very slow computer?
df.loc[df.gameID=="3988-e15c8e2e-0b53-43fd-a2d3-686d3efd6923",'buggy'] = True
dfs.loc[dfs.gameID=="3988-e15c8e2e-0b53-43fd-a2d3-686d3efd6923",'buggy'] = True
dfi.loc[dfi.gameID=="3988-e15c8e2e-0b53-43fd-a2d3-686d3efd6923",'buggy'] = True
df_survey.loc[df_survey.gameID=="3988-e15c8e2e-0b53-43fd-a2d3-686d3efd6923",'buggy'] = True
#Mark this participant as NaNs found for two scores.
df.loc[df.gameID=="4739-25f27c31-0d4c-46ae-a515-02351c69042d",'buggy'] = True
dfs.loc[dfs.gameID=="4739-25f27c31-0d4c-46ae-a515-02351c69042d",'buggy'] = True
dfi.loc[dfi.gameID=="4739-25f27c31-0d4c-46ae-a515-02351c69042d",'buggy'] = True
df_survey.loc[df_survey.gameID=="4739-25f27c31-0d4c-46ae-a515-02351c69042d",'buggy'] = True
df_survey['buggy'] = False
df_survey.loc[df_survey.gameID=="3988-e15c8e2e-0b53-43fd-a2d3-686d3efd6923",'buggy'] = True
df_survey.loc[df_survey.gameID=="4739-25f27c31-0d4c-46ae-a515-02351c69042d",'buggy'] = True
###Output
_____no_output_____
###Markdown
Inter-block-interval
###Code
def getMeanIBI(values):
'''Obtain mean time between block placements'''
ibis = []
for x, y in zip(values[0::], values[1::]):
#print(x,y)
ibi = y-x
assert(ibi >= 0)
ibis.append(y-x)
return np.mean(ibis)
def getMedianIBI(values):
    '''Obtain median time between block placements'''
    ibis = []
    for x, y in zip(values[0::], values[1::]):
        #print(x,y)
        ibi = y-x
        assert(ibi >= 0)
        ibis.append(y-x)
    return np.median(ibis)

def getSDIBI(values):
    '''Obtain standard deviation of the time between block placements'''
    ibis = []
    for x, y in zip(values[0::], values[1::]):
        #print(x,y)
        ibi = y-x
        assert(ibi >= 0)
        ibis.append(y-x)
    return np.std(ibis)

def getMinIBI(values):
    '''Obtain minimum time between block placements'''
    ibis = []
    for x, y in zip(values[0::], values[1::]):
        #print(x,y)
        ibi = y-x
        assert(ibi >= 0)
        ibis.append(y-x)
    return np.min(ibis)
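
# Aside: since placement times within a trial are already time-ordered (the assertion
# above would fail otherwise), the pairwise differences are just np.diff(values), so an
# equivalent, more compact helper would be e.g.:
#
#   def getMeanIBI(values):
#       return np.mean(np.diff(values))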
dfi = dfi.drop_duplicates(subset=['gameID','trialNum','blockNum'], keep='last')
dfIBIMean = dfi.sort_values('timeAbsolute').groupby(['gameID','trialNum'])['relativePlacementTime']\
.agg(getMeanIBI).reset_index()
dfIBIMean = dfIBIMean.rename(columns = {'relativePlacementTime':'meanIBI'})
df = pd.merge(df, dfIBIMean, how='left', on=['gameID','trialNum'])
dfIBIMin = dfi.sort_values('timeAbsolute').groupby(['gameID','trialNum'])['relativePlacementTime']\
.agg(getMinIBI).reset_index()
dfIBIMin = dfIBIMin.rename(columns = {'relativePlacementTime':'minIBI'})
df = pd.merge(df, dfIBIMin, how='left', on=['gameID','trialNum'])
thinking_time = dfi[dfi.blockNum==1][['gameID','trialNum','relativePlacementTime']]
thinking_time = thinking_time.rename(columns = {'relativePlacementTime':'thinkingTime'})
df = pd.merge(df, thinking_time, how='left', on=['gameID','trialNum'])
dfIBIMedian = dfi.sort_values('timeAbsolute').groupby(['gameID','trialNum'])['relativePlacementTime']\
.agg(getMedianIBI).reset_index()
dfIBIMedian = dfIBIMedian.rename(columns = {'relativePlacementTime':'medianIBI'})
df = pd.merge(df, dfIBIMedian, how='left', on=['gameID','trialNum'])
dfIBISD = dfi.sort_values('timeAbsolute').groupby(['gameID','trialNum'])['relativePlacementTime']\
.agg(getSDIBI).reset_index()
dfIBISD = dfIBISD.rename(columns = {'relativePlacementTime':'sdIBI'})
df = pd.merge(df, dfIBISD, how='left', on=['gameID','trialNum'])
df_trial_end_full_filtered
# Clean age data
df_survey.loc[(df_survey.age=='1978'),'age'] = '42'
df[~df.buggy]['gameID'].nunique()
###Output
_____no_output_____
###Markdown
Export Data
###Code
iterationName = 'Exp2Pilot3_all'
out_path = os.path.join(csv_dir,'block_silhouette_{}.csv'.format(iterationName))
df.to_csv(out_path)
out_path = os.path.join(csv_dir,'block_silhouette_initial_{}.csv'.format(iterationName))
dfi.to_csv(out_path)
out_path = os.path.join(csv_dir,'block_silhouette_settled_{}.csv'.format(iterationName))
dfs.to_csv(out_path)
out_path = os.path.join(csv_dir,'block_silhouette_{}_good.csv'.format(iterationName))
df[~df.buggy].to_csv(out_path)
out_path = os.path.join(csv_dir,'block_silhouette_initial_{}_good.csv'.format(iterationName))
dfi[~dfi.buggy].to_csv(out_path)
out_path = os.path.join(csv_dir,'block_silhouette_settled_{}_good.csv'.format(iterationName))
dfs[~dfs.buggy].to_csv(out_path)
out_path = os.path.join(csv_dir,'block_silhouette_survey_{}.csv'.format(iterationName))
df_survey.to_csv(out_path)
df_survey[~df_survey.buggy][['gameID','timeAbsolute','age','comments','difficulty','fun','strategies','inputDevice','sex','score']]
list(df_survey.age)
print('age mean: ', df_survey[~df_survey.buggy]['age'].apply(int).mean())
print('age std: ', df_survey[~df_survey.buggy]['age'].apply(int).std())
df_survey[~df_survey.buggy]['sex'].value_counts()
print('bonus mean: ', df_survey[~df_survey.buggy]['score'].mean())
print('bonus std: ', df_survey[~df_survey.buggy]['score'].std())
###Output
bonus mean: 0.4346938775510205
bonus std: 0.3270900872390786
|
essential/05.ipynb | ###Markdown
Chapter 5 Introduction to Numpy
###Code
!pip install -q numpy
###Output
_____no_output_____
###Markdown
NumPy means "numerical Python". It is used for numerical operations and manipulation, especially on arrays and matrices. For example, a 3 x 3 matrix:

| 1 | 2 | 3 |
| --- | --- | --- |
| 4 | 5 | 6 |
| 7 | 8 | 9 |

order 3 x 3
###Code
# import package numpy and give it an alias np
import numpy as np
a = np.array(
[
[1,2,3],
[4,5,6],
[7,8,9]
]
)
print(a)
print("dim = ", a.ndim)
print("shape = ", a.shape)
print("datatype = ", a.dtype)
print("size = ", a.size)
print()
alist = [
[1,2,3],
[4,5,6],
[7,8,9]
]
print(alist)
print(len(alist))
# print(size(alist))
# print(shape(alist))
print(type(a))
print(type(alist))
import numpy as np
blist = [31, 32, 33]
b = np.array(blist)
print(blist)
print()
print(b)
print("dim = ", b.ndim)
print("shape = ", b.shape)
print("datatype = ", b.dtype)
print("size = ", b.size)
print()
import numpy as np
clist = [1, 'Tue', 3, 'Wed']
c = np.array(clist)
print(clist)
print()
print(c)
print("dim = ", c.ndim)
print("shape = ", c.shape)
print("datatype = ", c.dtype)
print("size = ", c.size)
print()
###Output
[1, 'Tue', 3, 'Wed']
['1' 'Tue' '3' 'Wed']
dim = 1
shape = (4,)
datatype = <U11
size = 4
###Markdown
Class activity

| 11 | 12 | 13 |
| --- | --- | --- |

order 1 x 3 or row matrix

$$\begin{pmatrix} 11 & 12 & 13\end{pmatrix}$$

Without specifying the datatype.
###Code
# import package numpy and give it an alias np
import numpy as np
a = np.array(
[
[11, 12, 13]
]
)
print(a)
print("dim = ", a.ndim)
print("shape = ", a.shape)
print("datatype = ", a.dtype)
print("size = ", a.size)
print()
###Output
[[11 12 13]]
dim = 2
shape = (1, 3)
datatype = int32
size = 3
###Markdown
Specifying datatype int64
###Code
# import package numpy and give it an alias np
import numpy as np
a = np.array(
[
[11, 12, 13]
], dtype='int64'
)
print(a)
print("dim = ", a.ndim)
print("shape = ", a.shape)
print("datatype = ", a.dtype)
print("size = ", a.size)
print()
###Output
[[11 12 13]]
dim = 2
shape = (1, 3)
datatype = int64
size = 3
###Markdown
Specifying datatype float
###Code
# import package numpy and give it an alias np
import numpy as np
a = np.array(
[
[11, 12, 13]
], dtype='float'
)
print(a)
print("dim = ", a.ndim)
print("shape = ", a.shape)
print("datatype = ", a.dtype)
print("size = ", a.size)
print()
###Output
[[11. 12. 13.]]
dim = 2
shape = (1, 3)
datatype = float64
size = 3
###Markdown
Specifying datatype float32
###Code
# import package numpy and give it an alias np
import numpy as np
a = np.array(
[
[11, 12, 13]
], dtype='float32'
)
print(a)
print("dim = ", a.ndim)
print("shape = ", a.shape)
print("datatype = ", a.dtype)
print("size = ", a.size)
print()
###Output
[[11. 12. 13.]]
dim = 2
shape = (1, 3)
datatype = float32
size = 3
###Markdown
float value
###Code
# import package numpy and give it an alias np
import numpy as np
a = np.array(
[
[11.0, 12.0, 13.0]
]
)
print(a)
print("dim = ", a.ndim)
print("shape = ", a.shape)
print("datatype = ", a.dtype)
print("size = ", a.size)
print()
###Output
[[11. 12. 13.]]
dim = 2
shape = (1, 3)
datatype = float64
size = 3
###Markdown
Class activity

| 21 |
| --- |
| 22 |
| 31 |
| 0 |

order 4 x 1 or column matrix

$$\begin{pmatrix} 21 \\ 22 \\ 31 \\ 0 \\ \end{pmatrix}$$
###Code
# import package numpy and give it an alias np
import numpy as np
a = np.array(
[
[21],
[22],
[31],
[0],
], dtype='uint8'
)
print(a)
print("dim = ", a.ndim)
print("shape = ", a.shape)
print("datatype = ", a.dtype)
print("size = ", a.size)
print()
###Output
[[21]
[22]
[31]
[ 0]]
dim = 2
shape = (4, 1)
datatype = uint8
size = 4
###Markdown
Minimum value in 8 bits

| 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
| --- | --- | --- | --- | --- | --- | --- | --- |
| 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |

Maximum value in 8 bits

| 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
| --- | --- | --- | --- | --- | --- | --- | --- |
| 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 |

uint8 means unsigned integer in 8 bits: min = 0, max = 255. int8 means signed integer in 8 bits: min = -128, max = 127. (You can check these limits with `np.iinfo(np.uint8)` and `np.iinfo(np.int8)`.)

Installation of numpy, scipy and pandas
###Code
!pip install -q numpy
def array_properties(a):
print("a = \n", a)
print("dim = ", a.ndim)
print("shape = ", a.shape)
print("datatype = ", a.dtype)
print("size = ", a.size)
print()
###Output
_____no_output_____
###Markdown
Creating array
###Code
import numpy as np
a = np.array([1, 2, 3])
print(a)
print()
print("a = \n", a)
print("dim = ", a.ndim)
print("shape = ", a.shape)
print("datatype = ", a.dtype)
print("size = ", a.size)
print()
import numpy as np
a_3x3 = np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3]])
array_properties(a_3x3)
###Output
a =
[[1 1 1]
[2 2 2]
[3 3 3]]
dim = 2
shape = (3, 3)
datatype = int32
size = 9
###Markdown
`arange`: integer array creation. `range` is regularly used in `for` loops.
###Code
for n in range(10):
print(n)
start = 1
stop = 10 + 1
for n in range(start,stop):
print(n, end=', ')
start = 1
stop = 10 + 1
step = 2
for n in range(start, stop, step):
print(n, end=', ')
import numpy as np
seq_a = np.arange(1, 10)
array_properties(seq_a)
import numpy as np
ar10 = np.arange(10)
array_properties(ar10)
for n in ar10:
print(n, end=' | ')
start = 1
stop = 10 + 1
step = 2
ar10b = np.arange(start, stop, step)
array_properties(ar10b)
for n in ar10b:
print(n, end=' | ')
start = 1
stop = 10 + 1
step = 2
ar10b = np.arange(start, stop, step)
ar10b = np.uint8(ar10b)
array_properties(ar10b)
for n in ar10b:
print(n, end=' | ')
print()
print()
ar10b = np.float32(ar10b)
array_properties(ar10b)
###Output
a =
[1 3 5 7 9]
dim = 1
shape = (5,)
datatype = uint8
size = 5
1 | 3 | 5 | 7 | 9 |
a =
[1. 3. 5. 7. 9.]
dim = 1
shape = (5,)
datatype = float32
size = 5
###Markdown
Class Activity: Create an array of integers [0, 5, 15, ..., 100]. linspace creates an array of floating-point values of a specified size.
###Code
import numpy as np
seq_a2 = np.linspace(1, 10, 15)
array_properties(seq_a2)
###Output
a =
[ 1. 1.64285714 2.28571429 2.92857143 3.57142857 4.21428571
4.85714286 5.5 6.14285714 6.78571429 7.42857143 8.07142857
8.71428571 9.35714286 10. ]
dim = 1
shape = (15,)
datatype = float64
size = 15
###Markdown
|1|2|3|4|5|6|7|8|9|10||---|---|---|---|---|---|---|---|---|---||0|1|2|3|4|5|6|7|8|9||0|x|x|x|x|x|x|x|x|10| 0 --> 0 1 --> x 2 --> x 9 --> 10
###Code
x = 10/9 * 9
x
import numpy as np
start = 0
stop = 10
size = 10
arls_0_10_10 = np.linspace(start, stop, size)
array_properties(arls_0_10_10)
import numpy as np
start = 0
stop = 10
size = 11
arls_0_10_11 = np.linspace(start, stop, size)
array_properties(arls_0_10_11)
###Output
a =
[ 0. 1. 2. 3. 4. 5. 6. 7. 8. 9. 10.]
dim = 1
shape = (11,)
datatype = float64
size = 11
###Markdown
zeros: create an array of zeros.
###Code
import numpy as np
zer_10 = np.zeros((1,10))
array_properties(zer_10)
import numpy as np
zer_10a = np.zeros((1,10), dtype='float32')
array_properties(zer_10a)
import numpy as np
zer_10 = np.zeros((1,10), dtype=np.float32)
array_properties(zer_10)
import numpy as np
zer_10c = np.zeros((1,10), dtype='int')
array_properties(zer_10c)
import numpy as np
zer_10 = np.zeros((1,10), dtype=np.uint8)
array_properties(zer_10)
import numpy as np
zeros_arr = np.zeros((2, 4))
array_properties(zeros_arr)
import numpy as np
zer_4_5_3 = np.zeros((4,5,3), dtype=np.uint8)
array_properties(zer_4_5_3)
import numpy as np
zer_4_5_3_2 = np.zeros((4,5,3,2), dtype=np.uint8)
array_properties(zer_4_5_3_2)
###Output
a =
[[[[0 0]
[0 0]
[0 0]]
[[0 0]
[0 0]
[0 0]]
[[0 0]
[0 0]
[0 0]]
[[0 0]
[0 0]
[0 0]]
[[0 0]
[0 0]
[0 0]]]
[[[0 0]
[0 0]
[0 0]]
[[0 0]
[0 0]
[0 0]]
[[0 0]
[0 0]
[0 0]]
[[0 0]
[0 0]
[0 0]]
[[0 0]
[0 0]
[0 0]]]
[[[0 0]
[0 0]
[0 0]]
[[0 0]
[0 0]
[0 0]]
[[0 0]
[0 0]
[0 0]]
[[0 0]
[0 0]
[0 0]]
[[0 0]
[0 0]
[0 0]]]
[[[0 0]
[0 0]
[0 0]]
[[0 0]
[0 0]
[0 0]]
[[0 0]
[0 0]
[0 0]]
[[0 0]
[0 0]
[0 0]]
[[0 0]
[0 0]
[0 0]]]]
dim = 4
shape = (4, 5, 3, 2)
datatype = uint8
size = 120
###Markdown
ones: create an array of ones.
###Code
import numpy as np
# specify the shape of the array in the `np.ones` function.
ones_arr = np.ones((4, 2))
array_properties(ones_arr)
import numpy as np
shape = (1,10)
ones_10 = np.ones(shape)
array_properties(ones_10)
import numpy as np
shape = (10,)
ones_10 = np.ones(shape)
array_properties(ones_10)
###Output
a =
[1. 1. 1. 1. 1. 1. 1. 1. 1. 1.]
dim = 1
shape = (10,)
datatype = float64
size = 10
###Markdown
Class Activity: Create an array of dim 3 filled with ones. Recap: 1. Create an array from list values. 2. Convert a list to an array. 3. Convert arrays between different types (int32, int64, float64, float32, uint8, <U11). 4. Create an array of zeros. 5. Create an array of ones. A compact sketch covering these points is shown below.
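A compact sketch touching each recap point (the variable names and values here are illustrative, not from the lesson):
```python
import numpy as np

lst = [1, 2, 3, 4]                  # 1-2. create an array from a Python list
a = np.array(lst)
a_f32 = a.astype('float32')         # 3. convert between dtypes
a_u8 = a.astype(np.uint8)
z = np.zeros((2, 3))                # 4. array of zeros
o = np.ones((2, 3), dtype='int32')  # 5. array of ones
print(a.dtype, a_f32.dtype, a_u8.dtype, z.dtype, o.dtype)
```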
###Code
import numpy as np
emp_arr = np.empty((4, 4))
array_properties(emp_arr)
###Output
a =
[[4.67296746e-307 1.69121096e-306 1.20161526e-306 8.34441742e-308]
[1.78022342e-306 6.23058028e-307 9.79107872e-307 6.89807188e-307]
[7.56594375e-307 6.23060065e-307 1.78021527e-306 8.34454050e-308]
[1.11261027e-306 1.15706896e-306 1.33512173e-306 1.33504432e-306]]
dim = 2
shape = (4, 4)
datatype = float64
size = 16
###Markdown
Class Activity: Create an empty array of 1x10.
###Code
import numpy as np
emp_1x10 = np.empty((1, 10))
array_properties(emp_1x10)
###Output
a =
[[1. 1. 1. 1. 1. 1. 1. 1. 1. 1.]]
dim = 2
shape = (1, 10)
datatype = float64
size = 10
###Markdown
Reshape: Create an array of 1x10, then reshape it to 2x5.
###Code
import numpy as np
ar1x10 = np.arange(1, 11)
array_properties(ar1x10)
# reshape class method
ar2x5 = ar1x10.reshape((2,5))
array_properties(ar2x5)
# reshape function of np
ar2x5 = np.reshape(ar1x10, (2,5))
array_properties(ar2x5)
import numpy as np
arls10 = np.linspace(10, 55, 10)
array_properties(arls10)
# reshape into a wrong-sized container: 10 elements cannot fill 3x3=9 slots, so this raises a ValueError
try:
    array_properties(ar1x10.reshape((3,3)))
except ValueError as e:
    print("ValueError:", e)
import numpy as np
a1 = np.arange(1, 13)
array_properties(a1)
a2 = np.reshape(a1, (3, 4))
array_properties(a2)
###Output
a =
[ 1 2 3 4 5 6 7 8 9 10 11 12]
dim = 1
shape = (12,)
datatype = int32
size = 12
a =
[[ 1 2 3 4]
[ 5 6 7 8]
[ 9 10 11 12]]
dim = 2
shape = (3, 4)
datatype = int32
size = 12
###Markdown
Reshape knowing only one dim size.
###Code
import numpy as np
arls10 = np.linspace(10, 55, 10)
array_properties(arls10)
# reshape into 5xunknown
ar5x_ = ar1x10.reshape((5,-1))
array_properties(ar5x_)
# reshape into unknown x5
ar_x5 = ar1x10.reshape((-1,5))
array_properties(ar_x5)
import numpy as np
arls10 = np.linspace(10, 55, 10)
array_properties(arls10)
# reshape into 3 x unknown: 10 elements are not divisible by 3, so this raises a ValueError
try:
    array_properties(ar1x10.reshape((3,-1)))
except ValueError as e:
    print("ValueError:", e)
import numpy as np
a1 = np.arange(1, 13, 1.5)
array_properties(a1)
a2 = np.reshape(a1, (4, -1))
array_properties(a2)
import numpy as np
a1 = np.linspace(1, 10, 15).reshape(5, -1)
array_properties(a1)
###Output
a =
[[ 1. 1.64285714 2.28571429]
[ 2.92857143 3.57142857 4.21428571]
[ 4.85714286 5.5 6.14285714]
[ 6.78571429 7.42857143 8.07142857]
[ 8.71428571 9.35714286 10. ]]
dim = 2
shape = (5, 3)
datatype = float64
size = 15
###Markdown
Class Activity: Create an array of shape 4x5 and reshape it into 2x-1.
###Code
import numpy as np
ar20 = np.linspace(1,21,20)
array_properties(ar20)
ar4x5 = ar20.reshape((4,5))
array_properties(ar4x5)
ar2x_ = ar4x5.reshape((2,-1))
array_properties(ar2x_)
###Output
a =
[ 1. 2.05263158 3.10526316 4.15789474 5.21052632 6.26315789
7.31578947 8.36842105 9.42105263 10.47368421 11.52631579 12.57894737
13.63157895 14.68421053 15.73684211 16.78947368 17.84210526 18.89473684
19.94736842 21. ]
dim = 1
shape = (20,)
datatype = float64
size = 20
a =
[[ 1. 2.05263158 3.10526316 4.15789474 5.21052632]
[ 6.26315789 7.31578947 8.36842105 9.42105263 10.47368421]
[11.52631579 12.57894737 13.63157895 14.68421053 15.73684211]
[16.78947368 17.84210526 18.89473684 19.94736842 21. ]]
dim = 2
shape = (4, 5)
datatype = float64
size = 20
a =
[[ 1. 2.05263158 3.10526316 4.15789474 5.21052632 6.26315789
7.31578947 8.36842105 9.42105263 10.47368421]
[11.52631579 12.57894737 13.63157895 14.68421053 15.73684211 16.78947368
17.84210526 18.89473684 19.94736842 21. ]]
dim = 2
shape = (2, 10)
datatype = float64
size = 20
###Markdown
Class Activity: Create an array with 24 elements (size 24). Reshape it with 3, and then 4, rows. A possible sketch is shown after the starter cell below.
###Code
import numpy as np
# create a 1 dim array of size 24
# reshape into array o 3x-1
# reshape into array of 4x-1
###Output
_____no_output_____
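###Markdown
A possible sketch for the activity above (the element values are arbitrary):
```python
import numpy as np

a24 = np.arange(24)           # 1-D array with 24 elements
a3 = a24.reshape((3, -1))     # 3 rows -> shape (3, 8)
a4 = a24.reshape((4, -1))     # 4 rows -> shape (4, 6)
print(a3.shape, a4.shape)
```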
###Markdown
Class Activity: Create an array of unsigned 8-bit integers of size 300 and reshape it into (10,10,-1).
###Code
ar300 = np.arange(0,300,dtype='uint8')
print(ar300.shape)
ar10x10x_ = ar300.reshape((10,10,-1))
print(ar10x10x_.shape)
# 10,-1,3
ar10x_x3 = ar300.reshape((10,-1,3))
print(ar10x_x3.shape)
# -1,10,3
ar_x10x3 = ar300.reshape((-1,10,3))
print(ar_x10x3.shape)
# -1,-1,3 is invalid: only one dimension can be -1, so reshape raises a ValueError
try:
    print(ar300.reshape((-1,-1,3)).shape)
except ValueError as e:
    print("ValueError:", e)
###Output
_____no_output_____
###Markdown
random: `rand` will generate random values between 0 and 1. It accepts the shape of the array to be created.
###Code
import numpy as np
# random array between 0-1 with shape (4,5)
a1 = np.random.rand(4, 5)
array_properties(a1)
###Output
a =
[[0.79355467 0.02398627 0.54799052 0.71533769 0.49971466]
[0.6456805 0.19082646 0.58991645 0.0107417 0.53653781]
[0.50731978 0.26904773 0.03908328 0.68412245 0.89308668]
[0.93382234 0.10272734 0.67082097 0.19888379 0.72827832]]
dim = 2
shape = (4, 5)
datatype = float64
size = 20
###Markdown
Class Activity: Create an array with shape (3,7) of values between 0 and 1. randint creates an array of integers. It accepts start, stop, and shape: `randint(start, stop, shape)`.
###Code
import numpy as np
a1 = np.random.randint(0, 10, (4,5))
array_properties(a1)
###Output
a =
[[2 1 5 8 3]
[4 8 4 5 1]
[9 8 4 5 2]
[1 7 2 3 0]]
dim = 2
shape = (4, 5)
datatype = int32
size = 20
###Markdown
Class Activity: Create an array of unsigned 8-bit integers with shape (100, 100, 3) filled with random integer values. Accessing an array element. 1 D Array
###Code
import numpy as np
a1 = np.arange(1, 13)
array_properties(a1)
###Output
a =
[ 1 2 3 4 5 6 7 8 9 10 11 12]
dim = 1
shape = (12,)
datatype = int32
size = 12
###Markdown
|index|0|1|2|3|4|5|6|7|8|9|10|11||---|---|---|---|---|---|---|---|---|---|---|---|---||value|1|2|3|4|5|6|7|8|9|10|11|12||special index|-12|-11|-10|-9|-8|-7|-6|-5|-4|-3|-2|-1|
###Code
print('first: ', a1[0])
print('second: ', a1[1])
print('third: ', a1[2])
print('ninth: ', a1[9-1])
print('last: ', a1[11])
print('last: ', a1[-1])
# a1[istart:istop]
# stop or istop excluded
print('index 0 to 4: ', a1[0:4])
# omit the istart (it defaults to 0)
print('index 0 to 4: ', a1[:4])
print('index 3 to 6: ', a1[3:6])
print('index 0 to 10: ', a1[0:10])
print('index 0 to 10: ', a1[:10])
# omitting istop means go through the last index
print('index 3 to last: ', a1[3:12])
print('index 3 to last: ', a1[3:])
# from istart to second to the last value
# index -1 equal last index.
print('index 3 to second to the last: ', a1[3:-1])
print('last 2 numbers: ', a1[10:])
print('last 2 numbers: ', a1[-2:])
# get odd positions
# a1[istart : istop : istep]
print('odd positions: ', a1[0::2])
print('even positions: ', a1[1::2])
import numpy as np
N = 12
a1 = np.arange(N+1)
array_properties(a1)
###Output
a =
[ 0 1 2 3 4 5 6 7 8 9 10 11 12]
dim = 1
shape = (13,)
datatype = int32
size = 13
###Markdown
Use a for loop on the array.
###Code
for i in range(N+1):
print(f'{i}: {a1[i]}')
###Output
0: 0
1: 1
2: 2
3: 3
4: 4
5: 5
6: 6
7: 7
8: 8
9: 9
10: 10
11: 11
12: 12
###Markdown
Assignment: 1. Create an array of shape (10, 10) with uint8 random values. 2. Print the shape, size and dimension. 3. Compute the sum of the third row to the last. 4. Compute the average of the columns. A possible sketch is shown below. 2 D Array
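One possible sketch for the assignment (interpreting "third row to the last" as the rows from index 2 onward and "average of the columns" as a column-wise mean; both readings are assumptions):
```python
import numpy as np

a = np.random.randint(0, 256, size=(10, 10), dtype=np.uint8)  # 1.
print(a.shape, a.size, a.ndim)                                # 2.
print(a[2:, :].sum())                                         # 3. sum of rows 3..last
print(a.mean(axis=0))                                         # 4. column averages
```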
###Code
import numpy as np
a14 = np.arange(14+1)
print('size of a14=', a14.size)
if (a14.size % 4) == 0:
reshape_a = a14.reshape((4,-1))
print(reshape_a.shape)
# divide result in float
print('16 / 3=', 16 / 3)
# remainder
print('16 % 3=', 16 % 3)
# whole or integer
print('16 // 3', 16 // 3, sep='=')
print('16 / 4=', 16 / 4)
print('16 % 4=', 16 % 4)
print('16 // 4=', 16 // 4)
import numpy as np
a1 = np.arange(1, 17)
a2 = np.resize(a1, (4,4))
array_properties(a2)
print('Element in row=1, column=1: ', a2[0,0])
print('Element in row=3, column=1: ', a2[2,0])
print('Element in row=2, column=3: ', a2[1,2])
array_properties(a2)
# row & index
row = 1
i = row - 1
# column & index
col = 2
j = col - 1
print(f'Element in row={row}, column={col}: {a2[i,j]}')
# get all elements in row 2
print(f'Elements in row= 2: {a2[1,:]}')
# alternative: get all elements in row 2 or dimension 1
print(f'Elements in row= 2: {a2[1]}')
# get all values in column 2 (index 1 of dimension 2)
print('Elements in col= 2: ', a2[:,1])
# wrong alternative: a2[,1] is a syntax error; the colon is required, i.e. a2[:,1]
# print('Elements in col= 2: ', a2[,1])
print(f'Elements in rows= 1 to 3 and columns= 2 to 4: \n', a2[0:3,1:3])
print()
print(f'Elements in row index= 0 to 2 and column index= 1 to 2: \n', a2[0:3,1:3])
array_properties(a2)
rows, columns = a2.shape
for i in range(rows):
for j in range(columns):
print(f'{i},{j}: {a2[i,j]}', end=', ')
print()
print()
rows, columns = a2.shape
for i in range(rows):
for j in range(columns):
print(a2[i,j], end=', ')
print()
###Output
a =
[[ 1 2 3 4]
[ 5 6 7 8]
[ 9 10 11 12]
[13 14 15 16]]
dim = 2
shape = (4, 4)
datatype = int32
size = 16
0,0: 1, 0,1: 2, 0,2: 3, 0,3: 4,
1,0: 5, 1,1: 6, 1,2: 7, 1,3: 8,
2,0: 9, 2,1: 10, 2,2: 11, 2,3: 12,
3,0: 13, 3,1: 14, 3,2: 15, 3,3: 16,
1, 2, 3, 4,
5, 6, 7, 8,
9, 10, 11, 12,
13, 14, 15, 16,
###Markdown
3D Array
###Code
import numpy as np
a3 = np.arange(1, 9).reshape((2,2,2))
array_properties(a3)
print(a3[0,1,1])
#
print(a3[0,1,:])
print(a3[0,1])
#
print(a3[0,:,1])
print(a3[:,1,1])
a3D = np.array([
[
[111,112,113],
[121,122,123],
],
[
[211,212,213],
[221,222,223],
]
])
array_properties(a3D)
print('Element 1,2,2: ', a3D[0,1,1])
array_properties(a3)
rows_3d, rows, columns = a3.shape
for i in range(rows_3d):
for j in range(rows):
for k in range(columns):
print(f'{i},{j},{k}: {a3[i,j,k]}', end=', ')
print()
print()
print()
rows_3d, rows, columns = a3.shape
for i in range(rows_3d):
for j in range(rows):
for k in range(columns):
print(a3[i,j,k], end=', ')
print()
print()
###Output
a =
[[[1 2]
[3 4]]
[[5 6]
[7 8]]]
dim = 3
shape = (2, 2, 2)
datatype = int32
size = 8
0,0,0: 1, 0,0,1: 2,
0,1,0: 3, 0,1,1: 4,
1,0,0: 5, 1,0,1: 6,
1,1,0: 7, 1,1,1: 8,
1, 2,
3, 4,
5, 6,
7, 8,
###Markdown
Changing Array Element
###Code
import numpy as np
a3x3 = np.arange(1,10).reshape((3,-1))
print(a3x3)
###Output
[[1 2 3]
[4 5 6]
[7 8 9]]
###Markdown
||0|1|2||---|---|---|--||0|1|2|3||1|4|5|6||2|7|8|9|
###Code
# print value 6
print(a3x3[1,2])
# print 3
print(a3x3[0,2])
# change 3 to 33
a3x3[0,2] = 33
print(a3x3)
###Output
3
[[ 1 2 33]
[ 4 5 6]
[ 7 18 9]]
###Markdown
||0|1|2||---|---|---|--||0|1|2|33||1|4|5|6||2|7|8|9|
###Code
# change value 8 to 18
print(a3x3[2,1])
a3x3[2,1] = 18
print(a3x3)
import numpy as np
a1 = np.arange(9)
array_properties(a1)
print('Third element: ', a1[2])
a1[2] = a1[2] ** 2
print('Third element squared: ', a1[2])
###Output
a =
[0 1 2 3 4 5 6 7 8]
dim = 1
shape = (9,)
datatype = int32
size = 9
Third element: 2
Third element squared: 4
###Markdown
||0|1|2|3|4|5|6|7|8||---|---|---|---|---|---|---|---|---|---||0|0|1|2|3|4|5|6|7|8|
###Code
# 3 ^ 2 = 9
3 ** 2
import numpy as np
a2 = np.arange(1,10).reshape(3,3)
array_properties(a2)
print('2nd row, 2nd column element: ', a2[1,1])
a2[1,1] = a2[1,1] ** 2
print('2nd row, 2nd column element squared: ', a2[1,1])
print(a2)
print('Third row: ', a2[2,:])
a2[2] = a2[2,:] * 2
print('Third row doubled: ', a2[2,:])
print(a2)
print('First row: ', a2[0,:])
a2[0] = a2[0,:] + a2[1,:]
print('First row increased by second row: ', a2[0,:])
print(a2)
print('Second row: ', a2[1,:])
a2[1] = a2[1,:] - a2[2,:]
print('Second row decreased by third row: ', a2[1,:])
print(a2)
x = 3
y=4
print('x=', x, 'y=', y)
x = x - y
print('x=', x, 'y=', y)
###Output
x= 3 y= 4
x= -1 y= 4
###Markdown
Chapter 5
Introduction to Numpy
Installation of numpy, scipy and pandas
###Code
!pip install -q numpy
def array_properties(a):
print("a = \n", a)
print("dim = ", a.ndim)
print("shape = ", a.shape)
print("datatype = ", a.dtype)
print("size = ", a.size)
print()
###Output
_____no_output_____
###Markdown
Creating array
###Code
import numpy as np
a = np.array([1, 2, 3])
print(a)
print()
print("a = \n", a)
print("dim = ", a.ndim)
print("shape = ", a.shape)
print("datatype = ", a.dtype)
print("size = ", a.size)
print()
import numpy as np
a_3x3 = np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3]])
array_properties(a_3x3)
import numpy as np
seq_a = np.arange(1, 10)
array_properties(seq_a)
import numpy as np
seq_a2 = np.linspace(1, 10, 15)
array_properties(seq_a2)
import numpy as np
zeros_arr = np.zeros((2, 4))
array_properties(zeros_arr)
import numpy as np
ones_arr = np.ones((4, 2))
array_properties(ones_arr)
import numpy as np
emp_arr = np.empty((4, 4))
array_properties(emp_arr)
import numpy as np
a1 = np.arange(1, 13)
array_properties(a1)
a2 = np.reshape(a1, (3, 4))
array_properties(a2)
import numpy as np
a1 = np.arange(1, 13, 1.5)
array_properties(a1)
a2 = np.reshape(a1, (4, -1))
array_properties(a2)
import numpy as np
a1 = np.linspace(1, 10, 15).reshape(5, -1)
array_properties(a1)
import numpy as np
a1 = np.random.rand(4, 5)
array_properties(a1)
import numpy as np
a1 = np.random.randint(0, 10, (4,5))
array_properties(a1)
###Output
a =
[[8 4 1 0 5]
[6 0 2 1 5]
[9 9 9 5 1]
[6 1 0 4 6]]
dim = 2
shape = (4, 5)
datatype = int32
size = 20
###Markdown
Accessing an array element
1 D Array
###Code
import numpy as np
a1 = np.arange(1, 13)
array_properties(a1)
print('first: ', a1[0])
print('second: ', a1[1])
print('third: ', a1[2])
print('ninth: ', a1[9-1])
print('last: ', a1[11])
print('last: ', a1[-1])
print('index 0 to 4: ', a1[0:4])
print('index 0 to 4: ', a1[:4])
print('index 3 to 6: ', a1[3:6])
print('index 0 to 10: ', a1[0:10])
print('index 0 to 10: ', a1[:10])
print('index 3 to last: ', a1[3:12])
print('index 3 to last: ', a1[3:])
print('index 3 to second to the last: ', a1[3:-1])
print('last 2 numbers: ', a1[10:])
print('last 2 numbers: ', a1[-2:])
print('odd positions: ', a1[0::2])
print('even positions: ', a1[1::2])
import numpy as np
N = 12
a1 = np.arange(N+1)
array_properties(a1)
for i in range(N+1):
print(f'{i}: {a1[i]}')
###Output
0: 0
1: 1
2: 2
3: 3
4: 4
5: 5
6: 6
7: 7
8: 8
9: 9
10: 10
11: 11
12: 12
###Markdown
2 D Array
###Code
import numpy as np
a1 = np.arange(1, 17)
a2 = np.resize(a1, (4,4))
array_properties(a2)
print('Element in row=1, column=1: ', a2[0,0])
print('Element in row=3, column=1: ', a2[2,0])
print('Element in row=2, column=3: ', a2[1,2])
array_properties(a2)
# row & index
row = 1
i = row - 1
# column & index
col = 2
j = col - 1
print(f'Element in row={row}, column={col}: ', a2[i,j])
print(f'Elements in row= 2: ', a2[1,:])
print(f'Elements in col= 2: ', a2[:,1])
print(f'Elements in rows= 1 to 3 and columns= 2 to 4: \n', a2[0:3,1:3])
print()
print(f'Elements in row index= 0 to 2 and column index= 1 to 2: \n', a2[0:3,1:3])
array_properties(a2)
rows, columns = a2.shape
for i in range(rows):
for j in range(columns):
print(f'{i},{j}: {a2[i,j]}', end=', ')
print()
print()
rows, columns = a2.shape
for i in range(rows):
for j in range(columns):
print(a2[i,j], end=', ')
print()
###Output
a =
[[ 1 2 3 4]
[ 5 6 7 8]
[ 9 10 11 12]
[13 14 15 16]]
dim = 2
shape = (4, 4)
datatype = int32
size = 16
0,0: 1, 0,1: 2, 0,2: 3, 0,3: 4,
1,0: 5, 1,1: 6, 1,2: 7, 1,3: 8,
2,0: 9, 2,1: 10, 2,2: 11, 2,3: 12,
3,0: 13, 3,1: 14, 3,2: 15, 3,3: 16,
1, 2, 3, 4,
5, 6, 7, 8,
9, 10, 11, 12,
13, 14, 15, 16,
###Markdown
3D Array
###Code
import numpy as np
a3 = np.arange(1, 9).reshape((2,2,2))
array_properties(a3)
print(a3[0,1,1])
print(a3[0,1,:])
print(a3[0,:,1])
print(a3[:,1,1])
a3D = np.array([
[
[111,112,113],
[121,122,123],
],
[
[211,212,213],
[221,222,223],
]
])
array_properties(a3D)
print('Element 1,2,2: ', a3D[0,1,1])
array_properties(a3)
rows_3d, rows, columns = a3.shape
for i in range(rows_3d):
for j in range(rows):
for k in range(columns):
print(f'{i},{j},{k}: {a3[i,j,k]}', end=', ')
print()
print()
print()
rows_3d, rows, columns = a3.shape
for i in range(rows_3d):
for j in range(rows):
for k in range(columns):
print(a3[i,j,k], end=', ')
print()
print()
###Output
a =
[[[1 2]
[3 4]]
[[5 6]
[7 8]]]
dim = 3
shape = (2, 2, 2)
datatype = int32
size = 8
0,0,0: 1, 0,0,1: 2,
0,1,0: 3, 0,1,1: 4,
1,0,0: 5, 1,0,1: 6,
1,1,0: 7, 1,1,1: 8,
1, 2,
3, 4,
5, 6,
7, 8,
###Markdown
Changing Array Element
###Code
import numpy as np
a1 = np.arange(9)
array_properties(a1)
print('Third element: ', a1[2])
a1[2] = a1[2] ** 2
print('Third element squared: ', a1[2])
import numpy as np
a2 = np.arange(1,10).reshape(3,3)
array_properties(a2)
print('2nd row, 2nd column element: ', a2[1,1])
a2[1,1] = a2[1,1] ** 2
print('2nd row, 2nd column element squared: ', a2[1,1])
print(a2)
print('Third row: ', a2[2])
a2[2] = a2[2] * 2
print('Third row doubled: ', a2[2])
print(a2)
print('First row: ', a2[0])
a2[0] = a2[0] + a2[1]
print('First row increased by second row: ', a2[0])
print(a2)
print('Second row: ', a2[1])
a2[1] = a2[1] - a2[2]
print('Second row decreased by third row: ', a2[1])
print(a2)
###Output
Second row: [ 4 25 6]
Second row decreased by third row: [-10 9 -12]
[[ 5 27 9]
[-10 9 -12]
[ 14 16 18]]
|
examples/notebooks/l1_trend_filter.ipynb | ###Markdown
$\ell_1$ trend filtering**Reference:** S.-J. Kim, K. Koh, S. Boyd, and D. Gorinevsky. [*$\ell_1$ Trend Filtering*.](http://stanford.edu/~boyd/papers/l1_trend_filter.html) SIAM Review, 51(2):339-360, 2009. IntroductionThe problem of estimating underlying trends in time series data arises in a variety of disciplines. The $\ell_1$ trend filtering method produces trend estimates $z$ that are piecewise linear from the time series $y$.The $\ell_1$ trend estimation problem can be formulated as$$\text{minimize}~ \frac{1}{2}\|y - z\|_2^2 + \alpha \|Dz\|_1,$$with variable $z \in \mathbf{R}^q$, problem data $y \in \mathbf{R}^q$, and smoothing parameter $\alpha \geq 0$. Here $D \in \mathbf{R}^{(q-2) \times q}$ is the second difference matrix$$D = \left[\begin{array}{ccccccc}1 & -2 & 1 & 0 & \ldots & 0 &0 \\0 & 1 & -2 & 1 & \ldots & 0 & 0 \\\vdots & \vdots & \ddots & \ddots & \ddots & \vdots& \vdots \\0 & 0 & \ldots &1 & -2 & 1 & 0 \\0 & 0 & \ldots & 0 & 1 & -2 & 1 \end{array}\right].$$ Reformulate and Solve ProblemThis problem can be written in standard form by letting$$f_1(x_1) = \frac{1}{2}\|y - x_1\|_2^2, \quad f_2(x_2) = \alpha \|x_2\|_1,$$$$A_1 = D, \quad A_2 = -I, \quad b = 0,$$where the variables $x_1 \in \mathbf{R}^q$ and $x_2 \in \mathbf{R}^{q-2}$. We solve an instance where $y$ is a snapshot of the S&P 500 price for $q = 2000$ time steps and $\alpha = 0.01\|y\|_{\infty}$.
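As a quick illustrative check, separate from the solver run below, the second difference matrix can be built for a small $q$ and applied to a linear ramp, which it should map to (numerically) zero; this mirrors the construction used in the next cell:
```python
import numpy as np
from scipy import sparse

q = 6
D = sparse.lil_matrix(sparse.eye(q))
D.setdiag(-2, k=1)
D.setdiag(1, k=2)
D = D[:(q - 2), :]
print(D.toarray())             # each row looks like [..., 1, -2, 1, ...]
z = np.arange(q, dtype=float)  # a perfectly linear trend
print(D @ z)                   # all zeros (up to floating point)
```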
###Code
import numpy as np
from scipy import sparse
from a2dr import a2dr
from a2dr.proximal import *
# Load time series data: S&P 500 price log.
y = np.loadtxt(open("data/snp500.txt", "rb"), delimiter = ",")
q = y.size
alpha = 0.01*np.linalg.norm(y, np.inf)
# Form second difference matrix.
D = sparse.lil_matrix(sparse.eye(q))
D.setdiag(-2, k = 1)
D.setdiag(1, k = 2)
D = D[:(q-2),:]
# Convert problem to standard form.
prox_list = [lambda v, t: prox_sum_squares(v, t = 0.5*t, offset = y),
lambda v, t: prox_norm1(v, t = alpha*t)]
A_list = [D, -sparse.eye(q-2)]
b = np.zeros(q-2)
# Solve with A2DR.
a2dr_result = a2dr(prox_list, A_list, b)
# Save solution.
z_star = a2dr_result["x_vals"][0]
print("Solve time:", a2dr_result["solve_time"])
print("Number of iterations:", a2dr_result["num_iters"])
###Output
----------------------------------------------------------------------
a2dr v0.2.3.post3 - Prox-Affine Distributed Convex Optimization Solver
(c) Anqi Fu, Junzi Zhang
Stanford University 2019
----------------------------------------------------------------------
### Preconditioning starts ... ###
### Preconditioning finished. ###
max_iter = 1000, t_init (after preconditioning) = 7.00
eps_abs = 1.00e-06, eps_rel = 1.00e-08, precond = True
ada_reg = True, anderson = True, m_accel = 10
lam_accel = 1.00e-08, aa_method = lstsq, D_safe = 1.00e+06
eps_safe = 1.00e-06, M_safe = 10
variables n = 3998, constraints m = 1998
nnz(A) = 7992
Setup time: 1.89e-02
----------------------------------------------------
iter | total res | primal res | dual res | time (s)
----------------------------------------------------
0| 2.32e+01 3.34e-03 2.32e+01 6.48e-02
100| 5.27e-04 1.19e-04 5.14e-04 1.58e+00
200| 5.15e-05 1.85e-05 4.80e-05 2.94e+00
300| 1.40e-05 5.68e-06 1.28e-05 4.21e+00
400| 4.48e-06 1.98e-06 4.02e-06 5.44e+00
500| 2.61e-06 9.02e-07 2.44e-06 6.78e+00
546| 1.22e-06 4.89e-07 1.12e-06 7.40e+00
----------------------------------------------------
Status: Solved
Solve time: 7.40e+00
Total number of iterations: 547
Best total residual: 1.22e-06; reached at iteration 546
======================================================================
Solve time: 7.395859003067017
Number of iterations: 547
###Markdown
Plot Results
###Code
import matplotlib.pyplot as plt
# Show plots inline in ipython.
%matplotlib inline
# Plot properties.
plt.rc("text", usetex = True)
plt.rc("font", family = "serif")
font = {"weight" : "normal",
"size" : 16}
plt.rc("font", **font)
# Plot estimated trend with original signal.
plt.figure(figsize = (6, 6))
plt.plot(np.arange(1,q+1), y, "k:", linewidth = 1.0)
plt.plot(np.arange(1,q+1), z_star, "b-", linewidth = 2.0)
plt.xlabel("Time")
###Output
_____no_output_____ |
Prototype Notebook/legacy/Kriging 6.ipynb | ###Markdown
A matrix
###Code
def A_matrix(layers,dips, sig_z = 1., a = 6., C_0 = -14*1/6**2-0.2,
C_01 = 1, verbose = 0):
#CG = theano_CG
CG = C_G(dips)
CGI = C_GI(dips,layers,a = a, C_01=C_01)
CI = C_I(layers, a = a)
UG = U_G(dips)
UI = U_I(layers)
# print np.shape(UI)[0]
zeros = np.zeros((np.shape(UI)[0],np.shape(UI)[0]))
#print CG,CGI.transpose(),UG.transpose()
A1 = np.hstack((-CG,CGI.transpose(),UG.transpose()))
A2 = np.hstack((CGI,CI,UI.transpose()))
A3 = np.hstack((UG,UI,zeros))
A = np.vstack((A1,A2,A3))
return A
np.set_printoptions(precision = 2, linewidth= 130, suppress = True)
aa = A_matrix(layers, dips)
np.shape(aa)
#aa
###Output
/home/bl3/anaconda3/lib/python3.5/site-packages/ipykernel/__main__.py:32: RuntimeWarning: invalid value encountered in true_divide
/home/bl3/anaconda3/lib/python3.5/site-packages/ipykernel/__main__.py:32: RuntimeWarning: divide by zero encountered in true_divide
/home/bl3/anaconda3/lib/python3.5/site-packages/ipykernel/__main__.py:33: RuntimeWarning: invalid value encountered in true_divide
/home/bl3/anaconda3/lib/python3.5/site-packages/ipykernel/__main__.py:75: RuntimeWarning: invalid value encountered in true_divide
/home/bl3/anaconda3/lib/python3.5/site-packages/ipykernel/__main__.py:76: RuntimeWarning: divide by zero encountered in true_divide
/home/bl3/anaconda3/lib/python3.5/site-packages/ipykernel/__main__.py:76: RuntimeWarning: invalid value encountered in multiply
/home/bl3/anaconda3/lib/python3.5/site-packages/ipykernel/__main__.py:66: RuntimeWarning: invalid value encountered in true_divide
/home/bl3/anaconda3/lib/python3.5/site-packages/ipykernel/__main__.py:67: RuntimeWarning: divide by zero encountered in true_divide
/home/bl3/anaconda3/lib/python3.5/site-packages/ipykernel/__main__.py:67: RuntimeWarning: invalid value encountered in multiply
/home/bl3/anaconda3/lib/python3.5/site-packages/ipykernel/__main__.py:51: RuntimeWarning: invalid value encountered in true_divide
/home/bl3/anaconda3/lib/python3.5/site-packages/ipykernel/__main__.py:51: RuntimeWarning: divide by zero encountered in true_divide
/home/bl3/anaconda3/lib/python3.5/site-packages/ipykernel/__main__.py:52: RuntimeWarning: invalid value encountered in true_divide
###Markdown
Dual Kriging
###Code
def G_f(dips, dips_v):
a_g = np.asarray(dips)
b_g = np.asarray(dips_v)
# print a, a[:,0]
# print b,b[:,0]
Gx = b_g[:,0] - a_g[:,0] # x
Gy = b_g[:,1] -a_g[:,1] # y
G = np.hstack((Gx,Gy))
# G = np.array([-0.71,0.34,0.71,0.93])
return G
def b(dips,dips_v,n):
n -= len(dips)*2 # because x and y direction
G = G_f(dips,dips_v)
b = np.hstack((G, np.zeros(n)))
return b
###Output
_____no_output_____
###Markdown
Estimator normal
###Code
aa = A_matrix(layers, dips)
bb = b([dip_pos_1, dip_pos_2],
[dip_pos_1_v,dip_pos_2_v], len(aa))
# bb[1] = 0
print (bb)
sol = np.linalg.solve(aa,bb)
aa
bb
sol
x = [1,1]
def estimator(x, dips, layers, sol, sig_z = 1., a = 6., C_01 = 1, verbose = 0):
x = np.asarray(x).reshape(1,-1)
dips = np.asarray(dips)
layers = np.asarray(layers)
C_01 = C_01
n = 0
m = len(dips)
# print layers
# print x.reshape(1,-1), dips
r_i = me.euclidean_distances(dips,x)
hx = h_f_GI(dips, x, "x")
Cov_d1 = cov_cubic_d1_f(r_i, a = a)
KzGx = sol[:m] * np.squeeze( C_01*hx / r_i * Cov_d1)
hy = h_f_GI(dips, x, "y")
KzGy = sol[m:2*m] * np.squeeze( C_01 * hy / r_i * Cov_d1)
# KzGx[KzGx == 0] = -0.01
# KzGy[KzGy == 0] = -0.01
# print "KzGx", KzGx, sol[:m]
for s in range(len(layers)):
n += len(layers[s][1:])
a_l = cov_cubic_layer(x, layers[s][1:], a = a)
b_l = cov_cubic_layer(x, layers[s][0].reshape(1,-1), a = a)
aux = a_l-b_l
# aux[aux==0] = 0.000001
if s == 0:
L = np.array(sol[2*m:2*m+n]*(aux))
else:
L = np.hstack((L,sol[2*m+n2:2*m+n]*(aux)))
n2 = n
L = np.squeeze(L)
univ = (sol[2*m+n]*x[0,0] + # x
sol[2*m+n+1] * x[0,1] ) # y
# + sol[2*m+n+2]* x[0,0]**2 # xx
# + sol[2*m+n+3] * x[0,1]**2 # yy
# + sol[2*m+n+4] * x[0,0]*x[0,1]) #xy
if verbose != 0:
print (KzGx, KzGy, L ,univ)
print (Cov_d1, r_i)
print ("")
print (hx, hx/r_i)
print ("angaglkagm",hy/r_i, sol[m:2*m])
z_star = np.sum(KzGx)+np.sum(KzGy)+np.sum(L)+univ
return z_star
pot = np.zeros((100,100))
for i in range(100):
for j in range(100):
pot[i,j] = estimator([i/10.,j/10.],[dip_pos_1, dip_pos_2],
[layer_1, layer_2]
, sol, verbose = 0, C_01 = 1,
a = 6.)
plt.arrow(dip_pos_1[0],dip_pos_1[1], dip_pos_1_v[0]-dip_pos_1[0],
dip_pos_1_v[1]-dip_pos_1[1], head_width = 0.2)
plt.arrow(dip_pos_2[0],dip_pos_2[1],dip_pos_2_v[0]-dip_pos_2[0],
dip_pos_2_v[1]-dip_pos_2[1], head_width = 0.2)
plt.plot(layer_1[:,0],layer_1[:,1], "o")
plt.plot(layer_2[:,0],layer_2[:,1], "o")
plt.plot(layer_1[:,0],layer_1[:,1], )
plt.plot(layer_2[:,0],layer_2[:,1], )
plt.contour(pot.transpose(),30,extent = (0,10,0,10) )
plt.colorbar()
plt.xlim(0,10)
plt.ylim(0,10)
plt.title("GeoMigueller v 0.1")
print (dip_pos_1_v, dip_pos_2_v, layer_1)
###Output
/home/bl3/anaconda3/lib/python3.5/site-packages/ipykernel/__main__.py:15: RuntimeWarning: invalid value encountered in true_divide
/home/bl3/anaconda3/lib/python3.5/site-packages/ipykernel/__main__.py:17: RuntimeWarning: invalid value encountered in true_divide
###Markdown
La Buena
###Code
%matplotlib inline
def pla(angle1,angle2, C_0 = -14*1/6**2-0.2, C_01 = 1):
layer_1 = np.array([[1,7],[5,6], [6,8], [9,9] ])
layer_2 = np.array([[1,2],[5,3], [9,7]])
layer_3 = np.array([[1,1],[3,2],[7,4]])
dip_pos_1 = np.array([3,4])
dip_angle_1 = angle1
dip_pos_1_v = np.array([np.cos(np.deg2rad(dip_angle_1))*1,
np.sin(np.deg2rad(dip_angle_1))]) + dip_pos_1
dip_pos_2 = np.array([6,6])
dip_angle_2 = angle2
dip_pos_2_v = np.array([np.cos(np.deg2rad(dip_angle_2))*1,
np.sin(np.deg2rad(dip_angle_2))]) + dip_pos_2
dip_pos_3 = np.array([9,5])
dip_angle_3 = 90
dip_pos_3_v = np.array([np.cos(np.deg2rad(dip_angle_3))*1,
np.sin(np.deg2rad(dip_angle_3))]) + dip_pos_3
#print b([dip_pos_1,dip_pos_2], [dip_pos_1_v,dip_pos_2_v],13)
aa = A_matrix([layer_1,layer_2, layer_3],
[dip_pos_1,dip_pos_2, dip_pos_3], a = 6.,
C_0= C_0,
C_01 = C_01)
bb = b([dip_pos_1, dip_pos_2, dip_pos_3],
[dip_pos_1_v,dip_pos_2_v, dip_pos_3_v], len(aa))
# bb[1] = 0
print (bb)
sol = np.linalg.solve(aa,bb)
#sol[:-2] = 0
#print aa
print( sol)
pot = np.zeros((50,50))
for i in range(50):
for j in range(50):
pot[i,j] = estimator([i/5.,j/5.],[dip_pos_1, dip_pos_2, dip_pos_3],
[layer_1, layer_2, layer_3]
, sol, verbose = 0, C_01 = C_01,
a = 6.)
plt.arrow(dip_pos_1[0],dip_pos_1[1], dip_pos_1_v[0]-dip_pos_1[0],
dip_pos_1_v[1]-dip_pos_1[1], head_width = 0.2)
plt.arrow(dip_pos_2[0],dip_pos_2[1],dip_pos_2_v[0]-dip_pos_2[0],
dip_pos_2_v[1]-dip_pos_2[1], head_width = 0.2)
plt.arrow(dip_pos_3[0],dip_pos_3[1],dip_pos_3_v[0]-dip_pos_3[0],
dip_pos_3_v[1]-dip_pos_3[1], head_width = 0.2)
plt.plot(layer_1[:,0],layer_1[:,1], "o")
plt.plot(layer_2[:,0],layer_2[:,1], "o")
plt.plot(layer_3[:,0],layer_3[:,1], "o")
plt.plot(layer_1[:,0],layer_1[:,1], )
plt.plot(layer_2[:,0],layer_2[:,1], )
plt.plot(layer_3[:,0],layer_3[:,1], )
plt.contour(pot.transpose(),30,extent = (0,10,0,10) )
plt.colorbar()
plt.xlim(0,10)
plt.ylim(0,10)
plt.title("GeoMigueller v 0.1")
print (dip_pos_1_v, dip_pos_2_v, layer_1)
return pot
jhjs2 = pla(120,130,C_0=-0.5, C_01 = 1)
# jhjs = pla(120,-30, C_01 = 0.9)
jhjs = pla(120,-30)
jh = pla(120,0)
jhjs = pla(-2,0)
137.769228/3.184139677, -106.724083/-2.9572844241540727772132
3.16/0.15
59.12/3.15, 3.16/0.1425
51.109568/3.15, 2.669329/0.1425
45.047943/3.15, 2.29186/0.1425
layer_1 = np.array([[1,7],[5,7],[6,7], [9,8], ])
layer_2 = np.array([[1,1],[5,1],[9,1], ])
layer_3 = np.array([[1,1],[3,2],[7,4]])
dip_pos_1 = np.array([2,4])
dip_angle_1 = 45
dip_pos_1_v = np.array([np.cos(np.deg2rad(dip_angle_1))*1,
np.sin(np.deg2rad(dip_angle_1))]) + dip_pos_1
dip_pos_2 = np.array([9,7])
dip_angle_2 = 90
dip_pos_2_v = np.array([np.cos(np.deg2rad(dip_angle_2))*1,
np.sin(np.deg2rad(dip_angle_2))]) + dip_pos_2
dip_pos_3 = np.array([5,5])
dip_angle_3 = 90
dip_pos_3_v = np.array([np.cos(np.deg2rad(dip_angle_3))*1,
np.sin(np.deg2rad(dip_angle_3))]) + dip_pos_3
#print b([dip_pos_1,dip_pos_2], [dip_pos_1_v,dip_pos_2_v],13)
aa = A_matrix([layer_1,layer_2], [dip_pos_1,dip_pos_2], a = 6., alpha = 14)
bb = b([dip_pos_1,dip_pos_2], [dip_pos_1_v,dip_pos_2_v], 11)
print(bb)
sol = np.linalg.solve(aa,bb)
#sol[:-2] = 0
#print(aa)
print(sol)
pot = np.zeros((50,50))
for i in range(50):
for j in range(50):
pot[i,j] = estimator([i/5.,j/5.],[dip_pos_1,dip_pos_2],
[layer_1,layer_2], sol, verbose = 0, alpha = 14,
a = 6.)
plt.arrow(dip_pos_1[0],dip_pos_1[1], dip_pos_1_v[0]-dip_pos_1[0],
dip_pos_1_v[1]-dip_pos_1[1], head_width = 0.2)
plt.arrow(dip_pos_2[0],dip_pos_2[1],dip_pos_2_v[0]-dip_pos_2[0],
dip_pos_2_v[1]-dip_pos_2[1], head_width = 0.2)
#plt.arrow(dip_pos_3[0],dip_pos_3[1],dip_pos_3_v[0]-dip_pos_3[0],
# dip_pos_3_v[1]-dip_pos_3[1], head_width = 0.2)
plt.plot(layer_1[:,0],layer_1[:,1], "o")
plt.plot(layer_2[:,0],layer_2[:,1], "o")
#plt.plot(layer_3[:,0],layer_3[:,1], "o")
plt.plot(layer_1[:,0],layer_1[:,1], )
plt.plot(layer_2[:,0],layer_2[:,1], )
plt.contour(pot.transpose(),20,extent = (0,10,0,10) )
plt.colorbar()
plt.xlim(0,10)
plt.ylim(0,10)
print(dip_pos_1_v, dip_pos_2_v, layer_1)
np.cos(np.deg2rad(45))
layer_1 = np.array([[1,7],[5,7],[6,7], [9,7], ])
layer_2 = np.array([[1,1],[5,1],[9,1], ])
layer_3 = np.array([[1,1],[3,2],[7,4]])
dip_pos_1 = np.array([2,4])
dip_angle_1 = 100
dip_pos_1_v = np.array([np.cos(np.deg2rad(dip_angle_1))*1,
np.sin(np.deg2rad(dip_angle_1))]) + dip_pos_1
dip_pos_2 = np.array([8,5])
dip_angle_2 = 70
dip_pos_2_v = np.array([np.cos(np.deg2rad(dip_angle_2))*1,
np.sin(np.deg2rad(dip_angle_2))]) + dip_pos_2
dip_pos_3 = np.array([8,5])
dip_angle_3 = 90
dip_pos_3_v = np.array([np.cos(np.deg2rad(dip_angle_3))*1,
np.sin(np.deg2rad(dip_angle_3))]) + dip_pos_3
#print b([dip_pos_1,dip_pos_2], [dip_pos_1_v,dip_pos_2_v],13)
aa = A_matrix([layer_1,layer_2], [dip_pos_1,dip_pos_2], a = 6., alpha = 14)
bb = b([dip_pos_1,dip_pos_2], [dip_pos_1_v,dip_pos_2_v], 11)
print(bb)
sol = np.linalg.solve(aa,bb)
#sol[:-2] = 0
#print(aa)
print(sol)
pot = np.zeros((50,50))
for i in range(50):
for j in range(50):
pot[i,j] = estimator([i/5.,j/5.],[dip_pos_1,dip_pos_2],
[layer_1,layer_2], sol, verbose = 0, alpha = 14,
a = 6.)
plt.arrow(dip_pos_1[0],dip_pos_1[1], dip_pos_1_v[0]-dip_pos_1[0],
dip_pos_1_v[1]-dip_pos_1[1], head_width = 0.2)
plt.arrow(dip_pos_2[0],dip_pos_2[1],dip_pos_2_v[0]-dip_pos_2[0],
dip_pos_2_v[1]-dip_pos_2[1], head_width = 0.2)
#plt.arrow(dip_pos_3[0],dip_pos_3[1],dip_pos_3_v[0]-dip_pos_3[0],
# dip_pos_3_v[1]-dip_pos_3[1], head_width = 0.2)
plt.plot(layer_1[:,0],layer_1[:,1], "o")
plt.plot(layer_2[:,0],layer_2[:,1], "o")
#plt.plot(layer_3[:,0],layer_3[:,1], "o")
plt.plot(layer_1[:,0],layer_1[:,1], )
plt.plot(layer_2[:,0],layer_2[:,1], )
plt.contour(pot.transpose(),20,extent = (0,10,0,10) )
plt.colorbar()
plt.xlim(0,10)
plt.ylim(0,10)
print(dip_pos_1_v, dip_pos_2_v, layer_1)
%matplotlib inline
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
fig = plt.figure()
ax = fig.gca(projection='3d')
X = np.arange(0, 10, 0.1)
Y = np.arange(0, 10, 0.1)
X, Y = np.meshgrid(X, Y)
Z = pot.transpose()
surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.coolwarm,
linewidth=0, antialiased=False)
ax.set_xlabel("x")
ax.set_ylabel("y")
print "layer1",(pot.transpose()[1,7],pot.transpose()[3,4],pot.transpose()[8,5],
pot.transpose()[9,7])
print "layer2",pot.transpose()[1,3],pot.transpose()[3,4]
print "layer3",pot.transpose()[1,1],pot.transpose()[3,1],pot.transpose()[7,4]
layer_1 = np.array([[5,5],[3,5]])
layer_2 = np.array([[1,3],[5,3],[7,3],[9,3]])
dip_pos_1 = np.array([2,4])
dip_angle_1 = 90
dip_pos_1_v = np.array([np.cos(np.deg2rad(dip_angle_1))*1,
np.sin(np.deg2rad(dip_angle_1))]) + dip_pos_1
dip_pos_2 = np.array([6,4])
dip_angle_2 = 90
dip_pos_2_v = np.array([np.cos(np.deg2rad(dip_angle_2))*1,
np.sin(np.deg2rad(dip_angle_2))]) + dip_pos_2
#print b([dip_pos_1,dip_pos_2], [dip_pos_1_v,dip_pos_2_v],13)
bb = b([dip_pos_1], [dip_pos_1_v], 15 )
sol = np.linalg.solve(aa,bb)
print(sol)
pot = np.zeros((20,20))
for i in range(20):
for j in range(20):
pot[i,j] = estimator([i/2.,j/2.],[dip_pos_1,dip_pos_2],
[layer_1,], sol, verbose = 0)
plt.arrow(dip_pos_1[0],dip_pos_1[1], dip_pos_1_v[0]-dip_pos_1[0],
dip_pos_1_v[1]-dip_pos_1[1], head_width = 0.2)
plt.arrow(dip_pos_2[0],dip_pos_2[1],dip_pos_2_v[0]-dip_pos_2[0],
dip_pos_2_v[1]-dip_pos_2[1], head_width = 0.2)
plt.plot(layer_1[:,0],layer_1[:,1], "o")
plt.plot(layer_2[:,0],layer_2[:,1], "o")
plt.plot(layer_1[:,0],layer_1[:,1], )
plt.plot(layer_2[:,0],layer_2[:,1], )
plt.contour(pot,20, extent = (0,10,0,10) )
plt.colorbar()
plt.xlim(0,10)
plt.ylim(0,10)
print(dip_pos_1_v, dip_pos_2_v, layer_1)
plt.arrow?
###Output
_____no_output_____
###Markdown
Normal Universal cookriging
###Code
def G_f(dips,x):
dips = np.asarray(dips)
a = np.asarray(dips)
b = np.asarray(x)
# print a, a[:,0]
# print b,b[:,0]
Gx = b[0] - a[:,0]
Gy = b[1] -a[:,1]
G = np.hstack((Gx,Gy))
return G
def b(x, dips,n):
n -= len(dips)*2 # because x and y direction
G = G_f(dips,x)
b = np.hstack((G, np.zeros(n)))
return b,G
b([1,1],[dip_pos_1,dip_pos_2],13)
bb,g = b([1,1],[dip_pos_1,dip_pos_2],13)
len(bb)
sol = np.linalg.solve(aa,bb)
sol
dip_pos_1, dip_pos_2
z1 = dip_pos_1_v - dip_pos_1
z2 = dip_pos_2_v - dip_pos_2
print(z1, z2)
g
#=====================
# THE GRADIENTS
def h_f(dips, direct):
if direct == "x":
return np.abs(np.subtract.outer(dips[:,0],dips[:,0]))
if direct == "y":
return np.abs(np.subtract.outer(dips[:,1],dips[:,1]))
def C_G(dips, sig_z = 1., a = 6., nugget= 0.01):
dips = np.asarray(dips)
r = me.euclidean_distances(dips)
for i in "xy":
for j in "xy":
if j == "x":
h1 = h_f(dips, direct = i)
h2 = h_f(dips, direct = j)
# print h1,h2
C_G_row = (sig_z*h1*h2/a**2/r**2*
(1/r*cov_cubic_d1_f(r)-cov_cubic_d2_f(r)))
# print 1/r*cov_cubic_d1_f(r), cov_cubic_d2_f(r)
else:
h1 = h_f(dips, direct = i)
h2 = h_f(dips, direct = j)
C_G_row = np.hstack((C_G_row, (sig_z*h1*h2/a**2/r**2*
(1/r*cov_cubic_d1_f(r)-cov_cubic_d2_f(r)))))
if i == "x":
C_G = C_G_row
else:
C_G = np.vstack((C_G, C_G_row))
return np.nan_to_num(C_G)
###Output
_____no_output_____
###Markdown
Estimator geomodeller (maybe)
###Code
def estimator(x, dips, layers, sol, sig_z = 1., a = 6., alpha = 1, verbose = 0):
x = np.asarray(x).reshape(1,-1)
dips = np.asarray(dips)
layers = np.asarray(layers)
n = 0
m = len(dips)
# print layers
# print x.reshape(1,-1), dips
r_i = me.euclidean_distances(dips,x)
hx = h_f_GI(dips, x, "x")
Cov_d1 = cov_cubic_d1_f(r_i)
KzGx = sol[:m] * np.squeeze(alpha * sig_z / a**2 * hx / r_i * Cov_d1)
hy = h_f_GI(dips, x, "y")
KzGy = sol[m:2*m] * np.squeeze(alpha * sig_z / a**2 * hy / r_i * Cov_d1)
for s in range(len(layers)):
n += len(layers[s][1:])
a = cov_cubic_layer(x, layers[s][1:])
b = cov_cubic_layer(x, layers[s][0].reshape(1,-1))
# print a,b
if s == 0:
L = np.array(sol[2*m:2*m+n]*(a-b))
else:
L = np.hstack((L,sol[2*m+n2:2*m+n]*(a-b)))
n2 = n
L = np.squeeze(L)
# print m,n
univ = (sol[2*m+n]*x[0,0]**2 + sol[2*m+n+1] * x[0,1]**2
+ sol[2*m+n+2]* x[0,0]*x[0,1]
+ sol[2*m+n+3] * x[0,0]
+ sol[2*m+n+4] * x[0,1])
if verbose != 0:
        print(KzGx, KzGy, L, univ)
z_star = np.sum(KzGx)+np.sum(KzGy)+np.sum(L)+univ
return z_star
#========================================
#THE INTERACTION GRADIENTS/INTERFACES
def h_f_GI(dips, layers, direct):
if direct == "x":
return (np.subtract.outer(dips[:,0],layers[:,0]))
if direct == "y":
return (np.subtract.outer(dips[:,1],layers[:,1]))
def C_GI(dips,layers, sig_z = 1., a = 6., alpha = 14, verbose = 0):
dips = np.asarray(dips)
layers = np.asarray(layers)
for k in range(len(layers)):
for i in "xy":
r = me.euclidean_distances(dips,layers[k])
h1 = h_f_GI(dips,layers[k], i)
Cov_d1 = cov_cubic_d1_f(r)
if verbose != 0:
print "dips", dips
print "layers", layers
print "h1", h1, h1[:,0]
print ""
print "r", r, r[:,0]
print ""
print "Cov_d1", Cov_d1
if i == "x":
cov_1 = alpha * sig_z / a**2 * h1[:,0] / r[:,0] * Cov_d1[:,0]
cov_j = alpha * sig_z / a**2 * h1[:,1:] / r[:,1:] * Cov_d1[:,1:]
# C_GI_row = alpha * sig_z / a**2 * h1 / r * Cov_d1
#print "cov_j, cov_1", cov_j, cov_1.reshape(-1,1)
# pdb.set_trace()
C_GI_row = cov_j.transpose()-cov_1#.transpose()
else:
cov_1 = alpha * sig_z / a**2 * h1[:,0] / r[:,0] * Cov_d1[:,0]
cov_j = alpha * sig_z / a**2 * h1[:,1:] / r[:,1:] * Cov_d1[:,1:]
#C_GI_row = np.hstack((C_GI_row,
# alpha * sig_z / a**2 * h1 / r * Cov_d1))
#pdb.set_trace()
C_GI_row = np.hstack((C_GI_row, cov_j.transpose()-cov_1))
#.reshape(-1,1)))
if k==0:
C_GI = C_GI_row
else:
#pdb.set_trace()
C_GI = np.vstack((C_GI,C_GI_row))
return C_GI
###Output
_____no_output_____ |
practice/courses/Udacity Intro to Tensorflow/l08c06_forecasting_with_rnn.ipynb | ###Markdown
Copyright 2018 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Forecasting with an RNN
Setup
###Code
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
keras = tf.keras
def plot_series(time, series, format="-", start=0, end=None, label=None):
plt.plot(time[start:end], series[start:end], format, label=label)
plt.xlabel("Time")
plt.ylabel("Value")
if label:
plt.legend(fontsize=14)
plt.grid(True)
def trend(time, slope=0):
return slope * time
def seasonal_pattern(season_time):
"""Just an arbitrary pattern, you can change it if you wish"""
return np.where(season_time < 0.4,
np.cos(season_time * 2 * np.pi),
1 / np.exp(3 * season_time))
def seasonality(time, period, amplitude=1, phase=0):
"""Repeats the same pattern at each period"""
season_time = ((time + phase) % period) / period
return amplitude * seasonal_pattern(season_time)
def white_noise(time, noise_level=1, seed=None):
rnd = np.random.RandomState(seed)
return rnd.randn(len(time)) * noise_level
def window_dataset(series, window_size, batch_size=32,
shuffle_buffer=1000):
dataset = tf.data.Dataset.from_tensor_slices(series)
dataset = dataset.window(window_size + 1, shift=1, drop_remainder=True)
dataset = dataset.flat_map(lambda window: window.batch(window_size + 1))
dataset = dataset.shuffle(shuffle_buffer)
dataset = dataset.map(lambda window: (window[:-1], window[-1]))
dataset = dataset.batch(batch_size).prefetch(1)
return dataset
def model_forecast(model, series, window_size):
ds = tf.data.Dataset.from_tensor_slices(series)
ds = ds.window(window_size, shift=1, drop_remainder=True)
ds = ds.flat_map(lambda w: w.batch(window_size))
ds = ds.batch(32).prefetch(1)
forecast = model.predict(ds)
return forecast
time = np.arange(4 * 365 + 1)
slope = 0.05
baseline = 10
amplitude = 40
series = baseline + trend(time, slope) + seasonality(time, period=365, amplitude=amplitude)
noise_level = 5
noise = white_noise(time, noise_level, seed=42)
series += noise
plt.figure(figsize=(10, 6))
plot_series(time, series)
plt.show()
split_time = 1000
time_train = time[:split_time]
x_train = series[:split_time]
time_valid = time[split_time:]
x_valid = series[split_time:]
###Output
_____no_output_____
###Markdown
Simple RNN Forecasting
###Code
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
window_size = 30
train_set = window_dataset(x_train, window_size, batch_size=128)
model = keras.models.Sequential([
keras.layers.Lambda(lambda x: tf.expand_dims(x, axis=-1),
input_shape=[None]),
keras.layers.SimpleRNN(100, return_sequences=True),
keras.layers.SimpleRNN(100),
keras.layers.Dense(1),
keras.layers.Lambda(lambda x: x * 200.0) #scale
])
lr_schedule = keras.callbacks.LearningRateScheduler(
lambda epoch: 1e-7 * 10**(epoch / 20))
optimizer = keras.optimizers.SGD(lr=1e-7, momentum=0.9)
model.compile(loss=keras.losses.Huber(),
optimizer=optimizer,
metrics=["mae"])
history = model.fit(train_set, epochs=100, callbacks=[lr_schedule])
plt.semilogx(history.history["lr"], history.history["loss"])
plt.axis([1e-7, 1e-4, 0, 30])
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
window_size = 30
train_set = window_dataset(x_train, window_size, batch_size=128)
valid_set = window_dataset(x_valid, window_size, batch_size=128)
model = keras.models.Sequential([
keras.layers.Lambda(lambda x: tf.expand_dims(x, axis=-1),
input_shape=[None]),
keras.layers.SimpleRNN(100, return_sequences=True),
keras.layers.SimpleRNN(100),
keras.layers.Dense(1),
keras.layers.Lambda(lambda x: x * 200.0)
])
optimizer = keras.optimizers.SGD(lr=1.5e-6, momentum=0.9)
model.compile(loss=keras.losses.Huber(),
optimizer=optimizer,
metrics=["mae"])
early_stopping = keras.callbacks.EarlyStopping(patience=50)
model_checkpoint = keras.callbacks.ModelCheckpoint(
"my_checkpoint", save_best_only=True)
model.fit(train_set, epochs=500,
validation_data=valid_set,
callbacks=[early_stopping, model_checkpoint])
model = keras.models.load_model("my_checkpoint")
rnn_forecast = model_forecast(
model,
series[split_time - window_size:-1],
window_size)[:, 0]
plt.figure(figsize=(10, 6))
plot_series(time_valid, x_valid)
plot_series(time_valid, rnn_forecast)
keras.metrics.mean_absolute_error(x_valid, rnn_forecast).numpy()
###Output
_____no_output_____
###Markdown
Sequence-to-Sequence Forecasting
###Code
def seq2seq_window_dataset(series, window_size, batch_size=32,
shuffle_buffer=1000):
series = tf.expand_dims(series, axis=-1)
ds = tf.data.Dataset.from_tensor_slices(series)
ds = ds.window(window_size + 1, shift=1, drop_remainder=True)
ds = ds.flat_map(lambda w: w.batch(window_size + 1))
ds = ds.shuffle(shuffle_buffer)
ds = ds.map(lambda w: (w[:-1], w[1:]))
return ds.batch(batch_size).prefetch(1)
for X_batch, Y_batch in seq2seq_window_dataset(tf.range(10), 3,
batch_size=1):
print("X:", X_batch.numpy())
print("Y:", Y_batch.numpy())
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
window_size = 30
train_set = seq2seq_window_dataset(x_train, window_size,
batch_size=128)
model = keras.models.Sequential([
keras.layers.SimpleRNN(100, return_sequences=True,
input_shape=[None, 1]),
keras.layers.SimpleRNN(100, return_sequences=True),
keras.layers.Dense(1),
keras.layers.Lambda(lambda x: x * 200)
])
lr_schedule = keras.callbacks.LearningRateScheduler(
lambda epoch: 1e-7 * 10**(epoch / 30))
optimizer = keras.optimizers.SGD(lr=1e-7, momentum=0.9)
model.compile(loss=keras.losses.Huber(),
optimizer=optimizer,
metrics=["mae"])
history = model.fit(train_set, epochs=100, callbacks=[lr_schedule])
plt.semilogx(history.history["lr"], history.history["loss"])
plt.axis([1e-7, 1e-4, 0, 30])
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
window_size = 30
train_set = seq2seq_window_dataset(x_train, window_size,
batch_size=128)
valid_set = seq2seq_window_dataset(x_valid, window_size,
batch_size=128)
model = keras.models.Sequential([
keras.layers.SimpleRNN(100, return_sequences=True,
input_shape=[None, 1]),
keras.layers.SimpleRNN(100, return_sequences=True),
keras.layers.Dense(1),
keras.layers.Lambda(lambda x: x * 200.0)
])
optimizer = keras.optimizers.SGD(lr=1e-6, momentum=0.9)
model.compile(loss=keras.losses.Huber(),
optimizer=optimizer,
metrics=["mae"])
early_stopping = keras.callbacks.EarlyStopping(patience=10)
model.fit(train_set, epochs=500,
validation_data=valid_set,
callbacks=[early_stopping])
rnn_forecast = model_forecast(model, series[..., np.newaxis], window_size)
rnn_forecast = rnn_forecast[split_time - window_size:-1, -1, 0]
plt.figure(figsize=(10, 6))
plot_series(time_valid, x_valid)
plot_series(time_valid, rnn_forecast)
keras.metrics.mean_absolute_error(x_valid, rnn_forecast).numpy()
###Output
_____no_output_____ |
src/Split_DataSet.ipynb | ###Markdown
Split Folders package
Split folders with files (e.g. images) into train, validation and test (dataset) folders. The input folder should have the following format:
    input/
        class1/
            img1.jpg
            img2.jpg
            ...
        class2/
            imgWhatever.jpg
            ...
        ...
In order to give you this:
    output/
        train/
            class1/
                img1.jpg
                ...
            class2/
                imga.jpg
                ...
        val/
            class1/
                img2.jpg
                ...
            class2/
                imgb.jpg
                ...
        test/
            class1/
                img3.jpg
                ...
            class2/
                imgc.jpg
                ...
This should get you started to do some serious deep learning on your data. Read here why it's a good idea to split your data into three different sets. You may only split into a training and validation set. The data gets split before it gets shuffled. A seed lets you reproduce the splits. Works on any file types. Allows randomized oversampling for imbalanced datasets. (Should) work on all operating systems.
Usage
You can use split_folders as a Python module or as a Command Line Interface (CLI). If your dataset is balanced (each class has the same number of samples), choose ratio, otherwise fixed. NB: oversampling is turned off by default.
###Code
import split_folders
# Split with a ratio.
# To only split into training and validation set, set a tuple to `ratio`, i.e, `(.8, .2)`.
split_folders.ratio('input_folder', output="output", seed=1337, ratio=(.8, .1, .1)) # default values
# Split val/test with a fixed number of items e.g. 100 for each set.
# To only split into training and validation set, use a single number to `fixed`, i.e., `10`.
split_folders.fixed('input_folder', output="output", seed=1337, fixed=(100, 100), oversample=False) # default values
import split_folders
input_folder = "D:/Projects/UCMerced_LandUse/Images"
output_folder = "D:/Projects/UCMerced_LandUse_split/Images"
print("DataSet split into train/validation/test started")
split_folders.ratio(input_folder, output_folder, seed=1337, ratio=(.8, .1, .1))
print("DataSet split into train/validation/test completed!")
###Output
DataSet split into train/validation/test started
DataSet split into train/validation/test completed!
|
notebooks/matplotlib-customize.ipynb | ###Markdown
Making custom plots with matplotlib. By [Terence Parr](https://parrt.cs.usfca.edu). If you like visualization in machine learning, check out my stuff at [explained.ai](https://explained.ai). The matplotlib library has a lot of capabilities, but there's a lot of customization that you can do above and beyond the basic plotting functionality. You can even create your own kinds of plots by using the drawing and annotation primitives.
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.patches as patches # for drawing shapes
%config InlineBackend.figure_format = 'retina'
df_cars = pd.read_csv("data/cars.csv")
# Get average miles per gallon for each car with the same number of cylinders
avg_mpg = df_cars.groupby('CYL').mean()['MPG']
avg_wgt = df_cars.groupby('CYL').mean()['WGT'] # do the same for average weight
# Get average miles per gallon for each car with the same weight
avg_mpg_per_wgt = df_cars.groupby('WGT').mean()['MPG']
# Get the unique list of cylinders in numerical order
cyl = sorted(df_cars['CYL'].unique())
# Get a list of all mpg values for three specific cylinder sizes
cyl4 = df_cars[df_cars['CYL']==4]['MPG'].values
cyl6 = df_cars[df_cars['CYL']==6]['MPG'].values
cyl8 = df_cars[df_cars['CYL']==8]['MPG'].values
###Output
_____no_output_____
###Markdown
Annotating graphs with text and lines. Once you've drawn a plot, it's a good idea to go back and annotate it to highlight interesting features. Let's get the cars data again and redraw the histogram of car weights, but this time let's annotate it.
###Code
fig, ax = plt.subplots(figsize=(4,3))
wgt = df_cars['WGT']
n, bins, hpatches = ax.hist(wgt, color='#FEE08F') # save the results of hist
ax.set_xlabel("Weight (lbs)")
ax.set_ylabel("Count at that weight")
ax.set_title("Weight histogram")
# iterate through the rectangles associated with each bar
for rect in hpatches:
rect.set_linewidth(.5)
rect.set_edgecolor('grey')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_linewidth(.5)
ax.spines['bottom'].set_linewidth(.5)
# --------------------------------------------------------------------------------
# New stuff a horizontal line, and annotated arrow, and a wedge beneath the X axis.
# --------------------------------------------------------------------------------
mx = wgt.mean()
my = np.mean(n)
# Add an arrow with text pointing to something
ax.annotate('check this out', xy=(2500, 60), xytext=(2800, 80),
arrowprops=dict(color='black',arrowstyle='->'), fontsize=11)
ax.text(max(wgt), my+1, "mean count",
horizontalalignment='right', fontsize=11)
# Draw a horizontal dashed line at the mean
ax.plot([min(wgt),max(wgt)], [my,my], ':', c='#415BA3', lw=.8)
# Draw a wedge underneath the axis
tria = [(mx,0),(mx+90,-5),(mx-90,-5)]
tria = np.array(tria)
wedge = patches.Polygon(tria, closed=True, facecolor='#415BA3')
wedge.set_clip_on(False) # absolutely critical to drawing outside the graph area
ax.add_patch(wedge)
ax.tick_params(axis='x', which='major', pad=10) # make room for the wedge
ax.text(mx+90,-5,"mean",fontsize=9)
ax.set_ylim(0,90)
plt.show()
###Output
_____no_output_____
###Markdown
Exercise 1: Add annotations to the following plot to show the intersections. You will have to move the legend to the center right as well. A possible sketch is shown after the starter cell below.
###Code
fig, ax = plt.subplots(figsize=(4,3)) # make one subplot (ax) on the figure
ax.plot(cyl, avg_mpg, c='#4574B4', label="mpg") # Those are 6-digit hexadecimal numbers for red-green-blue
ax.plot(cyl, avg_wgt/100, c='#F46C43', label="wgt")
# ... add annotations here ...
plt.legend(loc='center right')
plt.show()
###Output
_____no_output_____
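###Markdown
One possible sketch for Exercise 1. Rather than hard-coding the crossing coordinates (which depend on the data), it locates sign changes of the difference between the two series and annotates them; it reuses `cyl`, `avg_mpg`, and `avg_wgt` from the setup cell:
```python
import numpy as np
import matplotlib.pyplot as plt

fig, ax = plt.subplots(figsize=(4,3))
ax.plot(cyl, avg_mpg, c='#4574B4', label="mpg")
ax.plot(cyl, avg_wgt/100, c='#F46C43', label="wgt")

# locate crossings: where the sign of (mpg - wgt/100) changes between neighbors
mpg_v = np.asarray(avg_mpg.values, dtype=float)
wgt_v = np.asarray(avg_wgt.values, dtype=float) / 100
cyl_v = np.asarray(cyl, dtype=float)
diff = mpg_v - wgt_v
for i in np.where(np.sign(diff[:-1]) != np.sign(diff[1:]))[0]:
    t = diff[i] / (diff[i] - diff[i+1])           # linear interpolation factor
    x = cyl_v[i] + t * (cyl_v[i+1] - cyl_v[i])
    y = mpg_v[i] + t * (mpg_v[i+1] - mpg_v[i])
    ax.annotate("curves cross", xy=(x, y), xytext=(x+0.4, y+4),
                arrowprops=dict(color='black', arrowstyle='->'), fontsize=9)

plt.legend(loc='center right')
plt.show()
```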
###Markdown
Your result might look something like this: Adding shapes to graphs. Let's say we want to fill a two-dimensional region with different color shapes. To do that, we need to add so-called [Patches](https://matplotlib.org/api/patches_api.html?highlight=patchesmodule-matplotlib.patches) to the drawing area. We need a new import:
###Code
import matplotlib.patches as patches
###Output
_____no_output_____
###Markdown
The basic idea is to create a patch and then add it to the drawing area, `ax`. We also have to set the X and Y limits because the library does not figure this out from the patches we add.
###Code
fig, ax = plt.subplots(figsize=(4,3))
ax.set_xlim(0,50)
ax.set_ylim(0,50)
rect = patches.Rectangle(xy=(5,20), width=40, height=25,
facecolor='#E0F4F7', linewidth=.5, edgecolor="grey")
ax.add_patch(rect)
rect = patches.Rectangle(xy=(20,10), width=10, height=20, alpha=.75,
facecolor='#FEE08F', linewidth=.5, edgecolor="grey")
ax.add_patch(rect)
ax.add_patch( patches.Wedge(center=(5,5), r=10, theta1=0, theta2=90,
facecolor='#73ADD2', linewidth=.5, edgecolor="black") )
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_linewidth(.5)
ax.spines['bottom'].set_linewidth(.5)
plt.show()
###Output
_____no_output_____
###Markdown
Note: the order in which we add the patches is relevant. Drawing the orange after the cyan puts the orange one on top. I have set the alpha channel to be slightly transparent on the orange one. Exercise 2Fill in the following code so that it draws rectangles at random locations, with random width and height, and random color. It might look like this:
###Code
fig, ax = plt.subplots(figsize=(4,3))
size = 50
ax.set_xlim(0,size)
ax.set_ylim(0,size)
n = 5
xy = np.random.rand(n,2) * size
w = np.random.rand(n) * size/2
h = np.random.rand(n) * size/2
# get mapping of n colors in the coolwarm colormap
cmap = plt.get_cmap('coolwarm')
colors=cmap(np.linspace(0,1,num=n)) # get n colors
# ... Draw random rectangles ...
plt.show()
###Output
_____no_output_____
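###Markdown
A possible way to fill in Exercise 2, reusing the `n`, `size`, `xy`, `w`, `h`, and `colors` variables from the starter cell above:
```python
import matplotlib.pyplot as plt
import matplotlib.patches as patches

fig, ax = plt.subplots(figsize=(4,3))
ax.set_xlim(0, size)
ax.set_ylim(0, size)
for i in range(n):
    # one rectangle per random location/size, colored from the coolwarm mapping
    rect = patches.Rectangle(xy=xy[i], width=w[i], height=h[i],
                             facecolor=colors[i], alpha=.6,
                             linewidth=.5, edgecolor="grey")
    ax.add_patch(rect)
plt.show()
```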
###Markdown
Strip plots. Box plots are a common mechanism to display information about the distribution of a collection of numbers. However, the box plot still shows more or less point statistics. A violin plot tries to show the shape of the distribution by varying the width. I actually prefer something called a strip plot, but it is not a standard plot so we have to do it ourselves. The idea is simply to scatterplot all values but add noise to the X or Y values, depending on the orientation. Let's make a vertical strip plot for three series from the cars data set. If we just plot all of the miles per gallon values for 4, 6, and 8 cylinder cars, we get the following unsatisfying graph. Despite lowering the transparency, we still don't have a clear idea about where the density lies.
###Code
fig, ax = plt.subplots(figsize=(4,3))
n4 = len(cyl4)
n6 = len(cyl6)
n8 = len(cyl8)
ax.scatter([4]*n4, cyl4, alpha=.2)
ax.scatter([6]*n6, cyl6, alpha=.2)
ax.scatter([8]*n8, cyl8, alpha=.2)
ax.set_xlabel("Cylinders")
ax.set_ylabel("MPG")
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_linewidth(.5)
ax.spines['bottom'].set_linewidth(.5)
plt.show()
fig, ax = plt.subplots(figsize=(4,3))
n4 = len(cyl4)
n6 = len(cyl6)
n8 = len(cyl8)
sigma = .05
mu = 0
x_noise4 = np.random.normal(mu, sigma, size=n4)
x_noise6 = np.random.normal(mu, sigma, size=n6)
x_noise8 = np.random.normal(mu, sigma, size=n8)
ax.scatter(4+x_noise4, cyl4, alpha=.2)
ax.scatter(6+x_noise6, cyl6, alpha=.2)
ax.scatter(8+x_noise8, cyl8, alpha=.2)
pad = 4*sigma
ax.set_xlim(4-pad,8+pad)
ax.set_xlabel("Cylinders")
ax.set_ylabel("MPG")
ax.set_title("Strip plot of # cylinders vs MPG")
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_linewidth(.5)
ax.spines['bottom'].set_linewidth(.5)
plt.show()
###Output
_____no_output_____
###Markdown
Exercise 3Using the same cylinder vs mpg data, create a horizontal strip plot where the number of cylinders is on the vertical axis and the miles per gallon is on the horizontal axis. Line + text drawingsThere are times when we want something that looks a bit more like an "infographic". As an example, let's look at some world happiness scores and see how they change from 2015 to 2016 (data is in the [data directory](https://github.com/parrt/msds593/tree/master/notebooks/data)):
###Code
df_2015 = pd.read_csv("data/happy-2015.csv")
df_2016 = pd.read_csv("data/happy-2016.csv")
df_2015.head(2)
countries = ['Finland','Canada','Norway']
countries = ['Syria','Togo','Burundi']
scores = dict()
for c in countries:
a = df_2015.loc[df_2015['Country']==c, "Happiness Score"].iloc[0]
b = df_2016.loc[df_2016['Country']==c, "Happiness Score"].iloc[0]
scores[c] = (a,b)
scores
###Output
_____no_output_____
###Markdown
Now that we've pulled out the data we want for three countries, let's do some plotting with just lines and text. The axes are a bit tricky to get right.
###Code
fig, ax = plt.subplots(figsize=(3,3))
# Let's use 0 as the left-hand side and 1 as the right-hand side
# (below we will set labels to 2015 for 0 and 2016 for 1)
ax.set_xlim(0-.1,1+.1)
ax.set_ylim(2.7,3.32)
# Draw lines and text associated with scores
for c in scores:
a,b = scores[c]
color = '#878787'
if c=='Togo':
color = '#F46C43'
ax.plot([0,1], [a,b], 'o-', lw=2, c=color)
ax.text(0-.04, a, f"{a:.1f}", color='#878787',
horizontalalignment='right', verticalalignment='center')
ax.text(1+.04, b, f"{b:.1f}", color='#878787',
horizontalalignment='left', verticalalignment='center')
ax.text(0-.20, a, c, color='#878787',
horizontalalignment='right', verticalalignment='center')
# Make the axes look right
ax.set_title("Happiness scores\n2015 - 2016")
ax.spines['bottom'].set_bounds(0, 1)
ax.set_xticks([0,1])
ax.set_xticklabels(['2015','2016'])
ax.set_yticks([])
# Only show the bottom axis
ax.spines['left'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_linewidth(.5)
plt.show()
###Output
_____no_output_____
###Markdown
Making custom plots with matplotlibBy [Terence Parr](https://parrt.cs.usfca.edu). If you like visualization in machine learning, check out my stuff at [explained.ai](https://explained.ai).The matplotlib library has a lot of capabilities, but there's a lot of customization that you can do above and beyond the basic plotting functionality. You can even create your own kinds of plots by using the drawing and annotation primitives.
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.patches as patches # for drawing shapes
%config InlineBackend.figure_format = 'retina'
df_cars = pd.read_csv("data/cars.csv")
# Get average miles per gallon for each car with the same number of cylinders
avg_mpg = df_cars.groupby('CYL').mean()['MPG']
avg_wgt = df_cars.groupby('CYL').mean()['WGT'] # do the same for average weight
# Get average miles per gallon for each car with the same weight
avg_mpg_per_wgt = df_cars.groupby('WGT').mean()['MPG']
# Get the unique list of cylinders in numerical order
cyl = sorted(df_cars['CYL'].unique())
# Get a list of all mpg values for three specific cylinder sizes
cyl4 = df_cars[df_cars['CYL']==4]['MPG'].values
cyl6 = df_cars[df_cars['CYL']==6]['MPG'].values
cyl8 = df_cars[df_cars['CYL']==8]['MPG'].values
###Output
_____no_output_____
###Markdown
Annotating graphs with text and linesOnce you've drawn a plot, it's a good idea to go back and annotate it to highlight interesting features. Let's get the cars data again and redraw the histogram of car weights, but this time let's annotate it.
###Code
fig, ax = plt.subplots(figsize=(4,3))
wgt = df_cars['WGT']
n, bins, hpatches = ax.hist(wgt, color='#FEE08F') # save the results of hist
ax.set_xlabel("Weight (lbs)")
ax.set_ylabel("Count at that weight")
ax.set_title("Weight histogram")
# iterate through the rectangles associated with each bar
for rect in hpatches:
rect.set_linewidth(.5)
rect.set_edgecolor('grey')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_linewidth(.5)
ax.spines['bottom'].set_linewidth(.5)
# --------------------------------------------------------------------------------
# New stuff: a horizontal line, an annotated arrow, and a wedge beneath the X axis.
# --------------------------------------------------------------------------------
mx = wgt.mean()
my = np.mean(n)
# Add an arrow with text pointing to something
ax.annotate('check this out', xy=(2500, 60), xytext=(2800, 80),
arrowprops=dict(color='black',arrowstyle='->'), fontsize=11)
ax.text(max(wgt), my+1, "mean count",
horizontalalignment='right', fontsize=11)
# Draw a horizontal dashed line at the mean
ax.plot([min(wgt),max(wgt)], [my,my], ':', c='#415BA3', lw=.8)
# Draw a wedge underneath the axis
tria = [(mx,0),(mx+90,-5),(mx-90,-5)]
tria = np.array(tria)
wedge = patches.Polygon(tria, closed=True, facecolor='#415BA3')
wedge.set_clip_on(False) # absolutely critical to drawing outside the graph area
ax.add_patch(wedge)
ax.text(mx+90,-5,"mean",fontsize=9)
ax.tick_params(axis='x', pad=10) # make room for the wedge
ax.set_ylim(0,90)
plt.show()
###Output
_____no_output_____
###Markdown
Exercise 1Add annotations to the following plot to show the intersections. You might also move the legend to the center right.
###Code
fig, ax = plt.subplots(figsize=(4,3)) # make one subplot (ax) on the figure
ax.plot(cyl, avg_mpg, c='#4574B4', label="mpg") # Those are 6-digit hexadecimal numbers for red-green-blue
ax.plot(cyl, avg_wgt/100, c='#F46C43', label="wgt")
# ... add annotations here ...
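# One possible sketch (added; not the notebook's official solution). The xy values
# below are illustrative guesses -- read the real crossing point off your plot and
# adjust them. The exercise also suggests passing loc='center right' to plt.legend() below.
ax.annotate('curves cross near here', xy=(4.8, 26), xytext=(5.3, 33),
            arrowprops=dict(color='black', arrowstyle='->'), fontsize=9)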
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Your result might look something like this: Adding shapes to graphsLet's say we want to fill a two-dimensional region with different color shapes. To do that, we need to add so-called [Patches](https://matplotlib.org/api/patches_api.html?highlight=patchesmodule-matplotlib.patches) to the drawing area. We need a new import:
###Code
import matplotlib.patches as patches
###Output
_____no_output_____
###Markdown
The basic idea is to create a patch and then add it to the drawing area, `ax`. We also have to set the X and Y limits because the library does not figure this out from the patches we add.
###Code
fig, ax = plt.subplots(figsize=(4,3))
ax.set_xlim(0,50)
ax.set_ylim(0,50)
rect = patches.Rectangle(xy=(5,20), width=40, height=25,
facecolor='#E0F4F7', linewidth=.5, edgecolor="grey")
ax.add_patch(rect)
rect = patches.Rectangle(xy=(20,10), width=10, height=20, alpha=.75,
facecolor='#FEE08F', linewidth=.5, edgecolor="grey")
ax.add_patch(rect)
wedge = patches.Wedge(center=(5,5), r=10, theta1=0, theta2=90,
facecolor='#73ADD2', linewidth=.5, edgecolor="black")
ax.add_patch(wedge)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_linewidth(.5)
ax.spines['bottom'].set_linewidth(.5)
plt.show()
###Output
_____no_output_____
###Markdown
Note: the order in which we add the patches is relevant. Drawing the orange after the cyan puts the orange one on top. I have set the alpha channel to be slightly transparent on the orange one. Exercise 2Fill in the following code so that it draws rectangles at random locations, with random width and height, and random color. It might look like this:
###Code
fig, ax = plt.subplots(figsize=(4,3))
size = 50
ax.set_xlim(0,size)
ax.set_ylim(0,size)
n = 5
xy = np.random.rand(n,2) * size
w = np.random.rand(n) * size/2
h = np.random.rand(n) * size/2
# get mapping of n colors in the coolwarm colormap
cmap = plt.get_cmap('coolwarm')
colors=cmap(np.linspace(0,1,num=n)) # get n colors
# ... Draw random rectangles ...
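# One possible solution sketch (added, not the official answer): draw each
# rectangle at its random position with its random size and colormap color.
for i in range(n):
    ax.add_patch(patches.Rectangle(xy=tuple(xy[i]), width=w[i], height=h[i],
                                   facecolor=colors[i], alpha=.75,
                                   linewidth=.5, edgecolor='grey'))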
plt.show()
xy[3]
###Output
_____no_output_____
###Markdown
Strip plotsBox plots are a common mechanism to display information about the distribution of a collection of numbers. However, the box plot still shows more or less just point statistics. A violin plot tries to show the shape of the distribution by varying the width. I actually prefer something called a strip plot, but it is not a standard plot so we have to do it ourselves. The idea is simply to scatterplot all values but add noise to the X or Y values, depending on the orientation. Let's make a vertical strip plot for three series from the cars data set. If we just plot all of the miles per gallon values for 4, 6, and 8 cylinder cars, we get the following unsatisfying graph. Despite setting the transparency, we still don't have a clear idea about where the density lies along the Y direction.
###Code
fig, ax = plt.subplots(figsize=(4,3))
n4 = len(cyl4)
n6 = len(cyl6)
n8 = len(cyl8)
ax.scatter([4]*n4, cyl4, alpha=.2)
ax.scatter([6]*n6, cyl6, alpha=.2)
ax.scatter([8]*n8, cyl8, alpha=.2)
ax.set_xlabel("Cylinders")
ax.set_ylabel("MPG")
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_linewidth(.5)
ax.spines['bottom'].set_linewidth(.5)
plt.show()
###Output
_____no_output_____
###Markdown
To fix this, all we have to do now is add some noise to the X coordinate for each point:
###Code
fig, ax = plt.subplots(figsize=(4,3))
n4 = len(cyl4)
n6 = len(cyl6)
n8 = len(cyl8)
sigma = .05
mu = 0
x_noise4 = np.random.normal(mu, sigma, size=n4)
x_noise6 = np.random.normal(mu, sigma, size=n6)
x_noise8 = np.random.normal(mu, sigma, size=n8)
ax.scatter(4+x_noise4, cyl4, alpha=.2) # plot at X=4 plus some noise; Y is same as before
ax.scatter(6+x_noise6, cyl6, alpha=.2)
ax.scatter(8+x_noise8, cyl8, alpha=.2)
pad = 4*sigma
ax.set_xlim(4-pad,8+pad) # leave room for the noisy X coordinates so they don't get clipped
ax.set_xlabel("Cylinders")
ax.set_ylabel("MPG")
ax.set_title("Strip plot of # cylinders vs MPG")
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_linewidth(.5)
ax.spines['bottom'].set_linewidth(.5)
plt.show()
###Output
_____no_output_____
###Markdown
Exercise 3Using the same cylinder vs mpg data, create a horizontal strip plot where the number of cylinders is on the vertical axis and the miles per gallon is on the horizontal axis. Hints: flip the x,y labels and the arguments of `scatter()`. Line + text drawingsThere are times when we want something that looks a bit more like an "infographic". As an example, let's look at some world happiness scores and see how they change from 2015 to 2016 (data is in the [data directory](https://github.com/parrt/msds593/tree/master/notebooks/data)):
###Code
df_2015 = pd.read_csv("data/happy-2015.csv")
df_2016 = pd.read_csv("data/happy-2016.csv")
df_2015.head(2)
countries = ['Finland','Canada','Norway']
countries = ['Syria','Togo','Burundi']
scores = dict()
for c in countries:
a = df_2015.loc[df_2015['Country']==c, "Happiness Score"].iloc[0]
b = df_2016.loc[df_2016['Country']==c, "Happiness Score"].iloc[0]
scores[c] = (a,b)
scores
###Output
_____no_output_____
###Markdown
Now that we've pulled out the data we want for three countries, let's do some plotting with just lines and text. The axes are a bit tricky to get right.
###Code
fig, ax = plt.subplots(figsize=(3,3))
# Let's use 0 as the left-hand side and 1 as the right-hand side
# (below we will set labels to 2015 for 0 and 2016 for 1)
ax.set_xlim(0-.1,1+.1)
ax.set_ylim(2.7,3.32)
# Draw lines and text associated with scores
for c in scores:
a,b = scores[c]
color = '#878787'
if c=='Togo':
color = '#F46C43'
ax.plot([0,1], [a,b], 'o-', lw=2, c=color)
ax.text(0-.04, a, f"{a:.1f}", color='#878787',
horizontalalignment='right', verticalalignment='center')
ax.text(1+.04, b, f"{b:.1f}", color='#878787',
horizontalalignment='left', verticalalignment='center')
ax.text(0-.20, a, c, color='#878787',
horizontalalignment='right', verticalalignment='center')
# Make the axes look right
ax.set_title("Happiness scores\n2015 - 2016")
ax.spines['bottom'].set_bounds(0, 1)
ax.set_xticks([0,1])
ax.set_xticklabels(['2015','2016'])
ax.set_yticks([])
# Only show the bottom axis
ax.spines['left'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_linewidth(.5)
plt.show()
###Output
_____no_output_____
notebooks/ch01_python.ipynb | ###Markdown
Chapter 1: Python basicsThis notebook explains only the concepts that are important for deep-learning programming with PyTorch
###Code
# 必要ライブラリの導入
!pip install japanize_matplotlib | tail -n 1
# 必要ライブラリのインポート
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import japanize_matplotlib
# warning表示off
import warnings
warnings.simplefilter('ignore')
# デフォルトフォントサイズ変更
plt.rcParams['font.size'] = 14
# デフォルトグラフサイズ変更
plt.rcParams['figure.figsize'] = (6,6)
# デフォルトで方眼表示ON
plt.rcParams['axes.grid'] = True
# numpyの表示桁数設定
np.set_printoptions(suppress=True, precision=5)
###Output
_____no_output_____
###Markdown
1.2 Beware of container variablesIn Python, a variable is merely a pointer to the actual data structure. With NumPy arrays and the like, not keeping this in mind can lead to unexpected results. Between NumPy variables
###Code
# Numpy配列 x1 を定義
x = np.array([5, 7, 9])
# 変数yにxを代入する
# このとき、実体は共通なまま
y = x
# 結果確認
print(x)
print(y)
# ここでxの特定の要素の値を変更する
x[1] = -1
# すると、yも連動して値が変わる
print(x)
print(y)
# yも同時に変化して困る場合は、代入時にcopy関数を利用する
x = np.array([5, 7, 9])
y = x.copy()
# すると、xの特定の要素値の変更がyに影響しなくなる
x[1] = -1
print(x)
print(y)
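# Added illustration: plain Python lists behave the same way as the arrays above --
# assignment copies the pointer, not the data; list.copy() breaks the link.
a = [5, 7, 9]
b = a            # b points at the same list object
b[1] = -1
print(a)         # a is changed as well
c = a.copy()
c[0] = 99
print(a)         # unaffected by changes to the copy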
###Output
_____no_output_____
###Markdown
Between tensors and NumPy
###Code
import torch
# x1: shape=[5] となるすべて値が1テンソル
x1 = torch.ones(5)
# 結果確認
print(x1)
# x2 x1から生成したNumPy
x2 = x1.data.numpy()
# 結果確認
print(x2)
# x1の値を変更
x1[1] = -1
# 連動してx2の値も変わる
print(x1)
print(x2)
# 安全な方法
# x1 テンソル
x1 = torch.ones(5)
# x2 x1から生成したNumPy
x2 = x1.data.numpy().copy()
x1[1] = -1
# 結果確認
print(x1)
print(x2)
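# Added note: sharing also happens in the other direction. torch.from_numpy()
# wraps the NumPy buffer (shared memory), while torch.tensor() makes a copy.
a = np.ones(3)
t_shared = torch.from_numpy(a)   # shares memory with a
t_copy = torch.tensor(a)         # independent copy
a[0] = -1
print(t_shared)                  # reflects the change to a
print(t_copy)                    # unchanged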
###Output
_____no_output_____
###Markdown
1.3 Composite functions in mathematics and in PythonLet's see how a mathematical composite function is implemented in Python. Define $f(x) = 2x^2 + 2$ as a function
###Code
def f(x):
return (2 * x**2 + 2)
# xをnumpy配列で定義
x = np.arange(-2, 2.1, 0.25)
print(x)
# f(x)の結果をyに代入
y = f(x)
print(y)
# 関数のグラフ表示
plt.plot(x, y)
plt.show()
# 3つの基本関数の定義
def f1(x):
return(x**2)
def f2(x):
return(x*2)
def f3(x):
return(x+2)
# 合成関数を作る
x1 = f1(x)
x2 = f2(x1)
y = f3(x2)
# 合成関数の値の確認
print(y)
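# Added check: the three-step composition is the same as one nested call,
# and both equal the original f(x) = 2*x**2 + 2.
print(np.allclose(f3(f2(f1(x))), f(x)))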
# 合成関数のグラフ表示
plt.plot(x, y)
plt.show()
###Output
_____no_output_____
###Markdown
1.4 Derivatives in mathematics and numerical differentiation in PythonIn Python, a function name is also just a pointer, with the actual object stored elsewhere. Exploiting this, we can build a "function that takes a function as an argument". Here we define a function ``diff`` that numerically differentiates another function. For the numerical derivative we use the central-difference formula $f'(x) = \dfrac{f(x+h)-f(x-h)}{2h}$, which is a better approximation than the plain definition of the derivative.
###Code
# 関数を微分する関数fdiffの定義
def fdiff(f):
# 関数fを引数に微分した結果の関数をdiffとして定義
def diff(x):
h = 1e-6
return (f(x+h) - f(x-h)) / (2*h)
# fdiffの戻りは微分した結果の関数diff
return diff
###Output
_____no_output_____
###Markdown
Let's apply the fdiff function we just created to the quadratic function f and compute its numerical derivative.
###Code
# 2次関数の数値微分
# fの微分結果の関数diffを取得
diff = fdiff(f)
# 微分結果を計算しy_dashに代入
y_dash = diff(x)
# 結果確認
print(y_dash)
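# Added sanity check: analytically f'(x) = 4*x for f(x) = 2*x**2 + 2,
# so the numerical derivative should match it closely.
print(np.allclose(y_dash, 4 * x, atol=1e-3))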
# 結果のグラフ表示
plt.plot(x, y, label=r'y = f(x)', c='b')
plt.plot(x, y_dash, label=r"y = f '(x)", c='k')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Now do the same for the sigmoid function $g(x) = \dfrac{1}{1 + \exp(-x)}$.
###Code
# シグモイド関数の定義
def g(x):
return 1 / (1 + np.exp(-x))
# シグモイド関数の計算
y = g(x)
print(y)
# 関数のグラフ表示
plt.plot(x, y)
plt.show()
# シグモイド関数の数値微分
# gを微分した関数を取得
diff = fdiff(g)
# diffを用いて微分結果y_dashを計算
y_dash = diff(x)
# 結果確認
print(y_dash)
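# Added check: the analytic derivative of the sigmoid is y*(1 - y) with y = g(x),
# so the numerical result should agree with it.
print(np.allclose(y_dash, y * (1 - y), atol=1e-6))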
# 結果のグラフ表示
plt.plot(x, y, label=r'y = f(x)', c='b')
plt.plot(x, y_dash, label=r"y = f '(x)", c='k')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
It is known that the derivative of the sigmoid function equals $y(1-y)$. This is a quadratic in $y$ that attains its maximum value $\dfrac{1}{4}$ at $y=\dfrac{1}{2}$. The graph above matches that result, showing that the numerical differentiation is correct. 1.5 Introduction to object-oriented programming
###Code
# グラフ描画用ライブラリ
import matplotlib.pyplot as plt
# 円描画に必要なライブラリ
import matplotlib.patches as patches
# クラス Point の定義
class Point:
# インスタンス生成時にxとyの2つの引数を持つ
def __init__(self, x, y):
# インスタンスの属性xに第一引数をセットする
self.x = x
# インスタンスの属性yに第二引数をセットする
self.y = y
# 描画関数 drawの定義 (引数はなし)
def draw(self):
# (x, y)に点を描画する
plt.plot(self.x, self.y, marker='o', markersize=10, c='k')
# クラスPointからインスタンス変数p1とp2を生成する
p1 = Point(2,3)
p2 = Point(-1, -2)
# p1とp2の属性x, yの参照
print(p1.x, p1.y)
print(p2.x, p2.y)
# p1とp2のdraw関数を呼び出し、2つの点を描画する
p1.draw()
p2.draw()
plt.xlim(-4, 4)
plt.ylim(-4, 4)
plt.show()
# Pointの子クラスCircleの定義その1
class Circle1(Point):
# Circleはインスタンス生成時に引数x,y,rを持つ
def __init__(self, x, y, r):
# xとyは、親クラスの属性として設定
super().__init__(x, y)
# rは、Circleの属性として設定
self.r = r
# この段階でdraw関数は定義しない
# クラスCircleからインスタンス変数c1_1を生成する
c1_1 = Circle1(1, 0, 2)
# c1_1の属性の確認
print(c1_1.x, c1_1.y, c1_1.r)
# Call the draw function of p1, p2, and c1_1
ax = plt.subplot()
p1.draw()
p2.draw()
c1_1.draw()
plt.xlim(-4, 4)
plt.ylim(-4, 4)
plt.show()
###Output
_____no_output_____
###Markdown
At this stage we can see that the draw function being called is the one defined in the parent class
###Code
# Pointの子クラスCircleの定義その2
class Circle2(Point):
# Circleはインスタンス生成時に引数x,y,rを持つ
def __init__(self, x, y, r):
# xとyは、親クラスの属性として設定
super().__init__(x, y)
# rは、Circleの属性として設定
self.r = r
# draw関数は、子クラス独自に円の描画を行う
def draw(self):
# 円の描画
c = patches.Circle(xy=(self.x, self.y), radius=self.r, fc='b', ec='k')
ax.add_patch(c)
# クラスCircle2からインスタンス変数c2_1を生成する
c2_1 = Circle2(1, 0, 2)
# Call the draw function of p1, p2, and c2_1
ax = plt.subplot()
p1.draw()
p2.draw()
c2_1.draw()
plt.xlim(-4, 4)
plt.ylim(-4, 4)
plt.show()
###Output
_____no_output_____
###Markdown
We can see that the child's draw function was called instead of the parent's draw function. So what should we do when we want to call both this function and the parent's function?
###Code
# Pointの子クラスCircleの定義その3
class Circle3(Point):
# Circleはインスタンス生成時に引数x,y,rを持つ
def __init__(self, x, y, r):
# xとyは、親クラスの属性として設定
super().__init__(x, y)
# rは、Circleの属性として設定
self.r = r
# Circleのdraw関数は、親の関数呼び出しの後で、円の描画も独自に行う
def draw(self):
# 親クラスのdraw関数呼び出し
super().draw()
# 円の描画
c = patches.Circle(xy=(self.x, self.y), radius=self.r, fc='b', ec='k')
ax.add_patch(c)
# クラスCircle3からインスタンス変数c3_1を生成する
c3_1 = Circle3(1, 0, 2)
# Call the draw function of p1, p2, and c3_1
ax = plt.subplot()
p1.draw()
p2.draw()
c3_1.draw()
plt.xlim(-4, 4)
plt.ylim(-4, 4)
plt.show()
###Output
_____no_output_____
###Markdown
Both functions were successfully called. 1.6 Making an instance callable as a function
###Code
# 関数クラスHの定義
class H:
def __call__(self, x):
return 2*x**2 + 2
# hが関数として動作することを確認する
# numpy配列としてxの定義
x = np.arange(-2, 2.1, 0.25)
print(x)
# Hクラスのインスタンスとしてhを生成
h = H()
# 関数hの呼び出し
y = h(x)
print(y)
# グラフ描画
plt.plot(x, y)
plt.show()
###Output
_____no_output_____
###Markdown
Chapter 1: Python basicsThis notebook explains only the concepts that are important for deep-learning programming with PyTorch
###Code
# 必要ライブラリの導入
!pip install japanize_matplotlib | tail -n 1
!pip install matplotlib
!pip install japanize_matplotlib
# 必要ライブラリのインポート
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import japanize_matplotlib
# warning表示off
import warnings
warnings.simplefilter('ignore')
# デフォルトフォントサイズ変更
plt.rcParams['font.size'] = 14
# デフォルトグラフサイズ変更
plt.rcParams['figure.figsize'] = (6,6)
# デフォルトで方眼表示ON
plt.rcParams['axes.grid'] = True
# numpyの表示桁数設定
np.set_printoptions(suppress=True, precision=5)
###Output
_____no_output_____
###Markdown
1.2 Beware of container-type variablesIn Python, a variable is merely a pointer to the actual data structure. With NumPy arrays and the like, not keeping this in mind can lead to unexpected results. Between NumPy variables
###Code
# 넘파이 배열 x를 정의
x = np.array([5, 7, 9])
# 변수 y에 x를 대입
y = x
# 결과 확인
print(x)
print(y)
# x의 특정 요소를 변경
x[1] = -1
# y도 따라서 값이 바뀜
print(x)
print(y)
# y도 동시에 변하면 안 되는 경우는, 대입 시 copy 함수를 이용
x = np.array([5, 7, 9])
y = x.copy()
# x의 특정 요소 값이 변해도, y에는 영향이 없음
x[1] = -1
print(x)
print(y)
###Output
[ 5 -1 9]
[5 7 9]
###Markdown
Between tensors and NumPy
###Code
import torch
# x1: shape=[5]가 되는 모든 값이 1인 텐서
x1 = torch.ones(5)
# 결과 확인
print(x1)
# x2: x1로부터 생성한 넘파이 배열
x2 = x1.data.numpy()
# 결과 확인
print(x2)
# x1의 값을 변경
x1[1] = -1
# x2의 값도 같이 변함
print(x1)
print(x2)
# 안전한 방법
# x1: 텐서
x1 = torch.ones(5)
# x2: x1를 copy한 넘파이
x2 = x1.data.numpy().copy()
x1[1] = -1
# 결과 확인
print(x1)
print(x2)
###Output
tensor([ 1., -1., 1., 1., 1.])
[1. 1. 1. 1. 1.]
###Markdown
1.3 Implementing 'composite functions' in PythonLet's see how a mathematical composite function is implemented in Python. Define $f(x) = 2x^2 + 2$ as a function
###Code
def f(x):
return (2 * x**2 + 2)
# 넘파이 배열로 x를 정의
x = np.arange(-2, 2.1, 0.25)
print(x)
# f(x)의 결과를 y에 대입
y = f(x)
print(y)
# 함수를 그래프로 그리기
fig1 = plt.gcf()
plt.plot(x, y)
plt.show()
fig1.savefig('ex01-09.tif', format='tif')
# 세 가지 기본 함수의 정의
def f1(x):
return(x**2)
def f2(x):
return(x*2)
def f3(x):
return(x+2)
# 합성 함수 만들기
x1 = f1(x)
x2 = f2(x1)
y = f3(x2)
# 合成関数の値の確認
print(y)
# 合成関数のグラフ表示
plt.plot(x, y)
plt.show()
###Output
_____no_output_____
###Markdown
1.4 Numerical differentiation in PythonIn Python, a function name is also just a pointer, with the actual object stored elsewhere. Exploiting this, we can build a "function that takes a function as an argument". Here we define a function ``diff`` that numerically differentiates another function. For the numerical derivative we use the central-difference formula $f'(x) = \dfrac{f(x+h)-f(x-h)}{2h}$, which is a better approximation than the plain definition of the derivative.
###Code
# 함수를 미분하는 함수 fdiff의 정의
def fdiff(f):
# 함수 f를 인수로 미분한 결과 함수를 diff 로 정의
def diff(x):
h = 1e-6
return (f(x+h) - f(x-h)) / (2*h)
# fdiff의 반환은 미분한 결과 함수 diff
return diff
###Output
_____no_output_____
###Markdown
Let's apply the fdiff function we just created to the quadratic function f and compute its numerical derivative.
###Code
# 2차함수의 수치미분
# f의 미분결과 함수 diff를 취득
diff = fdiff(f)
# 미분결과를 계산하고 y_dash에 대입
y_dash = diff(x)
# 결과 확인
print(y_dash)
# 결과 그래프 출력
fig1 = plt.gcf()
plt.plot(x, y, label=r'y = f(x)', c='b')
plt.plot(x, y_dash, label=r"y = f '(x)", c='k')
plt.legend()
plt.show()
fig1.savefig('ex01-13.tif', format='tif')
###Output
_____no_output_____
###Markdown
Now do the same for the sigmoid function $g(x) = \dfrac{1}{1 + \exp(-x)}$.
###Code
# 시그모이드 함수의 정의
def g(x):
return 1 / (1 + np.exp(-x))
# 시그모이드 함수 계산
y = g(x)
print(y)
# 그래프 출력
fig1 = plt.gcf()
plt.plot(x, y)
plt.show()
fig1.savefig('ex01-16.tif', format='tif', dpi=300)
# 시그모이드 함수의 수치미분
# g를 미분한 함수 취득
diff = fdiff(g)
# diff를 사용해 미분 결과 y_dash를 계산
y_dash = diff(x)
# 결과 확인
print(y_dash)
# 結果のグラフ表示
fig1 = plt.gcf()
plt.plot(x, y, label=r'y = f(x)', c='b')
plt.plot(x, y_dash, label=r"y = f '(x)", c='k')
plt.legend()
plt.show()
fig1.savefig('ex01-18.tif', format='tif', dpi=300)
###Output
_____no_output_____
###Markdown
It is known that the derivative of the sigmoid function equals $y(1-y)$. This is a quadratic in $y$ that attains its maximum value $\dfrac{1}{4}$ at $y=\dfrac{1}{2}$. The graph above matches that result, showing that the numerical differentiation is correct. 1.5 Defining a custom class
###Code
# 그래프 출력을 위한 라이브러리
import matplotlib.pyplot as plt
# 원을 그리기 위해 필요한 라이브러리
import matplotlib.patches as patches
# Point 클래스 정의
class Point:
# Takes two arguments, x and y, when an instance is created
def __init__(self, x, y):
# 인스턴스 속성 x에 첫 번째 인수를 할당
self.x = x
# 인스턴스 속성 y에 두 번째 인수를 할당
self.y = y
# draw 함수 정의(인수 없음)
def draw(self):
# (x, y)에 점을 그림
plt.plot(self.x, self.y, marker='o', markersize=10, c='k')
# Point 클래스로 인스턴스 변수 p1과 p2 생성
p1 = Point(2,3)
p2 = Point(-1, -2)
# p1과 p2의 속성 x, y
print(p1.x, p1.y)
print(p2.x, p2.y)
# p1과 p2의 draw 함수를 호출하고, 두 개의 점을 출력함
fig1 = plt.gcf()
p1.draw()
p2.draw()
plt.xlim(-4, 4)
plt.ylim(-4, 4)
plt.show()
fig1.savefig('ex01-22.tif', format='tif', dpi=300)
# Point의 자식 클래스 Circle 정의 1
class Circle1(Point):
# Circle은 인스턴스 생성 시에 인수 x, y, r을 가짐
def __init__(self, x, y, r):
# xとyは、親クラスの属性として設定
super().__init__(x, y)
# r은 Circle의 속성으로 설정
self.r = r
# 이 단계에서 draw 함수는 정의하지 않음
# Circle1 클래스에서 인스턴스 변수 c1_1을 생성
c1_1 = Circle1(1, 0, 2)
# c1_1의 속성 확인
print(c1_1.x, c1_1.y, c1_1.r)
# p1, p2, c1_1의 각 draw 함수를 호출
fig1 = plt.gcf()
ax = plt.subplot()
p1.draw()
p2.draw()
c1_1.draw()
plt.xlim(-4, 4)
plt.ylim(-4, 4)
plt.show()
fig1.savefig('ex01-25.tif', format='tif', dpi=300)
###Output
_____no_output_____
###Markdown
At this stage we can see that the draw function being called is the one defined in the parent class
###Code
# Point의 자식 클래스 Circle의 정의 2
class Circle2(Point):
# Circle은 인스턴스 생성 시에 인수 x, y, r을 가짐
def __init__(self, x, y, r):
# x와 y는 부모 클래스의 속성으로 설정
super().__init__(x, y)
# r은 Circle의 속성으로 설정
self.r = r
# draw 함수는 자식 클래스만 따로 원을 그림
def draw(self):
# 원 그리기
c = patches.Circle(xy=(self.x, self.y), radius=self.r, fc='b', ec='k')
ax.add_patch(c)
# 클래스 Circle2로부터 인스턴스 변수 c2_1를 생성
c2_1 = Circle2(1, 0, 2)
# p1, p2, c2_1의 각 draw 함수를 호출
fig1 = plt.gcf()
ax = plt.subplot()
p1.draw()
p2.draw()
c2_1.draw()
plt.xlim(-4, 4)
plt.ylim(-4, 4)
plt.show()
fig1.savefig('ex01-27.tif', format='tif', dpi=300)
###Output
_____no_output_____
###Markdown
We can see that the child's draw function was called instead of the parent's draw function. So what should we do when we want to call both this function and the parent's function?
###Code
# Point의 자식 클래스 Circle의 정의 3
class Circle3(Point):
# Circle은 인스턴스 생성 시에 인수 x, y, r을 가짐
def __init__(self, x, y, r):
# x와 y는 부모 클래스의 속성으로 설정
super().__init__(x, y)
# r은 Circle의 속성으로 설정
self.r = r
# Circle의 draw 함수는 부모의 함수를 호출 한 다음, 원 그리기를 독자적으로 수행함
def draw(self):
# 부모 클래스의 draw 함수 호출
super().draw()
# 원 그리기
c = patches.Circle(xy=(self.x, self.y), radius=self.r, fc='b', ec='k')
ax.add_patch(c)
# Circle3 클래스로부터 인스턴스 변수 c3_1를 생성
c3_1 = Circle3(1, 0, 2)
# p1, p2, c3_1의 각 draw 함수를 호출
fig1 = plt.gcf()
ax = plt.subplot()
p1.draw()
p2.draw()
c3_1.draw()
plt.xlim(-4, 4)
plt.ylim(-4, 4)
plt.show()
fig1.savefig('ex01-29.tif', format='tif', dpi=300)
###Output
_____no_output_____
###Markdown
Both functions were successfully called. 1.6 Using an instance as a function
###Code
# 함수 클래스 H의 정의
class H:
def __call__(self, x):
return 2*x**2 + 2
# h가 함수로 동작하는지 확인
# 넘파이 배열 x를 정의
x = np.arange(-2, 2.1, 0.25)
print(x)
# H 클래스의 인스턴스로 h를 생성
h = H()
# 함수 h 호출
y = h(x)
print(y)
# 그래프 출력
fig1 = plt.gcf()
plt.plot(x, y)
plt.show()
fig1.savefig('ex01-32.tif', format='tif', dpi=300)
###Output
_____no_output_____
###Markdown
Chapter 1: Python basicsExplains mainly the concepts that are important for deep-learning programming with PyTorch
###Code
# 필요한 라이브러리 설치
!pip install japanize_matplotlib | tail -n 1
# 라이브러리 임포트
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import japanize_matplotlib
# warning表示off
import warnings
warnings.simplefilter('ignore')
# デフォルトフォントサイズ変更
plt.rcParams['font.size'] = 14
# デフォルトグラフサイズ変更
plt.rcParams['figure.figsize'] = (6,6)
# デフォルトで方眼表示ON
plt.rcParams['axes.grid'] = True
# numpyの表示桁数設定
np.set_printoptions(suppress=True, precision=5)
###Output
_____no_output_____
###Markdown
1.2 Beware of container variablesIn Python, a variable is nothing more than a pointer to the actual data structure. With NumPy arrays and the like, not keeping this in mind can lead to unexpected results. Between NumPy variables
###Code
# Numpy配列 x1 を定義
x = np.array([5, 7, 9])
# 変数yにxを代入する
# このとき、実体は共通なまま
y = x
# 結果確認
print(x)
print(y)
# ここでxの特定の要素の値を変更する
x[1] = -1
# すると、yも連動して値が変わる
print(x)
print(y)
# yも同時に変化して困る場合は、代入時にcopy関数を利用する
x = np.array([5, 7, 9])
y = x.copy()
# すると、xの特定の要素値の変更がyに影響しなくなる
x[1] = -1
print(x)
print(y)
###Output
_____no_output_____
###Markdown
Between tensors and NumPy
###Code
import torch
# x1: shape=[5] となるすべて値が1テンソル
x1 = torch.ones(5)
# 結果確認
print(x1)
# x2 x1から生成したNumPy
x2 = x1.data.numpy()
# 結果確認
print(x2)
# x1の値を変更
x1[1] = -1
# 連動してx2の値も変わる
print(x1)
print(x2)
# 安全な方法
# x1 テンソル
x1 = torch.ones(5)
# x2 x1から生成したNumPy
x2 = x1.data.numpy().copy()
x1[1] = -1
# 結果確認
print(x1)
print(x2)
###Output
_____no_output_____
###Markdown
1.3 Composite functions in mathematics and in PythonLet's see how a mathematical composite function is implemented in Python. Define $f(x) = 2x^2 + 2$ as a function
###Code
def f(x):
return (2 * x**2 + 2)
# xをnumpy配列で定義
x = np.arange(-2, 2.1, 0.25)
print(x)
# f(x)の結果をyに代入
y = f(x)
print(y)
# 関数のグラフ表示
plt.plot(x, y)
plt.show()
# 3つの基本関数の定義
def f1(x):
return(x**2)
def f2(x):
return(x*2)
def f3(x):
return(x+2)
# 合成関数を作る
x1 = f1(x)
x2 = f2(x1)
y = f3(x2)
# 合成関数の値の確認
print(y)
# 合成関数のグラフ表示
plt.plot(x, y)
plt.show()
###Output
_____no_output_____
###Markdown
1.4 Derivatives in mathematics and numerical differentiation in PythonIn Python, a function name is also just a pointer, with the actual object stored elsewhere. Exploiting this, we can build a "function that takes a function as an argument". Here we define a function ``diff`` that numerically differentiates another function. For the numerical derivative we use the central-difference formula $f'(x) = \dfrac{f(x+h)-f(x-h)}{2h}$, which is a better approximation than the plain definition of the derivative.
###Code
# 関数を微分する関数fdiffの定義
def fdiff(f):
# 関数fを引数に微分した結果の関数をdiffとして定義
def diff(x):
h = 1e-6
return (f(x+h) - f(x-h)) / (2*h)
# fdiffの戻りは微分した結果の関数diff
return diff
###Output
_____no_output_____
###Markdown
Let's apply the fdiff function we just created to the quadratic function f and compute its numerical derivative.
###Code
# 2次関数の数値微分
# fの微分結果の関数diffを取得
diff = fdiff(f)
# 微分結果を計算しy_dashに代入
y_dash = diff(x)
# 結果確認
print(y_dash)
# 結果のグラフ表示
plt.plot(x, y, label=r'y = f(x)', c='b')
plt.plot(x, y_dash, label=r"y = f '(x)", c='k')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Now do the same for the sigmoid function $g(x) = \dfrac{1}{1 + \exp(-x)}$.
###Code
# シグモイド関数の定義
def g(x):
return 1 / (1 + np.exp(-x))
# シグモイド関数の計算
y = g(x)
print(y)
# 関数のグラフ表示
plt.plot(x, y)
plt.show()
# シグモイド関数の数値微分
# gを微分した関数を取得
diff = fdiff(g)
# diffを用いて微分結果y_dashを計算
y_dash = diff(x)
# 結果確認
print(y_dash)
# 結果のグラフ表示
plt.plot(x, y, label=r'y = f(x)', c='b')
plt.plot(x, y_dash, label=r"y = f '(x)", c='k')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
It is known that the derivative of the sigmoid function equals $y(1-y)$. This is a quadratic in $y$ that attains its maximum value $\dfrac{1}{4}$ at $y=\dfrac{1}{2}$. The graph above matches that result, showing that the numerical differentiation is correct. 1.5 Introduction to object-oriented programming
###Code
# グラフ描画用ライブラリ
import matplotlib.pyplot as plt
# 円描画に必要なライブラリ
import matplotlib.patches as patches
# クラス Point の定義
class Point:
# インスタンス生成時にxとyの2つの引数を持つ
def __init__(self, x, y):
# インスタンスの属性xに第一引数をセットする
self.x = x
# インスタンスの属性yに第二引数をセットする
self.y = y
# 描画関数 drawの定義 (引数はなし)
def draw(self):
# (x, y)に点を描画する
plt.plot(self.x, self.y, marker='o', markersize=10, c='k')
# クラスPointからインスタンス変数p1とp2を生成する
p1 = Point(2,3)
p2 = Point(-1, -2)
# p1とp2の属性x, yの参照
print(p1.x, p1.y)
print(p2.x, p2.y)
# p1とp2のdraw関数を呼び出し、2つの点を描画する
p1.draw()
p2.draw()
plt.xlim(-4, 4)
plt.ylim(-4, 4)
plt.show()
# Pointの子クラスCircleの定義その1
class Circle1(Point):
# Circleはインスタンス生成時に引数x,y,rを持つ
def __init__(self, x, y, r):
# xとyは、親クラスの属性として設定
super().__init__(x, y)
# rは、Circleの属性として設定
self.r = r
# この段階でdraw関数は定義しない
# クラスCircleからインスタンス変数c1_1を生成する
c1_1 = Circle1(1, 0, 2)
# c1_1の属性の確認
print(c1_1.x, c1_1.y, c1_1.r)
# Call the draw function of p1, p2, and c1_1
ax = plt.subplot()
p1.draw()
p2.draw()
c1_1.draw()
plt.xlim(-4, 4)
plt.ylim(-4, 4)
plt.show()
###Output
_____no_output_____
###Markdown
At this stage we can see that the draw function being called is the one defined in the parent class
###Code
# Pointの子クラスCircleの定義その2
class Circle2(Point):
# Circleはインスタンス生成時に引数x,y,rを持つ
def __init__(self, x, y, r):
# xとyは、親クラスの属性として設定
super().__init__(x, y)
# rは、Circleの属性として設定
self.r = r
# draw関数は、子クラス独自に円の描画を行う
def draw(self):
# 円の描画
c = patches.Circle(xy=(self.x, self.y), radius=self.r, fc='b', ec='k')
ax.add_patch(c)
# クラスCircle2からインスタンス変数c2_1を生成する
c2_1 = Circle2(1, 0, 2)
# Call the draw function of p1, p2, and c2_1
ax = plt.subplot()
p1.draw()
p2.draw()
c2_1.draw()
plt.xlim(-4, 4)
plt.ylim(-4, 4)
plt.show()
###Output
_____no_output_____
###Markdown
We can see that the child's draw function was called instead of the parent's draw function. So what should we do when we want to call both this function and the parent's function?
###Code
# Pointの子クラスCircleの定義その3
class Circle3(Point):
# Circleはインスタンス生成時に引数x,y,rを持つ
def __init__(self, x, y, r):
# xとyは、親クラスの属性として設定
super().__init__(x, y)
# rは、Circleの属性として設定
self.r = r
# Circleのdraw関数は、親の関数呼び出しの後で、円の描画も独自に行う
def draw(self):
# 親クラスのdraw関数呼び出し
super().draw()
# 円の描画
c = patches.Circle(xy=(self.x, self.y), radius=self.r, fc='b', ec='k')
ax.add_patch(c)
# クラスCircle3からインスタンス変数c3_1を生成する
c3_1 = Circle3(1, 0, 2)
# Call the draw function of p1, p2, and c3_1
ax = plt.subplot()
p1.draw()
p2.draw()
c3_1.draw()
plt.xlim(-4, 4)
plt.ylim(-4, 4)
plt.show()
###Output
_____no_output_____
###Markdown
Both functions were successfully called. 1.6 Making an instance callable as a function
###Code
# 関数クラスHの定義
class H:
def __call__(self, x):
return 2*x**2 + 2
# hが関数として動作することを確認する
# numpy配列としてxの定義
x = np.arange(-2, 2.1, 0.25)
print(x)
# Hクラスのインスタンスとしてhを生成
h = H()
# 関数hの呼び出し
y = h(x)
print(y)
# グラフ描画
plt.plot(x, y)
plt.show()
###Output
_____no_output_____ |
codebase/notebooks/05_transition_analyses/Transitions_01_Calibrate_viability_threshold.ipynb | ###Markdown
Data-driven calibration of transition viability thresholdsWe used the hierarchy of occupations inherent in the ESCO data set to derive a data-driven threshold for viable transitions along with an additional indicator for transitions that are highly viable. 0. Import dependencies and inputs
###Code
%run ../notebook_preamble_Transitions.ipy
import mapping_career_causeways.plotting_utils as plotting_utils
from scipy.stats import percentileofscore
from itertools import combinations
data = load_data.Data()
def flatten_without_diagonal(W):
"""
Return all values of the matrix W, except the diagonal, in a flat vector.
"""
return np.concatenate((
W[np.triu_indices(W.shape[0], k=1)],
W[np.tril_indices(W.shape[0], k=-1)]))
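# Small added example: for [[1, 2], [3, 4]] this keeps only the off-diagonal
# entries, returning array([2, 3]).
print(flatten_without_diagonal(np.array([[1, 2], [3, 4]])))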
# Import occupation table
occupations = data.occupation_hierarchy
# Combined similarity measure
W = load_data.Similarities().W_combined
occupations.head(1)
###Output
_____no_output_____
###Markdown
1. Calibrate viability thresholdWe set the threshold for viability to correspond to the typical similarity between closely related occupations that belong to the same ISCO unit group. For example, shop assistants and sales processors are both in the ISCO unit group ‘Shop sales assistants’ with the four-digit code 5223, and it is reasonable to assume that the transition between these two occupations should be viable. We calculated the average within-group occupation similarity for each ISCO unit group that had more than one occupation (using the combined similarity measure) and used the distribution of these within-group averages to make a judgement on the viability threshold. In the interest of obtaining more robust estimates of within-group averages, we used all occupations from the ESCO framework.
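Concretely, if $\bar{s}_g$ denotes the mean pairwise similarity of occupations within ISCO unit group $g$, and $\mu$ and $\sigma$ are the mean and standard deviation of $\{\bar{s}_g\}$ over all groups with more than one occupation, then the viability threshold computed below is $\mathrm{round}(\mu - \sigma, 1)$.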
###Code
# Occupational hierarchy level that we are using (here, ISCO unit groups)
group_category = 'isco_level_4'
# Get all parent occupations *with at least 1 child*
df = occupations.groupby(group_category).count()
parents_with_children = df[df.id>1].index.to_list()
### Calculate within-group similarity
# similarity values
w_same_group = []
# compared occupation IDs
pairs = []
# list of lists for each group of occupations
w_within_groups = []
for j in range(len(parents_with_children)):
ids = occupations[occupations[group_category] == parents_with_children[j]].id.to_list()
w_within_group = []
for pair in list(combinations(ids,2)):
# Transitions in both directions
w_same_group.append(W[pair])
pairs.append(pair)
w_same_group.append(W[(pair[1],pair[0])])
pairs.append((pair[1],pair[0]))
# List of lists, storing each groups within-group similarities
w_within_group.append(W[pair])
w_within_group.append(W[(pair[1],pair[0])])
w_within_groups.append(w_within_group)
# Calculate the average within-group similarity for each group of occupations
mean_within_group_sim = [np.mean(y) for y in w_within_groups]
# Median & mean average-within-group similarities
print(np.median(mean_within_group_sim))
print(np.mean(mean_within_group_sim))
# Check the spread of the average-within-group similarities (in terms of standard deviations)
print(f'-2SD: {np.mean(mean_within_group_sim) - 2*np.std(mean_within_group_sim) :.3f}')
print(f'-1.5SD: {np.mean(mean_within_group_sim) - 1.5*np.std(mean_within_group_sim) :.3f}')
print(f'-1SD: {np.mean(mean_within_group_sim) - 1*np.std(mean_within_group_sim) :.3f}')
print(f'0SD: {np.mean(mean_within_group_sim) :.2f}')
print(f'+1SD: {np.mean(mean_within_group_sim) + 1*np.std(mean_within_group_sim) :.3f}')
print(f'+1.5SD: {np.mean(mean_within_group_sim) + 1.5*np.std(mean_within_group_sim) :.3f}')
print(f'+2SD: {np.mean(mean_within_group_sim) + 2*np.std(mean_within_group_sim) :.3f}')
###Output
-2SD: 0.150
-1.5SD: 0.231
-1SD: 0.313
0SD: 0.48
+1SD: 0.641
+1.5SD: 0.723
+2SD: 0.805
###Markdown
Interestingly, there was considerable variation across different ISCO unit groups, and hence we set the viability threshold as the mean minus one standard deviation of these within-group averages (rounded to the first decimal point). This yielded a viability threshold equal to 0.30, with approximately 80 per cent of the within-group transitions above this threshold.
###Code
VIABILITY_THRESHOLD = np.round(np.mean(mean_within_group_sim) - 1*np.std(mean_within_group_sim), 1)
print(VIABILITY_THRESHOLD)
# Fraction of transitions above this threshold
all_w = [x for y in w_within_groups for x in y]
np.sum(np.array(all_w)>VIABILITY_THRESHOLD)/len(all_w)
# Fraction of ISCO unit groups above this threshold
np.sum(np.array(mean_within_group_sim)>VIABILITY_THRESHOLD)/len(mean_within_group_sim)
###Output
_____no_output_____
###Markdown
1.1 Visualise the distribution
###Code
# Distribution of within-group similarities
sns.set_style("ticks")
plt.figure(figsize=(7,5))
sns.distplot(mean_within_group_sim, kde=False, rug=True, bins=20)
# Viability threshold
plt.plot([VIABILITY_THRESHOLD, VIABILITY_THRESHOLD], [0, 60], c='r')
plt.xlabel('Within-group similarity (ISCO unit groups)', fontsize=16)
plt.ylabel('Number of unit groups', fontsize=16)
plt.ylim([0, 60])
plt.tick_params(axis='both', which='major', labelsize=14)
plotting_utils.export_figure('fig_54')
plt.show()
###Output
_____no_output_____
###Markdown
Check examples
###Code
df_isco_titles = pd.DataFrame(data={
'sim': mean_within_group_sim,
'isco': parents_with_children}).merge(data.isco_titles[['isco', 'isco_title']], how='left')
df_isco_titles.sort_values('sim')
###Output
_____no_output_____
###Markdown
2. Calibrate highly viable transitionsThe ESCO framework defines a further hierarchy of broader and narrower ESCO occupations that goes beyond the ISCO unit groups (cf. Figure 47, page 85 in the Mapping Career Causeways report). For example, butcher is related to two other, narrower occupations: halal butcher and kosher butcher. We leveraged this hierarchy to derive an indicator for highly viable transitions by defining ‘broad ESCO groups’ that contain the broad ESCO level 5 occupation and all its narrower occupations (Figure 55, page 94 in the report). Analogous to the calibration process of the viability threshold, we set the indicator for highly viable transitions equal to the mean minus one standard deviation of the average within-group similarities of all broad ESCO groups, rounded to one decimal place.
###Code
# Occupational hierarchy level that we are using (here, ISCO unit groups)
group_category = 'top_level_parent_id'
# Get all broader top level parent occupations *with children (narrower occupations)*
df = occupations.groupby(group_category).count()
parents_with_children = df[df.id>1].index.to_list()
## Calculate within-group similarity across all broader top-level occupations
# similarity values
w_same_group = []
# compared occupation IDs
pairs = []
# list of lists for each group of occupations
w_within_groups = []
for j in range(len(parents_with_children)):
ids = occupations[occupations[group_category] == parents_with_children[j]].id.to_list()
w_within_group = []
for pair in list(combinations(ids,2)):
w_same_group.append(W[pair])
pairs.append(pair)
w_same_group.append(W[(pair[1],pair[0])])
pairs.append((pair[1],pair[0]))
w_within_group.append(W[pair])
w_within_group.append(W[(pair[1],pair[0])])
w_within_groups.append(w_within_group)
# Calculate the average within-group similarity for each group of occupations
mean_within_group_sim = [np.mean(y) for y in w_within_groups]
# Median & mean average-within-group similarities
print(np.median(mean_within_group_sim))
print(np.mean(mean_within_group_sim))
# Standard deviations
print(f'-2SD: {np.mean(mean_within_group_sim) - 2*np.std(mean_within_group_sim) :.2f}')
print(f'-1.5SD: {np.mean(mean_within_group_sim) - 1.5*np.std(mean_within_group_sim) :.2f}')
print(f'-1SD: {np.mean(mean_within_group_sim) - 1*np.std(mean_within_group_sim) :.2f}')
print(f'0SD: {np.mean(mean_within_group_sim) :.2f}')
print(f'+1SD: {np.mean(mean_within_group_sim) + 1*np.std(mean_within_group_sim) :.2f}')
print(f'+1.5SD: {np.mean(mean_within_group_sim) + 1.5*np.std(mean_within_group_sim) :.2f}')
print(f'+2SD: {np.mean(mean_within_group_sim) + 2*np.std(mean_within_group_sim) :.2f}')
HIGHLY_VIABLE_THRESHOLD = np.round(np.mean(mean_within_group_sim) - 1*np.std(mean_within_group_sim), 1)
print(HIGHLY_VIABLE_THRESHOLD)
###Output
0.4
###Markdown
2.1 Visualise the distribution
###Code
# Distribution of within-group similarities
sns.set_style("ticks")
plt.figure(figsize=(7,5))
sns.distplot(mean_within_group_sim, kde=False, rug=True, bins=15)
plt.plot([HIGHLY_VIABLE_THRESHOLD, HIGHLY_VIABLE_THRESHOLD], [0, 60], c='r')
plt.xlabel('Within-group similarity (Broad ESCO occupation groups)', fontsize=16)
plt.ylabel('Number of broad ESCO groups', fontsize=16)
plt.ylim([0, 45])
plt.tick_params(axis='both', which='major', labelsize=14)
plotting_utils.export_figure('fig_56')
plt.show()
###Output
_____no_output_____
###Markdown
Check examples
###Code
df_occ_titles = pd.DataFrame(data={
'sim':mean_within_group_sim,
'id': parents_with_children}).merge(data.occ[['id','preferred_label']], how='left')
df_occ_titles.sort_values('sim')
###Output
_____no_output_____
###Markdown
3. Summarise the viability thresholdsBased on the observations above, we define transition similarities in the following way:- **Viable** transitions have similarity above 0.30. This corresponds approximately to the mean minus one standard deviation of within-group similarity for four-digit ISCO unit groups.- **Highly viable** transitions have similarity above 0.40. This corresponds to the mean minus one standard deviation of within-group similarity for broad ESCO occupation groups.
###Code
VIABILITY_THRESHOLD
HIGHLY_VIABLE_THRESHOLD
###Output
_____no_output_____
###Markdown
4. Visualise the distribution of all similarities
###Code
# All transition similarities
w = flatten_without_diagonal(W)
# Characterise the thresholds with respect to all possible transitions (between all ESCO occupations)
print(f'Viable transitions are in the {percentileofscore(w, VIABILITY_THRESHOLD):.1f} precentile')
print(f'Highly viable transitions are in the {percentileofscore(w, HIGHLY_VIABLE_THRESHOLD):.1f} precentile')
# Distribution of all similarities
sns.set_style("ticks")
plt.figure(figsize=(7,5))
sns.distplot(w, kde=False)
# Viability thresholds
plt.plot([VIABILITY_THRESHOLD, VIABILITY_THRESHOLD], [0, 2e+6], c='r')
plt.plot([HIGHLY_VIABLE_THRESHOLD, HIGHLY_VIABLE_THRESHOLD], [0, 2e+6], c='b')
plt.xlabel('Occupation similarity', fontsize=16)
plt.ylabel('Number of comparisons (millions)', fontsize=16)
plt.ylim([0, 1.3e+6])
plt.tick_params(axis='both', which='major', labelsize=14)
plotting_utils.export_figure('fig_57')
plt.show()
###Output
_____no_output_____ |
.ipynb_checkpoints/Water-checkpoint.ipynb | ###Markdown
Driven Data Water Contest Import needed libraries
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#import data
train = pd.read_csv('/Users/ericp/OneDrive/Documents/GitHub/datadrivenH2O/train.csv')
target = pd.read_csv('/Users/ericp/OneDrive/Documents/GitHub/datadrivenH2O/target.csv')
test = pd.read_csv('/Users/ericp/OneDrive/Documents/GitHub/datadrivenH2O/test.csv')
train_id = train['id']
test_id = test['id']
train = train.drop(['id'], axis = 1)
test = test.drop(['id'], axis = 1)
target = target.drop(['id'], axis = 1)
colnames = train.columns
train_shape = train.shape[0]
train.head(10)
#look at train / test shapes
print(train.shape)
print(test.shape)
print(target.shape)
#look at the value_counts of the target variable
target['status_group'].value_counts()
#LabelEncode target variable
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
target['status_group'] = le.fit_transform(target['status_group'])
target['status_group'].value_counts()
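# Print the actual label-to-code mapping rather than relying on memory;
# LabelEncoder assigns codes in sorted (alphabetical) order of the labels.
print(dict(zip(le.classes_, le.transform(le.classes_))))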
# LabelEncoder assigns codes in sorted order of the labels:
# functional = 0
# functional needs repair = 1
# non functional = 2
#look at correlations of variables with target
#check for correlation between target and predictors
target_corr = list()
for c, v in enumerate(train, start = 1):
target_corr.append(target.corrwith(train[v], method = 'spearman'))
target_corr = pd.Series(data = target_corr, index = train.columns, name = 'correlation')
target_corr = abs(target_corr)
target_corr
#not a lot highly correlated with the target
#combine data
combine = pd.concat([train, test], axis = 0).reset_index(drop = True)
#look for missing values
miss_vals = pd.Series(combine.isnull().sum(), name = 'PctMissing')
miss_vals = miss_vals[miss_vals!=0]
miss_vals = miss_vals.sort_values(ascending = False)
print(miss_vals)
#pct missing
miss_vals / len (combine)
#these variables all seem to be about area / region. Might be best to use the mode of the region they're in
combine['subvillage'] = combine.groupby('region')['subvillage'].transform(lambda x:x.fillna(x.mode()[0]))
combine['public_meeting'] = combine.groupby('region')['public_meeting'].transform(lambda x:x.fillna(x.mode()[0]))
combine['permit'] = combine.groupby('region')['permit'].transform(lambda x:x.fillna(x.mode()[0]))
combine['funder'] = combine.groupby('region')['funder'].transform(lambda x:x.fillna(x.mode()[0]))
combine['installer'] = combine.groupby('region')['funder'].transform(lambda x:x.fillna(x.mode()[0]))
combine['scheme_management'] = combine.groupby('region')['scheme_management'].transform(lambda x:x.fillna(x.mode()[0]))
#check missing values
miss_vals = pd.Series(combine.isnull().sum(), name = 'PctMissing')
miss_vals = miss_vals[miss_vals!=0]
miss_vals = miss_vals.sort_values(ascending = False)
print(miss_vals)
#missing values all filled now.
#scheme_management and scheme_name seem to be redundant. Lots of missing values for scheme_name. Will delete.
#region and region_code same. Will delete region
#extraction_type_group seems identical to extraction_type and extraction_type_class
#waterpoint_type and water_type_group same.
#quantity and quantity_group look same too.
#payment and payment_type same
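# Quick added check: inspect a couple of the suspected duplicate pairs before dropping.
print((combine['quantity'] == combine['quantity_group']).mean())
print(pd.crosstab(combine['payment'], combine['payment_type']))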
combine = combine.drop(['scheme_name', 'region', 'extraction_type_group', 'extraction_type_class',
'management_group', 'payment_type', 'waterpoint_type_group', 'quantity_group', 'payment_type'], axis = 1)
combine.shape
dtyp = pd.Series(combine.dtypes, name = 'dtype')
dtyp
#let's look at object columns for how many unique values they have in them.
obj_cols = combine.select_dtypes('object').columns
for col in obj_cols:
print('combine',col,':',len(combine[col].unique()))
funder_counts = combine['funder'].value_counts().sort_values(ascending = False)
installer_counts = combine['installer'].value_counts().sort_values(ascending = False)
wpt_counts = combine['wpt_name'].value_counts().sort_values(ascending = False)
subvillage_counts = combine['subvillage'].value_counts().sort_values(ascending = False)
lga_counts = combine['lga'].value_counts().sort_values(ascending = False)
ward_counts = combine['ward'].value_counts().sort_values(ascending = False)
#consider category encoding these variables
count_cols = ['funder_counts', 'installer_counts', 'wpt_counts', 'subvillage_counts', 'lga_counts', 'ward_counts']
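# One way (added sketch) to use these counts is frequency encoding: map each
# category to how often it occurs, which keeps high-cardinality columns numeric.
freq_encoded = pd.DataFrame({
    col + '_freq': combine[col].map(combine[col].value_counts())
    for col in ['funder', 'installer', 'wpt_name', 'subvillage', 'lga', 'ward']})
freq_encoded.head()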
from category_encoders import TargetEncoder
# Target-encode a high-cardinality column: fit on the training rows (where the
# target is known), then transform the full combined frame
encoder = TargetEncoder()
encoder.fit(combine.loc[:train_shape - 1, 'subvillage'], target['status_group'])
combine['subvillage_encoded'] = np.ravel(encoder.transform(combine['subvillage']))
#split the date_recorded column into day / month / year components
dates = pd.to_datetime(combine['date_recorded'])
combine['day'] = dates.dt.day
combine['month'] = dates.dt.month
combine['year'] = dates.dt.year
#some columns stored as integers are really categorical; treat construction_year as an object so it gets one-hot encoded
combine['construction_year'] = combine['construction_year'].astype('object')
#check correlations between variables. See if there's some that can be deleted
f = plt.figure(figsize=(8, 8))
plt.matshow(combine.corr(), fignum=f.number)
plt.yticks(range(combine.select_dtypes(['number']).shape[1]), combine.select_dtypes(['number']).columns, fontsize=14)
cb = plt.colorbar()
cb.ax.tick_params(labelsize=14)
plt.title('Correlation Matrix', fontsize=16);
corr_df = combine.corr()
corr_df
#all correlations quite low. Keep all predictors.
#label encode columns where there is structure (i.e. levels)
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
le_columns = ['date_recorded', 'permit', 'water_quality', 'quality_group', 'quantity', 'public_meeting']
#label encode these variables
for column in le_columns:
    combine[column] = le.fit_transform(combine[column])
#get dummies
combine = pd.get_dummies(combine)
train = combine[:train_shape]
test = combine[train_shape:]
print(train.shape)
print(test.shape)
#import necessary libraries
from sklearn.ensemble import RandomForestClassifier
from lightgbm import LGBMClassifier
from xgboost import XGBClassifier
#create metric to determine accuracy
from sklearn.model_selection import KFold, cross_val_score
def accuracy(model, X, y, n = 5):
kf = KFold(n, random_state = 1, shuffle = True)
acc = cross_val_score(model, X, y, scoring = 'accuracy', cv = kf)
return acc
###Output
_____no_output_____
###Markdown
Create Base Models
###Code
#create base models
lgb = LGBMClassifier()
rf = RandomForestClassifier()
xgb = XGBClassifier()
###Output
_____no_output_____
###Markdown
Base Model Scores
###Code
score = accuracy(rf, train, target)
###Output
_____no_output_____ |
branches/1st-edition/ch09.ipynb | ###Markdown
Data Aggregation and Group Operations
###Code
from __future__ import division
from numpy.random import randn
import numpy as np
import os
import matplotlib.pyplot as plt
np.random.seed(12345)
plt.rc('figure', figsize=(10, 6))
from pandas import Series, DataFrame
import pandas as pd
np.set_printoptions(precision=4)
pd.options.display.notebook_repr_html = False
%matplotlib inline
###Output
_____no_output_____
###Markdown
GroupBy mechanics
###Code
df = DataFrame({'key1' : ['a', 'a', 'b', 'b', 'a'],
'key2' : ['one', 'two', 'one', 'two', 'one'],
'data1' : np.random.randn(5),
'data2' : np.random.randn(5)})
df
grouped = df['data1'].groupby(df['key1'])
grouped
grouped.mean()
means = df['data1'].groupby([df['key1'], df['key2']]).mean()
means
means.unstack()
states = np.array(['Ohio', 'California', 'California', 'Ohio', 'Ohio'])
years = np.array([2005, 2005, 2006, 2005, 2006])
df['data1'].groupby([states, years]).mean()
df.groupby('key1').mean()
df.groupby(['key1', 'key2']).mean()
df.groupby(['key1', 'key2']).size()
###Output
_____no_output_____
###Markdown
Iterating over groups
###Code
for name, group in df.groupby('key1'):
print(name)
print(group)
for (k1, k2), group in df.groupby(['key1', 'key2']):
print((k1, k2))
print(group)
pieces = dict(list(df.groupby('key1')))
pieces['b']
df.dtypes
grouped = df.groupby(df.dtypes, axis=1)
dict(list(grouped))
###Output
_____no_output_____
###Markdown
Selecting a column or subset of columns
###Code
df.groupby('key1')['data1']
df.groupby('key1')[['data2']]
df['data1'].groupby(df['key1'])
df[['data2']].groupby(df['key1'])
###Output
_____no_output_____
###Code
df.groupby(['key1', 'key2'])[['data2']].mean()
s_grouped = df.groupby(['key1', 'key2'])['data2']
s_grouped
s_grouped.mean()
###Output
_____no_output_____
###Markdown
Grouping with dicts and Series
###Code
people = DataFrame(np.random.randn(5, 5),
                   columns=['a', 'b', 'c', 'd', 'e'],
                   index=['Joe', 'Steve', 'Wes', 'Jim', 'Travis'])
people.ix[2:3, ['b', 'c']] = np.nan  # Add a few NA values
people
mapping = {'a': 'red', 'b': 'red', 'c': 'blue',
           'd': 'blue', 'e': 'red', 'f' : 'orange'}
by_column = people.groupby(mapping, axis=1)
by_column.sum()
map_series = Series(mapping)
map_series
people.groupby(map_series, axis=1).count()
###Output
_____no_output_____
###Markdown
Grouping with functions
###Code
people.groupby(len).sum()
key_list = ['one', 'one', 'one', 'two', 'two']
people.groupby([len, key_list]).min()
###Output
_____no_output_____
###Markdown
Grouping by index levels
###Code
columns = pd.MultiIndex.from_arrays([['US', 'US', 'US', 'JP', 'JP'],
                                     [1, 3, 5, 1, 3]],
                                    names=['cty', 'tenor'])
hier_df = DataFrame(np.random.randn(4, 5), columns=columns)
hier_df
hier_df.groupby(level='cty', axis=1).count()
###Output
_____no_output_____
###Markdown
Data aggregation
###Code
df
grouped = df.groupby('key1')
grouped['data1'].quantile(0.9)
def peak_to_peak(arr):
    return arr.max() - arr.min()
grouped.agg(peak_to_peak)
grouped.describe()
tips = pd.read_csv('ch08/tips.csv')
# Add tip percentage of total bill
tips['tip_pct'] = tips['tip'] / tips['total_bill']
tips[:6]
###Output
_____no_output_____
###Markdown
Column-wise and multiple function application
###Code
grouped = tips.groupby(['sex', 'smoker'])
grouped_pct = grouped['tip_pct']
grouped_pct.agg('mean')
grouped_pct.agg(['mean', 'std', peak_to_peak])
grouped_pct.agg([('foo', 'mean'), ('bar', np.std)])
functions = ['count', 'mean', 'max']
result = grouped['tip_pct', 'total_bill'].agg(functions)
result
result['tip_pct']
ftuples = [('Durchschnitt', 'mean'), ('Abweichung', np.var)]
grouped['tip_pct', 'total_bill'].agg(ftuples)
grouped.agg({'tip' : np.max, 'size' : 'sum'})
grouped.agg({'tip_pct' : ['min', 'max', 'mean', 'std'], 'size' : 'sum'})
###Output
_____no_output_____
###Markdown
Returning aggregated data in "unindexed" form
###Code
tips.groupby(['sex', 'smoker'], as_index=False).mean()
###Output
_____no_output_____
###Markdown
Group-wise operations and transformations
###Code
df
k1_means = df.groupby('key1').mean().add_prefix('mean_')
k1_means
pd.merge(df, k1_means, left_on='key1', right_index=True)
key = ['one', 'two', 'one', 'two', 'one']
people.groupby(key).mean()
people.groupby(key).transform(np.mean)
def demean(arr):
    return arr - arr.mean()
demeaned = people.groupby(key).transform(demean)
demeaned
demeaned.groupby(key).mean()
###Output
_____no_output_____
###Markdown
Apply: General split-apply-combine
###Code
def top(df, n=5, column='tip_pct'):
    return df.sort_index(by=column)[-n:]
top(tips, n=6)
tips.groupby('smoker').apply(top)
tips.groupby(['smoker', 'day']).apply(top, n=1, column='total_bill')
result = tips.groupby('smoker')['tip_pct'].describe()
result
result.unstack('smoker')
f = lambda x: x.describe()
grouped.apply(f)
###Output
_____no_output_____
###Markdown
Suppressing the group keys
###Code
tips.groupby('smoker', group_keys=False).apply(top)
###Output
_____no_output_____
###Markdown
Quantile and bucket analysis
###Code
frame = DataFrame({'data1': np.random.randn(1000),
'data2': np.random.randn(1000)})
factor = pd.cut(frame.data1, 4)
factor[:10]
def get_stats(group):
return {'min': group.min(), 'max': group.max(),
'count': group.count(), 'mean': group.mean()}
grouped = frame.data2.groupby(factor)
grouped.apply(get_stats).unstack()
# NOTE: in the book this output is not sorted, but it is now (the first two rows end up swapped relative to the book)
# Return quantile numbers
grouping = pd.qcut(frame.data1, 10, labels=False)
grouped = frame.data2.groupby(grouping)
grouped.apply(get_stats).unstack()
###Output
_____no_output_____
###Markdown
Example: Filling missing values with group-specific values
###Code
s = Series(np.random.randn(6))
s[::2] = np.nan
s
s.fillna(s.mean())
states = ['Ohio', 'New York', 'Vermont', 'Florida',
'Oregon', 'Nevada', 'California', 'Idaho']
group_key = ['East'] * 4 + ['West'] * 4
data = Series(np.random.randn(8), index=states)
data[['Vermont', 'Nevada', 'Idaho']] = np.nan
data
data.groupby(group_key).mean()
fill_mean = lambda g: g.fillna(g.mean())
data.groupby(group_key).apply(fill_mean)
fill_values = {'East': 0.5, 'West': -1}
fill_func = lambda g: g.fillna(fill_values[g.name])
data.groupby(group_key).apply(fill_func)
###Output
_____no_output_____
###Markdown
Example: Random sampling and permutation
###Code
# Hearts, Spades, Clubs, Diamonds
suits = ['H', 'S', 'C', 'D']
card_val = (range(1, 11) + [10] * 3) * 4
base_names = ['A'] + range(2, 11) + ['J', 'K', 'Q']
cards = []
for suit in ['H', 'S', 'C', 'D']:
cards.extend(str(num) + suit for num in base_names)
deck = Series(card_val, index=cards)
deck[:13]
def draw(deck, n=5):
return deck.take(np.random.permutation(len(deck))[:n])
draw(deck)
get_suit = lambda card: card[-1] # last letter is suit
deck.groupby(get_suit).apply(draw, n=2)
# alternatively
deck.groupby(get_suit, group_keys=False).apply(draw, n=2)
###Output
_____no_output_____
###Markdown
Example: Group weighted average and correlation
###Code
df = DataFrame({'category': ['a', 'a', 'a', 'a', 'b', 'b', 'b', 'b'],
'data': np.random.randn(8),
'weights': np.random.rand(8)})
df
grouped = df.groupby('category')
get_wavg = lambda g: np.average(g['data'], weights=g['weights'])
grouped.apply(get_wavg)
close_px = pd.read_csv('ch09/stock_px.csv', parse_dates=True, index_col=0)
close_px.info()
close_px[-4:]
rets = close_px.pct_change().dropna()
spx_corr = lambda x: x.corrwith(x['SPX'])
by_year = rets.groupby(lambda x: x.year)
by_year.apply(spx_corr)
# Annual correlation of Apple with Microsoft
by_year.apply(lambda g: g['AAPL'].corr(g['MSFT']))
###Output
_____no_output_____
###Markdown
Example: Group-wise linear regression
###Code
import statsmodels.api as sm
def regress(data, yvar, xvars):
Y = data[yvar]
X = data[xvars]
X['intercept'] = 1.
result = sm.OLS(Y, X).fit()
return result.params
by_year.apply(regress, 'AAPL', ['SPX'])
###Output
_____no_output_____
###Markdown
Pivot tables and Cross-tabulation
###Code
tips.pivot_table(index=['sex', 'smoker'])
tips.pivot_table(['tip_pct', 'size'], index=['sex', 'day'],
columns='smoker')
tips.pivot_table(['tip_pct', 'size'], index=['sex', 'day'],
columns='smoker', margins=True)
tips.pivot_table('tip_pct', index=['sex', 'smoker'], columns='day',
aggfunc=len, margins=True)
tips.pivot_table('size', index=['time', 'sex', 'smoker'],
columns='day', aggfunc='sum', fill_value=0)
###Output
_____no_output_____
###Markdown
Cross-tabulations: crosstab
###Code
from StringIO import StringIO
data = """\
Sample Gender Handedness
1 Female Right-handed
2 Male Left-handed
3 Female Right-handed
4 Male Right-handed
5 Male Left-handed
6 Male Right-handed
7 Female Right-handed
8 Female Left-handed
9 Male Right-handed
10 Female Right-handed"""
data = pd.read_table(StringIO(data), sep='\s+')
data
pd.crosstab(data.Gender, data.Handedness, margins=True)
pd.crosstab([tips.time, tips.day], tips.smoker, margins=True)
###Output
_____no_output_____
###Markdown
Example: 2012 Federal Election Commission Database
###Code
fec = pd.read_csv('ch09/P00000001-ALL.csv')
fec.info()
fec.ix[123456]
unique_cands = fec.cand_nm.unique()
unique_cands
unique_cands[2]
parties = {'Bachmann, Michelle': 'Republican',
'Cain, Herman': 'Republican',
'Gingrich, Newt': 'Republican',
'Huntsman, Jon': 'Republican',
'Johnson, Gary Earl': 'Republican',
'McCotter, Thaddeus G': 'Republican',
'Obama, Barack': 'Democrat',
'Paul, Ron': 'Republican',
'Pawlenty, Timothy': 'Republican',
'Perry, Rick': 'Republican',
"Roemer, Charles E. 'Buddy' III": 'Republican',
'Romney, Mitt': 'Republican',
'Santorum, Rick': 'Republican'}
fec.cand_nm[123456:123461]
fec.cand_nm[123456:123461].map(parties)
# Add it as a column
fec['party'] = fec.cand_nm.map(parties)
fec['party'].value_counts()
(fec.contb_receipt_amt > 0).value_counts()
fec = fec[fec.contb_receipt_amt > 0]
fec_mrbo = fec[fec.cand_nm.isin(['Obama, Barack', 'Romney, Mitt'])]
###Output
_____no_output_____
###Markdown
Donation statistics by occupation and employer
###Code
fec.contbr_occupation.value_counts()[:10]
occ_mapping = {
'INFORMATION REQUESTED PER BEST EFFORTS' : 'NOT PROVIDED',
'INFORMATION REQUESTED' : 'NOT PROVIDED',
'INFORMATION REQUESTED (BEST EFFORTS)' : 'NOT PROVIDED',
'C.E.O.': 'CEO'
}
# If no mapping provided, return x
f = lambda x: occ_mapping.get(x, x)
fec.contbr_occupation = fec.contbr_occupation.map(f)
emp_mapping = {
'INFORMATION REQUESTED PER BEST EFFORTS' : 'NOT PROVIDED',
'INFORMATION REQUESTED' : 'NOT PROVIDED',
'SELF' : 'SELF-EMPLOYED',
'SELF EMPLOYED' : 'SELF-EMPLOYED',
}
# If no mapping provided, return x
f = lambda x: emp_mapping.get(x, x)
fec.contbr_employer = fec.contbr_employer.map(f)
by_occupation = fec.pivot_table('contb_receipt_amt',
index='contbr_occupation',
columns='party', aggfunc='sum')
over_2mm = by_occupation[by_occupation.sum(1) > 2000000]
over_2mm
over_2mm.plot(kind='barh')
def get_top_amounts(group, key, n=5):
totals = group.groupby(key)['contb_receipt_amt'].sum()
# Order totals by key in descending order
return totals.order(ascending=False)[-n:]
grouped = fec_mrbo.groupby('cand_nm')
grouped.apply(get_top_amounts, 'contbr_occupation', n=7)
grouped.apply(get_top_amounts, 'contbr_employer', n=10)
###Output
_____no_output_____
###Markdown
Bucketing donation amounts
###Code
bins = np.array([0, 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000])
labels = pd.cut(fec_mrbo.contb_receipt_amt, bins)
labels
grouped = fec_mrbo.groupby(['cand_nm', labels])
grouped.size().unstack(0)
bucket_sums = grouped.contb_receipt_amt.sum().unstack(0)
bucket_sums
normed_sums = bucket_sums.div(bucket_sums.sum(axis=1), axis=0)
normed_sums
normed_sums[:-2].plot(kind='barh', stacked=True)
###Output
_____no_output_____
###Markdown
Donation statistics by state
###Code
grouped = fec_mrbo.groupby(['cand_nm', 'contbr_st'])
totals = grouped.contb_receipt_amt.sum().unstack(0).fillna(0)
totals = totals[totals.sum(1) > 100000]
totals[:10]
percent = totals.div(totals.sum(1), axis=0)
percent[:10]
###Output
_____no_output_____ |
RNN_Captioning_PyTorch.ipynb | ###Markdown
TODO
###Code
# Load the COCO dataset; this returns a dictionary of arrays.
# This notebook uses the dimensionality-reduced features,
# but you can also change the pca_features flag to use the original 4096-dimensional features.
data_root = os.path.expanduser("~/.datasets/cs231n/coco_captioning")
# pca_features=True selects the image features reduced to 512 dimensions with PCA.
data = load_coco_data(base_dir=data_root, pca_features=True)
# Print out all the keys and values from the data dictionary
for k, v in data.items():
if type(v) == np.ndarray:
print(k, type(v), v.shape, v.dtype)
else:
print(k, type(v), len(v))
###Output
train_captions <class 'numpy.ndarray'> (400135, 17) int32
train_image_idxs <class 'numpy.ndarray'> (400135,) int32
val_captions <class 'numpy.ndarray'> (195954, 17) int32
val_image_idxs <class 'numpy.ndarray'> (195954,) int32
train_features <class 'numpy.ndarray'> (82783, 512) float32
val_features <class 'numpy.ndarray'> (40504, 512) float32
idx_to_word <class 'list'> 1004
word_to_idx <class 'dict'> 1004
train_urls <class 'numpy.ndarray'> (82783,) <U63
val_urls <class 'numpy.ndarray'> (40504,) <U63
###Markdown
Inspect the data TODO
###Code
# Sample a minibatch and show the images and captions
batch_size = 3
captions, features, urls = sample_coco_minibatch(data, batch_size=batch_size)
for i, (caption, url) in enumerate(zip(captions, urls)):
plt.imshow(image_from_url(url))
plt.axis('off')
caption_str = decode_captions(caption, data['idx_to_word'])
plt.title(caption_str)
plt.show()
###Output
_____no_output_____
###Markdown
Vanilla RNN: step forward TODO Vanilla RNN: forward TODO Word embedding: forward TODO Temporal Affine layer Temporal Softmax loss RNN for image captioning Overfit small data
###Code
# Small data
small_data = COCODataset(data_root, batch_size=25, max_train=50, pca_features=True, seed=231)
# Device
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
# Model
N, D, W, H = 32, 20, 30, 40
# Random seed
torch.manual_seed(231)
word_to_idx = data['word_to_idx']
V = len(word_to_idx)
T = 13 # max_length
batch_size = N
input_dim = data['train_features'].shape[1]
timesteps = T
hidden_dim = 512
wordvec_dim = 256
vocab_size = V
small_rnn_model = CaptioningRNN(word_to_idx,
input_dim=input_dim,
hidden_dim=hidden_dim,
wordvec_dim=wordvec_dim,
cell_type='rnn').to(device)
###Output
_____no_output_____
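###Markdown
As a rough illustration of the "Vanilla RNN: step forward" item listed above, a single tanh recurrence step in PyTorch could look like the sketch below. This is only a standalone sketch with hypothetical weight names (Wx, Wh, b), not the CaptioningRNN implementation used in this notebook.
###Code
import torch

def rnn_step_forward(x, prev_h, Wx, Wh, b):
    # One vanilla RNN step: next_h = tanh(x @ Wx + prev_h @ Wh + b)
    # x: (N, D) inputs at this time step, prev_h: (N, H) previous hidden state
    return torch.tanh(x @ Wx + prev_h @ Wh + b)

# Tiny smoke test with random tensors
N_, D_, H_ = 4, 10, 8
next_h = rnn_step_forward(torch.randn(N_, D_), torch.randn(N_, H_),
                          torch.randn(D_, H_), torch.randn(H_, H_), torch.randn(H_))
print(next_h.shape)  # torch.Size([4, 8])
###Output
_____no_output_____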
###Markdown
Train the model TODO
###Code
def train(epochs, model, train_data, optimizer, scheduler, device, verbose=True, print_every=10):
"""
Run optimization to train the model.
"""
assert train_data.split == "train"
loss_history = []
num_train = len(train_data)
iterations_per_epoch = max(num_train // train_data.batch_size, 1)
num_iterations = epochs * iterations_per_epoch
for epoch in range(epochs):
for iter in range(iterations_per_epoch):
optimizer.zero_grad()
captions, features, urls = train_data.sample()
captions, features = torch.from_numpy(captions).long().to(device), torch.from_numpy(features).to(device)
# Compute loss and gradient
loss = model(features, captions)
loss_history.append(loss.item())
# Perform a parameter update
loss.backward()
optimizer.step()
t = epoch * iterations_per_epoch + iter
if verbose and t % print_every == 0:
print('(Iteration %d / %d) loss: %f' % (
t + 1, num_iterations, loss_history[-1]))
scheduler.step()
return loss_history
optimizer = torch.optim.Adam(small_rnn_model.parameters(), lr=5e-3)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.95)
num_epochs = 50
loss_history = train(num_epochs, small_rnn_model, small_data, optimizer, scheduler,
device=device, verbose=True, print_every=10)
# Plot the training losses
plt.plot(loss_history)
plt.xlabel('Iteration')
plt.ylabel('Loss')
plt.title('Training loss history')
plt.show()
###Output
(Iteration 1 / 100) loss: 75.367485
(Iteration 11 / 100) loss: 18.323118
(Iteration 21 / 100) loss: 5.468186
(Iteration 31 / 100) loss: 1.920819
(Iteration 41 / 100) loss: 0.644701
(Iteration 51 / 100) loss: 0.362541
(Iteration 61 / 100) loss: 0.209141
(Iteration 71 / 100) loss: 0.197056
(Iteration 81 / 100) loss: 0.151377
(Iteration 91 / 100) loss: 0.155879
|
plots/Box plots/Newtons Cooling Law.ipynb | ###Markdown
Content This notebook interprets the 30 loss values recorded for each learning parameter setting. These values are plotted as box plots to show the distribution of the data. Additionally, a standard deviation graph is shown at the end to demonstrate how close the data points are to the mean. We start with the default learning parameters shown below. learning rate = 0.0001 batch size = 50 time step = 1 number of epochs = 100 We then tune one learning parameter at a time, then combine the best settings together. Learning rate Here we are focusing on the learning rate to see how it affects the distribution of the data
###Code
data = pd.read_csv("../../data/boxplots/newton/lr.csv")
lr_df = pd.DataFrame(data=data)
lr_df.head()
###Output
_____no_output_____
###Markdown
These are the boxplots
###Code
boxplots(3, lr_df)
###Output
_____no_output_____
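###Markdown
The `boxplots` and `averages` helpers used in this notebook are defined earlier in the project. As a labelled assumption (not the project's actual implementation), a helper like `boxplots` could simply draw one box per column of the loss DataFrame, as in the sketch below.
###Code
import matplotlib.pyplot as plt

def boxplots_sketch(cols_per_row, df):
    # Draw one box plot per column of df, laid out cols_per_row plots per row.
    n = len(df.columns)
    rows = (n + cols_per_row - 1) // cols_per_row
    fig, axes = plt.subplots(rows, cols_per_row, squeeze=False,
                             figsize=(4 * cols_per_row, 4 * rows))
    for ax, col in zip(axes.ravel(), df.columns):
        ax.boxplot(df[col].dropna())
        ax.set_title(col)
    plt.tight_layout()
    plt.show()
###Output
_____no_output_____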
###Markdown
averages
###Code
avg_lr = averages(lr_df)
avg_lr
###Output
_____no_output_____
###Markdown
Batch size Dataframe
###Code
data = pd.read_csv("../../data/boxplots/newton/batch_size.csv")
bs_df = pd.DataFrame(data=data)
bs_df
bs_df.head()
###Output
_____no_output_____
###Markdown
boxplots
###Code
boxplots(3, bs_df)
###Output
_____no_output_____
###Markdown
averages
###Code
avg_bs = averages(bs_df)
avg_bs
###Output
_____no_output_____
###Markdown
Number of epochs dataframe
###Code
data = pd.read_csv("../../data/boxplots/newton/num_epoches.csv")
epoch_df = pd.DataFrame(data=data)
epoch_df.head()
###Output
_____no_output_____
###Markdown
boxplots
###Code
boxplots(3, epoch_df)
###Output
_____no_output_____
###Markdown
averages
###Code
avg_epoch = averages(epoch_df)
avg_epoch
###Output
_____no_output_____
###Markdown
Custom This looks at custom pairings, matching parameters with certain other parameters to see the effect on the loss
###Code
data = pd.read_csv("../../data/boxplots/newton/custome.csv")
custome_df = pd.DataFrame(data=data)
custome_df.head()
boxplots(2, custome_df)
avg_custome = averages(custome_df)
avg_custome
###Output
_____no_output_____
###Markdown
This shows that a higher learning rate gives more loss. This also corresponds directly to the number of epochs that are performed: a higher learning rate needs more epochs before the loss begins to converge to a value. This will be studied in more detail later. This table also suggests that a smaller batch size is more effective than a larger batch size, which is shown across the comparisons. Layers We are looking at the effect of the number of layers on the training loss. Dataframe
###Code
data = pd.read_csv("../../data/boxplots/newton/layers.csv")
layers_df = pd.DataFrame(data=data)
layers_df.head()
boxplots(2, layers_df)
avg_layers = averages(layers_df)
avg_layers
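# Sketch (not in the original notebook): the spread of the 30 runs per configuration can
# also be summarised numerically, matching the standard deviation graph mentioned in the intro.
layers_df.std()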
###Output
_____no_output_____ |
docs/source/auto_examples/image/compute_segment_hne.ipynb | ###Markdown
Cell-segmentation for H&E stains================================This example shows how to use processing and segmentation functions tosegment images with H&E stains.For a general example of how to use squidpy.im.segment seesphx\_glr\_auto\_examples\_image\_compute\_segment\_fluo.py.Note that we only provide very basic segmentation models. If you requireprecise cell-segmentation and cell-counts, you might want to add morepre-processing and/or use a pre-trained model to do the segmentation(using squidpy.im.SegmentationCustom).
###Code
import squidpy as sq
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# load H&E stained tissue image and crop to a smaller segment
img = sq.datasets.visium_hne_image_crop()
crop = img.crop_corner(0, 0, size=1000)
###Output
_____no_output_____
###Markdown
Before segmenting the image, we smooth it using squidpy.im.process.
###Code
# smooth image
sq.im.process(crop, layer="image", method="smooth", sigma=4)
# plot the result
fig, axes = plt.subplots(1, 2)
for layer, ax in zip(["image", "image_smooth"], axes):
crop.show(layer, ax=ax)
ax.set_title(layer)
###Output
_____no_output_____
###Markdown
We will use channel 0 to do the segmentation, as this channel containsmost of the nuclei information within an H&E stain. Instead of usingautomatic threshold with [Otsu'smethod](https://en.wikipedia.org/wiki/Otsu%27s_method), we will define amanual fixed threshold. Note that using Otsu's method to determine thethreshold also yields good results.Judging by peak in the histogram and the thresholded example image, athreshold of 0.36, seems to be a good choice for this example.
###Code
fig, axes = plt.subplots(1, 3, figsize=(15, 4))
crop.show("image_smooth", cmap="gray", ax=axes[0])
axes[1].imshow(crop["image_smooth"][:, :, 0] < 0.36)
_ = sns.histplot(np.array(crop["image_smooth"]).flatten(), bins=50, ax=axes[2])
plt.tight_layout()
###Output
_____no_output_____
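###Markdown
For comparison with the manually chosen value of 0.36, the automatic Otsu threshold mentioned above can be computed directly. This is a small sketch that is not part of the tutorial itself and assumes scikit-image is installed.
###Code
# Compute the Otsu threshold on channel 0 of the smoothed image.
from skimage.filters import threshold_otsu

otsu = threshold_otsu(np.asarray(crop["image_smooth"][:, :, 0]))
print(f"Otsu threshold: {otsu:.3f}")
###Output
_____no_output_____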
###Markdown
We use squidpy.im.segment with `method="watershed"` to do thesegmentation. Since, opposite to the fluorescence DAPI stain, in the H&Estain nuclei appear darker, we need to indicate to the model that itshould treat lower-intensity values as foreground. We do this byspecifying the `geq = False` in the `kwargs`.
###Code
sq.im.segment(img=crop, layer="image_smooth", method="watershed", thresh=0.36, geq=False)
###Output
_____no_output_____
###Markdown
The segmented crop is saved in the layer segmented\_watershed. Thisbehavior can be changed with the arguments `copy` and `layer_added`. Theresult of the segmentation is a label image that can be used to extractfeatures like the number of cells from the image.
###Code
print(crop)
print(f"number of segments in crop: {len(np.unique(crop['segmented_watershed']))}")
fig, axes = plt.subplots(1, 2)
crop.show("image", channel=0, ax=axes[0])
_ = axes[0].set_title("H&E")
crop.show("segmented_watershed", cmap="jet", interpolation="none", ax=axes[1])
_ = axes[1].set_title("segmentation")
###Output
_____no_output_____
###Markdown
Cell-segmentation for H&E stains================================This example shows how to use processing and segmentation functions tosegment images with H&E stains.For a general example of how to use `squidpy.im.segment`, see`sphx_glr_auto_examples_image_compute_segment_fluo.py`.Note that we only provide a basic built-in segmentation model. If yourequire precise cell-segmentation and cell-counts, you might want to addmore pre-processing and/or use a pre-trained model to do thesegmentation (using `squidpy.im.SegmentationCustom`).::: {.seealso}- `sphx_glr_auto_examples_image_compute_segment_fluo.py` for an example on how to calculate a cell-segmentation of a fluorescence image.- [Nuclei Segmentation using Cellpose](../../external_tutorials/tutorial_cellpose_segmentation.ipynb) for a tutorial on using Cellpose as a custom segmentation function.- [Nuclei Segmentation using StarDist](../../external_tutorials/tutorial_stardist.ipynb) for a tutorial on using StarDist as a custom segmentation function.:::
###Code
import squidpy as sq
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# load the H&E stained tissue image and crop to a smaller segment
img = sq.datasets.visium_hne_image_crop()
crop = img.crop_corner(0, 0, size=1000)
###Output
_____no_output_____
###Markdown
Before segmenting the image, we smooth it using `squidpy.im.process`.
###Code
# smooth image
sq.im.process(crop, layer="image", method="smooth", sigma=4)
# plot the result
fig, axes = plt.subplots(1, 2)
for layer, ax in zip(["image", "image_smooth"], axes):
crop.show(layer, ax=ax)
ax.set_title(layer)
###Output
_____no_output_____
###Markdown
We will use channel 0 to do the segmentation, as this channel containsmost of the nuclei information within an H&E stain. Instead of usingautomatic threshold with [Otsu\'smethod](https://en.wikipedia.org/wiki/Otsu%27s_method), we will define amanual fixed threshold. Note that using Otsu\'s method to determine thethreshold also yields good results.Judging by peak in the histogram and the thresholded example image, athreshold of 90, seems to be a good choice for this example.
###Code
fig, axes = plt.subplots(1, 3, figsize=(15, 4))
crop.show("image_smooth", cmap="gray", ax=axes[0])
axes[1].imshow(crop["image_smooth"][:, :, 0, 0] < 90)
_ = sns.histplot(np.array(crop["image_smooth"]).flatten(), bins=50, ax=axes[2])
plt.tight_layout()
###Output
_____no_output_____
###Markdown
We use `squidpy.im.segment` with `method = 'watershed'` to do thesegmentation. Since, opposite to the fluorescence DAPI stain, in the H&Estain nuclei appear darker, we need to indicate to the model that itshould treat lower-intensity values as foreground. We do this byspecifying the `geq = False` in the `kwargs`.
###Code
sq.im.segment(img=crop, layer="image_smooth", method="watershed", thresh=90, geq=False)
###Output
_____no_output_____
###Markdown
The segmented crop is saved in the layer[segmented\_watershed]{.title-ref}. This behavior can be changed withthe arguments `copy` and `layer_added`. The result of the segmentationis a label image that can be used to extract features like the number ofcells from the image.
###Code
print(crop)
print(f"Number of segments in crop: {len(np.unique(crop['segmented_watershed']))}")
fig, axes = plt.subplots(1, 2)
crop.show("image", channel=0, ax=axes[0])
_ = axes[0].set_title("H&E")
crop.show("segmented_watershed", cmap="jet", interpolation="none", ax=axes[1])
_ = axes[1].set_title("segmentation")
###Output
_____no_output_____ |
examples/translation-tf.ipynb | ###Markdown
If you're opening this Notebook on colab, you will probably need to install 🤗 Transformers and 🤗 Datasets. Uncomment the following cell and run it. We also use the `sacrebleu` and `sentencepiece` libraries - you may need to install these even if you already have 🤗 Transformers!
###Code
#! pip install transformers[sentencepiece] datasets
#! pip install sacrebleu sentencepiece
#! pip install huggingface_hub
###Output
_____no_output_____
###Markdown
If you're opening this notebook locally, make sure your environment has the latest version of those libraries installed. To be able to share your model with the community and generate results like the one shown in the picture below via the inference API, there are a few more steps to follow. First you have to store your authentication token from the Hugging Face website (sign up [here](https://huggingface.co/join) if you haven't already!) then uncomment the following cell and input your token:
###Code
from huggingface_hub import notebook_login
notebook_login()
###Output
_____no_output_____
###Markdown
Then you need to install Git-LFS and setup Git if you haven't already. Uncomment the following instructions and adapt with your name and email:
###Code
# !apt install git-lfs
# !git config --global user.email "[email protected]"
# !git config --global user.name "Your Name"
###Output
_____no_output_____
###Markdown
Make sure your version of Transformers is at least 4.16.0 since some of the functionality we use was introduced in that version:
###Code
import transformers
print(transformers.__version__)
###Output
4.16.0.dev0
###Markdown
You can find a script version of this notebook to fine-tune your model in a distributed fashion using multiple GPUs or TPUs [here](https://github.com/huggingface/transformers/tree/master/examples/seq2seq). Fine-tuning a model on a translation task In this notebook, we will see how to fine-tune one of the [🤗 Transformers](https://github.com/huggingface/transformers) model for a translation task. We will use the [WMT dataset](http://www.statmt.org/wmt16/), a machine translation dataset composed from a collection of various sources, including news commentaries and parliament proceedings.We will see how to easily load the dataset for this task using 🤗 Datasets and how to fine-tune a model on it using Keras.
###Code
model_checkpoint = "Helsinki-NLP/opus-mt-en-ROMANCE"
###Output
_____no_output_____
###Markdown
This notebook is built to run with any model checkpoint from the [Model Hub](https://huggingface.co/models) as long as that model has a sequence-to-sequence version in the Transformers library. Here we picked the [`Helsinki-NLP/opus-mt-en-romance`](https://huggingface.co/Helsinki-NLP/opus-mt-en-ROMANCE) checkpoint. Loading the dataset We will use the [🤗 Datasets](https://github.com/huggingface/datasets) library to download the data and get the metric we need to use for evaluation (to compare our model to the benchmark). This can be easily done with the functions `load_dataset` and `load_metric`. We use the English/Romanian part of the WMT dataset here.
###Code
from datasets import load_dataset, load_metric
raw_datasets = load_dataset("wmt16", "ro-en")
metric = load_metric("sacrebleu")
###Output
Reusing dataset wmt16 (/home/matt/.cache/huggingface/datasets/wmt16/ro-en/1.0.0/af3c5d746b307726d0de73ebe7f10545361b9cb6f75c83a1734c000e48b6264f)
###Markdown
The `dataset` object itself is [`DatasetDict`](https://huggingface.co/docs/datasets/package_reference/main_classes.htmldatasetdict), which contains one key for the training, validation and test set:
###Code
raw_datasets
###Output
_____no_output_____
###Markdown
To access an actual element, you need to select a split first, then give an index:
###Code
raw_datasets["train"][0]
###Output
_____no_output_____
###Markdown
To get a sense of what the data looks like, the following function will show some examples picked randomly in the dataset.
###Code
import datasets
import random
import pandas as pd
from IPython.display import display, HTML
def show_random_elements(dataset, num_examples=5):
assert num_examples <= len(
dataset
), "Can't pick more elements than there are in the dataset."
picks = []
for _ in range(num_examples):
pick = random.randint(0, len(dataset) - 1)
while pick in picks:
pick = random.randint(0, len(dataset) - 1)
picks.append(pick)
df = pd.DataFrame(dataset[picks])
for column, typ in dataset.features.items():
if isinstance(typ, datasets.ClassLabel):
df[column] = df[column].transform(lambda i: typ.names[i])
display(HTML(df.to_html()))
show_random_elements(raw_datasets["train"])
###Output
_____no_output_____
###Markdown
The metric is an instance of [`datasets.Metric`](https://huggingface.co/docs/datasets/package_reference/main_classes.htmldatasets.Metric):
###Code
metric
###Output
_____no_output_____
###Markdown
You can call its `compute` method with your predictions and labels, which need to be list of decoded strings (list of list for the labels):
###Code
fake_preds = ["hello there", "general kenobi"]
fake_labels = [["hello there"], ["general kenobi"]]
metric.compute(predictions=fake_preds, references=fake_labels)
###Output
_____no_output_____
###Markdown
Preprocessing the data Before we can feed those texts to our model, we need to preprocess them. This is done by a 🤗 Transformers `Tokenizer` which will (as the name indicates) tokenize the inputs (including converting the tokens to their corresponding IDs in the pretrained vocabulary) and put it in a format the model expects, as well as generate the other inputs that model requires.To do all of this, we instantiate our tokenizer with the `AutoTokenizer.from_pretrained` method, which will ensure:- we get a tokenizer that corresponds to the model architecture we want to use,- we download the vocabulary used when pretraining this specific checkpoint.That vocabulary will be cached, so it's not downloaded again the next time we run the cell.
###Code
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
###Output
_____no_output_____
###Markdown
For the mBART tokenizer (like we have here), we need to set the source and target languages (so the texts are preprocessed properly). You can check the language codes [here](https://huggingface.co/facebook/mbart-large-cc25) if you are using this notebook on a different pairs of languages.
###Code
if "mbart" in model_checkpoint:
tokenizer.src_lang = "en-XX"
tokenizer.tgt_lang = "ro-RO"
###Output
_____no_output_____
###Markdown
By default, the call above will use one of the fast tokenizers (backed by Rust) from the 🤗 Tokenizers library. You can directly call this tokenizer on one sentence or a pair of sentences:
###Code
tokenizer("Hello, this is a sentence!")
###Output
_____no_output_____
###Markdown
Depending on the model you selected, you will see different keys in the dictionary returned by the cell above. They don't matter much for what we're doing here (just know they are required by the model we will instantiate later), you can learn more about them in [this tutorial](https://huggingface.co/transformers/preprocessing.html) if you're interested.Instead of one sentence, we can pass along a list of sentences:
###Code
tokenizer(["Hello, this is a sentence!", "This is another sentence."])
###Output
_____no_output_____
###Markdown
To prepare the targets for our model, we need to tokenize them inside the `as_target_tokenizer` context manager. This will make sure the tokenizer uses the special tokens corresponding to the targets:
###Code
with tokenizer.as_target_tokenizer():
print(tokenizer(["Hello, this is a sentence!", "This is another sentence."]))
###Output
{'input_ids': [[14232, 244, 2, 69, 160, 6, 9, 10513, 1101, 84, 0], [13486, 6, 160, 6, 3778, 4853, 10513, 1101, 3, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}
###Markdown
If you are using one of the five T5 checkpoints that require a special prefix to put before the inputs, you should adapt the following cell.
###Code
if model_checkpoint in ["t5-small", "t5-base", "t5-larg", "t5-3b", "t5-11b"]:
prefix = "translate English to Romanian: "
else:
prefix = ""
###Output
_____no_output_____
###Markdown
We can then write the function that will preprocess our samples. We just feed them to the `tokenizer` with the argument `truncation=True`. This will ensure that an input longer than what the selected model can handle will be truncated to the maximum length accepted by the model. The padding will be dealt with later on (in a data collator) so we pad examples to the longest length in the batch and not the whole dataset.
###Code
max_input_length = 128
max_target_length = 128
source_lang = "en"
target_lang = "ro"
def preprocess_function(examples):
inputs = [prefix + ex[source_lang] for ex in examples["translation"]]
targets = [ex[target_lang] for ex in examples["translation"]]
model_inputs = tokenizer(inputs, max_length=max_input_length, truncation=True)
# Setup the tokenizer for targets
with tokenizer.as_target_tokenizer():
labels = tokenizer(targets, max_length=max_target_length, truncation=True)
model_inputs["labels"] = labels["input_ids"]
return model_inputs
###Output
_____no_output_____
###Markdown
This function works with one or several examples. In the case of several examples, the tokenizer will return a list of lists for each key:
###Code
preprocess_function(raw_datasets["train"][:2])
###Output
_____no_output_____
###Markdown
To apply this function on all the pairs of sentences in our dataset, we just use the `map` method of our `dataset` object we created earlier. This will apply the function on all the elements of all the splits in `dataset`, so our training, validation and testing data will be preprocessed in one single command.
###Code
tokenized_datasets = raw_datasets.map(preprocess_function, batched=True)
###Output
Loading cached processed dataset at /home/matt/.cache/huggingface/datasets/wmt16/ro-en/1.0.0/af3c5d746b307726d0de73ebe7f10545361b9cb6f75c83a1734c000e48b6264f/cache-703f402232e7c8b6.arrow
Loading cached processed dataset at /home/matt/.cache/huggingface/datasets/wmt16/ro-en/1.0.0/af3c5d746b307726d0de73ebe7f10545361b9cb6f75c83a1734c000e48b6264f/cache-6fce55dd900db78d.arrow
Loading cached processed dataset at /home/matt/.cache/huggingface/datasets/wmt16/ro-en/1.0.0/af3c5d746b307726d0de73ebe7f10545361b9cb6f75c83a1734c000e48b6264f/cache-c212144f77499ba4.arrow
###Markdown
Even better, the results are automatically cached by the 🤗 Datasets library to avoid spending time on this step the next time you run your notebook. The 🤗 Datasets library is normally smart enough to detect when the function you pass to map has changed (and thus requires to not use the cache data). For instance, it will properly detect if you change the task in the first cell and rerun the notebook. 🤗 Datasets warns you when it uses cached files, you can pass `load_from_cache_file=False` in the call to `map` to not use the cached files and force the preprocessing to be applied again.Note that we passed `batched=True` to encode the texts by batches together. This is to leverage the full benefit of the fast tokenizer we loaded earlier, which will use multi-threading to treat the texts in a batch concurrently. Fine-tuning the model Now that our data is ready, we can download the pretrained model and fine-tune it. Since our task is of the sequence-to-sequence kind, we use the `AutoModelForSeq2SeqLM` class. Like with the tokenizer, the `from_pretrained` method will download and cache the model for us.
###Code
from transformers import TFAutoModelForSeq2SeqLM, DataCollatorForSeq2Seq
model = TFAutoModelForSeq2SeqLM.from_pretrained(model_checkpoint)
###Output
2022-01-27 17:20:20.831271: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:936] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2022-01-27 17:20:20.838671: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:936] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2022-01-27 17:20:20.839963: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:936] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2022-01-27 17:20:20.841512: I tensorflow/core/platform/cpu_feature_guard.cc:151] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
2022-01-27 17:20:20.844184: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:936] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2022-01-27 17:20:20.844852: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:936] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2022-01-27 17:20:20.845497: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:936] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2022-01-27 17:20:21.184971: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:936] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2022-01-27 17:20:21.185660: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:936] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2022-01-27 17:20:21.186417: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:936] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2022-01-27 17:20:21.187043: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 21665 MB memory: -> device: 0, name: GeForce RTX 3090, pci bus id: 0000:21:00.0, compute capability: 8.6
2022-01-27 17:20:22.278352: I tensorflow/stream_executor/cuda/cuda_blas.cc:1786] TensorFloat-32 will be used for the matrix multiplication. This will only be logged once.
All model checkpoint layers were used when initializing TFMarianMTModel.
All the layers of TFMarianMTModel were initialized from the model checkpoint at Helsinki-NLP/opus-mt-en-ROMANCE.
If your task is similar to the task the model of the checkpoint was trained on, you can already use TFMarianMTModel for predictions without further training.
###Markdown
Note that we don't get a warning like in our classification example. This means we used all the weights of the pretrained model and there is no randomly initialized head in this case. Next we set some parameters like the learning rate and the `batch_size`, and customize the weight decay. The last two lines set everything up so we can push the model to the [Hub](https://huggingface.co/models) at the end of training. Remove them if you didn't follow the installation steps at the top of the notebook; otherwise you can change the value of `push_to_hub_model_id` to something you would prefer.
###Code
batch_size = 16
learning_rate = 2e-5
weight_decay = 0.01
num_train_epochs = 1
model_name = model_checkpoint.split("/")[-1]
push_to_hub_model_id = f"{model_name}-finetuned-{source_lang}-to-{target_lang}"
###Output
_____no_output_____
###Markdown
Then, we need a special kind of data collator, which will not only pad the inputs to the maximum length in the batch, but also the labels. Note that our data collators are multi-framework, so make sure you set `return_tensors='tf'` so you get `tf.Tensor` objects back and not something else!
###Code
data_collator = DataCollatorForSeq2Seq(tokenizer, model=model, return_tensors="tf")
###Output
_____no_output_____
###Markdown
Now we convert our input datasets to TF datasets using this collator. There's a built-in method for this: `to_tf_dataset()`. Make sure to specify the collator we just created as our `collate_fn`!Computing the `BLEU` metric can be slow because it requires the model to generate outputs token-by-token. To speed things up, we make a `generation_dataset` that contains only 200 examples from the validation dataset, and use this for `BLEU` computations.
###Code
train_dataset = tokenized_datasets["train"].to_tf_dataset(
batch_size=batch_size,
columns=["input_ids", "attention_mask", "labels"],
shuffle=True,
collate_fn=data_collator,
)
validation_dataset = tokenized_datasets["validation"].to_tf_dataset(
batch_size=batch_size,
columns=["input_ids", "attention_mask", "labels"],
shuffle=False,
collate_fn=data_collator,
)
generation_dataset = (
tokenized_datasets["validation"]
.shuffle()
.select(list(range(48)))
.to_tf_dataset(
batch_size=8,
columns=["input_ids", "attention_mask", "labels"],
shuffle=False,
collate_fn=data_collator,
)
)
###Output
Loading cached shuffled indices for dataset at /home/matt/.cache/huggingface/datasets/wmt16/ro-en/1.0.0/af3c5d746b307726d0de73ebe7f10545361b9cb6f75c83a1734c000e48b6264f/cache-8a74e7f4b1ceddbf.arrow
###Markdown
Now we initialize our loss and optimizer and compile the model. Note that most Transformers models compute loss internally, so we can just leave the loss argument blank to use the internal loss instead. For the optimizer, we can use the `AdamWeightDecay` optimizer in the Transformer library.
###Code
from transformers import AdamWeightDecay
import tensorflow as tf
optimizer = AdamWeightDecay(learning_rate=learning_rate, weight_decay_rate=weight_decay)
model.compile(optimizer=optimizer)
###Output
No loss specified in compile() - the model's internal loss computation will be used as the loss. Don't panic - this is a common way to train TensorFlow models in Transformers! Please ensure your labels are passed as keys in the input dict so that they are accessible to the model during the forward pass. To disable this behaviour, please pass a loss argument, or explicitly pass loss=None if you do not want your model to compute a loss.
###Markdown
Now we can train our model. We can also add a few optional callbacks here, which you can remove if they aren't useful to you. In no particular order, these are:- PushToHubCallback will sync up our model with the Hub - this allows us to resume training from other machines, share the model after training is finished, and even test the model's inference quality midway through training!- TensorBoard is a built-in Keras callback that logs TensorBoard metrics.- KerasMetricCallback is a callback for computing advanced metrics. There are a number of common metrics in NLP like ROUGE which are hard to fit into your compiled training loop because they depend on decoding predictions and labels back to strings with the tokenizer, and calling arbitrary Python functions to compute the metric. The KerasMetricCallback will wrap a metric function, outputting metrics as training progresses.If this is the first time you've seen `KerasMetricCallback`, it's worth explaining what exactly is going on here. The callback takes two main arguments - a `metric_fn` and an `eval_dataset`. It then iterates over the `eval_dataset` and collects the model's outputs for each sample, before passing the `list` of predictions and the associated `list` of labels to the user-defined `metric_fn`. If the `predict_with_generate` argument is `True`, then it will call `model.generate()` for each input sample instead of `model.predict()` - this is useful for metrics that expect generated text from the model, like `ROUGE` and `BLEU`.This callback allows complex metrics to be computed each epoch that would not function as a standard Keras Metric. Metric values are printed each epoch, and can be used by other callbacks like `TensorBoard` or `EarlyStopping`.
###Code
from transformers.keras_callbacks import KerasMetricCallback
import numpy as np
def metric_fn(eval_predictions):
preds, labels = eval_predictions
prediction_lens = [
np.count_nonzero(pred != tokenizer.pad_token_id) for pred in preds
]
decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
# We use -100 to mask labels - replace it with the tokenizer pad token when decoding
# so that no output is emitted for these
labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
# Some simple post-processing
decoded_preds = [pred.strip() for pred in decoded_preds]
decoded_labels = [[label.strip()] for label in decoded_labels]
result = metric.compute(predictions=decoded_preds, references=decoded_labels)
result = {"bleu": result["score"]}
result["gen_len"] = np.mean(prediction_lens)
return result
metric_callback = KerasMetricCallback(
metric_fn=metric_fn, eval_dataset=generation_dataset, predict_with_generate=True
)
###Output
WARNING:root:No label_cols specified for KerasMetricCallback, assuming you want the 'labels' key.
###Markdown
With the metric callback ready, now we can specify the other callbacks and fit our model:
###Code
from transformers.keras_callbacks import PushToHubCallback
from tensorflow.keras.callbacks import TensorBoard
tensorboard_callback = TensorBoard(log_dir="./translation_model_save/logs")
push_to_hub_callback = PushToHubCallback(
output_dir="./translation_model_save",
tokenizer=tokenizer,
hub_model_id=push_to_hub_model_id,
)
# callbacks = [tensorboard_callback, metric_callback, push_to_hub_callback]
callbacks = [metric_callback, tensorboard_callback, push_to_hub_callback]
model.fit(
train_dataset, validation_data=validation_dataset, epochs=1, callbacks=callbacks
)
###Output
/home/matt/PycharmProjects/notebooks/examples/translation_model_save is already a clone of https://huggingface.co/Rocketknight1/opus-mt-en-ROMANCE-finetuned-en-to-ro. Make sure you pull the latest changes with `repo.git_pull()`.
WARNING:huggingface_hub.repository:/home/matt/PycharmProjects/notebooks/examples/translation_model_save is already a clone of https://huggingface.co/Rocketknight1/opus-mt-en-ROMANCE-finetuned-en-to-ro. Make sure you pull the latest changes with `repo.git_pull()`.
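###Markdown
As a final sanity check (a sketch that is not part of the original notebook), we can translate a single sentence with the fine-tuned model. The multilingual en-ROMANCE checkpoint is normally given a target-language tag such as `>>ro<<` at the start of the source text, so we keep that convention here.
###Code
# Hypothetical quick check of the fine-tuned model on one sentence
sample = ">>ro<< The hotel is close to the train station."
tokenized = tokenizer([sample], return_tensors="tf")
generated_tokens = model.generate(**tokenized)
print(tokenizer.batch_decode(generated_tokens, skip_special_tokens=True))
###Output
_____no_output_____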
###Markdown
If you're opening this Notebook on colab, you will probably need to install 🤗 Transformers and 🤗 Datasets. Uncomment the following cell and run it.
###Code
#! pip install git+https://github.com/huggingface/transformers.git
#! pip install git+https://github.com/huggingface/datasets.git
#! pip install sacrebleu sentencepiece
###Output
_____no_output_____
###Markdown
If you're opening this notebook locally, make sure your environment has the latest version of those libraries installed. To be able to share your model with the community and generate results like the one shown in the picture below via the inference API, there are a few more steps to follow. First you have to store your authentication token from the Hugging Face website (sign up [here](https://huggingface.co/join) if you haven't already!) then uncomment the following cell and input your username and password (this only works on Colab; in a regular notebook, you need to do this in a terminal):
###Code
from huggingface_hub import notebook_login
notebook_login()
###Output
_____no_output_____
###Markdown
Then you need to install Git-LFS and setup Git if you haven't already. Uncomment the following instructions and adapt with your name and email:
###Code
# !apt install git-lfs
# !git config --global user.email "[email protected]"
# !git config --global user.name "Your Name"
###Output
_____no_output_____
###Markdown
Make sure your version of Transformers is at least 4.8.1 since the functionality was introduced in that version:
###Code
import transformers
print(transformers.__version__)
###Output
4.15.0.dev0
###Markdown
You can find a script version of this notebook to fine-tune your model in a distributed fashion using multiple GPUs or TPUs [here](https://github.com/huggingface/transformers/tree/master/examples/seq2seq). Fine-tuning a model on a translation task In this notebook, we will see how to fine-tune one of the [🤗 Transformers](https://github.com/huggingface/transformers) model for a translation task. We will use the [WMT dataset](http://www.statmt.org/wmt16/), a machine translation dataset composed from a collection of various sources, including news commentaries and parliament proceedings.We will see how to easily load the dataset for this task using 🤗 Datasets and how to fine-tune a model on it using Keras.
###Code
model_checkpoint = "Helsinki-NLP/opus-mt-en-ROMANCE"
###Output
_____no_output_____
###Markdown
This notebook is built to run with any model checkpoint from the [Model Hub](https://huggingface.co/models) as long as that model has a sequence-to-sequence version in the Transformers library. Here we picked the [`Helsinki-NLP/opus-mt-en-romance`](https://huggingface.co/Helsinki-NLP/opus-mt-en-ROMANCE) checkpoint. Loading the dataset We will use the [🤗 Datasets](https://github.com/huggingface/datasets) library to download the data and get the metric we need to use for evaluation (to compare our model to the benchmark). This can be easily done with the functions `load_dataset` and `load_metric`. We use the English/Romanian part of the WMT dataset here.
###Code
from datasets import load_dataset, load_metric
raw_datasets = load_dataset("wmt16", "ro-en")
metric = load_metric("sacrebleu")
###Output
Reusing dataset wmt16 (/home/matt/.cache/huggingface/datasets/wmt16/ro-en/1.0.0/af3c5d746b307726d0de73ebe7f10545361b9cb6f75c83a1734c000e48b6264f)
###Markdown
The `dataset` object itself is [`DatasetDict`](https://huggingface.co/docs/datasets/package_reference/main_classes.htmldatasetdict), which contains one key for the training, validation and test set:
###Code
raw_datasets
###Output
_____no_output_____
###Markdown
To access an actual element, you need to select a split first, then give an index:
###Code
raw_datasets["train"][0]
###Output
_____no_output_____
###Markdown
To get a sense of what the data looks like, the following function will show some examples picked randomly in the dataset.
###Code
import datasets
import random
import pandas as pd
from IPython.display import display, HTML
def show_random_elements(dataset, num_examples=5):
assert num_examples <= len(
dataset
), "Can't pick more elements than there are in the dataset."
picks = []
for _ in range(num_examples):
pick = random.randint(0, len(dataset) - 1)
while pick in picks:
pick = random.randint(0, len(dataset) - 1)
picks.append(pick)
df = pd.DataFrame(dataset[picks])
for column, typ in dataset.features.items():
if isinstance(typ, datasets.ClassLabel):
df[column] = df[column].transform(lambda i: typ.names[i])
display(HTML(df.to_html()))
show_random_elements(raw_datasets["train"])
###Output
_____no_output_____
###Markdown
The metric is an instance of [`datasets.Metric`](https://huggingface.co/docs/datasets/package_reference/main_classes.htmldatasets.Metric):
###Code
metric
###Output
_____no_output_____
###Markdown
You can call its `compute` method with your predictions and labels, which need to be list of decoded strings (list of list for the labels):
###Code
fake_preds = ["hello there", "general kenobi"]
fake_labels = [["hello there"], ["general kenobi"]]
metric.compute(predictions=fake_preds, references=fake_labels)
###Output
_____no_output_____
###Markdown
Preprocessing the data Before we can feed those texts to our model, we need to preprocess them. This is done by a 🤗 Transformers `Tokenizer` which will (as the name indicates) tokenize the inputs (including converting the tokens to their corresponding IDs in the pretrained vocabulary) and put it in a format the model expects, as well as generate the other inputs that model requires.To do all of this, we instantiate our tokenizer with the `AutoTokenizer.from_pretrained` method, which will ensure:- we get a tokenizer that corresponds to the model architecture we want to use,- we download the vocabulary used when pretraining this specific checkpoint.That vocabulary will be cached, so it's not downloaded again the next time we run the cell.
###Code
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
###Output
_____no_output_____
###Markdown
For the mBART tokenizer (like we have here), we need to set the source and target languages (so the texts are preprocessed properly). You can check the language codes [here](https://huggingface.co/facebook/mbart-large-cc25) if you are using this notebook on a different pairs of languages.
###Code
if "mbart" in model_checkpoint:
tokenizer.src_lang = "en-XX"
tokenizer.tgt_lang = "ro-RO"
###Output
_____no_output_____
###Markdown
By default, the call above will use one of the fast tokenizers (backed by Rust) from the 🤗 Tokenizers library. You can directly call this tokenizer on one sentence or a pair of sentences:
###Code
tokenizer("Hello, this one sentence!")
###Output
_____no_output_____
###Markdown
Depending on the model you selected, you will see different keys in the dictionary returned by the cell above. They don't matter much for what we're doing here (just know they are required by the model we will instantiate later), you can learn more about them in [this tutorial](https://huggingface.co/transformers/preprocessing.html) if you're interested.Instead of one sentence, we can pass along a list of sentences:
###Code
tokenizer(["Hello, this one sentence!", "This is another sentence."])
###Output
_____no_output_____
###Markdown
To prepare the targets for our model, we need to tokenize them inside the `as_target_tokenizer` context manager. This will make sure the tokenizer uses the special tokens corresponding to the targets:
###Code
with tokenizer.as_target_tokenizer():
print(tokenizer(["Hello, this one sentence!", "This is another sentence."]))
###Output
{'input_ids': [[14232, 244, 2, 69, 49, 420, 10513, 1101, 84, 0], [13486, 6, 160, 6, 3778, 4853, 10513, 1101, 3, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}
###Markdown
If you are using one of the five T5 checkpoints that require a special prefix to put before the inputs, you should adapt the following cell.
###Code
if model_checkpoint in ["t5-small", "t5-base", "t5-large", "t5-3b", "t5-11b"]:
prefix = "translate English to Romanian: "
else:
prefix = ""
###Output
_____no_output_____
###Markdown
We can then write the function that will preprocess our samples. We just feed them to the `tokenizer` with the argument `truncation=True`. This ensures that any input longer than what the selected model can handle is truncated to the maximum length the model accepts. The padding will be dealt with later on (in a data collator), so examples are padded to the longest length in the batch rather than to the longest length in the whole dataset.
###Code
max_input_length = 128
max_target_length = 128
source_lang = "en"
target_lang = "ro"
def preprocess_function(examples):
inputs = [prefix + ex[source_lang] for ex in examples["translation"]]
targets = [ex[target_lang] for ex in examples["translation"]]
model_inputs = tokenizer(inputs, max_length=max_input_length, truncation=True)
# Setup the tokenizer for targets
with tokenizer.as_target_tokenizer():
labels = tokenizer(targets, max_length=max_target_length, truncation=True)
model_inputs["labels"] = labels["input_ids"]
return model_inputs
###Output
_____no_output_____
###Markdown
This function works with one or several examples. In the case of several examples, the tokenizer will return a list of lists for each key:
###Code
preprocess_function(raw_datasets["train"][:2])
###Output
_____no_output_____
###Markdown
To apply this function on all the pairs of sentences in our dataset, we just use the `map` method of our `dataset` object we created earlier. This will apply the function on all the elements of all the splits in `dataset`, so our training, validation and testing data will be preprocessed in one single command.
###Code
tokenized_datasets = raw_datasets.map(preprocess_function, batched=True)
###Output
_____no_output_____
###Markdown
Even better, the results are automatically cached by the 🤗 Datasets library to avoid spending time on this step the next time you run your notebook. The 🤗 Datasets library is normally smart enough to detect when the function you pass to `map` has changed (and thus when the cached data should not be reused). For instance, it will properly detect if you change the task in the first cell and rerun the notebook. 🤗 Datasets warns you when it uses cached files; you can pass `load_from_cache_file=False` in the call to `map` to ignore the cached files and force the preprocessing to be applied again. Note that we passed `batched=True` to encode the texts in batches. This leverages the full benefit of the fast tokenizer we loaded earlier, which will use multi-threading to treat the texts in a batch concurrently. Fine-tuning the model Now that our data is ready, we can download the pretrained model and fine-tune it. Since our task is of the sequence-to-sequence kind, we use the `TFAutoModelForSeq2SeqLM` class. Like with the tokenizer, the `from_pretrained` method will download and cache the model for us.
###Code
from transformers import TFAutoModelForSeq2SeqLM, DataCollatorForSeq2Seq
model = TFAutoModelForSeq2SeqLM.from_pretrained(model_checkpoint)
###Output
2021-12-16 15:27:12.199280: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2021-12-16 15:27:12.205905: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2021-12-16 15:27:12.206911: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2021-12-16 15:27:12.208815: I tensorflow/core/platform/cpu_feature_guard.cc:142] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
2021-12-16 15:27:12.211878: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2021-12-16 15:27:12.212625: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2021-12-16 15:27:12.213264: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2021-12-16 15:27:12.537211: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2021-12-16 15:27:12.537964: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2021-12-16 15:27:12.538600: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2021-12-16 15:27:12.539246: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1510] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 21788 MB memory: -> device: 0, name: GeForce RTX 3090, pci bus id: 0000:21:00.0, compute capability: 8.6
2021-12-16 15:27:13.702916: I tensorflow/stream_executor/cuda/cuda_blas.cc:1760] TensorFloat-32 will be used for the matrix multiplication. This will only be logged once.
All model checkpoint layers were used when initializing TFMarianMTModel.
All the layers of TFMarianMTModel were initialized from the model checkpoint at Helsinki-NLP/opus-mt-en-ROMANCE.
If your task is similar to the task the model of the checkpoint was trained on, you can already use TFMarianMTModel for predictions without further training.
###Markdown
Note that we don't get a warning like in our classification example. This means we used all the weights of the pretrained model and there is no randomly initialized head in this case. Next we set some parameters like the learning rate and the `batch_size`, and customize the weight decay. The last two variables are there to set everything up so we can push the model to the [Hub](https://huggingface.co/models) at the end of training. Remove both of them if you didn't follow the installation steps at the top of the notebook; otherwise, you can change the value of `push_to_hub_model_id` to something you would prefer.
###Code
batch_size = 16
learning_rate = 2e-5
weight_decay = 0.01
num_train_epochs = 1
model_name = model_checkpoint.split("/")[-1]
push_to_hub_model_id = f"{model_name}-finetuned-{source_lang}-to-{target_lang}"
###Output
_____no_output_____
###Markdown
Then, we need a special kind of data collator, which will pad not only the inputs but also the labels to the maximum length in the batch. Note that our data collators are multi-framework, so make sure you set `return_tensors='tf'` so you get `tf.Tensor` objects back and not something else!
###Code
data_collator = DataCollatorForSeq2Seq(tokenizer, model=model, return_tensors="tf")
###Output
_____no_output_____
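###Markdown
Not part of the original notebook: a minimal sketch of what the collator does, using only the objects defined above. We take two tokenized training examples, keep just the tensorizable columns (the same column names used in the cells below), and check that the batch comes back padded to a common shape.
###Code
# Sketch only: show that the collator pads a small batch of variable-length examples.
features = [
    {k: v for k, v in tokenized_datasets["train"][i].items()
     if k in ["input_ids", "attention_mask", "labels"]}
    for i in range(2)
]
batch = data_collator(features)
print({k: v.shape for k, v in batch.items()})
###Output
_____no_output_____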
###Markdown
Now we convert our input datasets to TF datasets using this collator. There's a built-in method for this: `to_tf_dataset()`. Make sure to specify the collator we just created as our `collate_fn`!
###Code
train_dataset = tokenized_datasets["train"].to_tf_dataset(
batch_size=batch_size,
columns=["input_ids", "attention_mask", "labels"],
shuffle=True,
collate_fn=data_collator,
)
validation_dataset = tokenized_datasets["validation"].to_tf_dataset(
batch_size=batch_size,
columns=["input_ids", "attention_mask", "labels"],
shuffle=False,
collate_fn=data_collator,
)
###Output
_____no_output_____
###Markdown
Now we initialize our optimizer and compile the model. Note that most Transformers models compute the loss internally, so we can just leave the loss argument blank to use the internal loss instead. For the optimizer, we can use the `AdamWeightDecay` optimizer from the Transformers library.
###Code
from transformers import AdamWeightDecay
import tensorflow as tf
optimizer = AdamWeightDecay(learning_rate=learning_rate, weight_decay_rate=weight_decay)
model.compile(optimizer=optimizer)
###Output
No loss specified in compile() - the model's internal loss computation will be used as the loss. Don't panic - this is a common way to train TensorFlow models in Transformers! Please ensure your labels are passed as the 'labels' key of the input dict so that they are accessible to the model during the forward pass. To disable this behaviour, please pass a loss argument, or explicitly pass loss=None if you do not want your model to compute a loss.
###Markdown
Now we can train our model. We can also add a callback to sync up our model with the Hub - this allows us to resume training from other machines and even test the model's inference quality midway through training! If you use it, make sure to change the `username` to your own. If you don't want to do this, simply remove the callbacks argument in the call to `fit()`.
###Code
from transformers.keras_callbacks import PushToHubCallback
username = "Rocketknight1"
callback = PushToHubCallback(
output_dir="./translation_model_save",
tokenizer=tokenizer,
hub_model_id=f"{username}/{push_to_hub_model_id}",
)
model.fit(
train_dataset, validation_data=validation_dataset, epochs=1, callbacks=[callback]
)
###Output
/home/matt/miniconda3/envs/tensorflow26/lib/python3.9/site-packages/huggingface_hub/hf_api.py:715: FutureWarning: `create_repo` now takes `token` as an optional positional argument. Be sure to adapt your code!
warnings.warn(
Cloning https://huggingface.co/Rocketknight1/opus-mt-en-ROMANCE-finetuned-en-to-ro into local empty directory.
###Markdown
Now we've finished training our model, but the loss value can be a little hard to interpret. Let's use the metric we loaded earlier to score our model's outputs on the validation set. Note that because the sequence length is variable, we can't use `model.predict()` to get predictions for the whole dataset at once, as the outputs from each batch cannot be concatenated together. Instead, let's process the validation set a batch at a time, converting the predicted outputs to strings so that the metric can judge them.
###Code
import numpy as np
all_predictions = []
all_labels = []
prediction_lens = []
for batch in validation_dataset:
labels = batch["labels"]
preds = model(batch)["logits"]
token_preds = np.argmax(preds, axis=-1)
decoded_preds = tokenizer.batch_decode(token_preds, skip_special_tokens=True)
prediction_lens.extend(
[np.count_nonzero(pred != tokenizer.pad_token_id) for pred in token_preds]
)
# We use -100 to mask labels - replace it with the tokenizer pad token when decoding
# so that no output is emitted for these
labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
# Some simple post-processing
decoded_preds = [pred.strip() for pred in decoded_preds]
decoded_labels = [[label.strip()] for label in decoded_labels]
all_predictions.extend(decoded_preds)
all_labels.extend(decoded_labels)
result = metric.compute(predictions=all_predictions, references=all_labels)
result = {"bleu": result["score"]}
result["gen_len"] = np.mean(prediction_lens)
result = {k: round(v, 4) for k, v in result.items()}
print(result)
###Output
{'bleu': 13.3242, 'gen_len': 81.8009}
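###Markdown
Not part of the original notebook: a quick qualitative check, assuming the objects defined above (`model`, `tokenizer`, `max_target_length`). The English sentence is made up; we let the model generate a translation and decode it back to text.
###Code
# Sketch only: translate a single made-up sentence with the fine-tuned model.
sample_text = "The quick brown fox jumps over the lazy dog."
tokenized = tokenizer([sample_text], return_tensors="tf")
generated_ids = model.generate(**tokenized, max_length=max_target_length)
print(tokenizer.batch_decode(generated_ids, skip_special_tokens=True))
###Output
_____no_output_____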
###Markdown
If you're opening this notebook on Colab, you will probably need to install 🤗 Transformers and 🤗 Datasets. Uncomment the following cell and run it.
###Code
#! pip install git+https://github.com/huggingface/transformers.git
#! pip install git+https://github.com/huggingface/datasets.git
#! pip install sacrebleu sentencepiece
###Output
_____no_output_____
###Markdown
If you're opening this notebook locally, make sure your environment has the latest version of those libraries installed. To be able to share your model with the community and generate results like the one shown in the picture below via the inference API, there are a few more steps to follow. First you have to store your authentication token from the Hugging Face website (sign up [here](https://huggingface.co/join) if you haven't already!), then uncomment the following cell and input your username and password (this only works on Colab; in a regular notebook, you need to do this in a terminal):
###Code
from huggingface_hub import notebook_login
notebook_login()
###Output
Login successful
Your token has been saved to /home/matt/.huggingface/token
###Markdown
Then you need to install Git-LFS and set up Git if you haven't already. Uncomment the following instructions and adapt them with your name and email:
###Code
# !apt install git-lfs
# !git config --global user.email "[email protected]"
# !git config --global user.name "Your Name"
###Output
_____no_output_____
###Markdown
Make sure your version of Transformers is at least 4.8.1 since the functionality was introduced in that version:
###Code
import transformers
print(transformers.__version__)
###Output
4.11.0.dev0
###Markdown
You can find a script version of this notebook to fine-tune your model in a distributed fashion using multiple GPUs or TPUs [here](https://github.com/huggingface/transformers/tree/master/examples/seq2seq). Fine-tuning a model on a translation task In this notebook, we will see how to fine-tune one of the [🤗 Transformers](https://github.com/huggingface/transformers) models for a translation task. We will use the [WMT dataset](http://www.statmt.org/wmt16/), a machine translation dataset composed from a collection of various sources, including news commentaries and parliament proceedings. We will see how to easily load the dataset for this task using 🤗 Datasets and how to fine-tune a model on it using Keras.
###Code
model_checkpoint = "Helsinki-NLP/opus-mt-en-ROMANCE"
###Output
_____no_output_____
###Markdown
This notebook is built to run with any model checkpoint from the [Model Hub](https://huggingface.co/models) as long as that model has a sequence-to-sequence version in the Transformers library. Here we picked the [`Helsinki-NLP/opus-mt-en-romance`](https://huggingface.co/Helsinki-NLP/opus-mt-en-ROMANCE) checkpoint. Loading the dataset We will use the [🤗 Datasets](https://github.com/huggingface/datasets) library to download the data and get the metric we need to use for evaluation (to compare our model to the benchmark). This can be easily done with the functions `load_dataset` and `load_metric`. We use the English/Romanian part of the WMT dataset here.
###Code
from datasets import load_dataset, load_metric
raw_datasets = load_dataset("wmt16", "ro-en")
metric = load_metric("sacrebleu")
###Output
Reusing dataset wmt16 (/home/matt/.cache/huggingface/datasets/wmt16/ro-en/1.0.0/0d9fb3e814712c785176ad8cdb9f465fbe6479000ee6546725db30ad8a8b5f8a)
###Markdown
The `dataset` object itself is a [`DatasetDict`](https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasetdict), which contains one key each for the training, validation and test sets:
###Code
raw_datasets
###Output
_____no_output_____
###Markdown
To access an actual element, you need to select a split first, then give an index:
###Code
raw_datasets["train"][0]
###Output
_____no_output_____
###Markdown
To get a sense of what the data looks like, the following function will show some examples picked randomly in the dataset.
###Code
import datasets
import random
import pandas as pd
from IPython.display import display, HTML
def show_random_elements(dataset, num_examples=5):
assert num_examples <= len(
dataset
), "Can't pick more elements than there are in the dataset."
picks = []
for _ in range(num_examples):
pick = random.randint(0, len(dataset) - 1)
while pick in picks:
pick = random.randint(0, len(dataset) - 1)
picks.append(pick)
df = pd.DataFrame(dataset[picks])
for column, typ in dataset.features.items():
if isinstance(typ, datasets.ClassLabel):
df[column] = df[column].transform(lambda i: typ.names[i])
display(HTML(df.to_html()))
show_random_elements(raw_datasets["train"])
###Output
_____no_output_____
###Markdown
The metric is an instance of [`datasets.Metric`](https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Metric):
###Code
metric
###Output
_____no_output_____
###Markdown
You can call its `compute` method with your predictions and labels, which need to be lists of decoded strings (a list of lists for the labels):
###Code
fake_preds = ["hello there", "general kenobi"]
fake_labels = [["hello there"], ["general kenobi"]]
metric.compute(predictions=fake_preds, references=fake_labels)
###Output
_____no_output_____
###Markdown
Preprocessing the data Before we can feed those texts to our model, we need to preprocess them. This is done by a 🤗 Transformers `Tokenizer`, which will (as the name indicates) tokenize the inputs (including converting the tokens to their corresponding IDs in the pretrained vocabulary) and put them in a format the model expects, as well as generate the other inputs the model requires. To do all of this, we instantiate our tokenizer with the `AutoTokenizer.from_pretrained` method, which will ensure: - we get a tokenizer that corresponds to the model architecture we want to use, - we download the vocabulary used when pretraining this specific checkpoint. That vocabulary will be cached, so it's not downloaded again the next time we run the cell.
###Code
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
###Output
/home/matt/miniconda3/envs/tensorflow26/lib/python3.9/site-packages/transformers/configuration_utils.py:336: UserWarning: Passing `gradient_checkpointing` to a config initialization is deprecated and will be removed in v5 Transformers. Using `model.gradient_checkpointing_enable()` instead, or if you are using the `Trainer` API, pass `gradient_checkpointing=True` in your `TrainingArguments`.
warnings.warn(
###Markdown
If you are using an mBART tokenizer (not the case here, since our checkpoint loads a Marian model), you need to set the source and target languages so the texts are preprocessed properly. You can check the language codes [here](https://huggingface.co/facebook/mbart-large-cc25) if you are using this notebook on a different pair of languages.
###Code
if "mbart" in model_checkpoint:
tokenizer.src_lang = "en-XX"
tokenizer.tgt_lang = "ro-RO"
###Output
_____no_output_____
###Markdown
By default, the call above will use one of the fast tokenizers (backed by Rust) from the 🤗 Tokenizers library. You can directly call this tokenizer on one sentence or a pair of sentences:
###Code
tokenizer("Hello, this one sentence!")
###Output
_____no_output_____
###Markdown
Depending on the model you selected, you will see different keys in the dictionary returned by the cell above. They don't matter much for what we're doing here (just know they are required by the model we will instantiate later); you can learn more about them in [this tutorial](https://huggingface.co/transformers/preprocessing.html) if you're interested. Instead of one sentence, we can pass along a list of sentences:
###Code
tokenizer(["Hello, this one sentence!", "This is another sentence."])
###Output
_____no_output_____
###Markdown
To prepare the targets for our model, we need to tokenize them inside the `as_target_tokenizer` context manager. This will make sure the tokenizer uses the special tokens corresponding to the targets:
###Code
with tokenizer.as_target_tokenizer():
print(tokenizer(["Hello, this one sentence!", "This is another sentence."]))
###Output
{'input_ids': [[14232, 244, 2, 69, 49, 420, 10513, 1101, 84, 0], [13486, 6, 160, 6, 3778, 4853, 10513, 1101, 3, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}
###Markdown
If you are using one of the five T5 checkpoints that require a special prefix to put before the inputs, you should adapt the following cell.
###Code
if model_checkpoint in ["t5-small", "t5-base", "t5-large", "t5-3b", "t5-11b"]:
prefix = "translate English to Romanian: "
else:
prefix = ""
###Output
_____no_output_____
###Markdown
We can then write the function that will preprocess our samples. We just feed them to the `tokenizer` with the argument `truncation=True`. This ensures that any input longer than what the selected model can handle is truncated to the maximum length the model accepts. The padding will be dealt with later on (in a data collator), so examples are padded to the longest length in the batch rather than to the longest length in the whole dataset.
###Code
max_input_length = 128
max_target_length = 128
source_lang = "en"
target_lang = "ro"
def preprocess_function(examples):
inputs = [prefix + ex[source_lang] for ex in examples["translation"]]
targets = [ex[target_lang] for ex in examples["translation"]]
model_inputs = tokenizer(inputs, max_length=max_input_length, truncation=True)
# Setup the tokenizer for targets
with tokenizer.as_target_tokenizer():
labels = tokenizer(targets, max_length=max_target_length, truncation=True)
model_inputs["labels"] = labels["input_ids"]
return model_inputs
###Output
_____no_output_____
###Markdown
This function works with one or several examples. In the case of several examples, the tokenizer will return a list of lists for each key:
###Code
preprocess_function(raw_datasets["train"][:2])
###Output
_____no_output_____
###Markdown
To apply this function on all the pairs of sentences in our dataset, we just use the `map` method of our `dataset` object we created earlier. This will apply the function on all the elements of all the splits in `dataset`, so our training, validation and testing data will be preprocessed in one single command.
###Code
tokenized_datasets = raw_datasets.map(preprocess_function, batched=True)
###Output
Loading cached processed dataset at /home/matt/.cache/huggingface/datasets/wmt16/ro-en/1.0.0/0d9fb3e814712c785176ad8cdb9f465fbe6479000ee6546725db30ad8a8b5f8a/cache-0dbaad7302f5fc8a.arrow
Loading cached processed dataset at /home/matt/.cache/huggingface/datasets/wmt16/ro-en/1.0.0/0d9fb3e814712c785176ad8cdb9f465fbe6479000ee6546725db30ad8a8b5f8a/cache-7b00c0d7c83b3dae.arrow
Loading cached processed dataset at /home/matt/.cache/huggingface/datasets/wmt16/ro-en/1.0.0/0d9fb3e814712c785176ad8cdb9f465fbe6479000ee6546725db30ad8a8b5f8a/cache-0139ae41a84c7c82.arrow
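###Markdown
The messages above show 🤗 Datasets reusing its cache. Not part of the original notebook: a minimal sketch of forcing the preprocessing to run again instead of reusing those cached files, with the same `preprocess_function` defined above.
###Code
# Sketch only: ignore the cache and re-run the preprocessing from scratch.
tokenized_datasets = raw_datasets.map(
    preprocess_function, batched=True, load_from_cache_file=False
)
###Output
_____no_output_____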
###Markdown
Even better, the results are automatically cached by the 🤗 Datasets library to avoid spending time on this step the next time you run your notebook. The 🤗 Datasets library is normally smart enough to detect when the function you pass to `map` has changed (and thus when the cached data should not be reused). For instance, it will properly detect if you change the task in the first cell and rerun the notebook. 🤗 Datasets warns you when it uses cached files; you can pass `load_from_cache_file=False` in the call to `map` to ignore the cached files and force the preprocessing to be applied again. Note that we passed `batched=True` to encode the texts in batches. This leverages the full benefit of the fast tokenizer we loaded earlier, which will use multi-threading to treat the texts in a batch concurrently. Fine-tuning the model Now that our data is ready, we can download the pretrained model and fine-tune it. Since our task is of the sequence-to-sequence kind, we use the `TFAutoModelForSeq2SeqLM` class. Like with the tokenizer, the `from_pretrained` method will download and cache the model for us.
###Code
from transformers import TFAutoModelForSeq2SeqLM, DataCollatorForSeq2Seq
model = TFAutoModelForSeq2SeqLM.from_pretrained(model_checkpoint)
###Output
/home/matt/miniconda3/envs/tensorflow26/lib/python3.9/site-packages/transformers/configuration_utils.py:336: UserWarning: Passing `gradient_checkpointing` to a config initialization is deprecated and will be removed in v5 Transformers. Using `model.gradient_checkpointing_enable()` instead, or if you are using the `Trainer` API, pass `gradient_checkpointing=True` in your `TrainingArguments`.
warnings.warn(
2021-09-25 15:40:08.447263: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2021-09-25 15:40:08.453712: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2021-09-25 15:40:08.454388: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2021-09-25 15:40:08.455776: I tensorflow/core/platform/cpu_feature_guard.cc:142] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
2021-09-25 15:40:08.458373: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2021-09-25 15:40:08.459054: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2021-09-25 15:40:08.459723: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2021-09-25 15:40:08.766450: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2021-09-25 15:40:08.767128: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2021-09-25 15:40:08.767774: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2021-09-25 15:40:08.768387: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1510] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 21671 MB memory: -> device: 0, name: GeForce RTX 3090, pci bus id: 0000:21:00.0, compute capability: 8.6
2021-09-25 15:40:09.836488: I tensorflow/stream_executor/cuda/cuda_blas.cc:1760] TensorFloat-32 will be used for the matrix multiplication. This will only be logged once.
All model checkpoint layers were used when initializing TFMarianMTModel.
All the layers of TFMarianMTModel were initialized from the model checkpoint at Helsinki-NLP/opus-mt-en-ROMANCE.
If your task is similar to the task the model of the checkpoint was trained on, you can already use TFMarianMTModel for predictions without further training.
###Markdown
Note that we don't get a warning like in our classification example. This means we used all the weights of the pretrained model and there is no randomly initialized head in this case. Next we set some parameters like the learning rate and the `batch_size`, and customize the weight decay. The last two variables are there to set everything up so we can push the model to the [Hub](https://huggingface.co/models) at the end of training. Remove both of them if you didn't follow the installation steps at the top of the notebook; otherwise, you can change the value of `push_to_hub_model_id` to something you would prefer.
###Code
batch_size = 16
learning_rate = 2e-5
weight_decay = 0.01
num_train_epochs = 1
model_name = model_checkpoint.split("/")[-1]
push_to_hub_model_id = f"{model_name}-finetuned-{source_lang}-to-{target_lang}"
###Output
_____no_output_____
###Markdown
Then, we need a special kind of data collator, which will pad not only the inputs but also the labels to the maximum length in the batch. Note that our data collators are multi-framework, so make sure you set `return_tensors='tf'` so you get `tf.Tensor` objects back and not something else!
###Code
data_collator = DataCollatorForSeq2Seq(tokenizer, model=model, return_tensors="tf")
###Output
_____no_output_____
###Markdown
Now we convert our input datasets to TF datasets using this collator. There's a built-in method for this: `to_tf_dataset()`. Make sure to specify the collator we just created as our `collate_fn`!
###Code
train_dataset = tokenized_datasets["train"].to_tf_dataset(
batch_size=batch_size,
columns=["input_ids", "attention_mask", "labels"],
shuffle=True,
collate_fn=data_collator,
)
validation_dataset = tokenized_datasets["validation"].to_tf_dataset(
batch_size=batch_size,
columns=["input_ids", "attention_mask", "labels"],
shuffle=False,
collate_fn=data_collator,
)
###Output
/home/matt/miniconda3/envs/tensorflow26/lib/python3.9/site-packages/datasets/formatting/formatting.py:167: VisibleDeprecationWarning: Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray.
return np.array(array, copy=False, **self.np_array_kwargs)
###Markdown
Now we initialize our optimizer and compile the model. Note that most Transformers models compute the loss internally, so we can just leave the loss argument blank to use the internal loss instead. For the optimizer, we can use the `AdamWeightDecay` optimizer from the Transformers library.
###Code
from transformers import AdamWeightDecay
import tensorflow as tf
optimizer = AdamWeightDecay(learning_rate=learning_rate, weight_decay_rate=weight_decay)
model.compile(optimizer=optimizer)
###Output
No loss specified in compile() - the model's internal loss computation will be used as the loss. To disable this behaviour, please explicitly pass loss=None.
###Markdown
Now we can train our model. We can also add a callback to sync up our model with the Hub - this allows us to resume training from other machines and even test the model's inference quality midway through training! If you use it, make sure to change the `username` to your own. If you don't want to do this, simply remove the callbacks argument in the call to `fit()`.
###Code
from transformers.keras_callbacks import PushToHubCallback
username = "Rocketknight1"
callback = PushToHubCallback(
output_dir="./translation_model_save",
tokenizer=tokenizer,
hub_model_id=f"{username}/{push_to_hub_model_id}",
)
model.fit(
train_dataset, validation_data=validation_dataset, epochs=1, callbacks=[callback]
)
###Output
2021-09-25 15:40:10.883395: I tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc:185] None of the MLIR Optimization Passes are enabled (registered 2)
###Markdown
Now we've finished training our model, but the loss value can be a little hard to interpret. Let's use the metric we loaded earlier to score our model's outputs on the validation set. Note that because the sequence length is variable, we can't use `model.predict()` to get predictions for the whole dataset at once, as the outputs from each batch cannot be concatenated together. Instead, let's process the validation set a batch at a time, converting the predicted outputs to strings so that the metric can judge them.
###Code
import numpy as np
all_predictions = []
all_labels = []
prediction_lens = []
for batch, dummy_labels in validation_dataset:
labels = batch["labels"]
preds = model(batch)["logits"]
token_preds = np.argmax(preds, axis=-1)
decoded_preds = tokenizer.batch_decode(token_preds, skip_special_tokens=True)
prediction_lens.extend(
[np.count_nonzero(pred != tokenizer.pad_token_id) for pred in token_preds]
)
# We use -100 to mask labels - replace it with the tokenizer pad token when decoding
# so that no output is emitted for these
labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
# Some simple post-processing
decoded_preds = [pred.strip() for pred in decoded_preds]
decoded_labels = [[label.strip()] for label in decoded_labels]
all_predictions.extend(decoded_preds)
all_labels.extend(decoded_labels)
result = metric.compute(predictions=all_predictions, references=all_labels)
result = {"bleu": result["score"]}
result["gen_len"] = np.mean(prediction_lens)
result = {k: round(v, 4) for k, v in result.items()}
print(result)
###Output
{'bleu': 13.4253, 'gen_len': 81.8009}
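###Markdown
Not part of the original notebook: if you prefer not to push to the Hub, a minimal sketch of saving the fine-tuned model and tokenizer to a local directory (the path is arbitrary) and reloading them later.
###Code
# Sketch only: persist and reload the fine-tuned model locally.
save_directory = "./opus-mt-en-ro-finetuned-local"  # arbitrary path
model.save_pretrained(save_directory)
tokenizer.save_pretrained(save_directory)

# Later, reload both from the same directory
from transformers import TFAutoModelForSeq2SeqLM, AutoTokenizer
reloaded_model = TFAutoModelForSeq2SeqLM.from_pretrained(save_directory)
reloaded_tokenizer = AutoTokenizer.from_pretrained(save_directory)
###Output
_____no_output_____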
###Markdown
If you're opening this notebook on Colab, you will probably need to install 🤗 Transformers and 🤗 Datasets. Uncomment the following cell and run it. We also use the `sacrebleu` and `sentencepiece` libraries - you may need to install these even if you already have 🤗 Transformers!
###Code
#! pip install transformers datasets
#! pip install sacrebleu sentencepiece
#! pip install huggingface_hub
###Output
_____no_output_____
###Markdown
If you're opening this notebook locally, make sure your environment has the latest version of those libraries installed. To be able to share your model with the community and generate results like the one shown in the picture below via the inference API, there are a few more steps to follow. First you have to store your authentication token from the Hugging Face website (sign up [here](https://huggingface.co/join) if you haven't already!), then uncomment the following cell and input your token:
###Code
from huggingface_hub import notebook_login
notebook_login()
###Output
_____no_output_____
###Markdown
Then you need to install Git-LFS and set up Git if you haven't already. Uncomment the following instructions and adapt them with your name and email:
###Code
# !apt install git-lfs
# !git config --global user.email "[email protected]"
# !git config --global user.name "Your Name"
###Output
_____no_output_____
###Markdown
Make sure your version of Transformers is at least 4.16.0 since some of the functionality we use was introduced in that version:
###Code
import transformers
print(transformers.__version__)
###Output
4.16.0.dev0
###Markdown
You can find a script version of this notebook to fine-tune your model in a distributed fashion using multiple GPUs or TPUs [here](https://github.com/huggingface/transformers/tree/master/examples/seq2seq). Fine-tuning a model on a translation task In this notebook, we will see how to fine-tune one of the [🤗 Transformers](https://github.com/huggingface/transformers) models for a translation task. We will use the [WMT dataset](http://www.statmt.org/wmt16/), a machine translation dataset composed from a collection of various sources, including news commentaries and parliament proceedings. We will see how to easily load the dataset for this task using 🤗 Datasets and how to fine-tune a model on it using Keras.
###Code
model_checkpoint = "Helsinki-NLP/opus-mt-en-ROMANCE"
###Output
_____no_output_____
###Markdown
This notebook is built to run with any model checkpoint from the [Model Hub](https://huggingface.co/models) as long as that model has a sequence-to-sequence version in the Transformers library. Here we picked the [`Helsinki-NLP/opus-mt-en-romance`](https://huggingface.co/Helsinki-NLP/opus-mt-en-ROMANCE) checkpoint. Loading the dataset We will use the [🤗 Datasets](https://github.com/huggingface/datasets) library to download the data and get the metric we need to use for evaluation (to compare our model to the benchmark). This can be easily done with the functions `load_dataset` and `load_metric`. We use the English/Romanian part of the WMT dataset here.
###Code
from datasets import load_dataset, load_metric
raw_datasets = load_dataset("wmt16", "ro-en")
metric = load_metric("sacrebleu")
###Output
Reusing dataset wmt16 (/home/matt/.cache/huggingface/datasets/wmt16/ro-en/1.0.0/af3c5d746b307726d0de73ebe7f10545361b9cb6f75c83a1734c000e48b6264f)
###Markdown
The `dataset` object itself is a [`DatasetDict`](https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasetdict), which contains one key each for the training, validation and test sets:
###Code
raw_datasets
###Output
_____no_output_____
###Markdown
To access an actual element, you need to select a split first, then give an index:
###Code
raw_datasets["train"][0]
###Output
_____no_output_____
###Markdown
To get a sense of what the data looks like, the following function will show some examples picked randomly in the dataset.
###Code
import datasets
import random
import pandas as pd
from IPython.display import display, HTML
def show_random_elements(dataset, num_examples=5):
assert num_examples <= len(
dataset
), "Can't pick more elements than there are in the dataset."
picks = []
for _ in range(num_examples):
pick = random.randint(0, len(dataset) - 1)
while pick in picks:
pick = random.randint(0, len(dataset) - 1)
picks.append(pick)
df = pd.DataFrame(dataset[picks])
for column, typ in dataset.features.items():
if isinstance(typ, datasets.ClassLabel):
df[column] = df[column].transform(lambda i: typ.names[i])
display(HTML(df.to_html()))
show_random_elements(raw_datasets["train"])
###Output
_____no_output_____
###Markdown
The metric is an instance of [`datasets.Metric`](https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Metric):
###Code
metric
###Output
_____no_output_____
###Markdown
You can call its `compute` method with your predictions and labels, which need to be lists of decoded strings (a list of lists for the labels):
###Code
fake_preds = ["hello there", "general kenobi"]
fake_labels = [["hello there"], ["general kenobi"]]
metric.compute(predictions=fake_preds, references=fake_labels)
###Output
_____no_output_____
###Markdown
Preprocessing the data Before we can feed those texts to our model, we need to preprocess them. This is done by a 🤗 Transformers `Tokenizer`, which will (as the name indicates) tokenize the inputs (including converting the tokens to their corresponding IDs in the pretrained vocabulary) and put them in a format the model expects, as well as generate the other inputs the model requires. To do all of this, we instantiate our tokenizer with the `AutoTokenizer.from_pretrained` method, which will ensure: - we get a tokenizer that corresponds to the model architecture we want to use, - we download the vocabulary used when pretraining this specific checkpoint. That vocabulary will be cached, so it's not downloaded again the next time we run the cell.
###Code
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
###Output
_____no_output_____
###Markdown
If you are using an mBART tokenizer (not the case here, since our checkpoint loads a Marian model), you need to set the source and target languages so the texts are preprocessed properly. You can check the language codes [here](https://huggingface.co/facebook/mbart-large-cc25) if you are using this notebook on a different pair of languages.
###Code
if "mbart" in model_checkpoint:
tokenizer.src_lang = "en-XX"
tokenizer.tgt_lang = "ro-RO"
###Output
_____no_output_____
###Markdown
By default, the call above will use one of the fast tokenizers (backed by Rust) from the 🤗 Tokenizers library. You can directly call this tokenizer on one sentence or a pair of sentences:
###Code
tokenizer("Hello, this is a sentence!")
###Output
_____no_output_____
###Markdown
Depending on the model you selected, you will see different keys in the dictionary returned by the cell above. They don't matter much for what we're doing here (just know they are required by the model we will instantiate later); you can learn more about them in [this tutorial](https://huggingface.co/transformers/preprocessing.html) if you're interested. Instead of one sentence, we can pass along a list of sentences:
###Code
tokenizer(["Hello, this is a sentence!", "This is another sentence."])
###Output
_____no_output_____
###Markdown
To prepare the targets for our model, we need to tokenize them inside the `as_target_tokenizer` context manager. This will make sure the tokenizer uses the special tokens corresponding to the targets:
###Code
with tokenizer.as_target_tokenizer():
print(tokenizer(["Hello, this is a sentence!", "This is another sentence."]))
###Output
{'input_ids': [[14232, 244, 2, 69, 160, 6, 9, 10513, 1101, 84, 0], [13486, 6, 160, 6, 3778, 4853, 10513, 1101, 3, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}
###Markdown
If you are using one of the five T5 checkpoints that require a special prefix to put before the inputs, you should adapt the following cell.
###Code
if model_checkpoint in ["t5-small", "t5-base", "t5-large", "t5-3b", "t5-11b"]:
prefix = "translate English to Romanian: "
else:
prefix = ""
###Output
_____no_output_____
###Markdown
We can then write the function that will preprocess our samples. We just feed them to the `tokenizer` with the argument `truncation=True`. This ensures that any input longer than what the selected model can handle is truncated to the maximum length the model accepts. The padding will be dealt with later on (in a data collator), so examples are padded to the longest length in the batch rather than to the longest length in the whole dataset.
###Code
max_input_length = 128
max_target_length = 128
source_lang = "en"
target_lang = "ro"
def preprocess_function(examples):
inputs = [prefix + ex[source_lang] for ex in examples["translation"]]
targets = [ex[target_lang] for ex in examples["translation"]]
model_inputs = tokenizer(inputs, max_length=max_input_length, truncation=True)
# Setup the tokenizer for targets
with tokenizer.as_target_tokenizer():
labels = tokenizer(targets, max_length=max_target_length, truncation=True)
model_inputs["labels"] = labels["input_ids"]
return model_inputs
###Output
_____no_output_____
###Markdown
This function works with one or several examples. In the case of several examples, the tokenizer will return a list of lists for each key:
###Code
preprocess_function(raw_datasets["train"][:2])
###Output
_____no_output_____
###Markdown
To apply this function on all the pairs of sentences in our dataset, we just use the `map` method of our `dataset` object we created earlier. This will apply the function on all the elements of all the splits in `dataset`, so our training, validation and testing data will be preprocessed in one single command.
###Code
tokenized_datasets = raw_datasets.map(preprocess_function, batched=True)
###Output
Loading cached processed dataset at /home/matt/.cache/huggingface/datasets/wmt16/ro-en/1.0.0/af3c5d746b307726d0de73ebe7f10545361b9cb6f75c83a1734c000e48b6264f/cache-703f402232e7c8b6.arrow
Loading cached processed dataset at /home/matt/.cache/huggingface/datasets/wmt16/ro-en/1.0.0/af3c5d746b307726d0de73ebe7f10545361b9cb6f75c83a1734c000e48b6264f/cache-6fce55dd900db78d.arrow
Loading cached processed dataset at /home/matt/.cache/huggingface/datasets/wmt16/ro-en/1.0.0/af3c5d746b307726d0de73ebe7f10545361b9cb6f75c83a1734c000e48b6264f/cache-c212144f77499ba4.arrow
###Markdown
Even better, the results are automatically cached by the 🤗 Datasets library to avoid spending time on this step the next time you run your notebook. The 🤗 Datasets library is normally smart enough to detect when the function you pass to `map` has changed (and thus when the cached data should not be reused). For instance, it will properly detect if you change the task in the first cell and rerun the notebook. 🤗 Datasets warns you when it uses cached files; you can pass `load_from_cache_file=False` in the call to `map` to ignore the cached files and force the preprocessing to be applied again. Note that we passed `batched=True` to encode the texts in batches. This leverages the full benefit of the fast tokenizer we loaded earlier, which will use multi-threading to treat the texts in a batch concurrently. Fine-tuning the model Now that our data is ready, we can download the pretrained model and fine-tune it. Since our task is of the sequence-to-sequence kind, we use the `TFAutoModelForSeq2SeqLM` class. Like with the tokenizer, the `from_pretrained` method will download and cache the model for us.
###Code
from transformers import TFAutoModelForSeq2SeqLM, DataCollatorForSeq2Seq
model = TFAutoModelForSeq2SeqLM.from_pretrained(model_checkpoint)
###Output
2022-01-27 17:20:20.831271: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:936] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2022-01-27 17:20:20.838671: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:936] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2022-01-27 17:20:20.839963: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:936] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2022-01-27 17:20:20.841512: I tensorflow/core/platform/cpu_feature_guard.cc:151] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
2022-01-27 17:20:20.844184: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:936] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2022-01-27 17:20:20.844852: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:936] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2022-01-27 17:20:20.845497: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:936] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2022-01-27 17:20:21.184971: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:936] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2022-01-27 17:20:21.185660: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:936] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2022-01-27 17:20:21.186417: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:936] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2022-01-27 17:20:21.187043: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 21665 MB memory: -> device: 0, name: GeForce RTX 3090, pci bus id: 0000:21:00.0, compute capability: 8.6
2022-01-27 17:20:22.278352: I tensorflow/stream_executor/cuda/cuda_blas.cc:1786] TensorFloat-32 will be used for the matrix multiplication. This will only be logged once.
All model checkpoint layers were used when initializing TFMarianMTModel.
All the layers of TFMarianMTModel were initialized from the model checkpoint at Helsinki-NLP/opus-mt-en-ROMANCE.
If your task is similar to the task the model of the checkpoint was trained on, you can already use TFMarianMTModel for predictions without further training.
###Markdown
Note that we don't get a warning like in our classification example. This means we used all the weights of the pretrained model and there is no randomly initialized head in this case. Next we set some parameters like the learning rate and the `batch_size`, and customize the weight decay. The last two variables are there to set everything up so we can push the model to the [Hub](https://huggingface.co/models) at the end of training. Remove both of them if you didn't follow the installation steps at the top of the notebook; otherwise, you can change the value of `push_to_hub_model_id` to something you would prefer.
###Code
batch_size = 16
learning_rate = 2e-5
weight_decay = 0.01
num_train_epochs = 1
model_name = model_checkpoint.split("/")[-1]
push_to_hub_model_id = f"{model_name}-finetuned-{source_lang}-to-{target_lang}"
###Output
_____no_output_____
###Markdown
Then, we need a special kind of data collator, which will pad not only the inputs but also the labels to the maximum length in the batch. Note that our data collators are multi-framework, so make sure you set `return_tensors='tf'` so you get `tf.Tensor` objects back and not something else!
###Code
data_collator = DataCollatorForSeq2Seq(tokenizer, model=model, return_tensors="tf")
###Output
_____no_output_____
###Markdown
Now we convert our input datasets to TF datasets using this collator. There's a built-in method for this: `to_tf_dataset()`. Make sure to specify the collator we just created as our `collate_fn`! Computing the `BLEU` metric can be slow because it requires the model to generate outputs token-by-token. To speed things up, we make a `generation_dataset` that contains only 48 examples from the validation dataset, and use this for `BLEU` computations.
###Code
train_dataset = tokenized_datasets["train"].to_tf_dataset(
batch_size=batch_size,
columns=["input_ids", "attention_mask", "labels"],
shuffle=True,
collate_fn=data_collator,
)
validation_dataset = tokenized_datasets["validation"].to_tf_dataset(
batch_size=batch_size,
columns=["input_ids", "attention_mask", "labels"],
shuffle=False,
collate_fn=data_collator,
)
generation_dataset = (
tokenized_datasets["validation"]
.shuffle()
.select(list(range(48)))
.to_tf_dataset(
batch_size=8,
columns=["input_ids", "attention_mask", "labels"],
shuffle=False,
collate_fn=data_collator,
)
)
###Output
Loading cached shuffled indices for dataset at /home/matt/.cache/huggingface/datasets/wmt16/ro-en/1.0.0/af3c5d746b307726d0de73ebe7f10545361b9cb6f75c83a1734c000e48b6264f/cache-8a74e7f4b1ceddbf.arrow
###Markdown
Now we initialize our optimizer and compile the model. Note that most Transformers models compute the loss internally, so we can just leave the loss argument blank to use the internal loss instead. For the optimizer, we can use the `AdamWeightDecay` optimizer from the Transformers library.
###Code
from transformers import AdamWeightDecay
import tensorflow as tf
optimizer = AdamWeightDecay(learning_rate=learning_rate, weight_decay_rate=weight_decay)
model.compile(optimizer=optimizer)
###Output
No loss specified in compile() - the model's internal loss computation will be used as the loss. Don't panic - this is a common way to train TensorFlow models in Transformers! Please ensure your labels are passed as keys in the input dict so that they are accessible to the model during the forward pass. To disable this behaviour, please pass a loss argument, or explicitly pass loss=None if you do not want your model to compute a loss.
###Markdown
Now we can train our model. We can also add a few optional callbacks here, which you can remove if they aren't useful to you. In no particular order, these are:- PushToHubCallback will sync up our model with the Hub - this allows us to resume training from other machines, share the model after training is finished, and even test the model's inference quality midway through training!- TensorBoard is a built-in Keras callback that logs TensorBoard metrics.- KerasMetricCallback is a callback for computing advanced metrics. There are a number of common metrics in NLP like ROUGE which are hard to fit into your compiled training loop because they depend on decoding predictions and labels back to strings with the tokenizer, and calling arbitrary Python functions to compute the metric. The KerasMetricCallback will wrap a metric function, outputting metrics as training progresses.If this is the first time you've seen `KerasMetricCallback`, it's worth explaining what exactly is going on here. The callback takes two main arguments - a `metric_fn` and an `eval_dataset`. It then iterates over the `eval_dataset` and collects the model's outputs for each sample, before passing the `list` of predictions and the associated `list` of labels to the user-defined `metric_fn`. If the `predict_with_generate` argument is `True`, then it will call `model.generate()` for each input sample instead of `model.predict()` - this is useful for metrics that expect generated text from the model, like `ROUGE` and `BLEU`.This callback allows complex metrics to be computed each epoch that would not function as a standard Keras Metric. Metric values are printed each epoch, and can be used by other callbacks like `TensorBoard` or `EarlyStopping`.
###Code
from transformers.keras_callbacks import KerasMetricCallback
import numpy as np
def metric_fn(eval_predictions):
preds, labels = eval_predictions
prediction_lens = [
np.count_nonzero(pred != tokenizer.pad_token_id) for pred in preds
]
decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
# We use -100 to mask labels - replace it with the tokenizer pad token when decoding
# so that no output is emitted for these
labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
# Some simple post-processing
decoded_preds = [pred.strip() for pred in decoded_preds]
decoded_labels = [[label.strip()] for label in decoded_labels]
result = metric.compute(predictions=decoded_preds, references=decoded_labels)
result = {"bleu": result["score"]}
result["gen_len"] = np.mean(prediction_lens)
return result
metric_callback = KerasMetricCallback(
metric_fn=metric_fn, eval_dataset=generation_dataset, predict_with_generate=True
)
###Output
WARNING:root:No label_cols specified for KerasMetricCallback, assuming you want the 'labels' key.
###Markdown
With the metric callback ready, now we can specify the other callbacks and fit our model:
###Code
from transformers.keras_callbacks import PushToHubCallback
from tensorflow.keras.callbacks import TensorBoard
tensorboard_callback = TensorBoard(log_dir="./translation_model_save/logs")
push_to_hub_callback = PushToHubCallback(
output_dir="./translation_model_save",
tokenizer=tokenizer,
hub_model_id=push_to_hub_model_id,
)
# callbacks = [tensorboard_callback, metric_callback, push_to_hub_callback]
callbacks = [metric_callback, tensorboard_callback, push_to_hub_callback]
model.fit(
train_dataset, validation_data=validation_dataset, epochs=1, callbacks=callbacks
)
###Output
/home/matt/PycharmProjects/notebooks/examples/translation_model_save is already a clone of https://huggingface.co/Rocketknight1/opus-mt-en-ROMANCE-finetuned-en-to-ro. Make sure you pull the latest changes with `repo.git_pull()`.
WARNING:huggingface_hub.repository:/home/matt/PycharmProjects/notebooks/examples/translation_model_save is already a clone of https://huggingface.co/Rocketknight1/opus-mt-en-ROMANCE-finetuned-en-to-ro. Make sure you pull the latest changes with `repo.git_pull()`.
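###Markdown
Not part of the original notebook: since the callback above pushes checkpoints to the Hub during training, the model can be reloaded from there for a quick inference check. The repo name below is an assumption taken from the log message above.
###Code
# Sketch only: reload the pushed checkpoint from the Hub and translate one sentence.
from transformers import TFAutoModelForSeq2SeqLM, AutoTokenizer

hub_repo = "Rocketknight1/opus-mt-en-ROMANCE-finetuned-en-to-ro"  # assumption, see the log above
reloaded_model = TFAutoModelForSeq2SeqLM.from_pretrained(hub_repo)
reloaded_tokenizer = AutoTokenizer.from_pretrained(hub_repo)

inputs = reloaded_tokenizer(["This is a test sentence."], return_tensors="tf")
outputs = reloaded_model.generate(**inputs, max_length=128)
print(reloaded_tokenizer.batch_decode(outputs, skip_special_tokens=True))
###Output
_____no_output_____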
notebooks/03. code update.ipynb | ###Markdown
The goal of this notebook is to code a decision tree classifier that can be used with the following API: `df = pd.read_csv("data.csv")`, then `train_df, test_df = train_test_split(df, test_size=0.2)`, `tree = decision_tree_algorithm(train_df)`, `accuracy = calculate_accuracy(test_df, tree)`. Import Statements
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
import random
from pprint import pprint
###Output
_____no_output_____
###Markdown
Load and Prepare Data Format of the data: - the last column of the data frame must contain the label and it must also be called "label" - there should be no missing values in the data frame
###Code
df = pd.read_csv("../data/Iris.csv")
df = df.drop("Id", axis=1)
df = df.rename(columns={"species": "label"})
df.head()
###Output
_____no_output_____
###Markdown
Train-Test-Split
###Code
def train_test_split(df, test_size):
if isinstance(test_size, float):
test_size = round(test_size * len(df))
indices = df.index.tolist()
test_indices = random.sample(population=indices, k=test_size)
test_df = df.loc[test_indices]
train_df = df.drop(test_indices)
return train_df, test_df
random.seed(0)
train_df, test_df = train_test_split(df, test_size=20)
###Output
_____no_output_____
###Markdown
Helper Functions
###Code
data = train_df.values
data[:5]
###Output
_____no_output_____
###Markdown
Data pure?
###Code
def check_purity(data):
label_column = data[:, -1]
unique_classes = np.unique(label_column)
if len(unique_classes) == 1:
return True
else:
return False
###Output
_____no_output_____
###Markdown
Classify
###Code
def classify_data(data):
label_column = data[:, -1]
unique_classes, counts_unique_classes = np.unique(label_column, return_counts=True)
index = counts_unique_classes.argmax()
classification = unique_classes[index]
return classification
###Output
_____no_output_____
###Markdown
Potential splits?
###Code
def get_potential_splits(data):
potential_splits = {}
_, n_columns = data.shape
for column_index in range(n_columns - 1): # excluding the last column which is the label
values = data[:, column_index]
unique_values = np.unique(values)
potential_splits[column_index] = unique_values
return potential_splits
###Output
_____no_output_____
###Markdown
Split Data
###Code
def split_data(data, split_column, split_value):
split_column_values = data[:, split_column]
type_of_feature = FEATURE_TYPES[split_column]
if type_of_feature == "continuous":
data_below = data[split_column_values <= split_value]
data_above = data[split_column_values > split_value]
# feature is categorical
else:
data_below = data[split_column_values == split_value]
data_above = data[split_column_values != split_value]
return data_below, data_above
###Output
_____no_output_____
###Markdown
Lowest Overall Entropy?
###Code
def calculate_entropy(data):
label_column = data[:, -1]
_, counts = np.unique(label_column, return_counts=True)
probabilities = counts / counts.sum()
entropy = sum(probabilities * -np.log2(probabilities))
return entropy
def calculate_overall_entropy(data_below, data_above):
n = len(data_below) + len(data_above)
p_data_below = len(data_below) / n
p_data_above = len(data_above) / n
overall_entropy = (p_data_below * calculate_entropy(data_below)
+ p_data_above * calculate_entropy(data_above))
return overall_entropy
def determine_best_split(data, potential_splits):
overall_entropy = 9999
for column_index in potential_splits:
for value in potential_splits[column_index]:
data_below, data_above = split_data(data, split_column=column_index, split_value=value)
current_overall_entropy = calculate_overall_entropy(data_below, data_above)
if current_overall_entropy <= overall_entropy:
overall_entropy = current_overall_entropy
best_split_column = column_index
best_split_value = value
return best_split_column, best_split_value
###Output
_____no_output_____
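###Markdown
A quick, hypothetical sanity check (added here for illustration; it is not part of the original notebook): a node that is an even mix of two classes should have an entropy of 1.0, while a pure node should have an entropy of 0.0, and the overall entropy is the weighted average of the two.
###Code
# Hypothetical toy arrays: the last column is the label, as assumed throughout this notebook.
mixed_node = np.array([[1.0, "a"], [2.0, "b"], [3.0, "a"], [4.0, "b"]], dtype=object)
pure_node = np.array([[1.0, "a"], [2.0, "a"]], dtype=object)

print(calculate_entropy(mixed_node))                    # expected: 1.0
print(calculate_entropy(pure_node))                     # expected: 0.0 (may print as -0.0)
print(calculate_overall_entropy(pure_node, mixed_node)) # weighted average: (2/6)*0.0 + (4/6)*1.0
###Output
_____no_output_____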
###Markdown
Decision Tree Algorithm Representation of the Decision Tree
###Code
sub_tree = {question: [yes_answer, no_answer]}
###Output
_____no_output_____
###Markdown
example_tree = {"petal_width <= 0.8": ["Iris-setosa", {"petal_width <= 1.65": [{"petal_length <= 4.9": ["Iris-versicolor", "Iris-virginica"]}, "Iris-virginica"]}]}
###Code
### Determine Type of Feature
###Output
_____no_output_____
###Markdown
def determine_type_of_feature(df):
    
    feature_types = []
    n_unique_values_treshold = 15
    for feature in df.columns:
        if feature != "label":
            unique_values = df[feature].unique()
            example_value = unique_values[0]
            
            if (isinstance(example_value, str)) or (len(unique_values) <= n_unique_values_treshold):
                feature_types.append("categorical")
            else:
                feature_types.append("continuous")
    
    return feature_types
###Code
### Algorithm
###Output
_____no_output_____
###Markdown
def decision_tree_algorithm(df, counter=0, min_samples=2, max_depth=5):
    
    # data preparations
    if counter == 0:
        global COLUMN_HEADERS, FEATURE_TYPES
        COLUMN_HEADERS = df.columns
        FEATURE_TYPES = determine_type_of_feature(df)
        data = df.values
    else:
        data = df
    
    # base cases
    if (check_purity(data)) or (len(data) < min_samples) or (counter == max_depth):
        classification = classify_data(data)
        return classification
    
    # recursive part
    else:
        counter += 1
        
        # helper functions
        potential_splits = get_potential_splits(data)
        split_column, split_value = determine_best_split(data, potential_splits)
        data_below, data_above = split_data(data, split_column, split_value)
        
        # check for empty data
        if len(data_below) == 0 or len(data_above) == 0:
            classification = classify_data(data)
            return classification
        
        # determine question
        feature_name = COLUMN_HEADERS[split_column]
        type_of_feature = FEATURE_TYPES[split_column]
        if type_of_feature == "continuous":
            question = "{} <= {}".format(feature_name, split_value)
        # feature is categorical
        else:
            question = "{} = {}".format(feature_name, split_value)
        
        # instantiate sub-tree
        sub_tree = {question: []}
        
        # find answers (recursion)
        yes_answer = decision_tree_algorithm(data_below, counter, min_samples, max_depth)
        no_answer = decision_tree_algorithm(data_above, counter, min_samples, max_depth)
        
        # If the answers are the same, then there is no point in asking the question.
        # This could happen when the data is classified even though it is not pure yet
        # (min_samples or max_depth base case).
        if yes_answer == no_answer:
            sub_tree = yes_answer
        else:
            sub_tree[question].append(yes_answer)
            sub_tree[question].append(no_answer)
        
        return sub_tree

tree = decision_tree_algorithm(train_df, max_depth=3)
pprint(tree)
###Code
# Classification
###Output
_____no_output_____
###Markdown
sub_tree = {question: [yes_answer, no_answer]}
###Code
example = test_df.iloc[0]
example
def classify_example(example, tree):
question = list(tree.keys())[0]
feature_name, comparison_operator, value = question.split(" ")
# ask question
if comparison_operator == "<=":
if example[feature_name] <= float(value):
answer = tree[question][0]
else:
answer = tree[question][1]
# feature is categorical
else:
if str(example[feature_name]) == value:
answer = tree[question][0]
else:
answer = tree[question][1]
# base case
if not isinstance(answer, dict):
return answer
# recursive part
else:
residual_tree = answer
return classify_example(example, residual_tree)
classify_example(example, tree)
###Output
_____no_output_____
###Markdown
Calculate Accuracy
###Code
def calculate_accuracy(df, tree):
df["classification"] = df.apply(classify_example, args=(tree,), axis=1)
df["classification_correct"] = df["classification"] == df["label"]
accuracy = df["classification_correct"].mean()
return accuracy
accuracy = calculate_accuracy(test_df, tree)
accuracy
###Output
_____no_output_____
###Markdown
Titanic Data Set Load and Prepare Data
###Code
df = pd.read_csv("../data/Titanic.csv")
df["label"] = df.Survived
df = df.drop(["PassengerId", "Survived", "Name", "Ticket", "Cabin"], axis=1)
# handling missing values
median_age = df.Age.median()
mode_embarked = df.Embarked.mode()[0]
df = df.fillna({"Age": median_age, "Embarked": mode_embarked})
###Output
_____no_output_____
###Markdown
Decision Tree Algorithm
###Code
random.seed(0)
train_df, test_df = train_test_split(df, 0.2)
tree = decision_tree_algorithm(train_df, max_depth=10)
accuracy = calculate_accuracy(test_df, tree)
pprint(tree, width=50)
accuracy
###Output
{'Sex = male': [{'Fare <= 9.4833': [{'Age <= 32.0': [{'Age <= 30.5': [{'Fare <= 7.7958': [{'Fare <= 7.7417': [{'Fare <= 7.2292': [{'Age <= 27.0': [{'Age <= 25.0': [0,
1]},
0]},
0]},
{'Age <= 19.0': [0,
{'Age <= 21.0': [1,
0]}]}]},
{'Age <= 20.0': [{'Fare <= 8.05': [{'Fare <= 7.8958': [0,
{'Fare <= 7.925': [1,
0]}]},
0]},
{'Fare <= 8.4583': [0,
{'Fare <= 8.6625': [{'Age <= 26.0': [0,
{'Age <= 27.0': [1,
0]}]},
0]}]}]}]},
{'Fare <= 7.775': [0,
{'Fare <= 7.8542': [1,
{'Age <= 31.0': [1,
0]}]}]}]},
0]},
{'Age <= 6.0': [{'Pclass = 3': [{'Fare <= 20.575': [1,
{'Fare <= 31.275': [0,
{'Fare <= 31.3875': [1,
0]}]}]},
1]},
{'Pclass = 1': [{'Age <= 52.0': [{'Fare <= 30.5': [{'Fare <= 26.0': [0,
{'Fare <= 29.7': [{'Fare <= 26.55': [1,
0]},
1]}]},
{'Fare <= 227.525': [{'SibSp = 0': [{'Age <= 17.0': [1,
0]},
{'Fare <= 110.8833': [{'Fare <= 57.0': [1,
0]},
1]}]},
1]}]},
{'Age <= 71.0': [{'Embarked = S': [0,
{'Age <= 56.0': [1,
0]}]},
1]}]},
{'Age <= 34.0': [{'Fare <= 56.4958': [{'Fare <= 46.9': [{'Embarked = C': [{'Pclass = 3': [{'Parch = 1': [1,
0]},
0]},
{'Age <= 9.0': [{'SibSp = 4': [0,
1]},
0]}]},
{'Age <= 28.0': [{'Age <= 26.0': [1,
0]},
1]}]},
0]},
{'Fare <= 10.5': [1,
0]}]}]}]}]},
{'Pclass = 3': [{'Fare <= 24.15': [{'Age <= 36.0': [{'Embarked = S': [{'Age <= 31.0': [{'Fare <= 16.7': [{'Fare <= 10.5167': [{'Fare <= 9.8417': [{'Age <= 19.0': [1,
0]},
0]},
{'Fare <= 12.475': [1,
{'Fare <= 14.4': [0,
1]}]}]},
{'Fare <= 18.0': [0,
{'Fare <= 20.525': [1,
0]}]}]},
1]},
{'Age <= 16.0': [1,
{'Age <= 18.0': [0,
{'Age <= 29.0': [{'Fare <= 7.8792': [1,
{'Fare <= 15.2458': [0,
1]}]},
0]}]}]}]},
0]},
{'Fare <= 31.275': [0,
{'Fare <= 31.3875': [1,
0]}]}]},
{'Fare <= 28.7125': [{'Fare <= 27.75': [{'Age <= 23.0': [1,
{'Age <= 55.0': [{'Age <= 26.0': [{'Age <= 25.0': [{'Fare <= 13.0': [0,
1]},
0]},
{'Age <= 36.0': [1,
{'Age <= 38.0': [0,
1]}]}]},
{'Fare <= 10.5': [0,
1]}]}]},
0]},
1]}]}]}
|
Chapter04/.ipynb_checkpoints/Exercise 4.01-checkpoint.ipynb | ###Markdown
1. Install xlrd
###Code
!pip install xlrd
###Output
Requirement already satisfied: xlrd in c:\programdata\anaconda3\lib\site-packages (1.2.0)
###Markdown
2. Load the Excel file
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
df = pd.read_excel("Sample - Superstore.xls")
df.head()
###Output
_____no_output_____
###Markdown
3. Drop this column altogether from the DataFrame
###Code
df.drop('Row ID',axis=1,inplace=True)
df.head()
###Output
_____no_output_____
###Markdown
4. Check the number of rows and columns.
###Code
df.shape
###Output
_____no_output_____ |
P0 bike_sharing/.ipynb_checkpoints/Bay_Area_Bike_Share_Analysis-checkpoint.ipynb | ###Markdown
Bay Area Bike Share Analysis Note: [click here for the English version of this document](https://github.com/udacity/data-analyst/tree/master/projects/bike_sharing). Introduction > **Tip**: Quoted sections like this one provide helpful instructions on how to navigate and use an iPython notebook. [Bay Area Bike Share](http://www.bayareabikeshare.com/) is a company that provides on-demand bike rentals for customers in San Francisco, Redwood City, Palo Alto, Mountain View, and San Jose. Users can unlock bikes from a variety of stations in each city and return them to any station within the same city. Users pay for the service either through a yearly subscription or by purchasing 3-day or 24-hour passes. Users can make an unlimited number of trips; trips under thirty minutes in length incur no additional fees, while longer trips incur overtime fees.

In this project, you will put yourself in the shoes of a data analyst performing an exploratory analysis on the data. You will get to know two important parts of the data analysis process: data wrangling and exploratory data analysis. But before you even start looking at the data, think about a few questions you would want to understand about the bike share data. For example, if you were working at Bay Area Bike Share, what kinds of information would you want to know in order to make smarter business decisions? Or you could think about whether you would be a user of this bike share service. What factors might influence how you would want to use the service?

**Question 1**: Write down at least two questions you think could be answered with the data. **Answer**: Replace this text with your response!

> **Tip**: If you double-click on this cell, you will see the text change so that all of the formatting is removed. This allows you to edit this block of text. This block of text is written using [Markdown](http://daringfireball.net/projects/markdown/syntax), which is a way to style text using headers, links, italics, and many other options. You will learn more about Markdown later in the Nanodegree program. Press Shift + Enter or Shift + Return to preview this cell.

Using Visualizations to Communicate Findings in Data As a data analyst, the ability to communicate your findings effectively is an important part of the job. After all, no matter how good your analysis is, it still has to be conveyed through good communication. In 2014, Bay Area Bike Share held an [Open Data Challenge](http://www.bayareabikeshare.com/datachallenge-2014) to encourage data analysts to create visualizations based on their open data set. You will create your own visualizations in this project, but before you start, read the [analysis report (in English)](http://thfield.github.io/babs/index.html) from Tyler Field, winner of the Best Analysis award. Read through the entire report and answer the following question: **Question 2**: In your opinion, which visualization provides the most interesting insights? Based on Tyler's analysis, are you able to answer any of the questions you raised earlier? Why or why not? **Answer**: Replace this text with your response!

Data Wrangling Now it's time to explore the data for yourself. The Year 1 and Year 2 data from Bay Area Bike Share's [Open Data](http://www.bayareabikeshare.com/open-data) page are already provided with the project materials; you do not need to download anything else. The data comes in three parts: the first half of Year 1 (files starting with `201402`), the second half of Year 1 (files starting with `201408`), and all of Year 2 (files starting with `201508`). Three main data files are associated with each part: trip data showing information about each trip taken in the system (`*_trip_data.csv`), information about the stations in the system (`*_station_data.csv`), and daily weather data for each city in the system (`*_weather_data.csv`).

When dealing with a lot of data, it is best to start with a sample of the data. That way it is much easier to check whether our data wrangling steps are working, since the code will take less time to run. Once we are satisfied with how the whole process is going, we can set things up to wrangle the full dataset. Since the bulk of the data is contained in the trip information, our target should be a subset of the trip data for testing. To start, we will only look at the first month of bike trip data, from 2013-08-29 to 2013-09-30. The code below takes the data from the first half of the first year, then writes the values from the first month to an output file. This code takes advantage of the fact that the data is sorted by date (although it should be noted that the first two days are sorted by trip time rather than strictly chronologically).

First, run the first code cell below to load all of the packages and functions you will use in the analysis. Then run the second code cell to read a subset of the first trip data file and write a new file containing only the subset we are initially interested in.

> **Tip**: You can run a code cell the same way you preview a Markdown cell: click on the cell and use the keyboard shortcut **Shift + Enter** or **Shift + Return**. Alternatively, you can execute a code cell by clicking the Play button on the toolbar after selecting it. While the cell is running, you will see an asterisk in the message to the left of the cell, i.e. `In [*]:`. When execution finishes, the asterisk changes to a number, e.g. `In [1]`. If there is output, it will be displayed as `Out [1]:`, with a number matching the "In" number.
###Code
# Import all of the required packages and functions
import csv
from datetime import datetime
import numpy as np
import pandas as pd
from babs_datacheck import question_3
from babs_visualizations import usage_stats, usage_plot
from IPython.display import display
%matplotlib inline
# File locations
file_in = '201402_trip_data.csv'
file_out = '201309_trip_data.csv'
with open(file_out, 'w') as f_out, open(file_in, 'r') as f_in:
    # set up the CSV reader and writer objects
in_reader = csv.reader(f_in)
out_writer = csv.writer(f_out)
    # write rows from the in-file to the out-file until the specified date is reached
while True:
datarow = next(in_reader)
        # trip start dates are in the 3rd column, in m/d/yyyy HH:MM format
if datarow[2][:9] == '10/1/2013':
break
out_writer.writerow(datarow)
###Output
_____no_output_____
###Markdown
Condensing the Trip Data The first step is to look at the structure of the dataset to see whether there is any data wrangling we need to perform. The cell below reads in the sampled data file you created in the previous cell and prints out the first few rows of the table.
###Code
sample_data = pd.read_csv('201309_trip_data.csv')
display(sample_data.head())
###Output
_____no_output_____
###Markdown
In this exploration, we are going to condense the trip data down to the factors that affect the number of trips taken. To start, we will focus on a few selected columns: trip duration, start time, start station, end station, and subscription type. Start time will be divided into year, month, and hour components. We will also add a column for the day of the week and convert the start and end stations into start and end cities. Let's tackle the last part of the wrangling process first. Run the code cell below to see how the station information is structured, then observe how the code will create the station-to-city mapping. Note that the station mapping is set up as a function, `create_station_mapping()`. Since stations can be added or removed over time, this function will allow us to combine the station information across all three parts of the data once we are ready to start exploring.
###Code
# Display the first few rows of the station data file.
station_info = pd.read_csv('201402_station_data.csv')
display(station_info.head())
# This function will be called by another function later on to create the mapping.
def create_station_mapping(station_data):
"""
Create a mapping from station IDs to cities, returning the
result as a dictionary.
"""
station_map = {}
for data_file in station_data:
with open(data_file, 'r') as f_in:
            # set up the csv reader object - note that we are using DictReader, which
            # treats the first row of the file as the header, i.e. the dictionary keys for each row
weather_reader = csv.DictReader(f_in)
for row in weather_reader:
station_map[row['station_id']] = row['landmark']
return station_map
###Output
_____no_output_____
###Markdown
Now you can use the mapping to condense the trip data down to the selected columns above. This is done in the `summarise_data()` function below. As part of this function, the `datetime` module is used to parse the timestamp strings from the original data file into `datetime` objects (`strptime`), which can then be output in a different string format (`strftime`). The parsed objects also have a variety of attributes and methods for quickly obtaining the values you need. To finish the `summarise_data()` function, you will need to complete two tasks. First, you need to perform an operation to convert the trip durations from seconds to minutes. (There are 60 seconds in a minute.) Second, you need to create columns for the year, month, hour, and day of the week. Check out the [documentation for datetime objects in the datetime module](https://docs.python.org/2/library/datetime.html#datetime-objects). **Find the appropriate attributes and methods to complete the code below**.
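For instance (a small illustrative snippet, not part of the original project code; the timestamp below is made up but follows the m/d/yyyy HH:MM format used in the trip files), parsing a timestamp and pulling out its parts might look like this:

```python
from datetime import datetime

ts = datetime.strptime('8/29/2013 14:13', '%m/%d/%Y %H:%M')  # format used in the trip files
print(ts.year, ts.month, ts.hour)   # 2013 8 14
print(ts.strftime('%Y-%m-%d'))      # 2013-08-29
print(ts.strftime('%A'))            # Thursday
```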
###Code
def summarise_data(trip_in, station_data, trip_out):
"""
This function takes trip and station information and outputs a new
data file with a condensed summary of major trip information. The
trip_in and station_data arguments will be lists of data files for
the trip and station information, respectively, while trip_out
specifies the location to which the summarized data will be written.
"""
# generate dictionary of station - city mapping
station_map = create_station_mapping(station_data)
with open(trip_out, 'w') as f_out:
# set up csv writer object
out_colnames = ['duration', 'start_date', 'start_year',
'start_month', 'start_hour', 'weekday',
'start_city', 'end_city', 'subscription_type']
trip_writer = csv.DictWriter(f_out, fieldnames = out_colnames)
trip_writer.writeheader()
for data_file in trip_in:
with open(data_file, 'r') as f_in:
# set up csv reader object
trip_reader = csv.DictReader(f_in)
# collect data from and process each row
for row in trip_reader:
new_point = {}
# convert duration units from seconds to minutes
### Question 3a: Add a mathematical operation below ###
### to convert durations from seconds to minutes. ###
                    new_point['duration'] = float(row['Duration']) / 60  # 60 seconds per minute (one way to fill in the exercise blank)
# reformat datestrings into multiple columns
### Question 3b: Fill in the blanks below to generate ###
### the expected time values. ###
trip_date = datetime.strptime(row['Start Date'], '%m/%d/%Y %H:%M')
new_point['start_date'] = trip_date.strftime('%Y-%m-%d')
                    # One possible completion of the exercise blanks, using datetime attributes/methods:
                    new_point['start_year'] = trip_date.year
                    new_point['start_month'] = trip_date.month
                    new_point['start_hour'] = trip_date.hour
                    new_point['weekday'] = trip_date.strftime('%A')
# remap start and end terminal with start and end city
new_point['start_city'] = station_map[row['Start Terminal']]
new_point['end_city'] = station_map[row['End Terminal']]
# two different column names for subscribers depending on file
if 'Subscription Type' in row:
new_point['subscription_type'] = row['Subscription Type']
else:
new_point['subscription_type'] = row['Subscriber Type']
# write the processed information to the output file.
trip_writer.writerow(new_point)
###Output
_____no_output_____
###Markdown
**Question 3**: Run the code block below to call the `summarise_data()` function you completed in the cell above. It will take the data contained in the files listed in the `trip_in` and `station_data` variables and write a new file at the location specified in the `trip_out` variable. If you have performed the data wrangling correctly, the code block below will print out the first few rows of the dataframe and a message verifying that the data point counts are correct.
###Code
# Process the data by running the function we wrote above.
station_data = ['201402_station_data.csv']
trip_in = ['201309_trip_data.csv']
trip_out = '201309_trip_summary.csv'
summarise_data(trip_in, station_data, trip_out)
# Load in the data file and print out the first few rows
sample_data = pd.read_csv(trip_out)
display(sample_data.head())
# Verify the dataframe by counting data points matching each of the time features.
question_3(sample_data)
###Output
_____no_output_____
###Markdown
> **Tip**: If you save the jupyter Notebook, the output from running code blocks will also be saved. However, the state of your workspace will be reset each time a new session is started. Make sure you run all of the necessary code blocks from your previous session to re-establish variables and functions before picking up where you left off. Exploratory Data Analysis Now that you have some data saved in a file, let's look at some initial trends in the data. Some code has already been written for you in the `babs_visualizations.py` script to help summarize and visualize the data; it has been exported as the functions `usage_stats()` and `usage_plot()`. In this section we will walk through some of the things you can do with these functions, and you will use them on your own in the last part of the project. First, run the cell below to load the data, then use the `usage_stats()` function to see the total number of trips made in the first month of operations, along with some statistics about trip durations.
###Code
trip_data = pd.read_csv('201309_trip_summary.csv')
usage_stats(trip_data)
###Output
_____no_output_____
###Markdown
You should see that there were over 27,000 trips in the first month, and that the average trip duration is larger than the median trip duration (the point where 50% of trips are shorter and 50% are longer). In fact, the mean is larger than 75% of the shortest durations. That is quite interesting, and we will come back to it later. First, let's look at how these trips are divided by subscription type. One easy way to build intuition about the data is to plot it. For this we will use the `usage_plot()` function. The second argument of the function lets us count up the trips across a selected variable and display the information in a single plot. The expression below will show how many customer trips and how many subscriber trips were made. Try it out now!
###Code
usage_plot(trip_data, 'subscription_type')
###Output
_____no_output_____
###Markdown
It looks like there were about 50% more subscriber trips than customer trips in the first month. Now let's try a different variable. What does the distribution of trip durations look like?
###Code
usage_plot(trip_data, 'duration')
###Output
_____no_output_____
###Markdown
Looks pretty strange, doesn't it? Take a look at the duration values on the x-axis. Most rides are 30 minutes or less, since extra time on a single trip incurs overtime fees. Yet the first bar spans durations up to about 1000 minutes, or more than 16 hours. Based on the statistics we got from `usage_stats()`, some trips have very long durations that pull the mean well above the median: the plot is greatly exaggerated in scale and not very useful to us. When exploring data, you will often need to work with the visualization function's parameters to make the data easier to understand. This is where the third argument of `usage_plot()` comes in. Filters can be set for data points as a list of conditions. Let's start by limiting things to trips of less than 60 minutes.
###Code
usage_plot(trip_data, 'duration', ['duration < 60'])
###Output
_____no_output_____
###Markdown
This looks much better! You can see that most trips are indeed less than 30 minutes in length, but there is more you can do to improve the presentation. Since the minimum duration is not 0, the left-most bar sits slightly above 0. We want a clear boundary at 30 minutes, so the plot will look much cleaner if the bar sizes and edges line up with certain minute marks. The good news is that you can adjust the plot with the optional "boundary" and "bin_width" parameters. By setting "boundary" to 0, one of the bar edges (here, the left-most bar) will start at 0 rather than at the minimum trip duration. And by setting "bin_width" to 5, each bar will count up data points in five-minute intervals.
###Code
usage_plot(trip_data, 'duration', ['duration < 60'], boundary = 0, bin_width = 5)
###Output
_____no_output_____
###Markdown
**Question 4**: Which five-minute trip duration range shows the most trips? Approximately how many trips were made in this range? **Answer**: Replace this text with your response! Visual adjustments like these may be small, but they go a long way toward helping you understand the data and convey your findings to others. Performing Your Own Analysis Now that you have done some exploration on a small sample of the dataset, it is time to go a step further and wrangle all of the data into a single file to see what trends you can find. The code below uses the same `summarise_data()` function as before to process the data. After running the cell below, you will have processed all of the data into a single data file. Note that the function displays no output while it runs, and that it takes quite a while to complete, since you are now working with much more data than the earlier sample.
###Code
station_data = ['201402_station_data.csv',
'201408_station_data.csv',
'201508_station_data.csv' ]
trip_in = ['201402_trip_data.csv',
'201408_trip_data.csv',
'201508_trip_data.csv' ]
trip_out = 'babs_y1_y2_summary.csv'
# This function will take in the station data and trip data and
# write out a new data file to the name listed above in trip_out.
summarise_data(trip_in, station_data, trip_out)
###Output
_____no_output_____
###Markdown
由于 `summarise_data()` 函数已创建了一个独立文件,因此无需再次运行上面的单元格,即使你关掉 notebook 并开启一个新会话。你可以直接在数据集中加载,然后从那里进行探索。
###Code
trip_data = pd.read_csv('babs_y1_y2_summary.csv')
display(trip_data.head())
###Output
_____no_output_____
###Markdown
Now it's your turn to explore the new dataset with `usage_stats()` and `usage_plot()` and report your findings! Here are some hints on how to use the `usage_plot()` function:
- first argument (required): the loaded dataframe from which the data will be analyzed.
- second argument (required): the variable on which trip counts will be divided.
- third argument (optional): data filters limiting the data points that will be counted. Filters should be given as a list of conditions; each element should be a string in the format `'<field> <op> <value>'`, using any one of the following operators: >, <, >=, <=, ==, !=. Data points must satisfy all conditions to be counted or visualized. For example, `["duration < 15", "start_city == 'San Francisco'"]` keeps only trips that started in San Francisco and lasted less than 15 minutes.

If the data is split on a numeric variable (thus creating a histogram), some additional parameters can be set by keyword.
- "n_bins" specifies the number of bars in the resulting plot (default is 10).
- "bin_width" specifies the width of each bar (default divides the range of the data by the number of bins). "n_bins" and "bin_width" cannot be used at the same time.
- "boundary" specifies the position of one of the bar edges; the other bar edges will be placed around that value (this may result in an extra bar being plotted). This parameter can be used together with the "n_bins" and "bin_width" parameters.

You can also add some customization to the `usage_stats()` function. Its second argument can be used to set up filter conditions, just as they are set up for `usage_plot()`.
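As a concrete example call (hypothetical, simply combining the options described above with values already used earlier in this notebook), the following would plot only sub-hour trips that started in San Francisco, in five-minute bins anchored at 0:

```python
usage_plot(trip_data, 'duration',
           ["duration < 60", "start_city == 'San Francisco'"],
           boundary = 0, bin_width = 5)
```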
###Code
usage_stats(trip_data)
usage_plot(trip_data)
###Output
_____no_output_____
###Markdown
Use the functions above to explore some different variables, and take note of some trends you find. Feel free to create additional cells if you want to explore the dataset in other or multiple ways. > **Tip**: To add more cells to the notebook, you can use the "Insert Cell Above" and "Insert Cell Below" options from the menu bar above. There is also an icon in the toolbar for adding new cells, plus additional icons for moving cells up and down in the document. By default, new cells are code cells; you can also specify the cell type (Code or Markdown) from the Cell menu or the dropdown in the toolbar. Once you are done exploring, copy the two visualizations you found most interesting into the cells below, then answer the following questions with a few sentences describing what you found and why you selected those figures. Make sure you adjust the number of bins or the bin limits so that they effectively convey the data findings. Feel free to supplement this with any additional numbers generated from `usage_stats()`, or to place multiple visualizations to support your observations.
###Code
# Final Plot 1
usage_plot(trip_data)
###Output
_____no_output_____
###Markdown
**Question 5a**: What is interesting about the visualization above? Why did you select it? **Answer**: Replace this text with your response!
###Code
# Final Plot 2
usage_plot(trip_data)
###Output
_____no_output_____ |
.ipynb_checkpoints/graphiques-checkpoint-old.ipynb | ###Markdown
Comparison of the algorithms 1) Random VS Random 1.1) Black VS white
###Code
#os.system(myCmd1)
#os.system(myCmd2)
stats("data/alea_noirVSblanc.dat")
#plt.savefig("Graphs2/Histo_alea_noirVSblanc.svg", format = 'svg')
gaussienne("data/alea_noirVSblanc.dat", "Blanc")
#plt.savefig("Graphs2/Gaussienne_alea_noirVSblanc.svg", format = 'svg')
###Output
[ 0. 1. 2. 3. 4. 5. 6. 7. 8. 9. 10. 11. 12. 13. 14. 15. 16. 17.
18. 19. 20. 21. 22. 23. 24. 25. 26. 27. 28. 29. 30. 31. 32. 33. 34. 35.
36. 37. 38. 39. 40. 41. 42. 43. 44. 45. 46. 47. 48. 49. 50. 51. 52. 53.
54. 55. 56. 57. 58. 59. 60. 61. 62. 63.] [ 14 1 1 6 5 16 31 57 98 171 206 295 430 593
740 870 1171 1378 1537 1932 2167 2546 2697 2885 3284 3356 3615 3957
4038 4171 4185 4164 4217 4041 4029 3848 3670 3550 3310 3006 2768 2641
2280 2011 1719 1498 1330 1187 946 774 676 521 395 315 241 136
115 72 48 19 13 3 3 1]
###Markdown
1.2) Player 1 VS player 2 (black 50%, white 50%)
###Code
stats_50("data/alea_jAVSjB_2.dat", "Alea A", "Alea B")
#plt.savefig("Graphs2/Histo_alea_jAVSjB.svg", format = 'svg')
gaussienne("data/alea_jAVSjB_2.dat", "Aléa A")
#plt.savefig("Graphs2/Gaussienne_alea_jAVSjB.svg", format = 'svg')
###Output
[ 0. 1. 2. 3. 4. 5. 6. 7. 8. 9. 10. 11. 12. 13. 14. 15. 16. 17.
18. 19. 20. 21. 22. 23. 24. 25. 26. 27. 28. 29. 30. 31. 32. 33. 34. 35.
36. 37. 38. 39. 40. 41. 42. 43. 44. 45. 46. 47. 48. 49. 50. 51. 52. 53.
54. 55. 56. 57. 58. 59. 60. 61. 62. 63.] [ 32 1 2 11 12 23 29 63 95 155 231 309 426 562
717 840 1031 1267 1507 1750 2031 2224 2448 2692 3030 3389 3539 3680
3885 3915 4022 4162 4244 4075 4158 3889 3835 3752 3388 3247 2981 2729
2492 2312 1928 1715 1430 1271 1084 820 730 542 417 276 222 155
96 60 29 19 14 8 1 1]
###Markdown
2) RetourneMax VS Random
###Code
stats_50("data/retourneMax.dat", "RetourneMax", "Aléatoire")
#plt.savefig("Graphs2/Histo_retourneMax.svg", format = 'svg')
gaussienne("data/retourneMax.dat", "RetourneMax")
#plt.savefig("Graphs2/Gaussienne_retourneMax.svg", format = 'svg')
###Output
[ 0. 2. 3. 4. 5. 6. 7. 8. 9. 10. 11. 12. 13. 14. 15. 16. 17. 18.
19. 20. 21. 22. 23. 24. 25. 26. 27. 28. 29. 30. 31. 32. 33. 34. 35. 36.
37. 38. 39. 40. 41. 42. 43. 44. 45. 46. 47. 48. 49. 50. 51. 52. 53. 54.
55. 56. 57. 58. 59. 60. 61. 62. 63. 64.] [ 2 1 5 9 9 26 46 77 120 186 253 335 1223 600
1029 855 1706 1234 2190 1576 2430 1898 2647 2329 2831 2624 2977 3038
3227 3061 3535 3432 3441 3477 3531 3440 3421 3278 3179 3140 3090 2929
2640 2513 2375 2270 1935 1687 1499 1271 1173 930 801 608 475 355
286 242 188 137 87 59 28 4]
###Markdown
3) MinMax VS Random 3.1) Depth 1
###Code
stats_50("data/MinMax_prof1.dat", "MinMax", "Aléatoire")
#plt.savefig("Graphs2/Histo_MinMax_prof1.svg", format = 'svg')
gaussienne("data/MinMax_prof1.dat", "MinMax prof1")
#plt.savefig("Graphs2/Gaussienne_MinMax_prof1.svg", format = 'svg')
###Output
[ 0. 1. 2. 3. 4. 5. 6. 7. 8. 9. 10. 11. 12. 13. 14. 15. 16. 17.
18. 19. 20. 21. 22. 23. 24. 25. 26. 27. 28. 29. 30. 31. 32. 33. 34. 35.
36. 37. 38. 39. 40. 41. 42. 43. 44. 45. 46. 47. 48. 49. 50. 51. 52. 53.
54. 55. 56. 57. 58. 59. 60. 61. 62. 63. 64.] [ 239 80 72 95 130 148 223 304 381 502 626 771 925 1018
1214 1336 1409 1574 1692 1812 1911 2057 2129 2113 2169 2169 2262 2273
2432 2499 2468 2536 2623 2707 2719 2807 2856 3031 3175 3206 3251 3126
3023 2811 2785 2693 2523 2314 2147 1768 1640 1436 1208 961 880 676
581 484 319 277 188 109 71 32 4]
###Markdown
3.2) Depth 2
###Code
stats_50("data/MinMax_prof2.dat", "MinMax", "Aléatoire")
plt.savefig("Graphs2/Histo_MinMax_prof2.svg", format = 'svg')
gaussienne("data/MinMax_prof2.dat", "MinMax prof2")
#plt.savefig("Graphs2/Gaussienne_MinMax_prof2.svg", format = 'svg')
###Output
[ 0. 1. 2. 3. 4. 5. 6. 7. 8. 9. 10. 11. 12. 13. 14. 15. 16. 17.
18. 19. 20. 21. 22. 23. 24. 25. 26. 27. 28. 29. 30. 31. 32. 33. 34. 35.
36. 37. 38. 39. 40. 41. 42. 43. 44. 45. 46. 47. 48. 49. 50. 51. 52. 53.
54. 55. 56. 57. 58. 59. 60. 61. 62. 63. 64.] [ 220 52 50 79 75 122 179 263 360 481 594 738 800 1020
1099 1299 1410 1464 1685 1718 1868 1965 1970 2044 2115 2152 2152 2207
2350 2348 2426 2411 2418 2628 2640 2721 2992 2952 3175 3285 3396 3385
3330 3292 3091 2871 2681 2382 2150 1925 1696 1499 1240 1061 823 729
553 426 306 251 161 136 49 34 6]
###Markdown
3.3) Depth 3
###Code
stats_50("data/MinMax_prof3.dat", "MinMax", "Aléatoire")
#plt.savefig("Graphs2/Histo_MinMax_prof3.svg", format = 'svg')
gaussienne("data/MinMax_prof3.dat", "MinMax prof3")
#plt.savefig("Graphs2/Gaussienne_MinMax_prof3.svg", format = 'svg')
###Output
[ 0. 1. 2. 3. 4. 5. 6. 7. 8. 9. 10. 11. 12. 13. 14. 15. 16. 17.
18. 19. 20. 21. 22. 23. 24. 25. 26. 27. 28. 29. 30. 31. 32. 33. 34. 35.
36. 37. 38. 39. 40. 41. 42. 43. 44. 45. 46. 47. 48. 49. 50. 51. 52. 53.
54. 55. 56. 57. 58. 59. 60. 61. 62. 63. 64.] [ 186 38 63 79 95 143 190 241 348 469 581 690 782 948
1127 1292 1404 1594 1652 1681 1888 1872 2005 1972 2116 2111 2226 2268
2264 2294 2410 2482 2550 2602 2683 2860 2942 3035 3063 3354 3474 3371
3288 3313 3143 2894 2651 2332 2170 1951 1730 1454 1246 1054 843 687
558 412 296 226 134 94 49 21 9]
###Markdown
4) Grouped plots 4.1) Plot
###Code
#plot("data/retourneMax.dat", "RetourneMax")
plot("data/MinMax_prof1.dat", "MinMax prof1")
plot("data/MinMax_prof2.dat", "MinMax prof2")
plot("data/MinMax_prof3.dat", "MinMax prof3")
#plt.savefig("Graphs2/Superposition_MinMax_1-3.svg", format = 'svg')
###Output
[ 0. 1. 2. 3. 4. 5. 6. 7. 8. 9. 10. 11. 12. 13. 14. 15. 16. 17.
18. 19. 20. 21. 22. 23. 24. 25. 26. 27. 28. 29. 30. 31. 32. 33. 34. 35.
36. 37. 38. 39. 40. 41. 42. 43. 44. 45. 46. 47. 48. 49. 50. 51. 52. 53.
54. 55. 56. 57. 58. 59. 60. 61. 62. 63. 64.] [ 239 80 72 95 130 148 223 304 381 502 626 771 925 1018
1214 1336 1409 1574 1692 1812 1911 2057 2129 2113 2169 2169 2262 2273
2432 2499 2468 2536 2623 2707 2719 2807 2856 3031 3175 3206 3251 3126
3023 2811 2785 2693 2523 2314 2147 1768 1640 1436 1208 961 880 676
581 484 319 277 188 109 71 32 4]
[ 0. 1. 2. 3. 4. 5. 6. 7. 8. 9. 10. 11. 12. 13. 14. 15. 16. 17.
18. 19. 20. 21. 22. 23. 24. 25. 26. 27. 28. 29. 30. 31. 32. 33. 34. 35.
36. 37. 38. 39. 40. 41. 42. 43. 44. 45. 46. 47. 48. 49. 50. 51. 52. 53.
54. 55. 56. 57. 58. 59. 60. 61. 62. 63. 64.] [ 220 52 50 79 75 122 179 263 360 481 594 738 800 1020
1099 1299 1410 1464 1685 1718 1868 1965 1970 2044 2115 2152 2152 2207
2350 2348 2426 2411 2418 2628 2640 2721 2992 2952 3175 3285 3396 3385
3330 3292 3091 2871 2681 2382 2150 1925 1696 1499 1240 1061 823 729
553 426 306 251 161 136 49 34 6]
[ 0. 1. 2. 3. 4. 5. 6. 7. 8. 9. 10. 11. 12. 13. 14. 15. 16. 17.
18. 19. 20. 21. 22. 23. 24. 25. 26. 27. 28. 29. 30. 31. 32. 33. 34. 35.
36. 37. 38. 39. 40. 41. 42. 43. 44. 45. 46. 47. 48. 49. 50. 51. 52. 53.
54. 55. 56. 57. 58. 59. 60. 61. 62. 63. 64.] [ 186 38 63 79 95 143 190 241 348 469 581 690 782 948
1127 1292 1404 1594 1652 1681 1888 1872 2005 1972 2116 2111 2226 2268
2264 2294 2410 2482 2550 2602 2683 2860 2942 3035 3063 3354 3474 3371
3288 3313 3143 2894 2651 2332 2170 1951 1730 1454 1246 1054 843 687
558 412 296 226 134 94 49 21 9]
###Markdown
4.2) Histograms
###Code
histo_groupe()
plt.tight_layout()
#plt.savefig("Graphs2/Superposition_histo.svg", format = 'svg')
###Output
_____no_output_____ |
intro_to_tensorflow.ipynb | ###Markdown
TensorFlow Neural Network Lab In this lab, you'll use all the tools you learned from *Introduction to TensorFlow* to label images of English letters! The data you are using, notMNIST, consists of images of a letter from A to J in different fonts.The above images are a few examples of the data you'll be training on. After training the network, you will compare your prediction model against test data. Your goal, by the end of this lab, is to make predictions against that test set with at least an 80% accuracy. Let's jump in! To start this lab, you first need to import all the necessary modules. Run the code below. If it runs successfully, it will print "`All modules imported`".
###Code
import hashlib
import os
import pickle
from urllib.request import urlretrieve
import numpy as np
from PIL import Image
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import resample
from tqdm import tqdm
from zipfile import ZipFile
print('All modules imported.')
###Output
All modules imported.
###Markdown
The notMNIST dataset is too large for many computers to handle. It contains 500,000 images for just training. You'll be using a subset of this data, 15,000 images for each label (A-J).
###Code
def download(url, file):
"""
Download file from <url>
:param url: URL to file
:param file: Local file path
"""
if not os.path.isfile(file):
print('Downloading ' + file + '...')
urlretrieve(url, file)
print('Download Finished')
# Download the training and test dataset.
download('https://s3.amazonaws.com/udacity-sdc/notMNIST_train.zip', 'notMNIST_train.zip')
download('https://s3.amazonaws.com/udacity-sdc/notMNIST_test.zip', 'notMNIST_test.zip')
# Make sure the files aren't corrupted
assert hashlib.md5(open('notMNIST_train.zip', 'rb').read()).hexdigest() == 'c8673b3f28f489e9cdf3a3d74e2ac8fa',\
'notMNIST_train.zip file is corrupted. Remove the file and try again.'
assert hashlib.md5(open('notMNIST_test.zip', 'rb').read()).hexdigest() == '5d3c7e653e63471c88df796156a9dfa9',\
'notMNIST_test.zip file is corrupted. Remove the file and try again.'
# Wait until you see that all files have been downloaded.
print('All files downloaded.')
def uncompress_features_labels(file):
"""
Uncompress features and labels from a zip file
:param file: The zip file to extract the data from
"""
features = []
labels = []
with ZipFile(file) as zipf:
# Progress Bar
filenames_pbar = tqdm(zipf.namelist(), unit='files')
# Get features and labels from all files
for filename in filenames_pbar:
# Check if the file is a directory
if not filename.endswith('/'):
with zipf.open(filename) as image_file:
image = Image.open(image_file)
image.load()
# Load image data as 1 dimensional array
# We're using float32 to save on memory space
feature = np.array(image, dtype=np.float32).flatten()
# Get the the letter from the filename. This is the letter of the image.
label = os.path.split(filename)[1][0]
features.append(feature)
labels.append(label)
return np.array(features), np.array(labels)
# Get the features and labels from the zip files
train_features, train_labels = uncompress_features_labels('notMNIST_train.zip')
test_features, test_labels = uncompress_features_labels('notMNIST_test.zip')
# Limit the amount of data to work with a docker container
docker_size_limit = 150000
train_features, train_labels = resample(train_features, train_labels, n_samples=docker_size_limit)
# Set flags for feature engineering. This will prevent you from skipping an important step.
is_features_normal = False
is_labels_encod = False
# Wait until you see that all features and labels have been uncompressed.
print('All features and labels uncompressed.')
###Output
100%|██████████| 210001/210001 [00:45<00:00, 4653.33files/s]
100%|██████████| 10001/10001 [00:02<00:00, 4893.45files/s]
###Markdown
Problem 1The first problem involves normalizing the features for your training and test data.Implement Min-Max scaling in the `normalize_grayscale()` function to a range of `a=0.1` and `b=0.9`. After scaling, the values of the pixels in the input data should range from 0.1 to 0.9.Since the raw notMNIST image data is in [grayscale](https://en.wikipedia.org/wiki/Grayscale), the current values range from a min of 0 to a max of 255.Min-Max Scaling:$X'=a+{\frac {\left(X-X_{\min }\right)\left(b-a\right)}{X_{\max }-X_{\min }}}$*If you're having trouble solving problem 1, you can view the solution [here](https://github.com/udacity/deep-learning/blob/master/intro-to-tensorflow/intro_to_tensorflow_solution.ipynb).*
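As a quick worked example (added here for illustration, using the same $a$, $b$, and grayscale range as above): a mid-range pixel value of $X = 127.5$ maps to $X' = 0.1 + \frac{\left(127.5 - 0\right)\left(0.9 - 0.1\right)}{255 - 0} = 0.1 + 0.4 = 0.5$, while $X = 0$ maps to 0.1 and $X = 255$ maps to 0.9.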
###Code
# Problem 1 - Implement Min-Max scaling for grayscale image data
def normalize_grayscale(image_data):
"""
Normalize the image data with Min-Max scaling to a range of [0.1, 0.9]
:param image_data: The image data to be normalized
:return: Normalized image data
"""
# TODO: Implement Min-Max scaling for grayscale image data
a = 0.1
b = 0.9
grayscale_min = 0
grayscale_max = 255
return a + (((image_data - grayscale_min)*(b - a))/(grayscale_max - grayscale_min))
### DON'T MODIFY ANYTHING BELOW ###
# Test Cases
np.testing.assert_array_almost_equal(
normalize_grayscale(np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 255])),
[0.1, 0.103137254902, 0.106274509804, 0.109411764706, 0.112549019608, 0.11568627451, 0.118823529412, 0.121960784314,
0.125098039216, 0.128235294118, 0.13137254902, 0.9],
decimal=3)
np.testing.assert_array_almost_equal(
normalize_grayscale(np.array([0, 1, 10, 20, 30, 40, 233, 244, 254,255])),
[0.1, 0.103137254902, 0.13137254902, 0.162745098039, 0.194117647059, 0.225490196078, 0.830980392157, 0.865490196078,
0.896862745098, 0.9])
if not is_features_normal:
train_features = normalize_grayscale(train_features)
test_features = normalize_grayscale(test_features)
is_features_normal = True
print('Tests Passed!')
if not is_labels_encod:
# Turn labels into numbers and apply One-Hot Encoding
encoder = LabelBinarizer()
encoder.fit(train_labels)
train_labels = encoder.transform(train_labels)
test_labels = encoder.transform(test_labels)
# Change to float32, so it can be multiplied against the features in TensorFlow, which are float32
train_labels = train_labels.astype(np.float32)
test_labels = test_labels.astype(np.float32)
is_labels_encod = True
print('Labels One-Hot Encoded')
assert is_features_normal, 'You skipped the step to normalize the features'
assert is_labels_encod, 'You skipped the step to One-Hot Encode the labels'
# Get randomized datasets for training and validation
train_features, valid_features, train_labels, valid_labels = train_test_split(
train_features,
train_labels,
test_size=0.05,
random_state=832289)
print('Training features and labels randomized and split.')
# Save the data for easy access
pickle_file = 'notMNIST.pickle'
if not os.path.isfile(pickle_file):
print('Saving data to pickle file...')
try:
with open('notMNIST.pickle', 'wb') as pfile:
pickle.dump(
{
'train_dataset': train_features,
'train_labels': train_labels,
'valid_dataset': valid_features,
'valid_labels': valid_labels,
'test_dataset': test_features,
'test_labels': test_labels,
},
pfile, pickle.HIGHEST_PROTOCOL)
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
print('Data cached in pickle file.')
###Output
Saving data to pickle file...
Data cached in pickle file.
###Markdown
Checkpoint All your progress is now saved to the pickle file. If you need to leave and come back to this lab, you no longer have to start from the beginning. Just run the code block below and it will load all the data and modules required to proceed.
###Code
%matplotlib inline
# Load the modules
import pickle
import math
import numpy as np
import tensorflow as tf
from tqdm import tqdm
import matplotlib.pyplot as plt
# Reload the data
pickle_file = 'notMNIST.pickle'
with open(pickle_file, 'rb') as f:
pickle_data = pickle.load(f)
train_features = pickle_data['train_dataset']
train_labels = pickle_data['train_labels']
valid_features = pickle_data['valid_dataset']
valid_labels = pickle_data['valid_labels']
test_features = pickle_data['test_dataset']
test_labels = pickle_data['test_labels']
del pickle_data # Free up memory
print('Data and modules loaded.')
###Output
/Users/pablomateofdez/anaconda3/envs/dlnd-tf-lab/lib/python3.5/importlib/_bootstrap.py:222: RuntimeWarning: compiletime version 3.6 of module 'tensorflow.python.framework.fast_tensor_util' does not match runtime version 3.5
return f(*args, **kwds)
###Markdown
Problem 2Now it's time to build a simple neural network using TensorFlow. Here, your network will be just an input layer and an output layer.For the input here the images have been flattened into a vector of $28 \times 28 = 784$ features. Then, we're trying to predict the image digit so there are 10 output units, one for each label. Of course, feel free to add hidden layers if you want, but this notebook is built to guide you through a single layer network. For the neural network to train on your data, you need the following float32 tensors: - `features` - Placeholder tensor for feature data (`train_features`/`valid_features`/`test_features`) - `labels` - Placeholder tensor for label data (`train_labels`/`valid_labels`/`test_labels`) - `weights` - Variable Tensor with random numbers from a truncated normal distribution. - See `tf.truncated_normal()` documentation for help. - `biases` - Variable Tensor with all zeros. - See `tf.zeros()` documentation for help.*If you're having trouble solving problem 2, review "TensorFlow Linear Function" section of the class. If that doesn't help, the solution for this problem is available [here](intro_to_tensorflow_solution.ipynb).*
###Code
# All the pixels in the image (28 * 28 = 784)
features_count = 784
# All the labels
labels_count = 10
# TODO: Set the features and labels tensors
features = tf.placeholder(tf.float32)
labels = tf.placeholder(tf.float32)
# TODO: Set the weights and biases tensors
weights = tf.Variable(tf.truncated_normal((features_count, labels_count)))
biases = tf.Variable(tf.zeros(labels_count))
### DON'T MODIFY ANYTHING BELOW ###
#Test Cases
from tensorflow.python.ops.variables import Variable
assert features._op.name.startswith('Placeholder'), 'features must be a placeholder'
assert labels._op.name.startswith('Placeholder'), 'labels must be a placeholder'
assert isinstance(weights, Variable), 'weights must be a TensorFlow variable'
assert isinstance(biases, Variable), 'biases must be a TensorFlow variable'
assert features._shape == None or (\
features._shape.dims[0].value is None and\
features._shape.dims[1].value in [None, 784]), 'The shape of features is incorrect'
assert labels._shape == None or (\
labels._shape.dims[0].value is None and\
labels._shape.dims[1].value in [None, 10]), 'The shape of labels is incorrect'
assert weights._variable._shape == (784, 10), 'The shape of weights is incorrect'
assert biases._variable._shape == (10), 'The shape of biases is incorrect'
assert features._dtype == tf.float32, 'features must be type float32'
assert labels._dtype == tf.float32, 'labels must be type float32'
# Feed dicts for training, validation, and test session
train_feed_dict = {features: train_features, labels: train_labels}
valid_feed_dict = {features: valid_features, labels: valid_labels}
test_feed_dict = {features: test_features, labels: test_labels}
# Linear Function WX + b
logits = tf.matmul(features, weights) + biases
prediction = tf.nn.softmax(logits)
# Cross entropy
cross_entropy = -tf.reduce_sum(labels * tf.log(prediction), reduction_indices=1)
# Training loss
loss = tf.reduce_mean(cross_entropy)
# Create an operation that initializes all variables
init = tf.global_variables_initializer()
# Test Cases
with tf.Session() as session:
session.run(init)
session.run(loss, feed_dict=train_feed_dict)
session.run(loss, feed_dict=valid_feed_dict)
session.run(loss, feed_dict=test_feed_dict)
biases_data = session.run(biases)
assert not np.count_nonzero(biases_data), 'biases must be zeros'
print('Tests Passed!')
# Determine if the predictions are correct
is_correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(labels, 1))
# Calculate the accuracy of the predictions
accuracy = tf.reduce_mean(tf.cast(is_correct_prediction, tf.float32))
print('Accuracy function created.')
###Output
Accuracy function created.
###Markdown
Problem 3Below are 2 parameter configurations for training the neural network. In each configuration, one of the parameters has multiple options. For each configuration, choose the option that gives the best acccuracy.Parameter configurations:Configuration 1* **Epochs:** 1* **Learning Rate:** * 0.8 * 0.5 * 0.1 * 0.05 * 0.01Configuration 2* **Epochs:** * 1 * 2 * 3 * 4 * 5* **Learning Rate:** 0.2The code will print out a Loss and Accuracy graph, so you can see how well the neural network performed.*If you're having trouble solving problem 3, you can view the solution [here](intro_to_tensorflow_solution.ipynb).*
###Code
# Change if you have memory restrictions
batch_size = 128
# TODO: Find the best parameters for each configuration
# --------------- My code ---------------
epochs = 1
learning_rate = 0.1
# --------------- My code ---------------
### DON'T MODIFY ANYTHING BELOW ###
# Gradient Descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
# The accuracy measured against the validation set
validation_accuracy = 0.0
# Measurements use for graphing loss and accuracy
log_batch_step = 50
batches = []
loss_batch = []
train_acc_batch = []
valid_acc_batch = []
with tf.Session() as session:
session.run(init)
batch_count = int(math.ceil(len(train_features)/batch_size))
for epoch_i in range(epochs):
# Progress bar
batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')
# The training cycle
for batch_i in batches_pbar:
# Get a batch of training features and labels
batch_start = batch_i*batch_size
batch_features = train_features[batch_start:batch_start + batch_size]
batch_labels = train_labels[batch_start:batch_start + batch_size]
# Run optimizer and get loss
_, l = session.run(
[optimizer, loss],
feed_dict={features: batch_features, labels: batch_labels})
# Log every 50 batches
if not batch_i % log_batch_step:
# Calculate Training and Validation accuracy
training_accuracy = session.run(accuracy, feed_dict=train_feed_dict)
validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)
# Log batches
previous_batch = batches[-1] if batches else 0
batches.append(log_batch_step + previous_batch)
loss_batch.append(l)
train_acc_batch.append(training_accuracy)
valid_acc_batch.append(validation_accuracy)
# Check accuracy against Validation data
validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)
loss_plot = plt.subplot(211)
loss_plot.set_title('Loss')
loss_plot.plot(batches, loss_batch, 'g')
loss_plot.set_xlim([batches[0], batches[-1]])
acc_plot = plt.subplot(212)
acc_plot.set_title('Accuracy')
acc_plot.plot(batches, train_acc_batch, 'r', label='Training Accuracy')
acc_plot.plot(batches, valid_acc_batch, 'x', label='Validation Accuracy')
acc_plot.set_ylim([0, 1.0])
acc_plot.set_xlim([batches[0], batches[-1]])
acc_plot.legend(loc=4)
plt.tight_layout()
plt.show()
print('Validation accuracy at {}'.format(validation_accuracy))
###Output
Epoch 1/1: 100%|██████████| 1114/1114 [00:05<00:00, 201.15batches/s]
###Markdown
Test You're going to test your model against your hold-out dataset/testing data. This will give you a good indicator of how well the model will do in the real world. You should have a test accuracy of at least 80%.
###Code
### DON'T MODIFY ANYTHING BELOW ###
# The accuracy measured against the test set
test_accuracy = 0.0
with tf.Session() as session:
session.run(init)
batch_count = int(math.ceil(len(train_features)/batch_size))
for epoch_i in range(epochs):
# Progress bar
batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')
# The training cycle
for batch_i in batches_pbar:
# Get a batch of training features and labels
batch_start = batch_i*batch_size
batch_features = train_features[batch_start:batch_start + batch_size]
batch_labels = train_labels[batch_start:batch_start + batch_size]
# Run optimizer
_ = session.run(optimizer, feed_dict={features: batch_features, labels: batch_labels})
# Check accuracy against Test data
test_accuracy = session.run(accuracy, feed_dict=test_feed_dict)
assert test_accuracy >= 0.80, 'Test accuracy at {}, should be equal to or greater than 0.80'.format(test_accuracy)
print('Nice Job! Test Accuracy is {}'.format(test_accuracy))
###Output
_____no_output_____
###Markdown
TensorFlow Neural Network Lab In this lab, you'll use all the tools you learned from *Introduction to TensorFlow* to label images of English letters! The data you are using, notMNIST, consists of images of a letter from A to J in different fonts.The above images are a few examples of the data you'll be training on. After training the network, you will compare your prediction model against test data. Your goal, by the end of this lab, is to make predictions against that test set with at least an 80% accuracy. Let's jump in! To start this lab, you first need to import all the necessary modules. Run the code below. If it runs successfully, it will print "`All modules imported`".
###Code
import hashlib
import os
import pickle
from urllib.request import urlretrieve
import numpy as np
from PIL import Image
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import resample
from tqdm import tqdm
from zipfile import ZipFile
print('All modules imported.')
###Output
All modules imported.
###Markdown
The notMNIST dataset is too large for many computers to handle. It contains 500,000 images for just training. You'll be using a subset of this data, 15,000 images for each label (A-J).
###Code
def download(url, file):
"""
Download file from <url>
:param url: URL to file
:param file: Local file path
"""
if not os.path.isfile(file):
print('Downloading ' + file + '...')
urlretrieve(url, file)
print('Download Finished')
# Download the training and test dataset.
download('https://s3.amazonaws.com/udacity-sdc/notMNIST_train.zip', 'notMNIST_train.zip')
download('https://s3.amazonaws.com/udacity-sdc/notMNIST_test.zip', 'notMNIST_test.zip')
# Make sure the files aren't corrupted
assert hashlib.md5(open('notMNIST_train.zip', 'rb').read()).hexdigest() == 'c8673b3f28f489e9cdf3a3d74e2ac8fa',\
'notMNIST_train.zip file is corrupted. Remove the file and try again.'
assert hashlib.md5(open('notMNIST_test.zip', 'rb').read()).hexdigest() == '5d3c7e653e63471c88df796156a9dfa9',\
'notMNIST_test.zip file is corrupted. Remove the file and try again.'
# Wait until you see that all files have been downloaded.
print('All files downloaded.')
def uncompress_features_labels(file):
"""
Uncompress features and labels from a zip file
:param file: The zip file to extract the data from
"""
features = []
labels = []
with ZipFile(file) as zipf:
# Progress Bar
filenames_pbar = tqdm(zipf.namelist(), unit='files')
# Get features and labels from all files
for filename in filenames_pbar:
# Check if the file is a directory
if not filename.endswith('/'):
with zipf.open(filename) as image_file:
image = Image.open(image_file)
image.load()
# Load image data as 1 dimensional array
# We're using float32 to save on memory space
feature = np.array(image, dtype=np.float32).flatten()
# Get the the letter from the filename. This is the letter of the image.
label = os.path.split(filename)[1][0]
features.append(feature)
labels.append(label)
return np.array(features), np.array(labels)
# Get the features and labels from the zip files
train_features, train_labels = uncompress_features_labels('notMNIST_train.zip')
test_features, test_labels = uncompress_features_labels('notMNIST_test.zip')
# Limit the amount of data to work with a docker container
docker_size_limit = 150000
train_features, train_labels = resample(train_features, train_labels, n_samples=docker_size_limit)
# Set flags for feature engineering. This will prevent you from skipping an important step.
is_features_normal = False
is_labels_encod = False
# Wait until you see that all features and labels have been uncompressed.
print('All features and labels uncompressed.')
###Output
100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████| 210001/210001 [01:28<00:00, 2368.91files/s]
100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████| 10001/10001 [00:04<00:00, 2016.19files/s]
###Markdown
Problem 1The first problem involves normalizing the features for your training and test data.Implement Min-Max scaling in the `normalize_grayscale()` function to a range of `a=0.1` and `b=0.9`. After scaling, the values of the pixels in the input data should range from 0.1 to 0.9.Since the raw notMNIST image data is in [grayscale](https://en.wikipedia.org/wiki/Grayscale), the current values range from a min of 0 to a max of 255.Min-Max Scaling:$X'=a+{\frac {\left(X-X_{\min }\right)\left(b-a\right)}{X_{\max }-X_{\min }}}$*If you're having trouble solving problem 1, you can view the solution [here](https://github.com/udacity/deep-learning/blob/master/intro-to-tensorflow/intro_to_tensorflow_solution.ipynb).*
###Code
# Problem 1 - Implement Min-Max scaling for grayscale image data
def normalize_grayscale(image_data):
"""
Normalize the image data with Min-Max scaling to a range of [0.1, 0.9]
:param image_data: The image data to be normalized
:return: Normalized image data
"""
# TODO: Implement Min-Max scaling for grayscale image data
# print (image_data)
result = []
a = 0.1
b = 0.9
xmin = 0
xmax = 255
return a + ((image_data - xmin) * (b-a)) / (xmax - xmin)
### DON'T MODIFY ANYTHING BELOW ###
# Test Cases
np.testing.assert_array_almost_equal(
normalize_grayscale(np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 255])),
[0.1, 0.103137254902, 0.106274509804, 0.109411764706, 0.112549019608, 0.11568627451, 0.118823529412, 0.121960784314,
0.125098039216, 0.128235294118, 0.13137254902, 0.9],
decimal=3)
np.testing.assert_array_almost_equal(
normalize_grayscale(np.array([0, 1, 10, 20, 30, 40, 233, 244, 254,255])),
[0.1, 0.103137254902, 0.13137254902, 0.162745098039, 0.194117647059, 0.225490196078, 0.830980392157, 0.865490196078,
0.896862745098, 0.9])
if not is_features_normal:
train_features = normalize_grayscale(train_features)
test_features = normalize_grayscale(test_features)
is_features_normal = True
print('Tests Passed!')
if not is_labels_encod:
# Turn labels into numbers and apply One-Hot Encoding
encoder = LabelBinarizer()
encoder.fit(train_labels)
train_labels = encoder.transform(train_labels)
test_labels = encoder.transform(test_labels)
# Change to float32, so it can be multiplied against the features in TensorFlow, which are float32
train_labels = train_labels.astype(np.float32)
test_labels = test_labels.astype(np.float32)
is_labels_encod = True
print('Labels One-Hot Encoded')
print (train_labels.size)
print (train_labels.shape)
assert is_features_normal, 'You skipped the step to normalize the features'
assert is_labels_encod, 'You skipped the step to One-Hot Encode the labels'
# Get randomized datasets for training and validation
train_features, valid_features, train_labels, valid_labels = train_test_split(
train_features,
train_labels,
test_size=0.05,
random_state=832289)
print('Training features and labels randomized and split.')
# Save the data for easy access
pickle_file = 'notMNIST.pickle'
if not os.path.isfile(pickle_file):
print('Saving data to pickle file...')
try:
with open('notMNIST.pickle', 'wb') as pfile:
pickle.dump(
{
'train_dataset': train_features,
'train_labels': train_labels,
'valid_dataset': valid_features,
'valid_labels': valid_labels,
'test_dataset': test_features,
'test_labels': test_labels,
},
pfile, pickle.HIGHEST_PROTOCOL)
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
print('Data cached in pickle file.')
###Output
Saving data to pickle file...
Data cached in pickle file.
###Markdown
Checkpoint All your progress is now saved to the pickle file. If you need to leave and come back to this lab, you no longer have to start from the beginning. Just run the code block below and it will load all the data and modules required to proceed.
###Code
%matplotlib inline
# Load the modules
import pickle
import math
import numpy as np
import tensorflow as tf
from tqdm import tqdm
import matplotlib.pyplot as plt
# Reload the data
pickle_file = 'notMNIST.pickle'
with open(pickle_file, 'rb') as f:
pickle_data = pickle.load(f)
train_features = pickle_data['train_dataset']
train_labels = pickle_data['train_labels']
valid_features = pickle_data['valid_dataset']
valid_labels = pickle_data['valid_labels']
test_features = pickle_data['test_dataset']
test_labels = pickle_data['test_labels']
del pickle_data # Free up memory
print('Data and modules loaded.')
###Output
Data and modules loaded.
###Markdown
Problem 2Now it's time to build a simple neural network using TensorFlow. Here, your network will be just an input layer and an output layer.For the input here the images have been flattened into a vector of $28 \times 28 = 784$ features. Then, we're trying to predict the image digit so there are 10 output units, one for each label. Of course, feel free to add hidden layers if you want, but this notebook is built to guide you through a single layer network. For the neural network to train on your data, you need the following float32 tensors: - `features` - Placeholder tensor for feature data (`train_features`/`valid_features`/`test_features`) - `labels` - Placeholder tensor for label data (`train_labels`/`valid_labels`/`test_labels`) - `weights` - Variable Tensor with random numbers from a truncated normal distribution. - See `tf.truncated_normal()` documentation for help. - `biases` - Variable Tensor with all zeros. - See `tf.zeros()` documentation for help.*If you're having trouble solving problem 2, review "TensorFlow Linear Function" section of the class. If that doesn't help, the solution for this problem is available [here](intro_to_tensorflow_solution.ipynb).*
###Code
# All the pixels in the image (28 * 28 = 784)
features_count = 784
# All the labels
labels_count = 10
# TODO: Set the features and labels tensors
features = tf.placeholder(tf.float32)
labels = tf.placeholder(tf.float32)
# TODO: Set the weights and biases tensors
weights = tf.Variable(tf.truncated_normal((features_count, labels_count)))
biases = tf.Variable(tf.zeros(labels_count))
### DON'T MODIFY ANYTHING BELOW ###
#Test Cases
from tensorflow.python.ops.variables import Variable
assert features._op.name.startswith('Placeholder'), 'features must be a placeholder'
assert labels._op.name.startswith('Placeholder'), 'labels must be a placeholder'
assert isinstance(weights, Variable), 'weights must be a TensorFlow variable'
assert isinstance(biases, Variable), 'biases must be a TensorFlow variable'
assert features._shape == None or (\
features._shape.dims[0].value is None and\
features._shape.dims[1].value in [None, 784]), 'The shape of features is incorrect'
assert labels._shape == None or (\
labels._shape.dims[0].value is None and\
labels._shape.dims[1].value in [None, 10]), 'The shape of labels is incorrect'
assert weights._variable._shape == (784, 10), 'The shape of weights is incorrect'
assert biases._variable._shape == (10), 'The shape of biases is incorrect'
assert features._dtype == tf.float32, 'features must be type float32'
assert labels._dtype == tf.float32, 'labels must be type float32'
# Feed dicts for training, validation, and test session
train_feed_dict = {features: train_features, labels: train_labels}
valid_feed_dict = {features: valid_features, labels: valid_labels}
test_feed_dict = {features: test_features, labels: test_labels}
# Linear Function WX + b
logits = tf.matmul(features, weights) + biases
prediction = tf.nn.softmax(logits)
# Cross entropy
cross_entropy = -tf.reduce_sum(labels * tf.log(prediction), reduction_indices=1)
# Training loss
loss = tf.reduce_mean(cross_entropy)
# Create an operation that initializes all variables
init = tf.global_variables_initializer()
# Test Cases
with tf.Session() as session:
session.run(init)
session.run(loss, feed_dict=train_feed_dict)
session.run(loss, feed_dict=valid_feed_dict)
session.run(loss, feed_dict=test_feed_dict)
biases_data = session.run(biases)
assert not np.count_nonzero(biases_data), 'biases must be zeros'
print('Tests Passed!')
# Determine if the predictions are correct
is_correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(labels, 1))
# Calculate the accuracy of the predictions
accuracy = tf.reduce_mean(tf.cast(is_correct_prediction, tf.float32))
print('Accuracy function created.')
###Output
Accuracy function created.
###Markdown
Problem 3Below are 2 parameter configurations for training the neural network. In each configuration, one of the parameters has multiple options. For each configuration, choose the option that gives the best accuracy.Parameter configurations:Configuration 1* **Epochs:** 1* **Learning Rate:** * 0.8 * 0.5 * 0.1 * 0.05 * 0.01Configuration 2* **Epochs:** * 1 * 2 * 3 * 4 * 5* **Learning Rate:** 0.2The code will print out a Loss and Accuracy graph, so you can see how well the neural network performed.*If you're having trouble solving problem 3, you can view the solution [here](intro_to_tensorflow_solution.ipynb).*
###Code
# Change if you have memory restrictions
batch_size = 128
# TODO: Find the best parameters for each configuration
epochs = 4
learning_rate = 0.2
### DON'T MODIFY ANYTHING BELOW ###
# Gradient Descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
# The accuracy measured against the validation set
validation_accuracy = 0.0
# Measurements used for graphing loss and accuracy
log_batch_step = 50
batches = []
loss_batch = []
train_acc_batch = []
valid_acc_batch = []
with tf.Session() as session:
session.run(init)
batch_count = int(math.ceil(len(train_features)/batch_size))
for epoch_i in range(epochs):
# Progress bar
batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')
# The training cycle
for batch_i in batches_pbar:
# Get a batch of training features and labels
batch_start = batch_i*batch_size
batch_features = train_features[batch_start:batch_start + batch_size]
batch_labels = train_labels[batch_start:batch_start + batch_size]
# Run optimizer and get loss
_, l = session.run(
[optimizer, loss],
feed_dict={features: batch_features, labels: batch_labels})
# Log every 50 batches
if not batch_i % log_batch_step:
# Calculate Training and Validation accuracy
training_accuracy = session.run(accuracy, feed_dict=train_feed_dict)
validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)
# Log batches
previous_batch = batches[-1] if batches else 0
batches.append(log_batch_step + previous_batch)
loss_batch.append(l)
train_acc_batch.append(training_accuracy)
valid_acc_batch.append(validation_accuracy)
# Check accuracy against Validation data
validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)
loss_plot = plt.subplot(211)
loss_plot.set_title('Loss')
loss_plot.plot(batches, loss_batch, 'g')
loss_plot.set_xlim([batches[0], batches[-1]])
acc_plot = plt.subplot(212)
acc_plot.set_title('Accuracy')
acc_plot.plot(batches, train_acc_batch, 'r', label='Training Accuracy')
acc_plot.plot(batches, valid_acc_batch, 'x', label='Validation Accuracy')
acc_plot.set_ylim([0, 1.0])
acc_plot.set_xlim([batches[0], batches[-1]])
acc_plot.legend(loc=4)
plt.tight_layout()
plt.show()
print('Validation accuracy at {}'.format(validation_accuracy))
###Output
Epoch 1/4: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████| 1114/1114 [00:14<00:00, 78.92batches/s]
Epoch 2/4: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████| 1114/1114 [00:13<00:00, 82.73batches/s]
Epoch 3/4: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████| 1114/1114 [00:15<00:00, 72.88batches/s]
Epoch 4/4: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████| 1114/1114 [00:14<00:00, 75.28batches/s]
###Markdown
TestYou're going to test your model against your hold out dataset/testing data. This will give you a good indicator of how well the model will do in the real world. You should have a test accuracy of at least 80%.
###Code
### DON'T MODIFY ANYTHING BELOW ###
# The accuracy measured against the test set
test_accuracy = 0.0
with tf.Session() as session:
session.run(init)
batch_count = int(math.ceil(len(train_features)/batch_size))
for epoch_i in range(epochs):
# Progress bar
batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')
# The training cycle
for batch_i in batches_pbar:
# Get a batch of training features and labels
batch_start = batch_i*batch_size
batch_features = train_features[batch_start:batch_start + batch_size]
batch_labels = train_labels[batch_start:batch_start + batch_size]
# Run optimizer
_ = session.run(optimizer, feed_dict={features: batch_features, labels: batch_labels})
# Check accuracy against Test data
test_accuracy = session.run(accuracy, feed_dict=test_feed_dict)
assert test_accuracy >= 0.80, 'Test accuracy at {}, should be equal to or greater than 0.80'.format(test_accuracy)
print('Nice Job! Test Accuracy is {}'.format(test_accuracy))
###Output
Epoch 1/4: 100%|██████████████████████████████████████████████████████████████████████████████████████████████████| 1114/1114 [00:01<00:00, 831.16batches/s]
Epoch 2/4: 100%|██████████████████████████████████████████████████████████████████████████████████████████████████| 1114/1114 [00:01<00:00, 865.93batches/s]
Epoch 3/4: 100%|██████████████████████████████████████████████████████████████████████████████████████████████████| 1114/1114 [00:01<00:00, 843.01batches/s]
Epoch 4/4: 100%|██████████████████████████████████████████████████████████████████████████████████████████████████| 1114/1114 [00:01<00:00, 578.70batches/s]
###Markdown
TensorFlow Neural Network Lab In this lab, you'll use all the tools you learned from *Introduction to TensorFlow* to label images of English letters! The data you are using, notMNIST, consists of images of a letter from A to J in different fonts.The above images are a few examples of the data you'll be training on. After training the network, you will compare your prediction model against test data. Your goal, by the end of this lab, is to make predictions against that test set with at least an 80% accuracy. Let's jump in! To start this lab, you first need to import all the necessary modules. Run the code below. If it runs successfully, it will print "`All modules imported`".
###Code
import hashlib
import os
import pickle
from urllib.request import urlretrieve
import numpy as np
from PIL import Image
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import resample
from tqdm import tqdm
from zipfile import ZipFile
print('All modules imported.')
###Output
All modules imported.
###Markdown
The notMNIST dataset is too large for many computers to handle. It contains 500,000 images for just training. You'll be using a subset of this data, 15,000 images for each label (A-J).
###Code
def download(url, file):
"""
Download file from <url>
:param url: URL to file
:param file: Local file path
"""
if not os.path.isfile(file):
print('Downloading ' + file + '...')
urlretrieve(url, file)
print('Download Finished')
# Download the training and test dataset.
download('https://s3.amazonaws.com/udacity-sdc/notMNIST_train.zip', 'notMNIST_train.zip')
download('https://s3.amazonaws.com/udacity-sdc/notMNIST_test.zip', 'notMNIST_test.zip')
# Make sure the files aren't corrupted
assert hashlib.md5(open('notMNIST_train.zip', 'rb').read()).hexdigest() == 'c8673b3f28f489e9cdf3a3d74e2ac8fa',\
'notMNIST_train.zip file is corrupted. Remove the file and try again.'
assert hashlib.md5(open('notMNIST_test.zip', 'rb').read()).hexdigest() == '5d3c7e653e63471c88df796156a9dfa9',\
'notMNIST_test.zip file is corrupted. Remove the file and try again.'
# Wait until you see that all files have been downloaded.
print('All files downloaded.')
def uncompress_features_labels(file):
"""
Uncompress features and labels from a zip file
:param file: The zip file to extract the data from
"""
features = []
labels = []
with ZipFile(file) as zipf:
# Progress Bar
filenames_pbar = tqdm(zipf.namelist(), unit='files')
# Get features and labels from all files
for filename in filenames_pbar:
# Check if the file is a directory
if not filename.endswith('/'):
with zipf.open(filename) as image_file:
image = Image.open(image_file)
image.load()
# Load image data as 1 dimensional array
# We're using float32 to save on memory space
feature = np.array(image, dtype=np.float32).flatten()
                    # Get the letter from the filename. This is the letter of the image.
label = os.path.split(filename)[1][0]
features.append(feature)
labels.append(label)
return np.array(features), np.array(labels)
# Get the features and labels from the zip files
train_features, train_labels = uncompress_features_labels('notMNIST_train.zip')
test_features, test_labels = uncompress_features_labels('notMNIST_test.zip')
# Limit the amount of data to work with inside a Docker container
docker_size_limit = 150000
train_features, train_labels = resample(train_features, train_labels, n_samples=docker_size_limit)
# Set flags for feature engineering. This will prevent you from skipping an important step.
is_features_normal = False
is_labels_encod = False
# Wait until you see that all features and labels have been uncompressed.
print('All features and labels uncompressed.')
###Output
100%|██████████| 210001/210001 [00:39<00:00, 5265.49files/s]
100%|██████████| 10001/10001 [00:01<00:00, 5451.40files/s]
###Markdown
Problem 1The first problem involves normalizing the features for your training and test data.Implement Min-Max scaling in the `normalize_grayscale()` function to a range of `a=0.1` and `b=0.9`. After scaling, the values of the pixels in the input data should range from 0.1 to 0.9.Since the raw notMNIST image data is in [grayscale](https://en.wikipedia.org/wiki/Grayscale), the current values range from a min of 0 to a max of 255.Min-Max Scaling:$X'=a+{\frac {\left(X-X_{\min }\right)\left(b-a\right)}{X_{\max }-X_{\min }}}$*If you're having trouble solving problem 1, you can view the solution [here](https://github.com/udacity/deep-learning/blob/master/intro-to-tensorflow/intro_to_tensorflow_solution.ipynb).*
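As a quick sanity check of the formula (not part of the original problem statement): with $a=0.1$, $b=0.9$, $X_{\min}=0$ and $X_{\max}=255$, a raw pixel value of $X=0$ maps to $X'=0.1$, $X=255$ maps to $X'=0.9$, and a mid-range value of $X=128$ maps to $X'=0.1+\frac{128 \times 0.8}{255} \approx 0.50$.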
###Code
# Problem 1 - Implement Min-Max scaling for grayscale image data
def normalize_grayscale(image_data):
"""
Normalize the image data with Min-Max scaling to a range of [0.1, 0.9]
:param image_data: The image data to be normalized
:return: Normalized image data
"""
# TODO: Implement Min-Max scaling for grayscale image data
# Note: I am utilizing array broadcasting here
return 0.1 + (((image_data - 0) * (0.9 - 0.1)) / (255 - 0))
### DON'T MODIFY ANYTHING BELOW ###
# Test Cases
np.testing.assert_array_almost_equal(
normalize_grayscale(np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 255])),
[0.1, 0.103137254902, 0.106274509804, 0.109411764706, 0.112549019608, 0.11568627451, 0.118823529412, 0.121960784314,
0.125098039216, 0.128235294118, 0.13137254902, 0.9],
decimal=3)
np.testing.assert_array_almost_equal(
normalize_grayscale(np.array([0, 1, 10, 20, 30, 40, 233, 244, 254,255])),
[0.1, 0.103137254902, 0.13137254902, 0.162745098039, 0.194117647059, 0.225490196078, 0.830980392157, 0.865490196078,
0.896862745098, 0.9])
if not is_features_normal:
train_features = normalize_grayscale(train_features)
test_features = normalize_grayscale(test_features)
is_features_normal = True
print('Tests Passed!')
if not is_labels_encod:
# Turn labels into numbers and apply One-Hot Encoding
encoder = LabelBinarizer()
encoder.fit(train_labels)
train_labels = encoder.transform(train_labels)
test_labels = encoder.transform(test_labels)
# Change to float32, so it can be multiplied against the features in TensorFlow, which are float32
train_labels = train_labels.astype(np.float32)
test_labels = test_labels.astype(np.float32)
is_labels_encod = True
print('Labels One-Hot Encoded')
assert is_features_normal, 'You skipped the step to normalize the features'
assert is_labels_encod, 'You skipped the step to One-Hot Encode the labels'
# Get randomized datasets for training and validation
train_features, valid_features, train_labels, valid_labels = train_test_split(
train_features,
train_labels,
test_size=0.05,
random_state=832289)
print('Training features and labels randomized and split.')
# Save the data for easy access
pickle_file = 'notMNIST.pickle'
if not os.path.isfile(pickle_file):
print('Saving data to pickle file...')
try:
with open('notMNIST.pickle', 'wb') as pfile:
pickle.dump(
{
'train_dataset': train_features,
'train_labels': train_labels,
'valid_dataset': valid_features,
'valid_labels': valid_labels,
'test_dataset': test_features,
'test_labels': test_labels,
},
pfile, pickle.HIGHEST_PROTOCOL)
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
print('Data cached in pickle file.')
###Output
Saving data to pickle file...
Data cached in pickle file.
###Markdown
CheckpointAll your progress is now saved to the pickle file. If you need to leave and come back to this lab, you no longer have to start from the beginning. Just run the code block below and it will load all the data and modules required to proceed.
###Code
%matplotlib inline
# Load the modules
import pickle
import math
import numpy as np
import tensorflow as tf
from tqdm import tqdm
import matplotlib.pyplot as plt
# Reload the data
pickle_file = 'notMNIST.pickle'
with open(pickle_file, 'rb') as f:
pickle_data = pickle.load(f)
train_features = pickle_data['train_dataset']
train_labels = pickle_data['train_labels']
valid_features = pickle_data['valid_dataset']
valid_labels = pickle_data['valid_labels']
test_features = pickle_data['test_dataset']
test_labels = pickle_data['test_labels']
del pickle_data # Free up memory
print('Data and modules loaded.')
###Output
/Users/lucaslingle/anaconda3/envs/dlnd-tf-lab/lib/python3.5/site-packages/matplotlib/font_manager.py:273: UserWarning: Matplotlib is building the font cache using fc-list. This may take a moment.
warnings.warn('Matplotlib is building the font cache using fc-list. This may take a moment.')
/Users/lucaslingle/anaconda3/envs/dlnd-tf-lab/lib/python3.5/site-packages/matplotlib/font_manager.py:273: UserWarning: Matplotlib is building the font cache using fc-list. This may take a moment.
warnings.warn('Matplotlib is building the font cache using fc-list. This may take a moment.')
###Markdown
Problem 2Now it's time to build a simple neural network using TensorFlow. Here, your network will be just an input layer and an output layer.For the input here, the images have been flattened into a vector of $28 \times 28 = 784$ features. Then, we're trying to predict the image's letter, so there are 10 output units, one for each label. Of course, feel free to add hidden layers if you want, but this notebook is built to guide you through a single-layer network. For the neural network to train on your data, you need the following float32 tensors: - `features` - Placeholder tensor for feature data (`train_features`/`valid_features`/`test_features`) - `labels` - Placeholder tensor for label data (`train_labels`/`valid_labels`/`test_labels`) - `weights` - Variable Tensor with random numbers from a truncated normal distribution. - See `tf.truncated_normal()` documentation for help. - `biases` - Variable Tensor with all zeros. - See `tf.zeros()` documentation for help.*If you're having trouble solving problem 2, review the "TensorFlow Linear Function" section of the class. If that doesn't help, the solution for this problem is available [here](intro_to_tensorflow_solution.ipynb).*
###Code
# All the pixels in the image (28 * 28 = 784)
features_count = 784
# All the labels
labels_count = 10
# TODO: Set the features and labels tensors
features = tf.placeholder(tf.float32, [None, features_count])
labels = tf.placeholder(tf.float32, [None, labels_count])
# TODO: Set the weights and biases tensors
# Note: for this network, we have no hidden layers.
weights = tf.Variable(tf.truncated_normal([features_count, labels_count]))
biases = tf.Variable(tf.zeros([labels_count], tf.float32))
### DON'T MODIFY ANYTHING BELOW ###
#Test Cases
from tensorflow.python.ops.variables import Variable
assert features._op.name.startswith('Placeholder'), 'features must be a placeholder'
assert labels._op.name.startswith('Placeholder'), 'labels must be a placeholder'
assert isinstance(weights, Variable), 'weights must be a TensorFlow variable'
assert isinstance(biases, Variable), 'biases must be a TensorFlow variable'
assert features._shape == None or (\
features._shape.dims[0].value is None and\
features._shape.dims[1].value in [None, 784]), 'The shape of features is incorrect'
assert labels._shape == None or (\
labels._shape.dims[0].value is None and\
labels._shape.dims[1].value in [None, 10]), 'The shape of labels is incorrect'
assert weights._variable._shape == (784, 10), 'The shape of weights is incorrect'
assert biases._variable._shape == (10), 'The shape of biases is incorrect'
assert features._dtype == tf.float32, 'features must be type float32'
assert labels._dtype == tf.float32, 'labels must be type float32'
# Feed dicts for training, validation, and test session
train_feed_dict = {features: train_features, labels: train_labels}
valid_feed_dict = {features: valid_features, labels: valid_labels}
test_feed_dict = {features: test_features, labels: test_labels}
# Linear Function WX + b
logits = tf.matmul(features, weights) + biases
prediction = tf.nn.softmax(logits)
# Cross entropy
cross_entropy = -tf.reduce_sum(labels * tf.log(prediction), reduction_indices=1)
# Training loss
loss = tf.reduce_mean(cross_entropy)
# Create an operation that initializes all variables
init = tf.global_variables_initializer()
# Test Cases
with tf.Session() as session:
session.run(init)
session.run(loss, feed_dict=train_feed_dict)
session.run(loss, feed_dict=valid_feed_dict)
session.run(loss, feed_dict=test_feed_dict)
biases_data = session.run(biases)
assert not np.count_nonzero(biases_data), 'biases must be zeros'
print('Tests Passed!')
# Determine if the predictions are correct
is_correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(labels, 1))
# Calculate the accuracy of the predictions
accuracy = tf.reduce_mean(tf.cast(is_correct_prediction, tf.float32))
print('Accuracy function created.')
###Output
Accuracy function created.
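###Markdown
As an optional sanity check (not part of the lab), you can confirm that the linear layer maps a batch of 784-feature rows to 10 logits, one per label, by printing the static shapes of the tensors defined above.
###Code
# Optional shape check; assumes the Problem 2 cell above has been run
print(features.get_shape())  # (?, 784): unknown batch size, 784 features per image
print(weights.get_shape())   # (784, 10)
print(logits.get_shape())    # (?, 10): one logit per label for each example
###Output
_____no_output_____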
###Markdown
Problem 3Below are 2 parameter configurations for training the neural network. In each configuration, one of the parameters has multiple options. For each configuration, choose the option that gives the best accuracy.Parameter configurations:Configuration 1* **Epochs:** 1* **Learning Rate:** * 0.8 * 0.5 * 0.1 * 0.05 * 0.01Configuration 2* **Epochs:** * 1 * 2 * 3 * 4 * 5* **Learning Rate:** 0.2The code will print out a Loss and Accuracy graph, so you can see how well the neural network performed.*If you're having trouble solving problem 3, you can view the solution [here](intro_to_tensorflow_solution.ipynb).*
###Code
# Change if you have memory restrictions
batch_size = 128
# TODO: Find the best parameters for each configuration
epochs = 4
learning_rate = 0.2
### DON'T MODIFY ANYTHING BELOW ###
# Gradient Descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
# The accuracy measured against the validation set
validation_accuracy = 0.0
# Measurements used for graphing loss and accuracy
log_batch_step = 50
batches = []
loss_batch = []
train_acc_batch = []
valid_acc_batch = []
with tf.Session() as session:
session.run(init)
batch_count = int(math.ceil(len(train_features)/batch_size))
for epoch_i in range(epochs):
# Progress bar
batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')
# The training cycle
for batch_i in batches_pbar:
# Get a batch of training features and labels
batch_start = batch_i*batch_size
batch_features = train_features[batch_start:batch_start + batch_size]
batch_labels = train_labels[batch_start:batch_start + batch_size]
# Run optimizer and get loss
_, l = session.run(
[optimizer, loss],
feed_dict={features: batch_features, labels: batch_labels})
# Log every 50 batches
if not batch_i % log_batch_step:
# Calculate Training and Validation accuracy
training_accuracy = session.run(accuracy, feed_dict=train_feed_dict)
validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)
# Log batches
previous_batch = batches[-1] if batches else 0
batches.append(log_batch_step + previous_batch)
loss_batch.append(l)
train_acc_batch.append(training_accuracy)
valid_acc_batch.append(validation_accuracy)
# Check accuracy against Validation data
validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)
loss_plot = plt.subplot(211)
loss_plot.set_title('Loss')
loss_plot.plot(batches, loss_batch, 'g')
loss_plot.set_xlim([batches[0], batches[-1]])
acc_plot = plt.subplot(212)
acc_plot.set_title('Accuracy')
acc_plot.plot(batches, train_acc_batch, 'r', label='Training Accuracy')
acc_plot.plot(batches, valid_acc_batch, 'x', label='Validation Accuracy')
acc_plot.set_ylim([0, 1.0])
acc_plot.set_xlim([batches[0], batches[-1]])
acc_plot.legend(loc=4)
plt.tight_layout()
plt.show()
print('Validation accuracy at {}'.format(validation_accuracy))
###Output
Epoch 1/4: 100%|██████████| 1114/1114 [00:05<00:00, 214.04batches/s]
Epoch 2/4: 100%|██████████| 1114/1114 [00:05<00:00, 199.64batches/s]
Epoch 3/4: 100%|██████████| 1114/1114 [00:05<00:00, 214.81batches/s]
Epoch 4/4: 100%|██████████| 1114/1114 [00:05<00:00, 204.25batches/s]
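###Markdown
If you would like to compare the learning-rate options of Configuration 1 programmatically, a minimal sketch is shown below. It is not part of the original lab: it assumes the tensors, feed dicts, and `batch_size` defined above, trains one epoch per option, and adds one extra optimizer node to the graph per option.
###Code
# Sketch: sweep the Configuration 1 learning rates for one epoch each
for lr in [0.8, 0.5, 0.1, 0.05, 0.01]:
    sweep_optimizer = tf.train.GradientDescentOptimizer(lr).minimize(loss)
    with tf.Session() as sweep_session:
        # re-initialize the weights and biases so each learning rate starts fresh
        sweep_session.run(tf.global_variables_initializer())
        for batch_i in range(int(math.ceil(len(train_features)/batch_size))):
            batch_start = batch_i*batch_size
            sweep_session.run(sweep_optimizer, feed_dict={
                features: train_features[batch_start:batch_start + batch_size],
                labels: train_labels[batch_start:batch_start + batch_size]})
        val_acc = sweep_session.run(accuracy, feed_dict=valid_feed_dict)
    print('learning rate {:>4}: validation accuracy {:.3f}'.format(lr, val_acc))
###Output
_____no_output_____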
###Markdown
TestYou're going to test your model against your hold out dataset/testing data. This will give you a good indicator of how well the model will do in the real world. You should have a test accuracy of at least 80%.
###Code
### DON'T MODIFY ANYTHING BELOW ###
# The accuracy measured against the test set
test_accuracy = 0.0
with tf.Session() as session:
session.run(init)
batch_count = int(math.ceil(len(train_features)/batch_size))
for epoch_i in range(epochs):
# Progress bar
batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')
# The training cycle
for batch_i in batches_pbar:
# Get a batch of training features and labels
batch_start = batch_i*batch_size
batch_features = train_features[batch_start:batch_start + batch_size]
batch_labels = train_labels[batch_start:batch_start + batch_size]
# Run optimizer
_ = session.run(optimizer, feed_dict={features: batch_features, labels: batch_labels})
# Check accuracy against Test data
test_accuracy = session.run(accuracy, feed_dict=test_feed_dict)
assert test_accuracy >= 0.80, 'Test accuracy at {}, should be equal to or greater than 0.80'.format(test_accuracy)
print('Nice Job! Test Accuracy is {}'.format(test_accuracy))
###Output
Epoch 1/4: 100%|██████████| 1114/1114 [00:01<00:00, 1102.00batches/s]
Epoch 2/4: 100%|██████████| 1114/1114 [00:01<00:00, 1048.24batches/s]
Epoch 3/4: 100%|██████████| 1114/1114 [00:00<00:00, 1303.30batches/s]
Epoch 4/4: 100%|██████████| 1114/1114 [00:00<00:00, 1291.03batches/s] |
ipynb/Germany-Bayern-SK-Kempten.ipynb | ###Markdown
Germany: SK Kempten (Bayern)* Homepage of project: https://oscovida.github.io* Plots are explained at http://oscovida.github.io/plots.html* [Execute this Jupyter Notebook using myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/Germany-Bayern-SK-Kempten.ipynb)
###Code
import datetime
import time
start = datetime.datetime.now()
print(f"Notebook executed on: {start.strftime('%d/%m/%Y %H:%M:%S%Z')} {time.tzname[time.daylight]}")
%config InlineBackend.figure_formats = ['svg']
from oscovida import *
overview(country="Germany", subregion="SK Kempten", weeks=5);
overview(country="Germany", subregion="SK Kempten");
compare_plot(country="Germany", subregion="SK Kempten", dates="2020-03-15:");
# load the data
cases, deaths = germany_get_region(landkreis="SK Kempten")
# compose into one table
table = compose_dataframe_summary(cases, deaths)
# show tables with up to 500 rows
pd.set_option("max_rows", 500)
# display the table
table
###Output
_____no_output_____
###Markdown
Explore the data in your web browser- If you want to execute this notebook, [click here to use myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/Germany-Bayern-SK-Kempten.ipynb)- and wait (~1 to 2 minutes)- Then press SHIFT+RETURN to advance code cell to code cell- See http://jupyter.org for more details on how to use Jupyter Notebook Acknowledgements:- Johns Hopkins University provides data for countries- Robert Koch Institute provides data for within Germany- Atlo Team for gathering and providing data from Hungary (https://atlo.team/koronamonitor/)- Open source and scientific computing community for the data tools- Github for hosting repository and html files- Project Jupyter for the Notebook and binder service- The H2020 project Photon and Neutron Open Science Cloud ([PaNOSC](https://www.panosc.eu/))--------------------
###Code
print(f"Download of data from Johns Hopkins university: cases at {fetch_cases_last_execution()} and "
f"deaths at {fetch_deaths_last_execution()}.")
# to force a fresh download of data, run "clear_cache()"
print(f"Notebook execution took: {datetime.datetime.now()-start}")
###Output
_____no_output_____
###Markdown
Germany: SK Kempten (Bayern)* Homepage of project: https://oscovida.github.io* [Execute this Jupyter Notebook using myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/Germany-Bayern-SK-Kempten.ipynb)
###Code
import datetime
import time
start = datetime.datetime.now()
print(f"Notebook executed on: {start.strftime('%d/%m/%Y %H:%M:%S%Z')} {time.tzname[time.daylight]}")
%config InlineBackend.figure_formats = ['svg']
from oscovida import *
overview(country="Germany", subregion="SK Kempten");
# load the data
cases, deaths, region_label = germany_get_region(landkreis="SK Kempten")
# compose into one table
table = compose_dataframe_summary(cases, deaths)
# show tables with up to 500 rows
pd.set_option("max_rows", 500)
# display the table
table
###Output
_____no_output_____
###Markdown
Explore the data in your web browser- If you want to execute this notebook, [click here to use myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/Germany-Bayern-SK-Kempten.ipynb)- and wait (~1 to 2 minutes)- Then press SHIFT+RETURN to advance code cell to code cell- See http://jupyter.org for more details on how to use Jupyter Notebook Acknowledgements:- Johns Hopkins University provides data for countries- Robert Koch Institute provides data for within Germany- Open source and scientific computing community for the data tools- Github for hosting repository and html files- Project Jupyter for the Notebook and binder service- The H2020 project Photon and Neutron Open Science Cloud ([PaNOSC](https://www.panosc.eu/))--------------------
###Code
print(f"Download of data from Johns Hopkins university: cases at {fetch_cases_last_execution()} and "
f"deaths at {fetch_deaths_last_execution()}.")
# to force a fresh download of data, run "clear_cache()"
print(f"Notebook execution took: {datetime.datetime.now()-start}")
###Output
_____no_output_____
###Markdown
Germany: SK Kempten (Bayern)* Homepage of project: https://oscovida.github.io* Plots are explained at http://oscovida.github.io/plots.html* [Execute this Jupyter Notebook using myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/Germany-Bayern-SK-Kempten.ipynb)
###Code
import datetime
import time
start = datetime.datetime.now()
print(f"Notebook executed on: {start.strftime('%d/%m/%Y %H:%M:%S%Z')} {time.tzname[time.daylight]}")
%config InlineBackend.figure_formats = ['svg']
from oscovida import *
overview(country="Germany", subregion="SK Kempten", weeks=5);
overview(country="Germany", subregion="SK Kempten");
compare_plot(country="Germany", subregion="SK Kempten", dates="2020-03-15:");
# load the data
cases, deaths = germany_get_region(landkreis="SK Kempten")
# get population of the region for future normalisation:
inhabitants = population(country="Germany", subregion="SK Kempten")
print(f'Population of country="Germany", subregion="SK Kempten": {inhabitants} people')
# compose into one table
table = compose_dataframe_summary(cases, deaths)
# show tables with up to 1000 rows
pd.set_option("max_rows", 1000)
# display the table
table
###Output
_____no_output_____
###Markdown
Explore the data in your web browser- If you want to execute this notebook, [click here to use myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/Germany-Bayern-SK-Kempten.ipynb)- and wait (~1 to 2 minutes)- Then press SHIFT+RETURN to advance code cell to code cell- See http://jupyter.org for more details on how to use Jupyter Notebook Acknowledgements:- Johns Hopkins University provides data for countries- Robert Koch Institute provides data for within Germany- Atlo Team for gathering and providing data from Hungary (https://atlo.team/koronamonitor/)- Open source and scientific computing community for the data tools- Github for hosting repository and html files- Project Jupyter for the Notebook and binder service- The H2020 project Photon and Neutron Open Science Cloud ([PaNOSC](https://www.panosc.eu/))--------------------
###Code
print(f"Download of data from Johns Hopkins university: cases at {fetch_cases_last_execution()} and "
f"deaths at {fetch_deaths_last_execution()}.")
# to force a fresh download of data, run "clear_cache()"
print(f"Notebook execution took: {datetime.datetime.now()-start}")
###Output
_____no_output_____ |
Notebooks/Fetch_Data.ipynb | ###Markdown
Fetch DataIn this notebook we extract all the links of the first 250 pages of the Truecar website. The number of pages can be changed with the MAX_PAGE variable. After the pages are fetched, we extract the URLs of all the car ads so we can get the information of each car. importing the needed modules
###Code
from bs4 import BeautifulSoup
import requests
import re
import csv
MAX_PAGE = 250
# defining base_url to create a list of all page urls
BASE_URL = "https://www.truecar.com/used-cars-for-sale/listings/"
# defining host_name for creating urls for each car ad
HOST_NAME = "https://www.truecar.com"
###Output
_____no_output_____
###Markdown
In this part we are going to create a list of all page URLs and loop through all pages to extract the URLs of the car ads.Output files:* **pages.txt** : containing all the page URLs* **urls.txt** : containing all the car ad URLs> Pages algorithm: https://www.truecar.com/used-cars-for-sale/listings/?page=num
###Code
pages = list()
urls = list()
failed_pages = list()
def url_scraper():
with open("pages.txt", "w") as f:
for i in range(1,MAX_PAGE+1):
page = BASE_URL + "?page=" + str(i)
f.write(page+"\n")
pages.append(page)
for page in pages:
try:
response = requests.get(page)
response.raise_for_status()
except:
failed_pages.append(page)
continue
src = response.text
soup = BeautifulSoup(src, "html.parser")
ads = soup.find_all("a", attrs={
"data-test" : "vehicleCardLink"
})
url_list = [HOST_NAME+link["href"] for link in ads]
with open("urls.txt", "a+") as f:
for url in url_list:
urls.append(url)
f.write(url+"\n")
###Output
_____no_output_____
###Markdown
The main scraping happens in this part. We loop through all car URLs and try to extract each car's information.> **Notice** : this is a long process. Make sure you have a stable internet connection, and don't forget to run the next block of code before closing the notebook to save the extracted data.
###Code
car_list = list()  # must be a list (not a tuple) so that scraper() can append to it
failed_urls = list()
def scraper(url):
try:
response = requests.get(url)
response.raise_for_status()
except:
failed_urls.append(url)
        return  # skip this URL entirely if the request failed
try:
src = response.text
soup = BeautifulSoup(src, "html.parser")
detail = soup.find("div", attrs={"data-test" : "vdpPreProspectTopDetails"})
price = detail.find("div", attrs={"data-test" : "vdpPreProspectPrice"})
price = int(re.sub("[^\d]", "", price.text))
mileage = detail.find("p", attrs={"class" : "margin-top-1"})
mileage = int(re.sub("[^\d]", "", mileage.text))
titles = soup.find("h1", attrs={
"class" : "heading-base d-flex flex-column margin-right-2",
"data-qa" : "Heading"
})
name = list(titles.children)[1].text
name = name.replace("\xa0"," ")
features = list(soup.find("div", attrs={"data-test" : "vdpOverviewSection"}).div)
style = features[0].div.div.p.text
exterior_color = features[1].div.div.p.text
interior_color = features[2].div.div.p.text
mpg = features[3].div.div.p.text
mpg_city, mpg_highway = re.match("(\d{1,2}) cty / (\d{1,2}) hwy", mpg).groups()
engine = features[4].div.div.p.text
drive_type = features[5].div.div.p.text
fuel_type = features[6].div.div.p.text
transmission = features[7].div.div.p.text
except:
failed_urls.append(url)
        return  # skip this URL if any of its fields could not be parsed
car = [name,style,exterior_color,interior_color,engine,
drive_type,fuel_type,transmission,mileage,mpg_city,
mpg_highway,price]
car_list.append(car)
###Output
_____no_output_____
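###Markdown
One way to drive the two functions above (a sketch, not in the original notebook): collect the ad URLs first, then scrape each one. This runs the full scrape, so expect it to take a long time.
###Code
# Sketch: run the full scrape (assumes url_scraper() and scraper() defined above)
url_scraper()  # fills the `urls` list and writes pages.txt / urls.txt
for i, url in enumerate(urls):
    scraper(url)
    if i % 100 == 0:
        print('scraped {} of {} ads so far'.format(i, len(urls)))
print('done: {} cars collected, {} URLs failed'.format(len(car_list), len(failed_urls)))
###Output
_____no_output_____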
###Markdown
exporting a csv file
###Code
with open("cars.csv", "w") as cars:
csvwriter = csv.writer(cars)
csvwriter.writerows(car_list)
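# (Optional, not in the original notebook) a variant that also writes a header row,
# with column names matching the fields collected in scraper():
# with open("cars.csv", "w") as cars:
#     csvwriter = csv.writer(cars)
#     csvwriter.writerow(["name", "style", "exterior_color", "interior_color", "engine",
#                         "drive_type", "fuel_type", "transmission", "mileage", "mpg_city",
#                         "mpg_highway", "price"])
#     csvwriter.writerows(car_list)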
###Output
_____no_output_____ |
CurvyTemperature/.ipynb_checkpoints/Copy of TemperatureLapseRate-checkpoint.ipynb | ###Markdown
Temperature Lapse Rate Analysis This script demonstrates basic time series analysis of temperature and elevation data using scientific Python libraries such as [NumPY](http://www.numpy.org/). This example uses temperature data that is stored in HydroShare.Prepared: September 28, 2016Authors: Claire Beveridge, University of Washington; Christina Bandaragoda, University of Washington; Tony Castronova, Utah State University 1. Script Setup and Preparation 1.1 Import required libraries:Before we begin our processing, we must import several libraries into this notebook.* datetime: Manipulate dates and times in simple and complex ways* hs_utils: Interact with HydroShare, including resource querying, downloading and creation* matplotlib: 2D plotting library* numpy: Numerical library used to read and analyze data* pandas: high-performance, easy-to-use data structures and data analysis tools for the Python programming language**Note:** You may see some matplotlib warnings if this is the first time you are running this notebook. These warnings can be ignored.Next we need to establish a secure connection with HydroShare. This is done by simply instantiating the hydroshare class that is defined within hs_utils. In addition to connecting with HydroShare, this command also sets environment variables for several parameters that may be useful to you:1. Your username2. The ID of the resource which launched the notebook3. The type of resource that launched this notebook4. The url for the notebook server.
###Code
from datetime import datetime
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
import pandas
# import hs_utils
# establish a secure connection to HydroShare
# hs = hs_utils.hydroshare()
###Output
_____no_output_____
###Markdown
1.2 Import and format data Retrieve a resource using its IDThis example uses temperature data that is stored in HydroShare at the following url: http://www.hydroshare.org/resource/8822c54c2a7f4a99b9373a0d026550d8. The data for our processing routines can be retrieved using the getResourceFromHydroShare function by passing in the global identifier from the url above.NumPY is a numerical library that we will be using to read and analyze this temperature data. To get started, the `genfromtxt` command is used to parse the text file into NumPY arrays. This is a powerful function that allows us to skip commented lines, strip whitespace, as well as transform date strings into python objects.
###Code
# IMPORT FROM HYDROSHARE WHEN WE GET TO THAT PART!
# # get some resource content. The resource content is returned as a dictionary
# content = hs.getResourceFromHydroShare('0e49df4b97f94247a8d52bac4adeb14a')
# Import elevation for each Lapse Rate sensor as a floating point number
Elevation= np.genfromtxt('Elevation.csv', delimiter=',',skip_header=1)
elev_Lapse2=np.array((Elevation[0][1]), dtype='float64')
elev_Lapse3=np.array((Elevation[1][1]), dtype='float64')
elev_Lapse4=np.array((Elevation[2][1]), dtype='float64')
elev_Lapse5=np.array((Elevation[3][1]), dtype='float64')
elev_Lapse6=np.array((Elevation[4][1]), dtype='float64')
elev_Lapse7=np.array((Elevation[5][1]), dtype='float64')
elev_Lapse7
# Import temperature data from csv files
Lapse2= np.genfromtxt('Lapse2_8-16-16_2180.csv', delimiter=',',autostrip=True,skip_header=15,
converters={0: lambda x: datetime.strptime(x.decode("utf-8"),"%m/%d/%y %I:%M:%S %p")})
n_Lapse2=len(Lapse2) # n is number of samples in the record
datetime_Lapse2=np.empty(n_Lapse2,dtype=object)
temp_Lapse2=np.empty(n_Lapse2,dtype='float64')
for x in range(0,n_Lapse2): # Cycle through all days in sequence
datetime_Lapse2[x]=Lapse2[x][0]
temp_Lapse2[x]=Lapse2[x][2]
Lapse4= np.genfromtxt('Lapse4_8-16-16_3465.csv', delimiter=',',autostrip=True,skip_header=15,
converters={0: lambda x: datetime.strptime(x.decode("utf-8"),"%m/%d/%y %I:%M:%S %p")})
n_Lapse4=len(Lapse4) # n is number of samples in the record
datetime_Lapse4=np.empty(n_Lapse4,dtype=object)
temp_Lapse4=np.empty(n_Lapse4,dtype='float64')
for x in range(0,n_Lapse4): # Cycle through all days in sequence
datetime_Lapse4[x]=Lapse4[x][0]
temp_Lapse4[x]=Lapse4[x][2]
Lapse4_ground=np.genfromtxt('Lapse4_8-16-16_ground.csv', delimiter=',',autostrip=True,skip_header=15,
converters={0: lambda x: datetime.strptime(x.decode("utf-8"),"%m/%d/%y %I:%M:%S %p")})
n_Lapse4_ground=len(Lapse4_ground) # n is number of samples in the record
datetime_Lapse4_ground=np.empty(n_Lapse4_ground,dtype=object)
temp_Lapse4_ground=np.empty(n_Lapse4_ground,dtype='float64')
for x in range(0,n_Lapse4_ground): # Cycle through all days in sequence
datetime_Lapse4_ground[x]=Lapse4_ground[x][0]
temp_Lapse4_ground[x]=Lapse4_ground[x][2]
Lapse6= np.genfromtxt('Lapse6_8-16-16_5168.csv', delimiter=',',autostrip=True,skip_header=15,
converters={0: lambda x: datetime.strptime(x.decode("utf-8"),"%m/%d/%y %I:%M:%S %p")})
n_Lapse6=len(Lapse6) # n is number of samples in the record
datetime_Lapse6=np.empty(n_Lapse6,dtype=object)
temp_Lapse6=np.empty(n_Lapse6,dtype='float64')
for x in range(0,n_Lapse6): # Cycle through all days in sequence
datetime_Lapse6[x]=Lapse6[x][0]
temp_Lapse6[x]=Lapse6[x][2]
Lapse6_ground=np.genfromtxt('Lapse6_8-16-16_ground.csv', delimiter=',',autostrip=True,skip_header=15,
converters={0: lambda x: datetime.strptime(x.decode("utf-8"),"%m/%d/%y %I:%M:%S %p")})
n_Lapse6_ground=len(Lapse6_ground) # n is number of samples in the record
datetime_Lapse6_ground=np.empty(n_Lapse6_ground,dtype=object)
temp_Lapse6_ground=np.empty(n_Lapse6_ground,dtype='float64')
for x in range(0,n_Lapse6_ground): # Cycle through all days in sequence
datetime_Lapse6_ground[x]=Lapse6_ground[x][0]
temp_Lapse6_ground[x]=Lapse6_ground[x][2]
Lapse7= np.genfromtxt('Lapse7_8-16-16_5719.csv', delimiter=',',autostrip=True,skip_header=20,
converters={0: lambda x: datetime.strptime(x.decode("utf-8"),"%m/%d/%y %I:%M:%S %p")})
n_Lapse7=len(Lapse7) # n is number of samples in the record
datetime_Lapse7=np.empty(n_Lapse7,dtype=object)
temp_Lapse7=np.empty(n_Lapse7,dtype='float64')
for x in range(0,n_Lapse7): # Cycle through all days in sequence
datetime_Lapse7[x]=Lapse7[x][0]
temp_Lapse7[x]=Lapse7[x][2]
Lapse7_ground=np.genfromtxt('Lapse7_8-16-16_ground.csv', delimiter=',',autostrip=True,skip_header=15,
converters={0: lambda x: datetime.strptime(x.decode("utf-8"),"%m/%d/%y %I:%M:%S %p")})
n_Lapse7_ground=len(Lapse7_ground) # n is number of samples in the record
datetime_Lapse7_ground=np.empty(n_Lapse7_ground,dtype=object)
temp_Lapse7_ground=np.empty(n_Lapse7_ground,dtype='float64')
for x in range(0,n_Lapse7_ground): # Cycle through all days in sequence
datetime_Lapse7_ground[x]=Lapse7_ground[x][0]
temp_Lapse7_ground[x]=Lapse7_ground[x][2]
Lapse7_RH=np.genfromtxt('Lapse7_8-16-16_RH.csv', delimiter=',',autostrip=True,skip_header=20,
converters={0: lambda x: datetime.strptime(x.decode("utf-8"),"%m/%d/%y %I:%M:%S %p")})
n_Lapse7_RH=len(Lapse7_RH) # n is number of samples in the record
datetime_Lapse7_RH=np.empty(n_Lapse7_RH,dtype=object)
temp_Lapse7_RH=np.empty(n_Lapse7_RH,dtype='float64')
for x in range(0,n_Lapse7_RH): # Cycle through all days in sequence
datetime_Lapse7_RH[x]=Lapse7_RH[x][0]
temp_Lapse7_RH[x]=Lapse7_RH[x][2]
###Output
_____no_output_____
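###Markdown
The per-sensor blocks above are written out explicitly for clarity. As an aside (a sketch, not part of the original workflow), the same loading logic could be wrapped in a helper function and reused for each CSV file:
###Code
# Sketch of a reusable loader; assumes the same column layout as the files above
# (datetime string in column 0, value in column 2)
def load_hobo_csv(filename, skip_header=15):
    data = np.genfromtxt(filename, delimiter=',', autostrip=True, skip_header=skip_header,
                         converters={0: lambda x: datetime.strptime(x.decode("utf-8"), "%m/%d/%y %I:%M:%S %p")})
    n = len(data)
    dt = np.empty(n, dtype=object)
    vals = np.empty(n, dtype='float64')
    for i in range(n):
        dt[i] = data[i][0]
        vals[i] = data[i][2]
    return dt, vals
# Example (equivalent to the Lapse2 block above):
# datetime_Lapse2, temp_Lapse2 = load_hobo_csv('Lapse2_8-16-16_2180.csv')
###Output
_____no_output_____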
###Markdown
2. Plot dataUse the script below to visualize your data! Below is a basic way of doing this, but matplotlib offers many options for data visualization. Check out the documentation for more plotting options! 2.a. Plot Time Series of Air Temperature, Ground Temperature, and Relative Humidity
###Code
# Create a figure, specifiying figure size
fig1, ax1=plt.subplots(1,1,figsize=(10, 5))
# Plot data and specify label of each line (for legend)
plt.plot(datetime_Lapse2,temp_Lapse2,'b--',label='Elev=2180 m')
plt.plot(datetime_Lapse4,temp_Lapse4,'m--',label='Elev=3465 m')
plt.plot(datetime_Lapse6,temp_Lapse6,'c--',label='Elev=5168 m')
plt.plot(datetime_Lapse7,temp_Lapse7,'g--',label='Elev=5719 m')
# Set axes and figure titles
plt.title('Time Series of Air Temperature')
plt.xlabel('Date')
plt.xticks(rotation=40) # Rotate axis tick values as necessary
plt.ylabel('Air Temperature (deg C)')
# display a legend and specify the location (either 'best' or a value 1-10)
plt.legend(loc='best')
fig2, ax2=plt.subplots(1,1,figsize=(10, 5))
plt.plot(datetime_Lapse4_ground,temp_Lapse4_ground,'m--',label='Elev=3465 m')
plt.plot(datetime_Lapse6_ground,temp_Lapse6_ground,'c--',label='Elev=5168 m')
plt.plot(datetime_Lapse7_ground,temp_Lapse7_ground,'g--',label='Elev=5719 m')
plt.title('Time Series of Ground Temperature')
plt.xlabel('Date')
plt.xticks(rotation=40)
plt.ylabel('Ground Temperature (deg C)')
plt.legend(loc='best')
fig3, ax3=plt.subplots(1,1,figsize=(10, 5))
plt.plot(datetime_Lapse7_RH,temp_Lapse7_RH,'g--',label='Elev=5719 m')
plt.title('Time Series of Relative Humidity')
plt.xlabel('Date')
plt.xticks(rotation=40)
plt.ylabel('Relative Humidity (%)')
plt.legend(loc='best')
###Output
_____no_output_____ |
01a_Intro_to_python.ipynb | ###Markdown
Intro to (/review of) python In the next few lessons, we'll review basic python and programming concepts, as well as go over the fundamentals of how to use some common python packages, such as:- numpy- pandas- matplotlib Importing packagesPackages are collections of pre-written code made available for reuse. In the previous lesson, we installed some necessary packages using the `pip` python package manager. Packages are convenient because they save you from having to implement every feature and function on your own. The widely-used packages also provide a standard, common set of tools for others to develop with --- allowing interoperability between programs.There are a few different ways to import package in python:The simplest is just to `import {packagename}`. For this lesson, we'll use `numpy` as the example package```import numpy```The functions, classes, and variables of the `numpy` package can then be accessed using "dot" notation: for example, the numpy array class can be accessed with `numpy.array`---A variant of this is to use `import {packagename} as {shortname}`, as in:```import numpy as np```This reduces the number of characters needed to type, and can be convenient if the package name is long, or you need to use many things from the same package. Accessing the numpy array class, for example, can be done with ```np.array```---If you only need a subset of items from a package, for example, a single class, function, or a submodule (subpackage of the main package), you can use the syntax ``` from {packagename} import {element}```, as in:```from numpy import array```This allows you to use the `array` class directly, without importing the rest of the numpy package, and without needing to use the package prefix dot notation.For example, if you use this import method, then writing```test_array = array([0])```would be equivalent to writing```test_array = numpy.array([0])test_array = np.array([0])```using the previous import styles, respectively.
###Code
import numpy as np
###Output
_____no_output_____
###Markdown
DocumentationPackages contain functions, classes, and variables which may be helpful. Crucial to the usability of a package is the documentation (or API reference), which (should) list all of the contents of the package, and how to use them.To get the built-in help about a function or class, use the `help()` commandDocumentation for most common packages are also usually available online. For example, the documentation for [numpy can be found here](https://docs.scipy.org/doc/numpy/reference/)
###Code
help(print)
###Output
_____no_output_____
###Markdown
Commenting codeIn order for your code to be readable to others (or your future self), you should provide comments on your code to explain what you are doing. The comment character in python is `#`, and any text following a `#` symbol will not be interpreted as code by python.
###Code
array_of_zeros = np.zeros([3,3]) # this creates a 3x3 array full of zeros
print(array_of_zeros) # the print() function displays the value of the variable on screen
###Output
_____no_output_____
###Markdown
Code flow LoopsA key part of programming is automating repetitive tasks, such as applying the same operation to a list of inputs. This is achieved using "loops"; most commonly, the `for` loop.In its simplest form, a python loop iterates over a list, and runs the code within the loop with the variable set equal to the respective element of the list.
###Code
idx_list = [0,1,2,3,4,5]
for idx in idx_list: # loop over idx_list, set idx equal to each element sequentially
print('idx is equal to {}'.format(idx)) # print the current value of idx
###Output
_____no_output_____
###Markdown
Lists are not the only kinds of objects that can be iterated over (also known as an iterable). A special kind of object, called a generator, does not explicitly store every single value in memory, but instead stores the current value, and the rule to generate the next value. This can often be faster than explicitly storing every element.As an analogy, if you wanted to send to your friend the following sequence of numbers: [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39, 41, 43, 45, 47, 49, 51, 53, 55, 57, 59], you could write each number down and send them the entire list. Or you could write "the sequence of numbers starting at 1, increasing by 2, but less than 60"If the sequence is very long, then the second representation becomes preferable to write, because you don't need to explicitly write out every single element. One common generator that is used in python is `range(start, end, increment)`, which creates a generator that produces the sequence of numbers starting at `start`, increments by `increment`, and is less than (__but not equal to__) `end`. If `increment` is not set, it defaults to 1.This is often used in conjunction with iteration:
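For example (a quick illustration), the sequence from the analogy above can be produced with ```list(range(1, 60, 2))```, which expands the rule "start at 1, step by 2, stay below 60" into the explicit list of 30 numbers without you having to write any of them out.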
###Code
for idx in range(0,6): #equivalent to the above
print('idx is equal to {}'.format(idx)) # print the current value of idx
for idx in range(6): # if you only give one argument, it automatically starts from 0
print('idx is currently {}'.format(idx)) # print the current value of idx
###Output
_____no_output_____
###Markdown
ConditionalsSometimes you want to execute code only if certain conditions are met. The `if`, `elif` (short for else-if), and `else` keywords are used for this purpose
###Code
for idx in range(1,6):
if idx > 2 : # only execute the following indented block of code if idx is greater than 2
print('idx={}, which is greater than 2'.format(idx))
elif idx == 2: # only execute if the above condition isn't met, and also idx==2
# note that == is used to check for equality; single = is the assignment operator
print('idx={} is equal to 2'.format(idx))
else: # execute this code if none of the above conditions are met
print('idx={} is less than 2'.format(idx))
###Output
_____no_output_____
###Markdown
You can combine different conditions using the keywords `and` and `or`, and negate conditions using `not`
###Code
x = 5
y = 10
print(x==5 or y==11) # true because first statement is true
print(x==5 and y==11) # false because not both are true
print(x==5 and not y==11) # true because second condition is negated (flipped)
###Output
_____no_output_____
###Markdown
List comprehensionYou can generate a list from any iterable in a couple ways. This is called list comprehension.The simplest way is just to call `list()` on the generator object
###Code
list(range(6))
###Output
_____no_output_____
###Markdown
Another way is using the following syntax:```[x for x in iterable]```for example:
###Code
list_of_numbers = [x for x in range(6)]
print(list_of_numbers)
###Output
_____no_output_____
###Markdown
However, the list comprehension syntax is actually more powerful than that: it allows for functions to be called within the expression```[expression(x) for x in iterable]```
###Code
list_of_first_five_squares = [x**2 for x in range(6)] # the double star ** expression denotes exponentiation
# hence, the above gives the first 5 square numbers, including zero
print(list_of_first_five_squares)
###Output
_____no_output_____
###Markdown
In fact, the list comprehension syntax is even more powerful: it can also include conditional statements```[expression(x) for x in iterable if condition]```
###Code
list_of_first_few_odd_squares = [x**2 for x in range(10) if np.mod(x,2) == 1]
print(list_of_first_few_odd_squares)
###Output
_____no_output_____
###Markdown
Now try to print a list of the first few even cubes using list comprehension. FunctionsFunctions are a way to repeat the same lines of code, potentially with different inputs. If you find yourself writing a lot of repetitive code that shares the same structure, you may want to try and formulate it as a function. Functions are declared using the `def` keyword. In this example, we will write a function that checks if a number is prime.
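One possible solution to the even-cubes exercise above is ```[x**3 for x in range(10) if np.mod(x, 2) == 0]``` (try it yourself before peeking); the prime-checking function is defined in the next cell.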
###Code
def is_prime(number):
    if number < 2: # 0, 1, and negative numbers are not prime
        return False
    sqrt_num = int(np.sqrt(number)) # we only need to check integer factors up to the square root of the number, rounded down (int() always rounds down)
for potential_factor in range(2,sqrt_num+1): #range(a,b) iterates from the value a to b-1
if np.mod(number, potential_factor) == 0: #np.mod() is the modulo (aka remainder) function; thus, if the remainder is zero, then it divides evenly
return False # if it divides evenly, then it's not prime, then we can return and end the function
return True #if we get through all of the potential factors and haven't found a factor, then it's prime
is_prime(101.0)
###Output
_____no_output_____
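###Markdown
Combining the new function with list comprehension from earlier gives a compact way to list primes (a quick illustration, not part of the original lesson):
###Code
# all primes below 50, filtered with the is_prime() function defined above
primes_below_50 = [n for n in range(2, 50) if is_prime(n)]
print(primes_below_50)
###Output
_____no_output_____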
###Markdown
Data structures ListsWe already looked at one python data structure: the list. Lists are _ordered_ collections of values, denoted with square brackets. Lists are _ordered_ in the sense that the order of their elements matters. The list [1,2,3,4] is not the same as [4,3,2,1]
###Code
a_list = [2,0,15,5] # square brackets denote a list
another_list = [15,0,5,2]
print('a_list = {}; another_list = {}'.format(a_list, another_list)) # the .format() function of strings allows you to plug in the variable values in the respective curly braces {}
print('is a_list equal to another_list?')
print(a_list == another_list) # print out the truth value of whether a_list is the same as another_list (it shouldn't be, because they have different ordering)
yet_another_list = [2,0,15,5]
print('but it is equal to yet_another_list:')
print(a_list == yet_another_list)
###Output
_____no_output_____
###Markdown
List elements can be any python object, including strings, numbers, and other lists
###Code
diverse_list = ['a', False, [0,0,0], 1.0, 10]
print('the elements of diverse_list are: {}'.format(diverse_list))
print('the data types of the elements are {}'.format([type(x) for x in diverse_list])) # using list comprehension to get the type of each element
###Output
_____no_output_____
###Markdown
You can access a specific element of a list using the square bracket notation (this is known as indexing)```list_name[idx]```Index values can be negative, which start counting from the end. So `list_name[-1]` gives the __last__ element of the list
###Code
first_element_of_diverse_list = diverse_list[0] # python starts counting at 0, so the first element is at index 0
print(first_element_of_diverse_list)
last_element_of_diverse_list = diverse_list[-1]
print(last_element_of_diverse_list)
###Output
_____no_output_____
###Markdown
You can "slice" a list using the colon `:` notation```list_name[start_idx:end_idx]```Note that the slice starts at the start_idx, but __does not include__ the element at end_idx.If you omit either start_idx or end_idx, it automatically starts at the first element/ends at the last element respectively
###Code
print(diverse_list[0:2]) # gets the elements at index 0 and 1
print(diverse_list[:2]) # equivalent to the above
print(diverse_list[2:]) # gets all elements from index 2 to the end
print(diverse_list[:]) # gets all elements
###Output
_____no_output_____
###Markdown
Lists are modifiable: you can append and delete entries, as well as change the values of elements
###Code
diverse_list.append('new entry') # add a value to the end
print('appended an entry to diverse_list: {}'.format(diverse_list))
diverse_list[0] = 'changed entry' # change the value of entry at index 0
print('changed an entry of diverse_list: {}'.format(diverse_list))
first_entry = diverse_list.pop(0) # remove (and return) the value at element 0
print('removed "{}" from diverse_list: {}'.format(first_entry, diverse_list))
diverse_list.remove('new entry') # you can also remove the first entry with a specific value, in this case, the "new entry"
print('removed "new entry" from diverse_list: {}'.format(diverse_list))
diverse_list.insert(0,'a') # insert the value 'a' at index 0
print('inserted "a" back into diverse_list: {}'.format(diverse_list))
###Output
_____no_output_____
###Markdown
TuplesTuples are unchangeable, ordered sequences of elements, grouped with regular parentheses:```('a','b','c')```
###Code
a_tuple = ('a','b','c')
print('the first element of a_tuple is "{}"'.format(a_tuple[0])) # tuples can be indexed like lists
a_tuple[0] = 10 # however, unlike lists, you cannot change their values once they are set
###Output
_____no_output_____
###Markdown
DictionariesDictionaries are data structures that store _mappings_ from "keys" to respective "values". You can think of them as lookup tables which return a specific value for a given key. For example, an english dictionary (the book) could be stored as a python dictionary, where the "keys" are each of the words in english, and the "values" are the respective definitions.They are defined using the curly braces, or the `dict()` function:```dictionary = {key: value, key2: value2}dictionary = dict([(key, value),(key2, value2)])```Keys can be a variety of data types, including numeric, strings, and tuples. However, they cannot be changeable objects, such as lists, or other dictionaries. Values, on the other hand, can be any data type.Accessing the dictionary values are done using square brackets using the syntax:```dictionary[key] returns the value associated with key```
###Code
pokemon_types = {'bulbasaur':'grass', 'charmander':'fire', 'squirtle':'water'}
pokemon_types
print(pokemon_types['bulbasaur'])
###Output
_____no_output_____
###Markdown
You can add or change an element to a dictionary using the following syntax:```dictionary[key] = value```
###Code
pokemon_types['bulbasaur'] = 'grass/poison' #bulbasaur is actually dual typed, so we'll change its entry
pokemon_types['ivysaur'] = 'grass/poison' #let's add an evolution
pokemon_types
###Output
_____no_output_____
###Markdown
You can get a list of all of the keys to a dictionary using the `.keys()` function, similarly with the `.values()` function.
###Code
print(pokemon_types.keys())
print(pokemon_types.values())
###Output
_____no_output_____
###Markdown
You can use the function `.items()` to get a list of `(key, value)` tuples. This is often useful for looping
###Code
for k, v in pokemon_types.items():
print('the type of {} is {}'.format(k,v))
###Output
_____no_output_____
###Markdown
NumpyNumpy is a package for python which provides various tools to make math and numerical computation much easier. One of the key components is the numpy array, which enables matrices.The content in this section is adapted from the Python Data Science Handbook, which is [freely available online](https://github.com/jakevdp/PythonDataScienceHandbook) numpy arraysnumpy arrays provide the ability to create matrices, which are essentially 2-dimensional lists. (They can also be used to create even higher-dimensional arrays: tensors, etc)Unlike python lists, numpy arrays must all have the same data type (e.g. numeric, string). Arrays can be created from python lists:
###Code
print('a vector can be created from a list {}'.format(np.array([1, 4, 2, 5, 3])))
print('a matrix can be created from a list of lists:\n {}'.format(np.array([[1,1,1],[2,2,2],[3,3,3]]))) #\n is the newline character and makes the following text appear on the next line
###Output
_____no_output_____
###Markdown
There are also a bunch of built-in functions for generating arrays.
###Code
# Create a length-10 integer array filled with zeros
np.zeros(10, dtype=int)
# Create a 3x5 floating-point array filled with ones
np.ones((3, 5), dtype=float)
# Create a 3x5 array filled with 3.14
np.full((3, 5), 3.14)
# Create an array filled with a linear sequence
# Starting at 0, ending at 20, stepping by 2
# (this is similar to the built-in range() function)
np.arange(0, 20, 2)
# Create an array of five values evenly spaced between 0 and 1
np.linspace(0, 1, 5)
# Create a 3x3 array of uniformly distributed
# random values between 0 and 1
np.random.random((3, 3))
# Create a 3x3 array of normally distributed random values
# with mean 0 and standard deviation 1
np.random.normal(0, 1, (3, 3))
# Create a 3x3 array of random integers in the interval [0, 10)
np.random.randint(0, 10, (3, 3))
# Create a 3x3 identity matrix
np.eye(3)
# Create an uninitialized array of three integers
# The values will be whatever happens to already exist at that memory location
np.empty(3)
###Output
_____no_output_____
###Markdown
Array attributes
###Code
x1 = np.random.randint(10, size=6) # One-dimensional array
x2 = np.random.randint(10, size=(3, 4)) # Two-dimensional array
x3 = np.random.randint(10, size=(3, 4, 5)) # Three-dimensional array
print('x1=',x1)
print('x2=',x2)
print('x3=',x3)
print("x3 ndim: ", x3.ndim)
print("x3 shape:", x3.shape)
print("x3 size: ", x3.size)
###Output
_____no_output_____
###Markdown
Array IndexingYou can index arrays much in the same way that you can index python listsOne-dimensional arrays function just like lists
###Code
print('x1 is', x1)
print('first entry is', x1[0]) # first entry of the array
print('second and third entries are', x1[[1,2]])
print('first three entries are', x1[:3]) # slice the array
print('a second set of colons in the slice allow you to set the interval:', x1[::2]) # every other element of x1
print('you can reverse the order using a negative interval:', x1[3::-1]) # count backwards from entry at index 3 to the beginning
###Output
_____no_output_____
###Markdown
Multi-dimensional arrays are indexed using a tuple of indices. The indices for a 2d array are ordered as `(row_idx, col_idx)`
###Code
print(x2)
print('the element in the second row, third column is: ', x2[(1,2)])
###Output
_____no_output_____
###Markdown
You can slice multidimensional arrays as well!If you change the value of an entry in a slice, you change the value in the original object. This is what's known as a "view" of an array. Slices do not return an independent object, but instead can be thought of as just a reference to a subset of elements in the original object.However, if you do not want this behavior, you can avoid it by making a copy using the `.copy()` function.
###Code
print('x2 is originally: \n', x2)
slice_of_x2 = x2[1:,2:] # slices the 2nd row to the end, and 3rd column to the end
copied_slice_of_x2 = x2[1:,2:].copy() # note that slices provide a direct view of the original object, if you want an independent copy, use the .copy()
print('slice_of_x2 is:\n',slice_of_x2)
print('copied_slice_of_x2 is: \n', copied_slice_of_x2)
# let's change the value of slice_of_x2
slice_of_x2[0,0] = 99 # we changed the value of top left element to 99; this corresponds to the element in the 2nd row, 3rd column of x2
print('now x2 is: \n', x2)
print('slice_of_x2 is:\n',slice_of_x2)
print('and copied_slice_of_x2 is:\n',copied_slice_of_x2)
# If you change the value of a copy, it does not affect the original object
copied_slice_of_x2[0,0] = -50
print('copied_slice_of_x2 is: \n', copied_slice_of_x2)
print('x2 is unchanged by this operation:\n', x2)
###Output
_____no_output_____
###Markdown
ReshapingYou can reshape an array using the `.reshape()` function
###Code
print('np.arange(12):',np.arange(12))
reshaped = np.arange(12).reshape(3,4)
print('reshaped into a 3x4 array: \n',reshaped)
###Output
_____no_output_____
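###Markdown
One convenience worth knowing: you can pass `-1` for one of the dimensions of `.reshape()` and numpy will infer that dimension from the array's size. A small sketch:
###Code
flat = np.arange(12)
print('let numpy infer the number of columns:\n', flat.reshape(3, -1)) # the -1 becomes 4 here, since 12/3 = 4
print('a single -1 flattens back to one dimension:', flat.reshape(3, 4).reshape(-1))
###Output
_____no_output_____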
###Markdown
You can combine and split arrays in numpy. However, we won't be going too much in depth with that. Check out [this tutorial](https://github.com/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/02.02-The-Basics-Of-NumPy-Arrays.ipynb) for more info in that realm. Boolean arrays and maskingBoolean data represents True/False values, which can also be expressed as 1 or 0 respectively.You can compute boolean operations on arrays in numpy
###Code
even_entries_bool = np.mod(reshaped,2)==0
print(even_entries_bool)
###Output
_____no_output_____
###Markdown
You can then use those arrays to select the entries which match that criteria
###Code
reshaped[even_entries_bool]
###Output
_____no_output_____
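###Markdown
Boolean masks can also be combined element-wise with `&` (and), `|` (or), and `~` (not); each comparison needs its own parentheses because of operator precedence. A small sketch, redefining the same `reshaped` array as above:
###Code
reshaped = np.arange(12).reshape(3,4) # same array as above
big_and_even = (reshaped > 3) & (np.mod(reshaped, 2) == 0) # element-wise "and" of two boolean arrays
print(reshaped[big_and_even]) # selects 4, 6, 8, 10
print(reshaped[(reshaped < 2) | (reshaped > 9)]) # element-wise "or" selects 0, 1, 10, 11
###Output
_____no_output_____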
###Markdown
ExerciseUsing the `is_prime()` function we previously wrote, write a function which takes a numpy array and returns a boolean array of the prime entries with the same shape as the input array
###Code
def is_prime_array(input_array):
"""
returns a boolean array of the same shape as input_array
with True if the element in the same position of input_array is prime
and False otherwise
example:
x = np.array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
is_prime_array(x)
should return
[[False, False, True, True],
[False, True, False, True],
[False, False, False, True]]
"""
# fill in with your code
return output_array
###Output
_____no_output_____
###Markdown
Random functionsYou'll often need random numbers in programming. For example: taking a random sample of data, simulating a coin flip/dice roll, and generating simulated data.Numpy has a bunch of built-in random functions for this purpose. These functions are accessible in the `np.random` submodule
###Code
np.random.rand(2,3,4) # generates uniform random numbers between 0,1
# arguments of rand(a,b,c,d,...) determine the dimensions of the array
# in this case, we created a 2x3x4 3d array
np.random.rand(10) # we can use this just to get a list of 10 random numbers from [0,1)
np.random.randn(5) # randn is the standard normal distribution (gaussian)
# by default, it has mean=0 and variance=1
# you can shift and scale the gaussian to get a different mean and standard deviation
# for example, to have mean=3 and standard deviation=2 (multiplying by a constant scales the standard deviation, not the variance)
def scaled_randn(mean, std, n_samples):
    return mean + np.random.randn(n_samples)*std # adding the mean shifts the center; multiplying by std widens the spread
scaled_randn(3,2,100)
# randint gives random integers. Arguments are (low, high, size)
# randint operates on the interval [low,high) (high is not included)
np.random.randint(1,10,20)
# if you want it to be inclusive, then you should call randint(low,high+1,size)
np.random.randint(1,11,20)
#randint is useful to generate a random sample with replacement of a collection
letters = np.array(['a','b','c','d','e','f'])
rand_idx = np.random.randint(0,len(letters),10) #len(letters) is the length of letters
letters[rand_idx]
# if you don't want sampling with replacement, you can use permutation
np.random.permutation(letters)
# another way to do sampling is with the choice(x[, size, replace, p]) function
print(np.random.choice(letters)) # x is the only required argument, will just return one random entry
print(np.random.choice(letters, 10)) # size lets you specify how many to sample
print(np.random.choice(letters, [2,3])) # can be multi dimensional
print(np.random.choice(letters, [2,3], False)) # whether to sample with replacement (default True)
# by default, choice() uses a uniform random probability (i.e. fair dice)
# sometimes you want to weight certain outcomes to be more likely
# p allows you to do that by specifying the probabilities of each outcome
# let's make 'a' be much more likely than the others
print(np.random.choice(letters, 100, True, [0.75,0.05,0.05,0.05,0.05,0.05]))
###Output
_____no_output_____
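###Markdown
Random results differ on every run, which can make debugging confusing. Setting a seed makes the sequence reproducible; a minimal sketch:
###Code
np.random.seed(42) # fix the random state so the results below are repeatable
print(np.random.randint(0, 10, 5)) # prints the same 5 integers every time the cell is run
np.random.seed(42) # resetting the seed restarts the same sequence
print(np.random.randint(0, 10, 5)) # identical to the line above
###Output
_____no_output_____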
###Markdown
Intro to (/review of) python In the next few lessons, we'll review basic python and programming concepts, as well as go over the fundamentals of how to use some common python packages, such as:- numpy- pandas- matplotlib Importing packagesPackages are collections of pre-written code made available for reuse. In the previous lesson, we installed some necessary packages using the `pip` python package manager. Packages are convenient because they save you from having to implement every feature and function on your own. The widely-used packages also provide a standard, common set of tools for others to develop with --- allowing interoperability between programs.There are a few different ways to import package in python:The simplest is just to `import {packagename}`. For this lesson, we'll use `numpy` as the example package```import numpy```The functions, classes, and variables of the `numpy` package can then be accessed using "dot" notation: for example, the numpy array class can be accessed with `numpy.array`---A variant of this is to use `import {packagename} as {shortname}`, as in:```import numpy as np```This reduces the number of characters needed to type, and can be convenient if the package name is long, or you need to use many things from the same package. Accessing the numpy array class, for example, can be done with ```np.array```---If you only need a subset of items from a package, for example, a single class, function, or a submodule (subpackage of the main package), you can use the syntax ``` from {packagename} import {element}```, as in:```from numpy import array```This allows you to use the `array` class directly, without importing the rest of the numpy package, and without needing to use the package prefix dot notation.For example, if you use this import method, then writing```test_array = array([0])```would be equivalent to writing```test_array = numpy.array([0])test_array = np.array([0])```using the previous import styles, respectively.
###Code
import numpy as np
###Output
_____no_output_____
###Markdown
DocumentationPackages contain functions, classes, and variables which may be helpful. Crucial to the usability of a package is the documentation (or API reference), which (should) list all of the contents of the package, and how to use them.To get the built-in help about a function or class, use the `help()` commandDocumentation for most common packages is also usually available online. For example, the documentation for [numpy can be found here](https://docs.scipy.org/doc/numpy/reference/)
###Code
help(print)
###Output
Help on built-in function print in module builtins:
print(...)
print(value, ..., sep=' ', end='\n', file=sys.stdout, flush=False)
Prints the values to a stream, or to sys.stdout by default.
Optional keyword arguments:
file: a file-like object (stream); defaults to the current sys.stdout.
sep: string inserted between values, default a space.
end: string appended after the last value, default a newline.
flush: whether to forcibly flush the stream.
###Markdown
Commenting codeIn order for your code to be readable to others (or your future self), you should provide comments on your code to explain what you are doing. The comment character in python is `#`, and any text following a `#` symbol will not be interpreted as code by python.
###Code
array_of_zeros = np.zeros([3,3]) # this creates a 3x3 array full of zeros
print(array_of_zeros) # the print() function displays the value of the variable on screen
###Output
_____no_output_____
###Markdown
Code flow LoopsA key part of programming is automating repetitive tasks, such as applying the same operation to a list of inputs. This is achieved using "loops"; most commonly, the `for` loop.In its simplest form, a python loop iterates over a list, and runs the code within the loop with the variable set equal to the respective element of the list.
###Code
idx_list = [0,1,2,3,4,5]
for idx in idx_list: # loop over idx_list, set idx equal to each element sequentially
print('idx is equal to {}'.format(idx)) # print the current value of idx
print(f'this is another way to print {idx}')
###Output
idx is equal to 0
this is another way to print 0
idx is equal to 1
this is another way to print 1
idx is equal to 2
this is another way to print 2
idx is equal to 3
this is another way to print 3
idx is equal to 4
this is another way to print 4
idx is equal to 5
this is another way to print 5
###Markdown
Lists are not the only kinds of objects that can be iterated over (also known as an iterable). A special kind of object, called a generator, does not explicitly store every single value in memory, but instead stores the current value, and the rule to generate the next value. This can often be faster than explicitly storing every element.As an analogy, if you wanted to send to your friend the following sequence of numbers: [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39, 41, 43, 45, 47, 49, 51, 53, 55, 57, 59], you could write each number down and send them the entire list. Or you could write "the sequence of numbers starting at 1, increasing by 2, but less than 60"If the sequence is very long, then the second representation becomes preferable to write, because you don't need to explicitly write out every single element. One common generator that is used in python is `range(start, end, increment)`, which creates a generator that produces the sequence of numbers starting at `start`, increments by `increment`, and is less than (__but not equal to__) `end`. If `increment` is not set, it defaults to 1.This is often used in conjunction with iteration:
###Code
for idx in range(0,6): #equivalent to the above
print('idx is equal to {}'.format(idx)) # print the current value of idx
for idx in range(6): # if you only give one argument, it automatically starts from 0
print('idx is currently {}'.format(idx)) # print the current value of idx
###Output
_____no_output_____
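###Markdown
You can also write your own generators with the `yield` keyword: the function hands back one value at a time instead of building the whole list in memory. A small sketch matching the odd-numbers analogy above:
###Code
def odd_numbers_below(limit):
    current = 1
    while current < limit:
        yield current # hand back one value, then pause here until the next value is requested
        current += 2
for value in odd_numbers_below(10): # only one value exists in memory at a time
    print(value)
print(list(odd_numbers_below(10))) # you can still collect all of the values into a list if needed
###Output
_____no_output_____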
###Markdown
ConditionalsSometimes you want to execute code only if certain conditions are met. The `if`, `elif` (short for else-if), and `else` keywords are used for this purpose
###Code
for idx in range(1,6):
if idx > 2 : # only execute the following indented block of code if idx is greater than 2
print('idx={}, which is greater than 2'.format(idx))
elif idx == 2: # only execute if the above condition isn't met, and also idx==2
# note that == is used to check for equality; single = is the assignment operator
print('idx={} is equal to 2'.format(idx))
else: # execute this code if none of the above conditions are met
print('idx={} is less than 2'.format(idx))
###Output
_____no_output_____
###Markdown
You can combine different conditions using the keywords `and` and `or`, and negate conditions using `not`
###Code
x = 5
y = 10
print(x==5 or y==11) # true because first statement is true
print(x==5 and y==11) # false because not both are true
print(x==5 and not y==11) # true because second condition is negated (flipped)
###Output
_____no_output_____
###Markdown
List comprehensionYou can generate a list from any iterable in a couple ways. This is called list comprehension.The simplest way is just to call `list()` on the generator object
###Code
list(range(6))
###Output
_____no_output_____
###Markdown
Another way is using the following syntax:```[x for x in iterable]```for example:
###Code
list_of_numbers = [x for x in range(6)]
print(list_of_numbers)
###Output
[0, 1, 2, 3, 4, 5]
###Markdown
However, the list comprehension syntax is actually more powerful than that: it allows for functions to be called within the expression```[expression(x) for x in iterable]```
###Code
list_of_first_five_squares = [x**2 for x in range(6)] # the double star ** expression denotes exponentiation
# hence, the above gives the first 5 square numbers, including zero
print(list_of_first_five_squares)
###Output
_____no_output_____
###Markdown
In fact, the list comprehension syntax is even more powerful: it can also include conditional statements```[expression(x) for x in iterable if condition]```
###Code
list_of_first_few_odd_squares = [x**2 for x in range(10) if np.mod(x,2) == 1]
print(list_of_first_few_odd_squares)
###Output
_____no_output_____
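###Markdown
Note that a condition at the end of a comprehension filters elements out. If you instead want to keep every element but compute two different things depending on a condition, put an `if`/`else` inside the expression itself. A short sketch:
###Code
labels = ['even' if np.mod(x, 2) == 0 else 'odd' for x in range(6)] # keeps all 6 elements, labelling each one
print(labels)
###Output
_____no_output_____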
###Markdown
Now try to print a list of the first few even cubes using list comprehension. FunctionsFunctions are a way to repeat the same lines of code, potentially with different inputs. If you find yourself writing a lot of repetitive code that shares the same structure, you may want to try and formulate it as a function. Functions are declared using the `def` keyword. In this example, we will write a function that checks if a number is prime.
###Code
def is_prime(number):
    if number < 2: # numbers less than 2 (including 0 and 1) are not prime
        return False
sqrt_num = int(np.sqrt(number)) # we only need to check integer factors up to the square root of the number, rounded down (int() always rounds down)
for potential_factor in range(2,sqrt_num+1): #range(a,b) iterates from the value a to b-1
if np.mod(number, potential_factor) == 0: #np.mod() is the modulo (aka remainder) function; thus, if the remainder is zero, then it divides evenly
return False # if it divides evenly, then it's not prime, then we can return and end the function
return True #if we get through all of the potential factors and haven't found a factor, then it's prime
is_prime(101.0)
###Output
_____no_output_____
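###Markdown
Functions combine naturally with the earlier tools; for example, a list comprehension can use `is_prime()` as its filter condition. A quick sketch (assuming the cell defining `is_prime()` above has been run):
###Code
primes_below_30 = [n for n in range(2, 30) if is_prime(n)] # keep only the numbers the function reports as prime
print(primes_below_30)
###Output
_____no_output_____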
###Markdown
Data structures ListsWe already looked at one python data structure: the list. Lists are _ordered_ collections of values, denoted with square brackets. Lists are _ordered_ in the sense that the order of their elements matters. The list [1,2,3,4] is not the same as [4,3,2,1]
###Code
a_list = [2,0,15,5] # square brackets denote a list
another_list = [15,0,5,2]
print('a_list = {}; another_list = {}'.format(a_list, another_list)) # the .format() function of strings allows you to plug in the variable values in the respective curly braces {}
print('is a_list equal to another_list?')
print(a_list == another_list) # print out the truth value of whether a_list is the same as another_list (it shouldn't be, because they have different ordering)
yet_another_list = [2,0,15,5]
print('but it is equal to yet_another_list:')
print(a_list == yet_another_list)
###Output
a_list = [2, 0, 15, 5]; another_list = [15, 0, 5, 2]
is a_list equal to another_list?
False
but it is equal to yet_another_list:
True
###Markdown
List elements can be any python object, including strings, numbers, and other lists
###Code
diverse_list = ['a', False, [0,0,0], 1.0, 10]
print('the elements of diverse_list are: {}'.format(diverse_list))
print('the data types of the elements are {}'.format([type(x) for x in diverse_list])) # using list comprehension to get the type of each element
###Output
the elements of diverse_list are: ['a', False, [0, 0, 0], 1.0, 10]
the data types of the elements are [<class 'str'>, <class 'bool'>, <class 'list'>, <class 'float'>, <class 'int'>]
###Markdown
You can access a specific element of a list using the square bracket notation (this is known as indexing)```list_name[idx]```Index values can be negative, which start counting from the end. So `list_name[-1]` gives the __last__ element of the list
###Code
first_element_of_diverse_list = diverse_list[0] # python starts counting at 0, so the first element is at index 0
print(first_element_of_diverse_list)
last_element_of_diverse_list = diverse_list[-1]
print(last_element_of_diverse_list)
###Output
a
10
###Markdown
You can "slice" a list using the colon `:` notation```list_name[start_idx:end_idx]```Note that the slice starts at the start_idx, but __does not include__ the element at end_idx.If you omit either start_idx or end_idx, it automatically starts at the first element/ends at the last element respectively
###Code
print(diverse_list[0:2]) # gets the elements at index 0 and 1
print(diverse_list[:2]) # equivalent to the above
print(diverse_list[2:]) # gets all elements from index 2 to the end
print(diverse_list[:]) # gets all elements
###Output
_____no_output_____
###Markdown
Lists are modifiable: you can append and delete entries, as well as change the values of elements
###Code
diverse_list.append('new entry') # add a value to the end
print('appended an entry to diverse_list: {}'.format(diverse_list))
diverse_list[0] = 'changed entry' # change the value of entry at index 0
print('changed an entry of diverse_list: {}'.format(diverse_list))
first_entry = diverse_list.pop(0) # remove (and return) the value at element 0
print('removed "{}" from diverse_list: {}'.format(first_entry, diverse_list))
diverse_list.remove('new entry') # you can also remove the first entry with a specific value, in this case, the "new entry"
print('removed "new entry" from diverse_list: {}'.format(diverse_list))
diverse_list.insert(0,'a') # insert the value 'a' at index 0
print('inserted "a" back into diverse_list: {}'.format(diverse_list))
###Output
appended an entry to diverse_list: ['a', False, [0, 0, 0], 1.0, 10, 'new entry']
changed an entry of diverse_list: ['changed entry', False, [0, 0, 0], 1.0, 10, 'new entry']
removed "changed entry" from diverse_list: [False, [0, 0, 0], 1.0, 10, 'new entry']
removed "new entry" from diverse_list: [False, [0, 0, 0], 1.0, 10]
inserted "a" back into diverse_list: ['a', False, [0, 0, 0], 1.0, 10]
###Markdown
TuplesTuples are unchangeable, ordered sequences of elements, grouped with regular parentheses:```('a','b','c')```
###Code
a_tuple = ('a','b','c')
print('the first element of a_tuple is "{}"'.format(a_tuple[0])) # tuples can be indexed like lists
a_tuple[0] = 10 # however, unlike lists, you cannot change their values once they are set
###Output
_____no_output_____
###Markdown
DictionariesDictionaries are data structures that store _mappings_ from "keys" to respective "values". You can think of them as lookup tables which return a specific value for a given key. For example, an English dictionary (the book) could be stored as a python dictionary, where the "keys" are each of the words in English, and the "values" are the respective definitions.They are defined using curly braces, or the `dict()` function:```dictionary = {key: value, key2: value2}dictionary = dict([(key, value),(key2, value2)])```Keys can be a variety of data types, including numeric, strings, and tuples. However, they cannot be changeable objects, such as lists, or other dictionaries. Values, on the other hand, can be any data type.Accessing dictionary values is done using square brackets, with the syntax:```dictionary[key] returns the value associated with key```
###Code
pokemon_types = {'bulbasaur':'grass', 'charmander':'fire', 'squirtle':'water'}
pokemon_types
print(pokemon_types['bulbasaur'])
###Output
_____no_output_____
###Markdown
You can add or change an element to a dictionary using the following syntax:```dictionary[key] = value```
###Code
pokemon_types['bulbasaur'] = 'grass/poison' #bulbasaur is actually dual typed, so we'll change its entry
pokemon_types['ivysaur'] = 'grass/poison' #let's add an evolution
pokemon_types
###Output
_____no_output_____
###Markdown
You can get a list of all of the keys to a dictionary using the `.keys()` function, similarly with the `.values()` function.
###Code
print(pokemon_types.keys())
print(pokemon_types.values())
###Output
_____no_output_____
###Markdown
You can use the function `.items()` to get a list of `(key, value)` tuples. This is often useful for looping
###Code
for k, v in pokemon_types.items():
print('the type of {} is {}'.format(k,v))
###Output
_____no_output_____
###Markdown
NumpyNumpy is a package for python which provides various tools to make math and numerical computation much easier. One of the key components is the numpy array, which enables matrices.The content in this section is adapted from the Python Data Science Handbook, which is [freely available online](https://github.com/jakevdp/PythonDataScienceHandbook) numpy arraysnumpy arrays provide the ability to create matrices, which are essentially 2-dimensional lists. (They can also be used to create even higher-dimensional arrays: tensors, etc)Unlike python lists, numpy arrays must all have the same data type (e.g. numeric, string). Arrays can be created from python lists:
###Code
print('a vector can be created from a list {}'.format(np.array([1, 4, 2, 5, 3])))
print('a matrix can be created from a list of lists:\n {}'.format(np.array([[1,1,1],[2,2,2],[3,3,3]]))) #\n is the newline character and makes the following text appear on the next line
###Output
_____no_output_____
###Markdown
There are also a bunch of built-in functions for generating arrays.
###Code
# Create a length-10 integer array filled with zeros
np.zeros(10, dtype=int)
# Create a 3x5 floating-point array filled with ones
np.ones((3, 5), dtype=float)
# Create a 3x5 array filled with 3.14
np.full((3, 5), 3.14)
# Create an array filled with a linear sequence
# Starting at 0, ending at 20, stepping by 2
# (this is similar to the built-in range() function)
np.arange(0, 20, 2)
# Create an array of five values evenly spaced between 0 and 1
np.linspace(0, 1, 5)
# Create a 3x3 array of uniformly distributed
# random values between 0 and 1
np.random.random((3, 3))
# Create a 3x3 array of normally distributed random values
# with mean 0 and standard deviation 1
np.random.normal(0, 1, (3, 3))
# Create a 3x3 array of random integers in the interval [0, 10)
np.random.randint(0, 10, (3, 3))
# Create a 3x3 identity matrix
np.eye(3)
# Create an uninitialized array of three integers
# The values will be whatever happens to already exist at that memory location
np.empty(3)
###Output
_____no_output_____
###Markdown
Array attributes
###Code
x1 = np.random.randint(10, size=6) # One-dimensional array
x2 = np.random.randint(10, size=(3, 4)) # Two-dimensional array
x3 = np.random.randint(10, size=(3, 4, 5)) # Three-dimensional array
print('x1=',x1)
print('x2=',x2)
print('x3=',x3)
print("x3 ndim: ", x3.ndim)
print("x3 shape:", x3.shape)
print("x3 size: ", x3.size)
###Output
x3 ndim: 3
x3 shape: (3, 4, 5)
x3 size: 60
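###Markdown
A few other attributes are often useful: `dtype` gives the data type of the elements, `itemsize` the bytes per element, and `nbytes` the total memory used. A short sketch on the `x3` array defined above:
###Code
print("x3 dtype:   ", x3.dtype) # the element data type (an integer type here)
print("x3 itemsize:", x3.itemsize) # bytes used by a single element
print("x3 nbytes:  ", x3.nbytes) # total bytes, equal to itemsize * size
###Output
_____no_output_____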
###Markdown
Array IndexingYou can index arrays much in the same way that you can index python listsOne-dimensional arrays function just like lists
###Code
print('x1 is', x1)
print('first entry is', x1[0]) # first entry of the array
print('second and third entries are', x1[[1,2]])
print('first three entries are', x1[:3]) # slice the array
print('a second set of colons in the slice allow you to set the interval:', x1[::2]) # every other element of x1
print('you can reverse the order using a negative interval:', x1[3::-1]) # count backwards from entry at index 3 to the beginning
###Output
x1 is [4 7 2 1 2 5]
first entry is 4
second and third entries are [7 2]
first three entries are [4 7 2]
a second set of colons in the slice allow you to set the interval: [4 2 2]
you can reverse the order using a negative interval: [1 2 7 4]
###Markdown
Multi-dimensional arrays are indexed using a tuple of indices. The indices for a 2d array are ordered as `(row_idx, col_idx)`
###Code
print(x2)
print('the element in the second row, third column is: ', x2[(1,2)])
###Output
[[3 7 4 6]
[7 5 5 9]
[1 3 5 1]]
the element in the second row, third column is: 5
###Markdown
You can slice multidimensional arrays as well!If you change the value of an entry in a slice, you change the value in the original object. This is what's known as a "view" of an array. Slices do not return an independent object, but instead can be thought of as just a reference to a subset of elements in the original object.However, if you do not want this behavior, you can avoid it by making a copy using the `.copy()` function.
###Code
print('x2 is originally: \n', x2)
slice_of_x2 = x2[1:,2:] # slices the 2nd row to the end, and 3rd column to the end
copied_slice_of_x2 = x2[1:,2:].copy() # note that slices provide a direct view of the original object, if you want an independent copy, use the .copy()
print('slice_of_x2 is:\n',slice_of_x2)
print('copied_slice_of_x2 is: \n', copied_slice_of_x2)
# let's change the value of slice_of_x2
slice_of_x2[0,0] = 99 # we changed the value of top left element to 99; this corresponds to the element in the 2nd row, 3rd column of x2
print('now x2 is: \n', x2)
print('slice_of_x2 is:\n',slice_of_x2)
print('and copied_slice_of_x2 is:\n',copied_slice_of_x2)
# If you change the value of a copy, it does not affect the original object
copied_slice_of_x2[0,0] = -50
print('copied_slice_of_x2 is: \n', copied_slice_of_x2)
print('x2 is unchanged by this operation:\n', x2)
###Output
copied_slice_of_x2 is:
[[-50 9]
[ 5 1]]
x2 is unchanged by this operation:
[[ 3 7 4 6]
[ 7 5 99 9]
[ 1 3 5 1]]
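###Markdown
If you are ever unsure whether two arrays share memory (a view) or are independent (a copy), `np.may_share_memory()` gives a quick check. A small sketch using the arrays from the cell above:
###Code
print(np.may_share_memory(x2, slice_of_x2)) # True: the slice is a view into x2
print(np.may_share_memory(x2, copied_slice_of_x2)) # False: the copy owns its own data
###Output
_____no_output_____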
###Markdown
ReshapingYou can reshape an array using the `.reshape()` function
###Code
print('np.arange(12):',np.arange(12))
reshaped = np.arange(12).reshape(3,4)
print('reshaped into a 3x4 array: \n',reshaped)
###Output
np.arange(12): [ 0 1 2 3 4 5 6 7 8 9 10 11]
reshaped into a 3x4 array:
[[ 0 1 2 3]
[ 4 5 6 7]
[ 8 9 10 11]]
###Markdown
You can combine and split arrays in numpy. However, we won't be going too much in depth with that. Check out [this tutorial](https://github.com/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/02.02-The-Basics-Of-NumPy-Arrays.ipynb) for more info in that realm. Boolean arrays and maskingBoolean data represents True/False values, which can also be expressed as 1 or 0 respectively.You can compute boolean operations on arrays in numpy
###Code
even_entries_bool = np.mod(reshaped,2)==0
print(even_entries_bool)
###Output
[[ True False True False]
[ True False True False]
[ True False True False]]
###Markdown
You can then use those arrays to select the entries which match that criteria
###Code
reshaped[even_entries_bool]
###Output
_____no_output_____
###Markdown
ExerciseUsing the `is_prime()` function we previously wrote, write a function which takes a numpy array and returns a boolean array of the prime entries with the same shape as the input array
###Code
import numpy as np
def is_prime_array(input_array):
"""
returns a boolean array of the same shape as input_array
with True if the element in the same position of input_array is prime
and False otherwise
example:
x = np.array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
is_prime_array(x)
should return
[[False, False, True, True],
[False, True, False, True],
[False, False, False, True]]
"""
# fill in with your code
output_array = []
for i in input_array:
for num in i:
if num > 1:
for i in range(2, int(np.sqrt(num)) + 1):
if num % i == 0:
output_array.append(False)
break
else:
output_array.append(True)
if num == 0:
output_array.append(False)
if num == 1:
output_array.append(False)
    return np.reshape(np.asarray(output_array), input_array.shape) # use the input's shape so the result matches arrays of any size, not just 3x4
is_prime_array(np.array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]]))
###Output
_____no_output_____
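###Markdown
As an aside, one way to reuse the scalar `is_prime()` function directly (rather than re-implementing the primality test) is `np.vectorize`, which applies a scalar function to every element and preserves the input's shape. This is just a sketch of an alternative, assuming the earlier `is_prime()` cell has been run; the helper name `is_prime_array_v2` is made up for illustration.
###Code
def is_prime_array_v2(input_array):
    return np.vectorize(is_prime)(input_array) # applies is_prime element-wise, keeping the input's shape
print(is_prime_array_v2(np.array([[ 0, 1, 2, 3],
                                  [ 4, 5, 6, 7],
                                  [ 8, 9, 10, 11]])))
###Output
_____no_output_____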
###Markdown
Random functionsYou'll often need random numbers in programming. For example: taking a random sample of data, simulating a coin flip/dice roll, and generating simulated data.Numpy has a bunch of built-in random functions for this purpose. These functions are accessible in the `np.random` submodule
###Code
np.random.rand(2,3,4) # generates uniform random numbers between 0,1
# arguments of rand(a,b,c,d,...) determine the dimensions of the array
# in this case, we created a 2x3x4 3d array
np.random.rand(10) # we can use this just to get a list of 10 random numbers from [0,1)
np.random.randn(5) # randn is the standard normal distribution (gaussian)
# by default, it has mean=0 and variance=1
# you can shift and scale the gaussian to get a different mean and standard deviation
# for example, to have mean=3 and standard deviation=2 (multiplying by a constant scales the standard deviation, not the variance)
def scaled_randn(mean, std, n_samples):
    return mean + np.random.randn(n_samples)*std # adding the mean shifts the center; multiplying by std widens the spread
scaled_randn(3,2,100)
# randint gives random integers. Arguments are (low, high, size)
# randint operates on the interval [low,high) (high is not included)
np.random.randint(1,10,20)
# if you want it to be inclusive, then you should call randint(low,high+1,size)
np.random.randint(1,11,20)
#randint is useful to generate a random sample with replacement of a collection
letters = np.array(['a','b','c','d','e','f'])
rand_idx = np.random.randint(0,len(letters),10) #len(letters) is the length of letters
letters[rand_idx]
# if you don't want sampling with replacement, you can use permutation
np.random.permutation(letters)
# another way to do sampling is with the choice(x[, size, replace, p]) function
print(np.random.choice(letters)) # x is the only required argument, will just return one random entry
print(np.random.choice(letters, 10)) # size lets you specify how many to sample
print(np.random.choice(letters, [2,3])) # can be multi dimensional
print(np.random.choice(letters, [2,3], False)) # whether to sample with replacement (default True)
# by default, choice() uses a uniform random probability (i.e. fair dice)
# sometimes you want to weight certain outcomes to be more likely
# p allows you to do that by specifying the probabilities of each outcome
# let's make 'a' be much more likely than the others
print(np.random.choice(letters, 100, True, [0.75,0.05,0.05,0.05,0.05,0.05]))
###Output
['a' 'a' 'a' 'a' 'a' 'a' 'a' 'a' 'c' 'a' 'a' 'b' 'c' 'f' 'a' 'b' 'a' 'a'
'a' 'a' 'f' 'b' 'b' 'a' 'a' 'a' 'a' 'a' 'a' 'a' 'a' 'e' 'a' 'e' 'a' 'a'
'b' 'a' 'a' 'a' 'a' 'a' 'b' 'a' 'c' 'a' 'd' 'a' 'a' 'a' 'a' 'a' 'a' 'a'
'a' 'd' 'a' 'b' 'a' 'a' 'a' 'e' 'a' 'a' 'd' 'd' 'a' 'a' 'a' 'c' 'a' 'c'
'a' 'a' 'c' 'a' 'a' 'a' 'a' 'a' 'a' 'a' 'c' 'a' 'e' 'a' 'a' 'a' 'a' 'e'
'a' 'a' 'a' 'a' 'a' 'a' 'a' 'a' 'f' 'a']
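###Markdown
Recent numpy versions (1.17 and later) also ship a newer `Generator` interface, created with `np.random.default_rng()`, which bundles seeding and sampling into one object. A hedged sketch of the same operations, assuming a recent numpy and reusing the `letters` array from above:
###Code
rng = np.random.default_rng(seed=0) # a Generator object with its own reproducible state
print(rng.integers(1, 11, size=20)) # like randint; pass endpoint=True if you want the high end included
print(rng.choice(letters, size=10)) # same idea as np.random.choice
print(rng.normal(3, 2, size=5)) # gaussian with mean 3 and standard deviation 2
###Output
_____no_output_____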
###Markdown
Intro to (/review of) python In the next few lessons, we'll review basic python and programming concepts, as well as go over the fundamentals of how to use some common python packages, such as:- numpy- pandas- matplotlib Importing packagesPackages are collections of pre-written code made available for reuse. In the previous lesson, we installed some necessary packages using the `pip` python package manager. Packages are convenient because they save you from having to implement every feature and function on your own. The widely-used packages also provide a standard, common set of tools for others to develop with --- allowing interoperability between programs.There are a few different ways to import package in python:The simplest is just to `import {packagename}`. For this lesson, we'll use `numpy` as the example package```import numpy```The functions, classes, and variables of the `numpy` package can then be accessed using "dot" notation: for example, the numpy array class can be accessed with `numpy.array`---A variant of this is to use `import {packagename} as {shortname}`, as in:```import numpy as np```This reduces the number of characters needed to type, and can be convenient if the package name is long, or you need to use many things from the same package. Accessing the numpy array class, for example, can be done with ```np.array```---If you only need a subset of items from a package, for example, a single class, function, or a submodule (subpackage of the main package), you can use the syntax ``` from {packagename} import {element}```, as in:```from numpy import array```This allows you to use the `array` class directly, without importing the rest of the numpy package, and without needing to use the package prefix dot notation.For example, if you use this import method, then writing```test_array = array([0])```would be equivalent to writing```test_array = numpy.array([0])test_array = np.array([0])```using the previous import styles, respectively.
###Code
import numpy as np
a = np.array([0,1])
print(a)
###Output
[0 1]
###Markdown
DocumentationPackages contain functions, classes, and variables which may be helpful. Crucial to the usability of a package is the documentation (or API reference), which (should) list all of the contents of the package, and how to use them.To get the built-in help about a function or class, use the `help()` commandDocumentation for most common packages is also usually available online. For example, the documentation for [numpy can be found here](https://docs.scipy.org/doc/numpy/reference/)
###Code
help(print)
###Output
_____no_output_____
###Markdown
Commenting codeIn order for your code to be readable to others (or your future self), you should provide comments on your code to explain what you are doing. The comment character in python is `#`, and any text following a `#` symbol will not be interpreted as code by python.
###Code
array_of_zeros = np.zeros([3,3]) # this creates a 3x3 array full of zeros
print(array_of_zeros) # the print() function displays the value of the variable on screen
###Output
_____no_output_____
###Markdown
Code flow LoopsA key part of programming is automating repetitive tasks, such as applying the same operation to a list of inputs. This is achieved using "loops"; most commonly, the `for` loop.In its simplest form, a python loop iterates over a list, and runs the code within the loop with the variable set equal to the respective element of the list.
###Code
idx_list = [0,1,2,3,4,5]
for idx in idx_list: # loop over idx_list, set idx equal to each element sequentially
print('idx is equal to {}'.format(idx)) # print the current value of idx
###Output
_____no_output_____
###Markdown
Lists are not the only kinds of objects that can be iterated over (also known as an iterable). A special kind of object, called a generator, does not explicitly store every single value in memory, but instead stores the current value, and the rule to generate the next value. This can often be faster than explicitly storing every element.As an analogy, if you wanted to send to your friend the following sequence of numbers: [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39, 41, 43, 45, 47, 49, 51, 53, 55, 57, 59], you could write each number down and send them the entire list. Or you could write "the sequence of numbers starting at 1, increasing by 2, but less than 60"If the sequence is very long, then the second representation becomes preferable to write, because you don't need to explicitly write out every single element. One common generator that is used in python is `range(start, end, increment)`, which creates a generator that produces the sequence of numbers starting at `start`, increments by `increment`, and is less than (__but not equal to__) `end`. If `increment` is not set, it defaults to 1.This is often used in conjunction with iteration:
###Code
for idx in range(0,6): #equivalent to the above
print('idx is equal to {}, two times that is {}'.format(idx, 2*idx)) # print the current value of idx
print(f'new way to format strings is {idx}, two times that is {2*idx}')
for idx in range(6): # if you only give one argument, it automatically starts from 0
print('idx is currently {}'.format(idx)) # print the current value of idx
np.arange(0, 5, 0.2)
###Output
_____no_output_____
###Markdown
ConditionalsSometimes you want to execute code only if certain conditions are met. The `if`, `elif` (short for else-if), and `else` keywords are used for this purpose
###Code
for idx in range(1,6):
if idx > 2 : # only execute the following indented block of code if idx is greater than 2
print('idx={}, which is greater than 2'.format(idx))
elif idx == 2: # only execute if the above condition isn't met, and also idx==2
# note that == is used to check for equality; single = is the assignment operator
print('idx={} is equal to 2'.format(idx))
else: # execute this code if none of the above conditions are met
print('idx={} is less than 2'.format(idx))
###Output
idx=1 is less than 2
idx=2 is equal to 2
idx=3, which is greater than 2
idx=4, which is greater than 2
idx=5, which is greater than 2
###Markdown
You can combine different conditions using the keywords `and` and `or`, and negate conditions using `not`
###Code
x = 5
y = 10
print(x==5 or y==11) # true because first statement is true
print(x==5 and y==11) # false because not both are true
print(x==5 and not y==11) # true because second condition is negated (flipped)
print(True^True)   # ^ is bitwise XOR: True xor True is False
print(True&True)   # & is bitwise AND
print(True|True)   # | is bitwise OR
print(True^~False) # ~ is bitwise NOT on the underlying integer: ~False is -1, so True ^ -1 is -2
###Output
False
True
True
-2
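###Markdown
The bitwise operators above (`^`, `&`, `|`, `~`) work on the underlying integer bits, which is why `~False` is -1 and `True^~False` prints -2. They matter for numpy because boolean arrays must be combined element-wise with `&`, `|`, and `~`; the python keywords `and`/`or` ask for a single True/False and raise an error on arrays. A small sketch:
###Code
arr = np.array([1, 2, 3, 4])
print((arr > 1) & (arr < 4)) # element-wise combination of two boolean arrays
try:
    result = (arr > 1) and (arr < 4) # "and" tries to collapse each array to a single True/False
except ValueError as err:
    print('using "and" on arrays raises:', err)
###Output
_____no_output_____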
###Markdown
List comprehensionYou can generate a list from any iterable in a couple ways. This is called list comprehension.The simplest way is just to call `list()` on the generator object
###Code
list(range(6))
###Output
_____no_output_____
###Markdown
Another way is using the following syntax:```[x for x in iterable]```for example:
###Code
list_of_numbers = [x for x in range(6)]
print(list_of_numbers)
###Output
_____no_output_____
###Markdown
However, the list comprehension syntax is actually more powerful than that: it allows for functions to be called within the expression```[expression(x) for x in iterable]```
###Code
list_of_first_five_squares = [x**2 for x in range(6)] # the double star ** expression denotes exponentiation
# hence, the above gives the first 5 square numbers, including zero
print(list_of_first_five_squares)
###Output
_____no_output_____
###Markdown
In fact, the list comprehension syntax is even more powerful: it can also include conditional statements```[expression(x) for x in iterable if condition]```
###Code
list_of_first_few_odd_squares = [x**2 for x in range(10) if np.mod(x,2) == 1]
print(list_of_first_few_odd_squares)
###Output
_____no_output_____
###Markdown
Now try to print a list of the first few even cubes using list comprehension. FunctionsFunctions are a way to repeat the same lines of code, potentially with different inputs. If you find yourself writing a lot of repetitive code that shares the same structure, you may want to try and formulate it as a function. Functions are declared using the `def` keyword. In this example, we will write a function that checks if a number is prime.
###Code
def is_prime(number):
    if number < 2: # numbers less than 2 (including 0 and 1) are not prime
        return False
sqrt_num = int(np.sqrt(number)) # we only need to check integer factors up to the square root of the number, rounded down (int() always rounds down)
for potential_factor in range(2,sqrt_num+1): #range(a,b) iterates from the value a to b-1
if np.mod(number, potential_factor) == 0: #np.mod() is the modulo (aka remainder) function; thus, if the remainder is zero, then it divides evenly
return False # if it divides evenly, then it's not prime, then we can return and end the function
return True #if we get through all of the potential factors and haven't found a factor, then it's prime
is_prime(101.0)
###Output
_____no_output_____
###Markdown
Data structures ListsWe already looked at one python data structure: the list. Lists are _ordered_ collections of values, denoted with square brackets. Lists are _ordered_ in the sense that the order of their elements matters. The list [1,2,3,4] is not the same as [4,3,2,1]
###Code
a_list = [2,0,15,5] # square brackets denote a list
another_list = [15,0,5,2]
print('a_list = {}; another_list = {}'.format(a_list, another_list)) # the .format() function of strings allows you to plug in the variable values in the respective curly braces {}
print('is a_list equal to another_list?')
print(a_list == another_list) # print out the truth value of whether a_list is the same as another_list (it shouldn't be, because they have different ordering)
yet_another_list = [2,0,15,5]
print('but it is equal to yet_another_list:')
print(a_list == yet_another_list)
###Output
_____no_output_____
###Markdown
List elements can be any python object, including strings, numbers, and other lists
###Code
diverse_list = ['a', False, [0,0,0], 1.0, 10]
print('the elements of diverse_list are: {}'.format(diverse_list))
print('the data types of the elements are {}'.format([type(x) for x in diverse_list])) # using list comprehension to get the type of each element
###Output
_____no_output_____
###Markdown
You can access a specific element of a list using the square bracket notation (this is known as indexing)```list_name[idx]```Index values can be negative, which start counting from the end. So `list_name[-1]` gives the __last__ element of the list
###Code
first_element_of_diverse_list = diverse_list[0] # python starts counting at 0, so the first element is at index 0
print(first_element_of_diverse_list)
last_element_of_diverse_list = diverse_list[-1]
print(last_element_of_diverse_list)
###Output
_____no_output_____
###Markdown
You can "slice" a list using the colon `:` notation```list_name[start_idx:end_idx]```Note that the slice starts at the start_idx, but __does not include__ the element at end_idx.If you omit either start_idx or end_idx, it automatically starts at the first element/ends at the last element respectively
###Code
print(diverse_list[0:2]) # gets the elements at index 0 and 1
print(diverse_list[:2]) # equivalent to the above
print(diverse_list[2:]) # gets all elements from index 2 to the end
print(diverse_list[:]) # gets all elements
###Output
_____no_output_____
###Markdown
Lists are modifiable: you can append and delete entries, as well as change the values of elements
###Code
diverse_list.append('new entry') # add a value to the end
print('appended an entry to diverse_list: {}'.format(diverse_list))
diverse_list[0] = 'changed entry' # change the value of entry at index 0
print('changed an entry of diverse_list: {}'.format(diverse_list))
first_entry = diverse_list.pop(0) # remove (and return) the value at element 0
print('removed "{}" from diverse_list: {}'.format(first_entry, diverse_list))
diverse_list.remove('new entry') # you can also remove the first entry with a specific value, in this case, the "new entry"
print('removed "new entry" from diverse_list: {}'.format(diverse_list))
diverse_list.insert(0,'a') # insert the value 'a' at index 0
print('inserted "a" back into diverse_list: {}'.format(diverse_list))
###Output
_____no_output_____
###Markdown
TuplesTuples are unchangeable, ordered sequences of elements, grouped with regular parentheses:```('a','b','c')```
###Code
a_tuple = ('a','b','c')
print('the first element of a_tuple is "{}"'.format(a_tuple[0])) # tuples can be indexed like lists
a_tuple[0] = 10 # however, unlike lists, you cannot change their values once they are set
###Output
_____no_output_____
###Markdown
DictionariesDictionaries are data structures that store _mappings_ from "keys" to respective "values". You can think of them as lookup tables which return a specific value for a given key. For example, an English dictionary (the book) could be stored as a python dictionary, where the "keys" are each of the words in English, and the "values" are the respective definitions.They are defined using curly braces, or the `dict()` function:```dictionary = {key: value, key2: value2}dictionary = dict([(key, value),(key2, value2)])```Keys can be a variety of data types, including numeric, strings, and tuples. However, they cannot be changeable objects, such as lists, or other dictionaries. Values, on the other hand, can be any data type.Accessing dictionary values is done using square brackets, with the syntax:```dictionary[key] returns the value associated with key```
###Code
pokemon_types = {'bulbasaur':'grass', 'charmander':'fire', 'squirtle':'water'}
pokemon_types
print(pokemon_types['bulbasaur'])
###Output
_____no_output_____
###Markdown
You can add or change an element to a dictionary using the following syntax:```dictionary[key] = value```
###Code
pokemon_types['bulbasaur'] = 'grass/poison' #bulbasaur is actually dual typed, so we'll change its entry
pokemon_types['ivysaur'] = 'grass/poison' #let's add an evolution
pokemon_types
###Output
_____no_output_____
###Markdown
You can get a list of all of the keys to a dictionary using the `.keys()` function, similarly with the `.values()` function.
###Code
print(pokemon_types.keys())
print(pokemon_types.values())
###Output
_____no_output_____
###Markdown
You can use the function `.items()` to get a list of `(key, value)` tuples. This is often useful for looping
###Code
for k, v in pokemon_types.items():
print('the type of {} is {}'.format(k,v))
###Output
_____no_output_____
###Markdown
NumpyNumpy is a package for python which provides various tools to make math and numerical computation much easier. One of the key components is the numpy array, which enables matrices.The content in this section is adapted from the Python Data Science Handbook, which is [freely available online](https://github.com/jakevdp/PythonDataScienceHandbook) numpy arraysnumpy arrays provide the ability to create matrices, which are essentially 2-dimensional lists. (They can also be used to create even higher-dimensional arrays: tensors, etc)Unlike python lists, numpy arrays must all have the same data type (e.g. numeric, string). Arrays can be created from python lists:
###Code
print('a vector can be created from a list {}'.format(np.array([1, 4, 2, 5, 3])))
print('a matrix can be created from a list of lists:\n {}'.format(np.array([[1,1,1],[2,2,2],[3,3,3]]))) #\n is the newline character and makes the following text appear on the next line
###Output
_____no_output_____
###Markdown
There are also a bunch of built-in functions for generating arrays.
###Code
# Create a length-10 integer array filled with zeros
np.zeros(10, dtype=int)
# Create a 3x5 floating-point array filled with ones
np.ones((3, 5), dtype=float)
# Create a 3x5 array filled with 3.14
np.full((3, 5), 3.14)
# Create an array filled with a linear sequence
# Starting at 0, ending at 20, stepping by 2
# (this is similar to the built-in range() function)
np.arange(0, 20, 2)
# Create an array of five values evenly spaced between 0 and 1
np.linspace(0, 1, 5)
# Create a 3x3 array of uniformly distributed
# random values between 0 and 1
np.random.random((3, 3))
# Create a 3x3 array of normally distributed random values
# with mean 0 and standard deviation 1
np.random.normal(0, 1, (3, 3))
# Create a 3x3 array of random integers in the interval [0, 10)
np.random.randint(0, 10, (3, 3))
# Create a 3x3 identity matrix
np.eye(3)
# Create an uninitialized array of three integers
# The values will be whatever happens to already exist at that memory location
np.empty(3)
###Output
_____no_output_____
###Markdown
Array attributes
###Code
x1 = np.random.randint(10, size=6) # One-dimensional array
x2 = np.random.randint(10, size=(3, 4)) # Two-dimensional array
x3 = np.random.randint(10, size=(3, 4, 5)) # Three-dimensional array
print('x1=',x1)
print('x2=',x2)
print('x3=',x3)
print("x3 ndim: ", x3.ndim)
print("x3 shape:", x3.shape)
print("x3 size: ", x3.size)
###Output
_____no_output_____
###Markdown
Array IndexingYou can index arrays much in the same way that you can index python listsOne-dimensional arrays function just like lists
###Code
print('x1 is', x1)
print('first entry is', x1[0]) # first entry of the array
print('second and third entries are', x1[[1,2]])
print('first three entries are', x1[:3]) # slice the array
print('a second set of colons in the slice allow you to set the interval:', x1[::2]) # every other element of x1
print('you can reverse the order using a negative interval:', x1[3::-1]) # count backwards from entry at index 3 to the beginning
###Output
_____no_output_____
###Markdown
Multi-dimensional arrays are indexed using a tuple of indices. The indices for a 2d array are ordered as `(row_idx, col_idx)`
###Code
print(x2)
print('the element in the second row, third column is: ', x2[(1,2)])
###Output
_____no_output_____
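###Markdown
You can also grab an entire row or column by giving an index for one axis and a slice for the other. A short sketch using the `x2` array from above:
###Code
print('the second row of x2 is:', x2[1]) # a single index returns that whole row
print('the third column of x2 is:', x2[:, 2]) # slice the rows, index the column
###Output
_____no_output_____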
###Markdown
You can slice multidimensional arrays as well!If you change the value of an entry in a slice, you change the value in the original object. This is what's known as a "view" of an array. Slices do not return an independent object, but instead can be thought of as just a reference to a subset of elements in the original object.However, if you do not want this behavior, you can avoid it by making a copy using the `.copy()` function.
###Code
print('x2 is originally: \n', x2)
slice_of_x2 = x2[1:,2:] # slices the 2nd row to the end, and 3rd column to the end
copied_slice_of_x2 = x2[1:,2:].copy() # note that slices provide a direct view of the original object, if you want an independent copy, use the .copy()
print('slice_of_x2 is:\n',slice_of_x2)
print('copied_slice_of_x2 is: \n', copied_slice_of_x2)
# let's change the value of slice_of_x2
slice_of_x2[0,0] = 99 # we changed the value of top left element to 99; this corresponds to the element in the 2nd row, 3rd column of x2
print('now x2 is: \n', x2)
print('slice_of_x2 is:\n',slice_of_x2)
print('and copied_slice_of_x2 is:\n',copied_slice_of_x2)
# If you change the value of a copy, it does not affect the original object
copied_slice_of_x2[0,0] = -50
print('copied_slice_of_x2 is: \n', copied_slice_of_x2)
print('x2 is unchanged by this operation:\n', x2)
###Output
_____no_output_____
###Markdown
ReshapingYou can reshape an array using the `.reshape()` function
###Code
print('np.arange(12):',np.arange(12))
reshaped = np.arange(12).reshape(3,4)
print('reshaped into a 3x4 array: \n',reshaped)
###Output
_____no_output_____
###Markdown
You can combine and split arrays in numpy. However, we won't be going too much in depth with that. Check out [this tutorial](https://github.com/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/02.02-The-Basics-Of-NumPy-Arrays.ipynb) for more info in that realm. Boolean arrays and maskingBoolean data represents True/False values, which can also be expressed as 1 or 0 respectively.You can compute boolean operations on arrays in numpy
###Code
even_entries_bool = np.mod(reshaped,2)==0
print(even_entries_bool)
###Output
_____no_output_____
###Markdown
You can then use those arrays to select the entries which match that criteria
###Code
reshaped[even_entries_bool]
###Output
_____no_output_____
###Markdown
ExerciseUsing the `is_prime()` function we previously wrote, write a function which takes a numpy array and returns a boolean array of the prime entries with the same shape as the input array
###Code
def is_prime_array(input_array):
"""
returns a boolean array of the same shape as input_array
with True if the element in the same position of input_array is prime
and False otherwise
example:
x = np.array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
is_prime_array(x)
should return
[[False, False, True, True],
[False, True, False, True],
[False, False, False, True]]
"""
# fill in with your code
return output_array
###Output
_____no_output_____
###Markdown
Random functionsYou'll often need random numbers in programming. For example: taking a random sample of data, simulating a coin flip/dice roll, and generating simulated data.Numpy has a bunch of built-in random functions for this purpose. These functions are accessible in the `np.random` submodule
###Code
np.random.rand(2,3,4) # generates uniform random numbers between 0,1
# arguments of rand(a,b,c,d,...) determine the dimensions of the array
# in this case, we created a 2x3x4 3d array
np.random.rand(10) # we can use this just to get a list of 10 random numbers from [0,1)
np.random.randn(5) # randn is the standard normal distribution (gaussian)
# by default, it has mean=0 and variance=1
# you can shift and scale the gaussian to get a different mean and standard deviation
# for example, to have mean=3 and standard deviation=2 (multiplying by a constant scales the standard deviation, not the variance)
def scaled_randn(mean, std, n_samples):
    return mean + np.random.randn(n_samples)*std # adding the mean shifts the center; multiplying by std widens the spread
scaled_randn(3,2,100)
# randint gives random integers. Arguments are (low, high, size)
# randint operates on the interval [low,high) (high is not included)
np.random.randint(1,10,20)
# if you want it to be inclusive, then you should call randint(low,high+1,size)
np.random.randint(1,11,20)
#randint is useful to generate a random sample with replacement of a collection
letters = np.array(['a','b','c','d','e','f'])
rand_idx = np.random.randint(0,len(letters),10) #len(letters) is the length of letters
letters[rand_idx]
# if you don't want sampling with replacement, you can use permutation
np.random.permutation(letters)
# another way to do sampling is with the choice(x[, size, replace, p]) function
print(np.random.choice(letters)) # x is the only required argument, will just return one random entry
print(np.random.choice(letters, 10)) # size lets you specify how many to sample
print(np.random.choice(letters, [2,3])) # can be multi dimensional
print(np.random.choice(letters, [2,3], False)) # whether to sample with replacement (default True)
# by default, choice() uses a uniform random probability (i.e. fair dice)
# sometimes you want to weight certain outcomes to be more likely
# p allows you to do that by specifying the probabilities of each outcome
# let's make 'a' be much more likely than the others
print(np.random.choice(letters, 100, True, [0.75,0.05,0.05,0.05,0.05,0.05]))
###Output
_____no_output_____
###Markdown
Intro to (/review of) python In the next few lessons, we'll review basic python and programming concepts, as well as go over the fundamentals of how to use some common python packages, such as:- numpy- pandas- matplotlib Importing packagesPackages are collections of pre-written code made available for reuse. In the previous lesson, we installed some necessary packages using the `pip` python package manager. Packages are convenient because they save you from having to implement every feature and function on your own. The widely-used packages also provide a standard, common set of tools for others to develop with --- allowing interoperability between programs.There are a few different ways to import package in python:The simplest is just to `import {packagename}`. For this lesson, we'll use `numpy` as the example package```import numpy```The functions, classes, and variables of the `numpy` package can then be accessed using "dot" notation: for example, the numpy array class can be accessed with `numpy.array`---A variant of this is to use `import {packagename} as {shortname}`, as in:```import numpy as np```This reduces the number of characters needed to type, and can be convenient if the package name is long, or you need to use many things from the same package. Accessing the numpy array class, for example, can be done with ```np.array```---If you only need a subset of items from a package, for example, a single class, function, or a submodule (subpackage of the main package), you can use the syntax ``` from {packagename} import {element}```, as in:```from numpy import array```This allows you to use the `array` class directly, without importing the rest of the numpy package, and without needing to use the package prefix dot notation.For example, if you use this import method, then writing```test_array = array([0])```would be equivalent to writing```test_array = numpy.array([0])test_array = np.array([0])```using the previous import styles, respectively.
###Code
import numpy as np
###Output
_____no_output_____
###Markdown
DocumentationPackages contain functions, classes, and variables which may be helpful. Crucial to the usability of a package is the documentation (or API reference), which (should) list all of the contents of the package, and how to use them.To get the built-in help about a function or class, use the `help()` command.Documentation for most common packages is also usually available online. For example, the documentation for [numpy can be found here](https://docs.scipy.org/doc/numpy/reference/)
###Code
help(print)
###Output
Help on built-in function print in module builtins:
print(...)
print(value, ..., sep=' ', end='\n', file=sys.stdout, flush=False)
Prints the values to a stream, or to sys.stdout by default.
Optional keyword arguments:
file: a file-like object (stream); defaults to the current sys.stdout.
sep: string inserted between values, default a space.
end: string appended after the last value, default a newline.
flush: whether to forcibly flush the stream.
###Markdown
Commenting codeIn order for your code to be readable to others (or your future self), you should provide comments on your code to explain what you are doing. The comment character in python is `#`, and any text following a `#` symbol will not be interpreted as code by python.
###Code
array_of_zeros = np.zeros([3,3]) # this creates a 3x3 array full of zeros
print(array_of_zeros) # the print() function displays the value of the variable on screen
###Output
_____no_output_____
###Markdown
Code flow LoopsA key part of programming is automating repetitive tasks, such as applying the same operation to a list of inputs. This is achieved using "loops"; most commonly, the `for` loop.In its simplest form, a python loop iterates over a list, and runs the code within the loop with the variable set equal to the respective element of the list.
###Code
idx_list = [0,1,2,3,4,5]
for idx in idx_list: # loop over idx_list, set idx equal to each element sequentially
print('idx is equal to {}'.format(idx)) # print the current value of idx
print(f'gabe is doing {idx}')
###Output
idx is equal to 0
gabe is doing 0
idx is equal to 1
gabe is doing 1
idx is equal to 2
gabe is doing 2
idx is equal to 3
gabe is doing 3
idx is equal to 4
gabe is doing 4
idx is equal to 5
gabe is doing 5
###Markdown
Lists are not the only kinds of objects that can be iterated over (also known as an iterable). A special kind of object, called a generator, does not explicitly store every single value in memory, but instead stores the current value, and the rule to generate the next value. This can often be faster than explicitly storing every element.As an analogy, if you wanted to send to your friend the following sequence of numbers: [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39, 41, 43, 45, 47, 49, 51, 53, 55, 57, 59], you could write each number down and send them the entire list. Or you could write "the sequence of numbers starting at 1, increasing by 2, but less than 60"If the sequence is very long, then the second representation becomes preferable to write, because you don't need to explicitly write out every single element. One common generator that is used in python is `range(start, end, increment)`, which creates a generator that produces the sequence of numbers starting at `start`, increments by `increment`, and is less than (__but not equal to__) `end`. If `increment` is not set, it defaults to 1.This is often used in conjunction with iteration:
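For example, the sequence from the analogy above can be produced with the three-argument form of `range()` (a quick illustrative sketch):
```
odd_numbers = list(range(1, 60, 2))  # start at 1, step by 2, stop before 60
print(odd_numbers)  # [1, 3, 5, ..., 57, 59]
```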
###Code
for idx in range(0,6): #equivalent to the above
print('idx is equal to {}'.format(idx)) # print the current value of idx
for idx in range(6): # if you only give one argument, it automatically starts from 0
print('idx is currently {}'.format(idx)) # print the current value of idx
###Output
idx is currently 0
idx is currently 1
idx is currently 2
idx is currently 3
idx is currently 4
idx is currently 5
###Markdown
ConditionalsSometimes you want to execute code only if certain conditions are met. The `if`, `elif` (short for else-if), and `else` keywords are used for this purpose
###Code
for idx in range(1,6):
if idx > 2 : # only execute the following indented block of code if idx is greater than 2
print('idx={}, which is greater than 2'.format(idx))
elif idx == 2: # only execute if the above condition isn't met, and also idx==2
# note that == is used to check for equality; single = is the assignment operator
print('idx={} is equal to 2'.format(idx))
else: # execute this code if none of the above conditions are met
print('idx={} is less than 2'.format(idx))
###Output
idx=1 is less than 2
idx=2 is equal to 2
idx=3, which is greater than 2
idx=4, which is greater than 2
idx=5, which is greater than 2
###Markdown
You can combine different conditions using the keywords `and` and `or`, and negate conditions using `not`
###Code
x = 5
y = 10
print(x==5 or y==11) # true because first statement is true
print(x==5 and y==11) # false because not both are true
print(x==5 and not y==11) # true because second condition is negated (flipped)
###Output
True
False
True
###Markdown
List comprehensionYou can generate a list from any iterable in a couple ways. This is called list comprehension.The simplest way is just to call `list()` on the generator object
###Code
list(range(6))
###Output
_____no_output_____
###Markdown
Another way is using the following syntax:```[x for x in iterable]```for example:
###Code
list_of_numbers = [x for x in range(6)]
print(list_of_numbers)
###Output
[0, 1, 2, 3, 4, 5]
###Markdown
However, the list comprehension syntax is actually more powerful than that: it allows for functions to be called within the expression```[expression(x) for x in iterable]```
###Code
list_of_first_five_squares = [x**2 for x in range(6)] # the double star ** expression denotes exponentiation
# hence, the above gives the first 5 square numbers, including zero
print(list_of_first_five_squares)
###Output
[0, 1, 4, 9, 16, 25]
###Markdown
In fact, the list comprehension syntax is even more powerful: it can also include conditional statements```[expression(x) for x in iterable if condition]```
###Code
list_of_first_few_odd_squares = [x**2 for x in range(10) if np.mod(x,2) == 1]
print(list_of_first_few_odd_squares)
###Output
[1, 9, 25, 49, 81]
###Markdown
Now try to print a list of the first few even cubes using list comprehension. FunctionsFunctions are a way to repeat the same lines of code, potentially with different inputs. If you find yourself writing a lot of repetitive code that shares the same structure, you may want to try and formulate it as a function. Functions are declared using the `def` keyword. In this example, we will write a function that checks if a number is prime.
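One possible solution to the even-cubes exercise above (a sketch; here "first few" is taken to mean the even numbers below 10):
```
even_cubes = [x**3 for x in range(10) if np.mod(x, 2) == 0]  # cubes of 0, 2, 4, 6, 8
print(even_cubes)  # [0, 8, 64, 216, 512]
```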
###Code
def is_prime(number):
sqrt_num = int(np.sqrt(number)) # we only need to check integer factors up to the square root of the number, rounded down (int() always rounds down)
for potential_factor in range(2,sqrt_num+1): #range(a,b) iterates from the value a to b-1
if np.mod(number, potential_factor) == 0: #np.mod() is the modulo (aka remainder) function; thus, if the remainder is zero, then it divides evenly
return False # if it divides evenly, then it's not prime, then we can return and end the function
return True #if we get through all of the potential factors and haven't found a factor, then it's prime
is_prime(101.0)
###Output
_____no_output_____
###Markdown
Data structures ListsWe already looked at one python data structure: the list. Lists are _ordered_ collections of values, denoted with square brackets. Lists are _ordered_ in the sense that the order of their elements matters. The list [1,2,3,4] is not the same as [4,3,2,1].
###Code
a_list = [2,0,15,5] # square brackets denote a list
another_list = [15,0,5,2]
print('a_list = {}; another_list = {}'.format(a_list, another_list)) # the .format() function of strings allows you to plug in the variable values in the respective curly braces {}
print('is a_list equal to another_list?')
print(a_list == another_list) # print out the truth value of whether a_list is the same as another_list (it shouldn't be, because they have different ordering)
yet_another_list = [2,0,15,5]
print('but it is equal to yet_another_list:')
print(a_list == yet_another_list)
###Output
a_list = [2, 0, 15, 5]; another_list = [15, 0, 5, 2]
is a_list equal to another_list?
False
but it is equal to yet_another_list:
True
###Markdown
List elements can be any python object, including strings, numbers, and other lists
###Code
diverse_list = ['a', False, [0,0,0], 1.0, 10]
print('the elements of diverse_list are: {}'.format(diverse_list))
print('the data types of the elements are {}'.format([type(x) for x in diverse_list])) # using list comprehension to get the type of each element
###Output
the elements of diverse_list are: ['a', False, [0, 0, 0], 1.0, 10]
the data types of the elements are [<class 'str'>, <class 'bool'>, <class 'list'>, <class 'float'>, <class 'int'>]
###Markdown
You can access a specific element of a list using the square bracket notation (this is known as indexing)```list_name[idx]```Index values can be negative, which start counting from the end. So `list_name[-1]` gives the __last__ element of the list
###Code
first_element_of_diverse_list = diverse_list[0] # python starts counting at 0, so the first element is at index 0
print(first_element_of_diverse_list)
last_element_of_diverse_list = diverse_list[-1]
print(last_element_of_diverse_list)
###Output
a
10
###Markdown
You can "slice" a list using the colon `:` notation```list_name[start_idx:end_idx]```Note that the slice starts at the start_idx, but __does not include__ the element at end_idx.If you omit either start_idx or end_idx, it automatically starts at the first element/ends at the last element respectively
###Code
print(diverse_list[0:2]) # gets the elements at index 0 and 1
print(diverse_list[:2]) # equivalent to the above
print(diverse_list[2:]) # gets all elements from index 2 to the end
print(diverse_list[:]) # gets all elements
###Output
['a', False]
['a', False]
[[0, 0, 0], 1.0, 10]
['a', False, [0, 0, 0], 1.0, 10]
###Markdown
Lists are modifiable: you can append and delete entries, as well as change the values of elements
###Code
diverse_list.append('new entry') # add a value to the end
print('appended an entry to diverse_list: {}'.format(diverse_list))
diverse_list[0] = 'changed entry' # change the value of entry at index 0
print('changed an entry of diverse_list: {}'.format(diverse_list))
first_entry = diverse_list.pop(0) # remove (and return) the value at element 0
print('removed "{}" from diverse_list: {}'.format(first_entry, diverse_list))
diverse_list.remove('new entry') # you can also remove the first entry with a specific value, in this case, the "new entry"
print('removed "new entry" from diverse_list: {}'.format(diverse_list))
diverse_list.insert(0,'a') # insert the value 'a' at index 0
print('inserted "a" back into diverse_list: {}'.format(diverse_list))
###Output
appended an entry to diverse_list: ['a', False, [0, 0, 0], 1.0, 10, 'new entry']
changed an entry of diverse_list: ['changed entry', False, [0, 0, 0], 1.0, 10, 'new entry']
removed "changed entry" from diverse_list: [False, [0, 0, 0], 1.0, 10, 'new entry']
removed "new entry" from diverse_list: [False, [0, 0, 0], 1.0, 10]
inserted "a" back into diverse_list: ['a', False, [0, 0, 0], 1.0, 10]
###Markdown
TuplesTuples are unchangeable, ordered sequences of elements, grouped with regular parentheses:```('a','b','c')```
###Code
a_tuple = ('a','b','c')
print('the first element of a_tuple is "{}"'.format(a_tuple[0])) # tuples can be indexed like lists
a_tuple[0] = 10 # however, unlike lists, you cannot change their values once they are set
###Output
_____no_output_____
###Markdown
DictionariesDictionaries are data structures that store _mappings_ from "keys" to respective "values". You can think of them as lookup tables which return a specific value for a given key. For example, an English dictionary (the book) could be stored as a python dictionary, where the "keys" are each of the words in English, and the "values" are the respective definitions.They are defined using the curly braces, or the `dict()` function:```dictionary = {key: value, key2: value2}dictionary = dict([(key, value),(key2, value2)])```Keys can be a variety of data types, including numeric, strings, and tuples. However, they cannot be changeable objects, such as lists, or other dictionaries. Values, on the other hand, can be any data type.Accessing dictionary values is done using square brackets, with the syntax:```dictionary[key] returns the value associated with key```
###Code
pokemon_types = {'bulbasaur':'grass', 'charmander':'fire', 'squirtle':'water'}
pokemon_types
print(pokemon_types['bulbasaur'])
###Output
grass
###Markdown
You can add or change an element to a dictionary using the following syntax:```dictionary[key] = value```
###Code
pokemon_types['bulbasaur'] = 'grass/poison' #bulbasaur is actually dual typed, so we'll change its entry
pokemon_types['ivysaur'] = 'grass/poison' #let's add an evolution
pokemon_types
###Output
_____no_output_____
###Markdown
You can get a list of all of the keys to a dictionary using the `.keys()` function, similarly with the `.values()` function.
###Code
print(pokemon_types.keys())
print(pokemon_types.values())
###Output
dict_keys(['bulbasaur', 'charmander', 'squirtle', 'ivysaur'])
dict_values(['grass/poison', 'fire', 'water', 'grass/poison'])
###Markdown
You can use the function `.items()` to get a list of `(key, value)` tuples. This is often useful for looping
###Code
for k, v in pokemon_types.items():
print('the type of {} is {}'.format(k,v))
###Output
the type of bulbasaur is grass/poison
the type of charmander is fire
the type of squirtle is water
the type of ivysaur is grass/poison
###Markdown
NumpyNumpy is a package for python which provides various tools to make math and numerical computation much easier. One of the key components is the numpy array, which enables matrices.The content in this section is adapted from the Python Data Science Handbook, which is [freely available online](https://github.com/jakevdp/PythonDataScienceHandbook) numpy arraysnumpy arrays provide the ability to create matrices, which are essentially 2-dimensional lists. (They can also be used to create even higher-dimensional arrays: tensors, etc)Unlike python lists, numpy arrays must all have the same data type (e.g. numeric, string). Arrays can be created from python lists:
###Code
print('a vector can be created from a list {}'.format(np.array([1, 4, 2, 5, 3])))
print('a matrix can be created from a list of lists:\n {}'.format(np.array([[1,1,1],[2,2,2],[3,3,3]]))) #\n is the newline character and makes the following text appear on the next line
###Output
a vector can be created from a list [1 4 2 5 3]
a matrix can be created from a list of lists:
[[1 1 1]
[2 2 2]
[3 3 3]]
###Markdown
There are also a bunch of built-in functions for generating arrays.
###Code
# Create a length-10 integer array filled with zeros
np.zeros(10, dtype=int)
# Create a 3x5 floating-point array filled with ones
np.ones((3, 5), dtype=float)
# Create a 3x5 array filled with 3.14
np.full((3, 5), 3.14)
# Create an array filled with a linear sequence
# Starting at 0, ending at 20, stepping by 2
# (this is similar to the built-in range() function)
np.arange(0, 20, 2)
# Create an array of five values evenly spaced between 0 and 1
np.linspace(0, 1, 5)
# Create a 3x3 array of uniformly distributed
# random values between 0 and 1
np.random.random((3, 3))
# Create a 3x3 array of normally distributed random values
# with mean 0 and standard deviation 1
np.random.normal(0, 1, (3, 3))
# Create a 3x3 array of random integers in the interval [0, 10)
np.random.randint(0, 10, (3, 3))
# Create a 3x3 identity matrix
np.eye(3)
# Create an uninitialized array of three integers
# The values will be whatever happens to already exist at that memory location
np.empty(3)
###Output
_____no_output_____
###Markdown
Array attributes
###Code
x1 = np.random.randint(10, size=6) # One-dimensional array
x2 = np.random.randint(10, size=(3, 4)) # Two-dimensional array
x3 = np.random.randint(10, size=(3, 4, 5)) # Three-dimensional array
print('x1=',x1)
print('x2=',x2)
print('x3=',x3)
print("x3 ndim: ", x3.ndim)
print("x3 shape:", x3.shape)
print("x3 size: ", x3.size)
###Output
x3 ndim: 3
x3 shape: (3, 4, 5)
x3 size: 60
###Markdown
Array IndexingYou can index arrays much in the same way that you can index python listsOne-dimensional arrays function just like lists
###Code
print('x1 is', x1)
print('first entry is', x1[0]) # first entry of the array
print('second and third entries are', x1[[1,2]])
print('first three entries are', x1[:3]) # slice the array
print('a second set of colons in the slice allow you to set the interval:', x1[::2]) # every other element of x1
print('you can reverse the order using a negative interval:', x1[3::-1]) # count backwards from entry at index 3 to the beginning
###Output
x1 is [6 4 9 5 5 3]
first entry is 6
second and third entries are [4 9]
first three entries are [6 4 9]
a second set of colons in the slice allow you to set the interval: [6 9 5]
you can reverse the order using a negative interval: [5 9 4 6]
###Markdown
Multi-dimensional arrays are indexed using a tuple of indices. The indices for a 2d array are ordered as `(row_idx, col_idx)`
###Code
print(x2)
print('the element in the second row, third column is: ', x2[(1,2)])
###Output
[[1 6 0 2]
[1 7 4 7]
[8 4 1 3]]
the element in the second row, third column is: 4
###Markdown
You can slice multidimensional arrays as well!If you change the value of an entry in a slice, you change the value in the original object. This is what's known as a "view" of an array. Slices do not return an independent object, but instead can be thought of as just a reference to a subset of elements in the original object.However, if you do not want this behavior, you can avoid it by making a copy using the `.copy()` function.
###Code
print('x2 is originally: \n', x2)
slice_of_x2 = x2[1:,2:] # slices the 2nd row to the end, and 3rd column to the end
copied_slice_of_x2 = x2[1:,2:].copy() # note that slices provide a direct view of the original object, if you want an independent copy, use the .copy()
print('slice_of_x2 is:\n',slice_of_x2)
print('copied_slice_of_x2 is: \n', copied_slice_of_x2)
# let's change the value of slice_of_x2
slice_of_x2[0,0] = 99 # we changed the value of top left element to 99; this corresponds to the element in the 2nd row, 3rd column of x2
print('now x2 is: \n', x2)
print('slice_of_x2 is:\n',slice_of_x2)
print('and copied_slice_of_x2 is:\n',copied_slice_of_x2)
# If you change the value of a copy, it does not affect the original object
copied_slice_of_x2[0,0] = -50
print('copied_slice_of_x2 is: \n', copied_slice_of_x2)
print('x2 is unchanged by this operation:\n', x2)
###Output
copied_slice_of_x2 is:
[[-50 7]
[ 1 3]]
x2 is unchanged by this operation:
[[ 1 6 0 2]
[ 1 7 99 7]
[ 8 4 1 3]]
###Markdown
ReshapingYou can reshape an array using the `.reshape()` function
###Code
print('np.arange(12):',np.arange(12))
reshaped = np.arange(12).reshape(3,4)
print('reshaped into a 3x4 array: \n',reshaped)
###Output
np.arange(12): [ 0 1 2 3 4 5 6 7 8 9 10 11]
reshaped into a 3x4 array:
[[ 0 1 2 3]
[ 4 5 6 7]
[ 8 9 10 11]]
###Markdown
You can combine and split arrays in numpy. However, we won't be going too much in depth with that. Check out [this tutorial](https://github.com/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/02.02-The-Basics-Of-NumPy-Arrays.ipynb) for more info in that realm. Boolean arrays and maskingBoolean data represents True/False values, which can also be expressed as 1 or 0 respectively.You can compute boolean operations on arrays in numpy
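As a minimal sketch of the combining and splitting mentioned above (see the linked tutorial for the full set of functions, e.g. `np.vstack` and `np.hstack`):
```
a = np.array([1, 2, 3])
b = np.array([4, 5, 6])
combined = np.concatenate([a, b])      # [1 2 3 4 5 6]
left, right = np.split(combined, [3])  # split before index 3 -> [1 2 3] and [4 5 6]
print(combined, left, right)
```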
###Code
even_entries_bool = np.mod(reshaped,2)==0
print(even_entries_bool)
###Output
[[ True False True False]
[ True False True False]
[ True False True False]]
###Markdown
You can then use those arrays to select the entries which match that criteria
###Code
reshaped[even_entries_bool]
###Output
_____no_output_____
###Markdown
ExerciseUsing the `is_prime()` function we previously wrote, write a function which takes a numpy array and returns a boolean array of the prime entries with the same shape as the input array
###Code
import numpy as np
def is_prime_array(input_array):
"""
returns a boolean array of the same shape as input_array
with True if the element in the same position of input_array is prime
and False otherwise
example:
x = np.array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
is_prime_array(x)
should return
[[False, False, True, True],
[False, True, False, True],
[False, False, False, True]]
"""
new_ar = []
for ar in input_array:
for item in ar:
new_ar = np.append(new_ar, bool(is_prime(item)))
shpe = np.shape(input_array)
new_ar = new_ar.reshape(shpe)
return new_ar
def is_prime(number):
    if number < 2: # 0 and 1 (and negative numbers) are not prime
        return False
    sqrt_num = int(np.sqrt(number)) # we only need to check integer factors up to the square root of the number, rounded down (int() always rounds down)
    for potential_factor in range(2,sqrt_num+1): #range(a,b) iterates from the value a to b-1
        if np.mod(number, potential_factor) == 0: #np.mod() is the modulo (aka remainder) function; thus, if the remainder is zero, then it divides evenly
            return False # if it divides evenly, then it's not prime, so we can return and end the function
    return True #if we get through all of the potential factors and haven't found a factor, then it's prime
x = np.array([[ 0, 1, 2, 3],[ 4, 5, 6, 7],[ 8, 9, 10, 11]])
print(is_prime_array(x))
print(bool(1))
###Output
[[0. 0. 1. 1.]
 [0. 1. 0. 1.]
 [0. 0. 0. 1.]]
True
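A more compact alternative for the exercise uses `np.vectorize` to apply the scalar `is_prime()` element-wise (a sketch; `is_prime_array_v2` is just an illustrative name):
```
def is_prime_array_v2(input_array):
    v_is_prime = np.vectorize(is_prime)  # wraps is_prime so it broadcasts over arrays
    return v_is_prime(input_array)       # boolean array with the same shape as input_array
print(is_prime_array_v2(x))
```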
###Markdown
Random functionsYou'll often need random numbers in programming. For example: taking a random sample of data, simulating a coin flip/dice roll, and generating simulated data.Numpy has a bunch of built-in random functions for this purpose. These functions are accessible in the `np.random` submodule
###Code
np.random.rand(2,3,4) # generates uniform random numbers between 0,1
# arguments of rand(a,b,c,d,...) determine the dimensions of the array
# in this case, we created a 2x3x4 3d array
np.random.rand(10) # we can use this just to get a list of 10 random numbers from [0,1)
np.random.randn(5) # randn is the standard normal distribution (gaussian)
# by default, it has mean=0 and variance=1
# you can scale the gaussian to have a different mean and standard deviation
# for example, to have mean=3 and standard deviation=2 (multiplying by 2 scales the std, so the variance becomes 4)
def scaled_randn(mean, std, n_samples):
    return mean + np.random.randn(n_samples)*std # shift by the mean and scale by the standard deviation
scaled_randn(3,2,100)
# randint gives random integers. Arguments are (low, high, size)
# randint operates on the interval [low,high) (high is not included)
np.random.randint(1,10,20)
# if you want it to be inclusive, then you should call randint(low,high+1,size)
np.random.randint(1,11,20)
#randint is useful to generate a random sample with replacement of a collection
letters = np.array(['a','b','c','d','e','f'])
rand_idx = np.random.randint(0,len(letters),10) #len(letters) is the length of letters
letters[rand_idx]
# if you don't want sampling with replacement, you can use permutation
np.random.permutation(letters)
# another way to do sampling is with the choice(x[, size, replace, p]) function
print(np.random.choice(letters)) # x is the only required argument, will just return one random entry
print(np.random.choice(letters, 10)) # size lets you specify how many to sample
print(np.random.choice(letters, [2,3])) # can be multi dimensional
print(np.random.choice(letters, [2,3], False)) # whether to sample with replacement (default True)
# by default, choice() uses a uniform random probability (i.e. fair dice)
# sometimes you want to weight certain outcomes to be more likely
# p allows you to do that by specifying the probabilities of each outcome
# let's make 'a' be much more likely than the others
print(np.random.choice(letters, 100, True, [0.75,0.05,0.05,0.05,0.05,0.05]))
###Output
['a' 'f' 'a' 'a' 'a' 'a' 'a' 'a' 'a' 'a' 'a' 'a' 'a' 'a' 'a' 'a' 'a' 'b'
'a' 'c' 'a' 'a' 'a' 'c' 'a' 'a' 'a' 'a' 'a' 'a' 'a' 'f' 'a' 'a' 'a' 'a'
'c' 'a' 'e' 'a' 'a' 'a' 'a' 'a' 'e' 'd' 'a' 'd' 'b' 'd' 'd' 'a' 'f' 'a'
'a' 'a' 'a' 'a' 'e' 'a' 'a' 'a' 'a' 'a' 'b' 'a' 'a' 'a' 'a' 'a' 'a' 'a'
'c' 'a' 'a' 'f' 'a' 'a' 'a' 'b' 'a' 'a' 'a' 'f' 'a' 'a' 'a' 'a' 'e' 'a'
'a' 'a' 'a' 'a' 'd' 'e' 'a' 'd' 'a' 'f']
###Markdown
Intro to (/review of) python In the next few lessons, we'll review basic python and programming concepts, as well as go over the fundamentals of how to use some common python packages, such as:- numpy- pandas- matplotlib Importing packagesPackages are collections of pre-written code made available for reuse. In the previous lesson, we installed some necessary packages using the `pip` python package manager. Packages are convenient because they save you from having to implement every feature and function on your own. The widely-used packages also provide a standard, common set of tools for others to develop with --- allowing interoperability between programs.There are a few different ways to import package in python:The simplest is just to `import {packagename}`. For this lesson, we'll use `numpy` as the example package```import numpy```The functions, classes, and variables of the `numpy` package can then be accessed using "dot" notation: for example, the numpy array class can be accessed with `numpy.array`---A variant of this is to use `import {packagename} as {shortname}`, as in:```import numpy as np```This reduces the number of characters needed to type, and can be convenient if the package name is long, or you need to use many things from the same package. Accessing the numpy array class, for example, can be done with ```np.array```---If you only need a subset of items from a package, for example, a single class, function, or a submodule (subpackage of the main package), you can use the syntax ``` from {packagename} import {element}```, as in:```from numpy import array```This allows you to use the `array` class directly, without importing the rest of the numpy package, and without needing to use the package prefix dot notation.For example, if you use this import method, then writing```test_array = array([0])```would be equivalent to writing```test_array = numpy.array([0])test_array = np.array([0])```using the previous import styles, respectively.
###Code
import numpy as np
###Output
_____no_output_____
###Markdown
DocumentationPackages contain functions, classes, and variables which may be helpful. Crucial to the usability of a package is the documentation (or API reference), which (should) list all of the contents of the package, and how to use them.To get the built-in help about a function or class, use the `help()` command.Documentation for most common packages is also usually available online. For example, the documentation for [numpy can be found here](https://docs.scipy.org/doc/numpy/reference/)
###Code
help(print)
###Output
Help on built-in function print in module builtins:
print(...)
print(value, ..., sep=' ', end='\n', file=sys.stdout, flush=False)
Prints the values to a stream, or to sys.stdout by default.
Optional keyword arguments:
file: a file-like object (stream); defaults to the current sys.stdout.
sep: string inserted between values, default a space.
end: string appended after the last value, default a newline.
flush: whether to forcibly flush the stream.
###Markdown
Commenting codeIn order for your code to be readable to others (or your future self), you should provide comments on your code to explain what you are doing. The comment character in python is `#`, and any text following a `#` symbol will not be interpreted as code by python.
###Code
array_of_zeros = np.zeros([3,3]) # this creates a 3x3 array full of zeros
print(array_of_zeros) # the print() function displays the value of the variable on screen
###Output
_____no_output_____
###Markdown
Code flow LoopsA key part of programming is automating repetitive tasks, such as applying the same operation to a list of inputs. This is achieved using "loops"; most commonly, the `for` loop.In its simplest form, a python loop iterates over a list, and runs the code within the loop with the variable set equal to the respective element of the list.
###Code
idx_list = [0,1,2,3,4,5]
for idx in idx_list: # loop over idx_list, set idx equal to each element sequentially
print('idx is equal to {}'.format(idx)) # print the current value of idx
###Output
idx is equal to 0
idx is equal to 1
idx is equal to 2
idx is equal to 3
idx is equal to 4
idx is equal to 5
###Markdown
Lists are not the only kinds of objects that can be iterated over (also known as an iterable). A special kind of object, called a generator, does not explicitly store every single value in memory, but instead stores the current value, and the rule to generate the next value. This can often be faster than explicitly storing every element.As an analogy, if you wanted to send to your friend the following sequence of numbers: [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39, 41, 43, 45, 47, 49, 51, 53, 55, 57, 59], you could write each number down and send them the entire list. Or you could write "the sequence of numbers starting at 1, increasing by 2, but less than 60"If the sequence is very long, then the second representation becomes preferable to write, because you don't need to explicitly write out every single element. One common generator that is used in python is `range(start, end, increment)`, which creates a generator that produces the sequence of numbers starting at `start`, increments by `increment`, and is less than (__but not equal to__) `end`. If `increment` is not set, it defaults to 1.This is often used in conjunction with iteration:
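For example, the sequence from the analogy above can be produced with the three-argument form of `range()` (a quick illustrative sketch):
```
odd_numbers = list(range(1, 60, 2))  # start at 1, step by 2, stop before 60
print(odd_numbers)  # [1, 3, 5, ..., 57, 59]
```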
###Code
for idx in range(0,6): #equivalent to the above
print('idx is equal to {}'.format(idx)) # print the current value of idx
for idx in range(6): # if you only give one argument, it automatically starts from 0
print('idx is currently {}'.format(idx)) # print the current value of idx
###Output
idx is currently 0
idx is currently 1
idx is currently 2
idx is currently 3
idx is currently 4
idx is currently 5
###Markdown
ConditionalsSometimes you want to execute code only if certain conditions are met. The `if`, `elif` (short for else-if), and `else` keywords are used for this purpose
###Code
for idx in range(1,6):
if idx > 2 : # only execute the following indented block of code if idx is greater than 2
print('idx={}, which is greater than 2'.format(idx))
elif idx == 2: # only execute if the above condition isn't met, and also idx==2
# note that == is used to check for equality; single = is the assignment operator
print('idx={} is equal to 2'.format(idx))
else: # execute this code if none of the above conditions are met
print('idx={} is less than 2'.format(idx))
###Output
idx=1 is less than 2
idx=2 is equal to 2
idx=3, which is greater than 2
idx=4, which is greater than 2
idx=5, which is greater than 2
###Markdown
You can combine different conditions using the keywords `and` and `or`, and negate conditions using `not`
###Code
x = 5
y = 10
print(x==5 or y==11) # true because first statement is true
print(x==5 and y==11) # false because not both are true
print(x==5 and not y==11) # true because second condition is negated (flipped)
###Output
True
False
True
###Markdown
List comprehensionYou can generate a list from any iterable in a couple ways. This is called list comprehension.The simplest way is just to call `list()` on the generator object
###Code
list(range(6))
###Output
_____no_output_____
###Markdown
Another way is using the following syntax:```[x for x in iterable]```for example:
###Code
list_of_numbers = [x for x in range(6)]
print(list_of_numbers)
###Output
[0, 1, 2, 3, 4, 5]
###Markdown
However, the list comprehension syntax is actually more powerful than that: it allows for functions to be called within the expression```[expression(x) for x in iterable]```
###Code
list_of_first_five_squares = [x**2 for x in range(6)] # the double star ** expression denotes exponentiation
# hence, the above gives the first 5 square numbers, including zero
print(list_of_first_five_squares)
###Output
[0, 1, 4, 9, 16, 25]
###Markdown
In fact, the list comprehension syntax is even more powerful: it can also include conditional statements```[expression(x) for x in iterable if condition]```
###Code
list_of_first_few_odd_squares = [x**2 for x in range(10) if np.mod(x,2) == 1]
print(list_of_first_few_odd_squares)
###Output
[1, 9, 25, 49, 81]
###Markdown
Now try to print a list of the first few even cubes using list comprehension. FunctionsFunctions are a way to repeat the same lines of code, potentially with different inputs. If you find yourself writing a lot of repetitive code that shares the same structure, you may want to try and formulate it as a function. Functions are declared using the `def` keyword. In this example, we will write a function that checks if a number is prime.
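One possible solution to the even-cubes exercise above (a sketch; here "first few" is taken to mean the even numbers below 10):
```
even_cubes = [x**3 for x in range(10) if np.mod(x, 2) == 0]  # cubes of 0, 2, 4, 6, 8
print(even_cubes)  # [0, 8, 64, 216, 512]
```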
###Code
def is_prime(number):
sqrt_num = int(np.sqrt(number)) # we only need to check integer factors up to the square root of the number, rounded down (int() always rounds down)
for potential_factor in range(2,sqrt_num+1): #range(a,b) iterates from the value a to b-1
if np.mod(number, potential_factor) == 0: #np.mod() is the modulo (aka remainder) function; thus, if the remainder is zero, then it divides evenly
return False # if it divides evenly, then it's not prime, then we can return and end the function
return True #if we get through all of the potential factors and haven't found a factor, then it's prime
is_prime(101.0)
###Output
_____no_output_____
###Markdown
Data structures ListsWe already looked at one python data structure: the list. Lists are _ordered_ collections of values, denoted with square brackets. Lists are _ordered_ in the sense that the order of their elements matters. The list [1,2,3,4] is not the same as [4,3,2,1].
###Code
a_list = [2,0,15,5] # square brackets denote a list
another_list = [15,0,5,2]
print('a_list = {}; another_list = {}'.format(a_list, another_list)) # the .format() function of strings allows you to plug in the variable values in the respective curly braces {}
print('is a_list equal to another_list?')
print(a_list == another_list) # print out the truth value of whether a_list is the same as another_list (it shouldn't be, because they have different ordering)
yet_another_list = [2,0,15,5]
print('but it is equal to yet_another_list:')
print(a_list == yet_another_list)
###Output
_____no_output_____
###Markdown
List elements can be any python object, including strings, numbers, and other lists
###Code
diverse_list = ['a', False, [0,0,0], 1.0, 10]
print('the elements of diverse_list are: {}'.format(diverse_list))
print('the data types of the elements are {}'.format([type(x) for x in diverse_list])) # using list comprehension to get the type of each element
###Output
_____no_output_____
###Markdown
You can access a specific element of a list using the square bracket notation (this is known as indexing)```list_name[idx]```Index values can be negative, which start counting from the end. So `list_name[-1]` gives the __last__ element of the list
###Code
first_element_of_diverse_list = diverse_list[0] # python starts counting at 0, so the first element is at index 0
print(first_element_of_diverse_list)
last_element_of_diverse_list = diverse_list[-1]
print(last_element_of_diverse_list)
###Output
_____no_output_____
###Markdown
You can "slice" a list using the colon `:` notation```list_name[start_idx:end_idx]```Note that the slice starts at the start_idx, but __does not include__ the element at end_idx.If you omit either start_idx or end_idx, it automatically starts at the first element/ends at the last element respectively
###Code
print(diverse_list[0:2]) # gets the elements at index 0 and 1
print(diverse_list[:2]) # equivalent to the above
print(diverse_list[2:]) # gets all elements from index 2 to the end
print(diverse_list[:]) # gets all elements
###Output
_____no_output_____
###Markdown
Lists are modifiable: you can append and delete entries, as well as change the values of elements
###Code
diverse_list.append('new entry') # add a value to the end
print('appended an entry to diverse_list: {}'.format(diverse_list))
diverse_list[0] = 'changed entry' # change the value of entry at index 0
print('changed an entry of diverse_list: {}'.format(diverse_list))
first_entry = diverse_list.pop(0) # remove (and return) the value at element 0
print('removed "{}" from diverse_list: {}'.format(first_entry, diverse_list))
diverse_list.remove('new entry') # you can also remove the first entry with a specific value, in this case, the "new entry"
print('removed "new entry" from diverse_list: {}'.format(diverse_list))
diverse_list.insert(0,'a') # insert the value 'a' at index 0
print('inserted "a" back into diverse_list: {}'.format(diverse_list))
###Output
_____no_output_____
###Markdown
TuplesTuples are unchangeable, ordered sequences of elements, grouped with regular parentheses:```('a','b','c')```
###Code
a_tuple = ('a','b','c')
print('the first element of a_tuple is "{}"'.format(a_tuple[0])) # tuples can be indexed like lists
a_tuple[0] = 10 # however, unlike lists, you cannot change their values once they are set
###Output
_____no_output_____
###Markdown
DictionariesDictionaries are data structures that store _mappings_ from "keys" to respective "values". You can think of them as lookup tables which return a specific value for a given key. For example, an English dictionary (the book) could be stored as a python dictionary, where the "keys" are each of the words in English, and the "values" are the respective definitions.They are defined using the curly braces, or the `dict()` function:```dictionary = {key: value, key2: value2}dictionary = dict([(key, value),(key2, value2)])```Keys can be a variety of data types, including numeric, strings, and tuples. However, they cannot be changeable objects, such as lists, or other dictionaries. Values, on the other hand, can be any data type.Accessing dictionary values is done using square brackets, with the syntax:```dictionary[key] returns the value associated with key```
###Code
pokemon_types = {'bulbasaur':'grass', 'charmander':'fire', 'squirtle':'water'}
pokemon_types
print(pokemon_types['bulbasaur'])
###Output
_____no_output_____
###Markdown
You can add or change an element to a dictionary using the following syntax:```dictionary[key] = value```
###Code
pokemon_types['bulbasaur'] = 'grass/poison' #bulbasaur is actually dual typed, so we'll change its entry
pokemon_types['ivysaur'] = 'grass/poison' #let's add an evolution
pokemon_types
###Output
_____no_output_____
###Markdown
You can get a list of all of the keys to a dictionary using the `.keys()` function, similarly with the `.values()` function.
###Code
print(pokemon_types.keys())
print(pokemon_types.values())
###Output
_____no_output_____
###Markdown
You can use the function `.items()` to get a list of `(key, value)` tuples. This is often useful for looping
###Code
for k, v in pokemon_types.items():
print('the type of {} is {}'.format(k,v))
###Output
_____no_output_____
###Markdown
NumpyNumpy is a package for python which provides various tools to make math and numerical computation much easier. One of the key components is the numpy array, which enables matrices.The content in this section is adapted from the Python Data Science Handbook, which is [freely available online](https://github.com/jakevdp/PythonDataScienceHandbook) numpy arraysnumpy arrays provide the ability to create matrices, which are essentially 2-dimensional lists. (They can also be used to create even higher-dimensional arrays: tensors, etc)Unlike python lists, numpy arrays must all have the same data type (e.g. numeric, string). Arrays can be created from python lists:
###Code
print('a vector can be created from a list {}'.format(np.array([1, 4, 2, 5, 3])))
print('a matrix can be created from a list of lists:\n {}'.format(np.array([[1,1,1],[2,2,2],[3,3,3]]))) #\n is the newline character and makes the following text appear on the next line
###Output
_____no_output_____
###Markdown
There are also a bunch of built-in functions for generating arrays.
###Code
# Create a length-10 integer array filled with zeros
np.zeros(10, dtype=int)
# Create a 3x5 floating-point array filled with ones
np.ones((3, 5), dtype=float)
# Create a 3x5 array filled with 3.14
np.full((3, 5), 3.14)
# Create an array filled with a linear sequence
# Starting at 0, ending at 20, stepping by 2
# (this is similar to the built-in range() function)
np.arange(0, 20, 2)
# Create an array of five values evenly spaced between 0 and 1
np.linspace(0, 1, 5)
# Create a 3x3 array of uniformly distributed
# random values between 0 and 1
np.random.random((3, 3))
# Create a 3x3 array of normally distributed random values
# with mean 0 and standard deviation 1
np.random.normal(0, 1, (3, 3))
# Create a 3x3 array of random integers in the interval [0, 10)
np.random.randint(0, 10, (3, 3))
# Create a 3x3 identity matrix
np.eye(3)
# Create an uninitialized array of three integers
# The values will be whatever happens to already exist at that memory location
np.empty(3)
###Output
_____no_output_____
###Markdown
Array attributes
###Code
x1 = np.random.randint(10, size=6) # One-dimensional array
x2 = np.random.randint(10, size=(3, 4)) # Two-dimensional array
x3 = np.random.randint(10, size=(3, 4, 5)) # Three-dimensional array
print('x1=',x1)
print('x2=',x2)
print('x3=',x3)
print("x3 ndim: ", x3.ndim)
print("x3 shape:", x3.shape)
print("x3 size: ", x3.size)
###Output
_____no_output_____
###Markdown
Array IndexingYou can index arrays much in the same way that you can index python listsOne-dimensional arrays function just like lists
###Code
print('x1 is', x1)
print('first entry is', x1[0]) # first entry of the array
print('second and third entries are', x1[[1,2]])
print('first three entries are', x1[:3]) # slice the array
print('a second set of colons in the slice allow you to set the interval:', x1[::2]) # every other element of x1
print('you can reverse the order using a negative interval:', x1[3::-1]) # count backwards from entry at index 3 to the beginning
###Output
_____no_output_____
###Markdown
Multi-dimensional arrays are indexed using a tuple of indices. The indices for a 2d array are ordered as `(row_idx, col_idx)`
###Code
print(x2)
print('the element in the second row, third column is: ', x2[(1,2)])
###Output
_____no_output_____
###Markdown
You can slice multidimensional arrays as well!If you change the value of an entry in a slice, you change the value in the original object. This is what's known as a "view" of an array. Slices do not return an independent object, but instead can be thought of as just a reference to a subset of elements in the original object.However, if you do not want this behavior, you can avoid it by making a copy using the `.copy()` function.
###Code
print('x2 is originally: \n', x2)
slice_of_x2 = x2[1:,2:] # slices the 2nd row to the end, and 3rd column to the end
copied_slice_of_x2 = x2[1:,2:].copy() # note that slices provide a direct view of the original object, if you want an independent copy, use the .copy()
print('slice_of_x2 is:\n',slice_of_x2)
print('copied_slice_of_x2 is: \n', copied_slice_of_x2)
# let's change the value of slice_of_x2
slice_of_x2[0,0] = 99 # we changed the value of top left element to 99; this corresponds to the element in the 2nd row, 3rd column of x2
print('now x2 is: \n', x2)
print('slice_of_x2 is:\n',slice_of_x2)
print('and copied_slice_of_x2 is:\n',copied_slice_of_x2)
# If you change the value of a copy, it does not affect the original object
copied_slice_of_x2[0,0] = -50
print('copied_slice_of_x2 is: \n', copied_slice_of_x2)
print('x2 is unchanged by this operation:\n', x2)
###Output
_____no_output_____
###Markdown
ReshapingYou can reshape an array using the `.reshape()` function
###Code
print('np.arange(12):',np.arange(12))
reshaped = np.arange(12).reshape(3,4)
print('reshaped into a 3x4 array: \n',reshaped)
###Output
np.arange(12): [ 0 1 2 3 4 5 6 7 8 9 10 11]
reshaped into a 3x4 array:
[[ 0 1 2 3]
[ 4 5 6 7]
[ 8 9 10 11]]
###Markdown
You can combine and split arrays in numpy. However, we won't be going too much in depth with that. Check out [this tutorial](https://github.com/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/02.02-The-Basics-Of-NumPy-Arrays.ipynb) for more info in that realm. Boolean arrays and maskingBoolean data represents True/False values, which can also be expressed as 1 or 0 respectively.You can compute boolean operations on arrays in numpy
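As a minimal sketch of the combining and splitting mentioned above (see the linked tutorial for the full set of functions, e.g. `np.vstack` and `np.hstack`):
```
a = np.array([1, 2, 3])
b = np.array([4, 5, 6])
combined = np.concatenate([a, b])      # [1 2 3 4 5 6]
left, right = np.split(combined, [3])  # split before index 3 -> [1 2 3] and [4 5 6]
print(combined, left, right)
```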
###Code
even_entries_bool = np.mod(reshaped,2)==0
print(even_entries_bool)
###Output
[[ True False True False]
[ True False True False]
[ True False True False]]
###Markdown
You can then use those arrays to select the entries which match that criteria
###Code
reshaped[even_entries_bool]
###Output
_____no_output_____
###Markdown
ExerciseUsing the `is_prime()` function we previously wrote, write a function which takes a numpy array and returns a boolean array of the prime entries with the same shape as the input array
###Code
def is_prime_array(input_array):
"""
returns a boolean array of the same shape as input_array
with True if the element in the same position of input_array is prime
and False otherwise
example:
x = np.array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
is_prime_array(x)
should return
[[False, False, True, True],
[False, True, False, True],
[False, False, False, True]]
"""
    # one way to fill this in: apply the scalar is_prime() helper below element-wise
    v_is_prime = np.vectorize(is_prime) # np.vectorize wraps is_prime so it broadcasts over arrays
    output_array = v_is_prime(input_array) # the result has the same shape as input_array
    return output_array

def is_prime(number):
    number = int(number)
    if number < 2: # 0 and 1 (and negative numbers) are not prime
        return False
    for potential_factor in range(2, int(np.sqrt(number)) + 1): # only need to check factors up to the square root
        if np.mod(number, potential_factor) == 0: # found a factor, so the number is not prime
            return False
    return True

# alternative: a list comprehension, e.g.
# np.array([is_prime(value) for value in input_array.ravel()]).reshape(input_array.shape)
###Output
_____no_output_____
###Markdown
Random functionsYou'll often need random numbers in programming. For example: taking a random sample of data, simulating a coin flip/dice roll, and generating simulated data.Numpy has a bunch of built-in random functions for this purpose. These functions are accessible in the `np.random` submodule
###Code
np.random.rand(2,3,4) # generates uniform random numbers between 0,1
# arguments of rand(a,b,c,d,...) determine the dimensions of the array
# in this case, we created a 2x3x4 3d array
np.random.rand(10) # we can use this just to get a list of 10 random numbers from [0,1)
np.random.randn(5) # randn is the standard normal distribution (gaussian)
# by default, it has mean=0 and variance=1
# you can scale the gaussian to have a different mean and standard deviation
# for example, to have mean=3 and standard deviation=2 (multiplying by 2 scales the std, so the variance becomes 4)
def scaled_randn(mean, std, n_samples):
    return mean + np.random.randn(n_samples)*std # shift by the mean and scale by the standard deviation
scaled_randn(3,2,100)
# randint gives random integers. Arguments are (low, high, size)
# randint operates on the interval [low,high) (high is not included)
np.random.randint(1,10,20)
# if you want it to be inclusive, then you should call randint(low,high+1,size)
np.random.randint(1,11,20)
#randint is useful to generate a random sample with replacement of a collection
letters = np.array(['a','b','c','d','e','f'])
rand_idx = np.random.randint(0,len(letters),10) #len(letters) is the length of letters
letters[rand_idx]
# if you don't want sampling with replacement, you can use permutation
np.random.permutation(letters)
# another way to do sampling is with the choice(x[, size, replace, p]) function
print(np.random.choice(letters)) # x is the only required argument, will just return one random entry
print(np.random.choice(letters, 10)) # size lets you specify how many to sample
print(np.random.choice(letters, [2,3])) # can be multi dimensional
print(np.random.choice(letters, [2,3], False)) # whether to sample with replacement (default True)
# by default, choice() uses a uniform random probability (i.e. fair dice)
# sometimes you want to weight certain outcomes to be more likely
# p allows you to do that by specifying the probabilities of each outcome
# let's make 'a' be much more likely than the others
print(np.random.choice(letters, 100, True, [0.75,0.05,0.05,0.05,0.05,0.05]))
###Output
_____no_output_____
###Markdown
Intro to (/review of) python In the next few lessons, we'll review basic python and programming concepts, as well as go over the fundamentals of how to use some common python packages, such as:- numpy- pandas- matplotlib Importing packagesPackages are collections of pre-written code made available for reuse. In the previous lesson, we installed some necessary packages using the `pip` python package manager. Packages are convenient because they save you from having to implement every feature and function on your own. The widely-used packages also provide a standard, common set of tools for others to develop with --- allowing interoperability between programs.There are a few different ways to import package in python:The simplest is just to `import {packagename}`. For this lesson, we'll use `numpy` as the example package```import numpy```The functions, classes, and variables of the `numpy` package can then be accessed using "dot" notation: for example, the numpy array class can be accessed with `numpy.array`---A variant of this is to use `import {packagename} as {shortname}`, as in:```import numpy as np```This reduces the number of characters needed to type, and can be convenient if the package name is long, or you need to use many things from the same package. Accessing the numpy array class, for example, can be done with ```np.array```---If you only need a subset of items from a package, for example, a single class, function, or a submodule (subpackage of the main package), you can use the syntax ``` from {packagename} import {element}```, as in:```from numpy import array```This allows you to use the `array` class directly, without importing the rest of the numpy package, and without needing to use the package prefix dot notation.For example, if you use this import method, then writing```test_array = array([0])```would be equivalent to writing```test_array = numpy.array([0])test_array = np.array([0])```using the previous import styles, respectively.
###Code
import numpy as np
###Output
_____no_output_____
###Markdown
DocumentationPackages contain functions, classes, and variables which may be helpful. Crucial to the usability of a package is the documentation (or API reference), which (should) list all of the contents of the package, and how to use them.To get the built-in help about a function or class, use the `help()` command.Documentation for most common packages is also usually available online. For example, the documentation for [numpy can be found here](https://docs.scipy.org/doc/numpy/reference/)
###Code
help(print)
###Output
Help on built-in function print in module builtins:
print(...)
print(value, ..., sep=' ', end='\n', file=sys.stdout, flush=False)
Prints the values to a stream, or to sys.stdout by default.
Optional keyword arguments:
file: a file-like object (stream); defaults to the current sys.stdout.
sep: string inserted between values, default a space.
end: string appended after the last value, default a newline.
flush: whether to forcibly flush the stream.
###Markdown
Commenting codeIn order for your code to be readable to others (or your future self), you should provide comments on your code to explain what you are doing. The comment character in python is `#`, and any text following a `#` symbol will not be interpreted as code by python.
###Code
array_of_zeros = np.zeros([3,3]) # this creates a 3x3 array full of zeros
print(array_of_zeros) # the print() function displays the value of the variable on screen
###Output
[[0. 0. 0.]
[0. 0. 0.]
[0. 0. 0.]]
###Markdown
Code flow LoopsA key part of programming is automating repetitive tasks, such as applying the same operation to a list of inputs. This is achieved using "loops"; most commonly, the `for` loop.In its simplest form, a python loop iterates over a list, and runs the code within the loop with the variable set equal to the respective element of the list.
###Code
idx_list = [0,1,2,3,4,5]
for idx in idx_list: # loop over idx_list, set idx equal to each element sequentially
print('idx is equal to {}'.format(idx)) # print the current value of idx
###Output
idx is equal to 0
idx is equal to 1
idx is equal to 2
idx is equal to 3
idx is equal to 4
idx is equal to 5
###Markdown
Lists are not the only kinds of objects that can be iterated over (also known as an iterable). A special kind of object, called a generator, does not explicitly store every single value in memory, but instead stores the current value, and the rule to generate the next value. This can often be faster than explicitly storing every element.As an analogy, if you wanted to send to your friend the following sequence of numbers: [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39, 41, 43, 45, 47, 49, 51, 53, 55, 57, 59], you could write each number down and send them the entire list. Or you could write "the sequence of numbers starting at 1, increasing by 2, but less than 60"If the sequence is very long, then the second representation becomes preferable to write, because you don't need to explicitly write out every single element. One common generator that is used in python is `range(start, end, increment)`, which creates a generator that produces the sequence of numbers starting at `start`, increments by `increment`, and is less than (__but not equal to__) `end`. If `increment` is not set, it defaults to 1.This is often used in conjunction with iteration:
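For example, the sequence from the analogy above can be produced with the three-argument form of `range()` (a quick illustrative sketch):
```
odd_numbers = list(range(1, 60, 2))  # start at 1, step by 2, stop before 60
print(odd_numbers)  # [1, 3, 5, ..., 57, 59]
```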
###Code
for idx in range(0,6): #equivalent to the above
print('idx is equal to {}'.format(idx)) # print the current value of idx
for idx in range(6): # if you only give one argument, it automatically starts from 0
print('idx is currently {}'.format(idx)) # print the current value of idx
###Output
idx is currently 0
idx is currently 1
idx is currently 2
idx is currently 3
idx is currently 4
idx is currently 5
###Markdown
ConditionalsSometimes you want to execute code only if certain conditions are met. The `if`, `elif` (short for else-if), and `else` keywords are used for this purpose
###Code
for idx in range(1,6):
if idx > 2 : # only execute the following indented block of code if idx is greater than 2
print('idx={}, which is greater than 2'.format(idx))
elif idx == 2: # only execute if the above condition isn't met, and also idx==2
# note that == is used to check for equality; single = is the assignment operator
print('idx={} is equal to 2'.format(idx))
else: # execute this code if none of the above conditions are met
print('idx={} is less than 2'.format(idx))
###Output
idx=1 is less than 2
idx=2 is equal to 2
idx=3, which is greater than 2
idx=4, which is greater than 2
idx=5, which is greater than 2
###Markdown
You can combine different conditions using the keywords `and` and `or`, and negate conditions using `not`
###Code
x = 5
y = 10
print(x==5 or y==11) # true because first statement is true
print(x==5 and y==11) # false because not both are true
print(x==5 and not y==11) # true because second condition is negated (flipped)
###Output
True
False
True
###Markdown
List comprehensionYou can generate a list from any iterable in a couple ways. This is called list comprehension.The simplest way is just to call `list()` on the generator object
###Code
list(range(6))
###Output
_____no_output_____
###Markdown
Another way is using the following syntax:```[x for x in iterable]```for example:
###Code
list_of_numbers = [x for x in range(6)]
print(list_of_numbers)
###Output
[0, 1, 2, 3, 4, 5]
###Markdown
However, the list comprehension syntax is actually more powerful than that: it allows for functions to be called within the expression```[expression(x) for x in iterable]```
###Code
list_of_first_five_squares = [x**2 for x in range(6)] # the double star ** expression denotes exponentiation
# hence, the above gives the first 5 square numbers, including zero
print(list_of_first_five_squares)
###Output
[0, 1, 4, 9, 16, 25]
###Markdown
In fact, the list comprehension syntax is even more powerful: it can also include conditional statements```[expression(x) for x in iterable if condition]```
###Code
list_of_first_few_odd_squares = [x**2 for x in range(10) if np.mod(x,2) == 1]
print(list_of_first_few_odd_squares)
###Output
[1, 9, 25, 49, 81]
###Markdown
Now try to print a list of the first few even cubes using list comprehension. FunctionsFunctions are a way to repeat the same lines of code, potentially with different inputs. If you find yourself writing a lot of repetitive code that shares the same structure, you may want to try and formulate it as a function. Functions are declared using the `def` keyword. In this example, we will write a function that checks if a number is prime.
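One possible solution to the even-cubes exercise above (a sketch; here "first few" is taken to mean the even numbers below 10):
```
even_cubes = [x**3 for x in range(10) if np.mod(x, 2) == 0]  # cubes of 0, 2, 4, 6, 8
print(even_cubes)  # [0, 8, 64, 216, 512]
```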
###Code
def is_prime(number):
if type(number) is int:
if number < 2: # all negative numbers, 0, and 1 are not prime
return False
sqrt_num = int(np.sqrt(number)) # we only need to check integer factors up to the square root of the number, rounded down (int() always rounds down)
for potential_factor in range(2,sqrt_num+1): #range(a,b) iterates from the value a to b-1
if np.mod(number, potential_factor) == 0: #np.mod() is the modulo (aka remainder) function; thus, if the remainder is zero, then it divides evenly
return False # if it divides evenly, then it's not prime, then we can return and end the function
return True #if we get through all of the potential factors and haven't found a factor, then it's prime
return False
is_prime(3)
###Output
_____no_output_____
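###Markdown
As a quick usage sketch (assuming the `is_prime` function from the cell above has been run, and that numpy is imported as `np` as in the surrounding cells), functions combine naturally with the list comprehension syntax from earlier to filter a whole range of numbers at once:
```
# keep only the numbers between 0 and 29 for which is_prime() returns True
primes_below_30 = [n for n in range(30) if is_prime(n)]
print(primes_below_30)   # expected: [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
```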
###Markdown
Data structures ListsWe already looked at one Python data structure: the list. Lists are _ordered_ collections of values, denoted with square brackets. Lists are _ordered_ in the sense that the order of their elements matters: the list [1,2,3,4] is not the same as [4,3,2,1].
###Code
a_list = [2,0,15,5] # square brackets denote a list
another_list = [15,0,5,2]
print('a_list = {}; another_list = {}'.format(a_list, another_list)) # the .format() function of strings allows you to plug in the variable values in the respective curly braces {}
print('is a_list equal to another_list?')
print(a_list == another_list) # print out the truth value of whether a_list is the same as another_list (it shouldn't be, because they have different ordering)
yet_another_list = [2,0,15,5]
print('but it is equal to yet_another_list:')
print(a_list == yet_another_list)
###Output
_____no_output_____
###Markdown
List elements can be any python object, including strings, numbers, and other lists
###Code
diverse_list = ['a', False, [0,0,0], 1.0, 10]
print('the elements of diverse_list are: {}'.format(diverse_list))
print('the data types of the elements are {}'.format([type(x) for x in diverse_list])) # using list comprehension to get the type of each element
###Output
_____no_output_____
###Markdown
You can access a specific element of a list using the square bracket notation (this is known as indexing)```list_name[idx]```Index values can be negative, which start counting from the end. So `list_name[-1]` gives the __last__ element of the list
###Code
first_element_of_diverse_list = diverse_list[0] # python starts counting at 0, so the first element is at index 0
print(first_element_of_diverse_list)
last_element_of_diverse_list = diverse_list[-1]
print(last_element_of_diverse_list)
###Output
_____no_output_____
###Markdown
You can "slice" a list using the colon `:` notation```list_name[start_idx:end_idx]```Note that the slice starts at the start_idx, but __does not include__ the element at end_idx.If you omit either start_idx or end_idx, it automatically starts at the first element/ends at the last element respectively
###Code
print(diverse_list[0:2]) # gets the elements at index 0 and 1
print(diverse_list[:2]) # equivalent to the above
print(diverse_list[2:]) # gets all elements from index 2 to the end
print(diverse_list[:]) # gets all elements
###Output
_____no_output_____
###Markdown
Lists are modifiable: you can append and delete entries, as well as change the values of elements
###Code
diverse_list.append('new entry') # add a value to the end
print('appended an entry to diverse_list: {}'.format(diverse_list))
diverse_list[0] = 'changed entry' # change the value of entry at index 0
print('changed an entry of diverse_list: {}'.format(diverse_list))
first_entry = diverse_list.pop(0) # remove (and return) the value at element 0
print('removed "{}" from diverse_list: {}'.format(first_entry, diverse_list))
diverse_list.remove('new entry') # you can also remove the first entry with a specific value, in this case, the "new entry"
print('removed "new entry" from diverse_list: {}'.format(diverse_list))
diverse_list.insert(0,'a') # insert the value 'a' at index 0
print('inserted "a" back into diverse_list: {}'.format(diverse_list))
###Output
_____no_output_____
###Markdown
TuplesTuples are unchangeable, ordered sequences of elements, grouped with regular parentheses:```('a','b','c')```
###Code
a_tuple = ('a','b','c')
print('the first element of a_tuple is "{}"'.format(a_tuple[0])) # tuples can be indexed like lists
a_tuple[0] = 10 # however, unlike lists, you cannot change their values once they are set
###Output
_____no_output_____
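###Markdown
Running the cell above is expected to fail on its last line: because tuples are immutable, the assignment raises an error along the lines of
```
TypeError: 'tuple' object does not support item assignment
```
which is exactly the point: once created, a tuple's contents cannot be changed.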
###Markdown
DictionariesDictionaries are data structures that store _mappings_ from "keys" to respective "values". You can think of them as lookup tables which return a specific value for a given key. For example, an English dictionary (the book) could be stored as a Python dictionary, where the "keys" are each of the words in English, and the "values" are the respective definitions. They are defined using curly braces, or the `dict()` function:```dictionary = {key: value, key2: value2}dictionary = dict([(key, value),(key2, value2)])```Keys can be a variety of data types, including numeric, strings, and tuples. However, they cannot be changeable (mutable) objects, such as lists or other dictionaries. Values, on the other hand, can be any data type. Accessing the dictionary values is done with square brackets, using the syntax:```dictionary[key] returns the value associated with key```
###Code
pokemon_types = {'bulbasaur':'grass', 'charmander':'fire', 'squirtle':'water'}
pokemon_types
print(pokemon_types['bulbasaur'])
###Output
_____no_output_____
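###Markdown
One detail worth knowing (a small sketch, assuming the `pokemon_types` dictionary from the cell above): looking up a key that is not present with square brackets raises a `KeyError`, while the `.get()` method returns `None` (or a default you supply) instead.
```
# pokemon_types['pikachu']                      # this would raise a KeyError
print(pokemon_types.get('pikachu'))             # prints None
print(pokemon_types.get('pikachu', 'unknown'))  # prints the supplied default: unknown
```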
###Markdown
You can add or change an element to a dictionary using the following syntax:```dictionary[key] = value```
###Code
pokemon_types['bulbasaur'] = 'grass/poison' #bulbasaur is actually dual typed, so we'll change its entry
pokemon_types['ivysaur'] = 'grass/poison' #let's add an evolution
pokemon_types
###Output
_____no_output_____
###Markdown
You can get a list of all of the keys to a dictionary using the `.keys()` function, similarly with the `.values()` function.
###Code
print(pokemon_types.keys())
print(pokemon_types.values())
###Output
_____no_output_____
###Markdown
You can use the function `.items()` to get a list of `(key, value)` tuples. This is often useful for looping
###Code
for k, v in pokemon_types.items():
print('the type of {} is {}'.format(k,v))
###Output
_____no_output_____
###Markdown
NumpyNumpy is a package for Python which provides various tools to make math and numerical computation much easier. One of the key components is the numpy array, which enables matrices. The content in this section is adapted from the Python Data Science Handbook, which is [freely available online](https://github.com/jakevdp/PythonDataScienceHandbook) numpy arraysnumpy arrays provide the ability to create matrices, which are essentially 2-dimensional lists. (They can also be used to create even higher-dimensional arrays: tensors, etc.) Unlike Python lists, the elements of a numpy array must all have the same data type (e.g. numeric, string). Arrays can be created from Python lists:
###Code
print('a vector can be created from a list {}'.format(np.array([1, 4, 2, 5, 3])))
print('a matrix can be created from a list of lists:\n {}'.format(np.array([[1,1,1],[2,2,2],[3,3,3]]))) #\n is the newline character and makes the following text appear on the next line
###Output
_____no_output_____
###Markdown
There are also a bunch of built-in functions for generating arrays.
###Code
# Create a length-10 integer array filled with zeros
np.zeros(10, dtype=int)
# Create a 3x5 floating-point array filled with ones
np.ones((3, 5), dtype=float)
# Create a 3x5 array filled with 3.14
np.full((3, 5), 3.14)
# Create an array filled with a linear sequence
# Starting at 0, ending at 20, stepping by 2
# (this is similar to the built-in range() function)
np.arange(0, 20, 2)
# Create an array of five values evenly spaced between 0 and 1
np.linspace(0, 1, 5)
# Create a 3x3 array of uniformly distributed
# random values between 0 and 1
np.random.random((3, 3))
# Create a 3x3 array of normally distributed random values
# with mean 0 and standard deviation 1
np.random.normal(0, 1, (3, 3))
# Create a 3x3 array of random integers in the interval [0, 10)
np.random.randint(0, 10, (3, 3))
# Create a 3x3 identity matrix
np.eye(3)
# Create an uninitialized array of three integers
# The values will be whatever happens to already exist at that memory location
np.empty(3)
###Output
_____no_output_____
###Markdown
Array attributes
###Code
x1 = np.random.randint(10, size=6) # One-dimensional array
x2 = np.random.randint(10, size=(3, 4)) # Two-dimensional array
x3 = np.random.randint(10, size=(3, 4, 5)) # Three-dimensional array
print('x1=',x1)
print('x2=',x2)
print('x3=',x3)
print("x3 ndim: ", x3.ndim)
print("x3 shape:", x3.shape)
print("x3 size: ", x3.size)
###Output
_____no_output_____
###Markdown
Array IndexingYou can index arrays much in the same way that you can index Python lists. One-dimensional arrays function just like lists.
###Code
print('x1 is', x1)
print('first entry is', x1[0]) # first entry of the array
print('second and third entries are', x1[[1,2]])
print('first three entries are', x1[:3]) # slice the array
print('a second set of colons in the slice allow you to set the interval:', x1[::2]) # every other element of x1
print('you can reverse the order using a negative interval:', x1[3::-1]) # count backwards from entry at index 3 to the beginning
###Output
_____no_output_____
###Markdown
Multi-dimensional arrays are indexed using a tuple of indices. The indices for a 2d array are ordered as `(row_idx, col_idx)`
###Code
print(x2)
print('the element in the second row, third column is: ', x2[(1,2)])
###Output
_____no_output_____
###Markdown
You can slice multidimensional arrays as well! If you change the value of an entry in a slice, you change the value in the original object. This is what's known as a "view" of an array. Slices do not return an independent object, but instead can be thought of as just a reference to a subset of elements in the original object. However, if you do not want this behavior, you can avoid it by making a copy using the `.copy()` function.
###Code
print('x2 is originally: \n', x2)
slice_of_x2 = x2[1:,2:] # slices the 2nd row to the end, and 3rd column to the end
copied_slice_of_x2 = x2[1:,2:].copy() # note that slices provide a direct view of the original object, if you want an independent copy, use the .copy()
print('slice_of_x2 is:\n',slice_of_x2)
print('copied_slice_of_x2 is: \n', copied_slice_of_x2)
# let's change the value of slice_of_x2
slice_of_x2[0,0] = 99 # we changed the value of top left element to 99; this corresponds to the element in the 2nd row, 3rd column of x2
print('now x2 is: \n', x2)
print('slice_of_x2 is:\n',slice_of_x2)
print('and copied_slice_of_x2 is:\n',copied_slice_of_x2)
# If you change the value of a copy, it does not affect the original object
copied_slice_of_x2[0,0] = -50
print('copied_slice_of_x2 is: \n', copied_slice_of_x2)
print('x2 is unchanged by this operation:\n', x2)
###Output
_____no_output_____
###Markdown
ReshapingYou can reshape an array using the `.reshape()` function
###Code
print('np.arange(12):',np.arange(12))
reshaped = np.arange(12).reshape(3,4)
print('reshaped into a 3x4 array: \n',reshaped)
###Output
_____no_output_____
###Markdown
You can combine and split arrays in numpy. However, we won't be going too much in depth with that. Check out [this tutorial](https://github.com/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/02.02-The-Basics-Of-NumPy-Arrays.ipynb) for more info in that realm. Boolean arrays and maskingBoolean data represents True/False values, which can also be expressed as 1 or 0, respectively. You can compute boolean operations on arrays in numpy:
###Code
even_entries_bool = np.mod(reshaped,2)==0
print(even_entries_bool)
###Output
_____no_output_____
###Markdown
You can then use those arrays to select the entries that match the criterion:
###Code
reshaped[even_entries_bool]
###Output
_____no_output_____
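###Markdown
Note that selecting entries with a boolean mask always returns a flattened, one-dimensional array of the matching values, regardless of the shape of the original array. A tiny check (assuming the cells above have been run):
```
print(reshaped.shape)                     # (3, 4)
print(reshaped[even_entries_bool].shape)  # (6,)  the six even entries 0, 2, 4, 6, 8, 10
```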
###Markdown
ExerciseUsing the `is_prime()` function we previously wrote, write a function which takes a numpy array and returns a boolean array of the prime entries with the same shape as the input array
###Code
def is_prime_array(input_array):
"""
returns a boolean array of the same shape as input_array
with True if the element in the same position of input_array is prime
and False otherwise
example:
x = np.array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
is_prime_array(x)
should return
[[False, False, True, True],
[False, True, False, True],
[False, False, False, True]]
"""
# fill in with your code
return output_array
###Output
_____no_output_____
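###Markdown
One possible way to fill in the exercise (a sketch, assuming the `is_prime` function defined earlier and numpy imported as `np` are both available):
```
def is_prime_array(input_array):
    # apply is_prime() to every entry; int() is needed because numpy integers
    # are not of type int, and is_prime() checks the type explicitly
    flat_results = [is_prime(int(value)) for value in input_array.flatten()]
    # reshape the flat list of booleans back to the shape of the input
    return np.array(flat_results).reshape(input_array.shape)

x = np.array([[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]])
print(is_prime_array(x))
```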
###Markdown
Random functionsYou'll often need random numbers in programming. For example: taking a random sample of data, simulating a coin flip/dice roll, and generating simulated data. Numpy has a bunch of built-in random functions for this purpose. These functions are accessible in the `np.random` submodule.
###Code
np.random.rand(2,3,4) # generates uniform random numbers between 0,1
# arguments of rand(a,b,c,d,...) determine the dimensions of the array
# in this case, we created a 2x3x4 3d array
np.random.rand(10) # we can use this just to get a list of 10 random numbers from [0,1)
np.random.randn(5) # randn is the standard normal distribution (gaussian)
# by default, it has mean=0 and variance=1
# you can scale the gaussian to have different mean and variance
# for example, to have mean=3 and variance=2
def scaled_randn(mean, var, n_samples):
return mean + np.random.randn(n_samples)*var
scaled_randn(3,2,100)
# randint gives random integers. Arguments are (low, high, size)
# randint operates on the interval [low,high) (high is not included)
np.random.randint(1,10,20)
# if you want it to be inclusive, then you should call randint(low,high+1,size)
np.random.randint(1,11,20)
#randint is useful to generate a random sample with replacement of a collection
letters = np.array(['a','b','c','d','e','f'])
rand_idx = np.random.randint(0,len(letters),10) #len(letters) is the length of letters
letters[rand_idx]
# if you don't want sampling with replacement, you can use permutation
np.random.permutation(letters)
# another way to do sampling is with the choice(x[, size, replace, p]) function
print(np.random.choice(letters)) # x is the only required argument, will just return one random entry
print(np.random.choice(letters, 10)) # size lets you specify how many to sample
print(np.random.choice(letters, [2,3])) # can be multi dimensional
print(np.random.choice(letters, [2,3], False)) # whether to sample with replacement (default True)
# by default, choice() uses a uniform random probability (i.e. fair dice)
# sometimes you want to weight certain outcomes to be more likely
# p allows you to do that by specifying the probabilities of each outcome
# let's make 'a' be much more likely than the others
print(np.random.choice(letters, 100, True, [0.75,0.05,0.05,0.05,0.05,0.05]))
###Output
_____no_output_____ |
scraping_bs4_baseball.ipynb | ###Markdown
https://sports.news.naver.com/kbaseball/record/index?category=kbo
###Code
from selenium import webdriver
browser = webdriver.Chrome('./chromedriver.exe')
browser.get('https://sports.news.naver.com/kbaseball/record/index?category=kbo')
html = browser.page_source
from bs4 import BeautifulSoup
soup = BeautifulSoup(html, 'html.parser')
soup
tags = soup.select('tbody#regularTeamRecordList_table > tr')
tags
tags[0].select('th')
tags[0].select('span')[0]
###Output
_____no_output_____
###Markdown
Rank: the `th` element. Team name and the remaining stats: the `span` elements (accessed by index `[]`).
###Code
contents = []
for tag in tags :
content = []
rank = tag.select('th')[0].text.strip()
content.append(rank)
for dt in tag.select('span') :
data = dt.text.strip()
content.append(data)
contents.append(content)
contents
import pandas as pd
df = pd.DataFrame(contents, columns=['순위','팀','경기수','승','패','무','게임차','연속','출루율','장타율','최근10경기'])
df
###Output
_____no_output_____ |
tutorials/hydro_thermal/TS.ipynb | ###Markdown
$X_t = e^{\epsilon_t}\left[\mu_t+\gamma_t \frac{\mu_t}{\mu_{t-1}}(X_{t-1}-\mu_{t-1})\right] = e^{\epsilon_t}\gamma_t \frac{\mu_t}{\mu_{t-1}}X_{t-1}+e^{\epsilon_t}(1-\gamma_t)\mu_{t}$
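In the `sampler` code below, the first four returned values (`coef`) are the negated coefficients $-e^{\epsilon_t}\gamma_t\,\mu_t/\mu_{t-1}$ and the last four (`rhs`) are the constant terms $e^{\epsilon_t}(1-\gamma_t)\mu_t$. Judging from the `locations` argument passed to `add_continuous_uncertainty` further down, these are attached to the `inflow_past[i]` coefficients and to the right-hand sides of the `TS` constraints, so each sampled scenario effectively enforces
```
inflow_now[i] + coef[i] * inflow_past[i] == rhs[i]
```
which matches the recursion above with `inflow_past[i]` in the role of $X_{t-1}$ (hence the sign flip on `coef[i]`).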
###Code
def sampler(t):
def inner(random_state):
noise = numpy.exp(random_state.multivariate_normal(mean=[0]*4, cov=sigma[t%12]))
coef = [None]*4
rhs = [None]*4
for i in range(4):
coef[i] = -noise[i]*gamma[t%12][i]*exp_mu[t%12][i]/exp_mu[(t-1)%12][i]
rhs[i] = noise[i]*(1-gamma[t%12][i])*exp_mu[t%12][i]
return (coef+rhs)
return inner
T = 3
HydroThermal = MSLP(T=T, bound=0, discount=0.9906)
for t in range(T):
m = HydroThermal[t]
stored_now, stored_past = m.addStateVars(4, ub=hydro_['UB'][:4], name="stored")
inflow_now, inflow_past = m.addStateVars(4, name="inflow")
spill = m.addVars(4, obj=0.001, name="spill")
hydro = m.addVars(4, ub=hydro_['UB'][-4:], name="hydro")
deficit = m.addVars(
[(i,j) for i in range(4) for j in range(4)],
ub = [
demand.iloc[t%12][i] * deficit_['DEPTH'][j]
for i in range(4) for j in range(4)
],
obj = [
deficit_['OBJ'][j]
for i in range(4) for j in range(4)
],
name = "deficit")
thermal = [None] * 4
for i in range(4):
thermal[i] = m.addVars(
len(thermal_[i]),
ub=thermal_[i]['UB'],
lb=thermal_[i]['LB'],
obj=thermal_[i]['OBJ'],
name="thermal_{}".format(i)
)
exchange = m.addVars(5,5, obj=exchange_cost.values.flatten(),
ub=exchange_ub.values.flatten(), name="exchange")
thermal_sum = m.addVars(4, name="thermal_sum")
m.addConstrs(thermal_sum[i] == gurobipy.quicksum(thermal[i].values()) for i in range(4))
for i in range(4):
m.addConstr(
thermal_sum[i]
+ gurobipy.quicksum(deficit[(i,j)] for j in range(4))
+ hydro[i]
- gurobipy.quicksum(exchange[(i,j)] for j in range(5))
+ gurobipy.quicksum(exchange[(j,i)] for j in range(5))
== demand.iloc[t%12][i]
)
m.addConstr(
gurobipy.quicksum(exchange[(j,4)] for j in range(5))
- gurobipy.quicksum(exchange[(4,j)] for j in range(5))
== 0
)
m.addConstrs(
stored_now[i] + spill[i] + hydro[i] - stored_past[i] == inflow_now[i]
for i in range(4)
)
if t == 0:
m.addConstrs(stored_past[i] == stored_initial[i] for i in range(4))
m.addConstrs(inflow_now[i] == inflow_initial[i] for i in range(4))
else:
TS = m.addConstrs(inflow_now[i] + inflow_past[i] == 0 for i in range(4))
m.add_continuous_uncertainty(
uncertainty=sampler(t-1),
locations=(
[(TS[i],inflow_past[i]) for i in range(4)]
+ [TS[i] for i in range(4)]
),
)
HydroThermal.discretize(n_samples=100, random_state=888)
SDDP(HydroThermal).solve(
logFile=0,
max_iterations=100,
freq_evaluations=20,
n_simulations=-1,
tol=1e-2
)
result = EvaluationTrue(HydroThermal)
result.run(n_simulations=1000, random_state=666)
result.CI
###Output
_____no_output_____ |
notebooks/text2sql.ipynb | ###Markdown
Few-shot learning for text to SQL (Thai)
###Code
!pip install padthai
from padthai import *
data = [
'Q: ดึงข้อมูล DEPARTMENT ที่มีจำนวนน้อยกว่า 5 คนจากตาราง Worker\nA: SELECT DEPARTMENT, COUNT(WOKRED_ID) as "Number of Workers" FROM Worker GROUP BY DEPARTMENT HAVING COUNT(WORKED_ID) < 5;',
'Q: แสดง DEPARTMENT พร้อมจำนวนคนในแต่ละ DEPARTMENT จากตาราง Worker\nA: SELECT DEPARTMENT, COUNT(DEPARTMENT) as "Number of Workers" FROM Worker GROUP BY DEPARTMENT;',
'Q: แสดงรายการล่าสุดจากตาราง Worker\nA: SELECT * FROM Worker ORDER BY LAST_NAME DESC LIMIT 1;',
'Q: ดึงข้อมูลทั้งหมดจากตาราง Worker\nA: SELECT * FROM Worker;',
'Q: ดึงข้อมูลทั้งหมดจากตาราง buy\nA: SELECT * FROM buy;'
] # try add more data
gptneo_model = GPTNeoFewShot('./text2sql-gptneo-model', model_name='gpt-neo', size="125M")
gptneo_model.train(
data,
logging_dir='./log_text2sql_model_gptneo',
num_train_epochs=10,
train_size=0.9,
batch_size=2,
save_every_epochs=False
)
gptneo_model.gen('Q: แสดงรายการล่าสุดจากตาราง DEPARTMENT\nA: ',max_length=100)
###Output
_____no_output_____ |
lesson2-week1/Gradient Checking v1/Gradient+Checking+v1.ipynb | ###Markdown
Gradient CheckingWelcome to the final assignment for this week! In this assignment you will learn to implement and use gradient checking. You are part of a team working to make mobile payments available globally, and are asked to build a deep learning model to detect fraud--whenever someone makes a payment, you want to see if the payment might be fraudulent, such as if the user's account has been taken over by a hacker. But backpropagation is quite challenging to implement, and sometimes has bugs. Because this is a mission-critical application, your company's CEO wants to be really certain that your implementation of backpropagation is correct. Your CEO says, "Give me a proof that your backpropagation is actually working!" To give this reassurance, you are going to use "gradient checking".Let's do it!
###Code
# Packages
import numpy as np
from testCases import *
from gc_utils import (sigmoid, relu, dictionary_to_vector,
                      vector_to_dictionary, gradients_to_vector)
###Output
_____no_output_____
###Markdown
1) How does gradient checking work?Backpropagation computes the gradients $\frac{\partial J}{\partial \theta}$, where $\theta$ denotes the parameters of the model. $J$ is computed using forward propagation and your loss function.Because forward propagation is relatively easy to implement, you're confident you got that right, and so you're almost 100% sure that you're computing the cost $J$ correctly. Thus, you can use your code for computing $J$ to verify the code for computing $\frac{\partial J}{\partial \theta}$. Let's look back at the definition of a derivative (or gradient):$$ \frac{\partial J}{\partial \theta} = \lim_{\varepsilon \to 0} \frac{J(\theta + \varepsilon) - J(\theta - \varepsilon)}{2 \varepsilon} \tag{1}$$If you're not familiar with the "$\displaystyle \lim_{\varepsilon \to 0}$" notation, it's just a way of saying "when $\varepsilon$ is really really small."We know the following:- $\frac{\partial J}{\partial \theta}$ is what you want to make sure you're computing correctly. - You can compute $J(\theta + \varepsilon)$ and $J(\theta - \varepsilon)$ (in the case that $\theta$ is a real number), since you're confident your implementation for $J$ is correct. Lets use equation (1) and a small value for $\varepsilon$ to convince your CEO that your code for computing $\frac{\partial J}{\partial \theta}$ is correct! 2) 1-dimensional gradient checkingConsider a 1D linear function $J(\theta) = \theta x$. The model contains only a single real-valued parameter $\theta$, and takes $x$ as input.You will implement code to compute $J(.)$ and its derivative $\frac{\partial J}{\partial \theta}$. You will then use gradient checking to make sure your derivative computation for $J$ is correct. **Figure 1** : **1D linear model** The diagram above shows the key computation steps: First start with $x$, then evaluate the function $J(x)$ ("forward propagation"). Then compute the derivative $\frac{\partial J}{\partial \theta}$ ("backward propagation"). **Exercise**: implement "forward propagation" and "backward propagation" for this simple function. I.e., compute both $J(.)$ ("forward propagation") and its derivative with respect to $\theta$ ("backward propagation"), in two separate functions.
###Code
# GRADED FUNCTION: forward_propagation
def forward_propagation(x, theta):
"""
Implement the linear forward propagation (compute J) presented
in Figure 1 (J(theta) = theta * x)
Arguments:
x -- a real-valued input
theta -- our parameter, a real number as well
Returns:
J -- the value of function J, computed using the formula J(theta) = theta * x
"""
### START CODE HERE ### (approx. 1 line)
J = np.dot(theta, x)
### END CODE HERE ###
return J
x, theta = 2, 4
J = forward_propagation(x, theta)
print ("J = " + str(J))
###Output
J = 8
###Markdown
**Expected Output**: ** J ** 8 **Exercise**: Now, implement the backward propagation step (derivative computation) of Figure 1. That is, compute the derivative of $J(\theta) = \theta x$ with respect to $\theta$. To save you from doing the calculus, you should get $dtheta = \frac { \partial J }{ \partial \theta} = x$.
###Code
# GRADED FUNCTION: backward_propagation
def backward_propagation(x, theta):
"""
Computes the derivative of J with respect to theta (see Figure 1).
Arguments:
x -- a real-valued input
theta -- our parameter, a real number as well
Returns:
dtheta -- the gradient of the cost with respect to theta
"""
### START CODE HERE ### (approx. 1 line)
dtheta = x
### END CODE HERE ###
return dtheta
x, theta = 2, 4
dtheta = backward_propagation(x, theta)
print ("dtheta = " + str(dtheta))
###Output
dtheta = 2
###Markdown
**Expected Output**: ** dtheta ** 2 **Exercise**: To show that the `backward_propagation()` function is correctly computing the gradient $\frac{\partial J}{\partial \theta}$, let's implement gradient checking.**Instructions**:- First compute "gradapprox" using the formula above (1) and a small value of $\varepsilon$. Here are the Steps to follow: 1. $\theta^{+} = \theta + \varepsilon$ 2. $\theta^{-} = \theta - \varepsilon$ 3. $J^{+} = J(\theta^{+})$ 4. $J^{-} = J(\theta^{-})$ 5. $gradapprox = \frac{J^{+} - J^{-}}{2 \varepsilon}$- Then compute the gradient using backward propagation, and store the result in a variable "grad"- Finally, compute the relative difference between "gradapprox" and the "grad" using the following formula:$$ difference = \frac {\mid\mid grad - gradapprox \mid\mid_2}{\mid\mid grad \mid\mid_2 + \mid\mid gradapprox \mid\mid_2} \tag{2}$$You will need 3 Steps to compute this formula: - 1'. compute the numerator using np.linalg.norm(...) - 2'. compute the denominator. You will need to call np.linalg.norm(...) twice. - 3'. divide them.- If this difference is small (say less than $10^{-7}$), you can be quite confident that you have computed your gradient correctly. Otherwise, there may be a mistake in the gradient computation.
###Code
# GRADED FUNCTION: gradient_check
def gradient_check(x, theta, epsilon = 1e-7):
"""
Implement the backward propagation presented in Figure 1.
Arguments:
x -- a real-valued input
theta -- our parameter, a real number as well
epsilon -- tiny shift to the input to compute approximated
    gradient with formula (1)
Returns:
difference -- difference (2) between the approximated
gradient and the backward propagation gradient
"""
# Compute gradapprox using left side of formula (1).
#epsilon is small enough, you don't need to worry about the limit.
### START CODE HERE ### (approx. 5 lines)
thetaplus = theta + epsilon # Step 1
thetaminus = theta - epsilon # Step 2
J_plus = forward_propagation(x, thetaplus) # Step 3
J_minus = forward_propagation(x, thetaminus) # Step 4
gradapprox = (J_plus - J_minus)/ (2*epsilon) # Step 5
### END CODE HERE ###
# Check if gradapprox is close enough to the output
#of backward_propagation()
### START CODE HERE ### (approx. 1 line)
grad = backward_propagation(x, theta)
### END CODE HERE ###
### START CODE HERE ### (approx. 1 line)
numerator = np.linalg.norm(grad - gradapprox) # Step 1'
denominator = np.linalg.norm(grad) + np.linalg.norm(gradapprox) # Step 2'
difference = np.divide(numerator, denominator) # Step 3'
### END CODE HERE ###
if difference < 1e-7:
print ("The gradient is correct!")
else:
print ("The gradient is wrong!")
return difference
x, theta = 2, 4
difference = gradient_check(x, theta)
print("difference = " + str(difference))
###Output
The gradient is correct!
difference = 2.91933588329e-10
###Markdown
**Expected Output**:The gradient is correct! ** difference ** 2.9193358103083e-10 Congrats, the difference is smaller than the $10^{-7}$ threshold. So you can have high confidence that you've correctly computed the gradient in `backward_propagation()`. Now, in the more general case, your cost function $J$ has more than a single 1D input. When you are training a neural network, $\theta$ actually consists of multiple matrices $W^{[l]}$ and biases $b^{[l]}$! It is important to know how to do a gradient check with higher-dimensional inputs. Let's do it! 3) N-dimensional gradient checking The following figure describes the forward and backward propagation of your fraud detection model. **Figure 2** : **deep neural network***LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID*Let's look at your implementations for forward propagation and backward propagation.
###Code
def forward_propagation_n(X, Y, parameters):
"""
Implements the forward propagation (and computes the cost) presented in Figure 3.
Arguments:
X -- training set for m examples
Y -- labels for m examples
parameters -- python dictionary containing your parameters "W1", "b1",
"W2", "b2", "W3", "b3":
W1 -- weight matrix of shape (5, 4)
b1 -- bias vector of shape (5, 1)
W2 -- weight matrix of shape (3, 5)
b2 -- bias vector of shape (3, 1)
W3 -- weight matrix of shape (1, 3)
b3 -- bias vector of shape (1, 1)
Returns:
cost -- the cost function (logistic cost for one example)
"""
# retrieve parameters
m = X.shape[1]
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
W3 = parameters["W3"]
b3 = parameters["b3"]
# LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID
Z1 = np.dot(W1, X) + b1
A1 = relu(Z1)
Z2 = np.dot(W2, A1) + b2
A2 = relu(Z2)
Z3 = np.dot(W3, A2) + b3
A3 = sigmoid(Z3)
# Cost
    logprobs = np.multiply(-np.log(A3), Y) + np.multiply(-np.log(1 - A3), 1 - Y)
cost = 1./m * np.sum(logprobs)
cache = (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3)
return cost, cache
###Output
_____no_output_____
###Markdown
Now, run backward propagation.
###Code
def backward_propagation_n(X, Y, cache):
"""
Implement the backward propagation presented in figure 2.
Arguments:
X -- input datapoint, of shape (input size, 1)
Y -- true "label"
cache -- cache output from forward_propagation_n()
Returns:
gradients -- A dictionary with the gradients of the cost with respect
to each parameter, activation and pre-activation variables.
"""
m = X.shape[1]
(Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3) = cache
dZ3 = A3 - Y
dW3 = 1./m * np.dot(dZ3, A2.T)
db3 = 1./m * np.sum(dZ3, axis=1, keepdims = True)
dA2 = np.dot(W3.T, dZ3)
dZ2 = np.multiply(dA2, np.int64(A2 > 0))
dW2 = 1./m * np.dot(dZ2, A1.T) * 2
db2 = 1./m * np.sum(dZ2, axis=1, keepdims = True)
dA1 = np.dot(W2.T, dZ2)
dZ1 = np.multiply(dA1, np.int64(A1 > 0))
dW1 = 1./m * np.dot(dZ1, X.T)
db1 = 4./m * np.sum(dZ1, axis=1, keepdims = True)
gradients = {"dZ3": dZ3, "dW3": dW3, "db3": db3,
"dA2": dA2, "dZ2": dZ2, "dW2": dW2, "db2": db2,
"dA1": dA1, "dZ1": dZ1, "dW1": dW1, "db1": db1}
return gradients
###Output
_____no_output_____
###Markdown
You obtained some results on the fraud detection test set but you are not 100% sure of your model. Nobody's perfect! Let's implement gradient checking to verify if your gradients are correct. **How does gradient checking work?**.As in 1) and 2), you want to compare "gradapprox" to the gradient computed by backpropagation. The formula is still:$$ \frac{\partial J}{\partial \theta} = \lim_{\varepsilon \to 0} \frac{J(\theta + \varepsilon) - J(\theta - \varepsilon)}{2 \varepsilon} \tag{1}$$However, $\theta$ is not a scalar anymore. It is a dictionary called "parameters". We implemented a function "`dictionary_to_vector()`" for you. It converts the "parameters" dictionary into a vector called "values", obtained by reshaping all parameters (W1, b1, W2, b2, W3, b3) into vectors and concatenating them.The inverse function is "`vector_to_dictionary`" which outputs back the "parameters" dictionary. **Figure 2** : **dictionary_to_vector() and vector_to_dictionary()** You will need these functions in gradient_check_n()We have also converted the "gradients" dictionary into a vector "grad" using gradients_to_vector(). You don't need to worry about that.**Exercise**: Implement gradient_check_n().**Instructions**: Here is pseudo-code that will help you implement the gradient check.For each i in num_parameters:- To compute `J_plus[i]`: 1. Set $\theta^{+}$ to `np.copy(parameters_values)` 2. Set $\theta^{+}_i$ to $\theta^{+}_i + \varepsilon$ 3. Calculate $J^{+}_i$ using to `forward_propagation_n(x, y, vector_to_dictionary(`$\theta^{+}$ `))`. - To compute `J_minus[i]`: do the same thing with $\theta^{-}$- Compute $gradapprox[i] = \frac{J^{+}_i - J^{-}_i}{2 \varepsilon}$Thus, you get a vector gradapprox, where gradapprox[i] is an approximation of the gradient with respect to `parameter_values[i]`. You can now compare this gradapprox vector to the gradients vector from backpropagation. Just like for the 1D case (Steps 1', 2', 3'), compute: $$ difference = \frac {\| grad - gradapprox \|_2}{\| grad \|_2 + \| gradapprox \|_2 } \tag{3}$$
###Code
# GRADED FUNCTION: gradient_check_n
def gradient_check_n(parameters, gradients, X, Y, epsilon = 1e-7):
"""
Checks if backward_propagation_n computes correctly the gradient
of the cost output by forward_propagation_n
Arguments:
parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3":
grad -- output of backward_propagation_n, contains gradients of
the cost with respect to the parameters.
x -- input datapoint, of shape (input size, 1)
y -- true "label"
epsilon -- tiny shift to the input to compute approximated gradient
with formula(1)
Returns:
difference -- difference (2) between the approximated gradient and
the backward propagation gradient
"""
# Set-up variables
parameters_values, _ = dictionary_to_vector(parameters)
grad = gradients_to_vector(gradients)
num_parameters = parameters_values.shape[0]
J_plus = np.zeros((num_parameters, 1))
J_minus = np.zeros((num_parameters, 1))
gradapprox = np.zeros((num_parameters, 1))
# Compute gradapprox
for i in range(num_parameters):
# Compute J_plus[i]. Inputs: "parameters_values, epsilon". Output = "J_plus[i]".
# "_" is used because the function you have to outputs t
#wo parameters but we only care about the first one
### START CODE HERE ### (approx. 3 lines)
thetaplus = np.copy(parameters_values) # Step 1
thetaplus[i][0] = thetaplus[i][0] + epsilon # Step 2
J_plus[i], _ = forward_propagation_n(X, Y, vector_to_dictionary(thetaplus))# Step 3
### END CODE HERE ###
# Compute J_minus[i]. Inputs: "parameters_values, epsilon". Output = "J_minus[i]".
### START CODE HERE ### (approx. 3 lines)
thetaminus = np.copy(parameters_values) # Step 1
thetaminus[i][0] = thetaminus[i][0] - epsilon # Step 2
J_minus[i], _ = forward_propagation_n(X, Y, vector_to_dictionary(thetaminus)) # Step 3
### END CODE HERE ###
# Compute gradapprox[i]
### START CODE HERE ### (approx. 1 line)
gradapprox[i] = (J_plus[i] - J_minus[i])/ (2*epsilon)
### END CODE HERE ###
# Compare gradapprox to backward propagation gradients by computing difference.
### START CODE HERE ### (approx. 1 line)
numerator = np.linalg.norm(grad - gradapprox) # Step 1'
denominator = np.linalg.norm(grad) + np.linalg.norm(gradapprox) # Step 2'
difference = numerator/denominator # Step 3'
### END CODE HERE ###
if difference > 2e-7:
print ("\033[93m" + "There is a mistake in the backward propagation!
difference = " + str(difference) + "\033[0m")
else:
print ("\033[92m" + "Your backward propagation works perfectly fine!
difference = " + str(difference) + "\033[0m")
return difference
X, Y, parameters = gradient_check_n_test_case()
cost, cache = forward_propagation_n(X, Y, parameters)
gradients = backward_propagation_n(X, Y, cache)
difference = gradient_check_n(parameters, gradients, X, Y)
###Output
[93mThere is a mistake in the backward propagation! difference = 0.285093156781[0m
|
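###Markdown
Note on the result above: the large difference is expected here, because `backward_propagation_n` as written contains (apparently deliberate, for the purposes of the exercise) errors: the extra factor of `2` on `dW2` and the `4./m` on `db1`. A sketch of the corrected lines, following the standard backpropagation formulas used for the other gradients in that function:
```
dW2 = 1./m * np.dot(dZ2, A1.T)
db1 = 1./m * np.sum(dZ1, axis=1, keepdims=True)
```
With those two lines corrected, re-running the cells should bring the reported difference below the `2e-7` threshold.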
notebooks/Day2_3-Dimensionality-Reduction.ipynb | ###Markdown
Dimensionality ReductionIn machine learning, we are often dealing with very large datasets, not only in terms of the number of rows, but also in the number of columns (*i.e.* features or predictors). This presents a challenge in choosing which variables ought to be included in a particular analysis. Inevitably, some features will be correlated with other features, implying that they are partially redundant in terms of explaining part of the variability in the outcome variable. To deal with this, we can apply one of several dimensionality reduction techniques, which aim to identify latent variables that are associated with both the features and the outcomes, but are complementary with one another in terms of the variability that they explain. Principal Component AnalysisThe first **unsupervised learning** method that we will look at is Principal Component Analysis (PCA). It is a technique to reduce the dimensionality of the data by creating a linear projection. That is, we find new features to represent the data that are a linear combination of the old data (i.e. we rotate it). Thus, we can think of PCA as a projection of our data onto a *new* feature space. The way PCA finds these new directions is by looking for the directions of maximum variance. Usually only a few components that explain most of the variance in the data are kept. Here, the premise is to reduce the size (dimensionality) of a dataset while capturing most of its information. There are many reasons why dimensionality reduction can be useful: it can reduce the computational cost when running learning algorithms, decrease the storage space, and may help with the so-called "curse of dimensionality," which we will discuss in greater detail later. Here is an illustration using the iris dataset we've seen previously.
###Code
from sklearn.datasets import load_iris
iris = load_iris()
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
iris_df = (pd.DataFrame(iris.data, columns=iris.feature_names)
.assign(species=iris.target_names[iris.target]))
iris.feature_names
###Output
_____no_output_____
###Markdown
It's hard to visualize a 4-dimensional dataset simultaneously, but we can plot the data pairwise to get an idea of how the output (species labels) can be discriminated on the basis of each variable relative to another.
###Code
from itertools import combinations
for xy in combinations(iris.feature_names, 2):
x, y = xy
sns.lmplot(x, y,
data=iris_df,
fit_reg=False,
hue="species");
###Output
_____no_output_____
###Markdown
We can see, for example, that the petal variables appear to be redundant with respect to one another. What PCA will do is formulate a set of **orthogonal** variables, where the number of orthogonal axes is smaller than the number of original variables. It then **projects** the original data onto these axes to obtain transformed variables. The key concept is that each set of axes constructed maximizes the amount of residual variability explained. We can then fit models to the subset of orthogonal variables that accounts for most of the variability. Let's do a PCA by hand first, before using scikit-learn: StandardizationAs we saw in the previous unit, an important first step for many datasets is to **standardize** the original data. It's important for all variables to be on the same scale because the algorithm will be seeking to maximize variance along each axis. If one variable is numerically larger than another variable, it will tend to have larger variance, and will therefore garner undue attention from the algorithm. This dataset is approximately on the same scale, though there are differences, particularly in the fourth variable (petal width):
###Code
iris.data[:5]
###Output
_____no_output_____
###Markdown
Let's apply a standardization transformation from scikit-learn:
###Code
from sklearn.preprocessing import StandardScaler
X_std = StandardScaler().fit_transform(iris.data)
X_std[:5]
###Output
_____no_output_____
###Markdown
EigendecompositionThe PCA algorithm is driven by the eigenvalues and eigenvectors of the original dataset: the eigenvectors determine the direction of each component, while the eigenvalues determine the length (magnitude) of the component. The eigendecomposition is performed on the covariance matrix of the data, which we can derive here using NumPy.
###Code
Σ = np.cov(X_std.T)
evals, evecs = np.linalg.eig(Σ)
evals
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d import proj3d
from matplotlib.patches import FancyArrowPatch
variables = [name[:name.find(' (')]for name in iris.feature_names]
class Arrow3D(FancyArrowPatch):
def __init__(self, xs, ys, zs, *args, **kwargs):
FancyArrowPatch.__init__(self, (0,0), (0,0), *args, **kwargs)
self._verts3d = xs, ys, zs
def draw(self, renderer):
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.set_positions((xs[0],ys[0]),(xs[1],ys[1]))
FancyArrowPatch.draw(self, renderer)
fig = plt.figure(figsize=(7,7))
ax = fig.add_subplot(111, projection='3d')
ax.plot(X_std[:,0], X_std[:,1], X_std[:,2], 'o', markersize=8,
color='green',
alpha=0.2)
mean_x, mean_y, mean_z = X_std.mean(0)[:-1]
ax.plot([mean_x], [mean_y], [mean_z], 'o', markersize=10, color='red', alpha=0.5)
for v in evecs:
a = Arrow3D([mean_x, v[0]], [mean_y, v[1]], [mean_z, v[2]], mutation_scale=20, lw=3, arrowstyle="-|>", color="r")
ax.add_artist(a)
ax.set_xlabel(variables[0])
ax.set_ylabel(variables[1])
ax.set_zlabel(variables[2])
plt.title('Eigenvectors')
###Output
_____no_output_____
###Markdown
Selecting componentsThe eigenvectors are the principal components, which are normalized linear combinations of the original features. They are ordered in terms of the amount of variation in the dataset that they account for.
###Code
fig, axes = plt.subplots(2, 1)
total = evals.sum()
variance_explained = 100* np.sort(evals)[::-1]/total
axes[0].bar(range(4), variance_explained)
axes[0].set_xticks(range(4));
axes[0].set_xticklabels(['Component ' + str(i+1) for i in range(4)])
axes[1].plot(range(5), np.r_[0, variance_explained.cumsum()])
axes[1].set_xticks(range(5));
###Output
_____no_output_____
###Markdown
Projecting the dataThe next step is to **project** the original data onto the orthogonal axes.Let's extract the first two eigenvectors and use them as the projection matrix for the original (standardized) variables.
###Code
W = evecs[:, :2]
Y = X_std @ W
df_proj = pd.DataFrame(np.hstack((Y, iris.target.astype(int).reshape(-1, 1))),
columns=['Component 1', 'Component 2', 'Species'])
sns.lmplot('Component 1', 'Component 2',
data=df_proj,
fit_reg=False,
hue='Species')
###Output
_____no_output_____
###Markdown
PCA in scikit-learn`scikit-learn` provides a PCA transformation in its `decomposition` module.
###Code
from sklearn.decomposition import PCA
pca = PCA(n_components=3, whiten=True).fit(iris.data)
X_pca = pca.transform(iris.data)
iris_df['First Component'] = X_pca[:, 0]
iris_df['Second Component'] = X_pca[:, 1]
iris_df['Third Component'] = X_pca[:, 2]
sns.lmplot('First Component', 'Second Component',
data=iris_df,
fit_reg=False,
hue="species");
sns.lmplot('Second Component', 'Third Component',
data=iris_df,
fit_reg=False,
hue="species");
###Output
_____no_output_____
###Markdown
ExerciseImport the wine dataset and perform PCA on the predictor variables, then decide how many principal components you would select.
###Code
wine = pd.read_table('../data/wine.dat', sep='\s+')
wine.head()
# Write your answer here
###Output
_____no_output_____ |
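###Markdown
One possible approach to the exercise (a sketch; it assumes the first column of `wine` holds the class label and the remaining columns are the predictors, so adjust the column selection if the file is laid out differently):
```
# standardize the predictors, then fit a PCA and inspect cumulative variance explained
X_wine = wine.iloc[:, 1:]
X_wine_std = StandardScaler().fit_transform(X_wine)
pca_wine = PCA().fit(X_wine_std)

cum_var = np.cumsum(pca_wine.explained_variance_ratio_)
plt.plot(range(1, len(cum_var) + 1), cum_var, marker='o')
plt.xlabel('Number of components')
plt.ylabel('Cumulative variance explained');
```
A common rule of thumb is to keep enough components to explain a large share (say, 80-90%) of the variance, or to look for an "elbow" in this curve.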
Urbanfitters_Scraping/code.ipynb | ###Markdown
---Scrape the data from the Urban Outfitters website: https://www.urbanoutfitters.com/?ref=logo
###Code
from urllib.request import urlopen as ureq
from bs4 import BeautifulSoup
import requests
url = 'https://www.urbanoutfitters.com/home'
page = requests.get(url)
html = BeautifulSoup(page.content,'html.parser')
div1 = html.findAll('div')
print(len(div1))
div1
###Output
_____no_output_____ |
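###Markdown
A possible follow-up (a sketch only; the live site is likely rendered largely with JavaScript, so a plain `requests` download may contain little product data, in which case `selenium` could be reused as in the baseball notebook): collect whatever links are present in the parsed `html` soup.
```
# gather the href of every anchor tag that the static HTML happens to contain
links = [a.get('href') for a in html.findAll('a') if a.get('href')]
print(len(links), 'links found')
print(links[:10])
```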
tests_notebook.ipynb | ###Markdown
This file is part of the [test suite](./tests) and will be moved there when [nbval#116](https://github.com/computationalmodelling/nbval/issues/116#issuecomment-793148404) is fixed. See [DEMO.ipynb](DEMO.ipynb) instead for notebook examples.
###Code
from functools import partial
from time import sleep
from tqdm.notebook import tqdm_notebook
from tqdm.notebook import tnrange
# avoid displaying widgets by default (pollutes output cells)
tqdm = partial(tqdm_notebook, display=False)
trange = partial(tnrange, display=False)
help(tqdm_notebook.display)
# NBVAL_TEST_NAME: basic use
with tqdm_notebook(range(9)) as t:
for i in t:
print(i)
assert t.container.children[1].bar_style == 'success'
t = tqdm_notebook(total=9)
t.update()
t.refresh()
# NBVAL_TEST_NAME: reset
print(t)
t.reset(total=5)
t.update(1)
print(t)
# NBVAL_TEST_NAME: bar_style
assert t.container.children[1].bar_style != 'danger'
t.close()
assert t.container.children[1].bar_style == 'danger'
# NBVAL_TEST_NAME: repr
with trange(1, 9) as t:
print(t)
print(t.container)
it = iter(t)
print(next(it))
print(t)
print(t.container)
t = trange(9)
# NBVAL_TEST_NAME: display pre
print(t)
print(t.container)
for i in t:
pass
# NBVAL_TEST_NAME: display post
print(t)
print(t.container)
# NBVAL_TEST_NAME: no total
with tqdm(desc="no total") as t:
print(t)
t.update()
print(t)
# NBVAL_TEST_NAME: ncols
with trange(9, ncols=66) as t:
print(t)
for i in t:
if i == 1:
break
print(t)
# NBVAL_TEST_NAME: leave
assert (False, None) != (getattr(t.container, "visible", False), getattr(t.container, "_ipython_display_", None))
for total in (1, 9):
with tqdm(total=total, leave=False) as t:
print(t)
t.update()
print(t)
assert total != 1 or (False, None) == (
getattr(t.container, "visible", False), getattr(t.container, "_ipython_display_", None)
)
# NBVAL_TEST_NAME: no total
with tqdm() as t:
print(t)
t.update()
print(t)
# NBVAL_TEST_NAME: reset and disable
for disable in (None, True):
print("disable:", disable)
with tqdm(total=1, disable=disable) as t:
print(t)
t.update()
print(t)
t.reset(total=9)
print(t)
t.update()
print(t)
with tqdm(disable=disable) as t:
print(t)
t.update()
print(t)
t.reset(total=1)
print(t)
t.update()
print(t)
# NBVAL_TEST_NAME: bar_format
with tqdm(total=1, bar_format='{l_bar}{r_bar}') as t:
print(t)
t.update()
print(t)
with tqdm(total=1, bar_format='{l_bar}{bar}') as t:
print(t)
t.update()
print(t)
# NBVAL_TEST_NAME: colour
assert t.colour != 'yellow'
with tqdm(total=1, colour='yellow') as t:
print(t)
t.update()
print(t)
assert t.colour == 'yellow'
# NBVAL_TEST_NAME: delay no trigger
with tqdm_notebook(total=1, delay=10) as t:
t.update()
# NBVAL_TEST_NAME: delay trigger
with tqdm_notebook(total=1, delay=0.1) as t:
sleep(0.1)
t.update()
###Output
_____no_output_____ |
Mucinous/Data_Processing.ipynb | ###Markdown
Imports
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
#Allows dataset from drive to be utilized
from google.colab import drive
drive.mount("/content/drive", force_remount=True)
#Dataset from location in drive
ds=pd.read_csv(LOCATION_OF_ORIGINAL_DATASET)
###Output
_____no_output_____
###Markdown
Initial Dataset VisualizedThese are various ways to analyze the amount of null values in each column of the dataset
###Code
ds.shape
print(ds)
#ds.head()
ds.loc[:, ds.isnull().any()].head()
pd.set_option('display.max_rows', 149)
#print(ds)
ds.isnull().sum()
#Heatmap of dataset showing the null values
sns.heatmap(ds.isnull(),yticklabels=False,cbar=False,cmap='viridis')
###Output
_____no_output_____
###Markdown
Data Manipulation Columns to drop
###Code
drop_columns = [
'LUNG_RADS_DIAMETER_MM',
'cancer_type',
'eus_performed',
'fna_performed',
'cyto_results',
'operation_performed',
'path_cyst_id',
'path_mucin',
'path_cea',
'other_cysts',
'eus_dx',
'eus_consistency',
'multiples',
'ENTROPY_VOXELS',
'KURTOSIS_VOXELS',
'MEAN_DEVIATION_VOXELS',
'SKEWNESS_VOXELS',
'STD_DEV_VOXELS',
'VARIANCE_VOXELS',
#'ENERGY_VOXELS',
#'MAX_VOXELS',
#'MEAN_VOXELS',
#'MEDIAN_VOXELS',
#'MIN_VOXELS',
#'ROOT_MEAN_SQUARE_VOXELS',
#'SOLID_VOLUME_VOXELS',
#'UNIFORMITY_VOXELS',
#'VOLUME_VOXELS',
'path_duct',
'path_cyst_cat',
'path_dysplastic_margin',
'path_malignancy',
'path_grade_dysplasia',
'cystid',
'character_comment',
'othercyst_comments',
'path_cyst_count',
'path_dx',
'path_num_cysts',
'study_idnumber',
'study_id',
'path_available',
'serum_ca19',
'serum_cea',
'path_size',
'mucin_cyst',
'path_size',
'comments_clinical',
'path_pancreatitis',
'operation_performed_other',
'eus_fluid_other',
'eus_color',
'eus_other',
'amylase_cyst',
'cea_cyst',
'ca19_cyst',
'cyst_location_other',
'max_duct_dil',
'comments_rads',
'cancer_type',
'pancreatitis_dx_year',
'first_scan_reason',
'rad_dx_first_scan',
'rad_dx_last_scan',
'cea_cat',
'ANTPOST_LENGTH_END_MM_X',
'ANTPOST_LENGTH_END_MM_Y',
'ANTPOST_LENGTH_END_MM_Z',
'ANTPOST_LENGTH_START_MM_X',
'ANTPOST_LENGTH_START_MM_Y',
'ANTPOST_LENGTH_START_MM_Z',
'AUTO_CORONAL_LONG_AXIS_END_MM_X',
'AUTO_CORONAL_LONG_AXIS_END_MM_Y',
'AUTO_CORONAL_LONG_AXIS_END_MM_Z',
'AUTO_CORONAL_LONG_AXIS_END_VOXEL',
'V',
'W',
'AUTO_CORONAL_LONG_AXIS_START_MM_',
'Z',
'AA',
'AUTO_CORONAL_LONG_AXIS_START_VOX',
'AC',
'AD',
'AUTO_CORONAL_SHORT_AXIS_END_MM_X',
'AUTO_CORONAL_SHORT_AXIS_END_MM_Y',
'AUTO_CORONAL_SHORT_AXIS_END_MM_Z',
'AUTO_CORONAL_SHORT_AXIS_END_VOXE',
'AI',
'AJ',
'AUTO_CORONAL_SHORT_AXIS_START_MM',
'AM',
'AN',
'AUTO_CORONAL_SHORT_AXIS_START_VO',
'AP',
'AQ',
'AUTO_LARGEST_PLANAR_DIAMETER_END',
'AS',
'AT',
'AU',
'AV',
'AW',
'AUTO_LARGEST_PLANAR_DIAMETER_STA',
'AZ',
'BA',
'BB',
'BC',
'BD',
'AUTO_LARGEST_PLANAR_ORTHO_DIAMET',
'BF',
'BG',
'BH',
'BI',
'BJ',
'BK',
'BL',
'BM',
'BN',
'BO',
'BP',
'BQ',
'AUTO_SAGITTAL_LONG_AXIS_END_MM_X',
'AUTO_SAGITTAL_LONG_AXIS_END_MM_Y',
'AUTO_SAGITTAL_LONG_AXIS_END_MM_Z',
'AUTO_SAGITTAL_LONG_AXIS_END_VOXE',
'BV',
'BW',
'AUTO_SAGITTAL_LONG_AXIS_START_MM',
'BZ',
'CA',
'AUTO_SAGITTAL_LONG_AXIS_START_VO',
'CC',
'CD',
'AUTO_SAGITTAL_SHORT_AXIS_END_MM_',
'CF',
'CG',
'AUTO_SAGITTAL_SHORT_AXIS_END_VOX',
'CI',
'CJ',
'AUTO_SAGITTAL_SHORT_AXIS_START_M',
'CM',
'CN',
'AUTO_SAGITTAL_SHORT_AXIS_START_V',
'CP',
'CQ',
'CENTROID_X_MM',
'CENTROID_Y_MM',
'CENTROID_Z_MM',
'CONFIRMATION_STATUS',
'CORONAL_LONG_AXIS_END_MM_X',
'CORONAL_LONG_AXIS_END_MM_Y',
'CORONAL_LONG_AXIS_END_MM_Z',
'CORONAL_LONG_AXIS_END_VOXELS_X',
'CORONAL_LONG_AXIS_END_VOXELS_Y',
'CORONAL_LONG_AXIS_END_VOXELS_Z',
'CORONAL_LONG_AXIS_START_MM_X',
'CORONAL_LONG_AXIS_START_MM_Y',
'CORONAL_LONG_AXIS_START_MM_Z',
'CORONAL_LONG_AXIS_START_VOXELS_X',
'CORONAL_LONG_AXIS_START_VOXELS_Y',
'CORONAL_LONG_AXIS_START_VOXELS_Z',
'CORONAL_SHORT_AXIS_END_MM_X',
'CORONAL_SHORT_AXIS_END_MM_Y',
'CORONAL_SHORT_AXIS_END_MM_Z',
'CORONAL_SHORT_AXIS_END_VOXELS_X',
'CORONAL_SHORT_AXIS_END_VOXELS_Y',
'CORONAL_SHORT_AXIS_END_VOXELS_Z',
'CORONAL_SHORT_AXIS_START_MM_X',
'CORONAL_SHORT_AXIS_START_MM_Y',
'CORONAL_SHORT_AXIS_START_MM_Z',
'CORONAL_SHORT_AXIS_START_VOXELS_',
'DY',
'DZ',
'CRANIALCAUDAL_LENGTH_END_MM_X',
'CRANIALCAUDAL_LENGTH_END_MM_Y',
'CRANIALCAUDAL_LENGTH_END_MM_Z',
'CRANIALCAUDAL_LENGTH_START_MM_X',
'CRANIALCAUDAL_LENGTH_START_MM_Y',
'CRANIALCAUDAL_LENGTH_START_MM_Z',
'FOOTPRINT_END_MM_X',
'FOOTPRINT_END_MM_Y',
'FOOTPRINT_END_MM_Z',
'FOOTPRINT_END_VOXELS_X',
'FOOTPRINT_END_VOXELS_Y',
'FOOTPRINT_END_VOXELS_Z',
'FOOTPRINT_START_MM_X',
'FOOTPRINT_START_MM_Y',
'FOOTPRINT_START_MM_Z',
'FOOTPRINT_START_VOXELS_X',
'FOOTPRINT_START_VOXELS_Y',
'FOOTPRINT_START_VOXELS_Z',
'FOOTPRINT_X_MM',
'FOOTPRINT_Y_MM',
'FOOTPRINT_Z_MM',
'INIT_DRAG_LONG_END_PATIENT_X',
'INIT_DRAG_LONG_END_PATIENT_Y',
'INIT_DRAG_LONG_END_PATIENT_Z',
'INIT_DRAG_LONG_START_PATIENT_X',
'INIT_DRAG_LONG_START_PATIENT_Y',
'INIT_DRAG_LONG_START_PATIENT_Z',
'L1_AXIS_END_X_MM',
'L1_AXIS_END_Y_MM',
'L1_AXIS_END_Z_MM',
'L1_AXIS_START_X_MM',
'L1_AXIS_START_Y_MM',
'L1_AXIS_START_Z_MM',
'L1_UNIT_AXIS_X_MM',
'L1_UNIT_AXIS_Y_MM',
'L1_UNIT_AXIS_Z_MM',
'L2_AXIS_END_X_MM',
'L2_AXIS_END_Y_MM',
'L2_AXIS_END_Z_MM',
'L2_AXIS_START_X_MM',
'L2_AXIS_START_Y_MM',
'L2_AXIS_START_Z_MM',
'L2_UNIT_AXIS_X_MM',
'L2_UNIT_AXIS_Y_MM',
'L2_UNIT_AXIS_Z_MM',
'L3_AXIS_END_X_MM',
'L3_AXIS_END_Y_MM',
'L3_AXIS_END_Z_MM',
'L3_AXIS_START_X_MM',
'L3_AXIS_START_Y_MM',
'L3_AXIS_START_Z_MM',
'L3_UNIT_AXIS_X_MM',
'L3_UNIT_AXIS_Y_MM',
'L3_UNIT_AXIS_Z_MM',
'LARGEST_PLANAR_DIAMETER_END_MM_X',
'LARGEST_PLANAR_DIAMETER_END_MM_Y',
'LARGEST_PLANAR_DIAMETER_END_MM_Z',
'LARGEST_PLANAR_DIAMETER_END_VOXE',
'HD',
'HE',
'LARGEST_PLANAR_DIAMETER_START_MM',
'HH',
'HI',
'LARGEST_PLANAR_DIAMETER_START_VO',
'HK',
'HL',
'LARGEST_PLANAR_ORTHO_DIAMETER_EN',
'HN',
'HO',
'HP',
'HQ',
'HR',
'LARGEST_PLANAR_ORTHO_DIAMETER_ST',
'HU',
'HV',
'HW',
'HX',
'HY',
'LESION_TYPE',
'LUNG_RADS',
'LUNG_RADS_ISOLATION',
'PERCENT_AIR',
'PERCENT_GGO',
'PERCENT_SOLID',
'PERCENT_SOLID_INCL_AIR',
'SAGITTAL_LONG_AXIS_END_MM_X',
'SAGITTAL_LONG_AXIS_END_MM_Y',
'SAGITTAL_LONG_AXIS_END_MM_Z',
'SAGITTAL_LONG_AXIS_END_VOXELS_X',
'SAGITTAL_LONG_AXIS_END_VOXELS_Y',
'SAGITTAL_LONG_AXIS_END_VOXELS_Z',
'SAGITTAL_LONG_AXIS_START_MM_X',
'SAGITTAL_LONG_AXIS_START_MM_Y',
'SAGITTAL_LONG_AXIS_START_MM_Z',
'SAGITTAL_LONG_AXIS_START_VOXELS_',
'JG',
'JH',
'SAGITTAL_SHORT_AXIS_END_MM_X',
'SAGITTAL_SHORT_AXIS_END_MM_Y',
'SAGITTAL_SHORT_AXIS_END_MM_Z',
'SAGITTAL_SHORT_AXIS_END_VOXELS_X',
'SAGITTAL_SHORT_AXIS_END_VOXELS_Y',
'SAGITTAL_SHORT_AXIS_END_VOXELS_Z',
'SAGITTAL_SHORT_AXIS_START_MM_X',
'SAGITTAL_SHORT_AXIS_START_MM_Y',
'SAGITTAL_SHORT_AXIS_START_MM_Z',
'SAGITTAL_SHORT_AXIS_START_VOXELS',
'JT',
'JU',
'SLICE_INDEX',
'TRANSVERSE_LENGTH_END_MM_X',
'TRANSVERSE_LENGTH_END_MM_Y',
'TRANSVERSE_LENGTH_END_MM_Z',
'TRANSVERSE_LENGTH_START_MM_X',
'TRANSVERSE_LENGTH_START_MM_Y',
'TRANSVERSE_LENGTH_START_MM_Z',
'VOLUMETRIC_LENGTH_END_MM_X',
'VOLUMETRIC_LENGTH_END_MM_Y',
'VOLUMETRIC_LENGTH_END_MM_Z',
'VOLUMETRIC_LENGTH_END_VOXELS_X',
'VOLUMETRIC_LENGTH_END_VOXELS_Y',
'VOLUMETRIC_LENGTH_END_VOXELS_Z',
'VOLUMETRIC_LENGTH_START_MM_X',
'VOLUMETRIC_LENGTH_START_MM_Y',
'VOLUMETRIC_LENGTH_START_MM_Z',
'VOLUMETRIC_LENGTH_START_VOXELS_X',
'VOLUMETRIC_LENGTH_START_VOXELS_Y',
'VOLUMETRIC_LENGTH_START_VOXELS_Z',
'AVG_DENSITY',
'MASS_GRAMS',
'AVG_DENSITY_OF_GGO_REGION',
'AVG_DENSITY_OF_SOLID_REGION',
'EA',
'EB',
'INIT_DRAG_AXIAL_LA_END_MM_X',
'INIT_DRAG_AXIAL_LA_END_MM_Y',
'INIT_DRAG_AXIAL_LA_END_MM_Z',
'INIT_DRAG_AXIAL_LA_START_MM_X',
'INIT_DRAG_AXIAL_LA_START_MM_Y',
'INIT_DRAG_AXIAL_LA_START_MM_Z',
'HM',
'HS',
'HT',
'HZ',
'IC',
'ID',
'IE',
'IF',
'IG',
'MESH_STRUCTURE_MODIFIED',
'JP',
'JQ',
'KC',
'KD',
'SEG_BOUNDING_BOX_END_MM_X',
'SEG_BOUNDING_BOX_END_MM_Y',
'SEG_BOUNDING_BOX_END_MM_Z',
'SEG_BOUNDING_BOX_START_MM_X',
'SEG_BOUNDING_BOX_START_MM_Y',
'SEG_BOUNDING_BOX_START_MM_Z',
'MRNFROMHEALTHMYNE',
'ABS_CHANGE_BL_LA',
'ABS_CHANGE_BL_SA',
'ABS_CHANGE_BL_SLDV',
'ABS_CHANGE_BL_VOL',
'ABS_CHANGE_PR_LA',
'ABS_CHANGE_PR_SA',
'ABS_CHANGE_PR_SLDV',
'ABS_CHANGE_PR_VOL',
'AE',
'AF',
'AL',
'AR',
'AY',
'BE',
'BR',
'BS',
'BT',
'BU',
'BX',
'BY',
'CE',
'CL',
'CO',
'CR',
'CS',
'CV',
'CW',
'CY',
'CZ',
'EK',
'EL',
'DOUBLING_TIME_LA',
'DOUBLING_TIME_LA_DAYS',
'DOUBLING_TIME_SA',
'DOUBLING_TIME_SA_DAYS',
'DOUBLING_TIME_SLDV',
'DOUBLING_TIME_SLDV_DAYS',
'DOUBLING_TIME_VOL',
'DOUBLING_TIME_VOL_DAYS',
'INIT_DRAG_AXIAL_SA_END_MM_X',
'INIT_DRAG_AXIAL_SA_END_MM_Y',
'INIT_DRAG_AXIAL_SA_END_MM_Z',
'INIT_DRAG_AXIAL_SA_START_MM_X',
'INIT_DRAG_AXIAL_SA_START_MM_Y',
'INIT_DRAG_AXIAL_SA_START_MM_Z',
'IJ',
'IK',
'IN',
'IO',
'IQ',
'IR',
'IT',
'IU',
'IV',
'IW',
'IX',
'JA',
'JB',
'JC',
'JD',
'JE',
'PERCENT_CHANGE_BL_LA',
'PERCENT_CHANGE_BL_SA',
'PERCENT_CHANGE_BL_SLDV',
'PERCENT_CHANGE_BL_VOL',
'PERCENT_CHANGE_PR_LA',
'PERCENT_CHANGE_PR_SA',
'PERCENT_CHANGE_PR_SLDV',
'PERCENT_CHANGE_PR_VOL',
'RATE_OF_GROWTH_LA',
'RATE_OF_GROWTH_SA',
'RATE_OF_GROWTH_SLDV',
'RATE_OF_GROWTH_VOL',
'LA',
'LB',
'LN',
'LO',
]
ds.drop(drop_columns,axis=1,inplace=True)
###Output
_____no_output_____
###Markdown
Patients to drop
###Code
#Rows (Patients) to Drop
ds = ds[ds['mucinous'].notna()] # One patient missing mucinous value
###Output
_____no_output_____
###Markdown
Fill column data
###Code
#Data to Fill
#ds['']=ds[''].fillna(ds[''].mode()[0])
ds['height']=ds['height'].fillna(ds['height'].mode()[0])
###Output
_____no_output_____
###Markdown
Manipulated Data Visualized
###Code
#Shape after Dropping
ds.shape
#Heatmap of dataset showing the null values
sns.heatmap(ds.isnull(),yticklabels=False,cbar=False,cmap='viridis')
pd.set_option('display.max_rows', 5)
pd.set_option('display.max_columns', 5)
ds.isnull().sum()
#print(ds)
###Output
_____no_output_____
###Markdown
Export Modified dataset
###Code
#Export current modified data set
ds.to_csv('original_processed.csv',index=False)
!cp original_processed.csv {DATASET_SAVE_LOCATION}
#Mucinous Dataset
dataframe =pd.read_csv(DATASET_SAVE_LOCATION)
# Drop hgd_malignant row
dataframe.drop('hgd_malignancy',axis=1,inplace=True)
#Export to .csv and save in drive
dataframe.to_csv('mucinous_processed.csv',index=False)
!cp mucinous_processed.csv {DATASET_SAVE_LOCATION}
#hgd dataset
dataframe =pd.read_csv(DATASET_SAVE_LOCATION)
# Find and Delete nonmucinous rows
nonMucinous = dataframe[ dataframe['mucinous'] == 0 ].index
dataframe.drop(nonMucinous, inplace=True)
dataframe.drop('mucinous',axis=1,inplace=True)
#Change missing values in 'hgd_malignancy' to 0
dataframe['hgd_malignancy']=dataframe['hgd_malignancy'].fillna(value=0)
#Export to .csv and save in drive
dataframe.to_csv('hgd_processed.csv',index=False)
!cp hgd_processed.csv {DATASET_SAVE_LOCATION}
###Output
_____no_output_____
###Markdown
Texture only feature set
###Code
#Mucinous Dataset
dataframe = pd.read_csv(DATASET_SAVE_LOCATION)
# Drop the hgd_malignancy column (not needed for the mucinous dataset)
dataframe.drop('hgd_malignancy',axis=1,inplace=True)
#Export to .csv and save in drive
dataframe.to_csv('texture_feature_set_mucinous_processed.csv',index=False)
!cp texture_feature_set_mucinous_processed.csv {DATASET_SAVE_LOCATION}
#hgd dataset
dataframe = pd.read_csv(DATASET_SAVE_LOCATION)
# Find and delete the non-mucinous rows
nonMucinous = dataframe[ dataframe['mucinous'] == 0 ].index
dataframe.drop(nonMucinous, inplace=True)
dataframe.drop('mucinous',axis=1,inplace=True)
#Change missing values in 'hgd_malignancy' to 0
dataframe['hgd_malignancy']=dataframe['hgd_malignancy'].fillna(value=0)
#Export to .csv and save in drive
dataframe.to_csv('texture_feature_set_hgd_processed.csv',index=False)
!cp texture_feature_set_hgd_processed.csv {DATASET_SAVE_LOCATION}
###Output
_____no_output_____
###Markdown
Clinical Only Features
###Code
#Mucinous Dataset
dataframe = pd.read_csv(DATASET_SAVE_LOCATION)
# Drop the hgd_malignancy column (not needed for the mucinous dataset)
dataframe.drop('hgd_malignancy',axis=1,inplace=True)
#Export to .csv and save in drive
dataframe.to_csv('clinical_data_mucinous_processed.csv',index=False)
!cp clinical_data_mucinous_processed.csv {DATASET_SAVE_LOCATION}
#hgd dataset
dataframe = pd.read_csv(DATASET_SAVE_LOCATION)
# Find and delete the non-mucinous rows
nonMucinous = dataframe[ dataframe['mucinous'] == 0 ].index
dataframe.drop(nonMucinous, inplace=True)
dataframe.drop('mucinous',axis=1,inplace=True)
#Change missing values in 'hgd_malignancy' to 0
dataframe['hgd_malignancy']=dataframe['hgd_malignancy'].fillna(value=0)
#Export to .csv and save in drive
dataframe.to_csv('clinical_data_hgd_processed.csv',index=False)
!cp clinical_data_hgd_processed.csv {DATASET_SAVE_LOCATION}
###Output
_____no_output_____ |
Stats_notebook_GoogleColab_Ans.ipynb | ###Markdown
STATISTICS WORKSHOP __Version: March 2022__ __USING THE NOTEBOOK__ The present notebook is composed of text and code cells. The former contain the instructions for the activity and look just like regular text on a webpage. Cells that have "Answer:" at the beginning of them are also text cells. To write your answer, double-click on the cell so the cursor appears and type your answer; when you are done, press "Shift" + "Enter". The code cells look like gray boxes with empty square brackets to their left ([ ]). To run the code inside a code cell, hover over the top left corner of the box and, when the empty square brackets change to a "play" sign, click on it (alternatively, click on the code cell and press "Shift" + "Enter"); this will make the output of the code appear underneath the cell. The following code cell will load all the libraries and functions we'll need for the workshop. Please run it.
###Code
# importing functions and libraries from a python file in a GitHub repo
import os
if not os.path.exists('stats-notebooks'):
!git clone https://github.com/gapatino/stats-notebooks.git
%run stats-notebooks/statfuncs.py
# set formatting
import warnings
warnings.filterwarnings('ignore')
%matplotlib inline
sns.set()
pd.options.display.float_format = '{:.3f}'.format
np.set_printoptions(precision=3, suppress=True)
###Output
Cloning into 'stats-notebooks'...
remote: Enumerating objects: 51, done.
remote: Total 51 (delta 0), reused 0 (delta 0), pack-reused 51
Unpacking objects: 100% (51/51), done.
###Markdown
__LOADING THE DATABASE__ In this exercise we will use a database of patients evaluated for obstructive sleep apnea syndrome (OSAS). Each patient filled out a survey in which epidemiological characteristics and symptoms were recorded. The database contains some of those characteristics, along with whether each patient had OSAS and its severity, based on the Apnea-Hypopnea Index (ahi), a measure of how frequently the patient stops breathing during the night. We will load the data into memory from a CSV file hosted on GitHub and store it in a variable called "data". Please execute the following code cells.
###Code
data = pd.read_csv("https://raw.githubusercontent.com/gapatino/stats-notebooks/master/stats_workshop_database.csv")
###Output
_____no_output_____
###Markdown
Then define some of the columns in the database as categorical variables
###Code
data['gender']=data['gender'].astype('category')
data['osas_severity']=data['osas_severity'].astype('category')
###Output
_____no_output_____
###Markdown
Let's look at the data by displaying the first 10 rows of it
###Code
data.head(10)
###Output
_____no_output_____
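###Markdown
 Before starting the exercise, here is a small optional sketch (not part of the workshop's helper functions) that uses only the pandas attributes `shape` and `dtypes` on the `data` DataFrame loaded above to show how many subjects and variables there are and how each column is typed.
###Code
print(data.shape)  # (number of subjects, number of variables)
data.dtypes        # the type pandas assigned to each column (category, float64, int64, ...)
###Output
_____no_output_____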
###Markdown
__APPLICATION EXERCISE__ Below you will find questions about analyzing this data. After each question you will find a code cell and a text cell. Please enter the code for the appropriate statistical test in the code cell below it and run it; based on the output of the test, answer the question in the text cell. If you need additional code cells, you can add them by clicking the button with the plus sign at the top of the page. __Question 1__ What is the type of each variable (column) in the dataset table? Hint: you don't need to run any functions to answer this (the optional sketch in the cell above can help you double-check). ANSWER: __Question 2__ What is the mean and standard deviation of the age of male subjects?
###Code
parammct(data=data, independent='gender', dependent='age')
###Output
_____no_output_____
###Markdown
ANSWER: __Question 3__ Do the BMI values have a normal distribution across OSAS patients and controls?
###Code
histograms(data=data, independent='osas', dependent='bmi')
###Output
_____no_output_____
###Markdown
ANSWER: __Question 4__ What is the median and interquartile range of BMI among smokers?
###Code
non_parammct(data=data, independent='smoking', dependent='bmi')
###Output
_____no_output_____
###Markdown
ANSWER: __Question 5__ What is the range of AHI among subjects that snore?
###Code
non_parammct(data=data, independent='snoring', dependent='ahi')
###Output
_____no_output_____
###Markdown
ANSWER: __Question 6__ How many levels of OSAS severity are there and how many subjects are in each of them?
###Code
non_parammct(data=data, independent='osas_severity', dependent='bmi')
###Output
_____no_output_____
###Markdown
ANSWER: __Question 7__ Is there a difference in the mean age of subjects with and without OSAS?
###Code
t_test(data=data, independent='osas', dependent='age')
###Output
_____no_output_____
###Markdown
ANSWER: __Question 8__ Is there a difference in the mean BMI of subjects across the severity levels of OSAS?
###Code
anova(data=data, independent='osas_severity', dependent='bmi')
tukey(data=data, independent='osas_severity', dependent='bmi')
###Output
_____no_output_____
###Markdown
ANSWER: __Question 9__ Is there a difference in the number of subjects with apnea between those with and without OSAS?
###Code
chi_square(data=data, variable1='osas', variable2='apnea')
###Output
_____no_output_____
###Markdown
ANSWER: __Question 10__ Can age predict whether a subject will have OSAS?
###Code
logistic_reg(data=data, independent='age', dependent='osas')
###Output
Optimization terminated successfully.
Current function value: 0.268658
Iterations 7
|
_notebooks/2022-03-04-numpy.ipynb | ###Markdown
" Numpy! "> "Awesome summary"- toc:true- branch: master- badges: true- comments: true- author: Jaeeon- categories: [fastpages, jupyter] **도구 - 넘파이(NumPy)***넘파이(NumPy)는 파이썬의 과학 컴퓨팅을 위한 기본 라이브러리입니다. 넘파이의 핵심은 강력한 N-차원 배열 객체입니다. 또한 선형 대수, 푸리에(Fourier) 변환, 유사 난수 생성과 같은 유용한 함수들도 제공합니다." 구글 코랩에서 실행하기 배열 생성 `numpy`를 임포트해 보죠. 대부분의 사람들이 `np`로 알리아싱하여 임포트합니다:
###Code
import numpy as np
###Output
_____no_output_____
###Markdown
`np.zeros` `zeros` 함수는 0으로 채워진 배열을 만듭니다:
###Code
np.zeros(5)
###Output
_____no_output_____
###Markdown
2D 배열(즉, 행렬)을 만들려면 원하는 행과 열의 크기를 튜플로 전달합니다. 예를 들어 다음은 $3 \times 4$ 크기의 행렬입니다:
###Code
np.zeros((3,4))
###Output
_____no_output_____
###Markdown
용어* 넘파이에서 각 차원을 **축**(axis) 이라고 합니다* 축의 개수를 **랭크**(rank) 라고 합니다. * 예를 들어, 위의 $3 \times 4$ 행렬은 랭크 2인 배열입니다(즉 2차원입니다). * 첫 번째 축의 길이는 3이고 두 번째 축의 길이는 4입니다.* 배열의 축 길이를 배열의 **크기**(shape)라고 합니다. * 예를 들어, 위 행렬의 크기는 `(3, 4)`입니다. * 랭크는 크기의 길이와 같습니다.* 배열의 **사이즈**(size)는 전체 원소의 개수입니다. 축의 길이를 모두 곱해서 구할 수 있습니다(가령, $3 \times 4=12$).
###Code
a = np.zeros((3,4))
a
a.shape
a.ndim # len(a.shape)와 같습니다
a.size
###Output
_____no_output_____
###Markdown
N-차원 배열임의의 랭크 수를 가진 N-차원 배열을 만들 수 있습니다. 예를 들어, 다음은 크기가 `(2,3,4)`인 3D 배열(랭크=3)입니다:
###Code
np.zeros((2,2,5))
###Output
_____no_output_____
###Markdown
배열 타입넘파이 배열의 타입은 `ndarray`입니다:
###Code
type(np.zeros((3,4)))
###Output
_____no_output_____
###Markdown
`np.ones``ndarray`를 만들 수 있는 넘파이 함수가 많습니다.다음은 1로 채워진 $3 \times 4$ 크기의 행렬입니다:
###Code
np.ones((3,4))
###Output
_____no_output_____
###Markdown
`np.full`주어진 값으로 지정된 크기의 배열을 초기화합니다. 다음은 `π`로 채워진 $3 \times 4$ 크기의 행렬입니다.
###Code
np.full((3,4), np.pi)
###Output
_____no_output_____
###Markdown
`np.empty`초기화되지 않은 $2 \times 3$ 크기의 배열을 만듭니다(배열의 내용은 예측이 불가능하며 메모리 상황에 따라 달라집니다):
###Code
np.empty((2,3))
###Output
_____no_output_____
###Markdown
np.array`array` 함수는 파이썬 리스트를 사용하여 `ndarray`를 초기화합니다:
###Code
np.array([[1,2,3,4], [10, 20, 30, 40]])
###Output
_____no_output_____
###Markdown
`np.arange`파이썬의 기본 `range` 함수와 비슷한 넘파이 `arange` 함수를 사용하여 `ndarray`를 만들 수 있습니다:
###Code
np.arange(1, 5)
###Output
_____no_output_____
###Markdown
부동 소수도 가능합니다:
###Code
np.arange(1.0, 5.0)
###Output
_____no_output_____
###Markdown
파이썬의 기본 `range` 함수처럼 건너 뛰는 정도를 지정할 수 있습니다:
###Code
np.arange(1, 5, 0.5)
###Output
_____no_output_____
###Markdown
부동 소수를 사용하면 원소의 개수가 일정하지 않을 수 있습니다. 예를 들면 다음과 같습니다:
###Code
print(np.arange(0, 5/3, 1/3)) # 부동 소수 오차 때문에, 최댓값은 4/3 또는 5/3이 됩니다.
print(np.arange(0, 5/3, 0.333333333))
print(np.arange(0, 5/3, 0.333333334))
###Output
[0. 0.33333333 0.66666667 1. 1.33333333 1.66666667]
[0. 0.33333333 0.66666667 1. 1.33333333 1.66666667]
[0. 0.33333333 0.66666667 1. 1.33333334]
###Markdown
`np.linspace`이런 이유로 부동 소수를 사용할 땐 `arange` 대신에 `linspace` 함수를 사용하는 것이 좋습니다. `linspace` 함수는 지정된 개수만큼 두 값 사이를 나눈 배열을 반환합니다(`arange`와는 다르게 최댓값이 **포함**됩니다):
###Code
print(np.linspace(0, 5/3, 6))
###Output
[0. 0.33333333 0.66666667 1. 1.33333333 1.66666667]
###Markdown
`np.rand`와 `np.randn`넘파이의 `random` 모듈에는 `ndarray`를 랜덤한 값으로 초기화할 수 있는 함수들이 많이 있습니다.예를 들어, 다음은 (균등 분포인) 0과 1사이의 랜덤한 부동 소수로 $3 \times 4$ 행렬을 초기화합니다:
###Code
np.random.rand(3,4)
###Output
_____no_output_____
###Markdown
다음은 평균이 0이고 분산이 1인 일변량 [정규 분포](https://ko.wikipedia.org/wiki/%EC%A0%95%EA%B7%9C_%EB%B6%84%ED%8F%AC)(가우시안 분포)에서 샘플링한 랜덤한 부동 소수를 담은 $3 \times 4$ 행렬입니다:
###Code
np.random.randn(3,4)
###Output
_____no_output_____
###Markdown
이 분포의 모양을 알려면 맷플롯립을 사용해 그려보는 것이 좋습니다(더 자세한 것은 [맷플롯립 튜토리얼](tools_matplotlib.ipynb)을 참고하세요):
###Code
%matplotlib inline
import matplotlib.pyplot as plt
plt.hist(np.random.rand(100000), density=True, bins=100, histtype="step", color="blue", label="rand")
plt.hist(np.random.randn(100000), density=True, bins=100, histtype="step", color="red", label="randn")
plt.axis([-2.5, 2.5, 0, 1.1])
plt.legend(loc = "upper left")
plt.title("Random distributions")
plt.xlabel("Value")
plt.ylabel("Density")
plt.show()
###Output
_____no_output_____
###Markdown
np.fromfunction함수를 사용하여 `ndarray`를 초기화할 수도 있습니다:
###Code
def my_function(z, y, x):
return x + 10 * y + 100 * z
np.fromfunction(my_function, (3, 2, 10))
###Output
_____no_output_____
###Markdown
넘파이는 먼저 크기가 `(3, 2, 10)`인 세 개의 `ndarray`(차원마다 하나씩)를 만듭니다. 각 배열은 축을 따라 좌표 값과 같은 값을 가집니다. 예를 들어, `z` 축에 있는 배열의 모든 원소는 z-축의 값과 같습니다: [[[ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.] [ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]] [[ 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.] [ 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.]] [[ 2. 2. 2. 2. 2. 2. 2. 2. 2. 2.] [ 2. 2. 2. 2. 2. 2. 2. 2. 2. 2.]]]위의 식 `x + 10 * y + 100 * z`에서 `x`, `y`, `z`는 사실 `ndarray`입니다(배열의 산술 연산에 대해서는 아래에서 설명합니다). 중요한 점은 함수 `my_function`이 원소마다 호출되는 것이 아니고 딱 **한 번** 호출된다는 점입니다. 그래서 매우 효율적으로 초기화할 수 있습니다. 배열 데이터 `dtype`넘파이의 `ndarray`는 모든 원소가 동일한 타입(보통 숫자)을 가지기 때문에 효율적입니다. `dtype` 속성으로 쉽게 데이터 타입을 확인할 수 있습니다:
###Code
c = np.arange(1, 5)
print(c.dtype, c)
c = np.arange(1.0, 5.0)
print(c.dtype, c)
###Output
float64 [1. 2. 3. 4.]
###Markdown
넘파이가 데이터 타입을 결정하도록 내버려 두는 대신 `dtype` 매개변수를 사용해서 배열을 만들 때 명시적으로 지정할 수 있습니다:
###Code
d = np.arange(1, 5, dtype=np.complex64)
print(d.dtype, d)
###Output
complex64 [1.+0.j 2.+0.j 3.+0.j 4.+0.j]
###Markdown
가능한 데이터 타입은 `int8`, `int16`, `int32`, `int64`, `uint8`|`16`|`32`|`64`, `float16`|`32`|`64`, `complex64`|`128`가 있습니다. 전체 리스트는 [온라인 문서](http://docs.scipy.org/doc/numpy/user/basics.types.html)를 참고하세요. `itemsize``itemsize` 속성은 각 아이템의 크기(바이트)를 반환합니다:
###Code
e = np.arange(1, 5, dtype=np.complex64)
e.itemsize
###Output
_____no_output_____
###Markdown
`data` 버퍼배열의 데이터는 1차원 바이트 버퍼로 메모리에 저장됩니다. `data` 속성을 사용해 참조할 수 있습니다(사용할 일은 거의 없겠지만요).
###Code
f = np.array([[1,2],[1000, 2000]], dtype=np.int32)
f.data
###Output
_____no_output_____
###Markdown
파이썬 2에서는 `f.data`가 버퍼이고 파이썬 3에서는 memoryview입니다.
###Code
if (hasattr(f.data, "tobytes")):
data_bytes = f.data.tobytes() # python 3
else:
data_bytes = memoryview(f.data).tobytes() # python 2
data_bytes
###Output
_____no_output_____
###Markdown
여러 개의 `ndarray`가 데이터 버퍼를 공유할 수 있습니다. 하나를 수정하면 다른 것도 바뀝니다. 잠시 후에 예를 살펴 보겠습니다. 배열 크기 변경 자신을 변경`ndarray`의 `shape` 속성을 지정하면 간단히 크기를 바꿀 수 있습니다. 배열의 원소 개수는 동일하게 유지됩니다.
###Code
g = np.arange(24)
print(g)
print("랭크:", g.ndim)
g.shape = (6, 4)
print(g)
print("랭크:", g.ndim)
g.shape = (2, 3, 4)
print(g)
print("랭크:", g.ndim)
###Output
[[[ 0 1 2 3]
[ 4 5 6 7]
[ 8 9 10 11]]
[[12 13 14 15]
[16 17 18 19]
[20 21 22 23]]]
랭크: 3
###Markdown
`reshape``reshape` 함수는 동일한 데이터를 가리키는 새로운 `ndarray` 객체를 반환합니다. 한 배열을 수정하면 다른 것도 함께 바뀝니다.
###Code
g2 = g.reshape(4,6)
print(g2)
print("랭크:", g2.ndim)
###Output
[[ 0 1 2 3 4 5]
[ 6 7 8 9 10 11]
[12 13 14 15 16 17]
[18 19 20 21 22 23]]
랭크: 2
###Markdown
행 1, 열 2의 원소를 999로 설정합니다(인덱싱 방식은 아래를 참고하세요).
###Code
g2[1, 2] = 999
g2
###Output
_____no_output_____
###Markdown
이에 상응하는 `g`의 원소도 수정됩니다.
###Code
g
###Output
_____no_output_____
###Markdown
`ravel`마지막으로 `ravel` 함수는 동일한 데이터를 가리키는 새로운 1차원 `ndarray`를 반환합니다:
###Code
g.ravel()
###Output
_____no_output_____
###Markdown
산술 연산일반적인 산술 연산자(`+`, `-`, `*`, `/`, `//`, `**` 등)는 모두 `ndarray`와 사용할 수 있습니다. 이 연산자는 원소별로 적용됩니다:
###Code
a = np.array([14, 23, 32, 41])
b = np.array([5, 4, 3, 2])
print("a + b =", a + b)
print("a - b =", a - b)
print("a * b =", a * b)
print("a / b =", a / b)
print("a // b =", a // b)
print("a % b =", a % b)
print("a ** b =", a ** b)
###Output
a + b = [19 27 35 43]
a - b = [ 9 19 29 39]
a * b = [70 92 96 82]
a / b = [ 2.8 5.75 10.66666667 20.5 ]
a // b = [ 2 5 10 20]
a % b = [4 3 2 1]
a ** b = [537824 279841 32768 1681]
###Markdown
여기 곱셈은 행렬 곱셈이 아닙니다. 행렬 연산은 아래에서 설명합니다.배열의 크기는 같아야 합니다. 그렇지 않으면 넘파이가 브로드캐스팅 규칙을 적용합니다. 브로드캐스팅 일반적으로 넘파이는 동일한 크기의 배열을 기대합니다. 그렇지 않은 상황에는 브로드캐시틍 규칙을 적용합니다: 규칙 1배열의 랭크가 동일하지 않으면 랭크가 맞을 때까지 랭크가 작은 배열 앞에 1을 추가합니다.
###Code
h = np.arange(5).reshape(1, 1, 5)
h
###Output
_____no_output_____
###Markdown
여기에 `(1,1,5)` 크기의 3D 배열에 `(5,)` 크기의 1D 배열을 더해 보죠. 브로드캐스팅의 규칙 1이 적용됩니다!
###Code
h + [10, 20, 30, 40, 50] # 다음과 동일합니다: h + [[[10, 20, 30, 40, 50]]]
###Output
_____no_output_____
###Markdown
규칙 2특정 차원이 1인 배열은 그 차원에서 크기가 가장 큰 배열의 크기에 맞춰 동작합니다. 배열의 원소가 차원을 따라 반복됩니다.
###Code
k = np.arange(6).reshape(2, 3)
k
###Output
_____no_output_____
###Markdown
`(2,3)` 크기의 2D `ndarray`에 `(2,1)` 크기의 2D 배열을 더해 보죠. 넘파이는 브로드캐스팅 규칙 2를 적용합니다:
###Code
k + [[100], [200]] # 다음과 같습니다: k + [[100, 100, 100], [200, 200, 200]]
###Output
_____no_output_____
###Markdown
규칙 1과 2를 합치면 다음과 같이 동작합니다:
###Code
k + [100, 200, 300] # 규칙 1 적용: [[100, 200, 300]], 규칙 2 적용: [[100, 200, 300], [100, 200, 300]]
###Output
_____no_output_____
###Markdown
또 매우 간단히 다음 처럼 해도 됩니다:
###Code
k + 1000 # 다음과 같습니다: k + [[1000, 1000, 1000], [1000, 1000, 1000]]
###Output
_____no_output_____
###Markdown
규칙 3규칙 1 & 2을 적용했을 때 모든 배열의 크기가 맞아야 합니다.
###Code
try:
k + [33, 44]
except ValueError as e:
print(e)
###Output
operands could not be broadcast together with shapes (2,3) (2,)
###Markdown
브로드캐스팅 규칙은 산술 연산 뿐만 아니라 넘파이 연산에서 많이 사용됩니다. 아래에서 더 보도록 하죠. 브로드캐스팅에 관한 더 자세한 정보는 [온라인 문서](https://docs.scipy.org/doc/numpy-dev/user/basics.broadcasting.html)를 참고하세요. 업캐스팅`dtype`이 다른 배열을 합칠 때 넘파이는 (실제 값에 상관없이) 모든 값을 다룰 수 있는 타입으로 업캐스팅합니다.
###Code
k1 = np.arange(0, 5, dtype=np.uint8)
print(k1.dtype, k1)
k2 = k1 + np.array([5, 6, 7, 8, 9], dtype=np.int8)
print(k2.dtype, k2)
###Output
int16 [ 5 7 9 11 13]
###Markdown
모든 `int8`과 `uint8` 값(-128에서 255까지)을 표현하기 위해 `int16`이 필요합니다. 이 코드에서는 `uint8`이면 충분하지만 업캐스팅되었습니다.
###Code
k3 = k1 + 1.5
print(k3.dtype, k3)
###Output
float64 [1.5 2.5 3.5 4.5 5.5]
###Markdown
조건 연산자 조건 연산자도 원소별로 적용됩니다:
###Code
m = np.array([20, -5, 30, 40])
m < [15, 16, 35, 36]
###Output
_____no_output_____
###Markdown
브로드캐스팅을 사용합니다:
###Code
m < 25 # m < [25, 25, 25, 25] 와 동일
###Output
_____no_output_____
###Markdown
불리언 인덱싱과 함께 사용하면 아주 유용합니다(아래에서 설명하겠습니다).
###Code
m[m < 25]
###Output
_____no_output_____
###Markdown
수학 함수와 통계 함수 `ndarray`에서 사용할 수 있는 수학 함수와 통계 함수가 많습니다. `ndarray` 메서드일부 함수는 `ndarray` 메서드로 제공됩니다. 예를 들면:
###Code
a = np.array([[-2.5, 3.1, 7], [10, 11, 12]])
print(a)
print("평균 =", a.mean())
###Output
[[-2.5 3.1 7. ]
[10. 11. 12. ]]
평균 = 6.766666666666667
###Markdown
이 명령은 크기에 상관없이 `ndarray`에 있는 모든 원소의 평균을 계산합니다.다음은 유용한 `ndarray` 메서드입니다:
###Code
for func in (a.min, a.max, a.sum, a.prod, a.std, a.var):
print(func.__name__, "=", func())
###Output
min = -2.5
max = 12.0
sum = 40.6
prod = -71610.0
std = 5.084835843520964
var = 25.855555555555554
###Markdown
이 함수들은 선택적으로 매개변수 `axis`를 사용합니다. 지정된 축을 따라 원소에 연산을 적용하는데 사용합니다. 예를 들면:
###Code
c=np.arange(24).reshape(2,3,4)
c
c.sum(axis=0) # 첫 번째 축을 따라 더함, 결과는 3x4 배열
c.sum(axis=1) # 두 번째 축을 따라 더함, 결과는 2x4 배열
###Output
_____no_output_____
###Markdown
여러 축에 대해서 더할 수도 있습니다:
###Code
c.sum(axis=(0,2)) # 첫 번째 축과 세 번째 축을 따라 더함, 결과는 (3,) 배열
0+1+2+3 + 12+13+14+15, 4+5+6+7 + 16+17+18+19, 8+9+10+11 + 20+21+22+23
###Output
_____no_output_____
###Markdown
일반 함수넘파이는 일반 함수(universal function) 또는 **ufunc**라고 부르는 원소별 함수를 제공합니다. 예를 들면 `square` 함수는 원본 `ndarray`를 복사하여 각 원소를 제곱한 새로운 `ndarray` 객체를 반환합니다:
###Code
a = np.array([[-2.5, 3.1, 7], [10, 11, 12]])
np.square(a)
###Output
_____no_output_____
###Markdown
다음은 유용한 단항 일반 함수들입니다:
###Code
print("원본 ndarray")
print(a)
for func in (np.abs, np.sqrt, np.exp, np.log, np.sign, np.ceil, np.modf, np.isnan, np.cos):
print("\n", func.__name__)
print(func(a))
###Output
원본 ndarray
[[-2.5 3.1 7. ]
[10. 11. 12. ]]
absolute
[[ 2.5 3.1 7. ]
[10. 11. 12. ]]
sqrt
[[ nan 1.76068169 2.64575131]
[3.16227766 3.31662479 3.46410162]]
exp
[[8.20849986e-02 2.21979513e+01 1.09663316e+03]
[2.20264658e+04 5.98741417e+04 1.62754791e+05]]
log
[[ nan 1.13140211 1.94591015]
[2.30258509 2.39789527 2.48490665]]
sign
[[-1. 1. 1.]
[ 1. 1. 1.]]
ceil
[[-2. 4. 7.]
[10. 11. 12.]]
modf
(array([[-0.5, 0.1, 0. ],
[ 0. , 0. , 0. ]]), array([[-2., 3., 7.],
[10., 11., 12.]]))
isnan
[[False False False]
[False False False]]
cos
[[-0.80114362 -0.99913515 0.75390225]
[-0.83907153 0.0044257 0.84385396]]
###Markdown
이항 일반 함수두 개의 `ndarray`에 원소별로 적용되는 이항 함수도 많습니다. 두 배열이 동일한 크기가 아니면 브로드캐스팅 규칙이 적용됩니다:
###Code
a = np.array([1, -2, 3, 4])
b = np.array([2, 8, -1, 7])
np.add(a, b) # a + b 와 동일
np.greater(a, b) # a > b 와 동일
np.maximum(a, b)
np.copysign(a, b)
###Output
_____no_output_____
###Markdown
배열 인덱싱 1차원 배열1차원 넘파이 배열은 보통의 파이썬 배열과 비슷하게 사용할 수 있습니다:
###Code
a = np.array([1, 5, 3, 19, 13, 7, 3])
a[3]
a[2:5]
a[2:-1]
a[:2]
a[2::2]
a[::-1]
###Output
_____no_output_____
###Markdown
물론 원소를 수정할 수 있죠:
###Code
a[3]=999
a
###Output
_____no_output_____
###Markdown
슬라이싱을 사용해 `ndarray`를 수정할 수 있습니다:
###Code
a[2:5] = [997, 998, 999]
a
###Output
_____no_output_____
###Markdown
보통의 파이썬 배열과 차이점보통의 파이썬 배열과 대조적으로 `ndarray` 슬라이싱에 하나의 값을 할당하면 슬라이싱 전체에 복사됩니다. 위에서 언급한 브로드캐스팅 덕택입니다.
###Code
a[2:5] = -1
a
###Output
_____no_output_____
###Markdown
또한 이런 식으로 `ndarray` 크기를 늘리거나 줄일 수 없습니다:
###Code
try:
a[2:5] = [1,2,3,4,5,6] # 너무 길어요
except ValueError as e:
print(e)
###Output
cannot copy sequence with size 6 to array axis with dimension 3
###Markdown
원소를 삭제할 수도 없습니다:
###Code
try:
del a[2:5]
except ValueError as e:
print(e)
###Output
cannot delete array elements
###Markdown
중요한 점은 `ndarray`의 슬라이싱은 같은 데이터 버퍼를 바라보는 뷰(view)입니다. 슬라이싱된 객체를 수정하면 실제 원본 `ndarray`가 수정됩니다!
###Code
a_slice = a[2:6]
a_slice[1] = 1000
a # 원본 배열이 수정됩니다!
a[3] = 2000
a_slice # 비슷하게 원본 배열을 수정하면 슬라이싱 객체에도 반영됩니다!
###Output
_____no_output_____
###Markdown
데이터를 복사하려면 `copy` 메서드를 사용해야 합니다:
###Code
another_slice = a[2:6].copy()
another_slice[1] = 3000
a # 원본 배열이 수정되지 않습니다
a[3] = 4000
another_slice # 마찬가지로 원본 배열을 수정해도 복사된 배열은 바뀌지 않습니다
###Output
_____no_output_____
###Markdown
다차원 배열다차원 배열은 비슷한 방식으로 각 축을 따라 인덱싱 또는 슬라이싱해서 사용합니다. 콤마로 구분합니다:
###Code
b = np.arange(48).reshape(4, 12)
b
b[1, 2] # 행 1, 열 2
b[1, :] # 행 1, 모든 열
b[:, 1] # 모든 행, 열 1
###Output
_____no_output_____
###Markdown
**주의**: 다음 두 표현에는 미묘한 차이가 있습니다:
###Code
b[1, :]
b[1:2, :]
###Output
_____no_output_____
###Markdown
첫 번째 표현식은 `(12,)` 크기인 1D 배열로 행이 하나입니다. 두 번째는 `(1, 12)` 크기인 2D 배열로 같은 행을 반환합니다. 팬시 인덱싱(Fancy indexing)관심 대상의 인덱스 리스트를 지정할 수도 있습니다. 이를 팬시 인덱싱이라고 부릅니다.
###Code
b[(0,2), 2:5] # 행 0과 2, 열 2에서 4(5-1)까지
b[:, (-1, 2, -1)] # 모든 행, 열 -1 (마지막), 2와 -1 (다시 반대 방향으로)
###Output
_____no_output_____
###Markdown
여러 개의 인덱스 리스트를 지정하면 인덱스에 맞는 값이 포함된 1D `ndarray`를 반환됩니다.
###Code
b[(-1, 2, -1, 2), (5, 9, 1, 9)] # returns a 1D array with b[-1, 5], b[2, 9], b[-1, 1] and b[2, 9] (again)
###Output
_____no_output_____
###Markdown
고차원고차원에서도 동일한 방식이 적용됩니다. 몇 가지 예를 살펴 보겠습니다:
###Code
c = b.reshape(4,2,6)
c
c[2, 1, 4] # 행렬 2, 행 1, 열 4
c[2, :, 3] # 행렬 2, 모든 행, 열 3
###Output
_____no_output_____
###Markdown
어떤 축에 대한 인덱스를 지정하지 않으면 이 축의 모든 원소가 반환됩니다:
###Code
c[2, 1] # 행렬 2, 행 1, 모든 열이 반환됩니다. c[2, 1, :]와 동일합니다.
###Output
_____no_output_____
###Markdown
생략 부호 (`...`)생략 부호(`...`)를 쓰면 모든 지정하지 않은 축의 원소를 포함합니다.
###Code
c[2, ...] # 행렬 2, 모든 행, 모든 열. c[2, :, :]와 동일
c[2, 1, ...] # 행렬 2, 행 1, 모든 열. c[2, 1, :]와 동일
c[2, ..., 3] # 행렬 2, 모든 행, 열 3. c[2, :, 3]와 동일
c[..., 3] # 모든 행렬, 모든 행, 열 3. c[:, :, 3]와 동일
###Output
_____no_output_____
###Markdown
불리언 인덱싱불리언 값을 가진 `ndarray`를 사용해 축의 인덱스를 지정할 수 있습니다.
###Code
b = np.arange(48).reshape(4, 12)
b
rows_on = np.array([True, False, True, False])
b[rows_on, :] # 행 0과 2, 모든 열. b[(0, 2), :]와 동일
cols_on = np.array([False, True, False] * 4)
b[:, cols_on] # 모든 행, 열 1, 4, 7, 10
###Output
_____no_output_____
###Markdown
`np.ix_`여러 축에 걸쳐서는 불리언 인덱싱을 사용할 수 없고 `ix_` 함수를 사용합니다:
###Code
b[np.ix_(rows_on, cols_on)]
np.ix_(rows_on, cols_on)
###Output
_____no_output_____
###Markdown
`ndarray`와 같은 크기의 불리언 배열을 사용하면 해당 위치가 `True`인 모든 원소를 담은 1D 배열이 반환됩니다. 일반적으로 조건 연산자와 함께 사용합니다:
###Code
b[b % 3 == 1]
###Output
_____no_output_____
###Markdown
반복`ndarray`를 반복하는 것은 일반적인 파이썬 배열을 반복한는 것과 매우 유사합니다. 다차원 배열을 반복하면 첫 번째 축에 대해서 수행됩니다.
###Code
c = np.arange(24).reshape(2, 3, 4) # 3D 배열 (두 개의 3x4 행렬로 구성됨)
c
for m in c:
print("아이템:")
print(m)
for i in range(len(c)): # len(c) == c.shape[0]
print("아이템:")
print(c[i])
###Output
아이템:
[[ 0 1 2 3]
[ 4 5 6 7]
[ 8 9 10 11]]
아이템:
[[12 13 14 15]
[16 17 18 19]
[20 21 22 23]]
###Markdown
`ndarray`에 있는 모든 원소를 반복하려면 `flat` 속성을 사용합니다:
###Code
for i in c.flat:
print("아이템:", i)
###Output
아이템: 0
아이템: 1
아이템: 2
아이템: 3
아이템: 4
아이템: 5
아이템: 6
아이템: 7
아이템: 8
아이템: 9
아이템: 10
아이템: 11
아이템: 12
아이템: 13
아이템: 14
아이템: 15
아이템: 16
아이템: 17
아이템: 18
아이템: 19
아이템: 20
아이템: 21
아이템: 22
아이템: 23
###Markdown
배열 쌓기종종 다른 배열을 쌓아야 할 때가 있습니다. 넘파이는 이를 위해 몇 개의 함수를 제공합니다. 먼저 배열 몇 개를 만들어 보죠.
###Code
q1 = np.full((3,4), 1.0)
q1
q2 = np.full((4,4), 2.0)
q2
q3 = np.full((3,4), 3.0)
q3
###Output
_____no_output_____
###Markdown
`vstack``vstack` 함수를 사용하여 수직으로 쌓아보죠:
###Code
q4 = np.vstack((q1, q2, q3))
q4
q4.shape
###Output
_____no_output_____
###Markdown
q1, q2, q3가 모두 같은 크기이므로 가능합니다(수직으로 쌓기 때문에 수직 축은 크기가 달라도 됩니다). `hstack``hstack`을 사용해 수평으로도 쌓을 수 있습니다:
###Code
q5 = np.hstack((q1, q3))
q5
q5.shape
###Output
_____no_output_____
###Markdown
q1과 q3가 모두 3개의 행을 가지고 있기 때문에 가능합니다. q2는 4개의 행을 가지고 있기 때문에 q1, q3와 수평으로 쌓을 수 없습니다:
###Code
try:
q5 = np.hstack((q1, q2, q3))
except ValueError as e:
print(e)
###Output
all the input array dimensions for the concatenation axis must match exactly, but along dimension 0, the array at index 0 has size 3 and the array at index 1 has size 4
###Markdown
`concatenate``concatenate` 함수는 지정한 축으로도 배열을 쌓습니다.
###Code
q7 = np.concatenate((q1, q2, q3), axis=0) # vstack과 동일
q7
q7.shape
###Output
_____no_output_____
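###Markdown
 (An optional check added in English, as a small sketch using the arrays q1 and q3 defined above: the next note states that `hstack` is the same as calling `concatenate` with `axis=1`, and `np.array_equal` confirms it.)
###Code
np.array_equal(np.hstack((q1, q3)), np.concatenate((q1, q3), axis=1))  # True: identical results
###Output
_____no_output_____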
###Markdown
예상했겠지만 `hstack`은 `axis=1`으로 `concatenate`를 호출하는 것과 같습니다. `stack``stack` 함수는 새로운 축을 따라 배열을 쌓습니다. 모든 배열은 같은 크기를 가져야 합니다.
###Code
q8 = np.stack((q1, q3))
q8
q8.shape
###Output
_____no_output_____
###Markdown
배열 분할분할은 쌓기의 반대입니다. 예를 들어 `vsplit` 함수는 행렬을 수직으로 분할합니다.먼저 6x4 행렬을 만들어 보죠:
###Code
r = np.arange(24).reshape(6,4)
r
###Output
_____no_output_____
###Markdown
수직으로 동일한 크기로 나누어 보겠습니다:
###Code
r1, r2, r3 = np.vsplit(r, 3)
r1
r2
r3
###Output
_____no_output_____
###Markdown
`split` 함수는 주어진 축을 따라 배열을 분할합니다. `vsplit`는 `axis=0`으로 `split`를 호출하는 것과 같습니다. `hsplit` 함수는 `axis=1`로 `split`를 호출하는 것과 같습니다:
###Code
r4, r5 = np.hsplit(r, 2)
r4
r5
###Output
_____no_output_____
###Markdown
배열 전치`transpose` 메서드는 주어진 순서대로 축을 뒤바꾸어 `ndarray` 데이터에 대한 새로운 뷰를 만듭니다.예를 위해 3D 배열을 만들어 보죠:
###Code
t = np.arange(24).reshape(4,2,3)
t
###Output
_____no_output_____
###Markdown
`0, 1, 2`(깊이, 높이, 너비) 축을 `1, 2, 0` (깊이→너비, 높이→깊이, 너비→높이) 순서로 바꾼 `ndarray`를 만들어 보겠습니다:
###Code
t1 = t.transpose((1,2,0))
t1
t1.shape
###Output
_____no_output_____
###Markdown
`transpose` 기본값은 차원의 순서를 역전시킵니다:
###Code
t2 = t.transpose() # t.transpose((2, 1, 0))와 동일
t2
t2.shape
###Output
_____no_output_____
###Markdown
넘파이는 두 축을 바꾸는 `swapaxes` 함수를 제공합니다. 예를 들어 깊이와 높이를 뒤바꾸어 `t`의 새로운 뷰를 만들어 보죠:
###Code
t3 = t.swapaxes(0,1) # t.transpose((1, 0, 2))와 동일
t3
t3.shape
###Output
_____no_output_____
###Markdown
선형 대수학넘파이 2D 배열을 사용하면 파이썬에서 행렬을 효율적으로 표현할 수 있습니다. 주요 행렬 연산을 간단히 둘러 보겠습니다. 선형 대수학, 벡터와 행렬에 관한 자세한 내용은 [Linear Algebra tutorial](math_linear_algebra.ipynb)를 참고하세요. 행렬 전치`T` 속성은 랭크가 2보다 크거나 같을 때 `transpose()`를 호출하는 것과 같습니다:
###Code
m1 = np.arange(10).reshape(2,5)
m1
m1.T
###Output
_____no_output_____
###Markdown
`T` 속성은 랭크가 0이거나 1인 배열에는 아무런 영향을 미치지 않습니다:
###Code
m2 = np.arange(5)
m2
m2.T
###Output
_____no_output_____
###Markdown
먼저 1D 배열을 하나의 행이 있는 행렬(2D)로 바꾼다음 전치를 수행할 수 있습니다:
###Code
m2r = m2.reshape(1,5)
m2r
m2r.T
###Output
_____no_output_____
###Markdown
행렬 곱셈두 개의 행렬을 만들어 `dot` 메서드로 행렬 [곱셈](https://ko.wikipedia.org/wiki/%ED%96%89%EB%A0%AC_%EA%B3%B1%EC%85%88)을 실행해 보죠.
###Code
n1 = np.arange(10).reshape(2, 5)
n1
n2 = np.arange(15).reshape(5,3)
n2
n1.dot(n2)
###Output
_____no_output_____
###Markdown
**주의**: 앞서 언급한 것처럼 `n1*n2`는 행렬 곱셈이 아니라 원소별 곱셈(또는 [아다마르 곱](https://ko.wikipedia.org/wiki/%EC%95%84%EB%8B%A4%EB%A7%88%EB%A5%B4_%EA%B3%B1)이라 부릅니다)입니다. 역행렬과 유사 역행렬`numpy.linalg` 모듈 안에 많은 선형 대수 함수들이 있습니다. 특히 `inv` 함수는 정방 행렬의 역행렬을 계산합니다:
###Code
import numpy.linalg as linalg
m3 = np.array([[1,2,3],[5,7,11],[21,29,31]])
m3
linalg.inv(m3)
###Output
_____no_output_____
###Markdown
`pinv` 함수를 사용하여 [유사 역행렬](https://en.wikipedia.org/wiki/Moore%E2%80%93Penrose_pseudoinverse)을 계산할 수도 있습니다:
###Code
linalg.pinv(m3)
###Output
_____no_output_____
###Markdown
단위 행렬행렬과 그 행렬의 역행렬을 곱하면 단위 행렬이 됩니다(작은 소숫점 오차가 있습니다):
###Code
m3.dot(linalg.inv(m3))
###Output
_____no_output_____
###Markdown
`eye` 함수는 NxN 크기의 단위 행렬을 만듭니다:
###Code
np.eye(3)
###Output
_____no_output_____
###Markdown
QR 분해`qr` 함수는 행렬을 [QR 분해](https://en.wikipedia.org/wiki/QR_decomposition)합니다:
###Code
q, r = linalg.qr(m3)
q
r
q.dot(r) # q.r는 m3와 같습니다
###Output
_____no_output_____
###Markdown
행렬식`det` 함수는 [행렬식](https://en.wikipedia.org/wiki/Determinant)을 계산합니다:
###Code
linalg.det(m3) # 행렬식 계산
###Output
_____no_output_____
###Markdown
고윳값과 고유벡터`eig` 함수는 정방 행렬의 [고윳값과 고유벡터](https://en.wikipedia.org/wiki/Eigenvalues_and_eigenvectors)를 계산합니다:
###Code
eigenvalues, eigenvectors = linalg.eig(m3)
eigenvalues # λ
eigenvectors # v
m3.dot(eigenvectors) - eigenvalues * eigenvectors # m3.v - λ*v = 0
###Output
_____no_output_____
###Markdown
특잇값 분해`svd` 함수는 행렬을 입력으로 받아 그 행렬의 [특잇값 분해](https://en.wikipedia.org/wiki/Singular_value_decomposition)를 반환합니다:
###Code
m4 = np.array([[1,0,0,0,2], [0,0,3,0,0], [0,0,0,0,0], [0,2,0,0,0]])
m4
U, S_diag, V = linalg.svd(m4)
U
S_diag
###Output
_____no_output_____
###Markdown
`svd` 함수는 Σ의 대각 원소 값만 반환합니다. 전체 Σ 행렬은 다음과 같이 만듭니다:
###Code
S = np.zeros((4, 5))
S[np.diag_indices(4)] = S_diag
S # Σ
V
U.dot(S).dot(V) # U.Σ.V == m4
###Output
_____no_output_____
###Markdown
대각원소와 대각합
###Code
np.diag(m3) # m3의 대각 원소입니다(왼쪽 위에서 오른쪽 아래)
np.trace(m3) # np.diag(m3).sum()와 같습니다
###Output
_____no_output_____
###Markdown
선형 방정식 풀기 `solve` 함수는 다음과 같은 선형 방정식을 풉니다:* $2x + 6y = 6$* $5x + 3y = -9$
###Code
coeffs = np.array([[2, 6], [5, 3]])
depvars = np.array([6, -9])
solution = linalg.solve(coeffs, depvars)
solution
###Output
_____no_output_____
###Markdown
solution을 확인해 보죠:
###Code
coeffs.dot(solution), depvars # 네 같네요
###Output
_____no_output_____
###Markdown
좋습니다! 다른 방식으로도 solution을 확인해 보죠:
###Code
np.allclose(coeffs.dot(solution), depvars)
###Output
_____no_output_____
###Markdown
벡터화한 번에 하나씩 개별 배열 원소에 대해 연산을 실행하는 대신 배열 연산을 사용하면 훨씬 효율적인 코드를 만들 수 있습니다. 이를 벡터화라고 합니다. 이를 사용하여 넘파이의 최적화된 성능을 활용할 수 있습니다.예를 들어, $sin(xy/40.5)$ 식을 기반으로 768x1024 크기 배열을 생성하려고 합니다. 중첩 반복문 안에 파이썬의 math 함수를 사용하는 것은 **나쁜** 방법입니다:
###Code
import math
data = np.empty((768, 1024))
for y in range(768):
for x in range(1024):
data[y, x] = math.sin(x*y/40.5) # 매우 비효율적입니다!
###Output
_____no_output_____
###Markdown
작동은 하지만 순수한 파이썬 코드로 반복문이 진행되기 때문에 아주 비효율적입니다. 이 알고리즘을 벡터화해 보죠. 먼저 넘파이 `meshgrid` 함수로 좌표 벡터를 사용해 행렬을 만듭니다.
###Code
x_coords = np.arange(0, 1024) # [0, 1, 2, ..., 1023]
y_coords = np.arange(0, 768) # [0, 1, 2, ..., 767]
X, Y = np.meshgrid(x_coords, y_coords)
X
Y
###Output
_____no_output_____
###Markdown
여기서 볼 수 있듯이 `X`와 `Y` 모두 768x1024 배열입니다. `X`에 있는 모든 값은 수평 좌표에 해당합니다. `Y`에 있는 모든 값은 수직 좌표에 해당합니다.이제 간단히 배열 연산을 사용해 계산할 수 있습니다:
###Code
data = np.sin(X*Y/40.5)
###Output
_____no_output_____
###Markdown
맷플롯립의 `imshow` 함수를 사용해 이 데이터를 그려보죠([matplotlib tutorial](tools_matplotlib.ipynb)을 참조하세요).
###Code
import matplotlib.pyplot as plt
import matplotlib.cm as cm
fig = plt.figure(1, figsize=(7, 6))
plt.imshow(data, cmap=cm.hot)
plt.show()
###Output
_____no_output_____
###Markdown
저장과 로딩넘파이는 `ndarray`를 바이너리 또는 텍스트 포맷으로 손쉽게 저장하고 로드할 수 있습니다. 바이너리 `.npy` 포맷랜덤 배열을 만들고 저장해 보죠.
###Code
a = np.random.rand(2,3)
a
np.save("my_array", a)
###Output
_____no_output_____
###Markdown
끝입니다! 파일 이름의 확장자를 지정하지 않았기 때문에 넘파이는 자동으로 `.npy`를 붙입니다. 파일 내용을 확인해 보겠습니다:
###Code
with open("my_array.npy", "rb") as f:
content = f.read()
content
###Output
_____no_output_____
###Markdown
이 파일을 넘파이 배열로 로드하려면 `load` 함수를 사용합니다:
###Code
a_loaded = np.load("my_array.npy")
a_loaded
###Output
_____no_output_____
###Markdown
텍스트 포맷배열을 텍스트 포맷으로 저장해 보죠:
###Code
np.savetxt("my_array.csv", a)
###Output
_____no_output_____
###Markdown
파일 내용을 확인해 보겠습니다:
###Code
with open("my_array.csv", "rt") as f:
print(f.read())
###Output
5.435937959464737235e-01 9.288630656918674955e-01 1.535157809943688001e-02
4.157283012656532994e-01 9.102126992826775620e-01 5.512970782648904944e-01
###Markdown
이 파일은 탭으로 구분된 CSV 파일입니다. 다른 구분자를 지정할 수도 있습니다:
###Code
np.savetxt("my_array.csv", a, delimiter=",")
###Output
_____no_output_____
###Markdown
이 파일을 로드하려면 `loadtxt` 함수를 사용합니다:
###Code
a_loaded = np.loadtxt("my_array.csv", delimiter=",")
a_loaded
###Output
_____no_output_____
###Markdown
압축된 `.npz` 포맷여러 개의 배열을 압축된 한 파일로 저장하는 것도 가능합니다:
###Code
b = np.arange(24, dtype=np.uint8).reshape(2, 3, 4)
b
np.savez("my_arrays", my_a=a, my_b=b)
###Output
_____no_output_____
###Markdown
파일 내용을 확인해 보죠. `.npz` 파일 확장자가 자동으로 추가되었습니다.
###Code
with open("my_arrays.npz", "rb") as f:
content = f.read()
repr(content)[:180] + "[...]"
###Output
_____no_output_____
###Markdown
다음과 같이 이 파일을 로드할 수 있습니다:
###Code
my_arrays = np.load("my_arrays.npz")
my_arrays
###Output
_____no_output_____
###Markdown
게으른 로딩을 수행하는 딕셔너리와 유사한 객체입니다:
###Code
my_arrays.keys()
my_arrays["my_a"]
###Output
_____no_output_____
###Markdown
"Numpy 기본"> "numpy 기본 코드 실습(한글)"- toc:true- branch: master- badges: true- comments: true- author: Jiho Yeo- categories: [jupyter, python] **도구 - 넘파이(NumPy)***넘파이(NumPy)는 파이썬의 과학 컴퓨팅을 위한 기본 라이브러리입니다. 넘파이의 핵심은 강력한 N-차원 배열 객체입니다. 또한 선형 대수, 푸리에(Fourier) 변환, 유사 난수 생성과 같은 유용한 함수들도 제공합니다." 구글 코랩에서 실행하기 배열 생성 `numpy`를 임포트해 보죠. 대부분의 사람들이 `np`로 알리아싱하여 임포트합니다:
###Code
import numpy as np
###Output
_____no_output_____
###Markdown
`np.zeros` `zeros` 함수는 0으로 채워진 배열을 만듭니다:
###Code
np.zeros(5)
###Output
_____no_output_____
###Markdown
2D 배열(즉, 행렬)을 만들려면 원하는 행과 열의 크기를 튜플로 전달합니다. 예를 들어 다음은 $3 \times 4$ 크기의 행렬입니다:
###Code
np.zeros((3,4))
###Output
_____no_output_____
###Markdown
용어* 넘파이에서 각 차원을 **축**(axis) 이라고 합니다* 축의 개수를 **랭크**(rank) 라고 합니다. * 예를 들어, 위의 $3 \times 4$ 행렬은 랭크 2인 배열입니다(즉 2차원입니다). * 첫 번째 축의 길이는 3이고 두 번째 축의 길이는 4입니다.* 배열의 축 길이를 배열의 **크기**(shape)라고 합니다. * 예를 들어, 위 행렬의 크기는 `(3, 4)`입니다. * 랭크는 크기의 길이와 같습니다.* 배열의 **사이즈**(size)는 전체 원소의 개수입니다. 축의 길이를 모두 곱해서 구할 수 있습니다(가령, $3 \times 4=12$).
###Code
a = np.zeros((3,4))
a
a.shape
a.ndim # len(a.shape)와 같습니다
a.size
###Output
_____no_output_____
###Markdown
N-차원 배열임의의 랭크 수를 가진 N-차원 배열을 만들 수 있습니다. 예를 들어, 다음은 크기가 `(2,3,4)`인 3D 배열(랭크=3)입니다:
###Code
np.zeros((2,2,5))
###Output
_____no_output_____
###Markdown
배열 타입넘파이 배열의 타입은 `ndarray`입니다:
###Code
type(np.zeros((3,4)))
###Output
_____no_output_____
###Markdown
`np.ones``ndarray`를 만들 수 있는 넘파이 함수가 많습니다.다음은 1로 채워진 $3 \times 4$ 크기의 행렬입니다:
###Code
np.ones((3,4))
###Output
_____no_output_____
###Markdown
`np.full`주어진 값으로 지정된 크기의 배열을 초기화합니다. 다음은 `π`로 채워진 $3 \times 4$ 크기의 행렬입니다.
###Code
np.full((3,4), np.pi)
###Output
_____no_output_____
###Markdown
`np.empty`초기화되지 않은 $2 \times 3$ 크기의 배열을 만듭니다(배열의 내용은 예측이 불가능하며 메모리 상황에 따라 달라집니다):
###Code
np.empty((2,3))
###Output
_____no_output_____
###Markdown
np.array`array` 함수는 파이썬 리스트를 사용하여 `ndarray`를 초기화합니다:
###Code
np.array([[1,2,3,4], [10, 20, 30, 40]])
###Output
_____no_output_____
###Markdown
`np.arange`파이썬의 기본 `range` 함수와 비슷한 넘파이 `arange` 함수를 사용하여 `ndarray`를 만들 수 있습니다:
###Code
np.arange(1, 5)
###Output
_____no_output_____
###Markdown
부동 소수도 가능합니다:
###Code
np.arange(1.0, 5.0)
###Output
_____no_output_____
###Markdown
파이썬의 기본 `range` 함수처럼 건너 뛰는 정도를 지정할 수 있습니다:
###Code
np.arange(1, 5, 0.5)
###Output
_____no_output_____
###Markdown
부동 소수를 사용하면 원소의 개수가 일정하지 않을 수 있습니다. 예를 들면 다음과 같습니다:
###Code
print(np.arange(0, 5/3, 1/3)) # 부동 소수 오차 때문에, 최댓값은 4/3 또는 5/3이 됩니다.
print(np.arange(0, 5/3, 0.333333333))
print(np.arange(0, 5/3, 0.333333334))
###Output
[0. 0.33333333 0.66666667 1. 1.33333333 1.66666667]
[0. 0.33333333 0.66666667 1. 1.33333333 1.66666667]
[0. 0.33333333 0.66666667 1. 1.33333334]
###Markdown
`np.linspace`이런 이유로 부동 소수를 사용할 땐 `arange` 대신에 `linspace` 함수를 사용하는 것이 좋습니다. `linspace` 함수는 지정된 개수만큼 두 값 사이를 나눈 배열을 반환합니다(`arange`와는 다르게 최댓값이 **포함**됩니다):
###Code
print(np.linspace(0, 5/3, 6))
###Output
[0. 0.33333333 0.66666667 1. 1.33333333 1.66666667]
###Markdown
`np.rand`와 `np.randn`넘파이의 `random` 모듈에는 `ndarray`를 랜덤한 값으로 초기화할 수 있는 함수들이 많이 있습니다.예를 들어, 다음은 (균등 분포인) 0과 1사이의 랜덤한 부동 소수로 $3 \times 4$ 행렬을 초기화합니다:
###Code
np.random.rand(3,4)
###Output
_____no_output_____
###Markdown
다음은 평균이 0이고 분산이 1인 일변량 [정규 분포](https://ko.wikipedia.org/wiki/%EC%A0%95%EA%B7%9C_%EB%B6%84%ED%8F%AC)(가우시안 분포)에서 샘플링한 랜덤한 부동 소수를 담은 $3 \times 4$ 행렬입니다:
###Code
np.random.randn(3,4)
###Output
_____no_output_____
###Markdown
이 분포의 모양을 알려면 맷플롯립을 사용해 그려보는 것이 좋습니다(더 자세한 것은 [맷플롯립 튜토리얼](tools_matplotlib.ipynb)을 참고하세요):
###Code
%matplotlib inline
import matplotlib.pyplot as plt
plt.hist(np.random.rand(100000), density=True, bins=100, histtype="step", color="blue", label="rand")
plt.hist(np.random.randn(100000), density=True, bins=100, histtype="step", color="red", label="randn")
plt.axis([-2.5, 2.5, 0, 1.1])
plt.legend(loc = "upper left")
plt.title("Random distributions")
plt.xlabel("Value")
plt.ylabel("Density")
plt.show()
###Output
_____no_output_____
###Markdown
np.fromfunction함수를 사용하여 `ndarray`를 초기화할 수도 있습니다:
###Code
def my_function(z, y, x):
return x + 10 * y + 100 * z
np.fromfunction(my_function, (3, 2, 10))
###Output
_____no_output_____
###Markdown
넘파이는 먼저 크기가 `(3, 2, 10)`인 세 개의 `ndarray`(차원마다 하나씩)를 만듭니다. 각 배열은 축을 따라 좌표 값과 같은 값을 가집니다. 예를 들어, `z` 축에 있는 배열의 모든 원소는 z-축의 값과 같습니다: [[[ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.] [ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]] [[ 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.] [ 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.]] [[ 2. 2. 2. 2. 2. 2. 2. 2. 2. 2.] [ 2. 2. 2. 2. 2. 2. 2. 2. 2. 2.]]]위의 식 `x + 10 * y + 100 * z`에서 `x`, `y`, `z`는 사실 `ndarray`입니다(배열의 산술 연산에 대해서는 아래에서 설명합니다). 중요한 점은 함수 `my_function`이 원소마다 호출되는 것이 아니고 딱 **한 번** 호출된다는 점입니다. 그래서 매우 효율적으로 초기화할 수 있습니다. 배열 데이터 `dtype`넘파이의 `ndarray`는 모든 원소가 동일한 타입(보통 숫자)을 가지기 때문에 효율적입니다. `dtype` 속성으로 쉽게 데이터 타입을 확인할 수 있습니다:
###Code
c = np.arange(1, 5)
print(c.dtype, c)
c = np.arange(1.0, 5.0)
print(c.dtype, c)
###Output
float64 [1. 2. 3. 4.]
###Markdown
넘파이가 데이터 타입을 결정하도록 내버려 두는 대신 `dtype` 매개변수를 사용해서 배열을 만들 때 명시적으로 지정할 수 있습니다:
###Code
d = np.arange(1, 5, dtype=np.complex64)
print(d.dtype, d)
###Output
complex64 [1.+0.j 2.+0.j 3.+0.j 4.+0.j]
###Markdown
가능한 데이터 타입은 `int8`, `int16`, `int32`, `int64`, `uint8`|`16`|`32`|`64`, `float16`|`32`|`64`, `complex64`|`128`가 있습니다. 전체 리스트는 [온라인 문서](http://docs.scipy.org/doc/numpy/user/basics.types.html)를 참고하세요. `itemsize``itemsize` 속성은 각 아이템의 크기(바이트)를 반환합니다:
###Code
e = np.arange(1, 5, dtype=np.complex64)
e.itemsize
###Output
_____no_output_____
###Markdown
`data` 버퍼배열의 데이터는 1차원 바이트 버퍼로 메모리에 저장됩니다. `data` 속성을 사용해 참조할 수 있습니다(사용할 일은 거의 없겠지만요).
###Code
f = np.array([[1,2],[1000, 2000]], dtype=np.int32)
f.data
###Output
_____no_output_____
###Markdown
파이썬 2에서는 `f.data`가 버퍼이고 파이썬 3에서는 memoryview입니다.
###Code
if (hasattr(f.data, "tobytes")):
data_bytes = f.data.tobytes() # python 3
else:
data_bytes = memoryview(f.data).tobytes() # python 2
data_bytes
###Output
_____no_output_____
###Markdown
여러 개의 `ndarray`가 데이터 버퍼를 공유할 수 있습니다. 하나를 수정하면 다른 것도 바뀝니다. 잠시 후에 예를 살펴 보겠습니다. 배열 크기 변경 자신을 변경`ndarray`의 `shape` 속성을 지정하면 간단히 크기를 바꿀 수 있습니다. 배열의 원소 개수는 동일하게 유지됩니다.
###Code
g = np.arange(24)
print(g)
print("랭크:", g.ndim)
g.shape = (6, 4)
print(g)
print("랭크:", g.ndim)
g.shape = (2, 3, 4)
print(g)
print("랭크:", g.ndim)
###Output
[[[ 0 1 2 3]
[ 4 5 6 7]
[ 8 9 10 11]]
[[12 13 14 15]
[16 17 18 19]
[20 21 22 23]]]
랭크: 3
###Markdown
`reshape``reshape` 함수는 동일한 데이터를 가리키는 새로운 `ndarray` 객체를 반환합니다. 한 배열을 수정하면 다른 것도 함께 바뀝니다.
###Code
g2 = g.reshape(4,6)
print(g2)
print("랭크:", g2.ndim)
###Output
[[ 0 1 2 3 4 5]
[ 6 7 8 9 10 11]
[12 13 14 15 16 17]
[18 19 20 21 22 23]]
랭크: 2
###Markdown
행 1, 열 2의 원소를 999로 설정합니다(인덱싱 방식은 아래를 참고하세요).
###Code
g2[1, 2] = 999
g2
###Output
_____no_output_____
###Markdown
이에 상응하는 `g`의 원소도 수정됩니다.
###Code
g
###Output
_____no_output_____
###Markdown
`ravel`마지막으로 `ravel` 함수는 동일한 데이터를 가리키는 새로운 1차원 `ndarray`를 반환합니다:
###Code
g.ravel()
###Output
_____no_output_____
###Markdown
산술 연산일반적인 산술 연산자(`+`, `-`, `*`, `/`, `//`, `**` 등)는 모두 `ndarray`와 사용할 수 있습니다. 이 연산자는 원소별로 적용됩니다:
###Code
a = np.array([14, 23, 32, 41])
b = np.array([5, 4, 3, 2])
print("a + b =", a + b)
print("a - b =", a - b)
print("a * b =", a * b)
print("a / b =", a / b)
print("a // b =", a // b)
print("a % b =", a % b)
print("a ** b =", a ** b)
###Output
a + b = [19 27 35 43]
a - b = [ 9 19 29 39]
a * b = [70 92 96 82]
a / b = [ 2.8 5.75 10.66666667 20.5 ]
a // b = [ 2 5 10 20]
a % b = [4 3 2 1]
a ** b = [537824 279841 32768 1681]
###Markdown
여기 곱셈은 행렬 곱셈이 아닙니다. 행렬 연산은 아래에서 설명합니다.배열의 크기는 같아야 합니다. 그렇지 않으면 넘파이가 브로드캐스팅 규칙을 적용합니다. 브로드캐스팅 일반적으로 넘파이는 동일한 크기의 배열을 기대합니다. 그렇지 않은 상황에는 브로드캐시틍 규칙을 적용합니다: 규칙 1배열의 랭크가 동일하지 않으면 랭크가 맞을 때까지 랭크가 작은 배열 앞에 1을 추가합니다.
###Code
h = np.arange(5).reshape(1, 1, 5)
h
###Output
_____no_output_____
###Markdown
여기에 `(1,1,5)` 크기의 3D 배열에 `(5,)` 크기의 1D 배열을 더해 보죠. 브로드캐스팅의 규칙 1이 적용됩니다!
###Code
h + [10, 20, 30, 40, 50] # 다음과 동일합니다: h + [[[10, 20, 30, 40, 50]]]
###Output
_____no_output_____
###Markdown
규칙 2특정 차원이 1인 배열은 그 차원에서 크기가 가장 큰 배열의 크기에 맞춰 동작합니다. 배열의 원소가 차원을 따라 반복됩니다.
###Code
k = np.arange(6).reshape(2, 3)
k
###Output
_____no_output_____
###Markdown
`(2,3)` 크기의 2D `ndarray`에 `(2,1)` 크기의 2D 배열을 더해 보죠. 넘파이는 브로드캐스팅 규칙 2를 적용합니다:
###Code
k + [[100], [200]] # 다음과 같습니다: k + [[100, 100, 100], [200, 200, 200]]
###Output
_____no_output_____
###Markdown
규칙 1과 2를 합치면 다음과 같이 동작합니다:
###Code
k + [100, 200, 300] # 규칙 1 적용: [[100, 200, 300]], 규칙 2 적용: [[100, 200, 300], [100, 200, 300]]
###Output
_____no_output_____
###Markdown
또 매우 간단히 다음 처럼 해도 됩니다:
###Code
k + 1000 # 다음과 같습니다: k + [[1000, 1000, 1000], [1000, 1000, 1000]]
###Output
_____no_output_____
###Markdown
규칙 3규칙 1 & 2을 적용했을 때 모든 배열의 크기가 맞아야 합니다.
###Code
try:
k + [33, 44]
except ValueError as e:
print(e)
###Output
operands could not be broadcast together with shapes (2,3) (2,)
###Markdown
브로드캐스팅 규칙은 산술 연산 뿐만 아니라 넘파이 연산에서 많이 사용됩니다. 아래에서 더 보도록 하죠. 브로드캐스팅에 관한 더 자세한 정보는 [온라인 문서](https://docs.scipy.org/doc/numpy-dev/user/basics.broadcasting.html)를 참고하세요. 업캐스팅`dtype`이 다른 배열을 합칠 때 넘파이는 (실제 값에 상관없이) 모든 값을 다룰 수 있는 타입으로 업캐스팅합니다.
###Code
k1 = np.arange(0, 5, dtype=np.uint8)
print(k1.dtype, k1)
k2 = k1 + np.array([5, 6, 7, 8, 9], dtype=np.int8)
print(k2.dtype, k2)
###Output
int16 [ 5 7 9 11 13]
###Markdown
모든 `int8`과 `uint8` 값(-128에서 255까지)을 표현하기 위해 `int16`이 필요합니다. 이 코드에서는 `uint8`이면 충분하지만 업캐스팅되었습니다.
###Code
k3 = k1 + 1.5
print(k3.dtype, k3)
###Output
float64 [1.5 2.5 3.5 4.5 5.5]
###Markdown
조건 연산자 조건 연산자도 원소별로 적용됩니다:
###Code
m = np.array([20, -5, 30, 40])
m < [15, 16, 35, 36]
###Output
_____no_output_____
###Markdown
브로드캐스팅을 사용합니다:
###Code
m < 25 # m < [25, 25, 25, 25] 와 동일
###Output
_____no_output_____
###Markdown
불리언 인덱싱과 함께 사용하면 아주 유용합니다(아래에서 설명하겠습니다).
###Code
m[m < 25]
###Output
_____no_output_____
###Markdown
수학 함수와 통계 함수 `ndarray`에서 사용할 수 있는 수학 함수와 통계 함수가 많습니다. `ndarray` 메서드일부 함수는 `ndarray` 메서드로 제공됩니다. 예를 들면:
###Code
a = np.array([[-2.5, 3.1, 7], [10, 11, 12]])
print(a)
print("평균 =", a.mean())
###Output
[[-2.5 3.1 7. ]
[10. 11. 12. ]]
평균 = 6.766666666666667
###Markdown
이 명령은 크기에 상관없이 `ndarray`에 있는 모든 원소의 평균을 계산합니다.다음은 유용한 `ndarray` 메서드입니다:
###Code
for func in (a.min, a.max, a.sum, a.prod, a.std, a.var):
print(func.__name__, "=", func())
###Output
min = -2.5
max = 12.0
sum = 40.6
prod = -71610.0
std = 5.084835843520964
var = 25.855555555555554
###Markdown
이 함수들은 선택적으로 매개변수 `axis`를 사용합니다. 지정된 축을 따라 원소에 연산을 적용하는데 사용합니다. 예를 들면:
###Code
c=np.arange(24).reshape(2,3,4)
c
c.sum(axis=0) # 첫 번째 축을 따라 더함, 결과는 3x4 배열
c.sum(axis=1) # 두 번째 축을 따라 더함, 결과는 2x4 배열
###Output
_____no_output_____
###Markdown
여러 축에 대해서 더할 수도 있습니다:
###Code
c.sum(axis=(0,2)) # 첫 번째 축과 세 번째 축을 따라 더함, 결과는 (3,) 배열
0+1+2+3 + 12+13+14+15, 4+5+6+7 + 16+17+18+19, 8+9+10+11 + 20+21+22+23
###Output
_____no_output_____
###Markdown
일반 함수넘파이는 일반 함수(universal function) 또는 **ufunc**라고 부르는 원소별 함수를 제공합니다. 예를 들면 `square` 함수는 원본 `ndarray`를 복사하여 각 원소를 제곱한 새로운 `ndarray` 객체를 반환합니다:
###Code
a = np.array([[-2.5, 3.1, 7], [10, 11, 12]])
np.square(a)
###Output
_____no_output_____
###Markdown
다음은 유용한 단항 일반 함수들입니다:
###Code
print("원본 ndarray")
print(a)
for func in (np.abs, np.sqrt, np.exp, np.log, np.sign, np.ceil, np.modf, np.isnan, np.cos):
print("\n", func.__name__)
print(func(a))
###Output
원본 ndarray
[[-2.5 3.1 7. ]
[10. 11. 12. ]]
absolute
[[ 2.5 3.1 7. ]
[10. 11. 12. ]]
sqrt
[[ nan 1.76068169 2.64575131]
[3.16227766 3.31662479 3.46410162]]
exp
[[8.20849986e-02 2.21979513e+01 1.09663316e+03]
[2.20264658e+04 5.98741417e+04 1.62754791e+05]]
log
[[ nan 1.13140211 1.94591015]
[2.30258509 2.39789527 2.48490665]]
sign
[[-1. 1. 1.]
[ 1. 1. 1.]]
ceil
[[-2. 4. 7.]
[10. 11. 12.]]
modf
(array([[-0.5, 0.1, 0. ],
[ 0. , 0. , 0. ]]), array([[-2., 3., 7.],
[10., 11., 12.]]))
isnan
[[False False False]
[False False False]]
cos
[[-0.80114362 -0.99913515 0.75390225]
[-0.83907153 0.0044257 0.84385396]]
###Markdown
이항 일반 함수두 개의 `ndarray`에 원소별로 적용되는 이항 함수도 많습니다. 두 배열이 동일한 크기가 아니면 브로드캐스팅 규칙이 적용됩니다:
###Code
a = np.array([1, -2, 3, 4])
b = np.array([2, 8, -1, 7])
np.add(a, b) # a + b 와 동일
np.greater(a, b) # a > b 와 동일
np.maximum(a, b)
np.copysign(a, b)
###Output
_____no_output_____
###Markdown
배열 인덱싱 1차원 배열1차원 넘파이 배열은 보통의 파이썬 배열과 비슷하게 사용할 수 있습니다:
###Code
a = np.array([1, 5, 3, 19, 13, 7, 3])
a[3]
a[2:5]
a[2:-1]
a[:2]
a[2::2]
a[::-1]
###Output
_____no_output_____
###Markdown
물론 원소를 수정할 수 있죠:
###Code
a[3]=999
a
###Output
_____no_output_____
###Markdown
슬라이싱을 사용해 `ndarray`를 수정할 수 있습니다:
###Code
a[2:5] = [997, 998, 999]
a
###Output
_____no_output_____
###Markdown
보통의 파이썬 배열과 차이점보통의 파이썬 배열과 대조적으로 `ndarray` 슬라이싱에 하나의 값을 할당하면 슬라이싱 전체에 복사됩니다. 위에서 언급한 브로드캐스팅 덕택입니다.
###Code
a[2:5] = -1
a
###Output
_____no_output_____
###Markdown
또한 이런 식으로 `ndarray` 크기를 늘리거나 줄일 수 없습니다:
###Code
try:
a[2:5] = [1,2,3,4,5,6] # 너무 길어요
except ValueError as e:
print(e)
###Output
cannot copy sequence with size 6 to array axis with dimension 3
###Markdown
원소를 삭제할 수도 없습니다:
###Code
try:
del a[2:5]
except ValueError as e:
print(e)
###Output
cannot delete array elements
###Markdown
중요한 점은 `ndarray`의 슬라이싱은 같은 데이터 버퍼를 바라보는 뷰(view)입니다. 슬라이싱된 객체를 수정하면 실제 원본 `ndarray`가 수정됩니다!
###Code
a_slice = a[2:6]
a_slice[1] = 1000
a # 원본 배열이 수정됩니다!
a[3] = 2000
a_slice # 비슷하게 원본 배열을 수정하면 슬라이싱 객체에도 반영됩니다!
###Output
_____no_output_____
###Markdown
데이터를 복사하려면 `copy` 메서드를 사용해야 합니다:
###Code
another_slice = a[2:6].copy()
another_slice[1] = 3000
a # 원본 배열이 수정되지 않습니다
a[3] = 4000
another_slice # 마찬가지로 원본 배열을 수정해도 복사된 배열은 바뀌지 않습니다
###Output
_____no_output_____
###Markdown
다차원 배열다차원 배열은 비슷한 방식으로 각 축을 따라 인덱싱 또는 슬라이싱해서 사용합니다. 콤마로 구분합니다:
###Code
b = np.arange(48).reshape(4, 12)
b
b[1, 2] # 행 1, 열 2
b[1, :] # 행 1, 모든 열
b[:, 1] # 모든 행, 열 1
###Output
_____no_output_____
###Markdown
**주의**: 다음 두 표현에는 미묘한 차이가 있습니다:
###Code
b[1, :]
b[1:2, :]
###Output
_____no_output_____
###Markdown
첫 번째 표현식은 `(12,)` 크기인 1D 배열로 행이 하나입니다. 두 번째는 `(1, 12)` 크기인 2D 배열로 같은 행을 반환합니다. 팬시 인덱싱(Fancy indexing)관심 대상의 인덱스 리스트를 지정할 수도 있습니다. 이를 팬시 인덱싱이라고 부릅니다.
###Code
b[(0,2), 2:5] # 행 0과 2, 열 2에서 4(5-1)까지
b[:, (-1, 2, -1)] # 모든 행, 열 -1 (마지막), 2와 -1 (다시 반대 방향으로)
###Output
_____no_output_____
###Markdown
여러 개의 인덱스 리스트를 지정하면 인덱스에 맞는 값이 포함된 1D `ndarray`를 반환됩니다.
###Code
b[(-1, 2, -1, 2), (5, 9, 1, 9)] # returns a 1D array with b[-1, 5], b[2, 9], b[-1, 1] and b[2, 9] (again)
###Output
_____no_output_____
###Markdown
고차원고차원에서도 동일한 방식이 적용됩니다. 몇 가지 예를 살펴 보겠습니다:
###Code
c = b.reshape(4,2,6)
c
c[2, 1, 4] # 행렬 2, 행 1, 열 4
c[2, :, 3] # 행렬 2, 모든 행, 열 3
###Output
_____no_output_____
###Markdown
어떤 축에 대한 인덱스를 지정하지 않으면 이 축의 모든 원소가 반환됩니다:
###Code
c[2, 1] # 행렬 2, 행 1, 모든 열이 반환됩니다. c[2, 1, :]와 동일합니다.
###Output
_____no_output_____
###Markdown
생략 부호 (`...`)생략 부호(`...`)를 쓰면 모든 지정하지 않은 축의 원소를 포함합니다.
###Code
c[2, ...] # 행렬 2, 모든 행, 모든 열. c[2, :, :]와 동일
c[2, 1, ...] # 행렬 2, 행 1, 모든 열. c[2, 1, :]와 동일
c[2, ..., 3] # 행렬 2, 모든 행, 열 3. c[2, :, 3]와 동일
c[..., 3] # 모든 행렬, 모든 행, 열 3. c[:, :, 3]와 동일
###Output
_____no_output_____
###Markdown
불리언 인덱싱불리언 값을 가진 `ndarray`를 사용해 축의 인덱스를 지정할 수 있습니다.
###Code
b = np.arange(48).reshape(4, 12)
b
rows_on = np.array([True, False, True, False])
b[rows_on, :] # 행 0과 2, 모든 열. b[(0, 2), :]와 동일
cols_on = np.array([False, True, False] * 4)
b[:, cols_on] # 모든 행, 열 1, 4, 7, 10
###Output
_____no_output_____
###Markdown
`np.ix_`여러 축에 걸쳐서는 불리언 인덱싱을 사용할 수 없고 `ix_` 함수를 사용합니다:
###Code
b[np.ix_(rows_on, cols_on)]
np.ix_(rows_on, cols_on)
###Output
_____no_output_____
###Markdown
`ndarray`와 같은 크기의 불리언 배열을 사용하면 해당 위치가 `True`인 모든 원소를 담은 1D 배열이 반환됩니다. 일반적으로 조건 연산자와 함께 사용합니다:
###Code
b[b % 3 == 1]
###Output
_____no_output_____
###Markdown
반복`ndarray`를 반복하는 것은 일반적인 파이썬 배열을 반복한는 것과 매우 유사합니다. 다차원 배열을 반복하면 첫 번째 축에 대해서 수행됩니다.
###Code
c = np.arange(24).reshape(2, 3, 4) # 3D 배열 (두 개의 3x4 행렬로 구성됨)
c
for m in c:
print("아이템:")
print(m)
for i in range(len(c)): # len(c) == c.shape[0]
print("아이템:")
print(c[i])
###Output
아이템:
[[ 0 1 2 3]
[ 4 5 6 7]
[ 8 9 10 11]]
아이템:
[[12 13 14 15]
[16 17 18 19]
[20 21 22 23]]
###Markdown
`ndarray`에 있는 모든 원소를 반복하려면 `flat` 속성을 사용합니다:
###Code
for i in c.flat:
print("아이템:", i)
###Output
아이템: 0
아이템: 1
아이템: 2
아이템: 3
아이템: 4
아이템: 5
아이템: 6
아이템: 7
아이템: 8
아이템: 9
아이템: 10
아이템: 11
아이템: 12
아이템: 13
아이템: 14
아이템: 15
아이템: 16
아이템: 17
아이템: 18
아이템: 19
아이템: 20
아이템: 21
아이템: 22
아이템: 23
###Markdown
배열 쌓기종종 다른 배열을 쌓아야 할 때가 있습니다. 넘파이는 이를 위해 몇 개의 함수를 제공합니다. 먼저 배열 몇 개를 만들어 보죠.
###Code
q1 = np.full((3,4), 1.0)
q1
q2 = np.full((4,4), 2.0)
q2
q3 = np.full((3,4), 3.0)
q3
###Output
_____no_output_____
###Markdown
`vstack``vstack` 함수를 사용하여 수직으로 쌓아보죠:
###Code
q4 = np.vstack((q1, q2, q3))
q4
q4.shape
###Output
_____no_output_____
###Markdown
q1, q2, q3가 모두 같은 크기이므로 가능합니다(수직으로 쌓기 때문에 수직 축은 크기가 달라도 됩니다). `hstack``hstack`을 사용해 수평으로도 쌓을 수 있습니다:
###Code
q5 = np.hstack((q1, q3))
q5
q5.shape
###Output
_____no_output_____
###Markdown
q1과 q3가 모두 3개의 행을 가지고 있기 때문에 가능합니다. q2는 4개의 행을 가지고 있기 때문에 q1, q3와 수평으로 쌓을 수 없습니다:
###Code
try:
q5 = np.hstack((q1, q2, q3))
except ValueError as e:
print(e)
###Output
all the input array dimensions for the concatenation axis must match exactly, but along dimension 0, the array at index 0 has size 3 and the array at index 1 has size 4
###Markdown
`concatenate``concatenate` 함수는 지정한 축으로도 배열을 쌓습니다.
###Code
q7 = np.concatenate((q1, q2, q3), axis=0) # vstack과 동일
q7
q7.shape
###Output
_____no_output_____
###Markdown
예상했겠지만 `hstack`은 `axis=1`으로 `concatenate`를 호출하는 것과 같습니다. `stack``stack` 함수는 새로운 축을 따라 배열을 쌓습니다. 모든 배열은 같은 크기를 가져야 합니다.
###Code
q8 = np.stack((q1, q3))
q8
q8.shape
###Output
_____no_output_____
###Markdown
배열 분할분할은 쌓기의 반대입니다. 예를 들어 `vsplit` 함수는 행렬을 수직으로 분할합니다.먼저 6x4 행렬을 만들어 보죠:
###Code
r = np.arange(24).reshape(6,4)
r
###Output
_____no_output_____
###Markdown
수직으로 동일한 크기로 나누어 보겠습니다:
###Code
r1, r2, r3 = np.vsplit(r, 3)
r1
r2
r3
###Output
_____no_output_____
###Markdown
`split` 함수는 주어진 축을 따라 배열을 분할합니다. `vsplit`는 `axis=0`으로 `split`를 호출하는 것과 같습니다. `hsplit` 함수는 `axis=1`로 `split`를 호출하는 것과 같습니다:
###Code
r4, r5 = np.hsplit(r, 2)
r4
r5
###Output
_____no_output_____
###Markdown
배열 전치`transpose` 메서드는 주어진 순서대로 축을 뒤바꾸어 `ndarray` 데이터에 대한 새로운 뷰를 만듭니다.예를 위해 3D 배열을 만들어 보죠:
###Code
t = np.arange(24).reshape(4,2,3)
t
###Output
_____no_output_____
###Markdown
`0, 1, 2`(깊이, 높이, 너비) 축을 `1, 2, 0` (깊이→너비, 높이→깊이, 너비→높이) 순서로 바꾼 `ndarray`를 만들어 보겠습니다:
###Code
t1 = t.transpose((1,2,0))
t1
t1.shape
###Output
_____no_output_____
###Markdown
`transpose` 기본값은 차원의 순서를 역전시킵니다:
###Code
t2 = t.transpose() # t.transpose((2, 1, 0))와 동일
t2
t2.shape
###Output
_____no_output_____
###Markdown
넘파이는 두 축을 바꾸는 `swapaxes` 함수를 제공합니다. 예를 들어 깊이와 높이를 뒤바꾸어 `t`의 새로운 뷰를 만들어 보죠:
###Code
t3 = t.swapaxes(0,1) # t.transpose((1, 0, 2))와 동일
t3
t3.shape
###Output
_____no_output_____
###Markdown
선형 대수학넘파이 2D 배열을 사용하면 파이썬에서 행렬을 효율적으로 표현할 수 있습니다. 주요 행렬 연산을 간단히 둘러 보겠습니다. 선형 대수학, 벡터와 행렬에 관한 자세한 내용은 [Linear Algebra tutorial](math_linear_algebra.ipynb)를 참고하세요. 행렬 전치`T` 속성은 랭크가 2보다 크거나 같을 때 `transpose()`를 호출하는 것과 같습니다:
###Code
m1 = np.arange(10).reshape(2,5)
m1
m1.T
###Output
_____no_output_____
###Markdown
`T` 속성은 랭크가 0이거나 1인 배열에는 아무런 영향을 미치지 않습니다:
###Code
m2 = np.arange(5)
m2
m2.T
###Output
_____no_output_____
###Markdown
먼저 1D 배열을 하나의 행이 있는 행렬(2D)로 바꾼다음 전치를 수행할 수 있습니다:
###Code
m2r = m2.reshape(1,5)
m2r
m2r.T
###Output
_____no_output_____
###Markdown
행렬 곱셈두 개의 행렬을 만들어 `dot` 메서드로 행렬 [곱셈](https://ko.wikipedia.org/wiki/%ED%96%89%EB%A0%AC_%EA%B3%B1%EC%85%88)을 실행해 보죠.
###Code
n1 = np.arange(10).reshape(2, 5)
n1
n2 = np.arange(15).reshape(5,3)
n2
n1.dot(n2)
###Output
_____no_output_____
###Markdown
**주의**: 앞서 언급한 것처럼 `n1*n2`는 행렬 곱셈이 아니라 원소별 곱셈(또는 [아다마르 곱](https://ko.wikipedia.org/wiki/%EC%95%84%EB%8B%A4%EB%A7%88%EB%A5%B4_%EA%B3%B1)이라 부릅니다)입니다. 역행렬과 유사 역행렬`numpy.linalg` 모듈 안에 많은 선형 대수 함수들이 있습니다. 특히 `inv` 함수는 정방 행렬의 역행렬을 계산합니다:
###Code
import numpy.linalg as linalg
m3 = np.array([[1,2,3],[5,7,11],[21,29,31]])
m3
linalg.inv(m3)
###Output
_____no_output_____
###Markdown
`pinv` 함수를 사용하여 [유사 역행렬](https://en.wikipedia.org/wiki/Moore%E2%80%93Penrose_pseudoinverse)을 계산할 수도 있습니다:
###Code
linalg.pinv(m3)
###Output
_____no_output_____
###Markdown
단위 행렬행렬과 그 행렬의 역행렬을 곱하면 단위 행렬이 됩니다(작은 소숫점 오차가 있습니다):
###Code
m3.dot(linalg.inv(m3))
###Output
_____no_output_____
###Markdown
`eye` 함수는 NxN 크기의 단위 행렬을 만듭니다:
###Code
np.eye(3)
###Output
_____no_output_____
###Markdown
QR 분해`qr` 함수는 행렬을 [QR 분해](https://en.wikipedia.org/wiki/QR_decomposition)합니다:
###Code
q, r = linalg.qr(m3)
q
r
q.dot(r) # q.r는 m3와 같습니다
###Output
_____no_output_____
###Markdown
행렬식`det` 함수는 [행렬식](https://en.wikipedia.org/wiki/Determinant)을 계산합니다:
###Code
linalg.det(m3) # 행렬식 계산
###Output
_____no_output_____
###Markdown
고윳값과 고유벡터`eig` 함수는 정방 행렬의 [고윳값과 고유벡터](https://en.wikipedia.org/wiki/Eigenvalues_and_eigenvectors)를 계산합니다:
###Code
eigenvalues, eigenvectors = linalg.eig(m3)
eigenvalues # λ
eigenvectors # v
m3.dot(eigenvectors) - eigenvalues * eigenvectors # m3.v - λ*v = 0
###Output
_____no_output_____
###Markdown
특잇값 분해`svd` 함수는 행렬을 입력으로 받아 그 행렬의 [특잇값 분해](https://en.wikipedia.org/wiki/Singular_value_decomposition)를 반환합니다:
###Code
m4 = np.array([[1,0,0,0,2], [0,0,3,0,0], [0,0,0,0,0], [0,2,0,0,0]])
m4
U, S_diag, V = linalg.svd(m4)
U
S_diag
###Output
_____no_output_____
###Markdown
`svd` 함수는 Σ의 대각 원소 값만 반환합니다. 전체 Σ 행렬은 다음과 같이 만듭니다:
###Code
S = np.zeros((4, 5))
S[np.diag_indices(4)] = S_diag
S # Σ
V
U.dot(S).dot(V) # U.Σ.V == m4
###Output
_____no_output_____
###Markdown
대각원소와 대각합
###Code
np.diag(m3) # m3의 대각 원소입니다(왼쪽 위에서 오른쪽 아래)
np.trace(m3) # np.diag(m3).sum()와 같습니다
###Output
_____no_output_____
###Markdown
선형 방정식 풀기 `solve` 함수는 다음과 같은 선형 방정식을 풉니다:* $2x + 6y = 6$* $5x + 3y = -9$
###Code
coeffs = np.array([[2, 6], [5, 3]])
depvars = np.array([6, -9])
solution = linalg.solve(coeffs, depvars)
solution
###Output
_____no_output_____
###Markdown
solution을 확인해 보죠:
###Code
coeffs.dot(solution), depvars # 네 같네요
###Output
_____no_output_____
###Markdown
좋습니다! 다른 방식으로도 solution을 확인해 보죠:
###Code
np.allclose(coeffs.dot(solution), depvars)
###Output
_____no_output_____
###Markdown
벡터화한 번에 하나씩 개별 배열 원소에 대해 연산을 실행하는 대신 배열 연산을 사용하면 훨씬 효율적인 코드를 만들 수 있습니다. 이를 벡터화라고 합니다. 이를 사용하여 넘파이의 최적화된 성능을 활용할 수 있습니다.예를 들어, $sin(xy/40.5)$ 식을 기반으로 768x1024 크기 배열을 생성하려고 합니다. 중첩 반복문 안에 파이썬의 math 함수를 사용하는 것은 **나쁜** 방법입니다:
###Code
import math
data = np.empty((768, 1024))
for y in range(768):
for x in range(1024):
data[y, x] = math.sin(x*y/40.5) # 매우 비효율적입니다!
###Output
_____no_output_____
###Markdown
작동은 하지만 순수한 파이썬 코드로 반복문이 진행되기 때문에 아주 비효율적입니다. 이 알고리즘을 벡터화해 보죠. 먼저 넘파이 `meshgrid` 함수로 좌표 벡터를 사용해 행렬을 만듭니다.
###Code
x_coords = np.arange(0, 1024) # [0, 1, 2, ..., 1023]
y_coords = np.arange(0, 768) # [0, 1, 2, ..., 767]
X, Y = np.meshgrid(x_coords, y_coords)
X
Y
###Output
_____no_output_____
###Markdown
여기서 볼 수 있듯이 `X`와 `Y` 모두 768x1024 배열입니다. `X`에 있는 모든 값은 수평 좌표에 해당합니다. `Y`에 있는 모든 값은 수직 좌표에 해당합니다.이제 간단히 배열 연산을 사용해 계산할 수 있습니다:
###Code
data = np.sin(X*Y/40.5)
###Output
_____no_output_____
###Markdown
맷플롯립의 `imshow` 함수를 사용해 이 데이터를 그려보죠([matplotlib tutorial](tools_matplotlib.ipynb)을 참조하세요).
###Code
import matplotlib.pyplot as plt
import matplotlib.cm as cm
fig = plt.figure(1, figsize=(7, 6))
plt.imshow(data, cmap=cm.hot)
plt.show()
###Output
_____no_output_____
###Markdown
저장과 로딩넘파이는 `ndarray`를 바이너리 또는 텍스트 포맷으로 손쉽게 저장하고 로드할 수 있습니다. 바이너리 `.npy` 포맷랜덤 배열을 만들고 저장해 보죠.
###Code
a = np.random.rand(2,3)
a
np.save("my_array", a)
###Output
_____no_output_____
###Markdown
끝입니다! 파일 이름의 확장자를 지정하지 않았기 때문에 넘파이는 자동으로 `.npy`를 붙입니다. 파일 내용을 확인해 보겠습니다:
###Code
with open("my_array.npy", "rb") as f:
content = f.read()
content
###Output
_____no_output_____
###Markdown
이 파일을 넘파이 배열로 로드하려면 `load` 함수를 사용합니다:
###Code
a_loaded = np.load("my_array.npy")
a_loaded
###Output
_____no_output_____
###Markdown
텍스트 포맷배열을 텍스트 포맷으로 저장해 보죠:
###Code
np.savetxt("my_array.csv", a)
###Output
_____no_output_____
###Markdown
파일 내용을 확인해 보겠습니다:
###Code
with open("my_array.csv", "rt") as f:
print(f.read())
###Output
5.435937959464737235e-01 9.288630656918674955e-01 1.535157809943688001e-02
4.157283012656532994e-01 9.102126992826775620e-01 5.512970782648904944e-01
###Markdown
이 파일은 탭으로 구분된 CSV 파일입니다. 다른 구분자를 지정할 수도 있습니다:
###Code
np.savetxt("my_array.csv", a, delimiter=",")
###Output
_____no_output_____
###Markdown
이 파일을 로드하려면 `loadtxt` 함수를 사용합니다:
###Code
a_loaded = np.loadtxt("my_array.csv", delimiter=",")
a_loaded
###Output
_____no_output_____
###Markdown
압축된 `.npz` 포맷여러 개의 배열을 압축된 한 파일로 저장하는 것도 가능합니다:
###Code
b = np.arange(24, dtype=np.uint8).reshape(2, 3, 4)
b
np.savez("my_arrays", my_a=a, my_b=b)
###Output
_____no_output_____
###Markdown
파일 내용을 확인해 보죠. `.npz` 파일 확장자가 자동으로 추가되었습니다.
###Code
with open("my_arrays.npz", "rb") as f:
content = f.read()
repr(content)[:180] + "[...]"
###Output
_____no_output_____
###Markdown
다음과 같이 이 파일을 로드할 수 있습니다:
###Code
my_arrays = np.load("my_arrays.npz")
my_arrays
###Output
_____no_output_____
###Markdown
게으른 로딩을 수행하는 딕셔너리와 유사한 객체입니다:
###Code
my_arrays.keys()
my_arrays["my_a"]
###Output
_____no_output_____
np.maximum(a, b)
np.copysign(a, b)
###Output
_____no_output_____
###Markdown
배열 인덱싱 1차원 배열1차원 넘파이 배열은 보통의 파이썬 배열과 비슷하게 사용할 수 있습니다:
###Code
a = np.array([1, 5, 3, 19, 13, 7, 3])
a[3]
a[2:5]
a[2:-1]
a[:2]
a[2::2]
a[::-1]
###Output
_____no_output_____
###Markdown
물론 원소를 수정할 수 있죠:
###Code
a[3]=999
a
###Output
_____no_output_____
###Markdown
슬라이싱을 사용해 `ndarray`를 수정할 수 있습니다:
###Code
a[2:5] = [997, 998, 999]
a
###Output
_____no_output_____
###Markdown
보통의 파이썬 배열과 차이점보통의 파이썬 배열과 대조적으로 `ndarray` 슬라이싱에 하나의 값을 할당하면 슬라이싱 전체에 복사됩니다. 위에서 언급한 브로드캐스팅 덕택입니다.
###Code
a[2:5] = -1
a
###Output
_____no_output_____
###Markdown
또한 이런 식으로 `ndarray` 크기를 늘리거나 줄일 수 없습니다:
###Code
try:
a[2:5] = [1,2,3,4,5,6] # 너무 길어요
except ValueError as e:
print(e)
###Output
cannot copy sequence with size 6 to array axis with dimension 3
###Markdown
원소를 삭제할 수도 없습니다:
###Code
try:
del a[2:5]
except ValueError as e:
print(e)
###Output
cannot delete array elements
###Markdown
중요한 점은 `ndarray`의 슬라이싱은 같은 데이터 버퍼를 바라보는 뷰(view)입니다. 슬라이싱된 객체를 수정하면 실제 원본 `ndarray`가 수정됩니다!
###Code
a_slice = a[2:6]
a_slice[1] = 1000
a # 원본 배열이 수정됩니다!
a[3] = 2000
a_slice # 비슷하게 원본 배열을 수정하면 슬라이싱 객체에도 반영됩니다!
###Output
_____no_output_____
###Markdown
데이터를 복사하려면 `copy` 메서드를 사용해야 합니다:
###Code
another_slice = a[2:6].copy()
another_slice[1] = 3000
a # 원본 배열이 수정되지 않습니다
a[3] = 4000
another_slice # 마찬가지로 원본 배열을 수정해도 복사된 배열은 바뀌지 않습니다
###Output
_____no_output_____
###Markdown
다차원 배열다차원 배열은 비슷한 방식으로 각 축을 따라 인덱싱 또는 슬라이싱해서 사용합니다. 콤마로 구분합니다:
###Code
b = np.arange(48).reshape(4, 12)
b
b[1, 2] # 행 1, 열 2
b[1, :] # 행 1, 모든 열
b[:, 1] # 모든 행, 열 1
###Output
_____no_output_____
###Markdown
**주의**: 다음 두 표현에는 미묘한 차이가 있습니다:
###Code
b[1, :]
b[1:2, :]
###Output
_____no_output_____
###Markdown
첫 번째 표현식은 `(12,)` 크기인 1D 배열로 행이 하나입니다. 두 번째는 `(1, 12)` 크기인 2D 배열로 같은 행을 반환합니다. 팬시 인덱싱(Fancy indexing)관심 대상의 인덱스 리스트를 지정할 수도 있습니다. 이를 팬시 인덱싱이라고 부릅니다.
###Code
b[(0,2), 2:5] # 행 0과 2, 열 2에서 4(5-1)까지
b[:, (-1, 2, -1)] # 모든 행, 열 -1 (마지막), 2와 -1 (다시 반대 방향으로)
###Output
_____no_output_____
###Markdown
여러 개의 인덱스 리스트를 지정하면 인덱스에 맞는 값이 포함된 1D `ndarray`를 반환됩니다.
###Code
b[(-1, 2, -1, 2), (5, 9, 1, 9)] # returns a 1D array with b[-1, 5], b[2, 9], b[-1, 1] and b[2, 9] (again)
###Output
_____no_output_____
###Markdown
고차원고차원에서도 동일한 방식이 적용됩니다. 몇 가지 예를 살펴 보겠습니다:
###Code
c = b.reshape(4,2,6)
c
c[2, 1, 4] # 행렬 2, 행 1, 열 4
c[2, :, 3] # 행렬 2, 모든 행, 열 3
###Output
_____no_output_____
###Markdown
어떤 축에 대한 인덱스를 지정하지 않으면 이 축의 모든 원소가 반환됩니다:
###Code
c[2, 1] # 행렬 2, 행 1, 모든 열이 반환됩니다. c[2, 1, :]와 동일합니다.
###Output
_____no_output_____
###Markdown
생략 부호 (`...`)생략 부호(`...`)를 쓰면 모든 지정하지 않은 축의 원소를 포함합니다.
###Code
c[2, ...] # 행렬 2, 모든 행, 모든 열. c[2, :, :]와 동일
c[2, 1, ...] # 행렬 2, 행 1, 모든 열. c[2, 1, :]와 동일
c[2, ..., 3] # 행렬 2, 모든 행, 열 3. c[2, :, 3]와 동일
c[..., 3] # 모든 행렬, 모든 행, 열 3. c[:, :, 3]와 동일
###Output
_____no_output_____
###Markdown
불리언 인덱싱불리언 값을 가진 `ndarray`를 사용해 축의 인덱스를 지정할 수 있습니다.
###Code
b = np.arange(48).reshape(4, 12)
b
rows_on = np.array([True, False, True, False])
b[rows_on, :] # 행 0과 2, 모든 열. b[(0, 2), :]와 동일
cols_on = np.array([False, True, False] * 4)
b[:, cols_on] # 모든 행, 열 1, 4, 7, 10
###Output
_____no_output_____
###Markdown
`np.ix_`여러 축에 걸쳐서는 불리언 인덱싱을 사용할 수 없고 `ix_` 함수를 사용합니다:
###Code
b[np.ix_(rows_on, cols_on)]
np.ix_(rows_on, cols_on)
###Output
_____no_output_____
###Markdown
`ndarray`와 같은 크기의 불리언 배열을 사용하면 해당 위치가 `True`인 모든 원소를 담은 1D 배열이 반환됩니다. 일반적으로 조건 연산자와 함께 사용합니다:
###Code
b[b % 3 == 1]
###Output
_____no_output_____
###Markdown
반복`ndarray`를 반복하는 것은 일반적인 파이썬 배열을 반복한는 것과 매우 유사합니다. 다차원 배열을 반복하면 첫 번째 축에 대해서 수행됩니다.
###Code
c = np.arange(24).reshape(2, 3, 4) # 3D 배열 (두 개의 3x4 행렬로 구성됨)
c
for m in c:
print("아이템:")
print(m)
for i in range(len(c)): # len(c) == c.shape[0]
print("아이템:")
print(c[i])
###Output
아이템:
[[ 0 1 2 3]
[ 4 5 6 7]
[ 8 9 10 11]]
아이템:
[[12 13 14 15]
[16 17 18 19]
[20 21 22 23]]
###Markdown
`ndarray`에 있는 모든 원소를 반복하려면 `flat` 속성을 사용합니다:
###Code
for i in c.flat:
print("아이템:", i)
###Output
아이템: 0
아이템: 1
아이템: 2
아이템: 3
아이템: 4
아이템: 5
아이템: 6
아이템: 7
아이템: 8
아이템: 9
아이템: 10
아이템: 11
아이템: 12
아이템: 13
아이템: 14
아이템: 15
아이템: 16
아이템: 17
아이템: 18
아이템: 19
아이템: 20
아이템: 21
아이템: 22
아이템: 23
###Markdown
배열 쌓기종종 다른 배열을 쌓아야 할 때가 있습니다. 넘파이는 이를 위해 몇 개의 함수를 제공합니다. 먼저 배열 몇 개를 만들어 보죠.
###Code
q1 = np.full((3,4), 1.0)
q1
q2 = np.full((4,4), 2.0)
q2
q3 = np.full((3,4), 3.0)
q3
###Output
_____no_output_____
###Markdown
`vstack``vstack` 함수를 사용하여 수직으로 쌓아보죠:
###Code
q4 = np.vstack((q1, q2, q3))
q4
q4.shape
###Output
_____no_output_____
###Markdown
q1, q2, q3가 모두 같은 크기이므로 가능합니다(수직으로 쌓기 때문에 수직 축은 크기가 달라도 됩니다). `hstack``hstack`을 사용해 수평으로도 쌓을 수 있습니다:
###Code
q5 = np.hstack((q1, q3))
q5
q5.shape
###Output
_____no_output_____
###Markdown
q1과 q3가 모두 3개의 행을 가지고 있기 때문에 가능합니다. q2는 4개의 행을 가지고 있기 때문에 q1, q3와 수평으로 쌓을 수 없습니다:
###Code
try:
q5 = np.hstack((q1, q2, q3))
except ValueError as e:
print(e)
###Output
all the input array dimensions for the concatenation axis must match exactly, but along dimension 0, the array at index 0 has size 3 and the array at index 1 has size 4
###Markdown
`concatenate``concatenate` 함수는 지정한 축으로도 배열을 쌓습니다.
###Code
q7 = np.concatenate((q1, q2, q3), axis=0) # vstack과 동일
q7
q7.shape
###Output
_____no_output_____
###Markdown
예상했겠지만 `hstack`은 `axis=1`으로 `concatenate`를 호출하는 것과 같습니다. `stack``stack` 함수는 새로운 축을 따라 배열을 쌓습니다. 모든 배열은 같은 크기를 가져야 합니다.
###Code
q8 = np.stack((q1, q3))
q8
q8.shape
###Output
_____no_output_____
###Markdown
배열 분할분할은 쌓기의 반대입니다. 예를 들어 `vsplit` 함수는 행렬을 수직으로 분할합니다.먼저 6x4 행렬을 만들어 보죠:
###Code
r = np.arange(24).reshape(6,4)
r
###Output
_____no_output_____
###Markdown
수직으로 동일한 크기로 나누어 보겠습니다:
###Code
r1, r2, r3 = np.vsplit(r, 3)
r1
r2
r3
###Output
_____no_output_____
###Markdown
`split` 함수는 주어진 축을 따라 배열을 분할합니다. `vsplit`는 `axis=0`으로 `split`를 호출하는 것과 같습니다. `hsplit` 함수는 `axis=1`로 `split`를 호출하는 것과 같습니다:
###Code
r4, r5 = np.hsplit(r, 2)
r4
r5
###Output
_____no_output_____
###Markdown
배열 전치`transpose` 메서드는 주어진 순서대로 축을 뒤바꾸어 `ndarray` 데이터에 대한 새로운 뷰를 만듭니다.예를 위해 3D 배열을 만들어 보죠:
###Code
t = np.arange(24).reshape(4,2,3)
t
###Output
_____no_output_____
###Markdown
`0, 1, 2`(깊이, 높이, 너비) 축을 `1, 2, 0` (깊이→너비, 높이→깊이, 너비→높이) 순서로 바꾼 `ndarray`를 만들어 보겠습니다:
###Code
t1 = t.transpose((1,2,0))
t1
t1.shape
###Output
_____no_output_____
###Markdown
`transpose` 기본값은 차원의 순서를 역전시킵니다:
###Code
t2 = t.transpose() # t.transpose((2, 1, 0))와 동일
t2
t2.shape
###Output
_____no_output_____
###Markdown
넘파이는 두 축을 바꾸는 `swapaxes` 함수를 제공합니다. 예를 들어 깊이와 높이를 뒤바꾸어 `t`의 새로운 뷰를 만들어 보죠:
###Code
t3 = t.swapaxes(0,1) # t.transpose((1, 0, 2))와 동일
t3
t3.shape
###Output
_____no_output_____
###Markdown
선형 대수학넘파이 2D 배열을 사용하면 파이썬에서 행렬을 효율적으로 표현할 수 있습니다. 주요 행렬 연산을 간단히 둘러 보겠습니다. 선형 대수학, 벡터와 행렬에 관한 자세한 내용은 [Linear Algebra tutorial](math_linear_algebra.ipynb)를 참고하세요. 행렬 전치`T` 속성은 랭크가 2보다 크거나 같을 때 `transpose()`를 호출하는 것과 같습니다:
###Code
m1 = np.arange(10).reshape(2,5)
m1
m1.T
###Output
_____no_output_____
###Markdown
`T` 속성은 랭크가 0이거나 1인 배열에는 아무런 영향을 미치지 않습니다:
###Code
m2 = np.arange(5)
m2
m2.T
###Output
_____no_output_____
###Markdown
먼저 1D 배열을 하나의 행이 있는 행렬(2D)로 바꾼다음 전치를 수행할 수 있습니다:
###Code
m2r = m2.reshape(1,5)
m2r
m2r.T
###Output
_____no_output_____
###Markdown
행렬 곱셈두 개의 행렬을 만들어 `dot` 메서드로 행렬 [곱셈](https://ko.wikipedia.org/wiki/%ED%96%89%EB%A0%AC_%EA%B3%B1%EC%85%88)을 실행해 보죠.
###Code
n1 = np.arange(10).reshape(2, 5)
n1
n2 = np.arange(15).reshape(5,3)
n2
n1.dot(n2)
###Output
_____no_output_____
###Markdown
**주의**: 앞서 언급한 것처럼 `n1*n2`는 행렬 곱셈이 아니라 원소별 곱셈(또는 [아다마르 곱](https://ko.wikipedia.org/wiki/%EC%95%84%EB%8B%A4%EB%A7%88%EB%A5%B4_%EA%B3%B1)이라 부릅니다)입니다. 역행렬과 유사 역행렬`numpy.linalg` 모듈 안에 많은 선형 대수 함수들이 있습니다. 특히 `inv` 함수는 정방 행렬의 역행렬을 계산합니다:
###Code
import numpy.linalg as linalg
m3 = np.array([[1,2,3],[5,7,11],[21,29,31]])
m3
linalg.inv(m3)
###Output
_____no_output_____
###Markdown
`pinv` 함수를 사용하여 [유사 역행렬](https://en.wikipedia.org/wiki/Moore%E2%80%93Penrose_pseudoinverse)을 계산할 수도 있습니다:
###Code
linalg.pinv(m3)
###Output
_____no_output_____
###Markdown
단위 행렬행렬과 그 행렬의 역행렬을 곱하면 단위 행렬이 됩니다(작은 소숫점 오차가 있습니다):
###Code
m3.dot(linalg.inv(m3))
###Output
_____no_output_____
###Markdown
`eye` 함수는 NxN 크기의 단위 행렬을 만듭니다:
###Code
np.eye(3)
###Output
_____no_output_____
###Markdown
QR 분해`qr` 함수는 행렬을 [QR 분해](https://en.wikipedia.org/wiki/QR_decomposition)합니다:
###Code
q, r = linalg.qr(m3)
q
r
q.dot(r) # q.r는 m3와 같습니다
###Output
_____no_output_____
###Markdown
행렬식`det` 함수는 [행렬식](https://en.wikipedia.org/wiki/Determinant)을 계산합니다:
###Code
linalg.det(m3) # 행렬식 계산
###Output
_____no_output_____
###Markdown
고윳값과 고유벡터`eig` 함수는 정방 행렬의 [고윳값과 고유벡터](https://en.wikipedia.org/wiki/Eigenvalues_and_eigenvectors)를 계산합니다:
###Code
eigenvalues, eigenvectors = linalg.eig(m3)
eigenvalues # λ
eigenvectors # v
m3.dot(eigenvectors) - eigenvalues * eigenvectors # m3.v - λ*v = 0
###Output
_____no_output_____
###Markdown
특잇값 분해`svd` 함수는 행렬을 입력으로 받아 그 행렬의 [특잇값 분해](https://en.wikipedia.org/wiki/Singular_value_decomposition)를 반환합니다:
###Code
m4 = np.array([[1,0,0,0,2], [0,0,3,0,0], [0,0,0,0,0], [0,2,0,0,0]])
m4
U, S_diag, V = linalg.svd(m4)
U
S_diag
###Output
_____no_output_____
###Markdown
`svd` 함수는 Σ의 대각 원소 값만 반환합니다. 전체 Σ 행렬은 다음과 같이 만듭니다:
###Code
S = np.zeros((4, 5))
S[np.diag_indices(4)] = S_diag
S # Σ
V
U.dot(S).dot(V) # U.Σ.V == m4
###Output
_____no_output_____
###Markdown
대각원소와 대각합
###Code
np.diag(m3) # m3의 대각 원소입니다(왼쪽 위에서 오른쪽 아래)
np.trace(m3) # np.diag(m3).sum()와 같습니다
###Output
_____no_output_____
###Markdown
선형 방정식 풀기 `solve` 함수는 다음과 같은 선형 방정식을 풉니다:* $2x + 6y = 6$* $5x + 3y = -9$
###Code
coeffs = np.array([[2, 6], [5, 3]])
depvars = np.array([6, -9])
solution = linalg.solve(coeffs, depvars)
solution
###Output
_____no_output_____
###Markdown
solution을 확인해 보죠:
###Code
coeffs.dot(solution), depvars # 네 같네요
###Output
_____no_output_____
###Markdown
좋습니다! 다른 방식으로도 solution을 확인해 보죠:
###Code
np.allclose(coeffs.dot(solution), depvars)
###Output
_____no_output_____
###Markdown
벡터화한 번에 하나씩 개별 배열 원소에 대해 연산을 실행하는 대신 배열 연산을 사용하면 훨씬 효율적인 코드를 만들 수 있습니다. 이를 벡터화라고 합니다. 이를 사용하여 넘파이의 최적화된 성능을 활용할 수 있습니다.예를 들어, $sin(xy/40.5)$ 식을 기반으로 768x1024 크기 배열을 생성하려고 합니다. 중첩 반복문 안에 파이썬의 math 함수를 사용하는 것은 **나쁜** 방법입니다:
###Code
import math
data = np.empty((768, 1024))
for y in range(768):
for x in range(1024):
data[y, x] = math.sin(x*y/40.5) # 매우 비효율적입니다!
###Output
_____no_output_____
###Markdown
작동은 하지만 순수한 파이썬 코드로 반복문이 진행되기 때문에 아주 비효율적입니다. 이 알고리즘을 벡터화해 보죠. 먼저 넘파이 `meshgrid` 함수로 좌표 벡터를 사용해 행렬을 만듭니다.
###Code
x_coords = np.arange(0, 1024) # [0, 1, 2, ..., 1023]
y_coords = np.arange(0, 768) # [0, 1, 2, ..., 767]
X, Y = np.meshgrid(x_coords, y_coords)
X
Y
###Output
_____no_output_____
###Markdown
여기서 볼 수 있듯이 `X`와 `Y` 모두 768x1024 배열입니다. `X`에 있는 모든 값은 수평 좌표에 해당합니다. `Y`에 있는 모든 값은 수직 좌표에 해당합니다.이제 간단히 배열 연산을 사용해 계산할 수 있습니다:
###Code
data = np.sin(X*Y/40.5)
###Output
_____no_output_____
###Markdown
맷플롯립의 `imshow` 함수를 사용해 이 데이터를 그려보죠([matplotlib tutorial](tools_matplotlib.ipynb)을 참조하세요).
###Code
import matplotlib.pyplot as plt
import matplotlib.cm as cm
fig = plt.figure(1, figsize=(7, 6))
plt.imshow(data, cmap=cm.hot)
plt.show()
###Output
_____no_output_____
###Markdown
저장과 로딩넘파이는 `ndarray`를 바이너리 또는 텍스트 포맷으로 손쉽게 저장하고 로드할 수 있습니다. 바이너리 `.npy` 포맷랜덤 배열을 만들고 저장해 보죠.
###Code
a = np.random.rand(2,3)
a
np.save("my_array", a)
###Output
_____no_output_____
###Markdown
끝입니다! 파일 이름의 확장자를 지정하지 않았기 때문에 넘파이는 자동으로 `.npy`를 붙입니다. 파일 내용을 확인해 보겠습니다:
###Code
with open("my_array.npy", "rb") as f:
content = f.read()
content
###Output
_____no_output_____
###Markdown
이 파일을 넘파이 배열로 로드하려면 `load` 함수를 사용합니다:
###Code
a_loaded = np.load("my_array.npy")
a_loaded
###Output
_____no_output_____
###Markdown
텍스트 포맷배열을 텍스트 포맷으로 저장해 보죠:
###Code
np.savetxt("my_array.csv", a)
###Output
_____no_output_____
###Markdown
파일 내용을 확인해 보겠습니다:
###Code
with open("my_array.csv", "rt") as f:
print(f.read())
###Output
5.435937959464737235e-01 9.288630656918674955e-01 1.535157809943688001e-02
4.157283012656532994e-01 9.102126992826775620e-01 5.512970782648904944e-01
###Markdown
이 파일은 탭으로 구분된 CSV 파일입니다. 다른 구분자를 지정할 수도 있습니다:
###Code
np.savetxt("my_array.csv", a, delimiter=",")
###Output
_____no_output_____
###Markdown
이 파일을 로드하려면 `loadtxt` 함수를 사용합니다:
###Code
a_loaded = np.loadtxt("my_array.csv", delimiter=",")
a_loaded
###Output
_____no_output_____
###Markdown
압축된 `.npz` 포맷여러 개의 배열을 압축된 한 파일로 저장하는 것도 가능합니다:
###Code
b = np.arange(24, dtype=np.uint8).reshape(2, 3, 4)
b
np.savez("my_arrays", my_a=a, my_b=b)
###Output
_____no_output_____
###Markdown
파일 내용을 확인해 보죠. `.npz` 파일 확장자가 자동으로 추가되었습니다.
###Code
with open("my_arrays.npz", "rb") as f:
content = f.read()
repr(content)[:180] + "[...]"
###Output
_____no_output_____
###Markdown
다음과 같이 이 파일을 로드할 수 있습니다:
###Code
my_arrays = np.load("my_arrays.npz")
my_arrays
###Output
_____no_output_____
###Markdown
게으른 로딩을 수행하는 딕셔너리와 유사한 객체입니다:
###Code
my_arrays.keys()
my_arrays["my_a"]
###Output
_____no_output_____ |
Radiohead.ipynb | ###Markdown
We start by extracting the tracks from the Spotify API using our previous scripts. I previously created an csv with the resulting data frame using the pandas library.
###Code
df = pd.read_csv('Radiohead.csv')
df.head()
###Output
_____no_output_____
###Markdown
From the beginning we can see that the get_tracks script also extracts the special editions of radiohead albums (Kid A mnesia was recently released as a compilation). For this analysis we only need the "original" canonical releases. We will use pandas to filter out this special edition albums
###Code
#This is a list with the names of the albums we don't want
special_albums= ['KID A MNESIA', 'OK Computer OKNOTOK 1997 2017', 'TKOL RMX 1234567', 'In Rainbows (Disk 2)','I Might Be Wrong']
df = df[~df['album_name'].isin(special_albums)]
df['album_name'].unique()
###Output
_____no_output_____
###Markdown
Now having the album we can look at the feature called valence. Spotify is not very clear about these audio features, but according to their documentation valence is a numerical value that meassures how 'sad' a song is. Valence can be a number between 0 and 1, being 1 the saddest a song can be.
###Code
df.groupby('album_name').agg({'valence':np.mean}).sort_values(by='valence')
###Output
_____no_output_____
###Markdown
With this quick analysis we can see that radiohead is a pretty gloomy band, with each album having an average valence of lower than 0.45. This won't be surprising to longtime fans. We can also visualize how each song in every albums has a unique valence, with the help seaborn and its catplot we can analyze categorical variables such as 'Album name'
###Code
sns.set_theme(style='whitegrid')
sns.catplot(data=df, y='album_name', x='valence')
plt.title('Valence through Albums')
plt.ylabel('')
plt.xlabel('Valence')
###Output
_____no_output_____
###Markdown
Using a Boxplot, we can see how consistently sad is every album
###Code
sns.set_theme(style='whitegrid')
axis = sns.boxplot(data= df, y= 'album_name', x='valence')
axis_2 = sns.swarmplot(data= df, y= 'album_name', x='valence', color='.25')
plt.title('Valence through Albums')
plt.ylabel('')
plt.xlabel('Valence')
###Output
_____no_output_____
###Markdown
Another interesting feature is danceability.
###Code
df.groupby('album_name').agg({'danceability':np.mean}).sort_values(by='danceability')
sns.set_theme(style='whitegrid')
axis = sns.boxplot(data= df, y= 'album_name', x='danceability')
axis_2 = sns.swarmplot(data= df, y= 'album_name', x='danceability', color='.25')
plt.title('Danceability through Albums')
plt.ylabel('')
plt.xlabel('Danceability')
###Output
_____no_output_____
###Markdown
Now, these results are interesting. It seems that Radiohead has increased its danceability throught its career, starting from Kid A where the band famously began to increase their output of songs with more electronic elements. We can also observe that the saddest Radiohead album is also one of the most danceable albums. So, we should visualize if there is a correlation between valence and danceability. A scatterplot should be helpful.
###Code
sns.set_theme(style='whitegrid')
sns.scatterplot(data=df, x = 'danceability', y= 'valence')
plt.title("Danceability vs Valence")
plt.xlabel("Danceability")
plt.ylabel("Valence")
df[['danceability','valence']].corr()
###Output
_____no_output_____
###Markdown
A closer inspection tell us that there is not a big correlation between a song valence and its danceability. Which is really a little counterintuitive. We should analyse the rest of the features.
###Code
sns.heatmap(df.iloc[:,6:17].corr(method = 'pearson'))
sns.set_theme(style='whitegrid')
sns.scatterplot(data=df, x = 'energy', y= 'valence')
df[['energy','valence']].corr()
sns.set_theme(style='whitegrid')
sns.scatterplot(data=df, x = 'loudness', y= 'valence')
df[['loudness','valence']].corr()
sns.set_theme(style='whitegrid')
sns.scatterplot(data=df, x = 'acousticness', y= 'valence')
df[['acousticness','valence']].corr()
sns.set_theme(style='whitegrid')
sns.scatterplot(data=df, x = 'energy', y= 'acousticness')
df[['energy','acousticness']].corr(method = 'pearson')
###Output
_____no_output_____ |
SVM-stock.ipynb | ###Markdown
Stock versionThis notebook is based on Kaggle solution https://www.kaggle.com/napetrov/tps04-svm-with-scikit-learn-intelex for Tabular Playground Series - Apr 2021
###Code
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.svm import LinearSVC
from sklearn.svm import SVC
from sklearn.linear_model import SGDClassifier
from sklearn.impute import SimpleImputer
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OrdinalEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
###Output
_____no_output_____
###Markdown
Next set of cell read data and perform feature engineering operations
###Code
train = pd.read_csv('./SVM/train.csv', index_col='PassengerId')
test = pd.read_csv('./SVM/test.csv', index_col='PassengerId')
target = train.pop('Survived')
train.drop(['Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
test.drop(['Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
test_prepared = test.copy()
train_prepared = train.copy()
test_prepared['Age'].fillna((train['Age'].median()), inplace=True)
train_prepared['Age'].fillna((train['Age'].median()), inplace=True)
test_prepared['Fare'].fillna((train['Fare'].median()), inplace=True)
train_prepared['Fare'].fillna((train['Fare'].median()), inplace=True)
test_prepared['Embarked'].fillna('S', inplace=True)
train_prepared['Embarked'].fillna('S', inplace=True)
for col in ['Pclass', 'Sex', 'Embarked']:
le = LabelEncoder()
le.fit(train_prepared[col])
train_prepared[col] = le.transform(train_prepared[col])
test_prepared[col] = le.transform(test_prepared[col])
train_prepared.head()
train_prepared_scaled = train_prepared.copy()
test_prepared_scaled = test_prepared.copy()
scaler = StandardScaler()
scaler.fit(train_prepared)
train_prepared_scaled = scaler.transform(train_prepared_scaled)
test_prepared_scaled = scaler.transform(test_prepared_scaled)
train_prepared_scaled = pd.DataFrame(train_prepared_scaled, columns=train_prepared.columns)
test_prepared_scaled = pd.DataFrame(test_prepared_scaled, columns=train_prepared.columns)
X_train, X_valid, y_train, y_valid = train_test_split(train_prepared_scaled, target, test_size=0.1, random_state=0)
###Output
_____no_output_____
###Markdown
And here we start trining for SVM with RBF kernel - it would take a while to complete
###Code
%%time
svc_kernel_rbf = SVC(kernel='rbf', random_state=0, C=0.01)
svc_kernel_rbf.fit(X_train, y_train)
y_pred = svc_kernel_rbf.predict(X_valid)
accuracy_score(y_pred, y_valid)
%%time
final_pred = svc_kernel_rbf.predict(test_prepared_scaled)
###Output
CPU times: user 5min 34s, sys: 44 ms, total: 5min 34s
Wall time: 5min 34s
|
Data Projects/NamesByState.ipynb | ###Markdown
Simple Bayes Filter to Predict US Birth States by NameA simple program that generates the most likely state you were born in given your name or name and birth year.Using the dataset found here: https://www.ssa.gov/oact/babynames/limits.html
###Code
# Import necessary libraries
import nltk
import numpy as np
import pandas as pd
import matplotlib as plt
import seaborn as sns
import os
import random
import plotly.plotly as py
# Setting some visualization parameters
#sns.set(style="darkgrid")
# Import the list of names by state
fpath = os.getcwd() + "/namesbystate/"
nameFiles = os.listdir(fpath)
# Build the dataframe
stateDFs = []
for nf in nameFiles:
state = nf.split(".")[0]
with open(fpath + nf) as namesFile:
stateDFs.append(pd.read_table(namesFile, sep=",", names=["State","Gender","Year","Name","Count"]))
names = pd.concat(stateDFs)
# Analyze the dataframe
#print(names.size)
#print(names.columns)
#pd.isnull(names).any()
#names.nunique()
#g = sns.FacetGrid(tips, row="sex", col="time", margin_titles=True)
#bins = np.linspace(0, 60, 13)
#g.map(plt.hist, "total_bill", color="steelblue", bins=bins)
# Get the overall frequencies, which names are most popular across the dataset?
name_freq = names.groupby('Name').sum().Count.sort_values(ascending=False)
name_freq[:5]
# best = names_freq.nlargest()
# Small collection of analysis functions
def mostPopularByYear(year, gender="Both"):
# separate by gender if requested
if gender == "F":
yearNames = names.loc[(names['Year'] == year) & (names["Gender"] == "F") ]
elif gender == "M":
yearNames = names.loc[(names['Year'] == year) & (names["Gender"] == "M") ]
else:
yearNames = names.loc[names['Year'] == year]
freq = yearNames.groupby('Name').sum().Count.sort_values(ascending=False)
return (freq.index[0], freq[0])
def mostPopularByYearRange(minYear, maxYear, gender="Both"):
if gender == "F":
yearNames = names.loc[(names['Year'] >= minYear) & (names['Year'] <= maxYear) & (names["Gender"] == "F") ]
elif gender == "M":
yearNames = names.loc[(names['Year'] >= minYear) & (names['Year'] <= maxYear) & (names["Gender"] == "M") ]
else:
yearNames = names.loc[(names['Year'] >= minYear) & (names['Year'] <= maxYear)]
freq = yearNames.groupby('Name').sum().Count.sort_values(ascending=False)
return (freq.index[0], freq[0])
#def mostPopularByYearAndState(year, state):
mostPopularByYear(2010, "M")
#mostPopularByYearRange(1920, 1980, "F")
# Build and train the Bayes classifier
# rowCount = names.shape[0]
# featureSet = []
# for index, row in names.iterrows():
# # explanatory variables, response variable
# featureSet.append( ({"Name": row["Name"], "Count": row["Count"]} , row["State"]) )
# train_set, test_set = featureSet[np.floor(rowCount*.7):], featureSet[:np.floor(rowCount*.3)]
# classifier = nltk.NaiveBayesClassifier.train(train_set)
# df2 = names.loc[(names['Year'] == 2010) & ((names['State'] == "UT") | (names['State'] == "TX"))]
# df2.head()
# rowCount = df2.shape[0]
# featureSet = []
# for index, row in df2.iterrows():
# for _ in range(row["Count"]):
# # explanatory variable, response variable
# featureSet.append( ({"Name":row["Name"]}, row["State"]) )
# print(featureSet[:5])
# random.shuffle(featureSet)
# train_set, test_set = featureSet[rowCount//2:], featureSet[:rowCount//2]
# classifier = nltk.NaiveBayesClassifier.train(train_set)
# df2.loc[(df2["Name"] == "Peter") & (df2["State"] == "TX")]
# Create plotly plot
scl = [[0.0, 'rgb(242,240,247)'],[0.2, 'rgb(218,218,235)'],[0.4, 'rgb(188,189,220)'],\
[0.6, 'rgb(158,154,200)'],[0.8, 'rgb(117,107,177)'],[1.0, 'rgb(84,39,143)']]
maryDF = names.loc[(names["Year"] == 2000) & (names["Name"] == "Mary")]
data = [ dict(
type='choropleth',
colorscale = scl,
autocolorscale = False,
locations = maryDF['State'],
z = maryDF['Count'],
locationmode = 'USA-states',
#text = df['text'],
marker = dict(
line = dict (
color = 'rgb(255,255,255)',
width = 2
) ),
colorbar = dict(
title = "Usage")
) ]
layout = dict(
title = 'Name Usage by State',
geo = dict(
scope='usa',
projection=dict( type='albers usa' ),
showlakes = True,
lakecolor = 'rgb(255, 255, 255)'),
)
fig = dict( data=data, layout=layout )
py.iplot( fig, filename='d3-cloropleth-map' )
def predictState(name):
# get row count and build out the simple feature set
return classifier.classify(name)
#predictState({"Name": "y"})
print(nltk.classify.accuracy(classifier, test_set))
###Output
0.8946028513238289
|
Fall2019/MSDS400/pythonPractice/Module7/Extrema.ipynb | ###Markdown
Extrema
###Code
import numpy as np
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
extrema() is a classification function used to evaluate a trio of points.The function will evaluate the middle point of trio to determine if itrepresents a relative maxima or minima for the trio. The result will bea boolean value True or False which will be used later. Note that if themiddle point is not an extrema, the value False will be returned.
###Code
def extrema(a, b, c):
x = max(a, b, c)
z = min(a, b, c)
epsilon = 0.0000001 # This is a safeguard against minor differences.
result = False
if abs(b - x) < epsilon:
result = True
if abs(b - z) < epsilon:
result = True
return result
###Output
_____no_output_____
###Markdown
This is a user supplied function. Example is Lial Figure 8 Section 13.1.
###Code
def f(x):
y = (x ** 8) ** .333 - 16.0 * (x ** 2) ** .33
return y
###Output
_____no_output_____
###Markdown
The following extrema evaluation will be over a defined interval. Grid pointswill be defined and the function extreme() will compare trios of values.Define interval endpoints for a closed interval [xa,xb].
###Code
xa = -1.0
xb = +9.0
###Output
_____no_output_____
###Markdown
n = number of grid points. The interval [xa,xb] will be subdivided.Adding delta to xb insures xb is included in the array generated. For thispurpose, np.arange() will be used to create a numpy array of floating pointvalues to be used in subsequent calculations.
###Code
n = 1000
delta = (xb - xa) / n
x = np.arange(xa, xb + delta, delta)
y = f(x)
value = [False] # This defines the list value which will contain Boolean values.
value = value * len(x) # This expands the list to the length of x.
###Output
_____no_output_____
###Markdown
We are going to check each trio of points during the grid search.If a local extrema is found, the boolean value will be set to True.Otherwise it will remain False. The interval endpoints are always localextrema so we define their boolean values first.
###Code
L = len(x)
value[0] = True # This will correspond to one endpoint.
value[L - 1] = True # This corresponds to the other.
###Output
_____no_output_____
###Markdown
The for loop will check each consecutive trios of f values with the functionextrema() to identify local extrema. Only when an extrema is found will theboolean value in the list value be changed to True.
###Code
for x_index in range(L - 2):
first_x = x[x_index]
second_x = x[x_index + 1]
third_x = x[x_index + 2]
a = f(first_x)
b = f(second_x)
c = f(third_x)
is_second_x_extrema = extrema(a, b, c)
value[x_index + 1] = is_second_x_extrema
for k in range(L - 2):
value[k + 1] = extrema(f(x[k]), f(x[k + 1]), f(x[k + 2]))
max_value = max(y) # We check the list to find the global maxima.
min_value = min(y) # We check the list to find the global minima.
###Output
_____no_output_____
###Markdown
The following for loop checks the boolean value for each point. If the valueis True, that point will be plotted yellow. The global maximum is plotted asred and the minimum is plotted as green. We follow this up by plotting thevalues of x and y.
###Code
error = 0.0000001 # The error parameter guards against roundoff error.
# The code which follows assigns colors to maxima and minima and plots them.
plt.figure()
for k in range(L):
if value[k] is True:
plt.scatter(x[k], y[k], s=60, c='y')
if abs(max_value - y[k]) < error:
plt.scatter(x[k], y[k], s=60, c='r')
if abs(min_value - y[k]) < error:
plt.scatter(x[k], y[k], s=60, c='b')
plt.plot(x, y, c='k') # This plots the line on the chart.
plt.xlabel('x-axis')
plt.ylabel('y-axis')
plt.title('Plot Showing Absolute and Relative Extrema')
plt.show()
###Output
_____no_output_____ |
project3/.Trash-0/files/project_3_solution 3.ipynb | ###Markdown
Project 3: Smart Beta Portfolio and Portfolio Optimization InstructionsEach problem consists of a function to implement and instructions on how to implement the function. The parts of the function that need to be implemented are marked with a ` TODO` comment. After implementing the function, run the cell to test it against the unit tests we've provided. For each problem, we provide one or more unit tests from our `project_tests` package. These unit tests won't tell you if your answer is correct, but will warn you of any major errors. Your code will be checked for the correct solution when you submit it Udacity. PackagesWhen you implement the functions, you'll only need to use the [Pandas](https://pandas.pydata.org/) and [Numpy](http://www.numpy.org/) packages. Don't import any other packages, otherwise the grader willn't be able to run your code.The other packages that we're importing is `helper` and `project_tests`. These are custom packages built to help you solve the problems. The `helper` module contains utility functions and graph functions. The `project_tests` contains the unit tests for all the problems. Install Packages
###Code
import sys
!{sys.executable} -m pip install -r requirements.txt
###Output
Collecting numpy==1.12.0 (from -r requirements.txt (line 1))
Downloading https://files.pythonhosted.org/packages/7d/db/04b13cd69a66657e27dd8af68650b5c8c511501f108358653fca8e52bf86/numpy-1.12.0-cp36-cp36m-manylinux1_x86_64.whl (16.8MB)
[K 100% |████████████████████████████████| 16.8MB 35kB/s eta 0:00:01 7% |██▌ | 1.3MB 9.9MB/s eta 0:00:02 15% |█████ | 2.6MB 14.7MB/s eta 0:00:01 20% |██████▋ | 3.5MB 16.4MB/s eta 0:00:01 27% |████████▋ | 4.6MB 24.1MB/s eta 0:00:01 42% |█████████████▊ | 7.2MB 28.3MB/s eta 0:00:01 49% |████████████████ | 8.4MB 23.3MB/s eta 0:00:01 77% |████████████████████████▋ | 13.0MB 23.6MB/s eta 0:00:01 84% |███████████████████████████ | 14.2MB 26.1MB/s eta 0:00:01 98% |███████████████████████████████▌| 16.5MB 19.6MB/s eta 0:00:01
[?25hCollecting scipy==0.18.1 (from -r requirements.txt (line 2))
Downloading https://files.pythonhosted.org/packages/74/c0/f0bf4eaef1b6aa7bdd1ae5597ce1d9e729417b3ca085c47d0f1c640d34f8/scipy-0.18.1-cp36-cp36m-manylinux1_x86_64.whl (42.5MB)
[K 100% |████████████████████████████████| 42.5MB 12kB/s eta 0:00:01 5% |█▉ | 2.4MB 24.0MB/s eta 0:00:02 11% |███▌ | 4.7MB 24.5MB/s eta 0:00:02 31% |██████████ | 13.4MB 27.9MB/s eta 0:00:02 33% |██████████▉ | 14.4MB 17.7MB/s eta 0:00:02 40% |█████████████ | 17.4MB 19.0MB/s eta 0:00:02 49% |███████████████▉ | 21.1MB 27.3MB/s eta 0:00:01 52% |████████████████▊ | 22.2MB 21.4MB/s eta 0:00:01 83% |██████████████████████████▋ | 35.4MB 16.2MB/s eta 0:00:01 90% |████████████████████████████▉ | 38.4MB 19.7MB/s eta 0:00:01 92% |█████████████████████████████▋ | 39.4MB 22.2MB/s eta 0:00:01 97% |███████████████████████████████▏| 41.3MB 21.8MB/s eta 0:00:01
[?25hCollecting pandas==0.19.2 (from -r requirements.txt (line 3))
Downloading https://files.pythonhosted.org/packages/f1/33/b455d0af521b76b1982eac1ed1c30c9e67f9885f54c3349aef0b0c547d85/pandas-0.19.2-cp36-cp36m-manylinux1_x86_64.whl (18.9MB)
[K 100% |████████████████████████████████| 18.9MB 30kB/s eta 0:00:01 10% |███▎ | 2.0MB 23.4MB/s eta 0:00:01 15% |█████ | 2.9MB 20.2MB/s eta 0:00:01 20% |██████▌ | 3.8MB 16.5MB/s eta 0:00:01 31% |██████████ | 5.9MB 18.1MB/s eta 0:00:01 35% |███████████▎ | 6.7MB 14.4MB/s eta 0:00:01 45% |██████████████▋ | 8.6MB 22.5MB/s eta 0:00:01 50% |████████████████▎ | 9.6MB 20.6MB/s eta 0:00:01 56% |██████████████████▏ | 10.8MB 23.8MB/s eta 0:00:01 61% |███████████████████▊ | 11.6MB 21.2MB/s eta 0:00:01 77% |█████████████████████████ | 14.7MB 19.9MB/s eta 0:00:01
[?25hRequirement already satisfied: pytz>=2011k in /opt/conda/lib/python3.6/site-packages (from pandas==0.19.2->-r requirements.txt (line 3))
Requirement already satisfied: python-dateutil>=2 in /opt/conda/lib/python3.6/site-packages (from pandas==0.19.2->-r requirements.txt (line 3))
Requirement already satisfied: six>=1.5 in /opt/conda/lib/python3.6/site-packages (from python-dateutil>=2->pandas==0.19.2->-r requirements.txt (line 3))
Installing collected packages: numpy, scipy, pandas
Found existing installation: numpy 1.12.1
Uninstalling numpy-1.12.1:
Successfully uninstalled numpy-1.12.1
Found existing installation: scipy 0.19.1
Uninstalling scipy-0.19.1:
Successfully uninstalled scipy-0.19.1
Found existing installation: pandas 0.20.3
Uninstalling pandas-0.20.3:
Successfully uninstalled pandas-0.20.3
Successfully installed numpy-1.12.0 pandas-0.19.2 scipy-0.18.1
[33mYou are using pip version 9.0.1, however version 10.0.1 is available.
You should consider upgrading via the 'pip install --upgrade pip' command.[0m
###Markdown
Load Packages
###Code
import pandas as pd
import numpy as np
import helper
import project_tests
###Output
_____no_output_____
###Markdown
Market DataThe data source we'll be using is the [Wiki End of Day data](https://www.quandl.com/databases/WIKIP) hosted at [Quandl](https://www.quandl.com). This contains data for many stocks, but we'll just be looking at the S&P 500 stocks. We'll also make things a little easier to solve by narrowing our range of time from 2007-06-30 to 2017-09-30. Set API KeySet the `quandl.ApiConfig.api_key ` variable to your Quandl api key. You can find your Quandl api key [here](https://www.quandl.com/account/api).
###Code
import quandl
# TODO: Add your Quandl API Key
quandl.ApiConfig.api_key = ''
###Output
_____no_output_____
###Markdown
Download Data
###Code
import os
snp500_file_path = 'data/tickers_SnP500.txt'
wiki_file_path = 'data/WIKI_PRICES.csv'
start_date, end_date = '2013-07-01', '2017-06-30'
use_columns = ['date', 'ticker', 'adj_close', 'adj_volume', 'ex-dividend']
if not os.path.exists(wiki_file_path):
with open(snp500_file_path) as f:
tickers = f.read().split()
print('Downloading data...')
helper.download_quandl_dataset('WIKI', 'PRICES', wiki_file_path, use_columns, tickers, start_date, end_date)
print('Data downloaded')
else:
print('Data already downloaded')
###Output
_____no_output_____
###Markdown
Load Data
###Code
df = pd.read_csv(wiki_file_path)
###Output
_____no_output_____
###Markdown
Create the UniverseWe'll be selecting dollar volume stocks for our stock universe. This universe is similar to large market cap stocks, because they are the highly liquid.
###Code
percent_top_dollar = 0.2
high_volume_symbols = helper.large_dollar_volume_stocks(df, 'adj_close', 'adj_volume', percent_top_dollar)
df = df[df['ticker'].isin(high_volume_symbols)]
###Output
_____no_output_____
###Markdown
2-D MatricesIn the previous projects, we used a [multiindex](https://pandas.pydata.org/pandas-docs/stable/advanced.html) to store all the data in a single dataframe. As you work with larger datasets, it come infeasable to store all the data in memory. Starting with this project, we'll be storing all our data as 2-D matrices to match what you'll be expecting in the real world.
###Code
close = df.reset_index().pivot(index='ticker', columns='date', values='adj_close')
volume = df.reset_index().pivot(index='ticker', columns='date', values='adj_volume')
ex_dividend = df.reset_index().pivot(index='ticker', columns='date', values='ex-dividend')
###Output
_____no_output_____
###Markdown
View DataTo see what one of these 2-d matrices looks like, let's take a look at the closing prices matrix.
###Code
helper.print_dataframe(close)
###Output
_____no_output_____
###Markdown
Part 1: Smart Beta PortfolioIn Part 1 of this project, you'll build a smart beta portfolio using dividend yield. To see how well it performs, you'll compare this portfolio to an index. Index WeightsAfter building the smart beta portfolio, should compare it to a similar strategy or index.Implement `generate_dollar_volume_weights` to generate the weights for this index. For each date, generate the weights based on dollar volume traded for that date. For example, assume the following is dollar volume traded data:| | 10/02/2010 | 10/03/2010 ||----------|------------|------------|| **AAPL** | 2 | 2 || **BBC** | 5 | 6 || **GGL** | 1 | 2 || **ZGB** | 6 | 5 |The weights should be the following:| | 10/02/2010 | 10/03/2010 ||----------|------------|------------|| **AAPL** | 0.142 | 0.133 || **BBC** | 0.357 | 0.400 || **GGL** | 0.071 | 0.133 || **ZGB** | 0.428 | 0.333 |
###Code
def generate_dollar_volume_weights(close, volume):
"""
Generate dollar volume weights.
Parameters
----------
close : DataFrame
Close price for each ticker and date
volume : str
Volume for each ticker and date
Returns
-------
dollar_volume_weights : DataFrame
The dollar volume weights for each ticker and date
"""
assert close.index.equals(volume.index)
assert close.columns.equals(volume.columns)
#TODO: Implement function
dollar_volume = close * volume
return dollar_volume / dollar_volume.sum()
project_tests.test_generate_dollar_volume_weights(generate_dollar_volume_weights)
###Output
_____no_output_____
###Markdown
View DataLet's generate the index weights using `generate_dollar_volume_weights` and view them using a heatmap.
###Code
index_weights = generate_dollar_volume_weights(close, volume)
helper.plot_weights(index_weights, 'Index Weights')
###Output
_____no_output_____
###Markdown
ETF WeightsNow that we have the index weights, it's time to build the weights for the smart beta ETF. Let's build an ETF portfolio that is based on dividends. This is a common factor used to build portfolios. Unlike most portfolios, we'll be using a single factor for simplicity.Implement `calculate_dividend_weights` to returns the weights for each stock based on its total dividend yield over time. This is similar to generating the weight for the index, but it's dividend data instead.
###Code
def calculate_dividend_weights(ex_dividend):
"""
Calculate dividend weights.
Parameters
----------
ex_dividend : DataFrame
Ex-dividend for each stock and date
Returns
-------
dividend_weights : DataFrame
Weights for each stock and date
"""
#TODO: Implement function
dividend_cumsum_per_ticker = ex_dividend.T.cumsum().T
return dividend_cumsum_per_ticker/dividend_cumsum_per_ticker.sum()
project_tests.test_calculate_dividend_weights(calculate_dividend_weights)
###Output
_____no_output_____
###Markdown
View DataLet's generate the ETF weights using `calculate_dividend_weights` and view them using a heatmap.
###Code
etf_weights = calculate_dividend_weights(ex_dividend)
helper.plot_weights(etf_weights, 'ETF Weights')
###Output
_____no_output_____
###Markdown
ReturnsImplement `generate_returns` to generate the returns. Note this isn't log returns. Since we're not dealing with volatility, we don't have to use log returns.
###Code
def generate_returns(close):
"""
Generate returns for ticker and date.
Parameters
----------
close : DataFrame
Close price for each ticker and date
Returns
-------
returns : Dataframe
The returns for each ticker and date
"""
#TODO: Implement function
return (close.T / close.T.shift(1) -1).T
project_tests.test_generate_returns(generate_returns)
###Output
_____no_output_____
###Markdown
View DataLet's generate the closing returns using `generate_returns` and view them using a heatmap.
###Code
returns = generate_returns(close)
helper.plot_returns(returns, 'Close Returns')
###Output
_____no_output_____
###Markdown
Weighted ReturnsWith the returns of each stock computed, we can use it to compute the returns for for an index or ETF. Implement `generate_weighted_returns` to create weighted returns using returns and weights for an Index or ETF.
###Code
def generate_weighted_returns(returns, weights):
"""
Generate weighted returns.
Parameters
----------
returns : DataFrame
Returns for each ticker and date
weights : DataFrame
Weights for each ticker and date
Returns
-------
weighted_returns : DataFrame
Weighted returns for each ticker and date
"""
assert returns.index.equals(weights.index)
assert returns.columns.equals(weights.columns)
#TODO: Implement function
return returns * weights
project_tests.test_generate_weighted_returns(generate_weighted_returns)
###Output
_____no_output_____
###Markdown
View DataLet's generate the etf and index returns using `generate_weighted_returns` and view them using a heatmap.
###Code
index_weighted_returns = generate_weighted_returns(returns, index_weights)
etf_weighted_returns = generate_weighted_returns(returns, etf_weights)
helper.plot_returns(index_weighted_returns, 'Index Returns')
helper.plot_returns(etf_weighted_returns, 'ETF Returns')
###Output
_____no_output_____
###Markdown
Cumulative ReturnsImplement `calculate_cumulative_returns` to calculate the cumulative returns over time.
###Code
def calculate_cumulative_returns(returns):
"""
Calculate cumulative returns.
Parameters
----------
returns : DataFrame
Returns for each ticker and date
Returns
-------
cumulative_returns : Pandas Series
Cumulative returns for each date
"""
#TODO: Implement function
return (pd.Series([0]).append(returns.sum()) + 1).cumprod().iloc[1:]
project_tests.test_calculate_cumulative_returns(calculate_cumulative_returns)
###Output
_____no_output_____
###Markdown
View DataLet's generate the etf and index cumulative returns using `calculate_cumulative_returns` and compare the two.
###Code
index_weighted_cumulative_returns = calculate_cumulative_returns(index_weighted_returns)
etf_weighted_cumulative_returns = calculate_cumulative_returns(etf_weighted_returns)
helper.plot_benchmark_returns(index_weighted_cumulative_returns, etf_weighted_cumulative_returns, 'Smart Beta ETF vs Index')
###Output
_____no_output_____
###Markdown
Tracking ErrorIn order to check the performance of the smart beta portfolio, we can compare it against the index. Implement `tracking_error` to return the tracking error between the etf and index over time.
###Code
def tracking_error(index_weighted_cumulative_returns, etf_weighted_cumulative_returns):
"""
Calculate the tracking error.
Parameters
----------
index_weighted_cumulative_returns : Pandas Series
The weighted index Cumulative returns for each date
etf_weighted_cumulative_returns : Pandas Series
The weighted etf Cumulative returns for each date
Returns
-------
tracking_error : Pandas Series
The tracking error for each date
"""
assert index_weighted_cumulative_returns.index.equals(etf_weighted_cumulative_returns.index)
#TODO: Implement function
tracking_error = index_weighted_cumulative_returns - etf_weighted_cumulative_returns
return tracking_error
project_tests.test_tracking_error(tracking_error)
###Output
_____no_output_____
###Markdown
View DataLet's generate the tracking error using `tracking_error` and graph it over time.
###Code
smart_beta_tracking_error = tracking_error(index_weighted_cumulative_returns, etf_weighted_cumulative_returns)
helper.plot_tracking_error(smart_beta_tracking_error, 'Smart Beta Tracking Error')
###Output
_____no_output_____
###Markdown
Part 2: Portfolio OptimizationIn Part 2, you'll optimize the index you created in part 1. You'll use `cvxopt` to optimize the convex problem of finding the optimal weights for the portfolio. Just like before, we'll compare these results to the index. CovarianceImplement `get_covariance` to calculate the covariance of `returns` and `weighted_index_returns`. We'll use this to feed into our convex optimization function. By using covariance, we can prevent the optimizer from going all in on a few stocks.
###Code
def get_covariance(returns, weighted_index_returns):
"""
Calculate covariance matrices.
Parameters
----------
returns : DataFrame
Returns for each ticker and date
weighted_index_returns : DataFrame
Weighted index returns for each ticker and date
Returns
-------
xtx, xty : (2 dimensional Ndarray, 1 dimensional Ndarray)
"""
assert returns.index.equals(weighted_index_returns.index)
assert returns.columns.equals(weighted_index_returns.columns)
#TODO: Implement function
returns = returns.fillna(0)
weighted_index_returns = weighted_index_returns.sum().fillna(0)
xtx = returns.dot(returns.T)
xty = returns.dot(np.matrix(weighted_index_returns).T)[0]
return xtx.values, xty.values
project_tests.test_get_covariance(get_covariance)
###Output
_____no_output_____
###Markdown
View DataLet's look the the covariance generated from `get_covariance`.
###Code
xtx, xty = get_covariance(returns, index_weighted_returns)
xtx = pd.DataFrame(xtx, returns.index, returns.index)
xty = pd.Series(xty, returns.index)
helper.plot_covariance(xty, xtx)
###Output
_____no_output_____
###Markdown
Quadratic ProgrammingNow that you have the covariance, we can use this to optimize the weights. Implement `solve_qp` to return the optimal `x` in the convex function with the following constraints:- Sum of all x is 1- x >= 0
###Code
import cvxopt
def solve_qp(P, q):
"""
Find the solution for minimize 0.5P*x*x - q*x with the following constraints:
- sum of all x equals to 1
- All x are greater than or equal to 0
Parameters
----------
P : 2 dimensional Ndarray
q : 1 dimensional Ndarray
Returns
-------
x : 1 dimensional Ndarray
The solution for x
"""
assert len(P.shape) == 2
assert len(q.shape) == 1
assert P.shape[0] == P.shape[1] == q.shape[0]
#TODO: Implement function
nn = len(q)
g = cvxopt.spmatrix(-1, range(nn), range(nn))
a = cvxopt.matrix(np.ones(nn), (1,nn))
b = cvxopt.matrix(1.0)
h = cvxopt.matrix(np.zeros(nn))
P = cvxopt.matrix(P)
q = -cvxopt.matrix(q)
# Min cov
# Max return
cvxopt.solvers.options['show_progress'] = False
sol = cvxopt.solvers.qp(P, q, g, h, a, b)
if 'optimal' not in sol['status']:
return np.array([])
return np.array(sol['x']).flatten()
project_tests.test_solve_qp(solve_qp)
###Output
_____no_output_____
###Markdown
Run the following cell to generate optimal weights using `solve_qp`.
###Code
raw_optim_etf_weights = solve_qp(xtx.values, xty.values)
raw_optim_etf_weights_per_date = np.tile(raw_optim_etf_weights, (len(returns.columns), 1))
optim_etf_weights = pd.DataFrame(raw_optim_etf_weights_per_date.T, returns.index, returns.columns)
###Output
_____no_output_____
###Markdown
Optimized PortfolioWith our optimized etf weights built using quadratic programming, let's compare it to the index. Run the next cell to calculate the optimized etf returns and compare the returns to the index returns.
###Code
optim_etf_returns = generate_weighted_returns(returns, optim_etf_weights)
optim_etf_cumulative_returns = calculate_cumulative_returns(optim_etf_returns)
helper.plot_benchmark_returns(index_weighted_cumulative_returns, optim_etf_cumulative_returns, 'Optimized ETF vs Index')
optim_etf_tracking_error = tracking_error(index_weighted_cumulative_returns, optim_etf_cumulative_returns)
helper.plot_tracking_error(optim_etf_tracking_error, 'Optimized ETF Tracking Error')
###Output
_____no_output_____
###Markdown
Rebalance PortfolioThe optimized etf portfolio used different weights for each day. After calculating in transaction fees, this amount of turnover to the portfolio can reduce the total returns. Let's find the optimal times to rebalance the portfolio instead of doing it every day.Implement `rebalance_portfolio` to rebalance a portfolio.
###Code
def rebalance_portfolio(returns, weighted_index_returns, shift_size, chunk_size):
"""
Get weights for each rebalancing of the portfolio.
Parameters
----------
returns : DataFrame
Returns for each ticker and date
weighted_index_returns : DataFrame
Weighted index returns for each ticker and date
shift_size : int
The number of days between each rebalance
chunk_size : int
The number of days to look in the past for rebalancing
Returns
-------
all_rebalance_weights : list of Ndarrays
The etf weights for each point they are rebalanced
"""
assert returns.index.equals(weighted_index_returns.index)
assert returns.columns.equals(weighted_index_returns.columns)
assert shift_size > 0
assert chunk_size >= 0
#TODO: Implement function
date_len = returns.shape[1]
all_rebalance_weights = []
for shift in range(chunk_size, date_len, shift_size):
start_idx = shift - chunk_size
xtx, xty = get_covariance(returns.iloc[:, start_idx:shift], weighted_index_returns.iloc[:, start_idx:shift])
all_rebalance_weights.append(solve_qp(xtx, xty))
return all_rebalance_weights
project_tests.test_rebalance_portfolio(rebalance_portfolio)
###Output
_____no_output_____
###Markdown
Run the following cell to rebalance the portfolio using `rebalance_portfolio`.
###Code
chunk_size = 250
shift_size = 5
all_rebalance_weights = rebalance_portfolio(returns, index_weighted_returns, shift_size, chunk_size)
###Output
_____no_output_____
###Markdown
Portfolio Rebalance CostWith the portfolio rebalanced, we need to use a metric to measure the cost of rebalancing the portfolio. Implement `get_rebalance_cost` to calculate the rebalance cost.
###Code
def get_rebalance_cost(all_rebalance_weights, shift_size, rebalance_count):
"""
Get the cost of all the rebalancing.
Parameters
----------
all_rebalance_weights : list of Ndarrays
ETF Returns for each ticker and date
shift_size : int
The number of days between each rebalance
rebalance_count : int
Number of times the portfolio was rebalanced
Returns
-------
rebalancing_cost : float
The cost of all the rebalancing
"""
assert shift_size > 0
assert rebalance_count > 0
#TODO: Implement function
all_rebalance_weights_df = pd.DataFrame(np.array(all_rebalance_weights))
rebalance_total = (all_rebalance_weights_df - all_rebalance_weights_df.shift(-1)).abs().sum().sum()
return (shift_size / rebalance_count) * rebalance_total
project_tests.test_get_rebalance_cost(get_rebalance_cost)
###Output
_____no_output_____
###Markdown
Run the following cell to get the rebalance cost from `get_rebalance_cost`.
###Code
unconstrained_costs = get_rebalance_cost(all_rebalance_weights, shift_size, returns.shape[1])
print(unconstrained_costs)
# IGNORE THIS CODE
# THIS CODE IS TEST CODE FOR BUILDING PROJECT
# THIS WILL BE REMOVED BEFORE FINAL PROJECT
# Error checking while refactoring
assert np.isclose(optim_etf_weights, np.load('check_data/po_weights.npy'), equal_nan=True).all()
assert np.isclose(optim_etf_tracking_error, np.load('check_data/po_tracking_error.npy'), equal_nan=True).all()
assert np.isclose(smart_beta_tracking_error, np.load('check_data/sb_tracking_error.npy'), equal_nan=True).all()
# Error checking while refactoring
assert np.isclose(unconstrained_costs, 0.10739965758876144), unconstrained_costs
###Output
_____no_output_____ |
05_callback.training_utils.ipynb | ###Markdown
Training Utility Callbacks> Very basic Callbacks to enhance the training experience including CUDA support
###Code
#export
# Contains code used/modified by fastai_minima author from fastai
# Copyright 2019 the fast.ai team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language
#hide
from nbdev.showdoc import *
from fastcore.test import *
#export
from fastai_minima.callback.core import Callback
from fastai_minima.learner import Learner
from fastai_minima.utils import defaults, noop, default_device, to_device
from fastprogress.fastprogress import progress_bar,master_bar
from fastcore.basics import patch, ifnone
from contextlib import contextmanager
#export
class ProgressCallback(Callback):
"A `Callback` to handle the display of progress bars"
order,_stateattrs = 60,('mbar','pbar')
def before_fit(self):
"Setup the master bar over the epochs"
assert hasattr(self.learn, 'recorder')
if self.create_mbar: self.mbar = master_bar(list(range(self.n_epoch)))
if self.learn.logger != noop:
self.old_logger,self.learn.logger = self.logger,self._write_stats
self._write_stats(self.recorder.metric_names)
else: self.old_logger = noop
def before_epoch(self):
"Update the master bar"
if getattr(self, 'mbar', False): self.mbar.update(self.epoch)
def before_train(self):
"Launch a progress bar over the training dataloader"
self._launch_pbar()
def before_validate(self):
"Launch a progress bar over the validation dataloader"
self._launch_pbar()
def after_train(self):
"Close the progress bar over the training dataloader"
self.pbar.on_iter_end()
def after_validate(self):
"Close the progress bar over the validation dataloader"
self.pbar.on_iter_end()
def after_batch(self):
"Update the current progress bar"
self.pbar.update(self.iter+1)
if hasattr(self, 'smooth_loss'): self.pbar.comment = f'{self.smooth_loss:.4f}'
def _launch_pbar(self):
self.pbar = progress_bar(self.dl, parent=getattr(self, 'mbar', None), leave=False)
self.pbar.update(0)
def after_fit(self):
"Close the master bar"
if getattr(self, 'mbar', False):
self.mbar.on_iter_end()
delattr(self, 'mbar')
if hasattr(self, 'old_logger'): self.learn.logger = self.old_logger
def _write_stats(self, log):
if getattr(self, 'mbar', False): self.mbar.write([f'{l:.6f}' if isinstance(l, float) else str(l) for l in log], table=True)
if not hasattr(defaults, 'callbacks'): defaults.callbacks = [TrainEvalCallback, Recorder, ProgressCallback]
elif ProgressCallback not in defaults.callbacks: defaults.callbacks.append(ProgressCallback)
#hide
import torch
from torch.utils.data import TensorDataset, DataLoader
from fastai_minima.learner import DataLoaders
from torch import nn
def synth_dbunch(a=2, b=3, bs=16, n_train=10, n_valid=2):
"A simple dataset where `x` is random and `y = a*x + b` plus some noise."
def get_data(n):
x = torch.randn(int(bs*n))
return TensorDataset(x, a*x + b + 0.1*torch.randn(int(bs*n)))
train_ds = get_data(n_train)
valid_ds = get_data(n_valid)
train_dl = DataLoader(train_ds, batch_size=bs, shuffle=True, num_workers=0)
valid_dl = DataLoader(valid_ds, batch_size=bs, num_workers=0)
return DataLoaders(train_dl, valid_dl)
def synth_learner(n_train=10, n_valid=2, lr=defaults.lr, **kwargs):
data = synth_dbunch(n_train=n_train,n_valid=n_valid)
return Learner(data, RegModel(), loss_func=nn.MSELoss(), lr=lr, **kwargs)
class RegModel(nn.Module):
    "A minimal linear regression model: y = x*a + b"
def __init__(self):
super().__init__()
self.a,self.b = nn.Parameter(torch.randn(1)),nn.Parameter(torch.randn(1))
def forward(self, x): return x*self.a + self.b
learn = synth_learner()
learn.fit(5)
#export
@patch
@contextmanager
def no_bar(self:Learner):
"Context manager that deactivates the use of progress bars"
has_progress = hasattr(self, 'progress')
if has_progress: self.remove_cb(self.progress)
try: yield self
finally:
if has_progress: self.add_cb(ProgressCallback())
learn = synth_learner()
with learn.no_bar(): learn.fit(5)
#hide
#Check validate works without any training
import torch.nn.functional as F
def tst_metric(out, targ): return F.mse_loss(out, targ)
learn = synth_learner(metrics=tst_metric)
preds,targs = learn.validate()
show_doc(ProgressCallback.before_fit)
show_doc(ProgressCallback.before_epoch)
show_doc(ProgressCallback.before_train)
show_doc(ProgressCallback.before_validate)
show_doc(ProgressCallback.after_batch)
show_doc(ProgressCallback.after_train)
show_doc(ProgressCallback.after_validate)
show_doc(ProgressCallback.after_fit)
#export
from fastcore.foundation import L
class CollectDataCallback(Callback):
"Collect all batches, along with `pred` and `loss`, into `self.data`. Mainly for testing"
def before_fit(self): self.data = L()
def after_batch(self):
self.data.append(self.learn.to_detach((self.xb,self.yb,self.pred,self.loss)))
# export
class CudaCallback(Callback):
"Move data to CUDA device"
def __init__(self, device=None): self.device = ifnone(device, default_device())
def before_batch(self): self.learn.xb,self.learn.yb = to_device(self.xb),to_device(self.yb)
def before_fit(self): self.model.to(self.device)
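# Example usage (a sketch, not part of the original notebook): attach CudaCallback so the
# model and each batch are moved to the default device before training. Guarded so it only
# runs when a GPU is actually available; `add_cb` is the same Learner method used by `no_bar` above.
if torch.cuda.is_available():
    learn = synth_learner()
    learn.add_cb(CudaCallback())
    learn.fit(1)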
###Output
_____no_output_____ |
sorting_searching/quick_sort/quick_sort_challenge.ipynb | ###Markdown
This notebook was prepared by [Donne Martin](http://donnemartin.com). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges). Challenge Notebook Problem: Implement quick sort.* [Constraints](Constraints)* [Test Cases](Test-Cases)* [Algorithm](Algorithm)* [Code](Code)* [Unit Test](Unit-Test)* [Solution Notebook](Solution-Notebook) Constraints* Is a naive solution sufficient (ie not in-place)? * Yes* Are duplicates allowed? * Yes* Can we assume the input is valid? * No* Can we assume this fits memory? * Yes Test Cases* None -> Exception* Empty input -> []* One element -> [element]* Two or more elements AlgorithmRefer to the [Solution Notebook](http://nbviewer.ipython.org/github/donnemartin/interactive-coding-challenges/blob/master/sorting_searching/quick_sort/quick_sort_solution.ipynb). If you are stuck and need a hint, the solution notebook's algorithm discussion might be a good place to start. Code
###Code
class QuickSort(object):
def sort(self, data):
# TODO: Implement me
pass
###Output
_____no_output_____
###Markdown
Unit Test**The following unit test is expected to fail until you solve the challenge.**
###Code
# %load test_quick_sort.py
from nose.tools import assert_equal, assert_raises
class TestQuickSort(object):
def test_quick_sort(self):
quick_sort = QuickSort()
print('None input')
assert_raises(TypeError, quick_sort.sort, None)
print('Empty input')
assert_equal(quick_sort.sort([]), [])
print('One element')
assert_equal(quick_sort.sort([5]), [5])
print('Two or more elements')
data = [5, 1, 7, 2, 6, -3, 5, 7, -1]
assert_equal(quick_sort.sort(data), sorted(data))
print('Success: test_quick_sort\n')
def main():
test = TestQuickSort()
test.test_quick_sort()
if __name__ == '__main__':
main()
###Output
_____no_output_____
###Markdown
This notebook was prepared by [Donne Martin](http://donnemartin.com). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges). Challenge Notebook Problem: Implement quick sort.* [Constraints](Constraints)* [Test Cases](Test-Cases)* [Algorithm](Algorithm)* [Code](Code)* [Unit Test](Unit-Test)* [Solution Notebook](Solution-Notebook) Constraints* Is a naive solution sufficient (ie not in-place)? * Yes* Are duplicates allowed? * Yes Test Cases* None -> None* Empty input -> []* One element -> [element]* Two or more elements AlgorithmRefer to the [Solution Notebook](http://nbviewer.ipython.org/github/donnemartin/interactive-coding-challenges/blob/master/sorting_searching/quick_sort/quick_sort_solution.ipynb). If you are stuck and need a hint, the solution notebook's algorithm discussion might be a good place to start. Code
###Code
def quick_sort(data):
# TODO: Implement me
pass
###Output
_____no_output_____
###Markdown
Unit Test**The following unit test is expected to fail until you solve the challenge.**
###Code
# %load test_quick_sort.py
from nose.tools import assert_equal
class TestQuickSort(object):
def test_quick_sort(self, func):
print('None input')
data = None
sorted_data = func(data)
assert_equal(sorted_data, None)
print('Empty input')
data = []
sorted_data = func(data)
assert_equal(sorted_data, [])
print('One element')
data = [5]
sorted_data = func(data)
assert_equal(sorted_data, [5])
print('Two or more elements')
data = [5, 1, 7, 2, 6, -3, 5, 7, -1]
sorted_data = func(data)
assert_equal(sorted_data, sorted(data))
print('Success: test_quick_sort\n')
def main():
test = TestQuickSort()
test.test_quick_sort(quick_sort)
if __name__ == '__main__':
main()
###Output
_____no_output_____
###Markdown
This notebook was prepared by [Donne Martin](http://donnemartin.com). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges). Challenge Notebook Problem: Implement quick sort.* [Constraints](Constraints)* [Test Cases](Test-Cases)* [Algorithm](Algorithm)* [Code](Code)* [Unit Test](Unit-Test)* [Solution Notebook](Solution-Notebook) Constraints* Is a naive solution sufficient (ie not in-place)? * Yes* Are duplicates allowed? * Yes* Can we assume the input is valid? * No* Can we assume this fits memory? * Yes Test Cases* None -> Exception* Empty input -> []* One element -> [element]* Two or more elements AlgorithmRefer to the [Solution Notebook](http://nbviewer.ipython.org/github/donnemartin/interactive-coding-challenges/blob/master/sorting_searching/quick_sort/quick_sort_solution.ipynb). If you are stuck and need a hint, the solution notebook's algorithm discussion might be a good place to start. Code
###Code
class QuickSort(object):
def sort(self, data):
if data is None:
raise TypeError()
return self._sort(data, 0, len(data) - 1)
def _swap(self, data, i, j):
tmp = data[i]
data[i] = data[j]
data[j] = tmp
# partition that use only one for, choose pivot as data[low]
def _partition4(self, data, low, high):
pivot = data[low]
firstlow = high
# move all numbers that > pivot to firstlow ~ high
for i in range(high, low, -1):
if (data[i] > pivot):
self._swap(data, i, firstlow)
firstlow -= 1
self._swap(data, low, firstlow)
return firstlow
# partition that use only one for, choose pivot as data[high]
def _partition3(self, data, low, high):
if low >= high:
return low
pivot = data[high]
firsthigh = low
# move all numbers that < pivot to low ~ firsthigh
for i in range(low, high):
if (data[i] < pivot):
self._swap(data, i, firsthigh)
firsthigh += 1
self._swap(data, high, firsthigh)
return firsthigh
# partition that choose pivot as data[high]
def _partition2(self, data, low, high):
if low >= high:
return low
pivot = data[high]
i = low
j = high - 1
while (1):
while (i < high):
if (data[i] > pivot):
break;
i += 1
while (j > low):
if (data[j] < pivot):
break;
j -= 1
if (i >= j):
break;
self._swap(data, i, j)
self._swap(data, high, i)
return i
# partition that choose pivot as data[low]
def _partition(self, data, low, high):
if low >= high:
return low
pivot = data[low]
i = low + 1
j = high
while (1):
# search from i to j, find a number > pivot
while (i < high):
if (data[i] > pivot):
break
i += 1
# search from j to i, find a number < pivot
while (j > low):
if (data[j] < pivot):
break
j -= 1
if i >= j:
# low + 1 to i-1 are numbers that < pivot
# j + 1 to high are numbers that >= pivot
break
self._swap(data, i, j)
# data[j] is a number that < pivot, data[j + 1] is a number that > pivot,
# swap data[j] and pivot so that [low, j] are numbers that < pivot, including pivot.
self._swap(data, low, j)
return j
def _sort(self, data, low, high):
if low >= high:
return data
p = self._partition4(data, low, high)
self._sort(data, low, p-1)
self._sort(data, p+1, high)
return data
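# Quick demonstration (not part of the original challenge file): sort a small sample list
# with the QuickSort class defined above.
print(QuickSort().sort([5, 1, 7, 2, 6, -3, 5, 7, -1]))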
###Output
_____no_output_____
###Markdown
Unit Test**The following unit test is expected to fail until you solve the challenge.**
###Code
# %load test_quick_sort.py
from nose.tools import assert_equal, assert_raises
class TestQuickSort(object):
def test_quick_sort(self):
quick_sort = QuickSort()
print('None input')
assert_raises(TypeError, quick_sort.sort, None)
print('Empty input')
assert_equal(quick_sort.sort([]), [])
print('One element')
assert_equal(quick_sort.sort([5]), [5])
print('Two or more elements')
data = [5, 1, 7, 2, 6, -3, 5, 7, -1]
assert_equal(quick_sort.sort(data), sorted(data))
print('Success: test_quick_sort\n')
def main():
test = TestQuickSort()
test.test_quick_sort()
if __name__ == '__main__':
main()
###Output
None input
Empty input
One element
Two or more elements
Success: test_quick_sort
###Markdown
This notebook was prepared by [Donne Martin](http://donnemartin.com). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges). Challenge Notebook Problem: Implement quick sort.* [Constraints](Constraints)* [Test Cases](Test-Cases)* [Algorithm](Algorithm)* [Code](Code)* [Unit Test](Unit-Test)* [Solution Notebook](Solution-Notebook) Constraints* Is a naive solution sufficient (ie not in-place)? * Yes Test Cases* Empty input -> []* One element -> [element]* Two or more elements AlgorithmRefer to the [Solution Notebook](http://nbviewer.ipython.org/github/donnemartin/interactive-coding-challenges/blob/master/sorting_searching/quick_sort/quick_sort_solution.ipynb). If you are stuck and need a hint, the solution notebook's algorithm discussion might be a good place to start. Code
###Code
def quick_sort(data):
# TODO: Implement me
pass
###Output
_____no_output_____
###Markdown
Unit Test**The following unit test is expected to fail until you solve the challenge.**
###Code
# %load test_quick_sort.py
from nose.tools import assert_equal
class TestQuickSort(object):
def test_quick_sort(self, func):
print('Empty input')
data = []
sorted_data = func(data)
assert_equal(sorted_data, [])
print('One element')
data = [5]
sorted_data = func(data)
assert_equal(sorted_data, [5])
print('Two or more elements')
data = [5, 1, 7, 2, 6, -3, 5, 7, -1]
sorted_data = func(data)
assert_equal(sorted_data, sorted(data))
print('Success: test_quick_sort\n')
def main():
test = TestQuickSort()
test.test_quick_sort(quick_sort)
try:
test.test_quick_sort(quick_sort_alt)
except NameError:
# Alternate solutions are only defined
# in the solutions file
pass
if __name__ == '__main__':
main()
###Output
_____no_output_____
###Markdown
This notebook was prepared by [Donne Martin](http://donnemartin.com). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges). Challenge Notebook Problem: Implement quick sort.* [Constraints](Constraints)* [Test Cases](Test-Cases)* [Algorithm](Algorithm)* [Code](Code)* [Unit Test](Unit-Test)* [Solution Notebook](Solution-Notebook) Constraints* Is a naive solution sufficient (ie not in-place)? * Yes* Are duplicates allowed? * Yes* Can we assume the input is valid? * No* Can we assume this fits memory? * Yes Test Cases* None -> Exception* Empty input -> []* One element -> [element]* Two or more elements AlgorithmRefer to the [Solution Notebook](http://nbviewer.ipython.org/github/donnemartin/interactive-coding-challenges/blob/master/sorting_searching/quick_sort/quick_sort_solution.ipynb). If you are stuck and need a hint, the solution notebook's algorithm discussion might be a good place to start. Code
###Code
class QuickSort(object):
def sort(self, data):
def quickSort(alist):
quickSortHelper(alist, 0, len(alist) - 1)
def quickSortHelper(alist, first, last):
if first < last:
splitpoint = partition(alist, first, last)
quickSortHelper(alist, first, splitpoint - 1)
quickSortHelper(alist, splitpoint + 1, last)
def partition(alist,first,last):
pivotvalue = alist[last]
leftmark = first
rightmark = last - 1
while True:
while alist[leftmark] < pivotvalue:
leftmark = leftmark + 1
while alist[rightmark] > pivotvalue:
rightmark = rightmark -1
if leftmark < rightmark:
alist[leftmark], alist[rightmark] = alist[rightmark], alist[leftmark]
else:
break
alist[last], alist[leftmark] = alist[leftmark], alist[last]
return leftmark
def quick_sort(left, right):
if right - left < 1:
return
else:
pivot = right
l = left
r = right - 1
while True:
while data[l] < data[pivot]:
l += 1
while data[r] > data[pivot]:
r -= 1
if l < r:
data[l], data[r] = data[r], data[l]
else:
data[l], data[pivot] = data[pivot], data[l]
break
pivot = l
quick_sort(left, pivot - 1)
quick_sort(pivot + 1, right)
if data is None:
raise TypeError
quickSort(data)
#quick_sort(0, len(data) - 1)
return data
###Output
_____no_output_____
###Markdown
Unit Test**The following unit test is expected to fail until you solve the challenge.**
###Code
# %load test_quick_sort.py
import unittest
class TestQuickSort(unittest.TestCase):
def test_quick_sort(self):
quick_sort = QuickSort()
print('None input')
self.assertRaises(TypeError, quick_sort.sort, None)
print('Empty input')
self.assertEqual(quick_sort.sort([]), [])
print('One element')
self.assertEqual(quick_sort.sort([5]), [5])
print('Two or more elements')
data = [5, 1, 7, 2, 6, -3, 5, 7, -1]
self.assertEqual(quick_sort.sort(data), sorted(data))
print('Success: test_quick_sort\n')
def main():
test = TestQuickSort()
test.test_quick_sort()
if __name__ == '__main__':
main()
###Output
None input
Empty input
One element
Two or more elements
Success: test_quick_sort
###Markdown
This notebook was prepared by [Donne Martin](http://donnemartin.com). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges). Challenge Notebook Problem: Implement quick sort.* [Constraints](Constraints)* [Test Cases](Test-Cases)* [Algorithm](Algorithm)* [Code](Code)* [Unit Test](Unit-Test)* [Solution Notebook](Solution-Notebook) Constraints* Is a naive solution sufficient (ie not in-place)? * Yes* Are duplicates allowed? * Yes* Can we assume the input is valid? * No* Can we assume this fits memory? * Yes Test Cases* None -> Exception* Empty input -> []* One element -> [element]* Two or more elements AlgorithmRefer to the [Solution Notebook](http://nbviewer.ipython.org/github/donnemartin/interactive-coding-challenges/blob/master/sorting_searching/quick_sort/quick_sort_solution.ipynb). If you are stuck and need a hint, the solution notebook's algorithm discussion might be a good place to start. Code
###Code
class QuickSort(object):
    def sort(self, data):
        if data is None:
            raise TypeError('data cannot be None')
        if len(data) <= 1:
            return data
        pivot = data[0]
        lower = [item for item in data[1:] if item <= pivot]
        upper = [item for item in data[1:] if item > pivot]
        return self.sort(lower) + [pivot] + self.sort(upper)
###Output
_____no_output_____
###Markdown
Unit Test**The following unit test is expected to fail until you solve the challenge.**
###Code
# %load test_quick_sort.py
from nose.tools import assert_equal, assert_raises
class TestQuickSort(object):
def test_quick_sort(self):
quick_sort = QuickSort()
print('None input')
assert_raises(TypeError, quick_sort.sort, None)
print('Empty input')
assert_equal(quick_sort.sort([]), [])
print('One element')
assert_equal(quick_sort.sort([5]), [5])
print('Two or more elements')
data = [5, 1, 7, 2, 6, -3, 5, 7, -1]
assert_equal(quick_sort.sort(data), sorted(data))
print('Success: test_quick_sort\n')
def main():
test = TestQuickSort()
test.test_quick_sort()
if __name__ == '__main__':
main()
###Output
None input
Empty input
One element
Two or more elements
Success: test_quick_sort
###Markdown
This notebook was prepared by [Donne Martin](http://donnemartin.com). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges). Challenge Notebook Problem: Implement quick sort.* [Constraints](Constraints)* [Test Cases](Test-Cases)* [Algorithm](Algorithm)* [Code](Code)* [Unit Test](Unit-Test)* [Solution Notebook](Solution-Notebook) Constraints* Is a naive solution sufficient (ie not in-place)? * Yes* Are duplicates allowed? * Yes* Can we assume the input is valid? * No* Can we assume this fits memory? * Yes Test Cases* None -> Exception* Empty input -> []* One element -> [element]* Two or more elements AlgorithmRefer to the [Solution Notebook](http://nbviewer.ipython.org/github/donnemartin/interactive-coding-challenges/blob/master/sorting_searching/quick_sort/quick_sort_solution.ipynb). If you are stuck and need a hint, the solution notebook's algorithm discussion might be a good place to start. Code
###Code
class QuickSort(object):
def sort(self, data):
# TODO: Implement me
pass
###Output
_____no_output_____
###Markdown
Unit Test**The following unit test is expected to fail until you solve the challenge.**
###Code
# %load test_quick_sort.py
import unittest
class TestQuickSort(unittest.TestCase):
def test_quick_sort(self):
quick_sort = QuickSort()
print('None input')
self.assertRaises(TypeError, quick_sort.sort, None)
print('Empty input')
self.assertEqual(quick_sort.sort([]), [])
print('One element')
self.assertEqual(quick_sort.sort([5]), [5])
print('Two or more elements')
data = [5, 1, 7, 2, 6, -3, 5, 7, -1]
self.assertEqual(quick_sort.sort(data), sorted(data))
print('Success: test_quick_sort\n')
def main():
test = TestQuickSort()
test.test_quick_sort()
if __name__ == '__main__':
main()
###Output
_____no_output_____ |
notebooks/subscriptions.ipynb | ###Markdown
Subscriptions AnalysisThis notebook analyses subscriptions to different products
###Code
# Check Python version for compatibility/reference
import sys
print(sys.executable)
print(sys.version)
print(sys.version_info)
import pandas as pd
import numpy as np
# Check Pandas and Numpy version numbering for compatibility/reference
print(f"Pandas version: \t{pd.__version__}")
print(f"NumPy version: \t\t{np.__version__}")
# Read the contents of the csv file into a Pandas dataframe
# Signal that the 'start', 'end', and 'cancelled' columns should be datetime objects
df = pd.read_csv('../data_files/subscriptions.csv',
parse_dates=['start','end','cancelled'],
infer_datetime_format=True)
# Check head of dataframe
df.head(10)
###Output
_____no_output_____
###Markdown
Data Quality and Data Cleansing
###Code
# Get information on data types and presence of null cancelled values
# Also confirms start', 'end', and 'cancelled' columns have datetime data type
df.info()
# Convert 0 to False and 1 to True
df['is_free'] = np.where(df['is_free'] == 1, True, False)
# Verify change
df.head(5)
###Output
_____no_output_____
###Markdown
Interrogate the Dataset for Cleansing
###Code
# Confirm that all records contains a start and end date
df['start'].notnull().sum() == df['end'].notnull().sum() == df['start'].count()
# Get the earliest dated start date record
min(df['start'])
# Get the latest dated start date record
max(df['start'])
###Output
_____no_output_____
###Markdown
Calculate Some Headline Data
###Code
# Number of records
df['start'].count()
# Number of Cancelled records
cancelled_recs = df['cancelled'].notnull().sum()
cancelled_recs
# Number of Uncancelled records
uncancelled_recs = df['cancelled'].isnull().sum()
uncancelled_recs
# Sanity check to ensure that the sum of uncancelled and cancelled records equals the total records
total_recs = cancelled_recs + uncancelled_recs
total_recs
# Number of distinct accounts
df['account_id'].nunique()
# Get row count by subscription title to get a sense of most/least common subscriptions
df['title'].value_counts()
# Sanity check to ensure that the sum of the title groupings equals the total records
df['title'].value_counts().sum() == total_recs
# Compile array of the dataset's records where the 'start' column is future-dated
from datetime import datetime
fd_recs = np.where(df['start'] > datetime.now())
# Count the number of rows where the 'start' column is future-dated
# (np.where returns a tuple of index arrays, so use the size of the first array)
fd_recs[0].size
###Output
_____no_output_____
###Markdown
Drop the 13 future-dated rows from the dataframe
###Code
# Remove future-dated rows from the dataframe
# fd_recs[0] holds positional indices; df still has its default RangeIndex,
# so those positions match the index labels and can be dropped directly
df = df.drop(index=fd_recs[0]).reset_index(drop=True)
# Check the number of records has now been reduced by 13
df['start'].count()
###Output
_____no_output_____
###Markdown
Append Helper Columns to Analysis
###Code
# Add subscription type column to differentiate between 'Pro' and 'Basic' subscriptions
# This means that future subscription types could be added so long as their title included 'Pro'
df['sub_type'] = np.where(df['title'].str.contains('Pro'), 'Pro', 'Basic')
# Add subscription level column to differentiate between 'Trial' and 'Paid' subscriptions
df['sub_level'] = np.where(df['title'].str.contains('Trial'), 'Trial', 'Paid')
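# Add subscription duration: measured from start to the end date for uncancelled
# subscriptions, or from start to the cancellation date where one exists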
df['sub_duration (s)'] = np.where(df['cancelled'].isnull(), df['end'] - df['start'], df['cancelled'] - df['start'])
df.head(5)
# Check data type of sub_duration helper column
df.dtypes['sub_duration (s)']
# Convert sub_duration field into duration in seconds
df['sub_duration (s)'] = [td.total_seconds() for td in df['sub_duration (s)']]
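# Note: the list comprehension above could also be written as df['sub_duration (s)'].dt.total_seconds()
# while the column still holds timedelta values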
df.head()
# Create grouping table for each account and subscription level
# It seems appropriate to group on subscriptions which are trials versus those that are paid
sub_lengths_by_account = df.sort_values(['sub_level'], ascending=False).groupby(['account_id', 'sub_level']).agg({'sub_duration (s)':np.sum})
# Append a column that calculates the duration of the subscription in the number of days
sub_lengths_by_account['sub_duration (days)'] = sub_lengths_by_account['sub_duration (s)']/24/60/60
# Drop 'sub_duration (s)' column
sub_lengths_by_account = sub_lengths_by_account.drop(columns=['sub_duration (s)'])
# Round the duration in days to 2 d.p.
# Display the first 20 rows, which is sufficient to see examples of trial only, paid only, and trial and paid
sub_lengths_by_account.round(2).head(20)
###Output
_____no_output_____ |
TGAN/TGAN-master/examples/Usage_Example.ipynb | ###Markdown
Usage ExampleIn this notebook we will show the most basic usage of **TGAN** in order to generate samples from a given dataset. 1. Load the dataThe first step is to load the data which we will use to fit TGAN. In order to do so, we will first import the function `tgan.data.load_demo_data` and call it with the name of the dataset that we want to load. In this case, we will load the `census` dataset, which we will use during the subsequent steps, and obtain two objects:1. `data` will contain a `pandas.DataFrame` with the table of data from the `census` dataset ready to be used to fit the model.2. `continuous_columns` will contain a `list` with the indices of continuous columns.
###Code
from tgan.data import load_demo_data
data, continuous_columns = load_demo_data('census')
data.head(3).T
continuous_columns
###Output
_____no_output_____
###Markdown
2. Create a TGAN instanceThe next step is to import TGAN and create an instance of the model.To do so, we need to import the `tgan.model.TGANModel` class and call it.This will create a TGAN instance with the default parameters.
###Code
from tgan.model import TGANModel
tgan = TGANModel(continuous_columns)
###Output
_____no_output_____
###Markdown
3. Fit the modelThe third step is to pass the data that we have loaded previously to the `TGANModel.fit` method tostart the fitting.This process will not return anything, however, the progress of the fitting will be printed into screen.**NOTE** Depending on the performance of the system you are running, and the parameters selectedfor the model, this step can take up to a few hours.
###Code
tgan.fit(data)
###Output
W0716 20:46:38.831994 139854960686848 deprecation_wrapper.py:119] From /home/matias-desktop/anaconda3/lib/python3.7/site-packages/tensorpack/graph_builder/model_desc.py:29: The name tf.get_default_graph is deprecated. Please use tf.compat.v1.get_default_graph instead.
W0716 20:46:38.832826 139854960686848 deprecation_wrapper.py:119] From /home/matias-desktop/anaconda3/lib/python3.7/site-packages/tensorpack/graph_builder/model_desc.py:39: The name tf.placeholder is deprecated. Please use tf.compat.v1.placeholder instead.
W0716 20:46:38.881527 139854960686848 deprecation_wrapper.py:119] From /home/matias-desktop/anaconda3/lib/python3.7/site-packages/tensorpack/input_source/input_source.py:219: The name tf.FIFOQueue is deprecated. Please use tf.queue.FIFOQueue instead.
###Markdown
4. Sample new dataAfter the model has been fit, we are ready to generate new samples by calling the `TGANModel.sample`method passing it the desired amount of samples.The returned object, `samples`, is a `pandas.DataFrame` containing a table of synthetic data withthe same format as the input data and 1000 rows as we requested.
###Code
num_samples = 1000
samples = tgan.sample(num_samples)
samples.head(3)
###Output
_____no_output_____
###Markdown
5. Save and Load a modelIn the steps above we saw that the fitting process is slow, so we probably would like to avoid having to fit every time we want to generate samples. Instead we can fit a model once, save it, and load it every time we want to sample new data.If we have a fitted model, we can save it by calling the `TGANModel.save` method, which only takes as argument the path to store the model into. Similarly, `TGANModel.load` allows us to load a model stored on disk by passing as argument a path where the model is stored.At this point we could use this model instance to generate more samples.
###Code
model_path = 'demo/my_model'
tgan.save(model_path)
new_tgan = TGANModel.load(model_path)
new_samples = new_tgan.sample(num_samples)
new_samples.head(3)
###Output
_____no_output_____
###Markdown
Loading custom datasetsIn the previous steps we used some demonstration data but we did not show how to load your own dataset. In order to do so you can use `pandas.read_csv` by passing it the path to the CSV file that you want to load. Additionally, you will need to create a 0-indexed list of column indices to be considered continuous. For example, if we want to load a local CSV file, `path/to/my.csv`, whose first 4 columns, that is, indices `[0,1,2,3]`, are continuous, we would do it like this:
###Code
import pandas as pd
data = pd.read_csv('/home/matias-desktop/Descargas/Datos2.csv')
continuous_columns = [0,1]
print (continuous_columns)
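# The hypothetical example described above would look like this
# (illustrative path and column indices, not files from this machine):
# data = pd.read_csv('path/to/my.csv')
# continuous_columns = [0, 1, 2, 3]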
###Output
[0, 1]
###Markdown
Model ParametersIf you want to change the default behavior of TGANModel, such as a different `batch_size` or `num_epochs`, you can do so by passing different arguments when creating the instance. Have a look at the arguments below. Model general behavior* continuous_columns (`list[int]`, required): List of columns to be considered continuous.* output (`str`, default=`output`): Path to store the model and its artifacts.* gpu (`list[str]`, default=`[]`): Comma separated list of GPU(s) to use. Neural network definition and fitting* max_epoch (`int`, default=`100`): Number of epochs to use during training.* steps_per_epoch (`int`, default=`10000`): Number of steps to run on each epoch.* save_checkpoints(`bool`, default=True): Whether or not to store checkpoints of the model after each training epoch.* restore_session(`bool`, default=True): Whether or not to continue training from the last checkpoint.* batch_size (`int`, default=`200`): Size of the batch to feed the model at each step.* z_dim (`int`, default=`100`): Number of dimensions in the noise input for the generator.* noise (`float`, default=`0.2`): Upper bound to the gaussian noise added to categorical columns.* l2norm (`float`, default=`0.00001`): L2 regularization coefficient when computing losses.* learning_rate (`float`, default=`0.001`): Learning rate for the optimizer.* num_gen_rnn (`int`, default=`400`):* num_gen_feature (`int`, default=`100`): Number of features in the generator.* num_dis_layers (`int`, default=`2`):* num_dis_hidden (`int`, default=`200`):* optimizer (`str`, default=`AdamOptimizer`): Name of the optimizer to use during `fit`, possible values are: [`GradientDescentOptimizer`, `AdamOptimizer`, `AdadeltaOptimizer`].If we wanted to create an identical instance to the one created on step 2, but passing the arguments in an explicit way, we would do something like this:
###Code
tgan = TGANModel(
continuous_columns,
output='output',
gpu=None,
max_epoch=5,
steps_per_epoch=100,
save_checkpoints=True,
restore_session=True,
batch_size=200,
z_dim=200,
noise=0.2,
l2norm=0.00001,
learning_rate=0.001,
num_gen_rnn=100,
num_gen_feature=100,
num_dis_layers=1,
num_dis_hidden=100,
optimizer='AdamOptimizer'
)
###Output
_____no_output_____
###Markdown
Command-line interfaceWe include a command-line interface that allows users to access TGAN functionality. Currently only one action is supported. Random hyperparameter search InputTo run random searches for the best model hyperparameters for a given dataset, we will need:* A dataset, in a csv file, without any missing value, only columns of type `bool`, `str`, `int` or `float` and only one type per column, as specified in [Data Format Input](data-format-input).* A JSON file containing the configuration for the search. This configuration shall contain: * `name`: Name of the experiment. A folder with this name will be created. * `num_random_search`: Number of iterations in hyperparameter search. * `train_csv`: Path to the csv file containing the dataset. * `continuous_cols`: List of column indices, starting at 0, to be considered continuous. * `epoch`: Number of epochs to train the model. * `steps_per_epoch`: Number of optimization steps in each epoch. * `sample_rows`: Number of rows to sample when evaluating the model.You can see an example of such a json file in [examples/config.json](examples/config.json), which you can download and use as a template. ExecutionOnce we have prepared everything we can launch the random hyperparameter search with this command:``` bashtgan experiments config.json results.json```Where the first argument, `config.json`, is the path to your configuration JSON, and the second, `results.json`, is the path to store the summary of the execution.This will run the random search, which basically consists of the following steps:1. We fetch and split our data between test and train.2. We randomly select the hyperparameters to test.3. Then, for each hyperparameter combination, we train a TGAN model using the real training data T and generate a synthetic training dataset Tsynth.4. We then train machine learning models on both the real and synthetic datasets.5. We use these trained models on real test data and see how well they perform. OutputOnce the experiment has finished, the following can be found:* A JSON file, in the example above called `results.json`, containing a summary of the experiments. This JSON will contain a key for each experiment `name`, and on it, an array of length `num_random_search`, with the selected parameters and their evaluation score. For a configuration like the example, the summary will look like this:``` python{ 'census': [ { "steps_per_epoch" : 10000, "num_gen_feature" : 300, "num_dis_hidden" : 300, "batch_size" : 100, "num_gen_rnn" : 400, "score" : 0.937802280415988, "max_epoch" : 5, "num_dis_layers" : 4, "learning_rate" : 0.0002, "z_dim" : 100, "noise" : 0.2 }, ... 9 more nodes ]}```* A set of folders, each one named after the `name` specified in the JSON configuration, contained in the `experiments` folder. In each folder, sampled data and the models can be found. For a configuration like the example, this will look like this:```experiments/ census/ data/ Sampled data with each of the models in the random search. model_0/ logs/ Training logs model/ Tensorflow model checkpoints model_1/ 9 more folders, one for each model in the random search ...``` CitationIf you use TGAN, please cite the following work:> Lei Xu, Kalyan Veeramachaneni. 2018. 
Synthesizing Tabular Data using Generative Adversarial Networks.```LaTeX@article{xu2018synthesizing, title={Synthesizing Tabular Data using Generative Adversarial Networks}, author={Xu, Lei and Veeramachaneni, Kalyan}, journal={arXiv preprint arXiv:1811.11264}, year={2018}}```You can find the original paper [here](https://arxiv.org/pdf/1811.11264.pdf)
###Code
import pandas as pd
from tgan.data import load_demo_data
data = pd.read_csv('/home/matias-desktop/Descargas/Datos2.csv')
continuous_columns = [0,1]
data.head(5)
from tgan.model import TGANModel
tgan = TGANModel(continuous_columns)
print (tgan)
tgan.fit(data)
import pandas as pd
from tgan.model import TGANModel
data = pd.read_csv('/home/matias-desktop/Descargas/Datos2.csv')
print (data[0:10])
continuous_columns = [0]
print (continuous_columns)
tgan = TGANModel(continuous_columns)
tgan.fit(data)
###Output
a b
0 2162.88312 15.689
1 2171.84576 15.555
2 2171.87139 15.558
3 2183.80511 15.633
4 2183.83081 15.643
5 2184.80467 15.583
6 2184.83030 15.572
7 2188.85523 15.538
8 2191.76540 15.676
9 2191.80118 15.680
[0]
[32m[0716 22:55:41 @input_source.py:222][0m Setting up the queue 'QueueInput_13/input_queue' for CPU prefetching ...
|
fork/ms-learn-ml-crash-course-python-master/07. Advanced SVMs - Python.ipynb | ###Markdown
Exercise 7 - Advanced Support Vector Machines=====Support vector machines let us predict categories. In this example we will be looking at practically using SVMs by formatting data correctly, visualising the SVM model and then evaluating the SVM model.We will be looking at __prions__ - misfolded proteins that are associated with several fatal neurodegenerative diseases (kind of like Daleks, if you have seen Doctor Who). Looking at examples of protein mass and weight, we will build a predictive model to detect prions in blood samples. Run the code below to set up the graphing features for this notebook.
###Code
# Run this code!
# It sets up the graphing configuration
import warnings
warnings.filterwarnings("ignore")
import matplotlib.pyplot as graph
%matplotlib inline
graph.rcParams['figure.figsize'] = (15,5)
graph.rcParams["font.family"] = 'DejaVu Sans'
graph.rcParams["font.size"] = '12'
graph.rcParams['image.cmap'] = 'rainbow'
###Output
_____no_output_____
###Markdown
Step 1-----Let's load up the data first, and save it temporarily as rawData. Our dataset is called "PrionData.csv". Replace `<addPathToData>` with `'Data/PrionData.csv'` and then __Run the code__.
###Code
import pandas as pd
import numpy as np
###
# REPLACE <addPathToData> BELOW WITH 'Data/PrionData.csv' (INCLUDING THE QUOTES) TO LOAD THE DATA FROM THAT FILE
###
rawData = pd.read_csv(<addPathToData>)
###
###Output
_____no_output_____
###Markdown
Step 2-----Let's take a look at the data. In the cell below replace the text `<printDataHere>` with `print(rawData.head())` and then __Run the code__.
###Code
###
# REPLACE <printDataHere> with print(rawData.head()) TO VIEW THE TOP 5 DATA POINTS OF THE DATA SET
###
<printDataHere>
###
###Output
_____no_output_____
###Markdown
Looks like we have an extra column; this happens regularly when exporting data sets from a program like Excel and then importing them into a dataframe.Step 3-----Let's get rid of that extra column, and then check that it's gone. __Run the code__ below.
###Code
# Run this box to remove the extra column.
dataset = rawData.drop(['Unnamed: 0'], axis = 1)
print(dataset.head())
###Output
_____no_output_____
###Markdown
All gone!Step 4-----Let's graph the data set to better understand what we're working with.Looking at the output of the last step we can see the 'categories' we're looking at is called __prion_status__ (the label). In the cell below replace: 1. `<addMass>` with `'mass'` 2. `<addWeight>` with `'weight'` then __run the code__.
###Code
###
# REPLACE THE <addMass> BELOW WITH 'mass' (INCLUDING THE QUOTES)
###
X = dataset[<addMass>]
###
##
# REPLACE THE <addWeight> BELOW WITH 'weight' (INCLUDING THE QUOTES)
###
Y = dataset[<addWeight>]
###
# This makes a list that says which items are prions and which are not
target = dataset['prion_status'] == 'prion'
graph.scatter(X, Y, c = target, zorder = 10, s = 40)
graph.title("Classification plot for prion data")
graph.ylabel("Mass")
graph.xlabel("Weight")
graph.show()
###Output
_____no_output_____
###Markdown
Step 5-------Let's split up our data into test and training sets. We'll start by checking the total number of instances in our dataset by using the DataFrame attribute *shape*. The first number is the one we want. In the cell below replace `<addShape>` with `shape` and then __Run the code__.
###Code
###
# REPLACE THE <addShape> BELOW WITH THE NAME OF THE ATTRIBUTE WE WANT TO LOOK AT - shape
###
dataset.<addShape>
###
###Output
_____no_output_____
###Markdown
Step 6-----Step 5 has told us that we have nearly 500 data points. We'll use 400 examples for our training set, and the remainder for our test set. Replace the `<add400>` below with `400` and run the cell.
###Code
# This makes our training set out of the first 400 examples
train_X = dataset.drop(['prion_status'], 1).truncate(after = 399)
train_Y = dataset['prion_status'].truncate(after = 399)
###
# REPLACE THE <add400> BELOW WITH 400 TO MAKE THE TEST SET OUT OF THE REMAINING EXAMPLES
###
test_X = dataset.drop(['prion_status'], 1).truncate(before = <add400>).reset_index(drop = True)
test_Y = dataset['prion_status'].truncate(before = <add400>).reset_index(drop = True)
###
###Output
_____no_output_____
###Markdown
Step 7-----Well done! Let's look at a summary of our training data. In the cell below replace `<addDescribe>` with `describe()` then __run the code__.
###Code
###
# REPLACE THE <addDescribe> BELOW WITH 'describe()'
###
print(train_X.<addDescribe>)
print(train_Y.<addDescribe>)
###
###Output
_____no_output_____
###Markdown
314 non-prions out of 400, which means there are 86 prions in there. That looks about right if we refer to the graph we made in Step 4.Let's take a look at our test set too. Use the `describe()` function again, this time looking at __test__ instead of train.
###Code
###
# REPLACE THE <addDescribe> BELOW WITH describe()
###
print(test_X.<addDescribe>)
print(test_Y.<addDescribe>)
###
###Output
_____no_output_____
###Markdown
Looks good to me! Alright, enough of that - lets make an SVM.Step 8-----Below we will make an SVM, similar to the previous exercise.Remember, the syntax for SVM's is:`SVM_Model = svm.SVC().fit(features, labels)` In the cell below replace: 1. `` with `train_X` 2. `` with `train_Y` and then __run the code__.
###Code
from sklearn import svm
###
# REPLACE <addFeatures> WITH train_X and <addLabels> WITH train_Y
###
SVM_Model = svm.SVC(gamma = 'auto').fit(<addFeatures>, <addLabels>)
###
print("done!")
###Output
_____no_output_____
###Markdown
Well done! We've made a SVM Model from our training set.Step 9-----Lets use our model to make some predictions. __Run the code__ in the cell below.
###Code
# Don't edit this! Just hit run to plot the graph
#This makes a plot of our SVM
def plot_SVM(clf, data, target):
#Make a list of which are prions
is_prion = target == 'prion'
graph.scatter(data['mass'], data['weight'], c = is_prion, zorder = 10, edgecolor = 'k', s = 40)
# Put the result into a colour plot
XX, YY = np.mgrid[0:1:255j, 0:1:255j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()]).reshape(XX.shape)
graph.pcolormesh(XX, YY, Z > 0)
graph.contour(XX, YY, Z, colors = ['k', 'k', 'k'], linestyles = ['--', '-', '--'], levels = [-.5, 0, .5])
graph.ylim(0, 1)
graph.xlim(0, 1)
graph.show()
#Call the code to plot our SVM
plot_SVM(SVM_Model, train_X, train_Y)
###Output
_____no_output_____
###Markdown
Step 10-------The SVM has done a reasonable job of separating our training dataset into two. Now let's take a look at our test set.Remember our syntax for plotting SVMs is: `plot_SVM(SVM_Model, features, labels)`Add our __test__ set below to see how it looks. In the cell below replace: 1. `<addTestX>` with `test_X` 2. `<addTestY>` with `test_Y` and then __run the code__.
###Code
###
# REPLACE <addTestX> WITH test_X AND <addTestY> WITH test_Y
###
plot_SVM(SVM_Model, <addTestX>, <addTestY>)
###
###Output
_____no_output_____
###Markdown
Step 11-----Graphing is a good way to see how our model has done, but sometimes numbers can be better. Let's calculate the accuracy of our SVM in each dataset. In the cell below replace: 1. `<addTrainX>` with `train_X` 2. `<addTestX>` with `test_X` 3. `<addTrainY>` with `train_Y` 4. `<addTestY>` with `test_Y` and then __run the code__.
###Code
###
# REPLACE <addTrainX> WITH train_X AND <addTestX> with test_X FEATURE SETS TO GENERATE THE PREDICTIONS
###
train_P = SVM_Model.predict(<addTrainX>.values)
test_P = SVM_Model.predict(<addTestX>.values)
###
# This function evaluates the SVM's accuracy
def evaluate_SVM(pred, real, name):
matches = pred == real #see where predicted and real are the same
accuracy = sum(matches)/len(matches)*100 #convert to percent
print(name, "Set Accuracy:", accuracy, "%")
###
# REPLACE <addTrainY> WITH train_Y AND <addTestY> with test_Y
###
evaluate_SVM(train_P, <addTrainY>, 'Train')
evaluate_SVM(test_P, <addTestY>, 'Test')
###
###Output
_____no_output_____
###Markdown
That's a good result. Conclusion------Well done! We've taken a data set, cleaned and prepared it, made an SVM, and then evaluated it.You can go back to the course now, or you can try using different kernels with your SVM below.OPTIONAL: Step 12-----Want to have a play around with different kernels for your SVM models? It's really easy!The standard kernel is a Radial Basis Function kernel. But there are a few more you can choose from - linear (`linear`), polynomial (`poly`), and sigmoid (`sigmoid`). Let's try them out.If you wanted to use a linear kernel, all you need to do is add `kernel='linear'` to your model. Like this:`SVM_Model = svm.SVC(kernel='linear')`Give it a go with all the different kernels below. The first one is done for you. Run the cell below.
###Code
def assess_SVM(SVM_Model):
# Plot the new linear SVM model
plot_SVM(SVM_Model, train_X, train_Y)
plot_SVM(SVM_Model, test_X, test_Y)
# Use the model to predict the training and test sets.
train_P = SVM_Model.predict(train_X.values)
test_P = SVM_Model.predict(test_X.values)
# Evaluate the model using the training and test sets
evaluate_SVM(train_P, train_Y, 'Train')
evaluate_SVM(test_P, test_Y, 'Test')
# Make a new linear SVM model
SVM_Model = svm.SVC(kernel = 'linear').fit(train_X, train_Y)
assess_SVM(SVM_Model)
###Output
_____no_output_____
###Markdown
You can see the hyperplane is a straight line!Now let's try a sigmoid kernel. Replace `<replaceThis>` with `'sigmoid'` then run the cell.
###Code
# Make a new sigmoid SVM model
###
# REPLACE THE <replaceThis> BELOW WITH 'sigmoid' (INCLUDING THE QUOTES)
###
SVM_Model = svm.SVC(kernel = <replaceThis>, gamma = 4, coef0 = 0).fit(train_X, train_Y)
###
assess_SVM(SVM_Model)
###Output
_____no_output_____
###Markdown
Perhaps a sigmoid kernel isn't a good idea for this data set....Let's try a polynomial kernel. Replace `<replaceWithPoly>` with `'poly'` then run the cell.
###Code
# Make a new polynomial SVM model
###
# REPLACE THE <replaceWithPoly> BELOW WITH 'poly' (INCLUDING THE QUOTES)
###
SVM_Model = svm.SVC(kernel = <replaceWithPoly>, gamma = 10, degree = 3, coef0 = 0).fit(train_X, train_Y)
###
assess_SVM(SVM_Model)
###Output
_____no_output_____ |
ML-Base-MOOC/chapt-9 Decision Tree/02- Gini Coefficient.ipynb | ###Markdown
Gini Coefficient  $$G = 1 - \sum_{i=1}^kp_i^2$$- For a binary classification problem$$G = 1 - x^2 - (1-x)^2$$$$\Downarrow$$$$= -2x^2 + 2x$$- We can see that for a binary classification problem, the Gini coefficient reaches its maximum when $x = \frac{1}{2}$- i.e., that is when the uncertainty of the system is greatest 1. Gini Coefficient
###Code
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data[:, 2:]
y = iris.target
from sklearn.tree import DecisionTreeClassifier
dt_clf = DecisionTreeClassifier(max_depth=2, criterion='gini')
dt_clf.fit(X, y)
def plot_decision_boundary(model, axis):
x0, x1 = np.meshgrid(
np.linspace(axis[0], axis[1], int((axis[1] - axis[0])*100)).reshape(1, -1),
np.linspace(axis[2], axis[3], int((axis[3] - axis[2])*100)).reshape(-1, 1)
)
X_new = np.c_[x0.ravel(), x1.ravel()]
y_predic = model.predict(X_new)
zz = y_predic.reshape(x0.shape)
from matplotlib.colors import ListedColormap
custom_cmap = ListedColormap(['#EF9A9A', '#FFF590', '#90CAF9'])
plt.contourf(x0, x1, zz, linewidth=5, cmap=custom_cmap)
plot_decision_boundary(dt_clf, axis=(0.5, 7.5, 0, 3))
plt.scatter(X[y==0, 0], X[y==0, 1])
plt.scatter(X[y==1, 0], X[y==1, 1])
plt.scatter(X[y==2, 0], X[y==2, 1])
def gini(p):
    return 1 - p**2 - (1-p)**2
x = np.linspace(0.01, 0.99)
plt.plot(x, gini(x))
###Output
_____no_output_____
###Markdown
2. 模拟使用基尼系数划分
###Code
from collections import Counter
from math import log
# Split the data on dimension d at the given value
def split(X, y, d, value):
index_a = (X[:, d] <= value)
index_b = (X[:, d] > value)
return X[index_a], X[index_b], y[index_a], y[index_b]
# Compute the Gini coefficient of a label array, summed over all classes
def gini(y):
counter = Counter(y)
res = 1.0
for num in counter.values():
p = num / len(y)
res -= p**2
return res
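# Quick numeric check of the impurity function above (illustration only): an evenly split
# binary label set has the maximum Gini value of 0.5, while a pure set scores 0
assert abs(gini([0, 0, 1, 1]) - 0.5) < 1e-12
assert gini([1, 1, 1, 1]) == 0.0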
# Search for the best value to split on
def try_split(X, y):
    best_g = float('inf') # smallest Gini value found so far
    best_d, best_v = -1, -1 # best split dimension and split value
    # iterate over every dimension
    for d in range(X.shape[1]):
        # candidate split values are midpoints between adjacent samples in dimension d, so first sort the samples by d
sorted_index = np.argsort(X[:, d])
for i in range(1, len(X)):
if X[sorted_index[i-1], d] != X[sorted_index[i], d]:
v = (X[sorted_index[i-1], d] + X[sorted_index[i], d]) / 2
x_l, x_r, y_l, y_r = split(X, y, d, v)
                # compute the Gini coefficient of the two parts produced by this split
g = gini(y_l) + gini(y_r)
if g < best_g:
best_g, best_d, best_v = g, d, v
return best_g, best_d, best_v
best_g, best_d, best_v = try_split(X, y)
print("best_g = ", best_g)
print("best_d = ", best_d)
print("best_v = ", best_v)
###Output
best_g = 0.5
best_d = 0
best_v = 2.45
###Markdown
**We can see that splitting dimension 0 (the x-axis) at 2.45 gives the smallest Gini coefficient, 0.5**
###Code
X1_l, X1_r, y1_l, y1_r = split(X, y, best_d, best_v)
# As the plot above shows, after one split the pink region contains only one class, so its Gini coefficient is 0
gini(y1_l)
gini(y1_r)
best_g2, best_d2, best_v2 = try_split(X1_r, y1_r)
print("best_g = ", best_g2)
print("best_d", best_d2)
print("best_v", best_v2)
###Output
best_g = 0.2105714900645938
best_d 1
best_v 1.75
###Markdown
Gini Coefficient  $$G = 1 - \sum_{i=1}^kp_i^2$$- For a binary classification problem$$G = 1 - x^2 - (1-x)^2$$$$\Downarrow$$$$= -2x^2 + 2x$$- We can see that for a binary classification problem, the Gini coefficient reaches its maximum when $x = \frac{1}{2}$- i.e., that is when the uncertainty of the system is greatest 1. Gini Coefficient
###Code
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data[:, 2:]
y = iris.target
from sklearn.tree import DecisionTreeClassifier
dt_clf = DecisionTreeClassifier(max_depth=2, criterion='gini')
dt_clf.fit(X, y)
def plot_decision_boundary(model, axis):
x0, x1 = np.meshgrid(
np.linspace(axis[0], axis[1], int((axis[1] - axis[0])*100)).reshape(1, -1),
np.linspace(axis[2], axis[3], int((axis[3] - axis[2])*100)).reshape(-1, 1)
)
X_new = np.c_[x0.ravel(), x1.ravel()]
y_predic = model.predict(X_new)
zz = y_predic.reshape(x0.shape)
from matplotlib.colors import ListedColormap
custom_cmap = ListedColormap(['#EF9A9A', '#FFF590', '#90CAF9'])
plt.contourf(x0, x1, zz, linewidth=5, cmap=custom_cmap)
plot_decision_boundary(dt_clf, axis=(0.5, 7.5, 0, 3))
plt.scatter(X[y==0, 0], X[y==0, 1])
plt.scatter(X[y==1, 0], X[y==1, 1])
plt.scatter(X[y==2, 0], X[y==2, 1])
###Output
D:\appCommon\Anaconda3\lib\site-packages\matplotlib\contour.py:1000: UserWarning: The following kwargs were not used by contour: 'linewidth'
s)
###Markdown
2. 模拟使用基尼系数划分
###Code
from collections import Counter
from math import log
# Split the data on dimension d at the given value
def split(X, y, d, value):
index_a = (X[:, d] <= value)
index_b = (X[:, d] > value)
return X[index_a], X[index_b], y[index_a], y[index_b]
# Compute the Gini coefficient of a label array, summed over all classes
def gini(y):
counter = Counter(y)
res = 1.0
for num in counter.values():
p = num / len(y)
res -= p**2
return res
# Search for the best value to split on
def try_split(X, y):
    best_g = float('inf') # smallest Gini value found so far
    best_d, best_v = -1, -1 # best split dimension and split value
    # iterate over every dimension
    for d in range(X.shape[1]):
        # candidate split values are midpoints between adjacent samples in dimension d, so first sort the samples by d
sorted_index = np.argsort(X[:, d])
for i in range(1, len(X)):
if X[sorted_index[i-1], d] != X[sorted_index[i], d]:
v = (X[sorted_index[i-1], d] + X[sorted_index[i], d]) / 2
x_l, x_r, y_l, y_r = split(X, y, d, v)
                # compute the Gini coefficient of the two parts produced by this split
g = gini(y_l) + gini(y_r)
if g < best_g:
best_g, best_d, best_v = g, d, v
return best_g, best_d, best_v
best_g, best_d, best_v = try_split(X, y)
print("best_g = ", best_g)
print("best_d = ", best_d)
print("best_v = ", best_v)
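# Cross-check (illustration): the sklearn tree fitted above chose the same root split;
# tree_.feature[0] is the split dimension and tree_.threshold[0] the split value (about 2.45)
print("sklearn root split:", dt_clf.tree_.feature[0], dt_clf.tree_.threshold[0])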
###Output
best_g = 0.5
best_d = 0
best_v = 2.45
###Markdown
**We can see that splitting dimension 0 (the x-axis) at 2.45 gives the smallest Gini coefficient, 0.5**
###Code
X1_l, X1_r, y1_l, y1_r = split(X, y, best_d, best_v)
# As the plot above shows, after one split the pink region contains only one class, so its Gini coefficient is 0
gini(y1_l)
gini(y1_r)
best_g2, best_d2, best_v2 = try_split(X1_r, y1_r)
print("best_g = ", best_g2)
print("best_d", best_d2)
print("best_v", best_v2)
###Output
best_g = 0.2105714900645938
best_d 1
best_v 1.75
|
Climate Visualization Example/ClimateDataExample.ipynb | ###Markdown
Sky Visualization from the LCD datasetThe Local Climatological Data (LCD) summaries provide a synopsis of climatic values for a single weather station over a specific month. The summaries are a product of surface observations from both manual and automated (`AWOS`, `ASOS`) stations with source data taken from the National Centers for Environmental Information's Integrated Surface Data (`ISD`) dataset. Geographic availability includes thousands of locations worldwide. Climatic values given include hourly, daily, and monthly measurements of temperature, dew point, humidity, winds, sky condition, weather type, atmospheric pressure and more. Sky Conditions A report of each cloud layer (up to 3) giving the following information. Each layer is given in the following format: `ccc`:`ll`-`xxx` where `ccc` and `ll` give the coverage of a layer in oktas (i.e. eighths) of sky covered by cloud as per the following table:

| ccc | ll   | description                 |
|-----|------|-----------------------------|
| CLR | 0    | clear sky                   |
| FEW | 1-2  | few clouds                  |
| SCT | 3-4  | scattered clouds            |
| BKN | 5-8  | broken clouds               |
| OVC | 8    | overcast                    |
| VV  | 9-10 | obscuration (full, partial) |

And `xxx` is the cloud base height at the lowest point of the layer. In the case of an obscuration this value represents the vertical visibility from the point of observation. Given in hundreds of feet (e.g. 50 = 5000 ft, 120 = 12000 feet). In some cases a cloud base height will be given without the corresponding cloud amount; in these cases the cloud amount is missing or not reported. Up to 3 layers can be reported; however, by definition, when clear skies are reported it will be reported as only one layer as `CLR`-`00`.**Note**: Since up to 3 cloud layers can be reported, the full state of the sky can best be determined by the contraction given for the last layer. In other words if three layers are reported and the third layer uses `BKN` then the total state of sky is `BKN` which is similar in definition to *mostly cloudy.* `OVC` is similar to *cloudy* or overcast and `FEW` or `SCT` is similar to *partly cloudy.* It should also be noted that in cases where there are more than 3 cloud layers, the highest layers will not be reported.
###Code
import os
with open('link.txt', 'r') as link_file:
url = link_file.readline()
os.startfile(url)
###Output
_____no_output_____
###Markdown
Imports
###Code
from PIL import Image, ImageDraw, ImageFont
from random import randint
from ipywidgets import interact
import pandas as pd
import re
###Output
_____no_output_____
###Markdown
Read Data from Table
###Code
data = pd.read_csv('ChicagoAirport.csv')
sky_data = data['HourlySkyConditions']
sky_data
###Output
_____no_output_____
###Markdown
Parsing Sky Condition Code using External File
###Code
def process_sky_data(entry):
entry2 = f'"{entry}"'
a = !process_sky_data.py $entry2
return eval(a[0])
test_data = "FEW:02 55 OVC:08 80"
process_sky_data(test_data)
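# A pure-Python sketch (not the external process_sky_data.py script used above) of parsing
# the "ccc:ll xxx" layer codes described earlier into (coverage_oktas, cloud_base_height)
# pairs, which is the shape of result this notebook appears to work with.
def parse_sky_condition(entry):
    layers = []
    for _code, oktas, height in re.findall(r'([A-Z]{2,3}):(\d+)\s+(\d+)', str(entry)):
        layers.append((int(oktas), int(height)))
    return layers

parse_sky_condition(test_data)  # e.g. [(2, 55), (8, 80)]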
###Output
_____no_output_____
###Markdown
Cloud Layer Visualization
###Code
@interact
def show_sky(hour=(0, len(data)-1)):
layers = process_sky_data(sky_data[hour])
test_image = Image.new('RGBA', (100, 350), color=(0, 128, 255, 255))
draw = ImageDraw.Draw(test_image)
unicode_font = ImageFont.truetype("ARLRDBD.TTF", size=50)
def draw_layer(coverage, height):
color = (255, 255, 255, 100) if coverage in range(0, 9) else (0, 0, 0, 255)
shape = "•" if coverage in range(0, 9) else '_'
new_base = Image.new('RGBA', (100, 350), color=(0, 0, 0, 0))
height2 = 350-height
for _ in range(2*coverage**2):
new_layer = Image.new('RGBA', (100, 350), color=(0, 0, 0, 0))
layer_draw = ImageDraw.Draw(new_layer)
x = randint(-30, 130)
y = randint(height2-60, height2-30)
layer_draw.text((x, y), shape, font=unicode_font, fill=color)
new_base = Image.alpha_composite(new_base, new_layer)
return new_base
for coverage, height in layers:
clouds = draw_layer(coverage, height)
test_image = Image.alpha_composite(test_image, clouds)
return test_image
###Output
_____no_output_____ |