| text (string, 5–261k chars) | id (string, 16–106 chars) | metadata (dict) | __index_level_0__ (int64, 0–266) |
---|---|---|---|
from keras_core.api_export import keras_core_export
from keras_core.layers.preprocessing.tf_data_layer import TFDataLayer
from keras_core.random.seed_generator import SeedGenerator
@keras_core_export("keras_core.layers.RandomContrast")
class RandomContrast(TFDataLayer):
"""A preprocessing layer which randomly adjusts contrast during training.
This layer will randomly adjust the contrast of an image or images
by a random factor. Contrast is adjusted independently
for each channel of each image during training.
For each channel, this layer computes the mean of the image pixels in the
channel and then adjusts each component `x` of each pixel to
`(x - mean) * contrast_factor + mean`.
Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and
in integer or floating point dtype.
By default, the layer will output floats.
**Note:** This layer is safe to use inside a `tf.data` pipeline
(independently of which backend you're using).
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format.
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format.
Args:
factor: a positive float represented as fraction of value, or a tuple of
size 2 representing lower and upper bound.
When represented as a single float, lower = upper.
The contrast factor will be randomly picked between
`[1.0 - lower, 1.0 + upper]`. For any pixel x in the channel,
the output will be `(x - mean) * factor + mean`
where `mean` is the mean value of the channel.
seed: Integer. Used to create a random seed.
"""
def __init__(self, factor, seed=None, **kwargs):
super().__init__(**kwargs)
self.factor = factor
if isinstance(factor, (tuple, list)):
self.lower = factor[0]
self.upper = factor[1]
else:
self.lower = self.upper = factor
if self.lower < 0.0 or self.upper < 0.0 or self.lower > 1.0:
raise ValueError(
"`factor` argument cannot have negative values or values "
"greater than 1."
f"Received: factor={factor}"
)
self.seed = seed
self.generator = SeedGenerator(seed)
def call(self, inputs, training=True):
inputs = self.backend.cast(inputs, self.compute_dtype)
if training:
seed_generator = self._get_seed_generator(self.backend._backend)
factor = self.backend.random.uniform(
shape=(),
minval=1.0 - self.lower,
maxval=1.0 + self.upper,
seed=seed_generator,
dtype=self.compute_dtype,
)
outputs = self._adjust_contrast(inputs, factor)
outputs = self.backend.numpy.clip(outputs, 0, 255)
outputs = self.backend.numpy.reshape(outputs, self.backend.shape(inputs))
return outputs
else:
return inputs
def _adjust_contrast(self, inputs, contrast_factor):
# reduce mean on height
inp_mean = self.backend.numpy.mean(inputs, axis=-3, keepdims=True)
# reduce mean on width
inp_mean = self.backend.numpy.mean(inp_mean, axis=-2, keepdims=True)
outputs = (inputs - inp_mean) * contrast_factor + inp_mean
return outputs
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {
"factor": self.factor,
"seed": self.seed,
}
base_config = super().get_config()
return {**base_config, **config}
|
keras-core/keras_core/layers/preprocessing/random_contrast.py/0
|
{
"file_path": "keras-core/keras_core/layers/preprocessing/random_contrast.py",
"repo_id": "keras-core",
"token_count": 1612
}
| 43 |
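The per-channel adjustment that `RandomContrast` applies is easy to check by hand. Below is a minimal NumPy sketch (the toy image and the `factor` value are illustrative, not taken from the file above) of the `(x - mean) * factor + mean` computation on a single `channels_last` image:
```python
import numpy as np

# Toy image: height=2, width=2, channels=3, in channels_last layout.
image = np.array(
    [[[0.0, 10.0, 20.0], [40.0, 50.0, 60.0]],
     [[80.0, 90.0, 100.0], [120.0, 130.0, 140.0]]]
)
factor = 1.25  # would be sampled from [1.0 - lower, 1.0 + upper] by the layer

# Mean per channel over the spatial axes, kept for broadcasting
# (this mirrors the two reduce-mean calls in `_adjust_contrast`).
mean = image.mean(axis=(0, 1), keepdims=True)

# Push each pixel away from (factor > 1) or toward (factor < 1) its channel mean.
adjusted = np.clip((image - mean) * factor + mean, 0, 255)
print(adjusted.shape)  # (2, 2, 3): shape is preserved
```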
import numpy as np
from keras_core import backend
from keras_core.api_export import keras_core_export
from keras_core.layers.preprocessing.index_lookup import IndexLookup
from keras_core.utils import backend_utils
from keras_core.utils.module_utils import tensorflow as tf
@keras_core_export("keras_core.layers.StringLookup")
class StringLookup(IndexLookup):
"""A preprocessing layer that maps strings to (possibly encoded) indices.
This layer translates a set of arbitrary strings into integer output via a
table-based vocabulary lookup. This layer will perform no splitting or
transformation of input strings. For a layer that can split and tokenize
natural language, see the `keras_core.layers.TextVectorization` layer.
The vocabulary for the layer must be either supplied on construction or
learned via `adapt()`. During `adapt()`, the layer will analyze a data set,
determine the frequency of individual string tokens, and create a
vocabulary from them. If the vocabulary is capped in size, the most frequent
tokens will be used to create the vocabulary and all others will be treated
as out-of-vocabulary (OOV).
There are two possible output modes for the layer.
When `output_mode` is `"int"`,
input strings are converted to their index in the vocabulary (an integer).
When `output_mode` is `"multi_hot"`, `"count"`, or `"tf_idf"`, input strings
are encoded into an array where each dimension corresponds to an element in
the vocabulary.
The vocabulary can optionally contain a mask token as well as an OOV token
(which can optionally occupy multiple indices in the vocabulary, as set
by `num_oov_indices`).
The position of these tokens in the vocabulary is fixed. When `output_mode`
is `"int"`, the vocabulary will begin with the mask token (if set), followed
by OOV indices, followed by the rest of the vocabulary. When `output_mode`
is `"multi_hot"`, `"count"`, or `"tf_idf"` the vocabulary will begin with
OOV indices and instances of the mask token will be dropped.
**Note:** This layer uses TensorFlow internally. It cannot
be used as part of the compiled computation graph of a model with
any backend other than TensorFlow.
It can however be used with any backend when running eagerly.
It can also always be used as part of an input preprocessing pipeline
with any backend (outside the model itself), which is how we recommend
to use this layer.
**Note:** This layer is safe to use inside a `tf.data` pipeline
(independently of which backend you're using).
Args:
max_tokens: Maximum size of the vocabulary for this layer. This should
only be specified when adapting the vocabulary or when setting
`pad_to_max_tokens=True`. If None, there is no cap on the size of
the vocabulary. Note that this size includes the OOV
and mask tokens. Defaults to `None`.
num_oov_indices: The number of out-of-vocabulary tokens to use.
If this value is more than 1, OOV inputs are modulated to
determine their OOV value.
If this value is 0, OOV inputs will cause an error when calling
the layer. Defaults to `1`.
mask_token: A token that represents masked inputs. When `output_mode` is
`"int"`, the token is included in vocabulary and mapped to index 0.
In other output modes, the token will not appear
in the vocabulary and instances of the mask token
in the input will be dropped. If set to `None`,
no mask term will be added. Defaults to `None`.
oov_token: Only used when `invert` is True. The token to return for OOV
indices. Defaults to `"[UNK]"`.
vocabulary: Optional. Either an array of strings or a string path to a
text file. If passing an array, can pass a tuple, list,
1D NumPy array, or 1D tensor containing the string vocabulary terms.
If passing a file path, the file should contain one line per term
in the vocabulary. If this argument is set,
there is no need to `adapt()` the layer.
idf_weights: Only valid when `output_mode` is `"tf_idf"`.
A tuple, list, 1D NumPy array, or 1D tensor or the same length
as the vocabulary, containing the floating point inverse document
frequency weights, which will be multiplied by per sample term
counts for the final TF-IDF weight.
If the `vocabulary` argument is set, and `output_mode` is
`"tf_idf"`, this argument must be supplied.
invert: Only valid when `output_mode` is `"int"`.
If `True`, this layer will map indices to vocabulary items
instead of mapping vocabulary items to indices.
Defaults to `False`.
output_mode: Specification for the output of the layer. Values can be
`"int"`, `"one_hot"`, `"multi_hot"`, `"count"`, or `"tf_idf"`
configuring the layer as follows:
- `"int"`: Return the vocabulary indices of the input tokens.
- `"one_hot"`: Encodes each individual element in the input into an
array the same size as the vocabulary,
containing a 1 at the element index. If the last dimension
is size 1, will encode on that dimension.
If the last dimension is not size 1, will append a new
dimension for the encoded output.
- `"multi_hot"`: Encodes each sample in the input into a single
array the same size as the vocabulary,
containing a 1 for each vocabulary term present in the sample.
Treats the last dimension as the sample dimension,
if input shape is `(..., sample_length)`,
output shape will be `(..., num_tokens)`.
- `"count"`: As `"multi_hot"`, but the int array contains
a count of the number of times the token at that index
appeared in the sample.
- `"tf_idf"`: As `"multi_hot"`, but the TF-IDF algorithm is
applied to find the value in each token slot.
For `"int"` output, any shape of input and output is supported.
For all other output modes, currently only output up to rank 2
is supported. Defaults to `"int"`.
pad_to_max_tokens: Only applicable when `output_mode` is `"multi_hot"`,
`"count"`, or `"tf_idf"`. If `True`, the output will have
its feature axis padded to `max_tokens` even if the number
of unique tokens in the vocabulary is less than `max_tokens`,
resulting in a tensor of shape `(batch_size, max_tokens)`
regardless of vocabulary size. Defaults to `False`.
sparse: Boolean. Only applicable to `"multi_hot"`, `"count"`, and
`"tf_idf"` output modes. Only supported with TensorFlow
backend. If `True`, returns a `SparseTensor`
instead of a dense `Tensor`. Defaults to `False`.
encoding: Optional. The text encoding to use to interpret the input
strings. Defaults to `"utf-8"`.
Examples:
**Creating a lookup layer with a known vocabulary**
This example creates a lookup layer with a pre-existing vocabulary.
>>> vocab = ["a", "b", "c", "d"]
>>> data = [["a", "c", "d"], ["d", "z", "b"]]
>>> layer = StringLookup(vocabulary=vocab)
>>> layer(data)
array([[1, 3, 4],
[4, 0, 2]])
**Creating a lookup layer with an adapted vocabulary**
This example creates a lookup layer and generates the vocabulary by
analyzing the dataset.
>>> data = [["a", "c", "d"], ["d", "z", "b"]]
>>> layer = StringLookup()
>>> layer.adapt(data)
>>> layer.get_vocabulary()
['[UNK]', 'd', 'z', 'c', 'b', 'a']
Note that the OOV token `"[UNK]"` has been added to the vocabulary.
The remaining tokens are sorted by frequency
(`"d"`, which has 2 occurrences, is first) then by inverse sort order.
>>> data = [["a", "c", "d"], ["d", "z", "b"]]
>>> layer = StringLookup()
>>> layer.adapt(data)
>>> layer(data)
array([[5, 3, 1],
[1, 2, 4]])
**Lookups with multiple OOV indices**
This example demonstrates how to use a lookup layer with multiple OOV
indices. When a layer is created with more than one OOV index, any OOV
values are hashed into the number of OOV buckets, distributing OOV values in
a deterministic fashion across the set.
>>> vocab = ["a", "b", "c", "d"]
>>> data = [["a", "c", "d"], ["m", "z", "b"]]
>>> layer = StringLookup(vocabulary=vocab, num_oov_indices=2)
>>> layer(data)
array([[2, 4, 5],
[0, 1, 3]])
Note that the output for OOV value `"m"` is 0, while the output for OOV value
`"z"` is 1. The in-vocab terms have their output index increased by 1 from
earlier examples (`"a"` maps to 2, etc.) in order to make space for the extra
OOV value.
**One-hot output**
Configure the layer with `output_mode='one_hot'`. Note that the first
`num_oov_indices` dimensions in the one_hot encoding represent OOV values.
>>> vocab = ["a", "b", "c", "d"]
>>> data = ["a", "b", "c", "d", "z"]
>>> layer = StringLookup(vocabulary=vocab, output_mode='one_hot')
>>> layer(data)
array([[0., 1., 0., 0., 0.],
[0., 0., 1., 0., 0.],
[0., 0., 0., 1., 0.],
[0., 0., 0., 0., 1.],
[1., 0., 0., 0., 0.]], dtype=float32)
**Multi-hot output**
Configure the layer with `output_mode='multi_hot'`. Note that the first
`num_oov_indices` dimensions in the multi_hot encoding represent OOV values.
>>> vocab = ["a", "b", "c", "d"]
>>> data = [["a", "c", "d", "d"], ["d", "z", "b", "z"]]
>>> layer = StringLookup(vocabulary=vocab, output_mode='multi_hot')
>>> layer(data)
array([[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]], dtype=float32)
**Token count output**
Configure the layer with `output_mode='count'`. As with multi_hot output,
the first `num_oov_indices` dimensions in the output represent OOV values.
>>> vocab = ["a", "b", "c", "d"]
>>> data = [["a", "c", "d", "d"], ["d", "z", "b", "z"]]
>>> layer = StringLookup(vocabulary=vocab, output_mode='count')
>>> layer(data)
array([[0., 1., 0., 1., 2.],
[2., 0., 1., 0., 1.]], dtype=float32)
**TF-IDF output**
Configure the layer with `output_mode="tf_idf"`. As with multi_hot output,
the first `num_oov_indices` dimensions in the output represent OOV values.
Each token bin will output `token_count * idf_weight`, where the idf weights
are the inverse document frequency weights per token. These should be
provided along with the vocabulary. Note that the `idf_weight` for OOV
values will default to the average of all idf weights passed in.
>>> vocab = ["a", "b", "c", "d"]
>>> idf_weights = [0.25, 0.75, 0.6, 0.4]
>>> data = [["a", "c", "d", "d"], ["d", "z", "b", "z"]]
>>> layer = StringLookup(output_mode="tf_idf")
>>> layer.set_vocabulary(vocab, idf_weights=idf_weights)
>>> layer(data)
array([[0. , 0.25, 0. , 0.6 , 0.8 ],
[1.0 , 0. , 0.75, 0. , 0.4 ]], dtype=float32)
To specify the idf weights for OOV values, you will need to pass the entire
vocabulary including the leading OOV token.
>>> vocab = ["[UNK]", "a", "b", "c", "d"]
>>> idf_weights = [0.9, 0.25, 0.75, 0.6, 0.4]
>>> data = [["a", "c", "d", "d"], ["d", "z", "b", "z"]]
>>> layer = StringLookup(output_mode="tf_idf")
>>> layer.set_vocabulary(vocab, idf_weights=idf_weights)
>>> layer(data)
array([[0. , 0.25, 0. , 0.6 , 0.8 ],
[1.8 , 0. , 0.75, 0. , 0.4 ]], dtype=float32)
When adapting the layer in `"tf_idf"` mode, each input sample will be
considered a document, and IDF weight per token will be calculated as
`log(1 + num_documents / (1 + token_document_count))`.
**Inverse lookup**
This example demonstrates how to map indices to strings using this layer.
(You can also use `adapt()` with `invert=True`, but for simplicity we'll
pass the vocab in this example.)
>>> vocab = ["a", "b", "c", "d"]
>>> data = [[1, 3, 4], [4, 0, 2]]
>>> layer = StringLookup(vocabulary=vocab, invert=True)
>>> layer(data)
array([[b'a', b'c', b'd'],
[b'd', b'[UNK]', b'b']], dtype=object)
Note that the first index corresponds to the OOV token by default.
**Forward and inverse lookup pairs**
This example demonstrates how to use the vocabulary of a standard lookup
layer to create an inverse lookup layer.
>>> vocab = ["a", "b", "c", "d"]
>>> data = [["a", "c", "d"], ["d", "z", "b"]]
>>> layer = StringLookup(vocabulary=vocab)
>>> i_layer = StringLookup(vocabulary=vocab, invert=True)
>>> int_data = layer(data)
>>> i_layer(int_data)
array([[b'a', b'c', b'd'],
[b'd', b'[UNK]', b'b']], dtype=object)
In this example, the input value `"z"` resulted in an output of `"[UNK]"`,
since `"z"` was not in the vocabulary - it got represented as an OOV, and all
OOV values are returned as `"[UNK]"` in the inverse layer. Also, note that
for the inverse to work, you must have already set the forward layer
vocabulary either directly or via `adapt()` before calling
`get_vocabulary()`.
"""
def __init__(
self,
max_tokens=None,
num_oov_indices=1,
mask_token=None,
oov_token="[UNK]",
vocabulary=None,
idf_weights=None,
invert=False,
output_mode="int",
pad_to_max_tokens=False,
sparse=False,
encoding="utf-8",
name=None,
**kwargs,
):
if not tf.available:
raise ImportError(
"Layer StringLookup requires TensorFlow. "
"Install it via `pip install tensorflow`."
)
if sparse and backend.backend() != "tensorflow":
raise ValueError(
"`sparse` can only be set to True with the "
"TensorFlow backend."
)
super().__init__(
max_tokens=max_tokens,
num_oov_indices=num_oov_indices,
mask_token=mask_token,
oov_token=oov_token,
vocabulary=vocabulary,
idf_weights=idf_weights,
invert=invert,
output_mode=output_mode,
pad_to_max_tokens=pad_to_max_tokens,
sparse=sparse,
name=name,
vocabulary_dtype="string",
**kwargs,
)
self.encoding = encoding
self._convert_input_args = False
self._allow_non_tensor_positional_args = True
self.supports_jit = False
def adapt(self, data, steps=None):
"""Computes a vocabulary of interger terms from tokens in a dataset.
Calling `adapt()` on a `StringLookup` layer is an alternative to passing
in a precomputed vocabulary on construction via the `vocabulary`
argument. A `StringLookup` layer should always be either adapted over a
dataset or supplied with a vocabulary.
During `adapt()`, the layer will build a vocabulary of all string tokens
seen in the dataset, sorted by occurrence count, with ties broken by
sort order of the tokens (high to low). At the end of `adapt()`, if
`max_tokens` is set, the vocabulary will be truncated to `max_tokens`
size. For example, adapting a layer with `max_tokens=1000` will compute
the 1000 most frequent tokens occurring in the input dataset. If
`output_mode='tf_idf'`, `adapt()` will also learn the document
frequencies of each token in the input dataset.
Arguments:
data: The data to train on. It can be passed either as a
batched `tf.data.Dataset`, as a list of strings,
or as a NumPy array.
steps: Integer or `None`.
Total number of steps (batches of samples) to process.
If `data` is a `tf.data.Dataset`, and `steps` is `None`,
`adapt()` will run until the input dataset is exhausted.
When passing an infinitely
repeating dataset, you must specify the `steps` argument. This
argument is not supported with array inputs or list inputs.
"""
super().adapt(data, steps=steps)
# Overridden methods from IndexLookup.
def _tensor_vocab_to_numpy(self, vocabulary):
vocabulary = vocabulary.numpy()
return np.array(
[tf.compat.as_text(x, self.encoding) for x in vocabulary]
)
def get_config(self):
config = {"encoding": self.encoding}
base_config = super().get_config()
# There is only one valid dtype for strings, so we don't expose this.
del base_config["vocabulary_dtype"]
return {**base_config, **config}
def call(self, inputs):
if isinstance(inputs, (tf.Tensor, tf.RaggedTensor)):
tf_inputs = True
else:
tf_inputs = False
if not isinstance(inputs, (np.ndarray, list, tuple)):
inputs = tf.convert_to_tensor(backend.convert_to_numpy(inputs))
outputs = super().call(inputs)
if (
not tf_inputs
and backend.backend() != "tensorflow"
and not backend_utils.in_tf_graph()
):
outputs = backend.convert_to_tensor(outputs)
return outputs
|
keras-core/keras_core/layers/preprocessing/string_lookup.py/0
|
{
"file_path": "keras-core/keras_core/layers/preprocessing/string_lookup.py",
"repo_id": "keras-core",
"token_count": 7113
}
| 44 |
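When adapted with `output_mode="tf_idf"`, each sample is treated as a document and the per-token IDF weight follows the formula quoted in the docstring. A small illustrative sketch of that computation on toy data (plain Python/NumPy, not the layer's internal implementation):
```python
import numpy as np

# Toy corpus: each inner list is one "document" (one sample passed to adapt()).
docs = [["a", "c", "d"], ["d", "z", "b"]]
num_documents = len(docs)

# Document frequency: in how many documents does each token appear?
vocab = sorted({token for doc in docs for token in doc})
doc_count = {t: sum(t in doc for doc in docs) for t in vocab}

# IDF weight per token, as described in the docstring:
#   log(1 + num_documents / (1 + token_document_count))
idf = {t: np.log(1 + num_documents / (1 + doc_count[t])) for t in vocab}
print(idf)  # "d" appears in both documents, so it gets the smallest weight
```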
import numpy as np
import pytest
from keras_core import layers
from keras_core import testing
from keras_core.backend.common.keras_tensor import KerasTensor
class UpSamplingTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_upsampling_1d(self):
self.run_layer_test(
layers.UpSampling1D,
init_kwargs={"size": 2},
input_shape=(3, 5, 4),
expected_output_shape=(3, 10, 4),
expected_output_dtype="float32",
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
)
def test_upsampling_1d_correctness(self):
self.assertAllClose(
layers.UpSampling1D(size=2)(np.arange(12).reshape((2, 2, 3))),
np.array(
[
[
[0.0, 1.0, 2.0],
[0.0, 1.0, 2.0],
[3.0, 4.0, 5.0],
[3.0, 4.0, 5.0],
],
[
[6.0, 7.0, 8.0],
[6.0, 7.0, 8.0],
[9.0, 10.0, 11.0],
[9.0, 10.0, 11.0],
],
]
),
)
def test_upsampling_1d_correctness_with_ones(self):
self.assertAllClose(
layers.UpSampling1D(size=3)(np.ones((2, 1, 5))), np.ones((2, 3, 5))
)
def test_upsampling_1d_with_dynamic_batch_size(self):
x = KerasTensor([None, 2, 3])
self.assertEqual(layers.UpSampling1D(size=2)(x).shape, (None, 4, 3))
self.assertEqual(layers.UpSampling1D(size=4)(x).shape, (None, 8, 3))
def test_upsampling_1d_with_dynamic_shape(self):
y = KerasTensor([2, None, 3])
self.assertEqual(layers.UpSampling1D(size=2)(y).shape, (2, None, 3))
self.assertEqual(layers.UpSampling1D(size=4)(y).shape, (2, None, 3))
z = KerasTensor([2, 3, None])
self.assertEqual(layers.UpSampling1D(size=2)(z).shape, (2, 6, None))
self.assertEqual(layers.UpSampling1D(size=4)(z).shape, (2, 12, None))
|
keras-core/keras_core/layers/reshaping/up_sampling1d_test.py/0
|
{
"file_path": "keras-core/keras_core/layers/reshaping/up_sampling1d_test.py",
"repo_id": "keras-core",
"token_count": 1297
}
| 45 |
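The correctness tests above pin down what `UpSampling1D` does: every temporal step is repeated `size` times. For comparison, the same expected array can be produced with `np.repeat` (a reference sketch, not the layer's internal implementation):
```python
import numpy as np

x = np.arange(12).reshape((2, 2, 3))  # (batch, steps, features)

# Repeat each timestep twice along the temporal axis (axis=1).
upsampled = np.repeat(x, repeats=2, axis=1)
print(upsampled.shape)  # (2, 4, 3), matching UpSampling1D(size=2)
```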
import numpy as np
import pytest
from keras_core import initializers
from keras_core import layers
from keras_core import testing
class ConvLSTM1DTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_basics(self):
self.run_layer_test(
layers.ConvLSTM1D,
init_kwargs={"filters": 5, "kernel_size": 3, "padding": "same"},
input_shape=(3, 2, 4, 3),
expected_output_shape=(3, 4, 5),
expected_num_trainable_weights=3,
expected_num_non_trainable_weights=0,
supports_masking=True,
)
self.run_layer_test(
layers.ConvLSTM1D,
init_kwargs={
"filters": 5,
"kernel_size": 3,
"padding": "valid",
"recurrent_dropout": 0.5,
},
input_shape=(3, 2, 8, 3),
call_kwargs={"training": True},
expected_output_shape=(3, 6, 5),
expected_num_trainable_weights=3,
expected_num_non_trainable_weights=0,
supports_masking=True,
)
self.run_layer_test(
layers.ConvLSTM1D,
init_kwargs={
"filters": 5,
"kernel_size": 3,
"padding": "valid",
"return_sequences": True,
},
input_shape=(3, 2, 8, 3),
expected_output_shape=(3, 2, 6, 5),
expected_num_trainable_weights=3,
expected_num_non_trainable_weights=0,
supports_masking=True,
)
def test_correctness(self):
sequence = np.arange(120).reshape((2, 3, 4, 5)).astype("float32") / 10
layer = layers.ConvLSTM1D(
filters=2,
kernel_size=3,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
)
output = layer(sequence)
self.assertAllClose(
np.array(
[
[[0.40807986, 0.40807986], [0.46421072, 0.46421072]],
[[0.80933154, 0.80933154], [0.8233646, 0.8233646]],
]
),
output,
)
|
keras-core/keras_core/layers/rnn/conv_lstm1d_test.py/0
|
{
"file_path": "keras-core/keras_core/layers/rnn/conv_lstm1d_test.py",
"repo_id": "keras-core",
"token_count": 1274
}
| 46 |
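The expected shapes in these tests follow standard 1D convolution arithmetic on the spatial axis of each `ConvLSTM1D` step: `"same"` padding preserves the spatial length, while `"valid"` padding shrinks it by `kernel_size - 1` at stride 1. A small helper illustrating that arithmetic (assumes dilation 1; written here only to explain the asserted shapes):
```python
def conv1d_output_length(length, kernel_size, padding, stride=1):
    """Spatial length after a 1D convolution with dilation 1."""
    if padding == "same":
        return -(-length // stride)  # ceil division
    if padding == "valid":
        return -(-(length - kernel_size + 1) // stride)
    raise ValueError(f"Unknown padding: {padding}")

# Shapes asserted above:
print(conv1d_output_length(4, kernel_size=3, padding="same"))   # -> 4
print(conv1d_output_length(8, kernel_size=3, padding="valid"))  # -> 6
```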
import tree
from keras_core import ops
from keras_core.api_export import keras_core_export
from keras_core.layers.layer import Layer
from keras_core.saving import serialization_lib
@keras_core_export("keras_core.layers.StackedRNNCells")
class StackedRNNCells(Layer):
"""Wrapper allowing a stack of RNN cells to behave as a single cell.
Used to implement efficient stacked RNNs.
Args:
cells: List of RNN cell instances.
Examples:
```python
batch_size = 3
sentence_length = 5
num_features = 2
new_shape = (batch_size, sentence_length, num_features)
x = np.reshape(np.arange(30), new_shape)
rnn_cells = [keras_core.layers.LSTMCell(128) for _ in range(2)]
stacked_lstm = keras_core.layers.StackedRNNCells(rnn_cells)
lstm_layer = keras_core.layers.RNN(stacked_lstm)
result = lstm_layer(x)
```
"""
def __init__(self, cells, **kwargs):
super().__init__(**kwargs)
for cell in cells:
if "call" not in dir(cell):
raise ValueError(
"All cells must have a `call` method. "
f"Received cell without a `call` method: {cell}"
)
if "state_size" not in dir(cell):
raise ValueError(
"All cells must have a `state_size` attribute. "
f"Received cell without a `state_size`: {cell}"
)
self.cells = cells
@property
def state_size(self):
return [c.state_size for c in self.cells]
@property
def output_size(self):
if getattr(self.cells[-1], "output_size", None) is not None:
return self.cells[-1].output_size
elif isinstance(self.cells[-1].state_size, (list, tuple)):
return self.cells[-1].state_size[0]
else:
return self.cells[-1].state_size
def get_initial_state(self, batch_size=None):
initial_states = []
for cell in self.cells:
get_initial_state_fn = getattr(cell, "get_initial_state", None)
if get_initial_state_fn:
initial_states.append(
get_initial_state_fn(batch_size=batch_size)
)
else:
if isinstance(cell.state_size, int):
initial_states.append(
ops.zeros(
(batch_size, cell.state_size),
dtype=self.compute_dtype,
)
)
else:
initial_states.append(
[
ops.zeros((batch_size, d), dtype=self.compute_dtype)
for d in cell.state_size
]
)
return initial_states
def call(self, inputs, states, training=False, **kwargs):
# Call the cells in order and store the returned states.
new_states = []
for cell, states in zip(self.cells, states):
state_is_list = tree.is_nested(states)
states = list(states) if tree.is_nested(states) else [states]
if isinstance(cell, Layer) and cell._call_has_training_arg:
kwargs["training"] = training
else:
kwargs.pop("training", None)
cell_call_fn = cell.__call__ if callable(cell) else cell.call
inputs, states = cell_call_fn(inputs, states, **kwargs)
if len(states) == 1 and not state_is_list:
states = states[0]
new_states.append(states)
if len(new_states) == 1:
new_states = new_states[0]
return inputs, new_states
def build(self, input_shape):
for cell in self.cells:
if isinstance(cell, Layer) and not cell.built:
cell.build(input_shape)
cell.built = True
if getattr(cell, "output_size", None) is not None:
output_dim = cell.output_size
elif isinstance(cell.state_size, (list, tuple)):
output_dim = cell.state_size[0]
else:
output_dim = cell.state_size
batch_size = tree.flatten(input_shape)[0]
input_shape = (batch_size, output_dim)
self.built = True
def get_config(self):
cells = []
for cell in self.cells:
cells.append(serialization_lib.serialize_keras_object(cell))
config = {"cells": cells}
base_config = super().get_config()
return {**base_config, **config}
@classmethod
def from_config(cls, config, custom_objects=None):
cells = []
for cell_config in config.pop("cells"):
cells.append(
serialization_lib.deserialize_keras_object(
cell_config, custom_objects=custom_objects
)
)
return cls(cells, **config)
|
keras-core/keras_core/layers/rnn/stacked_rnn_cells.py/0
|
{
"file_path": "keras-core/keras_core/layers/rnn/stacked_rnn_cells.py",
"repo_id": "keras-core",
"token_count": 2503
}
| 47 |
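`StackedRNNCells.call()` chains the cells: the first cell consumes the layer input, each subsequent cell consumes the previous cell's output, and every cell keeps its own state list. A stripped-down, framework-free sketch of that control flow (the toy cell below is purely illustrative):
```python
class ToyCell:
    """Minimal stand-in for an RNN cell: output = input + state, new state = [output]."""

    state_size = 1

    def call(self, inputs, states):
        output = inputs + states[0]
        return output, [output]


def stacked_call(cells, inputs, states_per_cell):
    new_states = []
    for cell, states in zip(cells, states_per_cell):
        # The output of one cell becomes the input of the next,
        # mirroring StackedRNNCells.call().
        inputs, states = cell.call(inputs, states)
        new_states.append(states)
    return inputs, new_states


output, states = stacked_call(
    [ToyCell(), ToyCell()], inputs=1.0, states_per_cell=[[10.0], [100.0]]
)
print(output, states)  # 111.0 [[11.0], [111.0]]
```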
import os
import numpy as np
import pytest
import keras_core
from keras_core import layers
from keras_core import models
from keras_core import ops
from keras_core import testing
from keras_core.legacy.saving import legacy_h5_format
from keras_core.saving import object_registration
from keras_core.saving import serialization_lib
# TODO: more thorough testing. Correctness depends
# on exact weight ordering for each layer, so we need
# to test across all types of layers.
# TODO: reenable tests after tf_keras is available.
tf_keras = None
def get_sequential_model(keras):
return keras.Sequential(
[
keras.layers.Input((3,), batch_size=2),
keras.layers.Dense(4, activation="relu"),
keras.layers.BatchNormalization(
moving_mean_initializer="uniform", gamma_initializer="uniform"
),
keras.layers.Dense(5, activation="softmax"),
]
)
def get_functional_model(keras):
inputs = keras.Input((3,), batch_size=2)
x = keras.layers.Dense(4, activation="relu")(inputs)
residual = x
x = keras.layers.BatchNormalization(
moving_mean_initializer="uniform", gamma_initializer="uniform"
)(x)
x = keras.layers.Dense(4, activation="relu")(x)
x = keras.layers.add([x, residual])
outputs = keras.layers.Dense(5, activation="softmax")(x)
return keras.Model(inputs, outputs)
def get_subclassed_model(keras):
class MyModel(keras.Model):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.dense_1 = keras.layers.Dense(3, activation="relu")
self.dense_2 = keras.layers.Dense(1, activation="sigmoid")
def call(self, x):
return self.dense_2(self.dense_1(x))
model = MyModel()
model(np.random.random((2, 3)))
return model
@pytest.mark.requires_trainable_backend
class LegacyH5WeightsTest(testing.TestCase):
def _check_reloading_weights(self, ref_input, model, tf_keras_model):
ref_output = tf_keras_model(ref_input)
initial_weights = model.get_weights()
# Check weights only file
temp_filepath = os.path.join(self.get_temp_dir(), "weights.h5")
tf_keras_model.save_weights(temp_filepath)
model.load_weights(temp_filepath)
output = model(ref_input)
self.assertAllClose(ref_output, output, atol=1e-5)
model.set_weights(initial_weights)
model.load_weights(temp_filepath)
output = model(ref_input)
self.assertAllClose(ref_output, output, atol=1e-5)
def DISABLED_test_sequential_model_weights(self):
model = get_sequential_model(keras_core)
tf_keras_model = get_sequential_model(tf_keras)
ref_input = np.random.random((2, 3))
self._check_reloading_weights(ref_input, model, tf_keras_model)
def DISABLED_test_functional_model_weights(self):
model = get_functional_model(keras_core)
tf_keras_model = get_functional_model(tf_keras)
ref_input = np.random.random((2, 3))
self._check_reloading_weights(ref_input, model, tf_keras_model)
def DISABLED_test_subclassed_model_weights(self):
model = get_subclassed_model(keras_core)
tf_keras_model = get_subclassed_model(tf_keras)
ref_input = np.random.random((2, 3))
self._check_reloading_weights(ref_input, model, tf_keras_model)
@pytest.mark.requires_trainable_backend
class LegacyH5WholeModelTest(testing.TestCase):
def _check_reloading_model(self, ref_input, model):
# Whole model file
ref_output = model(ref_input)
temp_filepath = os.path.join(self.get_temp_dir(), "model.h5")
legacy_h5_format.save_model_to_hdf5(model, temp_filepath)
loaded = legacy_h5_format.load_model_from_hdf5(temp_filepath)
output = loaded(ref_input)
self.assertAllClose(ref_output, output, atol=1e-5)
def DISABLED_test_sequential_model(self):
model = get_sequential_model(keras_core)
ref_input = np.random.random((2, 3))
self._check_reloading_model(ref_input, model)
def DISABLED_test_functional_model(self):
model = get_functional_model(keras_core)
ref_input = np.random.random((2, 3))
self._check_reloading_model(ref_input, model)
def DISABLED_test_compiled_model_with_various_layers(self):
model = models.Sequential()
model.add(layers.Dense(2, input_shape=(3,)))
model.add(layers.RepeatVector(3))
model.add(layers.TimeDistributed(layers.Dense(3)))
model.compile(optimizer="rmsprop", loss="mse")
ref_input = np.random.random((1, 3))
self._check_reloading_model(ref_input, model)
def DISABLED_test_saving_lambda(self):
mean = ops.random.uniform((4, 2, 3))
std = ops.abs(ops.random.uniform((4, 2, 3))) + 1e-5
inputs = layers.Input(shape=(4, 2, 3))
output = layers.Lambda(
lambda image, mu, std: (image - mu) / std,
arguments={"mu": mean, "std": std},
)(inputs)
model = models.Model(inputs, output)
model.compile(loss="mse", optimizer="sgd", metrics=["acc"])
temp_filepath = os.path.join(self.get_temp_dir(), "lambda_model.h5")
legacy_h5_format.save_model_to_hdf5(model, temp_filepath)
loaded = legacy_h5_format.load_model_from_hdf5(temp_filepath)
self.assertAllClose(mean, loaded.layers[1].arguments["mu"])
self.assertAllClose(std, loaded.layers[1].arguments["std"])
def DISABLED_test_saving_include_optimizer_false(self):
model = models.Sequential()
model.add(layers.Dense(1))
model.compile("adam", loss="mse")
x, y = np.ones((10, 10)), np.ones((10, 1))
model.fit(x, y)
ref_output = model(x)
temp_filepath = os.path.join(self.get_temp_dir(), "model.h5")
legacy_h5_format.save_model_to_hdf5(
model, temp_filepath, include_optimizer=False
)
loaded = legacy_h5_format.load_model_from_hdf5(temp_filepath)
output = loaded(x)
# Assert that optimizer does not exist in loaded model
with self.assertRaises(AttributeError):
_ = loaded.optimizer
# Compare output
self.assertAllClose(ref_output, output, atol=1e-5)
def DISABLED_test_custom_sequential_registered_no_scope(self):
@object_registration.register_keras_serializable(package="my_package")
class MyDense(layers.Dense):
def __init__(self, units, **kwargs):
super().__init__(units, **kwargs)
inputs = layers.Input(shape=[1])
custom_layer = MyDense(1)
model = models.Sequential(layers=[inputs, custom_layer])
ref_input = np.array([5])
self._check_reloading_model(ref_input, model)
def DISABLED_test_custom_functional_registered_no_scope(self):
@object_registration.register_keras_serializable(package="my_package")
class MyDense(layers.Dense):
def __init__(self, units, **kwargs):
super().__init__(units, **kwargs)
inputs = layers.Input(shape=[1])
outputs = MyDense(1)(inputs)
model = models.Model(inputs, outputs)
ref_input = np.array([5])
self._check_reloading_model(ref_input, model)
def DISABLED_test_nested_layers(self):
class MyLayer(layers.Layer):
def __init__(self, sublayers, **kwargs):
super().__init__(**kwargs)
self.sublayers = sublayers
def call(self, x):
prev_input = x
for layer in self.sublayers:
prev_input = layer(prev_input)
return prev_input
def get_config(self):
config = super().get_config()
config["sublayers"] = serialization_lib.serialize_keras_object(
self.sublayers
)
return config
@classmethod
def from_config(cls, config):
config[
"sublayers"
] = serialization_lib.deserialize_keras_object(
config["sublayers"]
)
return cls(**config)
@object_registration.register_keras_serializable(package="Foo")
class RegisteredSubLayer(layers.Layer):
pass
layer = MyLayer(
[
layers.Dense(2, name="MyDense"),
RegisteredSubLayer(name="MySubLayer"),
]
)
model = models.Sequential([layer])
with self.subTest("test_JSON"):
from keras_core.models.model import model_from_json
model_json = model.to_json()
self.assertIn("Foo>RegisteredSubLayer", model_json)
loaded_model = model_from_json(
model_json, custom_objects={"MyLayer": MyLayer}
)
loaded_layer = loaded_model.layers[0]
self.assertIsInstance(loaded_layer.sublayers[0], layers.Dense)
self.assertEqual(loaded_layer.sublayers[0].name, "MyDense")
self.assertIsInstance(loaded_layer.sublayers[1], RegisteredSubLayer)
self.assertEqual(loaded_layer.sublayers[1].name, "MySubLayer")
with self.subTest("test_H5"):
temp_filepath = os.path.join(self.get_temp_dir(), "model.h5")
legacy_h5_format.save_model_to_hdf5(model, temp_filepath)
loaded_model = legacy_h5_format.load_model_from_hdf5(
temp_filepath, custom_objects={"MyLayer": MyLayer}
)
loaded_layer = loaded_model.layers[0]
self.assertIsInstance(loaded_layer.sublayers[0], layers.Dense)
self.assertEqual(loaded_layer.sublayers[0].name, "MyDense")
self.assertIsInstance(loaded_layer.sublayers[1], RegisteredSubLayer)
self.assertEqual(loaded_layer.sublayers[1].name, "MySubLayer")
@pytest.mark.requires_trainable_backend
class LegacyH5BackwardsCompatTest(testing.TestCase):
def _check_reloading_model(self, ref_input, model, tf_keras_model):
# Whole model file
ref_output = tf_keras_model(ref_input)
temp_filepath = os.path.join(self.get_temp_dir(), "model.h5")
tf_keras_model.save(temp_filepath)
loaded = legacy_h5_format.load_model_from_hdf5(temp_filepath)
output = loaded(ref_input)
self.assertAllClose(ref_output, output, atol=1e-5)
def DISABLED_test_sequential_model(self):
model = get_sequential_model(keras_core)
tf_keras_model = get_sequential_model(tf_keras)
ref_input = np.random.random((2, 3))
self._check_reloading_model(ref_input, model, tf_keras_model)
def DISABLED_test_functional_model(self):
tf_keras_model = get_functional_model(tf_keras)
model = get_functional_model(keras_core)
ref_input = np.random.random((2, 3))
self._check_reloading_model(ref_input, model, tf_keras_model)
def DISABLED_test_compiled_model_with_various_layers(self):
model = models.Sequential()
model.add(layers.Dense(2, input_shape=(3,)))
model.add(layers.RepeatVector(3))
model.add(layers.TimeDistributed(layers.Dense(3)))
model.compile(optimizer="rmsprop", loss="mse")
tf_keras_model = tf_keras.Sequential()
tf_keras_model.add(tf_keras.layers.Dense(2, input_shape=(3,)))
tf_keras_model.add(tf_keras.layers.RepeatVector(3))
tf_keras_model.add(
tf_keras.layers.TimeDistributed(tf_keras.layers.Dense(3))
)
tf_keras_model.compile(optimizer="rmsprop", loss="mse")
ref_input = np.random.random((1, 3))
self._check_reloading_model(ref_input, model, tf_keras_model)
def DISABLED_test_saving_lambda(self):
mean = np.random.random((4, 2, 3))
std = np.abs(np.random.random((4, 2, 3))) + 1e-5
inputs = tf_keras.layers.Input(shape=(4, 2, 3))
output = tf_keras.layers.Lambda(
lambda image, mu, std: (image - mu) / std,
arguments={"mu": mean, "std": std},
output_shape=inputs.shape,
)(inputs)
tf_keras_model = tf_keras.Model(inputs, output)
tf_keras_model.compile(loss="mse", optimizer="sgd", metrics=["acc"])
temp_filepath = os.path.join(self.get_temp_dir(), "lambda_model.h5")
tf_keras_model.save(temp_filepath)
loaded = legacy_h5_format.load_model_from_hdf5(temp_filepath)
self.assertAllClose(mean, loaded.layers[1].arguments["mu"])
self.assertAllClose(std, loaded.layers[1].arguments["std"])
def DISABLED_test_saving_include_optimizer_false(self):
tf_keras_model = tf_keras.Sequential()
tf_keras_model.add(tf_keras.layers.Dense(1))
tf_keras_model.compile("adam", loss="mse")
x, y = np.ones((10, 10)), np.ones((10, 1))
tf_keras_model.fit(x, y)
ref_output = tf_keras_model(x)
temp_filepath = os.path.join(self.get_temp_dir(), "model.h5")
tf_keras_model.save(temp_filepath, include_optimizer=False)
loaded = legacy_h5_format.load_model_from_hdf5(temp_filepath)
output = loaded(x)
# Assert that optimizer does not exist in loaded model
with self.assertRaises(AttributeError):
_ = loaded.optimizer
# Compare output
self.assertAllClose(ref_output, output, atol=1e-5)
def DISABLED_test_custom_sequential_registered_no_scope(self):
@tf_keras.saving.register_keras_serializable(package="my_package")
class MyDense(tf_keras.layers.Dense):
def __init__(self, units, **kwargs):
super().__init__(units, **kwargs)
inputs = tf_keras.layers.Input(shape=[1])
custom_layer = MyDense(1)
tf_keras_model = tf_keras.Sequential(layers=[inputs, custom_layer])
# Re-implement and re-register in Keras Core
@object_registration.register_keras_serializable(package="my_package")
class MyDense(layers.Dense):
def __init__(self, units, **kwargs):
super().__init__(units, **kwargs)
inputs = layers.Input(shape=[1])
custom_layer = MyDense(1)
model = models.Sequential(layers=[inputs, custom_layer])
ref_input = np.array([5])
self._check_reloading_model(ref_input, model, tf_keras_model)
def DISABLED_test_custom_functional_registered_no_scope(self):
@tf_keras.saving.register_keras_serializable(package="my_package")
class MyDense(tf_keras.layers.Dense):
def __init__(self, units, **kwargs):
super().__init__(units, **kwargs)
inputs = tf_keras.layers.Input(shape=[1])
outputs = MyDense(1)(inputs)
tf_keras_model = tf_keras.Model(inputs, outputs)
# Re-implement and re-register in Keras Core
@object_registration.register_keras_serializable(package="my_package")
class MyDense(layers.Dense):
def __init__(self, units, **kwargs):
super().__init__(units, **kwargs)
inputs = layers.Input(shape=[1])
outputs = MyDense(1)(inputs)
model = models.Model(inputs, outputs)
ref_input = np.array([5])
self._check_reloading_model(ref_input, model, tf_keras_model)
def DISABLED_test_nested_layers(self):
class MyLayer(tf_keras.layers.Layer):
def __init__(self, sublayers, **kwargs):
super().__init__(**kwargs)
self.sublayers = sublayers
def call(self, x):
prev_input = x
for layer in self.sublayers:
prev_input = layer(prev_input)
return prev_input
def get_config(self):
config = super().get_config()
config["sublayers"] = tf_keras.saving.serialize_keras_object(
self.sublayers
)
return config
@classmethod
def from_config(cls, config):
config["sublayers"] = tf_keras.saving.deserialize_keras_object(
config["sublayers"]
)
return cls(**config)
@tf_keras.saving.register_keras_serializable(package="Foo")
class RegisteredSubLayer(layers.Layer):
def call(self, x):
return x
layer = MyLayer(
[
tf_keras.layers.Dense(2, name="MyDense"),
RegisteredSubLayer(name="MySubLayer"),
]
)
tf_keras_model = tf_keras.Sequential([layer])
x = np.random.random((4, 2))
ref_output = tf_keras_model(x)
# Save TF Keras model to H5 file
temp_filepath = os.path.join(self.get_temp_dir(), "model.h5")
tf_keras_model.save(temp_filepath)
# Re-implement in Keras Core
class MyLayer(layers.Layer):
def __init__(self, sublayers, **kwargs):
super().__init__(**kwargs)
self.sublayers = sublayers
def call(self, x):
prev_input = x
for layer in self.sublayers:
prev_input = layer(prev_input)
return prev_input
def get_config(self):
config = super().get_config()
config["sublayers"] = serialization_lib.serialize_keras_object(
self.sublayers
)
return config
@classmethod
def from_config(cls, config):
config[
"sublayers"
] = serialization_lib.deserialize_keras_object(
config["sublayers"]
)
return cls(**config)
# Re-implement and re-register in Keras Core
@object_registration.register_keras_serializable(package="Foo")
class RegisteredSubLayer(layers.Layer):
def call(self, x):
return x
# Load in Keras Core
loaded_model = legacy_h5_format.load_model_from_hdf5(
temp_filepath, custom_objects={"MyLayer": MyLayer}
)
loaded_layer = loaded_model.layers[0]
output = loaded_model(x)
# Ensure nested layer structure
self.assertIsInstance(loaded_layer.sublayers[0], layers.Dense)
self.assertEqual(loaded_layer.sublayers[0].name, "MyDense")
self.assertIsInstance(loaded_layer.sublayers[1], RegisteredSubLayer)
self.assertEqual(loaded_layer.sublayers[1].name, "MySubLayer")
# Compare output
self.assertAllClose(ref_output, output, atol=1e-5)
@pytest.mark.requires_trainable_backend
class DirectoryCreationTest(testing.TestCase):
def DISABLED_test_directory_creation_on_save(self):
"""Test if directory is created on model save."""
model = get_sequential_model(keras_core)
nested_dirpath = os.path.join(
self.get_temp_dir(), "dir1", "dir2", "dir3"
)
filepath = os.path.join(nested_dirpath, "model.h5")
self.assertFalse(os.path.exists(nested_dirpath))
legacy_h5_format.save_model_to_hdf5(model, filepath)
self.assertTrue(os.path.exists(nested_dirpath))
loaded_model = legacy_h5_format.load_model_from_hdf5(filepath)
self.assertEqual(model.to_json(), loaded_model.to_json())
|
keras-core/keras_core/legacy/saving/legacy_h5_format_test.py/0
|
{
"file_path": "keras-core/keras_core/legacy/saving/legacy_h5_format_test.py",
"repo_id": "keras-core",
"token_count": 9183
}
| 48 |
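The round trip exercised by these (currently disabled) tests boils down to a few calls. A minimal sketch, assuming keras-core is installed and using an illustrative temporary path:
```python
import numpy as np

from keras_core import backend, layers, models
from keras_core.legacy.saving import legacy_h5_format

# Small Sequential model, as in get_sequential_model() above.
model = models.Sequential(
    [layers.Input((3,), batch_size=2), layers.Dense(4, activation="relu")]
)
x = np.random.random((2, 3))
ref_output = model(x)

# Whole-model save/load through the legacy HDF5 format.
legacy_h5_format.save_model_to_hdf5(model, "/tmp/model.h5")  # path is illustrative
loaded = legacy_h5_format.load_model_from_hdf5("/tmp/model.h5")

np.testing.assert_allclose(
    backend.convert_to_numpy(ref_output),
    backend.convert_to_numpy(loaded(x)),
    atol=1e-5,
)
```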
from keras_core.api_export import keras_core_export
from keras_core.losses.losses import categorical_hinge
from keras_core.losses.losses import hinge
from keras_core.losses.losses import squared_hinge
from keras_core.metrics import reduction_metrics
@keras_core_export("keras_core.metrics.Hinge")
class Hinge(reduction_metrics.MeanMetricWrapper):
"""Computes the hinge metric between `y_true` and `y_pred`.
`y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
provided we will convert them to -1 or 1.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = keras_core.metrics.Hinge()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
>>> m.result()
1.3
>>> m.reset_state()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
... sample_weight=[1, 0])
>>> m.result()
1.1
"""
def __init__(self, name="hinge", dtype=None):
super().__init__(fn=hinge, name=name, dtype=dtype)
def get_config(self):
return {"name": self.name, "dtype": self.dtype}
@keras_core_export("keras_core.metrics.SquaredHinge")
class SquaredHinge(reduction_metrics.MeanMetricWrapper):
"""Computes the hinge metric between `y_true` and `y_pred`.
`y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
provided we will convert them to -1 or 1.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = keras_core.metrics.SquaredHinge()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
>>> m.result()
1.86
>>> m.reset_state()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
... sample_weight=[1, 0])
>>> m.result()
1.46
"""
def __init__(self, name="squared_hinge", dtype=None):
super().__init__(fn=squared_hinge, name=name, dtype=dtype)
def get_config(self):
return {"name": self.name, "dtype": self.dtype}
@keras_core_export("keras_core.metrics.CategoricalHinge")
class CategoricalHinge(reduction_metrics.MeanMetricWrapper):
"""Computes the categorical hinge metric between `y_true` and `y_pred`.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = keras_core.metrics.CategoricalHinge()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
>>> m.result()
1.4000001
>>> m.reset_state()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
... sample_weight=[1, 0])
>>> m.result()
1.2
"""
def __init__(self, name="categorical_hinge", dtype=None):
super().__init__(fn=categorical_hinge, name=name, dtype=dtype)
def get_config(self):
return {"name": self.name, "dtype": self.dtype}
|
keras-core/keras_core/metrics/hinge_metrics.py/0
|
{
"file_path": "keras-core/keras_core/metrics/hinge_metrics.py",
"repo_id": "keras-core",
"token_count": 1339
}
| 49 |
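The `1.3` in the `Hinge` standalone-usage example follows directly from the definition: binary 0/1 labels are first mapped to -1/1, then the metric is the mean of `max(1 - y_true * y_pred, 0)`. A hand-rolled NumPy check (illustrative only, not the keras-core implementation):
```python
import numpy as np

y_true = np.array([[0, 1], [0, 0]], dtype="float32")
y_pred = np.array([[0.6, 0.4], [0.4, 0.6]], dtype="float32")

# Binary 0/1 labels are converted to -1/1 before computing the hinge value.
y_true_signed = 2.0 * y_true - 1.0

# Per-sample hinge, then mean over the batch.
per_sample = np.maximum(1.0 - y_true_signed * y_pred, 0.0).mean(axis=-1)
print(per_sample.mean())  # 1.3, matching the docstring example
```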
import tree
from keras_core import backend
from keras_core import utils
from keras_core.api_export import keras_core_export
from keras_core.layers import Input
from keras_core.layers import InputLayer
from keras_core.models.functional import Functional
from keras_core.models.functional import functional_like_constructor
from keras_core.models.sequential import Sequential
from keras_core.saving import serialization_lib
@keras_core_export("keras_core.models.clone_model")
def clone_model(model, input_tensors=None, clone_function=None):
"""Clone a Functional or Sequential `Model` instance.
Model cloning is similar to calling a model on new inputs,
except that it creates new layers (and thus new weights) instead
of sharing the weights of the existing layers.
Note that
`clone_model` will not preserve the uniqueness of shared objects within the
model (e.g. a single variable attached to two distinct layers will be
restored as two separate variables).
Args:
model: Instance of `Model`
(could be a Functional model or a Sequential model).
input_tensors: optional list of input tensors or InputLayer objects
to build the model upon. If not provided,
new `Input` objects will be created.
clone_function: Callable to be used to clone each layer in the target
model (except `Input` instances). It takes as argument the
layer instance to be cloned, and returns the corresponding layer
instance to be used in the model copy. If unspecified, this callable
becomes the following serialization/deserialization function:
`lambda layer: layer.__class__.from_config(layer.get_config())`.
By passing a custom callable, you can customize your copy of the
model, e.g. by wrapping certain layers of interest (you might want
to replace all `LSTM` instances with equivalent
`Bidirectional(LSTM(...))` instances, for example).
Defaults to `None`.
Returns:
An instance of `Model` reproducing the behavior
of the original model, on top of new inputs tensors,
using newly instantiated weights. The cloned model may behave
differently from the original model if a custom `clone_function`
modifies the layer.
Examples:
Basic usage:
```python
# Create a test Sequential model.
model = keras_core.Sequential([
keras_core.layers.Input(shape=(728,)),
keras_core.layers.Dense(32, activation='relu'),
keras_core.layers.Dense(1, activation='sigmoid'),
])
# Create a copy of the test model (with freshly initialized weights).
new_model = clone_model(model)
```
Using a `clone_function` to make a model deterministic by setting the
random seed everywhere:
```python
def clone_function(layer):
config = layer.get_config()
if "seed" in config:
config["seed"] = 1337
return layer.__class__.from_config(config)
new_model = clone_model(model, clone_function=clone_function)
```
Note that subclassed models cannot be cloned by default,
since their internal layer structure is not known.
To achieve equivalent functionality
as `clone_model` in the case of a subclassed model, simply make sure
that the model class implements `get_config()`
(and optionally `from_config()`), and call:
```python
new_model = model.__class__.from_config(model.get_config())
```
In the case of a subclassed model, you cannot use a custom
`clone_function`.
"""
if isinstance(model, Sequential):
return _clone_sequential_model(
model, input_tensors=input_tensors, clone_function=clone_function
)
if isinstance(model, Functional):
# If the get_config() method is the same as a regular Functional
# model, we're safe to use _clone_functional_model (which relies
# on a Functional constructor). In the case where the get_config
# is custom, this may not necessarily work, but if clone_function
# or input_tensors are passed, we attempt it anyway
# in order to preserve backwards compatibility.
if utils.is_default(model.get_config) or (
clone_function or input_tensors
):
return _clone_functional_model(
model,
input_tensors=input_tensors,
clone_function=clone_function,
)
# Case of a custom model class
if clone_function or input_tensors:
raise ValueError(
"Arguments clone_function and input_tensors "
"are only supported for Sequential models "
"or Functional models. Received model of "
f"type '{model.__class__.__name__}', with "
f"clone_function={clone_function} and "
f"input_tensors={input_tensors}"
)
config = serialization_lib.serialize_keras_object(model)
return serialization_lib.deserialize_keras_object(
config, custom_objects={model.__class__.__name__: model.__class__}
)
def _clone_sequential_model(model, input_tensors=None, clone_function=None):
"""Clone a `Sequential` model instance.
Model cloning is similar to calling a model on new inputs,
except that it creates new layers (and thus new weights) instead
of sharing the weights of the existing layers.
Args:
model: Instance of `Sequential`.
input_tensors: optional list of input tensors
to build the model upon. If not provided,
placeholders will be created.
clone_function: callable to be applied on non-input layers in the model.
By default, it clones the layer (without copying the weights).
Returns:
An instance of `Sequential` reproducing the behavior
of the original model, on top of new inputs tensors,
using newly instantiated weights.
"""
if clone_function is None:
def _clone_layer(layer):
return layer.__class__.from_config(layer.get_config())
clone_function = _clone_layer
if not isinstance(model, Sequential):
raise ValueError(
"Expected `model` argument "
"to be a `Sequential` model instance. "
f"Received: model={model}"
)
if not callable(clone_function):
raise ValueError(
"Expected `clone_function` argument to be a callable. "
f"Received: clone_function={clone_function}"
)
new_layers = [clone_function(layer) for layer in model.layers]
if isinstance(model._layers[0], InputLayer):
ref_input_layer = model._layers[0]
input_name = ref_input_layer.name
input_batch_shape = ref_input_layer.batch_shape
input_dtype = ref_input_layer._dtype
else:
input_name = None
input_dtype = None
input_batch_shape = None
if input_tensors:
if isinstance(input_tensors, (list, tuple)):
if len(input_tensors) != 1:
raise ValueError(
"Argument `input_tensors` must contain a single tensor."
)
input_tensors = input_tensors[0]
if not isinstance(input_tensors, backend.KerasTensor):
raise ValueError(
"Argument `input_tensors` must be a KerasTensor. "
f"Received invalid value: input_tensors={input_tensors}"
)
inputs = Input(tensor=input_tensors, name=input_name)
new_layers = [inputs] + new_layers
else:
if input_batch_shape is not None:
inputs = Input(
tensor=input_tensors,
batch_shape=input_batch_shape,
dtype=input_dtype,
name=input_name,
)
new_layers = [inputs] + new_layers
return Sequential(new_layers, name=model.name, trainable=model.trainable)
def _clone_functional_model(model, input_tensors=None, clone_function=None):
"""Clone a `Functional` model instance.
Model cloning is similar to calling a model on new inputs,
except that it creates new layers (and thus new weights) instead
of sharing the weights of the existing layers.
Input layers are always cloned.
Args:
model: Instance of `Functional`.
input_tensors: optional list of input tensors
to build the model upon. If not provided,
placeholders will be created.
clone_function: callable to be applied on non-input layers in the model.
By default, it clones the layer (without copying the weights).
Returns:
An instance of `Functional` reproducing the behavior
of the original model, on top of new inputs tensors,
using newly instantiated weights.
"""
if clone_function is None:
seen = {}
def _clone_layer(layer):
if layer in seen:
return seen[layer]
new_layer = layer.__class__.from_config(layer.get_config())
seen[layer] = new_layer
return new_layer
clone_function = _clone_layer
if not callable(clone_function):
raise ValueError(
"Expected `clone_function` argument to be a callable. "
f"Received: clone_function={clone_function}"
)
if not isinstance(model, Functional):
raise ValueError(
"Expected `model` argument "
f"to be a Functional Model instance. Received: model={model}"
)
if input_tensors is not None:
input_tensors = tree.flatten(input_tensors)
if not all(isinstance(x, backend.KerasTensor) for x in input_tensors):
raise ValueError(
"All entries in `input_tensors` must be KerasTensors. "
f"Received invalid values: inputs_tensors={input_tensors}"
)
else:
input_tensors = tree.map_structure(
lambda x: Input(x.shape, dtype=x.dtype, name=x.name), model.input
)
def operation_fn(layer):
new_layer = clone_function(layer)
return new_layer
output_tensors = model._run_through_graph(
input_tensors, operation_fn=operation_fn
)
if functional_like_constructor(model.__class__):
new_model = model.__class__(
input_tensors, output_tensors, name=model.name
)
else:
# This may be incorrect: the new model will end up having a different
# class than the original. However various existing models rely
# on this behavior, so we keep it.
new_model = Functional(input_tensors, output_tensors, name=model.name)
return new_model
|
keras-core/keras_core/models/cloning.py/0
|
{
"file_path": "keras-core/keras_core/models/cloning.py",
"repo_id": "keras-core",
"token_count": 4271
}
| 50 |
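The `clone_model` docstring above mentions wrapping layers of interest via `clone_function`, for example replacing every `LSTM` with an equivalent `Bidirectional(LSTM(...))`. A sketch of what such a callable could look like (the toy model and layer choices are illustrative):
```python
from keras_core import layers, models
from keras_core.models.cloning import clone_model


def wrap_lstms(layer):
    # Rebuild LSTM layers inside a Bidirectional wrapper; clone everything else as-is.
    if isinstance(layer, layers.LSTM):
        return layers.Bidirectional(layers.LSTM.from_config(layer.get_config()))
    return layer.__class__.from_config(layer.get_config())


model = models.Sequential(
    [layers.Input((10, 4)), layers.LSTM(8), layers.Dense(1)]
)
new_model = clone_model(model, clone_function=wrap_lstms)
```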
import math
import numpy as np
import pytest
import scipy.ndimage
import tensorflow as tf
from absl.testing import parameterized
from keras_core import backend
from keras_core import testing
from keras_core.backend.common.keras_tensor import KerasTensor
from keras_core.ops import image as kimage
class ImageOpsDynamicShapeTest(testing.TestCase):
def test_resize(self):
x = KerasTensor([None, 20, 20, 3])
out = kimage.resize(x, size=(15, 15))
self.assertEqual(out.shape, (None, 15, 15, 3))
x = KerasTensor([None, None, 3])
out = kimage.resize(x, size=(15, 15))
self.assertEqual(out.shape, (15, 15, 3))
def test_affine_transform(self):
x = KerasTensor([None, 20, 20, 3])
transform = KerasTensor([None, 8])
out = kimage.affine_transform(x, transform)
self.assertEqual(out.shape, (None, 20, 20, 3))
def test_extract_patches(self):
x = KerasTensor([None, 20, 20, 3])
p_h, p_w = 5, 5
out = kimage.extract_patches(x, (p_h, p_w))
self.assertEqual(out.shape, (None, 4, 4, 75))
out = kimage.extract_patches(x, 5)
self.assertEqual(out.shape, (None, 4, 4, 75))
def test_map_coordinates(self):
input = KerasTensor([20, 20, None])
coordinates = KerasTensor([3, 15, 15, None])
out = kimage.map_coordinates(input, coordinates, 0)
self.assertEqual(out.shape, coordinates.shape[1:])
class ImageOpsStaticShapeTest(testing.TestCase):
def test_resize(self):
x = KerasTensor([20, 20, 3])
out = kimage.resize(x, size=(15, 15))
self.assertEqual(out.shape, (15, 15, 3))
def test_affine_transform(self):
x = KerasTensor([20, 20, 3])
transform = KerasTensor([8])
out = kimage.affine_transform(x, transform)
self.assertEqual(out.shape, (20, 20, 3))
def test_extract_patches(self):
x = KerasTensor([20, 20, 3])
p_h, p_w = 5, 5
out = kimage.extract_patches(x, (p_h, p_w))
self.assertEqual(out.shape, (4, 4, 75))
out = kimage.extract_patches(x, 5)
self.assertEqual(out.shape, (4, 4, 75))
def test_map_coordinates(self):
input = KerasTensor([20, 20, 3])
coordinates = KerasTensor([3, 15, 15, 3])
out = kimage.map_coordinates(input, coordinates, 0)
self.assertEqual(out.shape, coordinates.shape[1:])
AFFINE_TRANSFORM_INTERPOLATIONS = { # map to order
"nearest": 0,
"bilinear": 1,
}
def _compute_affine_transform_coordinates(image, transform):
need_squeeze = False
if len(image.shape) == 3: # unbatched
need_squeeze = True
image = np.expand_dims(image, axis=0)
transform = np.expand_dims(transform, axis=0)
batch_size = image.shape[0]
# get indices
meshgrid = np.meshgrid(
*[np.arange(size) for size in image.shape[1:]], indexing="ij"
)
indices = np.concatenate(
[np.expand_dims(x, axis=-1) for x in meshgrid], axis=-1
)
indices = np.tile(indices, (batch_size, 1, 1, 1, 1))
# swap the values
transform[:, 4], transform[:, 0] = (
transform[:, 0].copy(),
transform[:, 4].copy(),
)
transform[:, 5], transform[:, 2] = (
transform[:, 2].copy(),
transform[:, 5].copy(),
)
# deal with transform
transform = np.pad(transform, pad_width=[[0, 0], [0, 1]], constant_values=1)
transform = np.reshape(transform, (batch_size, 3, 3))
offset = np.pad(transform[:, 0:2, 2], pad_width=[[0, 0], [0, 1]])
transform[:, 0:2, 2] = 0
# transform the indices
coordinates = np.einsum("Bhwij, Bjk -> Bhwik", indices, transform)
coordinates = np.moveaxis(coordinates, source=-1, destination=1)
coordinates += np.reshape(a=offset, newshape=(*offset.shape, 1, 1, 1))
if need_squeeze:
coordinates = np.squeeze(coordinates, axis=0)
return coordinates
def _fixed_map_coordinates(
input, coordinates, order, fill_mode="constant", fill_value=0.0
):
# SciPy's implementation of map_coordinates handles boundaries incorrectly,
# unless mode='reflect'. For order=1, this only affects interpolation
# outside the bounds of the original array.
# https://github.com/scipy/scipy/issues/2640
padding = [
(
max(-np.floor(c.min()).astype(int) + 1, 0),
max(np.ceil(c.max()).astype(int) + 1 - size, 0),
)
for c, size in zip(coordinates, input.shape)
]
shifted_coords = [c + p[0] for p, c in zip(padding, coordinates)]
pad_mode = {
"nearest": "edge",
"mirror": "reflect",
"reflect": "symmetric",
}.get(fill_mode, fill_mode)
if fill_mode == "constant":
padded = np.pad(
input, padding, mode=pad_mode, constant_values=fill_value
)
else:
padded = np.pad(input, padding, mode=pad_mode)
result = scipy.ndimage.map_coordinates(
padded, shifted_coords, order=order, mode=fill_mode, cval=fill_value
)
return result
class ImageOpsCorrectnessTest(testing.TestCase, parameterized.TestCase):
@parameterized.parameters(
[
("bilinear", True, "channels_last"),
("nearest", True, "channels_last"),
("lanczos3", True, "channels_last"),
("lanczos5", True, "channels_last"),
("bicubic", True, "channels_last"),
("bilinear", False, "channels_last"),
("nearest", False, "channels_last"),
("lanczos3", False, "channels_last"),
("lanczos5", False, "channels_last"),
("bicubic", False, "channels_last"),
("bilinear", True, "channels_first"),
]
)
def test_resize(self, interpolation, antialias, data_format):
if backend.backend() == "torch":
if "lanczos" in interpolation:
self.skipTest(
"Resizing with Lanczos interpolation is "
"not supported by the PyTorch backend. "
f"Received: interpolation={interpolation}."
)
if interpolation == "bicubic" and antialias is False:
self.skipTest(
"Resizing with Bicubic interpolation in "
"PyTorch backend produces noise. Please "
"turn on anti-aliasing. "
f"Received: interpolation={interpolation}, "
f"antialias={antialias}."
)
# Unbatched case
if data_format == "channels_first":
x = np.random.random((3, 50, 50)) * 255
else:
x = np.random.random((50, 50, 3)) * 255
out = kimage.resize(
x,
size=(25, 25),
interpolation=interpolation,
antialias=antialias,
data_format=data_format,
)
if data_format == "channels_first":
x = np.transpose(x, (1, 2, 0))
ref_out = tf.image.resize(
x, size=(25, 25), method=interpolation, antialias=antialias
)
if data_format == "channels_first":
ref_out = np.transpose(ref_out, (2, 0, 1))
self.assertEqual(tuple(out.shape), tuple(ref_out.shape))
self.assertAllClose(ref_out, out, atol=0.3)
# Batched case
if data_format == "channels_first":
x = np.random.random((2, 3, 50, 50)) * 255
else:
x = np.random.random((2, 50, 50, 3)) * 255
out = kimage.resize(
x,
size=(25, 25),
interpolation=interpolation,
antialias=antialias,
data_format=data_format,
)
if data_format == "channels_first":
x = np.transpose(x, (0, 2, 3, 1))
ref_out = tf.image.resize(
x, size=(25, 25), method=interpolation, antialias=antialias
)
if data_format == "channels_first":
ref_out = np.transpose(ref_out, (0, 3, 1, 2))
self.assertEqual(tuple(out.shape), tuple(ref_out.shape))
self.assertAllClose(ref_out, out, atol=0.3)
@parameterized.parameters(
[
("bilinear", "constant", "channels_last"),
("nearest", "constant", "channels_last"),
("bilinear", "nearest", "channels_last"),
("nearest", "nearest", "channels_last"),
("bilinear", "wrap", "channels_last"),
("nearest", "wrap", "channels_last"),
("bilinear", "mirror", "channels_last"),
("nearest", "mirror", "channels_last"),
("bilinear", "reflect", "channels_last"),
("nearest", "reflect", "channels_last"),
("bilinear", "constant", "channels_first"),
]
)
def test_affine_transform(self, interpolation, fill_mode, data_format):
if backend.backend() == "tensorflow" and fill_mode == "mirror":
self.skipTest(
"In tensorflow backend, applying affine_transform with "
"fill_mode=mirror is not supported"
)
if backend.backend() == "tensorflow" and fill_mode == "wrap":
self.skipTest(
"In tensorflow backend, the numerical results of applying "
"affine_transform with fill_mode=wrap is inconsistent with"
"scipy"
)
# TODO: `nearest` interpolation in jax and torch causes random index
# shifting, resulting in significant differences in output which leads
# to failure
if backend.backend() in ("jax", "torch") and interpolation == "nearest":
self.skipTest(
f"In {backend.backend()} backend, "
f"interpolation={interpolation} causes index shifting and "
"leads test failure"
)
# Unbatched case
if data_format == "channels_first":
x = np.random.random((3, 50, 50)).astype("float32") * 255
else:
x = np.random.random((50, 50, 3)).astype("float32") * 255
transform = np.random.random(size=(6)).astype("float32")
transform = np.pad(transform, (0, 2)) # makes c0, c1 always 0
out = kimage.affine_transform(
x,
transform,
interpolation=interpolation,
fill_mode=fill_mode,
data_format=data_format,
)
if data_format == "channels_first":
x = np.transpose(x, (1, 2, 0))
coordinates = _compute_affine_transform_coordinates(x, transform)
ref_out = _fixed_map_coordinates(
x,
coordinates,
order=AFFINE_TRANSFORM_INTERPOLATIONS[interpolation],
fill_mode=fill_mode,
)
if data_format == "channels_first":
ref_out = np.transpose(ref_out, (2, 0, 1))
self.assertEqual(tuple(out.shape), tuple(ref_out.shape))
self.assertAllClose(ref_out, out, atol=1e-3, rtol=1e-3)
# Batched case
if data_format == "channels_first":
x = np.random.random((2, 3, 50, 50)).astype("float32") * 255
else:
x = np.random.random((2, 50, 50, 3)).astype("float32") * 255
transform = np.random.random(size=(2, 6)).astype("float32")
transform = np.pad(transform, [(0, 0), (0, 2)]) # makes c0, c1 always 0
out = kimage.affine_transform(
x,
transform,
interpolation=interpolation,
fill_mode=fill_mode,
data_format=data_format,
)
if data_format == "channels_first":
x = np.transpose(x, (0, 2, 3, 1))
coordinates = _compute_affine_transform_coordinates(x, transform)
ref_out = np.stack(
[
_fixed_map_coordinates(
x[i],
coordinates[i],
order=AFFINE_TRANSFORM_INTERPOLATIONS[interpolation],
fill_mode=fill_mode,
)
for i in range(x.shape[0])
],
axis=0,
)
if data_format == "channels_first":
ref_out = np.transpose(ref_out, (0, 3, 1, 2))
self.assertEqual(tuple(out.shape), tuple(ref_out.shape))
self.assertAllClose(ref_out, out, atol=1e-3, rtol=1e-3)
@parameterized.parameters(
[
((5, 5), None, 1, "valid", "channels_last"),
((3, 3), (2, 2), 1, "valid", "channels_last"),
((5, 5), None, 1, "valid", "channels_first"),
((3, 3), (2, 2), 1, "valid", "channels_first"),
((5, 5), None, 1, "same", "channels_last"),
((3, 3), (2, 2), 1, "same", "channels_last"),
((5, 5), None, 1, "same", "channels_first"),
((3, 3), (2, 2), 1, "same", "channels_first"),
((5, 5), (1, 1), 3, "same", "channels_first"),
((5, 5), (2, 2), 3, "same", "channels_first"),
((5, 5), (2, 2), 3, "same", "channels_last"),
]
)
def test_extract_patches(
self, size, strides, dilation_rate, padding, data_format
):
if (
data_format == "channels_first"
and backend.backend() == "tensorflow"
):
pytest.skip("channels_first unsupported on CPU with TF")
if (
isinstance(strides, tuple)
and backend.backend() == "tensorflow"
and dilation_rate > 1
):
pytest.skip("dilation_rate>1 with strides>1 not supported with TF")
if data_format == "channels_first":
image = np.random.uniform(size=(1, 3, 20, 20))
else:
image = np.random.uniform(size=(1, 20, 20, 3))
patch_h, patch_w = size[0], size[1]
if strides is None:
strides_h, strides_w = patch_h, patch_w
else:
strides_h, strides_w = strides[0], strides[1]
patches_out = kimage.extract_patches(
backend.convert_to_tensor(image, dtype="float32"),
size=size,
strides=strides,
dilation_rate=dilation_rate,
padding=padding,
data_format=data_format,
)
if data_format == "channels_first":
patches_out = backend.numpy.transpose(
patches_out, axes=[0, 2, 3, 1]
)
if data_format == "channels_first":
image = np.transpose(image, [0, 2, 3, 1])
patches_ref = tf.image.extract_patches(
image,
sizes=(1, patch_h, patch_w, 1),
strides=(1, strides_h, strides_w, 1),
rates=(1, dilation_rate, dilation_rate, 1),
padding=padding.upper(),
)
self.assertEqual(tuple(patches_out.shape), tuple(patches_ref.shape))
self.assertAllClose(
patches_ref.numpy(), backend.convert_to_numpy(patches_out), atol=0.3
)
@parameterized.product(
# (input_shape, coordinates_shape)
shape=[((5,), (7,)), ((3, 4, 5), (2, 3, 4))],
# TODO: scipy.ndimage.map_coordinates does not support float16
# TODO: torch cpu does not support round & floor for float16
dtype=["uint8", "int32", "float32"],
order=[0, 1],
fill_mode=["constant", "nearest", "wrap", "mirror", "reflect"],
)
def test_map_coordinates(self, shape, dtype, order, fill_mode):
input_shape, coordinates_shape = shape
input = np.arange(math.prod(input_shape), dtype=dtype).reshape(
input_shape
)
coordinates_dtype = "float32" if "int" in dtype else dtype
coordinates = [
(size - 1)
* np.random.uniform(size=coordinates_shape).astype(
coordinates_dtype
)
for size in input_shape
]
output = kimage.map_coordinates(input, coordinates, order, fill_mode)
expected = _fixed_map_coordinates(input, coordinates, order, fill_mode)
self.assertAllClose(output, expected)
|
keras-core/keras_core/ops/image_test.py/0
|
{
"file_path": "keras-core/keras_core/ops/image_test.py",
"repo_id": "keras-core",
"token_count": 7818
}
| 51 |
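The affine-transform helper in the test file above builds per-pixel sampling coordinates from an 8-element transform vector. As a rough, standalone illustration only (not part of the test file; the reading of the vector as [a0, a1, a2, b0, b1, b2, c0, c1] is inferred from the swaps and padding in `_compute_affine_transform_coordinates` and should be treated as an assumption), a single output pixel can be mapped back to the input location it samples from with plain NumPy:
import numpy as np
def source_coords(transform, y_out, x_out):
    # Hypothetical helper: interpret transform as
    # [a0, a1, a2, b0, b1, b2, c0, c1] and map an output pixel
    # (x_out, y_out) to the input location it samples from.
    a0, a1, a2, b0, b1, b2, c0, c1 = transform
    k = c0 * x_out + c1 * y_out + 1.0  # projective term; stays 1.0 for affine
    x_in = (a0 * x_out + a1 * y_out + a2) / k
    y_in = (b0 * x_out + b1 * y_out + b2) / k
    return y_in, x_in
# The identity transform leaves every pixel in place.
identity = np.array([1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
print(source_coords(identity, y_out=3, x_out=7))  # -> (3.0, 7.0)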
from keras_core import ops
from keras_core.api_export import keras_core_export
from keras_core.optimizers import optimizer
@keras_core_export(["keras_core.optimizers.Adadelta"])
class Adadelta(optimizer.Optimizer):
"""Optimizer that implements the Adadelta algorithm.
Adadelta optimization is a stochastic gradient descent method that is based
on adaptive learning rate per dimension to address two drawbacks:
- The continual decay of learning rates throughout training.
- The need for a manually selected global learning rate.
Adadelta is a more robust extension of Adagrad that adapts learning rates
based on a moving window of gradient updates, instead of accumulating all
past gradients. This way, Adadelta continues learning even when many updates
have been done. Compared to Adagrad, in the original version of Adadelta you
don't have to set an initial learning rate. In this version, the initial
learning rate can be set, as in most other Keras optimizers.
Args:
learning_rate: A float, a
`keras_core.optimizers.schedules.LearningRateSchedule` instance, or
a callable that takes no arguments and returns the actual value to
use. The learning rate. Defaults to `0.001`. Note that `Adadelta`
tends to benefit from higher initial learning rate values compared
to other optimizers. To match the exact form in the original paper,
use 1.0.
rho: A floating point value. The decay rate. Defaults to `0.95`.
epsilon: Small floating point value for maintaining numerical stability.
{{base_optimizer_keyword_args}}
Reference:
- [Zeiler, 2012](http://arxiv.org/abs/1212.5701)
"""
def __init__(
self,
learning_rate=0.001,
rho=0.95,
epsilon=1e-7,
weight_decay=None,
clipnorm=None,
clipvalue=None,
global_clipnorm=None,
use_ema=False,
ema_momentum=0.99,
ema_overwrite_frequency=None,
name="adadelta",
**kwargs,
):
super().__init__(
learning_rate=learning_rate,
weight_decay=weight_decay,
clipnorm=clipnorm,
clipvalue=clipvalue,
global_clipnorm=global_clipnorm,
use_ema=use_ema,
ema_momentum=ema_momentum,
ema_overwrite_frequency=ema_overwrite_frequency,
name=name,
**kwargs,
)
self.rho = rho
self.epsilon = epsilon
def build(self, var_list):
if self.built:
return
super().build(var_list)
self._accumulated_grads = []
self._accumulated_delta_vars = []
for var in var_list:
self._accumulated_grads.append(
self.add_variable_from_reference(var, "accumulated_grad")
)
self._accumulated_delta_vars.append(
self.add_variable_from_reference(var, "accumulated_delta_var")
)
def update_step(self, grad, variable, learning_rate):
"""Update step given gradient and the associated model variable."""
lr = ops.cast(learning_rate, variable.dtype)
grad = ops.cast(grad, variable.dtype)
rho = self.rho
accumulated_grad = self._accumulated_grads[
self._get_variable_index(variable)
]
accumulated_delta_var = self._accumulated_delta_vars[
self._get_variable_index(variable)
]
def rms(x):
return ops.sqrt(x + self.epsilon)
accumulated_grad.assign(
rho * accumulated_grad + (1 - rho) * grad * grad
)
delta_var = -rms(accumulated_delta_var) * grad / rms(accumulated_grad)
accumulated_delta_var.assign(
rho * accumulated_delta_var + (1 - rho) * delta_var * delta_var
)
variable.assign(variable + lr * delta_var)
def get_config(self):
config = super().get_config()
config.update(
{
"rho": self.rho,
"epsilon": self.epsilon,
}
)
return config
Adadelta.__doc__ = Adadelta.__doc__.replace(
"{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)
|
keras-core/keras_core/optimizers/adadelta.py/0
|
{
"file_path": "keras-core/keras_core/optimizers/adadelta.py",
"repo_id": "keras-core",
"token_count": 1891
}
| 52 |
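To make the arithmetic in `update_step` above easier to follow, here is a minimal NumPy re-statement of the same update for a single scalar parameter. It is an illustrative sketch only (the variable names are invented for the example), not part of the optimizer:
import numpy as np
rho, epsilon, lr = 0.95, 1e-7, 1.0
acc_grad, acc_delta, w = 0.0, 0.0, 2.0  # running statistics and one weight
def rms(x):
    return np.sqrt(x + epsilon)
grad = 0.5
acc_grad = rho * acc_grad + (1 - rho) * grad * grad
delta = -rms(acc_delta) * grad / rms(acc_grad)   # step scaled by the RMS ratio
acc_delta = rho * acc_delta + (1 - rho) * delta * delta
w = w + lr * delta
print(w)  # the weight moves opposite the gradient sign, by a small RMS-scaled step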
import numpy as np
import pytest
import keras_core
from keras_core import backend
from keras_core import ops
from keras_core import testing
from keras_core.optimizers.lion import Lion
class LionTest(testing.TestCase):
def test_config(self):
optimizer = Lion(
learning_rate=0.5,
beta_1=0.5,
beta_2=0.67,
)
self.run_class_serialization_test(optimizer)
def test_single_step(self):
optimizer = Lion(learning_rate=0.5)
grads = ops.array([1.0, 6.0, 7.0, 2.0])
vars = backend.Variable([1.0, 2.0, 3.0, 4.0])
optimizer.apply_gradients(zip([grads], [vars]))
self.assertAllClose(vars, [0.5, 1.5, 2.5, 3.5], rtol=1e-4, atol=1e-4)
def test_weight_decay(self):
grads, var1, var2, var3 = (
ops.zeros(()),
backend.Variable(2.0),
backend.Variable(2.0, name="exclude"),
backend.Variable(2.0),
)
optimizer_1 = Lion(learning_rate=1.0, weight_decay=0.004)
optimizer_1.apply_gradients(zip([grads], [var1]))
optimizer_2 = Lion(learning_rate=1.0, weight_decay=0.004)
optimizer_2.exclude_from_weight_decay(var_names=["exclude"])
optimizer_2.apply_gradients(zip([grads, grads], [var1, var2]))
optimizer_3 = Lion(learning_rate=1.0, weight_decay=0.004)
optimizer_3.exclude_from_weight_decay(var_list=[var3])
optimizer_3.apply_gradients(zip([grads, grads], [var1, var3]))
self.assertAlmostEqual(var1.numpy(), 1.9760959, decimal=6)
self.assertAlmostEqual(var2.numpy(), 2.0, decimal=6)
self.assertAlmostEqual(var3.numpy(), 2.0, decimal=6)
def test_correctness_with_golden(self):
optimizer = Lion()
x = backend.Variable(np.ones([10]))
grads = ops.arange(0.1, 1.1, 0.1)
first_grads = ops.full((10,), 0.01)
golden = np.tile(
[[0.999], [0.998], [0.997], [0.996], [0.995]],
(1, 10),
)
optimizer.apply_gradients(zip([first_grads], [x]))
for i in range(5):
self.assertAllClose(x, golden[i], rtol=5e-4, atol=5e-4)
optimizer.apply_gradients(zip([grads], [x]))
def test_clip_norm(self):
optimizer = Lion(clipnorm=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [2**0.5 / 2, 2**0.5 / 2])
def test_clip_value(self):
optimizer = Lion(clipvalue=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [1.0, 1.0])
@pytest.mark.requires_trainable_backend
def test_ema(self):
# TODO: test correctness
model = keras_core.Sequential([keras_core.layers.Dense(10)])
model.compile(optimizer=Lion(use_ema=True), loss="mse")
x = keras_core.ops.zeros((1, 5))
y = keras_core.ops.zeros((1, 10))
model.fit(x, y)
|
keras-core/keras_core/optimizers/lion_test.py/0
|
{
"file_path": "keras-core/keras_core/optimizers/lion_test.py",
"repo_id": "keras-core",
"token_count": 1508
}
| 53 |
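The expectation in `test_clip_norm` above can be checked by hand: a gradient of [100, 100] has L2 norm 100·sqrt(2) ≈ 141.42, so rescaling it to norm 1 gives [sqrt(2)/2, sqrt(2)/2]. A small NumPy sketch of that computation, independent of the optimizer implementation:
import numpy as np
grad = np.array([100.0, 100.0])
clipnorm = 1.0
norm = np.linalg.norm(grad)           # 100 * sqrt(2) ~= 141.42
if norm > clipnorm:
    grad = grad * (clipnorm / norm)   # rescale so the norm equals clipnorm
print(grad)                           # ~[0.7071, 0.7071] == [sqrt(2)/2, sqrt(2)/2]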
import numpy as np
import pytest
from absl.testing import parameterized
import keras_core
from keras_core import backend
from keras_core import ops
from keras_core import testing
from keras_core.random import random
from keras_core.random import seed_generator
class RandomTest(testing.TestCase, parameterized.TestCase):
@parameterized.parameters(
{"seed": 10, "shape": (5,), "mean": 0, "stddev": 1},
{"seed": 10, "shape": (2, 3), "mean": 0, "stddev": 1},
{"seed": 10, "shape": (2, 3, 4), "mean": 0, "stddev": 1},
{"seed": 10, "shape": (2, 3), "mean": 10, "stddev": 1},
{"seed": 10, "shape": (2, 3), "mean": 10, "stddev": 3},
)
def test_normal(self, seed, shape, mean, stddev):
np.random.seed(seed)
np_res = np.random.normal(loc=mean, scale=stddev, size=shape)
res = random.normal(shape, mean=mean, stddev=stddev, seed=seed)
self.assertEqual(res.shape, shape)
self.assertEqual(res.shape, np_res.shape)
@parameterized.parameters(
{"seed": 10, "shape": (5,), "minval": 0, "maxval": 1},
{"seed": 10, "shape": (2, 3), "minval": 0, "maxval": 1},
{"seed": 10, "shape": (2, 3, 4), "minval": 0, "maxval": 2},
{"seed": 10, "shape": (2, 3), "minval": -1, "maxval": 1},
{"seed": 10, "shape": (2, 3), "minval": 1, "maxval": 3},
)
def test_uniform(self, seed, shape, minval, maxval):
np.random.seed(seed)
np_res = np.random.uniform(low=minval, high=maxval, size=shape)
res = random.uniform(shape, minval=minval, maxval=maxval, seed=seed)
self.assertEqual(res.shape, shape)
self.assertEqual(res.shape, np_res.shape)
self.assertLessEqual(ops.max(res), maxval)
self.assertGreaterEqual(ops.max(res), minval)
@parameterized.parameters(
{"seed": 10, "num_samples": 1, "batch_size": 1},
{"seed": 10, "num_samples": 5, "batch_size": 2},
{"seed": 10, "num_samples": 10, "batch_size": 4},
{"seed": 10, "num_samples": 15, "batch_size": 8},
)
def test_categorical(self, seed, num_samples, batch_size):
np.random.seed(seed)
        # Create logits that definitely favor the batch index after a softmax
        # is applied. Without a softmax, this would be close to random.
logits = np.eye(batch_size) * 1e5 + 1e6
res = random.categorical(logits, num_samples, seed=seed)
# Outputs should have shape `(batch_size, num_samples)`, where each
# output index matches the batch index.
self.assertEqual(res.shape, (batch_size, num_samples))
expected = np.tile(np.arange(batch_size)[:, None], (1, num_samples))
self.assertAllClose(res, expected)
def test_categorical_errors(self):
with self.assertRaises(ValueError):
random.categorical(np.ones((5,)), 5)
with self.assertRaises(ValueError):
random.categorical(np.ones((5, 5, 5)), 5)
@parameterized.parameters(
{"seed": 10, "shape": (5,), "min": 0, "max": 10, "dtype": "uint16"},
{"seed": 10, "shape": (2, 3), "min": 0, "max": 10, "dtype": "uint32"},
{"seed": 10, "shape": (2, 3, 4), "min": 0, "max": 2, "dtype": "int8"},
{"seed": 10, "shape": (2, 3), "min": -1, "max": 1, "dtype": "int16"},
{"seed": 10, "shape": (2, 3), "min": 1, "max": 3, "dtype": "int32"},
)
def test_randint(self, seed, shape, min, max, dtype):
np.random.seed(seed)
np_res = np.random.randint(low=min, high=max, size=shape)
res = random.randint(
shape, minval=min, maxval=max, seed=seed, dtype=dtype
)
self.assertEqual(res.shape, shape)
self.assertEqual(res.shape, np_res.shape)
self.assertLessEqual(ops.max(res), max)
self.assertGreaterEqual(ops.max(res), min)
# Torch has incomplete dtype support for uints; will remap some dtypes.
if keras_core.backend.backend() != "torch":
self.assertEqual(backend.standardize_dtype(res.dtype), dtype)
@parameterized.parameters(
{"seed": 10, "shape": (5,), "mean": 0, "stddev": 1},
{"seed": 10, "shape": (2, 3), "mean": 0, "stddev": 1},
{"seed": 10, "shape": (2, 3, 4), "mean": 0, "stddev": 1},
{"seed": 10, "shape": (2, 3), "mean": 10, "stddev": 1},
{"seed": 10, "shape": (2, 3), "mean": 10, "stddev": 3},
# Test list shapes.
{"seed": 10, "shape": [2, 3], "mean": 10, "stddev": 3},
)
def test_truncated_normal(self, seed, shape, mean, stddev):
np.random.seed(seed)
np_res = np.random.normal(loc=mean, scale=stddev, size=shape)
res = random.truncated_normal(
shape, mean=mean, stddev=stddev, seed=seed
)
self.assertEqual(res.shape, tuple(shape))
self.assertEqual(res.shape, np_res.shape)
self.assertLessEqual(ops.max(res), mean + 2 * stddev)
self.assertGreaterEqual(ops.max(res), mean - 2 * stddev)
def test_dropout(self):
x = ops.ones((3, 5))
self.assertAllClose(random.dropout(x, rate=0, seed=0), x)
x_res = random.dropout(x, rate=0.8, seed=0)
self.assertGreater(ops.max(x_res), ops.max(x))
self.assertGreater(ops.sum(x_res == 0), 2)
@pytest.mark.skipif(
keras_core.backend.backend() != "jax",
reason="This test requires `jax` as the backend.",
)
def test_dropout_jax_jit_stateless(self):
import jax
import jax.numpy as jnp
x = ops.ones(3)
@jax.jit
def train_step(x):
with keras_core.backend.StatelessScope():
x = keras_core.layers.Dropout(rate=0.1)(x, training=True)
return x
x = train_step(x)
self.assertIsInstance(x, jnp.ndarray)
def test_dropout_noise_shape(self):
inputs = ops.ones((2, 3, 5, 7))
x = random.dropout(
inputs, rate=0.3, noise_shape=[None, 3, 5, None], seed=0
)
self.assertEqual(x.shape, (2, 3, 5, 7))
@pytest.mark.skipif(
keras_core.backend.backend() != "jax",
reason="This test requires `jax` as the backend.",
)
def test_jax_rngkey_seed(self):
import jax
import jax.numpy as jnp
seed = 1234
rng = jax.random.PRNGKey(seed)
self.assertEqual(rng.shape, (2,))
self.assertEqual(rng.dtype, jnp.uint32)
x = random.randint((3, 5), 0, 10, seed=rng)
self.assertIsInstance(x, jnp.ndarray)
@pytest.mark.skipif(
keras_core.backend.backend() != "jax",
reason="This test requires `jax` as the backend.",
)
def test_jax_unseed_disallowed_during_tracing(self):
import jax
@jax.jit
def jit_fn():
return random.randint((2, 2), 0, 10, seed=None)
with self.assertRaisesRegex(
ValueError, "you should only use seeded random ops"
):
jit_fn()
def test_global_seed_generator(self):
# Check that unseeded RNG calls use and update global_rng_state()
def random_numbers(seed):
rng_state = seed_generator.global_seed_generator().state
rng_state.assign(seed)
x = random.normal((), seed=None)
y = random.normal((), seed=None)
return x, y, rng_state.value
if backend.backend() == "tensorflow":
import tensorflow as tf
random_numbers = tf.function(jit_compile=True)(random_numbers)
seed = ops.zeros((2,))
seed0 = ops.convert_to_numpy(seed)
x1, y1, seed = random_numbers(seed)
x1 = ops.convert_to_numpy(x1)
y1 = ops.convert_to_numpy(y1)
seed1 = ops.convert_to_numpy(seed)
x2, y2, seed = random_numbers(seed)
x2 = ops.convert_to_numpy(x2)
y2 = ops.convert_to_numpy(y2)
seed2 = ops.convert_to_numpy(seed)
x3, y3, seed = random_numbers(seed)
x3 = ops.convert_to_numpy(x3)
y3 = ops.convert_to_numpy(y3)
seed3 = ops.convert_to_numpy(seed)
self.assertNotEqual(seed0[1], seed1[1])
self.assertNotEqual(seed1[1], seed2[1])
self.assertNotEqual(seed2[1], seed3[1])
self.assertGreater(np.abs(x1 - y1), 1e-4)
self.assertGreater(np.abs(x2 - y2), 1e-4)
self.assertGreater(np.abs(x3 - y3), 1e-4)
self.assertGreater(np.abs(x1 - x2), 1e-4)
self.assertGreater(np.abs(x1 - x3), 1e-4)
self.assertGreater(np.abs(x2 - x3), 1e-4)
self.assertGreater(np.abs(y1 - y2), 1e-4)
self.assertGreater(np.abs(y1 - y3), 1e-4)
self.assertGreater(np.abs(y2 - y3), 1e-4)
seed_generator.global_seed_generator().state.assign(seed)
def test_shuffle(self):
x = np.arange(100).reshape(10, 10)
# Test axis=0
y = random.shuffle(x, seed=0)
self.assertFalse(np.all(x == ops.convert_to_numpy(y)))
self.assertAllClose(np.sum(x, axis=0), ops.sum(y, axis=0))
self.assertNotAllClose(np.sum(x, axis=1), ops.sum(y, axis=1))
# Test axis=1
y = random.shuffle(x, axis=1, seed=0)
self.assertFalse(np.all(x == ops.convert_to_numpy(y)))
self.assertAllClose(np.sum(x, axis=1), ops.sum(y, axis=1))
self.assertNotAllClose(np.sum(x, axis=0), ops.sum(y, axis=0))
|
keras-core/keras_core/random/random_test.py/0
|
{
"file_path": "keras-core/keras_core/random/random_test.py",
"repo_id": "keras-core",
"token_count": 4560
}
| 54 |
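The `test_global_seed_generator` test above relies on unseeded random calls drawing from, and advancing, a shared two-element seed state, so that consecutive calls differ and the state can be checkpointed. As a purely conceptual sketch (the real backends use counter-based RNGs; the names and seed derivation below are invented for illustration):
import numpy as np
state = [0, 0]  # [seed, counter] -- a stand-in for the generator's state
def unseeded_normal(shape):
    # Derive a concrete seed from the current state, then advance the
    # counter so the next unseeded call produces different values.
    rng = np.random.default_rng(state[0] * (2**32) + state[1])
    state[1] += 1
    return rng.normal(size=shape)
x = unseeded_normal(())
y = unseeded_normal(())
assert float(x) != float(y)  # the calls differ because the state advanced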
import numpy as np
def get_test_data(
train_samples, test_samples, input_shape, num_classes, random_seed=None
):
"""Generates test data to train a model on.
Args:
train_samples: Integer, how many training samples to generate.
test_samples: Integer, how many test samples to generate.
input_shape: Tuple of integers, shape of the inputs.
num_classes: Integer, number of classes for the data and targets.
random_seed: Integer, random seed used by Numpy to generate data.
Returns:
A tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
"""
if random_seed is not None:
np.random.seed(random_seed)
num_sample = train_samples + test_samples
templates = 2 * num_classes * np.random.random((num_classes,) + input_shape)
y = np.random.randint(0, num_classes, size=(num_sample,))
x = np.zeros((num_sample,) + input_shape, dtype=np.float32)
for i in range(num_sample):
x[i] = templates[y[i]] + np.random.normal(
loc=0, scale=1.0, size=input_shape
)
return (
(x[:train_samples], y[:train_samples]),
(x[train_samples:], y[train_samples:]),
)
|
keras-core/keras_core/testing/test_utils.py/0
|
{
"file_path": "keras-core/keras_core/testing/test_utils.py",
"repo_id": "keras-core",
"token_count": 489
}
| 55 |
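A quick usage sketch for the `get_test_data` helper above, showing the shapes it produces; the particular sample counts, input shape, class count, and seed here are arbitrary example values:
# Example: 100 training and 20 test samples of 8-dimensional inputs,
# spread across 3 classes.
(x_train, y_train), (x_test, y_test) = get_test_data(
    train_samples=100,
    test_samples=20,
    input_shape=(8,),
    num_classes=3,
    random_seed=42,
)
print(x_train.shape, y_train.shape)  # (100, 8) (100,)
print(x_test.shape, y_test.shape)    # (20, 8) (20,)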
import numpy as np
import pytest
import tensorflow as tf
from keras_core import backend
from keras_core import testing
from keras_core.trainers.data_adapters.torch_data_adapter import (
TorchDataLoaderAdapter,
)
@pytest.mark.skipif(
backend.backend() != "torch",
reason="Backend does not support TorchDataLoaderAdapter.",
)
class TestTorchDataLoaderAdapter(testing.TestCase):
def test_basic_dataloader(self):
import torch
from torch.utils.data import DataLoader
from torch.utils.data import TensorDataset
x = torch.normal(2, 3, size=(34, 4))
y = torch.normal(1, 3, size=(34, 2))
base_ds = TensorDataset(x, y)
base_dataloader = DataLoader(base_ds, batch_size=16)
adapter = TorchDataLoaderAdapter(base_dataloader)
self.assertEqual(adapter.num_batches, 3)
self.assertEqual(adapter.batch_size, 16)
self.assertEqual(adapter.has_partial_batch, True)
self.assertEqual(adapter.partial_batch_size, 2)
gen = adapter.get_numpy_iterator()
for i, batch in enumerate(gen):
self.assertEqual(len(batch), 2)
bx, by = batch
self.assertIsInstance(bx, np.ndarray)
self.assertIsInstance(by, np.ndarray)
self.assertEqual(bx.dtype, by.dtype)
self.assertEqual(bx.dtype, "float32")
if i < 2:
self.assertEqual(bx.shape, (16, 4))
self.assertEqual(by.shape, (16, 2))
else:
self.assertEqual(bx.shape, (2, 4))
self.assertEqual(by.shape, (2, 2))
ds = adapter.get_torch_dataloader()
for i, batch in enumerate(ds):
self.assertEqual(len(batch), 2)
bx, by = batch
self.assertIsInstance(bx, torch.Tensor)
self.assertIsInstance(by, torch.Tensor)
self.assertEqual(bx.dtype, by.dtype)
self.assertEqual(bx.dtype, torch.float32)
if i < 2:
self.assertEqual(tuple(bx.shape), (16, 4))
self.assertEqual(tuple(by.shape), (16, 2))
else:
self.assertEqual(tuple(bx.shape), (2, 4))
self.assertEqual(tuple(by.shape), (2, 2))
ds = adapter.get_tf_dataset()
for i, batch in enumerate(ds):
self.assertEqual(len(batch), 2)
bx, by = batch
self.assertIsInstance(bx, tf.Tensor)
self.assertIsInstance(by, tf.Tensor)
self.assertEqual(bx.dtype, by.dtype)
self.assertEqual(bx.dtype, tf.float32)
if i < 2:
self.assertEqual(tuple(bx.shape), (16, 4))
self.assertEqual(tuple(by.shape), (16, 2))
else:
self.assertEqual(tuple(bx.shape), (2, 4))
self.assertEqual(tuple(by.shape), (2, 2))
|
keras-core/keras_core/trainers/data_adapters/torch_data_adapter_test.py/0
|
{
"file_path": "keras-core/keras_core/trainers/data_adapters/torch_data_adapter_test.py",
"repo_id": "keras-core",
"token_count": 1470
}
| 56 |
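The expectations in the adapter test above follow from simple arithmetic: 34 samples in batches of 16 give ceil(34 / 16) = 3 batches, with a final partial batch of 34 - 2 * 16 = 2 samples. The same computation in plain Python, independent of the adapter:
import math
num_samples, batch_size = 34, 16
num_batches = math.ceil(num_samples / batch_size)  # 3
partial = num_samples % batch_size                 # 2 (0 would mean no partial batch)
print(num_batches, partial)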
import hashlib
import os
import pathlib
import re
import shutil
import tarfile
import urllib
import warnings
import zipfile
from urllib.request import urlretrieve
from keras_core.api_export import keras_core_export
from keras_core.backend import config
from keras_core.utils import io_utils
from keras_core.utils.module_utils import gfile
from keras_core.utils.progbar import Progbar
def path_to_string(path):
"""Convert `PathLike` objects to their string representation.
If given a non-string typed path object, converts it to its string
representation.
If the object passed to `path` is not among the above, then it is
returned unchanged. This allows e.g. passthrough of file objects
through this function.
Args:
path: `PathLike` object that represents a path
Returns:
A string representation of the path argument, if Python support exists.
"""
if isinstance(path, os.PathLike):
return os.fspath(path)
return path
def resolve_path(path):
return os.path.realpath(os.path.abspath(path))
def is_path_in_dir(path, base_dir):
return resolve_path(os.path.join(base_dir, path)).startswith(base_dir)
def is_link_in_dir(info, base):
tip = resolve_path(os.path.join(base, os.path.dirname(info.name)))
return is_path_in_dir(info.linkname, base_dir=tip)
def filter_safe_paths(members):
base_dir = resolve_path(".")
for finfo in members:
valid_path = False
if is_path_in_dir(finfo.name, base_dir):
valid_path = True
yield finfo
elif finfo.issym() or finfo.islnk():
if is_link_in_dir(finfo, base_dir):
valid_path = True
yield finfo
if not valid_path:
warnings.warn(
"Skipping invalid path during archive extraction: "
f"'{finfo.name}'.",
stacklevel=2,
)
def extract_archive(file_path, path=".", archive_format="auto"):
"""Extracts an archive if it matches a support format.
Supports `.tar`, `.tar.gz`, `.tar.bz`, and `.zip` formats.
Args:
file_path: Path to the archive file.
path: Where to extract the archive file.
archive_format: Archive format to try for extracting the file.
Options are `"auto"`, `"tar"`, `"zip"`, and `None`.
`"tar"` includes `.tar`, `.tar.gz`, and `.tar.bz` files.
The default `"auto"` uses `["tar", "zip"]`.
`None` or an empty list will return no matches found.
Returns:
`True` if a match was found and an archive extraction was completed,
`False` otherwise.
"""
if archive_format is None:
return False
if archive_format == "auto":
archive_format = ["tar", "zip"]
if isinstance(archive_format, str):
archive_format = [archive_format]
file_path = path_to_string(file_path)
path = path_to_string(path)
for archive_type in archive_format:
if archive_type == "tar":
open_fn = tarfile.open
is_match_fn = tarfile.is_tarfile
if archive_type == "zip":
open_fn = zipfile.ZipFile
is_match_fn = zipfile.is_zipfile
if is_match_fn(file_path):
with open_fn(file_path) as archive:
try:
if zipfile.is_zipfile(file_path):
# Zip archive.
archive.extractall(path)
else:
# Tar archive, perhaps unsafe. Filter paths.
archive.extractall(
path, members=filter_safe_paths(archive)
)
except (tarfile.TarError, RuntimeError, KeyboardInterrupt):
if os.path.exists(path):
if os.path.isfile(path):
os.remove(path)
else:
shutil.rmtree(path)
raise
return True
return False
@keras_core_export("keras_core.utils.get_file")
def get_file(
fname=None,
origin=None,
untar=False,
md5_hash=None,
file_hash=None,
cache_subdir="datasets",
hash_algorithm="auto",
extract=False,
archive_format="auto",
cache_dir=None,
):
"""Downloads a file from a URL if it not already in the cache.
By default the file at the url `origin` is downloaded to the
cache_dir `~/.keras`, placed in the cache_subdir `datasets`,
and given the filename `fname`. The final location of a file
`example.txt` would therefore be `~/.keras/datasets/example.txt`.
Files in `.tar`, `.tar.gz`, `.tar.bz`, and `.zip` formats can
also be extracted.
Passing a hash will verify the file after download. The command line
programs `shasum` and `sha256sum` can compute the hash.
Example:
```python
path_to_downloaded_file = get_file(
origin="https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz",
extract=True,
)
```
Args:
fname: Name of the file. If an absolute path, e.g. `"/path/to/file.txt"`
is specified, the file will be saved at that location.
If `None`, the name of the file at `origin` will be used.
origin: Original URL of the file.
        untar: Deprecated in favor of the `extract` argument.
            Boolean, whether the file should be decompressed.
        md5_hash: Deprecated in favor of the `file_hash` argument.
            md5 hash of the file for verification.
file_hash: The expected hash string of the file after download.
The sha256 and md5 hash algorithms are both supported.
cache_subdir: Subdirectory under the Keras cache dir where the file is
saved. If an absolute path, e.g. `"/path/to/folder"` is
specified, the file will be saved at that location.
        hash_algorithm: Select the hash algorithm to verify the file.
            Options are `"md5"`, `"sha256"`, and `"auto"`.
            The default `"auto"` detects the hash algorithm in use.
extract: True tries extracting the file as an Archive, like tar or zip.
archive_format: Archive format to try for extracting the file.
            Options are `"auto"`, `"tar"`, `"zip"`, and `None`.
            `"tar"` includes `.tar`, `.tar.gz`, and `.tar.bz` files.
            The default `"auto"` corresponds to `["tar", "zip"]`.
            `None` or an empty list will return no matches found.
        cache_dir: Location to store cached files. When `None`, it
            defaults to `$KERAS_HOME` if the `KERAS_HOME` environment
            variable is set, and to `~/.keras/` otherwise.
Returns:
Path to the downloaded file.
**⚠️ Warning on malicious downloads ⚠️**
Downloading something from the Internet carries a risk.
NEVER download a file/archive if you do not trust the source.
We recommend that you specify the `file_hash` argument
(if the hash of the source file is known) to make sure that the file you
are getting is the one you expect.
"""
if origin is None:
raise ValueError(
'Please specify the "origin" argument (URL of the file '
"to download)."
)
if cache_dir is None:
cache_dir = config.keras_home()
if md5_hash is not None and file_hash is None:
file_hash = md5_hash
hash_algorithm = "md5"
datadir_base = os.path.expanduser(cache_dir)
if not os.access(datadir_base, os.W_OK):
datadir_base = os.path.join("/tmp", ".keras")
datadir = os.path.join(datadir_base, cache_subdir)
os.makedirs(datadir, exist_ok=True)
fname = path_to_string(fname)
if not fname:
fname = os.path.basename(urllib.parse.urlsplit(origin).path)
if not fname:
raise ValueError(
"Can't parse the file name from the origin provided: "
f"'{origin}'."
"Please specify the `fname` as the input param."
)
if untar:
if fname.endswith(".tar.gz"):
fname = pathlib.Path(fname)
# The 2 `.with_suffix()` are because of `.tar.gz` as pathlib
# considers it as 2 suffixes.
fname = fname.with_suffix("").with_suffix("")
fname = str(fname)
untar_fpath = os.path.join(datadir, fname)
fpath = untar_fpath + ".tar.gz"
else:
fpath = os.path.join(datadir, fname)
download = False
if os.path.exists(fpath):
# File found; verify integrity if a hash was provided.
if file_hash is not None:
if not validate_file(fpath, file_hash, algorithm=hash_algorithm):
io_utils.print_msg(
"A local file was found, but it seems to be "
f"incomplete or outdated because the {hash_algorithm} "
"file hash does not match the original value of "
f"{file_hash} "
"so we will re-download the data."
)
download = True
else:
download = True
if download:
io_utils.print_msg(f"Downloading data from {origin}")
class DLProgbar:
"""Manage progress bar state for use in urlretrieve."""
def __init__(self):
self.progbar = None
self.finished = False
def __call__(self, block_num, block_size, total_size):
if not self.progbar:
if total_size == -1:
total_size = None
self.progbar = Progbar(total_size)
current = block_num * block_size
if total_size is None:
self.progbar.update(current)
else:
if current < total_size:
self.progbar.update(current)
elif not self.finished:
self.progbar.update(self.progbar.target)
self.finished = True
error_msg = "URL fetch failure on {}: {} -- {}"
try:
try:
urlretrieve(origin, fpath, DLProgbar())
except urllib.error.HTTPError as e:
raise Exception(error_msg.format(origin, e.code, e.msg))
except urllib.error.URLError as e:
raise Exception(error_msg.format(origin, e.errno, e.reason))
except (Exception, KeyboardInterrupt):
if os.path.exists(fpath):
os.remove(fpath)
raise
# Validate download if succeeded and user provided an expected hash
# Security conscious users would get the hash of the file from a
# separate channel and pass it to this API to prevent MITM / corruption:
if os.path.exists(fpath) and file_hash is not None:
if not validate_file(fpath, file_hash, algorithm=hash_algorithm):
raise ValueError(
"Incomplete or corrupted file detected. "
f"The {hash_algorithm} "
"file hash does not match the provided value "
f"of {file_hash}."
)
if untar:
if not os.path.exists(untar_fpath):
status = extract_archive(fpath, datadir, archive_format="tar")
if not status:
warnings.warn("Could not extract archive.", stacklevel=2)
return untar_fpath
if extract:
status = extract_archive(fpath, datadir, archive_format)
if not status:
warnings.warn("Could not extract archive.", stacklevel=2)
# TODO: return extracted fpath if we extracted an archive,
# rather than the archive path.
return fpath
def resolve_hasher(algorithm, file_hash=None):
"""Returns hash algorithm as hashlib function."""
if algorithm == "sha256":
return hashlib.sha256()
if algorithm == "auto" and file_hash is not None and len(file_hash) == 64:
return hashlib.sha256()
# This is used only for legacy purposes.
return hashlib.md5()
def hash_file(fpath, algorithm="sha256", chunk_size=65535):
"""Calculates a file sha256 or md5 hash.
Example:
>>> hash_file('/path/to/file.zip')
'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
Args:
fpath: Path to the file being validated.
algorithm: Hash algorithm, one of `"auto"`, `"sha256"`, or `"md5"`.
The default `"auto"` detects the hash algorithm in use.
chunk_size: Bytes to read at a time, important for large files.
Returns:
The file hash.
"""
if isinstance(algorithm, str):
hasher = resolve_hasher(algorithm)
else:
hasher = algorithm
with open(fpath, "rb") as fpath_file:
for chunk in iter(lambda: fpath_file.read(chunk_size), b""):
hasher.update(chunk)
return hasher.hexdigest()
def validate_file(fpath, file_hash, algorithm="auto", chunk_size=65535):
"""Validates a file against a sha256 or md5 hash.
Args:
fpath: path to the file being validated
file_hash: The expected hash string of the file.
The sha256 and md5 hash algorithms are both supported.
algorithm: Hash algorithm, one of `"auto"`, `"sha256"`, or `"md5"`.
The default `"auto"` detects the hash algorithm in use.
chunk_size: Bytes to read at a time, important for large files.
Returns:
Boolean, whether the file is valid.
"""
hasher = resolve_hasher(algorithm, file_hash)
if str(hash_file(fpath, hasher, chunk_size)) == str(file_hash):
return True
else:
return False
def is_remote_path(filepath):
"""
Determines if a given filepath indicates a remote location.
This function checks if the filepath represents a known remote pattern
    such as GCS (`/gcs`), CNS (`/cns`), CFS (`/cfs`), HDFS (`/hdfs`), or a
    URL-style scheme (anything containing `://`).
Args:
filepath (str): The path to be checked.
Returns:
        bool: True if the filepath is a recognized remote path, otherwise False.
"""
if re.match(r"^(/cns|/cfs|/gcs|/hdfs|.*://).*$", str(filepath)):
return True
return False
# Below are gfile-replacement utils.
def _raise_if_no_gfile(path):
raise ValueError(
"Handling remote paths requires installing TensorFlow "
f"(in order to use gfile). Received path: {path}"
)
def exists(path):
if is_remote_path(path):
if gfile.available:
return gfile.exists(path)
else:
_raise_if_no_gfile(path)
return os.path.exists(path)
def File(path, mode="r"):
if is_remote_path(path):
if gfile.available:
return gfile.GFile(path, mode=mode)
else:
_raise_if_no_gfile(path)
return open(path, mode=mode)
def join(path, *paths):
if is_remote_path(path):
if gfile.available:
return gfile.join(path, *paths)
else:
_raise_if_no_gfile(path)
return os.path.join(path, *paths)
def isdir(path):
if is_remote_path(path):
if gfile.available:
return gfile.isdir(path)
else:
_raise_if_no_gfile(path)
return os.path.isdir(path)
def rmtree(path):
if is_remote_path(path):
if gfile.available:
return gfile.rmtree(path)
else:
_raise_if_no_gfile(path)
return shutil.rmtree(path)
def listdir(path):
if is_remote_path(path):
if gfile.available:
return gfile.listdir(path)
else:
_raise_if_no_gfile(path)
return os.listdir(path)
def copy(src, dst):
if is_remote_path(src) or is_remote_path(dst):
if gfile.available:
return gfile.copy(src, dst)
else:
_raise_if_no_gfile(f"src={src} dst={dst}")
return shutil.copy(src, dst)
def makedirs(path):
if is_remote_path(path):
if gfile.available:
return gfile.makedirs(path)
else:
_raise_if_no_gfile(path)
return os.makedirs(path)
|
keras-core/keras_core/utils/file_utils.py/0
|
{
"file_path": "keras-core/keras_core/utils/file_utils.py",
"repo_id": "keras-core",
"token_count": 7314
}
| 57 |
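The download-verification path in `get_file` above boils down to hashing the file on disk and comparing the digest against the expected value. A standalone sketch of that check using only the standard library; the file path and expected digest below are placeholders for illustration:
import hashlib
def sha256_of(path, chunk_size=65535):
    # Stream the file in chunks so large downloads do not load into memory.
    hasher = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            hasher.update(chunk)
    return hasher.hexdigest()
expected = "0" * 64  # placeholder digest, for illustration only
actual = sha256_of("/tmp/example_download.bin")
if actual != expected:
    raise ValueError("Hash mismatch: file is incomplete or corrupted.")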
import binascii
import codecs
import marshal
import os
import types as python_types
def default(method):
"""Decorates a method to detect overrides in subclasses."""
method._is_default = True
return method
def is_default(method):
"""Check if a method is decorated with the `default` wrapper."""
return getattr(method, "_is_default", False)
def func_dump(func):
"""Serializes a user-defined function.
Args:
func: the function to serialize.
Returns:
A tuple `(code, defaults, closure)`.
"""
if os.name == "nt":
raw_code = marshal.dumps(func.__code__).replace(b"\\", b"/")
code = codecs.encode(raw_code, "base64").decode("ascii")
else:
raw_code = marshal.dumps(func.__code__)
code = codecs.encode(raw_code, "base64").decode("ascii")
defaults = func.__defaults__
if func.__closure__:
closure = tuple(c.cell_contents for c in func.__closure__)
else:
closure = None
return code, defaults, closure
def func_load(code, defaults=None, closure=None, globs=None):
"""Deserializes a user defined function.
Args:
code: bytecode of the function.
defaults: defaults of the function.
closure: closure of the function.
globs: dictionary of global objects.
Returns:
A function object.
"""
if isinstance(code, (tuple, list)): # unpack previous dump
code, defaults, closure = code
if isinstance(defaults, list):
defaults = tuple(defaults)
def ensure_value_to_cell(value):
"""Ensures that a value is converted to a python cell object.
Args:
            value: Any value that needs to be cast to the cell type.
Returns:
A value wrapped as a cell object (see function "func_load")
"""
def dummy_fn():
value # just access it so it gets captured in .__closure__
cell_value = dummy_fn.__closure__[0]
if not isinstance(value, type(cell_value)):
return cell_value
return value
if closure is not None:
closure = tuple(ensure_value_to_cell(_) for _ in closure)
try:
raw_code = codecs.decode(code.encode("ascii"), "base64")
except (UnicodeEncodeError, binascii.Error):
raw_code = code.encode("raw_unicode_escape")
code = marshal.loads(raw_code)
if globs is None:
globs = globals()
return python_types.FunctionType(
code, globs, name=code.co_name, argdefs=defaults, closure=closure
)
def to_list(x):
"""Normalizes a list/tensor into a list.
If a tensor is passed, we return
a list of size 1 containing the tensor.
Args:
x: target object to be normalized.
Returns:
A list.
"""
if isinstance(x, list):
return x
return [x]
def remove_long_seq(maxlen, seq, label):
"""Removes sequences that exceed the maximum length.
Args:
maxlen: Int, maximum length of the output sequences.
seq: List of lists, where each sublist is a sequence.
label: List where each element is an integer.
Returns:
new_seq, new_label: shortened lists for `seq` and `label`.
"""
new_seq, new_label = [], []
for x, y in zip(seq, label):
if len(x) < maxlen:
new_seq.append(x)
new_label.append(y)
return new_seq, new_label
def removeprefix(x, prefix):
"""Backport of `removeprefix` from PEP-616 (Python 3.9+)"""
if len(prefix) > 0 and x.startswith(prefix):
return x[len(prefix) :]
else:
return x
def removesuffix(x, suffix):
"""Backport of `removesuffix` from PEP-616 (Python 3.9+)"""
if len(suffix) > 0 and x.endswith(suffix):
return x[: -len(suffix)]
else:
return x
|
keras-core/keras_core/utils/python_utils.py/0
|
{
"file_path": "keras-core/keras_core/utils/python_utils.py",
"repo_id": "keras-core",
"token_count": 1579
}
| 58 |
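A short usage sketch for the PEP-616 backports defined above; the strings are arbitrary examples:
print(removeprefix("layer_dense_1", "layer_"))  # "dense_1"
print(removeprefix("dense_1", "layer_"))        # unchanged: "dense_1"
print(removesuffix("model.keras", ".keras"))    # "model"
print(removesuffix("model.keras", ".h5"))       # unchanged: "model.keras"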
import inspect
import os
import traceback
import types
from functools import wraps
import tree
from keras_core import backend
from keras_core.api_export import keras_core_export
from keras_core.backend.common import global_state
_EXCLUDED_PATHS = (
os.path.abspath(os.path.join(__file__, "..", "..")),
os.path.join("tensorflow", "python"),
)
@keras_core_export("keras_core.config.enable_traceback_filtering")
def enable_traceback_filtering():
"""Turn on traceback filtering.
Raw Keras tracebacks (also known as stack traces)
involve many internal frames, which can be
challenging to read through, while not being actionable for end users.
By default, Keras filters internal frames in most exceptions that it
raises, to keep traceback short, readable, and focused on what's
actionable for you (your own code).
See also `keras_core.config.disable_traceback_filtering()` and
`keras_core.config.is_traceback_filtering_enabled()`.
If you have previously disabled traceback filtering via
`keras_core.config.disable_traceback_filtering()`, you can re-enable it via
`keras_core.config.enable_traceback_filtering()`.
"""
global_state.set_global_attribute("traceback_filtering", True)
@keras_core_export("keras_core.config.disable_traceback_filtering")
def disable_traceback_filtering():
"""Turn off traceback filtering.
Raw Keras tracebacks (also known as stack traces)
involve many internal frames, which can be
challenging to read through, while not being actionable for end users.
By default, Keras filters internal frames in most exceptions that it
raises, to keep traceback short, readable, and focused on what's
actionable for you (your own code).
See also `keras_core.config.enable_traceback_filtering()` and
`keras_core.config.is_traceback_filtering_enabled()`.
If you have previously disabled traceback filtering via
`keras_core.config.disable_traceback_filtering()`, you can re-enable it via
`keras_core.config.enable_traceback_filtering()`.
"""
global_state.set_global_attribute("traceback_filtering", False)
@keras_core_export("keras_core.config.is_traceback_filtering_enabled")
def is_traceback_filtering_enabled():
"""Check if traceback filtering is enabled.
Raw Keras tracebacks (also known as stack traces)
involve many internal frames, which can be
challenging to read through, while not being actionable for end users.
By default, Keras filters internal frames in most exceptions that it
raises, to keep traceback short, readable, and focused on what's
actionable for you (your own code).
See also `keras_core.config.enable_traceback_filtering()` and
`keras_core.config.disable_traceback_filtering()`.
If you have previously disabled traceback filtering via
`keras_core.config.disable_traceback_filtering()`, you can re-enable it via
`keras_core.config.enable_traceback_filtering()`.
Returns:
Boolean, `True` if traceback filtering is enabled,
and `False` otherwise.
"""
return global_state.get_global_attribute("traceback_filtering", True)
def include_frame(fname):
for exclusion in _EXCLUDED_PATHS:
if exclusion in fname:
return False
return True
def _process_traceback_frames(tb):
"""Iterate through traceback frames and return a new, filtered traceback."""
last_tb = None
tb_list = list(traceback.walk_tb(tb))
for f, line_no in reversed(tb_list):
if include_frame(f.f_code.co_filename):
last_tb = types.TracebackType(last_tb, f, f.f_lasti, line_no)
if last_tb is None and tb_list:
# If no frames were kept during filtering, create a new traceback
# from the outermost function.
f, line_no = tb_list[-1]
last_tb = types.TracebackType(last_tb, f, f.f_lasti, line_no)
return last_tb
def filter_traceback(fn):
"""Filter out Keras-internal traceback frames in exceptions raised by fn."""
@wraps(fn)
def error_handler(*args, **kwargs):
if not is_traceback_filtering_enabled():
return fn(*args, **kwargs)
filtered_tb = None
try:
return fn(*args, **kwargs)
except Exception as e:
filtered_tb = _process_traceback_frames(e.__traceback__)
# To get the full stack trace, call:
# `keras_core.config.disable_traceback_filtering()`
raise e.with_traceback(filtered_tb) from None
finally:
del filtered_tb
return error_handler
def inject_argument_info_in_traceback(fn, object_name=None):
"""Add information about call argument values to an error message.
Arguments:
        fn: Function to wrap. Exceptions raised by this function will be
re-raised with additional information added to the error message,
displaying the values of the different arguments that the function
was called with.
object_name: String, display name of the class/function being called,
e.g. `'layer "layer_name" (LayerClass)'`.
Returns:
A wrapped version of `fn`.
"""
if backend.backend() == "tensorflow":
from tensorflow import errors as tf_errors
else:
tf_errors = None
@wraps(fn)
def error_handler(*args, **kwargs):
if not is_traceback_filtering_enabled():
return fn(*args, **kwargs)
signature = None
bound_signature = None
try:
return fn(*args, **kwargs)
except Exception as e:
if hasattr(e, "_keras_call_info_injected"):
# Only inject info for the innermost failing call
raise e
signature = inspect.signature(fn)
try:
# The first argument is `self`, so filter it out
bound_signature = signature.bind(*args, **kwargs)
except TypeError:
# Likely unbindable arguments
raise e
# Add argument context
arguments_context = []
for arg in list(signature.parameters.values()):
if arg.name in bound_signature.arguments:
value = tree.map_structure(
format_argument_value,
bound_signature.arguments[arg.name],
)
else:
value = arg.default
arguments_context.append(f" • {arg.name}={value}")
if arguments_context:
arguments_context = "\n".join(arguments_context)
# Get original error message and append information to it.
if tf_errors is not None and isinstance(e, tf_errors.OpError):
message = e.message
elif e.args:
# Canonically, the 1st argument in an exception is the error
# message. This works for all built-in Python exceptions.
message = e.args[0]
else:
message = ""
display_name = f"{object_name if object_name else fn.__name__}"
message = (
f"Exception encountered when calling {display_name}.\n\n"
f"\x1b[1m{message}\x1b[0m\n\n"
f"Arguments received by {display_name}:\n"
f"{arguments_context}"
)
# Reraise exception, with added context
if tf_errors is not None and isinstance(e, tf_errors.OpError):
new_e = e.__class__(e.node_def, e.op, message, e.error_code)
else:
try:
# For standard exceptions such as ValueError, TypeError,
# etc.
new_e = e.__class__(message)
except TypeError:
# For any custom error that doesn't have a standard
# signature.
new_e = RuntimeError(message)
new_e._keras_call_info_injected = True
else:
new_e = e
raise new_e.with_traceback(e.__traceback__) from None
finally:
del signature
del bound_signature
return error_handler
def format_argument_value(value):
if backend.is_tensor(value):
# Simplified representation for eager / graph tensors
# to keep messages readable
if backend.backend() == "tensorflow":
tensor_cls = "tf.Tensor"
elif backend.backend() == "jax":
tensor_cls = "jnp.ndarray"
elif backend.backend() == "torch":
tensor_cls = "torch.Tensor"
elif backend.backend() == "numpy":
tensor_cls = "np.ndarray"
else:
tensor_cls = "array"
return (
f"{tensor_cls}(shape={value.shape}, "
f"dtype={backend.standardize_dtype(value.dtype)})"
)
return repr(value)
|
keras-core/keras_core/utils/traceback_utils.py/0
|
{
"file_path": "keras-core/keras_core/utils/traceback_utils.py",
"repo_id": "keras-core",
"token_count": 3913
}
| 59 |
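To see how the `filter_traceback` decorator above is meant to be used, here is a minimal sketch: a function is wrapped so that, when it raises, frames whose file paths fall under `_EXCLUDED_PATHS` are dropped from the re-raised traceback. The function and error message below are invented for illustration.
@filter_traceback
def fit_step():
    # Imagine several Keras-internal frames between here and the error.
    raise ValueError("Shapes are incompatible.")
try:
    fit_step()
except ValueError:
    # With filtering enabled (the default), the re-raised traceback only
    # keeps frames whose file paths pass `include_frame`.
    pass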
sudo pip install --upgrade pip
sudo pip install -r requirements.txt --progress-bar off
sudo pip install -e ".[tests]"
sudo apt update
sudo apt install -y clang-format
|
keras-cv/.devcontainer/setup.sh/0
|
{
"file_path": "keras-cv/.devcontainer/setup.sh",
"repo_id": "keras-cv",
"token_count": 46
}
| 60 |
build_file: "keras-cv/.kokoro/github/ubuntu/gpu/build.sh"
action {
define_artifacts {
regex: "**/sponge_log.log"
regex: "**/sponge_log.xml"
}
}
env_vars: {
key: "KERAS2"
value: "1"
}
# Set timeout to 60 mins from default 180 mins
timeout_mins: 60
|
keras-cv/.kokoro/github/ubuntu/gpu/keras2/presubmit.cfg/0
|
{
"file_path": "keras-cv/.kokoro/github/ubuntu/gpu/keras2/presubmit.cfg",
"repo_id": "keras-cv",
"token_count": 116
}
| 61 |
import math
import random
import time
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import tensorflow as tf
import keras_cv
from keras_cv.metrics import coco
def produce_random_data(
include_confidence=False, num_images=128, num_classes=20
):
"""Generates a fake list of bounding boxes for use in this test.
Returns:
        a tensor of shape [num_images, 25, 5 or 6]. This represents
        `num_images` images, 25 bboxes, and 5 or 6 dimensions per bbox,
        depending on whether confidence is included.
"""
images = []
for _ in range(num_images):
num_boxes = math.floor(25 * random.uniform(0, 1))
classes_in_image = np.floor(np.random.rand(num_boxes, 1) * num_classes)
bboxes = np.random.rand(num_boxes, 4)
boxes = np.concatenate([bboxes, classes_in_image], axis=-1)
if include_confidence:
confidence = np.random.rand(num_boxes, 1)
boxes = np.concatenate([boxes, confidence], axis=-1)
images.append(
keras_cv.utils.bounding_box.xywh_to_corners(
tf.constant(boxes, dtype=tf.float32)
)
)
images = [keras_cv.bounding_box.to_dense(x, max_boxes=25) for x in images]
return tf.stack(images, axis=0)
y_true = produce_random_data()
y_pred = produce_random_data(include_confidence=True)
class_ids = list(range(20))
n_images = [128, 256, 512, 512 + 256, 1024]
update_state_runtimes = []
result_runtimes = []
end_to_end_runtimes = []
for images in n_images:
y_true = produce_random_data(num_images=images)
y_pred = produce_random_data(num_images=images, include_confidence=True)
metric = coco._COCOMeanAveragePrecision(class_ids)
# warm up
metric.update_state(y_true, y_pred)
metric.result()
start = time.time()
metric.update_state(y_true, y_pred)
update_state_done = time.time()
r = metric.result()
end = time.time()
update_state_runtimes.append(update_state_done - start)
result_runtimes.append(end - update_state_done)
end_to_end_runtimes.append(end - start)
print("end_to_end_runtimes", end_to_end_runtimes)
data = pd.DataFrame(
{
"n_images": n_images,
"update_state_runtimes": update_state_runtimes,
"result_runtimes": result_runtimes,
"end_to_end_runtimes": end_to_end_runtimes,
}
)
sns.lineplot(data=data, x="n_images", y="update_state_runtimes")
plt.xlabel("Number of Images")
plt.ylabel("update_state() runtime (seconds)")
plt.title("Runtime of update_state()")
plt.show()
sns.lineplot(data=data, x="n_images", y="result_runtimes")
plt.xlabel("Number of Images")
plt.ylabel("result() runtime (seconds)")
plt.title("Runtime of result()")
plt.show()
sns.lineplot(data=data, x="n_images", y="end_to_end_runtimes")
plt.xlabel("Number of Images")
plt.ylabel("End to end runtime (seconds)")
plt.title("Runtimes of update_state() followed by result()")
plt.show()
|
keras-cv/benchmarks/metrics/coco/mean_average_precision_performance.py/0
|
{
"file_path": "keras-cv/benchmarks/metrics/coco/mean_average_precision_performance.py",
"repo_id": "keras-cv",
"token_count": 1229
}
| 62 |
import time
import numpy as np
import tensorflow as tf
from matplotlib import pyplot as plt
from tensorflow import keras
from keras_cv.layers import BaseImageAugmentationLayer
from keras_cv.layers import RandomSharpness
from keras_cv.utils import preprocessing
class OldRandomSharpness(BaseImageAugmentationLayer):
"""Randomly performs the sharpness operation on given images.
The sharpness operation first performs a blur operation, then blends between
the original image and the blurred image. This operation makes the edges of
an image less sharp than they were in the original image.
References:
- [PIL](https://pillow.readthedocs.io/en/stable/reference/ImageEnhance.html)
Args:
factor: A tuple of two floats, a single float or
`keras_cv.FactorSampler`. `factor` controls the extent to which the
image sharpness is impacted. `factor=0.0` makes this layer perform a
no-op operation, while a value of 1.0 uses the sharpened result
entirely. Values between 0 and 1 result in linear interpolation
between the original image and the sharpened image. Values should be
between `0.0` and `1.0`. If a tuple is used, a `factor` is sampled
between the two values for every image augmented. If a single float
is used, a value between `0.0` and the passed float is sampled. In
order to ensure the value is always the same, please pass a tuple
with two identical floats: `(0.5, 0.5)`.
value_range: the range of values the incoming images will have.
Represented as a two number tuple written [low, high].
This is typically either `[0, 1]` or `[0, 255]` depending
on how your preprocessing pipeline is set up.
""" # noqa: E501
def __init__(
self,
factor,
value_range,
seed=None,
**kwargs,
):
super().__init__(seed=seed, **kwargs)
self.value_range = value_range
self.factor = preprocessing.parse_factor(factor)
self.seed = seed
def get_random_transformation(self, **kwargs):
return self.factor(dtype=self.compute_dtype)
def augment_image(self, image, transformation=None, **kwargs):
image = preprocessing.transform_value_range(
image,
original_range=self.value_range,
target_range=(0, 255),
dtype=self.compute_dtype,
)
original_image = image
# Make image 4D for conv operation.
image = tf.expand_dims(image, axis=0)
# [1 1 1]
# [1 5 1]
# [1 1 1]
        # all divided by 13 gives the 3x3 smoothing kernel used by PIL's
        # sharpness enhancer. Correlating or convolving with this filter is
        # equivalent to performing a mild blur.
kernel = (
tf.constant(
[[1, 1, 1], [1, 5, 1], [1, 1, 1]],
dtype=self.compute_dtype,
shape=[3, 3, 1, 1],
)
/ 13.0
)
# Tile across channel dimension.
channels = tf.shape(image)[-1]
kernel = tf.tile(kernel, [1, 1, channels, 1])
strides = [1, 1, 1, 1]
smoothed_image = tf.nn.depthwise_conv2d(
image, kernel, strides, padding="VALID", dilations=[1, 1]
)
smoothed_image = tf.clip_by_value(smoothed_image, 0.0, 255.0)
smoothed_image = tf.squeeze(smoothed_image, axis=0)
# For the borders of the resulting image, fill in the values of the
# original image.
mask = tf.ones_like(smoothed_image)
padded_mask = tf.pad(mask, [[1, 1], [1, 1], [0, 0]])
padded_smoothed_image = tf.pad(smoothed_image, [[1, 1], [1, 1], [0, 0]])
result = tf.where(
tf.equal(padded_mask, 1), padded_smoothed_image, original_image
)
# Blend the final result.
result = preprocessing.blend(original_image, result, transformation)
result = preprocessing.transform_value_range(
result,
original_range=(0, 255),
target_range=self.value_range,
dtype=self.compute_dtype,
)
return result
def augment_bounding_boxes(self, bounding_boxes, transformation, **kwargs):
return bounding_boxes
def augment_label(self, label, transformation=None, **kwargs):
return label
def augment_segmentation_mask(
self, segmentation_mask, transformation, **kwargs
):
return segmentation_mask
def get_config(self):
config = super().get_config()
config.update(
{
"factor": self.factor,
"value_range": self.value_range,
"seed": self.seed,
}
)
return config
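# Illustrative usage sketch (not part of the original file): applying the layer
# above to a batch of random images. The shapes and (0, 255) value range are
# assumptions chosen to mirror the consistency test below.
def _demo_old_random_sharpness():
    images = tf.random.uniform(shape=(4, 64, 64, 3), minval=0, maxval=255)
    layer = OldRandomSharpness(value_range=(0, 255), factor=(0.5, 0.5))
    # Output keeps the input shape and value range.
    return layer(images)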
class RandomSharpnessTest(tf.test.TestCase):
def test_consistency_with_old_implementation(self):
images = tf.random.uniform(shape=(2, 64, 64, 3), minval=0, maxval=255)
old_layer = OldRandomSharpness(value_range=(0, 255), factor=(0.5, 0.5))
new_layer = RandomSharpness(value_range=(0, 255), factor=(0.5, 0.5))
old_output = old_layer(images)
new_output = new_layer(images)
self.assertAllClose(old_output, new_output)
if __name__ == "__main__":
# Run benchmark
(x_train, _), _ = keras.datasets.cifar10.load_data()
x_train = x_train.astype(np.float32)
num_images = [1000, 2000, 5000, 10000]
results = {}
aug_candidates = [RandomSharpness, OldRandomSharpness]
aug_args = {"value_range": (0, 255), "factor": 0.5}
for aug in aug_candidates:
# Eager Mode
c = aug.__name__
layer = aug(**aug_args)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
layer(x_train[:n_images])
t0 = time.time()
r1 = layer(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
# Graph Mode
c = aug.__name__ + " Graph Mode"
layer = aug(**aug_args)
@tf.function()
def apply_aug(inputs):
return layer(inputs)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
apply_aug(x_train[:n_images])
t0 = time.time()
r1 = apply_aug(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
# XLA Mode
c = aug.__name__ + " XLA Mode"
layer = aug(**aug_args)
@tf.function(jit_compile=True)
def apply_aug(inputs):
return layer(inputs)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
apply_aug(x_train[:n_images])
t0 = time.time()
r1 = apply_aug(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison.png")
# So we can actually see more relevant margins
del results[aug_candidates[1].__name__]
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison_no_old_eager.png")
# Run unit tests
tf.test.main()
|
keras-cv/benchmarks/vectorized_random_sharpness.py/0
|
{
"file_path": "keras-cv/benchmarks/vectorized_random_sharpness.py",
"repo_id": "keras-cv",
"token_count": 3608
}
| 63 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for preprocessing demos."""
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
from keras_cv import bounding_box
def preprocess_voc(inputs, format, image_size):
    """Mapping function that resizes images and converts bbox coordinates."""
inputs["image"] = tf.image.resize(inputs["image"], image_size)
inputs["objects"]["bbox"] = bounding_box.convert_format(
inputs["objects"]["bbox"],
images=inputs["image"],
source="rel_yxyx",
target=format,
)
return {
"images": inputs["image"],
"bounding_boxes": inputs["objects"]["bbox"],
}
def load_voc_dataset(
bounding_box_format,
name="voc/2007",
batch_size=9,
image_size=(224, 224),
):
dataset = tfds.load(name, split=tfds.Split.TRAIN, shuffle_files=True)
dataset = dataset.map(
lambda x: preprocess_voc(
x, format=bounding_box_format, image_size=image_size
),
num_parallel_calls=tf.data.AUTOTUNE,
)
dataset = dataset.padded_batch(
batch_size, padding_values={"images": None, "bounding_boxes": -1.0}
)
return dataset
def visualize_data(data, bounding_box_format):
data = next(iter(data))
images = data["images"]
bounding_boxes = data["bounding_boxes"]
output_images = visualize_bounding_boxes(
images, bounding_boxes, bounding_box_format
).numpy()
gallery_show(output_images)
def visualize_bounding_boxes(image, bounding_boxes, bounding_box_format):
color = np.array([[255.0, 0.0, 0.0]])
bounding_boxes = bounding_box.convert_format(
bounding_boxes,
source=bounding_box_format,
target="rel_yxyx",
images=image,
)
return tf.image.draw_bounding_boxes(image, bounding_boxes, color, name=None)
def gallery_show(images):
images = images.astype(int)
for i in range(9):
image = images[i]
plt.subplot(3, 3, i + 1)
plt.imshow(image.astype("uint8"))
plt.axis("off")
plt.show()
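# Illustrative usage sketch (not part of the original file): chaining the
# helpers above to preview a batch of VOC images with their boxes drawn.
# The "xywh" format string is an arbitrary choice; any supported KerasCV
# bounding box format works.
if __name__ == "__main__":
    demo_dataset = load_voc_dataset(bounding_box_format="xywh")
    visualize_data(demo_dataset, bounding_box_format="xywh")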
|
keras-cv/examples/layers/object_detection/demo_utils.py/0
|
{
"file_path": "keras-cv/examples/layers/object_detection/demo_utils.py",
"repo_id": "keras-cv",
"token_count": 1028
}
| 64 |
"""
Title: Generate an image from a text prompt using StableDiffusion
Author: fchollet
Date created: 2022/09/24
Last modified: 2022/09/24
Description: Use StableDiffusion to generate an image according to a short text
description.
"""
from PIL import Image
from keras_cv.models import StableDiffusion
model = StableDiffusion(img_height=512, img_width=512, jit_compile=True)
img = model.text_to_image(
"Photograph of a beautiful horse running through a field"
)
Image.fromarray(img[0]).save("horse.png")
print("Saved at horse.png")
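# A further sketch (not part of the original example): generating several
# candidates for the same prompt. `batch_size` and `num_steps` are assumed to
# be supported keyword arguments of `text_to_image`; adjust them if your
# installed KerasCV version differs.
images = model.text_to_image(
    "Photograph of a beautiful horse running through a field",
    batch_size=3,
    num_steps=25,
)
for i, image in enumerate(images):
    Image.fromarray(image).save(f"horse_{i}.png")
print("Saved 3 additional candidates")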
|
keras-cv/examples/models/generative/stable_diffusion/text_to_image.py/0
|
{
"file_path": "keras-cv/examples/models/generative/stable_diffusion/text_to_image.py",
"repo_id": "keras-cv",
"token_count": 182
}
| 65 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Keras backend module.
This module adds a temporary Keras API surface that is fully under KerasCV
control. The goal is to allow us to write Keras 3-like code everywhere, while
still supporting Keras 2. We do this by using the `keras_core` package with
Keras 2 to backport Keras 3 numerics APIs (`keras.ops` and `keras.random`) into
Keras 2. The sub-modules exposed are as follows:
- `config`: check which version of Keras is being run.
- `keras`: The full `keras` API with compat shims for older Keras versions.
- `ops`: `keras.ops` for Keras 3 or `keras_core.ops` for Keras 2.
- `random`: `keras.random` for Keras 3 or `keras_core.random` for Keras 2.
"""
from keras_cv.backend import config # noqa: E402
from keras_cv.backend import keras # noqa: E402
from keras_cv.backend import ops # noqa: E402
from keras_cv.backend import random # noqa: E402
from keras_cv.backend import tf_ops # noqa: E402
def assert_tf_keras(src):
if config.keras_3():
raise NotImplementedError(
f"KerasCV component {src} does not yet support Keras 3, and can "
"only be used in `tf.keras`."
)
def supports_ragged():
return not config.keras_3()
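# Illustrative usage sketch (not part of the original file): how downstream
# KerasCV code typically consumes this module. Only the names imported above
# are assumed.
def _example_squared_norm(x):
    # `ops` resolves to `keras.ops` under Keras 3 and to `keras_core.ops`
    # under Keras 2, so the same numerics code runs in either setup.
    return ops.sum(ops.square(x), axis=-1)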
|
keras-cv/keras_cv/backend/__init__.py/0
|
{
"file_path": "keras-cv/keras_cv/backend/__init__.py",
"repo_id": "keras-cv",
"token_count": 587
}
| 66 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv import backend
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import ops
from keras_cv.bounding_box.to_ragged import to_ragged
from keras_cv.bounding_box.validate_format import validate_format
@keras_cv_export("keras_cv.bounding_box.mask_invalid_detections")
def mask_invalid_detections(bounding_boxes, output_ragged=False):
"""masks out invalid detections with -1s.
This utility is mainly used on the output of non-max suppression operations.
The output of non-max-suppression contains all the detections, even invalid
ones. Users are expected to use `num_detections` to determine how many boxes
are in each image.
In contrast, KerasCV expects all bounding boxes to be padded with -1s.
This function uses the value of `num_detections` to mask out
invalid boxes with -1s.
Args:
bounding_boxes: a dictionary complying with KerasCV bounding box format.
In addition to the normal required keys, these boxes are also
expected to have a `num_detections` key.
output_ragged: whether to output RaggedTensor based bounding
boxes.
Returns:
bounding boxes with proper masking of the boxes according to
        `num_detections`. This allows proper interop with non-max suppression.
        Returned boxes match the specification fed to the function, so if the
        bounding box tensor uses `tf.RaggedTensor` to represent boxes, the
        returned value will also use `tf.RaggedTensor` representations.
"""
# ensure we are complying with KerasCV bounding box format.
info = validate_format(bounding_boxes)
if info["ragged"]:
raise ValueError(
"`bounding_box.mask_invalid_detections()` requires inputs to be "
"Dense tensors. Please call "
"`bounding_box.to_dense(bounding_boxes)` before passing your boxes "
"to `bounding_box.mask_invalid_detections()`."
)
if "num_detections" not in bounding_boxes:
raise ValueError(
"`bounding_boxes` must have key 'num_detections' "
"to be used with `bounding_box.mask_invalid_detections()`."
)
boxes = bounding_boxes.get("boxes")
classes = bounding_boxes.get("classes")
confidence = bounding_boxes.get("confidence", None)
num_detections = bounding_boxes.get("num_detections")
# Create a mask to select only the first N boxes from each batch
mask = ops.cast(
ops.expand_dims(ops.arange(boxes.shape[1]), axis=0),
num_detections.dtype,
)
mask = mask < num_detections[:, None]
classes = ops.where(mask, classes, -ops.ones_like(classes))
if confidence is not None:
confidence = ops.where(mask, confidence, -ops.ones_like(confidence))
# reuse mask for boxes
mask = ops.expand_dims(mask, axis=-1)
mask = ops.repeat(mask, repeats=boxes.shape[-1], axis=-1)
boxes = ops.where(mask, boxes, -ops.ones_like(boxes))
result = bounding_boxes.copy()
result["boxes"] = boxes
result["classes"] = classes
if confidence is not None:
result["confidence"] = confidence
if output_ragged and backend.supports_ragged():
return to_ragged(result)
return result
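# Illustrative usage sketch (not part of the original file): masking the padded
# output of an NMS stage. The tensor contents are made up for demonstration.
def _example_mask_invalid_detections():
    bounding_boxes = {
        "boxes": ops.ones((1, 4, 4)),
        "classes": ops.ones((1, 4)),
        "confidence": ops.ones((1, 4)),
        # Only the first two detections in the image are valid.
        "num_detections": ops.array([2]),
    }
    masked = mask_invalid_detections(bounding_boxes)
    # Rows 2 and 3 of "boxes", "classes" and "confidence" are now -1.
    return masked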
|
keras-cv/keras_cv/bounding_box/mask_invalid_detections.py/0
|
{
"file_path": "keras-cv/keras_cv/bounding_box/mask_invalid_detections.py",
"repo_id": "keras-cv",
"token_count": 1362
}
| 67 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import tensorflow as tf
from tensorflow import keras
from keras_cv.callbacks import WaymoEvaluationCallback
from keras_cv.tests.test_case import TestCase
NUM_RECORDS = 10
POINT_FEATURES = 3
NUM_POINTS = 20
NUM_BOXES = 2
BOX_FEATURES = 7
METRIC_KEYS = [
"average_precision_vehicle_l1",
"average_precision_vehicle_l2",
"average_precision_ped_l1",
"average_precision_ped_l2",
]
class WaymoEvaluationCallbackTest(TestCase):
@pytest.mark.skipif(True, reason="Requires Waymo Open Dataset")
def test_model_fit(self):
# Silly hypothetical model
model = self.build_model()
points = tf.random.normal((NUM_RECORDS, POINT_FEATURES, NUM_POINTS))
# Some random boxes, and some -1 boxes (to mimic padding ragged boxes)
boxes = tf.concat(
[
tf.random.uniform((NUM_RECORDS // 2, NUM_BOXES, BOX_FEATURES)),
tf.cast(
tf.fill((NUM_RECORDS // 2, NUM_BOXES, BOX_FEATURES), -1),
tf.float32,
),
],
axis=0,
)
dataset = tf.data.Dataset.from_tensor_slices(
(
points,
{
"3d_boxes": {
"boxes": boxes,
"classes": np.ones((NUM_RECORDS, NUM_BOXES)),
"difficulty": np.ones((NUM_RECORDS, NUM_BOXES)),
"mask": tf.concat(
[
np.ones((NUM_RECORDS // 2, NUM_BOXES)),
np.zeros((NUM_RECORDS // 2, NUM_BOXES)),
],
axis=0,
),
}
},
)
).batch(5)
callback = WaymoEvaluationCallback(validation_data=dataset)
history = model.fit(points, boxes, callbacks=[callback])
self.assertAllInSet(METRIC_KEYS, history.history.keys())
def build_model(self):
inputs = keras.Input(shape=(POINT_FEATURES, NUM_POINTS))
x = keras.layers.Flatten()(inputs)
# Add extra features for class and confidence
x = keras.layers.Dense(NUM_BOXES * (BOX_FEATURES + 2))(x)
x = keras.layers.Reshape((NUM_BOXES, BOX_FEATURES + 2))(x)
x = keras.layers.Lambda(
lambda x: {
"3d_boxes": {
"boxes": x[:, :, :7],
"classes": tf.abs(x[:, :, 7]),
"confidence": x[:, :, 8],
}
}
)(x)
class MeanLoss(keras.losses.Loss):
def call(self, y_true, y_pred):
return tf.reduce_mean(y_pred, axis=-1)
model = keras.Model(inputs=inputs, outputs=x)
model.compile(loss=MeanLoss())
return model
|
keras-cv/keras_cv/callbacks/waymo_evaluation_callback_test.py/0
|
{
"file_path": "keras-cv/keras_cv/callbacks/waymo_evaluation_callback_test.py",
"repo_id": "keras-cv",
"token_count": 1757
}
| 68 |
/* Copyright 2022 The KerasCV Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#define EIGEN_USE_THREADS
#include "keras_cv/custom_ops/box_util.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/lib/core/errors.h"
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
namespace kerascv {
class WithinBoxOp : public OpKernel {
public:
explicit WithinBoxOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}
void Compute(OpKernelContext* ctx) override {
const Tensor& points = ctx->input(0);
const Tensor& boxes = ctx->input(1);
const int num_points = points.dim_size(0);
const int num_boxes = boxes.dim_size(0);
Tensor* box_indices = nullptr;
OP_REQUIRES_OK(
ctx, ctx->allocate_output("box_indices", TensorShape({num_points}),
&box_indices));
auto boxes_indices_t = box_indices->flat<int>();
for (auto i = 0; i < num_points; ++i) boxes_indices_t(i) = -1;
std::vector<box::Upright3DBox> boxes_vec = box::ParseBoxesFromTensor(boxes);
std::vector<box::Vertex> points_vec = box::ParseVerticesFromTensor(points);
std::vector<int> p_indices_x(num_points);
// index x range [0, num_points)
std::iota(p_indices_x.begin(), p_indices_x.end(), 0);
// index y range [0, num_points)
std::vector<int> p_indices_y(p_indices_x);
// sort, return sorted value and indices
std::sort(p_indices_x.begin(), p_indices_x.end(),
[&points_vec](const int& a, const int& b) -> bool {
return points_vec[a].x < points_vec[b].x;
});
std::sort(p_indices_y.begin(), p_indices_y.end(),
[&points_vec](const int& a, const int& b) -> bool {
return points_vec[a].y < points_vec[b].y;
});
std::vector<double> sorted_points_x;
sorted_points_x.reserve(num_points);
std::vector<double> sorted_points_y;
sorted_points_y.reserve(num_points);
for (int i = 0; i < num_points; ++i) {
sorted_points_x.emplace_back(points_vec[p_indices_x[i]].x);
sorted_points_y.emplace_back(points_vec[p_indices_y[i]].y);
}
// for each box, find all point indices whose x values are within box
// boundaries when the box is rotated, the box boundary is the minimum and
// maximum x for all vertices
std::vector<int> points_x_min =
box::GetMinXIndexFromBoxes(boxes_vec, sorted_points_x);
std::vector<int> points_x_max =
box::GetMaxXIndexFromBoxes(boxes_vec, sorted_points_x);
std::vector<std::unordered_set<int>> points_x_indices(num_boxes);
auto set_fn_x = [&points_x_min, &points_x_max, &p_indices_x,
&points_x_indices](int64_t begin, int64_t end) {
for (int64_t idx = begin; idx < end; ++idx) {
std::unordered_set<int> p_set;
int p_start = points_x_min[idx];
int p_end = points_x_max[idx];
for (auto p_idx = p_start; p_idx <= p_end; ++p_idx) {
p_set.insert(p_indices_x[p_idx]);
}
points_x_indices[idx] = p_set;
}
};
const CPUDevice& device = ctx->eigen_device<CPUDevice>();
const Eigen::TensorOpCost cost(num_points, num_boxes, 3);
device.parallelFor(num_boxes, cost, set_fn_x);
    // for each box, find all point indices whose y values are within box
    // boundaries when the box is rotated, the box boundary is the minimum and
    // maximum y for all vertices
std::vector<int> points_y_min =
box::GetMinYIndexFromBoxes(boxes_vec, sorted_points_y);
std::vector<int> points_y_max =
box::GetMaxYIndexFromBoxes(boxes_vec, sorted_points_y);
std::vector<std::unordered_set<int>> points_y_indices(num_boxes);
auto set_fn_y = [&points_y_min, &points_y_max, &p_indices_y,
&points_y_indices](int64_t begin, int64_t end) {
for (int64_t idx = begin; idx < end; ++idx) {
std::unordered_set<int> p_set;
int p_start = points_y_min[idx];
int p_end = points_y_max[idx];
for (auto p_idx = p_start; p_idx <= p_end; ++p_idx) {
p_set.insert(p_indices_y[p_idx]);
}
points_y_indices[idx] = p_set;
}
};
device.parallelFor(num_boxes, cost, set_fn_y);
// for the intersection of x indices set and y indices set, check if
// those points are within the box
auto within_fn = [&points_x_indices, &points_y_indices, &boxes_vec,
&points_vec,
&boxes_indices_t](int64_t begin, int64_t end) {
for (int64_t idx = begin; idx < end; ++idx) {
std::unordered_set<int>& set_a = points_x_indices[idx];
std::unordered_set<int>& set_b = points_y_indices[idx];
std::unordered_set<int> p_set;
for (auto val : set_a) {
if (set_b.find(val) != set_b.end()) {
p_set.insert(val);
}
}
box::Upright3DBox& box = boxes_vec[idx];
for (auto p_idx : p_set) {
box::Vertex& point = points_vec[p_idx];
if (box.WithinBox3D(point)) {
boxes_indices_t(p_idx) = idx;
}
}
}
};
device.parallelFor(num_boxes, cost, within_fn);
}
};
REGISTER_KERNEL_BUILDER(Name("KcvWithinBox").Device(DEVICE_CPU), WithinBoxOp);
} // namespace kerascv
} // namespace tensorflow
|
keras-cv/keras_cv/custom_ops/kernels/withinbox_op.cc/0
|
{
"file_path": "keras-cv/keras_cv/custom_ops/kernels/withinbox_op.cc",
"repo_id": "keras-cv",
"token_count": 2641
}
| 69 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data loader for the Waymo Open Dataset."""
import os
import tensorflow as tf
from keras_cv.datasets.waymo import transformer
from keras_cv.utils import assert_waymo_open_dataset_installed
try:
import waymo_open_dataset
except ImportError:
waymo_open_dataset = None
from keras_cv.api_export import keras_cv_export
def _generate_frames(segments, transformer):
def _generator():
for record in segments:
frame = waymo_open_dataset.dataset_pb2.Frame()
frame.ParseFromString(record.numpy())
yield transformer(frame)
return _generator
@keras_cv_export(
"keras_cv.datasets.waymo.load", package="keras_cv.datasets.waymo"
)
def load(
tfrecord_path,
transformer=transformer.build_tensors_from_wod_frame,
output_signature=transformer.WOD_FRAME_OUTPUT_SIGNATURE,
):
"""
Loads the Waymo Open Dataset and transforms frames into features as
tensors.
References:
- [Waymo Dataset Research Paper](https://arxiv.org/abs/1912.04838)
- [Waymo Dataset Website](https://waymo.com/open/)
Args:
tfrecord_path: a string pointing to the directory containing the raw
tfrecords in the Waymo Open Dataset, or a list of strings pointing
to the tfrecords themselves
transformer: a Python function which transforms a Waymo Open Dataset
Frame object into tensors, defaults to convert range image to point
cloud.
output_signature: the type specification of the tensors created by the
transformer. This is often a dictionary from feature column names to
tf.TypeSpecs, defaults to point cloud representations of Waymo Open
Dataset data.
Returns:
tf.data.Dataset containing the features extracted from Frames using the
provided transformer.
Example:
```python
from keras_cv.datasets.waymo import load
def simple_transformer(frame):
return {"timestamp_micros": frame.timestamp_micros}
output_signature = {"timestamp_micros": tf.TensorSpec((), tf.int64)}
load("/path/to/tfrecords", simple_transformer, output_signature)
```
"""
assert_waymo_open_dataset_installed("keras_cv.datasets.waymo.load()")
if isinstance(tfrecord_path, list):
filenames = tfrecord_path
else:
filenames = tf.data.TFRecordDataset.list_files(
os.path.join(tfrecord_path, "*.tfrecord")
)
segments = tf.data.TFRecordDataset(filenames)
return tf.data.Dataset.from_generator(
_generate_frames(segments, transformer),
output_signature=output_signature,
)
|
keras-cv/keras_cv/datasets/waymo/load.py/0
|
{
"file_path": "keras-cv/keras_cv/datasets/waymo/load.py",
"repo_id": "keras-cv",
"token_count": 1190
}
| 70 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from tensorflow import keras
from keras_cv.layers import FeaturePyramid
from keras_cv.tests.test_case import TestCase
class FeaturePyramidTest(TestCase):
def test_return_type_dict(self):
layer = FeaturePyramid(min_level=2, max_level=5)
c2 = np.ones([2, 64, 64, 3])
c3 = np.ones([2, 32, 32, 3])
c4 = np.ones([2, 16, 16, 3])
c5 = np.ones([2, 8, 8, 3])
inputs = {2: c2, 3: c3, 4: c4, 5: c5}
output = layer(inputs)
self.assertTrue(isinstance(output, dict))
self.assertEquals(sorted(output.keys()), [2, 3, 4, 5])
def test_result_shapes(self):
layer = FeaturePyramid(min_level=2, max_level=5)
c2 = np.ones([2, 64, 64, 3])
c3 = np.ones([2, 32, 32, 3])
c4 = np.ones([2, 16, 16, 3])
c5 = np.ones([2, 8, 8, 3])
inputs = {2: c2, 3: c3, 4: c4, 5: c5}
output = layer(inputs)
for level in inputs.keys():
self.assertEquals(output[level].shape[1], inputs[level].shape[1])
self.assertEquals(output[level].shape[2], inputs[level].shape[2])
self.assertEquals(output[level].shape[3], layer.num_channels)
# Test with different resolution and channel size
c2 = np.ones([2, 64, 128, 4])
c3 = np.ones([2, 32, 64, 8])
c4 = np.ones([2, 16, 32, 16])
c5 = np.ones([2, 8, 16, 32])
inputs = {2: c2, 3: c3, 4: c4, 5: c5}
layer = FeaturePyramid(min_level=2, max_level=5)
output = layer(inputs)
for level in inputs.keys():
self.assertEquals(output[level].shape[1], inputs[level].shape[1])
self.assertEquals(output[level].shape[2], inputs[level].shape[2])
self.assertEquals(output[level].shape[3], layer.num_channels)
def test_with_keras_input_tensor(self):
# This mimic the model building with Backbone network
layer = FeaturePyramid(min_level=2, max_level=5)
c2 = keras.layers.Input([64, 64, 3])
c3 = keras.layers.Input([32, 32, 3])
c4 = keras.layers.Input([16, 16, 3])
c5 = keras.layers.Input([8, 8, 3])
inputs = {2: c2, 3: c3, 4: c4, 5: c5}
output = layer(inputs)
for level in inputs.keys():
self.assertEquals(output[level].shape[1], inputs[level].shape[1])
self.assertEquals(output[level].shape[2], inputs[level].shape[2])
self.assertEquals(output[level].shape[3], layer.num_channels)
def test_invalid_lateral_layers(self):
lateral_layers = [keras.layers.Conv2D(256, 1)] * 3
with self.assertRaisesRegexp(
ValueError, "Expect lateral_layers to be a dict"
):
_ = FeaturePyramid(
min_level=2, max_level=5, lateral_layers=lateral_layers
)
lateral_layers = {
2: keras.layers.Conv2D(256, 1),
3: keras.layers.Conv2D(256, 1),
4: keras.layers.Conv2D(256, 1),
}
with self.assertRaisesRegexp(
ValueError, "with keys as .* [2, 3, 4, 5]"
):
_ = FeaturePyramid(
min_level=2, max_level=5, lateral_layers=lateral_layers
)
def test_invalid_output_layers(self):
output_layers = [keras.layers.Conv2D(256, 3)] * 3
with self.assertRaisesRegexp(
ValueError, "Expect output_layers to be a dict"
):
_ = FeaturePyramid(
min_level=2, max_level=5, output_layers=output_layers
)
output_layers = {
2: keras.layers.Conv2D(256, 3),
3: keras.layers.Conv2D(256, 3),
4: keras.layers.Conv2D(256, 3),
}
with self.assertRaisesRegexp(
ValueError, "with keys as .* [2, 3, 4, 5]"
):
_ = FeaturePyramid(
min_level=2, max_level=5, output_layers=output_layers
)
def test_invalid_input_features(self):
layer = FeaturePyramid(min_level=2, max_level=5)
c2 = np.ones([2, 64, 64, 3])
c3 = np.ones([2, 32, 32, 3])
c4 = np.ones([2, 16, 16, 3])
c5 = np.ones([2, 8, 8, 3])
inputs = {2: c2, 3: c3, 4: c4, 5: c5}
        # Build required for Keras 3
_ = layer(inputs)
list_input = [c2, c3, c4, c5]
with self.assertRaisesRegexp(
ValueError, "expects input features to be a dict"
):
layer(list_input)
dict_input_with_missing_feature = {2: c2, 3: c3, 4: c4}
with self.assertRaisesRegexp(
ValueError, "Expect feature keys.*[2, 3, 4, 5]"
):
layer(dict_input_with_missing_feature)
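# Illustrative usage sketch (not part of the original file): building an FPN on
# top of arbitrary backbone features, mirroring the dict convention exercised
# by the tests above. Channel counts are made-up example values.
def _example_feature_pyramid():
    layer = FeaturePyramid(min_level=2, max_level=5)
    features = {
        2: np.ones([1, 64, 64, 256]),
        3: np.ones([1, 32, 32, 512]),
        4: np.ones([1, 16, 16, 1024]),
        5: np.ones([1, 8, 8, 2048]),
    }
    # Returns a dict with the same keys; every level ends up with
    # `layer.num_channels` output channels.
    return layer(features)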
|
keras-cv/keras_cv/layers/feature_pyramid_test.py/0
|
{
"file_path": "keras-cv/keras_cv/layers/feature_pyramid_test.py",
"repo_id": "keras-cv",
"token_count": 2569
}
| 71 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Mapping
from typing import Optional
from typing import Tuple
from typing import Union
import tensorflow as tf
from tensorflow import keras
from keras_cv import bounding_box
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import assert_tf_keras
@keras_cv_export("keras_cv.layers.ROIGenerator")
class ROIGenerator(keras.layers.Layer):
"""
Generates region of interests (ROI, or proposal) from scores.
Mainly used in Region CNN (RCNN) networks.
    This works for multi-level inputs: both boxes and scores are dictionary
    inputs with the same set of keys.
    Users can configure top k and threshold differently in train and inference.
    Users can choose to combine all levels if NMS across all levels is desired.
    The following steps are applied to each pair of (boxes, scores):
    1) the top pre_nms_topk scores and boxes are sorted and selected per level
    2) NMS is applied and the top post_nms_topk scores and ROIs are selected
       per level
    3) scores and ROIs are combined across all levels
    4) the top post_nms_topk scores and ROIs are sorted and selected
Args:
bounding_box_format: a case-insensitive string.
For detailed information on the supported format, see the
[KerasCV bounding box documentation](https://keras.io/api/keras_cv/bounding_box/formats/).
pre_nms_topk_train: int. number of top k scoring proposals to keep
before applying NMS in training mode. When RPN is run on multiple
feature maps / levels (as in FPN) this number is per
feature map / level.
nms_score_threshold_train: float. score threshold to use for NMS in
training mode.
nms_iou_threshold_train: float. IOU threshold to use for NMS in training
mode.
post_nms_topk_train: int. number of top k scoring proposals to keep
after applying NMS in training mode. When RPN is run on multiple
feature maps / levels (as in FPN) this number is per
feature map / level.
pre_nms_topk_test: int. number of top k scoring proposals to keep before
applying NMS in inference mode. When RPN is run on multiple
feature maps / levels (as in FPN) this number is per
feature map / level.
nms_score_threshold_test: float. score threshold to use for NMS in
inference mode.
nms_iou_threshold_test: float. IOU threshold to use for NMS in inference
mode.
post_nms_topk_test: int. number of top k scoring proposals to keep after
applying NMS in inference mode. When RPN is run on multiple
feature maps / levels (as in FPN) this number is per
feature map / level.
Usage:
```python
roi_generator = ROIGenerator("xyxy")
boxes = {2: tf.random.normal([32, 5, 4])}
scores = {2: tf.random.normal([32, 5])}
rois, roi_scores = roi_generator(boxes, scores, training=True)
```
""" # noqa: E501
def __init__(
self,
bounding_box_format,
pre_nms_topk_train: int = 2000,
nms_score_threshold_train: float = 0.0,
nms_iou_threshold_train: float = 0.7,
post_nms_topk_train: int = 1000,
pre_nms_topk_test: int = 1000,
nms_score_threshold_test: float = 0.0,
nms_iou_threshold_test: float = 0.7,
post_nms_topk_test: int = 1000,
**kwargs,
):
assert_tf_keras("keras_cv.layers.ROIGenerator")
super().__init__(**kwargs)
self.bounding_box_format = bounding_box_format
self.pre_nms_topk_train = pre_nms_topk_train
self.nms_score_threshold_train = nms_score_threshold_train
self.nms_iou_threshold_train = nms_iou_threshold_train
self.post_nms_topk_train = post_nms_topk_train
self.pre_nms_topk_test = pre_nms_topk_test
self.nms_score_threshold_test = nms_score_threshold_test
self.nms_iou_threshold_test = nms_iou_threshold_test
self.post_nms_topk_test = post_nms_topk_test
self.built = True
def call(
self,
multi_level_boxes: Union[tf.Tensor, Mapping[int, tf.Tensor]],
multi_level_scores: Union[tf.Tensor, Mapping[int, tf.Tensor]],
training: Optional[bool] = None,
) -> Tuple[tf.Tensor, tf.Tensor]:
"""
Args:
multi_level_boxes: float Tensor. A dictionary or single Tensor of
                boxes, one per level. Shape is [batch_size, num_boxes, 4] per
                level, in `bounding_box_format`. The boxes from RPNs are usually
                encoded as deltas w.r.t. anchors; they need to be decoded before
                being passed in here.
multi_level_scores: float Tensor. A dictionary or single Tensor of
scores, typically confidence scores, one per level. Shape is
[batch_size, num_boxes] each level.
Returns:
rois: float Tensor of [batch_size, post_nms_topk, 4]
roi_scores: float Tensor of [batch_size, post_nms_topk]
"""
if training:
pre_nms_topk = self.pre_nms_topk_train
post_nms_topk = self.post_nms_topk_train
nms_score_threshold = self.nms_score_threshold_train
nms_iou_threshold = self.nms_iou_threshold_train
else:
pre_nms_topk = self.pre_nms_topk_test
post_nms_topk = self.post_nms_topk_test
nms_score_threshold = self.nms_score_threshold_test
nms_iou_threshold = self.nms_iou_threshold_test
def per_level_gen(boxes, scores):
scores_shape = scores.get_shape().as_list()
# scores can also be [batch_size, num_boxes, 1]
if len(scores_shape) == 3:
scores = tf.squeeze(scores, axis=-1)
_, num_boxes = scores.get_shape().as_list()
level_pre_nms_topk = min(num_boxes, pre_nms_topk)
level_post_nms_topk = min(num_boxes, post_nms_topk)
scores, sorted_indices = tf.nn.top_k(
scores, k=level_pre_nms_topk, sorted=True
)
boxes = tf.gather(boxes, sorted_indices, batch_dims=1)
# convert from input format to yxyx for the TF NMS operation
boxes = bounding_box.convert_format(
boxes,
source=self.bounding_box_format,
target="yxyx",
)
# TODO(tanzhenyu): consider supporting soft / batched nms for accl
selected_indices, num_valid = tf.image.non_max_suppression_padded(
boxes,
scores,
max_output_size=level_post_nms_topk,
iou_threshold=nms_iou_threshold,
score_threshold=nms_score_threshold,
pad_to_max_output_size=True,
sorted_input=True,
canonicalized_coordinates=True,
)
# convert back to input format
boxes = bounding_box.convert_format(
boxes,
source="yxyx",
target=self.bounding_box_format,
)
level_rois = tf.gather(boxes, selected_indices, batch_dims=1)
level_roi_scores = tf.gather(scores, selected_indices, batch_dims=1)
level_rois = level_rois * tf.cast(
tf.reshape(tf.range(level_post_nms_topk), [1, -1, 1])
< tf.reshape(num_valid, [-1, 1, 1]),
level_rois.dtype,
)
level_roi_scores = level_roi_scores * tf.cast(
tf.reshape(tf.range(level_post_nms_topk), [1, -1])
< tf.reshape(num_valid, [-1, 1]),
level_roi_scores.dtype,
)
return level_rois, level_roi_scores
if not isinstance(multi_level_boxes, dict):
return per_level_gen(multi_level_boxes, multi_level_scores)
rois = []
roi_scores = []
for level in sorted(multi_level_scores.keys()):
boxes = multi_level_boxes[level]
scores = multi_level_scores[level]
level_rois, level_roi_scores = per_level_gen(boxes, scores)
rois.append(level_rois)
roi_scores.append(level_roi_scores)
rois = tf.concat(rois, axis=1)
roi_scores = tf.concat(roi_scores, axis=1)
_, num_valid_rois = roi_scores.get_shape().as_list()
overall_top_k = min(num_valid_rois, post_nms_topk)
roi_scores, sorted_indices = tf.nn.top_k(
roi_scores, k=overall_top_k, sorted=True
)
rois = tf.gather(rois, sorted_indices, batch_dims=1)
return rois, roi_scores
def get_config(self):
config = {
"bounding_box_format": self.bounding_box_format,
"pre_nms_topk_train": self.pre_nms_topk_train,
"nms_score_threshold_train": self.nms_score_threshold_train,
"nms_iou_threshold_train": self.nms_iou_threshold_train,
"post_nms_topk_train": self.post_nms_topk_train,
"pre_nms_topk_test": self.pre_nms_topk_test,
"nms_score_threshold_test": self.nms_score_threshold_test,
"nms_iou_threshold_test": self.nms_iou_threshold_test,
"post_nms_topk_test": self.post_nms_topk_test,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
|
keras-cv/keras_cv/layers/object_detection/roi_generator.py/0
|
{
"file_path": "keras-cv/keras_cv/layers/object_detection/roi_generator.py",
"repo_id": "keras-cv",
"token_count": 4625
}
| 72 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.layers.object_detection_3d import voxel_utils
EPSILON = 1e-4
VOXEL_FEATURE_MIN = -1000
def compute_point_voxel_id(point_voxel_xyz, voxel_spatial_size):
"""Computes point voxel IDs.
Args:
point_voxel_xyz: [B, N, dim] voxel coordinates for each point
voxel_spatial_size: voxel spatial size
Returns:
point_voxel_id: [B, N] unique ID of each voxel.
"""
batch_size, _, dim = list(point_voxel_xyz.shape)
if batch_size is None:
batch_size = ops.shape(point_voxel_xyz)[0]
assert dim == len(voxel_spatial_size), f"{point_voxel_xyz.shape}"
voxel_spatial_size_prod = [
np.prod(voxel_spatial_size[i:]).item() for i in range(dim)
]
voxel_spatial_size_prod_shift = voxel_spatial_size_prod[1:] + [1]
point_voxel_xyz_multiplied = point_voxel_xyz * ops.array(
voxel_spatial_size_prod_shift, dtype=point_voxel_xyz.dtype
)
# [B, N]
point_voxel_id = ops.sum(point_voxel_xyz_multiplied, axis=-1)
if batch_size == 1:
return point_voxel_id
batch_multiplier = (
ops.arange(batch_size, dtype="int32") * voxel_spatial_size_prod[0]
)
batch_multiplier = ops.cast(
ops.expand_dims(batch_multiplier, axis=-1), point_voxel_id.dtype
)
return point_voxel_id + batch_multiplier
class PointToVoxel(keras.layers.Layer):
"""Voxelization layer."""
def __init__(self, voxel_size, spatial_size, **kwargs):
"""Voxelization layer constructor.
Args:
voxel_size: voxel size in each xyz dimension.
spatial_size: max/min range in each dim in global coordinate frame.
name: layer name
**kwargs: additional key value args (e.g. dtype) passed to the parent
class.
"""
super().__init__(**kwargs)
dim = len(voxel_size)
assert len(spatial_size) == 2 * dim, f"{spatial_size}"
self._voxel_size = voxel_size
self._spatial_size = spatial_size
self._voxel_spatial_size = voxel_utils.compute_voxel_spatial_size(
spatial_size, self._voxel_size
)
# TODO(tanzhenyu): consider using keras masking.
def call(self, point_xyz, point_mask):
"""Dynamically voxelizes points.
B: batch_size.
N: number of points.
dim: the input dimension.
Args:
point_xyz: [B, N, dim] point xyz in global coordinate relative to sdc.
point_mask: [B, N] valid point mask.
Returns:
point_voxel_feature: [B, N, dim] voxel feature (delta_{x,y,z}).
            point_voxel_id: [B, N] voxel ID of each point. Invalid voxels have
                IDs set to 0.
            point_voxel_mask: [B, N] valid point voxel boolean mask.
"""
# [B, N, dim]
# convert from point coordinate to voxel index
point_voxel_xyz_float = ops.floor(
point_xyz / ops.array(self._voxel_size, point_xyz.dtype) + 0.5
)
# [B, N, dim]
# delta to the nearest voxel
point_voxel_feature = ops.cast(
point_xyz
- (
point_voxel_xyz_float
* ops.array(self._voxel_size, dtype=point_voxel_xyz_float.dtype)
),
point_xyz.dtype,
)
# [B, N, dim]
point_voxel_xyz_int = ops.cast(point_voxel_xyz_float, "int32")
# [dim]
# get xmin, ymin, zmin
voxel_origin = voxel_utils.compute_voxel_origin(
self._spatial_size, self._voxel_size
)
# [B, N, dim]
# convert point voxel to positive voxel index
point_voxel_xyz = point_voxel_xyz_int - ops.cast(
ops.expand_dims(ops.expand_dims(voxel_origin, axis=0), axis=0),
point_voxel_xyz_int.dtype,
)
# [B, N]
# remove points outside the voxel boundary
point_voxel_mask = ops.logical_and(
point_voxel_xyz >= 0,
point_voxel_xyz
< ops.array(self._voxel_spatial_size, dtype=point_voxel_xyz.dtype),
)
point_voxel_mask = ops.all(point_voxel_mask, axis=-1)
point_voxel_mask = ops.logical_and(point_voxel_mask, point_mask)
# [B, N]
point_voxel_mask_int = ops.cast(point_voxel_mask, dtype="int32")
# [B, N] for voxel_id, int constant for num_voxels, in the range of
# [0, B * num_voxels]
point_voxel_id = ops.cast(
compute_point_voxel_id(point_voxel_xyz, self._voxel_spatial_size),
point_voxel_mask_int.dtype,
)
# [B, N]
point_voxel_id = point_voxel_id * point_voxel_mask_int
return point_voxel_feature, point_voxel_id, point_voxel_mask
@keras_cv_export("keras_cv.layers.DynamicVoxelization")
class DynamicVoxelization(keras.layers.Layer):
"""Dynamic voxelization and pool layer.
    This layer assigns points to voxels, concatenates the per-point voxel
    offsets with the point features, feeds them through a small point net,
    and max-pools all point features inside each voxel.
Args:
voxel_size: the x, y, z dimension of each voxel.
spatial_size: the x, y, z boundary of voxels
Returns:
voxelized feature, a float Tensor.
"""
def __init__(self, voxel_size, spatial_size, **kwargs):
super().__init__(**kwargs)
self._voxelization_layer = PointToVoxel(
voxel_size=voxel_size, spatial_size=spatial_size
)
self._voxel_size = voxel_size
self._spatial_size = spatial_size
self._voxel_spatial_size = voxel_utils.compute_voxel_spatial_size(
spatial_size, self._voxel_size
)
self._voxel_spatial_size_volume = np.prod(
self._voxel_spatial_size
).item()
self.point_net_dense = keras.layers.Dense(128)
self.point_net_norm = keras.layers.BatchNormalization()
self.point_net_activation = keras.layers.ReLU()
self.built = True
def call(self, point_xyz, point_feature, point_mask, training=True):
"""Voxelizes and learns voxel features with a point net.
B: batch_size.
N: number of points.
dim: the input dimension.
Args:
point_xyz: [B, N, 3] point xyz in global coordinate.
point_feature: [B, N, dim] point feature inputs.
point_mask: [B, N] valid point mask.
training: whether it is in training mode.
Returns:
voxel_feature: [B, x_max, y_max, {z_max,}, mlp_dimension] voxel
features. If z_max is 1, z-dim is squeezed.
"""
(
point_voxel_feature,
point_voxel_id,
point_voxel_mask,
) = self._voxelization_layer(point_xyz=point_xyz, point_mask=point_mask)
# TODO(tanzhenyu): move compute_point_voxel_id to here, so PointToVoxel
# layer is more generic.
point_feature = ops.concatenate(
[point_feature, point_voxel_feature], axis=-1
)
batch_size = list(point_feature.shape)[0] or ops.shape(point_feature)[0]
# [B, N, 1]
point_mask_float = ops.expand_dims(
ops.cast(point_voxel_mask, point_feature.dtype), axis=-1
)
# [B, N, dim]
point_feature = point_feature * point_mask_float
point_feature = self.point_net_dense(point_feature)
point_feature = self.point_net_norm(
point_feature, training=training, mask=point_mask
)
point_feature = self.point_net_activation(point_feature)
# [B, N, new_dim]
point_feature = point_feature * point_mask_float
new_dim = list(point_feature.shape)[-1]
point_feature = ops.reshape(point_feature, [-1, new_dim])
point_voxel_id = ops.cast(ops.reshape(point_voxel_id, [-1]), "int32")
# [B * num_voxels, new_dim]
voxel_feature = ops.segment_max(
point_feature,
point_voxel_id,
batch_size * self._voxel_spatial_size_volume,
)
# unsorted_segment_max sets empty values to -inf(float).
voxel_feature_valid_mask = voxel_feature > VOXEL_FEATURE_MIN
voxel_feature = voxel_feature * ops.cast(
voxel_feature_valid_mask, dtype=voxel_feature.dtype
)
out_shape = [batch_size] + self._voxel_spatial_size + [new_dim]
if out_shape[-2] == 1:
out_shape = out_shape[:-2] + [out_shape[-1]]
voxel_feature = ops.reshape(voxel_feature, out_shape)
return voxel_feature
def compute_output_shape(self, input_shape):
return tuple([input_shape[0]] + self._voxel_spatial_size[:-1] + [128])
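# Illustrative usage sketch (not part of the original file): voxelizing a small
# random point cloud. The voxel and spatial sizes are made-up values; with a
# single z voxel the z dimension is squeezed, so the output has shape
# [batch, x_voxels, y_voxels, 128].
def _example_dynamic_voxelization():
    batch_size, num_points = 2, 100
    layer = DynamicVoxelization(
        voxel_size=[0.5, 0.5, 6.0],
        spatial_size=[-10.0, 10.0, -10.0, 10.0, -3.0, 3.0],
    )
    point_xyz = np.random.uniform(
        low=-10.0, high=10.0, size=(batch_size, num_points, 3)
    ).astype("float32")
    point_feature = np.random.uniform(
        size=(batch_size, num_points, 4)
    ).astype("float32")
    point_mask = np.ones((batch_size, num_points), dtype=bool)
    return layer(point_xyz, point_feature, point_mask, training=False)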
|
keras-cv/keras_cv/layers/object_detection_3d/voxelization.py/0
|
{
"file_path": "keras-cv/keras_cv/layers/object_detection_3d/voxelization.py",
"repo_id": "keras-cv",
"token_count": 4461
}
| 73 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
from absl.testing import parameterized
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.layers.preprocessing.equalization import Equalization
from keras_cv.tests.test_case import TestCase
class EqualizationTest(TestCase):
def test_return_shapes(self):
xs = 255 * np.ones((2, 512, 512, 3), dtype=np.int32)
layer = Equalization(value_range=(0, 255))
xs = layer(xs)
self.assertEqual(xs.shape, (2, 512, 512, 3))
self.assertAllEqual(xs, 255 * np.ones((2, 512, 512, 3)))
@pytest.mark.tf_keras_only
def test_return_shapes_inside_model(self):
layer = Equalization(value_range=(0, 255))
inp = keras.layers.Input(shape=[512, 512, 5])
out = layer(inp)
model = keras.models.Model(inp, out)
self.assertEqual(model.output_shape, (None, 512, 512, 5))
def test_equalizes_to_all_bins(self):
xs = np.random.uniform(size=(2, 512, 512, 3), low=0, high=255).astype(
np.float32
)
layer = Equalization(value_range=(0, 255))
xs = layer(xs)
for i in range(0, 256):
self.assertTrue(np.any(ops.convert_to_numpy(xs) == i))
@parameterized.named_parameters(
("float32", np.float32), ("int32", np.int32), ("int64", np.int64)
)
def test_input_dtypes(self, dtype):
xs = np.random.uniform(size=(2, 512, 512, 3), low=0, high=255).astype(
dtype
)
layer = Equalization(value_range=(0, 255))
xs = ops.convert_to_numpy(layer(xs))
for i in range(0, 256):
self.assertTrue(np.any(xs == i))
self.assertAllInRange(xs, 0, 255)
@parameterized.named_parameters(("0_255", 0, 255), ("0_1", 0, 1))
def test_output_range(self, lower, upper):
xs = np.random.uniform(
size=(2, 512, 512, 3), low=lower, high=upper
).astype(np.float32)
layer = Equalization(value_range=(lower, upper))
xs = ops.convert_to_numpy(layer(xs))
self.assertAllInRange(xs, lower, upper)
|
keras-cv/keras_cv/layers/preprocessing/equalization_test.py/0
|
{
"file_path": "keras-cv/keras_cv/layers/preprocessing/equalization_test.py",
"repo_id": "keras-cv",
"token_count": 1121
}
| 74 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv import core
from keras_cv.api_export import keras_cv_export
from keras_cv.layers import preprocessing as cv_preprocessing
from keras_cv.layers.preprocessing.random_augmentation_pipeline import (
RandomAugmentationPipeline,
)
from keras_cv.utils import preprocessing as preprocessing_utils
@keras_cv_export("keras_cv.layers.RandAugment")
class RandAugment(RandomAugmentationPipeline):
"""RandAugment performs the Rand Augment operation on input images.
This layer can be thought of as an all-in-one image augmentation layer. The
policy implemented by this layer has been benchmarked extensively and is
effective on a wide variety of datasets.
The policy operates as follows:
For each augmentation in the range `[0, augmentations_per_image]`,
the policy selects a random operation from a list of operations.
    It then samples a random number and, if that number is less than
    `rate`, applies the operation to the given image.
References:
- [RandAugment](https://arxiv.org/abs/1909.13719)
Args:
value_range: the range of values the incoming images will have.
Represented as a two number tuple written [low, high].
This is typically either `[0, 1]` or `[0, 255]` depending
on how your preprocessing pipeline is set up.
augmentations_per_image: the number of layers to use in the rand augment
policy, defaults to `3`.
magnitude: magnitude is the mean of the normal distribution used to
sample the magnitude used for each data augmentation. Magnitude
should be a float in the range `[0, 1]`. A magnitude of `0`
indicates that the augmentations are as weak as possible (not
recommended), while a value of `1.0` implies use of the strongest
possible augmentation. All magnitudes are clipped to the range
`[0, 1]` after sampling. Defaults to `0.5`.
magnitude_stddev: the standard deviation to use when drawing values for
the perturbations. Keep in mind magnitude will still be clipped to
the range `[0, 1]` after samples are drawn from the normal
distribution. Defaults to `0.15`.
rate: the rate at which to apply each augmentation. This parameter is
applied on a per-distortion layer, per image. Should be in the range
`[0, 1]`. To reproduce the original RandAugment paper results, set
this to `10/11`. The original `RandAugment` paper includes an
Identity transform. By setting the rate to 10/11 in our
implementation, the behavior is identical to sampling an Identity
augmentation 10/11th of the time. Defaults to `1.0`.
geometric: whether to include geometric augmentations. This
should be set to False when performing object detection. Defaults to
True.
Usage:
```python
(x_test, y_test), _ = keras.datasets.cifar10.load_data()
rand_augment = keras_cv.layers.RandAugment(
value_range=(0, 255), augmentations_per_image=3, magnitude=0.5
)
x_test = rand_augment(x_test)
```
"""
def __init__(
self,
value_range,
augmentations_per_image=3,
magnitude=0.5,
magnitude_stddev=0.15,
rate=10 / 11,
geometric=True,
seed=None,
**kwargs,
):
# As an optimization RandAugment makes all internal layers use (0, 255)
# and we handle range transformation at the _augment level.
if magnitude < 0.0 or magnitude > 1:
raise ValueError(
"`magnitude` must be in the range [0, 1], got "
f"`magnitude={magnitude}`"
)
if magnitude_stddev < 0.0 or magnitude_stddev > 1:
            raise ValueError(
                "`magnitude_stddev` must be in the range [0, 1], got "
                f"`magnitude_stddev={magnitude_stddev}`"
            )
super().__init__(
layers=RandAugment.get_standard_policy(
(0, 255),
magnitude,
magnitude_stddev,
geometric=geometric,
seed=seed,
),
augmentations_per_image=augmentations_per_image,
rate=rate,
**kwargs,
seed=seed,
)
self.magnitude = float(magnitude)
self.value_range = value_range
self.seed = seed
self.geometric = geometric
self.magnitude_stddev = float(magnitude_stddev)
def _augment(self, sample):
sample["images"] = preprocessing_utils.transform_value_range(
sample["images"], self.value_range, (0, 255)
)
result = super()._augment(sample)
result["images"] = preprocessing_utils.transform_value_range(
result["images"], (0, 255), self.value_range
)
return result
@staticmethod
def get_standard_policy(
value_range, magnitude, magnitude_stddev, geometric=True, seed=None
):
policy = create_rand_augment_policy(magnitude, magnitude_stddev)
auto_contrast = cv_preprocessing.AutoContrast(
**policy["auto_contrast"], value_range=value_range, seed=seed
)
equalize = cv_preprocessing.Equalization(
**policy["equalize"], value_range=value_range, seed=seed
)
solarize = cv_preprocessing.Solarization(
**policy["solarize"], value_range=value_range, seed=seed
)
color = cv_preprocessing.RandomColorDegeneration(
**policy["color"], seed=seed
)
contrast = cv_preprocessing.RandomContrast(
**policy["contrast"], value_range=value_range, seed=seed
)
brightness = cv_preprocessing.RandomBrightness(
**policy["brightness"], value_range=value_range, seed=seed
)
layers = [
auto_contrast,
equalize,
solarize,
color,
contrast,
brightness,
]
if geometric:
shear_x = cv_preprocessing.RandomShear(
**policy["shear_x"], seed=seed
)
shear_y = cv_preprocessing.RandomShear(
**policy["shear_y"], seed=seed
)
translate_x = cv_preprocessing.RandomTranslation(
**policy["translate_x"], seed=seed
)
translate_y = cv_preprocessing.RandomTranslation(
**policy["translate_y"], seed=seed
)
layers += [shear_x, shear_y, translate_x, translate_y]
return layers
def get_config(self):
config = super().get_config()
config.update(
{
"value_range": self.value_range,
"augmentations_per_image": self.augmentations_per_image,
"magnitude": self.magnitude,
"magnitude_stddev": self.magnitude_stddev,
"rate": self.rate,
"geometric": self.geometric,
"seed": self.seed,
}
)
# layers is recreated in the constructor
del config["layers"]
return config
def auto_contrast_policy(magnitude, magnitude_stddev):
return {}
def equalize_policy(magnitude, magnitude_stddev):
return {}
def solarize_policy(magnitude, magnitude_stddev):
# We cap additions at 110, because if we add more than 110 we will be nearly
# nullifying the information contained in the image, making the model train
# on noise
maximum_addition_value = 110
addition_factor = core.NormalFactorSampler(
mean=magnitude * maximum_addition_value,
stddev=magnitude_stddev * maximum_addition_value,
min_value=0,
max_value=maximum_addition_value,
)
threshold_factor = core.NormalFactorSampler(
mean=(255 - (magnitude * 255)),
stddev=(magnitude_stddev * 255),
min_value=0,
max_value=255,
)
return {
"addition_factor": addition_factor,
"threshold_factor": threshold_factor,
}
def color_policy(magnitude, magnitude_stddev):
factor = core.NormalFactorSampler(
mean=magnitude,
stddev=magnitude_stddev,
min_value=0,
max_value=1,
)
return {"factor": factor}
def contrast_policy(magnitude, magnitude_stddev):
# TODO(lukewood): should we integrate RandomContrast with `factor`?
# RandomContrast layer errors when factor=0
factor = max(magnitude, 0.001)
return {"factor": factor}
def brightness_policy(magnitude, magnitude_stddev):
# TODO(lukewood): should we integrate RandomBrightness with `factor`?
return {"factor": magnitude}
def shear_x_policy(magnitude, magnitude_stddev):
factor = core.NormalFactorSampler(
mean=magnitude,
stddev=magnitude_stddev,
min_value=0,
max_value=1,
)
return {"x_factor": factor, "y_factor": 0}
def shear_y_policy(magnitude, magnitude_stddev):
factor = core.NormalFactorSampler(
mean=magnitude,
stddev=magnitude_stddev,
min_value=0,
max_value=1,
)
return {"x_factor": 0, "y_factor": factor}
def translate_x_policy(magnitude, magnitude_stddev):
# TODO(lukewood): should we integrate RandomTranslation with `factor`?
return {"width_factor": magnitude, "height_factor": 0}
def translate_y_policy(magnitude, magnitude_stddev):
# TODO(lukewood): should we integrate RandomTranslation with `factor`?
return {"width_factor": 0, "height_factor": magnitude}
POLICY_PAIRS = {
"auto_contrast": auto_contrast_policy,
"equalize": equalize_policy,
"solarize": solarize_policy,
"color": color_policy,
"contrast": contrast_policy,
"brightness": brightness_policy,
"shear_x": shear_x_policy,
"shear_y": shear_y_policy,
"translate_x": translate_x_policy,
"translate_y": translate_y_policy,
}
def create_rand_augment_policy(magnitude, magnitude_stddev):
result = {}
for name, policy_fn in POLICY_PAIRS.items():
result[name] = policy_fn(magnitude, magnitude_stddev)
return result
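# Illustrative usage sketch (not part of the original file): the standard policy
# can also be consumed directly, e.g. to assemble a custom pipeline with a
# different rate. The argument values are arbitrary examples.
def _example_custom_pipeline():
    layers = RandAugment.get_standard_policy(
        value_range=(0, 255), magnitude=0.5, magnitude_stddev=0.15
    )
    return RandomAugmentationPipeline(
        layers=layers, augmentations_per_image=2, rate=0.9
    )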
|
keras-cv/keras_cv/layers/preprocessing/rand_augment.py/0
|
{
"file_path": "keras-cv/keras_cv/layers/preprocessing/rand_augment.py",
"repo_id": "keras-cv",
"token_count": 4559
}
| 75 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.api_export import keras_cv_export
from keras_cv.layers import preprocessing
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
VectorizedBaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing as preprocessing_utils
@keras_cv_export("keras_cv.layers.RandomColorJitter")
class RandomColorJitter(VectorizedBaseImageAugmentationLayer):
"""RandomColorJitter class randomly apply brightness, contrast, saturation
and hue image processing operation sequentially and randomly on the
input. It expects input as RGB image. The expected image should be
`(0-255)` pixel ranges.
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `channels_last` format
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `channels_last` format
Args:
value_range: the range of values the incoming images will have.
Represented as a two number tuple written [low, high].
This is typically either `[0, 1]` or `[0, 255]` depending
on how your preprocessing pipeline is set up.
brightness_factor: Float or a list/tuple of 2 floats between -1.0
and 1.0. The factor is used to determine the lower bound and
upper bound of the brightness adjustment. A float value will be
chosen randomly between the limits. When -1.0 is chosen, the
output image will be black, and when 1.0 is chosen, the image
will be fully white. When only one float is provided, eg, 0.2,
then -0.2 will be used for lower bound and 0.2 will be used for
upper bound.
contrast_factor: A positive float represented as fraction of value,
or a tuple of size 2 representing lower and upper bound. When
represented as a single float, lower = upper. The contrast factor
will be randomly picked between `[1.0 - lower, 1.0 + upper]`.
saturation_factor: Either a tuple of two floats or a single float.
`factor` controls the extent to which the image saturation is
impacted. `factor=0.5` makes this layer perform a no-op operation.
`factor=0.0` makes the image to be fully grayscale. `factor=1.0`
makes the image to be fully saturated.
        hue_factor: A tuple of two floats, a single float or
            `keras_cv.FactorSampler`. `factor` controls the extent to which
            the image hue is impacted. `factor=0.0` makes this layer perform
            a no-op operation, while a value of 1.0 performs the most
            aggressive hue adjustment available. If a tuple is used, a `factor` is
sampled between the two values for every image augmented. If a
single float is used, a value between `0.0` and the passed float is
sampled. In order to ensure the value is always the same, please
pass a tuple with two identical floats: `(0.5, 0.5)`.
seed: Integer. Used to create a random seed.
Usage:
```python
(images, labels), _ = keras.datasets.cifar10.load_data()
color_jitter = keras_cv.layers.RandomColorJitter(
value_range=(0, 255),
brightness_factor=(-0.2, 0.5),
contrast_factor=(0.5, 0.9),
saturation_factor=(0.5, 0.9),
hue_factor=(0.5, 0.9),
)
augmented_images = color_jitter(images)
```
"""
def __init__(
self,
value_range,
brightness_factor,
contrast_factor,
saturation_factor,
hue_factor,
seed=None,
**kwargs,
):
super().__init__(**kwargs)
self.value_range = value_range
self.brightness_factor = brightness_factor
self.contrast_factor = contrast_factor
self.saturation_factor = saturation_factor
self.hue_factor = hue_factor
self.seed = seed
self.random_brightness = preprocessing.RandomBrightness(
factor=self.brightness_factor, value_range=(0, 255), seed=self.seed
)
self.random_contrast = preprocessing.RandomContrast(
factor=self.contrast_factor, value_range=(0, 255), seed=self.seed
)
self.random_saturation = preprocessing.RandomSaturation(
factor=self.saturation_factor, seed=self.seed
)
self.random_hue = preprocessing.RandomHue(
factor=self.hue_factor, value_range=(0, 255), seed=self.seed
)
def augment_ragged_image(self, image, transformation, **kwargs):
return self.augment_images(
images=image, transformations=transformation, **kwargs
)
def augment_images(self, images, transformations=None, **kwargs):
images = preprocessing_utils.transform_value_range(
images,
original_range=self.value_range,
target_range=(0, 255),
dtype=self.compute_dtype,
)
images = self.random_brightness(images)
images = self.random_contrast(images)
images = self.random_saturation(images)
images = self.random_hue(images)
images = preprocessing_utils.transform_value_range(
images,
original_range=(0, 255),
target_range=self.value_range,
dtype=self.compute_dtype,
)
return images
def augment_labels(self, labels, transformations, **kwargs):
return labels
def augment_segmentation_masks(
self, segmentation_masks, transformations, **kwargs
):
return segmentation_masks
def augment_bounding_boxes(self, bounding_boxes, transformations, **kwargs):
return bounding_boxes
def get_config(self):
config = super().get_config()
config.update(
{
"value_range": self.value_range,
"brightness_factor": self.brightness_factor,
"contrast_factor": self.contrast_factor,
"saturation_factor": self.saturation_factor,
"hue_factor": self.hue_factor,
"seed": self.seed,
}
)
return config
@classmethod
def from_config(cls, config):
return cls(**config)
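

# --- Hypothetical usage sketch (not part of the original file) ---
# Internally the layer maps inputs into the [0, 255] range, applies the four
# sub-layers in sequence, then maps the result back to `value_range`.
# The random data and single-float factors below are illustrative only.
def _example_random_color_jitter():
    import numpy as np  # assumed available alongside TensorFlow

    images = np.random.uniform(0.0, 1.0, size=(2, 8, 8, 3)).astype("float32")
    layer = RandomColorJitter(
        value_range=(0, 1),
        brightness_factor=0.2,
        contrast_factor=0.2,
        saturation_factor=(0.4, 0.6),
        hue_factor=0.2,
        seed=1337,
    )
    # Output keeps the input shape and is mapped back to value_range=(0, 1).
    return layer(images)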
|
keras-cv/keras_cv/layers/preprocessing/random_color_jitter.py/0
|
{
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_color_jitter.py",
"repo_id": "keras-cv",
"token_count": 2786
}
| 76 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing
@keras_cv_export("keras_cv.layers.RandomJpegQuality")
class RandomJpegQuality(BaseImageAugmentationLayer):
"""Applies Random Jpeg compression artifacts to an image.
Performs the jpeg compression algorithm on the image. This layer can be used
in order to ensure your model is robust to artifacts introduced by JPEG
compression.
Args:
factor: 2 element tuple or 2 element list. During augmentation, a random
number is drawn from the factor distribution. This value is passed to
`tf.image.adjust_jpeg_quality()`.
seed: Integer. Used to create a random seed.
Usage:
```python
layer = keras_cv.RandomJpegQuality(factor=(75, 100)))
(images, labels), _ = keras.datasets.cifar10.load_data()
augmented_images = layer(images)
```
"""
def __init__(self, factor, seed=None, **kwargs):
super().__init__(**kwargs)
if isinstance(factor, (float, int)):
raise ValueError(
"RandomJpegQuality() expects factor to be a 2 element "
"tuple, list or a `keras_cv.FactorSampler`. "
"RandomJpegQuality() received `factor={factor}`."
)
self.seed = seed
self.factor = preprocessing.parse_factor(
factor,
min_value=0,
max_value=100,
param_name="factor",
seed=self.seed,
)
def get_random_transformation(self, **kwargs):
return self.factor(dtype=tf.int32)
def augment_image(self, image, transformation=None, **kwargs):
jpeg_quality = transformation
return tf.image.adjust_jpeg_quality(image, jpeg_quality)
def augment_bounding_boxes(self, bounding_boxes, **kwargs):
return bounding_boxes
def augment_label(self, label, transformation=None, **kwargs):
return label
def augment_segmentation_mask(
self, segmentation_mask, transformation, **kwargs
):
return segmentation_mask
def get_config(self):
config = super().get_config()
config.update({"factor": self.factor, "seed": self.seed})
return config
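

# --- Hypothetical usage sketch (not part of the original file) ---
# A quality value is sampled per image from `factor` and forwarded to
# `tf.image.adjust_jpeg_quality`; lower qualities yield stronger artifacts.
def _example_random_jpeg_quality():
    import numpy as np  # assumed available

    layer = RandomJpegQuality(factor=(50, 100), seed=1337)
    # Float images are expected in the [0, 1] range by adjust_jpeg_quality.
    images = np.random.uniform(0.0, 1.0, size=(2, 64, 64, 3)).astype("float32")
    return layer(images)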
|
keras-cv/keras_cv/layers/preprocessing/random_jpeg_quality.py/0
|
{
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_jpeg_quality.py",
"repo_id": "keras-cv",
"token_count": 1087
}
| 77 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
# In order to support both unbatched and batched inputs, the horizontal
# and vertical axis is reverse indexed
H_AXIS = -3
W_AXIS = -2
@keras_cv_export("keras_cv.layers.Rescaling")
class Rescaling(BaseImageAugmentationLayer):
"""A preprocessing layer which rescales input values to a new range.
This layer rescales every value of an input (often an image) by multiplying
by `scale` and adding `offset`.
For instance:
    1. To rescale an input in the `[0, 255]` range
    to be in the `[0, 1]` range, you would pass `scale=1./255`.
    2. To rescale an input in the `[0, 255]` range to be in the `[-1, 1]`
    range, you would pass `scale=1./127.5, offset=-1`.
Inputs can be of integer or floating point dtype, and by default the layer
will output floats.
Input shape:
Arbitrary.
Output shape:
Same as input.
Args:
scale: Float, the scale to apply to the inputs.
offset: Float, the offset to apply to the inputs.
"""
def __init__(self, scale, offset=0.0, **kwargs):
super().__init__(**kwargs)
self.scale = scale
self.offset = offset
def augment_image(self, image, transformation, **kwargs):
dtype = self.compute_dtype
scale = tf.cast(self.scale, dtype)
offset = tf.cast(self.offset, dtype)
return tf.cast(image, dtype) * scale + offset
def augment_label(self, label, transformation, **kwargs):
return label
def augment_segmentation_mask(
self, segmentation_mask, transformation, **kwargs
):
return segmentation_mask
def augment_bounding_boxes(
self, bounding_boxes, transformation=None, **kwargs
):
return bounding_boxes
def get_config(self):
config = {
"scale": self.scale,
"offset": self.offset,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
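

# --- Hypothetical usage sketch (not part of the original file) ---
# The two configurations from the docstring, applied to a constant image:
def _example_rescaling():
    import numpy as np  # assumed available

    images = np.full((2, 4, 4, 3), 255.0, dtype="float32")
    to_unit = Rescaling(scale=1.0 / 255)  # [0, 255] -> [0, 1]
    to_signed = Rescaling(scale=1.0 / 127.5, offset=-1)  # [0, 255] -> [-1, 1]
    # Both outputs are all ones for a constant 255-valued input.
    return to_unit(images), to_signed(images)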
|
keras-cv/keras_cv/layers/preprocessing/rescaling.py/0
|
{
"file_path": "keras-cv/keras_cv/layers/preprocessing/rescaling.py",
"repo_id": "keras-cv",
"token_count": 975
}
| 78 |
# Copyright 2022 Waymo LLC.
#
# Licensed under the terms in https://github.com/keras-team/keras-cv/blob/master/keras_cv/layers/preprocessing_3d/waymo/LICENSE # noqa: E501
|
keras-cv/keras_cv/layers/preprocessing_3d/waymo/__init__.py/0
|
{
"file_path": "keras-cv/keras_cv/layers/preprocessing_3d/waymo/__init__.py",
"repo_id": "keras-cv",
"token_count": 65
}
| 79 |
# Copyright 2022 Waymo LLC.
#
# Licensed under the terms in https://github.com/keras-team/keras-cv/blob/master/keras_cv/layers/preprocessing_3d/waymo/LICENSE # noqa: E501
import os
import numpy as np
import pytest
import tensorflow as tf
from keras_cv.layers.preprocessing_3d import base_augmentation_layer_3d
from keras_cv.layers.preprocessing_3d.waymo.group_points_by_bounding_boxes import ( # noqa: E501
GroupPointsByBoundingBoxes,
)
from keras_cv.tests.test_case import TestCase
POINT_CLOUDS = base_augmentation_layer_3d.POINT_CLOUDS
BOUNDING_BOXES = base_augmentation_layer_3d.BOUNDING_BOXES
OBJECT_POINT_CLOUDS = base_augmentation_layer_3d.OBJECT_POINT_CLOUDS
OBJECT_BOUNDING_BOXES = base_augmentation_layer_3d.OBJECT_BOUNDING_BOXES
class GroupPointsByBoundingBoxesTest(TestCase):
def test_augment_point_clouds_and_bounding_boxes(self):
add_layer = GroupPointsByBoundingBoxes(
label_index=1,
min_points_per_bounding_boxes=1,
max_points_per_bounding_boxes=2,
)
point_clouds = np.array(
[
[
[0, 1, 2, 3, 4],
[10, 1, 2, 3, 4],
[0, -1, 2, 3, 4],
[100, 100, 2, 3, 4],
]
]
* 2
).astype("float32")
bounding_boxes = np.array(
[
[
[0, 0, 0, 4, 4, 4, 0, 1],
[10, 1, 2, 2, 2, 2, 0, 1],
[20, 20, 20, 1, 1, 1, 0, 1],
]
]
* 2
).astype("float32")
inputs = {
POINT_CLOUDS: point_clouds,
BOUNDING_BOXES: bounding_boxes,
"dummy_item": np.random.uniform(size=(2, 2, 2)),
}
outputs = add_layer(inputs)
object_point_clouds = np.array(
[
[
[[0, 1, 2, 3, 4], [0, -1, 2, 3, 4]],
[[10, 1, 2, 3, 4], [0, 0, 0, 0, 0]],
]
]
* 2
).astype("float32")
object_bounding_boxes = np.array(
[[[0, 0, 0, 4, 4, 4, 0, 1], [10, 1, 2, 2, 2, 2, 0, 1]]] * 2
).astype("float32")
self.assertAllClose(inputs[POINT_CLOUDS], outputs[POINT_CLOUDS])
self.assertAllClose(inputs[BOUNDING_BOXES], outputs[BOUNDING_BOXES])
self.assertAllClose(inputs["dummy_item"], outputs["dummy_item"])
# Sort the point clouds due to the orders of points are different when
# using Tensorflow and Metal+Tensorflow (MAC).
outputs[OBJECT_POINT_CLOUDS] = tf.sort(
outputs[OBJECT_POINT_CLOUDS], axis=-2
)
object_point_clouds = tf.sort(object_point_clouds, axis=-2)
self.assertAllClose(outputs[OBJECT_POINT_CLOUDS], object_point_clouds)
self.assertAllClose(
outputs[OBJECT_BOUNDING_BOXES], object_bounding_boxes
)
def test_augment_batch_point_clouds_and_bounding_boxes(self):
add_layer = GroupPointsByBoundingBoxes(
label_index=1,
min_points_per_bounding_boxes=1,
max_points_per_bounding_boxes=2,
)
point_clouds = np.array(
[
[
[
[0, 1, 2, 3, 4],
[10, 1, 2, 3, 4],
[0, -1, 2, 3, 4],
[100, 100, 2, 3, 4],
]
]
* 2
]
* 3
).astype("float32")
bounding_boxes = np.array(
[
[
[
[0, 0, 0, 4, 4, 4, 0, 1],
[10, 1, 2, 2, 2, 2, 0, 1],
[20, 20, 20, 1, 1, 1, 0, 1],
]
]
* 2
]
* 3
).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
object_point_clouds = np.array(
[
[
[[0, 1, 2, 3, 4], [0, -1, 2, 3, 4]],
[[10, 1, 2, 3, 4], [0, 0, 0, 0, 0]],
]
* 3
]
* 2
).astype("float32")
object_bounding_boxes = np.array(
[[[0, 0, 0, 4, 4, 4, 0, 1], [10, 1, 2, 2, 2, 2, 0, 1]] * 3] * 2
).astype("float32")
self.assertAllClose(inputs[POINT_CLOUDS], outputs[POINT_CLOUDS])
self.assertAllClose(inputs[BOUNDING_BOXES], outputs[BOUNDING_BOXES])
# Sort the point clouds due to the orders of points are different when
# using Tensorflow and Metal+Tensorflow (MAC).
outputs[OBJECT_POINT_CLOUDS] = tf.sort(
outputs[OBJECT_POINT_CLOUDS], axis=-2
)
object_point_clouds = tf.sort(object_point_clouds, axis=-2)
self.assertAllClose(outputs[OBJECT_POINT_CLOUDS], object_point_clouds)
self.assertAllClose(
outputs[OBJECT_BOUNDING_BOXES], object_bounding_boxes
)
@pytest.mark.skipif(
"TEST_CUSTOM_OPS" not in os.environ
or os.environ["TEST_CUSTOM_OPS"] != "true",
reason="Requires binaries compiled from source",
)
def test_augment_point_clouds_and_bounding_boxes_v2(self):
add_layer = GroupPointsByBoundingBoxes(
label_index=1,
min_points_per_bounding_boxes=1,
max_points_per_bounding_boxes=2,
)
point_clouds = np.array(
[
[
[0, 1, 2, 3, 4],
[10, 1, 2, 3, 4],
[0, -1, 2, 3, 4],
[100, 100, 2, 3, 4],
]
]
* 2
).astype("float32")
bounding_boxes = np.array(
[
[
[0, 0, 0, 4, 4, 4, 0, 1],
[10, 1, 2, 2, 2, 2, 0, 1],
[20, 20, 20, 1, 1, 1, 0, 1],
]
]
* 2
).astype("float32")
point_clouds = tf.convert_to_tensor(point_clouds)
bounding_boxes = tf.convert_to_tensor(bounding_boxes)
outputs = add_layer.augment_point_clouds_bounding_boxes_v2(
point_clouds=point_clouds, bounding_boxes=bounding_boxes
)
object_point_clouds, object_bounding_boxes = outputs[0], outputs[1]
expected_object_point_clouds = np.array(
[
[
[[0, 1, 2, 3, 4], [0, -1, 2, 3, 4]],
[[10, 1, 2, 3, 4], [0, 0, 0, 0, 0]],
]
]
* 2
).astype("float32")
expected_object_bounding_boxes = np.array(
[[[0, 0, 0, 4, 4, 4, 0, 1], [10, 1, 2, 2, 2, 2, 0, 1]]] * 2
).astype("float32")
self.assertAllClose(
expected_object_point_clouds, object_point_clouds.to_tensor()
)
self.assertAllClose(
expected_object_bounding_boxes, object_bounding_boxes.to_tensor()
)
|
keras-cv/keras_cv/layers/preprocessing_3d/waymo/group_points_by_bounding_boxes_test.py/0
|
{
"file_path": "keras-cv/keras_cv/layers/preprocessing_3d/waymo/group_points_by_bounding_boxes_test.py",
"repo_id": "keras-cv",
"token_count": 4159
}
| 80 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
@keras_cv_export("keras_cv.layers.SegFormerMultiheadAttention")
class SegFormerMultiheadAttention(keras.layers.Layer):
def __init__(self, project_dim, num_heads, sr_ratio):
"""
Efficient MultiHeadAttention implementation as a Keras layer.
A huge bottleneck in scaling transformers is the self-attention layer
with an O(n^2) complexity.
SegFormerMultiheadAttention performs a sequence reduction (SR) operation
with a given ratio, to reduce the sequence length before performing key and value projections,
reducing the O(n^2) complexity to O(n^2/R) where R is the sequence reduction ratio.
References:
- [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) (CVPR 2021) # noqa: E501
- [NVlabs' official implementation](https://github.com/NVlabs/SegFormer/blob/master/mmseg/models/backbones/mix_transformer.py) # noqa: E501
- [@sithu31296's reimplementation](https://github.com/sithu31296/semantic-segmentation/blob/main/semseg/models/backbones/mit.py) # noqa: E501
- [Ported from the TensorFlow implementation from DeepVision](https://github.com/DavidLandup0/deepvision/blob/main/deepvision/layers/efficient_attention.py) # noqa: E501
Args:
project_dim: integer, the dimensionality of the projection
of the `SegFormerMultiheadAttention` layer.
num_heads: integer, the number of heads to use in the
attention computation.
sr_ratio: integer, the sequence reduction ratio to perform
on the sequence before key and value projections.
Basic usage:
```
        tensor = tf.random.uniform([1, 196, 32])
        output = keras_cv.layers.SegFormerMultiheadAttention(project_dim=32,
                                                             num_heads=2,
                                                             sr_ratio=4)(tensor)
        print(output.shape) # (1, 196, 32)
```
"""
super().__init__()
self.num_heads = num_heads
self.sr_ratio = sr_ratio
self.scale = (project_dim // num_heads) ** -0.5
self.q = keras.layers.Dense(project_dim)
self.k = keras.layers.Dense(project_dim)
self.v = keras.layers.Dense(project_dim)
self.proj = keras.layers.Dense(project_dim)
if sr_ratio > 1:
self.sr = keras.layers.Conv2D(
filters=project_dim,
kernel_size=sr_ratio,
strides=sr_ratio,
padding="same",
)
self.norm = keras.layers.LayerNormalization()
def call(self, x):
input_shape = ops.shape(x)
H, W = int(math.sqrt(input_shape[1])), int(math.sqrt(input_shape[1]))
B, C = input_shape[0], input_shape[2]
q = self.q(x)
q = ops.reshape(
q,
(
input_shape[0],
input_shape[1],
self.num_heads,
input_shape[2] // self.num_heads,
),
)
q = ops.transpose(q, [0, 2, 1, 3])
if self.sr_ratio > 1:
x = ops.reshape(
ops.transpose(x, [0, 2, 1]),
(B, H, W, C),
)
x = self.sr(x)
x = ops.reshape(x, [input_shape[0], input_shape[2], -1])
x = ops.transpose(x, [0, 2, 1])
x = self.norm(x)
k = self.k(x)
v = self.v(x)
k = ops.transpose(
ops.reshape(
k,
[B, -1, self.num_heads, C // self.num_heads],
),
[0, 2, 1, 3],
)
v = ops.transpose(
ops.reshape(
v,
[B, -1, self.num_heads, C // self.num_heads],
),
[0, 2, 1, 3],
)
attn = (q @ ops.transpose(k, [0, 1, 3, 2])) * self.scale
attn = ops.nn.softmax(attn, axis=-1)
attn = attn @ v
attn = ops.reshape(
ops.transpose(attn, [0, 2, 1, 3]),
[input_shape[0], input_shape[1], input_shape[2]],
)
x = self.proj(attn)
return x
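

# --- Hypothetical shape sketch (not part of the original file) ---
# Queries keep all H*W tokens while keys/values are computed on a sequence
# that was spatially downsampled with stride `sr_ratio` in each dimension,
# which is where the attention cost saving comes from. Note that the reshape
# in `call` assumes `project_dim` matches the input channel count.
def _example_segformer_attention():
    import numpy as np  # assumed available

    x = np.random.uniform(size=(1, 196, 32)).astype("float32")  # 14x14 tokens
    layer = SegFormerMultiheadAttention(project_dim=32, num_heads=2, sr_ratio=4)
    # Output keeps the token count and projection width: (1, 196, 32).
    return layer(x)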
|
keras-cv/keras_cv/layers/segformer_multihead_attention.py/0
|
{
"file_path": "keras-cv/keras_cv/layers/segformer_multihead_attention.py",
"repo_id": "keras-cv",
"token_count": 2401
}
| 81 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.backend.config import keras_3
from keras_cv.losses import FocalLoss
from keras_cv.tests.test_case import TestCase
class FocalTest(TestCase):
def test_output_shape(self):
y_true = np.random.uniform(size=[2, 5], low=0, high=1)
y_pred = np.random.uniform(size=[2, 5], low=0, high=1)
focal_loss = FocalLoss(reduction="sum")
self.assertAllEqual(focal_loss(y_true, y_pred).shape, [])
def test_output_shape_reduction_none(self):
y_true = np.random.uniform(size=[2, 5], low=0, high=1)
y_pred = np.random.uniform(size=[2, 5], low=0, high=1)
focal_loss = FocalLoss(reduction="none")
self.assertAllEqual(
focal_loss(y_true, y_pred).shape,
[
2,
],
)
def test_output_shape_from_logits(self):
y_true = np.random.uniform(size=[2, 5], low=0, high=1)
y_pred = np.random.uniform(size=[2, 5], low=-10, high=10)
focal_loss = FocalLoss(reduction="none", from_logits=True)
self.assertAllEqual(
focal_loss(y_true, y_pred).shape,
[
2,
],
)
def test_from_logits_argument(self):
rng = np.random.default_rng(1337)
y_true = rng.uniform(size=(2, 8, 10)).astype("float64")
y_logits = rng.uniform(low=-1000, high=1000, size=(2, 8, 10)).astype(
"float64"
)
y_pred = ops.cast(ops.sigmoid(y_logits), "float32")
focal_loss_on_logits = FocalLoss(from_logits=True)
focal_loss = FocalLoss()
# TODO(ianstenbit): This probably warrants some more investigation.
# In the current implementation, I've verified that training RetinaNet
# works in all backends with this implementation.
# TF backend somehow has different numerics.
expected_loss = (
31.11176
if keras_3() and keras.backend.backend() != "tensorflow"
else 925.28081
)
self.assertAllClose(
focal_loss_on_logits(y_true, y_logits), expected_loss
)
self.assertAllClose(focal_loss(y_true, y_pred), 31.11176)
|
keras-cv/keras_cv/losses/focal_test.py/0
|
{
"file_path": "keras-cv/keras_cv/losses/focal_test.py",
"repo_id": "keras-cv",
"token_count": 1246
}
| 82 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from keras_cv.api_export import keras_cv_export
from keras_cv.models.backbones.densenet.densenet_backbone import (
DenseNetBackbone,
)
from keras_cv.models.backbones.densenet.densenet_backbone_presets import (
backbone_presets,
)
from keras_cv.utils.python_utils import classproperty
ALIAS_DOCSTRING = """DenseNetBackbone model with {num_layers} layers.
Reference:
- [Densely Connected Convolutional Networks (CVPR 2017)](https://arxiv.org/abs/1608.06993)
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](https://keras.io/guides/transfer_learning/).
Args:
include_rescaling: bool, whether to rescale the inputs. If set
to `True`, inputs will be passed through a `Rescaling(1/255.0)`
layer.
input_shape: optional shape tuple, defaults to (None, None, 3).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
Examples:
```python
input_data = tf.ones(shape=(8, 224, 224, 3))
# Randomly initialized backbone
model = DenseNet{num_layers}Backbone()
output = model(input_data)
```
""" # noqa: E501
@keras_cv_export("keras_cv.models.DenseNet121Backbone")
class DenseNet121Backbone(DenseNetBackbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return DenseNetBackbone.from_preset("densenet121", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {
"densenet121_imagenet": copy.deepcopy(
backbone_presets["densenet121_imagenet"]
),
}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include weights.""" # noqa: E501
return cls.presets
@keras_cv_export("keras_cv.models.DenseNet169Backbone")
class DenseNet169Backbone(DenseNetBackbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return DenseNetBackbone.from_preset("densenet169", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {
"densenet169_imagenet": copy.deepcopy(
backbone_presets["densenet169_imagenet"]
),
}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include weights.""" # noqa: E501
return cls.presets
@keras_cv_export("keras_cv.models.DenseNet201Backbone")
class DenseNet201Backbone(DenseNetBackbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return DenseNetBackbone.from_preset("densenet201", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {
"densenet201_imagenet": copy.deepcopy(
backbone_presets["densenet201_imagenet"]
),
}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include weights.""" # noqa: E501
return cls.presets
setattr(DenseNet121Backbone, "__doc__", ALIAS_DOCSTRING.format(num_layers=121))
setattr(DenseNet169Backbone, "__doc__", ALIAS_DOCSTRING.format(num_layers=169))
setattr(DenseNet201Backbone, "__doc__", ALIAS_DOCSTRING.format(num_layers=201))
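

# --- Hypothetical usage sketch (not part of the original file) ---
# Each alias packs its arguments into kwargs and forwards them to
# `DenseNetBackbone.from_preset`, so the two calls below build the same
# randomly initialized DenseNet-121 backbone (the preset configuration may be
# fetched from Kaggle on first use).
def _example_densenet_alias():
    via_alias = DenseNet121Backbone(input_shape=(224, 224, 3))
    via_preset = DenseNetBackbone.from_preset(
        "densenet121",
        include_rescaling=True,
        input_shape=(224, 224, 3),
        input_tensor=None,
    )
    return via_alias, via_preset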
|
keras-cv/keras_cv/models/backbones/densenet/densenet_aliases.py/0
|
{
"file_path": "keras-cv/keras_cv/models/backbones/densenet/densenet_aliases.py",
"repo_id": "keras-cv",
"token_count": 2183
}
| 83 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import pytest
from absl.testing import parameterized
from keras_cv.backend import keras
from keras_cv.models.backbones.efficientnet_v1.efficientnet_v1_aliases import (
EfficientNetV1B0Backbone,
)
from keras_cv.models.backbones.efficientnet_v1.efficientnet_v1_backbone import (
EfficientNetV1Backbone,
)
from keras_cv.tests.test_case import TestCase
from keras_cv.utils.train import get_feature_extractor
class EfficientNetV1BackboneTest(TestCase):
def setUp(self):
self.input_batch = np.ones(shape=(8, 224, 224, 3))
def test_valid_call(self):
model = EfficientNetV1Backbone(
stackwise_kernel_sizes=[3, 3, 5, 3, 5, 5, 3],
stackwise_num_repeats=[1, 2, 2, 3, 3, 4, 1],
stackwise_input_filters=[32, 16, 24, 40, 80, 112, 192],
stackwise_output_filters=[16, 24, 40, 80, 112, 192, 320],
stackwise_expansion_ratios=[1, 6, 6, 6, 6, 6, 6],
stackwise_strides=[1, 2, 2, 2, 1, 2, 1],
stackwise_squeeze_and_excite_ratios=[
0.25,
0.25,
0.25,
0.25,
0.25,
0.25,
0.25,
],
width_coefficient=1.0,
depth_coefficient=1.0,
include_rescaling=False,
)
model(self.input_batch)
def test_valid_call_alias_model_with_rescaling(self):
model = EfficientNetV1B0Backbone(include_rescaling=True)
model(self.input_batch)
def test_valid_call_with_rescaling(self):
model = EfficientNetV1Backbone(
stackwise_kernel_sizes=[3, 3, 5, 3, 5, 5, 3],
stackwise_num_repeats=[1, 2, 2, 3, 3, 4, 1],
stackwise_input_filters=[32, 16, 24, 40, 80, 112, 192],
stackwise_output_filters=[16, 24, 40, 80, 112, 192, 320],
stackwise_expansion_ratios=[1, 6, 6, 6, 6, 6, 6],
stackwise_strides=[1, 2, 2, 2, 1, 2, 1],
stackwise_squeeze_and_excite_ratios=[
0.25,
0.25,
0.25,
0.25,
0.25,
0.25,
0.25,
],
width_coefficient=1.0,
depth_coefficient=1.0,
include_rescaling=True,
)
model(self.input_batch)
@pytest.mark.large # Saving is slow, so mark these large.
def test_saved_model(self):
model = EfficientNetV1Backbone(
stackwise_kernel_sizes=[3, 3, 5, 3, 5, 5, 3],
stackwise_num_repeats=[1, 2, 2, 3, 3, 4, 1],
stackwise_input_filters=[32, 16, 24, 40, 80, 112, 192],
stackwise_output_filters=[16, 24, 40, 80, 112, 192, 320],
stackwise_expansion_ratios=[1, 6, 6, 6, 6, 6, 6],
stackwise_strides=[1, 2, 2, 2, 1, 2, 1],
stackwise_squeeze_and_excite_ratios=[
0.25,
0.25,
0.25,
0.25,
0.25,
0.25,
0.25,
],
width_coefficient=1.0,
depth_coefficient=1.0,
include_rescaling=True,
)
model_output = model(self.input_batch)
save_path = os.path.join(
self.get_temp_dir(), "efficientnet_v1_backbone.keras"
)
model.save(save_path)
restored_model = keras.models.load_model(save_path)
# Check we got the real object back.
self.assertIsInstance(restored_model, EfficientNetV1Backbone)
# Check that output matches.
restored_output = restored_model(self.input_batch)
self.assertAllClose(model_output, restored_output)
@pytest.mark.large # Saving is slow, so mark these large.
def test_saved_alias_model(self):
model = EfficientNetV1B0Backbone()
model_output = model(self.input_batch)
save_path = os.path.join(
self.get_temp_dir(), "efficientnet_v1_backbone.keras"
)
model.save(save_path)
restored_model = keras.models.load_model(save_path)
# Check we got the real object back.
# Note that these aliases serialized as the base class
self.assertIsInstance(restored_model, EfficientNetV1Backbone)
# Check that output matches.
restored_output = restored_model(self.input_batch)
self.assertAllClose(model_output, restored_output)
def test_feature_pyramid_inputs(self):
model = EfficientNetV1B0Backbone()
backbone_model = get_feature_extractor(
model,
model.pyramid_level_inputs.values(),
model.pyramid_level_inputs.keys(),
)
input_size = 256
inputs = keras.Input(shape=[input_size, input_size, 3])
outputs = backbone_model(inputs)
levels = ["P1", "P2", "P3", "P4", "P5"]
self.assertEquals(list(outputs.keys()), levels)
self.assertEquals(
outputs["P1"].shape,
(None, input_size // 2**1, input_size // 2**1, 16),
)
self.assertEquals(
outputs["P2"].shape,
(None, input_size // 2**2, input_size // 2**2, 24),
)
self.assertEquals(
outputs["P3"].shape,
(None, input_size // 2**3, input_size // 2**3, 40),
)
self.assertEquals(
outputs["P4"].shape,
(None, input_size // 2**4, input_size // 2**4, 112),
)
self.assertEquals(
outputs["P5"].shape,
(None, input_size // 2**5, input_size // 2**5, 1280),
)
@parameterized.named_parameters(
("one_channel", 1),
("four_channels", 4),
)
def test_application_variable_input_channels(self, num_channels):
model = EfficientNetV1Backbone(
stackwise_kernel_sizes=[3, 3, 5, 3, 5, 5, 3],
stackwise_num_repeats=[1, 2, 2, 3, 3, 4, 1],
stackwise_input_filters=[32, 16, 24, 40, 80, 112, 192],
stackwise_output_filters=[16, 24, 40, 80, 112, 192, 320],
stackwise_expansion_ratios=[1, 6, 6, 6, 6, 6, 6],
stackwise_strides=[1, 2, 2, 2, 1, 2, 1],
stackwise_squeeze_and_excite_ratios=[
0.25,
0.25,
0.25,
0.25,
0.25,
0.25,
0.25,
],
width_coefficient=1.0,
depth_coefficient=1.0,
            include_rescaling=True,
            input_shape=(None, None, num_channels),
        )
self.assertEqual(model.output_shape, (None, None, None, 1280))
|
keras-cv/keras_cv/models/backbones/efficientnet_v1/efficientnet_v1_backbone_test.py/0
|
{
"file_path": "keras-cv/keras_cv/models/backbones/efficientnet_v1/efficientnet_v1_backbone_test.py",
"repo_id": "keras-cv",
"token_count": 3658
}
| 84 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MobileNetV3 model preset configurations."""
backbone_presets_no_weights = {
"mobilenet_v3_small": {
"metadata": {
"description": (
"MobileNetV3 model with 14 layers where the batch "
"normalization and hard-swish activation are applied after the "
"convolution layers."
),
"params": 933502,
"official_name": "MobileNetV3",
"path": "mobilenetv3",
},
"kaggle_handle": "kaggle://keras/mobilenetv3/keras/mobilenet_v3_small/2", # noqa: E501
},
"mobilenet_v3_large": {
"metadata": {
"description": (
"MobileNetV3 model with 28 layers where the batch "
"normalization and hard-swish activation are applied after the "
"convolution layers."
),
"params": 2994518,
"official_name": "MobileNetV3",
"path": "mobilenetv3",
},
"kaggle_handle": "kaggle://keras/mobilenetv3/keras/mobilenet_v3_large/2", # noqa: E501
},
}
backbone_presets_with_weights = {
"mobilenet_v3_large_imagenet": {
"metadata": {
"description": (
"MobileNetV3 model with 28 layers where the batch "
"normalization and hard-swish activation are applied after the "
"convolution layers. "
"Pre-trained on the ImageNet 2012 classification task."
),
"params": 2994518,
"official_name": "MobileNetV3",
"path": "mobilenetv3",
},
"kaggle_handle": "kaggle://keras/mobilenetv3/keras/mobilenet_v3_large_imagenet/2", # noqa: E501
},
"mobilenet_v3_small_imagenet": {
"metadata": {
"description": (
"MobileNetV3 model with 14 layers where the batch "
"normalization and hard-swish activation are applied after the "
"convolution layers. "
"Pre-trained on the ImageNet 2012 classification task."
),
"params": 933502,
"official_name": "MobileNetV3",
"path": "mobilenetv3",
},
"kaggle_handle": "kaggle://keras/mobilenetv3/keras/mobilenet_v3_small_imagenet/2", # noqa: E501
},
}
backbone_presets = {
**backbone_presets_no_weights,
**backbone_presets_with_weights,
}
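

# --- Hypothetical lookup sketch (not part of the original file) ---
# The merged `backbone_presets` dict is the preset registry the MobileNetV3
# backbone exposes; the "_imagenet" entries additionally reference pretrained
# weights via their Kaggle handles.
def _example_preset_lookup():
    name = "mobilenet_v3_large_imagenet"
    assert name in backbone_presets_with_weights
    metadata = backbone_presets[name]["metadata"]
    return metadata["params"]  # 2994518 parameters for the large variant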
|
keras-cv/keras_cv/models/backbones/mobilenet_v3/mobilenet_v3_backbone_presets.py/0
|
{
"file_path": "keras-cv/keras_cv/models/backbones/mobilenet_v3/mobilenet_v3_backbone_presets.py",
"repo_id": "keras-cv",
"token_count": 1380
}
| 85 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import pytest
from tensorflow import data as tf_data
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.backend.config import keras_3
from keras_cv.models import CLIP
from keras_cv.models.feature_extractor.clip import CLIPProcessor
from keras_cv.tests.test_case import TestCase
VOCAB_PATH = keras.utils.get_file(
None,
"https://storage.googleapis.com/keras-cv/models/clip/vocab.json",
)
MERGE_PATH = keras.utils.get_file(
None,
"https://storage.googleapis.com/keras-cv/models/clip/merges.txt",
)
MODEL_PATH = keras.utils.get_file(
None,
"https://storage.googleapis.com/keras-cv/models/clip/clip-vit-base-patch32.weights.h5", # noqa: E501
)
class CLIPTest(TestCase):
@pytest.mark.large
def test_clip_model_golden_values(self):
model = CLIP()
model.load_weights(MODEL_PATH)
processed_image = np.ones(shape=[1, 224, 224, 3])
processed_text = np.ones(shape=[3, 77])
attention_mask = np.ones(shape=[3, 77])
image_logits, text_logits = model(
processed_image, processed_text, attention_mask
)
print(image_logits)
self.assertAllClose(image_logits, [[2.932678, 2.932678, 2.932675]])
self.assertAllClose(
text_logits, ops.transpose([[2.932678, 2.932678, 2.932675]])
)
def test_clip_preprocessor(self):
processor = CLIPProcessor(224, VOCAB_PATH, MERGE_PATH)
processed_text, attention_mask = processor.process_texts(
["mountains", "cat on tortoise"]
)
self.assertAllClose(
processed_text[:, :3], [[49406, 5873, 49407], [49406, 2368, 525]]
)
self.assertAllClose(
attention_mask[0, :5], [True, True, True, False, False]
)
def test_clip_preprocessor_tf_data(self):
processor = CLIPProcessor(224, VOCAB_PATH, MERGE_PATH)
text_input = ["a bus", "a dog", "a cat"]
dataset = tf_data.Dataset.from_tensor_slices(text_input)
dataset.map(processor.process_texts)
@pytest.mark.large
def test_presets(self):
self.skipTest("TODO: Enable after Kaggle model is public")
model = CLIP.from_preset("clip-vit-base-patch32")
processed_image = np.ones(shape=[1, 224, 224, 3])
processed_text = np.ones(shape=[3, 77])
attention_mask = np.ones(shape=[3, 77])
image_logits, text_logits = model(
processed_image, processed_text, attention_mask
)
@pytest.mark.large
def test_image_encoder_golden_values(self):
model = CLIP()
model.load_weights(MODEL_PATH)
processed_image = np.ones(shape=[1, 224, 224, 3])
processed_text = np.ones(shape=[3, 77])
attention_mask = np.ones(shape=[3, 77])
model(processed_image, processed_text, attention_mask)
self.assertAllClose(
model.image_embeddings[:, :5],
[[0.023215, 0.026526, 0.008914, -0.091689, 0.021791]],
)
@pytest.mark.large
def test_text_encoder_golden_values(self):
model = CLIP()
processed_image = np.ones(shape=[1, 224, 224, 3])
processed_text = np.ones(shape=[3, 77])
attention_mask = np.ones(shape=[3, 77])
model(processed_image, processed_text, attention_mask)
print(model.text_embeddings)
self.assertAllClose(
model.text_embeddings[0, :3],
[-0.018502, 0.000906, 0.020372],
)
@pytest.mark.large # Saving is slow, so mark these large.
def test_saved_model(self):
model = CLIP()
processed_image = np.ones(shape=[1, 224, 224, 3])
processed_text = np.ones(shape=[3, 77])
attention_mask = np.ones(shape=[3, 77])
model_output, _ = model(processed_image, processed_text, attention_mask)
save_path = os.path.join(self.get_temp_dir(), "model.keras")
if keras_3():
model.save(save_path)
else:
model.save(save_path, save_format="keras_v3")
restored_model = keras.models.load_model(save_path)
# Check we got the real object back.
self.assertIsInstance(restored_model, CLIP)
# Check that output matches.
restored_output, _ = restored_model(
processed_image, processed_text, attention_mask
)
self.assertAllClose(model_output, restored_output)
|
keras-cv/keras_cv/models/feature_extractor/clip/clip_model_test.py/0
|
{
"file_path": "keras-cv/keras_cv/models/feature_extractor/clip/clip_model_test.py",
"repo_id": "keras-cv",
"token_count": 2165
}
| 86 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.models.backbones.backbone import Backbone
from keras_cv.models.object_detection_3d.center_pillar_backbone_presets import (
backbone_presets,
)
from keras_cv.utils.python_utils import classproperty
@keras_cv_export("keras_cv.models.CenterPillarBackbone")
class CenterPillarBackbone(Backbone):
"""A UNet backbone for CenterPillar models.
All up and down blocks scale by a factor of two. Skip connections are
included.
    The block helpers in this module (`Block`, `DownSampleBlock`,
    `UpSampleBlock`) are curried: they take configuration arguments and
    return a function that acts on tensors.
Reference: [U-Net: Convolutional Networks for Biomedical Image Segmentation](https://arxiv.org/abs/1505.04597)
Args:
stackwise_down_blocks: a list of integers representing the number of
sub-blocks in each downsampling block.
stackwise_down_filters: a list of integers representing the number of
filters in each downsampling block.
stackwise_up_filters: a list of integers representing the number of
filters in each upsampling block.
input_shape: the rank 3 shape of the input to the UNet.
""" # noqa: E501
def __init__(
self,
stackwise_down_blocks,
stackwise_down_filters,
stackwise_up_filters,
input_shape=(None, None, 128),
**kwargs
):
self.stackwise_down_blocks = stackwise_down_blocks
self.stackwise_down_filters = stackwise_down_filters
self.stackwise_up_filters = stackwise_up_filters
input = keras.layers.Input(shape=input_shape)
x = input
x = keras.layers.Conv2D(
128,
1,
1,
padding="same",
kernel_initializer=keras.initializers.VarianceScaling(),
kernel_regularizer=keras.regularizers.L2(l2=1e-4),
)(x)
x = keras.layers.BatchNormalization()(x)
x = keras.layers.ReLU()(x)
x = Block(128, downsample=False)(x)
skip_connections = []
# Filters refers to the number of convolutional filters in each block,
# while num_blocks refers to the number of sub-blocks within a block
# (Note that only the first sub-block will perform downsampling)
for filters, num_blocks in zip(
stackwise_down_filters, stackwise_down_blocks
):
skip_connections.append(x)
x = DownSampleBlock(filters, num_blocks)(x)
for filters in stackwise_up_filters:
x = UpSampleBlock(filters)(x, skip_connections.pop())
output = x
super().__init__(
inputs=input,
outputs=output,
**kwargs,
)
def get_config(self):
config = super().get_config()
config.update(
{
"stackwise_down_blocks": self.stackwise_down_blocks,
"stackwise_down_filters": self.stackwise_down_filters,
"stackwise_up_filters": self.stackwise_up_filters,
}
)
return config
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return copy.deepcopy(backbone_presets)
def Block(filters, downsample):
"""A default block which serves as an example of the block interface.
This is the base block definition for a CenterPillar model.
"""
def apply(x):
input_depth = list(x.shape)[-1]
stride = 2 if downsample else 1
residual = x
x = keras.layers.Conv2D(
filters,
3,
stride,
padding="same",
use_bias=False,
kernel_initializer=keras.initializers.VarianceScaling(),
kernel_regularizer=keras.regularizers.L2(l2=1e-4),
)(x)
x = keras.layers.BatchNormalization()(x)
x = keras.layers.ReLU()(x)
x = keras.layers.Conv2D(
filters,
3,
1,
padding="same",
use_bias=False,
kernel_initializer=keras.initializers.VarianceScaling(),
kernel_regularizer=keras.regularizers.L2(l2=1e-4),
)(x)
x = keras.layers.BatchNormalization()(x)
x = keras.layers.ReLU()(x)
if downsample:
residual = keras.layers.MaxPool2D(
pool_size=2, strides=2, padding="same"
)(residual)
if input_depth != filters:
residual = keras.layers.Conv2D(
filters,
1,
1,
padding="same",
use_bias=False,
kernel_initializer=keras.initializers.VarianceScaling(),
kernel_regularizer=keras.regularizers.L2(l2=1e-4),
)(residual)
residual = keras.layers.BatchNormalization()(residual)
residual = keras.layers.ReLU()(residual)
x = keras.layers.Add()([x, residual])
return x
return apply
def SkipBlock(filters):
def apply(x):
x = keras.layers.Conv2D(
filters,
1,
1,
use_bias=False,
kernel_initializer=keras.initializers.VarianceScaling(),
kernel_regularizer=keras.regularizers.L2(l2=1e-4),
)(x)
x = keras.layers.BatchNormalization()(x)
x = keras.layers.ReLU()(x)
return x
return apply
def DownSampleBlock(filters, num_blocks):
def apply(x):
x = Block(filters, downsample=True)(x)
for _ in range(num_blocks - 1):
x = Block(filters, downsample=False)(x)
return x
return apply
def UpSampleBlock(filters):
def apply(x, lateral_input):
x = keras.layers.Conv2DTranspose(
filters,
3,
2,
padding="same",
use_bias=False,
kernel_initializer=keras.initializers.VarianceScaling(),
kernel_regularizer=keras.regularizers.L2(l2=1e-4),
)(x)
x = keras.layers.BatchNormalization()(x)
x = keras.layers.ReLU()(x)
lateral_input = SkipBlock(filters)(lateral_input)
x = keras.layers.Add()([x, lateral_input])
x = Block(filters, downsample=False)(x)
return x
return apply
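

# --- Hypothetical shape sketch (not part of the original file) ---
# Each DownSampleBlock halves the spatial resolution (only its first Block
# uses stride 2) and each UpSampleBlock doubles it while adding the matching
# skip connection popped off the stack, so the configuration below returns to
# the input resolution: 32 -> 16 -> 8 -> 16 -> 32.
def _example_center_pillar_backbone():
    backbone = CenterPillarBackbone(
        stackwise_down_blocks=[2, 2],
        stackwise_down_filters=[128, 256],
        stackwise_up_filters=[256, 128],
        input_shape=(32, 32, 128),
    )
    return backbone.output_shape  # (None, 32, 32, 128)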
|
keras-cv/keras_cv/models/object_detection_3d/center_pillar_backbone.py/0
|
{
"file_path": "keras-cv/keras_cv/models/object_detection_3d/center_pillar_backbone.py",
"repo_id": "keras-cv",
"token_count": 3186
}
| 87 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SegFormer model preset configurations."""
from keras_cv.models.backbones.mix_transformer.mix_transformer_backbone_presets import ( # noqa: E501
backbone_presets,
)
presets_no_weights = {
"segformer_b0": {
"metadata": {
"description": ("SegFormer model with MiTB0 backbone."),
"params": 3719027,
"official_name": "SegFormerB0",
"path": "segformer_b0",
},
"kaggle_handle": "kaggle://keras/segformer/keras/segformer_b0/2",
},
"segformer_b1": {
"metadata": {
"description": ("SegFormer model with MiTB1 backbone."),
"params": 13682643,
"official_name": "SegFormerB1",
"path": "segformer_b1",
},
"kaggle_handle": "kaggle://keras/segformer/keras/segformer_b1/2",
},
"segformer_b2": {
"metadata": {
"description": ("SegFormer model with MiTB2 backbone."),
"params": 24727507,
"official_name": "SegFormerB2",
"path": "segformer_b2",
},
"kaggle_handle": "kaggle://keras/segformer/keras/segformer_b2/2",
},
"segformer_b3": {
"metadata": {
"description": ("SegFormer model with MiTB3 backbone."),
"params": 44603347,
"official_name": "SegFormerB3",
"path": "segformer_b3",
},
"kaggle_handle": "kaggle://keras/segformer/keras/segformer_b3/2",
},
"segformer_b4": {
"metadata": {
"description": ("SegFormer model with MiTB4 backbone."),
"params": 61373907,
"official_name": "SegFormerB4",
"path": "segformer_b4",
},
"kaggle_handle": "kaggle://keras/segformer/keras/segformer_b4/2",
},
"segformer_b5": {
"metadata": {
"description": ("SegFormer model with MiTB5 backbone."),
"params": 81974227,
"official_name": "SegFormerB5",
"path": "segformer_b5",
},
"kaggle_handle": "kaggle://keras/segformer/keras/segformer_b5/2",
},
}
presets_with_weights = {
"segformer_b0_imagenet": {
"metadata": {
"description": (
"SegFormer model with a pretrained MiTB0 backbone."
),
"params": 3719027,
"official_name": "SegFormerB0",
"path": "segformer_b0",
},
"kaggle_handle": "kaggle://keras/segformer/keras/segformer_b0_imagenet/2", # noqa: E501
},
}
presets = {
**backbone_presets, # Add MiTBackbone presets
**presets_no_weights,
**presets_with_weights,
}
|
keras-cv/keras_cv/models/segmentation/segformer/segformer_presets.py/0
|
{
"file_path": "keras-cv/keras_cv/models/segmentation/segformer/segformer_presets.py",
"repo_id": "keras-cv",
"token_count": 1517
}
| 88 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
_UNCONDITIONAL_TOKENS = [
49406,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
]
_ALPHAS_CUMPROD = [
0.99915,
0.998296,
0.9974381,
0.9965762,
0.99571025,
0.9948404,
0.9939665,
0.9930887,
0.9922069,
0.9913211,
0.9904313,
0.98953754,
0.9886398,
0.9877381,
0.9868324,
0.98592263,
0.98500896,
0.9840913,
0.9831696,
0.982244,
0.98131436,
0.9803808,
0.97944313,
0.97850156,
0.977556,
0.9766064,
0.97565293,
0.9746954,
0.9737339,
0.9727684,
0.97179896,
0.97082555,
0.96984816,
0.96886677,
0.9678814,
0.96689206,
0.96589875,
0.9649015,
0.96390027,
0.9628951,
0.9618859,
0.96087277,
0.95985574,
0.95883465,
0.9578097,
0.95678073,
0.95574784,
0.954711,
0.95367026,
0.9526256,
0.9515769,
0.95052433,
0.94946784,
0.94840735,
0.947343,
0.94627476,
0.9452025,
0.9441264,
0.9430464,
0.9419625,
0.9408747,
0.939783,
0.9386874,
0.93758786,
0.9364845,
0.93537724,
0.9342661,
0.9331511,
0.9320323,
0.9309096,
0.929783,
0.9286526,
0.9275183,
0.9263802,
0.92523825,
0.92409253,
0.92294294,
0.9217895,
0.92063236,
0.9194713,
0.9183065,
0.9171379,
0.91596556,
0.9147894,
0.9136095,
0.91242576,
0.9112383,
0.9100471,
0.9088522,
0.9076535,
0.9064511,
0.90524495,
0.9040351,
0.90282154,
0.9016043,
0.90038335,
0.8991587,
0.8979304,
0.8966984,
0.89546275,
0.89422345,
0.8929805,
0.89173394,
0.89048374,
0.88922995,
0.8879725,
0.8867115,
0.88544685,
0.88417864,
0.88290685,
0.8816315,
0.88035256,
0.8790701,
0.87778413,
0.8764946,
0.8752016,
0.873905,
0.87260497,
0.8713014,
0.8699944,
0.86868393,
0.86737,
0.8660526,
0.8647318,
0.86340755,
0.8620799,
0.8607488,
0.85941434,
0.8580765,
0.8567353,
0.8553907,
0.8540428,
0.85269153,
0.85133696,
0.84997904,
0.84861785,
0.8472533,
0.8458856,
0.8445145,
0.84314024,
0.84176266,
0.8403819,
0.8389979,
0.8376107,
0.8362203,
0.83482677,
0.83343,
0.8320301,
0.8306271,
0.8292209,
0.82781166,
0.82639927,
0.8249838,
0.82356524,
0.8221436,
0.82071894,
0.81929123,
0.81786054,
0.8164268,
0.8149901,
0.8135504,
0.81210774,
0.81066215,
0.8092136,
0.8077621,
0.80630773,
0.80485046,
0.8033903,
0.80192727,
0.8004614,
0.79899275,
0.79752123,
0.7960469,
0.7945698,
0.7930899,
0.79160726,
0.7901219,
0.7886338,
0.787143,
0.7856495,
0.7841533,
0.78265446,
0.78115296,
0.7796488,
0.77814204,
0.7766327,
0.7751208,
0.7736063,
0.77208924,
0.7705697,
0.7690476,
0.767523,
0.7659959,
0.7644664,
0.76293445,
0.7614,
0.7598632,
0.75832397,
0.75678235,
0.75523835,
0.75369203,
0.7521434,
0.75059247,
0.7490392,
0.7474837,
0.7459259,
0.7443659,
0.74280363,
0.7412392,
0.7396726,
0.7381038,
0.73653287,
0.7349598,
0.7333846,
0.73180735,
0.730228,
0.7286466,
0.7270631,
0.7254777,
0.72389024,
0.72230077,
0.7207094,
0.71911603,
0.7175208,
0.7159236,
0.71432453,
0.7127236,
0.71112084,
0.7095162,
0.7079098,
0.7063016,
0.70469165,
0.70307994,
0.7014665,
0.69985133,
0.6982345,
0.696616,
0.6949958,
0.69337404,
0.69175065,
0.69012564,
0.6884991,
0.68687093,
0.6852413,
0.68361014,
0.6819775,
0.6803434,
0.67870784,
0.6770708,
0.6754324,
0.6737926,
0.67215145,
0.670509,
0.66886514,
0.66722,
0.6655736,
0.66392595,
0.662277,
0.6606269,
0.65897554,
0.657323,
0.65566933,
0.6540145,
0.6523586,
0.6507016,
0.6490435,
0.64738435,
0.6457241,
0.64406294,
0.6424008,
0.64073765,
0.63907355,
0.63740855,
0.6357426,
0.6340758,
0.6324082,
0.6307397,
0.6290704,
0.6274003,
0.6257294,
0.62405777,
0.6223854,
0.62071234,
0.6190386,
0.61736417,
0.6156891,
0.61401343,
0.6123372,
0.6106603,
0.6089829,
0.607305,
0.6056265,
0.6039476,
0.60226816,
0.6005883,
0.598908,
0.59722733,
0.5955463,
0.59386486,
0.5921831,
0.59050107,
0.5888187,
0.5871361,
0.5854532,
0.5837701,
0.5820868,
0.5804033,
0.5787197,
0.5770359,
0.575352,
0.57366806,
0.571984,
0.5702999,
0.5686158,
0.56693166,
0.56524754,
0.5635635,
0.5618795,
0.56019557,
0.5585118,
0.5568281,
0.55514455,
0.5534612,
0.551778,
0.5500951,
0.5484124,
0.54673,
0.5450478,
0.54336596,
0.54168445,
0.54000324,
0.53832245,
0.5366421,
0.53496206,
0.5332825,
0.53160346,
0.5299248,
0.52824676,
0.5265692,
0.52489215,
0.5232157,
0.5215398,
0.51986456,
0.51818997,
0.51651603,
0.51484275,
0.5131702,
0.5114983,
0.5098272,
0.50815684,
0.5064873,
0.50481856,
0.50315064,
0.50148356,
0.4998174,
0.4981521,
0.49648774,
0.49482432,
0.49316183,
0.49150035,
0.48983985,
0.4881804,
0.486522,
0.48486462,
0.4832084,
0.48155323,
0.4798992,
0.47824633,
0.47659463,
0.4749441,
0.47329482,
0.4716468,
0.47,
0.46835446,
0.46671024,
0.46506736,
0.4634258,
0.46178558,
0.46014675,
0.45850933,
0.45687333,
0.45523876,
0.45360568,
0.45197406,
0.45034397,
0.44871536,
0.44708833,
0.44546285,
0.44383895,
0.44221666,
0.440596,
0.43897697,
0.43735963,
0.43574396,
0.43412998,
0.43251774,
0.43090722,
0.4292985,
0.42769152,
0.42608637,
0.42448303,
0.4228815,
0.42128187,
0.4196841,
0.41808826,
0.4164943,
0.4149023,
0.41331223,
0.41172415,
0.41013804,
0.40855396,
0.4069719,
0.4053919,
0.40381396,
0.4022381,
0.40066436,
0.39909273,
0.39752322,
0.3959559,
0.39439073,
0.39282778,
0.39126703,
0.3897085,
0.3881522,
0.3865982,
0.38504648,
0.38349706,
0.38194993,
0.38040516,
0.37886274,
0.37732267,
0.375785,
0.37424973,
0.37271687,
0.37118647,
0.36965853,
0.36813304,
0.36661002,
0.36508954,
0.36357155,
0.3620561,
0.36054322,
0.3590329,
0.35752517,
0.35602003,
0.35451752,
0.35301763,
0.3515204,
0.3500258,
0.3485339,
0.3470447,
0.34555823,
0.34407446,
0.34259343,
0.34111515,
0.33963963,
0.33816692,
0.336697,
0.3352299,
0.33376563,
0.3323042,
0.33084565,
0.32938993,
0.32793713,
0.3264872,
0.32504022,
0.32359615,
0.32215503,
0.32071686,
0.31928164,
0.31784943,
0.3164202,
0.314994,
0.3135708,
0.31215066,
0.31073356,
0.3093195,
0.30790854,
0.30650064,
0.30509588,
0.30369422,
0.30229566,
0.30090025,
0.299508,
0.2981189,
0.29673296,
0.29535022,
0.2939707,
0.29259437,
0.29122123,
0.28985137,
0.28848472,
0.28712133,
0.2857612,
0.28440437,
0.2830508,
0.28170055,
0.2803536,
0.27900997,
0.27766964,
0.27633268,
0.27499905,
0.2736688,
0.27234194,
0.27101842,
0.2696983,
0.26838157,
0.26706827,
0.26575837,
0.26445192,
0.26314887,
0.2618493,
0.26055318,
0.2592605,
0.25797132,
0.2566856,
0.2554034,
0.25412467,
0.25284946,
0.25157773,
0.2503096,
0.24904492,
0.24778382,
0.24652626,
0.24527225,
0.2440218,
0.24277493,
0.24153163,
0.24029191,
0.23905578,
0.23782326,
0.23659433,
0.23536903,
0.23414734,
0.23292927,
0.23171483,
0.23050404,
0.22929688,
0.22809339,
0.22689353,
0.22569734,
0.22450483,
0.22331597,
0.2221308,
0.22094932,
0.21977153,
0.21859743,
0.21742703,
0.21626033,
0.21509734,
0.21393807,
0.21278252,
0.21163069,
0.21048258,
0.20933822,
0.20819758,
0.2070607,
0.20592754,
0.20479813,
0.20367248,
0.20255059,
0.20143245,
0.20031808,
0.19920748,
0.19810064,
0.19699757,
0.19589828,
0.19480278,
0.19371104,
0.1926231,
0.19153893,
0.19045855,
0.18938197,
0.18830918,
0.18724018,
0.18617497,
0.18511358,
0.18405597,
0.18300217,
0.18195218,
0.18090598,
0.1798636,
0.17882504,
0.17779027,
0.1767593,
0.17573217,
0.17470883,
0.1736893,
0.1726736,
0.1716617,
0.17065361,
0.16964935,
0.1686489,
0.16765225,
0.16665943,
0.16567042,
0.16468522,
0.16370384,
0.16272627,
0.16175252,
0.16078258,
0.15981644,
0.15885411,
0.1578956,
0.15694089,
0.15599,
0.15504292,
0.15409963,
0.15316014,
0.15222447,
0.15129258,
0.1503645,
0.14944021,
0.14851972,
0.14760303,
0.14669013,
0.14578101,
0.14487568,
0.14397413,
0.14307636,
0.14218238,
0.14129217,
0.14040573,
0.13952307,
0.13864417,
0.13776903,
0.13689767,
0.13603005,
0.13516618,
0.13430607,
0.13344972,
0.1325971,
0.13174823,
0.1309031,
0.13006169,
0.12922402,
0.12839006,
0.12755983,
0.12673332,
0.12591052,
0.12509143,
0.12427604,
0.12346435,
0.12265636,
0.121852055,
0.12105144,
0.1202545,
0.11946124,
0.11867165,
0.11788572,
0.11710346,
0.11632485,
0.115549885,
0.11477857,
0.11401089,
0.11324684,
0.11248643,
0.11172963,
0.11097645,
0.110226884,
0.10948092,
0.10873855,
0.10799977,
0.107264586,
0.106532976,
0.105804935,
0.10508047,
0.10435956,
0.1036422,
0.10292839,
0.10221813,
0.1015114,
0.10080819,
0.100108504,
0.09941233,
0.098719664,
0.0980305,
0.09734483,
0.09666264,
0.09598393,
0.095308684,
0.09463691,
0.093968585,
0.09330372,
0.092642285,
0.09198428,
0.09132971,
0.09067855,
0.090030804,
0.089386456,
0.088745505,
0.088107936,
0.08747375,
0.08684293,
0.08621547,
0.085591376,
0.084970616,
0.08435319,
0.0837391,
0.08312833,
0.08252087,
0.08191671,
0.08131585,
0.08071827,
0.080123976,
0.07953294,
0.078945175,
0.078360654,
0.077779375,
0.07720133,
0.07662651,
0.07605491,
0.07548651,
0.07492131,
0.0743593,
0.07380046,
0.073244795,
0.07269229,
0.07214294,
0.07159673,
0.07105365,
0.070513695,
0.06997685,
0.069443114,
0.06891247,
0.06838491,
0.067860425,
0.06733901,
0.066820644,
0.06630533,
0.06579305,
0.0652838,
0.06477757,
0.06427433,
0.0637741,
0.063276865,
0.06278259,
0.062291294,
0.061802953,
0.06131756,
0.0608351,
0.060355574,
0.05987896,
0.059405252,
0.058934443,
0.05846652,
0.058001474,
0.057539295,
0.05707997,
0.056623492,
0.05616985,
0.05571903,
0.055271026,
0.054825824,
0.05438342,
0.053943794,
0.053506944,
0.05307286,
0.052641522,
0.052212927,
0.051787063,
0.051363923,
0.05094349,
0.050525755,
0.05011071,
0.04969834,
0.049288645,
0.0488816,
0.048477206,
0.048075445,
0.04767631,
0.047279786,
0.04688587,
0.046494544,
0.046105802,
0.04571963,
0.04533602,
0.04495496,
0.04457644,
0.044200446,
0.04382697,
0.043456003,
0.043087535,
0.042721547,
0.042358037,
0.04199699,
0.041638397,
0.041282244,
0.040928524,
0.040577225,
0.040228333,
0.039881844,
0.039537743,
0.039196018,
0.038856663,
0.038519662,
0.038185004,
0.037852682,
0.037522685,
0.037195,
0.036869615,
0.036546525,
0.036225714,
0.03590717,
0.035590887,
0.035276853,
0.034965057,
0.034655485,
0.03434813,
0.03404298,
0.033740025,
0.033439253,
0.033140652,
0.032844216,
0.03254993,
0.032257784,
0.03196777,
0.031679876,
0.031394087,
0.031110398,
0.030828796,
0.030549273,
0.030271813,
0.02999641,
0.029723052,
0.029451728,
0.029182427,
0.02891514,
0.028649855,
0.028386563,
0.028125253,
0.02786591,
0.027608532,
0.027353102,
0.027099613,
0.026848052,
0.026598409,
0.026350675,
0.02610484,
0.02586089,
0.02561882,
0.025378617,
0.025140269,
0.024903767,
0.0246691,
0.02443626,
0.024205236,
0.023976017,
0.023748592,
0.023522953,
0.023299087,
0.023076987,
0.022856642,
0.02263804,
0.022421172,
0.022206029,
0.0219926,
0.021780876,
0.021570845,
0.021362498,
0.021155827,
0.020950818,
0.020747466,
0.020545758,
0.020345684,
0.020147236,
0.019950403,
0.019755175,
0.019561544,
0.019369498,
0.019179028,
0.018990126,
0.01880278,
0.018616982,
0.018432721,
0.01824999,
0.018068777,
0.017889075,
0.017710872,
0.01753416,
0.017358929,
0.017185168,
0.017012872,
0.016842028,
0.016672628,
0.016504662,
0.016338123,
0.016173,
0.016009282,
0.015846964,
0.015686033,
0.015526483,
0.015368304,
0.015211486,
0.0150560215,
0.014901901,
0.014749114,
0.014597654,
0.014447511,
0.0142986765,
0.014151142,
0.014004898,
0.013859936,
0.013716248,
0.0135738235,
0.013432656,
0.013292736,
0.013154055,
0.013016605,
0.012880377,
0.012745362,
0.012611552,
0.012478939,
0.012347515,
0.01221727,
0.012088198,
0.0119602885,
0.0118335355,
0.011707929,
0.011583461,
0.011460125,
0.011337912,
0.011216813,
0.011096821,
0.010977928,
0.0108601255,
0.010743406,
0.010627762,
0.0105131855,
0.010399668,
0.010287202,
0.01017578,
0.010065395,
0.009956039,
0.009847702,
0.009740381,
0.0096340645,
0.009528747,
0.009424419,
0.009321076,
0.009218709,
0.00911731,
0.009016872,
0.008917389,
0.008818853,
0.008721256,
0.008624591,
0.008528852,
0.00843403,
0.00834012,
0.008247114,
0.008155004,
0.008063785,
0.007973449,
0.007883989,
0.007795398,
0.0077076694,
0.0076207966,
0.0075347726,
0.007449591,
0.0073652444,
0.007281727,
0.0071990318,
0.007117152,
0.0070360815,
0.0069558136,
0.0068763415,
0.006797659,
0.00671976,
0.0066426382,
0.0065662866,
0.006490699,
0.0064158696,
0.006341792,
0.00626846,
0.0061958674,
0.0061240084,
0.0060528764,
0.0059824656,
0.0059127696,
0.0058437833,
0.0057755,
0.0057079145,
0.00564102,
0.0055748112,
0.0055092825,
0.005444428,
0.005380241,
0.0053167176,
0.005253851,
0.005191636,
0.005130066,
0.0050691366,
0.0050088423,
0.0049491767,
0.004890135,
0.0048317118,
0.004773902,
0.004716699,
0.0046600983,
]
|
keras-cv/keras_cv/models/stable_diffusion/constants.py/0
|
{
"file_path": "keras-cv/keras_cv/models/stable_diffusion/constants.py",
"repo_id": "keras-cv",
"token_count": 11192
}
| 89 |
# Copyright 2023 The KerasCV Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.point_cloud.point_cloud import _box_area
from keras_cv.point_cloud.point_cloud import _center_xyzWHD_to_corner_xyz
from keras_cv.point_cloud.point_cloud import _is_on_lefthand_side
from keras_cv.point_cloud.point_cloud import coordinate_transform
from keras_cv.point_cloud.point_cloud import group_points_by_boxes
from keras_cv.point_cloud.point_cloud import is_within_any_box3d
from keras_cv.point_cloud.point_cloud import is_within_any_box3d_v2
from keras_cv.point_cloud.point_cloud import is_within_any_box3d_v3
from keras_cv.point_cloud.point_cloud import is_within_box2d
from keras_cv.point_cloud.point_cloud import is_within_box3d
from keras_cv.point_cloud.point_cloud import spherical_coordinate_transform
from keras_cv.point_cloud.point_cloud import within_a_frustum
from keras_cv.point_cloud.point_cloud import within_box3d_index
from keras_cv.point_cloud.point_cloud import wrap_angle_radians
|
keras-cv/keras_cv/point_cloud/__init__.py/0
|
{
"file_path": "keras-cv/keras_cv/point_cloud/__init__.py",
"repo_id": "keras-cv",
"token_count": 480
}
| 90 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import waymo_open_dataset
except ImportError:
waymo_open_dataset = None
try:
import cv2
except ImportError:
cv2 = None
try:
import matplotlib
except ImportError:
matplotlib = None
try:
import pycocotools
except ImportError:
pycocotools = None
def assert_cv2_installed(symbol_name):
if cv2 is None:
raise ImportError(
f"{symbol_name} requires the `cv2` package. "
"Please install the package using "
"`pip install opencv-python`."
)
def assert_matplotlib_installed(symbol_name):
if matplotlib is None:
raise ImportError(
f"{symbol_name} requires the `matplotlib` package. "
"Please install the package using "
"`pip install matplotlib`."
)
def assert_waymo_open_dataset_installed(symbol_name):
if waymo_open_dataset is None:
raise ImportError(
f"{symbol_name} requires the `waymo-open-dataset-tf` package. "
"Please install the package from source. "
"Installation instructions can be found at "
"https://github.com/waymo-research/waymo-open-dataset"
"/blob/master/docs/quick_start.md"
)
def assert_pycocotools_installed(symbol_name):
if pycocotools is None:
raise ImportError(
f"{symbol_name} requires the `pycocotools` package. "
"Please install the package using "
"`pip install pycocotools`."
)
|
keras-cv/keras_cv/utils/conditional_imports.py/0
|
{
"file_path": "keras-cv/keras_cv/utils/conditional_imports.py",
"repo_id": "keras-cv",
"token_count": 829
}
| 91 |
import hashlib
import json
import keras
import numpy as np
import tensorflow as tf
import keras_cv
filepath = tf.keras.utils.get_file(origin="https://i.imgur.com/9i63gLN.jpg")
image = keras.utils.load_img(filepath)
image = np.array(image)
image = np.array([image]).astype(float)
original_models_with_weights = [
keras_cv.models.efficientnet_v2.EfficientNetV2S,
keras_cv.models.efficientnet_v2.EfficientNetV2B0,
keras_cv.models.efficientnet_v2.EfficientNetV2B1,
keras_cv.models.efficientnet_v2.EfficientNetV2B2,
]
presets_with_weights = [
"efficientnetv2_s_imagenet_classifier",
"efficientnetv2_b0_imagenet_classifier",
"efficientnetv2_b1_imagenet_classifier",
"efficientnetv2_b2_imagenet_classifier",
]
preset_updates = {}
for original_model_cls, preset_name in zip(
original_models_with_weights, presets_with_weights
):
original_model = original_model_cls(
include_rescaling=True,
include_top=True,
num_classes=1000,
weights="imagenet",
)
model = keras_cv.models.ImageClassifier.from_preset(
preset_name, load_weights=False
)
original_layers = list(original_model._flatten_layers())
original_layers = [
layer for layer in original_layers if "dropout" not in layer.name
]
new_layers = list(model._flatten_layers())
new_layers = [layer for layer in new_layers if "backbone" not in layer.name]
for original_layer, new_layer in zip(original_layers, new_layers):
new_layer.set_weights(original_layer.get_weights())
output_one = model.predict(image)
output_two = original_model.predict(image)
deltas = output_one - output_two
# As tiny delta as possible
delta = 0.00001
assert all(((output_one - output_two) < delta).flatten().tolist())
weights_path = f"efficientnet_v2/{preset_name}.h5"
model.save_weights(weights_path)
weights_hash = hashlib.md5(open(weights_path, "rb").read()).hexdigest()
preset_updates[preset_name] = {
"weights_url": f"https://storage.googleapis.com/keras-cv/models/{weights_path}", # noqa: E501
"weights_hash": weights_hash,
}
with open("efficientnet_v2/preset_updates.json", "w") as f:
json.dump(preset_updates, f, indent=4)
print("Please run:")
print("`gsutil cp -r efficientnet_v2/ gs://keras-cv/models/`")
print('`gsutil acl ch -u AllUsers:R "gs://keras-cv/models/efficientnet_v2/*"`')
|
keras-cv/shell/backbone_converters/convert_efficientnet_v2_backbones.py/0
|
{
"file_path": "keras-cv/shell/backbone_converters/convert_efficientnet_v2_backbones.py",
"repo_id": "keras-cv",
"token_count": 991
}
| 92 |
## レイヤーの重み初期化方法
初期化用引数で,Kerasレイヤーの重みをランダムに初期化する確率分布を指定できます.
初期化用引数のキーワードはレイヤーにより異なりますが,大抵は単純に `kernel_initializer` 及び `bias_initializer` です:
```python
model.add(Dense(64,
kernel_initializer='random_uniform',
bias_initializer='zeros'))
```
## 利用可能な初期化方法
以下の初期化方法は全て `keras.initializers` モジュールとして定義されています.
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/initializers.py#L9)</span>
### Initializer
```python
keras.initializers.Initializer()
```
これは初期化クラスの基底クラスです.
---
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/initializers.py#L24)</span>
### Zeros
```python
keras.initializers.Zeros()
```
全ての重みを0で初期化します.
---
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/initializers.py#L32)</span>
### Ones
```python
keras.initializers.Ones()
```
全ての重みを1で初期化します.
---
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/initializers.py#L40)</span>
### Constant
```python
keras.initializers.Constant(value=0)
```
全ての重みを定数で初期化します.
__引数__
- __value__: 浮動小数点数またはテンソルです
---
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/initializers.py#L57)</span>
### RandomNormal
```python
keras.initializers.RandomNormal(mean=0.0, stddev=0.05, seed=None)
```
正規分布に従って重みを初期化します.
__引数__
- __mean__: 浮動小数点数またはスカラテンソルであって分布の平均です
- __stddev__: 浮動小数点数またはスカラテンソルであって分布の標準偏差です
- __seed__: 整数.乱数生成に使われます
---
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/initializers.py#L85)</span>
### RandomUniform
```python
keras.initializers.RandomUniform(minval=-0.05, maxval=0.05, seed=None)
```
一様分布に従って重みを初期化します.
__引数__
- __minval__: 浮動小数点数またはスカラテンソル.乱数を発生する範囲の下限です
- __maxval__: 浮動小数点数またはスカラテンソル.乱数を発生する範囲の上限です
- __seed__: 整数.乱数生成に使われます
---
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/initializers.py#L113)</span>
### TruncatedNormal
```python
keras.initializers.TruncatedNormal(mean=0.0, stddev=0.05, seed=None)
```
切断正規分布に従って重みを初期化します.
これは正規分布と似ていますが,平均から標準偏差の2倍以上離れた値は切り捨てられ,再度サンプリングされます.これはニューラルネットワークの重みの初期化方法として推奨されます.
__引数__
- __mean__: 浮動小数点数またはスカラテンソルであって分布の平均です
- __stddev__: 浮動小数点数またはスカラテンソルであって分布の標準偏差です
- __seed__: 整数.乱数生成に使われます
---
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/initializers.py#L146)</span>
### VarianceScaling
```python
keras.initializers.VarianceScaling(scale=1.0, mode='fan_in', distribution='normal', seed=None)
```
重みテンソルのサイズ(`shape`)に合わせてスケーリングした初期化を行います.
`distribution="normal"` としたとき,
平均を 0 とし標準偏差を
`stddev = sqrt(scale / n)`
とした切断正規分布が使われます.
ここで `n` は
- `mode="fan_in"` のとき,入力ユニットの数
- `mode="fan_out"` のとき,出力ユニットの数
- `mode="fan_avg"` のとき,入力ユニットと出力ユニットの数の平均
が使われます.
`distribution="uniform"` としたとき,
[-limit, limit] を範囲とする一様分布が用いられます.
ここで `limit = sqrt(3 * scale / n)` です.
__引数__
- __scale__: スケーリング値(正の実数)
- __mode__: "fan_in","fan_out","fan_avg" のいずれか
- __distribution__: 用いる確率分布."normal","uniform" のいずれか
- __seed__: 整数.乱数生成に使われます
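A small illustrative sketch (not part of the original text): with `scale=2.0`, `mode='fan_in'` and `distribution='normal'`, `VarianceScaling` reproduces the `he_normal` initializer described further below; the layer size and seed here are arbitrary.
```python
from keras import initializers
from keras.layers import Dense

# stddev = sqrt(scale / fan_in); with scale=2.0 and mode='fan_in'
# this matches the he_normal initializer.
init = initializers.VarianceScaling(scale=2.0, mode='fan_in',
                                    distribution='normal', seed=42)
layer = Dense(64, kernel_initializer=init)
```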
---
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/initializers.py#L219)</span>
### Orthogonal
```python
keras.initializers.Orthogonal(gain=1.0, seed=None)
```
重みテンソルが直交行列となるように初期化されます.
__引数__
- __gain__: 最後に直交行列に乗ずる係数です
- __seed__: 整数.乱数生成に使われます
__参考文献__
Saxe et al., http://arxiv.org/abs/1312.6120
---
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/initializers.py#L256)</span>
### Identity
```python
keras.initializers.Identity(gain=1.0)
```
単位行列で初期化されます.
これは重みテンソルが2次正方行列の場合のみ使えます.
__引数__
- __gain__: 最後に単位行列に乗ずる係数です
---
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/initializers.py#L304)</span>
### glorot_normal
```python
glorot_normal(seed=None)
```
Glorot の正規分布(Xavier の正規分布とも呼ばれます)による初期化を返します.
これは平均を 0 ,標準偏差を
`stddev = sqrt(2 / (fan_in + fan_out))`
とする切断正規分布と同じです.
ここで `fan_in` は入力ユニット数,`fan_out` は出力ユニット数です.
__引数__
- __seed__: 整数.乱数生成に使われます
__戻り値__
初期化インスタンス
__参考文献__
Glorot & Bengio, AISTATS 2010 -
http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf
---
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/initializers.py#L328)</span>
### glorot_uniform
```python
glorot_uniform(seed=None)
```
Glorot の一様分布(Xavier の一様分布とも呼ばれます)による初期化を返します.
これは limit を `sqrt(6 / (fan_in + fan_out))`
としたとき [-limit, limit] を範囲とする一様分布と同じです.
ここで `fan_in` は入力ユニット数,`fan_out` は出力ユニット数です.
__引数__
- __seed__: 整数.乱数生成に使われます
__戻り値__
初期化インスタンス
__参考文献__
Glorot & Bengio, AISTATS 2010 -
http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf
---
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/initializers.py#L352)</span>
### he_normal
```python
he_normal(seed=None)
```
He の正規分布による初期化を返します.
これは平均を 0 ,標準偏差を
`stddev = sqrt(2 / fan_in)`
とする切断正規分布です.
ここで `fan_in` は入力ユニット数です.
__引数__
- __seed__: 整数.乱数生成に使われます
__戻り値__
初期化インスタンス
__参考文献__
He et al., http://arxiv.org/abs/1502.01852
---
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/initializers.py#L374)</span>
### lecun_normal
```python
lecun_normal(seed=None)
```
LeCunの正規分布による初期化.
平均を 0 ,標準偏差を`stddev = sqrt(1 / fan_in)`とする切断正規分布からサンプルします.
ここで `fan_in` は入力ユニット数です.
__引数__
- __seed__: 整数.乱数生成に使われます
__戻り値__
初期化インスタンス
__参考文献__
- [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)
- [Efficient Backprop](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)
---
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/initializers.py#L397)</span>
### he_uniform
```python
he_uniform(seed=None)
```
He の一様分布による初期化を返します.
これは limit を
`sqrt(6 / fan_in)`
としたとき [-limit, limit] を範囲とする一様分布を用います.
ここで `fan_in` は入力ユニット数です.
__引数__
- __seed__: 整数.乱数生成に使われます
__戻り値__
初期化インスタンス
__参考文献__
He et al., http://arxiv.org/abs/1502.01852
---
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/initializers.py#L281)</span>
### lecun_uniform
```python
lecun_uniform(seed=None)
```
LeCun の一様分布による初期化を返します.
これは limit を
`sqrt(3 / fan_in)`
とするとき
[-limit, limit]
を範囲とする一様分布を用います.
ここで `fan_in` は入力ユニット数です.
__引数__
- __seed__: 整数.乱数生成に使われます
__戻り値__
初期化インスタンス
__参考文献__
LeCun 98, Efficient Backprop - http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf
---
初期化は,文字列(上記の利用可能な初期化方法のいずれかと一致する必要があります)または callable として渡せます.
```python
from keras import initializers
model.add(Dense(64, kernel_initializer=initializers.random_normal(stddev=0.01)))
# also works; will use the default parameters.
model.add(Dense(64, kernel_initializer='random_normal'))
```
## カスタマイズ
callable なオブジェクトを渡す場合には,初期化しようとする変数の `shape` と `dtype` を引数に取るように設計してください.
```python
from keras import backend as K
def my_init(shape, dtype=None):
return K.random_normal(shape, dtype=dtype)
model.add(Dense(64, kernel_initializer=my_init))
```
|
keras-docs-ja/sources/initializers.md/0
|
{
"file_path": "keras-docs-ja/sources/initializers.md",
"repo_id": "keras-docs-ja",
"token_count": 4872
}
| 93 |
# モデルについて
Kerasには2つの利用可能なモデルがあります: 1つは[Sequentialモデル](/models/sequential),そしてもう1つは[functional APIとともに用いるモデルクラス](/models/model).
これらのモデルには,共通のメソッドと属性が多数あります.
- `model.layers`はモデルに含まれるレイヤーを平滑化したリストです.
- `model.inputs`はモデルの入力テンソルのリストです.
- `model.outputs`はモデルの出力テンソルのリストです.
- `model.summary()`: モデルの要約を出力します.[utils.print_summary](/utils/#print_summary)へのショートカットです.
- `model.get_config()`: モデルの設定を持つ辞書を返します.下記のように,モデルはそれ自身の設定から再インスタンス化できます.
```python
config = model.get_config()
model = Model.from_config(config)
# or, for Sequential:
model = Sequential.from_config(config)
```
- `model.get_weights()`: モデルの全ての重みテンソルをNumpy 配列を要素にもつリストとして返します.
- `model.set_weights(weights)`: Numpy 配列のリストからモデルの重みの値をセットします.リスト中のNumpy 配列のshapeは`get_weights()`で得られるリスト中のNumpy配列のshapeと同じ必要があります.
- `model.to_json()`: モデルの表現をJSON文字列として返します.このモデルの表現は,重みを含まないアーキテクチャのみであることに注意してください.下記の様に,JSON文字列から同じアーキテクチャのモデル(重みについては初期化されます)を再インスタンス化できます.
```python
from keras.models import model_from_json
json_string = model.to_json()
model = model_from_json(json_string)
```
- `model.to_yaml()`: モデルの表現をYAML文字列として返します.このモデルの表現は,重みを含まないアーキテクチャのみであることに注意してください.下記の様に,YAML文字列から同じアーキテクチャのモデル(重みについては初期化されます)を再インスタンス化できます.
```python
from keras.models import model_from_yaml
yaml_string = model.to_yaml()
model = model_from_yaml(yaml_string)
```
- `model.save_weights(filepath)`: モデルの重みをHDF5形式のファイルに保存します.
- `model.load_weights(filepath, by_name=False)`: (`save_weights`によって作られた) モデルの重みをHDF5形式のファイルから読み込みます.デフォルトでは,アーキテクチャは不変であることが望まれます.(いくつかのレイヤーが共通した)異なるアーキテクチャに重みを読み込む場合,`by_name=True`を使うことで,同名のレイヤーにのみ読み込み可能です.
注意:`h5py`のインストール方法についてはFAQの[Kerasでモデルを保存するためにHDF5やh5pyをインストールするには?](/faq/#how-can-i-install-hdf5-or-h5py-to-save-my-models-in-keras)も参照してください.
## モデルの派生
これらの2種類のモデルに加えて,`Model`クラスを継承して`call`メソッドで順伝播を
実装することにより独自にカスタムしたモデルを作成することができます(`Model`クラスの継承APIはKeras 2.2.0で導入されました).
次は`Model`を継承して単純なマルチレイヤーパーセプトロンモデルを作成した例です:
```python
import keras
class SimpleMLP(keras.Model):
def __init__(self, use_bn=False, use_dp=False, num_classes=10):
super(SimpleMLP, self).__init__(name='mlp')
self.use_bn = use_bn
self.use_dp = use_dp
self.num_classes = num_classes
self.dense1 = keras.layers.Dense(32, activation='relu')
self.dense2 = keras.layers.Dense(num_classes, activation='softmax')
if self.use_dp:
self.dp = keras.layers.Dropout(0.5)
if self.use_bn:
self.bn = keras.layers.BatchNormalization(axis=-1)
def call(self, inputs):
x = self.dense1(inputs)
if self.use_dp:
x = self.dp(x)
if self.use_bn:
x = self.bn(x)
return self.dense2(x)
model = SimpleMLP()
model.compile(...)
model.fit(...)
```
レイヤーは`__init__(self, ...)`で定義されており,順伝播は`call(self, inputs)`で記述しています.`call`では`self.add_loss(loss_tensor)`を呼ぶことで(カスタムレイヤーのように)カスタムした損失も指定できます.
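The following is a small illustrative sketch (not part of the original text) of calling `self.add_loss` inside `call`; the layer sizes and the 0.01 weight are arbitrary choices.
```python
import keras
from keras import backend as K

class RegularizedMLP(keras.Model):
    def __init__(self, num_classes=10):
        super(RegularizedMLP, self).__init__(name='regularized_mlp')
        self.dense1 = keras.layers.Dense(32, activation='relu')
        self.dense2 = keras.layers.Dense(num_classes, activation='softmax')

    def call(self, inputs):
        x = self.dense1(inputs)
        # Add a custom activity-regularization term to the model loss.
        self.add_loss(0.01 * K.mean(K.square(x)))
        return self.dense2(x)
```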
静的なレイヤーのグラフと違って,派生したモデルにおいてモデルのトポロジーはPythonのコードで定義されます.
これはモデルのトポロジーは検査もシリアライズもできないということです.結果として,次のメソッドや属性は**派生したモデルでは使えません**:
- `model.inputs`や`model.outputs`.
- `model.to_yaml()`や`model.to_json()`
- `model.get_config()`や`model.save()`.
**キーポイント:**仕事に対して適切なAPIを使ってください.`Model`の継承APIを使えば複雑なモデルの実装で優れた柔軟性を得られますが,(次のような機能も不足している上に)大きなコストも発生してしまいます:より冗長になり,より複雑になり,エラーの機会もより増えてしまうのです.可能であればfunctional APIを使った方がユーザフレンドリです.
|
keras-docs-ja/sources/models/about-keras-models.md/0
|
{
"file_path": "keras-docs-ja/sources/models/about-keras-models.md",
"repo_id": "keras-docs-ja",
"token_count": 2614
}
| 94 |
# 케라스 FAQ: 자주 묻는 케라스 질문
- [케라스는 어떻게 인용해야 합니까?](#케라스는-어떻게-인용해야-합니까)
- [케라스를 GPU에서 실행하려면 어떻게 해야 합니까?](#케라스를-gpu에서-실행하려면-어떻게-해야-합니까)
- [케라스를 여러 대의 GPU에서 실행하려면 어떻게 해야 합니까?](#케라스를-여러-대의-gpu에서-실행하려면-어떻게-해야-합니까)
- ["샘플", "배치", "에폭"은 무슨 뜻입니까?](#샘플-배치-에폭은-무슨-뜻입니까)
- [케라스 모델은 어떻게 저장합니까?](#케라스-모델은-어떻게-저장합니까)
- [왜 훈련 손실이 테스트 손실보다 훨씬 높습니까?](#왜-훈련-손실이-테스트-손실보다-훨씬-높습니까)
- [중간층의 아웃풋은 어떻게 얻을 수 있습니까?](#중간층의-아웃풋은-어떻게-얻을-수-있습니까)
- [메모리를 초과하는 데이터셋에 케라스를 사용하려면 어떻게 해야 합니까?](#메모리를-초과하는-데이터셋에-케라스를-사용하려면-어떻게-해야-합니까)
- [검증 손실이 더이상 감소하지 않는 경우 훈련을 어떻게 중단할 수 있습니까?](#검증-손실이-더이상-감소하지-않는-경우-훈련을-어떻게-중단할-수-있습니까)
- [검증 분리는 어떻게 계산됩니까?](#검증-분리는-어떻게-계산됩니까)
- [훈련 중 데이터가 섞입니까?](#훈련-중-데이터가-섞입니까)
- [각 에폭별 훈련 손실/검증 손실/정확도는 어떻게 기록할 수 있습니까?](#각-에폭별-훈련-손실--검증-손실--정확도는-어떻게-기록할-수-있습니까)
- [케라스 층들을 "동결" 시키려면 어떻게 해야 합니까?](#케라스-층들을-동결-시키려면-어떻게-해야-합니까)
- [상태형 순환 신경망을 사용하려면 어떻게 해야 합니까?](#상태형-순환-신경망을-사용하려면-어떻게-해야-합니까)
- [Sequential 모델에서 층을 없애려면 어떻게 해야 합니까?](#sequential-모델에서-층을-없애려면-어떻게-해야-합니까)
- [케라스에서 선행 훈련된 모델을 사용하려면 어떻게 해야 합니까?](#케라스에서-선행-훈련된-모델을-사용하려면-어떻게-해야-합니까)
- [케라스에서 HDF5 인풋을 사용하려면 어떻게 해야 합니까?](#케라스에서-hdf5-인풋을-사용하려면-어떻게-해야-합니까)
- [케라스 구성 파일은 어디에 저장됩니까?](#케라스-구성-파일은-어디에-저장됩니까)
- [개발 중 케라스를 사용해 재현 가능한 결과를 얻으려면 어떻게 해야 합니까?](#개발-중-케라스를-사용해-재현-가능한-결과를-얻으려면-어떻게-해야-합니까)
- [HDF5나 h5py를 설치해 케라스 모델을 저장하려면 어떻게 해야 합니까?](#hdf5나-h5py를-설치해-케라스-모델을-저장하려면-어떻게-해야-합니까)
---
### 케라스는 어떻게 인용해야 합니까?
연구 중 케라스가 도움이 되었다면 출판 시 케라스를 인용해 주십시오. 다음은 BibTeX 등재 예시입니다.
```
@misc{chollet2015keras,
title={Keras},
author={Chollet, Fran\c{c}ois and others},
year={2015},
howpublished={\url{https://keras.io}},
}
```
---
### 케라스를 GPU에서 실행하려면 어떻게 해야 합니까?
**TensorFlow** 혹은 **CNTK** 백엔드를 사용하는 경우, 사용 가능한 GPU가 감지되면 코드가 자동으로 GPU에서 작동됩니다.
**Theano** 백엔드를 사용하는 경우, 다음 방법 중 하나를 사용하시면 됩니다:
**방법 1**: Theano 플래그를 사용합니다.
```bash
THEANO_FLAGS=device=gpu,floatX=float32 python my_keras_script.py
```
본인 장치의 식별자에 따라 'gpu' 표기를 바꾸어야 할 수도 있습니다(예. `gpu0`, `gpu1`, 등).
**방법 2**: `.theanorc`를 설정합니다: [설명](http://deeplearning.net/software/theano/library/config.html)
**방법 3**: 코드 첫 부분에 수동으로`theano.config.device`, `theano.config.floatX`를 설정합니다:
```python
import theano
theano.config.device = 'gpu'
theano.config.floatX = 'float32'
```
---
### 케라스를 여러 대의 GPU에서 실행하려면 어떻게 해야 합니까?
**텐서플로** 백엔드를 사용하실 것을 권장합니다. 단일 모델을 여러 대의 GPU에 실행하려면 다음의 두 가지 방법이 있습니다: **데이터 병렬처리**와 **장치 병렬처리**.
대부분의 경우에는 데이터 병렬처리를 사용하면 됩니다.
#### 데이터 병렬처리
데이터 병렬처리는 목표 모델을 장치마다 복제하고, 각각의 복제본으로 입력 데이터의 각기 다른 부분을 처리하는 방식으로 구성됩니다.
케라스의 내장 유틸리티 `keras.utils.multi_gpu_model`은 어떤 모델도 병렬 데이터 버전으로 만들 수 있으며, 8대의 GPU까지는 거의 비례하여 속도를 증가시킬 수 있습니다.
더 자세한 정보는 [multi_gpu_model](/utils/#multi_gpu_model)문서를 참고하십시오. 다음은 간단한 예시입니다:
```python
from keras.utils import multi_gpu_model
# `model`을 8대의 GPU에 복제합니다.
# 이 예시에서는 장치에 8대의 GPU가 있는 것으로 가정합니다.
parallel_model = multi_gpu_model(model, gpus=8)
parallel_model.compile(loss='categorical_crossentropy',
optimizer='rmsprop')
# `fit` 호출이 8대 GPU에 분산됩니다.
# 배치 크기가 256이므로 각 GPU는 32개 샘플을 처리합니다.
parallel_model.fit(x, y, epochs=20, batch_size=256)
```
#### 장치 병렬처리
장치 병렬처리는 같은 모델의 다른 부분을 각기 다른 장치에서 실행하는 방식입니다. 두 브랜치로 구성된 모델과 같은 병렬 구조의 모델에 가장 적합합니다.
TensorFlow 디바이스 스코프를 사용해 이를 구현할 수 있습니다. 다음은 간단한 예시입니다.
```python
# 공유된 LSTM이 두 가지 다른 시퀀스를 병렬로 인코딩하는 모델
input_a = keras.Input(shape=(140, 256))
input_b = keras.Input(shape=(140, 256))
shared_lstm = keras.layers.LSTM(64)
# 첫 번째 시퀀스를 1대의 GPU에서 처리합니다
with tf.device('/gpu:0'):
    encoded_a = shared_lstm(input_a)
# 다음 시퀀스를 다른 GPU에서 처리합니다
with tf.device('/gpu:1'):
    encoded_b = shared_lstm(input_b)
# CPU에 결과들을 합칩니다
with tf.device('/cpu:0'):
    merged_vector = keras.layers.concatenate([encoded_a, encoded_b],
                                             axis=-1)
```
---
### "샘플", "배치", "에폭"은 무슨 뜻입니까?
다음은 케라스를 올바르게 이용하기 위해 알아두어야 할 몇 가지 일반적인 정의입니다.
- **샘플**: 데이터셋의 요소 하나
- *예시:* 하나의 이미지는 합성곱 신경망의 **샘플**입니다
- *예시:* 오디오 파일 하나는 음성 인식 모델의 **샘플**입니다.
- **배치**: *N* 샘플들의 모음. 하나의 **배치**에서의 샘플들은 독립적으로 동시에 처리됩니다. 훈련에서 하나의 배치는 모델을 1회만 업데이트합니다.
- **배치**는 일반적으로 단일 입력보다 입력 데이터에서의 분포를 더욱 정확히 추정합니다. 배치가 클수록 더 좋은 추정치를 예측할 수 있지만 1회 업데이트를 위한 배치 처리 과정이 오래 걸립니다. 배치가 크면 보통 평가/예측도 빨라지기에 메모리를 초과하지 않는 선에서 배치 크기를 가능한 크게 하는 것을 권장합니다.
- **에폭**: 임의의 차단 장치로, 보통 "전체 데이터셋을 1회 통과하는 것"으로 정의됩니다. 훈련을 별개의 단계로 구분하는 데에 사용되며, 이는 로그 기록이나 주기적 평가에 유용합니다.
- 케라스 모델의 `fit` 메소드와 함께 `validation_data`, 혹은 `validation_split`을 사용하는 경우, 각 **에폭**이 끝날 때마다 평가가 실행됩니다.
- 케라스에서는 **에폭**의 끝에서 실행되도록 특별히 고안된 [콜백](https://keras.io/callbacks/)을 추가할 수 있습니다. 학습률 변화와 모델 체크포인트(저장)가 그 사례입니다.
---
### 케라스 모델은 어떻게 저장합니까?
#### Saving/loading whole models (architecture + weights + optimizer state)
*It is not recommended to use pickle or cPickle to save a Keras model.*
You can use `model.save(filepath)` to save a Keras model into a single HDF5 file which will contain:
- the architecture of the model, allowing to re-create the model
- the weights of the model
- the training configuration (loss, optimizer)
- the state of the optimizer, allowing to resume training exactly where you left off.
You can then use `keras.models.load_model(filepath)` to reinstantiate your model.
`load_model` will also take care of compiling the model using the saved training configuration (unless the model was never compiled in the first place).
Example:
```python
from keras.models import load_model
model.save('my_model.h5') # creates a HDF5 file 'my_model.h5'
del model # deletes the existing model
# returns a compiled model
# identical to the previous one
model = load_model('my_model.h5')
```
Please also see [How can I install HDF5 or h5py to save my models in Keras?](#how-can-i-install-hdf5-or-h5py-to-save-my-models-in-keras) for instructions on how to install `h5py`.
#### Saving/loading only a model's architecture
If you only need to save the **architecture of a model**, and not its weights or its training configuration, you can do:
```python
# save as JSON
json_string = model.to_json()
# save as YAML
yaml_string = model.to_yaml()
```
The generated JSON / YAML files are human-readable and can be manually edited if needed.
You can then build a fresh model from this data:
```python
# model reconstruction from JSON:
from keras.models import model_from_json
model = model_from_json(json_string)
# model reconstruction from YAML:
from keras.models import model_from_yaml
model = model_from_yaml(yaml_string)
```
#### Saving/loading only a model's weights
If you need to save the **weights of a model**, you can do so in HDF5 with the code below:
```python
model.save_weights('my_model_weights.h5')
```
Assuming you have code for instantiating your model, you can then load the weights you saved into a model with the *same* architecture:
```python
model.load_weights('my_model_weights.h5')
```
If you need to load the weights into a *different* architecture (with some layers in common), for instance for fine-tuning or transfer-learning, you can load them by *layer name*:
```python
model.load_weights('my_model_weights.h5', by_name=True)
```
Example:
```python
"""
Assuming the original model looks like this:
model = Sequential()
model.add(Dense(2, input_dim=3, name='dense_1'))
model.add(Dense(3, name='dense_2'))
...
model.save_weights(fname)
"""
# new model
model = Sequential()
model.add(Dense(2, input_dim=3, name='dense_1')) # will be loaded
model.add(Dense(10, name='new_dense')) # will not be loaded
# load weights from first model; will only affect the first layer, dense_1.
model.load_weights(fname, by_name=True)
```
Please also see [How can I install HDF5 or h5py to save my models in Keras?](#how-can-i-install-hdf5-or-h5py-to-save-my-models-in-keras) for instructions on how to install `h5py`.
#### Handling custom layers (or other custom objects) in saved models
If the model you want to load includes custom layers or other custom classes or functions,
you can pass them to the loading mechanism via the `custom_objects` argument:
```python
from keras.models import load_model
# Assuming your model includes instance of an "AttentionLayer" class
model = load_model('my_model.h5', custom_objects={'AttentionLayer': AttentionLayer})
```
Alternatively, you can use a [custom object scope](https://keras.io/utils/#customobjectscope):
```python
from keras.utils import CustomObjectScope
with CustomObjectScope({'AttentionLayer': AttentionLayer}):
model = load_model('my_model.h5')
```
Custom objects handling works the same way for `load_model`, `model_from_json`, `model_from_yaml`:
```python
from keras.models import model_from_json
model = model_from_json(json_string, custom_objects={'AttentionLayer': AttentionLayer})
```
---
### 왜 훈련 손실이 테스트 손실보다 훨씬 높습니까?
A Keras model has two modes: training and testing. Regularization mechanisms, such as Dropout and L1/L2 weight regularization, are turned off at testing time.
Besides, the training loss is the average of the losses over each batch of training data. Because your model is changing over time, the loss over the first batches of an epoch is generally higher than over the last batches. On the other hand, the testing loss for an epoch is computed using the model as it is at the end of the epoch, resulting in a lower loss.
---
### 중간층의 아웃풋은 어떻게 얻을 수 있습니까?
One simple way is to create a new `Model` that will output the layers that you are interested in:
```python
from keras.models import Model
model = ... # create the original model
layer_name = 'my_layer'
intermediate_layer_model = Model(inputs=model.input,
outputs=model.get_layer(layer_name).output)
intermediate_output = intermediate_layer_model.predict(data)
```
Alternatively, you can build a Keras function that will return the output of a certain layer given a certain input, for example:
```python
from keras import backend as K
# with a Sequential model
get_3rd_layer_output = K.function([model.layers[0].input],
[model.layers[3].output])
layer_output = get_3rd_layer_output([x])[0]
```
Similarly, you could build a Theano and TensorFlow function directly.
Note that if your model has a different behavior in training and testing phase (e.g. if it uses `Dropout`, `BatchNormalization`, etc.), you will need to pass the learning phase flag to your function:
```python
get_3rd_layer_output = K.function([model.layers[0].input, K.learning_phase()],
[model.layers[3].output])
# output in test mode = 0
layer_output = get_3rd_layer_output([x, 0])[0]
# output in train mode = 1
layer_output = get_3rd_layer_output([x, 1])[0]
```
---
### 메모리를 초과하는 데이터셋에 케라스를 사용하려면 어떻게 해야 합니까?
You can do batch training using `model.train_on_batch(x, y)` and `model.test_on_batch(x, y)`. See the [models documentation](/models/sequential).
Alternatively, you can write a generator that yields batches of training data and use the method `model.fit_generator(data_generator, steps_per_epoch, epochs)`.
You can see batch training in action in our [CIFAR10 example](https://github.com/keras-team/keras/blob/master/examples/cifar10_cnn.py).
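As a minimal sketch (not from the original FAQ), a generator might look like the following; it assumes `paths` is a list of `.npy` files on disk, `labels` is a NumPy array, and `model` is an already-compiled Keras model:
```python
import numpy as np

def batch_generator(paths, labels, batch_size=32):
    # Loads one batch at a time so the full dataset never sits in memory.
    while True:
        for i in range(0, len(paths), batch_size):
            x_batch = np.array([np.load(p) for p in paths[i:i + batch_size]])
            y_batch = labels[i:i + batch_size]
            yield x_batch, y_batch

model.fit_generator(batch_generator(paths, labels, batch_size=32),
                    steps_per_epoch=len(paths) // 32,
                    epochs=10)
```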
---
### 검증 손실이 더이상 감소하지 않는 경우 훈련을 어떻게 중단할 수 있습니까?
You can use an `EarlyStopping` callback:
```python
from keras.callbacks import EarlyStopping
early_stopping = EarlyStopping(monitor='val_loss', patience=2)
model.fit(x, y, validation_split=0.2, callbacks=[early_stopping])
```
Find out more in the [callbacks documentation](/callbacks).
---
### 검증 분리는 어떻게 계산됩니까?
If you set the `validation_split` argument in `model.fit` to e.g. 0.1, then the validation data used will be the *last 10%* of the data. If you set it to 0.25, it will be the last 25% of the data, etc. Note that the data isn't shuffled before extracting the validation split, so the validation is literally just the *last* x% of samples in the input you passed.
The same validation set is used for all epochs (within a same call to `fit`).
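For illustration (assuming `model`, `x`, and `y` already exist), the split described above is requested with a single argument:
```python
# The last 10% of x/y (in the order given, with no prior shuffling)
# is held out and evaluated at the end of every epoch.
model.fit(x, y, validation_split=0.1, epochs=10, batch_size=32)
```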
---
### 훈련 중 데이터가 섞입니까?
Yes, if the `shuffle` argument in `model.fit` is set to `True` (which is the default), the training data will be randomly shuffled at each epoch.
Validation data is never shuffled.
---
### 각 에폭별 훈련 손실 / 검증 손실 / 정확도는 어떻게 기록할 수 있습니까?
The `model.fit` method returns a `History` callback, which has a `history` attribute containing the lists of successive losses and other metrics.
```python
hist = model.fit(x, y, validation_split=0.2)
print(hist.history)
```
---
### 케라스 층들을 "동결" 시키려면 어떻게 해야 합니까?
To "freeze" a layer means to exclude it from training, i.e. its weights will never be updated. This is useful in the context of fine-tuning a model, or using fixed embeddings for a text input.
You can pass a `trainable` argument (boolean) to a layer constructor to set a layer to be non-trainable:
```python
frozen_layer = Dense(32, trainable=False)
```
Additionally, you can set the `trainable` property of a layer to `True` or `False` after instantiation. For this to take effect, you will need to call `compile()` on your model after modifying the `trainable` property. Here's an example:
```python
x = Input(shape=(32,))
layer = Dense(32)
layer.trainable = False
y = layer(x)
frozen_model = Model(x, y)
# in the model below, the weights of `layer` will not be updated during training
frozen_model.compile(optimizer='rmsprop', loss='mse')
layer.trainable = True
trainable_model = Model(x, y)
# with this model the weights of the layer will be updated during training
# (which will also affect the above model since it uses the same layer instance)
trainable_model.compile(optimizer='rmsprop', loss='mse')
frozen_model.fit(data, labels) # this does NOT update the weights of `layer`
trainable_model.fit(data, labels) # this updates the weights of `layer`
```
---
### 상태형 순환 신경망을 사용하려면 어떻게 해야 합니까?
Making a RNN stateful means that the states for the samples of each batch will be reused as initial states for the samples in the next batch.
When using stateful RNNs, it is therefore assumed that:
- all batches have the same number of samples
- If `x1` and `x2` are successive batches of samples, then `x2[i]` is the follow-up sequence to `x1[i]`, for every `i`.
To use statefulness in RNNs, you need to:
- explicitly specify the batch size you are using, by passing a `batch_size` argument to the first layer in your model. E.g. `batch_size=32` for a 32-samples batch of sequences of 10 timesteps with 16 features per timestep.
- set `stateful=True` in your RNN layer(s).
- specify `shuffle=False` when calling `fit()`.
To reset the states accumulated:
- use `model.reset_states()` to reset the states of all layers in the model
- use `layer.reset_states()` to reset the states of a specific stateful RNN layer
Example:
```python
x # this is our input data, of shape (32, 21, 16)
# we will feed it to our model in sequences of length 10
model = Sequential()
model.add(LSTM(32, input_shape=(10, 16), batch_size=32, stateful=True))
model.add(Dense(16, activation='softmax'))
model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
# we train the network to predict the 11th timestep given the first 10:
model.train_on_batch(x[:, :10, :], np.reshape(x[:, 10, :], (32, 16)))
# the state of the network has changed. We can feed the follow-up sequences:
model.train_on_batch(x[:, 10:20, :], np.reshape(x[:, 20, :], (32, 16)))
# let's reset the states of the LSTM layer:
model.reset_states()
# another way to do it in this case:
model.layers[0].reset_states()
```
Note that the methods `predict`, `fit`, `train_on_batch`, `predict_classes`, etc. will *all* update the states of the stateful layers in a model. This allows you to do not only stateful training, but also stateful prediction.
---
### Sequential 모델에서 층을 없애려면 어떻게 해야 합니까?
You can remove the last added layer in a Sequential model by calling `.pop()`:
```python
model = Sequential()
model.add(Dense(32, activation='relu', input_dim=784))
model.add(Dense(32, activation='relu'))
print(len(model.layers)) # "2"
model.pop()
print(len(model.layers)) # "1"
```
---
### 케라스에서 선행 훈련된 모델을 사용하려면 어떻게 해야 합니까?
Code and pre-trained weights are available for the following image classification models:
- Xception
- VGG16
- VGG19
- ResNet
- ResNet v2
- ResNeXt
- Inception v3
- Inception-ResNet v2
- MobileNet v1
- MobileNet v2
- DenseNet
- NASNet
They can be imported from the module `keras.applications`:
```python
from keras.applications.xception import Xception
from keras.applications.vgg16 import VGG16
from keras.applications.vgg19 import VGG19
from keras.applications.resnet import ResNet50
from keras.applications.resnet import ResNet101
from keras.applications.resnet import ResNet152
from keras.applications.resnet_v2 import ResNet50V2
from keras.applications.resnet_v2 import ResNet101V2
from keras.applications.resnet_v2 import ResNet152V2
from keras.applications.resnext import ResNeXt50
from keras.applications.resnext import ResNeXt101
from keras.applications.inception_v3 import InceptionV3
from keras.applications.inception_resnet_v2 import InceptionResNetV2
from keras.applications.mobilenet import MobileNet
from keras.applications.mobilenet_v2 import MobileNetV2
from keras.applications.densenet import DenseNet121
from keras.applications.densenet import DenseNet169
from keras.applications.densenet import DenseNet201
from keras.applications.nasnet import NASNetLarge
from keras.applications.nasnet import NASNetMobile
model = VGG16(weights='imagenet', include_top=True)
```
For a few simple usage examples, see [the documentation for the Applications module](/applications).
For a detailed example of how to use such a pre-trained model for feature extraction or for fine-tuning, see [this blog post](http://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html).
The VGG16 model is also the basis for several Keras example scripts:
- [Style transfer](https://github.com/keras-team/keras/blob/master/examples/neural_style_transfer.py)
- [Feature visualization](https://github.com/keras-team/keras/blob/master/examples/conv_filter_visualization.py)
- [Deep dream](https://github.com/keras-team/keras/blob/master/examples/deep_dream.py)
---
### 케라스에서 HDF5 인풋을 사용하려면 어떻게 해야 합니까?
You can use the `HDF5Matrix` class from `keras.utils`. See [the HDF5Matrix documentation](/utils/#hdf5matrix) for details.
You can also directly use a HDF5 dataset:
```python
import h5py
with h5py.File('input/file.hdf5', 'r') as f:
x_data = f['x_data']
model.predict(x_data)
```
Please also see [How can I install HDF5 or h5py to save my models in Keras?](#how-can-i-install-hdf5-or-h5py-to-save-my-models-in-keras) for instructions on how to install `h5py`.
---
### 케라스 구성 파일은 어디에 저장됩니까?
The default directory where all Keras data is stored is:
```bash
$HOME/.keras/
```
Note that Windows users should replace `$HOME` with `%USERPROFILE%`.
In case Keras cannot create the above directory (e.g. due to permission issues), `/tmp/.keras/` is used as a backup.
The Keras configuration file is a JSON file stored at `$HOME/.keras/keras.json`. The default configuration file looks like this:
```
{
"image_data_format": "channels_last",
"epsilon": 1e-07,
"floatx": "float32",
"backend": "tensorflow"
}
```
It contains the following fields:
- The image data format to be used as default by image processing layers and utilities (either `channels_last` or `channels_first`).
- The `epsilon` numerical fuzz factor to be used to prevent division by zero in some operations.
- The default float data type.
- The default backend. See the [backend documentation](/backend).
Likewise, cached dataset files, such as those downloaded with [`get_file()`](/utils/#get_file), are stored by default in `$HOME/.keras/datasets/`.
---
### 개발 중 케라스를 사용해 재현 가능한 결과를 얻으려면 어떻게 해야 합니까?
During development of a model, sometimes it is useful to be able to obtain reproducible results from run to run in order to determine if a change in performance is due to an actual model or data modification, or merely a result of a new random sample.
First, you need to set the `PYTHONHASHSEED` environment variable to `0` before the program starts (not within the program itself). This is necessary in Python 3.2.3 onwards to have reproducible behavior for certain hash-based operations (e.g., the item order in a set or a dict, see [Python's documentation](https://docs.python.org/3.7/using/cmdline.html#envvar-PYTHONHASHSEED) or [issue #2280](https://github.com/keras-team/keras/issues/2280#issuecomment-306959926) for further details). One way to set the environment variable is when starting python like this:
```
$ cat test_hash.py
print(hash("keras"))
$ python3 test_hash.py # non-reproducible hash (Python 3.2.3+)
-8127205062320133199
$ python3 test_hash.py # non-reproducible hash (Python 3.2.3+)
3204480642156461591
$ PYTHONHASHSEED=0 python3 test_hash.py # reproducible hash
4883664951434749476
$ PYTHONHASHSEED=0 python3 test_hash.py # reproducible hash
4883664951434749476
```
Moreover, when using the TensorFlow backend and running on a GPU, some operations have non-deterministic outputs, in particular `tf.reduce_sum()`. This is due to the fact that GPUs run many operations in parallel, so the order of execution is not always guaranteed. Due to the limited precision of floats, even adding several numbers together may give slightly different results depending on the order in which you add them. You can try to avoid the non-deterministic operations, but some may be created automatically by TensorFlow to compute the gradients, so it is much simpler to just run the code on the CPU. For this, you can set the `CUDA_VISIBLE_DEVICES` environment variable to an empty string, for example:
```
$ CUDA_VISIBLE_DEVICES="" PYTHONHASHSEED=0 python your_program.py
```
The below snippet of code provides an example of how to obtain reproducible results - this is geared towards a TensorFlow backend for a Python 3 environment:
```python
import numpy as np
import tensorflow as tf
import random as rn
# The below is necessary for starting Numpy generated random numbers
# in a well-defined initial state.
np.random.seed(42)
# The below is necessary for starting core Python generated random numbers
# in a well-defined state.
rn.seed(12345)
# Force TensorFlow to use single thread.
# Multiple threads are a potential source of non-reproducible results.
# For further details, see: https://stackoverflow.com/questions/42022950/
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1)
from keras import backend as K
# The below tf.set_random_seed() will make random number generation
# in the TensorFlow backend have a well-defined initial state.
# For further details, see:
# https://www.tensorflow.org/api_docs/python/tf/set_random_seed
tf.set_random_seed(1234)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
# Rest of code follows ...
```
---
### HDF5나 h5py를 설치해 케라스 모델을 저장하려면 어떻게 해야 합니까?
In order to save your Keras models as HDF5 files, e.g. via
`keras.callbacks.ModelCheckpoint`, Keras uses the h5py Python package. It is
a dependency of Keras and should be installed by default. On Debian-based
distributions, you will have to additionally install `libhdf5`:
```
sudo apt-get install libhdf5-serial-dev
```
If you are unsure if h5py is installed you can open a Python shell and load the
module via
```
import h5py
```
If it imports without error it is installed, otherwise you can find detailed
installation instructions here: http://docs.h5py.org/en/latest/build.html
|
keras-docs-ko/sources/getting-started/faq.md/0
|
{
"file_path": "keras-docs-ko/sources/getting-started/faq.md",
"repo_id": "keras-docs-ko",
"token_count": 13552
}
| 95 |
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/wrappers.py#L116)</span>
### TimeDistributed
```python
keras.layers.TimeDistributed(layer)
```
이 래퍼는 입력값의 모든 시계열 단위에 층<sub>layer</sub>을 적용합니다.
입력값은 최소 3D의 형태를 가져야 하며, 그 중 index 1(두번째 원소)은 시간 차원을 나타냅니다.
32개의 표본으로 구성된 배치에서, 10개의 16차원 벡터로 이루어진 시퀀스의 예시를 보겠습니다.
이 예시에서 층의 배치 입력 형태는 `(32, 10, 16)`이고,
배치 차원을 제외한 `input_shape`는 `(10, 16)`입니다.
`TimeDistributed`를 사용하여 10개의 시간 단계<sub>timestep</sub>에 독립적으로 `Dense`층을 적용할 수 있습니다.
```python
# 모델의 첫 번째 층으로 사용되었습니다.
model = Sequential()
model.add(TimeDistributed(Dense(8), input_shape=(10, 16)))
# model.output_shape == (None, 10, 8)
```
이 결과 출력값은 `(32, 10, 8)`의 형태를 가집니다.
첫 번째 층 이후에는 `input_shape`를 명시할 필요가 없습니다.
```python
model.add(TimeDistributed(Dense(32)))
# now model.output_shape == (None, 10, 32)
```
이 결과 출력값은 `(32, 10, 32)`의 형태를 가집니다.
`TimeDistributed`는 `Dense`가 아닌 케라스에서 사용 가능한 층에서도 사용할 수 있습니다.
(ex:`Conv2D` 층)
```python
model = Sequential()
model.add(TimeDistributed(Conv2D(64, (3, 3)),
input_shape=(10, 299, 299, 3)))
```
__인자__
- __layer__: 층 인스턴스.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/wrappers.py#L335)</span>
### Bidirectional
```python
keras.layers.Bidirectional(layer, merge_mode='concat', weights=None)
```
순환 신경망에 사용하는 양방향 래퍼.
__인자__
- __layer__: `RNN` 인스턴스.
- __merge_mode__: 정방향 순환 신경망과
역방향 순환 신경망의 출력값이 병합되는 방식을 `{'sum', 'mul', 'concat', 'ave', None}`중에 선택합니다.
`None`의 경우, 출력 값이 합쳐지지 않고, 리스트로 반환됩니다.
- __weights__: `Bidirectional` 층에 사용할 초기 가중치
__오류처리__
- __ValueError__: `merge_mode` 인자가 유효하지 않은 경우 오류 메시지를 전달합니다.
__예시__
```python
model = Sequential()
model.add(Bidirectional(LSTM(10, return_sequences=True),
input_shape=(5, 10)))
model.add(Bidirectional(LSTM(10)))
model.add(Dense(5))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
```
|
keras-docs-ko/sources/layers/wrappers.md/0
|
{
"file_path": "keras-docs-ko/sources/layers/wrappers.md",
"repo_id": "keras-docs-ko",
"token_count": 1722
}
| 96 |
## 回调函数使用
回调函数是一个函数的合集,会在训练的阶段中所使用。你可以使用回调函数来查看训练模型的内在状态和统计。你可以传递一个列表的回调函数(作为 `callbacks` 关键字参数)到 `Sequential` 或 `Model` 类型的 `.fit()` 方法。在训练时,相应的回调函数的方法就会被在各自的阶段被调用。
---
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/callbacks/callbacks.py#L275)</span>
### Callback
```python
keras.callbacks.callbacks.Callback()
```
用来组建新的回调函数的抽象基类。
__属性__
- __params__: 字典。训练参数,
(例如,verbosity, batch size, number of epochs...)。
- __model__: `keras.models.Model` 的实例。
指代被训练模型。
回调函数以 `logs` 字典作为参数,该字典包含与当前批量或训练轮相关的数据的键。
目前,`Sequential` 模型类的 `.fit()` 方法会在传入到回调函数的 `logs` 里面包含以下的数据:
- __on_epoch_end__: 包括 `acc` 和 `loss` 的日志, 也可以选择性的包括 `val_loss`(如果在 `fit` 中启用验证),和 `val_acc`(如果启用验证和监测精确值)。
- __on_batch_begin__: 包括 `size` 的日志,在当前批量内的样本数量。
- __on_batch_end__: 包括 `loss` 的日志,也可以选择性的包括 `acc`(如果启用监测精确值)。
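A small hedged sketch (added for illustration, not from the original text) of reading those `logs` keys in a custom callback; the class name is made up:
```python
import keras

class ValLossPrinter(keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        # 'loss' is always present; 'val_loss' only if validation is enabled in fit().
        print('epoch %d - loss: %s, val_loss: %s'
              % (epoch, logs.get('loss'), logs.get('val_loss')))
```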
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/callbacks/callbacks.py#L477)</span>
### BaseLogger
```python
keras.callbacks.callbacks.BaseLogger(stateful_metrics=None)
```
会积累训练轮平均评估的回调函数。
这个回调函数被自动应用到每一个 Keras 模型上面。
__参数__
- __stateful_metrics__: 不应在一个轮次内取平均的指标名称(字符串)列表。
此列表中的度量标准将按原样记录在 `on_epoch_end` 中。
所有其他指标将在 `on_epoch_end` 中取平均值。
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/callbacks/callbacks.py#L524)</span>
### TerminateOnNaN
```python
keras.callbacks.callbacks.TerminateOnNaN()
```
当遇到 NaN 损失会停止训练的回调函数。
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/callbacks/callbacks.py#L537)</span>
### ProgbarLogger
```python
keras.callbacks.callbacks.ProgbarLogger(count_mode='samples', stateful_metrics=None)
```
会把评估以标准输出打印的回调函数。
__参数__
- __count_mode__: "steps" 或者 "samples"。
进度条是否应该计数看见的样本或步骤(批量)。
- __stateful_metrics__: 不应在一个轮次内取平均的指标名称(字符串)列表。
此列表中的度量标准将按原样记录在 `on_epoch_end` 中。
所有其他指标将在 `on_epoch_end` 中取平均值。
__异常__
- __ValueError__: 如果 `count_mode` 的值无效。
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/callbacks/callbacks.py#L614)</span>
### History
```python
keras.callbacks.callbacks.History()
```
把所有事件都记录到 `History` 对象的回调函数。
这个回调函数被自动启用到每一个 Keras 模型。`History` 对象会被模型的 `fit` 方法返回。
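__例子__

An illustrative sketch added here (not from the original document), assuming a compiled `model` and placeholder training data names:
```python
history = model.fit(x_train, y_train, validation_split=0.2, epochs=10)
# history.history maps metric names to per-epoch lists,
# e.g. 'loss', 'val_loss', and 'acc' when accuracy is monitored.
print(history.history['loss'])
```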
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/callbacks/callbacks.py#L633)</span>
### ModelCheckpoint
```python
keras.callbacks.callbacks.ModelCheckpoint(filepath, monitor='val_loss', verbose=0, save_best_only=False, save_weights_only=False, mode='auto', period=1)
```
在每个训练轮之后保存模型。
`filepath` 可以包括命名格式选项,可以由 `epoch` 的值和 `logs` 的键(由 `on_epoch_end` 参数传递)来填充。
例如:如果 `filepath` 是 `weights.{epoch:02d}-{val_loss:.2f}.hdf5`,
那么模型保存的文件名就会包含训练轮数和验证损失(见下方示例)。
__参数__
- __filepath__: 字符串,保存模型的路径。
- __monitor__: 被监测的数据。
- __verbose__: 详细信息模式,0 或者 1 。
- __save_best_only__: 如果 `save_best_only=True`,
被监测数据的最佳模型就不会被覆盖。
- __mode__: {auto, min, max} 的其中之一。
如果 `save_best_only=True`,那么是否覆盖保存文件的决定就取决于被监测数据的最大或者最小值。
对于 `val_acc`,模式就会是 `max`,而对于 `val_loss`,模式就需要是 `min`,等等。
在 `auto` 模式中,方向会自动从被监测的数据的名字中判断出来。
- __save_weights_only__: 如果 True,那么只有模型的权重会被保存 (`model.save_weights(filepath)`),
否则的话,整个模型会被保存 (`model.save(filepath)`)。
- __period__: 每个检查点之间的间隔(训练轮数)。
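__例子__

A short hedged sketch of the formatted `filepath` described above (added for illustration; `x_train`, `y_train`, `x_val`, `y_val` are placeholder names):
```python
from keras.callbacks import ModelCheckpoint

# Writes e.g. weights.03-0.42.hdf5 whenever val_loss improves.
checkpoint = ModelCheckpoint('weights.{epoch:02d}-{val_loss:.2f}.hdf5',
                             monitor='val_loss',
                             save_best_only=True,
                             verbose=1)
model.fit(x_train, y_train,
          validation_data=(x_val, y_val),
          epochs=10,
          callbacks=[checkpoint])
```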
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/callbacks/callbacks.py#L733)</span>
### EarlyStopping
```python
keras.callbacks.callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=0, verbose=0, mode='auto', baseline=None, restore_best_weights=False)
```
当被监测的数量不再提升,则停止训练。
__参数__
- __monitor__: 被监测的数据。
- __min_delta__: 在被监测的数据中被认为是提升的最小变化,
例如,小于 min_delta 的绝对变化会被认为没有提升。
- __patience__: 在监测质量经过多少轮次没有进度时即停止。如果验证频率 (`model.fit(validation_freq=5)`) 大于 1,则可能不会在每个轮次都产生验证数。
- __verbose__: 详细信息模式。
- __mode__: {auto, min, max} 其中之一。 在 `min` 模式中,
当被监测的数据停止下降,训练就会停止;在 `max`
模式中,当被监测的数据停止上升,训练就会停止;在 `auto`
模式中,方向会自动从被监测的数据的名字中判断出来。
- __baseline__: 要监控的数量的基准值。
如果模型没有显示基准的改善,训练将停止。
- __restore_best_weights__: 是否从具有监测数量的最佳值的时期恢复模型权重。
如果为 False,则使用在训练的最后一步获得的模型权重。
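__例子__

A minimal usage sketch of the arguments above (added for illustration; data names are placeholders):
```python
from keras.callbacks import EarlyStopping

# Stop if val_loss has not improved by at least 0.001 for 5 consecutive epochs,
# and restore the best weights observed during training.
early_stopping = EarlyStopping(monitor='val_loss',
                               min_delta=0.001,
                               patience=5,
                               restore_best_weights=True)
model.fit(x_train, y_train,
          validation_split=0.2,
          epochs=100,
          callbacks=[early_stopping])
```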
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/callbacks/callbacks.py#L851)</span>
### RemoteMonitor
```python
keras.callbacks.callbacks.RemoteMonitor(root='http://localhost:9000', path='/publish/epoch/end/', field='data', headers=None, send_as_json=False)
```
将事件数据流到服务器的回调函数。
需要 `requests` 库。
事件被默认发送到 `root + '/publish/epoch/end/'`。
采用 HTTP POST ,其中的 `data` 参数是以 JSON 编码的事件数据字典。
如果 send_as_json 设置为 True,请求的 content type 是 application/json。否则,将在表单中发送序列化的 JSON。
__参数__
- __root__: 字符串;目标服务器的根地址。
- __path__: 字符串;相对于 `root` 的路径,事件数据被送达的地址。
- __field__: 字符串;JSON ,数据被保存的领域。
- __headers__: 字典;可选自定义的 HTTP 的头字段。
- __send_as_json__: 布尔值;请求是否应该以 application/json 格式发送。
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/callbacks/callbacks.py#L910)</span>
### LearningRateScheduler
```python
keras.callbacks.callbacks.LearningRateScheduler(schedule, verbose=0)
```
学习速率定时器。
__参数__
- __schedule__: 一个函数,接受轮索引数作为输入(整数,从 0 开始迭代)
然后返回一个学习速率作为输出(浮点数)。
- __verbose__: 整数。 0:安静,1:更新信息。
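__例子__

A hedged sketch of a `schedule` function (added for illustration; the initial rate and decay factor are arbitrary):
```python
from keras.callbacks import LearningRateScheduler

def step_decay(epoch):
    # Start at 0.01 and halve the learning rate every 10 epochs.
    return 0.01 * (0.5 ** (epoch // 10))

lr_scheduler = LearningRateScheduler(step_decay, verbose=1)
model.fit(x_train, y_train, epochs=50, callbacks=[lr_scheduler])
```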
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/callbacks/callbacks.py#L946)</span>
### ReduceLROnPlateau
```python
keras.callbacks.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=10, verbose=0, mode='auto', min_delta=0.0001, cooldown=0, min_lr=0)
```
当标准评估停止提升时,降低学习速率。
当学习停止时,模型总是会受益于降低 2-10 倍的学习速率。
这个回调函数监测一个数据并且当这个数据在一定「有耐心」的训练轮之后还没有进步,
那么学习速率就会被降低。
__例子__
```python
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
patience=5, min_lr=0.001)
model.fit(X_train, Y_train, callbacks=[reduce_lr])
```
__参数__
- __monitor__: 被监测的数据。
- __factor__: 学习速率被降低的因数。新的学习速率 = 学习速率 * 因数
- __patience__: 在被监测的数据经过多少轮次没有进展后即降低学习速率。如果验证频率 (`model.fit(validation_freq=5)`) 大于 1,则可能不会在每个轮次都产生验证数。
- __verbose__: 整数。0:安静,1:更新信息。
- __mode__: {auto, min, max} 其中之一。在 `min` 模式中,如果被监测的数据停止下降,学习速率就会被降低;
在 `max` 模式中,如果被监测的数据停止上升,学习速率就会被降低;
在 `auto` 模式中,方向会从被监测的数据的名字中自动推断出来。
- __min_delta__: 对于测量新的最优化的阀值,只关注巨大的改变。
- __cooldown__: 在学习速率被降低之后,重新恢复正常操作之前等待的训练轮数量。
- __min_lr__: 学习速率的下边界。
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/callbacks/callbacks.py#L1071)</span>
### CSVLogger
```python
keras.callbacks.callbacks.CSVLogger(filename, separator=',', append=False)
```
把训练轮结果数据流到 csv 文件的回调函数。
支持所有可以被作为字符串表示的值,包括 1D 可迭代数据,例如,np.ndarray。
__例子__
```python
csv_logger = CSVLogger('training.log')
model.fit(X_train, Y_train, callbacks=[csv_logger])
```
__参数__
- __filename__: csv 文件的文件名,例如 'run/log.csv'。
- __separator__: 用来隔离 csv 文件中元素的字符串。
- __append__: True:如果文件存在则增加(可以被用于继续训练)。False:覆盖存在的文件。
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/callbacks/callbacks.py#L1163)</span>
### LambdaCallback
```python
keras.callbacks.callbacks.LambdaCallback(on_epoch_begin=None, on_epoch_end=None, on_batch_begin=None, on_batch_end=None, on_train_begin=None, on_train_end=None)
```
在训练进行中创建简单,自定义的回调函数的回调函数。
这个回调函数通过匿名函数构建,这些匿名函数会在合适的时间被调用。
需要注意的是回调函数要求位置型参数,如下:
- `on_epoch_begin` 和 `on_epoch_end` 要求两个位置型的参数:`epoch`, `logs`
- `on_batch_begin` 和 `on_batch_end` 要求两个位置型的参数:`batch`, `logs`
- `on_train_begin` 和 `on_train_end` 要求一个位置型的参数:`logs`
__参数__
- __on_epoch_begin__: 在每轮开始时被调用。
- __on_epoch_end__: 在每轮结束时被调用。
- __on_batch_begin__: 在每批开始时被调用。
- __on_batch_end__: 在每批结束时被调用。
- __on_train_begin__: 在模型训练开始时被调用。
- __on_train_end__: 在模型训练结束时被调用。
__示例__
```python
# 在每一个批开始时,打印出批数。
batch_print_callback = LambdaCallback(
on_batch_begin=lambda batch,logs: print(batch))
# 把训练轮损失数据流到 JSON 格式的文件。文件的内容
# 不是完美的 JSON 格式,但是时每一行都是 JSON 对象。
import json
json_log = open('loss_log.json', mode='wt', buffering=1)
json_logging_callback = LambdaCallback(
on_epoch_end=lambda epoch, logs: json_log.write(
json.dumps({'epoch': epoch, 'loss': logs['loss']}) + '\n'),
on_train_end=lambda logs: json_log.close()
)
# 在完成模型训练之后,结束一些进程。
processes = ...
cleanup_callback = LambdaCallback(
on_train_end=lambda logs: [
p.terminate() for p in processes if p.is_alive()])
model.fit(...,
callbacks=[batch_print_callback,
json_logging_callback,
cleanup_callback])
```
---
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/callbacks/tensorboard_v1.py#L20)</span>
### TensorBoard
```python
keras.callbacks.tensorboard_v1.TensorBoard(log_dir='./logs', histogram_freq=0, batch_size=32, write_graph=True, write_grads=False, write_images=False, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None, embeddings_data=None, update_freq='epoch')
```
Tensorboard 基本可视化。
[TensorBoard](https://www.tensorflow.org/get_started/summaries_and_tensorboard)
是由 Tensorflow 提供的一个可视化工具。
这个回调函数为 Tensorboard 编写一个日志,
这样你可以可视化测试和训练的标准评估的动态图像,
也可以可视化模型中不同层的激活值直方图。
如果你已经使用 pip 安装了 Tensorflow,你应该可以从命令行启动 Tensorflow:
```sh
tensorboard --logdir=/full_path_to_your_logs
```
当使用 TensorFlow 之外的后端时,TensorBoard 仍然可以运行
(如果你安装了 TensorFlow),但是仅有展示损失值和评估指标这
两个功能可用。
__参数__
- __log_dir__: 用来保存被 TensorBoard 分析的日志文件的文件名。
- __histogram_freq__: 对于模型中各个层计算激活值和模型权重直方图的频率(训练轮数中)。
如果设置成 0 ,直方图不会被计算。对于直方图可视化的验证数据(或分离数据)一定要明确的指出。
- __batch_size__: 用以直方图计算的传入神经元网络输入批的大小。
- __write_graph__: 是否在 TensorBoard 中可视化图像。
如果 write_graph 被设置为 True,日志文件会变得非常大。
- __write_grads__: 是否在 TensorBoard 中可视化梯度值直方图。
`histogram_freq` 必须要大于 0 。
- __write_images__: 是否在 TensorBoard 中将模型权重以图片可视化。
- __embeddings_freq__: 被选中的嵌入层会被保存的频率(在训练轮中)。如果设置为0,则不会计算嵌入。
要在 TensorBoard 的嵌入选项卡中可视化的数据必须作为 `embeddings_data` 传递。
- __embeddings_layer_names__: 一个列表,会被监测层的名字。
如果是 None 或空列表,那么所有的嵌入层都会被监测。
- __embeddings_metadata__: 一个字典,对应层的名字到保存有这个嵌入层元数据文件的名字。
查看[详情](https://www.tensorflow.org/how_tos/embedding_viz/#metadata_optional)
关于元数据的数据格式。
以防同样的元数据被用于所用的嵌入层,字符串可以被传入。
- __embeddings_data__: 要嵌入在 `embeddings_layer_names` 指定的层的数据。
Numpy 数组(如果模型有单个输入)或 Numpy 数组列表(如果模型有多个输入)。
[Learn more about embeddings](https://www.tensorflow.org/programmers_guide/embedding)。
- __update_freq__: `'batch'` 或 `'epoch'` 或 整数。当使用 `'batch'` 时,在每个 batch 之后将损失和评估值写入到 TensorBoard 中。同样的情况应用到 `'epoch'` 中。如果使用整数,例如 `10000`,这个回调会在每 10000 个样本之后将损失和评估值写入到 TensorBoard 中。注意,频繁地写入到 TensorBoard 会减缓你的训练。
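A minimal usage sketch is shown below; it assumes `model`, `x_train`, `y_train`, `x_test` and `y_test` are already defined, and the log directory name is only an example:
```python
from keras.callbacks import TensorBoard

tensorboard = TensorBoard(log_dir='./logs',
                          histogram_freq=1,
                          write_graph=True)

model.fit(x_train, y_train,
          epochs=10,
          # validation data is required when histogram_freq > 0
          validation_data=(x_test, y_test),
          callbacks=[tensorboard])
```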
----
# Create a callback
You can create a custom callback by extending the base class `keras.callbacks.Callback`.
A callback has access to its associated model through the class property `self.model`.
Here's a simple example saving a list of losses over each batch during training:
```python
class LossHistory(keras.callbacks.Callback):
def on_train_begin(self, logs={}):
self.losses = []
def on_batch_end(self, batch, logs={}):
self.losses.append(logs.get('loss'))
```
---
### Example: recording loss history
```python
class LossHistory(keras.callbacks.Callback):
def on_train_begin(self, logs={}):
self.losses = []
def on_batch_end(self, batch, logs={}):
self.losses.append(logs.get('loss'))
model = Sequential()
model.add(Dense(10, input_dim=784, kernel_initializer='uniform'))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
history = LossHistory()
model.fit(x_train, y_train, batch_size=128, epochs=20, verbose=0, callbacks=[history])
print(history.losses)
# output
'''
[0.66047596406559383, 0.3547245744908703, ..., 0.25953155204159617, 0.25901699725311789]
'''
```
---
### Example: model checkpoints
```python
from keras.callbacks import ModelCheckpoint
model = Sequential()
model.add(Dense(10, input_dim=784, kernel_initializer='uniform'))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
'''
Save the model after every epoch if the validation loss decreases.
'''
checkpointer = ModelCheckpoint(filepath='/tmp/weights.hdf5', verbose=1, save_best_only=True)
model.fit(x_train, y_train, batch_size=128, epochs=20, verbose=0, validation_data=(X_test, Y_test), callbacks=[checkpointer])
```
|
keras-docs-zh/sources/callbacks.md/0
|
{
"file_path": "keras-docs-zh/sources/callbacks.md",
"repo_id": "keras-docs-zh",
"token_count": 9798
}
| 97 |
# Train a Bidirectional LSTM on the IMDB sentiment classification task.
Output after 4 epochs on CPU: ~0.8146.
Time per epoch on CPU (Core i7): ~150s.
```python
from __future__ import print_function
import numpy as np
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Dropout, Embedding, LSTM, Bidirectional
from keras.datasets import imdb
max_features = 20000
# cut texts after this number of words (among the top max_features most common words)
maxlen = 100
batch_size = 32
print('Loading data...')
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
print(len(x_train), 'train sequences')
print(len(x_test), 'test sequences')
print('Pad sequences (samples x time)')
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
y_train = np.array(y_train)
y_test = np.array(y_test)
model = Sequential()
model.add(Embedding(max_features, 128, input_length=maxlen))
model.add(Bidirectional(LSTM(64)))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))
# try using different optimizers and different optimizer configs
model.compile('adam', 'binary_crossentropy', metrics=['accuracy'])
print('Train...')
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=4,
validation_data=[x_test, y_test])
```
|
keras-docs-zh/sources/examples/imdb_bidirectional_lstm.md/0
|
{
"file_path": "keras-docs-zh/sources/examples/imdb_bidirectional_lstm.md",
"repo_id": "keras-docs-zh",
"token_count": 684
}
| 98 |
# Train a Siamese MLP on pairs of digits from the MNIST dataset.
It follows Hadsell-et-al.'06 [1] by computing the Euclidean distance on the output of the shared network and by optimizing the contrastive loss (see the paper for more details).
# References
- [Dimensionality Reduction by Learning an Invariant Mapping](http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf)
Gets to 97.2% test accuracy after 20 epochs.
2 seconds per epoch on a Titan X Maxwell GPU.
```python
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import random
from keras.datasets import mnist
from keras.models import Model
from keras.layers import Input, Flatten, Dense, Dropout, Lambda
from keras.optimizers import RMSprop
from keras import backend as K
num_classes = 10
epochs = 20
def euclidean_distance(vects):
x, y = vects
sum_square = K.sum(K.square(x - y), axis=1, keepdims=True)
return K.sqrt(K.maximum(sum_square, K.epsilon()))
def eucl_dist_output_shape(shapes):
shape1, shape2 = shapes
return (shape1[0], 1)
def contrastive_loss(y_true, y_pred):
'''Contrastive loss from Hadsell-et-al.'06
http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
'''
margin = 1
square_pred = K.square(y_pred)
margin_square = K.square(K.maximum(margin - y_pred, 0))
return K.mean(y_true * square_pred + (1 - y_true) * margin_square)
def create_pairs(x, digit_indices):
'''Positive and negative pair creation.
Alternates between positive and negative pairs.
'''
pairs = []
labels = []
n = min([len(digit_indices[d]) for d in range(num_classes)]) - 1
for d in range(num_classes):
for i in range(n):
z1, z2 = digit_indices[d][i], digit_indices[d][i + 1]
pairs += [[x[z1], x[z2]]]
inc = random.randrange(1, num_classes)
dn = (d + inc) % num_classes
z1, z2 = digit_indices[d][i], digit_indices[dn][i]
pairs += [[x[z1], x[z2]]]
labels += [1, 0]
return np.array(pairs), np.array(labels)
def create_base_network(input_shape):
'''Base network to be shared (equivalent to feature extraction).
'''
input = Input(shape=input_shape)
x = Flatten()(input)
x = Dense(128, activation='relu')(x)
x = Dropout(0.1)(x)
x = Dense(128, activation='relu')(x)
x = Dropout(0.1)(x)
x = Dense(128, activation='relu')(x)
return Model(input, x)
def compute_accuracy(y_true, y_pred):
'''Compute classification accuracy with a fixed threshold on distances.
'''
pred = y_pred.ravel() < 0.5
return np.mean(pred == y_true)
def accuracy(y_true, y_pred):
'''Compute classification accuracy with a fixed threshold on distances.
'''
return K.mean(K.equal(y_true, K.cast(y_pred < 0.5, y_true.dtype)))
# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
input_shape = x_train.shape[1:]
# create training+test positive and negative pairs
digit_indices = [np.where(y_train == i)[0] for i in range(num_classes)]
tr_pairs, tr_y = create_pairs(x_train, digit_indices)
digit_indices = [np.where(y_test == i)[0] for i in range(num_classes)]
te_pairs, te_y = create_pairs(x_test, digit_indices)
# network definition
base_network = create_base_network(input_shape)
input_a = Input(shape=input_shape)
input_b = Input(shape=input_shape)
# because we re-use the same instance `base_network`,
# the weights of the network will be shared across the two branches
processed_a = base_network(input_a)
processed_b = base_network(input_b)
distance = Lambda(euclidean_distance,
output_shape=eucl_dist_output_shape)([processed_a, processed_b])
model = Model([input_a, input_b], distance)
# train
rms = RMSprop()
model.compile(loss=contrastive_loss, optimizer=rms, metrics=[accuracy])
model.fit([tr_pairs[:, 0], tr_pairs[:, 1]], tr_y,
batch_size=128,
epochs=epochs,
validation_data=([te_pairs[:, 0], te_pairs[:, 1]], te_y))
# compute final accuracy on the training and test sets
y_pred = model.predict([tr_pairs[:, 0], tr_pairs[:, 1]])
tr_acc = compute_accuracy(tr_y, y_pred)
y_pred = model.predict([te_pairs[:, 0], te_pairs[:, 1]])
te_acc = compute_accuracy(te_y, y_pred)
print('* Accuracy on training set: %0.2f%%' % (100 * tr_acc))
print('* Accuracy on test set: %0.2f%%' % (100 * te_acc))
```
|
keras-docs-zh/sources/examples/mnist_siamese.md/0
|
{
"file_path": "keras-docs-zh/sources/examples/mnist_siamese.md",
"repo_id": "keras-docs-zh",
"token_count": 2204
}
| 99 |
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/normalization.py#L16)</span>
### BatchNormalization
```python
keras.layers.BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, beta_initializer='zeros', gamma_initializer='ones', moving_mean_initializer='zeros', moving_variance_initializer='ones', beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, gamma_constraint=None)
```
Batch normalization layer (Ioffe and Szegedy, 2014).
Normalizes the activations of the previous layer at each batch,
i.e. applies a transformation that maintains the mean activation close to 0 and the activation standard deviation close to 1.
__Arguments__
- __axis__: Integer, the axis that should be normalized
  (typically the features axis).
  For instance, after a `Conv2D` layer with `data_format="channels_first"`,
  set `axis=1` in `BatchNormalization`.
- __momentum__: Momentum for the moving mean and the moving variance.
- __epsilon__: Small float added to the variance to avoid dividing by zero.
- __center__: If True, add the offset `beta` to the normalized tensor.
  If False, `beta` is ignored.
- __scale__: If True, multiply by `gamma`.
  If False, `gamma` is not used.
  When the next layer is linear (also e.g. `nn.relu`),
  this can be disabled since the scaling will be done by the next layer.
- __beta_initializer__: Initializer for the beta weight.
- __gamma_initializer__: Initializer for the gamma weight.
- __moving_mean_initializer__: Initializer for the moving mean.
- __moving_variance_initializer__: Initializer for the moving variance.
- __beta_regularizer__: Optional regularizer for the beta weight.
- __gamma_regularizer__: Optional regularizer for the gamma weight.
- __beta_constraint__: Optional constraint for the beta weight.
- __gamma_constraint__: Optional constraint for the gamma weight.
__Input shape__
Arbitrary. Use the keyword argument `input_shape` (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model.
__Output shape__
Same shape as input.
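A minimal usage sketch (the layer sizes and input shape below are illustrative only):
```python
from keras.models import Sequential
from keras.layers import Dense, BatchNormalization, Activation

model = Sequential()
model.add(Dense(64, input_shape=(20,)))
# Normalize the activations of the previous Dense layer at each batch.
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
```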
__References__
- [Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift](https://arxiv.org/abs/1502.03167)
|
keras-docs-zh/sources/layers/normalization.md/0
|
{
"file_path": "keras-docs-zh/sources/layers/normalization.md",
"repo_id": "keras-docs-zh",
"token_count": 1163
}
| 100 |
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/utils/generic_utils.py#L21)</span>
### CustomObjectScope
```python
keras.utils.CustomObjectScope()
```
Provides a scope that changes to `_GLOBAL_CUSTOM_OBJECTS` cannot escape.
Code within a `with` statement will be able to access custom objects by name.
Changes to global custom objects persist within the enclosing `with` statement.
At the end of the `with` statement,
global custom objects are reverted to the state at the beginning of the `with` statement.
__Example__
Consider a custom object `MyObject` (e.g. a class):
```python
with CustomObjectScope({'MyObject':MyObject}):
layer = Dense(..., kernel_regularizer='MyObject')
# save, load, etc. will recognize the custom object by name
```
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/utils/io_utils.py#L26)</span>
### HDF5Matrix
```python
keras.utils.HDF5Matrix(datapath, dataset, start=0, end=None, normalizer=None)
```
Representation of an HDF5 dataset to be used instead of a Numpy array.
__Example__
```python
x_data = HDF5Matrix('input/file.hdf5', 'data')
model.predict(x_data)
```
Providing `start` and `end` allows use of a slice of the dataset.
Optionally, a normalizer function (or lambda) can be given.
This will be called on every slice of data retrieved.
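For instance, a sketch of reading only the first 1000 samples and rescaling them on the fly (the file and dataset names are the same placeholders as in the example above):
```python
from keras.utils import HDF5Matrix

x_slice = HDF5Matrix('input/file.hdf5', 'data',
                     start=0, end=1000,
                     normalizer=lambda x: x / 255.0)
model.predict(x_slice)
```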
__Arguments__
- __datapath__: string, path to an HDF5 file.
- __dataset__: string, name of the HDF5 dataset in the file specified by datapath.
- __start__: int, start of the desired slice of the specified dataset.
- __end__: int, end of the desired slice of the specified dataset.
- __normalizer__: function to be called on the data when retrieved.
__Returns__
An array-like HDF5 dataset.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/utils/data_utils.py#L305)</span>
### Sequence
```python
keras.utils.Sequence()
```
Base object for fitting to a sequence of data, such as a dataset.
Every `Sequence` must implement the `__getitem__` and the `__len__` methods.
If you want to modify your dataset between epochs, you may implement `on_epoch_end`.
The method `__getitem__` should return a complete batch.
__Notes__
`Sequence` is a safer way to do multiprocessing. This structure guarantees that the network will only train once on each sample per epoch, which is not the case with generators.
__Example__
```python
from skimage.io import imread
from skimage.transform import resize
import numpy as np
# Here, `x_set` is a list of paths to the images
# and `y_set` is the list of associated classes.
class CIFAR10Sequence(Sequence):
def __init__(self, x_set, y_set, batch_size):
self.x, self.y = x_set, y_set
self.batch_size = batch_size
def __len__(self):
return int(np.ceil(len(self.x) / float(self.batch_size)))
def __getitem__(self, idx):
batch_x = self.x[idx * self.batch_size:(idx + 1) * self.batch_size]
batch_y = self.y[idx * self.batch_size:(idx + 1) * self.batch_size]
return np.array([
resize(imread(file_name), (200, 200))
for file_name in batch_x]), np.array(batch_y)
```
----
### to_categorical
```python
keras.utils.to_categorical(y, num_classes=None, dtype='float32')
```
Converts a class vector (integers) to a binary class matrix.
E.g. for use with categorical_crossentropy.
__Arguments__
- __y__: class vector to be converted into a matrix
  (integers from 0 to num_classes).
- __num_classes__: total number of classes.
- __dtype__: the data type expected by the input, as a string (`float32`, `float64`, `int32`...).
__Example__
```python
# Consider an array of 5 labels out of a set of 3 classes {0, 1, 2}:
> labels
array([0, 2, 1, 2, 0])
# `to_categorical` converts this into a matrix with as many columns as there are classes.
# The number of rows stays the same.
> to_categorical(labels)
array([[ 1., 0., 0.],
[ 0., 0., 1.],
[ 0., 1., 0.],
[ 0., 0., 1.],
[ 1., 0., 0.]], dtype=float32)
```
__Returns__
A binary matrix representation of the input.
----
### normalize
```python
keras.utils.normalize(x, axis=-1, order=2)
```
Normalizes a Numpy array.
__Arguments__
- __x__: Numpy array to normalize.
- __axis__: axis along which to normalize.
- __order__: normalization order (e.g. 2 for the L2 norm).
__Returns__
A normalized copy of the array.
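A quick sketch:
```python
import numpy as np
from keras.utils import normalize

x = np.array([[3.0, 4.0],
              [1.0, 2.0]])
# L2-normalize each row (axis=-1 and order=2 are the defaults).
x_norm = normalize(x, axis=-1, order=2)
# The first row becomes [0.6, 0.8], which has unit L2 norm.
```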
----
### get_file
```python
keras.utils.get_file(fname, origin, untar=False, md5_hash=None, file_hash=None, cache_subdir='datasets', hash_algorithm='auto', extract=False, archive_format='auto', cache_dir=None)
```
Downloads a file from a URL if it is not already in the cache.
By default, the file at the url `origin`
is downloaded to the cache dir `~/.keras`,
placed in the cache subdirectory `datasets`, and given the filename `fname`.
The final location of a file `example.txt` would therefore be `~/.keras/datasets/example.txt`.
Files in tar, tar.gz, tar.bz, and zip formats can also be extracted.
Passing a hash will verify the file after download.
The command line programs `shasum` and `sha256sum` can compute the hash.
__Arguments__
- __fname__: name of the file. If an absolute path `/path/to/file.txt` is specified,
  the file will be saved at that location.
- __origin__: original URL of the file.
- __untar__: deprecated in favor of 'extract'.
  Boolean, whether the file should be decompressed.
- __md5_hash__: deprecated in favor of 'file_hash'.
  md5 hash of the file for verification.
- __file_hash__: the expected hash string of the file after download.
  Both sha256 and md5 hash algorithms are supported.
- __cache_subdir__: subdirectory under the Keras cache dir where the file is saved.
  If an absolute path `/path/to/folder` is specified, the file will be saved at that location.
- __hash_algorithm__: select the hash algorithm to verify the file.
  Options are 'md5', 'sha256', and 'auto'.
  The default 'auto' detects the hash algorithm in use.
- __extract__: True tries extracting the file as an archive, like tar or zip.
- __archive_format__: archive format to try for extracting the file.
  Options are 'auto', 'tar', 'zip', and None.
  'tar' includes tar, tar.gz, and tar.bz files.
  The default 'auto' is ['tar', 'zip'].
  None or an empty list will return no matches found.
- __cache_dir__: location to store cached files; when None it defaults to the
  [Keras directory](/faq/#where-is-the-keras-configuration-filed-stored).
__Returns__
Path to the downloaded file.
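A minimal sketch; the URL below is a placeholder, not a real download location:
```python
from keras.utils import get_file

path = get_file(
    fname='example.tar.gz',
    origin='https://example.com/data/example.tar.gz',  # placeholder URL
    extract=True,
    cache_subdir='datasets')
print(path)  # e.g. ~/.keras/datasets/example.tar.gz
```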
----
### print_summary
```python
keras.utils.print_summary(model, line_length=None, positions=None, print_fn=None)
```
Prints a summary of a model.
__Arguments__
- __model__: Keras model instance.
- __line_length__: total length of printed lines
  (e.g. set this to adapt the display to different terminal window sizes).
- __positions__: relative or absolute positions of log elements in each line.
  If not provided, defaults to `[.33, .55, .67, 1.]`.
- __print_fn__: print function to use.
  It will be called on each line of the summary.
  You can set it to a custom function in order to capture the string summary.
  It defaults to `print` (prints to stdout).
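For example, a sketch of capturing the summary as a string instead of printing it (assuming `model` is an existing Keras model):
```python
from keras.utils import print_summary

lines = []
# Collect each summary line instead of printing it to stdout.
print_summary(model, print_fn=lines.append)
summary_text = '\n'.join(lines)
```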
----
### plot_model
```python
keras.utils.plot_model(model, to_file='model.png', show_shapes=False, show_layer_names=True, rankdir='TB', expand_nested=False, dpi=96)
```
Converts a Keras model to dot format and saves it to a file.
__Arguments__
- __model__: a Keras model instance.
- __to_file__: file name of the plot image.
- __show_shapes__: whether to display shape information.
- __show_layer_names__: whether to display layer names.
- __rankdir__: `rankdir` argument passed to PyDot,
  a string specifying the format of the plot:
  'TB' creates a vertical plot;
  'LR' creates a horizontal plot.
- __expand_nested__: whether to expand nested models into clusters.
- __dpi__: dot DPI.
__Returns__
A Jupyter notebook Image object if Jupyter is installed. This enables in-line display of the model plots in notebooks.
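A minimal sketch (requires the `pydot` and `graphviz` packages; `model` is assumed to be an existing Keras model):
```python
from keras.utils import plot_model

# Write the architecture diagram to a PNG file, including output shapes.
plot_model(model, to_file='model.png', show_shapes=True, show_layer_names=True)
```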
----
### multi_gpu_model
```python
keras.utils.multi_gpu_model(model, gpus=None, cpu_merge=True, cpu_relocation=False)
```
Replicates a model on different GPUs.
Specifically, this function implements single-machine multi-GPU data parallelism.
It works in the following way:
- Divide the model's input(s) into multiple sub-batches.
- Apply a model copy on each sub-batch.
  Every model copy is executed on a dedicated GPU.
- Concatenate the results (on CPU) into one big batch.
E.g. if your `batch_size` is 64 and you use `gpus=2`,
then we will divide the input into 2 sub-batches of 32 samples,
process each sub-batch on one GPU, then return the full batch of 64 processed samples.
This induces quasi-linear speedup on up to 8 GPUs.
This function is only available with the TensorFlow backend for the time being.
__Arguments__
- __model__: a Keras model instance. To avoid OOM errors, this model could have been built on CPU,
  for instance (see the usage example below).
- __gpus__: integer >= 2 or list of integers, number of GPUs on which to create model replicas,
  or list of GPU IDs.
- __cpu_merge__: a boolean value to identify whether to force merging model weights under the scope of the CPU or not.
- __cpu_relocation__: a boolean value to identify whether to create the model's weights under the scope of the CPU. If the model is not defined under any preceding device scope, you can still rescue it by activating this option.
__Returns__
A Keras `Model` instance which can be used just like the initial `model` argument, but which distributes its workload on multiple GPUs.
__Example__
Example 1 - Training models with weights merge on CPU
```python
import tensorflow as tf
from keras.applications import Xception
from keras.utils import multi_gpu_model
import numpy as np
num_samples = 1000
height = 224
width = 224
num_classes = 1000
# Instantiate the base model (or "template" model).
# We recommend doing this under a CPU device scope,
# so that the model's weights are hosted on CPU memory.
# Otherwise they may end up hosted on a GPU, which would complicate weight sharing.
with tf.device('/cpu:0'):
model = Xception(weights=None,
input_shape=(height, width, 3),
classes=num_classes)
# Replicate the model on 8 GPUs.
# This assumes that your machine has 8 available GPUs.
parallel_model = multi_gpu_model(model, gpus=8)
parallel_model.compile(loss='categorical_crossentropy',
optimizer='rmsprop')
# Generate dummy data.
x = np.random.random((num_samples, height, width, 3))
y = np.random.random((num_samples, num_classes))
# This `fit` call will be distributed on 8 GPUs.
# Since the batch size is 256, each GPU will process 32 samples.
parallel_model.fit(x, y, epochs=20, batch_size=256)
# Save the model via the template model (which shares the same weights):
model.save('my_model.h5')
```
Example 2 - Training models with weights merge on CPU using cpu_relocation
```python
..
# Not needed to change the device scope for model definition:
model = Xception(weights=None, ..)
try:
parallel_model = multi_gpu_model(model, cpu_relocation=True)
print("Training using multiple GPUs..")
except ValueError:
parallel_model = model
print("Training using single GPU or CPU..")
parallel_model.compile(..)
..
```
Example 3 - Training models with weights merge on GPU (recommended for NV-link)
```python
..
# Not needed to change the device scope for model definition:
model = Xception(weights=None, ..)
try:
parallel_model = multi_gpu_model(model, cpu_merge=False)
print("Training using multiple GPUs..")
except:
parallel_model = model
print("Training using single GPU or CPU..")
parallel_model.compile(..)
..
```
__On model saving__
To save the multi-GPU model, use `.save(fname)` or `.save_weights(fname)` with the template model (the argument you passed to `multi_gpu_model`), rather than the model returned by `multi_gpu_model`.
|
keras-docs-zh/sources/utils.md/0
|
{
"file_path": "keras-docs-zh/sources/utils.md",
"repo_id": "keras-docs-zh",
"token_count": 6658
}
| 101 |
"""
Title: Automatic Speech Recognition using CTC
Authors: [Mohamed Reda Bouadjenek](https://rbouadjenek.github.io/) and [Ngoc Dung Huynh](https://www.linkedin.com/in/parkerhuynh/)
Date created: 2021/09/26
Last modified: 2021/09/26
Description: Training a CTC-based model for automatic speech recognition.
Accelerator: GPU
"""
"""
## Introduction
Speech recognition is an interdisciplinary subfield of computer science
and computational linguistics that develops methodologies and technologies
that enable the recognition and translation of spoken language into text
by computers. It is also known as automatic speech recognition (ASR),
computer speech recognition or speech to text (STT). It incorporates
knowledge and research in the computer science, linguistics and computer
engineering fields.
This demonstration shows how to combine a 2D CNN, RNN and a Connectionist
Temporal Classification (CTC) loss to build an ASR. CTC is an algorithm
used to train deep neural networks in speech recognition, handwriting
recognition and other sequence problems. CTC is used when we don’t know
how the input aligns with the output (how the characters in the transcript
align to the audio). The model we create is similar to
[DeepSpeech2](https://nvidia.github.io/OpenSeq2Seq/html/speech-recognition/deepspeech2.html).
We will use the LJSpeech dataset from the
[LibriVox](https://librivox.org/) project. It consists of short
audio clips of a single speaker reading passages from 7 non-fiction books.
We will evaluate the quality of the model using
[Word Error Rate (WER)](https://en.wikipedia.org/wiki/Word_error_rate).
WER is obtained by adding up
the substitutions, insertions, and deletions that occur in a sequence of
recognized words. Divide that number by the total number of words originally
spoken. The result is the WER. To get the WER score you need to install the
[jiwer](https://pypi.org/project/jiwer/) package. You can use the following command line:
```
pip install jiwer
```
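As a quick sanity check of the metric itself, here is a small sketch of calling `jiwer.wer` directly on a pair of made-up sentences:
```python
from jiwer import wer

ground_truth = "the cat sat on the mat"
hypothesis = "the cat sit on mat"
# One substitution ("sat" -> "sit") and one deletion ("the")
# over six reference words: WER = 2 / 6 ≈ 0.33.
print(wer(ground_truth, hypothesis))
```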
**References:**
- [LJSpeech Dataset](https://keithito.com/LJ-Speech-Dataset/)
- [Speech recognition](https://en.wikipedia.org/wiki/Speech_recognition)
- [Sequence Modeling With CTC](https://distill.pub/2017/ctc/)
- [DeepSpeech2](https://nvidia.github.io/OpenSeq2Seq/html/speech-recognition/deepspeech2.html)
"""
"""
## Setup
"""
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import matplotlib.pyplot as plt
from IPython import display
from jiwer import wer
"""
## Load the LJSpeech Dataset
Let's download the [LJSpeech Dataset](https://keithito.com/LJ-Speech-Dataset/).
The dataset contains 13,100 audio files as `wav` files in the `/wavs/` folder.
The label (transcript) for each audio file is a string
given in the `metadata.csv` file. The fields are:
- **ID**: this is the name of the corresponding .wav file
- **Transcription**: words spoken by the reader (UTF-8)
- **Normalized transcription**: transcription with numbers,
ordinals, and monetary units expanded into full words (UTF-8).
For this demo we will use only the "Normalized transcription" field.
Each audio file is a single-channel 16-bit PCM WAV with a sample rate of 22,050 Hz.
"""
data_url = "https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2"
data_path = keras.utils.get_file("LJSpeech-1.1", data_url, untar=True)
wavs_path = data_path + "/wavs/"
metadata_path = data_path + "/metadata.csv"
# Read metadata file and parse it
metadata_df = pd.read_csv(metadata_path, sep="|", header=None, quoting=3)
metadata_df.columns = ["file_name", "transcription", "normalized_transcription"]
metadata_df = metadata_df[["file_name", "normalized_transcription"]]
metadata_df = metadata_df.sample(frac=1).reset_index(drop=True)
metadata_df.head(3)
"""
We now split the data into training and validation set.
"""
split = int(len(metadata_df) * 0.90)
df_train = metadata_df[:split]
df_val = metadata_df[split:]
print(f"Size of the training set: {len(df_train)}")
print(f"Size of the training set: {len(df_val)}")
"""
## Preprocessing
We first prepare the vocabulary to be used.
"""
# The set of characters accepted in the transcription.
characters = [x for x in "abcdefghijklmnopqrstuvwxyz'?! "]
# Mapping characters to integers
char_to_num = keras.layers.StringLookup(vocabulary=characters, oov_token="")
# Mapping integers back to original characters
num_to_char = keras.layers.StringLookup(
vocabulary=char_to_num.get_vocabulary(), oov_token="", invert=True
)
print(
f"The vocabulary is: {char_to_num.get_vocabulary()} "
f"(size ={char_to_num.vocabulary_size()})"
)
"""
Next, we create the function that describes the transformation that we apply to each
element of our dataset.
"""
# An integer scalar Tensor. The window length in samples.
frame_length = 256
# An integer scalar Tensor. The number of samples to step.
frame_step = 160
# An integer scalar Tensor. The size of the FFT to apply.
# If not provided, uses the smallest power of 2 enclosing frame_length.
fft_length = 384
def encode_single_sample(wav_file, label):
###########################################
## Process the Audio
##########################################
# 1. Read wav file
file = tf.io.read_file(wavs_path + wav_file + ".wav")
# 2. Decode the wav file
audio, _ = tf.audio.decode_wav(file)
audio = tf.squeeze(audio, axis=-1)
# 3. Change type to float
audio = tf.cast(audio, tf.float32)
# 4. Get the spectrogram
spectrogram = tf.signal.stft(
audio, frame_length=frame_length, frame_step=frame_step, fft_length=fft_length
)
# 5. We only need the magnitude, which can be derived by applying tf.abs
spectrogram = tf.abs(spectrogram)
spectrogram = tf.math.pow(spectrogram, 0.5)
# 6. normalisation
means = tf.math.reduce_mean(spectrogram, 1, keepdims=True)
stddevs = tf.math.reduce_std(spectrogram, 1, keepdims=True)
spectrogram = (spectrogram - means) / (stddevs + 1e-10)
###########################################
## Process the label
##########################################
# 7. Convert label to Lower case
label = tf.strings.lower(label)
# 8. Split the label
label = tf.strings.unicode_split(label, input_encoding="UTF-8")
# 9. Map the characters in label to numbers
label = char_to_num(label)
# 10. Return a dict as our model is expecting two inputs
return spectrogram, label
"""
## Creating `Dataset` objects
We create a `tf.data.Dataset` object that yields
the transformed elements, in the same order as they
appeared in the input.
"""
batch_size = 32
# Define the training dataset
train_dataset = tf.data.Dataset.from_tensor_slices(
(list(df_train["file_name"]), list(df_train["normalized_transcription"]))
)
train_dataset = (
train_dataset.map(encode_single_sample, num_parallel_calls=tf.data.AUTOTUNE)
.padded_batch(batch_size)
.prefetch(buffer_size=tf.data.AUTOTUNE)
)
# Define the validation dataset
validation_dataset = tf.data.Dataset.from_tensor_slices(
(list(df_val["file_name"]), list(df_val["normalized_transcription"]))
)
validation_dataset = (
validation_dataset.map(encode_single_sample, num_parallel_calls=tf.data.AUTOTUNE)
.padded_batch(batch_size)
.prefetch(buffer_size=tf.data.AUTOTUNE)
)
"""
## Visualize the data
Let's visualize an example in our dataset, including the
audio clip, the spectrogram and the corresponding label.
"""
fig = plt.figure(figsize=(8, 5))
for batch in train_dataset.take(1):
spectrogram = batch[0][0].numpy()
spectrogram = np.array([np.trim_zeros(x) for x in np.transpose(spectrogram)])
label = batch[1][0]
# Spectrogram
label = tf.strings.reduce_join(num_to_char(label)).numpy().decode("utf-8")
ax = plt.subplot(2, 1, 1)
ax.imshow(spectrogram, vmax=1)
ax.set_title(label)
ax.axis("off")
# Wav
file = tf.io.read_file(wavs_path + list(df_train["file_name"])[0] + ".wav")
audio, _ = tf.audio.decode_wav(file)
audio = audio.numpy()
ax = plt.subplot(2, 1, 2)
plt.plot(audio)
ax.set_title("Signal Wave")
ax.set_xlim(0, len(audio))
display.display(display.Audio(np.transpose(audio), rate=22050))
plt.show()
"""
## Model
We first define the CTC Loss function.
"""
def CTCLoss(y_true, y_pred):
# Compute the training-time loss value
batch_len = tf.cast(tf.shape(y_true)[0], dtype="int64")
input_length = tf.cast(tf.shape(y_pred)[1], dtype="int64")
label_length = tf.cast(tf.shape(y_true)[1], dtype="int64")
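    # `ctc_batch_cost` expects per-sample sequence lengths, so the scalar time
    # and label lengths are tiled across the batch below.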
input_length = input_length * tf.ones(shape=(batch_len, 1), dtype="int64")
label_length = label_length * tf.ones(shape=(batch_len, 1), dtype="int64")
loss = keras.backend.ctc_batch_cost(y_true, y_pred, input_length, label_length)
return loss
"""
We now define our model. We will define a model similar to
[DeepSpeech2](https://nvidia.github.io/OpenSeq2Seq/html/speech-recognition/deepspeech2.html).
"""
def build_model(input_dim, output_dim, rnn_layers=5, rnn_units=128):
"""Model similar to DeepSpeech2."""
# Model's input
input_spectrogram = layers.Input((None, input_dim), name="input")
# Expand the dimension to use 2D CNN.
x = layers.Reshape((-1, input_dim, 1), name="expand_dim")(input_spectrogram)
# Convolution layer 1
x = layers.Conv2D(
filters=32,
kernel_size=[11, 41],
strides=[2, 2],
padding="same",
use_bias=False,
name="conv_1",
)(x)
x = layers.BatchNormalization(name="conv_1_bn")(x)
x = layers.ReLU(name="conv_1_relu")(x)
# Convolution layer 2
x = layers.Conv2D(
filters=32,
kernel_size=[11, 21],
strides=[1, 2],
padding="same",
use_bias=False,
name="conv_2",
)(x)
x = layers.BatchNormalization(name="conv_2_bn")(x)
x = layers.ReLU(name="conv_2_relu")(x)
# Reshape the resulted volume to feed the RNNs layers
x = layers.Reshape((-1, x.shape[-2] * x.shape[-1]))(x)
# RNN layers
for i in range(1, rnn_layers + 1):
recurrent = layers.GRU(
units=rnn_units,
activation="tanh",
recurrent_activation="sigmoid",
use_bias=True,
return_sequences=True,
reset_after=True,
name=f"gru_{i}",
)
x = layers.Bidirectional(
recurrent, name=f"bidirectional_{i}", merge_mode="concat"
)(x)
if i < rnn_layers:
x = layers.Dropout(rate=0.5)(x)
# Dense layer
x = layers.Dense(units=rnn_units * 2, name="dense_1")(x)
x = layers.ReLU(name="dense_1_relu")(x)
x = layers.Dropout(rate=0.5)(x)
# Classification layer
output = layers.Dense(units=output_dim + 1, activation="softmax")(x)
# Model
model = keras.Model(input_spectrogram, output, name="DeepSpeech_2")
# Optimizer
opt = keras.optimizers.Adam(learning_rate=1e-4)
# Compile the model and return
model.compile(optimizer=opt, loss=CTCLoss)
return model
# Get the model
model = build_model(
input_dim=fft_length // 2 + 1,
output_dim=char_to_num.vocabulary_size(),
rnn_units=512,
)
model.summary(line_length=110)
"""
## Training and Evaluating
"""
# A utility function to decode the output of the network
def decode_batch_predictions(pred):
input_len = np.ones(pred.shape[0]) * pred.shape[1]
# Use greedy search. For complex tasks, you can use beam search
results = keras.backend.ctc_decode(pred, input_length=input_len, greedy=True)[0][0]
# Iterate over the results and get back the text
output_text = []
for result in results:
result = tf.strings.reduce_join(num_to_char(result)).numpy().decode("utf-8")
output_text.append(result)
return output_text
# A callback class to output a few transcriptions during training
class CallbackEval(keras.callbacks.Callback):
"""Displays a batch of outputs after every epoch."""
def __init__(self, dataset):
super().__init__()
self.dataset = dataset
def on_epoch_end(self, epoch: int, logs=None):
predictions = []
targets = []
for batch in self.dataset:
X, y = batch
batch_predictions = model.predict(X)
batch_predictions = decode_batch_predictions(batch_predictions)
predictions.extend(batch_predictions)
for label in y:
label = (
tf.strings.reduce_join(num_to_char(label)).numpy().decode("utf-8")
)
targets.append(label)
wer_score = wer(targets, predictions)
print("-" * 100)
print(f"Word Error Rate: {wer_score:.4f}")
print("-" * 100)
for i in np.random.randint(0, len(predictions), 2):
print(f"Target : {targets[i]}")
print(f"Prediction: {predictions[i]}")
print("-" * 100)
"""
Let's start the training process.
"""
# Define the number of epochs.
epochs = 1
# Callback function to check transcription on the val set.
validation_callback = CallbackEval(validation_dataset)
# Train the model
history = model.fit(
train_dataset,
validation_data=validation_dataset,
epochs=epochs,
callbacks=[validation_callback],
)
"""
## Inference
"""
# Let's check results on more validation samples
predictions = []
targets = []
for batch in validation_dataset:
X, y = batch
batch_predictions = model.predict(X)
batch_predictions = decode_batch_predictions(batch_predictions)
predictions.extend(batch_predictions)
for label in y:
label = tf.strings.reduce_join(num_to_char(label)).numpy().decode("utf-8")
targets.append(label)
wer_score = wer(targets, predictions)
print("-" * 100)
print(f"Word Error Rate: {wer_score:.4f}")
print("-" * 100)
for i in np.random.randint(0, len(predictions), 5):
print(f"Target : {targets[i]}")
print(f"Prediction: {predictions[i]}")
print("-" * 100)
"""
## Conclusion
In practice, you should train for around 50 epochs or more. Each epoch
takes approximately 5-6 minutes using a `GeForce RTX 2080 Ti` GPU.
The model we trained at 50 epochs has a `Word Error Rate (WER) ≈ 16% to 17%`.
Some of the transcriptions around epoch 50:
**Audio file: LJ017-0009.wav**
```
- Target : sir thomas overbury was undoubtedly poisoned by lord rochester in the reign
of james the first
- Prediction: cer thomas overbery was undoubtedly poisoned by lordrochester in the reign
of james the first
```
**Audio file: LJ003-0340.wav**
```
- Target : the committee does not seem to have yet understood that newgate could be
only and properly replaced
- Prediction: the committee does not seem to have yet understood that newgate could be
only and proberly replace
```
**Audio file: LJ011-0136.wav**
```
- Target : still no sentence of death was carried out for the offense and in eighteen
thirtytwo
- Prediction: still no sentence of death was carried out for the offense and in eighteen
thirtytwo
```
Example available on HuggingFace.
| Trained Model | Demo |
| :--: | :--: |
| [](https://huggingface.co/keras-io/ctc_asr) | [](https://huggingface.co/spaces/keras-io/ctc_asr) |
"""
|
keras-io/examples/audio/ctc_asr.py/0
|
{
"file_path": "keras-io/examples/audio/ctc_asr.py",
"repo_id": "keras-io",
"token_count": 5741
}
| 102 |
"""
Title: Fine-tuning Stable Diffusion
Author: [Sayak Paul](https://twitter.com/RisingSayak), [Chansung Park](https://twitter.com/algo_diver)
Date created: 2022/12/28
Last modified: 2023/01/13
Description: Fine-tuning Stable Diffusion using a custom image-caption dataset.
Accelerator: GPU
"""
"""
## Introduction
This tutorial shows how to fine-tune a
[Stable Diffusion model](https://keras.io/guides/keras_cv/generate_images_with_stable_diffusion/)
on a custom dataset of `{image, caption}` pairs. We build on top of the fine-tuning
script provided by Hugging Face
[here](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image.py).
We assume that you have a high-level understanding of the Stable Diffusion model.
The following resources can be helpful if you're looking for more information in that regard:
* [High-performance image generation using Stable Diffusion in KerasCV](https://keras.io/guides/keras_cv/generate_images_with_stable_diffusion/)
* [Stable Diffusion with Diffusers](https://huggingface.co/blog/stable_diffusion)
It's highly recommended that you use a GPU with at least 30GB of memory to execute
the code.
By the end of the guide, you'll be able to generate images of interesting Pokémon:

The tutorial relies on KerasCV 0.6.0. Additionally, we need
at least TensorFlow 2.11 in order to use AdamW with mixed precision.
"""
"""shell
pip install keras-cv==0.6.0 -q
pip install -U tensorflow -q
pip install keras-core -q
"""
"""
## What are we fine-tuning?
A Stable Diffusion model can be decomposed into several key models:
* A text encoder that projects the input prompt to a latent space. (The caption
associated with an image is referred to as the "prompt".)
* A variational autoencoder (VAE) that projects an input image to a latent space acting
as an image vector space.
* A diffusion model that refines a latent vector and produces another latent vector, conditioned
on the encoded text prompt
* A decoder that generates images given a latent vector from the diffusion model.
It's worth noting that during the process of generating an image from a text prompt, the
image encoder is not typically employed.
However, during the process of fine-tuning, the workflow goes like the following:
1. An input text prompt is projected to a latent space by the text encoder.
2. An input image is projected to a latent space by the image encoder portion of the VAE.
3. A small amount of noise is added to the image latent vector for a given timestep.
4. The diffusion model uses latent vectors from these two spaces along with a timestep embedding
to predict the noise that was added to the image latent.
5. A reconstruction loss is calculated between the predicted noise and the original noise
added in step 3.
6. Finally, the diffusion model parameters are optimized w.r.t this loss using
gradient descent.
Note that only the diffusion model parameters are updated during fine-tuning, while the
(pre-trained) text and the image encoders are kept frozen.
Don't worry if this sounds complicated. The code is much simpler than this!
"""
"""
## Imports
"""
from textwrap import wrap
import os
import keras_cv
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow.experimental.numpy as tnp
from keras_cv.models.stable_diffusion.clip_tokenizer import SimpleTokenizer
from keras_cv.models.stable_diffusion.diffusion_model import DiffusionModel
from keras_cv.models.stable_diffusion.image_encoder import ImageEncoder
from keras_cv.models.stable_diffusion.noise_scheduler import NoiseScheduler
from keras_cv.models.stable_diffusion.text_encoder import TextEncoder
from tensorflow import keras
"""
## Data loading
We use the dataset
[Pokémon BLIP captions](https://huggingface.co/datasets/lambdalabs/pokemon-blip-captions).
However, we'll use a slightly different version which was derived from the original
dataset to fit better with `tf.data`. Refer to
[the documentation](https://huggingface.co/datasets/sayakpaul/pokemon-blip-original-version)
for more details.
"""
data_path = tf.keras.utils.get_file(
origin="https://huggingface.co/datasets/sayakpaul/pokemon-blip-original-version/resolve/main/pokemon_dataset.tar.gz",
untar=True,
)
data_frame = pd.read_csv(os.path.join(data_path, "data.csv"))
data_frame["image_path"] = data_frame["image_path"].apply(
lambda x: os.path.join(data_path, x)
)
data_frame.head()
"""
Since we have only 833 `{image, caption}` pairs, we can precompute the text embeddings from
the captions. Moreover, the text encoder will be kept frozen during the course of
fine-tuning, so we can save some compute by doing this.
Before we use the text encoder, we need to tokenize the captions.
"""
# The padding token and maximum prompt length are specific to the text encoder.
# If you're using a different text encoder be sure to change them accordingly.
PADDING_TOKEN = 49407
MAX_PROMPT_LENGTH = 77
# Load the tokenizer.
tokenizer = SimpleTokenizer()
# Method to tokenize and pad the tokens.
def process_text(caption):
tokens = tokenizer.encode(caption)
tokens = tokens + [PADDING_TOKEN] * (MAX_PROMPT_LENGTH - len(tokens))
return np.array(tokens)
# Collate the tokenized captions into an array.
tokenized_texts = np.empty((len(data_frame), MAX_PROMPT_LENGTH))
all_captions = list(data_frame["caption"].values)
for i, caption in enumerate(all_captions):
tokenized_texts[i] = process_text(caption)
"""
## Prepare a `tf.data.Dataset`
In this section, we'll prepare a `tf.data.Dataset` object from the input image file paths
and their corresponding caption tokens. The section will include the following:
* Pre-computation of the text embeddings from the tokenized captions.
* Loading and augmentation of the input images.
* Shuffling and batching of the dataset.
"""
RESOLUTION = 256
AUTO = tf.data.AUTOTUNE
POS_IDS = tf.convert_to_tensor([list(range(MAX_PROMPT_LENGTH))], dtype=tf.int32)
augmenter = keras.Sequential(
layers=[
keras_cv.layers.CenterCrop(RESOLUTION, RESOLUTION),
keras_cv.layers.RandomFlip(),
tf.keras.layers.Rescaling(scale=1.0 / 127.5, offset=-1),
]
)
text_encoder = TextEncoder(MAX_PROMPT_LENGTH)
def process_image(image_path, tokenized_text):
image = tf.io.read_file(image_path)
image = tf.io.decode_png(image, 3)
image = tf.image.resize(image, (RESOLUTION, RESOLUTION))
return image, tokenized_text
def apply_augmentation(image_batch, token_batch):
return augmenter(image_batch), token_batch
def run_text_encoder(image_batch, token_batch):
return (
image_batch,
token_batch,
text_encoder([token_batch, POS_IDS], training=False),
)
def prepare_dict(image_batch, token_batch, encoded_text_batch):
return {
"images": image_batch,
"tokens": token_batch,
"encoded_text": encoded_text_batch,
}
def prepare_dataset(image_paths, tokenized_texts, batch_size=1):
dataset = tf.data.Dataset.from_tensor_slices((image_paths, tokenized_texts))
dataset = dataset.shuffle(batch_size * 10)
dataset = dataset.map(process_image, num_parallel_calls=AUTO).batch(batch_size)
dataset = dataset.map(apply_augmentation, num_parallel_calls=AUTO)
dataset = dataset.map(run_text_encoder, num_parallel_calls=AUTO)
dataset = dataset.map(prepare_dict, num_parallel_calls=AUTO)
return dataset.prefetch(AUTO)
"""
The baseline Stable Diffusion model was trained using images with 512x512 resolution. It's
unlikely for a model that's trained using higher-resolution images to transfer well to
lower-resolution images. However, the current model will lead to OOM if we keep the
resolution to 512x512 (without enabling mixed-precision). Therefore, in the interest of
interactive demonstrations, we kept the input resolution to 256x256.
"""
# Prepare the dataset.
training_dataset = prepare_dataset(
np.array(data_frame["image_path"]), tokenized_texts, batch_size=4
)
# Take a sample batch and investigate.
sample_batch = next(iter(training_dataset))
for k in sample_batch:
print(k, sample_batch[k].shape)
"""
We can also take a look at the training images and their corresponding captions.
"""
plt.figure(figsize=(20, 10))
for i in range(3):
ax = plt.subplot(1, 4, i + 1)
plt.imshow((sample_batch["images"][i] + 1) / 2)
text = tokenizer.decode(sample_batch["tokens"][i].numpy().squeeze())
text = text.replace("<|startoftext|>", "")
text = text.replace("<|endoftext|>", "")
text = "\n".join(wrap(text, 12))
plt.title(text, fontsize=15)
plt.axis("off")
"""
## A trainer class for the fine-tuning loop
"""
class Trainer(tf.keras.Model):
# Reference:
# https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image.py
def __init__(
self,
diffusion_model,
vae,
noise_scheduler,
use_mixed_precision=False,
max_grad_norm=1.0,
**kwargs
):
super().__init__(**kwargs)
self.diffusion_model = diffusion_model
self.vae = vae
self.noise_scheduler = noise_scheduler
self.max_grad_norm = max_grad_norm
self.use_mixed_precision = use_mixed_precision
self.vae.trainable = False
def train_step(self, inputs):
images = inputs["images"]
encoded_text = inputs["encoded_text"]
batch_size = tf.shape(images)[0]
with tf.GradientTape() as tape:
# Project image into the latent space and sample from it.
latents = self.sample_from_encoder_outputs(self.vae(images, training=False))
# Know more about the magic number here:
# https://keras.io/examples/generative/fine_tune_via_textual_inversion/
latents = latents * 0.18215
# Sample noise that we'll add to the latents.
noise = tf.random.normal(tf.shape(latents))
# Sample a random timestep for each image.
timesteps = tnp.random.randint(
0, self.noise_scheduler.train_timesteps, (batch_size,)
)
# Add noise to the latents according to the noise magnitude at each timestep
# (this is the forward diffusion process).
noisy_latents = self.noise_scheduler.add_noise(
tf.cast(latents, noise.dtype), noise, timesteps
)
# Get the target for loss depending on the prediction type
# just the sampled noise for now.
target = noise # noise_schedule.predict_epsilon == True
# Predict the noise residual and compute loss.
timestep_embedding = tf.map_fn(
lambda t: self.get_timestep_embedding(t), timesteps, dtype=tf.float32
)
timestep_embedding = tf.squeeze(timestep_embedding, 1)
model_pred = self.diffusion_model(
[noisy_latents, timestep_embedding, encoded_text], training=True
)
loss = self.compiled_loss(target, model_pred)
if self.use_mixed_precision:
loss = self.optimizer.get_scaled_loss(loss)
# Update parameters of the diffusion model.
trainable_vars = self.diffusion_model.trainable_variables
gradients = tape.gradient(loss, trainable_vars)
if self.use_mixed_precision:
gradients = self.optimizer.get_unscaled_gradients(gradients)
gradients = [tf.clip_by_norm(g, self.max_grad_norm) for g in gradients]
self.optimizer.apply_gradients(zip(gradients, trainable_vars))
return {m.name: m.result() for m in self.metrics}
def get_timestep_embedding(self, timestep, dim=320, max_period=10000):
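        # Sinusoidal timestep embedding: half of the dimensions use cosines and
        # the other half sines, with log-spaced frequencies (as in transformers).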
half = dim // 2
log_max_period = tf.math.log(tf.cast(max_period, tf.float32))
freqs = tf.math.exp(
-log_max_period * tf.range(0, half, dtype=tf.float32) / half
)
args = tf.convert_to_tensor([timestep], dtype=tf.float32) * freqs
embedding = tf.concat([tf.math.cos(args), tf.math.sin(args)], 0)
embedding = tf.reshape(embedding, [1, -1])
return embedding
def sample_from_encoder_outputs(self, outputs):
mean, logvar = tf.split(outputs, 2, axis=-1)
logvar = tf.clip_by_value(logvar, -30.0, 20.0)
std = tf.exp(0.5 * logvar)
sample = tf.random.normal(tf.shape(mean), dtype=mean.dtype)
return mean + std * sample
def save_weights(self, filepath, overwrite=True, save_format=None, options=None):
# Overriding this method will allow us to use the `ModelCheckpoint`
# callback directly with this trainer class. In this case, it will
# only checkpoint the `diffusion_model` since that's what we're training
# during fine-tuning.
self.diffusion_model.save_weights(
filepath=filepath,
overwrite=overwrite,
save_format=save_format,
options=options,
)
"""
One important implementation detail to note here: Instead of directly taking
the latent vector produced by the image encoder (which is a VAE), we sample from the
mean and log-variance predicted by it. This way, we can achieve better sample
quality and diversity.
It's common to add support for mixed-precision training along with exponential
moving averaging of model weights for fine-tuning these models. However, in the interest
of brevity, we discard those elements. More on this later in the tutorial.
"""
"""
## Initialize the trainer and compile it
"""
# Enable mixed-precision training if the underlying GPU has tensor cores.
USE_MP = True
if USE_MP:
keras.mixed_precision.set_global_policy("mixed_float16")
image_encoder = ImageEncoder()
diffusion_ft_trainer = Trainer(
diffusion_model=DiffusionModel(RESOLUTION, RESOLUTION, MAX_PROMPT_LENGTH),
# Remove the top layer from the encoder, which cuts off the variance and only
# returns the mean.
vae=tf.keras.Model(
image_encoder.input,
image_encoder.layers[-2].output,
),
noise_scheduler=NoiseScheduler(),
use_mixed_precision=USE_MP,
)
# These hyperparameters come from this tutorial by Hugging Face:
# https://huggingface.co/docs/diffusers/training/text2image
lr = 1e-5
beta_1, beta_2 = 0.9, 0.999
weight_decay = 1e-2
epsilon = 1e-08
optimizer = tf.keras.optimizers.experimental.AdamW(
learning_rate=lr,
weight_decay=weight_decay,
beta_1=beta_1,
beta_2=beta_2,
epsilon=epsilon,
)
diffusion_ft_trainer.compile(optimizer=optimizer, loss="mse")
"""
## Fine-tuning
To keep the runtime of this tutorial short, we just fine-tune for an epoch.
"""
epochs = 1
ckpt_path = "finetuned_stable_diffusion.h5"
ckpt_callback = tf.keras.callbacks.ModelCheckpoint(
ckpt_path,
save_weights_only=True,
monitor="loss",
mode="min",
)
diffusion_ft_trainer.fit(training_dataset, epochs=epochs, callbacks=[ckpt_callback])
"""
## Inference
We fine-tuned the model for 60 epochs on an image resolution of 512x512. To allow
training with this resolution, we incorporated mixed-precision support. You can
check out
[this repository](https://github.com/sayakpaul/stabe-diffusion-keras-ft)
for more details. It additionally provides support for exponential moving averaging of
the fine-tuned model parameters and model checkpointing.
For this section, we'll use the checkpoint derived after 60 epochs of fine-tuning.
"""
weights_path = tf.keras.utils.get_file(
origin="https://huggingface.co/sayakpaul/kerascv_sd_pokemon_finetuned/resolve/main/ckpt_epochs_72_res_512_mp_True.h5"
)
img_height = img_width = 512
pokemon_model = keras_cv.models.StableDiffusion(
img_width=img_width, img_height=img_height
)
# We just reload the weights of the fine-tuned diffusion model.
pokemon_model.diffusion_model.load_weights(weights_path)
"""
Now, we can take this model for a test-drive.
"""
prompts = ["Yoda", "Hello Kitty", "A pokemon with red eyes"]
images_to_generate = 3
outputs = {}
for prompt in prompts:
generated_images = pokemon_model.text_to_image(
prompt, batch_size=images_to_generate, unconditional_guidance_scale=40
)
outputs.update({prompt: generated_images})
"""
With 60 epochs of fine-tuning (a good number is about 70), the generated images were not
up to the mark. So, we experimented with the number of steps Stable Diffusion takes
during the inference time and the `unconditional_guidance_scale` parameter.
We found the best results with this checkpoint with `unconditional_guidance_scale` set to
40.
"""
def plot_images(images, title):
plt.figure(figsize=(20, 20))
for i in range(len(images)):
ax = plt.subplot(1, len(images), i + 1)
plt.imshow(images[i])
plt.title(title, fontsize=12)
plt.axis("off")
for prompt in outputs:
plot_images(outputs[prompt], prompt)
"""
We can notice that the model has started adapting to the style of our dataset. You can
check the
[accompanying repository](https://github.com/sayakpaul/stable-diffusion-keras-ft#results)
for more comparisons and commentary. If you're feeling adventurous to try out a demo,
you can check out
[this resource](https://huggingface.co/spaces/sayakpaul/pokemon-sd-kerascv).
"""
"""
## Conclusion and acknowledgements
We demonstrated how to fine-tune the Stable Diffusion model on a custom dataset. While
the results are far from aesthetically pleasing, we believe with more epochs of
fine-tuning, they will likely improve. To enable that, having support for gradient
accumulation and distributed training is crucial. This can be thought of as the next step
in this tutorial.
There is another interesting way in which Stable Diffusion models can be fine-tuned,
called textual inversion. You can refer to
[this tutorial](https://keras.io/examples/generative/fine_tune_via_textual_inversion/)
to know more about it.
We'd like to acknowledge the GCP Credit support from ML Developer Programs' team at
Google. We'd like to thank the Hugging Face team for providing the
[fine-tuning script](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image.py)
. It's very readable and easy to understand.
"""
|
keras-io/examples/generative/finetune_stable_diffusion.py/0
|
{
"file_path": "keras-io/examples/generative/finetune_stable_diffusion.py",
"repo_id": "keras-io",
"token_count": 6544
}
| 103 |
<jupyter_start><jupyter_text>Teach StableDiffusion new concepts via Textual Inversion**Authors:** Ian Stenbit, [lukewood](https://lukewood.xyz)**Date created:** 2022/12/09**Last modified:** 2022/12/09**Description:** Learning new visual concepts with KerasCV's StableDiffusion implementation. Textual InversionSince its release, StableDiffusion has quickly become a favorite amongstthe generative machine learning community.The high volume of traffic has led to open source contributed improvements,heavy prompt engineering, and even the invention of novel algorithms.Perhaps the most impressive new algorithm being used is[Textual Inversion](https://github.com/rinongal/textual_inversion), presented in[_An Image is Worth One Word: Personalizing Text-to-Image Generation using Textual Inversion_](https://textual-inversion.github.io/).Textual Inversion is the process of teaching an image generator a specific visual conceptthrough the use of fine-tuning. In the diagram below, you can see anexample of this process where the authors teach the model new concepts, calling them"S_*".Conceptually, textual inversion works by learning a token embedding for a new texttoken, keeping the remaining components of StableDiffusion frozen.This guide shows you how to fine-tune the StableDiffusion model shipped in KerasCVusing the Textual-Inversion algorithm. By the end of the guide, you will be able towrite the "Gandalf the Gray as a <my-funny-cat-token>".First, let's import the packages we need, and create aStableDiffusion instance so we can use some of its subcomponents for fine-tuning.<jupyter_code>!pip install -q git+https://github.com/keras-team/keras-cv.git
!pip install -q tensorflow==2.11.0
import math
import keras_cv
import numpy as np
import tensorflow as tf
from keras_cv import layers as cv_layers
from keras_cv.models.stable_diffusion import NoiseScheduler
from tensorflow import keras
import matplotlib.pyplot as plt
stable_diffusion = keras_cv.models.StableDiffusion()<jupyter_output><empty_output><jupyter_text>Next, let's define a visualization utility to show off the generated images:<jupyter_code>def plot_images(images):
plt.figure(figsize=(20, 20))
for i in range(len(images)):
ax = plt.subplot(1, len(images), i + 1)
plt.imshow(images[i])
plt.axis("off")<jupyter_output><empty_output><jupyter_text>Assembling a text-image pair datasetIn order to train the embedding of our new token, we first must assemble a datasetconsisting of text-image pairs.Each sample from the dataset must contain an image of the concept we are teachingStableDiffusion, as well as a caption accurately representing the content of the image.In this tutorial, we will teach StableDiffusion the concept of Luke and Ian's GitHubavatars:First, let's construct an image dataset of cat dolls:<jupyter_code>def assemble_image_dataset(urls):
# Fetch all remote files
files = [tf.keras.utils.get_file(origin=url) for url in urls]
# Resize images
resize = keras.layers.Resizing(height=512, width=512, crop_to_aspect_ratio=True)
images = [keras.utils.load_img(img) for img in files]
images = [keras.utils.img_to_array(img) for img in images]
images = np.array([resize(img) for img in images])
# The StableDiffusion image encoder requires images to be normalized to the
# [-1, 1] pixel value range
images = images / 127.5 - 1
# Create the tf.data.Dataset
image_dataset = tf.data.Dataset.from_tensor_slices(images)
# Shuffle and introduce random noise
image_dataset = image_dataset.shuffle(50, reshuffle_each_iteration=True)
image_dataset = image_dataset.map(
cv_layers.RandomCropAndResize(
target_size=(512, 512),
crop_area_factor=(0.8, 1.0),
aspect_ratio_factor=(1.0, 1.0),
),
num_parallel_calls=tf.data.AUTOTUNE,
)
image_dataset = image_dataset.map(
cv_layers.RandomFlip(mode="horizontal"),
num_parallel_calls=tf.data.AUTOTUNE,
)
return image_dataset<jupyter_output><empty_output><jupyter_text>Next, we assemble a text dataset:<jupyter_code>MAX_PROMPT_LENGTH = 77
placeholder_token = "<my-funny-cat-token>"
def pad_embedding(embedding):
return embedding + (
[stable_diffusion.tokenizer.end_of_text] * (MAX_PROMPT_LENGTH - len(embedding))
)
stable_diffusion.tokenizer.add_tokens(placeholder_token)
def assemble_text_dataset(prompts):
prompts = [prompt.format(placeholder_token) for prompt in prompts]
embeddings = [stable_diffusion.tokenizer.encode(prompt) for prompt in prompts]
embeddings = [np.array(pad_embedding(embedding)) for embedding in embeddings]
text_dataset = tf.data.Dataset.from_tensor_slices(embeddings)
text_dataset = text_dataset.shuffle(100, reshuffle_each_iteration=True)
return text_dataset<jupyter_output><empty_output><jupyter_text>Finally, we zip our datasets together to produce a text-image pair dataset.<jupyter_code>def assemble_dataset(urls, prompts):
image_dataset = assemble_image_dataset(urls)
text_dataset = assemble_text_dataset(prompts)
# the image dataset is quite short, so we repeat it to match the length of the
# text prompt dataset
image_dataset = image_dataset.repeat()
# we use the text prompt dataset to determine the length of the dataset. Due to
# the fact that there are relatively few prompts we repeat the dataset 5 times.
# we have found that this anecdotally improves results.
text_dataset = text_dataset.repeat(5)
return tf.data.Dataset.zip((image_dataset, text_dataset))<jupyter_output><empty_output><jupyter_text>In order to ensure our prompts are descriptive, we use extremely generic prompts.Let's try this out with some sample images and prompts.<jupyter_code>train_ds = assemble_dataset(
urls=[
"https://i.imgur.com/VIedH1X.jpg",
"https://i.imgur.com/eBw13hE.png",
"https://i.imgur.com/oJ3rSg7.png",
"https://i.imgur.com/5mCL6Df.jpg",
"https://i.imgur.com/4Q6WWyI.jpg",
],
prompts=[
"a photo of a {}",
"a rendering of a {}",
"a cropped photo of the {}",
"the photo of a {}",
"a photo of a clean {}",
"a dark photo of the {}",
"a photo of my {}",
"a photo of the cool {}",
"a close-up photo of a {}",
"a bright photo of the {}",
"a cropped photo of a {}",
"a photo of the {}",
"a good photo of the {}",
"a photo of one {}",
"a close-up photo of the {}",
"a rendition of the {}",
"a photo of the clean {}",
"a rendition of a {}",
"a photo of a nice {}",
"a good photo of a {}",
"a photo of the nice {}",
"a photo of the small {}",
"a photo of the weird {}",
"a photo of the large {}",
"a photo of a cool {}",
"a photo of a small {}",
],
)<jupyter_output><empty_output><jupyter_text>On the importance of prompt accuracyDuring our first attempt at writing this guide we included images of groups of these catdolls in our dataset but continued to use the generic prompts listed above.Our results were anecdotally poor. For example, here's cat doll gandalf using this method:It's conceptually close, but it isn't as great as it can be.In order to remedy this, we began experimenting with splitting our images into images ofsingular cat dolls and groups of cat dolls.Following this split, we came up with new prompts for the group shots.Training on text-image pairs that accurately represent the content boosted the qualityof our results *substantially*. This speaks to the importance of prompt accuracy.In addition to separating the images into singular and group images, we also remove someinaccurate prompts; such as "a dark photo of the {}"Keeping this in mind, we assemble our final training dataset below:<jupyter_code>single_ds = assemble_dataset(
urls=[
"https://i.imgur.com/VIedH1X.jpg",
"https://i.imgur.com/eBw13hE.png",
"https://i.imgur.com/oJ3rSg7.png",
"https://i.imgur.com/5mCL6Df.jpg",
"https://i.imgur.com/4Q6WWyI.jpg",
],
prompts=[
"a photo of a {}",
"a rendering of a {}",
"a cropped photo of the {}",
"the photo of a {}",
"a photo of a clean {}",
"a photo of my {}",
"a photo of the cool {}",
"a close-up photo of a {}",
"a bright photo of the {}",
"a cropped photo of a {}",
"a photo of the {}",
"a good photo of the {}",
"a photo of one {}",
"a close-up photo of the {}",
"a rendition of the {}",
"a photo of the clean {}",
"a rendition of a {}",
"a photo of a nice {}",
"a good photo of a {}",
"a photo of the nice {}",
"a photo of the small {}",
"a photo of the weird {}",
"a photo of the large {}",
"a photo of a cool {}",
"a photo of a small {}",
],
)<jupyter_output><empty_output><jupyter_text>Looks great!Next, we assemble a dataset of groups of our GitHub avatars:<jupyter_code>group_ds = assemble_dataset(
urls=[
"https://i.imgur.com/yVmZ2Qa.jpg",
"https://i.imgur.com/JbyFbZJ.jpg",
"https://i.imgur.com/CCubd3q.jpg",
],
prompts=[
"a photo of a group of {}",
"a rendering of a group of {}",
"a cropped photo of the group of {}",
"the photo of a group of {}",
"a photo of a clean group of {}",
"a photo of my group of {}",
"a photo of a cool group of {}",
"a close-up photo of a group of {}",
"a bright photo of the group of {}",
"a cropped photo of a group of {}",
"a photo of the group of {}",
"a good photo of the group of {}",
"a photo of one group of {}",
"a close-up photo of the group of {}",
"a rendition of the group of {}",
"a photo of the clean group of {}",
"a rendition of a group of {}",
"a photo of a nice group of {}",
"a good photo of a group of {}",
"a photo of the nice group of {}",
"a photo of the small group of {}",
"a photo of the weird group of {}",
"a photo of the large group of {}",
"a photo of a cool group of {}",
"a photo of a small group of {}",
],
)<jupyter_output><empty_output><jupyter_text>Finally, we concatenate the two datasets:<jupyter_code>train_ds = single_ds.concatenate(group_ds)
train_ds = train_ds.batch(1).shuffle(
train_ds.cardinality(), reshuffle_each_iteration=True
)<jupyter_output><empty_output><jupyter_text>Adding a new token to the text encoder. Next, we create a new text encoder for the StableDiffusion model and add a new embedding for our placeholder token into the model.<jupyter_code>tokenized_initializer = stable_diffusion.tokenizer.encode("cat")[1]
new_weights = stable_diffusion.text_encoder.layers[2].token_embedding(
tf.constant(tokenized_initializer)
)
# Get len of .vocab instead of tokenizer
new_vocab_size = len(stable_diffusion.tokenizer.vocab)
# The embedding layer is the 2nd layer in the text encoder
old_token_weights = stable_diffusion.text_encoder.layers[
2
].token_embedding.get_weights()
old_position_weights = stable_diffusion.text_encoder.layers[
2
].position_embedding.get_weights()
old_token_weights = old_token_weights[0]
new_weights = np.expand_dims(new_weights, axis=0)
new_weights = np.concatenate([old_token_weights, new_weights], axis=0)<jupyter_output><empty_output><jupyter_text>Let's construct a new TextEncoder and prepare it.<jupyter_code># Have to set download_weights False so we can init (otherwise tries to load weights)
new_encoder = keras_cv.models.stable_diffusion.TextEncoder(
keras_cv.models.stable_diffusion.stable_diffusion.MAX_PROMPT_LENGTH,
vocab_size=new_vocab_size,
download_weights=False,
)
for index, layer in enumerate(stable_diffusion.text_encoder.layers):
# Layer 2 is the embedding layer, so we omit it from our weight-copying
if index == 2:
continue
new_encoder.layers[index].set_weights(layer.get_weights())
new_encoder.layers[2].token_embedding.set_weights([new_weights])
new_encoder.layers[2].position_embedding.set_weights(old_position_weights)
stable_diffusion._text_encoder = new_encoder
stable_diffusion._text_encoder.compile(jit_compile=True)<jupyter_output><empty_output><jupyter_text>TrainingNow we can move on to the exciting part: training!In TextualInversion, the only piece of the model that is trained is the embedding vector.Let's freeze the rest of the model.<jupyter_code>stable_diffusion.diffusion_model.trainable = False
stable_diffusion.decoder.trainable = False
stable_diffusion.text_encoder.trainable = True
stable_diffusion.text_encoder.layers[2].trainable = True
def traverse_layers(layer):
if hasattr(layer, "layers"):
for layer in layer.layers:
yield layer
if hasattr(layer, "token_embedding"):
yield layer.token_embedding
if hasattr(layer, "position_embedding"):
yield layer.position_embedding
for layer in traverse_layers(stable_diffusion.text_encoder):
if isinstance(layer, keras.layers.Embedding) or "clip_embedding" in layer.name:
layer.trainable = True
else:
layer.trainable = False
new_encoder.layers[2].position_embedding.trainable = False<jupyter_output><empty_output><jupyter_text>Let's confirm the proper weights are set to trainable.<jupyter_code>all_models = [
stable_diffusion.text_encoder,
stable_diffusion.diffusion_model,
stable_diffusion.decoder,
]
print([[w.shape for w in model.trainable_weights] for model in all_models])<jupyter_output><empty_output><jupyter_text>Training the new embeddingIn order to train the embedding, we need a couple of utilities.We import a NoiseScheduler from KerasCV, and define the following utilities below:- `sample_from_encoder_outputs` is a wrapper around the base StableDiffusion imageencoder which samples from the statistical distribution produced by the imageencoder, rather than taking just the mean (like many other SD applications)- `get_timestep_embedding` produces an embedding for a specified timestep for thediffusion model- `get_position_ids` produces a tensor of position IDs for the text encoder (which is just aseries from `[1, MAX_PROMPT_LENGTH]`)<jupyter_code># Remove the top layer from the encoder, which cuts off the variance and only returns
# the mean
training_image_encoder = keras.Model(
stable_diffusion.image_encoder.input,
stable_diffusion.image_encoder.layers[-2].output,
)
def sample_from_encoder_outputs(outputs):
mean, logvar = tf.split(outputs, 2, axis=-1)
logvar = tf.clip_by_value(logvar, -30.0, 20.0)
std = tf.exp(0.5 * logvar)
sample = tf.random.normal(tf.shape(mean))
return mean + std * sample
def get_timestep_embedding(timestep, dim=320, max_period=10000):
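    # This is a sinusoidal timestep embedding, analogous to the positional encodings
    # used in Transformers: half of the output holds cosines and the other half sines
    # of the timestep, evaluated at geometrically spaced frequencies.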
half = dim // 2
freqs = tf.math.exp(
-math.log(max_period) * tf.range(0, half, dtype=tf.float32) / half
)
args = tf.convert_to_tensor([timestep], dtype=tf.float32) * freqs
embedding = tf.concat([tf.math.cos(args), tf.math.sin(args)], 0)
return embedding
def get_position_ids():
return tf.convert_to_tensor([list(range(MAX_PROMPT_LENGTH))], dtype=tf.int32)<jupyter_output><empty_output><jupyter_text>Next, we implement a `StableDiffusionFineTuner`, which is a subclass of `keras.Model`that overrides `train_step` to train the token embeddings of our text encoder.This is the core of the Textual Inversion algorithm.Abstractly speaking, the train step takes a sample from the output of the frozen SDimage encoder's latent distribution for a training image, adds noise to that sample, andthen passes that noisy sample to the frozen diffusion model.The hidden state of the diffusion model is the output of the text encoder for the promptcorresponding to the image.Our final goal state is that the diffusion model is able to separate the noise from thesample using the text encoding as hidden state, so our loss is the mean-squared error ofthe noise and the output of the diffusion model (which has, ideally, removed the imagelatents from the noise).We compute gradients for only the token embeddings of the text encoder, and in thetrain step we zero-out the gradients for all tokens other than the token that we'relearning.See in-line code comments for more details about the train step.<jupyter_code>class StableDiffusionFineTuner(keras.Model):
def __init__(self, stable_diffusion, noise_scheduler, **kwargs):
super().__init__(**kwargs)
self.stable_diffusion = stable_diffusion
self.noise_scheduler = noise_scheduler
def train_step(self, data):
images, embeddings = data
with tf.GradientTape() as tape:
# Sample from the predicted distribution for the training image
latents = sample_from_encoder_outputs(training_image_encoder(images))
# The latents must be downsampled to match the scale of the latents used
# in the training of StableDiffusion. This number is truly just a "magic"
# constant that they chose when training the model.
latents = latents * 0.18215
# Produce random noise in the same shape as the latent sample
noise = tf.random.normal(tf.shape(latents))
batch_dim = tf.shape(latents)[0]
# Pick a random timestep for each sample in the batch
timesteps = tf.random.uniform(
(batch_dim,),
minval=0,
maxval=noise_scheduler.train_timesteps,
dtype=tf.int64,
)
# Add noise to the latents based on the timestep for each sample
noisy_latents = self.noise_scheduler.add_noise(latents, noise, timesteps)
# Encode the text in the training samples to use as hidden state in the
# diffusion model
encoder_hidden_state = self.stable_diffusion.text_encoder(
[embeddings, get_position_ids()]
)
# Compute timestep embeddings for the randomly-selected timesteps for each
# sample in the batch
timestep_embeddings = tf.map_fn(
fn=get_timestep_embedding,
elems=timesteps,
fn_output_signature=tf.float32,
)
# Call the diffusion model
noise_pred = self.stable_diffusion.diffusion_model(
[noisy_latents, timestep_embeddings, encoder_hidden_state]
)
# Compute the mean-squared error loss and reduce it.
loss = self.compiled_loss(noise_pred, noise)
loss = tf.reduce_mean(loss, axis=2)
loss = tf.reduce_mean(loss, axis=1)
loss = tf.reduce_mean(loss)
# Load the trainable weights and compute the gradients for them
trainable_weights = self.stable_diffusion.text_encoder.trainable_weights
grads = tape.gradient(loss, trainable_weights)
# Gradients are stored in indexed slices, so we have to find the index
# of the slice(s) which contain the placeholder token.
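        # (The new token was appended to the very end of the embedding table above,
        # so its index, 49408, equals the size of the original vocabulary.)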
index_of_placeholder_token = tf.reshape(tf.where(grads[0].indices == 49408), ())
condition = grads[0].indices == 49408
condition = tf.expand_dims(condition, axis=-1)
# Override the gradients, zeroing out the gradients for all slices that
# aren't for the placeholder token, effectively freezing the weights for
# all other tokens.
grads[0] = tf.IndexedSlices(
values=tf.where(condition, grads[0].values, 0),
indices=grads[0].indices,
dense_shape=grads[0].dense_shape,
)
self.optimizer.apply_gradients(zip(grads, trainable_weights))
return {"loss": loss}<jupyter_output><empty_output><jupyter_text>Before we start training, let's take a look at what StableDiffusion produces for ourtoken.<jupyter_code>generated = stable_diffusion.text_to_image(
f"an oil painting of {placeholder_token}", seed=1337, batch_size=3
)
plot_images(generated)<jupyter_output><empty_output><jupyter_text>As you can see, the model still thinks of our token as a cat, as this was the seed tokenwe used to initialize our custom token.Now, to get started with training, we can just `compile()` our model like any otherKeras model. Before doing so, we also instantiate a noise scheduler for training andconfigure our training parameters such as learning rate and optimizer.<jupyter_code>noise_scheduler = NoiseScheduler(
beta_start=0.00085,
beta_end=0.012,
beta_schedule="scaled_linear",
train_timesteps=1000,
)
trainer = StableDiffusionFineTuner(stable_diffusion, noise_scheduler, name="trainer")
EPOCHS = 50
learning_rate = keras.optimizers.schedules.CosineDecay(
initial_learning_rate=1e-4, decay_steps=train_ds.cardinality() * EPOCHS
)
optimizer = keras.optimizers.Adam(
weight_decay=0.004, learning_rate=learning_rate, epsilon=1e-8, global_clipnorm=10
)
trainer.compile(
optimizer=optimizer,
# We are performing reduction manually in our train step, so none is required here.
loss=keras.losses.MeanSquaredError(reduction="none"),
)<jupyter_output><empty_output><jupyter_text>To monitor training, we can produce a `keras.callbacks.Callback` to produce a few imagesevery epoch using our custom token.We create three callbacks with different prompts so that we can see how they progressover the course of training. We use a fixed seed so that we can easily see theprogression of the learned token.<jupyter_code>class GenerateImages(keras.callbacks.Callback):
def __init__(
self, stable_diffusion, prompt, steps=50, frequency=10, seed=None, **kwargs
):
super().__init__(**kwargs)
self.stable_diffusion = stable_diffusion
self.prompt = prompt
self.seed = seed
self.frequency = frequency
self.steps = steps
def on_epoch_end(self, epoch, logs):
if epoch % self.frequency == 0:
images = self.stable_diffusion.text_to_image(
self.prompt, batch_size=3, num_steps=self.steps, seed=self.seed
)
plot_images(
images,
)
cbs = [
GenerateImages(
stable_diffusion, prompt=f"an oil painting of {placeholder_token}", seed=1337
),
GenerateImages(
stable_diffusion, prompt=f"gandalf the gray as a {placeholder_token}", seed=1337
),
GenerateImages(
stable_diffusion,
prompt=f"two {placeholder_token} getting married, photorealistic, high quality",
seed=1337,
),
]<jupyter_output><empty_output><jupyter_text>Now, all that is left to do is to call `model.fit()`!<jupyter_code>trainer.fit(
train_ds,
epochs=EPOCHS,
callbacks=cbs,
)<jupyter_output><empty_output><jupyter_text>It's pretty fun to see how the model learns our new token over time. Play around with itand see how you can tune training parameters and your training dataset to produce thebest images! Taking the Fine Tuned Model for a SpinNow for the really fun part. We've learned a token embedding for our custom token, sonow we can generate images with StableDiffusion the same way we would for any othertoken!Here are some fun example prompts to get you started, with sample outputs from our catdoll token!<jupyter_code>generated = stable_diffusion.text_to_image(
f"Gandalf as a {placeholder_token} fantasy art drawn by disney concept artists, "
"golden colour, high quality, highly detailed, elegant, sharp focus, concept art, "
"character concepts, digital painting, mystery, adventure",
batch_size=3,
)
plot_images(generated)
generated = stable_diffusion.text_to_image(
f"A masterpiece of a {placeholder_token} crying out to the heavens. "
    f"Behind the {placeholder_token}, a dark, evil shade looms over it - sucking the "
"life right out of it.",
batch_size=3,
)
plot_images(generated)
generated = stable_diffusion.text_to_image(
f"An evil {placeholder_token}.", batch_size=3
)
plot_images(generated)
generated = stable_diffusion.text_to_image(
f"A mysterious {placeholder_token} approaches the great pyramids of egypt.",
batch_size=3,
)
plot_images(generated)<jupyter_output><empty_output>
|
keras-io/examples/generative/ipynb/fine_tune_via_textual_inversion.ipynb/0
|
{
"file_path": "keras-io/examples/generative/ipynb/fine_tune_via_textual_inversion.ipynb",
"repo_id": "keras-io",
"token_count": 9031
}
| 104 |
<jupyter_start><jupyter_text>Vector-Quantized Variational Autoencoders**Author:** [Sayak Paul](https://twitter.com/RisingSayak)**Date created:** 2021/07/21**Last modified:** 2022/06/27**Description:** Training a VQ-VAE for image reconstruction and codebook sampling for generation. In this example, we develop a Vector Quantized Variational Autoencoder (VQ-VAE).VQ-VAE was proposed in[Neural Discrete Representation Learning](https://arxiv.org/abs/1711.00937)by van der Oord et al. In standard VAEs, the latent space is continuous and is sampledfrom a Gaussian distribution. It is generally harder to learn such a continuousdistribution via gradient descent. VQ-VAEs, on the other hand,operate on a discrete latent space, making the optimization problem simpler. It does soby maintaining a discrete *codebook*. The codebook is developed bydiscretizing the distance between continuous embeddings and the encodedoutputs. These discrete code words are then fed to the decoder, which is trainedto generate reconstructed samples.For an overview of VQ-VAEs, please refer to the original paper and[this video explanation](https://www.youtube.com/watch?v=VZFVUrYcig0).If you need a refresher on VAEs, you can refer to[this book chapter](https://livebook.manning.com/book/deep-learning-with-python-second-edition/chapter-12/).VQ-VAEs are one of the main recipes behind [DALL-E](https://openai.com/blog/dall-e/)and the idea of a codebook is used in [VQ-GANs](https://arxiv.org/abs/2012.09841).This example uses implementation details from the[official VQ-VAE tutorial](https://github.com/deepmind/sonnet/blob/master/sonnet/examples/vqvae_example.ipynb)from DeepMind. RequirementsTo run this example, you will need TensorFlow 2.5 or higher, as well asTensorFlow Probability, which can be installed using the command below.<jupyter_code>!pip install -q tensorflow-probability<jupyter_output><empty_output><jupyter_text>Imports<jupyter_code>import numpy as np
import matplotlib.pyplot as plt
from tensorflow import keras
from tensorflow.keras import layers
import tensorflow_probability as tfp
import tensorflow as tf<jupyter_output><empty_output><jupyter_text>`VectorQuantizer` layerFirst, we implement a custom layer for the vector quantizer, which is the layer in betweenthe encoder and decoder. Consider an output from the encoder, with shape `(batch_size, height, width,num_filters)`. The vector quantizer will first flatten this output, only keeping the`num_filters` dimension intact. So, the shape would become `(batch_size * height * width,num_filters)`. The rationale behind this is to treat the total number of filters as the size forthe latent embeddings.An embedding table is then initialized to learn a codebook. We measure the L2-normalizeddistance between the flattened encoder outputs and code words of this codebook. We take thecode that yields the minimum distance, and we apply one-hot encoding to achieve quantization.This way, the code yielding the minimum distance to the corresponding encoder output ismapped as one and the remaining codes are mapped as zeros.Since the quantization process is not differentiable, we apply a[straight-through estimator](https://www.hassanaskary.com/python/pytorch/deep%20learning/2020/09/19/intuitive-explanation-of-straight-through-estimators.html)in between the decoder and the encoder, so that the decoder gradients are directly propagatedto the encoder. As the encoder and decoder share the same channel space, the decoder gradients arestill meaningful to the encoder.<jupyter_code>class VectorQuantizer(layers.Layer):
def __init__(self, num_embeddings, embedding_dim, beta=0.25, **kwargs):
super().__init__(**kwargs)
self.embedding_dim = embedding_dim
self.num_embeddings = num_embeddings
# The `beta` parameter is best kept between [0.25, 2] as per the paper.
self.beta = beta
# Initialize the embeddings which we will quantize.
w_init = tf.random_uniform_initializer()
self.embeddings = tf.Variable(
initial_value=w_init(
shape=(self.embedding_dim, self.num_embeddings), dtype="float32"
),
trainable=True,
name="embeddings_vqvae",
)
def call(self, x):
# Calculate the input shape of the inputs and
# then flatten the inputs keeping `embedding_dim` intact.
input_shape = tf.shape(x)
flattened = tf.reshape(x, [-1, self.embedding_dim])
# Quantization.
encoding_indices = self.get_code_indices(flattened)
encodings = tf.one_hot(encoding_indices, self.num_embeddings)
quantized = tf.matmul(encodings, self.embeddings, transpose_b=True)
# Reshape the quantized values back to the original input shape
quantized = tf.reshape(quantized, input_shape)
# Calculate vector quantization loss and add that to the layer. You can learn more
# about adding losses to different layers here:
# https://keras.io/guides/making_new_layers_and_models_via_subclassing/. Check
# the original paper to get a handle on the formulation of the loss function.
commitment_loss = tf.reduce_mean((tf.stop_gradient(quantized) - x) ** 2)
codebook_loss = tf.reduce_mean((quantized - tf.stop_gradient(x)) ** 2)
self.add_loss(self.beta * commitment_loss + codebook_loss)
# Straight-through estimator.
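        # In the forward pass this expression equals `quantized`; in the backward
        # pass the `stop_gradient` term contributes nothing, so gradients flow to
        # `x` (the encoder output) as if the quantization step were the identity.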
quantized = x + tf.stop_gradient(quantized - x)
return quantized
def get_code_indices(self, flattened_inputs):
# Calculate L2-normalized distance between the inputs and the codes.
similarity = tf.matmul(flattened_inputs, self.embeddings)
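        # This expands the squared Euclidean distance ||x - e||^2 into
        # ||x||^2 + ||e||^2 - 2 <x, e>, so it can be computed for every
        # (input, code) pair with a single matrix multiplication.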
distances = (
tf.reduce_sum(flattened_inputs ** 2, axis=1, keepdims=True)
+ tf.reduce_sum(self.embeddings ** 2, axis=0)
- 2 * similarity
)
# Derive the indices for minimum distances.
encoding_indices = tf.argmin(distances, axis=1)
        return encoding_indices<jupyter_output><empty_output><jupyter_text>**A note on straight-through estimation**: This line of code does the straight-through estimation part: `quantized = x + tf.stop_gradient(quantized - x)`. During backpropagation, `(quantized - x)` won't be included in the computation graph and the gradients obtained for `quantized` will be copied for `inputs`. Thanks to [this video](https://youtu.be/VZFVUrYcig0?t=1393) for helping me understand this technique. Encoder and decoder. Now for the encoder and the decoder for the VQ-VAE. We will keep them small so that their capacity is a good fit for the MNIST dataset. The implementation of the encoder and decoder come from [this example](https://keras.io/examples/generative/vae). Note that activations _other than ReLU_ may not work for the encoder and decoder layers in the quantization architecture: Leaky ReLU activated layers, for example, have proven difficult to train, resulting in intermittent loss spikes that the model has trouble recovering from.<jupyter_code>def get_encoder(latent_dim=16):
encoder_inputs = keras.Input(shape=(28, 28, 1))
x = layers.Conv2D(32, 3, activation="relu", strides=2, padding="same")(
encoder_inputs
)
x = layers.Conv2D(64, 3, activation="relu", strides=2, padding="same")(x)
encoder_outputs = layers.Conv2D(latent_dim, 1, padding="same")(x)
return keras.Model(encoder_inputs, encoder_outputs, name="encoder")
def get_decoder(latent_dim=16):
latent_inputs = keras.Input(shape=get_encoder(latent_dim).output.shape[1:])
x = layers.Conv2DTranspose(64, 3, activation="relu", strides=2, padding="same")(
latent_inputs
)
x = layers.Conv2DTranspose(32, 3, activation="relu", strides=2, padding="same")(x)
decoder_outputs = layers.Conv2DTranspose(1, 3, padding="same")(x)
return keras.Model(latent_inputs, decoder_outputs, name="decoder")<jupyter_output><empty_output><jupyter_text>Standalone VQ-VAE model<jupyter_code>def get_vqvae(latent_dim=16, num_embeddings=64):
vq_layer = VectorQuantizer(num_embeddings, latent_dim, name="vector_quantizer")
encoder = get_encoder(latent_dim)
decoder = get_decoder(latent_dim)
inputs = keras.Input(shape=(28, 28, 1))
encoder_outputs = encoder(inputs)
quantized_latents = vq_layer(encoder_outputs)
reconstructions = decoder(quantized_latents)
return keras.Model(inputs, reconstructions, name="vq_vae")
get_vqvae().summary()<jupyter_output><empty_output><jupyter_text>Note that the output channels of the encoder should match the `latent_dim` for the vectorquantizer. Wrapping up the training loop inside `VQVAETrainer`<jupyter_code>class VQVAETrainer(keras.models.Model):
def __init__(self, train_variance, latent_dim=32, num_embeddings=128, **kwargs):
super().__init__(**kwargs)
self.train_variance = train_variance
self.latent_dim = latent_dim
self.num_embeddings = num_embeddings
self.vqvae = get_vqvae(self.latent_dim, self.num_embeddings)
self.total_loss_tracker = keras.metrics.Mean(name="total_loss")
self.reconstruction_loss_tracker = keras.metrics.Mean(
name="reconstruction_loss"
)
self.vq_loss_tracker = keras.metrics.Mean(name="vq_loss")
@property
def metrics(self):
return [
self.total_loss_tracker,
self.reconstruction_loss_tracker,
self.vq_loss_tracker,
]
def train_step(self, x):
with tf.GradientTape() as tape:
# Outputs from the VQ-VAE.
reconstructions = self.vqvae(x)
# Calculate the losses.
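            # (The reconstruction term is scaled by the variance of the training
            # data, following the official VQ-VAE example this tutorial is based on.)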
reconstruction_loss = (
tf.reduce_mean((x - reconstructions) ** 2) / self.train_variance
)
total_loss = reconstruction_loss + sum(self.vqvae.losses)
# Backpropagation.
grads = tape.gradient(total_loss, self.vqvae.trainable_variables)
self.optimizer.apply_gradients(zip(grads, self.vqvae.trainable_variables))
# Loss tracking.
self.total_loss_tracker.update_state(total_loss)
self.reconstruction_loss_tracker.update_state(reconstruction_loss)
self.vq_loss_tracker.update_state(sum(self.vqvae.losses))
# Log results.
return {
"loss": self.total_loss_tracker.result(),
"reconstruction_loss": self.reconstruction_loss_tracker.result(),
"vqvae_loss": self.vq_loss_tracker.result(),
}<jupyter_output><empty_output><jupyter_text>Load and preprocess the MNIST dataset<jupyter_code>(x_train, _), (x_test, _) = keras.datasets.mnist.load_data()
x_train = np.expand_dims(x_train, -1)
x_test = np.expand_dims(x_test, -1)
x_train_scaled = (x_train / 255.0) - 0.5
x_test_scaled = (x_test / 255.0) - 0.5
data_variance = np.var(x_train / 255.0)<jupyter_output><empty_output><jupyter_text>Train the VQ-VAE model<jupyter_code>vqvae_trainer = VQVAETrainer(data_variance, latent_dim=16, num_embeddings=128)
vqvae_trainer.compile(optimizer=keras.optimizers.Adam())
vqvae_trainer.fit(x_train_scaled, epochs=30, batch_size=128)<jupyter_output><empty_output><jupyter_text>Reconstruction results on the test set<jupyter_code>def show_subplot(original, reconstructed):
plt.subplot(1, 2, 1)
plt.imshow(original.squeeze() + 0.5)
plt.title("Original")
plt.axis("off")
plt.subplot(1, 2, 2)
plt.imshow(reconstructed.squeeze() + 0.5)
plt.title("Reconstructed")
plt.axis("off")
plt.show()
trained_vqvae_model = vqvae_trainer.vqvae
idx = np.random.choice(len(x_test_scaled), 10)
test_images = x_test_scaled[idx]
reconstructions_test = trained_vqvae_model.predict(test_images)
for test_image, reconstructed_image in zip(test_images, reconstructions_test):
show_subplot(test_image, reconstructed_image)<jupyter_output><empty_output><jupyter_text>These results look decent. You are encouraged to play with different hyperparameters(especially the number of embeddings and the dimensions of the embeddings) and observe howthey affect the results. Visualizing the discrete codes<jupyter_code>encoder = vqvae_trainer.vqvae.get_layer("encoder")
quantizer = vqvae_trainer.vqvae.get_layer("vector_quantizer")
encoded_outputs = encoder.predict(test_images)
flat_enc_outputs = encoded_outputs.reshape(-1, encoded_outputs.shape[-1])
codebook_indices = quantizer.get_code_indices(flat_enc_outputs)
codebook_indices = codebook_indices.numpy().reshape(encoded_outputs.shape[:-1])
for i in range(len(test_images)):
plt.subplot(1, 2, 1)
plt.imshow(test_images[i].squeeze() + 0.5)
plt.title("Original")
plt.axis("off")
plt.subplot(1, 2, 2)
plt.imshow(codebook_indices[i])
plt.title("Code")
plt.axis("off")
plt.show()<jupyter_output><empty_output><jupyter_text>The figure above shows that the discrete codes have been able to capture someregularities from the dataset. Now, how do we sample from this codebook to createnovel images? Since these codes are discrete and we imposed a categorical distributionon them, we cannot use them yet to generate anything meaningful until we can generate likelysequences of codes that we can give to the decoder.The authors use a PixelCNN to train these codes so that they can be used as powerful priors togenerate novel examples. PixelCNN was proposed in[Conditional Image Generation with PixelCNN Decoders](https://arxiv.org/abs/1606.05328)by van der Oord et al. We borrow the implementation from[this PixelCNN example](https://keras.io/examples/generative/pixelcnn/). It's an autoregressivegenerative model where the outputs are conditional on the prior ones. In other words, a PixelCNNgenerates an image on a pixel-by-pixel basis. For the purpose in this example, however, its taskis to generate code book indices instead of pixels directly. The trained VQ-VAE decoder is usedto map the indices generated by the PixelCNN back into the pixel space. PixelCNN hyperparameters<jupyter_code>num_residual_blocks = 2
num_pixelcnn_layers = 2
pixelcnn_input_shape = encoded_outputs.shape[1:-1]
print(f"Input shape of the PixelCNN: {pixelcnn_input_shape}")<jupyter_output><empty_output><jupyter_text>This input shape represents the reduction in the resolution performed by the encoder. With "same" padding,this exactly halves the "resolution" of the output shape for each stride-2 convolution layer. So, with thesetwo layers, we end up with an encoder output tensor of 7x7 on axes 2 and 3, with the first axis as the batchsize and the last axis being the code book embedding size. Since the quantization layer in the autoencodermaps these 7x7 tensors to indices of the code book, these output layer axis sizes must be matched by thePixelCNN as the input shape. The task of the PixelCNN for this architecture is to generate _likely_ 7x7arrangements of codebook indices.Note that this shape is something to optimize for in larger-sized image domains, along with the codebook sizes. Since the PixelCNN is autoregressive, it needs to pass over each codebook index sequentiallyin order to generate novel images from the codebook. Each stride-2 (or rather more correctly astride (2, 2)) convolution layer will divide the image generation time by four. Note, however, that thereis probably a lower bound on this part: when the number of codes for the image to reconstruct is too small,it has insufficient information for the decoder to represent the level of detail in the image, so theoutput quality will suffer. This can be amended at least to some extent by using a larger code book.Since the autoregressive part of the image generation procedure uses codebook indices, there is far less ofa performance penalty on using a larger code book as the lookup time for a larger-sized code from a largercode book is much smaller in comparison to iterating over a larger sequence of code book indices, althoughthe size of the code book does impact on the batch size that can pass through the image generation procedure.Finding the sweet spot for this trade-off can require some architecture tweaking and could very well differper dataset. PixelCNN modelMajority of this comes from[this example](https://keras.io/examples/generative/pixelcnn/). NotesThanks to [Rein van 't Veer](https://github.com/reinvantveer) for improving this example withcopy-edits and minor code clean-ups.<jupyter_code># The first layer is the PixelCNN layer. This layer simply
# builds on the 2D convolutional layer, but includes masking.
class PixelConvLayer(layers.Layer):
def __init__(self, mask_type, **kwargs):
super().__init__()
self.mask_type = mask_type
self.conv = layers.Conv2D(**kwargs)
def build(self, input_shape):
# Build the conv2d layer to initialize kernel variables
self.conv.build(input_shape)
# Use the initialized kernel to create the mask
kernel_shape = self.conv.kernel.get_shape()
self.mask = np.zeros(shape=kernel_shape)
self.mask[: kernel_shape[0] // 2, ...] = 1.0
self.mask[kernel_shape[0] // 2, : kernel_shape[1] // 2, ...] = 1.0
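        # Mask type "A" (used only for the first layer) hides the center pixel so a
        # pixel never sees its own value; mask type "B" (used in all deeper layers)
        # re-enables the center, which by then only carries information from
        # previously generated pixels.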
if self.mask_type == "B":
self.mask[kernel_shape[0] // 2, kernel_shape[1] // 2, ...] = 1.0
def call(self, inputs):
self.conv.kernel.assign(self.conv.kernel * self.mask)
return self.conv(inputs)
# Next, we build our residual block layer.
# This is just a normal residual block, but based on the PixelConvLayer.
class ResidualBlock(keras.layers.Layer):
def __init__(self, filters, **kwargs):
super().__init__(**kwargs)
self.conv1 = keras.layers.Conv2D(
filters=filters, kernel_size=1, activation="relu"
)
self.pixel_conv = PixelConvLayer(
mask_type="B",
filters=filters // 2,
kernel_size=3,
activation="relu",
padding="same",
)
self.conv2 = keras.layers.Conv2D(
filters=filters, kernel_size=1, activation="relu"
)
def call(self, inputs):
x = self.conv1(inputs)
x = self.pixel_conv(x)
x = self.conv2(x)
return keras.layers.add([inputs, x])
pixelcnn_inputs = keras.Input(shape=pixelcnn_input_shape, dtype=tf.int32)
ohe = tf.one_hot(pixelcnn_inputs, vqvae_trainer.num_embeddings)
x = PixelConvLayer(
mask_type="A", filters=128, kernel_size=7, activation="relu", padding="same"
)(ohe)
for _ in range(num_residual_blocks):
x = ResidualBlock(filters=128)(x)
for _ in range(num_pixelcnn_layers):
x = PixelConvLayer(
mask_type="B",
filters=128,
kernel_size=1,
strides=1,
activation="relu",
padding="valid",
)(x)
out = keras.layers.Conv2D(
filters=vqvae_trainer.num_embeddings, kernel_size=1, strides=1, padding="valid"
)(x)
pixel_cnn = keras.Model(pixelcnn_inputs, out, name="pixel_cnn")
pixel_cnn.summary()<jupyter_output><empty_output><jupyter_text>Prepare data to train the PixelCNNWe will train the PixelCNN to learn a categorical distribution of the discrete codes.First, we will generate code indices using the encoder and vector quantizer we justtrained. Our training objective will be to minimize the crossentropy loss between theseindices and the PixelCNN outputs. Here, the number of categories is equal to the numberof embeddings present in our codebook (128 in our case). The PixelCNN model istrained to learn a distribution (as opposed to minimizing the L1/L2 loss), which is whereit gets its generative capabilities from.<jupyter_code># Generate the codebook indices.
encoded_outputs = encoder.predict(x_train_scaled)
flat_enc_outputs = encoded_outputs.reshape(-1, encoded_outputs.shape[-1])
codebook_indices = quantizer.get_code_indices(flat_enc_outputs)
codebook_indices = codebook_indices.numpy().reshape(encoded_outputs.shape[:-1])
print(f"Shape of the training data for PixelCNN: {codebook_indices.shape}")<jupyter_output><empty_output><jupyter_text>PixelCNN training<jupyter_code>pixel_cnn.compile(
optimizer=keras.optimizers.Adam(3e-4),
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=["accuracy"],
)
pixel_cnn.fit(
x=codebook_indices,
y=codebook_indices,
batch_size=128,
epochs=30,
validation_split=0.1,
)<jupyter_output><empty_output><jupyter_text>We can improve these scores with more training and hyperparameter tuning. Codebook samplingNow that our PixelCNN is trained, we can sample distinct codes from its outputs and passthem to our decoder to generate novel images.<jupyter_code># Create a mini sampler model.
inputs = layers.Input(shape=pixel_cnn.input_shape[1:])
outputs = pixel_cnn(inputs, training=False)
categorical_layer = tfp.layers.DistributionLambda(tfp.distributions.Categorical)
outputs = categorical_layer(outputs)
sampler = keras.Model(inputs, outputs)<jupyter_output><empty_output><jupyter_text>We now construct a prior to generate images. Here, we will generate 10 images.<jupyter_code># Create an empty array of priors.
batch = 10
priors = np.zeros(shape=(batch,) + (pixel_cnn.input_shape)[1:])
batch, rows, cols = priors.shape
# Iterate over the priors because generation has to be done sequentially pixel by pixel.
for row in range(rows):
for col in range(cols):
        # Feed the whole array and retrieve the pixel value probabilities for the next
# pixel.
probs = sampler.predict(priors)
# Use the probabilities to pick pixel values and append the values to the priors.
priors[:, row, col] = probs[:, row, col]
print(f"Prior shape: {priors.shape}")<jupyter_output><empty_output><jupyter_text>We can now use our decoder to generate the images.<jupyter_code># Perform an embedding lookup.
pretrained_embeddings = quantizer.embeddings
priors_ohe = tf.one_hot(priors.astype("int32"), vqvae_trainer.num_embeddings).numpy()
quantized = tf.matmul(
priors_ohe.astype("float32"), pretrained_embeddings, transpose_b=True
)
quantized = tf.reshape(quantized, (-1, *(encoded_outputs.shape[1:])))
# Generate novel images.
decoder = vqvae_trainer.vqvae.get_layer("decoder")
generated_samples = decoder.predict(quantized)
for i in range(batch):
plt.subplot(1, 2, 1)
plt.imshow(priors[i])
plt.title("Code")
plt.axis("off")
plt.subplot(1, 2, 2)
plt.imshow(generated_samples[i].squeeze() + 0.5)
plt.title("Generated Sample")
plt.axis("off")
plt.show()<jupyter_output><empty_output>
|
keras-io/examples/generative/ipynb/vq_vae.ipynb/0
|
{
"file_path": "keras-io/examples/generative/ipynb/vq_vae.ipynb",
"repo_id": "keras-io",
"token_count": 8019
}
| 105 |
"""
Title: Neural style transfer
Author: [fchollet](https://twitter.com/fchollet)
Date created: 2016/01/11
Last modified: 2020/05/02
Description: Transferring the style of a reference image to target image using gradient descent.
Accelerator: GPU
"""
"""
## Introduction
Style transfer consists in generating an image
with the same "content" as a base image, but with the
"style" of a different picture (typically artistic).
This is achieved through the optimization of a loss function
that has 3 components: "style loss", "content loss",
and "total variation loss":
- The total variation loss imposes local spatial continuity between
the pixels of the combination image, giving it visual coherence.
- The style loss is where the deep learning kicks in --that one is defined
using a deep convolutional neural network. Precisely, it consists in a sum of
L2 distances between the Gram matrices of the representations of
the base image and the style reference image, extracted from
different layers of a convnet (trained on ImageNet). The general idea
is to capture color/texture information at different spatial
scales (fairly large scales --defined by the depth of the layer considered).
- The content loss is an L2 distance between the features of the base
image (extracted from a deep layer) and the features of the combination image,
keeping the generated image close enough to the original one.
**Reference:** [A Neural Algorithm of Artistic Style](
http://arxiv.org/abs/1508.06576)
"""
"""
## Setup
"""
import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import keras
import numpy as np
import tensorflow as tf
from keras.applications import vgg19
base_image_path = keras.utils.get_file("paris.jpg", "https://i.imgur.com/F28w3Ac.jpg")
style_reference_image_path = keras.utils.get_file(
"starry_night.jpg", "https://i.imgur.com/9ooB60I.jpg"
)
result_prefix = "paris_generated"
# Weights of the different loss components
total_variation_weight = 1e-6
style_weight = 1e-6
content_weight = 2.5e-8
# Dimensions of the generated picture.
width, height = keras.utils.load_img(base_image_path).size
img_nrows = 400
img_ncols = int(width * img_nrows / height)
"""
## Let's take a look at our base (content) image and our style reference image
"""
from IPython.display import Image, display
display(Image(base_image_path))
display(Image(style_reference_image_path))
"""
## Image preprocessing / deprocessing utilities
"""
def preprocess_image(image_path):
# Util function to open, resize and format pictures into appropriate tensors
img = keras.utils.load_img(image_path, target_size=(img_nrows, img_ncols))
img = keras.utils.img_to_array(img)
img = np.expand_dims(img, axis=0)
img = vgg19.preprocess_input(img)
return tf.convert_to_tensor(img)
def deprocess_image(x):
# Util function to convert a tensor into a valid image
x = x.reshape((img_nrows, img_ncols, 3))
# Remove zero-center by mean pixel
x[:, :, 0] += 103.939
x[:, :, 1] += 116.779
x[:, :, 2] += 123.68
# 'BGR'->'RGB'
x = x[:, :, ::-1]
x = np.clip(x, 0, 255).astype("uint8")
return x
"""
## Compute the style transfer loss
First, we need to define 4 utility functions:
- `gram_matrix` (used to compute the style loss)
- The `style_loss` function, which keeps the generated image close to the local textures
of the style reference image
- The `content_loss` function, which keeps the high-level representation of the
generated image close to that of the base image
- The `total_variation_loss` function, a regularization loss which keeps the generated
image locally-coherent
"""
# The gram matrix of an image tensor (feature-wise outer product)
def gram_matrix(x):
x = tf.transpose(x, (2, 0, 1))
features = tf.reshape(x, (tf.shape(x)[0], -1))
gram = tf.matmul(features, tf.transpose(features))
return gram
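"""
As a quick, purely illustrative sanity check, the Gram matrix of a feature map of
shape `(height, width, channels)` has shape `(channels, channels)`: one entry per
pair of channels, independent of the spatial resolution.
"""
toy_features = tf.random.uniform((4, 4, 3))
print("Toy Gram matrix shape:", gram_matrix(toy_features).shape)  # (3, 3)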
# The "style loss" is designed to maintain
# the style of the reference image in the generated image.
# It is based on the gram matrices (which capture style) of
# feature maps from the style reference image
# and from the generated image
def style_loss(style, combination):
S = gram_matrix(style)
C = gram_matrix(combination)
channels = 3
size = img_nrows * img_ncols
return tf.reduce_sum(tf.square(S - C)) / (4.0 * (channels**2) * (size**2))
# An auxiliary loss function
# designed to maintain the "content" of the
# base image in the generated image
def content_loss(base, combination):
return tf.reduce_sum(tf.square(combination - base))
# The 3rd loss function, total variation loss,
# designed to keep the generated image locally coherent
def total_variation_loss(x):
a = tf.square(
x[:, : img_nrows - 1, : img_ncols - 1, :] - x[:, 1:, : img_ncols - 1, :]
)
b = tf.square(
x[:, : img_nrows - 1, : img_ncols - 1, :] - x[:, : img_nrows - 1, 1:, :]
)
return tf.reduce_sum(tf.pow(a + b, 1.25))
"""
Next, let's create a feature extraction model that retrieves the intermediate activations
of VGG19 (as a dict, by name).
"""
# Build a VGG19 model loaded with pre-trained ImageNet weights
model = vgg19.VGG19(weights="imagenet", include_top=False)
# Get the symbolic outputs of each "key" layer (we gave them unique names).
outputs_dict = dict([(layer.name, layer.output) for layer in model.layers])
# Set up a model that returns the activation values for every layer in
# VGG19 (as a dict).
feature_extractor = keras.Model(inputs=model.inputs, outputs=outputs_dict)
"""
Finally, here's the code that computes the style transfer loss.
"""
# List of layers to use for the style loss.
style_layer_names = [
"block1_conv1",
"block2_conv1",
"block3_conv1",
"block4_conv1",
"block5_conv1",
]
# The layer to use for the content loss.
content_layer_name = "block5_conv2"
def compute_loss(combination_image, base_image, style_reference_image):
input_tensor = tf.concat(
[base_image, style_reference_image, combination_image], axis=0
)
features = feature_extractor(input_tensor)
# Initialize the loss
loss = tf.zeros(shape=())
# Add content loss
layer_features = features[content_layer_name]
base_image_features = layer_features[0, :, :, :]
combination_features = layer_features[2, :, :, :]
loss = loss + content_weight * content_loss(
base_image_features, combination_features
)
# Add style loss
for layer_name in style_layer_names:
layer_features = features[layer_name]
style_reference_features = layer_features[1, :, :, :]
combination_features = layer_features[2, :, :, :]
sl = style_loss(style_reference_features, combination_features)
loss += (style_weight / len(style_layer_names)) * sl
# Add total variation loss
loss += total_variation_weight * total_variation_loss(combination_image)
return loss
"""
## Add a tf.function decorator to loss & gradient computation
To compile it, and thus make it fast.
"""
@tf.function
def compute_loss_and_grads(combination_image, base_image, style_reference_image):
with tf.GradientTape() as tape:
loss = compute_loss(combination_image, base_image, style_reference_image)
grads = tape.gradient(loss, combination_image)
return loss, grads
"""
## The training loop
Repeatedly run vanilla gradient descent steps to minimize the loss, and save the
resulting image every 100 iterations.
We decay the learning rate by 0.96 every 100 steps.
"""
optimizer = keras.optimizers.SGD(
keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate=100.0, decay_steps=100, decay_rate=0.96
)
)
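# With this schedule, the learning rate starts at 100.0 and is multiplied by 0.96
# every 100 steps, so by the end of the 4000 iterations below it has decayed to
# roughly 100 * 0.96**40, i.e. about 19.5.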
base_image = preprocess_image(base_image_path)
style_reference_image = preprocess_image(style_reference_image_path)
combination_image = tf.Variable(preprocess_image(base_image_path))
iterations = 4000
for i in range(1, iterations + 1):
loss, grads = compute_loss_and_grads(
combination_image, base_image, style_reference_image
)
optimizer.apply_gradients([(grads, combination_image)])
if i % 100 == 0:
print("Iteration %d: loss=%.2f" % (i, loss))
img = deprocess_image(combination_image.numpy())
fname = result_prefix + "_at_iteration_%d.png" % i
keras.utils.save_img(fname, img)
"""
After 4000 iterations, you get the following result:
"""
display(Image(result_prefix + "_at_iteration_4000.png"))
|
keras-io/examples/generative/neural_style_transfer.py/0
|
{
"file_path": "keras-io/examples/generative/neural_style_transfer.py",
"repo_id": "keras-io",
"token_count": 2848
}
| 106 |
"""
Title: Message-passing neural network (MPNN) for molecular property prediction
Author: [akensert](http://github.com/akensert)
Date created: 2021/08/16
Last modified: 2021/12/27
Description: Implementation of an MPNN to predict blood-brain barrier permeability.
Accelerator: GPU
"""
"""
## Introduction
In this tutorial, we will implement a type of graph neural network (GNN) known as
_message passing neural network_ (MPNN) to predict graph properties. Specifically, we will
implement an MPNN to predict a molecular property known as
_blood-brain barrier permeability_ (BBBP).
Motivation: as molecules are naturally represented as an undirected graph `G = (V, E)`,
where `V` is a set of vertices (nodes; atoms) and `E` a set of edges (bonds), GNNs (such
as MPNN) are proving to be a useful method for predicting molecular properties.
Until now, more traditional methods, such as random forests, support vector machines, etc.,
have been commonly used to predict molecular properties. In contrast to GNNs, these
traditional approaches often operate on precomputed molecular features such as
molecular weight, polarity, charge, number of carbon atoms, etc. Although these
molecular features prove to be good predictors for various molecular properties, it is
hypothesized that operating on these more "raw", "low-level", features could prove even
better.
### References
In recent years, a lot of effort has been put into developing neural networks for
graph data, including molecular graphs. For a summary of graph neural networks, see e.g.,
[A Comprehensive Survey on Graph Neural Networks](https://arxiv.org/abs/1901.00596) and
[Graph Neural Networks: A Review of Methods and Applications](https://arxiv.org/abs/1812.08434);
and for further reading on the specific
graph neural network implemented in this tutorial see
[Neural Message Passing for Quantum Chemistry](https://arxiv.org/abs/1704.01212) and
[DeepChem's MPNNModel](https://deepchem.readthedocs.io/en/latest/api_reference/models.html#mpnnmodel).
"""
"""
## Setup
### Install RDKit and other dependencies
(Text below taken from
[this tutorial](https://keras.io/examples/generative/wgan-graphs/)).
[RDKit](https://www.rdkit.org/) is a collection of cheminformatics and machine-learning
software written in C++ and Python. In this tutorial, RDKit is used to conveniently and
efficiently transform
[SMILES](https://en.wikipedia.org/wiki/Simplified_molecular-input_line-entry_system) to
molecule objects, and then from those obtain sets of atoms and bonds.
SMILES expresses the structure of a given molecule in the form of an ASCII string.
The SMILES string is a compact encoding which, for smaller molecules, is relatively
human-readable. Encoding molecules as a string both alleviates and facilitates database
and/or web searching of a given molecule. RDKit uses algorithms to
accurately transform a given SMILES to a molecule object, which can then
be used to compute a great number of molecular properties/features.
Notice, RDKit is commonly installed via [Conda](https://www.rdkit.org/docs/Install.html).
However, thanks to
[rdkit_platform_wheels](https://github.com/kuelumbus/rdkit_platform_wheels), rdkit
can now (for the sake of this tutorial) be installed easily via pip, as follows:
```
pip -q install rdkit-pypi
```
And for easy and efficient reading of csv files and visualization, the below needs to be
installed:
```
pip -q install pandas
pip -q install Pillow
pip -q install matplotlib
pip -q install pydot
sudo apt-get -qq install graphviz
```
"""
"""
### Import packages
"""
import os
# Temporary suppress tf logs
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import warnings
from rdkit import Chem
from rdkit import RDLogger
from rdkit.Chem.Draw import IPythonConsole
from rdkit.Chem.Draw import MolsToGridImage
# Temporary suppress warnings and RDKit logs
warnings.filterwarnings("ignore")
RDLogger.DisableLog("rdApp.*")
np.random.seed(42)
tf.random.set_seed(42)
"""
## Dataset
Information about the dataset can be found in
[A Bayesian Approach to in Silico Blood-Brain Barrier Penetration Modeling](https://pubs.acs.org/doi/10.1021/ci300124c)
and [MoleculeNet: A Benchmark for Molecular Machine Learning](https://arxiv.org/abs/1703.00564).
The dataset will be downloaded from [MoleculeNet.org](https://moleculenet.org/datasets-1).
### About
The dataset contains **2,050** molecules. Each molecule comes with a **name**, **label**
and **SMILES** string.
The blood-brain barrier (BBB) is a membrane separating the blood from the brain
extracellular fluid, hence blocking out most drugs (molecules) from reaching
the brain. Because of this, the BBBP has been important to study for the development of
new drugs that aim to target the central nervous system. The labels for this
data set are binary (1 or 0) and indicate the permeability of the molecules.
"""
csv_path = keras.utils.get_file(
"BBBP.csv", "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/BBBP.csv"
)
df = pd.read_csv(csv_path, usecols=[1, 2, 3])
df.iloc[96:104]
"""
### Define features
To encode features for atoms and bonds (which we will need later),
we'll define two classes: `AtomFeaturizer` and `BondFeaturizer` respectively.
To reduce the lines of code, i.e., to keep this tutorial short and concise,
only about a handful of (atom and bond) features will be considered: \[atom features\]
[symbol (element)](https://en.wikipedia.org/wiki/Chemical_element),
[number of valence electrons](https://en.wikipedia.org/wiki/Valence_electron),
[number of hydrogen bonds](https://en.wikipedia.org/wiki/Hydrogen),
[orbital hybridization](https://en.wikipedia.org/wiki/Orbital_hybridisation),
\[bond features\]
[(covalent) bond type](https://en.wikipedia.org/wiki/Covalent_bond), and
[conjugation](https://en.wikipedia.org/wiki/Conjugated_system).
"""
class Featurizer:
def __init__(self, allowable_sets):
self.dim = 0
self.features_mapping = {}
for k, s in allowable_sets.items():
s = sorted(list(s))
self.features_mapping[k] = dict(zip(s, range(self.dim, len(s) + self.dim)))
self.dim += len(s)
def encode(self, inputs):
output = np.zeros((self.dim,))
for name_feature, feature_mapping in self.features_mapping.items():
feature = getattr(self, name_feature)(inputs)
if feature not in feature_mapping:
continue
output[feature_mapping[feature]] = 1.0
return output
class AtomFeaturizer(Featurizer):
def __init__(self, allowable_sets):
super().__init__(allowable_sets)
def symbol(self, atom):
return atom.GetSymbol()
def n_valence(self, atom):
return atom.GetTotalValence()
def n_hydrogens(self, atom):
return atom.GetTotalNumHs()
def hybridization(self, atom):
return atom.GetHybridization().name.lower()
class BondFeaturizer(Featurizer):
def __init__(self, allowable_sets):
super().__init__(allowable_sets)
self.dim += 1
def encode(self, bond):
output = np.zeros((self.dim,))
if bond is None:
output[-1] = 1.0
return output
output = super().encode(bond)
return output
def bond_type(self, bond):
return bond.GetBondType().name.lower()
def conjugated(self, bond):
return bond.GetIsConjugated()
atom_featurizer = AtomFeaturizer(
allowable_sets={
"symbol": {"B", "Br", "C", "Ca", "Cl", "F", "H", "I", "N", "Na", "O", "P", "S"},
"n_valence": {0, 1, 2, 3, 4, 5, 6},
"n_hydrogens": {0, 1, 2, 3, 4},
"hybridization": {"s", "sp", "sp2", "sp3"},
}
)
bond_featurizer = BondFeaturizer(
allowable_sets={
"bond_type": {"single", "double", "triple", "aromatic"},
"conjugated": {True, False},
}
)
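"""
As a quick illustration (not required for training), the featurizers above produce
fixed-length multi-hot vectors: the atom featurizer concatenates one-hot encodings of
13 symbols, 7 valence counts, 5 hydrogen counts and 4 hybridization states, while the
bond featurizer covers 4 bond types and 2 conjugation flags, plus one extra slot used
to mark self-loops.
"""
print("Atom feature dimension:", atom_featurizer.dim)  # 13 + 7 + 5 + 4 = 29
print("Bond feature dimension:", bond_featurizer.dim)  # 4 + 2 + 1 = 7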
"""
### Generate graphs
Before we can generate complete graphs from SMILES, we need to implement the following functions:
1. `molecule_from_smiles`, which takes as input a SMILES and returns a molecule object.
This is all handled by RDKit.
2. `graph_from_molecule`, which takes as input a molecule object and returns a graph,
represented as a three-tuple (atom_features, bond_features, pair_indices). For this we
will make use of the classes defined previously.
Finally, we can now implement the function `graphs_from_smiles`, which applies function (1)
and subsequently (2) on all SMILES of the training, validation and test datasets.
Notice: although scaffold splitting is recommended for this data set (see
[here](https://arxiv.org/abs/1703.00564)), for simplicity, simple random splittings were
performed.
"""
def molecule_from_smiles(smiles):
# MolFromSmiles(m, sanitize=True) should be equivalent to
# MolFromSmiles(m, sanitize=False) -> SanitizeMol(m) -> AssignStereochemistry(m, ...)
molecule = Chem.MolFromSmiles(smiles, sanitize=False)
# If sanitization is unsuccessful, catch the error, and try again without
# the sanitization step that caused the error
flag = Chem.SanitizeMol(molecule, catchErrors=True)
if flag != Chem.SanitizeFlags.SANITIZE_NONE:
Chem.SanitizeMol(molecule, sanitizeOps=Chem.SanitizeFlags.SANITIZE_ALL ^ flag)
Chem.AssignStereochemistry(molecule, cleanIt=True, force=True)
return molecule
def graph_from_molecule(molecule):
# Initialize graph
atom_features = []
bond_features = []
pair_indices = []
for atom in molecule.GetAtoms():
atom_features.append(atom_featurizer.encode(atom))
# Add self-loops
pair_indices.append([atom.GetIdx(), atom.GetIdx()])
bond_features.append(bond_featurizer.encode(None))
for neighbor in atom.GetNeighbors():
bond = molecule.GetBondBetweenAtoms(atom.GetIdx(), neighbor.GetIdx())
pair_indices.append([atom.GetIdx(), neighbor.GetIdx()])
bond_features.append(bond_featurizer.encode(bond))
return np.array(atom_features), np.array(bond_features), np.array(pair_indices)
def graphs_from_smiles(smiles_list):
# Initialize graphs
atom_features_list = []
bond_features_list = []
pair_indices_list = []
for smiles in smiles_list:
molecule = molecule_from_smiles(smiles)
atom_features, bond_features, pair_indices = graph_from_molecule(molecule)
atom_features_list.append(atom_features)
bond_features_list.append(bond_features)
pair_indices_list.append(pair_indices)
# Convert lists to ragged tensors for tf.data.Dataset later on
return (
tf.ragged.constant(atom_features_list, dtype=tf.float32),
tf.ragged.constant(bond_features_list, dtype=tf.float32),
tf.ragged.constant(pair_indices_list, dtype=tf.int64),
)
# Shuffle array of indices ranging from 0 to 2049
permuted_indices = np.random.permutation(np.arange(df.shape[0]))
# Train set: 80 % of data
train_index = permuted_indices[: int(df.shape[0] * 0.8)]
x_train = graphs_from_smiles(df.iloc[train_index].smiles)
y_train = df.iloc[train_index].p_np
# Valid set: 19 % of data
valid_index = permuted_indices[int(df.shape[0] * 0.8) : int(df.shape[0] * 0.99)]
x_valid = graphs_from_smiles(df.iloc[valid_index].smiles)
y_valid = df.iloc[valid_index].p_np
# Test set: 1 % of data
test_index = permuted_indices[int(df.shape[0] * 0.99) :]
x_test = graphs_from_smiles(df.iloc[test_index].smiles)
y_test = df.iloc[test_index].p_np
"""
### Test the functions
"""
print(f"Name:\t{df.name[100]}\nSMILES:\t{df.smiles[100]}\nBBBP:\t{df.p_np[100]}")
molecule = molecule_from_smiles(df.iloc[100].smiles)
print("Molecule:")
molecule
"""
"""
graph = graph_from_molecule(molecule)
print("Graph (including self-loops):")
print("\tatom features\t", graph[0].shape)
print("\tbond features\t", graph[1].shape)
print("\tpair indices\t", graph[2].shape)
"""
### Create a `tf.data.Dataset`
In this tutorial, the MPNN implementation will take as input (per iteration) a single graph.
Therefore, given a batch of (sub)graphs (molecules), we need to merge them into a
single graph (we'll refer to this graph as *global graph*).
This global graph is a disconnected graph where each subgraph is
completely separated from the other subgraphs.
"""
def prepare_batch(x_batch, y_batch):
"""Merges (sub)graphs of batch into a single global (disconnected) graph"""
atom_features, bond_features, pair_indices = x_batch
# Obtain number of atoms and bonds for each graph (molecule)
num_atoms = atom_features.row_lengths()
num_bonds = bond_features.row_lengths()
# Obtain partition indices (molecule_indicator), which will be used to
# gather (sub)graphs from global graph in model later on
molecule_indices = tf.range(len(num_atoms))
molecule_indicator = tf.repeat(molecule_indices, num_atoms)
# Merge (sub)graphs into a global (disconnected) graph. Adding 'increment' to
# 'pair_indices' (and merging ragged tensors) actualizes the global graph
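    # Every molecule after the first needs its atom indices shifted by the number of
    # atoms that precede it in the batch; `increment` holds this cumulative offset,
    # repeated once per bond via `gather_indices`.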
gather_indices = tf.repeat(molecule_indices[:-1], num_bonds[1:])
increment = tf.cumsum(num_atoms[:-1])
increment = tf.pad(tf.gather(increment, gather_indices), [(num_bonds[0], 0)])
pair_indices = pair_indices.merge_dims(outer_axis=0, inner_axis=1).to_tensor()
pair_indices = pair_indices + increment[:, tf.newaxis]
atom_features = atom_features.merge_dims(outer_axis=0, inner_axis=1).to_tensor()
bond_features = bond_features.merge_dims(outer_axis=0, inner_axis=1).to_tensor()
return (atom_features, bond_features, pair_indices, molecule_indicator), y_batch
def MPNNDataset(X, y, batch_size=32, shuffle=False):
dataset = tf.data.Dataset.from_tensor_slices((X, (y)))
if shuffle:
dataset = dataset.shuffle(1024)
return dataset.batch(batch_size).map(prepare_batch, -1).prefetch(-1)
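"""
As an illustrative check (the names below are for demonstration only), a single batch
produced by `MPNNDataset` contains the merged, disconnected global graph together
with the `molecule_indicator` that maps every atom back to its molecule:
"""
example_batch = next(iter(MPNNDataset(x_train, y_train, batch_size=4)))
(atoms, bonds, pairs, indicator), labels = example_batch
print("Atom features of the merged graph:", atoms.shape)
print("Bond features of the merged graph:", bonds.shape)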
"""
## Model
The MPNN model can take on various shapes and forms. In this tutorial, we will implement an
MPNN based on the original paper
[Neural Message Passing for Quantum Chemistry](https://arxiv.org/abs/1704.01212) and
[DeepChem's MPNNModel](https://deepchem.readthedocs.io/en/latest/api_reference/models.html#mpnnmodel).
The MPNN of this tutorial consists of three stages: message passing, readout and
classification.
### Message passing
The message passing step itself consists of two parts:
1. The *edge network*, which passes messages from 1-hop neighbors `w_{i}` of `v`
to `v`, based on the edge features between them (`e_{vw_{i}}`),
resulting in an updated node (state) `v'`. `w_{i}` denotes the `i:th` neighbor of
`v`.
2. The *gated recurrent unit* (GRU), which takes as input the most recent node state
and updates it based on previous node states. In
other words, the most recent node state serves as the input to the GRU, while the previous
node states are incorporated within the memory state of the GRU. This allows information
to travel from one node state (e.g., `v`) to another (e.g., `v''`).
Importantly, steps (1) and (2) are repeated for `k` steps, where at each step `1...k`,
the radius (or number of hops) of aggregated information from `v` increases by 1.
"""
class EdgeNetwork(layers.Layer):
def build(self, input_shape):
self.atom_dim = input_shape[0][-1]
self.bond_dim = input_shape[1][-1]
self.kernel = self.add_weight(
shape=(self.bond_dim, self.atom_dim * self.atom_dim),
initializer="glorot_uniform",
name="kernel",
)
self.bias = self.add_weight(
shape=(self.atom_dim * self.atom_dim),
initializer="zeros",
name="bias",
)
self.built = True
def call(self, inputs):
atom_features, bond_features, pair_indices = inputs
# Apply linear transformation to bond features
bond_features = tf.matmul(bond_features, self.kernel) + self.bias
# Reshape for neighborhood aggregation later
bond_features = tf.reshape(bond_features, (-1, self.atom_dim, self.atom_dim))
# Obtain atom features of neighbors
atom_features_neighbors = tf.gather(atom_features, pair_indices[:, 1])
atom_features_neighbors = tf.expand_dims(atom_features_neighbors, axis=-1)
# Apply neighborhood aggregation
transformed_features = tf.matmul(bond_features, atom_features_neighbors)
transformed_features = tf.squeeze(transformed_features, axis=-1)
aggregated_features = tf.math.unsorted_segment_sum(
transformed_features,
pair_indices[:, 0],
num_segments=tf.shape(atom_features)[0],
)
return aggregated_features
class MessagePassing(layers.Layer):
def __init__(self, units, steps=4, **kwargs):
super().__init__(**kwargs)
self.units = units
self.steps = steps
def build(self, input_shape):
self.atom_dim = input_shape[0][-1]
self.message_step = EdgeNetwork()
self.pad_length = max(0, self.units - self.atom_dim)
self.update_step = layers.GRUCell(self.atom_dim + self.pad_length)
self.built = True
def call(self, inputs):
atom_features, bond_features, pair_indices = inputs
# Pad atom features if number of desired units exceeds atom_features dim.
# Alternatively, a dense layer could be used here.
atom_features_updated = tf.pad(atom_features, [(0, 0), (0, self.pad_length)])
# Perform a number of steps of message passing
for i in range(self.steps):
# Aggregate information from neighbors
atom_features_aggregated = self.message_step(
[atom_features_updated, bond_features, pair_indices]
)
# Update node state via a step of GRU
atom_features_updated, _ = self.update_step(
atom_features_aggregated, atom_features_updated
)
return atom_features_updated
"""
### Readout
When the message passing procedure ends, the k-step-aggregated node states are to be partitioned
into subgraphs (corresponding to each molecule in the batch) and subsequently
reduced to graph-level embeddings. In the
[original paper](https://arxiv.org/abs/1704.01212), a
[set-to-set layer](https://arxiv.org/abs/1511.06391) was used for this purpose.
In this tutorial however, a transformer encoder + average pooling will be used. Specifically:
* the k-step-aggregated node states will be partitioned into the subgraphs
(corresponding to each molecule in the batch);
* each subgraph will then be padded to match the subgraph with the greatest number of nodes, followed
by a `tf.stack(...)`;
* the (stacked, padded) tensor, encoding the subgraphs (each subgraph containing a set of node states), is
masked to make sure the paddings don't interfere with training;
* finally, the tensor is passed to the transformer followed by average pooling.
"""
class PartitionPadding(layers.Layer):
def __init__(self, batch_size, **kwargs):
super().__init__(**kwargs)
self.batch_size = batch_size
def call(self, inputs):
atom_features, molecule_indicator = inputs
# Obtain subgraphs
atom_features_partitioned = tf.dynamic_partition(
atom_features, molecule_indicator, self.batch_size
)
# Pad and stack subgraphs
num_atoms = [tf.shape(f)[0] for f in atom_features_partitioned]
max_num_atoms = tf.reduce_max(num_atoms)
atom_features_stacked = tf.stack(
[
tf.pad(f, [(0, max_num_atoms - n), (0, 0)])
for f, n in zip(atom_features_partitioned, num_atoms)
],
axis=0,
)
# Remove empty subgraphs (usually for last batch in dataset)
gather_indices = tf.where(tf.reduce_sum(atom_features_stacked, (1, 2)) != 0)
gather_indices = tf.squeeze(gather_indices, axis=-1)
return tf.gather(atom_features_stacked, gather_indices, axis=0)
class TransformerEncoderReadout(layers.Layer):
def __init__(
self, num_heads=8, embed_dim=64, dense_dim=512, batch_size=32, **kwargs
):
super().__init__(**kwargs)
self.partition_padding = PartitionPadding(batch_size)
self.attention = layers.MultiHeadAttention(num_heads, embed_dim)
self.dense_proj = keras.Sequential(
[
layers.Dense(dense_dim, activation="relu"),
layers.Dense(embed_dim),
]
)
self.layernorm_1 = layers.LayerNormalization()
self.layernorm_2 = layers.LayerNormalization()
self.average_pooling = layers.GlobalAveragePooling1D()
def call(self, inputs):
x = self.partition_padding(inputs)
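        # Padded (all-zero) atom rows must not be attended to: the mask is True for
        # real atoms and is broadcast over attention heads and query positions.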
padding_mask = tf.reduce_any(tf.not_equal(x, 0.0), axis=-1)
padding_mask = padding_mask[:, tf.newaxis, tf.newaxis, :]
attention_output = self.attention(x, x, attention_mask=padding_mask)
proj_input = self.layernorm_1(x + attention_output)
proj_output = self.layernorm_2(proj_input + self.dense_proj(proj_input))
return self.average_pooling(proj_output)
"""
### Message Passing Neural Network (MPNN)
It is now time to complete the MPNN model. In addition to the message passing
and readout, a two-layer classification network will be implemented to make
predictions of BBBP.
"""
def MPNNModel(
atom_dim,
bond_dim,
batch_size=32,
message_units=64,
message_steps=4,
num_attention_heads=8,
dense_units=512,
):
atom_features = layers.Input((atom_dim), dtype="float32", name="atom_features")
bond_features = layers.Input((bond_dim), dtype="float32", name="bond_features")
pair_indices = layers.Input((2), dtype="int32", name="pair_indices")
molecule_indicator = layers.Input((), dtype="int32", name="molecule_indicator")
x = MessagePassing(message_units, message_steps)(
[atom_features, bond_features, pair_indices]
)
x = TransformerEncoderReadout(
num_attention_heads, message_units, dense_units, batch_size
)([x, molecule_indicator])
x = layers.Dense(dense_units, activation="relu")(x)
x = layers.Dense(1, activation="sigmoid")(x)
model = keras.Model(
inputs=[atom_features, bond_features, pair_indices, molecule_indicator],
outputs=[x],
)
return model
mpnn = MPNNModel(
atom_dim=x_train[0][0][0].shape[0],
bond_dim=x_train[1][0][0].shape[0],
)
mpnn.compile(
loss=keras.losses.BinaryCrossentropy(),
optimizer=keras.optimizers.Adam(learning_rate=5e-4),
metrics=[keras.metrics.AUC(name="AUC")],
)
keras.utils.plot_model(mpnn, show_dtype=True, show_shapes=True)
"""
### Training
"""
train_dataset = MPNNDataset(x_train, y_train)
valid_dataset = MPNNDataset(x_valid, y_valid)
test_dataset = MPNNDataset(x_test, y_test)
history = mpnn.fit(
train_dataset,
validation_data=valid_dataset,
epochs=40,
verbose=2,
class_weight={0: 2.0, 1: 0.5},
)
plt.figure(figsize=(10, 6))
plt.plot(history.history["AUC"], label="train AUC")
plt.plot(history.history["val_AUC"], label="valid AUC")
plt.xlabel("Epochs", fontsize=16)
plt.ylabel("AUC", fontsize=16)
plt.legend(fontsize=16)
"""
### Predicting
"""
molecules = [molecule_from_smiles(df.smiles.values[index]) for index in test_index]
y_true = [df.p_np.values[index] for index in test_index]
y_pred = tf.squeeze(mpnn.predict(test_dataset), axis=1)
legends = [f"y_true/y_pred = {y_true[i]}/{y_pred[i]:.2f}" for i in range(len(y_true))]
MolsToGridImage(molecules, molsPerRow=4, legends=legends)
"""
## Conclusions
In this tutorial, we demonstrated a message passing neural network (MPNN) to
predict blood-brain barrier permeability (BBBP) for a number of different molecules. We
first had to construct graphs from SMILES, then build a Keras model that could
operate on these graphs, and finally train the model to make the predictions.
Example available on HuggingFace
| Trained Model | Demo |
| :--: | :--: |
| [](https://huggingface.co/keras-io/MPNN-for-molecular-property-prediction) | [](https://huggingface.co/spaces/keras-io/molecular-property-prediction) |
"""
|
keras-io/examples/graph/mpnn-molecular-graphs.py/0
|
{
"file_path": "keras-io/examples/graph/mpnn-molecular-graphs.py",
"repo_id": "keras-io",
"token_count": 9085
}
| 107 |
<jupyter_start><jupyter_text>Creating TFRecords**Author:** [Dimitre Oliveira](https://www.linkedin.com/in/dimitre-oliveira-7a1a0113a/)**Date created:** 2021/02/27**Last modified:** 2023/12/20**Description:** Converting data to the TFRecord format. IntroductionThe TFRecord format is a simple format for storing a sequence of binary records.Converting your data into TFRecord has many advantages, such as:- **More efficient storage**: the TFRecord data can take up less space than the originaldata; it can also be partitioned into multiple files.- **Fast I/O**: the TFRecord format can be read with parallel I/O operations, which isuseful for [TPUs](https://www.tensorflow.org/guide/tpu) or multiple hosts.- **Self-contained files**: the TFRecord data can be read from a single source—forexample, the [COCO2017](https://cocodataset.org/) dataset originally stores data intwo folders ("images" and "annotations").An important use case of the TFRecord data format is training on TPUs. First, TPUs arefast enough to benefit from optimized I/O operations. In addition, TPUs requiredata to be stored remotely (e.g. on Google Cloud Storage) and using the TFRecord formatmakes it easier to load the data without batch-downloading.Performance using the TFRecord format can be further improved if you also useit with the [tf.data](https://www.tensorflow.org/guide/data) API.In this example you will learn how to convert data of different types (image, text, andnumeric) into TFRecord.**Reference**- [TFRecord and tf.train.Example](https://www.tensorflow.org/tutorials/load_data/tfrecord) Dependencies<jupyter_code>import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import keras
import json
import pprint
import tensorflow as tf
import matplotlib.pyplot as plt<jupyter_output><empty_output><jupyter_text>Download the COCO2017 datasetWe will be using the [COCO2017](https://cocodataset.org/) dataset, because it has manydifferent types of features, including images, floating point data, and lists.It will serve as a good example of how to encode different features into the TFRecordformat.This dataset has two sets of fields: images and annotation meta-data.The images are a collection of JPG files and the meta-data are stored in a JSON filewhich, according to the [official site](https://cocodataset.org/format-data),contains the following properties:```id: int,image_id: int,category_id: int,segmentation: RLE or [polygon], object segmentation maskbbox: [x,y,width,height], object bounding box coordinatesarea: float, area of the bounding boxiscrowd: 0 or 1, is single object or a collection```<jupyter_code>root_dir = "datasets"
tfrecords_dir = "tfrecords"
images_dir = os.path.join(root_dir, "val2017")
annotations_dir = os.path.join(root_dir, "annotations")
annotation_file = os.path.join(annotations_dir, "instances_val2017.json")
images_url = "http://images.cocodataset.org/zips/val2017.zip"
annotations_url = (
"http://images.cocodataset.org/annotations/annotations_trainval2017.zip"
)
# Download image files
if not os.path.exists(images_dir):
image_zip = keras.utils.get_file(
"images.zip",
cache_dir=os.path.abspath("."),
origin=images_url,
extract=True,
)
os.remove(image_zip)
# Download caption annotation files
if not os.path.exists(annotations_dir):
annotation_zip = keras.utils.get_file(
"captions.zip",
cache_dir=os.path.abspath("."),
origin=annotations_url,
extract=True,
)
os.remove(annotation_zip)
print("The COCO dataset has been downloaded and extracted successfully.")
with open(annotation_file, "r") as f:
annotations = json.load(f)["annotations"]
print(f"Number of images: {len(annotations)}")<jupyter_output><empty_output><jupyter_text>Contents of the COCO2017 dataset<jupyter_code>pprint.pprint(annotations[60])<jupyter_output><empty_output><jupyter_text>Parameters`num_samples` is the number of data samples on each TFRecord file.`num_tfrecords` is total number of TFRecords that we will create.<jupyter_code>num_samples = 4096
num_tfrecords = len(annotations) // num_samples
if len(annotations) % num_samples:
num_tfrecords += 1 # add one record if there are any remaining samples
if not os.path.exists(tfrecords_dir):
os.makedirs(tfrecords_dir) # creating TFRecords output folder<jupyter_output><empty_output><jupyter_text>Define TFRecords helper functions<jupyter_code>def image_feature(value):
"""Returns a bytes_list from a string / byte."""
return tf.train.Feature(
bytes_list=tf.train.BytesList(value=[tf.io.encode_jpeg(value).numpy()])
)
def bytes_feature(value):
"""Returns a bytes_list from a string / byte."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value.encode()]))
def float_feature(value):
"""Returns a float_list from a float / double."""
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
def int64_feature(value):
"""Returns an int64_list from a bool / enum / int / uint."""
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def float_feature_list(value):
"""Returns a list of float_list from a float / double."""
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def create_example(image, path, example):
feature = {
"image": image_feature(image),
"path": bytes_feature(path),
"area": float_feature(example["area"]),
"bbox": float_feature_list(example["bbox"]),
"category_id": int64_feature(example["category_id"]),
"id": int64_feature(example["id"]),
"image_id": int64_feature(example["image_id"]),
}
return tf.train.Example(features=tf.train.Features(feature=feature))
def parse_tfrecord_fn(example):
feature_description = {
"image": tf.io.FixedLenFeature([], tf.string),
"path": tf.io.FixedLenFeature([], tf.string),
"area": tf.io.FixedLenFeature([], tf.float32),
"bbox": tf.io.VarLenFeature(tf.float32),
"category_id": tf.io.FixedLenFeature([], tf.int64),
"id": tf.io.FixedLenFeature([], tf.int64),
"image_id": tf.io.FixedLenFeature([], tf.int64),
}
example = tf.io.parse_single_example(example, feature_description)
example["image"] = tf.io.decode_jpeg(example["image"], channels=3)
example["bbox"] = tf.sparse.to_dense(example["bbox"])
return example<jupyter_output><empty_output><jupyter_text>Generate data in the TFRecord formatLet's generate the COCO2017 data in the TFRecord format. The format will be`file_{number}.tfrec` (this is optional, but including the number sequences in the filenames can make counting easier).<jupyter_code>for tfrec_num in range(num_tfrecords):
samples = annotations[(tfrec_num * num_samples) : ((tfrec_num + 1) * num_samples)]
with tf.io.TFRecordWriter(
tfrecords_dir + "/file_%.2i-%i.tfrec" % (tfrec_num, len(samples))
) as writer:
for sample in samples:
image_path = f"{images_dir}/{sample['image_id']:012d}.jpg"
image = tf.io.decode_jpeg(tf.io.read_file(image_path))
example = create_example(image, image_path, sample)
writer.write(example.SerializeToString())<jupyter_output><empty_output><jupyter_text>Explore one sample from the generated TFRecord<jupyter_code>raw_dataset = tf.data.TFRecordDataset(f"{tfrecords_dir}/file_00-{num_samples}.tfrec")
parsed_dataset = raw_dataset.map(parse_tfrecord_fn)
for features in parsed_dataset.take(1):
for key in features.keys():
if key != "image":
print(f"{key}: {features[key]}")
print(f"Image shape: {features['image'].shape}")
plt.figure(figsize=(7, 7))
plt.imshow(features["image"].numpy())
plt.show()<jupyter_output><empty_output><jupyter_text>Train a simple model using the generated TFRecordsAnother advantage of TFRecord is that you are able to add many features to it and lateruse only a few of them, in this case, we are going to use only `image` and `category_id`. Define dataset helper functions<jupyter_code>def prepare_sample(features):
image = keras.ops.image.resize(features["image"], size=(224, 224))
return image, features["category_id"]
def get_dataset(filenames, batch_size):
dataset = (
tf.data.TFRecordDataset(filenames, num_parallel_reads=AUTOTUNE)
.map(parse_tfrecord_fn, num_parallel_calls=AUTOTUNE)
.map(prepare_sample, num_parallel_calls=AUTOTUNE)
.shuffle(batch_size * 10)
.batch(batch_size)
.prefetch(AUTOTUNE)
)
return dataset
train_filenames = tf.io.gfile.glob(f"{tfrecords_dir}/*.tfrec")
batch_size = 32
epochs = 1
steps_per_epoch = 50
AUTOTUNE = tf.data.AUTOTUNE
input_tensor = keras.layers.Input(shape=(224, 224, 3), name="image")
model = keras.applications.EfficientNetB0(
input_tensor=input_tensor, weights=None, classes=91
)
model.compile(
optimizer=keras.optimizers.Adam(),
loss=keras.losses.SparseCategoricalCrossentropy(),
metrics=[keras.metrics.SparseCategoricalAccuracy()],
)
model.fit(
x=get_dataset(train_filenames, batch_size),
epochs=epochs,
steps_per_epoch=steps_per_epoch,
verbose=1,
)<jupyter_output><empty_output>
|
keras-io/examples/keras_recipes/ipynb/creating_tfrecords.ipynb/0
|
{
"file_path": "keras-io/examples/keras_recipes/ipynb/creating_tfrecords.ipynb",
"repo_id": "keras-io",
"token_count": 3347
}
| 108 |
# Knowledge distillation recipes
**Author:** [Sayak Paul](https://twitter.com/RisingSayak)<br>
**Date created:** 2021/08/01<br>
**Last modified:** 2021/08/01<br>
**Description:** Training better student models via knowledge distillation with function matching.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/keras_recipes/ipynb/better_knowledge_distillation.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/keras_recipes/better_knowledge_distillation.py)
---
## Introduction
Knowledge distillation ([Hinton et al.](https://arxiv.org/abs/1503.02531)) is a technique
that enables us to compress larger models into smaller ones. This allows us to reap the
benefits of high performing larger models, while reducing storage and memory costs and
achieving higher inference speed:
* Smaller models -> smaller memory footprint
* Reduced complexity -> fewer floating-point operations (FLOPs)
In [Knowledge distillation: A good teacher is patient and consistent](https://arxiv.org/abs/2106.05237),
Beyer et al. investigate various existing setups for performing knowledge distillation
and show that all of them lead to sub-optimal performance. Due to this,
practitioners often settle for other alternatives (quantization, pruning, weight
clustering, etc.) when developing production systems that are resource-constrained.
Beyer et al. investigate how we can improve the student models that come out
of the knowledge distillation process and always match the performance of
their teacher models. In this example, we will study the recipes introduced by them, using
the [Flowers102 dataset](https://www.robots.ox.ac.uk/~vgg/data/flowers/102/). As a
reference, with these recipes, the authors were able to produce a ResNet50 model that
achieves 82.8% accuracy on the ImageNet-1k dataset.
In case you need a refresher on knowledge distillation and want to study how it is
implemented in Keras, you can refer to
[this example](https://keras.io/examples/vision/knowledge_distillation/).
You can also follow
[this example](https://keras.io/examples/vision/consistency_training/)
that shows an extension of knowledge distillation applied to consistency training.
To follow this example, you will need TensorFlow 2.5 or higher as well as TensorFlow Addons,
which can be installed using the command below:
```python
!pip install -q tensorflow-addons
```
---
## Imports
```python
from tensorflow import keras
import tensorflow_addons as tfa
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import tensorflow_datasets as tfds
tfds.disable_progress_bar()
```
---
## Hyperparameters and constants
```python
AUTO = tf.data.AUTOTUNE # Used to dynamically adjust parallelism.
BATCH_SIZE = 64
# Comes from Table 4 and "Training setup" section.
TEMPERATURE = 10 # Used to soften the logits before they go to softmax.
INIT_LR = 0.003 # Initial learning rate that will be decayed over the training period.
WEIGHT_DECAY = 0.001 # Used for regularization.
CLIP_THRESHOLD = 1.0 # Used for clipping the gradients by L2-norm.
# We will first resize the training images to a bigger size and then we will take
# random crops of a lower size.
BIGGER = 160
RESIZE = 128
```
---
## Load the Flowers102 dataset
```python
train_ds, validation_ds, test_ds = tfds.load(
"oxford_flowers102", split=["train", "validation", "test"], as_supervised=True
)
print(f"Number of training examples: {train_ds.cardinality()}.")
print(
f"Number of validation examples: {validation_ds.cardinality()}."
)
print(f"Number of test examples: {test_ds.cardinality()}.")
```
<div class="k-default-codeblock">
```
Number of training examples: 1020.
Number of validation examples: 1020.
Number of test examples: 6149.
```
</div>
---
## Teacher model
As is common with any distillation technique, it's important to first train a
well-performing teacher model which is usually larger than the subsequent student model.
The authors distill a BiT ResNet152x2 model (teacher) into a BiT ResNet50 model
(student).
BiT stands for Big Transfer and was introduced in
[Big Transfer (BiT): General Visual Representation Learning](https://arxiv.org/abs/1912.11370).
BiT variants of ResNets use Group Normalization ([Wu et al.](https://arxiv.org/abs/1803.08494))
and Weight Standardization ([Qiao et al.](https://arxiv.org/abs/1903.10520v2))
in place of Batch Normalization ([Ioffe et al.](https://arxiv.org/abs/1502.03167)).
In order to limit the time it takes to run this example, we will be using a BiT
ResNet101x3 already trained on the Flowers102 dataset. You can refer to
[this notebook](https://github.com/sayakpaul/FunMatch-Distillation/blob/main/train_bit.ipynb)
to learn more about the training process. This model reaches 98.18% accuracy on the
test set of Flowers102.
The model weights are hosted on Kaggle as a dataset.
To download the weights, follow these steps:
1. Create an account on Kaggle [here](https://www.kaggle.com).
2. Go to the "Account" tab of your [user profile](https://www.kaggle.com/account).
3. Select "Create API Token". This will trigger the download of `kaggle.json`, a file
containing your API credentials.
4. From that JSON file, copy your Kaggle username and API key.
Now run the following:
```python
import os
os.environ["KAGGLE_USERNAME"] = "" # TODO: enter your Kaggle user name here
os.environ["KAGGLE_KEY"] = "" # TODO: enter your Kaggle key here
```
Once the environment variables are set, run:
```
$ kaggle datasets download -d spsayakpaul/bitresnet101x3flowers102
$ unzip -qq bitresnet101x3flowers102.zip
```
This should generate a folder named `T-r101x3-128` which is essentially a teacher
[`SavedModel`](https://www.tensorflow.org/guide/saved_model).
```python
import os
os.environ["KAGGLE_USERNAME"] = "" # TODO: enter your Kaggle user name here
os.environ["KAGGLE_KEY"] = "" # TODO: enter your Kaggle API key here
```
```python
!kaggle datasets download -d spsayakpaul/bitresnet101x3flowers102
```
```python
!unzip -qq bitresnet101x3flowers102.zip
```
```python
# Since the teacher model is not going to be trained further we make
# it non-trainable.
teacher_model = keras.models.load_model(
"/home/jupyter/keras-io/examples/keras_recipes/T-r101x3-128"
)
teacher_model.trainable = False
teacher_model.summary()
```
<div class="k-default-codeblock">
```
Model: "my_bi_t_model_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
dense_1 (Dense) multiple 626790
_________________________________________________________________
keras_layer_1 (KerasLayer) multiple 381789888
=================================================================
Total params: 382,416,678
Trainable params: 0
Non-trainable params: 382,416,678
_________________________________________________________________
```
</div>
---
## The "function matching" recipe
To train a high-quality student model, the authors propose the following changes to the
student training workflow:
* Use an aggressive variant of MixUp ([Zhang et al.](https://arxiv.org/abs/1710.09412)).
This is done by sampling the `alpha` parameter from a uniform distribution instead of a
beta distribution. MixUp is used here in order to help the student model capture the
function underlying the teacher model. MixUp linearly interpolates between different
samples across the data manifold. The rationale is that if the student is trained to
fit these interpolated samples, it should be able to match the teacher model better
(a short snippet below illustrates the sampling change). To incorporate more
invariance, MixUp is coupled with "Inception-style" cropping
([Szegedy et al.](https://arxiv.org/abs/1409.4842)). This is where the
"function matching" term makes its way in the
[original paper](https://arxiv.org/abs/2106.05237).
* Unlike other works ([Noisy Student Training](https://arxiv.org/abs/1911.04252) for
example), both the teacher and student models receive the same copy of an image, which is
mixed up and randomly cropped. By providing the same inputs to both the models, the
authors make the teacher consistent with the student.
* With MixUp, we are essentially introducing a strong form of regularization when
training the student. As such, it should be trained for a
relatively long period of time (1000 epochs at least). Since the student is trained with
strong regularization, the risk of overfitting due to a longer training
schedule is also mitigated.
In summary, one needs to be consistent and patient while training the student model.
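As a quick illustration of the first point (this snippet is not from the paper's code;
it only sketches the change in how the mixing coefficient is sampled), compare the usual
Beta-sampled coefficient with the uniform sampling used for function matching:

```python
import numpy as np
import tensorflow as tf

# Conventional MixUp: lambda ~ Beta(alpha, alpha). With a small alpha (e.g. 0.2),
# samples concentrate near 0 and 1, i.e. most mixes are mild.
lam_beta = np.random.beta(0.2, 0.2)

# "Function matching" MixUp: lambda ~ Uniform(0, 1), so heavily blended images
# are just as likely as mild ones.
lam_uniform = tf.random.uniform([], minval=0.0, maxval=1.0)

print(float(lam_beta), float(lam_uniform))
```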
---
## Data input pipeline
```python
def mixup(images, labels):
alpha = tf.random.uniform([], 0, 1)
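    # Mix each image with the batch taken in reverse order; this pairs different
    # samples together without needing an extra shuffle.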
mixedup_images = alpha * images + (1 - alpha) * tf.reverse(images, axis=[0])
# The labels do not matter here since they are NOT used during
# training.
return mixedup_images, labels
def preprocess_image(image, label, train=True):
image = tf.cast(image, tf.float32) / 255.0
if train:
image = tf.image.resize(image, (BIGGER, BIGGER))
image = tf.image.random_crop(image, (RESIZE, RESIZE, 3))
image = tf.image.random_flip_left_right(image)
else:
# Central fraction amount is from here:
# https://git.io/J8Kda.
image = tf.image.central_crop(image, central_fraction=0.875)
image = tf.image.resize(image, (RESIZE, RESIZE))
return image, label
def prepare_dataset(dataset, train=True, batch_size=BATCH_SIZE):
if train:
dataset = dataset.map(preprocess_image, num_parallel_calls=AUTO)
dataset = dataset.shuffle(BATCH_SIZE * 10)
else:
dataset = dataset.map(
lambda x, y: (preprocess_image(x, y, train)), num_parallel_calls=AUTO
)
dataset = dataset.batch(batch_size)
if train:
dataset = dataset.map(mixup, num_parallel_calls=AUTO)
dataset = dataset.prefetch(AUTO)
return dataset
```
Note that for brevity, we used mild crops for the training set but in practice
"Inception-style" preprocessing should be applied. You can refer to
[this script](https://github.com/sayakpaul/FunMatch-Distillation/blob/main/crop_resize.py)
for a closer implementation. Also, _**the ground-truth labels are not used for
training the student.**_
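As a reference point, here is a simplified sketch (not taken from the linked script; the
helper name and parameter values are illustrative choices) of what an "Inception-style"
random crop can look like with `tf.image.sample_distorted_bounding_box`:

```python
def inception_style_crop(image, target_size=RESIZE):
    # Sample a crop window covering 5%-100% of the image area with an aspect
    # ratio in [3/4, 4/3], then resize the crop to the target resolution.
    begin, size, _ = tf.image.sample_distorted_bounding_box(
        tf.shape(image),
        bounding_boxes=tf.constant([[[0.0, 0.0, 1.0, 1.0]]], dtype=tf.float32),
        min_object_covered=0.1,
        aspect_ratio_range=(3 / 4, 4 / 3),
        area_range=(0.05, 1.0),
        max_attempts=100,
        use_image_if_no_bounding_boxes=True,
    )
    crop = tf.slice(image, begin, size)
    return tf.image.resize(crop, (target_size, target_size))
```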
```python
train_ds = prepare_dataset(train_ds, True)
validation_ds = prepare_dataset(validation_ds, False)
test_ds = prepare_dataset(test_ds, False)
```
---
## Visualization
```python
sample_images, _ = next(iter(train_ds))
plt.figure(figsize=(10, 10))
for n in range(25):
ax = plt.subplot(5, 5, n + 1)
plt.imshow(sample_images[n].numpy())
plt.axis("off")
plt.show()
```

---
## Student model
For the purpose of this example, we will use the standard ResNet50V2
([He et al.](https://arxiv.org/abs/1603.05027)).
```python
def get_resnetv2():
resnet_v2 = keras.applications.ResNet50V2(
weights=None,
input_shape=(RESIZE, RESIZE, 3),
classes=102,
classifier_activation="linear",
)
return resnet_v2
get_resnetv2().count_params()
```
<div class="k-default-codeblock">
```
23773798
```
</div>
Compared to the teacher model, this model has 358 Million fewer parameters.
---
## Distillation utility
We will reuse some code from
[this example](https://keras.io/examples/vision/knowledge_distillation/)
on knowledge distillation.
```python
class Distiller(tf.keras.Model):
def __init__(self, student, teacher):
super().__init__()
self.student = student
self.teacher = teacher
self.loss_tracker = keras.metrics.Mean(name="distillation_loss")
@property
def metrics(self):
metrics = super().metrics
metrics.append(self.loss_tracker)
return metrics
def compile(
self, optimizer, metrics, distillation_loss_fn, temperature=TEMPERATURE,
):
super().compile(optimizer=optimizer, metrics=metrics)
self.distillation_loss_fn = distillation_loss_fn
self.temperature = temperature
def train_step(self, data):
# Unpack data
x, _ = data
# Forward pass of teacher
teacher_predictions = self.teacher(x, training=False)
with tf.GradientTape() as tape:
# Forward pass of student
student_predictions = self.student(x, training=True)
# Compute loss
distillation_loss = self.distillation_loss_fn(
tf.nn.softmax(teacher_predictions / self.temperature, axis=1),
tf.nn.softmax(student_predictions / self.temperature, axis=1),
)
# Compute gradients
trainable_vars = self.student.trainable_variables
gradients = tape.gradient(distillation_loss, trainable_vars)
# Update weights
self.optimizer.apply_gradients(zip(gradients, trainable_vars))
# Report progress
self.loss_tracker.update_state(distillation_loss)
return {"distillation_loss": self.loss_tracker.result()}
def test_step(self, data):
# Unpack data
x, y = data
# Forward passes
teacher_predictions = self.teacher(x, training=False)
student_predictions = self.student(x, training=False)
# Calculate the loss
distillation_loss = self.distillation_loss_fn(
tf.nn.softmax(teacher_predictions / self.temperature, axis=1),
tf.nn.softmax(student_predictions / self.temperature, axis=1),
)
# Report progress
self.loss_tracker.update_state(distillation_loss)
self.compiled_metrics.update_state(y, student_predictions)
results = {m.name: m.result() for m in self.metrics}
return results
```
---
## Learning rate schedule
A warmup cosine learning rate schedule is used in the paper. This schedule is also
typical for many pre-training methods, especially in computer vision.
```python
# Some code is taken from:
# https://www.kaggle.com/ashusma/training-rfcx-tensorflow-tpu-effnet-b2.
class WarmUpCosine(keras.optimizers.schedules.LearningRateSchedule):
def __init__(
self, learning_rate_base, total_steps, warmup_learning_rate, warmup_steps
):
super().__init__()
self.learning_rate_base = learning_rate_base
self.total_steps = total_steps
self.warmup_learning_rate = warmup_learning_rate
self.warmup_steps = warmup_steps
self.pi = tf.constant(np.pi)
def __call__(self, step):
if self.total_steps < self.warmup_steps:
raise ValueError("Total_steps must be larger or equal to warmup_steps.")
cos_annealed_lr = tf.cos(
self.pi
* (tf.cast(step, tf.float32) - self.warmup_steps)
/ float(self.total_steps - self.warmup_steps)
)
learning_rate = 0.5 * self.learning_rate_base * (1 + cos_annealed_lr)
if self.warmup_steps > 0:
if self.learning_rate_base < self.warmup_learning_rate:
raise ValueError(
"Learning_rate_base must be larger or equal to "
"warmup_learning_rate."
)
slope = (
self.learning_rate_base - self.warmup_learning_rate
) / self.warmup_steps
warmup_rate = slope * tf.cast(step, tf.float32) + self.warmup_learning_rate
learning_rate = tf.where(
step < self.warmup_steps, warmup_rate, learning_rate
)
return tf.where(
step > self.total_steps, 0.0, learning_rate, name="learning_rate"
)
```
We can now plot a graph of the learning rates generated by this schedule.
```python
ARTIFICIAL_EPOCHS = 1000
ARTIFICIAL_BATCH_SIZE = 512
DATASET_NUM_TRAIN_EXAMPLES = 1020
TOTAL_STEPS = int(
DATASET_NUM_TRAIN_EXAMPLES / ARTIFICIAL_BATCH_SIZE * ARTIFICIAL_EPOCHS
)
scheduled_lrs = WarmUpCosine(
learning_rate_base=INIT_LR,
total_steps=TOTAL_STEPS,
warmup_learning_rate=0.0,
warmup_steps=1500,
)
lrs = [scheduled_lrs(step) for step in range(TOTAL_STEPS)]
plt.plot(lrs)
plt.xlabel("Step", fontsize=14)
plt.ylabel("LR", fontsize=14)
plt.show()
```

The original paper uses at least 1000 epochs and a batch size of 512 to perform
"function matching". The objective of this example is to present a workflow to
implement the recipe and not to demonstrate the results when they are applied at full scale.
However, these recipes will transfer to the original settings from the paper. Please
refer to [this repository](https://github.com/sayakpaul/FunMatch-Distillation) if you are
interested in finding out more.
---
## Training
```python
optimizer = tfa.optimizers.AdamW(
weight_decay=WEIGHT_DECAY, learning_rate=scheduled_lrs, clipnorm=CLIP_THRESHOLD
)
student_model = get_resnetv2()
distiller = Distiller(student=student_model, teacher=teacher_model)
distiller.compile(
optimizer,
metrics=[keras.metrics.SparseCategoricalAccuracy()],
distillation_loss_fn=keras.losses.KLDivergence(),
temperature=TEMPERATURE,
)
history = distiller.fit(
train_ds,
steps_per_epoch=int(np.ceil(DATASET_NUM_TRAIN_EXAMPLES / BATCH_SIZE)),
validation_data=validation_ds,
epochs=30, # This should be at least 1000.
)
student = distiller.student
student_model.compile(metrics=["accuracy"])
_, top1_accuracy = student.evaluate(test_ds)
print(f"Top-1 accuracy on the test set: {round(top1_accuracy * 100, 2)}%")
```
<div class="k-default-codeblock">
```
Epoch 1/30
16/16 [==============================] - 74s 3s/step - distillation_loss: 0.0070 - val_sparse_categorical_accuracy: 0.0039 - val_distillation_loss: 0.0061
Epoch 2/30
16/16 [==============================] - 37s 2s/step - distillation_loss: 0.0059 - val_sparse_categorical_accuracy: 0.0098 - val_distillation_loss: 0.0061
Epoch 3/30
16/16 [==============================] - 37s 2s/step - distillation_loss: 0.0049 - val_sparse_categorical_accuracy: 0.0098 - val_distillation_loss: 0.0060
Epoch 4/30
16/16 [==============================] - 37s 2s/step - distillation_loss: 0.0048 - val_sparse_categorical_accuracy: 0.0098 - val_distillation_loss: 0.0060
Epoch 5/30
16/16 [==============================] - 37s 2s/step - distillation_loss: 0.0043 - val_sparse_categorical_accuracy: 0.0098 - val_distillation_loss: 0.0060
Epoch 6/30
16/16 [==============================] - 37s 2s/step - distillation_loss: 0.0041 - val_sparse_categorical_accuracy: 0.0108 - val_distillation_loss: 0.0060
Epoch 7/30
16/16 [==============================] - 37s 2s/step - distillation_loss: 0.0038 - val_sparse_categorical_accuracy: 0.0098 - val_distillation_loss: 0.0061
Epoch 8/30
16/16 [==============================] - 37s 2s/step - distillation_loss: 0.0040 - val_sparse_categorical_accuracy: 0.0098 - val_distillation_loss: 0.0062
Epoch 9/30
16/16 [==============================] - 37s 2s/step - distillation_loss: 0.0039 - val_sparse_categorical_accuracy: 0.0098 - val_distillation_loss: 0.0063
Epoch 10/30
16/16 [==============================] - 37s 2s/step - distillation_loss: 0.0035 - val_sparse_categorical_accuracy: 0.0098 - val_distillation_loss: 0.0064
Epoch 11/30
16/16 [==============================] - 37s 2s/step - distillation_loss: 0.0041 - val_sparse_categorical_accuracy: 0.0098 - val_distillation_loss: 0.0064
Epoch 12/30
16/16 [==============================] - 37s 2s/step - distillation_loss: 0.0039 - val_sparse_categorical_accuracy: 0.0098 - val_distillation_loss: 0.0067
Epoch 13/30
16/16 [==============================] - 37s 2s/step - distillation_loss: 0.0039 - val_sparse_categorical_accuracy: 0.0098 - val_distillation_loss: 0.0067
Epoch 14/30
16/16 [==============================] - 37s 2s/step - distillation_loss: 0.0036 - val_sparse_categorical_accuracy: 0.0098 - val_distillation_loss: 0.0066
Epoch 15/30
16/16 [==============================] - 37s 2s/step - distillation_loss: 0.0037 - val_sparse_categorical_accuracy: 0.0098 - val_distillation_loss: 0.0065
Epoch 16/30
16/16 [==============================] - 37s 2s/step - distillation_loss: 0.0038 - val_sparse_categorical_accuracy: 0.0098 - val_distillation_loss: 0.0068
Epoch 17/30
16/16 [==============================] - 37s 2s/step - distillation_loss: 0.0039 - val_sparse_categorical_accuracy: 0.0098 - val_distillation_loss: 0.0066
Epoch 18/30
16/16 [==============================] - 37s 2s/step - distillation_loss: 0.0038 - val_sparse_categorical_accuracy: 0.0098 - val_distillation_loss: 0.0064
Epoch 19/30
16/16 [==============================] - 37s 2s/step - distillation_loss: 0.0035 - val_sparse_categorical_accuracy: 0.0098 - val_distillation_loss: 0.0071
Epoch 20/30
16/16 [==============================] - 37s 2s/step - distillation_loss: 0.0038 - val_sparse_categorical_accuracy: 0.0098 - val_distillation_loss: 0.0066
Epoch 21/30
16/16 [==============================] - 37s 2s/step - distillation_loss: 0.0038 - val_sparse_categorical_accuracy: 0.0098 - val_distillation_loss: 0.0068
Epoch 22/30
16/16 [==============================] - 37s 2s/step - distillation_loss: 0.0034 - val_sparse_categorical_accuracy: 0.0098 - val_distillation_loss: 0.0073
Epoch 23/30
16/16 [==============================] - 37s 2s/step - distillation_loss: 0.0035 - val_sparse_categorical_accuracy: 0.0098 - val_distillation_loss: 0.0078
Epoch 24/30
16/16 [==============================] - 37s 2s/step - distillation_loss: 0.0037 - val_sparse_categorical_accuracy: 0.0098 - val_distillation_loss: 0.0087
Epoch 25/30
16/16 [==============================] - 37s 2s/step - distillation_loss: 0.0031 - val_sparse_categorical_accuracy: 0.0108 - val_distillation_loss: 0.0078
Epoch 26/30
16/16 [==============================] - 37s 2s/step - distillation_loss: 0.0033 - val_sparse_categorical_accuracy: 0.0098 - val_distillation_loss: 0.0072
Epoch 27/30
16/16 [==============================] - 37s 2s/step - distillation_loss: 0.0036 - val_sparse_categorical_accuracy: 0.0098 - val_distillation_loss: 0.0071
Epoch 28/30
16/16 [==============================] - 37s 2s/step - distillation_loss: 0.0036 - val_sparse_categorical_accuracy: 0.0275 - val_distillation_loss: 0.0078
Epoch 29/30
16/16 [==============================] - 37s 2s/step - distillation_loss: 0.0032 - val_sparse_categorical_accuracy: 0.0196 - val_distillation_loss: 0.0068
Epoch 30/30
16/16 [==============================] - 37s 2s/step - distillation_loss: 0.0034 - val_sparse_categorical_accuracy: 0.0147 - val_distillation_loss: 0.0071
97/97 [==============================] - 7s 64ms/step - loss: 0.0000e+00 - accuracy: 0.0107
Top-1 accuracy on the test set: 1.07%
```
</div>
---
## Results
With just 30 epochs of training, the results are nowhere near expected.
This is where the benefits of patience aka a longer training schedule
will come into play. Let's investigate what the model trained for 1000 epochs can do.
```python
# Download the pre-trained weights.
!wget https://git.io/JBO3Y -O S-r50x1-128-1000.tar.gz
!tar xf S-r50x1-128-1000.tar.gz
```
```python
pretrained_student = keras.models.load_model("S-r50x1-128-1000")
pretrained_student.summary()
```
<div class="k-default-codeblock">
```
Model: "resnet"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
root_block (Sequential) (None, 32, 32, 64) 9408
_________________________________________________________________
block1 (Sequential) (None, 32, 32, 256) 214912
_________________________________________________________________
block2 (Sequential) (None, 16, 16, 512) 1218048
_________________________________________________________________
block3 (Sequential) (None, 8, 8, 1024) 7095296
_________________________________________________________________
block4 (Sequential) (None, 4, 4, 2048) 14958592
_________________________________________________________________
group_norm (GroupNormalizati multiple 4096
_________________________________________________________________
re_lu_97 (ReLU) multiple 0
_________________________________________________________________
global_average_pooling2d_1 ( multiple 0
_________________________________________________________________
head/dense (Dense) multiple 208998
=================================================================
Total params: 23,709,350
Trainable params: 23,709,350
Non-trainable params: 0
_________________________________________________________________
```
</div>
This model exactly follows what the authors have used in their student models. This is
why the model summary is a bit different.
```python
_, top1_accuracy = pretrained_student.evaluate(test_ds)
print(f"Top-1 accuracy on the test set: {round(top1_accuracy * 100, 2)}%")
```
<div class="k-default-codeblock">
```
97/97 [==============================] - 14s 131ms/step - loss: 0.0000e+00 - accuracy: 0.8102
Top-1 accuracy on the test set: 81.02%
```
</div>
With 100000 epochs of training, this same model leads to a top-1 accuracy of 95.54%.
There are a number of important ablation studies presented in the paper that show the
effectiveness of these recipes compared to the prior art. So if you are skeptical about
these recipes, definitely consult the paper.
---
## Note on training for longer
With TPU-based hardware infrastructure, we can train the model for 1000 epochs faster.
This does not even require adding a lot of changes to this codebase. You
are encouraged to check
[this repository](https://github.com/sayakpaul/FunMatch-Distillation)
as it presents TPU-compatible training workflows for these recipes and can be run on
[Kaggle Kernel](https://www.kaggle.com/kernels) leveraging their free TPU v3-8 hardware.
|
keras-io/examples/keras_recipes/md/better_knowledge_distillation.md/0
|
{
"file_path": "keras-io/examples/keras_recipes/md/better_knowledge_distillation.md",
"repo_id": "keras-io",
"token_count": 9281
}
| 109 |
"""
Title: Reproducibility in Keras Models
Author: [Frightera](https://github.com/Frightera)
Date created: 2023/05/05
Last modified: 2023/05/05
Description: Demonstration of random weight initialization and reproducibility in Keras models.
Accelerator: GPU
"""
"""
## Introduction
This example demonstrates how to control randomness in Keras models. Sometimes
you may want to reproduce the exact same results across runs, for experimentation
purposes or to debug a problem.
"""
"""
## Setup
"""
import json
import numpy as np
import tensorflow as tf
import keras
from keras import layers
from keras import initializers
# Set the seed using keras.utils.set_random_seed. This will set:
# 1) `numpy` seed
# 2) backend random seed
# 3) `python` random seed
keras.utils.set_random_seed(812)
# If using TensorFlow, this will make GPU ops as deterministic as possible,
# but it will affect the overall performance, so be mindful of that.
tf.config.experimental.enable_op_determinism()
"""
## Weight initialization in Keras
Most of the layers in Keras have `kernel_initializer` and `bias_initializer`
parameters. These parameters allow you to specify the strategy used for
initializing the weights of layer variables. The following built-in initializers
are available as part of `keras.initializers`:
"""
initializers_list = [
initializers.RandomNormal,
initializers.RandomUniform,
initializers.TruncatedNormal,
initializers.VarianceScaling,
initializers.GlorotNormal,
initializers.GlorotUniform,
initializers.HeNormal,
initializers.HeUniform,
initializers.LecunNormal,
initializers.LecunUniform,
initializers.Orthogonal,
]
"""
In a reproducible model, the weights of the model should be initialized with
the same values in subsequent runs. First, we'll check how initializers behave when
they are called multiple times with the same `seed` value.
"""
for initializer in initializers_list:
print(f"Running {initializer}")
for iteration in range(2):
# In order to get same results across multiple runs from an initializer,
# you can specify a seed value.
result = float(initializer(seed=42)(shape=(1, 1)))
print(f"\tIteration --> {iteration} // Result --> {result}")
print("\n")
"""
Now, let's inspect how two different initializer objects behave when they are
given the same seed value.
"""
# Setting the seed value for an initializer will cause two different objects
# to produce the same results.
glorot_normal_1 = keras.initializers.GlorotNormal(seed=42)
glorot_normal_2 = keras.initializers.GlorotNormal(seed=42)
input_dim, neurons = 3, 5
# Call two different objects with same shape
result_1 = glorot_normal_1(shape=(input_dim, neurons))
result_2 = glorot_normal_2(shape=(input_dim, neurons))
# Check if the results are equal.
equal = np.allclose(result_1, result_2)
print(f"Are the results equal? {equal}")
"""
If the seed value is not set (or different seed values are used), two different
objects will produce different results. Since the random seed is set at the beginning
of the notebook, the results will be the same in sequential runs. This is thanks
to `keras.utils.set_random_seed`.
"""
glorot_normal_3 = keras.initializers.GlorotNormal()
glorot_normal_4 = keras.initializers.GlorotNormal()
# Let's call the initializer.
result_3 = glorot_normal_3(shape=(input_dim, neurons))
# Call the second initializer.
result_4 = glorot_normal_4(shape=(input_dim, neurons))
equal = np.allclose(result_3, result_4)
print(f"Are the results equal? {equal}")
"""
`result_3` and `result_4` will be different, but when you run the notebook
again, `result_3` will have identical values to the ones in the previous run.
Same goes for `result_4`.
"""
"""
## Reproducibility in model training process
If you want to reproduce the results of a model training process, you need to
control the randomness sources during the training process. In order to show a
realistic example, this section utilizes `tf.data` using parallel map and shuffle
operations.
In order to start, let's create a simple function which returns the history
object of the Keras model.
"""
def train_model(train_data: tf.data.Dataset, test_data: tf.data.Dataset) -> dict:
model = keras.Sequential(
[
layers.Conv2D(32, (3, 3), activation="relu"),
layers.MaxPooling2D((2, 2)),
layers.Dropout(0.2),
layers.Conv2D(32, (3, 3), activation="relu"),
layers.MaxPooling2D((2, 2)),
layers.Dropout(0.2),
layers.Conv2D(32, (3, 3), activation="relu"),
layers.GlobalAveragePooling2D(),
layers.Dense(64, activation="relu"),
layers.Dropout(0.2),
layers.Dense(10, activation="softmax"),
]
)
model.compile(
optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"]
)
# model.fit has a `shuffle` parameter which has a default value of `True`.
# If you are using array-like objects, this will shuffle the data before
# training. This argument is ignored when `x` is a generator or
# `tf.data.Dataset`.
history = model.fit(train_data, epochs=2, validation_data=test_data)
print(f"Model accuracy on test data: {model.evaluate(test_data)[1] * 100:.2f}%")
return history.history
# Load the MNIST dataset
(train_images, train_labels), (
test_images,
test_labels,
) = keras.datasets.mnist.load_data()
# Construct tf.data.Dataset objects
train_ds = tf.data.Dataset.from_tensor_slices((train_images, train_labels))
test_ds = tf.data.Dataset.from_tensor_slices((test_images, test_labels))
"""
Remember we called `tf.config.experimental.enable_op_determinism()` at the
beginning of the function. This makes the `tf.data` operations deterministic.
However, making `tf.data` operations deterministic comes with a performance
cost. If you want to learn more about it, please check this
[official guide](https://www.tensorflow.org/api_docs/python/tf/config/experimental/enable_op_determinism#determinism_and_tfdata).
A quick summary of what's going on here: models have `kernel_initializer` and
`bias_initializer` parameters. Since we set random seeds using
`keras.utils.set_random_seed` at the beginning of the notebook, the initializers
will produce the same results in sequential runs. Additionally, TensorFlow
operations have now become deterministic. Frequently, you will be utilizing GPUs
that have thousands of hardware threads, which causes non-deterministic behavior
to occur.
"""
def prepare_dataset(image, label):
# Cast and normalize the image
image = tf.cast(image, tf.float32) / 255.0
# Expand the channel dimension
image = tf.expand_dims(image, axis=-1)
# Resize the image
image = tf.image.resize(image, (32, 32))
return image, label
"""
`tf.data.Dataset` objects have a `shuffle` method which shuffles the data.
This method has a `buffer_size` parameter which controls the size of the
buffer. If you set this value to `len(train_images)`, the whole dataset will
be shuffled. If the buffer size is equal to the length of the dataset,
then the elements will be shuffled in a completely random order.
The main drawback of setting the buffer size to the length of the dataset is that
filling the buffer can take a while, depending on the size of the dataset.
Here is a small summary of what's going on here:
1) The `shuffle()` method creates a buffer of the specified size.
2) The elements of the dataset are randomly shuffled and placed into the buffer.
3) The elements of the buffer are then returned in a random order.
Since `tf.config.experimental.enable_op_determinism()` is enabled and we set
random seeds using `keras.utils.set_random_seed` at the beginning of the
notebook, the `shuffle()` method will produce the same results in sequential
runs.
"""
# Prepare the datasets, batch-map --> vectorized operations
train_data = (
train_ds.shuffle(buffer_size=len(train_images))
.batch(batch_size=64)
.map(prepare_dataset, num_parallel_calls=tf.data.AUTOTUNE)
.prefetch(buffer_size=tf.data.AUTOTUNE)
)
test_data = (
test_ds.batch(batch_size=64)
.map(prepare_dataset, num_parallel_calls=tf.data.AUTOTUNE)
.prefetch(buffer_size=tf.data.AUTOTUNE)
)
"""
Train the model for the first time.
"""
history = train_model(train_data, test_data)
"""
Let's save our results into a JSON file, and restart the kernel. After
restarting the kernel, we should see the same results as the previous run,
this includes metrics and loss values both on the training and test data.
"""
# Save the history object into a json file
with open("history.json", "w") as fp:
json.dump(history, fp)
"""
Do not run the cell above in order not to overwrite the results. Execute the
model training cell again and compare the results.
"""
with open("history.json", "r") as fp:
history_loaded = json.load(fp)
"""
Compare the results one by one. You will see that they are equal.
"""
for key in history.keys():
for i in range(len(history[key])):
if not np.allclose(history[key][i], history_loaded[key][i]):
print(f"{key} not equal")
"""
## Conclusion
In this tutorial, you learned how to control the randomness sources in Keras and
TensorFlow. You also learned how to reproduce the results of a model training
process.
If you want to initialize the model with the same weights every time, you need to
set `kernel_initializer` and `bias_initializer` parameters of the layers and provide
a `seed` value to the initializer.
There may still be some inconsistencies due to numerical error accumulation, such
as when using `recurrent_dropout` in RNN layers.
Reproducibility is subject to the environment. You'll get the same results if you
run the notebook or the code on the same machine with the same environment.
"""
|
keras-io/examples/keras_recipes/reproducibility_recipes.py/0
|
{
"file_path": "keras-io/examples/keras_recipes/reproducibility_recipes.py",
"repo_id": "keras-io",
"token_count": 3140
}
| 110 |
<jupyter_start><jupyter_text>Bidirectional LSTM on IMDB**Author:** [fchollet](https://twitter.com/fchollet)**Date created:** 2020/05/03**Last modified:** 2020/05/03**Description:** Train a 2-layer bidirectional LSTM on the IMDB movie review sentiment classification dataset. Setup<jupyter_code>import numpy as np
import keras
from keras import layers
max_features = 20000 # Only consider the top 20k words
maxlen = 200 # Only consider the first 200 words of each movie review<jupyter_output><empty_output><jupyter_text>Build the model<jupyter_code># Input for variable-length sequences of integers
inputs = keras.Input(shape=(None,), dtype="int32")
# Embed each integer in a 128-dimensional vector
x = layers.Embedding(max_features, 128)(inputs)
# Add 2 bidirectional LSTMs
x = layers.Bidirectional(layers.LSTM(64, return_sequences=True))(x)
x = layers.Bidirectional(layers.LSTM(64))(x)
# Add a classifier
outputs = layers.Dense(1, activation="sigmoid")(x)
model = keras.Model(inputs, outputs)
model.summary()<jupyter_output><empty_output><jupyter_text>Load the IMDB movie review sentiment data<jupyter_code>(x_train, y_train), (x_val, y_val) = keras.datasets.imdb.load_data(
num_words=max_features
)
print(len(x_train), "Training sequences")
print(len(x_val), "Validation sequences")
# Use pad_sequences to standardize sequence length:
# this will truncate sequences longer than 200 words and zero-pad sequences shorter than 200 words.
x_train = keras.utils.pad_sequences(x_train, maxlen=maxlen)
x_val = keras.utils.pad_sequences(x_val, maxlen=maxlen)<jupyter_output><empty_output><jupyter_text>Train and evaluate the modelYou can use the trained model hosted on [Hugging Face Hub](https://huggingface.co/keras-io/bidirectional-lstm-imdb)and try the demo on [Hugging Face Spaces](https://huggingface.co/spaces/keras-io/bidirectional_lstm_imdb).<jupyter_code>model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
model.fit(x_train, y_train, batch_size=32, epochs=2, validation_data=(x_val, y_val))<jupyter_output><empty_output>
|
keras-io/examples/nlp/ipynb/bidirectional_lstm_imdb.ipynb/0
|
{
"file_path": "keras-io/examples/nlp/ipynb/bidirectional_lstm_imdb.ipynb",
"repo_id": "keras-io",
"token_count": 718
}
| 111 |
<jupyter_start><jupyter_text>Semantic Similarity with BERT**Author:** [Mohamad Merchant](https://twitter.com/mohmadmerchant1)**Date created:** 2020/08/15**Last modified:** 2020/08/29**Description:** Natural Language Inference by fine-tuning BERT model on SNLI Corpus. IntroductionSemantic Similarity is the task of determining how similartwo sentences are, in terms of what they mean.This example demonstrates the use of SNLI (Stanford Natural Language Inference) Corpusto predict sentence semantic similarity with Transformers.We will fine-tune a BERT model that takes two sentences as inputsand that outputs a similarity score for these two sentences. References* [BERT](https://arxiv.org/pdf/1810.04805.pdf)* [SNLI](https://nlp.stanford.edu/projects/snli/) SetupNote: install HuggingFace `transformers` via `pip install transformers` (version >= 2.11.0).<jupyter_code>import numpy as np
import pandas as pd
import tensorflow as tf
import transformers<jupyter_output><empty_output><jupyter_text>Configuration<jupyter_code>max_length = 128 # Maximum length of input sentence to the model.
batch_size = 32
epochs = 2
# Labels in our dataset.
labels = ["contradiction", "entailment", "neutral"]<jupyter_output><empty_output><jupyter_text>Load the Data<jupyter_code>!curl -LO https://raw.githubusercontent.com/MohamadMerchant/SNLI/master/data.tar.gz
!tar -xvzf data.tar.gz
# There are more than 550k samples in total; we will use 100k for this example.
train_df = pd.read_csv("SNLI_Corpus/snli_1.0_train.csv", nrows=100000)
valid_df = pd.read_csv("SNLI_Corpus/snli_1.0_dev.csv")
test_df = pd.read_csv("SNLI_Corpus/snli_1.0_test.csv")
# Shape of the data
print(f"Total train samples : {train_df.shape[0]}")
print(f"Total validation samples: {valid_df.shape[0]}")
print(f"Total test samples: {valid_df.shape[0]}")<jupyter_output><empty_output><jupyter_text>Dataset Overview:- sentence1: The premise caption that was supplied to the author of the pair.- sentence2: The hypothesis caption that was written by the author of the pair.- similarity: This is the label chosen by the majority of annotators.Where no majority exists, the label "-" is used (we will skip such samples here).Here are the "similarity" label values in our dataset:- Contradiction: The sentences share no similarity.- Entailment: The sentences have similar meaning.- Neutral: The sentences are neutral. Let's look at one sample from the dataset:<jupyter_code>print(f"Sentence1: {train_df.loc[1, 'sentence1']}")
print(f"Sentence2: {train_df.loc[1, 'sentence2']}")
print(f"Similarity: {train_df.loc[1, 'similarity']}")<jupyter_output><empty_output><jupyter_text>Preprocessing<jupyter_code># We have some NaN entries in our train data, we will simply drop them.
print("Number of missing values")
print(train_df.isnull().sum())
train_df.dropna(axis=0, inplace=True)<jupyter_output><empty_output><jupyter_text>Distribution of our training targets.<jupyter_code>print("Train Target Distribution")
print(train_df.similarity.value_counts())<jupyter_output><empty_output><jupyter_text>Distribution of our validation targets.<jupyter_code>print("Validation Target Distribution")
print(valid_df.similarity.value_counts())<jupyter_output><empty_output><jupyter_text>The value "-" appears as part of our training and validation targets.We will skip these samples.<jupyter_code>train_df = (
train_df[train_df.similarity != "-"]
.sample(frac=1.0, random_state=42)
.reset_index(drop=True)
)
valid_df = (
valid_df[valid_df.similarity != "-"]
.sample(frac=1.0, random_state=42)
.reset_index(drop=True)
)<jupyter_output><empty_output><jupyter_text>One-hot encode training, validation, and test labels.<jupyter_code>train_df["label"] = train_df["similarity"].apply(
lambda x: 0 if x == "contradiction" else 1 if x == "entailment" else 2
)
y_train = tf.keras.utils.to_categorical(train_df.label, num_classes=3)
valid_df["label"] = valid_df["similarity"].apply(
lambda x: 0 if x == "contradiction" else 1 if x == "entailment" else 2
)
y_val = tf.keras.utils.to_categorical(valid_df.label, num_classes=3)
test_df["label"] = test_df["similarity"].apply(
lambda x: 0 if x == "contradiction" else 1 if x == "entailment" else 2
)
y_test = tf.keras.utils.to_categorical(test_df.label, num_classes=3)<jupyter_output><empty_output><jupyter_text>Keras Custom Data Generator<jupyter_code>class BertSemanticDataGenerator(tf.keras.utils.Sequence):
"""Generates batches of data.
Args:
sentence_pairs: Array of premise and hypothesis input sentences.
labels: Array of labels.
batch_size: Integer batch size.
shuffle: boolean, whether to shuffle the data.
        include_targets: boolean, whether to include the labels.
Returns:
        Tuples `([input_ids, attention_mask, token_type_ids], labels)`
        (or just `[input_ids, attention_mask, token_type_ids]`
            if `include_targets=False`)
"""
def __init__(
self,
sentence_pairs,
labels,
batch_size=batch_size,
shuffle=True,
include_targets=True,
):
self.sentence_pairs = sentence_pairs
self.labels = labels
self.shuffle = shuffle
self.batch_size = batch_size
self.include_targets = include_targets
# Load our BERT Tokenizer to encode the text.
        # We will use the bert-base-uncased pretrained model.
self.tokenizer = transformers.BertTokenizer.from_pretrained(
"bert-base-uncased", do_lower_case=True
)
self.indexes = np.arange(len(self.sentence_pairs))
self.on_epoch_end()
def __len__(self):
# Denotes the number of batches per epoch.
return len(self.sentence_pairs) // self.batch_size
def __getitem__(self, idx):
        # Retrieve the batch at the given index.
indexes = self.indexes[idx * self.batch_size : (idx + 1) * self.batch_size]
sentence_pairs = self.sentence_pairs[indexes]
        # With the BERT tokenizer's batch_encode_plus, both sentences in a pair
        # are encoded together and separated by the [SEP] token.
encoded = self.tokenizer.batch_encode_plus(
sentence_pairs.tolist(),
add_special_tokens=True,
max_length=max_length,
return_attention_mask=True,
return_token_type_ids=True,
pad_to_max_length=True,
return_tensors="tf",
)
# Convert batch of encoded features to numpy array.
input_ids = np.array(encoded["input_ids"], dtype="int32")
attention_masks = np.array(encoded["attention_mask"], dtype="int32")
token_type_ids = np.array(encoded["token_type_ids"], dtype="int32")
# Set to true if data generator is used for training/validation.
if self.include_targets:
labels = np.array(self.labels[indexes], dtype="int32")
return [input_ids, attention_masks, token_type_ids], labels
else:
return [input_ids, attention_masks, token_type_ids]
def on_epoch_end(self):
# Shuffle indexes after each epoch if shuffle is set to True.
if self.shuffle:
np.random.RandomState(42).shuffle(self.indexes)<jupyter_output><empty_output><jupyter_text>Build the model.<jupyter_code># Create the model under a distribution strategy scope.
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
# Encoded token ids from BERT tokenizer.
input_ids = tf.keras.layers.Input(
shape=(max_length,), dtype=tf.int32, name="input_ids"
)
    # Attention masks indicate to the model which tokens should be attended to.
attention_masks = tf.keras.layers.Input(
shape=(max_length,), dtype=tf.int32, name="attention_masks"
)
# Token type ids are binary masks identifying different sequences in the model.
token_type_ids = tf.keras.layers.Input(
shape=(max_length,), dtype=tf.int32, name="token_type_ids"
)
# Loading pretrained BERT model.
bert_model = transformers.TFBertModel.from_pretrained("bert-base-uncased")
# Freeze the BERT model to reuse the pretrained features without modifying them.
bert_model.trainable = False
bert_output = bert_model.bert(
input_ids, attention_mask=attention_masks, token_type_ids=token_type_ids
)
sequence_output = bert_output.last_hidden_state
pooled_output = bert_output.pooler_output
# Add trainable layers on top of frozen layers to adapt the pretrained features on the new data.
bi_lstm = tf.keras.layers.Bidirectional(
tf.keras.layers.LSTM(64, return_sequences=True)
)(sequence_output)
# Applying hybrid pooling approach to bi_lstm sequence output.
avg_pool = tf.keras.layers.GlobalAveragePooling1D()(bi_lstm)
max_pool = tf.keras.layers.GlobalMaxPooling1D()(bi_lstm)
concat = tf.keras.layers.concatenate([avg_pool, max_pool])
dropout = tf.keras.layers.Dropout(0.3)(concat)
output = tf.keras.layers.Dense(3, activation="softmax")(dropout)
model = tf.keras.models.Model(
inputs=[input_ids, attention_masks, token_type_ids], outputs=output
)
model.compile(
optimizer=tf.keras.optimizers.Adam(),
loss="categorical_crossentropy",
metrics=["acc"],
)
print(f"Strategy: {strategy}")
model.summary()<jupyter_output><empty_output><jupyter_text>Create train and validation data generators<jupyter_code>train_data = BertSemanticDataGenerator(
train_df[["sentence1", "sentence2"]].values.astype("str"),
y_train,
batch_size=batch_size,
shuffle=True,
)
valid_data = BertSemanticDataGenerator(
valid_df[["sentence1", "sentence2"]].values.astype("str"),
y_val,
batch_size=batch_size,
shuffle=False,
)<jupyter_output><empty_output><jupyter_text>Train the ModelTraining is done only for the top layers to perform "feature extraction",which will allow the model to use the representations of the pretrained model.<jupyter_code>history = model.fit(
train_data,
validation_data=valid_data,
epochs=epochs,
use_multiprocessing=True,
workers=-1,
)<jupyter_output><empty_output><jupyter_text>Fine-tuning This step must only be performed after the feature extraction model has been trained to convergence on the new data. This is an optional last step where `bert_model` is unfrozen and retrained with a very low learning rate. This can deliver meaningful improvement by incrementally adapting the pretrained features to the new data.<jupyter_code># Unfreeze the bert_model.
bert_model.trainable = True
# Recompile the model to make the change effective.
model.compile(
optimizer=tf.keras.optimizers.Adam(1e-5),
loss="categorical_crossentropy",
metrics=["accuracy"],
)
model.summary()<jupyter_output><empty_output><jupyter_text>Train the entire model end-to-end.<jupyter_code>history = model.fit(
train_data,
validation_data=valid_data,
epochs=epochs,
use_multiprocessing=True,
workers=-1,
)<jupyter_output><empty_output><jupyter_text>Evaluate model on the test set<jupyter_code>test_data = BertSemanticDataGenerator(
test_df[["sentence1", "sentence2"]].values.astype("str"),
y_test,
batch_size=batch_size,
shuffle=False,
)
model.evaluate(test_data, verbose=1)<jupyter_output><empty_output><jupyter_text>Inference on custom sentences<jupyter_code>def check_similarity(sentence1, sentence2):
sentence_pairs = np.array([[str(sentence1), str(sentence2)]])
test_data = BertSemanticDataGenerator(
sentence_pairs, labels=None, batch_size=1, shuffle=False, include_targets=False,
)
proba = model.predict(test_data[0])[0]
idx = np.argmax(proba)
    proba = f"{proba[idx] * 100: .2f}%"  # express the winning class probability as a percentage
pred = labels[idx]
return pred, proba<jupyter_output><empty_output><jupyter_text>Check results on some example sentence pairs.<jupyter_code>sentence1 = "Two women are observing something together."
sentence2 = "Two women are standing with their eyes closed."
check_similarity(sentence1, sentence2)<jupyter_output><empty_output><jupyter_text>Check results on some example sentence pairs.<jupyter_code>sentence1 = "A smiling costumed woman is holding an umbrella"
sentence2 = "A happy woman in a fairy costume holds an umbrella"
check_similarity(sentence1, sentence2)<jupyter_output><empty_output><jupyter_text>Check results on some example sentence pairs<jupyter_code>sentence1 = "A soccer game with multiple males playing"
sentence2 = "Some men are playing a sport"
check_similarity(sentence1, sentence2)<jupyter_output><empty_output>
|
keras-io/examples/nlp/ipynb/semantic_similarity_with_bert.ipynb/0
|
{
"file_path": "keras-io/examples/nlp/ipynb/semantic_similarity_with_bert.ipynb",
"repo_id": "keras-io",
"token_count": 4679
}
| 112 |
# Sentence embeddings using Siamese RoBERTa-networks
**Author:** [Mohammed Abu El-Nasr](https://github.com/abuelnasr0)<br>
**Date created:** 2023/07/14<br>
**Last modified:** 2023/07/14<br>
**Description:** Fine-tune a RoBERTa model to generate sentence embeddings using KerasNLP.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/nlp/ipynb/sentence_embeddings_with_sbert.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/nlp/sentence_embeddings_with_sbert.py)
---
## Introduction
BERT and RoBERTa can be used for semantic textual similarity tasks, where two sentences
are passed to the model and the network predicts whether they are similar or not. But
what if we have a large collection of sentences and want to find the most similar pairs
in that collection? That will take n*(n-1)/2 inference computations, where n is the
number of sentences in the collection. For example, if n = 10000, the required time will
be 65 hours on a V100 GPU.
A common method to overcome the time overhead issue is to pass one sentence to the model,
then average the output of the model, or take the first token (the [CLS] token) and use
it as a [sentence embedding](https://en.wikipedia.org/wiki/Sentence_embedding), then
use a vector similarity measure like cosine similarity or Manhattan / Euclidean distance
to find close sentences (semantically similar sentences). That will reduce the time to
find the most similar pairs in a collection of 10,000 sentences from 65 hours to 5
seconds!
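As a concrete illustration of that second approach, here is a minimal sketch (not part of the
original example) of a cosine-similarity search over precomputed sentence embeddings. The
array names and sizes below are illustrative placeholders.
```python
import numpy as np

# Assume each row of `corpus_embeddings` is the embedding of one sentence,
# and `query_embedding` is the embedding of the query sentence.
corpus_embeddings = np.random.rand(10000, 768).astype("float32")
query_embedding = np.random.rand(768).astype("float32")

# Normalize, so that a dot product equals cosine similarity.
corpus_norm = corpus_embeddings / np.linalg.norm(corpus_embeddings, axis=1, keepdims=True)
query_norm = query_embedding / np.linalg.norm(query_embedding)

# A single matrix-vector product scores the whole corpus at once.
scores = corpus_norm @ query_norm
top_5 = np.argsort(-scores)[:5]  # indices of the 5 most similar sentences
```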
If we use RoBERTa directly, that will yield rather bad sentence embeddings. But if we
fine-tune RoBERTa using a Siamese network, that will generate semantically meaningful
sentence embeddings. This will enable RoBERTa to be used for new tasks. These tasks
include:
- Large-scale semantic similarity comparison.
- Clustering.
- Information retrieval via semantic search.
In this example, we will show how to fine-tune a RoBERTa model using a Siamese network
such that it will be able to produce semantically meaningful sentence embeddings and use
them in a semantic search and clustering example.
This method of fine-tuning was introduced in
[Sentence-BERT](https://arxiv.org/abs/1908.10084)
---
## Setup
Let's install and import the libraries we need. We'll be using the KerasNLP library in
this example.
We will also enable [mixed precision](https://www.tensorflow.org/guide/mixed_precision)
training. This will help us reduce the training time.
```python
!pip install -q --upgrade keras-nlp
!pip install -q --upgrade keras # Upgrade to Keras 3.
```
```python
import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import keras
import keras_nlp
import tensorflow as tf
import tensorflow_datasets as tfds
import sklearn.cluster as cluster
keras.mixed_precision.set_global_policy("mixed_float16")
```
---
## Fine-tune the model using siamese networks
[Siamese network](https://en.wikipedia.org/wiki/Siamese_neural_network) is a neural
network architecture that contains two or more subnetworks. The subnetworks share the
same weights. It is used to generate feature vectors for each input and then compare them
for similarity.
For our example, the subnetwork will be a RoBERTa model that has a pooling layer on top
of it to produce the embeddings of the input sentences. These embeddings will then be
compared to each other to learn to produce semantically meaningful embeddings.
The pooling strategies used are mean, max, and CLS pooling. Mean pooling produces the
best results. We will use it in our examples.
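To make the mean pooling step concrete, here is a small NumPy sketch (an illustration, not
the layer used later) of what masked mean pooling computes, assuming token embeddings of
shape `(batch, seq_len, dim)` and a 0/1 padding mask of shape `(batch, seq_len)`.
```python
import numpy as np

def mean_pool(token_embeddings, padding_mask):
    # Zero out padded positions, then divide by the number of real tokens.
    mask = padding_mask[:, :, None].astype("float32")
    summed = (token_embeddings * mask).sum(axis=1)
    counts = np.maximum(mask.sum(axis=1), 1e-9)
    return summed / counts

token_embeddings = np.random.rand(2, 4, 8).astype("float32")
padding_mask = np.array([[1, 1, 1, 0], [1, 1, 0, 0]])
sentence_embeddings = mean_pool(token_embeddings, padding_mask)  # shape (2, 8)
```
In the example below, `keras.layers.GlobalAveragePooling1D` performs this masked averaging for us.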
### Fine-tune using the regression objective function
For building the siamese network with the regression objective function, the siamese
network is asked to predict the cosine similarity between the embeddings of the two input
sentences.
Cosine similarity indicates the angle between the sentence embeddings. If the cosine
similarity is high, that means there is a small angle between the embeddings; hence, they
are semantically similar.
#### Load the dataset
We will use the STSB dataset to fine-tune the model for the regression objective. STSB
consists of a collection of sentence pairs that are labelled in the range [0, 5]. 0
indicates the least semantic similarity between the two sentences, and 5 indicates the
most semantic similarity between the two sentences.
The range of the cosine similarity is [-1, 1] and it's the output of the siamese network,
but the range of the labels in the dataset is [0, 5]. We need to unify the range between
the cosine similarity and the dataset labels, so while preparing the dataset, we will
divide the labels by 2.5 and subtract 1.
```python
TRAIN_BATCH_SIZE = 6
VALIDATION_BATCH_SIZE = 8
TRAIN_NUM_BATCHES = 300
VALIDATION_NUM_BATCHES = 40
AUTOTUNE = tf.data.experimental.AUTOTUNE
def change_range(x):
return (x / 2.5) - 1
def prepare_dataset(dataset, num_batches, batch_size):
dataset = dataset.map(
lambda z: (
[z["sentence1"], z["sentence2"]],
[tf.cast(change_range(z["label"]), tf.float32)],
),
num_parallel_calls=AUTOTUNE,
)
dataset = dataset.batch(batch_size)
dataset = dataset.take(num_batches)
dataset = dataset.prefetch(AUTOTUNE)
return dataset
stsb_ds = tfds.load(
"glue/stsb",
)
stsb_train, stsb_valid = stsb_ds["train"], stsb_ds["validation"]
stsb_train = prepare_dataset(stsb_train, TRAIN_NUM_BATCHES, TRAIN_BATCH_SIZE)
stsb_valid = prepare_dataset(stsb_valid, VALIDATION_NUM_BATCHES, VALIDATION_BATCH_SIZE)
```
Let's look at a few sentence pairs from the dataset and their similarity scores.
```python
for x, y in stsb_train:
for i, example in enumerate(x):
print(f"sentence 1 : {example[0]} ")
print(f"sentence 2 : {example[1]} ")
print(f"similarity : {y[i]} \n")
break
```
<div class="k-default-codeblock">
```
sentence 1 : b"A young girl is sitting on Santa's lap."
sentence 2 : b"A little girl is sitting on Santa's lap"
similarity : [0.9200001]
```
</div>
<div class="k-default-codeblock">
```
sentence 1 : b'A women sitting at a table drinking with a basketball picture in the background.'
sentence 2 : b'A woman in a sari drinks something while sitting at a table.'
similarity : [0.03999996]
```
</div>
<div class="k-default-codeblock">
```
sentence 1 : b'Norway marks anniversary of massacre'
sentence 2 : b"Norway Marks Anniversary of Breivik's Massacre"
similarity : [0.52]
```
</div>
<div class="k-default-codeblock">
```
sentence 1 : b'US drone kills six militants in Pakistan: officials'
sentence 2 : b'US missiles kill 15 in Pakistan: officials'
similarity : [-0.03999996]
```
</div>
<div class="k-default-codeblock">
```
sentence 1 : b'On Tuesday, the central bank left interest rates steady, as expected, but also declared that overall risks were weighted toward weakness and warned of deflation risks.'
sentence 2 : b"The central bank's policy board left rates steady for now, as widely expected, but surprised the market by declaring that overall risks were weighted toward weakness."
similarity : [0.6]
```
</div>
<div class="k-default-codeblock">
```
sentence 1 : b'At one of the three sampling sites at Huntington Beach, the bacteria reading came back at 160 on June 16 and at 120 on June 23.'
sentence 2 : b'The readings came back at 160 on June 16 and 120 at June 23 at one of three sampling sites at Huntington Beach.'
similarity : [0.29999995]
```
</div>
#### Build the encoder model.
Now, we'll build the encoder model that will produce the sentence embeddings. It consists
of:
- A preprocessor layer to tokenize and generate padding masks for the sentences.
- A backbone model that will generate the contextual representation of each token in the
sentence.
- A mean pooling layer to produce the embeddings. We will use `keras.layers.GlobalAveragePooling1D`
to apply the mean pooling to the backbone outputs. We will pass the padding mask to the
layer to exclude padded tokens from being averaged.
- A normalization layer to normalize the embeddings as we are using the cosine similarity.
```python
preprocessor = keras_nlp.models.RobertaPreprocessor.from_preset("roberta_base_en")
backbone = keras_nlp.models.RobertaBackbone.from_preset("roberta_base_en")
inputs = keras.Input(shape=(1,), dtype="string", name="sentence")
x = preprocessor(inputs)
h = backbone(x)
embedding = keras.layers.GlobalAveragePooling1D(name="pooling_layer")(
h, x["padding_mask"]
)
n_embedding = keras.layers.UnitNormalization(axis=1)(embedding)
roberta_normal_encoder = keras.Model(inputs=inputs, outputs=n_embedding)
roberta_normal_encoder.summary()
```
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "functional_1"</span>
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━┓
┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃<span style="font-weight: bold"> Connected to </span>┃
┡━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━┩
│ sentence │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ - │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">InputLayer</span>) │ │ │ │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ roberta_preprocess… │ [(<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">512</span>), │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ sentence[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">RobertaPreprocess…</span> │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">512</span>)] │ │ │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ roberta_backbone │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">512</span>, <span style="color: #00af00; text-decoration-color: #00af00">768</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">124,05…</span> │ roberta_preprocesso… │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">RobertaBackbone</span>) │ │ │ roberta_preprocesso… │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ pooling_layer │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">768</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ roberta_backbone[<span style="color: #00af00; text-decoration-color: #00af00">0</span>]… │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">GlobalAveragePool…</span> │ │ │ roberta_preprocesso… │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ unit_normalization │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">768</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ pooling_layer[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">UnitNormalization</span>) │ │ │ │
└─────────────────────┴───────────────────┴─────────┴──────────────────────┘
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">124,052,736</span> (473.22 MB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">124,052,736</span> (473.22 MB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">0</span> (0.00 B)
</pre>
#### Build the Siamese network with the regression objective function.
As described above, a Siamese network has two or more subnetworks, so for this
Siamese model we need two encoders. However, we have only one
encoder, and we will pass both sentences through it. That way, we get two paths
to compute the embeddings, as well as shared weights between the two paths.
After passing the two sentences to the model and getting the normalized embeddings, we
will multiply the two normalized embeddings to get the cosine similarity between the two
sentences.
```python
class RegressionSiamese(keras.Model):
def __init__(self, encoder, **kwargs):
inputs = keras.Input(shape=(2,), dtype="string", name="sentences")
sen1, sen2 = keras.ops.split(inputs, 2, axis=1)
u = encoder(sen1)
v = encoder(sen2)
cosine_similarity_scores = keras.ops.matmul(u, keras.ops.transpose(v))
super().__init__(
inputs=inputs,
outputs=cosine_similarity_scores,
**kwargs,
)
self.encoder = encoder
def get_encoder(self):
return self.encoder
```
#### Fit the model
Let's try this example before training and compare it to the output after training.
```python
sentences = [
"Today is a very sunny day.",
"I am hungry, I will get my meal.",
"The dog is eating his food.",
]
query = ["The dog is enjoying his meal."]
encoder = roberta_normal_encoder
sentence_embeddings = encoder(tf.constant(sentences))
query_embedding = encoder(tf.constant(query))
cosine_similarity_scores = tf.matmul(query_embedding, tf.transpose(sentence_embeddings))
for i, sim in enumerate(cosine_similarity_scores[0]):
print(f"cosine similarity score between sentence {i+1} and the query = {sim} ")
```
<div class="k-default-codeblock">
```
cosine similarity score between sentence 1 and the query = 0.96630859375
cosine similarity score between sentence 2 and the query = 0.97607421875
cosine similarity score between sentence 3 and the query = 0.99365234375
```
</div>
For the training we will use `MeanSquaredError()` as loss function, and `Adam()`
optimizer with learning rate = 2e-5.
```python
roberta_regression_siamese = RegressionSiamese(roberta_normal_encoder)
roberta_regression_siamese.compile(
loss=keras.losses.MeanSquaredError(),
optimizer=keras.optimizers.Adam(2e-5),
jit_compile=False,
)
roberta_regression_siamese.fit(stsb_train, validation_data=stsb_valid, epochs=1)
```
<div class="k-default-codeblock">
```
300/300 ━━━━━━━━━━━━━━━━━━━━ 115s 297ms/step - loss: 0.4751 - val_loss: 0.4025
<keras.src.callbacks.history.History at 0x7f5a78392140>
```
</div>
Let's try the model after training; we will notice a huge difference in the output. This
means that the fine-tuned model is capable of producing semantically meaningful
embeddings, where semantically similar sentences have a small angle between them, and
semantically dissimilar sentences have a large angle between them.
```python
sentences = [
"Today is a very sunny day.",
"I am hungry, I will get my meal.",
"The dog is eating his food.",
]
query = ["The dog is enjoying his food."]
encoder = roberta_regression_siamese.get_encoder()
sentence_embeddings = encoder(tf.constant(sentences))
query_embedding = encoder(tf.constant(query))
cosine_similarities = tf.matmul(query_embedding, tf.transpose(sentence_embeddings))
for i, sim in enumerate(cosine_similarities[0]):
print(f"cosine similarity between sentence {i+1} and the query = {sim} ")
```
<div class="k-default-codeblock">
```
cosine similarity between sentence 1 and the query = 0.10986328125
cosine similarity between sentence 2 and the query = 0.53466796875
cosine similarity between sentence 3 and the query = 0.83544921875
```
</div>
### Fine-tune Using the triplet Objective Function
For the Siamese network with the triplet objective function, three sentences are passed
to the Siamese network *anchor*, *positive*, and *negative* sentences. *anchor* and
*positive* sentences are semantically similar, and *anchor* and *negative* sentences are
semantically dissimilar. The objective is to minimize the distance between the *anchor*
sentence and the *positive* sentence, and to maximize the distance between the *anchor*
sentence and the *negative* sentence.
#### Load the dataset
We will use the Wikipedia-sections-triplets dataset for fine-tuning. This data set
consists of sentences derived from the Wikipedia website. Each example is a collection of 3
sentences: *anchor*, *positive*, and *negative*. The *anchor* and *positive* sentences are derived from the
same section, while the *anchor* and *negative* sentences are derived from different sections.
This dataset has 1.8 million training triplets and 220,000 test triplets. In this
example, we will only use 1200 triplets for training and 300 for testing.
```python
!wget https://sbert.net/datasets/wikipedia-sections-triplets.zip -q
!unzip wikipedia-sections-triplets.zip -d wikipedia-sections-triplets
```
```python
NUM_TRAIN_BATCHES = 200
NUM_TEST_BATCHES = 75
AUTOTUNE = tf.data.experimental.AUTOTUNE
def prepare_wiki_data(dataset, num_batches):
dataset = dataset.map(
lambda z: ((z["Sentence1"], z["Sentence2"], z["Sentence3"]), 0)
)
dataset = dataset.batch(6)
dataset = dataset.take(num_batches)
dataset = dataset.prefetch(AUTOTUNE)
return dataset
wiki_train = tf.data.experimental.make_csv_dataset(
"wikipedia-sections-triplets/train.csv",
batch_size=1,
num_epochs=1,
)
wiki_test = tf.data.experimental.make_csv_dataset(
"wikipedia-sections-triplets/test.csv",
batch_size=1,
num_epochs=1,
)
wiki_train = prepare_wiki_data(wiki_train, NUM_TRAIN_BATCHES)
wiki_test = prepare_wiki_data(wiki_test, NUM_TEST_BATCHES)
```
<div class="k-default-codeblock">
```
Archive: wikipedia-sections-triplets.zip
inflating: wikipedia-sections-triplets/validation.csv
inflating: wikipedia-sections-triplets/Readme.txt
inflating: wikipedia-sections-triplets/test.csv
inflating: wikipedia-sections-triplets/train.csv
```
</div>
#### Build the encoder model
For this encoder model, we will use RoBERTa with mean pooling and we will not normalize
the output embeddings. The encoder model consists of:
- A preprocessor layer to tokenize and generate padding masks for the sentences.
- A backbone model that will generate the contextual representation of each token in the
sentence.
- A mean pooling layer to produce the embeddings.
```python
preprocessor = keras_nlp.models.RobertaPreprocessor.from_preset("roberta_base_en")
backbone = keras_nlp.models.RobertaBackbone.from_preset("roberta_base_en")
input = keras.Input(shape=(1,), dtype="string", name="sentence")
x = preprocessor(input)
h = backbone(x)
embedding = keras.layers.GlobalAveragePooling1D(name="pooling_layer")(
h, x["padding_mask"]
)
roberta_encoder = keras.Model(inputs=input, outputs=embedding)
roberta_encoder.summary()
```
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "functional_3"</span>
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━┓
┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃<span style="font-weight: bold"> Connected to </span>┃
┡━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━┩
│ sentence │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ - │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">InputLayer</span>) │ │ │ │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ roberta_preprocess… │ [(<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">512</span>), │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ sentence[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">RobertaPreprocess…</span> │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">512</span>)] │ │ │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ roberta_backbone_1 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">512</span>, <span style="color: #00af00; text-decoration-color: #00af00">768</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">124,05…</span> │ roberta_preprocesso… │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">RobertaBackbone</span>) │ │ │ roberta_preprocesso… │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ pooling_layer │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">768</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ roberta_backbone_1[<span style="color: #00af00; text-decoration-color: #00af00">…</span> │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">GlobalAveragePool…</span> │ │ │ roberta_preprocesso… │
└─────────────────────┴───────────────────┴─────────┴──────────────────────┘
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">124,052,736</span> (473.22 MB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">124,052,736</span> (473.22 MB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">0</span> (0.00 B)
</pre>
#### Build the Siamese network with the triplet objective function
For the Siamese network with the triplet objective function, we will build the model with
an encoder, and we will pass the three sentences through that encoder. We will get an
embedding for each sentence, and we will calculate the `positive_dist` and
`negative_dist` that will be passed to the loss function described below.
```python
class TripletSiamese(keras.Model):
def __init__(self, encoder, **kwargs):
anchor = keras.Input(shape=(1,), dtype="string")
positive = keras.Input(shape=(1,), dtype="string")
negative = keras.Input(shape=(1,), dtype="string")
ea = encoder(anchor)
ep = encoder(positive)
en = encoder(negative)
positive_dist = keras.ops.sum(keras.ops.square(ea - ep), axis=1)
negative_dist = keras.ops.sum(keras.ops.square(ea - en), axis=1)
positive_dist = keras.ops.sqrt(positive_dist)
negative_dist = keras.ops.sqrt(negative_dist)
output = keras.ops.stack([positive_dist, negative_dist], axis=0)
super().__init__(inputs=[anchor, positive, negative], outputs=output, **kwargs)
self.encoder = encoder
def get_encoder(self):
return self.encoder
```
We will use a custom loss function for the triplet objective. The loss function will
receive the distance between the *anchor* and the *positive* embeddings `positive_dist`,
and the distance between the *anchor* and the *negative* embeddings `negative_dist`,
where they are stacked together in `y_pred`.
We will use `positive_dist` and `negative_dist` to compute the loss such that
`negative_dist` is larger than `positive_dist` at least by a specific margin.
Mathematically, we will minimize this loss function: `max( positive_dist - negative_dist
+ margin, 0)`.
There is no `y_true` used in this loss function. Note that we set the labels in the
dataset to zero, but they will not be used.
```python
class TripletLoss(keras.losses.Loss):
def __init__(self, margin=1, **kwargs):
super().__init__(**kwargs)
self.margin = margin
def call(self, y_true, y_pred):
positive_dist, negative_dist = tf.unstack(y_pred, axis=0)
losses = keras.ops.relu(positive_dist - negative_dist + self.margin)
return keras.ops.mean(losses, axis=0)
```
#### Fit the model
For the training, we will use the custom `TripletLoss()` loss function, and `Adam()`
optimizer with a learning rate = 2e-5.
```python
roberta_triplet_siamese = TripletSiamese(roberta_encoder)
roberta_triplet_siamese.compile(
loss=TripletLoss(),
optimizer=keras.optimizers.Adam(2e-5),
jit_compile=False,
)
roberta_triplet_siamese.fit(wiki_train, validation_data=wiki_test, epochs=1)
```
<div class="k-default-codeblock">
```
200/200 ━━━━━━━━━━━━━━━━━━━━ 128s 467ms/step - loss: 0.7822 - val_loss: 0.7126
<keras.src.callbacks.history.History at 0x7f5c3636c580>
```
</div>
Let's try this model in a clustering example. Here are 6 questions: the first 3 are
about learning English, and the last 3 are about working online. Let's see if the
embeddings produced by our encoder will cluster them correctly.
```python
questions = [
"What should I do to improve my English writting?",
"How to be good at speaking English?",
"How can I improve my English?",
"How to earn money online?",
"How do I earn money online?",
"How to work and earn money through internet?",
]
encoder = roberta_triplet_siamese.get_encoder()
embeddings = encoder(tf.constant(questions))
kmeans = cluster.KMeans(n_clusters=2, random_state=0, n_init="auto").fit(embeddings)
for i, label in enumerate(kmeans.labels_):
print(f"sentence ({questions[i]}) belongs to cluster {label}")
```
<div class="k-default-codeblock">
```
sentence (What should I do to improve my English writting?) belongs to cluster 1
sentence (How to be good at speaking English?) belongs to cluster 1
sentence (How can I improve my English?) belongs to cluster 1
sentence (How to earn money online?) belongs to cluster 0
sentence (How do I earn money online?) belongs to cluster 0
sentence (How to work and earn money through internet?) belongs to cluster 0
```
</div>
|
keras-io/examples/nlp/md/sentence_embeddings_with_sbert.md/0
|
{
"file_path": "keras-io/examples/nlp/md/sentence_embeddings_with_sbert.md",
"repo_id": "keras-io",
"token_count": 10459
}
| 113 |
"""
Title: Pretraining BERT with Hugging Face Transformers
Author: Sreyan Ghosh
Date created: 2022/07/01
Last modified: 2022/08/27
Description: Pretraining BERT using Hugging Face Transformers on NSP and MLM.
Accelerator: GPU
"""
"""
## Introduction
"""
"""
### BERT (Bidirectional Encoder Representations from Transformers)
In the field of computer vision, researchers have repeatedly shown the value of
transfer learning — pretraining a neural network model on a known task/dataset, for
instance ImageNet classification, and then performing fine-tuning — using the trained neural
network as the basis of a new specific-purpose model. In recent years, researchers
have shown that a similar technique can be useful in many natural language tasks.
BERT makes use of Transformer, an attention mechanism that learns contextual relations
between words (or subwords) in a text. In its vanilla form, Transformer includes two
separate mechanisms — an encoder that reads the text input and a decoder that produces
a prediction for the task. Since BERT’s goal is to generate a language model, only the
encoder mechanism is necessary. The detailed workings of Transformer are described in
a paper by Google.
As opposed to directional models, which read the text input sequentially
(left-to-right or right-to-left), the Transformer encoder reads the entire
sequence of words at once. Therefore it is considered bidirectional, though
it would be more accurate to say that it’s non-directional. This characteristic
allows the model to learn the context of a word based on all of its surroundings
(left and right of the word).
When training language models, a challenge is defining a prediction goal.
Many models predict the next word in a sequence (e.g. `"The child came home from _"`),
a directional approach which inherently limits context learning. To overcome this
challenge, BERT uses two training strategies:
### Masked Language Modeling (MLM)
Before feeding word sequences into BERT, 15% of the words in each sequence are replaced
with a `[MASK]` token. The model then attempts to predict the original value of the masked
words, based on the context provided by the other, non-masked words in the sequence.
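As a rough, illustrative sketch (the actual masking in this example is handled later by the
`DataCollatorForLanguageModeling` collator), randomly masking about 15% of the tokens could
look like this; the sentence and the rate are just for illustration:
```python
import random

tokens = ["the", "child", "came", "home", "from", "school"]
masked = ["[MASK]" if random.random() < 0.15 else tok for tok in tokens]
# e.g. ['the', 'child', '[MASK]', 'home', 'from', 'school']
```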
### Next Sentence Prediction (NSP)
In the BERT training process, the model receives pairs of sentences as input and learns to
predict if the second sentence in the pair is the subsequent sentence in the original
document. During training, 50% of the inputs are a pair in which the second sentence is the
subsequent sentence in the original document, while in the other 50% a random sentence
from the corpus is chosen as the second sentence. The assumption is that the random sentence
will represent a disconnect from the first sentence.
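A hedged sketch of how such sentence pairs could be constructed (the real logic used later in
`prepare_train_features` is more involved; the helper name here is illustrative):
```python
import random

def make_nsp_pair(sentences, index):
    # Returns (sentence_a, sentence_b, next_sentence_label). Following the
    # convention used later in this example, label 0 means B really follows A,
    # and label 1 means B was randomly sampled.
    sentence_a = sentences[index]
    if random.random() < 0.5 and index + 1 < len(sentences):
        return sentence_a, sentences[index + 1], 0  # actual next sentence
    random_index = random.randrange(len(sentences))
    return sentence_a, sentences[random_index], 1  # randomly sampled sentence
```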
Though Google provides a pretrained BERT checkpoint for English, you may often need
to either pretrain the model from scratch for a different language, or do a
continued-pretraining to fit the model to a new domain. In this notebook, we pretrain
BERT from scratch, optimizing both the MLM and NSP objectives, using 🤗 Transformers on the `WikiText`
English dataset loaded from 🤗 Datasets.
"""
"""
## Setup
"""
"""
### Installing the requirements
"""
"""shell
pip install git+https://github.com/huggingface/transformers.git
pip install datasets
pip install huggingface-hub
pip install nltk
"""
"""
### Importing the necessary libraries
"""
import nltk
import random
import logging
import tensorflow as tf
from tensorflow import keras
nltk.download("punkt")
# Only log error messages
tf.get_logger().setLevel(logging.ERROR)
# Set random seed
tf.keras.utils.set_random_seed(42)
"""
### Define certain variables
"""
TOKENIZER_BATCH_SIZE = 256 # Batch-size to train the tokenizer on
TOKENIZER_VOCABULARY = 25000 # Total number of unique subwords the tokenizer can have
BLOCK_SIZE = 128 # Maximum number of tokens in an input sample
NSP_PROB = 0.50 # Probability that the next sentence is the actual next sentence in NSP
SHORT_SEQ_PROB = 0.1 # Probability of generating shorter sequences to minimize the mismatch between pretraining and fine-tuning.
MAX_LENGTH = 512 # Maximum number of tokens in an input sample after padding
MLM_PROB = 0.2 # Probability with which tokens are masked in MLM
TRAIN_BATCH_SIZE = 2 # Batch-size for pretraining the model on
MAX_EPOCHS = 1 # Maximum number of epochs to train the model for
LEARNING_RATE = 1e-4 # Learning rate for training the model
MODEL_CHECKPOINT = "bert-base-cased" # Name of pretrained model from 🤗 Model Hub
"""
## Load the WikiText dataset
"""
"""
We now download the `WikiText` language modeling dataset. It is a collection of over
100 million tokens extracted from the set of verified "Good" and "Featured" articles on
Wikipedia.
We load the dataset from [🤗 Datasets](https://github.com/huggingface/datasets).
For the purpose of demonstration in this notebook, we work with only the `train`
split of the dataset. This can be easily done with the `load_dataset` function.
"""
from datasets import load_dataset
dataset = load_dataset("wikitext", "wikitext-2-raw-v1")
"""
The dataset just has one column which is the raw text, and this is all we need for
pretraining BERT!
"""
print(dataset)
"""
## Training a new Tokenizer
"""
"""
First we train our own tokenizer from scratch on our corpus, so that we
can use it to train our language model from scratch.
But why would you need to train a tokenizer? That's because Transformer models very
often use subword tokenization algorithms, and they need to be trained to identify the
parts of words that are often present in the corpus you are using.
The 🤗 Transformers `Tokenizer` (as the name indicates) will tokenize the inputs
(including converting the tokens to their corresponding IDs in the pretrained vocabulary)
and put it in a format the model expects, as well as generate the other inputs that model
requires.
First we make a list of all the raw documents from the `WikiText` corpus:
"""
all_texts = [
doc for doc in dataset["train"]["text"] if len(doc) > 0 and not doc.startswith(" =")
]
"""
Next we make a `batch_iterator` function that will aid us to train our tokenizer.
"""
def batch_iterator():
for i in range(0, len(all_texts), TOKENIZER_BATCH_SIZE):
yield all_texts[i : i + TOKENIZER_BATCH_SIZE]
"""
In this notebook, we train a tokenizer with the exact same algorithms and
parameters as an existing one. For instance, we train a new version of the
`BERT-CASED` tokenizer on `Wikitext-2` using the same tokenization algorithm.
First we need to load the tokenizer we want to use as a model:
"""
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(MODEL_CHECKPOINT)
"""
Now we train our tokenizer using the entire `train` split of the `Wikitext-2`
dataset.
"""
tokenizer = tokenizer.train_new_from_iterator(
batch_iterator(), vocab_size=TOKENIZER_VOCABULARY
)
"""
So now we are done training our new tokenizer! Next we move on to the data
pre-processing steps.
"""
"""
## Data Pre-processing
"""
"""
For the sake of demonstrating the workflow, in this notebook we only take
small subsets of the entire WikiText `train` and `test` splits.
"""
dataset["train"] = dataset["train"].select([i for i in range(1000)])
dataset["validation"] = dataset["validation"].select([i for i in range(1000)])
"""
Before we can feed those texts to our model, we need to pre-process them and get them
ready for the task. As mentioned earlier, the BERT pretraining task includes two tasks
in total, the `NSP` task and the `MLM` task. 🤗 Transformers have an easy to implement
`collator` called the `DataCollatorForLanguageModeling`. However, we need to get the
data ready for `NSP` manually.
Next we write a simple function called the `prepare_train_features` that helps us in
the pre-processing and is compatible with 🤗 Datasets. To summarize, our pre-processing
function should:
- Get the dataset ready for the NSP task by creating pairs of sentences (A,B), where B
either actually follows A, or B is randomly sampled from somewhere else in the corpus.
It should also generate a corresponding label for each pair, which is 0 if B actually
follows A and 1 if B was randomly sampled (this matches the `next_sentence_label` convention used by the model).
- Tokenize the text dataset into its corresponding token ids that will be used for
embedding look-up in BERT
- Create additional inputs for the model like `token_type_ids`, `attention_mask`, etc.
"""
# We define the maximum number of tokens after tokenization that each training sample
# will have
max_num_tokens = BLOCK_SIZE - tokenizer.num_special_tokens_to_add(pair=True)
def prepare_train_features(examples):
"""Function to prepare features for NSP task
Arguments:
examples: A dictionary with 1 key ("text")
text: List of raw documents (str)
Returns:
examples: A dictionary with 4 keys
            input_ids: List of tokenized, concatenated, and batched
sentences from the individual raw documents (int)
token_type_ids: List of integers (0 or 1) corresponding
                to: 0 for sentence no. 1 and padding, 1 for sentence
no. 2
attention_mask: List of integers (0 or 1) corresponding
to: 1 for non-padded tokens, 0 for padded
            next_sentence_label: List of integers (0 or 1) corresponding
                to: 0 if the second sentence actually follows the first,
                1 if the second sentence is sampled from somewhere else in the corpus
"""
    # Remove unwanted samples from the training set
examples["document"] = [
d.strip() for d in examples["text"] if len(d) > 0 and not d.startswith(" =")
]
    # Split each document from the dataset into its individual sentences
examples["sentences"] = [
nltk.tokenize.sent_tokenize(document) for document in examples["document"]
]
# Convert the tokens into ids using the trained tokenizer
examples["tokenized_sentences"] = [
[tokenizer.convert_tokens_to_ids(tokenizer.tokenize(sent)) for sent in doc]
for doc in examples["sentences"]
]
# Define the outputs
examples["input_ids"] = []
examples["token_type_ids"] = []
examples["attention_mask"] = []
examples["next_sentence_label"] = []
for doc_index, document in enumerate(examples["tokenized_sentences"]):
        current_chunk = []  # a buffer storing the current working segments
current_length = 0
i = 0
# We *usually* want to fill up the entire sequence since we are padding
# to `block_size` anyways, so short sequences are generally wasted
# computation. However, we *sometimes*
# (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter
# sequences to minimize the mismatch between pretraining and fine-tuning.
# The `target_seq_length` is just a rough target however, whereas
# `block_size` is a hard limit.
target_seq_length = max_num_tokens
if random.random() < SHORT_SEQ_PROB:
target_seq_length = random.randint(2, max_num_tokens)
while i < len(document):
segment = document[i]
current_chunk.append(segment)
current_length += len(segment)
if i == len(document) - 1 or current_length >= target_seq_length:
if current_chunk:
# `a_end` is how many segments from `current_chunk` go into the `A`
# (first) sentence.
a_end = 1
if len(current_chunk) >= 2:
a_end = random.randint(1, len(current_chunk) - 1)
tokens_a = []
for j in range(a_end):
tokens_a.extend(current_chunk[j])
tokens_b = []
if len(current_chunk) == 1 or random.random() < NSP_PROB:
is_random_next = True
target_b_length = target_seq_length - len(tokens_a)
# This should rarely go for more than one iteration for large
# corpora. However, just to be careful, we try to make sure that
# the random document is not the same as the document
# we're processing.
for _ in range(10):
random_document_index = random.randint(
0, len(examples["tokenized_sentences"]) - 1
)
if random_document_index != doc_index:
break
random_document = examples["tokenized_sentences"][
random_document_index
]
random_start = random.randint(0, len(random_document) - 1)
for j in range(random_start, len(random_document)):
tokens_b.extend(random_document[j])
if len(tokens_b) >= target_b_length:
break
# We didn't actually use these segments so we "put them back" so
# they don't go to waste.
num_unused_segments = len(current_chunk) - a_end
i -= num_unused_segments
else:
is_random_next = False
for j in range(a_end, len(current_chunk)):
tokens_b.extend(current_chunk[j])
input_ids = tokenizer.build_inputs_with_special_tokens(
tokens_a, tokens_b
)
# add token type ids, 0 for sentence a, 1 for sentence b
token_type_ids = tokenizer.create_token_type_ids_from_sequences(
tokens_a, tokens_b
)
padded = tokenizer.pad(
{"input_ids": input_ids, "token_type_ids": token_type_ids},
padding="max_length",
max_length=MAX_LENGTH,
)
examples["input_ids"].append(padded["input_ids"])
examples["token_type_ids"].append(padded["token_type_ids"])
examples["attention_mask"].append(padded["attention_mask"])
examples["next_sentence_label"].append(1 if is_random_next else 0)
current_chunk = []
current_length = 0
i += 1
    # We delete all the unnecessary columns from our dataset
del examples["document"]
del examples["sentences"]
del examples["text"]
del examples["tokenized_sentences"]
return examples
tokenized_dataset = dataset.map(
prepare_train_features,
batched=True,
remove_columns=["text"],
num_proc=1,
)
"""
For MLM we are going to use the same preprocessing as before for our dataset with
one additional step: we randomly mask some tokens (by replacing them by [MASK])
and the labels will be adjusted to only include the masked tokens
(we don't have to predict the non-masked tokens). If you use a tokenizer you trained
yourself, make sure the [MASK] token is among the special tokens you passed during training!
To get the data ready for MLM, we simply use the `collator` called the
`DataCollatorForLanguageModeling` provided by the 🤗 Transformers library on our dataset
that is already ready for the NSP task. The `collator` expects certain parameters.
We use the default ones from the original BERT paper in this notebook. The
`return_tensors='tf'` ensures that we get `tf.Tensor` objects back.
"""
from transformers import DataCollatorForLanguageModeling
collater = DataCollatorForLanguageModeling(
tokenizer=tokenizer, mlm=True, mlm_probability=MLM_PROB, return_tensors="tf"
)
"""
Next we define our training set with which we train our model. Again, 🤗 Datasets
provides us with the `to_tf_dataset` method which will help us integrate our dataset with
the `collator` defined above. The method expects certain parameters:
- **columns**: the columns which will serve as our independent variables
- **label_cols**: the columns which will serve as our labels or dependent variables
- **batch_size**: our batch size for training
- **shuffle**: whether we want to shuffle our training dataset
- **collate_fn**: our collator function
"""
train = tokenized_dataset["train"].to_tf_dataset(
columns=["input_ids", "token_type_ids", "attention_mask"],
label_cols=["labels", "next_sentence_label"],
batch_size=TRAIN_BATCH_SIZE,
shuffle=True,
collate_fn=collater,
)
validation = tokenized_dataset["validation"].to_tf_dataset(
columns=["input_ids", "token_type_ids", "attention_mask"],
label_cols=["labels", "next_sentence_label"],
batch_size=TRAIN_BATCH_SIZE,
shuffle=True,
collate_fn=collater,
)
"""
## Defining the model
"""
"""
To define our model, first we need to define a config which will help us define certain
parameters of our model architecture. This includes parameters like number of transformer
layers, number of attention heads, hidden dimension, etc. For this notebook, we try
to define the exact config defined in the original BERT paper.
We can easily achieve this using the `BertConfig` class from the 🤗 Transformers library.
The `from_pretrained()` method expects the name of a model. Here we define the simplest
model with which we also trained our model, i.e., `bert-base-cased`.
"""
from transformers import BertConfig
config = BertConfig.from_pretrained(MODEL_CHECKPOINT)
"""
For defining our model we use the `TFBertForPreTraining` class from the 🤗 Transformers
library. This class internally handles everything starting from defining our model, to
unpacking our inputs and calculating the loss. So we need not do anything ourselves except
defining the model with the correct `config` we want!
"""
from transformers import TFBertForPreTraining
model = TFBertForPreTraining(config)
"""
Now we define our optimizer and compile the model. The loss calculation is handled
internally and so we need not worry about that!
"""
optimizer = keras.optimizers.Adam(learning_rate=LEARNING_RATE)
model.compile(optimizer=optimizer)
"""
Finally all steps are done and now we can start training our model!
"""
model.fit(train, validation_data=validation, epochs=MAX_EPOCHS)
"""
Our model has now been trained! We suggest training the model on the complete
dataset for at least 50 epochs to obtain decent performance. The pretrained model now acts as
a language model and is meant to be fine-tuned on a downstream task. Thus it can now be
fine-tuned on any downstream task like Question Answering, Text Classification
etc.!
"""
"""
Now you can push this model to the 🤗 Model Hub and also share it with all your friends,
family, favorite pets: they can all load it with the identifier
`"your-username/the-name-you-picked"` so for instance:
```python
model.push_to_hub("pretrained-bert", organization="keras-io")
tokenizer.push_to_hub("pretrained-bert", organization="keras-io")
```
And after you push your model this is how you can load it in the future!
```python
from transformers import TFBertForPreTraining
model = TFBertForPreTraining.from_pretrained("your-username/my-awesome-model")
```
or, since it's a pretrained model and you would generally use it for fine-tuning
on a downstream task, you can also load it for some other task like:
```python
from transformers import TFBertForSequenceClassification
model = TFBertForSequenceClassification.from_pretrained("your-username/my-awesome-model")
```
In this case, the pretraining head will be dropped and the model will just be initialized
with the transformer layers. A new task-specific head will be added with random weights.
"""
|
keras-io/examples/nlp/pretraining_BERT.py/0
|
{
"file_path": "keras-io/examples/nlp/pretraining_BERT.py",
"repo_id": "keras-io",
"token_count": 6850
}
| 114 |
<jupyter_start><jupyter_text>Deep Deterministic Policy Gradient (DDPG)**Author:** [amifunny](https://github.com/amifunny)**Date created:** 2020/06/04**Last modified:** 2020/09/21**Description:** Implementing the DDPG algorithm on the Inverted Pendulum problem. Introduction **Deep Deterministic Policy Gradient (DDPG)** is a model-free off-policy algorithm for learning continuous actions. It combines ideas from DPG (Deterministic Policy Gradient) and DQN (Deep Q-Network). It uses Experience Replay and slow-learning target networks from DQN, and it is based on DPG, which can operate over continuous action spaces. This tutorial closely follows this paper - [Continuous control with deep reinforcement learning](https://arxiv.org/pdf/1509.02971.pdf) Problem We are trying to solve the classic **Inverted Pendulum** control problem. In this setting, we can take only two actions: swing left or swing right. What makes this problem challenging for Q-Learning algorithms is that actions are **continuous** instead of being **discrete**. That is, instead of using two discrete actions like `-1` or `+1`, we have to select from infinite actions ranging from `-2` to `+2`. Quick theory Just like the Actor-Critic method, we have two networks:1. Actor - It proposes an action given a state.2. Critic - It predicts if the action is good (positive value) or bad (negative value) given a state and an action. DDPG uses two more techniques not present in the original DQN:**First, it uses two Target networks.****Why?** Because they add stability to training. In short, we are learning from estimated targets, and Target networks are updated slowly, hence keeping our estimated targets stable. Conceptually, this is like saying, "I have an idea of how to play this well, I'm going to try it out for a bit until I find something better", as opposed to saying "I'm going to re-learn how to play this entire game after every move". See this [StackOverflow answer](https://stackoverflow.com/a/54238556/13475679).**Second, it uses Experience Replay.** We store a list of tuples `(state, action, reward, next_state)`, and instead of learning only from recent experience, we learn from sampling all of our experience accumulated so far. Now, let's see how it is implemented.<jupyter_code>import gym
import tensorflow as tf
from tensorflow.keras import layers
import numpy as np
import matplotlib.pyplot as plt<jupyter_output><empty_output><jupyter_text>We use [OpenAI Gym](http://gym.openai.com/docs) to create the environment. We will use the `upper_bound` parameter to scale our actions later.<jupyter_code>problem = "Pendulum-v1"
env = gym.make(problem)
num_states = env.observation_space.shape[0]
print("Size of State Space -> {}".format(num_states))
num_actions = env.action_space.shape[0]
print("Size of Action Space -> {}".format(num_actions))
upper_bound = env.action_space.high[0]
lower_bound = env.action_space.low[0]
print("Max Value of Action -> {}".format(upper_bound))
print("Min Value of Action -> {}".format(lower_bound))<jupyter_output><empty_output><jupyter_text>To implement better exploration by the Actor network, we use noisy perturbations,specificallyan **Ornstein-Uhlenbeck process** for generating noise, as described in the paper.It samples noise from a correlated normal distribution.<jupyter_code>class OUActionNoise:
def __init__(self, mean, std_deviation, theta=0.15, dt=1e-2, x_initial=None):
self.theta = theta
self.mean = mean
self.std_dev = std_deviation
self.dt = dt
self.x_initial = x_initial
self.reset()
def __call__(self):
# Formula taken from https://www.wikipedia.org/wiki/Ornstein-Uhlenbeck_process.
x = (
self.x_prev
+ self.theta * (self.mean - self.x_prev) * self.dt
+ self.std_dev * np.sqrt(self.dt) * np.random.normal(size=self.mean.shape)
)
# Store x into x_prev
# Makes next noise dependent on current one
self.x_prev = x
return x
def reset(self):
if self.x_initial is not None:
self.x_prev = self.x_initial
else:
self.x_prev = np.zeros_like(self.mean)<jupyter_output><empty_output><jupyter_text>The `Buffer` class implements Experience Replay.------**Critic loss** - Mean Squared Error of `y - Q(s, a)`where `y` is the expected return as seen by the Target network,and `Q(s, a)` is action value predicted by the Critic network. `y` is a moving targetthat the critic model tries to achieve; we make this targetstable by updating the Target model slowly.**Actor loss** - This is computed using the mean of the value given by the Critic networkfor the actions taken by the Actor network. We seek to maximize this quantity.Hence we update the Actor network so that it produces actions that getthe maximum predicted value as seen by the Critic, for a given state.<jupyter_code>class Buffer:
def __init__(self, buffer_capacity=100000, batch_size=64):
# Number of "experiences" to store at max
self.buffer_capacity = buffer_capacity
# Num of tuples to train on.
self.batch_size = batch_size
        # It tells us the number of times record() was called.
self.buffer_counter = 0
        # Instead of a list of tuples, as in the classic experience replay setup,
        # we use a separate np.array for each element of the tuple.
self.state_buffer = np.zeros((self.buffer_capacity, num_states))
self.action_buffer = np.zeros((self.buffer_capacity, num_actions))
self.reward_buffer = np.zeros((self.buffer_capacity, 1))
self.next_state_buffer = np.zeros((self.buffer_capacity, num_states))
    # Takes a (s,a,r,s') observation tuple as input
def record(self, obs_tuple):
# Set index to zero if buffer_capacity is exceeded,
# replacing old records
index = self.buffer_counter % self.buffer_capacity
self.state_buffer[index] = obs_tuple[0]
self.action_buffer[index] = obs_tuple[1]
self.reward_buffer[index] = obs_tuple[2]
self.next_state_buffer[index] = obs_tuple[3]
self.buffer_counter += 1
# Eager execution is turned on by default in TensorFlow 2. Decorating with tf.function allows
# TensorFlow to build a static graph out of the logic and computations in our function.
# This provides a large speed up for blocks of code that contain many small TensorFlow operations such as this one.
@tf.function
def update(
self, state_batch, action_batch, reward_batch, next_state_batch,
):
# Training and updating Actor & Critic networks.
# See Pseudo Code.
with tf.GradientTape() as tape:
target_actions = target_actor(next_state_batch, training=True)
y = reward_batch + gamma * target_critic(
[next_state_batch, target_actions], training=True
)
critic_value = critic_model([state_batch, action_batch], training=True)
critic_loss = tf.math.reduce_mean(tf.math.square(y - critic_value))
critic_grad = tape.gradient(critic_loss, critic_model.trainable_variables)
critic_optimizer.apply_gradients(
zip(critic_grad, critic_model.trainable_variables)
)
with tf.GradientTape() as tape:
actions = actor_model(state_batch, training=True)
critic_value = critic_model([state_batch, actions], training=True)
# Used `-value` as we want to maximize the value given
# by the critic for our actions
actor_loss = -tf.math.reduce_mean(critic_value)
actor_grad = tape.gradient(actor_loss, actor_model.trainable_variables)
actor_optimizer.apply_gradients(
zip(actor_grad, actor_model.trainable_variables)
)
# We compute the loss and update parameters
def learn(self):
# Get sampling range
record_range = min(self.buffer_counter, self.buffer_capacity)
# Randomly sample indices
batch_indices = np.random.choice(record_range, self.batch_size)
# Convert to tensors
state_batch = tf.convert_to_tensor(self.state_buffer[batch_indices])
action_batch = tf.convert_to_tensor(self.action_buffer[batch_indices])
reward_batch = tf.convert_to_tensor(self.reward_buffer[batch_indices])
reward_batch = tf.cast(reward_batch, dtype=tf.float32)
next_state_batch = tf.convert_to_tensor(self.next_state_buffer[batch_indices])
self.update(state_batch, action_batch, reward_batch, next_state_batch)
# This updates target parameters slowly
# based on rate `tau`, which is much less than one.
@tf.function
def update_target(target_weights, weights, tau):
for (a, b) in zip(target_weights, weights):
a.assign(b * tau + a * (1 - tau))<jupyter_output><empty_output><jupyter_text>Here we define the Actor and Critic networks. These are basic Dense modelswith `ReLU` activation.Note: We need the initialization for last layer of the Actor to be between`-0.003` and `0.003` as this prevents us from getting `1` or `-1` output values inthe initial stages, which would squash our gradients to zero,as we use the `tanh` activation.<jupyter_code>def get_actor():
    # Initialize weights between -3e-3 and 3e-3
last_init = tf.random_uniform_initializer(minval=-0.003, maxval=0.003)
inputs = layers.Input(shape=(num_states,))
out = layers.Dense(256, activation="relu")(inputs)
out = layers.Dense(256, activation="relu")(out)
outputs = layers.Dense(1, activation="tanh", kernel_initializer=last_init)(out)
# Our upper bound is 2.0 for Pendulum.
outputs = outputs * upper_bound
model = tf.keras.Model(inputs, outputs)
return model
def get_critic():
# State as input
state_input = layers.Input(shape=(num_states))
state_out = layers.Dense(16, activation="relu")(state_input)
state_out = layers.Dense(32, activation="relu")(state_out)
# Action as input
action_input = layers.Input(shape=(num_actions))
action_out = layers.Dense(32, activation="relu")(action_input)
    # Both are passed through separate layers before concatenating
concat = layers.Concatenate()([state_out, action_out])
out = layers.Dense(256, activation="relu")(concat)
out = layers.Dense(256, activation="relu")(out)
outputs = layers.Dense(1)(out)
    # Outputs a single value for a given state-action pair
model = tf.keras.Model([state_input, action_input], outputs)
return model<jupyter_output><empty_output><jupyter_text>`policy()` returns an action sampled from our Actor network plus some noise forexploration.<jupyter_code>def policy(state, noise_object):
sampled_actions = tf.squeeze(actor_model(state))
noise = noise_object()
# Adding noise to action
sampled_actions = sampled_actions.numpy() + noise
# We make sure action is within bounds
legal_action = np.clip(sampled_actions, lower_bound, upper_bound)
return [np.squeeze(legal_action)]<jupyter_output><empty_output><jupyter_text>Training hyperparameters<jupyter_code>std_dev = 0.2
ou_noise = OUActionNoise(mean=np.zeros(1), std_deviation=float(std_dev) * np.ones(1))
actor_model = get_actor()
critic_model = get_critic()
target_actor = get_actor()
target_critic = get_critic()
# Making the weights equal initially
target_actor.set_weights(actor_model.get_weights())
target_critic.set_weights(critic_model.get_weights())
# Learning rate for actor-critic models
critic_lr = 0.002
actor_lr = 0.001
critic_optimizer = tf.keras.optimizers.Adam(critic_lr)
actor_optimizer = tf.keras.optimizers.Adam(actor_lr)
total_episodes = 100
# Discount factor for future rewards
gamma = 0.99
# Used to update target networks
tau = 0.005
buffer = Buffer(50000, 64)<jupyter_output><empty_output><jupyter_text>Now we implement our main training loop, and iterate over episodes.We sample actions using `policy()` and train with `learn()` at each time step,along with updating the Target networks at a rate `tau`.<jupyter_code># To store reward history of each episode
ep_reward_list = []
# To store average reward history of last few episodes
avg_reward_list = []
# Takes about 4 min to train
for ep in range(total_episodes):
prev_state = env.reset()
episodic_reward = 0
while True:
# Uncomment this to see the Actor in action
# But not in a python notebook.
# env.render()
tf_prev_state = tf.expand_dims(tf.convert_to_tensor(prev_state), 0)
action = policy(tf_prev_state, ou_noise)
        # Receive state and reward from the environment.
state, reward, done, info = env.step(action)
buffer.record((prev_state, action, reward, state))
episodic_reward += reward
buffer.learn()
update_target(target_actor.variables, actor_model.variables, tau)
update_target(target_critic.variables, critic_model.variables, tau)
# End this episode when `done` is True
if done:
break
prev_state = state
ep_reward_list.append(episodic_reward)
# Mean of last 40 episodes
avg_reward = np.mean(ep_reward_list[-40:])
print("Episode * {} * Avg Reward is ==> {}".format(ep, avg_reward))
avg_reward_list.append(avg_reward)
# Plotting graph
# Episodes versus Avg. Rewards
plt.plot(avg_reward_list)
plt.xlabel("Episode")
plt.ylabel("Avg. Epsiodic Reward")
plt.show()<jupyter_output><empty_output><jupyter_text>If training proceeds correctly, the average episodic reward will increase with time.Feel free to try different learning rates, `tau` values, and architectures for the Actor and Critic networks.The Inverted Pendulum problem has low complexity, but DDPG works great on many other problems.Another great environment to try this on is `LunarLanderContinuous-v2`, but it will take more episodes to obtain good results.<jupyter_code># Save the weights
actor_model.save_weights("pendulum_actor.h5")
critic_model.save_weights("pendulum_critic.h5")
target_actor.save_weights("pendulum_target_actor.h5")
target_critic.save_weights("pendulum_target_critic.h5")<jupyter_output><empty_output>
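# The following lines are an optional addition (not in the original example):
# reload the saved actor weights and run one greedy evaluation episode,
# i.e. act without adding exploration noise.
eval_actor = get_actor()
eval_actor.load_weights("pendulum_actor.h5")
state = env.reset()
eval_reward = 0.0
while True:
    tf_state = tf.expand_dims(tf.convert_to_tensor(state), 0)
    greedy_action = eval_actor(tf_state)[0].numpy()
    state, reward, done, info = env.step(greedy_action)
    eval_reward += reward
    if done:
        break
print("Greedy evaluation reward:", eval_reward)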
|
keras-io/examples/rl/ipynb/ddpg_pendulum.ipynb/0
|
{
"file_path": "keras-io/examples/rl/ipynb/ddpg_pendulum.ipynb",
"repo_id": "keras-io",
"token_count": 4954
}
| 115 |
<jupyter_start><jupyter_text>A Transformer-based recommendation system**Author:** [Khalid Salama](https://www.linkedin.com/in/khalid-salama-24403144/)**Date created:** 2020/12/30**Last modified:** 2020/12/30**Description:** Rating rate prediction using the Behavior Sequence Transformer (BST) model on the Movielens. IntroductionThis example demonstrates the [Behavior Sequence Transformer (BST)](https://arxiv.org/abs/1905.06874)model, by Qiwei Chen et al., using the [Movielens dataset](https://grouplens.org/datasets/movielens/).The BST model leverages the sequential behaviour of the users in watching and rating movies,as well as user profile and movie features, to predict the rating of the user to a target movie.More precisely, the BST model aims to predict the rating of a target movie by acceptingthe following inputs:1. A fixed-length *sequence* of `movie_ids` watched by a user.2. A fixed-length *sequence* of the `ratings` for the movies watched by a user.3. A *set* of user features, including `user_id`, `sex`, `occupation`, and `age_group`.4. A *set* of `genres` for each movie in the input sequence and the target movie.5. A `target_movie_id` for which to predict the rating.This example modifies the original BST model in the following ways:1. We incorporate the movie features (genres) into the processing of the embedding of eachmovie of the input sequence and the target movie, rather than treating them as "other features"outside the transformer layer.2. We utilize the ratings of movies in the input sequence, along with the their positionsin the sequence, to update them before feeding them into the self-attention layer.Note that this example should be run with TensorFlow 2.4 or higher. The datasetWe use the [1M version of the Movielens dataset](https://grouplens.org/datasets/movielens/1m/).The dataset includes around 1 million ratings from 6000 users on 4000 movies,along with some user features, movie genres. In addition, the timestamp of each user-movierating is provided, which allows creating sequences of movie ratings for each user,as expected by the BST model. Setup<jupyter_code>import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import math
from zipfile import ZipFile
from urllib.request import urlretrieve
import keras
import numpy as np
import pandas as pd
import tensorflow as tf
from keras import layers
from keras.layers import StringLookup<jupyter_output><empty_output><jupyter_text>Prepare the data Download and prepare the DataFramesFirst, let's download the movielens data.The downloaded folder will contain three data files: `users.dat`, `movies.dat`,and `ratings.dat`.<jupyter_code>urlretrieve("http://files.grouplens.org/datasets/movielens/ml-1m.zip", "movielens.zip")
ZipFile("movielens.zip", "r").extractall()<jupyter_output><empty_output><jupyter_text>Then, we load the data into pandas DataFrames with their proper column names.<jupyter_code>users = pd.read_csv(
"ml-1m/users.dat",
sep="::",
names=["user_id", "sex", "age_group", "occupation", "zip_code"],
encoding="ISO-8859-1",
engine="python",
)
ratings = pd.read_csv(
"ml-1m/ratings.dat",
sep="::",
names=["user_id", "movie_id", "rating", "unix_timestamp"],
encoding="ISO-8859-1",
engine="python",
)
movies = pd.read_csv(
"ml-1m/movies.dat",
sep="::",
names=["movie_id", "title", "genres"],
encoding="ISO-8859-1",
engine="python",
)<jupyter_output><empty_output><jupyter_text>Here, we do some simple data processing to fix the data types of the columns.<jupyter_code>users["user_id"] = users["user_id"].apply(lambda x: f"user_{x}")
users["age_group"] = users["age_group"].apply(lambda x: f"group_{x}")
users["occupation"] = users["occupation"].apply(lambda x: f"occupation_{x}")
movies["movie_id"] = movies["movie_id"].apply(lambda x: f"movie_{x}")
ratings["movie_id"] = ratings["movie_id"].apply(lambda x: f"movie_{x}")
ratings["user_id"] = ratings["user_id"].apply(lambda x: f"user_{x}")
ratings["rating"] = ratings["rating"].apply(lambda x: float(x))<jupyter_output><empty_output><jupyter_text>Each movie has multiple genres. We split them into separate columns in the `movies`DataFrame.<jupyter_code>genres = ["Action", "Adventure", "Animation", "Children's", "Comedy", "Crime"]
genres += ["Documentary", "Drama", "Fantasy", "Film-Noir", "Horror", "Musical"]
genres += ["Mystery", "Romance", "Sci-Fi", "Thriller", "War", "Western"]
for genre in genres:
movies[genre] = movies["genres"].apply(
lambda values: int(genre in values.split("|"))
)<jupyter_output><empty_output><jupyter_text>Transform the movie ratings data into sequencesFirst, let's sort the the ratings data using the `unix_timestamp`, and then group the`movie_id` values and the `rating` values by `user_id`.The output DataFrame will have a record for each `user_id`, with two ordered lists(sorted by rating datetime): the movies they have rated, and their ratings of these movies.<jupyter_code>ratings_group = ratings.sort_values(by=["unix_timestamp"]).groupby("user_id")
ratings_data = pd.DataFrame(
data={
"user_id": list(ratings_group.groups.keys()),
"movie_ids": list(ratings_group.movie_id.apply(list)),
"ratings": list(ratings_group.rating.apply(list)),
"timestamps": list(ratings_group.unix_timestamp.apply(list)),
}
)<jupyter_output><empty_output><jupyter_text>Now, let's split the `movie_ids` list into a set of sequences of a fixed length.We do the same for the `ratings`. Set the `sequence_length` variable to change the lengthof the input sequence to the model. You can also change the `step_size` to control thenumber of sequences to generate for each user.<jupyter_code>sequence_length = 4
step_size = 2
def create_sequences(values, window_size, step_size):
sequences = []
start_index = 0
while True:
end_index = start_index + window_size
seq = values[start_index:end_index]
if len(seq) < window_size:
seq = values[-window_size:]
if len(seq) == window_size:
sequences.append(seq)
break
sequences.append(seq)
start_index += step_size
return sequences
ratings_data.movie_ids = ratings_data.movie_ids.apply(
lambda ids: create_sequences(ids, sequence_length, step_size)
)
ratings_data.ratings = ratings_data.ratings.apply(
lambda ids: create_sequences(ids, sequence_length, step_size)
)
del ratings_data["timestamps"]<jupyter_output><empty_output><jupyter_text>After that, we process the output to have each sequence in a separate records inthe DataFrame. In addition, we join the user features with the ratings data.<jupyter_code>ratings_data_movies = ratings_data[["user_id", "movie_ids"]].explode(
"movie_ids", ignore_index=True
)
ratings_data_rating = ratings_data[["ratings"]].explode("ratings", ignore_index=True)
ratings_data_transformed = pd.concat([ratings_data_movies, ratings_data_rating], axis=1)
ratings_data_transformed = ratings_data_transformed.join(
users.set_index("user_id"), on="user_id"
)
ratings_data_transformed.movie_ids = ratings_data_transformed.movie_ids.apply(
lambda x: ",".join(x)
)
ratings_data_transformed.ratings = ratings_data_transformed.ratings.apply(
lambda x: ",".join([str(v) for v in x])
)
del ratings_data_transformed["zip_code"]
ratings_data_transformed.rename(
columns={"movie_ids": "sequence_movie_ids", "ratings": "sequence_ratings"},
inplace=True,
)<jupyter_output><empty_output><jupyter_text>With `sequence_length` of 4 and `step_size` of 2, we end up with 498,623 sequences.Finally, we split the data into training and testing splits, with 85% and 15% ofthe instances, respectively, and store them to CSV files.<jupyter_code>random_selection = np.random.rand(len(ratings_data_transformed.index)) <= 0.85
train_data = ratings_data_transformed[random_selection]
test_data = ratings_data_transformed[~random_selection]
train_data.to_csv("train_data.csv", index=False, sep="|", header=False)
test_data.to_csv("test_data.csv", index=False, sep="|", header=False)<jupyter_output><empty_output><jupyter_text>Define metadata<jupyter_code>CSV_HEADER = list(ratings_data_transformed.columns)
CATEGORICAL_FEATURES_WITH_VOCABULARY = {
"user_id": list(users.user_id.unique()),
"movie_id": list(movies.movie_id.unique()),
"sex": list(users.sex.unique()),
"age_group": list(users.age_group.unique()),
"occupation": list(users.occupation.unique()),
}
USER_FEATURES = ["sex", "age_group", "occupation"]
MOVIE_FEATURES = ["genres"]<jupyter_output><empty_output><jupyter_text>Create `tf.data.Dataset` for training and evaluation<jupyter_code>def get_dataset_from_csv(csv_file_path, shuffle=False, batch_size=128):
def process(features):
movie_ids_string = features["sequence_movie_ids"]
sequence_movie_ids = tf.strings.split(movie_ids_string, ",").to_tensor()
# The last movie id in the sequence is the target movie.
features["target_movie_id"] = sequence_movie_ids[:, -1]
features["sequence_movie_ids"] = sequence_movie_ids[:, :-1]
ratings_string = features["sequence_ratings"]
sequence_ratings = tf.strings.to_number(
tf.strings.split(ratings_string, ","), tf.dtypes.float32
).to_tensor()
# The last rating in the sequence is the target for the model to predict.
target = sequence_ratings[:, -1]
features["sequence_ratings"] = sequence_ratings[:, :-1]
return features, target
dataset = tf.data.experimental.make_csv_dataset(
csv_file_path,
batch_size=batch_size,
column_names=CSV_HEADER,
num_epochs=1,
header=False,
field_delim="|",
shuffle=shuffle,
).map(process)
return dataset<jupyter_output><empty_output><jupyter_text>Create model inputs<jupyter_code>def create_model_inputs():
return {
"user_id": keras.Input(name="user_id", shape=(1,), dtype="string"),
"sequence_movie_ids": keras.Input(
name="sequence_movie_ids", shape=(sequence_length - 1,), dtype="string"
),
"target_movie_id": keras.Input(
name="target_movie_id", shape=(1,), dtype="string"
),
"sequence_ratings": keras.Input(
name="sequence_ratings", shape=(sequence_length - 1,), dtype=tf.float32
),
"sex": keras.Input(name="sex", shape=(1,), dtype="string"),
"age_group": keras.Input(name="age_group", shape=(1,), dtype="string"),
"occupation": keras.Input(name="occupation", shape=(1,), dtype="string"),
}<jupyter_output><empty_output><jupyter_text>Encode input featuresThe `encode_input_features` method works as follows:1. Each categorical user feature is encoded using `layers.Embedding`, with embeddingdimension equals to the square root of the vocabulary size of the feature.The embeddings of these features are concatenated to form a single input tensor.2. Each movie in the movie sequence and the target movie is encoded `layers.Embedding`,where the dimension size is the square root of the number of movies.3. A multi-hot genres vector for each movie is concatenated with its embedding vector,and processed using a non-linear `layers.Dense` to output a vector of the same movieembedding dimensions.4. A positional embedding is added to each movie embedding in the sequence, and thenmultiplied by its rating from the ratings sequence.5. The target movie embedding is concatenated to the sequence movie embeddings, producinga tensor with the shape of `[batch size, sequence length, embedding size]`, as expectedby the attention layer for the transformer architecture.6. The method returns a tuple of two elements: `encoded_transformer_features` and`encoded_other_features`.<jupyter_code>def encode_input_features(
inputs,
include_user_id=True,
include_user_features=True,
include_movie_features=True,
):
encoded_transformer_features = []
encoded_other_features = []
other_feature_names = []
if include_user_id:
other_feature_names.append("user_id")
if include_user_features:
other_feature_names.extend(USER_FEATURES)
## Encode user features
for feature_name in other_feature_names:
# Convert the string input values into integer indices.
vocabulary = CATEGORICAL_FEATURES_WITH_VOCABULARY[feature_name]
idx = StringLookup(vocabulary=vocabulary, mask_token=None, num_oov_indices=0)(
inputs[feature_name]
)
# Compute embedding dimensions
embedding_dims = int(math.sqrt(len(vocabulary)))
# Create an embedding layer with the specified dimensions.
embedding_encoder = layers.Embedding(
input_dim=len(vocabulary),
output_dim=embedding_dims,
name=f"{feature_name}_embedding",
)
# Convert the index values to embedding representations.
encoded_other_features.append(embedding_encoder(idx))
## Create a single embedding vector for the user features
if len(encoded_other_features) > 1:
encoded_other_features = layers.concatenate(encoded_other_features)
elif len(encoded_other_features) == 1:
encoded_other_features = encoded_other_features[0]
else:
encoded_other_features = None
## Create a movie embedding encoder
movie_vocabulary = CATEGORICAL_FEATURES_WITH_VOCABULARY["movie_id"]
movie_embedding_dims = int(math.sqrt(len(movie_vocabulary)))
# Create a lookup to convert string values to integer indices.
movie_index_lookup = StringLookup(
vocabulary=movie_vocabulary,
mask_token=None,
num_oov_indices=0,
name="movie_index_lookup",
)
# Create an embedding layer with the specified dimensions.
movie_embedding_encoder = layers.Embedding(
input_dim=len(movie_vocabulary),
output_dim=movie_embedding_dims,
name=f"movie_embedding",
)
# Create a vector lookup for movie genres.
genre_vectors = movies[genres].to_numpy()
movie_genres_lookup = layers.Embedding(
input_dim=genre_vectors.shape[0],
output_dim=genre_vectors.shape[1],
embeddings_initializer=keras.initializers.Constant(genre_vectors),
trainable=False,
name="genres_vector",
)
# Create a processing layer for genres.
movie_embedding_processor = layers.Dense(
units=movie_embedding_dims,
activation="relu",
name="process_movie_embedding_with_genres",
)
## Define a function to encode a given movie id.
def encode_movie(movie_id):
# Convert the string input values into integer indices.
movie_idx = movie_index_lookup(movie_id)
movie_embedding = movie_embedding_encoder(movie_idx)
encoded_movie = movie_embedding
if include_movie_features:
movie_genres_vector = movie_genres_lookup(movie_idx)
encoded_movie = movie_embedding_processor(
layers.concatenate([movie_embedding, movie_genres_vector])
)
return encoded_movie
## Encoding target_movie_id
target_movie_id = inputs["target_movie_id"]
encoded_target_movie = encode_movie(target_movie_id)
## Encoding sequence movie_ids.
sequence_movies_ids = inputs["sequence_movie_ids"]
encoded_sequence_movies = encode_movie(sequence_movies_ids)
# Create positional embedding.
position_embedding_encoder = layers.Embedding(
input_dim=sequence_length,
output_dim=movie_embedding_dims,
name="position_embedding",
)
positions = tf.range(start=0, limit=sequence_length - 1, delta=1)
encodded_positions = position_embedding_encoder(positions)
# Retrieve sequence ratings to incorporate them into the encoding of the movie.
sequence_ratings = inputs["sequence_ratings"]
sequence_ratings = keras.ops.expand_dims(sequence_ratings, -1)
# Add the positional encoding to the movie encodings and multiply them by rating.
encoded_sequence_movies_with_poistion_and_rating = layers.Multiply()(
[(encoded_sequence_movies + encodded_positions), sequence_ratings]
)
# Construct the transformer inputs.
for i in range(sequence_length - 1):
feature = encoded_sequence_movies_with_poistion_and_rating[:, i, ...]
feature = keras.ops.expand_dims(feature, 1)
encoded_transformer_features.append(feature)
encoded_transformer_features.append(encoded_target_movie)
encoded_transformer_features = layers.concatenate(
encoded_transformer_features, axis=1
)
return encoded_transformer_features, encoded_other_features<jupyter_output><empty_output><jupyter_text>Create a BST model<jupyter_code>include_user_id = False
include_user_features = False
include_movie_features = False
hidden_units = [256, 128]
dropout_rate = 0.1
num_heads = 3
def create_model():
inputs = create_model_inputs()
transformer_features, other_features = encode_input_features(
inputs, include_user_id, include_user_features, include_movie_features
)
# Create a multi-headed attention layer.
attention_output = layers.MultiHeadAttention(
num_heads=num_heads, key_dim=transformer_features.shape[2], dropout=dropout_rate
)(transformer_features, transformer_features)
# Transformer block.
attention_output = layers.Dropout(dropout_rate)(attention_output)
x1 = layers.Add()([transformer_features, attention_output])
x1 = layers.LayerNormalization()(x1)
x2 = layers.LeakyReLU()(x1)
x2 = layers.Dense(units=x2.shape[-1])(x2)
x2 = layers.Dropout(dropout_rate)(x2)
transformer_features = layers.Add()([x1, x2])
transformer_features = layers.LayerNormalization()(transformer_features)
features = layers.Flatten()(transformer_features)
    # Include the other features.
if other_features is not None:
features = layers.concatenate(
[features, layers.Reshape([other_features.shape[-1]])(other_features)]
)
# Fully-connected layers.
for num_units in hidden_units:
features = layers.Dense(num_units)(features)
features = layers.BatchNormalization()(features)
features = layers.LeakyReLU()(features)
features = layers.Dropout(dropout_rate)(features)
outputs = layers.Dense(units=1)(features)
model = keras.Model(inputs=inputs, outputs=outputs)
return model
model = create_model()<jupyter_output><empty_output><jupyter_text>Run training and evaluation experiment<jupyter_code># Compile the model.
model.compile(
optimizer=keras.optimizers.Adagrad(learning_rate=0.01),
loss=keras.losses.MeanSquaredError(),
metrics=[keras.metrics.MeanAbsoluteError()],
)
# Read the training data.
train_dataset = get_dataset_from_csv("train_data.csv", shuffle=True, batch_size=265)
# Fit the model with the training data.
model.fit(train_dataset, epochs=5)
# Read the test data.
test_dataset = get_dataset_from_csv("test_data.csv", batch_size=265)
# Evaluate the model on the test data.
_, mae = model.evaluate(test_dataset, verbose=0)
print(f"Test MAE: {round(mae, 3)}")<jupyter_output><empty_output>
|
keras-io/examples/structured_data/ipynb/movielens_recommendations_transformers.ipynb/0
|
{
"file_path": "keras-io/examples/structured_data/ipynb/movielens_recommendations_transformers.ipynb",
"repo_id": "keras-io",
"token_count": 6945
}
| 116 |
"""
Title: A Transformer-based recommendation system
Author: [Khalid Salama](https://www.linkedin.com/in/khalid-salama-24403144/)
Date created: 2020/12/30
Last modified: 2020/12/30
Description: Rating prediction using the Behavior Sequence Transformer (BST) model on the Movielens dataset.
Accelerator: GPU
"""
"""
## Introduction
This example demonstrates the [Behavior Sequence Transformer (BST)](https://arxiv.org/abs/1905.06874)
model, by Qiwei Chen et al., using the [Movielens dataset](https://grouplens.org/datasets/movielens/).
The BST model leverages the sequential behaviour of the users in watching and rating movies,
as well as user profile and movie features, to predict the rating of the user to a target movie.
More precisely, the BST model aims to predict the rating of a target movie by accepting
the following inputs:
1. A fixed-length *sequence* of `movie_ids` watched by a user.
2. A fixed-length *sequence* of the `ratings` for the movies watched by a user.
3. A *set* of user features, including `user_id`, `sex`, `occupation`, and `age_group`.
4. A *set* of `genres` for each movie in the input sequence and the target movie.
5. A `target_movie_id` for which to predict the rating.
This example modifies the original BST model in the following ways:
1. We incorporate the movie features (genres) into the processing of the embedding of each
movie of the input sequence and the target movie, rather than treating them as "other features"
outside the transformer layer.
2. We utilize the ratings of movies in the input sequence, along with their positions
in the sequence, to update them before feeding them into the self-attention layer.
Note that this example should be run with TensorFlow 2.4 or higher.
"""
"""
## The dataset
We use the [1M version of the Movielens dataset](https://grouplens.org/datasets/movielens/1m/).
The dataset includes around 1 million ratings from 6000 users on 4000 movies,
along with some user features and movie genres. In addition, the timestamp of each user-movie
rating is provided, which allows creating sequences of movie ratings for each user,
as expected by the BST model.
"""
"""
## Setup
"""
import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import math
from zipfile import ZipFile
from urllib.request import urlretrieve
import keras
import numpy as np
import pandas as pd
import tensorflow as tf
from keras import layers
from keras.layers import StringLookup
"""
## Prepare the data
### Download and prepare the DataFrames
First, let's download the movielens data.
The downloaded folder will contain three data files: `users.dat`, `movies.dat`,
and `ratings.dat`.
"""
urlretrieve("http://files.grouplens.org/datasets/movielens/ml-1m.zip", "movielens.zip")
ZipFile("movielens.zip", "r").extractall()
"""
Then, we load the data into pandas DataFrames with their proper column names.
"""
users = pd.read_csv(
"ml-1m/users.dat",
sep="::",
names=["user_id", "sex", "age_group", "occupation", "zip_code"],
encoding="ISO-8859-1",
engine="python",
)
ratings = pd.read_csv(
"ml-1m/ratings.dat",
sep="::",
names=["user_id", "movie_id", "rating", "unix_timestamp"],
encoding="ISO-8859-1",
engine="python",
)
movies = pd.read_csv(
"ml-1m/movies.dat",
sep="::",
names=["movie_id", "title", "genres"],
encoding="ISO-8859-1",
engine="python",
)
"""
Here, we do some simple data processing to fix the data types of the columns.
"""
users["user_id"] = users["user_id"].apply(lambda x: f"user_{x}")
users["age_group"] = users["age_group"].apply(lambda x: f"group_{x}")
users["occupation"] = users["occupation"].apply(lambda x: f"occupation_{x}")
movies["movie_id"] = movies["movie_id"].apply(lambda x: f"movie_{x}")
ratings["movie_id"] = ratings["movie_id"].apply(lambda x: f"movie_{x}")
ratings["user_id"] = ratings["user_id"].apply(lambda x: f"user_{x}")
ratings["rating"] = ratings["rating"].apply(lambda x: float(x))
"""
Each movie has multiple genres. We split them into separate columns in the `movies`
DataFrame.
"""
genres = ["Action", "Adventure", "Animation", "Children's", "Comedy", "Crime"]
genres += ["Documentary", "Drama", "Fantasy", "Film-Noir", "Horror", "Musical"]
genres += ["Mystery", "Romance", "Sci-Fi", "Thriller", "War", "Western"]
for genre in genres:
movies[genre] = movies["genres"].apply(
lambda values: int(genre in values.split("|"))
)
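"""
To see the result of this encoding (a small addition to the original example),
we can print a few of the new binary genre columns:
"""
print(movies[["movie_id", "Action", "Comedy", "Drama"]].head(3))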
"""
### Transform the movie ratings data into sequences
First, let's sort the ratings data using the `unix_timestamp`, and then group the
`movie_id` values and the `rating` values by `user_id`.
The output DataFrame will have a record for each `user_id`, with two ordered lists
(sorted by rating datetime): the movies they have rated, and their ratings of these movies.
"""
ratings_group = ratings.sort_values(by=["unix_timestamp"]).groupby("user_id")
ratings_data = pd.DataFrame(
data={
"user_id": list(ratings_group.groups.keys()),
"movie_ids": list(ratings_group.movie_id.apply(list)),
"ratings": list(ratings_group.rating.apply(list)),
"timestamps": list(ratings_group.unix_timestamp.apply(list)),
}
)
"""
Now, let's split the `movie_ids` list into a set of sequences of a fixed length.
We do the same for the `ratings`. Set the `sequence_length` variable to change the length
of the input sequence to the model. You can also change the `step_size` to control the
number of sequences to generate for each user.
"""
sequence_length = 4
step_size = 2
def create_sequences(values, window_size, step_size):
sequences = []
start_index = 0
while True:
end_index = start_index + window_size
seq = values[start_index:end_index]
if len(seq) < window_size:
seq = values[-window_size:]
if len(seq) == window_size:
sequences.append(seq)
break
sequences.append(seq)
start_index += step_size
return sequences
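"""
As a quick illustration (not part of the original pipeline), here is what
`create_sequences` returns for a toy list of 7 ids with a window of 4 and a
step of 2. Note how the last window is taken from the end of the list so that
every sequence has the same length:
"""
print(create_sequences([1, 2, 3, 4, 5, 6, 7], window_size=4, step_size=2))
# [[1, 2, 3, 4], [3, 4, 5, 6], [4, 5, 6, 7]]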
ratings_data.movie_ids = ratings_data.movie_ids.apply(
lambda ids: create_sequences(ids, sequence_length, step_size)
)
ratings_data.ratings = ratings_data.ratings.apply(
lambda ids: create_sequences(ids, sequence_length, step_size)
)
del ratings_data["timestamps"]
"""
After that, we process the output to have each sequence in a separate record in
the DataFrame. In addition, we join the user features with the ratings data.
"""
ratings_data_movies = ratings_data[["user_id", "movie_ids"]].explode(
"movie_ids", ignore_index=True
)
ratings_data_rating = ratings_data[["ratings"]].explode("ratings", ignore_index=True)
ratings_data_transformed = pd.concat([ratings_data_movies, ratings_data_rating], axis=1)
ratings_data_transformed = ratings_data_transformed.join(
users.set_index("user_id"), on="user_id"
)
ratings_data_transformed.movie_ids = ratings_data_transformed.movie_ids.apply(
lambda x: ",".join(x)
)
ratings_data_transformed.ratings = ratings_data_transformed.ratings.apply(
lambda x: ",".join([str(v) for v in x])
)
del ratings_data_transformed["zip_code"]
ratings_data_transformed.rename(
columns={"movie_ids": "sequence_movie_ids", "ratings": "sequence_ratings"},
inplace=True,
)
"""
With `sequence_length` of 4 and `step_size` of 2, we end up with 498,623 sequences.
Finally, we split the data into training and testing splits, with 85% and 15% of
the instances, respectively, and store them to CSV files.
"""
random_selection = np.random.rand(len(ratings_data_transformed.index)) <= 0.85
train_data = ratings_data_transformed[random_selection]
test_data = ratings_data_transformed[~random_selection]
train_data.to_csv("train_data.csv", index=False, sep="|", header=False)
test_data.to_csv("test_data.csv", index=False, sep="|", header=False)
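"""
As a quick sanity check (an addition to the original example), print the size
of each split; the exact counts vary slightly between runs because the split is
random:
"""
print(f"Train instances: {len(train_data)}, test instances: {len(test_data)}")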
"""
## Define metadata
"""
CSV_HEADER = list(ratings_data_transformed.columns)
CATEGORICAL_FEATURES_WITH_VOCABULARY = {
"user_id": list(users.user_id.unique()),
"movie_id": list(movies.movie_id.unique()),
"sex": list(users.sex.unique()),
"age_group": list(users.age_group.unique()),
"occupation": list(users.occupation.unique()),
}
USER_FEATURES = ["sex", "age_group", "occupation"]
MOVIE_FEATURES = ["genres"]
"""
## Create `tf.data.Dataset` for training and evaluation
"""
def get_dataset_from_csv(csv_file_path, shuffle=False, batch_size=128):
def process(features):
movie_ids_string = features["sequence_movie_ids"]
sequence_movie_ids = tf.strings.split(movie_ids_string, ",").to_tensor()
# The last movie id in the sequence is the target movie.
features["target_movie_id"] = sequence_movie_ids[:, -1]
features["sequence_movie_ids"] = sequence_movie_ids[:, :-1]
ratings_string = features["sequence_ratings"]
sequence_ratings = tf.strings.to_number(
tf.strings.split(ratings_string, ","), tf.dtypes.float32
).to_tensor()
# The last rating in the sequence is the target for the model to predict.
target = sequence_ratings[:, -1]
features["sequence_ratings"] = sequence_ratings[:, :-1]
return features, target
dataset = tf.data.experimental.make_csv_dataset(
csv_file_path,
batch_size=batch_size,
column_names=CSV_HEADER,
num_epochs=1,
header=False,
field_delim="|",
shuffle=shuffle,
).map(process)
return dataset
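"""
To sanity-check the input pipeline (an addition to the original example), we
can pull a single small batch and inspect the feature names and target shape:
"""
for features, target in get_dataset_from_csv("train_data.csv", batch_size=4).take(1):
    print(sorted(features.keys()))
    print(target.shape)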
"""
## Create model inputs
"""
def create_model_inputs():
return {
"user_id": keras.Input(name="user_id", shape=(1,), dtype="string"),
"sequence_movie_ids": keras.Input(
name="sequence_movie_ids", shape=(sequence_length - 1,), dtype="string"
),
"target_movie_id": keras.Input(
name="target_movie_id", shape=(1,), dtype="string"
),
"sequence_ratings": keras.Input(
name="sequence_ratings", shape=(sequence_length - 1,), dtype=tf.float32
),
"sex": keras.Input(name="sex", shape=(1,), dtype="string"),
"age_group": keras.Input(name="age_group", shape=(1,), dtype="string"),
"occupation": keras.Input(name="occupation", shape=(1,), dtype="string"),
}
"""
## Encode input features
The `encode_input_features` method works as follows:
1. Each categorical user feature is encoded using `layers.Embedding`, with embedding
dimension equal to the square root of the vocabulary size of the feature.
The embeddings of these features are concatenated to form a single input tensor.
2. Each movie in the movie sequence and the target movie is encoded using `layers.Embedding`,
where the dimension size is the square root of the number of movies.
3. A multi-hot genres vector for each movie is concatenated with its embedding vector,
and processed using a non-linear `layers.Dense` to output a vector of the same movie
embedding dimensions.
4. A positional embedding is added to each movie embedding in the sequence, and then
multiplied by its rating from the ratings sequence.
5. The target movie embedding is concatenated to the sequence movie embeddings, producing
a tensor with the shape of `[batch size, sequence length, embedding size]`, as expected
by the attention layer for the transformer architecture.
6. The method returns a tuple of two elements: `encoded_transformer_features` and
`encoded_other_features`.
"""
def encode_input_features(
inputs,
include_user_id=True,
include_user_features=True,
include_movie_features=True,
):
encoded_transformer_features = []
encoded_other_features = []
other_feature_names = []
if include_user_id:
other_feature_names.append("user_id")
if include_user_features:
other_feature_names.extend(USER_FEATURES)
## Encode user features
for feature_name in other_feature_names:
# Convert the string input values into integer indices.
vocabulary = CATEGORICAL_FEATURES_WITH_VOCABULARY[feature_name]
idx = StringLookup(vocabulary=vocabulary, mask_token=None, num_oov_indices=0)(
inputs[feature_name]
)
# Compute embedding dimensions
embedding_dims = int(math.sqrt(len(vocabulary)))
# Create an embedding layer with the specified dimensions.
embedding_encoder = layers.Embedding(
input_dim=len(vocabulary),
output_dim=embedding_dims,
name=f"{feature_name}_embedding",
)
# Convert the index values to embedding representations.
encoded_other_features.append(embedding_encoder(idx))
## Create a single embedding vector for the user features
if len(encoded_other_features) > 1:
encoded_other_features = layers.concatenate(encoded_other_features)
elif len(encoded_other_features) == 1:
encoded_other_features = encoded_other_features[0]
else:
encoded_other_features = None
## Create a movie embedding encoder
movie_vocabulary = CATEGORICAL_FEATURES_WITH_VOCABULARY["movie_id"]
movie_embedding_dims = int(math.sqrt(len(movie_vocabulary)))
# Create a lookup to convert string values to integer indices.
movie_index_lookup = StringLookup(
vocabulary=movie_vocabulary,
mask_token=None,
num_oov_indices=0,
name="movie_index_lookup",
)
# Create an embedding layer with the specified dimensions.
movie_embedding_encoder = layers.Embedding(
input_dim=len(movie_vocabulary),
output_dim=movie_embedding_dims,
name=f"movie_embedding",
)
# Create a vector lookup for movie genres.
genre_vectors = movies[genres].to_numpy()
movie_genres_lookup = layers.Embedding(
input_dim=genre_vectors.shape[0],
output_dim=genre_vectors.shape[1],
embeddings_initializer=keras.initializers.Constant(genre_vectors),
trainable=False,
name="genres_vector",
)
# Create a processing layer for genres.
movie_embedding_processor = layers.Dense(
units=movie_embedding_dims,
activation="relu",
name="process_movie_embedding_with_genres",
)
## Define a function to encode a given movie id.
def encode_movie(movie_id):
# Convert the string input values into integer indices.
movie_idx = movie_index_lookup(movie_id)
movie_embedding = movie_embedding_encoder(movie_idx)
encoded_movie = movie_embedding
if include_movie_features:
movie_genres_vector = movie_genres_lookup(movie_idx)
encoded_movie = movie_embedding_processor(
layers.concatenate([movie_embedding, movie_genres_vector])
)
return encoded_movie
## Encoding target_movie_id
target_movie_id = inputs["target_movie_id"]
encoded_target_movie = encode_movie(target_movie_id)
## Encoding sequence movie_ids.
sequence_movies_ids = inputs["sequence_movie_ids"]
encoded_sequence_movies = encode_movie(sequence_movies_ids)
# Create positional embedding.
position_embedding_encoder = layers.Embedding(
input_dim=sequence_length,
output_dim=movie_embedding_dims,
name="position_embedding",
)
positions = tf.range(start=0, limit=sequence_length - 1, delta=1)
encodded_positions = position_embedding_encoder(positions)
# Retrieve sequence ratings to incorporate them into the encoding of the movie.
sequence_ratings = inputs["sequence_ratings"]
sequence_ratings = keras.ops.expand_dims(sequence_ratings, -1)
# Add the positional encoding to the movie encodings and multiply them by rating.
encoded_sequence_movies_with_poistion_and_rating = layers.Multiply()(
[(encoded_sequence_movies + encodded_positions), sequence_ratings]
)
# Construct the transformer inputs.
for i in range(sequence_length - 1):
feature = encoded_sequence_movies_with_poistion_and_rating[:, i, ...]
feature = keras.ops.expand_dims(feature, 1)
encoded_transformer_features.append(feature)
encoded_transformer_features.append(encoded_target_movie)
encoded_transformer_features = layers.concatenate(
encoded_transformer_features, axis=1
)
return encoded_transformer_features, encoded_other_features
"""
## Create a BST model
"""
include_user_id = False
include_user_features = False
include_movie_features = False
hidden_units = [256, 128]
dropout_rate = 0.1
num_heads = 3
def create_model():
inputs = create_model_inputs()
transformer_features, other_features = encode_input_features(
inputs, include_user_id, include_user_features, include_movie_features
)
# Create a multi-headed attention layer.
attention_output = layers.MultiHeadAttention(
num_heads=num_heads, key_dim=transformer_features.shape[2], dropout=dropout_rate
)(transformer_features, transformer_features)
# Transformer block.
attention_output = layers.Dropout(dropout_rate)(attention_output)
x1 = layers.Add()([transformer_features, attention_output])
x1 = layers.LayerNormalization()(x1)
x2 = layers.LeakyReLU()(x1)
x2 = layers.Dense(units=x2.shape[-1])(x2)
x2 = layers.Dropout(dropout_rate)(x2)
transformer_features = layers.Add()([x1, x2])
transformer_features = layers.LayerNormalization()(transformer_features)
features = layers.Flatten()(transformer_features)
    # Include the other features.
if other_features is not None:
features = layers.concatenate(
[features, layers.Reshape([other_features.shape[-1]])(other_features)]
)
# Fully-connected layers.
for num_units in hidden_units:
features = layers.Dense(num_units)(features)
features = layers.BatchNormalization()(features)
features = layers.LeakyReLU()(features)
features = layers.Dropout(dropout_rate)(features)
outputs = layers.Dense(units=1)(features)
model = keras.Model(inputs=inputs, outputs=outputs)
return model
model = create_model()
"""
## Run training and evaluation experiment
"""
# Compile the model.
model.compile(
optimizer=keras.optimizers.Adagrad(learning_rate=0.01),
loss=keras.losses.MeanSquaredError(),
metrics=[keras.metrics.MeanAbsoluteError()],
)
# Read the training data.
train_dataset = get_dataset_from_csv("train_data.csv", shuffle=True, batch_size=265)
# Fit the model with the training data.
model.fit(train_dataset, epochs=5)
# Read the test data.
test_dataset = get_dataset_from_csv("test_data.csv", batch_size=265)
# Evaluate the model on the test data.
_, mae = model.evaluate(test_dataset, verbose=0)
print(f"Test MAE: {round(mae, 3)}")
"""
You should achieve a Mean Absolute Error (MAE) at or around 0.7 on the test data.
"""
"""
## Conclusion
The BST model uses the Transformer layer in its architecture to capture the sequential signals underlying
users’ behavior sequences for recommendation.
You can try training this model with different configurations, for example, by increasing
the input sequence length and training the model for a larger number of epochs. In addition,
you can try including other features like movie release year and customer
zipcode, and including cross features like sex X genre.
"""
|
keras-io/examples/structured_data/movielens_recommendations_transformers.py/0
|
{
"file_path": "keras-io/examples/structured_data/movielens_recommendations_transformers.py",
"repo_id": "keras-io",
"token_count": 6887
}
| 117 |
<jupyter_start><jupyter_text>Event classification for payment card fraud detection**Author:** [achoum](https://github.com/achoum/)**Date created:** 2024/02/01**Last modified:** 2024/02/01**Description:** Detection of fraudulent payment card transactions using Temporian and a feed-forward neural network. This notebook depends on Keras 3, Temporian, and a few other libraries. You can install them as follows:```shellpip install temporian keras pandas tf-nightly scikit-learn -U```<jupyter_code>import keras # To train the Machine Learning model
import temporian as tp # To convert transactions into tabular data
import numpy as np
import os
import pandas as pd
import datetime
import math
import tensorflow as tf
from sklearn.metrics import RocCurveDisplay<jupyter_output><empty_output><jupyter_text>IntroductionPayment fraud detection is critical for banks, businesses, and consumers. InEurope alone, fraudulent transactions were estimated at[€1.89 billion in 2019](https://www.ecb.europa.eu/pub/pdf/cardfraud/ecb.cardfraudreport202110~cac4c418e8.en.pdf).Worldwide, approximately[3.6%](https://www.cybersource.com/content/dam/documents/campaign/fraud-report/global-fraud-report-2022.pdf)of commerce revenue is lost to fraud. In this notebook, we train and evaluate amodel to detect fraudulent transactions using the synthetic dataset attached tothe book[Reproducible Machine Learning for Credit Card Fraud Detection](https://fraud-detection-handbook.github.io/fraud-detection-handbook/Foreword.html)by Le Borgne et al.Fraudulent transactions often cannot be detected by looking at transactions inisolation. Instead, fraudulent transactions are detected by looking at patternsacross multiple transactions from the same user, to the same merchant, or withother types of relationships. To express these relationships in a way that isunderstandable by a machine learning model, and to augment features with feature engineering, we We use the [Temporian](https://temporian.readthedocs.io/en/latest) preprocessing library.We preprocess a transaction dataset into a tabular dataset and use afeed-forward neural network to learn the patterns of fraud and make predictions. Loading the datasetThe dataset contains payment transactions sampled between April 1, 2018 andSeptember 30, 2018. The transactions are stored in CSV files, one for each day.**Note:** Downloading the dataset takes ~1 minute.<jupyter_code>start_date = datetime.date(2018, 4, 1)
end_date = datetime.date(2018, 9, 30)
# Load the dataset as a Pandas dataframe.
cache_path = "fraud_detection_cache.csv"
if not os.path.exists(cache_path):
print("Download dataset")
dataframes = []
num_files = (end_date - start_date).days
counter = 0
while start_date <= end_date:
if counter % (num_files // 10) == 0:
print(f"[{100 * (counter+1) // num_files}%]", end="", flush=True)
print(".", end="", flush=True)
url = f"https://github.com/Fraud-Detection-Handbook/simulated-data-raw/raw/6e67dbd0a3bfe0d7ec33abc4bce5f37cd4ff0d6a/data/{start_date}.pkl"
dataframes.append(pd.read_pickle(url))
start_date += datetime.timedelta(days=1)
counter += 1
print("done", flush=True)
transactions_dataframe = pd.concat(dataframes)
transactions_dataframe.to_csv(cache_path, index=False)
else:
print("Load dataset from cache")
transactions_dataframe = pd.read_csv(
cache_path, dtype={"CUSTOMER_ID": bytes, "TERMINAL_ID": bytes}
)
print(f"Found {len(transactions_dataframe)} transactions")<jupyter_output><empty_output><jupyter_text>Each transaction is represented by a single row, with the following columns ofinterest:- **TX_DATETIME**: The date and time of the transaction.- **CUSTOMER_ID**: The unique identifier of the customer.- **TERMINAL_ID**: The identifier of the terminal where the transaction was made.- **TX_AMOUNT**: The amount of the transaction.- **TX_FRAUD**: Whether the transaction is fraudulent (1) or not (0).<jupyter_code>transactions_dataframe = transactions_dataframe[
["TX_DATETIME", "CUSTOMER_ID", "TERMINAL_ID", "TX_AMOUNT", "TX_FRAUD"]
]
transactions_dataframe.head(4)<jupyter_output><empty_output><jupyter_text>The dataset is highly imbalanced, with the majority of transactions beinglegitimate.<jupyter_code>fraudulent_rate = transactions_dataframe["TX_FRAUD"].mean()
print("Rate of fraudulent transactions:", fraudulent_rate)<jupyter_output><empty_output><jupyter_text>The[pandas dataframe](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.html)is converted into a[Temporian EventSet](https://temporian.readthedocs.io/en/latest/reference/temporian/EventSet/),which is better suited for the data exploration and feature preprocessing of the next steps.<jupyter_code>transactions_evset = tp.from_pandas(transactions_dataframe, timestamps="TX_DATETIME")
transactions_evset<jupyter_output><empty_output><jupyter_text>It is possible to plot the entire dataset, but the resulting plot will bedifficult to read. Instead, we can group the transactions per client.<jupyter_code>transactions_evset.add_index("CUSTOMER_ID").plot(indexes="3774")<jupyter_output><empty_output><jupyter_text>Note the few fraudulent transactions for this client. Preparing the training dataFraudulent transactions in isolation cannot be detected. Instead, we need toconnect related transactions. For each transaction, we compute the sum and countof transactions for the same terminal in the last `n` days. Because we don'tknow the correct value for `n`, we use multiple values for `n` and compute aset of features for each of them.<jupyter_code># Group the transactions per terminal
transactions_per_terminal = transactions_evset.add_index("TERMINAL_ID")
# Moving statistics per terminal
tmp_features = []
for n in [7, 14, 28]:
tmp_features.append(
transactions_per_terminal["TX_AMOUNT"]
.moving_sum(tp.duration.days(n))
.rename(f"sum_transactions_{n}_days")
)
tmp_features.append(
transactions_per_terminal.moving_count(tp.duration.days(n)).rename(
f"count_transactions_{n}_days"
)
)
feature_set_1 = tp.glue(*tmp_features)
feature_set_1<jupyter_output><empty_output><jupyter_text>Let's look at the features of terminal "3774".<jupyter_code>feature_set_1.plot(indexes="3774")<jupyter_output><empty_output><jupyter_text>A transaction's fraudulent status is not known at the time of the transaction(otherwise, there would be no problem). However, the banks knows if atransacation is fraudulent one week after it is made. We create a set offeatures that indicate the number and ratio of fraudulent transactions in thelast N days.<jupyter_code># Lag the transactions by one week.
lagged_transactions = transactions_per_terminal.lag(tp.duration.weeks(1))
# Moving fraud statistics per terminal (computed on the lagged transactions)
tmp_features = []
for n in [7, 14, 28]:
tmp_features.append(
lagged_transactions["TX_FRAUD"]
.moving_sum(tp.duration.days(n), sampling=transactions_per_terminal)
.rename(f"count_fraud_transactions_{n}_days")
)
tmp_features.append(
lagged_transactions["TX_FRAUD"]
.cast(tp.float32)
.simple_moving_average(tp.duration.days(n), sampling=transactions_per_terminal)
.rename(f"rate_fraud_transactions_{n}_days")
)
feature_set_2 = tp.glue(*tmp_features)<jupyter_output><empty_output><jupyter_text>Transaction date and time can be correlated with fraud. While each transactionhas a timestamp, a machine learning model might struggle to consume themdirectly. Instead, we extract various informative calendar features from thetimestamps, such as hour, day of the week (e.g., Monday, Tuesday), and day ofthe month (1-31).<jupyter_code>feature_set_3 = tp.glue(
transactions_per_terminal.calendar_hour(),
transactions_per_terminal.calendar_day_of_week(),
)<jupyter_output><empty_output><jupyter_text>Finally, we group together all the features and the label.<jupyter_code>all_data = tp.glue(
transactions_per_terminal, feature_set_1, feature_set_2, feature_set_3
).drop_index()
print("All the available features:")
all_data.schema.feature_names()<jupyter_output><empty_output><jupyter_text>We extract the name of the input features.<jupyter_code>input_feature_names = [k for k in all_data.schema.feature_names() if k.islower()]
print("The model's input features:")
input_feature_names<jupyter_output><empty_output><jupyter_text>For neural networks to work correctly, numerical inputs must be normalized. Acommon approach is to apply z-normalization, which involves subtracting the meanand dividing by the standard deviation estimated from the training data to eachvalue. In forecasting, such z-normalization is not recommended as it would leadto future leakage. Specifically, to classify a transaction at time t, we cannotrely on data after time t since, at serving time when making a prediction attime t, no subsequent data is available yet. In short, at time t, we are limitedto using data that precedes or is concurrent with time t.The solution is therefore to apply z-normalization **over time**, which meansthat we normalize each transaction using the mean and standard deviationcomputed from the past data **for that transaction**.Future leakage is pernicious. Luckily, Temporian is here to help: the onlyoperator that can cause future leakage is `EventSet.leak()`. If you are notusing `EventSet.leak()`, your preprocessing is **guaranteed** not to createfuture leakage.**Note:** For advanced pipelines, you can also check programatically that afeature does not depends on an `EventSet.leak()` operation.<jupyter_code># Cast all values (e.g. ints) to floats.
values = all_data[input_feature_names].cast(tp.float32)
# Apply z-normalization over time.
normalized_features = (
values - values.simple_moving_average(math.inf)
) / values.moving_standard_deviation(math.inf)
# Restore the original name of the features.
normalized_features = normalized_features.rename(values.schema.feature_names())
print(normalized_features)<jupyter_output><empty_output><jupyter_text>The first transactions will be normalized using poor estimates of the mean andstandard deviation since there are only a few transactions before them. Tomitigate this issue, we remove the first week of data from the training dataset.Notice that the first values contain NaN. In Temporian, NaN represents missingvalues, and all operators handle them accordingly. For instance, whencalculating a moving average, NaN values are not included in the calculationand do not generate a NaN result.However, neural networks cannot natively handle NaN values. So, we replace themwith zeros.<jupyter_code>normalized_features = normalized_features.fillna(0.0)<jupyter_output><empty_output><jupyter_text>Finally, we group together the features and the labels.<jupyter_code>normalized_all_data = tp.glue(normalized_features, all_data["TX_FRAUD"])<jupyter_output><empty_output><jupyter_text>Split dataset into a train, validation and test setTo evaluate the quality of our machine learning model, we need training,validation and test sets. Since the system is dynamic (new fraud patterns arebeing created all the time), it is important for the training set to come beforethe validation set, and the validation set come before the testing set:- **Training:** April 8, 2018 to July 31, 2018- **Validation:** August 1, 2018 to August 31, 2018- **Testing:** September 1, 2018 to September 30, 2018For the example to run faster, we will effectively reduce the size of thetraining set to:- **Training:** July 1, 2018 to July 31, 2018<jupyter_code># begin_train = datetime.datetime(2018, 4, 8).timestamp() # Full training dataset
begin_train = datetime.datetime(2018, 7, 1).timestamp() # Reduced training dataset
begin_valid = datetime.datetime(2018, 8, 1).timestamp()
begin_test = datetime.datetime(2018, 9, 1).timestamp()
is_train = (normalized_all_data.timestamps() >= begin_train) & (
normalized_all_data.timestamps() < begin_valid
)
is_valid = (normalized_all_data.timestamps() >= begin_valid) & (
normalized_all_data.timestamps() < begin_test
)
is_test = normalized_all_data.timestamps() >= begin_test<jupyter_output><empty_output><jupyter_text>`is_train`, `is_valid` and `is_test` are boolean features over time that indicate the limits of the three folds. Let's plot them.<jupyter_code>tp.plot(
[
is_train.rename("is_train"),
is_valid.rename("is_valid"),
is_test.rename("is_test"),
]
)<jupyter_output><empty_output><jupyter_text>We filter the input features and label in each fold.<jupyter_code>train_ds_evset = normalized_all_data.filter(is_train)
valid_ds_evset = normalized_all_data.filter(is_valid)
test_ds_evset = normalized_all_data.filter(is_test)
print(f"Training examples: {train_ds_evset.num_events()}")
print(f"Validation examples: {valid_ds_evset.num_events()}")
print(f"Testing examples: {test_ds_evset.num_events()}")<jupyter_output><empty_output><jupyter_text>It is important to split the dataset **after** the features have been computedbecause some of the features for the training dataset are computed fromtransactions during the training window. Create TensorFlow datasetsWe convert the datasets from EventSets to TensorFlow Datasets as Keras consumesthem natively.<jupyter_code>non_batched_train_ds = tp.to_tensorflow_dataset(train_ds_evset)
non_batched_valid_ds = tp.to_tensorflow_dataset(valid_ds_evset)
non_batched_test_ds = tp.to_tensorflow_dataset(test_ds_evset)<jupyter_output><empty_output><jupyter_text>The following processing steps are applied using TensorFlow datasets: 1. The features and labels are separated using `extract_features_and_label` in the format that Keras expects. 2. The dataset is batched, which means that the examples are grouped into mini-batches. 3. The training examples are shuffled to improve the quality of mini-batch training. As we noted before, the dataset is imbalanced in the direction of legitimate transactions. While we want to evaluate our model on this original distribution, neural networks often train poorly on strongly imbalanced datasets. Therefore, we resample the training dataset to a ratio of 80% legitimate / 20% fraudulent using `rejection_resample`.<jupyter_code>def extract_features_and_label(example):
features = {k: example[k] for k in input_feature_names}
labels = tf.cast(example["TX_FRAUD"], tf.int32)
return features, labels
# Target ratio of fraudulent transactions in the training dataset.
target_rate = 0.2
# Number of examples in a mini-batch.
batch_size = 32
train_ds = (
non_batched_train_ds.shuffle(10000)
.rejection_resample(
class_func=lambda x: tf.cast(x["TX_FRAUD"], tf.int32),
target_dist=[1 - target_rate, target_rate],
initial_dist=[1 - fraudulent_rate, fraudulent_rate],
)
.map(lambda _, x: x) # Remove the label copy added by "rejection_resample".
.batch(batch_size)
.map(extract_features_and_label)
.prefetch(tf.data.AUTOTUNE)
)
# The test and validation datasets do not need resampling or shuffling.
valid_ds = (
non_batched_valid_ds.batch(batch_size)
.map(extract_features_and_label)
.prefetch(tf.data.AUTOTUNE)
)
test_ds = (
non_batched_test_ds.batch(batch_size)
.map(extract_features_and_label)
.prefetch(tf.data.AUTOTUNE)
)<jupyter_output><empty_output><jupyter_text>We print the first four examples of the training dataset. This is a simple way to identify some of the errors that could have been made above.<jupyter_code>for features, labels in train_ds.take(1):
print("features")
for feature_name, feature_value in features.items():
print(f"\t{feature_name}: {feature_value[:4]}")
print(f"labels: {labels[:4]}")<jupyter_output><empty_output><jupyter_text>Train the modelThe original dataset is transactional, but the processed data is tabular andonly contains normalized numerical values. Therefore, we train a feed-forwardneural network.<jupyter_code>inputs = [keras.Input(shape=(1,), name=name) for name in input_feature_names]
x = keras.layers.concatenate(inputs)
x = keras.layers.Dense(32, activation="sigmoid")(x)
x = keras.layers.Dense(16, activation="sigmoid")(x)
x = keras.layers.Dense(1, activation="sigmoid")(x)
model = keras.Model(inputs=inputs, outputs=x)<jupyter_output><empty_output><jupyter_text>Our goal is to differentiate between the fraudulent and legitimate transactions, so we use a binary classification objective. Because the dataset is imbalanced, accuracy is not an informative metric. Instead, we evaluate the model using the [area under the curve](https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_the_curve) (AUC).<jupyter_code>model.compile(
optimizer=keras.optimizers.Adam(0.01),
loss=keras.losses.BinaryCrossentropy(),
metrics=[keras.metrics.Accuracy(), keras.metrics.AUC()],
)
model.fit(train_ds, validation_data=valid_ds)<jupyter_output><empty_output><jupyter_text>We evaluate the model on the test dataset.<jupyter_code>model.evaluate(test_ds)<jupyter_output><empty_output><jupyter_text>With an AUC of ~83%, our simple fraud detector is showing encouraging results. Plotting the ROC curve is a good way to understand and select the operating point of the model, i.e. the threshold applied to the model output to differentiate between fraudulent and legitimate transactions. Compute the test predictions:<jupyter_code>predictions = model.predict(test_ds)
predictions = np.nan_to_num(predictions, nan=0)<jupyter_output><empty_output><jupyter_text>Extract the labels from the test set:<jupyter_code>labels = np.concatenate([label for _, label in test_ds])<jupyter_output><empty_output><jupyter_text>Finally, we plot the ROC curve.<jupyter_code>_ = RocCurveDisplay.from_predictions(labels, predictions)<jupyter_output><empty_output><jupyter_text>The Keras model is ready to be used on transactions with an unknown fraud status, a.k.a. serving. We save the model on disk for future use.**Note:** The model does not include the data preparation and preprocessing steps done in Pandas and Temporian. They have to be applied manually to the data fed into the model. While not demonstrated here, Temporian preprocessing can also be saved to disk with [tp.save](https://temporian.readthedocs.io/en/latest/reference/temporian/serialization/save/).<jupyter_code>model.save("fraud_detection_model.keras")<jupyter_output><empty_output><jupyter_text>The model can be later reloaded with:<jupyter_code>loaded_model = keras.saving.load_model("fraud_detection_model.keras")
# Generate predictions with the loaded model on 5 test examples.
loaded_model.predict(test_ds.rebatch(5).take(1))<jupyter_output><empty_output>
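<jupyter_text>As an optional follow-up (this cell is an addition, not part of the original tutorial), the ROC data can also be used to pick an explicit operating threshold. The sketch below reuses the `labels` and `predictions` arrays computed earlier and relies on `sklearn.metrics.roc_curve`; the 1% false-positive budget is an arbitrary illustrative choice.<jupyter_code>from sklearn.metrics import roc_curve
fpr, tpr, thresholds = roc_curve(labels, predictions.ravel())
# Keep only the operating points with a false-positive rate of at most 1%.
mask = fpr <= 0.01
best = tpr[mask].argmax()
print(f"TPR at FPR <= 1%: {tpr[mask][best]:.3f}")
print(f"Corresponding decision threshold: {thresholds[mask][best]:.3f}")<jupyter_output><empty_output>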
|
keras-io/examples/timeseries/ipynb/event_classification_for_payment_card_fraud_detection.ipynb/0
|
{
"file_path": "keras-io/examples/timeseries/ipynb/event_classification_for_payment_card_fraud_detection.ipynb",
"repo_id": "keras-io",
"token_count": 5886
}
| 118 |
"""
Title: Traffic forecasting using graph neural networks and LSTM
Author: [Arash Khodadadi](https://www.linkedin.com/in/arash-khodadadi-08a02490/)
Date created: 2021/12/28
Last modified: 2023/11/22
Description: This example demonstrates how to do timeseries forecasting over graphs.
Accelerator: GPU
"""
"""
## Introduction
This example shows how to forecast traffic conditions using graph neural networks and LSTM.
Specifically, we are interested in predicting the future values of the traffic speed given
a history of the traffic speed for a collection of road segments.
One popular method to
solve this problem is to consider each road segment's traffic speed as a separate
timeseries and predict the future values of each timeseries
using the past values of the same timeseries.
This method, however, ignores the dependency of the traffic speed of one road segment on
the neighboring segments. To be able to take into account the complex interactions between
the traffic speed on a collection of neighboring roads, we can define the traffic network
as a graph and consider the traffic speed as a signal on this graph. In this example,
we implement a neural network architecture which can process timeseries data over a graph.
We first show how to process the data and create a
[tf.data.Dataset](https://www.tensorflow.org/api_docs/python/tf/data/Dataset) for
forecasting over graphs. Then, we implement a model which uses graph convolution and
LSTM layers to perform forecasting over a graph.
The data processing and the model architecture are inspired by this paper:
Yu, Bing, Haoteng Yin, and Zhanxing Zhu. "Spatio-temporal graph convolutional networks:
a deep learning framework for traffic forecasting." Proceedings of the 27th International
Joint Conference on Artificial Intelligence, 2018.
([github](https://github.com/VeritasYin/STGCN_IJCAI-18))
"""
"""
## Setup
"""
import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import pandas as pd
import numpy as np
import typing
import matplotlib.pyplot as plt
import tensorflow as tf
import keras
from keras import layers
from keras import ops
"""
## Data preparation
"""
"""
### Data description
We use a real-world traffic speed dataset named `PeMSD7`. We use the version
collected and prepared by [Yu et al., 2018](https://arxiv.org/abs/1709.04875)
and available
[here](https://github.com/VeritasYin/STGCN_IJCAI-18/tree/master/dataset).
The data consists of two files:
- `PeMSD7_W_228.csv` contains the distances between 228
stations across the District 7 of California.
- `PeMSD7_V_228.csv` contains traffic
speed collected for those stations in the weekdays of May and June of 2012.
The full description of the dataset can be found in
[Yu et al., 2018](https://arxiv.org/abs/1709.04875).
"""
"""
### Loading data
"""
url = "https://github.com/VeritasYin/STGCN_IJCAI-18/raw/master/dataset/PeMSD7_Full.zip"
data_dir = keras.utils.get_file(origin=url, extract=True, archive_format="zip")
data_dir = data_dir.rstrip("PeMSD7_Full.zip")
route_distances = pd.read_csv(
os.path.join(data_dir, "PeMSD7_W_228.csv"), header=None
).to_numpy()
speeds_array = pd.read_csv(
os.path.join(data_dir, "PeMSD7_V_228.csv"), header=None
).to_numpy()
print(f"route_distances shape={route_distances.shape}")
print(f"speeds_array shape={speeds_array.shape}")
"""
### Sub-sampling roads
To reduce the problem size and make the training faster, we will only
work with a sample of 26 roads out of the 228 roads in the dataset.
We have chosen the roads by starting from road 0, choosing the 5 closest
roads to it, and continuing this process until we get 25 roads. You can choose
any other subset of the roads. We chose the roads in this way to increase the likelihood
of having roads with correlated speed timeseries.
`sample_routes` contains the IDs of the selected roads.
"""
sample_routes = [
0,
1,
4,
7,
8,
11,
15,
108,
109,
114,
115,
118,
120,
123,
124,
126,
127,
129,
130,
132,
133,
136,
139,
144,
147,
216,
]
route_distances = route_distances[np.ix_(sample_routes, sample_routes)]
speeds_array = speeds_array[:, sample_routes]
print(f"route_distances shape={route_distances.shape}")
print(f"speeds_array shape={speeds_array.shape}")
"""
### Data visualization
Here are the timeseries of the traffic speed for two of the routes:
"""
plt.figure(figsize=(18, 6))
plt.plot(speeds_array[:, [0, -1]])
plt.legend(["route_0", "route_25"])
"""
We can also visualize the correlation between the timeseries in different routes.
"""
plt.figure(figsize=(8, 8))
plt.matshow(np.corrcoef(speeds_array.T), 0)
plt.xlabel("road number")
plt.ylabel("road number")
"""
Using this correlation heatmap, we can see that, for example, the speeds in
routes 4, 5, and 6 are highly correlated.
"""
"""
### Splitting and normalizing data
Next, we split the speed values array into train/validation/test sets,
and normalize the resulting arrays:
"""
train_size, val_size = 0.5, 0.2
def preprocess(data_array: np.ndarray, train_size: float, val_size: float):
"""Splits data into train/val/test sets and normalizes the data.
Args:
data_array: ndarray of shape `(num_time_steps, num_routes)`
train_size: A float value between 0.0 and 1.0 that represent the proportion of the dataset
to include in the train split.
val_size: A float value between 0.0 and 1.0 that represent the proportion of the dataset
to include in the validation split.
Returns:
`train_array`, `val_array`, `test_array`
"""
num_time_steps = data_array.shape[0]
num_train, num_val = (
int(num_time_steps * train_size),
int(num_time_steps * val_size),
)
train_array = data_array[:num_train]
mean, std = train_array.mean(axis=0), train_array.std(axis=0)
train_array = (train_array - mean) / std
val_array = (data_array[num_train : (num_train + num_val)] - mean) / std
test_array = (data_array[(num_train + num_val) :] - mean) / std
return train_array, val_array, test_array
train_array, val_array, test_array = preprocess(speeds_array, train_size, val_size)
print(f"train set size: {train_array.shape}")
print(f"validation set size: {val_array.shape}")
print(f"test set size: {test_array.shape}")
"""
### Creating TensorFlow Datasets
Next, we create the datasets for our forecasting problem. The forecasting problem
can be stated as follows: given a sequence of the
road speed values at times `t+1, t+2, ..., t+T`, we want to predict the future values of
the roads speed for times `t+T+1, ..., t+T+h`. So for each time `t` the inputs to our
model are `T` vectors each of size `N` and the targets are `h` vectors each of size `N`,
where `N` is the number of roads.
"""
"""
We use the Keras built-in function
`keras.utils.timeseries_dataset_from_array`.
The function `create_tf_dataset()` below takes as input a `numpy.ndarray` and returns a
`tf.data.Dataset`. In this function `input_sequence_length=T` and `forecast_horizon=h`.
The argument `multi_horizon` needs more explanation. Assume `forecast_horizon=3`.
If `multi_horizon=True` then the model will make a forecast for time steps
`t+T+1, t+T+2, t+T+3`, so the target will have shape `(3, N)`, where `N` is the number of roads. But if
`multi_horizon=False`, the model will make a forecast only for time step `t+T+3` and
so the target will have shape `(1, N)`.
You may notice that the input tensor in each batch has shape
`(batch_size, input_sequence_length, num_routes, 1)`. The last dimension is added to
make the model more general: at each time step, the input features for each road may
contain multiple timeseries. For instance, one might want to use temperature timeseries
in addition to historical values of the speed as input features. In this example,
however, the last dimension of the input is always 1.
We use the last 12 values of the speed in each road to forecast the speed for 3 time
steps ahead:
"""
batch_size = 64
input_sequence_length = 12
forecast_horizon = 3
multi_horizon = False
def create_tf_dataset(
data_array: np.ndarray,
input_sequence_length: int,
forecast_horizon: int,
batch_size: int = 128,
shuffle=True,
multi_horizon=True,
):
"""Creates tensorflow dataset from numpy array.
This function creates a dataset where each element is a tuple `(inputs, targets)`.
`inputs` is a Tensor
of shape `(batch_size, input_sequence_length, num_routes, 1)` containing
the `input_sequence_length` past values of the timeseries for each node.
`targets` is a Tensor of shape `(batch_size, forecast_horizon, num_routes)`
containing the `forecast_horizon`
future values of the timeseries for each node.
Args:
data_array: np.ndarray with shape `(num_time_steps, num_routes)`
input_sequence_length: Length of the input sequence (in number of timesteps).
forecast_horizon: If `multi_horizon=True`, the target will be the values of the timeseries for 1 to
`forecast_horizon` timesteps ahead. If `multi_horizon=False`, the target will be the value of the
timeseries `forecast_horizon` steps ahead (only one value).
batch_size: Number of timeseries samples in each batch.
shuffle: Whether to shuffle output samples, or instead draw them in chronological order.
multi_horizon: See `forecast_horizon`.
Returns:
A tf.data.Dataset instance.
"""
inputs = keras.utils.timeseries_dataset_from_array(
np.expand_dims(data_array[:-forecast_horizon], axis=-1),
None,
sequence_length=input_sequence_length,
shuffle=False,
batch_size=batch_size,
)
target_offset = (
input_sequence_length
if multi_horizon
else input_sequence_length + forecast_horizon - 1
)
target_seq_length = forecast_horizon if multi_horizon else 1
targets = keras.utils.timeseries_dataset_from_array(
data_array[target_offset:],
None,
sequence_length=target_seq_length,
shuffle=False,
batch_size=batch_size,
)
dataset = tf.data.Dataset.zip((inputs, targets))
if shuffle:
dataset = dataset.shuffle(100)
return dataset.prefetch(16).cache()
train_dataset, val_dataset = (
create_tf_dataset(data_array, input_sequence_length, forecast_horizon, batch_size)
for data_array in [train_array, val_array]
)
test_dataset = create_tf_dataset(
test_array,
input_sequence_length,
forecast_horizon,
batch_size=test_array.shape[0],
shuffle=False,
multi_horizon=multi_horizon,
)
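"""
As a quick sanity check (this snippet is an addition to the original example), we can
inspect the shapes of one training batch. The inputs should have shape
`(batch_size, input_sequence_length, num_routes, 1)` and the targets
`(batch_size, forecast_horizon, num_routes)`.
"""
for batch_inputs, batch_targets in train_dataset.take(1):
    print(f"inputs shape: {batch_inputs.shape}")
    print(f"targets shape: {batch_targets.shape}")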
"""
### Roads Graph
As mentioned before, we assume that the road segments form a graph.
The `PeMSD7` dataset has the road segments distance. The next step
is to create the graph adjacency matrix from these distances. Following
[Yu et al., 2018](https://arxiv.org/abs/1709.04875) (equation 10) we assume there
is an edge between two nodes in the graph if the distance between the corresponding roads
is less than a threshold.
"""
def compute_adjacency_matrix(
route_distances: np.ndarray, sigma2: float, epsilon: float
):
"""Computes the adjacency matrix from distances matrix.
It uses the formula in https://github.com/VeritasYin/STGCN_IJCAI-18#data-preprocessing to
compute an adjacency matrix from the distance matrix.
The implementation follows that paper.
Args:
route_distances: np.ndarray of shape `(num_routes, num_routes)`. Entry `i,j` of this array is the
distance between roads `i,j`.
sigma2: Determines the width of the Gaussian kernel applied to the square distances matrix.
epsilon: A threshold specifying if there is an edge between two nodes. Specifically, `A[i,j]=1`
if `np.exp(-w2[i,j] / sigma2) >= epsilon` and `A[i,j]=0` otherwise, where `A` is the adjacency
matrix and `w2=route_distances * route_distances`
Returns:
A boolean graph adjacency matrix.
"""
num_routes = route_distances.shape[0]
route_distances = route_distances / 10000.0
w2, w_mask = (
route_distances * route_distances,
np.ones([num_routes, num_routes]) - np.identity(num_routes),
)
return (np.exp(-w2 / sigma2) >= epsilon) * w_mask
"""
The function `compute_adjacency_matrix()` returns a boolean adjacency matrix
where 1 means there is an edge between two nodes. We use the following class
to store the information about the graph.
"""
class GraphInfo:
def __init__(self, edges: typing.Tuple[list, list], num_nodes: int):
self.edges = edges
self.num_nodes = num_nodes
sigma2 = 0.1
epsilon = 0.5
adjacency_matrix = compute_adjacency_matrix(route_distances, sigma2, epsilon)
node_indices, neighbor_indices = np.where(adjacency_matrix == 1)
graph = GraphInfo(
edges=(node_indices.tolist(), neighbor_indices.tolist()),
num_nodes=adjacency_matrix.shape[0],
)
print(f"number of nodes: {graph.num_nodes}, number of edges: {len(graph.edges[0])}")
"""
## Network architecture
Our model for forecasting over the graph consists of a graph convolution
layer and a LSTM layer.
"""
"""
### Graph convolution layer
Our implementation of the graph convolution layer resembles the implementation
in [this Keras example](https://keras.io/examples/graph/gnn_citations/). Note that
in that example input to the layer is a 2D tensor of shape `(num_nodes,in_feat)`
but in our example the input to the layer is a 4D tensor of shape
`(num_nodes, batch_size, input_seq_length, in_feat)`. The graph convolution layer
performs the following steps:
- The nodes' representations are computed in `self.compute_nodes_representation()`
by multiplying the input features by `self.weight`
- The aggregated neighbors' messages are computed in `self.compute_aggregated_messages()`
by first aggregating the neighbors' representations and then multiplying the results by
`self.weight`
- The final output of the layer is computed in `self.update()` by combining the nodes
representations and the neighbors' aggregated messages
"""
class GraphConv(layers.Layer):
def __init__(
self,
in_feat,
out_feat,
graph_info: GraphInfo,
aggregation_type="mean",
combination_type="concat",
activation: typing.Optional[str] = None,
**kwargs,
):
super().__init__(**kwargs)
self.in_feat = in_feat
self.out_feat = out_feat
self.graph_info = graph_info
self.aggregation_type = aggregation_type
self.combination_type = combination_type
self.weight = self.add_weight(
initializer=keras.initializers.GlorotUniform(),
shape=(in_feat, out_feat),
dtype="float32",
trainable=True,
)
self.activation = layers.Activation(activation)
def aggregate(self, neighbour_representations):
aggregation_func = {
"sum": tf.math.unsorted_segment_sum,
"mean": tf.math.unsorted_segment_mean,
"max": tf.math.unsorted_segment_max,
}.get(self.aggregation_type)
if aggregation_func:
return aggregation_func(
neighbour_representations,
self.graph_info.edges[0],
num_segments=self.graph_info.num_nodes,
)
raise ValueError(f"Invalid aggregation type: {self.aggregation_type}")
def compute_nodes_representation(self, features):
"""Computes each node's representation.
The nodes' representations are obtained by multiplying the features tensor with
`self.weight`. Note that
`self.weight` has shape `(in_feat, out_feat)`.
Args:
features: Tensor of shape `(num_nodes, batch_size, input_seq_len, in_feat)`
Returns:
A tensor of shape `(num_nodes, batch_size, input_seq_len, out_feat)`
"""
return ops.matmul(features, self.weight)
def compute_aggregated_messages(self, features):
neighbour_representations = tf.gather(features, self.graph_info.edges[1])
aggregated_messages = self.aggregate(neighbour_representations)
return ops.matmul(aggregated_messages, self.weight)
def update(self, nodes_representation, aggregated_messages):
if self.combination_type == "concat":
h = ops.concatenate([nodes_representation, aggregated_messages], axis=-1)
elif self.combination_type == "add":
h = nodes_representation + aggregated_messages
else:
raise ValueError(f"Invalid combination type: {self.combination_type}.")
return self.activation(h)
def call(self, features):
"""Forward pass.
Args:
features: tensor of shape `(num_nodes, batch_size, input_seq_len, in_feat)`
Returns:
A tensor of shape `(num_nodes, batch_size, input_seq_len, out_feat)`
"""
nodes_representation = self.compute_nodes_representation(features)
aggregated_messages = self.compute_aggregated_messages(features)
return self.update(nodes_representation, aggregated_messages)
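"""
As a minimal smoke test (an addition to the original example; the sizes below are
arbitrary), we can run the layer on random data to confirm the node-major 4D layout
described above. With the default `combination_type="concat"`, the output feature
dimension is `2 * out_feat`.
"""
dummy_features = tf.random.normal((graph.num_nodes, 8, input_sequence_length, 1))
dummy_graph_conv = GraphConv(in_feat=1, out_feat=4, graph_info=graph)
print(f"GraphConv output shape: {dummy_graph_conv(dummy_features).shape}")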
"""
### LSTM plus graph convolution
By applying the graph convolution layer to the input tensor, we get another tensor
containing the nodes' representations over time (another 4D tensor). For each time
step, a node's representation is informed by the information from its neighbors.
To make good forecasts, however, we not only need information from the neighbors,
we also need to process the information over time. To this end, we can pass each
node's tensor through a recurrent layer. The `LSTMGC` layer below first applies
a graph convolution layer to the inputs and then passes the results through a
`LSTM` layer.
"""
class LSTMGC(layers.Layer):
"""Layer comprising a convolution layer followed by LSTM and dense layers."""
def __init__(
self,
in_feat,
out_feat,
lstm_units: int,
input_seq_len: int,
output_seq_len: int,
graph_info: GraphInfo,
graph_conv_params: typing.Optional[dict] = None,
**kwargs,
):
super().__init__(**kwargs)
# graph conv layer
if graph_conv_params is None:
graph_conv_params = {
"aggregation_type": "mean",
"combination_type": "concat",
"activation": None,
}
self.graph_conv = GraphConv(in_feat, out_feat, graph_info, **graph_conv_params)
self.lstm = layers.LSTM(lstm_units, activation="relu")
self.dense = layers.Dense(output_seq_len)
self.input_seq_len, self.output_seq_len = input_seq_len, output_seq_len
def call(self, inputs):
"""Forward pass.
Args:
inputs: tensor of shape `(batch_size, input_seq_len, num_nodes, in_feat)`
Returns:
A tensor of shape `(batch_size, output_seq_len, num_nodes)`.
"""
# convert shape to (num_nodes, batch_size, input_seq_len, in_feat)
inputs = ops.transpose(inputs, [2, 0, 1, 3])
gcn_out = self.graph_conv(
inputs
) # gcn_out has shape: (num_nodes, batch_size, input_seq_len, out_feat)
shape = ops.shape(gcn_out)
num_nodes, batch_size, input_seq_len, out_feat = (
shape[0],
shape[1],
shape[2],
shape[3],
)
# LSTM takes only 3D tensors as input
gcn_out = ops.reshape(
gcn_out, (batch_size * num_nodes, input_seq_len, out_feat)
)
lstm_out = self.lstm(
gcn_out
) # lstm_out has shape: (batch_size * num_nodes, lstm_units)
dense_output = self.dense(
lstm_out
) # dense_output has shape: (batch_size * num_nodes, output_seq_len)
output = ops.reshape(dense_output, (num_nodes, batch_size, self.output_seq_len))
return ops.transpose(
output, [1, 2, 0]
) # returns Tensor of shape (batch_size, output_seq_len, num_nodes)
"""
## Model training
"""
in_feat = 1
batch_size = 64
epochs = 20
input_sequence_length = 12
forecast_horizon = 3
multi_horizon = False
out_feat = 10
lstm_units = 64
graph_conv_params = {
"aggregation_type": "mean",
"combination_type": "concat",
"activation": None,
}
st_gcn = LSTMGC(
in_feat,
out_feat,
lstm_units,
input_sequence_length,
forecast_horizon,
graph,
graph_conv_params,
)
inputs = layers.Input((input_sequence_length, graph.num_nodes, in_feat))
outputs = st_gcn(inputs)
model = keras.models.Model(inputs, outputs)
model.compile(
optimizer=keras.optimizers.RMSprop(learning_rate=0.0002),
loss=keras.losses.MeanSquaredError(),
)
model.fit(
train_dataset,
validation_data=val_dataset,
epochs=epochs,
callbacks=[keras.callbacks.EarlyStopping(patience=10)],
)
"""
## Making forecasts on test set
Now we can use the trained model to make forecasts for the test set. Below, we
compute the MAE of the model and compare it to the MAE of naive forecasts.
The naive forecasts are the last value of the speed for each node.
"""
x_test, y = next(test_dataset.as_numpy_iterator())
y_pred = model.predict(x_test)
plt.figure(figsize=(18, 6))
plt.plot(y[:, 0, 0])
plt.plot(y_pred[:, 0, 0])
plt.legend(["actual", "forecast"])
naive_mae, model_mae = (
    np.abs(x_test[:, -1, :, 0] - y[:, 0, :]).mean(),
    np.abs(y_pred[:, 0, :] - y[:, 0, :]).mean(),
)
print(f"naive MAE: {naive_mae}, model MAE: {model_mae}")
"""
Of course, the goal here is to demonstrate the method,
not to achieve the best performance. To improve the
model's accuracy, all model hyperparameters should be tuned carefully. In addition,
several of the `LSTMGC` blocks can be stacked to increase the representation power
of the model.
"""
|
keras-io/examples/timeseries/timeseries_traffic_forecasting.py/0
|
{
"file_path": "keras-io/examples/timeseries/timeseries_traffic_forecasting.py",
"repo_id": "keras-io",
"token_count": 8190
}
| 119 |
"""
Title: Multiclass semantic segmentation using DeepLabV3+
Author: [Soumik Rakshit](http://github.com/soumik12345)
Date created: 2021/08/31
Last modified: 2024/01/05
Description: Implement DeepLabV3+ architecture for Multi-class Semantic Segmentation.
Accelerator: GPU
Converted to Keras 3: [Muhammad Anas Raza](https://anasrz.com)
"""
"""
## Introduction
Semantic segmentation, with the goal to assign semantic labels to every pixel in an image,
is an essential computer vision task. In this example, we implement
the **DeepLabV3+** model for multi-class semantic segmentation, a fully-convolutional
architecture that performs well on semantic segmentation benchmarks.
### References:
- [Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation](https://arxiv.org/abs/1802.02611)
- [Rethinking Atrous Convolution for Semantic Image Segmentation](https://arxiv.org/abs/1706.05587)
- [DeepLab: Semantic Image Segmentation with Deep Convolutional Nets, Atrous Convolution, and Fully Connected CRFs](https://arxiv.org/abs/1606.00915)
"""
"""
## Downloading the data
We will use the [Crowd Instance-level Human Parsing Dataset](https://arxiv.org/abs/1811.12596)
for training our model. The Crowd Instance-level Human Parsing (CIHP) dataset has 38,280 diverse human images.
Each image in CIHP is labeled with pixel-wise annotations for 20 categories, as well as instance-level identification.
This dataset can be used for the "human part segmentation" task.
"""
import keras
from keras import layers
from keras import ops
import os
import numpy as np
from glob import glob
import cv2
from scipy.io import loadmat
import matplotlib.pyplot as plt
# For data preprocessing
from tensorflow import image as tf_image
from tensorflow import data as tf_data
from tensorflow import io as tf_io
"""shell
gdown "1B9A9UCJYMwTL4oBEo4RZfbMZMaZhKJaz&confirm=t"
unzip -q instance-level-human-parsing.zip
"""
"""
## Creating a TensorFlow Dataset
Training on the entire CIHP dataset with 38,280 images takes a lot of time, hence we will be using
a smaller subset of 1,000 images for training our model in this example.
"""
IMAGE_SIZE = 512
BATCH_SIZE = 4
NUM_CLASSES = 20
DATA_DIR = "./instance-level_human_parsing/instance-level_human_parsing/Training"
NUM_TRAIN_IMAGES = 1000
NUM_VAL_IMAGES = 50
train_images = sorted(glob(os.path.join(DATA_DIR, "Images/*")))[:NUM_TRAIN_IMAGES]
train_masks = sorted(glob(os.path.join(DATA_DIR, "Category_ids/*")))[:NUM_TRAIN_IMAGES]
val_images = sorted(glob(os.path.join(DATA_DIR, "Images/*")))[
NUM_TRAIN_IMAGES : NUM_VAL_IMAGES + NUM_TRAIN_IMAGES
]
val_masks = sorted(glob(os.path.join(DATA_DIR, "Category_ids/*")))[
NUM_TRAIN_IMAGES : NUM_VAL_IMAGES + NUM_TRAIN_IMAGES
]
def read_image(image_path, mask=False):
image = tf_io.read_file(image_path)
if mask:
image = tf_image.decode_png(image, channels=1)
image.set_shape([None, None, 1])
image = tf_image.resize(images=image, size=[IMAGE_SIZE, IMAGE_SIZE])
else:
image = tf_image.decode_png(image, channels=3)
image.set_shape([None, None, 3])
image = tf_image.resize(images=image, size=[IMAGE_SIZE, IMAGE_SIZE])
return image
def load_data(image_list, mask_list):
image = read_image(image_list)
mask = read_image(mask_list, mask=True)
return image, mask
def data_generator(image_list, mask_list):
dataset = tf_data.Dataset.from_tensor_slices((image_list, mask_list))
dataset = dataset.map(load_data, num_parallel_calls=tf_data.AUTOTUNE)
dataset = dataset.batch(BATCH_SIZE, drop_remainder=True)
return dataset
train_dataset = data_generator(train_images, train_masks)
val_dataset = data_generator(val_images, val_masks)
print("Train Dataset:", train_dataset)
print("Val Dataset:", val_dataset)
"""
## Building the DeepLabV3+ model
DeepLabv3+ extends DeepLabv3 by adding an encoder-decoder structure. The encoder module
processes multiscale contextual information by applying dilated convolution at multiple
scales, while the decoder module refines the segmentation results along object boundaries.

**Dilated convolution:** With dilated convolution, as we go deeper in the network, we can keep the
stride constant while enlarging the field of view, without increasing the number of parameters
or the amount of computation. Besides, it enables larger output feature maps, which is
useful for semantic segmentation.
The reason for using **Dilated Spatial Pyramid Pooling** is that it was shown that as the
sampling rate becomes larger, the number of valid filter weights (i.e., weights that
are applied to the valid feature region, instead of padded zeros) becomes smaller.
"""
def convolution_block(
block_input,
num_filters=256,
kernel_size=3,
dilation_rate=1,
use_bias=False,
):
x = layers.Conv2D(
num_filters,
kernel_size=kernel_size,
dilation_rate=dilation_rate,
padding="same",
use_bias=use_bias,
kernel_initializer=keras.initializers.HeNormal(),
)(block_input)
x = layers.BatchNormalization()(x)
return ops.nn.relu(x)
def DilatedSpatialPyramidPooling(dspp_input):
dims = dspp_input.shape
x = layers.AveragePooling2D(pool_size=(dims[-3], dims[-2]))(dspp_input)
x = convolution_block(x, kernel_size=1, use_bias=True)
out_pool = layers.UpSampling2D(
size=(dims[-3] // x.shape[1], dims[-2] // x.shape[2]),
interpolation="bilinear",
)(x)
out_1 = convolution_block(dspp_input, kernel_size=1, dilation_rate=1)
out_6 = convolution_block(dspp_input, kernel_size=3, dilation_rate=6)
out_12 = convolution_block(dspp_input, kernel_size=3, dilation_rate=12)
out_18 = convolution_block(dspp_input, kernel_size=3, dilation_rate=18)
x = layers.Concatenate(axis=-1)([out_pool, out_1, out_6, out_12, out_18])
output = convolution_block(x, kernel_size=1)
return output
"""
The encoder features are first bilinearly upsampled by a factor of 4, and then
concatenated with the corresponding low-level features from the network backbone that
have the same spatial resolution. For this example, we
use a ResNet50 pretrained on ImageNet as the backbone model, and we use
the low-level features from the `conv4_block6_2_relu` block of the backbone.
"""
def DeeplabV3Plus(image_size, num_classes):
model_input = keras.Input(shape=(image_size, image_size, 3))
preprocessed = keras.applications.resnet50.preprocess_input(model_input)
resnet50 = keras.applications.ResNet50(
weights="imagenet", include_top=False, input_tensor=preprocessed
)
x = resnet50.get_layer("conv4_block6_2_relu").output
x = DilatedSpatialPyramidPooling(x)
input_a = layers.UpSampling2D(
size=(image_size // 4 // x.shape[1], image_size // 4 // x.shape[2]),
interpolation="bilinear",
)(x)
input_b = resnet50.get_layer("conv2_block3_2_relu").output
input_b = convolution_block(input_b, num_filters=48, kernel_size=1)
x = layers.Concatenate(axis=-1)([input_a, input_b])
x = convolution_block(x)
x = convolution_block(x)
x = layers.UpSampling2D(
size=(image_size // x.shape[1], image_size // x.shape[2]),
interpolation="bilinear",
)(x)
model_output = layers.Conv2D(num_classes, kernel_size=(1, 1), padding="same")(x)
return keras.Model(inputs=model_input, outputs=model_output)
model = DeeplabV3Plus(image_size=IMAGE_SIZE, num_classes=NUM_CLASSES)
model.summary()
"""
## Training
We train the model using sparse categorical crossentropy as the loss function, and
Adam as the optimizer.
"""
loss = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
model.compile(
optimizer=keras.optimizers.Adam(learning_rate=0.001),
loss=loss,
metrics=["accuracy"],
)
history = model.fit(train_dataset, validation_data=val_dataset, epochs=25)
plt.plot(history.history["loss"])
plt.title("Training Loss")
plt.ylabel("loss")
plt.xlabel("epoch")
plt.show()
plt.plot(history.history["accuracy"])
plt.title("Training Accuracy")
plt.ylabel("accuracy")
plt.xlabel("epoch")
plt.show()
plt.plot(history.history["val_loss"])
plt.title("Validation Loss")
plt.ylabel("val_loss")
plt.xlabel("epoch")
plt.show()
plt.plot(history.history["val_accuracy"])
plt.title("Validation Accuracy")
plt.ylabel("val_accuracy")
plt.xlabel("epoch")
plt.show()
"""
## Inference using Colormap Overlay
The raw predictions from the model represent a one-hot encoded tensor of shape `(N, 512, 512, 20)`
where each one of the 20 channels is a binary mask corresponding to a predicted label.
In order to visualize the results, we plot them as RGB segmentation masks where each pixel
is represented by a unique color corresponding to the particular label predicted. We can easily
find the color corresponding to each label from the `human_colormap.mat` file provided as part
of the dataset. We would also plot an overlay of the RGB segmentation mask on the input image as
this further helps us to identify the different categories present in the image more intuitively.
"""
# Loading the Colormap
colormap = loadmat(
"./instance-level_human_parsing/instance-level_human_parsing/human_colormap.mat"
)["colormap"]
colormap = colormap * 100
colormap = colormap.astype(np.uint8)
def infer(model, image_tensor):
predictions = model.predict(np.expand_dims((image_tensor), axis=0))
predictions = np.squeeze(predictions)
predictions = np.argmax(predictions, axis=2)
return predictions
def decode_segmentation_masks(mask, colormap, n_classes):
r = np.zeros_like(mask).astype(np.uint8)
g = np.zeros_like(mask).astype(np.uint8)
b = np.zeros_like(mask).astype(np.uint8)
for l in range(0, n_classes):
idx = mask == l
r[idx] = colormap[l, 0]
g[idx] = colormap[l, 1]
b[idx] = colormap[l, 2]
rgb = np.stack([r, g, b], axis=2)
return rgb
def get_overlay(image, colored_mask):
image = keras.utils.array_to_img(image)
image = np.array(image).astype(np.uint8)
overlay = cv2.addWeighted(image, 0.35, colored_mask, 0.65, 0)
return overlay
def plot_samples_matplotlib(display_list, figsize=(5, 3)):
_, axes = plt.subplots(nrows=1, ncols=len(display_list), figsize=figsize)
for i in range(len(display_list)):
if display_list[i].shape[-1] == 3:
axes[i].imshow(keras.utils.array_to_img(display_list[i]))
else:
axes[i].imshow(display_list[i])
plt.show()
def plot_predictions(images_list, colormap, model):
for image_file in images_list:
image_tensor = read_image(image_file)
prediction_mask = infer(image_tensor=image_tensor, model=model)
prediction_colormap = decode_segmentation_masks(prediction_mask, colormap, 20)
overlay = get_overlay(image_tensor, prediction_colormap)
plot_samples_matplotlib(
[image_tensor, overlay, prediction_colormap], figsize=(18, 14)
)
"""
### Inference on Train Images
"""
plot_predictions(train_images[:4], colormap, model=model)
"""
### Inference on Validation Images
You can use the trained model hosted on [Hugging Face Hub](https://huggingface.co/keras-io/deeplabv3p-resnet50)
and try the demo on [Hugging Face Spaces](https://huggingface.co/spaces/keras-io/Human-Part-Segmentation).
"""
plot_predictions(val_images[:4], colormap, model=model)
|
keras-io/examples/vision/deeplabv3_plus.py/0
|
{
"file_path": "keras-io/examples/vision/deeplabv3_plus.py",
"repo_id": "keras-io",
"token_count": 4272
}
| 120 |
"""
Title: Image classification with Vision Transformer
Author: [Khalid Salama](https://www.linkedin.com/in/khalid-salama-24403144/)
Date created: 2021/01/18
Last modified: 2021/01/18
Description: Implementing the Vision Transformer (ViT) model for image classification.
Accelerator: GPU
"""
"""
## Introduction
This example implements the [Vision Transformer (ViT)](https://arxiv.org/abs/2010.11929)
model by Alexey Dosovitskiy et al. for image classification,
and demonstrates it on the CIFAR-100 dataset.
The ViT model applies the Transformer architecture with self-attention to sequences of
image patches, without using convolution layers.
"""
"""
## Setup
"""
import os
os.environ["KERAS_BACKEND"] = "jax" # @param ["tensorflow", "jax", "torch"]
import keras
from keras import layers
from keras import ops
import numpy as np
import matplotlib.pyplot as plt
"""
## Prepare the data
"""
num_classes = 100
input_shape = (32, 32, 3)
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar100.load_data()
print(f"x_train shape: {x_train.shape} - y_train shape: {y_train.shape}")
print(f"x_test shape: {x_test.shape} - y_test shape: {y_test.shape}")
"""
## Configure the hyperparameters
"""
learning_rate = 0.001
weight_decay = 0.0001
batch_size = 256
num_epochs = 10 # For real training, use num_epochs=100. 10 is a test value
image_size = 72 # We'll resize input images to this size
patch_size = 6  # Size of the patches to be extracted from the input images
num_patches = (image_size // patch_size) ** 2
projection_dim = 64
num_heads = 4
transformer_units = [
projection_dim * 2,
projection_dim,
] # Size of the transformer layers
transformer_layers = 8
mlp_head_units = [
2048,
1024,
] # Size of the dense layers of the final classifier
"""
## Use data augmentation
"""
data_augmentation = keras.Sequential(
[
layers.Normalization(),
layers.Resizing(image_size, image_size),
layers.RandomFlip("horizontal"),
layers.RandomRotation(factor=0.02),
layers.RandomZoom(height_factor=0.2, width_factor=0.2),
],
name="data_augmentation",
)
# Compute the mean and the variance of the training data for normalization.
data_augmentation.layers[0].adapt(x_train)
"""
## Implement multilayer perceptron (MLP)
"""
def mlp(x, hidden_units, dropout_rate):
for units in hidden_units:
x = layers.Dense(units, activation=keras.activations.gelu)(x)
x = layers.Dropout(dropout_rate)(x)
return x
"""
## Implement patch creation as a layer
"""
class Patches(layers.Layer):
def __init__(self, patch_size):
super().__init__()
self.patch_size = patch_size
def call(self, images):
input_shape = ops.shape(images)
batch_size = input_shape[0]
height = input_shape[1]
width = input_shape[2]
channels = input_shape[3]
num_patches_h = height // self.patch_size
num_patches_w = width // self.patch_size
patches = keras.ops.image.extract_patches(images, size=self.patch_size)
patches = ops.reshape(
patches,
(
batch_size,
num_patches_h * num_patches_w,
self.patch_size * self.patch_size * channels,
),
)
return patches
def get_config(self):
config = super().get_config()
config.update({"patch_size": self.patch_size})
return config
"""
Let's display patches for a sample image
"""
plt.figure(figsize=(4, 4))
image = x_train[np.random.choice(range(x_train.shape[0]))]
plt.imshow(image.astype("uint8"))
plt.axis("off")
resized_image = ops.image.resize(
ops.convert_to_tensor([image]), size=(image_size, image_size)
)
patches = Patches(patch_size)(resized_image)
print(f"Image size: {image_size} X {image_size}")
print(f"Patch size: {patch_size} X {patch_size}")
print(f"Patches per image: {patches.shape[1]}")
print(f"Elements per patch: {patches.shape[-1]}")
n = int(np.sqrt(patches.shape[1]))
plt.figure(figsize=(4, 4))
for i, patch in enumerate(patches[0]):
ax = plt.subplot(n, n, i + 1)
patch_img = ops.reshape(patch, (patch_size, patch_size, 3))
plt.imshow(ops.convert_to_numpy(patch_img).astype("uint8"))
plt.axis("off")
"""
## Implement the patch encoding layer
The `PatchEncoder` layer will linearly transform a patch by projecting it into a
vector of size `projection_dim`. In addition, it adds a learnable position
embedding to the projected vector.
"""
class PatchEncoder(layers.Layer):
def __init__(self, num_patches, projection_dim):
super().__init__()
self.num_patches = num_patches
self.projection = layers.Dense(units=projection_dim)
self.position_embedding = layers.Embedding(
input_dim=num_patches, output_dim=projection_dim
)
def call(self, patch):
positions = ops.expand_dims(
ops.arange(start=0, stop=self.num_patches, step=1), axis=0
)
projected_patches = self.projection(patch)
encoded = projected_patches + self.position_embedding(positions)
return encoded
def get_config(self):
config = super().get_config()
config.update({"num_patches": self.num_patches})
return config
"""
## Build the ViT model
The ViT model consists of multiple Transformer blocks,
which use the `layers.MultiHeadAttention` layer as a self-attention mechanism
applied to the sequence of patches. The Transformer blocks produce a
`[batch_size, num_patches, projection_dim]` tensor, which is processed via a
classifier head with softmax to produce the final class probabilities output.
Unlike the technique described in the [paper](https://arxiv.org/abs/2010.11929),
which prepends a learnable embedding to the sequence of encoded patches to serve
as the image representation, all the outputs of the final Transformer block are
reshaped with `layers.Flatten()` and used as the image
representation input to the classifier head.
Note that the `layers.GlobalAveragePooling1D` layer
could also be used instead to aggregate the outputs of the Transformer block,
especially when the number of patches and the projection dimensions are large.
"""
def create_vit_classifier():
inputs = keras.Input(shape=input_shape)
# Augment data.
augmented = data_augmentation(inputs)
# Create patches.
patches = Patches(patch_size)(augmented)
# Encode patches.
encoded_patches = PatchEncoder(num_patches, projection_dim)(patches)
# Create multiple layers of the Transformer block.
for _ in range(transformer_layers):
# Layer normalization 1.
x1 = layers.LayerNormalization(epsilon=1e-6)(encoded_patches)
# Create a multi-head attention layer.
attention_output = layers.MultiHeadAttention(
num_heads=num_heads, key_dim=projection_dim, dropout=0.1
)(x1, x1)
# Skip connection 1.
x2 = layers.Add()([attention_output, encoded_patches])
# Layer normalization 2.
x3 = layers.LayerNormalization(epsilon=1e-6)(x2)
# MLP.
x3 = mlp(x3, hidden_units=transformer_units, dropout_rate=0.1)
# Skip connection 2.
encoded_patches = layers.Add()([x3, x2])
# Create a [batch_size, projection_dim] tensor.
representation = layers.LayerNormalization(epsilon=1e-6)(encoded_patches)
representation = layers.Flatten()(representation)
representation = layers.Dropout(0.5)(representation)
# Add MLP.
features = mlp(representation, hidden_units=mlp_head_units, dropout_rate=0.5)
# Classify outputs.
logits = layers.Dense(num_classes)(features)
# Create the Keras model.
model = keras.Model(inputs=inputs, outputs=logits)
return model
"""
## Compile, train, and evaluate the model
"""
def run_experiment(model):
optimizer = keras.optimizers.AdamW(
learning_rate=learning_rate, weight_decay=weight_decay
)
model.compile(
optimizer=optimizer,
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[
keras.metrics.SparseCategoricalAccuracy(name="accuracy"),
keras.metrics.SparseTopKCategoricalAccuracy(5, name="top-5-accuracy"),
],
)
checkpoint_filepath = "/tmp/checkpoint.weights.h5"
checkpoint_callback = keras.callbacks.ModelCheckpoint(
checkpoint_filepath,
monitor="val_accuracy",
save_best_only=True,
save_weights_only=True,
)
history = model.fit(
x=x_train,
y=y_train,
batch_size=batch_size,
epochs=num_epochs,
validation_split=0.1,
callbacks=[checkpoint_callback],
)
model.load_weights(checkpoint_filepath)
_, accuracy, top_5_accuracy = model.evaluate(x_test, y_test)
print(f"Test accuracy: {round(accuracy * 100, 2)}%")
print(f"Test top 5 accuracy: {round(top_5_accuracy * 100, 2)}%")
return history
vit_classifier = create_vit_classifier()
history = run_experiment(vit_classifier)
def plot_history(item):
plt.plot(history.history[item], label=item)
plt.plot(history.history["val_" + item], label="val_" + item)
plt.xlabel("Epochs")
plt.ylabel(item)
plt.title("Train and Validation {} Over Epochs".format(item), fontsize=14)
plt.legend()
plt.grid()
plt.show()
plot_history("loss")
plot_history("top-5-accuracy")
"""
After 100 epochs, the ViT model achieves around 55% accuracy and
82% top-5 accuracy on the test data. These are not competitive results on the CIFAR-100 dataset,
as a ResNet50V2 trained from scratch on the same data can achieve 67% accuracy.
Note that the state of the art results reported in the
[paper](https://arxiv.org/abs/2010.11929) are achieved by pre-training the ViT model using
the JFT-300M dataset, then fine-tuning it on the target dataset. To improve the model quality
without pre-training, you can try to train the model for more epochs, use a larger number of
Transformer layers, resize the input images, change the patch size, or increase the projection dimensions.
Besides, as mentioned in the paper, the quality of the model is affected not only by architecture choices,
but also by parameters such as the learning rate schedule, optimizer, weight decay, etc.
In practice, it's recommended to fine-tune a ViT model
that was pre-trained using a large, high-resolution dataset.
"""
|
keras-io/examples/vision/image_classification_with_vision_transformer.py/0
|
{
"file_path": "keras-io/examples/vision/image_classification_with_vision_transformer.py",
"repo_id": "keras-io",
"token_count": 3921
}
| 121 |
<jupyter_start><jupyter_text>Compact Convolutional Transformers**Author:** [Sayak Paul](https://twitter.com/RisingSayak)**Date created:** 2021/06/30**Last modified:** 2023/08/07**Description:** Compact Convolutional Transformers for efficient image classification. As discussed in the [Vision Transformers (ViT)](https://arxiv.org/abs/2010.11929) paper, a Transformer-based architecture for vision typically requires a larger dataset than usual, as well as a longer pre-training schedule. [ImageNet-1k](http://imagenet.org/) (which has about a million images) is considered to fall under the medium-sized data regime with respect to ViTs. This is primarily because, unlike CNNs, ViTs (or a typical Transformer-based architecture) do not have well-informed inductive biases (such as convolutions for processing images). This begs the question: can't we combine the benefits of convolution and the benefits of Transformers in a single network architecture? These benefits include parameter-efficiency, and self-attention to process long-range and global dependencies (interactions between different regions in an image). In [Escaping the Big Data Paradigm with Compact Transformers](https://arxiv.org/abs/2104.05704), Hassani et al. present an approach for doing exactly this. They proposed the **Compact Convolutional Transformer** (CCT) architecture. In this example, we will work on an implementation of CCT and we will see how well it performs on the CIFAR-10 dataset. If you are unfamiliar with the concept of self-attention or Transformers, you can read [this chapter](https://livebook.manning.com/book/deep-learning-with-python-second-edition/chapter-11/r-3/312) from François Chollet's book *Deep Learning with Python*. This example uses code snippets from another example, [Image classification with Vision Transformer](https://keras.io/examples/vision/image_classification_with_vision_transformer/). Imports<jupyter_code>from keras import layers
import keras
import matplotlib.pyplot as plt
import numpy as np<jupyter_output><empty_output><jupyter_text>Hyperparameters and constants<jupyter_code>positional_emb = True
conv_layers = 2
projection_dim = 128
num_heads = 2
transformer_units = [
projection_dim,
projection_dim,
]
transformer_layers = 2
stochastic_depth_rate = 0.1
learning_rate = 0.001
weight_decay = 0.0001
batch_size = 128
num_epochs = 30
image_size = 32<jupyter_output><empty_output><jupyter_text>Load CIFAR-10 dataset<jupyter_code>num_classes = 10
input_shape = (32, 32, 3)
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
print(f"x_train shape: {x_train.shape} - y_train shape: {y_train.shape}")
print(f"x_test shape: {x_test.shape} - y_test shape: {y_test.shape}")<jupyter_output><empty_output><jupyter_text>The CCT tokenizerThe first recipe introduced by the CCT authors is the tokenizer for processing theimages. In a standard ViT, images are organized into uniform *non-overlapping* patches.This eliminates the boundary-level information present in between different patches. Thisis important for a neural network to effectively exploit the locality information. Thefigure below presents an illustration of how images are organized into patches.We already know that convolutions are quite good at exploiting locality information. So,based on this, the authors introduce an all-convolution mini-network to produce imagepatches.<jupyter_code>class CCTTokenizer(layers.Layer):
def __init__(
self,
kernel_size=3,
stride=1,
padding=1,
pooling_kernel_size=3,
pooling_stride=2,
num_conv_layers=conv_layers,
num_output_channels=[64, 128],
positional_emb=positional_emb,
**kwargs,
):
super().__init__(**kwargs)
# This is our tokenizer.
self.conv_model = keras.Sequential()
for i in range(num_conv_layers):
self.conv_model.add(
layers.Conv2D(
num_output_channels[i],
kernel_size,
stride,
padding="valid",
use_bias=False,
activation="relu",
kernel_initializer="he_normal",
)
)
self.conv_model.add(layers.ZeroPadding2D(padding))
self.conv_model.add(
layers.MaxPooling2D(pooling_kernel_size, pooling_stride, "same")
)
self.positional_emb = positional_emb
def call(self, images):
outputs = self.conv_model(images)
# After passing the images through our mini-network the spatial dimensions
# are flattened to form sequences.
reshaped = keras.ops.reshape(
outputs,
(
-1,
keras.ops.shape(outputs)[1] * keras.ops.shape(outputs)[2],
keras.ops.shape(outputs)[-1],
),
)
        return reshaped<jupyter_output><empty_output><jupyter_text>Positional embeddings are optional in CCT. If we want to use them, we can use the Layer defined below.<jupyter_code>class PositionEmbedding(keras.layers.Layer):
def __init__(
self,
sequence_length,
initializer="glorot_uniform",
**kwargs,
):
super().__init__(**kwargs)
if sequence_length is None:
raise ValueError("`sequence_length` must be an Integer, received `None`.")
self.sequence_length = int(sequence_length)
self.initializer = keras.initializers.get(initializer)
def get_config(self):
config = super().get_config()
config.update(
{
"sequence_length": self.sequence_length,
"initializer": keras.initializers.serialize(self.initializer),
}
)
return config
def build(self, input_shape):
feature_size = input_shape[-1]
self.position_embeddings = self.add_weight(
name="embeddings",
shape=[self.sequence_length, feature_size],
initializer=self.initializer,
trainable=True,
)
super().build(input_shape)
def call(self, inputs, start_index=0):
shape = keras.ops.shape(inputs)
feature_length = shape[-1]
sequence_length = shape[-2]
# trim to match the length of the input sequence, which might be less
# than the sequence_length of the layer.
position_embeddings = keras.ops.convert_to_tensor(self.position_embeddings)
position_embeddings = keras.ops.slice(
position_embeddings,
(start_index, 0),
(sequence_length, feature_length),
)
return keras.ops.broadcast_to(position_embeddings, shape)
def compute_output_shape(self, input_shape):
        return input_shape<jupyter_output><empty_output><jupyter_text>Sequence Pooling. Another recipe introduced in CCT is attention pooling or sequence pooling. In ViT, only the feature map corresponding to the class token is pooled and is then used for the subsequent classification task (or any other downstream task).<jupyter_code>class SequencePooling(layers.Layer):
def __init__(self):
super().__init__()
self.attention = layers.Dense(1)
def call(self, x):
attention_weights = keras.ops.softmax(self.attention(x), axis=1)
attention_weights = keras.ops.transpose(attention_weights, axes=(0, 2, 1))
weighted_representation = keras.ops.matmul(attention_weights, x)
        return keras.ops.squeeze(weighted_representation, -2)<jupyter_output><empty_output><jupyter_text>Stochastic depth for regularization. [Stochastic depth](https://arxiv.org/abs/1603.09382) is a regularization technique that randomly drops a set of layers. During inference, the layers are kept as they are. It is very much similar to [Dropout](https://jmlr.org/papers/v15/srivastava14a.html), except that it operates on a block of layers rather than individual nodes present inside a layer. In CCT, stochastic depth is used just before the residual blocks of the Transformer encoder.<jupyter_code># Referred from: github.com:rwightman/pytorch-image-models.
class StochasticDepth(layers.Layer):
def __init__(self, drop_prop, **kwargs):
super().__init__(**kwargs)
self.drop_prob = drop_prop
self.seed_generator = keras.random.SeedGenerator(1337)
def call(self, x, training=None):
if training:
keep_prob = 1 - self.drop_prob
shape = (keras.ops.shape(x)[0],) + (1,) * (len(x.shape) - 1)
random_tensor = keep_prob + keras.random.uniform(
shape, 0, 1, seed=self.seed_generator
)
random_tensor = keras.ops.floor(random_tensor)
return (x / keep_prob) * random_tensor
return x<jupyter_output><empty_output><jupyter_text>MLP for the Transformers encoder<jupyter_code>def mlp(x, hidden_units, dropout_rate):
for units in hidden_units:
x = layers.Dense(units, activation=keras.ops.gelu)(x)
x = layers.Dropout(dropout_rate)(x)
    return x<jupyter_output><empty_output><jupyter_text>Data augmentation. In the [original paper](https://arxiv.org/abs/2104.05704), the authors use [AutoAugment](https://arxiv.org/abs/1805.09501) to induce stronger regularization. For this example, we will be using the standard geometric augmentations like random cropping and flipping.<jupyter_code># Note the rescaling layer. These layers have pre-defined inference behavior.
data_augmentation = keras.Sequential(
[
layers.Rescaling(scale=1.0 / 255),
layers.RandomCrop(image_size, image_size),
layers.RandomFlip("horizontal"),
],
name="data_augmentation",
)<jupyter_output><empty_output><jupyter_text>The final CCT model. In CCT, outputs from the Transformers encoder are weighted and then passed on to the final task-specific layer (in this example, we do classification).<jupyter_code>def create_cct_model(
image_size=image_size,
input_shape=input_shape,
num_heads=num_heads,
projection_dim=projection_dim,
transformer_units=transformer_units,
):
inputs = layers.Input(input_shape)
# Augment data.
augmented = data_augmentation(inputs)
# Encode patches.
cct_tokenizer = CCTTokenizer()
encoded_patches = cct_tokenizer(augmented)
# Apply positional embedding.
if positional_emb:
sequence_length = encoded_patches.shape[1]
encoded_patches += PositionEmbedding(sequence_length=sequence_length)(
encoded_patches
)
# Calculate Stochastic Depth probabilities.
dpr = [x for x in np.linspace(0, stochastic_depth_rate, transformer_layers)]
# Create multiple layers of the Transformer block.
for i in range(transformer_layers):
# Layer normalization 1.
x1 = layers.LayerNormalization(epsilon=1e-5)(encoded_patches)
# Create a multi-head attention layer.
attention_output = layers.MultiHeadAttention(
num_heads=num_heads, key_dim=projection_dim, dropout=0.1
)(x1, x1)
# Skip connection 1.
attention_output = StochasticDepth(dpr[i])(attention_output)
x2 = layers.Add()([attention_output, encoded_patches])
# Layer normalization 2.
x3 = layers.LayerNormalization(epsilon=1e-5)(x2)
# MLP.
x3 = mlp(x3, hidden_units=transformer_units, dropout_rate=0.1)
# Skip connection 2.
x3 = StochasticDepth(dpr[i])(x3)
encoded_patches = layers.Add()([x3, x2])
# Apply sequence pooling.
representation = layers.LayerNormalization(epsilon=1e-5)(encoded_patches)
weighted_representation = SequencePooling()(representation)
# Classify outputs.
logits = layers.Dense(num_classes)(weighted_representation)
# Create the Keras model.
model = keras.Model(inputs=inputs, outputs=logits)
return model<jupyter_output><empty_output><jupyter_text>Model training and evaluation<jupyter_code>def run_experiment(model):
optimizer = keras.optimizers.AdamW(learning_rate=0.001, weight_decay=0.0001)
model.compile(
optimizer=optimizer,
loss=keras.losses.CategoricalCrossentropy(
from_logits=True, label_smoothing=0.1
),
metrics=[
keras.metrics.CategoricalAccuracy(name="accuracy"),
keras.metrics.TopKCategoricalAccuracy(5, name="top-5-accuracy"),
],
)
checkpoint_filepath = "/tmp/checkpoint.weights.h5"
checkpoint_callback = keras.callbacks.ModelCheckpoint(
checkpoint_filepath,
monitor="val_accuracy",
save_best_only=True,
save_weights_only=True,
)
history = model.fit(
x=x_train,
y=y_train,
batch_size=batch_size,
epochs=num_epochs,
validation_split=0.1,
callbacks=[checkpoint_callback],
)
model.load_weights(checkpoint_filepath)
_, accuracy, top_5_accuracy = model.evaluate(x_test, y_test)
print(f"Test accuracy: {round(accuracy * 100, 2)}%")
print(f"Test top 5 accuracy: {round(top_5_accuracy * 100, 2)}%")
return history
cct_model = create_cct_model()
history = run_experiment(cct_model)<jupyter_output><empty_output><jupyter_text>Let's now visualize the training progress of the model.<jupyter_code>plt.plot(history.history["loss"], label="train_loss")
plt.plot(history.history["val_loss"], label="val_loss")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.title("Train and Validation Losses Over Epochs", fontsize=14)
plt.legend()
plt.grid()
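# The accuracy curves recorded under the "accuracy" / "val_accuracy" keys can
# be plotted the same way in a separate figure, for example:
#   plt.figure()
#   plt.plot(history.history["accuracy"], label="train_accuracy")
#   plt.plot(history.history["val_accuracy"], label="val_accuracy")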
plt.show()<jupyter_output><empty_output>
|
keras-io/examples/vision/ipynb/cct.ipynb/0
|
{
"file_path": "keras-io/examples/vision/ipynb/cct.ipynb",
"repo_id": "keras-io",
"token_count": 5370
}
| 122 |
<jupyter_start><jupyter_text>Handwriting recognition**Authors:** [A_K_Nain](https://twitter.com/A_K_Nain), [Sayak Paul](https://twitter.com/RisingSayak)**Date created:** 2021/08/16**Last modified:** 2023/07/06**Description:** Training a handwriting recognition model with variable-length sequences. IntroductionThis example shows how the [Captcha OCR](https://keras.io/examples/vision/captcha_ocr/)example can be extended to the[IAM Dataset](https://fki.tic.heia-fr.ch/databases/iam-handwriting-database),which has variable length ground-truth targets. Each sample in the dataset is an image of somehandwritten text, and its corresponding target is the string present in the image.The IAM Dataset is widely used across many OCR benchmarks, so we hope this example can serve as agood starting point for building OCR systems. Data collection<jupyter_code>!wget -q https://github.com/sayakpaul/Handwriting-Recognizer-in-Keras/releases/download/v1.0.0/IAM_Words.zip
!unzip -qq IAM_Words.zip
!
!mkdir data
!mkdir data/words
!tar -xf IAM_Words/words.tgz -C data/words
!mv IAM_Words/words.txt data<jupyter_output><empty_output><jupyter_text>Preview how the dataset is organized. Lines prepended by "#" are just metadata information.<jupyter_code>!head -20 data/words.txt<jupyter_output><empty_output><jupyter_text>Imports<jupyter_code>from tensorflow.keras.layers import StringLookup
from tensorflow import keras
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
import os
np.random.seed(42)
tf.random.set_seed(42)<jupyter_output><empty_output><jupyter_text>Dataset splitting<jupyter_code>base_path = "data"
words_list = []
words = open(f"{base_path}/words.txt", "r").readlines()
for line in words:
if line[0] == "#":
continue
if line.split(" ")[1] != "err": # We don't need to deal with errored entries.
words_list.append(line)
len(words_list)
np.random.shuffle(words_list)<jupyter_output><empty_output><jupyter_text>We will split the dataset into three subsets with a 90:5:5 ratio (train:validation:test).<jupyter_code>split_idx = int(0.9 * len(words_list))
train_samples = words_list[:split_idx]
test_samples = words_list[split_idx:]
val_split_idx = int(0.5 * len(test_samples))
validation_samples = test_samples[:val_split_idx]
test_samples = test_samples[val_split_idx:]
assert len(words_list) == len(train_samples) + len(validation_samples) + len(
test_samples
)
print(f"Total training samples: {len(train_samples)}")
print(f"Total validation samples: {len(validation_samples)}")
print(f"Total test samples: {len(test_samples)}")<jupyter_output><empty_output><jupyter_text>Data input pipelineWe start building our data input pipeline by first preparing the image paths.<jupyter_code>base_image_path = os.path.join(base_path, "words")
def get_image_paths_and_labels(samples):
paths = []
corrected_samples = []
for (i, file_line) in enumerate(samples):
line_split = file_line.strip()
line_split = line_split.split(" ")
# Each line split will have this format for the corresponding image:
# part1/part1-part2/part1-part2-part3.png
image_name = line_split[0]
partI = image_name.split("-")[0]
partII = image_name.split("-")[1]
img_path = os.path.join(
base_image_path, partI, partI + "-" + partII, image_name + ".png"
)
if os.path.getsize(img_path):
paths.append(img_path)
corrected_samples.append(file_line.split("\n")[0])
return paths, corrected_samples
train_img_paths, train_labels = get_image_paths_and_labels(train_samples)
validation_img_paths, validation_labels = get_image_paths_and_labels(validation_samples)
test_img_paths, test_labels = get_image_paths_and_labels(test_samples)<jupyter_output><empty_output><jupyter_text>Then we prepare the ground-truth labels.<jupyter_code># Find maximum length and the size of the vocabulary in the training data.
train_labels_cleaned = []
characters = set()
max_len = 0
for label in train_labels:
label = label.split(" ")[-1].strip()
for char in label:
characters.add(char)
max_len = max(max_len, len(label))
train_labels_cleaned.append(label)
characters = sorted(list(characters))
print("Maximum length: ", max_len)
print("Vocab size: ", len(characters))
# Check some label samples.
train_labels_cleaned[:10]<jupyter_output><empty_output><jupyter_text>Now we clean the validation and the test labels as well.<jupyter_code>def clean_labels(labels):
cleaned_labels = []
for label in labels:
label = label.split(" ")[-1].strip()
cleaned_labels.append(label)
return cleaned_labels
validation_labels_cleaned = clean_labels(validation_labels)
test_labels_cleaned = clean_labels(test_labels)<jupyter_output><empty_output><jupyter_text>Building the character vocabularyKeras provides different preprocessing layers to deal with different modalities of data.[This guide](https://keras.io/api/layers/preprocessing_layers/) provides a comprehensive introduction.Our example involves preprocessing labels at the characterlevel. This means that if there are two labels, e.g. "cat" and "dog", then our charactervocabulary should be {a, c, d, g, o, t} (without any special tokens). We use the[`StringLookup`](https://keras.io/api/layers/preprocessing_layers/categorical/string_lookup/)layer for this purpose.<jupyter_code>AUTOTUNE = tf.data.AUTOTUNE
# Mapping characters to integers.
char_to_num = StringLookup(vocabulary=list(characters), mask_token=None)
# Mapping integers back to original characters.
num_to_char = StringLookup(
vocabulary=char_to_num.get_vocabulary(), mask_token=None, invert=True
)<jupyter_output><empty_output><jupyter_text>Resizing images without distortionInstead of square images, many OCR models work with rectangular images. This will becomeclearer in a moment when we will visualize a few samples from the dataset. Whileaspect-unaware resizing square images does not introduce a significant amount ofdistortion this is not the case for rectangular images. But resizing images to a uniformsize is a requirement for mini-batching. So we need to perform our resizing such thatthe following criteria are met:* Aspect ratio is preserved.* Content of the images is not affected.<jupyter_code>def distortion_free_resize(image, img_size):
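    # Resize while preserving the aspect ratio, then pad so the output is
    # exactly `img_size`. The final transpose + flip orient the image so that
    # the width (time) axis comes first, which is the layout the rest of the
    # pipeline expects (the visualization code below undoes it for display).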
w, h = img_size
image = tf.image.resize(image, size=(h, w), preserve_aspect_ratio=True)
    # Check the amount of padding needed.
pad_height = h - tf.shape(image)[0]
pad_width = w - tf.shape(image)[1]
# Only necessary if you want to do same amount of padding on both sides.
if pad_height % 2 != 0:
height = pad_height // 2
pad_height_top = height + 1
pad_height_bottom = height
else:
pad_height_top = pad_height_bottom = pad_height // 2
if pad_width % 2 != 0:
width = pad_width // 2
pad_width_left = width + 1
pad_width_right = width
else:
pad_width_left = pad_width_right = pad_width // 2
image = tf.pad(
image,
paddings=[
[pad_height_top, pad_height_bottom],
[pad_width_left, pad_width_right],
[0, 0],
],
)
image = tf.transpose(image, perm=[1, 0, 2])
image = tf.image.flip_left_right(image)
return image<jupyter_output><empty_output><jupyter_text>If we just go with the plain resizing then the images would look like so:Notice how this resizing would have introduced unnecessary stretching. Putting the utilities together<jupyter_code>batch_size = 64
padding_token = 99
image_width = 128
image_height = 32
def preprocess_image(image_path, img_size=(image_width, image_height)):
image = tf.io.read_file(image_path)
image = tf.image.decode_png(image, 1)
image = distortion_free_resize(image, img_size)
image = tf.cast(image, tf.float32) / 255.0
return image
def vectorize_label(label):
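    # Map each character to its integer id and right-pad with `padding_token`
    # so that every label in a batch has length `max_len`.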
label = char_to_num(tf.strings.unicode_split(label, input_encoding="UTF-8"))
length = tf.shape(label)[0]
pad_amount = max_len - length
label = tf.pad(label, paddings=[[0, pad_amount]], constant_values=padding_token)
return label
def process_images_labels(image_path, label):
image = preprocess_image(image_path)
label = vectorize_label(label)
return {"image": image, "label": label}
def prepare_dataset(image_paths, labels):
dataset = tf.data.Dataset.from_tensor_slices((image_paths, labels)).map(
process_images_labels, num_parallel_calls=AUTOTUNE
)
return dataset.batch(batch_size).cache().prefetch(AUTOTUNE)<jupyter_output><empty_output><jupyter_text>Prepare `tf.data.Dataset` objects<jupyter_code>train_ds = prepare_dataset(train_img_paths, train_labels_cleaned)
validation_ds = prepare_dataset(validation_img_paths, validation_labels_cleaned)
test_ds = prepare_dataset(test_img_paths, test_labels_cleaned)<jupyter_output><empty_output><jupyter_text>Visualize a few samples<jupyter_code>for data in train_ds.take(1):
images, labels = data["image"], data["label"]
_, ax = plt.subplots(4, 4, figsize=(15, 8))
for i in range(16):
img = images[i]
img = tf.image.flip_left_right(img)
img = tf.transpose(img, perm=[1, 0, 2])
img = (img * 255.0).numpy().clip(0, 255).astype(np.uint8)
img = img[:, :, 0]
        # Gather indices where label != padding_token.
label = labels[i]
indices = tf.gather(label, tf.where(tf.math.not_equal(label, padding_token)))
# Convert to string.
label = tf.strings.reduce_join(num_to_char(indices))
label = label.numpy().decode("utf-8")
ax[i // 4, i % 4].imshow(img, cmap="gray")
ax[i // 4, i % 4].set_title(label)
ax[i // 4, i % 4].axis("off")
plt.show()<jupyter_output><empty_output><jupyter_text>You will notice that the content of the original image is kept as faithful as possible and has been padded accordingly. ModelOur model will use the CTC loss as an endpoint layer. For a detailed understanding of the CTC loss, refer to [this post](https://distill.pub/2017/ctc/).<jupyter_code>class CTCLayer(keras.layers.Layer):
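    """Endpoint layer that computes the CTC loss.

    At training time the layer builds per-sample input/label lengths from the
    prediction and label shapes, computes the CTC loss and registers it on the
    model via `add_loss`; at test time it simply returns the predictions.
    """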
def __init__(self, name=None):
super().__init__(name=name)
self.loss_fn = keras.backend.ctc_batch_cost
def call(self, y_true, y_pred):
batch_len = tf.cast(tf.shape(y_true)[0], dtype="int64")
input_length = tf.cast(tf.shape(y_pred)[1], dtype="int64")
label_length = tf.cast(tf.shape(y_true)[1], dtype="int64")
input_length = input_length * tf.ones(shape=(batch_len, 1), dtype="int64")
label_length = label_length * tf.ones(shape=(batch_len, 1), dtype="int64")
loss = self.loss_fn(y_true, y_pred, input_length, label_length)
self.add_loss(loss)
# At test time, just return the computed predictions.
return y_pred
def build_model():
# Inputs to the model
input_img = keras.Input(shape=(image_width, image_height, 1), name="image")
labels = keras.layers.Input(name="label", shape=(None,))
# First conv block.
x = keras.layers.Conv2D(
32,
(3, 3),
activation="relu",
kernel_initializer="he_normal",
padding="same",
name="Conv1",
)(input_img)
x = keras.layers.MaxPooling2D((2, 2), name="pool1")(x)
# Second conv block.
x = keras.layers.Conv2D(
64,
(3, 3),
activation="relu",
kernel_initializer="he_normal",
padding="same",
name="Conv2",
)(x)
x = keras.layers.MaxPooling2D((2, 2), name="pool2")(x)
    # We have used two max pooling layers with pool size and strides of 2.
# Hence, downsampled feature maps are 4x smaller. The number of
# filters in the last layer is 64. Reshape accordingly before
# passing the output to the RNN part of the model.
new_shape = ((image_width // 4), (image_height // 4) * 64)
x = keras.layers.Reshape(target_shape=new_shape, name="reshape")(x)
x = keras.layers.Dense(64, activation="relu", name="dense1")(x)
x = keras.layers.Dropout(0.2)(x)
# RNNs.
x = keras.layers.Bidirectional(
keras.layers.LSTM(128, return_sequences=True, dropout=0.25)
)(x)
x = keras.layers.Bidirectional(
keras.layers.LSTM(64, return_sequences=True, dropout=0.25)
)(x)
# +2 is to account for the two special tokens introduced by the CTC loss.
# The recommendation comes here: https://git.io/J0eXP.
x = keras.layers.Dense(
len(char_to_num.get_vocabulary()) + 2, activation="softmax", name="dense2"
)(x)
# Add CTC layer for calculating CTC loss at each step.
output = CTCLayer(name="ctc_loss")(labels, x)
# Define the model.
model = keras.models.Model(
inputs=[input_img, labels], outputs=output, name="handwriting_recognizer"
)
# Optimizer.
opt = keras.optimizers.Adam()
# Compile the model and return.
model.compile(optimizer=opt)
return model
# Get the model.
model = build_model()
model.summary()<jupyter_output><empty_output><jupyter_text>Evaluation metric[Edit Distance](https://en.wikipedia.org/wiki/Edit_distance)is the most widely used metric for evaluating OCR models. In this section, we willimplement it and use it as a callback to monitor our model. We first segregate the validation images and their labels for convenience.<jupyter_code>validation_images = []
validation_labels = []
for batch in validation_ds:
validation_images.append(batch["image"])
validation_labels.append(batch["label"])<jupyter_output><empty_output><jupyter_text>Now, we create a callback to monitor the edit distances.<jupyter_code>def calculate_edit_distance(labels, predictions):
# Get a single batch and convert its labels to sparse tensors.
    sparse_labels = tf.cast(tf.sparse.from_dense(labels), dtype=tf.int64)
# Make predictions and convert them to sparse tensors.
input_len = np.ones(predictions.shape[0]) * predictions.shape[1]
predictions_decoded = keras.backend.ctc_decode(
predictions, input_length=input_len, greedy=True
)[0][0][:, :max_len]
sparse_predictions = tf.cast(
tf.sparse.from_dense(predictions_decoded), dtype=tf.int64
)
# Compute individual edit distances and average them out.
edit_distances = tf.edit_distance(
        sparse_predictions, sparse_labels, normalize=False
)
return tf.reduce_mean(edit_distances)
class EditDistanceCallback(keras.callbacks.Callback):
def __init__(self, pred_model):
super().__init__()
self.prediction_model = pred_model
def on_epoch_end(self, epoch, logs=None):
edit_distances = []
for i in range(len(validation_images)):
labels = validation_labels[i]
predictions = self.prediction_model.predict(validation_images[i])
edit_distances.append(calculate_edit_distance(labels, predictions).numpy())
print(
f"Mean edit distance for epoch {epoch + 1}: {np.mean(edit_distances):.4f}"
)<jupyter_output><empty_output><jupyter_text>TrainingNow we are ready to kick off model training.<jupyter_code>epochs = 10 # To get good results this should be at least 50.
model = build_model()
prediction_model = keras.models.Model(
model.get_layer(name="image").input, model.get_layer(name="dense2").output
)
edit_distance_callback = EditDistanceCallback(prediction_model)
# Train the model.
history = model.fit(
train_ds,
validation_data=validation_ds,
epochs=epochs,
callbacks=[edit_distance_callback],
)<jupyter_output><empty_output><jupyter_text>Inference<jupyter_code># A utility function to decode the output of the network.
def decode_batch_predictions(pred):
input_len = np.ones(pred.shape[0]) * pred.shape[1]
# Use greedy search. For complex tasks, you can use beam search.
results = keras.backend.ctc_decode(pred, input_length=input_len, greedy=True)[0][0][
:, :max_len
]
# Iterate over the results and get back the text.
output_text = []
for res in results:
res = tf.gather(res, tf.where(tf.math.not_equal(res, -1)))
res = tf.strings.reduce_join(num_to_char(res)).numpy().decode("utf-8")
output_text.append(res)
return output_text
# Let's check results on some test samples.
for batch in test_ds.take(1):
batch_images = batch["image"]
_, ax = plt.subplots(4, 4, figsize=(15, 8))
preds = prediction_model.predict(batch_images)
pred_texts = decode_batch_predictions(preds)
for i in range(16):
img = batch_images[i]
img = tf.image.flip_left_right(img)
img = tf.transpose(img, perm=[1, 0, 2])
img = (img * 255.0).numpy().clip(0, 255).astype(np.uint8)
img = img[:, :, 0]
title = f"Prediction: {pred_texts[i]}"
ax[i // 4, i % 4].imshow(img, cmap="gray")
ax[i // 4, i % 4].set_title(title)
ax[i // 4, i % 4].axis("off")
plt.show()<jupyter_output><empty_output>
|
keras-io/examples/vision/ipynb/handwriting_recognition.ipynb/0
|
{
"file_path": "keras-io/examples/vision/ipynb/handwriting_recognition.ipynb",
"repo_id": "keras-io",
"token_count": 6568
}
| 123 |
<jupyter_start><jupyter_text>Image classification with modern MLP models**Author:** [Khalid Salama](https://www.linkedin.com/in/khalid-salama-24403144/)**Date created:** 2021/05/30**Last modified:** 2023/08/03**Description:** Implementing the MLP-Mixer, FNet, and gMLP models for CIFAR-100 image classification. IntroductionThis example implements three modern attention-free, multi-layer perceptron (MLP) based models for image classification, demonstrated on the CIFAR-100 dataset:1. The [MLP-Mixer](https://arxiv.org/abs/2105.01601) model, by Ilya Tolstikhin et al., based on two types of MLPs.2. The [FNet](https://arxiv.org/abs/2105.03824) model, by James Lee-Thorp et al., based on an unparameterized Fourier Transform.3. The [gMLP](https://arxiv.org/abs/2105.08050) model, by Hanxiao Liu et al., based on MLP with gating.The purpose of the example is not to compare these models, as they might perform differently on different datasets with well-tuned hyperparameters. Rather, it is to show simple implementations of their main building blocks. Setup<jupyter_code>import numpy as np
import keras
from keras import layers<jupyter_output><empty_output><jupyter_text>Prepare the data<jupyter_code>num_classes = 100
input_shape = (32, 32, 3)
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar100.load_data()
print(f"x_train shape: {x_train.shape} - y_train shape: {y_train.shape}")
print(f"x_test shape: {x_test.shape} - y_test shape: {y_test.shape}")<jupyter_output><empty_output><jupyter_text>Configure the hyperparameters<jupyter_code>weight_decay = 0.0001
batch_size = 128
num_epochs = 1 # Recommended num_epochs = 50
dropout_rate = 0.2
image_size = 64 # We'll resize input images to this size.
patch_size = 8 # Size of the patches to be extracted from the input images.
num_patches = (image_size // patch_size) ** 2 # Size of the data array.
embedding_dim = 256 # Number of hidden units.
num_blocks = 4 # Number of blocks.
print(f"Image size: {image_size} X {image_size} = {image_size ** 2}")
print(f"Patch size: {patch_size} X {patch_size} = {patch_size ** 2} ")
print(f"Patches per image: {num_patches}")
print(f"Elements per patch (3 channels): {(patch_size ** 2) * 3}")<jupyter_output><empty_output><jupyter_text>Build a classification modelWe implement a method that builds a classifier given the processing blocks.<jupyter_code>def build_classifier(blocks, positional_encoding=False):
inputs = layers.Input(shape=input_shape)
# Augment data.
augmented = data_augmentation(inputs)
# Create patches.
patches = Patches(patch_size)(augmented)
# Encode patches to generate a [batch_size, num_patches, embedding_dim] tensor.
x = layers.Dense(units=embedding_dim)(patches)
if positional_encoding:
x = x + PositionEmbedding(sequence_length=num_patches)(x)
# Process x using the module blocks.
x = blocks(x)
# Apply global average pooling to generate a [batch_size, embedding_dim] representation tensor.
representation = layers.GlobalAveragePooling1D()(x)
# Apply dropout.
representation = layers.Dropout(rate=dropout_rate)(representation)
# Compute logits outputs.
logits = layers.Dense(num_classes)(representation)
# Create the Keras model.
return keras.Model(inputs=inputs, outputs=logits)<jupyter_output><empty_output><jupyter_text>Define an experimentWe implement a utility function to compile, train, and evaluate a given model.<jupyter_code>def run_experiment(model):
# Create Adam optimizer with weight decay.
optimizer = keras.optimizers.AdamW(
learning_rate=learning_rate,
weight_decay=weight_decay,
)
# Compile the model.
model.compile(
optimizer=optimizer,
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[
keras.metrics.SparseCategoricalAccuracy(name="acc"),
keras.metrics.SparseTopKCategoricalAccuracy(5, name="top5-acc"),
],
)
# Create a learning rate scheduler callback.
reduce_lr = keras.callbacks.ReduceLROnPlateau(
monitor="val_loss", factor=0.5, patience=5
)
# Create an early stopping callback.
early_stopping = keras.callbacks.EarlyStopping(
monitor="val_loss", patience=10, restore_best_weights=True
)
# Fit the model.
history = model.fit(
x=x_train,
y=y_train,
batch_size=batch_size,
epochs=num_epochs,
validation_split=0.1,
callbacks=[early_stopping, reduce_lr],
)
_, accuracy, top_5_accuracy = model.evaluate(x_test, y_test)
print(f"Test accuracy: {round(accuracy * 100, 2)}%")
print(f"Test top 5 accuracy: {round(top_5_accuracy * 100, 2)}%")
# Return history to plot learning curves.
return history<jupyter_output><empty_output><jupyter_text>Use data augmentation<jupyter_code>data_augmentation = keras.Sequential(
[
layers.Normalization(),
layers.Resizing(image_size, image_size),
layers.RandomFlip("horizontal"),
layers.RandomZoom(height_factor=0.2, width_factor=0.2),
],
name="data_augmentation",
)
# Compute the mean and the variance of the training data for normalization.
data_augmentation.layers[0].adapt(x_train)<jupyter_output><empty_output><jupyter_text>Implement patch extraction as a layer<jupyter_code>class Patches(layers.Layer):
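    """Splits each image into non-overlapping `patch_size x patch_size`
    patches and flattens them into a `(batch_size, num_patches, patch_dim)`
    tensor."""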
def __init__(self, patch_size, **kwargs):
super().__init__(**kwargs)
self.patch_size = patch_size
def call(self, x):
patches = keras.ops.image.extract_patches(x, self.patch_size)
batch_size = keras.ops.shape(patches)[0]
num_patches = keras.ops.shape(patches)[1] * keras.ops.shape(patches)[2]
patch_dim = keras.ops.shape(patches)[3]
out = keras.ops.reshape(patches, (batch_size, num_patches, patch_dim))
return out<jupyter_output><empty_output><jupyter_text>Implement position embedding as a layer<jupyter_code>class PositionEmbedding(keras.layers.Layer):
def __init__(
self,
sequence_length,
initializer="glorot_uniform",
**kwargs,
):
super().__init__(**kwargs)
if sequence_length is None:
raise ValueError("`sequence_length` must be an Integer, received `None`.")
self.sequence_length = int(sequence_length)
self.initializer = keras.initializers.get(initializer)
def get_config(self):
config = super().get_config()
config.update(
{
"sequence_length": self.sequence_length,
"initializer": keras.initializers.serialize(self.initializer),
}
)
return config
def build(self, input_shape):
feature_size = input_shape[-1]
self.position_embeddings = self.add_weight(
name="embeddings",
shape=[self.sequence_length, feature_size],
initializer=self.initializer,
trainable=True,
)
super().build(input_shape)
def call(self, inputs, start_index=0):
shape = keras.ops.shape(inputs)
feature_length = shape[-1]
sequence_length = shape[-2]
# trim to match the length of the input sequence, which might be less
# than the sequence_length of the layer.
position_embeddings = keras.ops.convert_to_tensor(self.position_embeddings)
position_embeddings = keras.ops.slice(
position_embeddings,
(start_index, 0),
(sequence_length, feature_length),
)
return keras.ops.broadcast_to(position_embeddings, shape)
def compute_output_shape(self, input_shape):
return input_shape<jupyter_output><empty_output><jupyter_text>The MLP-Mixer modelThe MLP-Mixer is an architecture based exclusively onmulti-layer perceptrons (MLPs), that contains two types of MLP layers:1. One applied independently to image patches, which mixes the per-location features.2. The other applied across patches (along channels), which mixes spatial information.This is similar to a [depthwise separable convolution based model](https://arxiv.org/abs/1610.02357)such as the Xception model, but with two chained dense transforms, no max pooling, and layer normalizationinstead of batch normalization. Implement the MLP-Mixer module<jupyter_code>class MLPMixerLayer(layers.Layer):
def __init__(self, num_patches, hidden_units, dropout_rate, *args, **kwargs):
super().__init__(*args, **kwargs)
self.mlp1 = keras.Sequential(
[
layers.Dense(units=num_patches, activation="gelu"),
layers.Dense(units=num_patches),
layers.Dropout(rate=dropout_rate),
]
)
self.mlp2 = keras.Sequential(
[
layers.Dense(units=num_patches, activation="gelu"),
layers.Dense(units=hidden_units),
layers.Dropout(rate=dropout_rate),
]
)
self.normalize = layers.LayerNormalization(epsilon=1e-6)
def build(self, input_shape):
return super().build(input_shape)
def call(self, inputs):
# Apply layer normalization.
x = self.normalize(inputs)
# Transpose inputs from [num_batches, num_patches, hidden_units] to [num_batches, hidden_units, num_patches].
x_channels = keras.ops.transpose(x, axes=(0, 2, 1))
# Apply mlp1 on each channel independently.
mlp1_outputs = self.mlp1(x_channels)
# Transpose mlp1_outputs from [num_batches, hidden_dim, num_patches] to [num_batches, num_patches, hidden_units].
mlp1_outputs = keras.ops.transpose(mlp1_outputs, axes=(0, 2, 1))
# Add skip connection.
x = mlp1_outputs + inputs
# Apply layer normalization.
x_patches = self.normalize(x)
        # Apply mlp2 on each patch independently.
mlp2_outputs = self.mlp2(x_patches)
# Add skip connection.
x = x + mlp2_outputs
return x<jupyter_output><empty_output><jupyter_text>Build, train, and evaluate the MLP-Mixer modelNote that training the model with the current settings on a V100 GPUstakes around 8 seconds per epoch.<jupyter_code>mlpmixer_blocks = keras.Sequential(
[MLPMixerLayer(num_patches, embedding_dim, dropout_rate) for _ in range(num_blocks)]
)
learning_rate = 0.005
mlpmixer_classifier = build_classifier(mlpmixer_blocks)
history = run_experiment(mlpmixer_classifier)<jupyter_output><empty_output><jupyter_text>The MLP-Mixer model tends to have far fewer parameters compared to convolutional and transformer-based models, which leads to lower training and serving computational cost.As mentioned in the [MLP-Mixer](https://arxiv.org/abs/2105.01601) paper, when pre-trained on large datasets, or with modern regularization schemes, the MLP-Mixer attains competitive scores to state-of-the-art models.You can obtain better results by increasing the embedding dimensions, increasing the number of mixer blocks, and training the model for longer.You may also try to increase the size of the input images and use different patch sizes. The FNet modelThe FNet uses a block similar to the Transformer block. However, FNet replaces the self-attention layer in the Transformer block with a parameter-free 2D Fourier transformation layer:1. One 1D Fourier Transform is applied along the patches.2. One 1D Fourier Transform is applied along the channels. Implement the FNet module<jupyter_code>class FNetLayer(layers.Layer):
def __init__(self, embedding_dim, dropout_rate, *args, **kwargs):
super().__init__(*args, **kwargs)
self.ffn = keras.Sequential(
[
layers.Dense(units=embedding_dim, activation="gelu"),
layers.Dropout(rate=dropout_rate),
layers.Dense(units=embedding_dim),
]
)
self.normalize1 = layers.LayerNormalization(epsilon=1e-6)
self.normalize2 = layers.LayerNormalization(epsilon=1e-6)
def call(self, inputs):
# Apply fourier transformations.
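        # `keras.ops.fft2` operates on a (real, imaginary) pair of tensors and
        # returns one; indexing with [0] keeps only the real part of the 2D
        # DFT, as in the FNet paper.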
real_part = inputs
im_part = keras.ops.zeros_like(inputs)
x = keras.ops.fft2((real_part, im_part))[0]
# Add skip connection.
x = x + inputs
# Apply layer normalization.
x = self.normalize1(x)
        # Apply the feedforward network.
x_ffn = self.ffn(x)
# Add skip connection.
x = x + x_ffn
# Apply layer normalization.
return self.normalize2(x)<jupyter_output><empty_output><jupyter_text>Build, train, and evaluate the FNet modelNote that training the model with the current settings on a V100 GPUstakes around 8 seconds per epoch.<jupyter_code>fnet_blocks = keras.Sequential(
[FNetLayer(embedding_dim, dropout_rate) for _ in range(num_blocks)]
)
learning_rate = 0.001
fnet_classifier = build_classifier(fnet_blocks, positional_encoding=True)
history = run_experiment(fnet_classifier)<jupyter_output><empty_output><jupyter_text>As shown in the [FNet](https://arxiv.org/abs/2105.03824) paper,better results can be achieved by increasing the embedding dimensions,increasing the number of FNet blocks, and training the model for longer.You may also try to increase the size of the input images and use different patch sizes.The FNet scales very efficiently to long inputs, runs much faster than attention-basedTransformer models, and produces competitive accuracy results. The gMLP modelThe gMLP is a MLP architecture that features a Spatial Gating Unit (SGU).The SGU enables cross-patch interactions across the spatial (channel) dimension, by:1. Transforming the input spatially by applying linear projection across patches (along channels).2. Applying element-wise multiplication of the input and its spatial transformation. Implement the gMLP module<jupyter_code>class gMLPLayer(layers.Layer):
def __init__(self, num_patches, embedding_dim, dropout_rate, *args, **kwargs):
super().__init__(*args, **kwargs)
self.channel_projection1 = keras.Sequential(
[
layers.Dense(units=embedding_dim * 2, activation="gelu"),
layers.Dropout(rate=dropout_rate),
]
)
self.channel_projection2 = layers.Dense(units=embedding_dim)
self.spatial_projection = layers.Dense(
units=num_patches, bias_initializer="Ones"
)
self.normalize1 = layers.LayerNormalization(epsilon=1e-6)
self.normalize2 = layers.LayerNormalization(epsilon=1e-6)
def spatial_gating_unit(self, x):
# Split x along the channel dimensions.
        # Tensors u and v will be of shape [batch_size, num_patches, embedding_dim].
u, v = keras.ops.split(x, indices_or_sections=2, axis=2)
# Apply layer normalization.
v = self.normalize2(v)
# Apply spatial projection.
v_channels = keras.ops.transpose(v, axes=(0, 2, 1))
v_projected = self.spatial_projection(v_channels)
v_projected = keras.ops.transpose(v_projected, axes=(0, 2, 1))
# Apply element-wise multiplication.
return u * v_projected
def call(self, inputs):
# Apply layer normalization.
x = self.normalize1(inputs)
# Apply the first channel projection. x_projected shape: [batch_size, num_patches, embedding_dim * 2].
x_projected = self.channel_projection1(x)
# Apply the spatial gating unit. x_spatial shape: [batch_size, num_patches, embedding_dim].
x_spatial = self.spatial_gating_unit(x_projected)
# Apply the second channel projection. x_projected shape: [batch_size, num_patches, embedding_dim].
x_projected = self.channel_projection2(x_spatial)
# Add skip connection.
return x + x_projected<jupyter_output><empty_output><jupyter_text>Build, train, and evaluate the gMLP modelNote that training the model with the current settings on a V100 GPUstakes around 9 seconds per epoch.<jupyter_code>gmlp_blocks = keras.Sequential(
[gMLPLayer(num_patches, embedding_dim, dropout_rate) for _ in range(num_blocks)]
)
learning_rate = 0.003
gmlp_classifier = build_classifier(gmlp_blocks)
history = run_experiment(gmlp_classifier)<jupyter_output><empty_output>
|
keras-io/examples/vision/ipynb/mlp_image_classification.ipynb/0
|
{
"file_path": "keras-io/examples/vision/ipynb/mlp_image_classification.ipynb",
"repo_id": "keras-io",
"token_count": 6269
}
| 124 |
<jupyter_start><jupyter_text>Object Detection with RetinaNet**Author:** [Srihari Humbarwadi](https://twitter.com/srihari_rh)**Date created:** 2020/05/17**Last modified:** 2023/07/10**Description:** Implementing RetinaNet: Focal Loss for Dense Object Detection. IntroductionObject detection is a very important problem in computer vision. Here the model is tasked with localizing the objects present in an image, and at the same time, classifying them into different categories.Object detection models can be broadly classified into "single-stage" and "two-stage" detectors. Two-stage detectors are often more accurate but at the cost of being slower. In this example, we will implement RetinaNet, a popular single-stage detector, which is accurate and runs fast.RetinaNet uses a feature pyramid network to efficiently detect objects at multiple scales and introduces a new loss, the Focal loss function, to alleviate the problem of the extreme foreground-background class imbalance.**References:**- [RetinaNet Paper](https://arxiv.org/abs/1708.02002)- [Feature Pyramid Network Paper](https://arxiv.org/abs/1612.03144)<jupyter_code>import os
import re
import zipfile
import numpy as np
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
import tensorflow_datasets as tfds<jupyter_output><empty_output><jupyter_text>Downloading the COCO2017 datasetTraining on the entire COCO2017 dataset which has around 118k images takes alot of time, hence we will be using a smaller subset of ~500 images fortraining in this example.<jupyter_code>url = "https://github.com/srihari-humbarwadi/datasets/releases/download/v0.1.0/data.zip"
filename = os.path.join(os.getcwd(), "data.zip")
keras.utils.get_file(filename, url)
with zipfile.ZipFile("data.zip", "r") as z_fp:
z_fp.extractall("./")<jupyter_output><empty_output><jupyter_text>Implementing utility functionsBounding boxes can be represented in multiple ways, the most common formats are:- Storing the coordinates of the corners `[xmin, ymin, xmax, ymax]`- Storing the coordinates of the center and the box dimensions`[x, y, width, height]`Since we require both formats, we will be implementing functions for convertingbetween the formats.<jupyter_code>def swap_xy(boxes):
"""Swaps order the of x and y coordinates of the boxes.
Arguments:
boxes: A tensor with shape `(num_boxes, 4)` representing bounding boxes.
Returns:
swapped boxes with shape same as that of boxes.
"""
return tf.stack([boxes[:, 1], boxes[:, 0], boxes[:, 3], boxes[:, 2]], axis=-1)
def convert_to_xywh(boxes):
"""Changes the box format to center, width and height.
Arguments:
boxes: A tensor of rank 2 or higher with a shape of `(..., num_boxes, 4)`
representing bounding boxes where each box is of the format
`[xmin, ymin, xmax, ymax]`.
Returns:
converted boxes with shape same as that of boxes.
"""
return tf.concat(
[(boxes[..., :2] + boxes[..., 2:]) / 2.0, boxes[..., 2:] - boxes[..., :2]],
axis=-1,
)
def convert_to_corners(boxes):
"""Changes the box format to corner coordinates
Arguments:
boxes: A tensor of rank 2 or higher with a shape of `(..., num_boxes, 4)`
representing bounding boxes where each box is of the format
`[x, y, width, height]`.
Returns:
converted boxes with shape same as that of boxes.
"""
return tf.concat(
[boxes[..., :2] - boxes[..., 2:] / 2.0, boxes[..., :2] + boxes[..., 2:] / 2.0],
axis=-1,
)<jupyter_output><empty_output><jupyter_text>Computing pairwise Intersection Over Union (IOU)As we will see later in the example, we would be assigning ground truth boxesto anchor boxes based on the extent of overlapping. This will require us tocalculate the Intersection Over Union (IOU) between all the anchorboxes and ground truth boxes pairs.<jupyter_code>def compute_iou(boxes1, boxes2):
"""Computes pairwise IOU matrix for given two sets of boxes
Arguments:
boxes1: A tensor with shape `(N, 4)` representing bounding boxes
where each box is of the format `[x, y, width, height]`.
boxes2: A tensor with shape `(M, 4)` representing bounding boxes
where each box is of the format `[x, y, width, height]`.
Returns:
pairwise IOU matrix with shape `(N, M)`, where the value at ith row
jth column holds the IOU between ith box and jth box from
boxes1 and boxes2 respectively.
"""
boxes1_corners = convert_to_corners(boxes1)
boxes2_corners = convert_to_corners(boxes2)
lu = tf.maximum(boxes1_corners[:, None, :2], boxes2_corners[:, :2])
rd = tf.minimum(boxes1_corners[:, None, 2:], boxes2_corners[:, 2:])
intersection = tf.maximum(0.0, rd - lu)
intersection_area = intersection[:, :, 0] * intersection[:, :, 1]
boxes1_area = boxes1[:, 2] * boxes1[:, 3]
boxes2_area = boxes2[:, 2] * boxes2[:, 3]
union_area = tf.maximum(
boxes1_area[:, None] + boxes2_area - intersection_area, 1e-8
)
return tf.clip_by_value(intersection_area / union_area, 0.0, 1.0)
def visualize_detections(
image, boxes, classes, scores, figsize=(7, 7), linewidth=1, color=[0, 0, 1]
):
"""Visualize Detections"""
image = np.array(image, dtype=np.uint8)
plt.figure(figsize=figsize)
plt.axis("off")
plt.imshow(image)
ax = plt.gca()
for box, _cls, score in zip(boxes, classes, scores):
text = "{}: {:.2f}".format(_cls, score)
x1, y1, x2, y2 = box
w, h = x2 - x1, y2 - y1
patch = plt.Rectangle(
[x1, y1], w, h, fill=False, edgecolor=color, linewidth=linewidth
)
ax.add_patch(patch)
ax.text(
x1,
y1,
text,
bbox={"facecolor": color, "alpha": 0.4},
clip_box=ax.clipbox,
clip_on=True,
)
plt.show()
return ax<jupyter_output><empty_output><jupyter_text>Implementing Anchor generatorAnchor boxes are fixed sized boxes that the model uses to predict the boundingbox for an object. It does this by regressing the offset between the locationof the object's center and the center of an anchor box, and then uses the widthand height of the anchor box to predict a relative scale of the object. In thecase of RetinaNet, each location on a given feature map has nine anchor boxes(at three scales and three ratios).<jupyter_code>class AnchorBox:
"""Generates anchor boxes.
This class has operations to generate anchor boxes for feature maps at
    strides `[8, 16, 32, 64, 128]`, where each anchor box is of the
format `[x, y, width, height]`.
Attributes:
aspect_ratios: A list of float values representing the aspect ratios of
the anchor boxes at each location on the feature map
scales: A list of float values representing the scale of the anchor boxes
at each location on the feature map.
num_anchors: The number of anchor boxes at each location on feature map
areas: A list of float values representing the areas of the anchor
boxes for each feature map in the feature pyramid.
strides: A list of float value representing the strides for each feature
map in the feature pyramid.
"""
def __init__(self):
self.aspect_ratios = [0.5, 1.0, 2.0]
self.scales = [2 ** x for x in [0, 1 / 3, 2 / 3]]
self._num_anchors = len(self.aspect_ratios) * len(self.scales)
self._strides = [2 ** i for i in range(3, 8)]
self._areas = [x ** 2 for x in [32.0, 64.0, 128.0, 256.0, 512.0]]
self._anchor_dims = self._compute_dims()
def _compute_dims(self):
"""Computes anchor box dimensions for all ratios and scales at all levels
of the feature pyramid.
"""
anchor_dims_all = []
for area in self._areas:
anchor_dims = []
for ratio in self.aspect_ratios:
anchor_height = tf.math.sqrt(area / ratio)
anchor_width = area / anchor_height
dims = tf.reshape(
tf.stack([anchor_width, anchor_height], axis=-1), [1, 1, 2]
)
for scale in self.scales:
anchor_dims.append(scale * dims)
anchor_dims_all.append(tf.stack(anchor_dims, axis=-2))
return anchor_dims_all
def _get_anchors(self, feature_height, feature_width, level):
"""Generates anchor boxes for a given feature map size and level
Arguments:
feature_height: An integer representing the height of the feature map.
feature_width: An integer representing the width of the feature map.
level: An integer representing the level of the feature map in the
feature pyramid.
Returns:
anchor boxes with the shape
`(feature_height * feature_width * num_anchors, 4)`
"""
rx = tf.range(feature_width, dtype=tf.float32) + 0.5
ry = tf.range(feature_height, dtype=tf.float32) + 0.5
centers = tf.stack(tf.meshgrid(rx, ry), axis=-1) * self._strides[level - 3]
centers = tf.expand_dims(centers, axis=-2)
centers = tf.tile(centers, [1, 1, self._num_anchors, 1])
dims = tf.tile(
self._anchor_dims[level - 3], [feature_height, feature_width, 1, 1]
)
anchors = tf.concat([centers, dims], axis=-1)
return tf.reshape(
anchors, [feature_height * feature_width * self._num_anchors, 4]
)
def get_anchors(self, image_height, image_width):
"""Generates anchor boxes for all the feature maps of the feature pyramid.
Arguments:
image_height: Height of the input image.
image_width: Width of the input image.
Returns:
anchor boxes for all the feature maps, stacked as a single tensor
with shape `(total_anchors, 4)`
"""
anchors = [
self._get_anchors(
tf.math.ceil(image_height / 2 ** i),
tf.math.ceil(image_width / 2 ** i),
i,
)
for i in range(3, 8)
]
return tf.concat(anchors, axis=0)<jupyter_output><empty_output><jupyter_text>Preprocessing dataPreprocessing the images involves two steps:- Resizing the image: Images are resized such that the shortest size is equalto 800 px, after resizing if the longest side of the image exceeds 1333 px,the image is resized such that the longest size is now capped at 1333 px.- Applying augmentation: Random scale jittering and random horizontal flippingare the only augmentations applied to the images.Along with the images, bounding boxes are rescaled and flipped if required.<jupyter_code>def random_flip_horizontal(image, boxes):
"""Flips image and boxes horizontally with 50% chance
Arguments:
image: A 3-D tensor of shape `(height, width, channels)` representing an
image.
boxes: A tensor with shape `(num_boxes, 4)` representing bounding boxes,
having normalized coordinates.
Returns:
Randomly flipped image and boxes
"""
if tf.random.uniform(()) > 0.5:
image = tf.image.flip_left_right(image)
boxes = tf.stack(
[1 - boxes[:, 2], boxes[:, 1], 1 - boxes[:, 0], boxes[:, 3]], axis=-1
)
return image, boxes
def resize_and_pad_image(
image, min_side=800.0, max_side=1333.0, jitter=[640, 1024], stride=128.0
):
"""Resizes and pads image while preserving aspect ratio.
1. Resizes images so that the shorter side is equal to `min_side`
2. If the longer side is greater than `max_side`, then resize the image
with longer side equal to `max_side`
3. Pad with zeros on right and bottom to make the image shape divisible by
`stride`
Arguments:
image: A 3-D tensor of shape `(height, width, channels)` representing an
image.
min_side: The shorter side of the image is resized to this value, if
`jitter` is set to None.
max_side: If the longer side of the image exceeds this value after
resizing, the image is resized such that the longer side now equals to
this value.
jitter: A list of floats containing minimum and maximum size for scale
jittering. If available, the shorter side of the image will be
resized to a random value in this range.
stride: The stride of the smallest feature map in the feature pyramid.
Can be calculated using `image_size / feature_map_size`.
Returns:
image: Resized and padded image.
image_shape: Shape of the image before padding.
ratio: The scaling factor used to resize the image
"""
image_shape = tf.cast(tf.shape(image)[:2], dtype=tf.float32)
if jitter is not None:
min_side = tf.random.uniform((), jitter[0], jitter[1], dtype=tf.float32)
ratio = min_side / tf.reduce_min(image_shape)
if ratio * tf.reduce_max(image_shape) > max_side:
ratio = max_side / tf.reduce_max(image_shape)
image_shape = ratio * image_shape
image = tf.image.resize(image, tf.cast(image_shape, dtype=tf.int32))
padded_image_shape = tf.cast(
tf.math.ceil(image_shape / stride) * stride, dtype=tf.int32
)
image = tf.image.pad_to_bounding_box(
image, 0, 0, padded_image_shape[0], padded_image_shape[1]
)
return image, image_shape, ratio
def preprocess_data(sample):
"""Applies preprocessing step to a single sample
Arguments:
sample: A dict representing a single training sample.
Returns:
image: Resized and padded image with random horizontal flipping applied.
bbox: Bounding boxes with the shape `(num_objects, 4)` where each box is
of the format `[x, y, width, height]`.
      class_id: A tensor representing the class id of the objects, having
shape `(num_objects,)`.
"""
image = sample["image"]
bbox = swap_xy(sample["objects"]["bbox"])
class_id = tf.cast(sample["objects"]["label"], dtype=tf.int32)
image, bbox = random_flip_horizontal(image, bbox)
image, image_shape, _ = resize_and_pad_image(image)
bbox = tf.stack(
[
bbox[:, 0] * image_shape[1],
bbox[:, 1] * image_shape[0],
bbox[:, 2] * image_shape[1],
bbox[:, 3] * image_shape[0],
],
axis=-1,
)
bbox = convert_to_xywh(bbox)
return image, bbox, class_id<jupyter_output><empty_output><jupyter_text>Encoding labelsThe raw labels, consisting of bounding boxes and class ids need to betransformed into targets for training. This transformation consists ofthe following steps:- Generating anchor boxes for the given image dimensions- Assigning ground truth boxes to the anchor boxes- The anchor boxes that are not assigned any objects, are either assigned thebackground class or ignored depending on the IOU- Generating the classification and regression targets using anchor boxes<jupyter_code>class LabelEncoder:
"""Transforms the raw labels into targets for training.
This class has operations to generate targets for a batch of samples which
is made up of the input images, bounding boxes for the objects present and
their class ids.
Attributes:
anchor_box: Anchor box generator to encode the bounding boxes.
box_variance: The scaling factors used to scale the bounding box targets.
"""
def __init__(self):
self._anchor_box = AnchorBox()
self._box_variance = tf.convert_to_tensor(
[0.1, 0.1, 0.2, 0.2], dtype=tf.float32
)
def _match_anchor_boxes(
self, anchor_boxes, gt_boxes, match_iou=0.5, ignore_iou=0.4
):
"""Matches ground truth boxes to anchor boxes based on IOU.
1. Calculates the pairwise IOU for the M `anchor_boxes` and N `gt_boxes`
to get a `(M, N)` shaped matrix.
2. The ground truth box with the maximum IOU in each row is assigned to
the anchor box provided the IOU is greater than `match_iou`.
3. If the maximum IOU in a row is less than `ignore_iou`, the anchor
box is assigned with the background class.
4. The remaining anchor boxes that do not have any class assigned are
ignored during training.
Arguments:
anchor_boxes: A float tensor with the shape `(total_anchors, 4)`
representing all the anchor boxes for a given input image shape,
where each anchor box is of the format `[x, y, width, height]`.
gt_boxes: A float tensor with shape `(num_objects, 4)` representing
the ground truth boxes, where each box is of the format
`[x, y, width, height]`.
match_iou: A float value representing the minimum IOU threshold for
determining if a ground truth box can be assigned to an anchor box.
ignore_iou: A float value representing the IOU threshold under which
an anchor box is assigned to the background class.
Returns:
matched_gt_idx: Index of the matched object
positive_mask: A mask for anchor boxes that have been assigned ground
truth boxes.
          ignore_mask: A mask for anchor boxes that need to be ignored during
training
"""
iou_matrix = compute_iou(anchor_boxes, gt_boxes)
max_iou = tf.reduce_max(iou_matrix, axis=1)
matched_gt_idx = tf.argmax(iou_matrix, axis=1)
positive_mask = tf.greater_equal(max_iou, match_iou)
negative_mask = tf.less(max_iou, ignore_iou)
ignore_mask = tf.logical_not(tf.logical_or(positive_mask, negative_mask))
return (
matched_gt_idx,
tf.cast(positive_mask, dtype=tf.float32),
tf.cast(ignore_mask, dtype=tf.float32),
)
def _compute_box_target(self, anchor_boxes, matched_gt_boxes):
"""Transforms the ground truth boxes into targets for training"""
box_target = tf.concat(
[
(matched_gt_boxes[:, :2] - anchor_boxes[:, :2]) / anchor_boxes[:, 2:],
tf.math.log(matched_gt_boxes[:, 2:] / anchor_boxes[:, 2:]),
],
axis=-1,
)
box_target = box_target / self._box_variance
return box_target
def _encode_sample(self, image_shape, gt_boxes, cls_ids):
"""Creates box and classification targets for a single sample"""
anchor_boxes = self._anchor_box.get_anchors(image_shape[1], image_shape[2])
cls_ids = tf.cast(cls_ids, dtype=tf.float32)
matched_gt_idx, positive_mask, ignore_mask = self._match_anchor_boxes(
anchor_boxes, gt_boxes
)
matched_gt_boxes = tf.gather(gt_boxes, matched_gt_idx)
box_target = self._compute_box_target(anchor_boxes, matched_gt_boxes)
matched_gt_cls_ids = tf.gather(cls_ids, matched_gt_idx)
cls_target = tf.where(
tf.not_equal(positive_mask, 1.0), -1.0, matched_gt_cls_ids
)
cls_target = tf.where(tf.equal(ignore_mask, 1.0), -2.0, cls_target)
cls_target = tf.expand_dims(cls_target, axis=-1)
label = tf.concat([box_target, cls_target], axis=-1)
return label
def encode_batch(self, batch_images, gt_boxes, cls_ids):
"""Creates box and classification targets for a batch"""
images_shape = tf.shape(batch_images)
batch_size = images_shape[0]
labels = tf.TensorArray(dtype=tf.float32, size=batch_size, dynamic_size=True)
for i in range(batch_size):
label = self._encode_sample(images_shape, gt_boxes[i], cls_ids[i])
labels = labels.write(i, label)
batch_images = tf.keras.applications.resnet.preprocess_input(batch_images)
return batch_images, labels.stack()<jupyter_output><empty_output><jupyter_text>Building the ResNet50 backboneRetinaNet uses a ResNet based backbone, using which a feature pyramid networkis constructed. In the example we use ResNet50 as the backbone, and return thefeature maps at strides 8, 16 and 32.<jupyter_code>def get_backbone():
"""Builds ResNet50 with pre-trained imagenet weights"""
backbone = keras.applications.ResNet50(
include_top=False, input_shape=[None, None, 3]
)
c3_output, c4_output, c5_output = [
backbone.get_layer(layer_name).output
for layer_name in ["conv3_block4_out", "conv4_block6_out", "conv5_block3_out"]
]
return keras.Model(
inputs=[backbone.inputs], outputs=[c3_output, c4_output, c5_output]
)<jupyter_output><empty_output><jupyter_text>Building Feature Pyramid Network as a custom layer<jupyter_code>class FeaturePyramid(keras.layers.Layer):
"""Builds the Feature Pyramid with the feature maps from the backbone.
Attributes:
num_classes: Number of classes in the dataset.
backbone: The backbone to build the feature pyramid from.
Currently supports ResNet50 only.
"""
def __init__(self, backbone=None, **kwargs):
super().__init__(name="FeaturePyramid", **kwargs)
self.backbone = backbone if backbone else get_backbone()
self.conv_c3_1x1 = keras.layers.Conv2D(256, 1, 1, "same")
self.conv_c4_1x1 = keras.layers.Conv2D(256, 1, 1, "same")
self.conv_c5_1x1 = keras.layers.Conv2D(256, 1, 1, "same")
self.conv_c3_3x3 = keras.layers.Conv2D(256, 3, 1, "same")
self.conv_c4_3x3 = keras.layers.Conv2D(256, 3, 1, "same")
self.conv_c5_3x3 = keras.layers.Conv2D(256, 3, 1, "same")
self.conv_c6_3x3 = keras.layers.Conv2D(256, 3, 2, "same")
self.conv_c7_3x3 = keras.layers.Conv2D(256, 3, 2, "same")
self.upsample_2x = keras.layers.UpSampling2D(2)
def call(self, images, training=False):
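        # 1x1 convs create the lateral connections, coarser maps are upsampled
        # and added top-down, and 3x3 convs smooth the merged maps. P6 and P7
        # are obtained with stride-2 convs on top of C5.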
c3_output, c4_output, c5_output = self.backbone(images, training=training)
p3_output = self.conv_c3_1x1(c3_output)
p4_output = self.conv_c4_1x1(c4_output)
p5_output = self.conv_c5_1x1(c5_output)
p4_output = p4_output + self.upsample_2x(p5_output)
p3_output = p3_output + self.upsample_2x(p4_output)
p3_output = self.conv_c3_3x3(p3_output)
p4_output = self.conv_c4_3x3(p4_output)
p5_output = self.conv_c5_3x3(p5_output)
p6_output = self.conv_c6_3x3(c5_output)
p7_output = self.conv_c7_3x3(tf.nn.relu(p6_output))
return p3_output, p4_output, p5_output, p6_output, p7_output<jupyter_output><empty_output><jupyter_text>Building the classification and box regression heads.The RetinaNet model has separate heads for bounding box regression andfor predicting class probabilities for the objects. These heads are sharedbetween all the feature maps of the feature pyramid.<jupyter_code>def build_head(output_filters, bias_init):
"""Builds the class/box predictions head.
Arguments:
output_filters: Number of convolution filters in the final layer.
bias_init: Bias Initializer for the final convolution layer.
Returns:
A keras sequential model representing either the classification
or the box regression head depending on `output_filters`.
"""
head = keras.Sequential([keras.Input(shape=[None, None, 256])])
kernel_init = tf.initializers.RandomNormal(0.0, 0.01)
for _ in range(4):
head.add(
keras.layers.Conv2D(256, 3, padding="same", kernel_initializer=kernel_init)
)
head.add(keras.layers.ReLU())
head.add(
keras.layers.Conv2D(
output_filters,
3,
1,
padding="same",
kernel_initializer=kernel_init,
bias_initializer=bias_init,
)
)
return head<jupyter_output><empty_output><jupyter_text>Building RetinaNet using a subclassed model<jupyter_code>class RetinaNet(keras.Model):
"""A subclassed Keras model implementing the RetinaNet architecture.
Attributes:
num_classes: Number of classes in the dataset.
backbone: The backbone to build the feature pyramid from.
Currently supports ResNet50 only.
"""
def __init__(self, num_classes, backbone=None, **kwargs):
super().__init__(name="RetinaNet", **kwargs)
self.fpn = FeaturePyramid(backbone)
self.num_classes = num_classes
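        # Initialize the classification head bias so that every anchor starts
        # out predicting foreground with probability ~0.01, which stabilizes
        # the early training steps (the "prior" trick from the paper).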
prior_probability = tf.constant_initializer(-np.log((1 - 0.01) / 0.01))
self.cls_head = build_head(9 * num_classes, prior_probability)
self.box_head = build_head(9 * 4, "zeros")
def call(self, image, training=False):
features = self.fpn(image, training=training)
N = tf.shape(image)[0]
cls_outputs = []
box_outputs = []
for feature in features:
box_outputs.append(tf.reshape(self.box_head(feature), [N, -1, 4]))
cls_outputs.append(
tf.reshape(self.cls_head(feature), [N, -1, self.num_classes])
)
cls_outputs = tf.concat(cls_outputs, axis=1)
box_outputs = tf.concat(box_outputs, axis=1)
return tf.concat([box_outputs, cls_outputs], axis=-1)<jupyter_output><empty_output><jupyter_text>Implementing a custom layer to decode predictions<jupyter_code>class DecodePredictions(tf.keras.layers.Layer):
"""A Keras layer that decodes predictions of the RetinaNet model.
Attributes:
num_classes: Number of classes in the dataset
confidence_threshold: Minimum class probability, below which detections
are pruned.
nms_iou_threshold: IOU threshold for the NMS operation
max_detections_per_class: Maximum number of detections to retain per
class.
max_detections: Maximum number of detections to retain across all
classes.
box_variance: The scaling factors used to scale the bounding box
predictions.
"""
def __init__(
self,
num_classes=80,
confidence_threshold=0.05,
nms_iou_threshold=0.5,
max_detections_per_class=100,
max_detections=100,
box_variance=[0.1, 0.1, 0.2, 0.2],
**kwargs
):
super().__init__(**kwargs)
self.num_classes = num_classes
self.confidence_threshold = confidence_threshold
self.nms_iou_threshold = nms_iou_threshold
self.max_detections_per_class = max_detections_per_class
self.max_detections = max_detections
self._anchor_box = AnchorBox()
self._box_variance = tf.convert_to_tensor(
[0.1, 0.1, 0.2, 0.2], dtype=tf.float32
)
def _decode_box_predictions(self, anchor_boxes, box_predictions):
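        # Invert the training-time encoding: multiply the variances back in,
        # then recover box centers from the anchor offsets and box sizes via
        # exp() of the predicted log-scales.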
boxes = box_predictions * self._box_variance
boxes = tf.concat(
[
boxes[:, :, :2] * anchor_boxes[:, :, 2:] + anchor_boxes[:, :, :2],
tf.math.exp(boxes[:, :, 2:]) * anchor_boxes[:, :, 2:],
],
axis=-1,
)
boxes_transformed = convert_to_corners(boxes)
return boxes_transformed
def call(self, images, predictions):
image_shape = tf.cast(tf.shape(images), dtype=tf.float32)
anchor_boxes = self._anchor_box.get_anchors(image_shape[1], image_shape[2])
box_predictions = predictions[:, :, :4]
cls_predictions = tf.nn.sigmoid(predictions[:, :, 4:])
boxes = self._decode_box_predictions(anchor_boxes[None, ...], box_predictions)
return tf.image.combined_non_max_suppression(
tf.expand_dims(boxes, axis=2),
cls_predictions,
self.max_detections_per_class,
self.max_detections,
self.nms_iou_threshold,
self.confidence_threshold,
clip_boxes=False,
)<jupyter_output><empty_output><jupyter_text>Implementing Smooth L1 loss and Focal Loss as keras custom losses<jupyter_code>class RetinaNetBoxLoss(tf.losses.Loss):
"""Implements Smooth L1 loss"""
def __init__(self, delta):
super().__init__(
reduction="none", name="RetinaNetBoxLoss"
)
self._delta = delta
def call(self, y_true, y_pred):
difference = y_true - y_pred
absolute_difference = tf.abs(difference)
squared_difference = difference ** 2
loss = tf.where(
tf.less(absolute_difference, self._delta),
0.5 * squared_difference,
absolute_difference - 0.5,
)
return tf.reduce_sum(loss, axis=-1)
class RetinaNetClassificationLoss(tf.losses.Loss):
"""Implements Focal loss"""
def __init__(self, alpha, gamma):
super().__init__(
reduction="none", name="RetinaNetClassificationLoss"
)
self._alpha = alpha
self._gamma = gamma
def call(self, y_true, y_pred):
cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(
labels=y_true, logits=y_pred
)
probs = tf.nn.sigmoid(y_pred)
alpha = tf.where(tf.equal(y_true, 1.0), self._alpha, (1.0 - self._alpha))
pt = tf.where(tf.equal(y_true, 1.0), probs, 1 - probs)
loss = alpha * tf.pow(1.0 - pt, self._gamma) * cross_entropy
return tf.reduce_sum(loss, axis=-1)
class RetinaNetLoss(tf.losses.Loss):
"""Wrapper to combine both the losses"""
def __init__(self, num_classes=80, alpha=0.25, gamma=2.0, delta=1.0):
super().__init__(reduction="auto", name="RetinaNetLoss")
self._clf_loss = RetinaNetClassificationLoss(alpha, gamma)
self._box_loss = RetinaNetBoxLoss(delta)
self._num_classes = num_classes
def call(self, y_true, y_pred):
y_pred = tf.cast(y_pred, dtype=tf.float32)
box_labels = y_true[:, :, :4]
box_predictions = y_pred[:, :, :4]
cls_labels = tf.one_hot(
tf.cast(y_true[:, :, 4], dtype=tf.int32),
depth=self._num_classes,
dtype=tf.float32,
)
cls_predictions = y_pred[:, :, 4:]
positive_mask = tf.cast(tf.greater(y_true[:, :, 4], -1.0), dtype=tf.float32)
ignore_mask = tf.cast(tf.equal(y_true[:, :, 4], -2.0), dtype=tf.float32)
clf_loss = self._clf_loss(cls_labels, cls_predictions)
box_loss = self._box_loss(box_labels, box_predictions)
clf_loss = tf.where(tf.equal(ignore_mask, 1.0), 0.0, clf_loss)
box_loss = tf.where(tf.equal(positive_mask, 1.0), box_loss, 0.0)
normalizer = tf.reduce_sum(positive_mask, axis=-1)
clf_loss = tf.math.divide_no_nan(tf.reduce_sum(clf_loss, axis=-1), normalizer)
box_loss = tf.math.divide_no_nan(tf.reduce_sum(box_loss, axis=-1), normalizer)
loss = clf_loss + box_loss
return loss<jupyter_output><empty_output><jupyter_text>Setting up training parameters<jupyter_code>model_dir = "retinanet/"
label_encoder = LabelEncoder()
num_classes = 80
batch_size = 2
learning_rates = [2.5e-06, 0.000625, 0.00125, 0.0025, 0.00025, 2.5e-05]
learning_rate_boundaries = [125, 250, 500, 240000, 360000]
learning_rate_fn = tf.optimizers.schedules.PiecewiseConstantDecay(
boundaries=learning_rate_boundaries, values=learning_rates
)<jupyter_output><empty_output><jupyter_text>Initializing and compiling model<jupyter_code>resnet50_backbone = get_backbone()
loss_fn = RetinaNetLoss(num_classes)
model = RetinaNet(num_classes, resnet50_backbone)
optimizer = tf.keras.optimizers.legacy.SGD(learning_rate=learning_rate_fn, momentum=0.9)
model.compile(loss=loss_fn, optimizer=optimizer)<jupyter_output><empty_output><jupyter_text>Setting up callbacks<jupyter_code>callbacks_list = [
tf.keras.callbacks.ModelCheckpoint(
filepath=os.path.join(model_dir, "weights" + "_epoch_{epoch}"),
monitor="loss",
save_best_only=False,
save_weights_only=True,
verbose=1,
)
]<jupyter_output><empty_output><jupyter_text>Load the COCO2017 dataset using TensorFlow Datasets<jupyter_code># set `data_dir=None` to load the complete dataset
(train_dataset, val_dataset), dataset_info = tfds.load(
"coco/2017", split=["train", "validation"], with_info=True, data_dir="data"
)<jupyter_output><empty_output><jupyter_text>Setting up a `tf.data` pipelineTo ensure that the model is fed with data efficiently we will be using`tf.data` API to create our input pipeline. The input pipelineconsists for the following major processing steps:- Apply the preprocessing function to the samples- Create batches with fixed batch size. Since images in the batch canhave different dimensions, and can also have different number ofobjects, we use `padded_batch` to the add the necessary padding to createrectangular tensors- Create targets for each sample in the batch using `LabelEncoder`<jupyter_code>autotune = tf.data.AUTOTUNE
train_dataset = train_dataset.map(preprocess_data, num_parallel_calls=autotune)
train_dataset = train_dataset.shuffle(8 * batch_size)
train_dataset = train_dataset.padded_batch(
batch_size=batch_size, padding_values=(0.0, 1e-8, -1), drop_remainder=True
)
train_dataset = train_dataset.map(
label_encoder.encode_batch, num_parallel_calls=autotune
)
train_dataset = train_dataset.apply(tf.data.experimental.ignore_errors())
train_dataset = train_dataset.prefetch(autotune)
val_dataset = val_dataset.map(preprocess_data, num_parallel_calls=autotune)
val_dataset = val_dataset.padded_batch(
batch_size=1, padding_values=(0.0, 1e-8, -1), drop_remainder=True
)
val_dataset = val_dataset.map(label_encoder.encode_batch, num_parallel_calls=autotune)
val_dataset = val_dataset.apply(tf.data.experimental.ignore_errors())
val_dataset = val_dataset.prefetch(autotune)<jupyter_output><empty_output><jupyter_text>Training the model<jupyter_code># Uncomment the following lines, when training on full dataset
# train_steps_per_epoch = dataset_info.splits["train"].num_examples // batch_size
# val_steps_per_epoch = \
# dataset_info.splits["validation"].num_examples // batch_size
# train_steps = 4 * 100000
# epochs = train_steps // train_steps_per_epoch
epochs = 1
# Running 100 training and 50 validation steps,
# remove `.take` when training on the full dataset
model.fit(
train_dataset.take(100),
validation_data=val_dataset.take(50),
epochs=epochs,
callbacks=callbacks_list,
verbose=1,
)<jupyter_output><empty_output><jupyter_text>Loading weights<jupyter_code># Change this to `model_dir` when not using the downloaded weights
weights_dir = "data"
latest_checkpoint = tf.train.latest_checkpoint(weights_dir)
model.load_weights(latest_checkpoint)<jupyter_output><empty_output><jupyter_text>Building inference model<jupyter_code>image = tf.keras.Input(shape=[None, None, 3], name="image")
predictions = model(image, training=False)
detections = DecodePredictions(confidence_threshold=0.5)(image, predictions)
inference_model = tf.keras.Model(inputs=image, outputs=detections)<jupyter_output><empty_output><jupyter_text>Generating detections<jupyter_code>def prepare_image(image):
image, _, ratio = resize_and_pad_image(image, jitter=None)
image = tf.keras.applications.resnet.preprocess_input(image)
return tf.expand_dims(image, axis=0), ratio
val_dataset = tfds.load("coco/2017", split="validation", data_dir="data")
int2str = dataset_info.features["objects"]["label"].int2str
for sample in val_dataset.take(2):
image = tf.cast(sample["image"], dtype=tf.float32)
input_image, ratio = prepare_image(image)
detections = inference_model.predict(input_image)
num_detections = detections.valid_detections[0]
class_names = [
int2str(int(x)) for x in detections.nmsed_classes[0][:num_detections]
]
visualize_detections(
image,
detections.nmsed_boxes[0][:num_detections] / ratio,
class_names,
detections.nmsed_scores[0][:num_detections],
)<jupyter_output><empty_output>
|
keras-io/examples/vision/ipynb/retinanet.ipynb/0
|
{
"file_path": "keras-io/examples/vision/ipynb/retinanet.ipynb",
"repo_id": "keras-io",
"token_count": 14564
}
| 125 |
<jupyter_start><jupyter_text>Visualizing what convnets learn**Author:** [fchollet](https://twitter.com/fchollet)**Date created:** 2020/05/29**Last modified:** 2020/05/29**Description:** Displaying the visual patterns that convnet filters respond to. IntroductionIn this example, we look into what sort of visual patterns image classification modelslearn. We'll be using the `ResNet50V2` model, trained on the ImageNet dataset.Our process is simple: we will create input images that maximize the activation ofspecific filters in a target layer (picked somewhere in the middle of the model: layer`conv3_block4_out`). Such images represent a visualization of thepattern that the filter responds to. Setup<jupyter_code>import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import keras
import numpy as np
import tensorflow as tf
# The dimensions of our input image
img_width = 180
img_height = 180
# Our target layer: we will visualize the filters from this layer.
# See `model.summary()` for list of layer names, if you want to change this.
layer_name = "conv3_block4_out"<jupyter_output><empty_output><jupyter_text>Build a feature extraction model<jupyter_code># Build a ResNet50V2 model loaded with pre-trained ImageNet weights
model = keras.applications.ResNet50V2(weights="imagenet", include_top=False)
# Set up a model that returns the activation values for our target layer
layer = model.get_layer(name=layer_name)
feature_extractor = keras.Model(inputs=model.inputs, outputs=layer.output)<jupyter_output><empty_output><jupyter_text>Set up the gradient ascent processThe "loss" we will maximize is simply the mean of the activation of a specific filter inour target layer. To avoid border effects, we exclude border pixels.<jupyter_code>def compute_loss(input_image, filter_index):
activation = feature_extractor(input_image)
# We avoid border artifacts by only involving non-border pixels in the loss.
filter_activation = activation[:, 2:-2, 2:-2, filter_index]
    return tf.reduce_mean(filter_activation)<jupyter_output><empty_output><jupyter_text>Our gradient ascent function simply computes the gradients of the loss above with regard to the input image, and updates the input image so as to move it towards a state that will activate the target filter more strongly.<jupyter_code>@tf.function
def gradient_ascent_step(img, filter_index, learning_rate):
with tf.GradientTape() as tape:
tape.watch(img)
loss = compute_loss(img, filter_index)
# Compute gradients.
grads = tape.gradient(loss, img)
# Normalize gradients.
grads = tf.math.l2_normalize(grads)
img += learning_rate * grads
    return loss, img<jupyter_output><empty_output><jupyter_text>Set up the end-to-end filter visualization loopOur process is as follows:- Start from a random image that is close to "all gray" (i.e. visually neutral)- Repeatedly apply the gradient ascent step function defined above- Convert the resulting input image back to a displayable form, by normalizing it,center-cropping it, and restricting it to the [0, 255] range.<jupyter_code>def initialize_image():
# We start from a gray image with some random noise
img = tf.random.uniform((1, img_width, img_height, 3))
# ResNet50V2 expects inputs in the range [-1, +1].
# Here we scale our random inputs to [-0.125, +0.125]
return (img - 0.5) * 0.25
def visualize_filter(filter_index):
    # We run gradient ascent for 30 steps
iterations = 30
learning_rate = 10.0
img = initialize_image()
for iteration in range(iterations):
loss, img = gradient_ascent_step(img, filter_index, learning_rate)
# Decode the resulting input image
img = deprocess_image(img[0].numpy())
return loss, img
def deprocess_image(img):
    # Normalize array: center on 0., ensure standard deviation is 0.15
img -= img.mean()
img /= img.std() + 1e-5
img *= 0.15
# Center crop
img = img[25:-25, 25:-25, :]
# Clip to [0, 1]
img += 0.5
img = np.clip(img, 0, 1)
# Convert to RGB array
img *= 255
img = np.clip(img, 0, 255).astype("uint8")
return img<jupyter_output><empty_output><jupyter_text>Let's try it out with filter 0 in the target layer:<jupyter_code>from IPython.display import Image, display
loss, img = visualize_filter(0)
keras.utils.save_img("0.png", img)<jupyter_output><empty_output><jupyter_text>This is what an input that maximizes the response of filter 0 in the target layer wouldlook like:<jupyter_code>display(Image("0.png"))<jupyter_output><empty_output><jupyter_text>Visualize the first 64 filters in the target layerNow, let's make an 8x8 grid of the first 64 filtersin the target layer to get a feel for the rangeof different visual patterns that the model has learned.<jupyter_code># Compute image inputs that maximize per-filter activations
# for the first 64 filters of our target layer
all_imgs = []
for filter_index in range(64):
print("Processing filter %d" % (filter_index,))
loss, img = visualize_filter(filter_index)
all_imgs.append(img)
# Build a black picture with enough space for
# our 8 x 8 filters of size 128 x 128, with a 5px margin in between
margin = 5
n = 8
cropped_width = img_width - 25 * 2
cropped_height = img_height - 25 * 2
width = n * cropped_width + (n - 1) * margin
height = n * cropped_height + (n - 1) * margin
stitched_filters = np.zeros((width, height, 3))
# Fill the picture with our saved filters
for i in range(n):
for j in range(n):
img = all_imgs[i * n + j]
stitched_filters[
(cropped_width + margin) * i : (cropped_width + margin) * i + cropped_width,
(cropped_height + margin) * j : (cropped_height + margin) * j
+ cropped_height,
:,
] = img
keras.utils.save_img("stiched_filters.png", stitched_filters)
from IPython.display import Image, display
display(Image("stiched_filters.png"))<jupyter_output><empty_output>
|
keras-io/examples/vision/ipynb/visualizing_what_convnets_learn.ipynb/0
|
{
"file_path": "keras-io/examples/vision/ipynb/visualizing_what_convnets_learn.ipynb",
"repo_id": "keras-io",
"token_count": 1963
}
| 126 |
# Image Classification using BigTransfer (BiT)
**Author:** [Sayan Nath](https://twitter.com/sayannath2350)<br>
**Date created:** 2021/09/24<br>
**Last modified:** 2024/01/03<br>
**Description:** BigTransfer (BiT): State-of-the-art transfer learning for image classification.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/vision/ipynb/bit.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/vision/bit.py)
---
## Introduction
BigTransfer (also known as BiT) is a state-of-the-art transfer learning method for image
classification. Transfer of pre-trained representations improves sample efficiency and
simplifies hyperparameter tuning when training deep neural networks for vision. BiT
revisits the paradigm of pre-training on large supervised datasets and fine-tuning the
model on a target task. The method highlights the importance of appropriately choosing
normalization layers and of scaling the architecture capacity as the amount of
pre-training data increases.
BigTransfer (BiT) is trained on public datasets, along with code in
[TF2, Jax and Pytorch](https://github.com/google-research/big_transfer). This helps anyone reach
state-of-the-art performance on their task of interest, even with just a handful of
labeled images per class.
You can find BiT models pre-trained on
[ImageNet](https://image-net.org/challenges/LSVRC/2012/index) and ImageNet-21k in
[TFHub](https://tfhub.dev/google/collections/bit/1) as TensorFlow2 SavedModels that you
can use easily as Keras Layers. There are a variety of sizes ranging from a standard
ResNet50 to a ResNet152x4 (152 layers deep, 4x wider than a typical ResNet50) for users
with larger computational and memory budgets but higher accuracy requirements.

Figure: The x-axis shows the number of images used per class, ranging from 1 to the full
dataset. On the plots on the left, the curve in blue above is our BiT-L model, whereas
the curve below is a ResNet-50 pre-trained on ImageNet (ILSVRC-2012).
---
## Setup
```python
import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import keras
from keras import ops
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_datasets as tfds
tfds.disable_progress_bar()
SEEDS = 42
keras.utils.set_random_seed(SEEDS)
```
---
## Gather Flower Dataset
```python
train_ds, validation_ds = tfds.load(
"tf_flowers",
split=["train[:85%]", "train[85%:]"],
as_supervised=True,
)
```
<div class="k-default-codeblock">
```
[1mDownloading and preparing dataset 218.21 MiB (download: 218.21 MiB, generated: 221.83 MiB, total: 440.05 MiB) to ~/tensorflow_datasets/tf_flowers/3.0.1...[0m
[1mDataset tf_flowers downloaded and prepared to ~/tensorflow_datasets/tf_flowers/3.0.1. Subsequent calls will reuse this data.[0m
```
</div>
---
## Visualise the dataset
```python
plt.figure(figsize=(10, 10))
for i, (image, label) in enumerate(train_ds.take(9)):
ax = plt.subplot(3, 3, i + 1)
plt.imshow(image)
plt.title(int(label))
plt.axis("off")
```

---
## Define hyperparameters
```python
RESIZE_TO = 384
CROP_TO = 224
BATCH_SIZE = 64
STEPS_PER_EPOCH = 10
AUTO = tf.data.AUTOTUNE # optimise the pipeline performance
NUM_CLASSES = 5 # number of classes
SCHEDULE_LENGTH = (
500 # we will train on lower resolution images and will still attain good results
)
SCHEDULE_BOUNDARIES = [
200,
300,
400,
]  # the larger the dataset, the longer the schedule
```
The hyperparameters like `SCHEDULE_LENGTH` and `SCHEDULE_BOUNDARIES` are determined based
on empirical results. The method has been explained in the [original
paper](https://arxiv.org/abs/1912.11370) and in their [Google AI Blog
Post](https://ai.googleblog.com/2020/05/open-sourcing-bit-exploring-large-scale.html).
The `SCHEDULE_LENGTH` also determines whether to use [MixUp
Augmentation](https://arxiv.org/abs/1710.09412) or not. You can also find an easy MixUp
implementation in [Keras Coding Examples](https://keras.io/examples/vision/mixup/);
a minimal sketch of the idea is shown after the figure below.

---
## Define preprocessing helper functions
```python
SCHEDULE_LENGTH = SCHEDULE_LENGTH * 512 / BATCH_SIZE
random_flip = keras.layers.RandomFlip("horizontal")
random_crop = keras.layers.RandomCrop(CROP_TO, CROP_TO)
def preprocess_train(image, label):
image = random_flip(image)
image = ops.image.resize(image, (RESIZE_TO, RESIZE_TO))
image = random_crop(image)
image = image / 255.0
return (image, label)
def preprocess_test(image, label):
image = ops.image.resize(image, (RESIZE_TO, RESIZE_TO))
image = ops.cast(image, dtype="float32")
image = image / 255.0
return (image, label)
DATASET_NUM_TRAIN_EXAMPLES = train_ds.cardinality().numpy()
repeat_count = int(
SCHEDULE_LENGTH * BATCH_SIZE / DATASET_NUM_TRAIN_EXAMPLES * STEPS_PER_EPOCH
)
repeat_count += 50 + 1  # To ensure there are at least 50 epochs of training
```
---
## Define the data pipeline
```python
# Training pipeline
pipeline_train = (
train_ds.shuffle(10000)
    .repeat(repeat_count)  # Repeat so the dataset covers all training steps
.map(preprocess_train, num_parallel_calls=AUTO)
.batch(BATCH_SIZE)
.prefetch(AUTO)
)
# Validation pipeline
pipeline_validation = (
validation_ds.map(preprocess_test, num_parallel_calls=AUTO)
.batch(BATCH_SIZE)
.prefetch(AUTO)
)
```
---
## Visualise the training samples
```python
image_batch, label_batch = next(iter(pipeline_train))
plt.figure(figsize=(10, 10))
for n in range(25):
ax = plt.subplot(5, 5, n + 1)
plt.imshow(image_batch[n])
plt.title(label_batch[n].numpy())
plt.axis("off")
```

---
## Load pretrained TF-Hub model into a `KerasLayer`
```python
bit_model_url = "https://tfhub.dev/google/bit/m-r50x1/1"
bit_module = hub.load(bit_model_url)
```
---
## Create BigTransfer (BiT) model
To create the new model, we:
1. Cut off the BiT model’s original head. This leaves us with the “pre-logits” output.
We do not have to do this if we use the ‘feature extractor’ models (i.e. all those in
subdirectories titled `feature_vectors`), since for those models the head has already
been cut off.
2. Add a new head with the number of outputs equal to the number of classes of our new
task. Note that it is important that we initialise the head to all zeroes.
```python
class MyBiTModel(keras.Model):
def __init__(self, num_classes, module, **kwargs):
super().__init__(**kwargs)
self.num_classes = num_classes
self.head = keras.layers.Dense(num_classes, kernel_initializer="zeros")
self.bit_model = module
def call(self, images):
bit_embedding = self.bit_model(images)
return self.head(bit_embedding)
model = MyBiTModel(num_classes=NUM_CLASSES, module=bit_module)
```
---
## Define optimizer and loss
```python
learning_rate = 0.003 * BATCH_SIZE / 512
# Decay learning rate by a factor of 10 at SCHEDULE_BOUNDARIES.
lr_schedule = keras.optimizers.schedules.PiecewiseConstantDecay(
boundaries=SCHEDULE_BOUNDARIES,
values=[
learning_rate,
learning_rate * 0.1,
learning_rate * 0.01,
learning_rate * 0.001,
],
)
optimizer = keras.optimizers.SGD(learning_rate=lr_schedule, momentum=0.9)
loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
```
---
## Compile the model
```python
model.compile(optimizer=optimizer, loss=loss_fn, metrics=["accuracy"])
```
---
## Set up callbacks
```python
train_callbacks = [
keras.callbacks.EarlyStopping(
monitor="val_accuracy", patience=2, restore_best_weights=True
)
]
```
---
## Train the model
```python
history = model.fit(
pipeline_train,
batch_size=BATCH_SIZE,
epochs=int(SCHEDULE_LENGTH / STEPS_PER_EPOCH),
steps_per_epoch=STEPS_PER_EPOCH,
validation_data=pipeline_validation,
callbacks=train_callbacks,
)
```
<div class="k-default-codeblock">
```
Epoch 1/400
10/10 [==============================] - 18s 852ms/step - loss: 0.7465 - accuracy: 0.7891 - val_loss: 0.1865 - val_accuracy: 0.9582
Epoch 2/400
10/10 [==============================] - 5s 529ms/step - loss: 0.1389 - accuracy: 0.9578 - val_loss: 0.1075 - val_accuracy: 0.9727
Epoch 3/400
10/10 [==============================] - 5s 520ms/step - loss: 0.1720 - accuracy: 0.9391 - val_loss: 0.0858 - val_accuracy: 0.9727
Epoch 4/400
10/10 [==============================] - 5s 525ms/step - loss: 0.1211 - accuracy: 0.9516 - val_loss: 0.0833 - val_accuracy: 0.9691
```
</div>
---
## Plot the training and validation metrics
```python
def plot_hist(hist):
plt.plot(hist.history["accuracy"])
plt.plot(hist.history["val_accuracy"])
plt.plot(hist.history["loss"])
plt.plot(hist.history["val_loss"])
plt.title("Training Progress")
plt.ylabel("Accuracy/Loss")
plt.xlabel("Epochs")
plt.legend(["train_acc", "val_acc", "train_loss", "val_loss"], loc="upper left")
plt.show()
plot_hist(history)
```

---
## Evaluate the model
```python
accuracy = model.evaluate(pipeline_validation)[1] * 100
print("Accuracy: {:.2f}%".format(accuracy))
```
<div class="k-default-codeblock">
```
9/9 [==============================] - 3s 364ms/step - loss: 0.1075 - accuracy: 0.9727
Accuracy: 97.27%
```
</div>
---
## Conclusion
BiT performs well across a surprisingly wide range of data regimes
-- from 1 example per class to 1M total examples. BiT achieves 87.5% top-1 accuracy on
ILSVRC-2012, 99.4% on CIFAR-10, and 76.3% on the 19 task Visual Task Adaptation Benchmark
(VTAB). On small datasets, BiT attains 76.8% on ILSVRC-2012 with 10 examples per class,
and 97.0% on CIFAR-10 with 10 examples per class.

You can experiment further with the BigTransfer Method by following the
[original paper](https://arxiv.org/abs/1912.11370).
**Example available on HuggingFace**
| Trained Model | Demo |
| :--: | :--: |
| [](https://huggingface.co/keras-io/bit) | [](https://huggingface.co/spaces/keras-io/siamese-contrastive) |
|
keras-io/examples/vision/md/bit.md/0
|
{
"file_path": "keras-io/examples/vision/md/bit.md",
"repo_id": "keras-io",
"token_count": 4004
}
| 127 |
# Image Segmentation using Composable Fully-Convolutional Networks
**Author:** [Suvaditya Mukherjee](https://twitter.com/halcyonrayes)<br>
**Date created:** 2023/06/16<br>
**Last modified:** 2023/12/25<br>
**Description:** Using the Fully-Convolutional Network for Image Segmentation.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/vision/ipynb/fully_convolutional_network.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/vision/fully_convolutional_network.py)
---
## Introduction
The following example walks through the steps to implement Fully-Convolutional Networks
for Image Segmentation on the Oxford-IIIT Pets dataset.
The model was proposed in the paper,
[Fully Convolutional Networks for Semantic Segmentation by Long et al. (2014)](https://arxiv.org/abs/1411.4038).
Image segmentation is one of the most common and introductory tasks when it comes to
Computer Vision, where we extend the problem of Image Classification from
one-label-per-image to a pixel-wise classification problem.
In this example, we will assemble the aforementioned Fully-Convolutional Segmentation architecture,
capable of performing Image Segmentation.
The network extends the pooling layer outputs from the VGG in order to perform upsampling
and get a final result. The intermediate outputs coming from the 3rd, 4th and 5th Max-Pooling layers from VGG19 are
extracted out and upsampled at different levels and factors to get a final output with the same shape as that
of the input, but with the class of each pixel present at each location, instead of pixel intensity values.
Different intermediate pool layers are extracted and processed upon for different versions of the network.
The FCN architecture has 3 versions of differing quality.
- FCN-32S
- FCN-16S
- FCN-8S
All versions of the model derive their outputs through an iterative processing of
successive intermediate pool layers of the main backbone used.
A better idea can be gained from the figure below.
|  |
| :--: |
| **Diagram 1**: Combined Architecture Versions (Source: Paper) |
To get a better idea on Image Segmentation or find more pre-trained models, feel free to
navigate to the [Hugging Face Image Segmentation Models](https://huggingface.co/models?pipeline_tag=image-segmentation) page,
or a [PyImageSearch Blog on Semantic Segmentation](https://pyimagesearch.com/2018/09/03/semantic-segmentation-with-opencv-and-deep-learning/)
---
## Setup Imports
```python
import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import keras
from keras import ops
import tensorflow as tf
import matplotlib.pyplot as plt
import tensorflow_datasets as tfds
import numpy as np
AUTOTUNE = tf.data.AUTOTUNE
```
---
## Set configurations for notebook variables
We set the required parameters for the experiment.
The chosen dataset has a total of 4 classes per image, with regards to the segmentation mask.
We also set our hyperparameters in this cell.
Mixed Precision as an option is also available in systems which support it, to reduce
load.
This would make most tensors use `16-bit float` values instead of `32-bit float`
values, in places where it will not adversely affect computation.
This means, during computation, TensorFlow will use `16-bit float` Tensors to increase speed at the cost of precision,
while storing the values in their original default `32-bit float` form.
```python
NUM_CLASSES = 4
INPUT_HEIGHT = 224
INPUT_WIDTH = 224
LEARNING_RATE = 1e-3
WEIGHT_DECAY = 1e-4
EPOCHS = 20
BATCH_SIZE = 32
MIXED_PRECISION = True
SHUFFLE = True
# Mixed-precision setting
if MIXED_PRECISION:
policy = keras.mixed_precision.Policy("mixed_float16")
keras.mixed_precision.set_global_policy(policy)
```
<div class="k-default-codeblock">
```
INFO:tensorflow:Mixed precision compatibility check (mixed_float16): OK
Your GPU will likely run quickly with dtype policy mixed_float16 as it has compute capability of at least 7.0. Your GPU: Quadro RTX 5000, compute capability 7.5
```
</div>
---
## Load dataset
We make use of the [Oxford-IIIT Pets dataset](http://www.robots.ox.ac.uk/~vgg/data/pets/)
which contains a total of 7,349 samples and their segmentation masks.
We have 37 classes, with roughly 200 samples per class.
Our training and validation dataset has 3,128 and 552 samples respectively.
Aside from this, our test split has a total of 3,669 samples.
We set a `batch_size` parameter that will batch our samples together, and use a `shuffle`
parameter to mix our samples together.
```python
(train_ds, valid_ds, test_ds) = tfds.load(
"oxford_iiit_pet",
split=["train[:85%]", "train[85%:]", "test"],
batch_size=BATCH_SIZE,
shuffle_files=SHUFFLE,
)
```
---
## Unpack and preprocess dataset
We define a simple function that performs resizing over our
training, validation and test datasets.
We do the same process on the masks as well, to make sure both are aligned in terms of shape and size.
```python
# Image and Mask Pre-processing
def unpack_resize_data(section):
image = section["image"]
segmentation_mask = section["segmentation_mask"]
resize_layer = keras.layers.Resizing(INPUT_HEIGHT, INPUT_WIDTH)
image = resize_layer(image)
segmentation_mask = resize_layer(segmentation_mask)
return image, segmentation_mask
train_ds = train_ds.map(unpack_resize_data, num_parallel_calls=AUTOTUNE)
valid_ds = valid_ds.map(unpack_resize_data, num_parallel_calls=AUTOTUNE)
test_ds = test_ds.map(unpack_resize_data, num_parallel_calls=AUTOTUNE)
```
---
## Visualize one random sample from the pre-processed dataset
We visualize what a random sample in our test split of the dataset looks like, and plot
the segmentation mask on top to see the effective mask areas.
Note that we have performed pre-processing on this dataset too,
which makes the image and mask the same size.
```python
# Select random image and mask. Cast to NumPy array
# for Matplotlib visualization.
images, masks = next(iter(test_ds))
random_idx = keras.random.uniform([], minval=0, maxval=BATCH_SIZE, seed=10)
test_image = images[int(random_idx)].numpy().astype("float")
test_mask = masks[int(random_idx)].numpy().astype("float")
# Overlay segmentation mask on top of image.
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(10, 5))
ax[0].set_title("Image")
ax[0].imshow(test_image / 255.0)
ax[1].set_title("Image with segmentation mask overlay")
ax[1].imshow(test_image / 255.0)
ax[1].imshow(
test_mask,
cmap="inferno",
alpha=0.6,
)
plt.show()
```

---
## Perform VGG-specific pre-processing
`keras.applications.VGG19` requires the use of a `preprocess_input` function that
performs ImageNet-style preprocessing (converting RGB to BGR and zero-centering each
channel with the ImageNet channel means).
```python
def preprocess_data(image, segmentation_mask):
image = keras.applications.vgg19.preprocess_input(image)
return image, segmentation_mask
train_ds = (
train_ds.map(preprocess_data, num_parallel_calls=AUTOTUNE)
.shuffle(buffer_size=1024)
.prefetch(buffer_size=1024)
)
valid_ds = (
valid_ds.map(preprocess_data, num_parallel_calls=AUTOTUNE)
.shuffle(buffer_size=1024)
.prefetch(buffer_size=1024)
)
test_ds = (
test_ds.map(preprocess_data, num_parallel_calls=AUTOTUNE)
.shuffle(buffer_size=1024)
.prefetch(buffer_size=1024)
)
```
---
## Model Definition
The Fully-Convolutional Network boasts a simple architecture composed of only
`keras.layers.Conv2D` Layers, `keras.layers.Dense` layers and `keras.layers.Dropout`
layers.
|  |
| :--: |
| **Diagram 2**: Generic FCN Forward Pass (Source: Paper)|
Pixel-wise prediction is performed by having a Softmax Convolutional layer with the same
size as the image, such that we can perform a direct comparison.
We track several important metrics on the network, such as pixel-wise Accuracy and
Mean Intersection-over-Union (mIoU).
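As a quick illustration of what mean IoU measures (per-class intersection over union,
averaged over the classes present), here is a short NumPy sketch. It is an assumption for
clarity only; the training code further down relies on `keras.metrics.MeanIoU`, and the
helper name `mean_iou` is hypothetical.

```python
# Illustrative sketch only -- the example itself uses keras.metrics.MeanIoU.
import numpy as np


def mean_iou(y_true, y_pred, num_classes):
    """Compute mean IoU from integer class masks of identical shape."""
    ious = []
    for c in range(num_classes):
        intersection = np.logical_and(y_true == c, y_pred == c).sum()
        union = np.logical_or(y_true == c, y_pred == c).sum()
        if union > 0:  # skip classes absent from both masks
            ious.append(intersection / union)
    return float(np.mean(ious))
```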
### Backbone (VGG-19)
We use the [VGG-19 network](https://keras.io/api/applications/vgg/) as the backbone, as
the paper suggests it to be one of the most effective backbones for this network.
We extract different outputs from the network by making use of `keras.models.Model`.
Following this, we add layers on top to make a network perfectly simulating that of
Diagram 1.
The backbone's `keras.layers.Dense` layers will be converted to `keras.layers.Conv2D`
layers based on the [original Caffe code present here.](https://github.com/linxi159/FCN-caffe/blob/master/pascalcontext-fcn16s/net.py)
All 3 networks will share the same backbone weights, but will have differing results
based on their extensions.
We make the backbone non-trainable to reduce training time.
It is also noted in the paper that making the network trainable does not yield major benefits.
```python
input_layer = keras.Input(shape=(INPUT_HEIGHT, INPUT_WIDTH, 3))
# VGG Model backbone with pre-trained ImageNet weights.
vgg_model = keras.applications.vgg19.VGG19(include_top=True, weights="imagenet")
# Extracting different outputs from same model
fcn_backbone = keras.models.Model(
inputs=vgg_model.layers[1].input,
outputs=[
vgg_model.get_layer(block_name).output
for block_name in ["block3_pool", "block4_pool", "block5_pool"]
],
)
# Setting backbone to be non-trainable
fcn_backbone.trainable = False
x = fcn_backbone(input_layer)
# Converting Dense layers to Conv2D layers
units = [4096, 4096]
dense_convs = []
for filter_idx in range(len(units)):
dense_conv = keras.layers.Conv2D(
filters=units[filter_idx],
kernel_size=(7, 7) if filter_idx == 0 else (1, 1),
strides=(1, 1),
activation="relu",
padding="same",
use_bias=False,
        kernel_initializer=keras.initializers.Constant(1.0),
)
dense_convs.append(dense_conv)
dropout_layer = keras.layers.Dropout(0.5)
dense_convs.append(dropout_layer)
dense_convs = keras.Sequential(dense_convs)
dense_convs.trainable = False
x[-1] = dense_convs(x[-1])
pool3_output, pool4_output, pool5_output = x
```
### FCN-32S
We extend the last output, perform a `1x1 Convolution` and perform 2D Bilinear Upsampling
by a factor of 32 to get an image of the same size as that of our input.
We use a simple `keras.layers.UpSampling2D` layer over a `keras.layers.Conv2DTranspose`
since it yields performance benefits from being a deterministic mathematical operation
over a Convolutional operation.
It is also noted in the paper that making the Up-sampling parameters trainable does not yield benefits.
Original experiments of the paper used Upsampling as well.
```python
# 1x1 convolution to set channels = number of classes
pool5 = keras.layers.Conv2D(
filters=NUM_CLASSES,
kernel_size=(1, 1),
padding="same",
strides=(1, 1),
activation="relu",
)
# Get Softmax outputs for all classes
fcn32s_conv_layer = keras.layers.Conv2D(
filters=NUM_CLASSES,
kernel_size=(1, 1),
activation="softmax",
padding="same",
strides=(1, 1),
)
# Up-sample to original image size
fcn32s_upsampling = keras.layers.UpSampling2D(
size=(32, 32),
data_format=keras.backend.image_data_format(),
interpolation="bilinear",
)
final_fcn32s_pool = pool5(pool5_output)
final_fcn32s_output = fcn32s_conv_layer(final_fcn32s_pool)
final_fcn32s_output = fcn32s_upsampling(final_fcn32s_output)
fcn32s_model = keras.Model(inputs=input_layer, outputs=final_fcn32s_output)
```
### FCN-16S
The pooling output from the FCN-32S is extended and added to the 4th-level Pooling output
of our backbone.
Following this, we upsample by a factor of 16 to get an image of the same
size as that of our input.
```python
# 1x1 convolution to set channels = number of classes
# Followed from the original Caffe implementation
pool4 = keras.layers.Conv2D(
filters=NUM_CLASSES,
kernel_size=(1, 1),
padding="same",
strides=(1, 1),
activation="linear",
kernel_initializer=keras.initializers.Zeros(),
)(pool4_output)
# Intermediate up-sample
pool5 = keras.layers.UpSampling2D(
size=(2, 2),
data_format=keras.backend.image_data_format(),
interpolation="bilinear",
)(final_fcn32s_pool)
# Get Softmax outputs for all classes
fcn16s_conv_layer = keras.layers.Conv2D(
filters=NUM_CLASSES,
kernel_size=(1, 1),
activation="softmax",
padding="same",
strides=(1, 1),
)
# Up-sample to original image size
fcn16s_upsample_layer = keras.layers.UpSampling2D(
size=(16, 16),
data_format=keras.backend.image_data_format(),
interpolation="bilinear",
)
# Add intermediate outputs
final_fcn16s_pool = keras.layers.Add()([pool4, pool5])
final_fcn16s_output = fcn16s_conv_layer(final_fcn16s_pool)
final_fcn16s_output = fcn16s_upsample_layer(final_fcn16s_output)
fcn16s_model = keras.models.Model(inputs=input_layer, outputs=final_fcn16s_output)
```
### FCN-8S
The pooling output from the FCN-16S is extended once more, and added to the 3rd-level
Pooling output of our backbone.
This result is upsampled by a factor of 8 to get an image of the same size as that of our input.
```python
# 1x1 convolution to set channels = number of classes
# Followed from the original Caffe implementation
pool3 = keras.layers.Conv2D(
filters=NUM_CLASSES,
kernel_size=(1, 1),
padding="same",
strides=(1, 1),
activation="linear",
kernel_initializer=keras.initializers.Zeros(),
)(pool3_output)
# Intermediate up-sample
intermediate_pool_output = keras.layers.UpSampling2D(
size=(2, 2),
data_format=keras.backend.image_data_format(),
interpolation="bilinear",
)(final_fcn16s_pool)
# Get Softmax outputs for all classes
fcn8s_conv_layer = keras.layers.Conv2D(
filters=NUM_CLASSES,
kernel_size=(1, 1),
activation="softmax",
padding="same",
strides=(1, 1),
)
# Up-sample to original image size
fcn8s_upsample_layer = keras.layers.UpSampling2D(
size=(8, 8),
data_format=keras.backend.image_data_format(),
interpolation="bilinear",
)
# Add intermediate outputs
final_fcn8s_pool = keras.layers.Add()([pool3, intermediate_pool_output])
final_fcn8s_output = fcn8s_conv_layer(final_fcn8s_pool)
final_fcn8s_output = fcn8s_upsample_layer(final_fcn8s_output)
fcn8s_model = keras.models.Model(inputs=input_layer, outputs=final_fcn8s_output)
```
### Load weights into backbone
It was noted in the paper, as well as through experimentation, that extracting the weights
of the last 2 Fully-connected Dense layers from the backbone, reshaping them to fit the
`keras.layers.Conv2D` layers we had previously converted from `keras.layers.Dense`, and
loading them into those layers yields far better results and a significant
increase in mIOU performance.
```python
# VGG's last 2 layers
weights1 = vgg_model.get_layer("fc1").get_weights()[0]
weights2 = vgg_model.get_layer("fc2").get_weights()[0]
weights1 = weights1.reshape(7, 7, 512, 4096)
weights2 = weights2.reshape(1, 1, 4096, 4096)
dense_convs.layers[0].set_weights([weights1])
dense_convs.layers[2].set_weights([weights2])
```
---
## Training
The original paper talks about making use of [SGD with Momentum](https://keras.io/api/optimizers/sgd/) as the optimizer of choice.
But it was noticed during experimentation that
[AdamW](https://keras.io/api/optimizers/adamw/)
yielded better results in terms of mIOU and Pixel-wise Accuracy.
### FCN-32S
```python
fcn32s_optimizer = keras.optimizers.AdamW(
learning_rate=LEARNING_RATE, weight_decay=WEIGHT_DECAY
)
fcn32s_loss = keras.losses.SparseCategoricalCrossentropy()
# Maintain mIOU and Pixel-wise Accuracy as metrics
fcn32s_model.compile(
optimizer=fcn32s_optimizer,
loss=fcn32s_loss,
metrics=[
keras.metrics.MeanIoU(num_classes=NUM_CLASSES, sparse_y_pred=False),
keras.metrics.SparseCategoricalAccuracy(),
],
)
fcn32s_history = fcn32s_model.fit(train_ds, epochs=EPOCHS, validation_data=valid_ds)
```
<div class="k-default-codeblock">
```
Epoch 1/20
Corrupt JPEG data: premature end of data segment
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
98/98 [==============================] - 31s 171ms/step - loss: 0.9853 - mean_io_u: 0.3056 - sparse_categorical_accuracy: 0.6242 - val_loss: 0.7911 - val_mean_io_u: 0.4022 - val_sparse_categorical_accuracy: 0.7011
Epoch 2/20
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
Corrupt JPEG data: premature end of data segment
98/98 [==============================] - 22s 131ms/step - loss: 0.7463 - mean_io_u: 0.3978 - sparse_categorical_accuracy: 0.7100 - val_loss: 0.7162 - val_mean_io_u: 0.3968 - val_sparse_categorical_accuracy: 0.7157
Epoch 3/20
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
Corrupt JPEG data: premature end of data segment
98/98 [==============================] - 21s 120ms/step - loss: 0.6939 - mean_io_u: 0.4139 - sparse_categorical_accuracy: 0.7255 - val_loss: 0.6714 - val_mean_io_u: 0.4383 - val_sparse_categorical_accuracy: 0.7379
Epoch 4/20
Corrupt JPEG data: premature end of data segment
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
98/98 [==============================] - 21s 117ms/step - loss: 0.6694 - mean_io_u: 0.4239 - sparse_categorical_accuracy: 0.7339 - val_loss: 0.6715 - val_mean_io_u: 0.4258 - val_sparse_categorical_accuracy: 0.7332
Epoch 5/20
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
Corrupt JPEG data: premature end of data segment
98/98 [==============================] - 21s 115ms/step - loss: 0.6556 - mean_io_u: 0.4279 - sparse_categorical_accuracy: 0.7382 - val_loss: 0.6271 - val_mean_io_u: 0.4483 - val_sparse_categorical_accuracy: 0.7514
Epoch 6/20
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
Corrupt JPEG data: premature end of data segment
98/98 [==============================] - 21s 120ms/step - loss: 0.6501 - mean_io_u: 0.4295 - sparse_categorical_accuracy: 0.7394 - val_loss: 0.6390 - val_mean_io_u: 0.4375 - val_sparse_categorical_accuracy: 0.7442
Epoch 7/20
Corrupt JPEG data: premature end of data segment
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
98/98 [==============================] - 20s 109ms/step - loss: 0.6464 - mean_io_u: 0.4309 - sparse_categorical_accuracy: 0.7402 - val_loss: 0.6143 - val_mean_io_u: 0.4508 - val_sparse_categorical_accuracy: 0.7553
Epoch 8/20
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
Corrupt JPEG data: premature end of data segment
98/98 [==============================] - 20s 108ms/step - loss: 0.6363 - mean_io_u: 0.4343 - sparse_categorical_accuracy: 0.7444 - val_loss: 0.6143 - val_mean_io_u: 0.4481 - val_sparse_categorical_accuracy: 0.7541
Epoch 9/20
Corrupt JPEG data: premature end of data segment
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
98/98 [==============================] - 20s 108ms/step - loss: 0.6367 - mean_io_u: 0.4346 - sparse_categorical_accuracy: 0.7445 - val_loss: 0.6222 - val_mean_io_u: 0.4534 - val_sparse_categorical_accuracy: 0.7510
Epoch 10/20
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
Corrupt JPEG data: premature end of data segment
98/98 [==============================] - 19s 108ms/step - loss: 0.6398 - mean_io_u: 0.4346 - sparse_categorical_accuracy: 0.7426 - val_loss: 0.6123 - val_mean_io_u: 0.4494 - val_sparse_categorical_accuracy: 0.7541
Epoch 11/20
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
Corrupt JPEG data: premature end of data segment
98/98 [==============================] - 20s 110ms/step - loss: 0.6361 - mean_io_u: 0.4365 - sparse_categorical_accuracy: 0.7439 - val_loss: 0.6310 - val_mean_io_u: 0.4405 - val_sparse_categorical_accuracy: 0.7461
Epoch 12/20
Corrupt JPEG data: premature end of data segment
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
98/98 [==============================] - 21s 110ms/step - loss: 0.6325 - mean_io_u: 0.4362 - sparse_categorical_accuracy: 0.7454 - val_loss: 0.6155 - val_mean_io_u: 0.4441 - val_sparse_categorical_accuracy: 0.7509
Epoch 13/20
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
Corrupt JPEG data: premature end of data segment
98/98 [==============================] - 20s 112ms/step - loss: 0.6335 - mean_io_u: 0.4368 - sparse_categorical_accuracy: 0.7452 - val_loss: 0.6153 - val_mean_io_u: 0.4430 - val_sparse_categorical_accuracy: 0.7504
Epoch 14/20
Corrupt JPEG data: premature end of data segment
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
98/98 [==============================] - 20s 113ms/step - loss: 0.6289 - mean_io_u: 0.4380 - sparse_categorical_accuracy: 0.7466 - val_loss: 0.6357 - val_mean_io_u: 0.4309 - val_sparse_categorical_accuracy: 0.7382
Epoch 15/20
Corrupt JPEG data: premature end of data segment
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
98/98 [==============================] - 20s 113ms/step - loss: 0.6267 - mean_io_u: 0.4369 - sparse_categorical_accuracy: 0.7474 - val_loss: 0.5974 - val_mean_io_u: 0.4619 - val_sparse_categorical_accuracy: 0.7617
Epoch 16/20
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
Corrupt JPEG data: premature end of data segment
98/98 [==============================] - 20s 109ms/step - loss: 0.6309 - mean_io_u: 0.4368 - sparse_categorical_accuracy: 0.7458 - val_loss: 0.6071 - val_mean_io_u: 0.4463 - val_sparse_categorical_accuracy: 0.7533
Epoch 17/20
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
Corrupt JPEG data: premature end of data segment
98/98 [==============================] - 20s 112ms/step - loss: 0.6285 - mean_io_u: 0.4382 - sparse_categorical_accuracy: 0.7465 - val_loss: 0.5979 - val_mean_io_u: 0.4576 - val_sparse_categorical_accuracy: 0.7602
Epoch 18/20
Corrupt JPEG data: premature end of data segment
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
98/98 [==============================] - 20s 111ms/step - loss: 0.6250 - mean_io_u: 0.4403 - sparse_categorical_accuracy: 0.7479 - val_loss: 0.6121 - val_mean_io_u: 0.4451 - val_sparse_categorical_accuracy: 0.7507
Epoch 19/20
Corrupt JPEG data: premature end of data segment
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
98/98 [==============================] - 20s 111ms/step - loss: 0.6307 - mean_io_u: 0.4386 - sparse_categorical_accuracy: 0.7454 - val_loss: 0.6010 - val_mean_io_u: 0.4532 - val_sparse_categorical_accuracy: 0.7577
Epoch 20/20
Corrupt JPEG data: premature end of data segment
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
98/98 [==============================] - 20s 114ms/step - loss: 0.6199 - mean_io_u: 0.4403 - sparse_categorical_accuracy: 0.7505 - val_loss: 0.6180 - val_mean_io_u: 0.4339 - val_sparse_categorical_accuracy: 0.7465
```
</div>
### FCN-16S
```python
fcn16s_optimizer = keras.optimizers.AdamW(
learning_rate=LEARNING_RATE, weight_decay=WEIGHT_DECAY
)
fcn16s_loss = keras.losses.SparseCategoricalCrossentropy()
# Maintain mIOU and Pixel-wise Accuracy as metrics
fcn16s_model.compile(
optimizer=fcn16s_optimizer,
loss=fcn16s_loss,
metrics=[
keras.metrics.MeanIoU(num_classes=NUM_CLASSES, sparse_y_pred=False),
keras.metrics.SparseCategoricalAccuracy(),
],
)
fcn16s_history = fcn16s_model.fit(train_ds, epochs=EPOCHS, validation_data=valid_ds)
```
<div class="k-default-codeblock">
```
Epoch 1/20
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
Corrupt JPEG data: premature end of data segment
98/98 [==============================] - 23s 127ms/step - loss: 6.4519 - mean_io_u_1: 0.3101 - sparse_categorical_accuracy: 0.5649 - val_loss: 5.7052 - val_mean_io_u_1: 0.3842 - val_sparse_categorical_accuracy: 0.6057
Epoch 2/20
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
Corrupt JPEG data: premature end of data segment
98/98 [==============================] - 19s 110ms/step - loss: 5.2670 - mean_io_u_1: 0.3936 - sparse_categorical_accuracy: 0.6339 - val_loss: 5.8929 - val_mean_io_u_1: 0.3864 - val_sparse_categorical_accuracy: 0.5940
Epoch 3/20
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
Corrupt JPEG data: premature end of data segment
98/98 [==============================] - 20s 111ms/step - loss: 5.2376 - mean_io_u_1: 0.3945 - sparse_categorical_accuracy: 0.6366 - val_loss: 5.6404 - val_mean_io_u_1: 0.3889 - val_sparse_categorical_accuracy: 0.6079
Epoch 4/20
Corrupt JPEG data: premature end of data segment
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
98/98 [==============================] - 21s 113ms/step - loss: 5.3014 - mean_io_u_1: 0.3924 - sparse_categorical_accuracy: 0.6323 - val_loss: 5.6516 - val_mean_io_u_1: 0.3874 - val_sparse_categorical_accuracy: 0.6094
Epoch 5/20
Corrupt JPEG data: premature end of data segment
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
98/98 [==============================] - 20s 112ms/step - loss: 5.3135 - mean_io_u_1: 0.3918 - sparse_categorical_accuracy: 0.6323 - val_loss: 5.6588 - val_mean_io_u_1: 0.3903 - val_sparse_categorical_accuracy: 0.6084
Epoch 6/20
Corrupt JPEG data: premature end of data segment
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
98/98 [==============================] - 20s 108ms/step - loss: 5.2401 - mean_io_u_1: 0.3938 - sparse_categorical_accuracy: 0.6357 - val_loss: 5.6463 - val_mean_io_u_1: 0.3868 - val_sparse_categorical_accuracy: 0.6097
Epoch 7/20
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
Corrupt JPEG data: premature end of data segment
98/98 [==============================] - 20s 109ms/step - loss: 5.2277 - mean_io_u_1: 0.3921 - sparse_categorical_accuracy: 0.6371 - val_loss: 5.6272 - val_mean_io_u_1: 0.3796 - val_sparse_categorical_accuracy: 0.6136
Epoch 8/20
Corrupt JPEG data: premature end of data segment
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
98/98 [==============================] - 20s 112ms/step - loss: 5.2479 - mean_io_u_1: 0.3910 - sparse_categorical_accuracy: 0.6360 - val_loss: 5.6303 - val_mean_io_u_1: 0.3823 - val_sparse_categorical_accuracy: 0.6108
Epoch 9/20
Corrupt JPEG data: premature end of data segment
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
98/98 [==============================] - 21s 112ms/step - loss: 5.1940 - mean_io_u_1: 0.3913 - sparse_categorical_accuracy: 0.6388 - val_loss: 5.8818 - val_mean_io_u_1: 0.3848 - val_sparse_categorical_accuracy: 0.5912
Epoch 10/20
Corrupt JPEG data: premature end of data segment
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
98/98 [==============================] - 20s 111ms/step - loss: 5.2457 - mean_io_u_1: 0.3898 - sparse_categorical_accuracy: 0.6358 - val_loss: 5.6423 - val_mean_io_u_1: 0.3880 - val_sparse_categorical_accuracy: 0.6087
Epoch 11/20
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
Corrupt JPEG data: premature end of data segment
98/98 [==============================] - 20s 110ms/step - loss: 5.1808 - mean_io_u_1: 0.3905 - sparse_categorical_accuracy: 0.6400 - val_loss: 5.6175 - val_mean_io_u_1: 0.3834 - val_sparse_categorical_accuracy: 0.6090
Epoch 12/20
Corrupt JPEG data: premature end of data segment
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
98/98 [==============================] - 20s 112ms/step - loss: 5.2730 - mean_io_u_1: 0.3907 - sparse_categorical_accuracy: 0.6341 - val_loss: 5.6322 - val_mean_io_u_1: 0.3878 - val_sparse_categorical_accuracy: 0.6109
Epoch 13/20
Corrupt JPEG data: premature end of data segment
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
98/98 [==============================] - 20s 109ms/step - loss: 5.2501 - mean_io_u_1: 0.3904 - sparse_categorical_accuracy: 0.6359 - val_loss: 5.8711 - val_mean_io_u_1: 0.3859 - val_sparse_categorical_accuracy: 0.5950
Epoch 14/20
Corrupt JPEG data: premature end of data segment
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
98/98 [==============================] - 20s 107ms/step - loss: 5.2407 - mean_io_u_1: 0.3926 - sparse_categorical_accuracy: 0.6362 - val_loss: 5.6387 - val_mean_io_u_1: 0.3805 - val_sparse_categorical_accuracy: 0.6122
Epoch 15/20
Corrupt JPEG data: premature end of data segment
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
98/98 [==============================] - 20s 108ms/step - loss: 5.2280 - mean_io_u_1: 0.3909 - sparse_categorical_accuracy: 0.6370 - val_loss: 5.6382 - val_mean_io_u_1: 0.3837 - val_sparse_categorical_accuracy: 0.6112
Epoch 16/20
Corrupt JPEG data: premature end of data segment
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
98/98 [==============================] - 20s 108ms/step - loss: 5.2232 - mean_io_u_1: 0.3899 - sparse_categorical_accuracy: 0.6369 - val_loss: 5.6285 - val_mean_io_u_1: 0.3818 - val_sparse_categorical_accuracy: 0.6101
Epoch 17/20
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
Corrupt JPEG data: premature end of data segment
98/98 [==============================] - 20s 107ms/step - loss: 1.4671 - mean_io_u_1: 0.5928 - sparse_categorical_accuracy: 0.8210 - val_loss: 0.7661 - val_mean_io_u_1: 0.6455 - val_sparse_categorical_accuracy: 0.8504
Epoch 18/20
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
Corrupt JPEG data: premature end of data segment
98/98 [==============================] - 20s 110ms/step - loss: 0.6795 - mean_io_u_1: 0.6508 - sparse_categorical_accuracy: 0.8664 - val_loss: 0.6913 - val_mean_io_u_1: 0.6490 - val_sparse_categorical_accuracy: 0.8562
Epoch 19/20
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
Corrupt JPEG data: premature end of data segment
98/98 [==============================] - 20s 110ms/step - loss: 0.6498 - mean_io_u_1: 0.6530 - sparse_categorical_accuracy: 0.8663 - val_loss: 0.6834 - val_mean_io_u_1: 0.6559 - val_sparse_categorical_accuracy: 0.8577
Epoch 20/20
Corrupt JPEG data: premature end of data segment
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
98/98 [==============================] - 20s 110ms/step - loss: 0.6305 - mean_io_u_1: 0.6563 - sparse_categorical_accuracy: 0.8681 - val_loss: 0.6529 - val_mean_io_u_1: 0.6575 - val_sparse_categorical_accuracy: 0.8657
```
</div>
### FCN-8S
```python
fcn8s_optimizer = keras.optimizers.AdamW(
learning_rate=LEARNING_RATE, weight_decay=WEIGHT_DECAY
)
fcn8s_loss = keras.losses.SparseCategoricalCrossentropy()
# Maintain mIOU and Pixel-wise Accuracy as metrics
fcn8s_model.compile(
optimizer=fcn8s_optimizer,
loss=fcn8s_loss,
metrics=[
keras.metrics.MeanIoU(num_classes=NUM_CLASSES, sparse_y_pred=False),
keras.metrics.SparseCategoricalAccuracy(),
],
)
fcn8s_history = fcn8s_model.fit(train_ds, epochs=EPOCHS, validation_data=valid_ds)
```
<div class="k-default-codeblock">
```
Epoch 1/20
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
Corrupt JPEG data: premature end of data segment
98/98 [==============================] - 24s 125ms/step - loss: 8.4168 - mean_io_u_2: 0.3116 - sparse_categorical_accuracy: 0.4237 - val_loss: 7.6113 - val_mean_io_u_2: 0.3540 - val_sparse_categorical_accuracy: 0.4682
Epoch 2/20
Corrupt JPEG data: premature end of data segment
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
98/98 [==============================] - 20s 110ms/step - loss: 8.1030 - mean_io_u_2: 0.3423 - sparse_categorical_accuracy: 0.4401 - val_loss: 7.7038 - val_mean_io_u_2: 0.3335 - val_sparse_categorical_accuracy: 0.4481
Epoch 3/20
Corrupt JPEG data: premature end of data segment
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
98/98 [==============================] - 20s 110ms/step - loss: 8.0868 - mean_io_u_2: 0.3433 - sparse_categorical_accuracy: 0.4408 - val_loss: 7.5839 - val_mean_io_u_2: 0.3518 - val_sparse_categorical_accuracy: 0.4722
Epoch 4/20
Corrupt JPEG data: premature end of data segment
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
98/98 [==============================] - 21s 111ms/step - loss: 8.1508 - mean_io_u_2: 0.3414 - sparse_categorical_accuracy: 0.4365 - val_loss: 7.2391 - val_mean_io_u_2: 0.3519 - val_sparse_categorical_accuracy: 0.4805
Epoch 5/20
Corrupt JPEG data: premature end of data segment
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
98/98 [==============================] - 20s 112ms/step - loss: 8.1621 - mean_io_u_2: 0.3440 - sparse_categorical_accuracy: 0.4361 - val_loss: 7.2805 - val_mean_io_u_2: 0.3474 - val_sparse_categorical_accuracy: 0.4816
Epoch 6/20
Corrupt JPEG data: premature end of data segment
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
98/98 [==============================] - 20s 110ms/step - loss: 8.1470 - mean_io_u_2: 0.3412 - sparse_categorical_accuracy: 0.4360 - val_loss: 7.5605 - val_mean_io_u_2: 0.3543 - val_sparse_categorical_accuracy: 0.4736
Epoch 7/20
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
Corrupt JPEG data: premature end of data segment
98/98 [==============================] - 20s 110ms/step - loss: 8.1464 - mean_io_u_2: 0.3430 - sparse_categorical_accuracy: 0.4368 - val_loss: 7.5442 - val_mean_io_u_2: 0.3542 - val_sparse_categorical_accuracy: 0.4702
Epoch 8/20
Corrupt JPEG data: premature end of data segment
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
98/98 [==============================] - 20s 108ms/step - loss: 8.0812 - mean_io_u_2: 0.3463 - sparse_categorical_accuracy: 0.4403 - val_loss: 7.5565 - val_mean_io_u_2: 0.3471 - val_sparse_categorical_accuracy: 0.4614
Epoch 9/20
Corrupt JPEG data: premature end of data segment
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
98/98 [==============================] - 20s 109ms/step - loss: 8.0441 - mean_io_u_2: 0.3463 - sparse_categorical_accuracy: 0.4420 - val_loss: 7.5563 - val_mean_io_u_2: 0.3522 - val_sparse_categorical_accuracy: 0.4734
Epoch 10/20
Corrupt JPEG data: premature end of data segment
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
98/98 [==============================] - 20s 110ms/step - loss: 8.1385 - mean_io_u_2: 0.3432 - sparse_categorical_accuracy: 0.4363 - val_loss: 7.5236 - val_mean_io_u_2: 0.3506 - val_sparse_categorical_accuracy: 0.4660
Epoch 11/20
Corrupt JPEG data: premature end of data segment
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
98/98 [==============================] - 20s 111ms/step - loss: 8.1114 - mean_io_u_2: 0.3447 - sparse_categorical_accuracy: 0.4381 - val_loss: 7.2068 - val_mean_io_u_2: 0.3518 - val_sparse_categorical_accuracy: 0.4808
Epoch 12/20
Corrupt JPEG data: premature end of data segment
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
98/98 [==============================] - 20s 107ms/step - loss: 8.0777 - mean_io_u_2: 0.3451 - sparse_categorical_accuracy: 0.4392 - val_loss: 7.2252 - val_mean_io_u_2: 0.3497 - val_sparse_categorical_accuracy: 0.4815
Epoch 13/20
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
Corrupt JPEG data: premature end of data segment
98/98 [==============================] - 21s 110ms/step - loss: 8.1355 - mean_io_u_2: 0.3446 - sparse_categorical_accuracy: 0.4366 - val_loss: 7.5587 - val_mean_io_u_2: 0.3500 - val_sparse_categorical_accuracy: 0.4671
Epoch 14/20
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
Corrupt JPEG data: premature end of data segment
98/98 [==============================] - 20s 107ms/step - loss: 8.1828 - mean_io_u_2: 0.3410 - sparse_categorical_accuracy: 0.4330 - val_loss: 7.2464 - val_mean_io_u_2: 0.3557 - val_sparse_categorical_accuracy: 0.4927
Epoch 15/20
Corrupt JPEG data: premature end of data segment
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
98/98 [==============================] - 20s 108ms/step - loss: 8.1845 - mean_io_u_2: 0.3432 - sparse_categorical_accuracy: 0.4330 - val_loss: 7.2032 - val_mean_io_u_2: 0.3506 - val_sparse_categorical_accuracy: 0.4805
Epoch 16/20
Corrupt JPEG data: premature end of data segment
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
98/98 [==============================] - 21s 109ms/step - loss: 8.1183 - mean_io_u_2: 0.3449 - sparse_categorical_accuracy: 0.4374 - val_loss: 7.6210 - val_mean_io_u_2: 0.3460 - val_sparse_categorical_accuracy: 0.4751
Epoch 17/20
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
Corrupt JPEG data: premature end of data segment
98/98 [==============================] - 21s 111ms/step - loss: 8.1766 - mean_io_u_2: 0.3429 - sparse_categorical_accuracy: 0.4329 - val_loss: 7.5361 - val_mean_io_u_2: 0.3489 - val_sparse_categorical_accuracy: 0.4639
Epoch 18/20
Corrupt JPEG data: premature end of data segment
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
98/98 [==============================] - 20s 109ms/step - loss: 8.0453 - mean_io_u_2: 0.3442 - sparse_categorical_accuracy: 0.4404 - val_loss: 7.1767 - val_mean_io_u_2: 0.3549 - val_sparse_categorical_accuracy: 0.4839
Epoch 19/20
Corrupt JPEG data: premature end of data segment
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
98/98 [==============================] - 20s 109ms/step - loss: 8.0856 - mean_io_u_2: 0.3449 - sparse_categorical_accuracy: 0.4390 - val_loss: 7.1724 - val_mean_io_u_2: 0.3574 - val_sparse_categorical_accuracy: 0.4878
Epoch 20/20
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
Corrupt JPEG data: premature end of data segment
98/98 [==============================] - 21s 109ms/step - loss: 8.1378 - mean_io_u_2: 0.3445 - sparse_categorical_accuracy: 0.4358 - val_loss: 7.5449 - val_mean_io_u_2: 0.3521 - val_sparse_categorical_accuracy: 0.4681
```
</div>
---
## Visualizations
### Plotting metrics for training run
We perform a comparative study between all 3 versions of the model by tracking training
and validation metrics of Accuracy, Loss and Mean IoU.
```python
total_plots = len(fcn32s_history.history)
cols = total_plots // 2
rows = total_plots // cols
if total_plots % cols != 0:
rows += 1
# Set all history dictionary objects
fcn32s_dict = fcn32s_history.history
fcn16s_dict = fcn16s_history.history
fcn8s_dict = fcn8s_history.history
pos = range(1, total_plots + 1)
plt.figure(figsize=(15, 10))
for i, ((key_32s, value_32s), (key_16s, value_16s), (key_8s, value_8s)) in enumerate(
zip(fcn32s_dict.items(), fcn16s_dict.items(), fcn8s_dict.items())
):
plt.subplot(rows, cols, pos[i])
plt.plot(range(len(value_32s)), value_32s)
plt.plot(range(len(value_16s)), value_16s)
plt.plot(range(len(value_8s)), value_8s)
plt.title(str(key_32s) + " (combined)")
plt.legend(["FCN-32S", "FCN-16S", "FCN-8S"])
plt.show()
```

### Visualizing predicted segmentation masks
To better understand the results, we pick a random image from the test
dataset and perform inference on it to see the masks generated by each model.
Note: For better results, the model must be trained for a higher number of epochs.
```python
images, masks = next(iter(test_ds))
random_idx = keras.random.uniform([], minval=0, maxval=BATCH_SIZE, seed=10)
# Get random test image and mask
test_image = images[int(random_idx)].numpy().astype("float")
test_mask = masks[int(random_idx)].numpy().astype("float")
pred_image = ops.expand_dims(test_image, axis=0)
pred_image = keras.applications.vgg19.preprocess_input(pred_image)
# Perform inference on FCN-32S
pred_mask_32s = fcn32s_model.predict(pred_image, verbose=0).astype("float")
pred_mask_32s = np.argmax(pred_mask_32s, axis=-1)
pred_mask_32s = pred_mask_32s[0, ...]
# Perform inference on FCN-16S
pred_mask_16s = fcn16s_model.predict(pred_image, verbose=0).astype("float")
pred_mask_16s = np.argmax(pred_mask_16s, axis=-1)
pred_mask_16s = pred_mask_16s[0, ...]
# Perform inference on FCN-8S
pred_mask_8s = fcn8s_model.predict(pred_image, verbose=0).astype("float")
pred_mask_8s = np.argmax(pred_mask_8s, axis=-1)
pred_mask_8s = pred_mask_8s[0, ...]
# Plot all results
fig, ax = plt.subplots(nrows=2, ncols=3, figsize=(15, 8))
fig.delaxes(ax[0, 2])
ax[0, 0].set_title("Image")
ax[0, 0].imshow(test_image / 255.0)
ax[0, 1].set_title("Image with ground truth overlay")
ax[0, 1].imshow(test_image / 255.0)
ax[0, 1].imshow(
test_mask,
cmap="inferno",
alpha=0.6,
)
ax[1, 0].set_title("Image with FCN-32S mask overlay")
ax[1, 0].imshow(test_image / 255.0)
ax[1, 0].imshow(pred_mask_32s, cmap="inferno", alpha=0.6)
ax[1, 1].set_title("Image with FCN-16S mask overlay")
ax[1, 1].imshow(test_image / 255.0)
ax[1, 1].imshow(pred_mask_16s, cmap="inferno", alpha=0.6)
ax[1, 2].set_title("Image with FCN-8S mask overlay")
ax[1, 2].imshow(test_image / 255.0)
ax[1, 2].imshow(pred_mask_8s, cmap="inferno", alpha=0.6)
plt.show()
```
<div class="k-default-codeblock">
```
WARNING:matplotlib.image:Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
WARNING:matplotlib.image:Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
WARNING:matplotlib.image:Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
WARNING:matplotlib.image:Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
WARNING:matplotlib.image:Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
```
</div>

---
## Conclusion
The Fully-Convolutional Network is an exceptionally simple network that has yielded
strong results in Image Segmentation tasks across different benchmarks.
With the advent of better mechanisms like [Attention](https://arxiv.org/abs/1706.03762) as used in
[SegFormer](https://arxiv.org/abs/2105.15203) and
[DETR](https://arxiv.org/abs/2005.12872), this model serves as a quick way to iterate and
find baselines for this task on new data.
---
## Acknowledgements
I thank [Aritra Roy Gosthipaty](https://twitter.com/ariG23498), [Ayush
Thakur](https://twitter.com/ayushthakur0) and [Ritwik
Raha](https://twitter.com/ritwik_raha) for giving a preliminary review of the example.
I also thank the [Google Developer
Experts](https://developers.google.com/community/experts) program.
|
keras-io/examples/vision/md/fully_convolutional_network.md/0
|
{
"file_path": "keras-io/examples/vision/md/fully_convolutional_network.md",
"repo_id": "keras-io",
"token_count": 15419
}
| 128 |
# Metric learning for image similarity search using TensorFlow Similarity
**Author:** [Owen Vallis](https://twitter.com/owenvallis)<br>
**Date created:** 2021/09/30<br>
**Last modified:** 2022/02/29<br>
**Description:** Example of using similarity metric learning on CIFAR-10 images.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/vision/ipynb/metric_learning_tf_similarity.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/vision/metric_learning_tf_similarity.py)
---
## Overview
This example is based on the
["Metric learning for image similarity search" example](https://keras.io/examples/vision/metric_learning/).
We aim to use the same data set but implement the model using
[TensorFlow Similarity](https://github.com/tensorflow/similarity).
Metric learning aims to train models that can embed inputs into a
high-dimensional space such that "similar" inputs are pulled closer to each
other and "dissimilar" inputs are pushed farther apart. Once trained, these
models can produce embeddings for downstream systems where such similarity is
useful, for instance as a ranking signal for search or as a form of pretrained
embedding model for another supervised problem.
For a more detailed overview of metric learning, see:
* [What is metric learning?](http://contrib.scikit-learn.org/metric-learn/introduction.html)
* ["Using crossentropy for metric learning" tutorial](https://www.youtube.com/watch?v=Jb4Ewl5RzkI)
---
## Setup
This tutorial will use the [TensorFlow Similarity](https://github.com/tensorflow/similarity) library
to learn and evaluate the similarity embedding.
TensorFlow Similarity provides components that:
* Make training contrastive models simple and fast.
* Make it easier to ensure that batches contain pairs of examples.
* Enable the evaluation of the quality of the embedding.
TensorFlow Similarity can be installed easily via pip, as follows:
```
pip -q install tensorflow_similarity
```
```python
import random
from matplotlib import pyplot as plt
from mpl_toolkits import axes_grid1
import numpy as np
import tensorflow as tf
from tensorflow import keras
import tensorflow_similarity as tfsim
tfsim.utils.tf_cap_memory()
print("TensorFlow:", tf.__version__)
print("TensorFlow Similarity:", tfsim.__version__)
```
<div class="k-default-codeblock">
```
TensorFlow: 2.7.0
TensorFlow Similarity: 0.15.5
```
</div>
---
## Dataset samplers
We will be using the
[CIFAR-10](https://www.tensorflow.org/datasets/catalog/cifar10)
dataset for this tutorial.
For a similarity model to learn efficiently, each batch must contain at least 2
examples of each class.
To make this easy, tf_similarity offers `Sampler` objects that enable you to set both
the number of classes and the minimum number of examples of each class per
batch.
The train and validation datasets will be created using the
`TFDatasetMultiShotMemorySampler` object. This creates a sampler that loads datasets
from [TensorFlow Datasets](https://www.tensorflow.org/datasets) and yields
batches containing a target number of classes and a target number of examples
per class. Additionally, we can restrict the sampler to only yield the subset of
classes defined in `class_list`, enabling us to train on a subset of the classes
and then test how the embedding generalizes to the unseen classes. This can be
useful when working on few-shot learning problems.
The following cell creates a train_ds sampler that:
* Loads the CIFAR-10 dataset from TFDS and then takes `examples_per_class_per_batch` examples per class.
* Ensures the sampler restricts the classes to those defined in `class_list`.
* Ensures each batch contains 10 different classes with 8 examples each.
We also create a validation dataset in the same way, but we limit the total number of
examples per class to 100 and set the examples per class per batch to the
default of 2.
```python
# This determines the number of classes used during training.
# Here we are using all the classes.
num_known_classes = 10
class_list = random.sample(population=range(10), k=num_known_classes)
classes_per_batch = 10
# Passing multiple examples per class per batch ensures that each example has
# multiple positive pairs. This can be useful when performing triplet mining or
# when using losses like `MultiSimilarityLoss` or `CircleLoss` as these can
# take a weighted mix of all the positive pairs. In general, more examples per
# class will lead to more information for the positive pairs, while more classes
# per batch will provide more varied information in the negative pairs. However,
# the losses compute the pairwise distance between the examples in a batch so
# the upper limit of the batch size is restricted by the memory.
examples_per_class_per_batch = 8
print(
"Batch size is: "
f"{min(classes_per_batch, num_known_classes) * examples_per_class_per_batch}"
)
print(" Create Training Data ".center(34, "#"))
train_ds = tfsim.samplers.TFDatasetMultiShotMemorySampler(
"cifar10",
classes_per_batch=min(classes_per_batch, num_known_classes),
splits="train",
steps_per_epoch=4000,
examples_per_class_per_batch=examples_per_class_per_batch,
class_list=class_list,
)
print("\n" + " Create Validation Data ".center(34, "#"))
val_ds = tfsim.samplers.TFDatasetMultiShotMemorySampler(
"cifar10",
classes_per_batch=classes_per_batch,
splits="test",
total_examples_per_class=100,
)
```
<div class="k-default-codeblock">
```
Batch size is: 80
###### Create Training Data ######
converting train: 0%| | 0/50000 [00:00<?, ?it/s]
```
</div>
<div class="k-default-codeblock">
```
The initial batch size is 80 (10 classes * 8 examples per class) with 0 augmenters
filtering examples: 0%| | 0/50000 [00:00<?, ?it/s]
selecting classes: 0%| | 0/10 [00:00<?, ?it/s]
gather examples: 0%| | 0/50000 [00:00<?, ?it/s]
indexing classes: 0%| | 0/50000 [00:00<?, ?it/s]
```
</div>
<div class="k-default-codeblock">
```
##### Create Validation Data #####
converting test: 0%| | 0/10000 [00:00<?, ?it/s]
```
</div>
<div class="k-default-codeblock">
```
The initial batch size is 20 (10 classes * 2 examples per class) with 0 augmenters
filtering examples: 0%| | 0/10000 [00:00<?, ?it/s]
selecting classes: 0%| | 0/10 [00:00<?, ?it/s]
gather examples: 0%| | 0/1000 [00:00<?, ?it/s]
indexing classes: 0%| | 0/1000 [00:00<?, ?it/s]
```
</div>
---
## Visualize the dataset
The samplers will shuffle the dataset, so we can get a sense of the dataset by
plotting the first 25 images.
The samplers provide a `get_slice(begin, size)` method that allows us to easily
select a block of samples.
Alternatively, we can use the `generate_batch()` method to yield a batch. This
can allow us to check that a batch contains the expected number of classes and
examples per class (see the quick check after the plot below).
```python
num_cols = num_rows = 5
# Get the first 25 examples.
x_slice, y_slice = train_ds.get_slice(begin=0, size=num_cols * num_rows)
fig = plt.figure(figsize=(6.0, 6.0))
grid = axes_grid1.ImageGrid(fig, 111, nrows_ncols=(num_cols, num_rows), axes_pad=0.1)
for ax, im, label in zip(grid, x_slice, y_slice):
ax.imshow(im)
ax.axis("off")
```

---
## Embedding model
Next we define a `SimilarityModel` using the Keras Functional API. The model
is a standard convnet with the addition of a `MetricEmbedding` layer that
applies L2 normalization. The metric embedding layer is helpful when using
`Cosine` distance as we only care about the angle between the vectors.
Additionally, the `SimilarityModel` provides a number of helper methods for:
* Indexing embedded examples
* Performing example lookups
* Evaluating the classification
* Evaluating the quality of the embedding space
See the [TensorFlow Similarity documentation](https://github.com/tensorflow/similarity)
for more details.
```python
embedding_size = 256
inputs = keras.layers.Input((32, 32, 3))
x = keras.layers.Rescaling(scale=1.0 / 255)(inputs)
x = keras.layers.Conv2D(64, 3, activation="relu")(x)
x = keras.layers.BatchNormalization()(x)
x = keras.layers.Conv2D(128, 3, activation="relu")(x)
x = keras.layers.BatchNormalization()(x)
x = keras.layers.MaxPool2D((4, 4))(x)
x = keras.layers.Conv2D(256, 3, activation="relu")(x)
x = keras.layers.BatchNormalization()(x)
x = keras.layers.Conv2D(256, 3, activation="relu")(x)
x = keras.layers.GlobalMaxPool2D()(x)
outputs = tfsim.layers.MetricEmbedding(embedding_size)(x)
# building model
model = tfsim.models.SimilarityModel(inputs, outputs)
model.summary()
```
<div class="k-default-codeblock">
```
Model: "similarity_model"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_1 (InputLayer) [(None, 32, 32, 3)] 0
rescaling (Rescaling) (None, 32, 32, 3) 0
conv2d (Conv2D) (None, 30, 30, 64) 1792
batch_normalization (BatchN (None, 30, 30, 64) 256
ormalization)
conv2d_1 (Conv2D) (None, 28, 28, 128) 73856
batch_normalization_1 (Batc (None, 28, 28, 128) 512
hNormalization)
max_pooling2d (MaxPooling2D (None, 7, 7, 128) 0
)
conv2d_2 (Conv2D) (None, 5, 5, 256) 295168
batch_normalization_2 (Batc (None, 5, 5, 256) 1024
hNormalization)
conv2d_3 (Conv2D) (None, 3, 3, 256) 590080
global_max_pooling2d (Globa (None, 256) 0
lMaxPooling2D)
metric_embedding (MetricEmb (None, 256) 65792
edding)
=================================================================
Total params: 1,028,480
Trainable params: 1,027,584
Non-trainable params: 896
_________________________________________________________________
```
</div>
---
## Similarity loss
The similarity loss expects batches containing at least 2 examples of each
class, from which it computes the loss over the pairwise positive and negative
distances. Here we are using `MultiSimilarityLoss()`
([paper](https://arxiv.org/abs/1904.06627)), one of several losses in
[TensorFlow Similarity](https://github.com/tensorflow/similarity). This loss
attempts to use all informative pairs in the batch, taking into account the
self-similarity, positive-similarity, and the negative-similarity.
```python
epochs = 3
learning_rate = 0.002
val_steps = 50
# init similarity loss
loss = tfsim.losses.MultiSimilarityLoss()
# compiling and training
model.compile(
optimizer=keras.optimizers.Adam(learning_rate), loss=loss, steps_per_execution=10,
)
history = model.fit(
train_ds, epochs=epochs, validation_data=val_ds, validation_steps=val_steps
)
```
<div class="k-default-codeblock">
```
Distance metric automatically set to cosine use the distance arg to override.
Epoch 1/3
4000/4000 [==============================] - ETA: 0s - loss: 2.2179Warmup complete
4000/4000 [==============================] - 38s 9ms/step - loss: 2.2179 - val_loss: 0.8894
Warmup complete
Epoch 2/3
4000/4000 [==============================] - 34s 9ms/step - loss: 1.9047 - val_loss: 0.8767
Epoch 3/3
4000/4000 [==============================] - 35s 9ms/step - loss: 1.6336 - val_loss: 0.8469
```
</div>
---
## Indexing
Now that we have trained our model, we can create an index of examples. Here we
batch index the first 200 validation examples by passing the x and y to the index
along with storing the image in the data parameter. The `x_index` values are
embedded and then added to the index to make them searchable. The `y_index` and
data parameters are optional but allow the user to associate metadata with the
embedded example.
```python
x_index, y_index = val_ds.get_slice(begin=0, size=200)
model.reset_index()
model.index(x_index, y_index, data=x_index)
```
<div class="k-default-codeblock">
```
[Indexing 200 points]
|-Computing embeddings
|-Storing data points in key value store
|-Adding embeddings to index.
|-Building index.
```
</div>
<div class="k-default-codeblock">
```
0% 10 20 30 40 50 60 70 80 90 100%
|----|----|----|----|----|----|----|----|----|----|
***************************************************
```
</div>
---
## Calibration
Once the index is built, we can calibrate a distance threshold using a matching
strategy and a calibration metric.
Here we are searching for the optimal F1 score while using K=1 as our
classifier. All matches at or below the calibrated threshold distance will be
labeled as a Positive match between the query example and the label associated
with the match result, while all matches above the threshold distance will be
labeled as a Negative match.
Additionally, we pass in extra metrics to compute as well. All values in the
output are computed at the calibrated threshold.
Finally, `model.calibrate()` returns a `CalibrationResults` object containing:
* `"cutpoints"`: A Python dict mapping the cutpoint name to a dict containing the
`ClassificationMetric` values associated with a particular distance threshold,
e.g., `"optimal" : {"acc": 0.90, "f1": 0.92}`.
* `"thresholds"`: A Python dict mapping `ClassificationMetric` names to a list
containing the metric's value computed at each of the distance thresholds, e.g.,
`{"f1": [0.99, 0.80], "distance": [0.0, 1.0]}`.
```python
x_train, y_train = train_ds.get_slice(begin=0, size=1000)
calibration = model.calibrate(
x_train,
y_train,
calibration_metric="f1",
matcher="match_nearest",
extra_metrics=["precision", "recall", "binary_accuracy"],
verbose=1,
)
```
<div class="k-default-codeblock">
```
Performing NN search
```
</div>
<div class="k-default-codeblock">
```
Building NN list: 0%| | 0/1000 [00:00<?, ?it/s]
Evaluating: 0%| | 0/4 [00:00<?, ?it/s]
computing thresholds: 0%| | 0/989 [00:00<?, ?it/s]
```
</div>
<div class="k-default-codeblock">
```
name value distance precision recall binary_accuracy f1
------- ------- ---------- ----------- -------- ----------------- --------
optimal 0.93 0.048435 0.869 1 0.869 0.929909
```
</div>
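Given the `CalibrationResults` structure described above, the calibrated cutpoint can also be read back programmatically. This is a minimal sketch; the exact keys inside the cutpoint dict (such as `"distance"` and `"f1"`) are assumed from the table printed above, so print the dict first to confirm them for your version of TensorFlow Similarity.
```python
# Read back the calibrated "optimal" cutpoint.
# The key names inside the dict are assumptions based on the table above.
optimal_cutpoint = calibration.cutpoints["optimal"]
print(optimal_cutpoint)
print("Calibrated distance threshold:", optimal_cutpoint.get("distance"))
print("F1 at the calibrated threshold:", optimal_cutpoint.get("f1"))
```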
---
## Visualization
It may be difficult to get a sense of the model quality from the metrics alone.
A complementary approach is to manually inspect a set of query results to get a
feel for the match quality.
Here we take 10 validation examples and plot them with their 5 nearest
neighbors and the distances to the query example. Looking at the results, we see
that while they are imperfect they still represent meaningfully similar images,
and that the model is able to find similar images irrespective of their pose or
image illumination.
We can also see that the model is very confident with certain images, resulting
in very small distances between the query and the neighbors. Conversely, we see
more mistakes in the class labels as the distances become larger. This is one of
the reasons why calibration is critical for matching applications.
```python
num_neighbors = 5
labels = [
"Airplane",
"Automobile",
"Bird",
"Cat",
"Deer",
"Dog",
"Frog",
"Horse",
"Ship",
"Truck",
"Unknown",
]
class_mapping = {c_id: c_lbl for c_id, c_lbl in zip(range(11), labels)}
x_display, y_display = val_ds.get_slice(begin=200, size=10)
# lookup nearest neighbors in the index
nns = model.lookup(x_display, k=num_neighbors)
# display
for idx in np.argsort(y_display):
tfsim.visualization.viz_neigbors_imgs(
x_display[idx],
y_display[idx],
nns[idx],
class_mapping=class_mapping,
fig_size=(16, 2),
)
```
<div class="k-default-codeblock">
```
Performing NN search
```
</div>
<div class="k-default-codeblock">
```
Building NN list: 0%| | 0/10 [00:00<?, ?it/s]
```
</div>










---
## Metrics
We can also plot the extra metrics contained in the `CalibrationResults` to get
a sense of the matching performance as the distance threshold increases.
The following plots show the Precision, Recall, and F1 Score. We can see that
the matching precision degrades as the distance increases, but that the
percentage of the queries that we accept as positive matches (recall) grows
faster up to the calibrated distance threshold.
```python
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 5))
x = calibration.thresholds["distance"]
ax1.plot(x, calibration.thresholds["precision"], label="precision")
ax1.plot(x, calibration.thresholds["recall"], label="recall")
ax1.plot(x, calibration.thresholds["f1"], label="f1 score")
ax1.legend()
ax1.set_title("Metric evolution as distance increase")
ax1.set_xlabel("Distance")
ax1.set_ylim((-0.05, 1.05))
ax2.plot(calibration.thresholds["recall"], calibration.thresholds["precision"])
ax2.set_title("Precision recall curve")
ax2.set_xlabel("Recall")
ax2.set_ylabel("Precision")
ax2.set_ylim((-0.05, 1.05))
plt.show()
```

We can also take 100 examples for each class and plot the confusion matrix for
each example and their nearest match. We also add an "extra" 10th class to
represent the matches above the calibrated distance threshold.
We can see that most of the errors are between the animal classes with an
interesting number of confusions between Airplane and Bird. Additionally, we see
that only a few of the 100 examples for each class returned matches outside of
the calibrated distance threshold.
```python
cutpoint = "optimal"
# This yields 100 examples for each class.
# We defined this when we created the val_ds sampler.
x_confusion, y_confusion = val_ds.get_slice(0, -1)
matches = model.match(x_confusion, cutpoint=cutpoint, no_match_label=10)
cm = tfsim.visualization.confusion_matrix(
matches,
y_confusion,
labels=labels,
title="Confusion matrix for cutpoint:%s" % cutpoint,
normalize=False,
)
```

---
## No Match
We can plot the examples outside of the calibrated threshold to see which images
are not matching any indexed examples.
This may provide insight into what other examples may need to be indexed or
surface anomalous examples within the class.
```python
idx_no_match = np.where(np.array(matches) == 10)
no_match_queries = x_confusion[idx_no_match]
if len(no_match_queries):
plt.imshow(no_match_queries[0])
else:
print("All queries have a match below the distance threshold.")
```

---
## Visualize clusters
One of the best ways to quickly get a sense of how well the model is
doing and to understand its shortcomings is to project the embeddings into a 2D
space.
This allows us to inspect clusters of images and understand which classes are
entangled.
```python
# Each class in val_ds was restricted to 100 examples.
num_examples_to_clusters = 1000
thumb_size = 96
plot_size = 800
vx, vy = val_ds.get_slice(0, num_examples_to_clusters)
# Uncomment to run the interactive projector.
# tfsim.visualization.projector(
# model.predict(vx),
# labels=vy,
# images=vx,
# class_mapping=class_mapping,
# image_size=thumb_size,
# plot_size=plot_size,
# )
```
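If you prefer a static plot over the interactive projector, you can reduce the embeddings to 2D yourself. The sketch below uses scikit-learn's t-SNE, which is an extra dependency and not part of the original workflow:
```python
# Optional: a static 2D view of the embedding space using t-SNE.
# scikit-learn is an additional dependency here.
from sklearn.manifold import TSNE

embeddings = model.predict(vx)
embeddings_2d = TSNE(n_components=2, init="pca", random_state=42).fit_transform(
    embeddings
)

plt.figure(figsize=(8, 8))
scatter = plt.scatter(
    embeddings_2d[:, 0], embeddings_2d[:, 1], c=np.array(vy), cmap="tab10", s=8
)
plt.colorbar(scatter, label="Class id")
plt.title("t-SNE projection of the validation embeddings")
plt.show()
```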
|
keras-io/examples/vision/md/metric_learning_tf_similarity.md/0
|
{
"file_path": "keras-io/examples/vision/md/metric_learning_tf_similarity.md",
"repo_id": "keras-io",
"token_count": 8491
}
| 129 |
# Investigating Vision Transformer representations
**Authors:** [Aritra Roy Gosthipaty](https://twitter.com/ariG23498), [Sayak Paul](https://twitter.com/RisingSayak) (equal contribution)<br>
**Date created:** 2022/04/12<br>
**Last modified:** 2023/11/20<br>
**Description:** Looking into the representations learned by different Vision Transformers variants.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/vision/ipynb/probing_vits.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/vision/probing_vits.py)
---
## Introduction
In this example, we look into the representations learned by different Vision
Transformer (ViT) models. Our main goal with this example is to provide insights into
what empowers ViTs to learn from image data. In particular, the example discusses
implementations of a few different ViT analysis tools.
**Note:** when we say "Vision Transformer", we refer to a computer vision architecture that
involves Transformer blocks ([Vaswani et al.](https://arxiv.org/abs/1706.03762)) and not
necessarily the original Vision Transformer model
([Dosovitskiy et al.](https://arxiv.org/abs/2010.11929)).
---
## Models considered
Since the inception of the original Vision Transformer, the computer vision community has
seen a number of different ViT variants improving upon the original in various ways:
training improvements, architecture improvements, and so on.
In this example, we consider the following ViT model families:
* ViTs trained using supervised pretraining with the ImageNet-1k and ImageNet-21k
datasets ([Dosovitskiy et al.](https://arxiv.org/abs/2010.11929))
* ViTs trained using supervised pretraining but only with the ImageNet-1k dataset with
more regularization and distillation ([Touvron et al.](https://arxiv.org/abs/2012.12877))
(DeiT).
* ViTs trained using self-supervised pretraining ([Caron et al.](https://arxiv.org/abs/2104.14294))
(DINO).
Since the pretrained models are not implemented in Keras, we first implemented them as
faithfully as possible. We then populated them with the official pretrained parameters.
Finally, we evaluated our implementations on the ImageNet-1k validation set to ensure the
evaluation numbers were matching with the original implementations. The details of our implementations
are available in [this repository](https://github.com/sayakpaul/probing-vits).
To keep the example concise, we won't exhaustively pair each model with the analysis
methods. We'll provide notes in the respective sections so that you can pick up the
pieces.
To run this example on Google Colab, we need to update the `gdown` library like so:
```shell
pip install -U gdown -q
```
---
## Imports
```python
import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import zipfile
from io import BytesIO
import cv2
import matplotlib.pyplot as plt
import numpy as np
import requests
from PIL import Image
from sklearn.preprocessing import MinMaxScaler
import keras
from keras import ops
```
---
## Constants
```python
RESOLUTION = 224
PATCH_SIZE = 16
GITHUB_RELEASE = "https://github.com/sayakpaul/probing-vits/releases/download/v1.0.0/probing_vits.zip"
FNAME = "probing_vits.zip"
MODELS_ZIP = {
"vit_dino_base16": "Probing_ViTs/vit_dino_base16.zip",
"vit_b16_patch16_224": "Probing_ViTs/vit_b16_patch16_224.zip",
"vit_b16_patch16_224-i1k_pretrained": "Probing_ViTs/vit_b16_patch16_224-i1k_pretrained.zip",
}
```
---
## Data utilities
For the original ViT models, the input images need to be scaled to the range `[-1, 1]`. For
the other model families mentioned at the beginning, we need to normalize the images with
channel-wise mean and standard deviation of the ImageNet-1k training set.
```python
crop_layer = keras.layers.CenterCrop(RESOLUTION, RESOLUTION)
norm_layer = keras.layers.Normalization(
mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
variance=[(0.229 * 255) ** 2, (0.224 * 255) ** 2, (0.225 * 255) ** 2],
)
rescale_layer = keras.layers.Rescaling(scale=1.0 / 127.5, offset=-1)
def preprocess_image(image, model_type, size=RESOLUTION):
# Turn the image into a numpy array and add batch dim.
image = np.array(image)
image = ops.expand_dims(image, 0)
# If model type is vit rescale the image to [-1, 1].
if model_type == "original_vit":
image = rescale_layer(image)
# Resize the image using bicubic interpolation.
resize_size = int((256 / 224) * size)
image = ops.image.resize(image, (resize_size, resize_size), interpolation="bicubic")
# Crop the image.
image = crop_layer(image)
# If model type is DeiT or DINO normalize the image.
if model_type != "original_vit":
image = norm_layer(image)
return ops.convert_to_numpy(image)
def load_image_from_url(url, model_type):
# Credit: Willi Gierke
response = requests.get(url)
image = Image.open(BytesIO(response.content))
preprocessed_image = preprocess_image(image, model_type)
return image, preprocessed_image
```
---
## Load a test image and display it
```python
# ImageNet-1k label mapping file and load it.
mapping_file = keras.utils.get_file(
origin="https://storage.googleapis.com/bit_models/ilsvrc2012_wordnet_lemmas.txt"
)
with open(mapping_file, "r") as f:
lines = f.readlines()
imagenet_int_to_str = [line.rstrip() for line in lines]
img_url = "https://dl.fbaipublicfiles.com/dino/img.png"
image, preprocessed_image = load_image_from_url(img_url, model_type="original_vit")
plt.imshow(image)
plt.axis("off")
plt.show()
```

---
## Load a model
```python
zip_path = keras.utils.get_file(
fname=FNAME,
origin=GITHUB_RELEASE,
)
with zipfile.ZipFile(zip_path, "r") as zip_ref:
zip_ref.extractall("./")
os.rename("Probing ViTs", "Probing_ViTs")
def load_model(model_path: str) -> keras.Model:
with zipfile.ZipFile(model_path, "r") as zip_ref:
zip_ref.extractall("Probing_ViTs/")
model_name = model_path.split(".")[0]
inputs = keras.Input((RESOLUTION, RESOLUTION, 3))
model = keras.layers.TFSMLayer(model_name, call_endpoint="serving_default")
outputs = model(inputs, training=False)
return keras.Model(inputs, outputs=outputs)
vit_base_i21k_patch16_224 = load_model(MODELS_ZIP["vit_b16_patch16_224-i1k_pretrained"])
print("Model loaded.")
```
<div class="k-default-codeblock">
```
Model loaded.
```
</div>
**More about the model**:
This model was pretrained on the ImageNet-21k dataset and was then fine-tuned on the
ImageNet-1k dataset. To learn more about how we developed this model in TensorFlow
(with pretrained weights from
[this source](https://github.com/google-research/vision_transformer/)) refer to
[this notebook](https://github.com/sayakpaul/probing-vits/blob/main/notebooks/load-jax-weights-vitb16.ipynb).
---
## Running regular inference with the model
We now run inference with the loaded model on our test image.
```python
def split_prediction_and_attention_scores(outputs):
predictions = outputs["output_1"]
attention_score_dict = {}
for key, value in outputs.items():
if key.startswith("output_2_"):
attention_score_dict[key[len("output_2_") :]] = value
return predictions, attention_score_dict
predictions, attention_score_dict = split_prediction_and_attention_scores(
vit_base_i21k_patch16_224.predict(preprocessed_image)
)
predicted_label = imagenet_int_to_str[int(np.argmax(predictions))]
print(predicted_label)
```
<div class="k-default-codeblock">
```
1/1 ━━━━━━━━━━━━━━━━━━━━ 5s 5s/step
toucan
WARNING: All log messages before absl::InitializeLog() is called are written to STDERR
I0000 00:00:1700526824.965785 75784 device_compiler.h:187] Compiled cluster using XLA! This line is logged at most once for the lifetime of the process.
```
</div>
`attention_score_dict` contains the attention scores (softmaxed outputs) from each
attention head of each Transformer block.
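It can be helpful to peek at what `attention_score_dict` holds before using it. The key names depend on how the layers were named when the model was exported, so treat the exact names as model-specific:
```python
# Each value should have shape (batch, num_heads, num_tokens, num_tokens).
for name, scores in attention_score_dict.items():
    print(name, scores.shape)
```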
---
## Method I: Mean attention distance
[Dosovitskiy et al.](https://arxiv.org/abs/2010.11929) and
[Raghu et al.](https://arxiv.org/abs/2108.08810) use a measure called
"mean attention distance" from each attention head of different
Transformer blocks to understand how local and global information flows
into Vision Transformers.
Mean attention distance is defined as the distance between query tokens and the other
tokens times attention weights. So, for a single image
* we take individual patches (tokens) extracted from the image,
* calculate their geometric distance, and
* multiply that with the attention scores.
Attention scores are calculated here after forward passing the image in inference mode
through the network. The following figure may help you understand the process a
little bit better.
<img src="https://i.imgur.com/pZCgPwl.gif" height=500>
This animation is created by [Ritwik Raha](https://twitter.com/ritwik_raha).
```python
def compute_distance_matrix(patch_size, num_patches, length):
distance_matrix = np.zeros((num_patches, num_patches))
for i in range(num_patches):
for j in range(num_patches):
if i == j: # zero distance
continue
xi, yi = (int(i / length)), (i % length)
xj, yj = (int(j / length)), (j % length)
distance_matrix[i, j] = patch_size * np.linalg.norm([xi - xj, yi - yj])
return distance_matrix
def compute_mean_attention_dist(patch_size, attention_weights, model_type):
num_cls_tokens = 2 if "distilled" in model_type else 1
# The attention_weights shape = (batch, num_heads, num_patches, num_patches)
attention_weights = attention_weights[
..., num_cls_tokens:, num_cls_tokens:
] # Removing the CLS token
num_patches = attention_weights.shape[-1]
length = int(np.sqrt(num_patches))
assert length**2 == num_patches, "Num patches is not perfect square"
distance_matrix = compute_distance_matrix(patch_size, num_patches, length)
h, w = distance_matrix.shape
distance_matrix = distance_matrix.reshape((1, 1, h, w))
# The attention_weights along the last axis adds to 1
# this is due to the fact that they are softmax of the raw logits
# summation of the (attention_weights * distance_matrix)
# should result in an average distance per token.
mean_distances = attention_weights * distance_matrix
mean_distances = np.sum(
mean_distances, axis=-1
) # Sum along last axis to get average distance per token
mean_distances = np.mean(
mean_distances, axis=-1
) # Now average across all the tokens
return mean_distances
```
Thanks to [Simon Kornblith](https://scholar.google.com/citations?user=1O3RPmsAAAAJ&hl=en)
from Google who helped us with this code snippet. It can be found
[here](https://gist.github.com/simonster/155894d48aef2bd36bd2dd8267e62391). Let's now use
these utilities to generate a plot of attention distances with our loaded model and test
image.
```python
# Build the mean distances for every Transformer block.
mean_distances = {
f"{name}_mean_dist": compute_mean_attention_dist(
patch_size=PATCH_SIZE,
attention_weights=attention_weight,
model_type="original_vit",
)
for name, attention_weight in attention_score_dict.items()
}
# Get the number of heads from the mean distance output.
num_heads = mean_distances["transformer_block_0_att_mean_dist"].shape[-1]
# Print the shapes
print(f"Num Heads: {num_heads}.")
plt.figure(figsize=(9, 9))
for idx in range(len(mean_distances)):
mean_distance = mean_distances[f"transformer_block_{idx}_att_mean_dist"]
x = [idx] * num_heads
y = mean_distance[0, :]
plt.scatter(x=x, y=y, label=f"transformer_block_{idx}")
plt.legend(loc="lower right")
plt.xlabel("Attention Head", fontsize=14)
plt.ylabel("Attention Distance", fontsize=14)
plt.title("vit_base_i21k_patch16_224", fontsize=14)
plt.grid()
plt.show()
```
<div class="k-default-codeblock">
```
Num Heads: 12.
```
</div>

### Inspecting the plots
**How does self-attention span across the input space? Do the attention heads attend
to input regions locally or globally?**
The promise of self-attention is to enable the learning of contextual dependencies
so that a model can attend to the regions of inputs which are the most salient w.r.t
the objective. From the above plots we can notice that different attention heads yield
different attention distances suggesting they use both local and global information
from an image. But as we go deeper in the Transformer blocks the heads tend to
focus more on global aggregate information.
Inspired by [Raghu et al.](https://arxiv.org/abs/2108.08810) we computed mean attention
distances over 1000 images randomly taken from the ImageNet-1k validation set and we
repeated the process for all the models mentioned at the beginning. Interestingly, we
notice the following:
* Pretraining with a larger dataset helps with more global attention spans:
| Pretrained on ImageNet-21k<br>Fine-tuned on ImageNet-1k | Pretrained on ImageNet-1k |
| :--: | :--: |
|  |  |
* When distilled from a CNN, ViTs tend to have less global attention spans:
| No distillation (ViT B-16 from DeiT) | Distilled ViT B-16 from DeiT |
| :--: | :--: |
|  |  |
To reproduce these plots, please refer to
[this notebook](https://github.com/sayakpaul/probing-vits/blob/main/notebooks/mean-attention-distance-1k.ipynb).
---
## Method II: Attention Rollout
[Abnar et al.](https://arxiv.org/abs/2005.00928) introduce "Attention rollout" for
quantifying how information flows through the self-attention layers of Transformer blocks.
The original ViT authors use this method to investigate the learned representations, stating:
> Briefly, we averaged attention weights of ViTL/16 across all heads and then recursively
multiplied the weight matrices of all layers. This accounts for the mixing of attention
across tokens through all layers.
We used
[this notebook](https://colab.research.google.com/github/jeonsworld/ViT-pytorch/blob/main/visualize_attention_map.ipynb)
and modified the attention rollout code from it for compatibility with our models.
```python
def attention_rollout_map(image, attention_score_dict, model_type):
num_cls_tokens = 2 if "distilled" in model_type else 1
# Stack the individual attention matrices from individual Transformer blocks.
attn_mat = ops.stack([attention_score_dict[k] for k in attention_score_dict.keys()])
attn_mat = ops.squeeze(attn_mat, axis=1)
# Average the attention weights across all heads.
attn_mat = ops.mean(attn_mat, axis=1)
# To account for residual connections, we add an identity matrix to the
# attention matrix and re-normalize the weights.
residual_attn = ops.eye(attn_mat.shape[1])
aug_attn_mat = attn_mat + residual_attn
aug_attn_mat = aug_attn_mat / ops.sum(aug_attn_mat, axis=-1)[..., None]
aug_attn_mat = ops.convert_to_numpy(aug_attn_mat)
# Recursively multiply the weight matrices.
joint_attentions = np.zeros(aug_attn_mat.shape)
joint_attentions[0] = aug_attn_mat[0]
for n in range(1, aug_attn_mat.shape[0]):
joint_attentions[n] = np.matmul(aug_attn_mat[n], joint_attentions[n - 1])
# Attention from the output token to the input space.
v = joint_attentions[-1]
grid_size = int(np.sqrt(aug_attn_mat.shape[-1]))
mask = v[0, num_cls_tokens:].reshape(grid_size, grid_size)
mask = cv2.resize(mask / mask.max(), image.size)[..., np.newaxis]
result = (mask * image).astype("uint8")
return result
```
Let's now use these utilities to generate an attention plot based on our previous results
from the "Running regular inference with the model" section. Following are the links to
download each individual model:
* [Original ViT model (pretrained on ImageNet-21k)](https://drive.google.com/file/d/1mbtnliT3jRb3yJUHhbItWw8unfYZw8KJ/view?usp=sharing)
* [Original ViT model (pretrained on ImageNet-1k)](https://drive.google.com/file/d/1ApOdYe4NXxhPhJABefgZ3KVvqsQzhCL7/view?usp=sharing)
* [DINO model (pretrained on ImageNet-1k)](https://drive.google.com/file/d/16_1oDm0PeCGJ_KGBG5UKVN7TsAtiRNrN/view?usp=sharing)
* [DeiT models (pretrained on ImageNet-1k including distilled and non-distilled ones)](https://tfhub.dev/sayakpaul/collections/deit/1)
```python
attn_rollout_result = attention_rollout_map(
image, attention_score_dict, model_type="original_vit"
)
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(8, 10))
fig.suptitle(f"Predicted label: {predicted_label}.", fontsize=20)
_ = ax1.imshow(image)
_ = ax2.imshow(attn_rollout_result)
ax1.set_title("Input Image", fontsize=16)
ax2.set_title("Attention Map", fontsize=16)
ax1.axis("off")
ax2.axis("off")
fig.tight_layout()
fig.subplots_adjust(top=1.35)
fig.show()
```

### Inspecting the plots
**How can we quantify the information flow that propagates through the
attention layers?**
We notice that the model is able to focus its attention on the
salient parts of the input image. We encourage you to apply this
method to the other models we mentioned and compare the results. The
attention rollout plots will differ according to the tasks and
augmentation the model was trained with. We observe that DeiT has the
best rollout plot, likely due to its augmentation regime.
---
## Method III: Attention heatmaps
A simple yet useful way to probe into the representation of a Vision Transformer is to
visualise the attention maps overlaid on the input images. This helps form an intuition
about what the model attends to. We use the DINO model for this purpose, because it
yields better attention heatmaps.
```python
# Load the model.
vit_dino_base16 = load_model(MODELS_ZIP["vit_dino_base16"])
print("Model loaded.")
# Preprocess the same image but with normalization.
img_url = "https://dl.fbaipublicfiles.com/dino/img.png"
image, preprocessed_image = load_image_from_url(img_url, model_type="dino")
# Grab the predictions.
predictions, attention_score_dict = split_prediction_and_attention_scores(
vit_dino_base16.predict(preprocessed_image)
)
```
<div class="k-default-codeblock">
```
Model loaded.
1/1 ━━━━━━━━━━━━━━━━━━━━ 4s 4s/step
```
</div>
A Transformer block consists of multiple heads. Each head in a Transformer block projects
the input data to different sub-spaces. This helps each individual head to attend to
different parts of the image. Therefore, it makes sense to visualize each attention head
map separately, to see what each head looks at.
**Notes**:
* The following code has been copy-modified from the
[original DINO codebase](https://github.com/facebookresearch/dino/blob/main/visualize_attention.py).
* Here we grab the attention maps of the last Transformer block.
* [DINO](https://arxiv.org/abs/2104.14294) was pretrained using a self-supervised
objective.
```python
def attention_heatmap(attention_score_dict, image, model_type="dino"):
num_tokens = 2 if "distilled" in model_type else 1
# Sort the Transformer blocks in order of their depth.
attention_score_list = list(attention_score_dict.keys())
attention_score_list.sort(key=lambda x: int(x.split("_")[-2]), reverse=True)
# Process the attention maps for overlay.
w_featmap = image.shape[2] // PATCH_SIZE
h_featmap = image.shape[1] // PATCH_SIZE
attention_scores = attention_score_dict[attention_score_list[0]]
# Taking the representations from CLS token.
attentions = attention_scores[0, :, 0, num_tokens:].reshape(num_heads, -1)
# Reshape the attention scores to resemble mini patches.
attentions = attentions.reshape(num_heads, w_featmap, h_featmap)
attentions = attentions.transpose((1, 2, 0))
# Resize the attention patches to 224x224 (224: 14x16).
attentions = ops.image.resize(
attentions, size=(h_featmap * PATCH_SIZE, w_featmap * PATCH_SIZE)
)
return attentions
```
We can use the same image we used for inference with DINO and the `attention_score_dict`
we extracted from the results.
```python
# De-normalize the image for visual clarity.
in1k_mean = np.array([0.485 * 255, 0.456 * 255, 0.406 * 255])
in1k_std = np.array([0.229 * 255, 0.224 * 255, 0.225 * 255])
preprocessed_img_orig = (preprocessed_image * in1k_std) + in1k_mean
preprocessed_img_orig = preprocessed_img_orig / 255.0
preprocessed_img_orig = ops.convert_to_numpy(ops.clip(preprocessed_img_orig, 0.0, 1.0))
# Generate the attention heatmaps.
attentions = attention_heatmap(attention_score_dict, preprocessed_img_orig)
# Plot the maps.
fig, axes = plt.subplots(nrows=3, ncols=4, figsize=(13, 13))
img_count = 0
for i in range(3):
for j in range(4):
if img_count < len(attentions):
axes[i, j].imshow(preprocessed_img_orig[0])
axes[i, j].imshow(attentions[..., img_count], cmap="inferno", alpha=0.6)
axes[i, j].title.set_text(f"Attention head: {img_count}")
axes[i, j].axis("off")
img_count += 1
```

### Inspecting the plots
**How can we qualitatively evaluate the attention weights?**
The attention weights of a Transformer block are computed between the
key and the query. The weights quantify how important the key is to the query.
In ViTs, the key and the query come from the same image, hence
the weights determine which part of the image is important.
Plotting the attention weights overlaid on the image gives us a great
intuition about the parts of the image that are important to the Transformer.
This plot qualitatively evaluates the purpose of the attention weights.
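To make the relationship between queries, keys, and attention weights concrete, here is a toy scaled dot-product attention computation on random tensors. This is the standard formulation, not the internals of the loaded models:
```python
# Toy scaled dot-product attention on random tensors.
num_tokens, head_dim = 5, 8
q = np.random.randn(num_tokens, head_dim)  # queries
k = np.random.randn(num_tokens, head_dim)  # keys

logits = q @ k.T / np.sqrt(head_dim)
weights = np.exp(logits) / np.exp(logits).sum(axis=-1, keepdims=True)  # softmax

print(weights.shape)  # (num_tokens, num_tokens)
print(weights.sum(axis=-1))  # Each row sums to 1.
```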
---
## Method IV: Visualizing the learned projection filters
After extracting non-overlapping patches, ViTs flatten those patches across their
spatial dimensions, and then linearly project them. One might wonder, what do these
projections look like? Below, we take the ViT B-16 model and visualize its
learned projections.
```python
def extract_weights(model, name):
for variable in model.weights:
if variable.name.startswith(name):
return variable.numpy()
# Extract the projections.
projections = extract_weights(vit_base_i21k_patch16_224, "conv_projection/kernel")
projection_dim = projections.shape[-1]
patch_h, patch_w, patch_channels = projections.shape[:-1]
# Scale the projections.
scaled_projections = MinMaxScaler().fit_transform(
projections.reshape(-1, projection_dim)
)
# Reshape the scaled projections so that the leading
# three dimensions resemble an image.
scaled_projections = scaled_projections.reshape(patch_h, patch_w, patch_channels, -1)
# Visualize the first 128 filters of the learned
# projections.
fig, axes = plt.subplots(nrows=8, ncols=16, figsize=(13, 8))
img_count = 0
limit = 128
for i in range(8):
for j in range(16):
if img_count < limit:
axes[i, j].imshow(scaled_projections[..., img_count])
axes[i, j].axis("off")
img_count += 1
fig.tight_layout()
```
<div class="k-default-codeblock">
```
WARNING:matplotlib.image:Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
WARNING:matplotlib.image:Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
WARNING:matplotlib.image:Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
WARNING:matplotlib.image:Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
WARNING:matplotlib.image:Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
WARNING:matplotlib.image:Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
```
</div>

### Inspecting the plots
**What do the projection filters learn?**
[When visualized](https://distill.pub/2017/feature-visualization/),
the kernels of a convolutional neural network show
the pattern that they look for in an image. This could be circles,
sometimes lines -- when combined together (in later stages of a ConvNet), the filters
transform into more complex shapes. We have found a stark similarity between such
ConvNet kernels and the projection filters of a ViT.
---
## Method V: Visualizing the positional embeddings
Transformers are permutation-invariant. This means that they do not take into account
the spatial position of the input tokens. To overcome this
limitation, we add positional information to the input tokens.
The positional information can be in the form of learned positional
embeddings or handcrafted constant embeddings. In our case, all
three variants of ViTs feature learned positional embeddings.
In this section, we visualize the similarities of the
learned positional embeddings with themselves. Below, we take the ViT B-16
model and visualize the similarity of the positional embeddings by
taking their dot-product.
```python
position_embeddings = extract_weights(vit_base_i21k_patch16_224, "pos_embedding")
# Discard the batch dimension and the position embeddings of the
# cls token.
position_embeddings = position_embeddings.squeeze()[1:, ...]
similarity = position_embeddings @ position_embeddings.T
plt.imshow(similarity, cmap="inferno")
plt.show()
```

### Inspecting the plots
**What do the positional embeddings tell us?**
The plot has a distinctive diagonal pattern. The main diagonal is the brightest
signifying that a position is the most similar to itself. An interesting
pattern to look out for is the repeating diagonals. The repeating pattern
portrays a sinusoidal function which is close in essence to what was proposed by
[Vaswani et al.](https://arxiv.org/abs/1706.03762) as a hand-crafted feature.
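Since the raw dot-product also reflects the magnitude of each embedding, a complementary check (not part of the original analysis) is to look at cosine similarity, which normalizes the embeddings first:
```python
# Cosine similarity variant: normalize each positional embedding before the
# dot product so that only the direction of the embeddings matters.
norms = np.linalg.norm(position_embeddings, axis=-1, keepdims=True)
normalized = position_embeddings / norms
cosine_similarity = normalized @ normalized.T

plt.imshow(cosine_similarity, cmap="inferno")
plt.title("Cosine similarity of the positional embeddings")
plt.show()
```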
---
## Notes
* DINO extended the attention heatmap generation process to videos. We also
[applied](https://github.com/sayakpaul/probing-vits/blob/main/notebooks/dino-attention-maps-video.ipynb) our DINO implementation on a series of videos and obtained similar
results. Here's one such video of attention heatmaps:

* [Raghu et al.](https://arxiv.org/abs/2108.08810) use an array of techniques to
investigate the representations learned by ViTs and make comparisons with that of
ResNets. We strongly recommend reading their work.
* To author this example, we developed
[this repository](https://github.com/sayakpaul/probing-vits) to guide our readers so that they
can easily reproduce the experiments and extend them.
* Another repository that you may find interesting in this regard is
[`vit-explain`](https://github.com/jacobgil/vit-explain).
* One can also plot the attention rollout and attention heat maps with
custom images using our Hugging Face spaces.
| Attention Heat Maps | Attention Rollout |
| :--: | :--: |
| [](https://huggingface.co/spaces/probing-vits/attention-heat-maps) | [](https://huggingface.co/spaces/probing-vits/attention-rollout) |
---
## Acknowledgements
- [PyImageSearch](https://pyimagesearch.com)
- [Jarvislabs.ai](https://jarvislabs.ai/)
- [GDE Program](https://developers.google.com/programs/experts/)
|
keras-io/examples/vision/md/probing_vits.md/0
|
{
"file_path": "keras-io/examples/vision/md/probing_vits.md",
"repo_id": "keras-io",
"token_count": 9714
}
| 130 |
# Learning to tokenize in Vision Transformers
**Authors:** [Aritra Roy Gosthipaty](https://twitter.com/ariG23498), [Sayak Paul](https://twitter.com/RisingSayak) (equal contribution), converted to Keras 3 by [Muhammad Anas Raza](https://anasrz.com)<br>
**Date created:** 2021/12/10<br>
**Last modified:** 2023/08/14<br>
**Description:** Adaptively generating a smaller number of tokens for Vision Transformers.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/vision/ipynb/token_learner.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/vision/token_learner.py)
---
## Introduction
Vision Transformers ([Dosovitskiy et al.](https://arxiv.org/abs/2010.11929)) and many
other Transformer-based architectures ([Liu et al.](https://arxiv.org/abs/2103.14030),
[Yuan et al.](https://arxiv.org/abs/2101.11986), etc.) have shown strong results in
image recognition. The following provides a brief overview of the components involved in the
Vision Transformer architecture for image classification:
* Extract small patches from input images.
* Linearly project those patches.
* Add positional embeddings to these linear projections.
* Run these projections through a series of Transformer ([Vaswani et al.](https://arxiv.org/abs/1706.03762))
blocks.
* Finally, take the representation from the final Transformer block and add a
classification head.
If we take 224x224 images and extract 16x16 patches, we get a total of 196 patches (also
called tokens) for each image. The number of patches increases as we increase the
resolution, leading to a higher memory footprint. Could we use a reduced
number of patches without having to compromise performance?
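For reference, the 196-token figure follows directly from the resolution and the patch size, and the quick computation below shows how fast the token count grows with resolution:
```python
# Number of tokens for a fixed 16x16 patch size at different resolutions.
patch_size = 16
for resolution in (224, 384, 512):
    num_tokens = (resolution // patch_size) ** 2
    print(f"{resolution}x{resolution} image -> {num_tokens} tokens")
```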
Ryoo et al. investigate this question in
[TokenLearner: Adaptive Space-Time Tokenization for Videos](https://openreview.net/forum?id=z-l1kpDXs88).
They introduce a novel module called **TokenLearner** that can help reduce the number
of patches used by a Vision Transformer (ViT) in an adaptive manner. With TokenLearner
incorporated in the standard ViT architecture, they are able to reduce the amount of
compute (measured in FLOPS) used by the model.
In this example, we implement the TokenLearner module and demonstrate its
performance with a mini ViT and the CIFAR-10 dataset. We make use of the following
references:
* [Official TokenLearner code](https://github.com/google-research/scenic/blob/main/scenic/projects/token_learner/model.py)
* [Image Classification with ViTs on keras.io](https://keras.io/examples/vision/image_classification_with_vision_transformer/)
* [TokenLearner slides from NeurIPS 2021](https://nips.cc/media/neurips-2021/Slides/26578.pdf)
---
## Imports
```python
import keras
from keras import layers
from keras import ops
from tensorflow import data as tf_data
from datetime import datetime
import matplotlib.pyplot as plt
import numpy as np
import math
```
---
## Hyperparameters
Please feel free to change the hyperparameters and check your results. The best way to
develop intuition about the architecture is to experiment with it.
```python
# DATA
BATCH_SIZE = 256
AUTO = tf_data.AUTOTUNE
INPUT_SHAPE = (32, 32, 3)
NUM_CLASSES = 10
# OPTIMIZER
LEARNING_RATE = 1e-3
WEIGHT_DECAY = 1e-4
# TRAINING
EPOCHS = 1
# AUGMENTATION
IMAGE_SIZE = 48 # We will resize input images to this size.
PATCH_SIZE = 6 # Size of the patches to be extracted from the input images.
NUM_PATCHES = (IMAGE_SIZE // PATCH_SIZE) ** 2
# ViT ARCHITECTURE
LAYER_NORM_EPS = 1e-6
PROJECTION_DIM = 128
NUM_HEADS = 4
NUM_LAYERS = 4
MLP_UNITS = [
PROJECTION_DIM * 2,
PROJECTION_DIM,
]
# TOKENLEARNER
NUM_TOKENS = 4
```
---
## Load and prepare the CIFAR-10 dataset
```python
# Load the CIFAR-10 dataset.
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
(x_train, y_train), (x_val, y_val) = (
(x_train[:40000], y_train[:40000]),
(x_train[40000:], y_train[40000:]),
)
print(f"Training samples: {len(x_train)}")
print(f"Validation samples: {len(x_val)}")
print(f"Testing samples: {len(x_test)}")
# Convert to tf.data.Dataset objects.
train_ds = tf_data.Dataset.from_tensor_slices((x_train, y_train))
train_ds = train_ds.shuffle(BATCH_SIZE * 100).batch(BATCH_SIZE).prefetch(AUTO)
val_ds = tf_data.Dataset.from_tensor_slices((x_val, y_val))
val_ds = val_ds.batch(BATCH_SIZE).prefetch(AUTO)
test_ds = tf_data.Dataset.from_tensor_slices((x_test, y_test))
test_ds = test_ds.batch(BATCH_SIZE).prefetch(AUTO)
```
<div class="k-default-codeblock">
```
Training samples: 40000
Validation samples: 10000
Testing samples: 10000
```
</div>
---
## Data augmentation
The augmentation pipeline consists of:
- Rescaling
- Resizing
- Random cropping (fixed-size or random-size)
- Random horizontal flipping
```python
data_augmentation = keras.Sequential(
[
layers.Rescaling(1 / 255.0),
layers.Resizing(INPUT_SHAPE[0] + 20, INPUT_SHAPE[0] + 20),
layers.RandomCrop(IMAGE_SIZE, IMAGE_SIZE),
layers.RandomFlip("horizontal"),
],
name="data_augmentation",
)
```
Note that image data augmentation layers do not apply data transformations at inference time.
This means that when these layers are called with `training=False` they behave differently. Refer
[to the documentation](https://keras.io/api/layers/preprocessing_layers/image_augmentation/) for more
details.
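As a quick, illustrative check (not part of the original tutorial), you can call the pipeline with both flags; the random crop and flip are only active in training mode, while the rescaling and resizing steps are always applied:
```python
# Run the same image through the pipeline in training and inference mode.
sample = np.random.uniform(0, 255, size=(1, *INPUT_SHAPE)).astype("float32")
augmented = data_augmentation(sample, training=True)  # random crop + flip applied
deterministic = data_augmentation(sample, training=False)  # no random transformations
print(augmented.shape, deterministic.shape)  # both (1, IMAGE_SIZE, IMAGE_SIZE, 3)
```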
---
## Positional embedding module
A [Transformer](https://arxiv.org/abs/1706.03762) architecture consists of **multi-head
self attention** layers and **fully-connected feed forward** networks (MLP) as the main
components. Both these components are _permutation invariant_: they're not aware of
feature order.
To overcome this problem we inject tokens with positional information. The
`position_embedding` function adds this positional information to the linearly projected
tokens.
```python
class PatchEncoder(layers.Layer):
def __init__(self, num_patches, projection_dim):
super().__init__()
self.num_patches = num_patches
self.position_embedding = layers.Embedding(
input_dim=num_patches, output_dim=projection_dim
)
def call(self, patch):
positions = ops.expand_dims(
ops.arange(start=0, stop=self.num_patches, step=1), axis=0
)
encoded = patch + self.position_embedding(positions)
return encoded
def get_config(self):
config = super().get_config()
config.update({"num_patches": self.num_patches})
return config
```
---
## MLP block for Transformer
This serves as the Fully Connected Feed Forward block for our Transformer.
```python
def mlp(x, dropout_rate, hidden_units):
# Iterate over the hidden units and
# add Dense => Dropout.
for units in hidden_units:
x = layers.Dense(units, activation=ops.gelu)(x)
x = layers.Dropout(dropout_rate)(x)
return x
```
---
## TokenLearner module
The following figure presents a pictorial overview of the module
([source](https://ai.googleblog.com/2021/12/improving-vision-transformer-efficiency.html)).

The TokenLearner module takes as input an image-shaped tensor. It then passes it through
multiple single-channel convolutional layers extracting different spatial attention maps
focusing on different parts of the input. These attention maps are then element-wise
multiplied with the input, and the result is aggregated with pooling. This pooled output can be
treated as a summary of the input and has far fewer patches (for example, 8)
than the original (for example, 196).
Using multiple convolution layers helps with expressivity. Imposing a form of spatial
attention helps retain relevant information from the inputs. Both of these components are
crucial to make TokenLearner work, especially when we are significantly reducing the number of patches.
```python
def token_learner(inputs, number_of_tokens=NUM_TOKENS):
# Layer normalize the inputs.
x = layers.LayerNormalization(epsilon=LAYER_NORM_EPS)(inputs) # (B, H, W, C)
# Applying Conv2D => Reshape => Permute
# The reshape and permute is done to help with the next steps of
# multiplication and Global Average Pooling.
attention_maps = keras.Sequential(
[
# 3 layers of conv with gelu activation as suggested
# in the paper.
layers.Conv2D(
filters=number_of_tokens,
kernel_size=(3, 3),
activation=ops.gelu,
padding="same",
use_bias=False,
),
layers.Conv2D(
filters=number_of_tokens,
kernel_size=(3, 3),
activation=ops.gelu,
padding="same",
use_bias=False,
),
layers.Conv2D(
filters=number_of_tokens,
kernel_size=(3, 3),
activation=ops.gelu,
padding="same",
use_bias=False,
),
# This conv layer will generate the attention maps
layers.Conv2D(
filters=number_of_tokens,
kernel_size=(3, 3),
activation="sigmoid", # Note sigmoid for [0, 1] output
padding="same",
use_bias=False,
),
# Reshape and Permute
layers.Reshape((-1, number_of_tokens)), # (B, H*W, num_of_tokens)
layers.Permute((2, 1)),
]
)(
x
) # (B, num_of_tokens, H*W)
# Reshape the input to align it with the output of the conv block.
num_filters = inputs.shape[-1]
inputs = layers.Reshape((1, -1, num_filters))(inputs) # inputs == (B, 1, H*W, C)
# Element-Wise multiplication of the attention maps and the inputs
attended_inputs = (
ops.expand_dims(attention_maps, axis=-1) * inputs
) # (B, num_tokens, H*W, C)
# Global average pooling the element wise multiplication result.
outputs = ops.mean(attended_inputs, axis=2) # (B, num_tokens, C)
return outputs
```
---
## Transformer block
```python
def transformer(encoded_patches):
# Layer normalization 1.
x1 = layers.LayerNormalization(epsilon=LAYER_NORM_EPS)(encoded_patches)
# Multi Head Self Attention layer 1.
attention_output = layers.MultiHeadAttention(
num_heads=NUM_HEADS, key_dim=PROJECTION_DIM, dropout=0.1
)(x1, x1)
# Skip connection 1.
x2 = layers.Add()([attention_output, encoded_patches])
# Layer normalization 2.
x3 = layers.LayerNormalization(epsilon=LAYER_NORM_EPS)(x2)
# MLP layer 1.
x4 = mlp(x3, hidden_units=MLP_UNITS, dropout_rate=0.1)
# Skip connection 2.
encoded_patches = layers.Add()([x4, x2])
return encoded_patches
```
---
## ViT model with the TokenLearner module
```python
def create_vit_classifier(use_token_learner=True, token_learner_units=NUM_TOKENS):
inputs = layers.Input(shape=INPUT_SHAPE) # (B, H, W, C)
# Augment data.
augmented = data_augmentation(inputs)
# Create patches and project the patches.
projected_patches = layers.Conv2D(
filters=PROJECTION_DIM,
kernel_size=(PATCH_SIZE, PATCH_SIZE),
strides=(PATCH_SIZE, PATCH_SIZE),
padding="VALID",
)(augmented)
_, h, w, c = projected_patches.shape
projected_patches = layers.Reshape((h * w, c))(
projected_patches
) # (B, number_patches, projection_dim)
# Add positional embeddings to the projected patches.
encoded_patches = PatchEncoder(
num_patches=NUM_PATCHES, projection_dim=PROJECTION_DIM
)(
projected_patches
) # (B, number_patches, projection_dim)
encoded_patches = layers.Dropout(0.1)(encoded_patches)
# Iterate over the number of layers and stack up blocks of
# Transformer.
for i in range(NUM_LAYERS):
# Add a Transformer block.
encoded_patches = transformer(encoded_patches)
# Add TokenLearner layer in the middle of the
# architecture. The paper suggests that anywhere
# between 1/2 or 3/4 will work well.
if use_token_learner and i == NUM_LAYERS // 2:
_, hh, c = encoded_patches.shape
h = int(math.sqrt(hh))
encoded_patches = layers.Reshape((h, h, c))(
encoded_patches
) # (B, h, h, projection_dim)
encoded_patches = token_learner(
encoded_patches, token_learner_units
) # (B, num_tokens, c)
# Layer normalization and Global average pooling.
representation = layers.LayerNormalization(epsilon=LAYER_NORM_EPS)(encoded_patches)
representation = layers.GlobalAvgPool1D()(representation)
# Classify outputs.
outputs = layers.Dense(NUM_CLASSES, activation="softmax")(representation)
# Create the Keras model.
model = keras.Model(inputs=inputs, outputs=outputs)
return model
```
As shown in the [TokenLearner paper](https://openreview.net/forum?id=z-l1kpDXs88), it is
almost always advantageous to include the TokenLearner module in the middle of the
network.
---
## Training utility
```python
def run_experiment(model):
# Initialize the AdamW optimizer.
optimizer = keras.optimizers.AdamW(
learning_rate=LEARNING_RATE, weight_decay=WEIGHT_DECAY
)
# Compile the model with the optimizer, loss function
# and the metrics.
model.compile(
optimizer=optimizer,
loss="sparse_categorical_crossentropy",
metrics=[
keras.metrics.SparseCategoricalAccuracy(name="accuracy"),
keras.metrics.SparseTopKCategoricalAccuracy(5, name="top-5-accuracy"),
],
)
# Define callbacks
checkpoint_filepath = "/tmp/checkpoint.weights.h5"
checkpoint_callback = keras.callbacks.ModelCheckpoint(
checkpoint_filepath,
monitor="val_accuracy",
save_best_only=True,
save_weights_only=True,
)
# Train the model.
_ = model.fit(
train_ds,
epochs=EPOCHS,
validation_data=val_ds,
callbacks=[checkpoint_callback],
)
model.load_weights(checkpoint_filepath)
_, accuracy, top_5_accuracy = model.evaluate(test_ds)
print(f"Test accuracy: {round(accuracy * 100, 2)}%")
print(f"Test top 5 accuracy: {round(top_5_accuracy * 100, 2)}%")
```
---
## Train and evaluate a ViT with TokenLearner
```python
vit_token_learner = create_vit_classifier()
run_experiment(vit_token_learner)
```
<div class="k-default-codeblock">
```
157/157 ━━━━━━━━━━━━━━━━━━━━ 303s 2s/step - accuracy: 0.1158 - loss: 2.4798 - top-5-accuracy: 0.5352 - val_accuracy: 0.2206 - val_loss: 2.0292 - val_top-5-accuracy: 0.7688
40/40 ━━━━━━━━━━━━━━━━━━━━ 5s 133ms/step - accuracy: 0.2298 - loss: 2.0179 - top-5-accuracy: 0.7723
Test accuracy: 22.9%
Test top 5 accuracy: 77.22%
```
</div>
---
## Results
We experimented with and without the TokenLearner inside the mini ViT we implemented
(with the same hyperparameters presented in this example). Here are our results:
| **TokenLearner** | **# tokens in<br> TokenLearner** | **Top-1 Acc<br>(Averaged across 5 runs)** | **GFLOPs** | **TensorBoard** |
|:---:|:---:|:---:|:---:|:---:|
| N | - | 56.112% | 0.0184 | [Link](https://tensorboard.dev/experiment/vkCwM49dQZ2RiK0ZT4mj7w/) |
| Y | 8 | **56.55%** | **0.0153** | [Link](https://tensorboard.dev/experiment/vkCwM49dQZ2RiK0ZT4mj7w/) |
| N | - | 56.37% | 0.0184 | [Link](https://tensorboard.dev/experiment/hdyJ4wznQROwqZTgbtmztQ/) |
| Y | 4 | **56.4980%** | **0.0147** | [Link](https://tensorboard.dev/experiment/hdyJ4wznQROwqZTgbtmztQ/) |
| N | - (# Transformer layers: 8) | 55.36% | 0.0359 | [Link](https://tensorboard.dev/experiment/sepBK5zNSaOtdCeEG6SV9w/) |
TokenLearner is able to consistently outperform our mini ViT without the module. It is
also interesting to notice that it was also able to outperform a deeper version of our
mini ViT (with 8 layers). The authors also report similar observations in the paper and
they attribute this to the adaptiveness of TokenLearner.
One should also note that the FLOPs count **decreases** considerably with the addition of
the TokenLearner module. With a lower FLOPs count, the TokenLearner module is able to
deliver better results. This aligns very well with the authors' findings.
Additionally, the authors [introduced](https://github.com/google-research/scenic/blob/main/scenic/projects/token_learner/model.py#L104)
a newer version of the TokenLearner for smaller training data regimes. Quoting the authors:
> Instead of using 4 conv. layers with small channels to implement spatial attention,
this version uses 2 grouped conv. layers with more channels. It also uses softmax
instead of sigmoid. We confirmed that this version works better when having limited
training data, such as training with ImageNet1K from scratch.
We experimented with this module and in the following table we summarize the results:
| **# Groups** | **# Tokens** | **Top-1 Acc** | **GFLOPs** | **TensorBoard** |
|:---:|:---:|:---:|:---:|:---:|
| 4 | 4 | 54.638% | 0.0149 | [Link](https://tensorboard.dev/experiment/KmfkGqAGQjikEw85phySmw/) |
| 8 | 8 | 54.898% | 0.0146 | [Link](https://tensorboard.dev/experiment/0PpgYOq9RFWV9njX6NJQ2w/) |
| 4 | 8 | 55.196% | 0.0149 | [Link](https://tensorboard.dev/experiment/WUkrHbZASdu3zrfmY4ETZg/) |
Please note that we used the same hyperparameters presented in this example. Our
implementation is available
[in this notebook](https://github.com/ariG23498/TokenLearner/blob/master/TokenLearner-V1.1.ipynb).
We acknowledge that the results with this new TokenLearner module are slightly lower
than expected; this might improve with hyperparameter tuning.
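For reference, below is a rough sketch of how the newer TokenLearner variant described above could be expressed with the building blocks used in this example. The function name `token_learner_v11()`, the kernel sizes, the channel widths, and the group count are our own assumptions made for illustration; refer to the official implementation linked above for the exact configuration.
```python
def token_learner_v11(inputs, number_of_tokens=NUM_TOKENS, num_groups=4):
    # Layer normalize the inputs.
    x = layers.LayerNormalization(epsilon=LAYER_NORM_EPS)(inputs)  # (B, H, W, C)
    # Two grouped convolutions with wider channels (instead of four
    # single-channel convolutions), followed by a projection down to
    # the number of tokens.
    x = layers.Conv2D(
        filters=PROJECTION_DIM,
        kernel_size=(1, 1),
        groups=num_groups,
        activation=ops.gelu,
        padding="same",
        use_bias=False,
    )(x)
    attention_maps = layers.Conv2D(
        filters=number_of_tokens,
        kernel_size=(1, 1),
        padding="same",
        use_bias=False,
    )(x)  # (B, H, W, num_tokens)
    # Softmax over the spatial locations instead of a sigmoid.
    attention_maps = layers.Reshape((-1, number_of_tokens))(attention_maps)  # (B, H*W, num_tokens)
    attention_maps = layers.Permute((2, 1))(attention_maps)  # (B, num_tokens, H*W)
    attention_maps = layers.Softmax(axis=-1)(attention_maps)
    # Weighted average of the input features (the softmax weights sum to 1
    # over the spatial locations).
    num_filters = inputs.shape[-1]
    reshaped_inputs = layers.Reshape((-1, num_filters))(inputs)  # (B, H*W, C)
    outputs = ops.einsum("bts,bsc->btc", attention_maps, reshaped_inputs)  # (B, num_tokens, C)
    return outputs
```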
*Note*: To compute the FLOPs of our models we used
[this utility](https://github.com/AdityaKane2001/regnety/blob/main/regnety/utils/model_utils.py#L27)
from [this repository](https://github.com/AdityaKane2001/regnety).
---
## Number of parameters
You may have noticed that adding the TokenLearner module increases the number of
parameters of the base network. But that does not mean it is less efficient as shown by
[Dehghani et al.](https://arxiv.org/abs/2110.12894). Similar findings were reported
by [Bello et al.](https://arxiv.org/abs/2103.07579) as well. The TokenLearner module
helps reduce the FLOPs in the overall network, thereby helping to reduce the memory
footprint.
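If you want to check the parameter counts yourself, the comparison is straightforward with the `create_vit_classifier()` utility defined earlier (illustrative; the exact counts depend on the hyperparameters you chose):
```python
# Compare parameter counts of the mini ViT with and without TokenLearner.
vit_with_tl = create_vit_classifier(use_token_learner=True)
vit_without_tl = create_vit_classifier(use_token_learner=False)
print("With TokenLearner:   ", vit_with_tl.count_params())
print("Without TokenLearner:", vit_without_tl.count_params())
```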
---
## Final notes
* TokenFuser: The authors of the paper also propose another module named TokenFuser. This
module helps in remapping the representation of the TokenLearner output back to its
original spatial resolution. To reuse the TokenLearner in the ViT architecture, the
TokenFuser is a must. We first learn the tokens from the TokenLearner, build a
representation of the tokens from a Transformer layer and then remap the representation
into the original spatial resolution, so that it can again be consumed by a TokenLearner.
Note that you can only use the TokenLearner module once in the entire ViT model if it is not
paired with the TokenFuser.
* Use of these modules for video: The authors also suggest that TokenFuser goes really
well with Vision Transformers for Videos ([Arnab et al.](https://arxiv.org/abs/2103.15691)).
We are grateful to [JarvisLabs](https://jarvislabs.ai/) and
[Google Developers Experts](https://developers.google.com/programs/experts/)
program for helping with GPU credits. Also, we are thankful to Michael Ryoo (first
author of TokenLearner) for fruitful discussions.
|
keras-io/examples/vision/md/token_learner.md/0
|
{
"file_path": "keras-io/examples/vision/md/token_learner.md",
"repo_id": "keras-io",
"token_count": 7503
}
| 131 |
"""
Title: Near-duplicate image search
Author: [Sayak Paul](https://twitter.com/RisingSayak)
Date created: 2021/09/10
Last modified: 2023/08/30
Description: Building a near-duplicate image search utility using deep learning and locality-sensitive hashing.
Accelerator: GPU
"""
"""
## Introduction
Fetching similar images in (near) real time is an important use case of information
retrieval systems. Some popular products utilizing it include Pinterest, Google Image
Search, etc. In this example, we will build a similar image search utility using
[Locality Sensitive Hashing](https://towardsdatascience.com/understanding-locality-sensitive-hashing-49f6d1f6134)
(LSH) and [random projection](https://en.wikipedia.org/wiki/Random_projection) on top
of the image representations computed by a pretrained image classifier.
This kind of search engine is also known
as a _near-duplicate (or near-dup) image detector_.
We will also look into optimizing the inference performance of
our search utility on GPU using [TensorRT](https://developer.nvidia.com/tensorrt).
There are other examples under [keras.io/examples/vision](https://keras.io/examples/vision)
that are worth checking out in this regard:
* [Metric learning for image similarity search](https://keras.io/examples/vision/metric_learning)
* [Image similarity estimation using a Siamese Network with a triplet loss](https://keras.io/examples/vision/siamese_network)
Finally, this example uses the following resource as a reference and as such reuses some
of its code:
[Locality Sensitive Hashing for Similar Item Search](https://towardsdatascience.com/locality-sensitive-hashing-for-music-search-f2f1940ace23).
_Note that in order to optimize the performance of our parser,
you should have a GPU runtime available._
"""
"""
## Setup
"""
"""shell
pip install tensorrt
"""
"""
## Imports
"""
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorrt
import numpy as np
import time
import tensorflow_datasets as tfds
tfds.disable_progress_bar()
"""
## Load the dataset and create a training set of 1,000 images
To keep the run time of the example short, we will be using a subset of 1,000 images from
the `tf_flowers` dataset (available through
[TensorFlow Datasets](https://www.tensorflow.org/datasets/catalog/tf_flowers))
to build our vocabulary.
"""
train_ds, validation_ds = tfds.load(
"tf_flowers", split=["train[:85%]", "train[85%:]"], as_supervised=True
)
IMAGE_SIZE = 224
NUM_IMAGES = 1000
images = []
labels = []
for image, label in train_ds.take(NUM_IMAGES):
image = tf.image.resize(image, (IMAGE_SIZE, IMAGE_SIZE))
images.append(image.numpy())
labels.append(label.numpy())
images = np.array(images)
labels = np.array(labels)
"""
## Load a pre-trained model
"""
"""
In this section, we load an image classification model that was trained on the
`tf_flowers` dataset. 85% of the total images were used to build the training set. For
more details on the training, refer to
[this notebook](https://github.com/sayakpaul/near-dup-parser/blob/main/bit-supervised-training.ipynb).
The underlying model is a BiT-ResNet (proposed in
[Big Transfer (BiT): General Visual Representation Learning](https://arxiv.org/abs/1912.11370)).
The BiT-ResNet family of models is known to provide excellent transfer performance across
a wide variety of different downstream tasks.
"""
"""shell
wget -q https://github.com/sayakpaul/near-dup-parser/releases/download/v0.1.0/flower_model_bit_0.96875.zip
unzip -qq flower_model_bit_0.96875.zip
"""
bit_model = tf.keras.models.load_model("flower_model_bit_0.96875")
bit_model.count_params()
"""
## Create an embedding model
To retrieve similar images given a query image, we need to first generate vector
representations of all the images involved. We do this via an
embedding model that extracts output features from our pretrained classifier and
normalizes the resulting feature vectors.
"""
embedding_model = tf.keras.Sequential(
[
tf.keras.layers.Input((IMAGE_SIZE, IMAGE_SIZE, 3)),
tf.keras.layers.Rescaling(scale=1.0 / 255),
bit_model.layers[1],
tf.keras.layers.Normalization(mean=0, variance=1),
],
name="embedding_model",
)
embedding_model.summary()
"""
Take note of the normalization layer inside the model. It is used to project the
representation vectors onto the unit sphere.
"""
"""
## Hashing utilities
"""
def hash_func(embedding, random_vectors):
embedding = np.array(embedding)
# Random projection.
bools = np.dot(embedding, random_vectors) > 0
return [bool2int(bool_vec) for bool_vec in bools]
def bool2int(x):
y = 0
for i, j in enumerate(x):
if j:
y += 1 << i
return y
"""
The shape of the vectors coming out of `embedding_model` is `(2048,)`, and considering practical
aspects (storage, retrieval performance, etc.) it is quite large. So, we need
to reduce the dimensionality of the embedding vectors without reducing their information
content. This is where *random projection* comes into the picture.
It is based on the principle that if the
distance between a group of points on a given plane is _approximately_ preserved, the
dimensionality of that plane can further be reduced.
Inside `hash_func()`, we first reduce the dimensionality of the embedding vectors. Then
we compute the bitwise hash values of the images to determine their hash buckets. Images
having same hash values are likely to go into the same hash bucket. From a deployment
perspective, bitwise hash values are cheaper to store and operate on.
"""
"""
## Query utilities
The `Table` class is responsible for building a single hash table. Each entry in the hash
table is a mapping between the reduced embedding of an image from our dataset and a
unique identifier. Because our dimensionality reduction technique involves randomness, it
can so happen that similar images are not mapped to the same hash bucket every time the
process runs. To reduce this effect, we will take results from multiple tables into
consideration -- the number of tables and the reduction dimensionality are the key
hyperparameters here.
Crucially, you wouldn't reimplement locality-sensitive hashing yourself when working with
real world applications. Instead, you'd likely use one of the following popular libraries:
* [ScaNN](https://github.com/google-research/google-research/tree/master/scann)
* [Annoy](https://github.com/spotify/annoy)
* [Vald](https://github.com/vdaas/vald)
"""
class Table:
def __init__(self, hash_size, dim):
self.table = {}
self.hash_size = hash_size
self.random_vectors = np.random.randn(hash_size, dim).T
def add(self, id, vectors, label):
# Create a unique identifier.
entry = {"id_label": str(id) + "_" + str(label)}
# Compute the hash values.
hashes = hash_func(vectors, self.random_vectors)
# Add the hash values to the current table.
for h in hashes:
if h in self.table:
self.table[h].append(entry)
else:
self.table[h] = [entry]
def query(self, vectors):
# Compute hash value for the query vector.
hashes = hash_func(vectors, self.random_vectors)
results = []
# Loop over the query hashes and determine if they exist in
# the current table.
for h in hashes:
if h in self.table:
results.extend(self.table[h])
return results
"""
In the following `LSH` class we will pack the utilities to have multiple hash tables.
"""
class LSH:
def __init__(self, hash_size, dim, num_tables):
self.num_tables = num_tables
self.tables = []
for i in range(self.num_tables):
self.tables.append(Table(hash_size, dim))
def add(self, id, vectors, label):
for table in self.tables:
table.add(id, vectors, label)
def query(self, vectors):
results = []
for table in self.tables:
results.extend(table.query(vectors))
return results
"""
Now we can encapsulate the logic for building and operating with the master LSH table (a
collection of many tables) inside a class. It has two methods:
* `train()`: Responsible for building the final LSH table.
* `query()`: Computes the number of matches given a query image and also quantifies the
similarity score.
"""
class BuildLSHTable:
def __init__(
self,
prediction_model,
concrete_function=False,
hash_size=8,
dim=2048,
num_tables=10,
):
self.hash_size = hash_size
self.dim = dim
self.num_tables = num_tables
self.lsh = LSH(self.hash_size, self.dim, self.num_tables)
self.prediction_model = prediction_model
self.concrete_function = concrete_function
def train(self, training_files):
for id, training_file in enumerate(training_files):
# Unpack the data.
image, label = training_file
if len(image.shape) < 4:
image = image[None, ...]
# Compute embeddings and update the LSH tables.
# More on `self.concrete_function()` later.
if self.concrete_function:
features = self.prediction_model(tf.constant(image))[
"normalization"
].numpy()
else:
features = self.prediction_model.predict(image)
self.lsh.add(id, features, label)
def query(self, image, verbose=True):
# Compute the embeddings of the query image and fetch the results.
if len(image.shape) < 4:
image = image[None, ...]
if self.concrete_function:
features = self.prediction_model(tf.constant(image))[
"normalization"
].numpy()
else:
features = self.prediction_model.predict(image)
results = self.lsh.query(features)
if verbose:
print("Matches:", len(results))
# Calculate Jaccard index to quantify the similarity.
counts = {}
for r in results:
if r["id_label"] in counts:
counts[r["id_label"]] += 1
else:
counts[r["id_label"]] = 1
for k in counts:
counts[k] = float(counts[k]) / self.dim
return counts
"""
## Create LSH tables
With our helper utilities and classes implemented, we can now build our LSH table. Since
we will be benchmarking performance between optimized and unoptimized embedding models, we
will also warm up our GPU to avoid any unfair comparison.
"""
# Utility to warm up the GPU.
def warmup():
dummy_sample = tf.ones((1, IMAGE_SIZE, IMAGE_SIZE, 3))
for _ in range(100):
_ = embedding_model.predict(dummy_sample)
"""
Now we can first do the GPU warm-up and proceed to build the master LSH table with
`embedding_model`.
"""
warmup()
training_files = zip(images, labels)
lsh_builder = BuildLSHTable(embedding_model)
lsh_builder.train(training_files)
"""
At the time of writing, the wall time was 54.1 seconds on a Tesla T4 GPU. This timing may
vary based on the GPU you are using.
"""
"""
## Optimize the model with TensorRT
For NVIDIA-based GPUs, the
[TensorRT framework](https://docs.nvidia.com/deeplearning/frameworks/tf-trt-user-guide/index.html)
can be used to dramatically enhance the inference latency by using various model
optimization techniques like pruning, constant folding, layer fusion, and so on. Here we
will use the `tf.experimental.tensorrt` module to optimize our embedding model.
"""
# First serialize the embedding model as a SavedModel.
embedding_model.save("embedding_model")
# Initialize the conversion parameters.
params = tf.experimental.tensorrt.ConversionParams(
precision_mode="FP16", maximum_cached_engines=16
)
# Run the conversion.
converter = tf.experimental.tensorrt.Converter(
input_saved_model_dir="embedding_model", conversion_params=params
)
converter.convert()
converter.save("tensorrt_embedding_model")
"""
**Notes on the parameters inside of `tf.experimental.tensorrt.ConversionParams()`**:
* `precision_mode` defines the numerical precision of the operations in the
to-be-converted model.
* `maximum_cached_engines` specifies the maximum number of TRT engines that will be
cached to handle dynamic operations (operations with unknown shapes).
To learn more about the other options, refer to the
[official documentation](https://www.tensorflow.org/api_docs/python/tf/experimental/tensorrt/ConversionParams).
You can also explore the different quantization options provided by the
`tf.experimental.tensorrt` module.
"""
# Load the converted model.
root = tf.saved_model.load("tensorrt_embedding_model")
trt_model_function = root.signatures["serving_default"]
"""
## Build LSH tables with optimized model
"""
warmup()
training_files = zip(images, labels)
lsh_builder_trt = BuildLSHTable(trt_model_function, concrete_function=True)
lsh_builder_trt.train(training_files)
"""
Notice the difference in the wall time, which is **13.1 seconds**. Earlier, with the
unoptimized model it was **54.1 seconds**.
We can take a closer look into one of the hash tables and get an idea of how they are
represented.
"""
idx = 0
for hash, entry in lsh_builder_trt.lsh.tables[0].table.items():
if idx == 5:
break
if len(entry) < 5:
print(hash, entry)
idx += 1
"""
## Visualize results on validation images
In this section we will first write a couple of utility functions to visualize the
similar image parsing process. Then we will benchmark the query performance of the models
with and without optimization.
"""
"""
First, we take 100 images from the validation set for testing purposes.
"""
validation_images = []
validation_labels = []
for image, label in validation_ds.take(100):
image = tf.image.resize(image, (224, 224))
validation_images.append(image.numpy())
validation_labels.append(label.numpy())
validation_images = np.array(validation_images)
validation_labels = np.array(validation_labels)
validation_images.shape, validation_labels.shape
"""
Now we write our visualization utilities.
"""
def plot_images(images, labels):
plt.figure(figsize=(20, 10))
columns = 5
for i, image in enumerate(images):
ax = plt.subplot(len(images) // columns + 1, columns, i + 1)
if i == 0:
ax.set_title("Query Image\n" + "Label: {}".format(labels[i]))
else:
ax.set_title("Similar Image # " + str(i) + "\nLabel: {}".format(labels[i]))
plt.imshow(image.astype("int"))
plt.axis("off")
def visualize_lsh(lsh_class):
idx = np.random.choice(len(validation_images))
image = validation_images[idx]
label = validation_labels[idx]
results = lsh_class.query(image)
candidates = []
labels = []
overlaps = []
for idx, r in enumerate(sorted(results, key=results.get, reverse=True)):
if idx == 4:
break
image_id, label = r.split("_")[0], r.split("_")[1]
candidates.append(images[int(image_id)])
labels.append(label)
overlaps.append(results[r])
candidates.insert(0, image)
labels.insert(0, label)
plot_images(candidates, labels)
"""
### Non-TRT model
"""
for _ in range(5):
visualize_lsh(lsh_builder)
visualize_lsh(lsh_builder)
"""
### TRT model
"""
for _ in range(5):
visualize_lsh(lsh_builder_trt)
"""
As you may have noticed, there are a couple of incorrect results. This can be mitigated in
a few ways:
* Better models for generating the initial embeddings especially for noisy samples. We can
use techniques like [ArcFace](https://arxiv.org/abs/1801.07698),
[Supervised Contrastive Learning](https://arxiv.org/abs/2004.11362), etc.
that implicitly encourage better learning of representations for retrieval purposes.
* The trade-off between the number of tables and the reduction dimensionality is crucial
and helps set the right recall required for your application.
"""
"""
## Benchmarking query performance
"""
def benchmark(lsh_class):
warmup()
start_time = time.time()
for _ in range(1000):
image = np.ones((1, 224, 224, 3)).astype("float32")
_ = lsh_class.query(image, verbose=False)
end_time = time.time() - start_time
print(f"Time taken: {end_time:.3f}")
benchmark(lsh_builder)
benchmark(lsh_builder_trt)
"""
We can immediately notice a stark difference between the query performance of the two
models.
"""
"""
## Final remarks
In this example, we explored the TensorRT framework from NVIDIA for optimizing our model.
It's best suited for GPU-based inference servers. There are other choices for such
frameworks that cater to different hardware platforms:
* [TensorFlow Lite](https://www.tensorflow.org/lite) for mobile and edge devices.
* [ONNX](https://onnx.ai/) for commodity CPU-based servers.
* [Apache TVM](https://tvm.apache.org/), a compiler for machine learning models covering
various platforms.
Here are a few resources you might want to check out to learn more
about applications based on vector similarity search in general:
* [ANN Benchmarks](http://ann-benchmarks.com/)
* [Accelerating Large-Scale Inference with Anisotropic Vector Quantization(ScaNN)](https://arxiv.org/abs/1908.10396)
* [Spreading vectors for similarity search](https://arxiv.org/abs/1806.03198)
* [Building a real-time embeddings similarity matching system](https://cloud.google.com/architecture/building-real-time-embeddings-similarity-matching-system)
"""
|
keras-io/examples/vision/near_dup_search.py/0
|
{
"file_path": "keras-io/examples/vision/near_dup_search.py",
"repo_id": "keras-io",
"token_count": 6103
}
| 132 |
"""
Title: Semantic Image Clustering
Author: [Khalid Salama](https://www.linkedin.com/in/khalid-salama-24403144/)
Date created: 2021/02/28
Last modified: 2021/02/28
Description: Semantic Clustering by Adopting Nearest neighbors (SCAN) algorithm.
Accelerator: GPU
"""
"""
## Introduction
This example demonstrates how to apply the [Semantic Clustering by Adopting Nearest neighbors
(SCAN)](https://arxiv.org/abs/2005.12320) algorithm (Van Gansbeke et al., 2020) on the
[CIFAR-10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset. The algorithm consists of
two phases:
1. Self-supervised visual representation learning of images, in which we use the
[simCLR](https://arxiv.org/abs/2002.05709) technique.
2. Clustering of the learned visual representation vectors to maximize the agreement
between the cluster assignments of neighboring vectors.
"""
"""
## Setup
"""
import os
os.environ["KERAS_BACKEND"] = "tensorflow"
from collections import defaultdict
import numpy as np
import tensorflow as tf
import keras
from keras import layers
import matplotlib.pyplot as plt
from tqdm import tqdm
"""
## Prepare the data
"""
num_classes = 10
input_shape = (32, 32, 3)
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
x_data = np.concatenate([x_train, x_test])
y_data = np.concatenate([y_train, y_test])
print("x_data shape:", x_data.shape, "- y_data shape:", y_data.shape)
classes = [
"airplane",
"automobile",
"bird",
"cat",
"deer",
"dog",
"frog",
"horse",
"ship",
"truck",
]
"""
## Define hyperparameters
"""
target_size = 32 # Resize the input images.
representation_dim = 512 # The dimensions of the features vector.
projection_units = 128 # The projection head of the representation learner.
num_clusters = 20 # Number of clusters.
k_neighbours = 5 # Number of neighbours to consider during cluster learning.
tune_encoder_during_clustering = False # Freeze the encoder in the cluster learning.
"""
## Implement data preprocessing
The data preprocessing step resizes the input images to the desired `target_size` and applies
feature-wise normalization. Note that, when using `keras.applications.ResNet50V2` as the
visual encoder, resizing the images into 255 x 255 inputs would lead to more accurate results
but require a longer time to train.
"""
data_preprocessing = keras.Sequential(
[
layers.Resizing(target_size, target_size),
layers.Normalization(),
]
)
# Compute the mean and the variance from the data for normalization.
data_preprocessing.layers[-1].adapt(x_data)
"""
## Data augmentation
Unlike simCLR, which randomly picks a single data augmentation function to apply to an input
image, we apply a set of data augmentation functions randomly to the input image.
(You can experiment with other image augmentation techniques by following
the [data augmentation tutorial](https://www.tensorflow.org/tutorials/images/data_augmentation).)
"""
data_augmentation = keras.Sequential(
[
layers.RandomTranslation(
height_factor=(-0.2, 0.2), width_factor=(-0.2, 0.2), fill_mode="nearest"
),
layers.RandomFlip(mode="horizontal"),
layers.RandomRotation(factor=0.15, fill_mode="nearest"),
layers.RandomZoom(
height_factor=(-0.3, 0.1), width_factor=(-0.3, 0.1), fill_mode="nearest"
),
]
)
"""
Display a random image
"""
image_idx = np.random.choice(range(x_data.shape[0]))
image = x_data[image_idx]
image_class = classes[y_data[image_idx][0]]
plt.figure(figsize=(3, 3))
plt.imshow(x_data[image_idx].astype("uint8"))
plt.title(image_class)
_ = plt.axis("off")
"""
Display a sample of augmented versions of the image
"""
plt.figure(figsize=(10, 10))
for i in range(9):
augmented_images = data_augmentation(np.array([image]))
ax = plt.subplot(3, 3, i + 1)
plt.imshow(augmented_images[0].numpy().astype("uint8"))
plt.axis("off")
"""
## Self-supervised representation learning
"""
"""
### Implement the vision encoder
"""
def create_encoder(representation_dim):
encoder = keras.Sequential(
[
keras.applications.ResNet50V2(
include_top=False, weights=None, pooling="avg"
),
layers.Dense(representation_dim),
]
)
return encoder
"""
### Implement the unsupervised contrastive loss
"""
class RepresentationLearner(keras.Model):
def __init__(
self,
encoder,
projection_units,
num_augmentations,
temperature=1.0,
dropout_rate=0.1,
l2_normalize=False,
**kwargs
):
super().__init__(**kwargs)
self.encoder = encoder
# Create projection head.
self.projector = keras.Sequential(
[
layers.Dropout(dropout_rate),
layers.Dense(units=projection_units, use_bias=False),
layers.BatchNormalization(),
layers.ReLU(),
]
)
self.num_augmentations = num_augmentations
self.temperature = temperature
self.l2_normalize = l2_normalize
self.loss_tracker = keras.metrics.Mean(name="loss")
@property
def metrics(self):
return [self.loss_tracker]
def compute_contrastive_loss(self, feature_vectors, batch_size):
num_augmentations = keras.ops.shape(feature_vectors)[0] // batch_size
if self.l2_normalize:
feature_vectors = keras.utils.normalize(feature_vectors)
# The logits shape is [num_augmentations * batch_size, num_augmentations * batch_size].
logits = (
tf.linalg.matmul(feature_vectors, feature_vectors, transpose_b=True)
/ self.temperature
)
# Apply log-max trick for numerical stability.
logits_max = keras.ops.max(logits, axis=1)
logits = logits - logits_max
# The shape of targets is [num_augmentations * batch_size, num_augmentations * batch_size].
# targets is a matrix consisting of num_augmentations x num_augmentations submatrices of shape [batch_size, batch_size].
# Each [batch_size, batch_size] submatrix is an identity matrix (diagonal entries are ones).
targets = keras.ops.tile(
tf.eye(batch_size), [num_augmentations, num_augmentations]
)
# Compute cross entropy loss
return keras.losses.categorical_crossentropy(
y_true=targets, y_pred=logits, from_logits=True
)
def call(self, inputs):
# Preprocess the input images.
preprocessed = data_preprocessing(inputs)
# Create augmented versions of the images.
augmented = []
for _ in range(self.num_augmentations):
augmented.append(data_augmentation(preprocessed))
augmented = layers.Concatenate(axis=0)(augmented)
# Generate embedding representations of the images.
features = self.encoder(augmented)
# Apply projection head.
return self.projector(features)
def train_step(self, inputs):
batch_size = keras.ops.shape(inputs)[0]
# Run the forward pass and compute the contrastive loss
with tf.GradientTape() as tape:
feature_vectors = self(inputs, training=True)
loss = self.compute_contrastive_loss(feature_vectors, batch_size)
# Compute gradients
trainable_vars = self.trainable_variables
gradients = tape.gradient(loss, trainable_vars)
# Update weights
self.optimizer.apply_gradients(zip(gradients, trainable_vars))
# Update loss tracker metric
self.loss_tracker.update_state(loss)
# Return a dict mapping metric names to current value
return {m.name: m.result() for m in self.metrics}
def test_step(self, inputs):
batch_size = keras.ops.shape(inputs)[0]
feature_vectors = self(inputs, training=False)
loss = self.compute_contrastive_loss(feature_vectors, batch_size)
self.loss_tracker.update_state(loss)
return {"loss": self.loss_tracker.result()}
"""
### Train the model
"""
# Create vision encoder.
encoder = create_encoder(representation_dim)
# Create representation learner.
representation_learner = RepresentationLearner(
encoder, projection_units, num_augmentations=2, temperature=0.1
)
# Create a cosine decay learning rate scheduler.
lr_scheduler = keras.optimizers.schedules.CosineDecay(
initial_learning_rate=0.001, decay_steps=500, alpha=0.1
)
# Compile the model.
representation_learner.compile(
optimizer=keras.optimizers.AdamW(learning_rate=lr_scheduler, weight_decay=0.0001),
jit_compile=False,
)
# Fit the model.
history = representation_learner.fit(
x=x_data,
batch_size=512,
epochs=50, # for better results, increase the number of epochs to 500.
)
"""
Plot training loss
"""
plt.plot(history.history["loss"])
plt.ylabel("loss")
plt.xlabel("epoch")
plt.show()
"""
## Compute the nearest neighbors
"""
"""
### Generate the embeddings for the images
"""
batch_size = 500
# Get the feature vector representations of the images.
feature_vectors = encoder.predict(x_data, batch_size=batch_size, verbose=1)
# Normalize the feature vectors.
feature_vectors = keras.utils.normalize(feature_vectors)
"""
### Find the *k* nearest neighbours for each embedding
"""
neighbours = []
num_batches = feature_vectors.shape[0] // batch_size
for batch_idx in tqdm(range(num_batches)):
start_idx = batch_idx * batch_size
end_idx = start_idx + batch_size
current_batch = feature_vectors[start_idx:end_idx]
# Compute the dot similarity.
similarities = tf.linalg.matmul(current_batch, feature_vectors, transpose_b=True)
# Get the indices of most similar vectors.
_, indices = keras.ops.top_k(similarities, k=k_neighbours + 1, sorted=True)
# Add the indices to the neighbours.
neighbours.append(indices[..., 1:])
neighbours = np.reshape(np.array(neighbours), (-1, k_neighbours))
"""
Let's display some neighbors on each row
"""
nrows = 4
ncols = k_neighbours + 1
plt.figure(figsize=(12, 12))
position = 1
for _ in range(nrows):
anchor_idx = np.random.choice(range(x_data.shape[0]))
neighbour_indices = neighbours[anchor_idx]
indices = [anchor_idx] + neighbour_indices.tolist()
for j in range(ncols):
plt.subplot(nrows, ncols, position)
plt.imshow(x_data[indices[j]].astype("uint8"))
plt.title(classes[y_data[indices[j]][0]])
plt.axis("off")
position += 1
"""
Notice that the images on each row are visually similar and belong to similar classes.
"""
"""
## Semantic clustering with nearest neighbours
"""
"""
### Implement clustering consistency loss
This loss tries to make sure that neighbours have the same clustering assignments.
"""
class ClustersConsistencyLoss(keras.losses.Loss):
def __init__(self):
super().__init__()
def __call__(self, target, similarity, sample_weight=None):
# Set targets to be ones.
target = keras.ops.ones_like(similarity)
# Compute cross entropy loss.
loss = keras.losses.binary_crossentropy(
y_true=target, y_pred=similarity, from_logits=True
)
return keras.ops.mean(loss)
"""
### Implement the clusters entropy loss
This loss tries to make sure that the cluster distribution is roughly uniform, to avoid
assigning most of the instances to one cluster.
"""
class ClustersEntropyLoss(keras.losses.Loss):
def __init__(self, entropy_loss_weight=1.0):
super().__init__()
self.entropy_loss_weight = entropy_loss_weight
def __call__(self, target, cluster_probabilities, sample_weight=None):
# Ideal entropy = log(num_clusters).
num_clusters = keras.ops.cast(
keras.ops.shape(cluster_probabilities)[-1], "float32"
)
target = keras.ops.log(num_clusters)
# Compute the overall clusters distribution.
cluster_probabilities = keras.ops.mean(cluster_probabilities, axis=0)
# Replacing zero probabilities - if any - with a very small value.
cluster_probabilities = keras.ops.clip(cluster_probabilities, 1e-8, 1.0)
# Compute the entropy over the clusters.
entropy = -keras.ops.sum(
cluster_probabilities * keras.ops.log(cluster_probabilities)
)
# Compute the difference between the target and the actual.
loss = target - entropy
return loss
"""
### Implement clustering model
This model takes a raw image as an input, generates its feature vector using the trained
encoder, and produces a probability distribution over the clusters given the feature vector,
which is used as the cluster assignment.
"""
def create_clustering_model(encoder, num_clusters, name=None):
inputs = keras.Input(shape=input_shape)
# Preprocess the input images.
preprocessed = data_preprocessing(inputs)
# Apply data augmentation to the images.
augmented = data_augmentation(preprocessed)
# Generate embedding representations of the images.
features = encoder(augmented)
# Assign the images to clusters.
outputs = layers.Dense(units=num_clusters, activation="softmax")(features)
# Create the model.
model = keras.Model(inputs=inputs, outputs=outputs, name=name)
return model
"""
### Implement clustering learner
This model receives the input `anchor` image and its `neighbours`, produces the clusters
assignments for them using the `clustering_model`, and produces two outputs:
1. `similarity`: the similarity between the cluster assignments of the `anchor` image and
its `neighbours`. This output is fed to the `ClustersConsistencyLoss`.
2. `anchor_clustering`: cluster assignments of the `anchor` images. This is fed to the `ClustersEntropyLoss`.
"""
def create_clustering_learner(clustering_model):
anchor = keras.Input(shape=input_shape, name="anchors")
neighbours = keras.Input(
shape=tuple([k_neighbours]) + input_shape, name="neighbours"
)
# Changes neighbours shape to [batch_size * k_neighbours, width, height, channels]
neighbours_reshaped = keras.ops.reshape(neighbours, tuple([-1]) + input_shape)
# anchor_clustering shape: [batch_size, num_clusters]
anchor_clustering = clustering_model(anchor)
# neighbours_clustering shape: [batch_size * k_neighbours, num_clusters]
neighbours_clustering = clustering_model(neighbours_reshaped)
# Convert neighbours_clustering shape to [batch_size, k_neighbours, num_clusters]
neighbours_clustering = keras.ops.reshape(
neighbours_clustering,
(-1, k_neighbours, keras.ops.shape(neighbours_clustering)[-1]),
)
# similarity shape: [batch_size, 1, k_neighbours]
similarity = keras.ops.einsum(
"bij,bkj->bik",
keras.ops.expand_dims(anchor_clustering, axis=1),
neighbours_clustering,
)
# similarity shape: [batch_size, k_neighbours]
similarity = layers.Lambda(
lambda x: keras.ops.squeeze(x, axis=1), name="similarity"
)(similarity)
# Create the model.
model = keras.Model(
inputs=[anchor, neighbours],
outputs=[similarity, anchor_clustering],
name="clustering_learner",
)
return model
"""
### Train model
"""
# If tune_encoder_during_clustering is set to False,
# then freeze the encoder weights.
for layer in encoder.layers:
layer.trainable = tune_encoder_during_clustering
# Create the clustering model and learner.
clustering_model = create_clustering_model(encoder, num_clusters, name="clustering")
clustering_learner = create_clustering_learner(clustering_model)
# Instantiate the model losses.
losses = [ClustersConsistencyLoss(), ClustersEntropyLoss(entropy_loss_weight=5)]
# Create the model inputs and labels.
inputs = {"anchors": x_data, "neighbours": tf.gather(x_data, neighbours)}
labels = np.ones(shape=(x_data.shape[0]))
# Compile the model.
clustering_learner.compile(
optimizer=keras.optimizers.AdamW(learning_rate=0.0005, weight_decay=0.0001),
loss=losses,
jit_compile=False,
)
# Begin training the model.
history = clustering_learner.fit(x=inputs, y=labels, batch_size=512, epochs=50)
"""
Plot training loss
"""
plt.plot(history.history["loss"])
plt.ylabel("loss")
plt.xlabel("epoch")
plt.show()
"""
## Cluster analysis
"""
"""
### Assign images to clusters
"""
# Get the cluster probability distribution of the input images.
clustering_probs = clustering_model.predict(x_data, batch_size=batch_size, verbose=1)
# Get the cluster of the highest probability.
cluster_assignments = keras.ops.argmax(clustering_probs, axis=-1).numpy()
# Store the clustering confidence.
# Images with the highest clustering confidence are considered the 'prototypes'
# of the clusters.
cluster_confidence = keras.ops.max(clustering_probs, axis=-1).numpy()
"""
Let's compute the cluster sizes
"""
clusters = defaultdict(list)
for idx, c in enumerate(cluster_assignments):
clusters[c].append((idx, cluster_confidence[idx]))
non_empty_clusters = defaultdict(list)
for c in clusters.keys():
if clusters[c]:
non_empty_clusters[c] = clusters[c]
for c in range(num_clusters):
print("cluster", c, ":", len(clusters[c]))
"""
### Visualize cluster images
Display the *prototypes*—instances with the highest clustering confidence—of each cluster:
"""
num_images = 8
plt.figure(figsize=(15, 15))
position = 1
for c in non_empty_clusters.keys():
cluster_instances = sorted(
non_empty_clusters[c], key=lambda kv: kv[1], reverse=True
)
for j in range(num_images):
image_idx = cluster_instances[j][0]
plt.subplot(len(non_empty_clusters), num_images, position)
plt.imshow(x_data[image_idx].astype("uint8"))
plt.title(classes[y_data[image_idx][0]])
plt.axis("off")
position += 1
"""
### Compute clustering accuracy
First, we assign a label for each cluster based on the majority label of its images.
Then, we compute the accuracy of each cluster by dividing the number of images with the
majority label by the size of the cluster.
"""
cluster_label_counts = dict()
for c in range(num_clusters):
cluster_label_counts[c] = [0] * num_classes
instances = clusters[c]
for i, _ in instances:
cluster_label_counts[c][y_data[i][0]] += 1
cluster_label_idx = np.argmax(cluster_label_counts[c])
correct_count = np.max(cluster_label_counts[c])
cluster_size = len(clusters[c])
accuracy = (
np.round((correct_count / cluster_size) * 100, 2) if cluster_size > 0 else 0
)
cluster_label = classes[cluster_label_idx]
print("cluster", c, "label is:", cluster_label, " - accuracy:", accuracy, "%")
"""
## Conclusion
To improve the accuracy results, you can: 1) increase the number
of epochs in the representation learning and the clustering phases; 2)
allow the encoder weights to be tuned during the clustering phase; and 3) perform a final
fine-tuning step through self-labeling, as described in the [original SCAN paper](https://arxiv.org/abs/2005.12320).
Note that unsupervised image clustering techniques are not expected to outperform the accuracy
of supervised image classification techniques; rather, the goal is to show that they can learn the semantics
of the images and group them into clusters that are similar to their original classes.
"""
|
keras-io/examples/vision/semantic_image_clustering.py/0
|
{
"file_path": "keras-io/examples/vision/semantic_image_clustering.py",
"repo_id": "keras-io",
"token_count": 7286
}
| 133 |
"""
Title: Pneumonia Classification on TPU
Author: Amy MiHyun Jang
Date created: 2020/07/28
Last modified: 2020/08/24
Description: Medical image classification on TPU.
Accelerator: TPU
"""
"""
## Introduction + Set-up
This tutorial will explain how to build an X-ray image classification model
to predict whether an X-ray scan shows presence of pneumonia.
"""
import re
import os
import random
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver.connect()
print("Device:", tpu.master())
strategy = tf.distribute.TPUStrategy(tpu)
except:
strategy = tf.distribute.get_strategy()
print("Number of replicas:", strategy.num_replicas_in_sync)
"""
We need a Google Cloud link to our data to load the data using a TPU.
Below, we define key configuration parameters we'll use in this example.
To run on TPU, this example must be on Colab with the TPU runtime selected.
"""
AUTOTUNE = tf.data.AUTOTUNE
BATCH_SIZE = 25 * strategy.num_replicas_in_sync
IMAGE_SIZE = [180, 180]
CLASS_NAMES = ["NORMAL", "PNEUMONIA"]
"""
## Load the data
The Chest X-ray data we are using from
[*Cell*](https://www.cell.com/cell/fulltext/S0092-8674(18)30154-5) divides the data into
training and test files. Let's first load in the training TFRecords.
"""
train_images = tf.data.TFRecordDataset(
"gs://download.tensorflow.org/data/ChestXRay2017/train/images.tfrec"
)
train_paths = tf.data.TFRecordDataset(
"gs://download.tensorflow.org/data/ChestXRay2017/train/paths.tfrec"
)
ds = tf.data.Dataset.zip((train_images, train_paths))
"""
Let's count how many healthy/normal chest X-rays we have and how many
pneumonia chest X-rays we have:
"""
COUNT_NORMAL = len(
[
filename
for filename in train_paths
if "NORMAL" in filename.numpy().decode("utf-8")
]
)
print("Normal images count in training set: " + str(COUNT_NORMAL))
COUNT_PNEUMONIA = len(
[
filename
for filename in train_paths
if "PNEUMONIA" in filename.numpy().decode("utf-8")
]
)
print("Pneumonia images count in training set: " + str(COUNT_PNEUMONIA))
"""
Notice that there are many more images classified as pneumonia than normal. This
shows that we have an imbalance in our data. We will correct for this imbalance later on
in our notebook.
"""
"""
We want to map each filename to the corresponding (image, label) pair. The following
methods will help us do that.
As we only have two labels, we will encode the label so that `1` or `True` indicates
pneumonia and `0` or `False` indicates normal.
"""
def get_label(file_path):
# convert the path to a list of path components
parts = tf.strings.split(file_path, "/")
# The second to last is the class-directory
if parts[-2] == "PNEUMONIA":
return 1
else:
return 0
def decode_img(img):
# convert the compressed string to a 3D uint8 tensor
img = tf.image.decode_jpeg(img, channels=3)
# resize the image to the desired size.
return tf.image.resize(img, IMAGE_SIZE)
def process_path(image, path):
label = get_label(path)
# load the raw data from the file as a string
img = decode_img(image)
return img, label
ds = ds.map(process_path, num_parallel_calls=AUTOTUNE)
"""
Let's split the data into a training and validation datasets.
"""
ds = ds.shuffle(10000)
train_ds = ds.take(4200)
val_ds = ds.skip(4200)
"""
Let's visualize the shape of an (image, label) pair.
"""
for image, label in train_ds.take(1):
print("Image shape: ", image.numpy().shape)
print("Label: ", label.numpy())
"""
Load and format the test data as well.
"""
test_images = tf.data.TFRecordDataset(
"gs://download.tensorflow.org/data/ChestXRay2017/test/images.tfrec"
)
test_paths = tf.data.TFRecordDataset(
"gs://download.tensorflow.org/data/ChestXRay2017/test/paths.tfrec"
)
test_ds = tf.data.Dataset.zip((test_images, test_paths))
test_ds = test_ds.map(process_path, num_parallel_calls=AUTOTUNE)
test_ds = test_ds.batch(BATCH_SIZE)
"""
## Visualize the dataset
First, let's use buffered prefetching so we can yield data from disk without having I/O
become blocking.
Please note that large image datasets should not be cached in memory. We do it here
because the dataset is not very large and we want to train on TPU.
"""
def prepare_for_training(ds, cache=True):
# This is a small dataset, only load it once, and keep it in memory.
# use `.cache(filename)` to cache preprocessing work for datasets that don't
# fit in memory.
if cache:
if isinstance(cache, str):
ds = ds.cache(cache)
else:
ds = ds.cache()
ds = ds.batch(BATCH_SIZE)
# `prefetch` lets the dataset fetch batches in the background while the model
# is training.
ds = ds.prefetch(buffer_size=AUTOTUNE)
return ds
"""
Call the next batch iteration of the training data.
"""
train_ds = prepare_for_training(train_ds)
val_ds = prepare_for_training(val_ds)
image_batch, label_batch = next(iter(train_ds))
"""
Define the method to show the images in the batch.
"""
def show_batch(image_batch, label_batch):
plt.figure(figsize=(10, 10))
for n in range(25):
ax = plt.subplot(5, 5, n + 1)
plt.imshow(image_batch[n] / 255)
if label_batch[n]:
plt.title("PNEUMONIA")
else:
plt.title("NORMAL")
plt.axis("off")
"""
As the method takes NumPy arrays as its parameters, call the `numpy()` method on the
batches to convert the tensors to NumPy array form.
"""
show_batch(image_batch.numpy(), label_batch.numpy())
"""
## Build the CNN
To make our model more modular and easier to understand, let's define some blocks. As
we're building a convolutional neural network, we'll create a convolution block and a dense
layer block.
The architecture for this CNN has been inspired by this
[article](https://towardsdatascience.com/deep-learning-for-detecting-pneumonia-from-x-ray-images-fc9a3d9fdba8).
"""
import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import keras
from keras import layers
def conv_block(filters, inputs):
x = layers.SeparableConv2D(filters, 3, activation="relu", padding="same")(inputs)
x = layers.SeparableConv2D(filters, 3, activation="relu", padding="same")(x)
x = layers.BatchNormalization()(x)
outputs = layers.MaxPool2D()(x)
return outputs
def dense_block(units, dropout_rate, inputs):
x = layers.Dense(units, activation="relu")(inputs)
x = layers.BatchNormalization()(x)
outputs = layers.Dropout(dropout_rate)(x)
return outputs
"""
The following method will define the function to build our model for us.
The images originally have values that range from [0, 255]. CNNs work better with smaller
numbers so we will scale this down for our input.
The Dropout layers are important, as they
reduce the likelihood of the model overfitting. We want to end the model with a `Dense`
layer with one node, as this will be the binary output that determines if an X-ray shows
presence of pneumonia.
"""
def build_model():
inputs = keras.Input(shape=(IMAGE_SIZE[0], IMAGE_SIZE[1], 3))
x = layers.Rescaling(1.0 / 255)(inputs)
x = layers.Conv2D(16, 3, activation="relu", padding="same")(x)
x = layers.Conv2D(16, 3, activation="relu", padding="same")(x)
x = layers.MaxPool2D()(x)
x = conv_block(32, x)
x = conv_block(64, x)
x = conv_block(128, x)
x = layers.Dropout(0.2)(x)
x = conv_block(256, x)
x = layers.Dropout(0.2)(x)
x = layers.Flatten()(x)
x = dense_block(512, 0.7, x)
x = dense_block(128, 0.5, x)
x = dense_block(64, 0.3, x)
outputs = layers.Dense(1, activation="sigmoid")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
return model
"""
## Correct for data imbalance
We saw earlier in this example that the data was imbalanced, with more images classified
as pneumonia than normal. We will correct for that by using class weighting:
"""
initial_bias = np.log([COUNT_PNEUMONIA / COUNT_NORMAL])
print("Initial bias: {:.5f}".format(initial_bias[0]))
TRAIN_IMG_COUNT = COUNT_NORMAL + COUNT_PNEUMONIA
weight_for_0 = (1 / COUNT_NORMAL) * (TRAIN_IMG_COUNT) / 2.0
weight_for_1 = (1 / COUNT_PNEUMONIA) * (TRAIN_IMG_COUNT) / 2.0
class_weight = {0: weight_for_0, 1: weight_for_1}
print("Weight for class 0: {:.2f}".format(weight_for_0))
print("Weight for class 1: {:.2f}".format(weight_for_1))
"""
The weight for class `0` (Normal) is a lot higher than the weight for class `1`
(Pneumonia). Because there are fewer normal images, each normal image will be weighted
more to balance the data as the CNN works best when the training data is balanced.
"""
"""
## Train the model
"""
"""
### Defining callbacks
The checkpoint callback saves the best weights of the model, so next time we want to use
the model, we do not have to spend time training it. The early stopping callback stops
the training process when the model starts becoming stagnant, or even worse, when the
model starts overfitting.
"""
checkpoint_cb = keras.callbacks.ModelCheckpoint("xray_model.keras", save_best_only=True)
early_stopping_cb = keras.callbacks.EarlyStopping(
patience=10, restore_best_weights=True
)
"""
We also want to tune our learning rate. Too high of a learning rate will cause the model
to diverge. Too small of a learning rate will make training too slow. We
implement the exponential learning rate scheduling method below.
"""
initial_learning_rate = 0.015
lr_schedule = keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate, decay_steps=100000, decay_rate=0.96, staircase=True
)
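"""
As a quick sanity check, the decayed learning rate at a given step can also be
computed by hand; with `staircase=True` the exponent is truncated to an integer.
The helper below is a small sketch that mirrors the schedule defined above:
"""


def demo_decayed_lr(step, initial_lr=0.015, decay_steps=100000, decay_rate=0.96):
    return initial_lr * decay_rate ** (step // decay_steps)


print(demo_decayed_lr(0), demo_decayed_lr(250000))  # 0.015 and 0.015 * 0.96 ** 2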
"""
### Fit the model
For our metrics, we want to include precision and recall as they will provide us with a
more informed picture of how good our model is. Accuracy tells us what fraction of the
labels is correct. Since our data is not balanced, accuracy might give a skewed sense of
a good model (i.e. a model that always predicts PNEUMONIA will be 74% accurate but is not
a good model).
Precision is the number of true positives (TP) over the sum of TP and false positives
(FP). It shows what fraction of labeled positives are actually correct.
Recall is the number of TP over the sum of TP and false negatives (FN). It shows what
fraction of actual positives are identified correctly.
Since there are only two possible labels for the image, we will be using the
binary crossentropy loss. When we fit the model, remember to specify the class weights,
which we defined earlier. Because we are using a TPU, training will be quick - less than
2 minutes.
"""
with strategy.scope():
model = build_model()
METRICS = [
keras.metrics.BinaryAccuracy(),
keras.metrics.Precision(name="precision"),
keras.metrics.Recall(name="recall"),
]
model.compile(
optimizer=keras.optimizers.Adam(learning_rate=lr_schedule),
loss="binary_crossentropy",
metrics=METRICS,
)
history = model.fit(
train_ds,
epochs=100,
validation_data=val_ds,
class_weight=class_weight,
callbacks=[checkpoint_cb, early_stopping_cb],
)
"""
## Visualizing model performance
Let's plot the model accuracy and loss for the training and the validation sets. Note that
no random seed is specified for this notebook. For your notebook, there might be slight
variance.
"""
fig, ax = plt.subplots(1, 4, figsize=(20, 3))
ax = ax.ravel()
for i, met in enumerate(["precision", "recall", "binary_accuracy", "loss"]):
ax[i].plot(history.history[met])
ax[i].plot(history.history["val_" + met])
ax[i].set_title("Model {}".format(met))
ax[i].set_xlabel("epochs")
ax[i].set_ylabel(met)
ax[i].legend(["train", "val"])
"""
We see that the accuracy for our model is around 95%.
"""
"""
## Predict and evaluate results
Let's evaluate the model on our test data!
"""
model.evaluate(test_ds, return_dict=True)
"""
We see that our accuracy on our test data is lower than the accuracy for our validation
set. This may indicate overfitting.
Our recall is greater than our precision, indicating that almost all pneumonia images are
correctly identified but some normal images are falsely identified. We should aim to
increase our precision.
"""
for image, label in test_ds.take(1):
plt.imshow(image[0] / 255.0)
plt.title(CLASS_NAMES[label[0].numpy()])
prediction = model.predict(test_ds.take(1))[0]
scores = [1 - prediction, prediction]
for score, name in zip(scores, CLASS_NAMES):
print("This image is %.2f percent %s" % ((100 * score), name))
|
keras-io/examples/vision/xray_classification_with_tpus.py/0
|
{
"file_path": "keras-io/examples/vision/xray_classification_with_tpus.py",
"repo_id": "keras-io",
"token_count": 4413
}
| 134 |
<jupyter_start><jupyter_text>Writing your own callbacks**Authors:** Rick Chao, Francois Chollet**Date created:** 2019/03/20**Last modified:** 2023/06/25**Description:** Complete guide to writing new Keras callbacks. IntroductionA callback is a powerful tool to customize the behavior of a Keras model duringtraining, evaluation, or inference. Examples include `keras.callbacks.TensorBoard`to visualize training progress and results with TensorBoard, or`keras.callbacks.ModelCheckpoint` to periodically save your model during training.In this guide, you will learn what a Keras callback is, what it can do, and how you canbuild your own. We provide a few demos of simple callback applications to get youstarted. Setup<jupyter_code>import numpy as np
import keras<jupyter_output><empty_output><jupyter_text>Keras callbacks overviewAll callbacks subclass the `keras.callbacks.Callback` class, andoverride a set of methods called at various stages of training, testing, andpredicting. Callbacks are useful to get a view on internal states and statistics ofthe model during training.You can pass a list of callbacks (as the keyword argument `callbacks`) to the followingmodel methods:- `keras.Model.fit()`- `keras.Model.evaluate()`- `keras.Model.predict()` An overview of callback methods Global methods `on_(train|test|predict)_begin(self, logs=None)`Called at the beginning of `fit`/`evaluate`/`predict`. `on_(train|test|predict)_end(self, logs=None)`Called at the end of `fit`/`evaluate`/`predict`. Batch-level methods for training/testing/predicting `on_(train|test|predict)_batch_begin(self, batch, logs=None)`Called right before processing a batch during training/testing/predicting. `on_(train|test|predict)_batch_end(self, batch, logs=None)`Called at the end of training/testing/predicting a batch. Within this method, `logs` isa dict containing the metrics results. Epoch-level methods (training only) `on_epoch_begin(self, epoch, logs=None)`Called at the beginning of an epoch during training. `on_epoch_end(self, epoch, logs=None)`Called at the end of an epoch during training. A basic exampleLet's take a look at a concrete example. To get started, let's import tensorflow anddefine a simple Sequential Keras model:<jupyter_code># Define the Keras model to add callbacks to
def get_model():
model = keras.Sequential()
model.add(keras.layers.Dense(1))
model.compile(
optimizer=keras.optimizers.RMSprop(learning_rate=0.1),
loss="mean_squared_error",
metrics=["mean_absolute_error"],
)
return model<jupyter_output><empty_output><jupyter_text>Then, load the MNIST data for training and testing from Keras datasets API:<jupyter_code># Load example MNIST data and pre-process it
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
x_train = x_train.reshape(-1, 784).astype("float32") / 255.0
x_test = x_test.reshape(-1, 784).astype("float32") / 255.0
# Limit the data to 1000 samples
x_train = x_train[:1000]
y_train = y_train[:1000]
x_test = x_test[:1000]
y_test = y_test[:1000]<jupyter_output><empty_output><jupyter_text>Now, define a simple custom callback that logs:- When `fit`/`evaluate`/`predict` starts & ends- When each epoch starts & ends- When each training batch starts & ends- When each evaluation (test) batch starts & ends- When each inference (prediction) batch starts & ends<jupyter_code>class CustomCallback(keras.callbacks.Callback):
def on_train_begin(self, logs=None):
keys = list(logs.keys())
print("Starting training; got log keys: {}".format(keys))
def on_train_end(self, logs=None):
keys = list(logs.keys())
print("Stop training; got log keys: {}".format(keys))
def on_epoch_begin(self, epoch, logs=None):
keys = list(logs.keys())
print("Start epoch {} of training; got log keys: {}".format(epoch, keys))
def on_epoch_end(self, epoch, logs=None):
keys = list(logs.keys())
print("End epoch {} of training; got log keys: {}".format(epoch, keys))
def on_test_begin(self, logs=None):
keys = list(logs.keys())
print("Start testing; got log keys: {}".format(keys))
def on_test_end(self, logs=None):
keys = list(logs.keys())
print("Stop testing; got log keys: {}".format(keys))
def on_predict_begin(self, logs=None):
keys = list(logs.keys())
print("Start predicting; got log keys: {}".format(keys))
def on_predict_end(self, logs=None):
keys = list(logs.keys())
print("Stop predicting; got log keys: {}".format(keys))
def on_train_batch_begin(self, batch, logs=None):
keys = list(logs.keys())
print("...Training: start of batch {}; got log keys: {}".format(batch, keys))
def on_train_batch_end(self, batch, logs=None):
keys = list(logs.keys())
print("...Training: end of batch {}; got log keys: {}".format(batch, keys))
def on_test_batch_begin(self, batch, logs=None):
keys = list(logs.keys())
print("...Evaluating: start of batch {}; got log keys: {}".format(batch, keys))
def on_test_batch_end(self, batch, logs=None):
keys = list(logs.keys())
print("...Evaluating: end of batch {}; got log keys: {}".format(batch, keys))
def on_predict_batch_begin(self, batch, logs=None):
keys = list(logs.keys())
print("...Predicting: start of batch {}; got log keys: {}".format(batch, keys))
def on_predict_batch_end(self, batch, logs=None):
keys = list(logs.keys())
print("...Predicting: end of batch {}; got log keys: {}".format(batch, keys))<jupyter_output><empty_output><jupyter_text>Let's try it out:<jupyter_code>model = get_model()
model.fit(
x_train,
y_train,
batch_size=128,
epochs=1,
verbose=0,
validation_split=0.5,
callbacks=[CustomCallback()],
)
res = model.evaluate(
x_test, y_test, batch_size=128, verbose=0, callbacks=[CustomCallback()]
)
res = model.predict(x_test, batch_size=128, callbacks=[CustomCallback()])<jupyter_output><empty_output><jupyter_text>Usage of `logs` dictThe `logs` dict contains the loss value, and all the metrics at the end of a batch orepoch. Example includes the loss and mean absolute error.<jupyter_code>class LossAndErrorPrintingCallback(keras.callbacks.Callback):
def on_train_batch_end(self, batch, logs=None):
print(
"Up to batch {}, the average loss is {:7.2f}.".format(batch, logs["loss"])
)
def on_test_batch_end(self, batch, logs=None):
print(
"Up to batch {}, the average loss is {:7.2f}.".format(batch, logs["loss"])
)
def on_epoch_end(self, epoch, logs=None):
print(
"The average loss for epoch {} is {:7.2f} "
"and mean absolute error is {:7.2f}.".format(
epoch, logs["loss"], logs["mean_absolute_error"]
)
)
model = get_model()
model.fit(
x_train,
y_train,
batch_size=128,
epochs=2,
verbose=0,
callbacks=[LossAndErrorPrintingCallback()],
)
res = model.evaluate(
x_test,
y_test,
batch_size=128,
verbose=0,
callbacks=[LossAndErrorPrintingCallback()],
)<jupyter_output><empty_output><jupyter_text>Usage of `self.model` attributeIn addition to receiving log information when one of their methods is called,callbacks have access to the model associated with the current round oftraining/evaluation/inference: `self.model`.Here are a few of the things you can do with `self.model` in a callback:- Set `self.model.stop_training = True` to immediately interrupt training.- Mutate hyperparameters of the optimizer (available as `self.model.optimizer`),such as `self.model.optimizer.learning_rate`.- Save the model at period intervals.- Record the output of `model.predict()` on a few test samples at the end of eachepoch, to use as a sanity check during training.- Extract visualizations of intermediate features at the end of each epoch, to monitorwhat the model is learning over time.- etc.Let's see this in action in a couple of examples. Examples of Keras callback applications Early stopping at minimum lossThis first example shows the creation of a `Callback` that stops training when theminimum of loss has been reached, by setting the attribute `self.model.stop_training`(boolean). Optionally, you can provide an argument `patience` to specify how manyepochs we should wait before stopping after having reached a local minimum.`keras.callbacks.EarlyStopping` provides a more complete and general implementation.<jupyter_code>class EarlyStoppingAtMinLoss(keras.callbacks.Callback):
"""Stop training when the loss is at its min, i.e. the loss stops decreasing.
Arguments:
patience: Number of epochs to wait after min has been hit. After this
number of no improvement, training stops.
"""
def __init__(self, patience=0):
super().__init__()
self.patience = patience
# best_weights to store the weights at which the minimum loss occurs.
self.best_weights = None
def on_train_begin(self, logs=None):
        # The number of epochs it has waited since the loss was last at a minimum.
self.wait = 0
# The epoch the training stops at.
self.stopped_epoch = 0
# Initialize the best as infinity.
self.best = np.Inf
def on_epoch_end(self, epoch, logs=None):
current = logs.get("loss")
if np.less(current, self.best):
self.best = current
self.wait = 0
            # Record the best weights if the current result is better (less).
self.best_weights = self.model.get_weights()
else:
self.wait += 1
if self.wait >= self.patience:
self.stopped_epoch = epoch
self.model.stop_training = True
print("Restoring model weights from the end of the best epoch.")
self.model.set_weights(self.best_weights)
def on_train_end(self, logs=None):
if self.stopped_epoch > 0:
print(f"Epoch {self.stopped_epoch + 1}: early stopping")
model = get_model()
model.fit(
x_train,
y_train,
batch_size=64,
epochs=30,
verbose=0,
callbacks=[LossAndErrorPrintingCallback(), EarlyStoppingAtMinLoss()],
)<jupyter_output><empty_output><jupyter_text>Learning rate schedulingIn this example, we show how a custom Callback can be used to dynamically change thelearning rate of the optimizer during the course of training.See `callbacks.LearningRateScheduler` for a more general implementations.<jupyter_code>class CustomLearningRateScheduler(keras.callbacks.Callback):
"""Learning rate scheduler which sets the learning rate according to schedule.
Arguments:
schedule: a function that takes an epoch index
(integer, indexed from 0) and current learning rate
as inputs and returns a new learning rate as output (float).
"""
def __init__(self, schedule):
super().__init__()
self.schedule = schedule
def on_epoch_begin(self, epoch, logs=None):
if not hasattr(self.model.optimizer, "learning_rate"):
raise ValueError('Optimizer must have a "learning_rate" attribute.')
# Get the current learning rate from model's optimizer.
lr = self.model.optimizer.learning_rate
# Call schedule function to get the scheduled learning rate.
scheduled_lr = self.schedule(epoch, lr)
# Set the value back to the optimizer before this epoch starts
self.model.optimizer.learning_rate = scheduled_lr
print(f"\nEpoch {epoch}: Learning rate is {float(np.array(scheduled_lr))}.")
LR_SCHEDULE = [
# (epoch to start, learning rate) tuples
(3, 0.05),
(6, 0.01),
(9, 0.005),
(12, 0.001),
]
def lr_schedule(epoch, lr):
"""Helper function to retrieve the scheduled learning rate based on epoch."""
if epoch < LR_SCHEDULE[0][0] or epoch > LR_SCHEDULE[-1][0]:
return lr
for i in range(len(LR_SCHEDULE)):
if epoch == LR_SCHEDULE[i][0]:
return LR_SCHEDULE[i][1]
return lr
model = get_model()
model.fit(
x_train,
y_train,
batch_size=64,
epochs=15,
verbose=0,
callbacks=[
LossAndErrorPrintingCallback(),
CustomLearningRateScheduler(lr_schedule),
],
)<jupyter_output><empty_output>
|
keras-io/guides/ipynb/keras_core/writing_your_own_callbacks.ipynb/0
|
{
"file_path": "keras-io/guides/ipynb/keras_core/writing_your_own_callbacks.ipynb",
"repo_id": "keras-io",
"token_count": 4529
}
| 135 |
<jupyter_start><jupyter_text>Tailor the search space**Authors:** Luca Invernizzi, James Long, Francois Chollet, Tom O'Malley, Haifeng Jin**Date created:** 2019/05/31**Last modified:** 2021/10/27**Description:** Tune a subset of the hyperparameters without changing the hypermodel.<jupyter_code>!pip install keras-tuner -q<jupyter_output><empty_output><jupyter_text>In this guide, we will show how to tailor the search space without changing the`HyperModel` code directly. For example, you can only tune some of thehyperparameters and keep the rest fixed, or you can override the compilearguments, like `optimizer`, `loss`, and `metrics`. The default value of a hyperparameterBefore we tailor the search space, it is important to know that everyhyperparameter has a default value. This default value is used as thehyperparameter value when not tuning it during our tailoring the search space.Whenever you register a hyperparameter, you can use the `default` argument tospecify a default value:```pythonhp.Int("units", min_value=32, max_value=128, step=32, default=64)```If you don't, hyperparameters always have a default default (for `Int`, it isequal to `min_value`).In the following model-building function, we specified the default value forthe `units` hyperparameter as 64.<jupyter_code>import keras
from keras import layers
import keras_tuner
import numpy as np
def build_model(hp):
model = keras.Sequential()
model.add(layers.Flatten())
model.add(
layers.Dense(
units=hp.Int("units", min_value=32, max_value=128, step=32, default=64)
)
)
if hp.Boolean("dropout"):
model.add(layers.Dropout(rate=0.25))
model.add(layers.Dense(units=10, activation="softmax"))
model.compile(
optimizer=keras.optimizers.Adam(
learning_rate=hp.Choice("learning_rate", values=[1e-2, 1e-3, 1e-4])
),
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
)
return model<jupyter_output><empty_output><jupyter_text>We will reuse this search space in the rest of the tutorial by overriding thehyperparameters without defining a new search space. Search a few and fix the restIf you have an existing hypermodel, and you want to search over only a fewhyperparameters, and keep the rest fixed, you don't have to change the code inthe model-building function or the `HyperModel`. You can pass a`HyperParameters` to the `hyperparameters` argument to the tuner constructorwith all the hyperparameters you want to tune. Specify`tune_new_entries=False` to prevent it from tuning other hyperparameters, thedefault value of which would be used.In the following example, we only tune the `learning_rate` hyperparameter, andchanged its type and value ranges.<jupyter_code>hp = keras_tuner.HyperParameters()
# This will override the `learning_rate` parameter with your
# own selection of choices
hp.Float("learning_rate", min_value=1e-4, max_value=1e-2, sampling="log")
tuner = keras_tuner.RandomSearch(
hypermodel=build_model,
hyperparameters=hp,
# Prevents unlisted parameters from being tuned
tune_new_entries=False,
objective="val_accuracy",
max_trials=3,
overwrite=True,
directory="my_dir",
project_name="search_a_few",
)
# Generate random data
x_train = np.random.rand(100, 28, 28, 1)
y_train = np.random.randint(0, 10, (100, 1))
x_val = np.random.rand(20, 28, 28, 1)
y_val = np.random.randint(0, 10, (20, 1))
# Run the search
tuner.search(x_train, y_train, epochs=1, validation_data=(x_val, y_val))<jupyter_output><empty_output><jupyter_text>If you summarize the search space, you will see only one hyperparameter.<jupyter_code>tuner.search_space_summary()<jupyter_output><empty_output><jupyter_text>Fix a few and tune the restIn the example above we showed how to tune only a few hyperparameters and keepthe rest fixed. You can also do the reverse: only fix a few hyperparametersand tune all the rest.In the following example, we fixed the value of the `learning_rate`hyperparameter. Pass a `hyperparameters` argument with a `Fixed` entry (or anynumber of `Fixed` entries). Also remember to specify `tune_new_entries=True`,which allows us to tune the rest of the hyperparameters.<jupyter_code>hp = keras_tuner.HyperParameters()
hp.Fixed("learning_rate", value=1e-4)
tuner = keras_tuner.RandomSearch(
build_model,
hyperparameters=hp,
tune_new_entries=True,
objective="val_accuracy",
max_trials=3,
overwrite=True,
directory="my_dir",
project_name="fix_a_few",
)
tuner.search(x_train, y_train, epochs=1, validation_data=(x_val, y_val))<jupyter_output><empty_output><jupyter_text>If you summarize the search space, you will see the `learning_rate` is markedas fixed, and the rest of the hyperparameters are being tuned.<jupyter_code>tuner.search_space_summary()<jupyter_output><empty_output><jupyter_text>Overriding compilation argumentsIf you have a hypermodel for which you want to change the existing optimizer,loss, or metrics, you can do so by passing these arguments to the tunerconstructor:<jupyter_code>tuner = keras_tuner.RandomSearch(
build_model,
optimizer=keras.optimizers.Adam(1e-3),
loss="mse",
metrics=[
"sparse_categorical_crossentropy",
],
objective="val_loss",
max_trials=3,
overwrite=True,
directory="my_dir",
project_name="override_compile",
)
tuner.search(x_train, y_train, epochs=1, validation_data=(x_val, y_val))<jupyter_output><empty_output><jupyter_text>If you get the best model, you can see the loss function has changed to MSE.<jupyter_code>tuner.get_best_models()[0].loss<jupyter_output><empty_output><jupyter_text>Tailor the search space of pre-built HyperModelsYou can also use these techniques with the pre-built models in KerasTuner, like `HyperResNet` or `HyperXception`. However, to see what hyperparameters are in these pre-built `HyperModel`s, you will have to read the source code.In the following example, we only tune the `learning_rate` of `HyperXception` and fix all the rest of the hyperparameters. Because the default loss of `HyperXception` is `categorical_crossentropy`, which expects the labels to be one-hot encoded and doesn't match our raw integer label data, we need to change it by overriding the `loss` in the compile args to `sparse_categorical_crossentropy`.<jupyter_code>hypermodel = keras_tuner.applications.HyperXception(input_shape=(28, 28, 1), classes=10)
hp = keras_tuner.HyperParameters()
# This will override the `learning_rate` parameter with your
# own selection of choices
hp.Choice("learning_rate", values=[1e-2, 1e-3, 1e-4])
tuner = keras_tuner.RandomSearch(
hypermodel,
hyperparameters=hp,
# Prevents unlisted parameters from being tuned
tune_new_entries=False,
# Override the loss.
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
objective="val_accuracy",
max_trials=3,
overwrite=True,
directory="my_dir",
project_name="helloworld",
)
# Run the search
tuner.search(x_train, y_train, epochs=1, validation_data=(x_val, y_val))
tuner.search_space_summary()<jupyter_output><empty_output>
|
keras-io/guides/ipynb/keras_tuner/tailor_the_search_space.ipynb/0
|
{
"file_path": "keras-io/guides/ipynb/keras_tuner/tailor_the_search_space.ipynb",
"repo_id": "keras-io",
"token_count": 2397
}
| 136 |
"""
Title: Tailor the search space
Authors: Luca Invernizzi, James Long, Francois Chollet, Tom O'Malley, Haifeng Jin
Date created: 2019/05/31
Last modified: 2021/10/27
Description: Tune a subset of the hyperparameters without changing the hypermodel.
Accelerator: None
"""
"""shell
pip install keras-tuner -q
"""
"""
In this guide, we will show how to tailor the search space without changing the
`HyperModel` code directly. For example, you can only tune some of the
hyperparameters and keep the rest fixed, or you can override the compile
arguments, like `optimizer`, `loss`, and `metrics`.
## The default value of a hyperparameter
Before we tailor the search space, it is important to know that every
hyperparameter has a default value. This default value is used as the
hyperparameter's value whenever it is not being tuned while we tailor the search space.
Whenever you register a hyperparameter, you can use the `default` argument to
specify a default value:
```python
hp.Int("units", min_value=32, max_value=128, step=32, default=64)
```
If you don't, hyperparameters always have a default default (for `Int`, it is
equal to `min_value`).
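For instance, in a small sketch separate from the search space used below,
registering an `Int` without a `default` returns its `min_value`:
```python
hp = keras_tuner.HyperParameters()
units = hp.Int("units", min_value=32, max_value=128, step=32)
print(units)  # 32, i.e. the `min_value`
```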
In the following model-building function, we specified the default value for
the `units` hyperparameter as 64.
"""
import keras
from keras import layers
import keras_tuner
import numpy as np
def build_model(hp):
model = keras.Sequential()
model.add(layers.Flatten())
model.add(
layers.Dense(
units=hp.Int("units", min_value=32, max_value=128, step=32, default=64)
)
)
if hp.Boolean("dropout"):
model.add(layers.Dropout(rate=0.25))
model.add(layers.Dense(units=10, activation="softmax"))
model.compile(
optimizer=keras.optimizers.Adam(
learning_rate=hp.Choice("learning_rate", values=[1e-2, 1e-3, 1e-4])
),
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
)
return model
"""
We will reuse this search space in the rest of the tutorial by overriding the
hyperparameters without defining a new search space.
## Search a few and fix the rest
If you have an existing hypermodel, and you want to search over only a few
hyperparameters, and keep the rest fixed, you don't have to change the code in
the model-building function or the `HyperModel`. You can pass a
`HyperParameters` to the `hyperparameters` argument to the tuner constructor
with all the hyperparameters you want to tune. Specify
`tune_new_entries=False` to prevent it from tuning other hyperparameters, the
default value of which would be used.
In the following example, we only tune the `learning_rate` hyperparameter, and
change its type and value range.
"""
hp = keras_tuner.HyperParameters()
# This will override the `learning_rate` parameter with your
# own selection of choices
hp.Float("learning_rate", min_value=1e-4, max_value=1e-2, sampling="log")
tuner = keras_tuner.RandomSearch(
hypermodel=build_model,
hyperparameters=hp,
# Prevents unlisted parameters from being tuned
tune_new_entries=False,
objective="val_accuracy",
max_trials=3,
overwrite=True,
directory="my_dir",
project_name="search_a_few",
)
# Generate random data
x_train = np.random.rand(100, 28, 28, 1)
y_train = np.random.randint(0, 10, (100, 1))
x_val = np.random.rand(20, 28, 28, 1)
y_val = np.random.randint(0, 10, (20, 1))
# Run the search
tuner.search(x_train, y_train, epochs=1, validation_data=(x_val, y_val))
"""
If you summarize the search space, you will see only one hyperparameter.
"""
tuner.search_space_summary()
"""
## Fix a few and tune the rest
In the example above we showed how to tune only a few hyperparameters and keep
the rest fixed. You can also do the reverse: only fix a few hyperparameters
and tune all the rest.
In the following example, we fixed the value of the `learning_rate`
hyperparameter. Pass a `hyperparameters` argument with a `Fixed` entry (or any
number of `Fixed` entries). Also remember to specify `tune_new_entries=True`,
which allows us to tune the rest of the hyperparameters.
"""
hp = keras_tuner.HyperParameters()
hp.Fixed("learning_rate", value=1e-4)
tuner = keras_tuner.RandomSearch(
build_model,
hyperparameters=hp,
tune_new_entries=True,
objective="val_accuracy",
max_trials=3,
overwrite=True,
directory="my_dir",
project_name="fix_a_few",
)
tuner.search(x_train, y_train, epochs=1, validation_data=(x_val, y_val))
"""
If you summarize the search space, you will see the `learning_rate` is marked
as fixed, and the rest of the hyperparameters are being tuned.
"""
tuner.search_space_summary()
"""
## Overriding compilation arguments
If you have a hypermodel for which you want to change the existing optimizer,
loss, or metrics, you can do so by passing these arguments to the tuner
constructor:
"""
tuner = keras_tuner.RandomSearch(
build_model,
optimizer=keras.optimizers.Adam(1e-3),
loss="mse",
metrics=[
"sparse_categorical_crossentropy",
],
objective="val_loss",
max_trials=3,
overwrite=True,
directory="my_dir",
project_name="override_compile",
)
tuner.search(x_train, y_train, epochs=1, validation_data=(x_val, y_val))
"""
If you get the best model, you can see the loss function has changed to MSE.
"""
tuner.get_best_models()[0].loss
"""
## Tailor the search space of pre-built HyperModels
You can also use these techniques with the pre-built models in KerasTuner, like
`HyperResNet` or `HyperXception`. However, to see what hyperparameters are in
these pre-built `HyperModel`s, you will have to read the source code.
In the following example, we only tune the `learning_rate` of `HyperXception`
and fix all the rest of the hyperparameters. Because the default loss of
`HyperXception` is `categorical_crossentropy`, which expects the labels to be
one-hot encoded and doesn't match our raw integer label data, we need to
change it by overriding the `loss` in the compile args to
`sparse_categorical_crossentropy`.
"""
hypermodel = keras_tuner.applications.HyperXception(input_shape=(28, 28, 1), classes=10)
hp = keras_tuner.HyperParameters()
# This will override the `learning_rate` parameter with your
# own selection of choices
hp.Choice("learning_rate", values=[1e-2, 1e-3, 1e-4])
tuner = keras_tuner.RandomSearch(
hypermodel,
hyperparameters=hp,
# Prevents unlisted parameters from being tuned
tune_new_entries=False,
# Override the loss.
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
objective="val_accuracy",
max_trials=3,
overwrite=True,
directory="my_dir",
project_name="helloworld",
)
# Run the search
tuner.search(x_train, y_train, epochs=1, validation_data=(x_val, y_val))
tuner.search_space_summary()
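"""
As a final check, you can retrieve the hyperparameter values of the best trial to
see which `learning_rate` the tuner ended up picking:
"""

best_hp = tuner.get_best_hyperparameters(1)[0]
print(best_hp.values)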
|
keras-io/guides/keras_tuner/tailor_the_search_space.py/0
|
{
"file_path": "keras-io/guides/keras_tuner/tailor_the_search_space.py",
"repo_id": "keras-io",
"token_count": 2294
}
| 137 |
# Classification with KerasCV
**Author:** [lukewood](https://lukewood.xyz)<br>
**Date created:** 03/28/2023<br>
**Last modified:** 03/28/2023<br>
**Description:** Use KerasCV to train powerful image classifiers.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/guides/ipynb/keras_cv/classification_with_keras_cv.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/guides/keras_cv/classification_with_keras_cv.py)
Classification is the process of predicting a categorical label for a given
input image.
While classification is a relatively straightforward computer vision task,
modern approaches are still built from several complex components.
Luckily, KerasCV provides APIs to construct commonly used components.
This guide demonstrates KerasCV's modular approach to solving image
classification problems at three levels of complexity:
- Inference with a pretrained classifier
- Fine-tuning a pretrained backbone
- Training an image classifier from scratch
KerasCV uses Keras 3 to work with any of TensorFlow, PyTorch or Jax. In the
guide below, we will use the `jax` backend. This guide runs in
the TensorFlow or PyTorch backends with zero changes; simply update the
`KERAS_BACKEND` below.
We use Professor Keras, the official Keras mascot, as a
visual reference for the complexity of the material:

```python
!pip install -q --upgrade keras-cv
!pip install -q --upgrade keras # Upgrade to Keras 3.
```
```python
import os
os.environ["KERAS_BACKEND"] = "jax" # @param ["tensorflow", "jax", "torch"]
import json
import math
import numpy as np
import keras
from keras import losses
from keras import ops
from keras import optimizers
from keras.optimizers import schedules
from keras import metrics
import keras_cv
# Import tensorflow for `tf.data` and its preprocessing functions
import tensorflow as tf
import tensorflow_datasets as tfds
```
---
## Inference with a pretrained classifier

Let's get started with the simplest KerasCV API: a pretrained classifier.
In this example, we will construct a classifier that was
pretrained on the ImageNet dataset.
We'll use this model to solve the age old "Cat or Dog" problem.
The highest level module in KerasCV is a *task*. A *task* is a `keras.Model`
consisting of a (generally pretrained) backbone model and task-specific layers.
Here's an example using `keras_cv.models.ImageClassifier` with an
EfficientNetV2B0 Backbone.
EfficientNetV2B0 is a great starting model when constructing an image
classification pipeline.
This architecture manages to achieve high accuracy, while using a
parameter count of 7M.
If an EfficientNetV2B0 is not powerful enough for the task you are hoping to
solve, be sure to check out [KerasCV's other available Backbones](https://github.com/keras-team/keras-cv/tree/master/keras_cv/models/backbones)!
```python
classifier = keras_cv.models.ImageClassifier.from_preset(
"efficientnetv2_b0_imagenet_classifier"
)
```
You may notice a small deviation from the old `keras.applications` API, where
you would construct the class with `EfficientNetV2B0(weights="imagenet")`.
While the old API was great for classification, it did not scale effectively to
other use cases that required complex architectures, like object detection and
semantic segmentation.
Now that our classifier is built, let's apply it to this cute cat picture!
```python
filepath = keras.utils.get_file(origin="https://i.imgur.com/9i63gLN.jpg")
image = keras.utils.load_img(filepath)
image = np.array(image)
keras_cv.visualization.plot_image_gallery(
np.array([image]), rows=1, cols=1, value_range=(0, 255), show=True, scale=4
)
```

Next, let's get some predictions from our classifier:
```python
predictions = classifier.predict(np.expand_dims(image, axis=0))
```
<div class="k-default-codeblock">
```
1/1 ━━━━━━━━━━━━━━━━━━━━ 4s 4s/step
```
</div>
Predictions come in the form of softmax-ed category rankings.
We can find the index of the top classes using a simple argsort function:
```python
top_classes = predictions[0].argsort(axis=-1)
```
In order to decode the class mappings, we can construct a mapping from
category indices to ImageNet class names.
For convenience, I've stored the ImageNet class mapping in a GitHub gist.
Let's download and load it now.
```python
classes = keras.utils.get_file(
origin="https://gist.githubusercontent.com/LukeWood/62eebcd5c5c4a4d0e0b7845780f76d55/raw/fde63e5e4c09e2fa0a3436680f436bdcb8325aac/ImagenetClassnames.json"
)
with open(classes, "rb") as f:
classes = json.load(f)
```
<div class="k-default-codeblock">
```
Downloading data from https://gist.githubusercontent.com/LukeWood/62eebcd5c5c4a4d0e0b7845780f76d55/raw/fde63e5e4c09e2fa0a3436680f436bdcb8325aac/ImagenetClassnames.json
33567/33567 ━━━━━━━━━━━━━━━━━━━━ 0s 0us/step
```
</div>
Now we can simply look up the class names via index:
```python
top_two = [classes[str(i)] for i in top_classes[-2:]]
print("Top two classes are:", top_two)
```
<div class="k-default-codeblock">
```
Top two classes are: ['Egyptian cat', 'velvet']
```
</div>
Great! Both of these appear to be correct!
However, one of the classes is "Velvet".
We're trying to classify Cats VS Dogs.
We don't care about the velvet blanket!
Ideally, we'd have a classifier that only performs computation to determine if
an image is a cat or a dog, and has all of its resources dedicated to this task.
This can be solved by fine tuning our own classifier.
# Fine tuning a pretrained classifier

When labeled images specific to our task are available, fine-tuning a custom
classifier can improve performance.
If we want to train a Cats vs Dogs Classifier, using explicitly labeled Cat vs
Dog data should perform better than the generic classifier!
For many tasks, no relevant pretrained model
will be available (e.g., categorizing images specific to your application).
First, let's get started by loading some data:
```python
BATCH_SIZE = 32
IMAGE_SIZE = (224, 224)
AUTOTUNE = tf.data.AUTOTUNE
tfds.disable_progress_bar()
data, dataset_info = tfds.load("cats_vs_dogs", with_info=True, as_supervised=True)
train_steps_per_epoch = dataset_info.splits["train"].num_examples // BATCH_SIZE
train_dataset = data["train"]
num_classes = dataset_info.features["label"].num_classes
resizing = keras_cv.layers.Resizing(
IMAGE_SIZE[0], IMAGE_SIZE[1], crop_to_aspect_ratio=True
)
def preprocess_inputs(image, label):
image = tf.cast(image, tf.float32)
    # Statically resize images as we only iterate the dataset once.
return resizing(image), tf.one_hot(label, num_classes)
# Shuffle the dataset to increase diversity of batches.
# 10*BATCH_SIZE follows the assumption that bigger machines can handle bigger
# shuffle buffers.
train_dataset = train_dataset.shuffle(
10 * BATCH_SIZE, reshuffle_each_iteration=True
).map(preprocess_inputs, num_parallel_calls=AUTOTUNE)
train_dataset = train_dataset.batch(BATCH_SIZE)
images = next(iter(train_dataset.take(1)))[0]
keras_cv.visualization.plot_image_gallery(images, value_range=(0, 255))
```


Meow!
Next let's construct our model.
The use of imagenet in the preset name indicates that the backbone was
pretrained on the ImageNet dataset.
Pretrained backbones extract more information from our labeled examples by
leveraging patterns extracted from potentially much larger datasets.
Next, let's put together our classifier:
```python
model = keras_cv.models.ImageClassifier.from_preset(
"efficientnetv2_b0_imagenet", num_classes=2
)
model.compile(
loss="categorical_crossentropy",
optimizer=keras.optimizers.SGD(learning_rate=0.01),
metrics=["accuracy"],
)
```
<div class="k-default-codeblock">
```
Downloading data from https://storage.googleapis.com/keras-cv/models/efficientnetv2b0/imagenet/classification-v0-notop.h5
24029184/24029184 ━━━━━━━━━━━━━━━━━━━━ 1s 0us/step
```
</div>
Here our classifier is just a simple `keras.Sequential`.
All that is left to do is call `model.fit()`:
```python
model.fit(train_dataset)
```
<div class="k-default-codeblock">
```
216/727 ━━━━━[37m━━━━━━━━━━━━━━━ 15s 30ms/step - accuracy: 0.8433 - loss: 0.5113
Corrupt JPEG data: 99 extraneous bytes before marker 0xd9
254/727 ━━━━━━[37m━━━━━━━━━━━━━━ 14s 30ms/step - accuracy: 0.8535 - loss: 0.4941
Warning: unknown JFIF revision number 0.00
266/727 ━━━━━━━[37m━━━━━━━━━━━━━ 14s 30ms/step - accuracy: 0.8563 - loss: 0.4891
Corrupt JPEG data: 396 extraneous bytes before marker 0xd9
310/727 ━━━━━━━━[37m━━━━━━━━━━━━ 12s 30ms/step - accuracy: 0.8651 - loss: 0.4719
Corrupt JPEG data: 162 extraneous bytes before marker 0xd9
358/727 ━━━━━━━━━[37m━━━━━━━━━━━ 11s 30ms/step - accuracy: 0.8729 - loss: 0.4550
Corrupt JPEG data: 252 extraneous bytes before marker 0xd9
Corrupt JPEG data: 65 extraneous bytes before marker 0xd9
374/727 ━━━━━━━━━━[37m━━━━━━━━━━ 10s 30ms/step - accuracy: 0.8752 - loss: 0.4497
Corrupt JPEG data: 1403 extraneous bytes before marker 0xd9
534/727 ━━━━━━━━━━━━━━[37m━━━━━━ 5s 30ms/step - accuracy: 0.8921 - loss: 0.4056
Corrupt JPEG data: 214 extraneous bytes before marker 0xd9
636/727 ━━━━━━━━━━━━━━━━━[37m━━━ 2s 30ms/step - accuracy: 0.8993 - loss: 0.3837
Corrupt JPEG data: 2226 extraneous bytes before marker 0xd9
654/727 ━━━━━━━━━━━━━━━━━[37m━━━ 2s 30ms/step - accuracy: 0.9004 - loss: 0.3802
Corrupt JPEG data: 128 extraneous bytes before marker 0xd9
668/727 ━━━━━━━━━━━━━━━━━━[37m━━ 1s 30ms/step - accuracy: 0.9012 - loss: 0.3775
Corrupt JPEG data: 239 extraneous bytes before marker 0xd9
704/727 ━━━━━━━━━━━━━━━━━━━[37m━ 0s 30ms/step - accuracy: 0.9032 - loss: 0.3709
Corrupt JPEG data: 1153 extraneous bytes before marker 0xd9
712/727 ━━━━━━━━━━━━━━━━━━━[37m━ 0s 30ms/step - accuracy: 0.9036 - loss: 0.3695
Corrupt JPEG data: 228 extraneous bytes before marker 0xd9
727/727 ━━━━━━━━━━━━━━━━━━━━ 69s 62ms/step - accuracy: 0.9045 - loss: 0.3667
<keras.src.callbacks.history.History at 0x7fce380df100>
```
</div>
Let's look at how our model performs after the fine tuning:
```python
predictions = model.predict(np.expand_dims(image, axis=0))
classes = {0: "cat", 1: "dog"}
print("Top class is:", classes[predictions[0].argmax()])
```
<div class="k-default-codeblock">
```
1/1 ━━━━━━━━━━━━━━━━━━━━ 3s 3s/step
Top class is: cat
```
</div>
Awesome - looks like the model correctly classified the image.
# Train a Classifier from Scratch

Now that we've gotten our hands dirty with classification, let's take on one
last task: training a classification model from scratch!
A standard benchmark for image classification is the ImageNet dataset, however
due to licensing constraints we will use the CalTech 101 image classification
dataset in this tutorial.
While we use the simpler CalTech 101 dataset in this guide, the same training
template may be used on ImageNet to achieve near state-of-the-art scores.
Let's start out by tackling data loading:
```python
NUM_CLASSES = 101
# Change epochs to 100~ to fully train.
EPOCHS = 1
def package_inputs(image, label):
return {"images": image, "labels": tf.one_hot(label, NUM_CLASSES)}
train_ds, eval_ds = tfds.load(
"caltech101", split=["train", "test"], as_supervised="true"
)
train_ds = train_ds.map(package_inputs, num_parallel_calls=tf.data.AUTOTUNE)
eval_ds = eval_ds.map(package_inputs, num_parallel_calls=tf.data.AUTOTUNE)
train_ds = train_ds.shuffle(BATCH_SIZE * 16)
```
<div class="k-default-codeblock">
```
Downloading and preparing dataset 125.64 MiB (download: 125.64 MiB, generated: 132.86 MiB, total: 258.50 MiB) to /usr/local/google/home/rameshsampath/tensorflow_datasets/caltech101/3.0.1...
Dataset caltech101 downloaded and prepared to /usr/local/google/home/rameshsampath/tensorflow_datasets/caltech101/3.0.1. Subsequent calls will reuse this data.
```
</div>
The CalTech101 dataset has different sizes for every image, so we use the
`ragged_batch()` API to batch them together while maintaining each individual
image's shape information.
```python
train_ds = train_ds.ragged_batch(BATCH_SIZE)
eval_ds = eval_ds.ragged_batch(BATCH_SIZE)
batch = next(iter(train_ds.take(1)))
image_batch = batch["images"]
label_batch = batch["labels"]
keras_cv.visualization.plot_image_gallery(
image_batch.to_tensor(),
rows=3,
cols=3,
value_range=(0, 255),
show=True,
)
```

---
## Data Augmentation
In our previous fine-tuning example, we performed a static resizing operation and
did not utilize any image augmentation.
This is because a single pass over the training set was sufficient to achieve
decent results.
When training to solve a more difficult task, you'll want to include data
augmentation in your data pipeline.
Data augmentation is a technique to make your model robust to changes in input
data such as lighting, cropping, and orientation.
KerasCV includes some of the most useful augmentations in the `keras_cv.layers`
API.
Creating an optimal pipeline of augmentations is an art, but in this section of
the guide we'll offer some tips on best practices for classification.
One caveat to be aware of with image data augmentation is that you must be careful
to not shift your augmented data distribution too far from the original data
distribution.
The goal is to prevent overfitting and increase generalization,
but samples that lie completely out of the data distribution simply add noise to
the training process.
The first augmentation we'll use is `RandomFlip`.
This augmentation behaves more or less how you'd expect: it either flips the
image or not.
While this augmentation is useful in CalTech101 and ImageNet, note that it should
not be used on tasks where the data distribution is not vertical-mirror invariant.
An example of a dataset where this occurs is MNIST hand written digits.
Flipping a `6` over the
vertical axis will make the digit appear more like a `7` than a `6`, but the
label will still show a `6`.
```python
random_flip = keras_cv.layers.RandomFlip()
augmenters = [random_flip]
image_batch = random_flip(image_batch)
keras_cv.visualization.plot_image_gallery(
image_batch.to_tensor(),
rows=3,
cols=3,
value_range=(0, 255),
show=True,
)
```

Half of the images have been flipped!
The next augmentation we'll use is `RandomCropAndResize`.
This operation selects a random subset of the image, then resizes it to the
provided target size.
By using this augmentation, we force our classifier to become spatially invariant.
Additionally, this layer accepts an `aspect_ratio_factor` which can be used to
distort the aspect ratio of the image.
While this can improve model performance, it should be used with caution.
It is very easy for an aspect ratio distortion to shift a sample too far from
the original training set's data distribution.
Remember - the goal of data augmentation is to produce more training samples
that align with the data distribution of your training set!
`RandomCropAndResize` can also handle `tf.RaggedTensor` inputs. In the
CalTech101 image dataset images come in a wide variety of sizes.
As such they cannot easily be batched together into a dense training batch.
Luckily, `RandomCropAndResize` handles the Ragged -> Dense conversion process
for you!
Let's add a `RandomCropAndResize` to our set of augmentations:
```python
crop_and_resize = keras_cv.layers.RandomCropAndResize(
target_size=IMAGE_SIZE,
crop_area_factor=(0.8, 1.0),
aspect_ratio_factor=(0.9, 1.1),
)
augmenters += [crop_and_resize]
image_batch = crop_and_resize(image_batch)
keras_cv.visualization.plot_image_gallery(
image_batch,
rows=3,
cols=3,
value_range=(0, 255),
show=True,
)
```

Great! We are now working with a batch of dense images.
Next up, let's add some spatial and color-based jitter to our training set.
This will allow us to produce a classifier that is robust to lighting flickers,
shadows, and more.
There are limitless ways to augment an image by altering color and spatial
features, but perhaps the most battle tested technique is
[`RandAugment`](https://arxiv.org/abs/1909.13719).
`RandAugment` is actually a set of 10 different augmentations:
`AutoContrast`, `Equalize`, `Solarize`, `RandomColorJitter`, `RandomContrast`,
`RandomBrightness`, `ShearX`, `ShearY`, `TranslateX` and `TranslateY`.
Each time the layer is applied, `augmentations_per_image` of these augmenters are
sampled for each image, and a random magnitude factor is sampled for each.
These augmentations are then applied sequentially.
KerasCV makes tuning these parameters easy using the `augmentations_per_image`
and `magnitude` parameters!
Let's take it for a spin:
```python
rand_augment = keras_cv.layers.RandAugment(
augmentations_per_image=3,
value_range=(0, 255),
magnitude=0.3,
magnitude_stddev=0.2,
rate=1.0,
)
augmenters += [rand_augment]
image_batch = rand_augment(image_batch)
keras_cv.visualization.plot_image_gallery(
image_batch,
rows=3,
cols=3,
value_range=(0, 255),
show=True,
)
```

Looks great; but we're not done yet!
What if an image is missing one critical feature of a class? For example, what
if a leaf is blocking the view of a cat's ear, but our classifier learned to
classify cats simply by observing their ears?
One easy approach to tackling this is to use `RandomCutout`, which randomly
strips out a sub-section of the image:
```python
random_cutout = keras_cv.layers.RandomCutout(width_factor=0.4, height_factor=0.4)
keras_cv.visualization.plot_image_gallery(
random_cutout(image_batch),
rows=3,
cols=3,
value_range=(0, 255),
show=True,
)
```

While this tackles the problem reasonably well, it can cause the classifier to
develop responses to borders between features and black pixel areas caused by
the cutout.
[`CutMix`](https://arxiv.org/abs/1905.04899) solves the same issue by using
a more complex (and more effective) technique.
Instead of replacing the cut-out areas with black pixels, `CutMix` replaces
these regions with regions of other images sampled from within your training
set!
Following this replacement, the image's classification label is updated to be a
blend of the original and mixed image's class label.
What does this look like in practice? Let's check it out:
```python
cut_mix = keras_cv.layers.CutMix()
# CutMix needs to modify both images and labels
inputs = {"images": image_batch, "labels": label_batch}
keras_cv.visualization.plot_image_gallery(
cut_mix(inputs)["images"],
rows=3,
cols=3,
value_range=(0, 255),
show=True,
)
```

Let's hold off from adding it to our augmenter for a minute - more on that
soon!
Next, let's look into `MixUp()`.
Unfortunately, while `MixUp()` has been empirically shown to *substantially*
improve both the robustness and the generalization of the trained model,
it is not well-understood why such improvement occurs... but
a little alchemy never hurt anyone!
`MixUp()` works by sampling two images from a batch, then proceeding to
literally blend together their pixel intensities as well as their classification
labels.
Let's see it in action:
```python
mix_up = keras_cv.layers.MixUp()
# MixUp needs to modify both images and labels
inputs = {"images": image_batch, "labels": label_batch}
keras_cv.visualization.plot_image_gallery(
mix_up(inputs)["images"],
rows=3,
cols=3,
value_range=(0, 255),
show=True,
)
```

If you look closely, you'll see that the images have been blended together.
Instead of applying `CutMix()` and `MixUp()` to every image, we pick
one or the other to apply to each batch.
This can be expressed using `keras_cv.layers.RandomChoice()`
```python
cut_mix_or_mix_up = keras_cv.layers.RandomChoice([cut_mix, mix_up], batchwise=True)
augmenters += [cut_mix_or_mix_up]
```
Now let's apply our final augmenter to the training data:
```python
def create_augmenter_fn(augmenters):
def augmenter_fn(inputs):
for augmenter in augmenters:
inputs = augmenter(inputs)
return inputs
return augmenter_fn
augmenter_fn = create_augmenter_fn(augmenters)
train_ds = train_ds.map(augmenter_fn, num_parallel_calls=tf.data.AUTOTUNE)
image_batch = next(iter(train_ds.take(1)))["images"]
keras_cv.visualization.plot_image_gallery(
image_batch,
rows=3,
cols=3,
value_range=(0, 255),
show=True,
)
```

We also need to resize our evaluation set to get dense batches of the image size
expected by our model. We use the deterministic `keras_cv.layers.Resizing` in
this case to avoid adding noise to our evaluation metric.
```python
inference_resizing = keras_cv.layers.Resizing(
IMAGE_SIZE[0], IMAGE_SIZE[1], crop_to_aspect_ratio=True
)
eval_ds = eval_ds.map(inference_resizing, num_parallel_calls=tf.data.AUTOTUNE)
image_batch = next(iter(eval_ds.take(1)))["images"]
keras_cv.visualization.plot_image_gallery(
image_batch,
rows=3,
cols=3,
value_range=(0, 255),
show=True,
)
```

Finally, let's unpackage our datasets and prepare to pass them to `model.fit()`,
which accepts a tuple of `(images, labels)`.
```python
def unpackage_dict(inputs):
return inputs["images"], inputs["labels"]
train_ds = train_ds.map(unpackage_dict, num_parallel_calls=tf.data.AUTOTUNE)
eval_ds = eval_ds.map(unpackage_dict, num_parallel_calls=tf.data.AUTOTUNE)
```
Data augmentation is by far the hardest piece of training a modern
classifier.
Congratulations on making it this far!
---
## Optimizer Tuning
To achieve optimal performance, we need to use a learning rate schedule instead
of a single learning rate. While we won't go into detail on the Cosine decay
with warmup schedule used here, [you can read more about it
here](https://scorrea92.medium.com/cosine-learning-rate-decay-e8b50aa455b).
```python
def lr_warmup_cosine_decay(
global_step,
warmup_steps,
hold=0,
total_steps=0,
start_lr=0.0,
target_lr=1e-2,
):
# Cosine decay
learning_rate = (
0.5
* target_lr
* (
1
+ ops.cos(
math.pi
* ops.convert_to_tensor(
global_step - warmup_steps - hold, dtype="float32"
)
/ ops.convert_to_tensor(
total_steps - warmup_steps - hold, dtype="float32"
)
)
)
)
warmup_lr = target_lr * (global_step / warmup_steps)
if hold > 0:
learning_rate = ops.where(
global_step > warmup_steps + hold, learning_rate, target_lr
)
learning_rate = ops.where(global_step < warmup_steps, warmup_lr, learning_rate)
return learning_rate
class WarmUpCosineDecay(schedules.LearningRateSchedule):
def __init__(self, warmup_steps, total_steps, hold, start_lr=0.0, target_lr=1e-2):
super().__init__()
self.start_lr = start_lr
self.target_lr = target_lr
self.warmup_steps = warmup_steps
self.total_steps = total_steps
self.hold = hold
def __call__(self, step):
lr = lr_warmup_cosine_decay(
global_step=step,
total_steps=self.total_steps,
warmup_steps=self.warmup_steps,
start_lr=self.start_lr,
target_lr=self.target_lr,
hold=self.hold,
)
return ops.where(step > self.total_steps, 0.0, lr)
```
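To sanity-check the schedule, you can evaluate it over the training steps and plot
the resulting learning rates. The snippet below is a quick sketch: it assumes
`matplotlib` is available and uses arbitrary step counts purely for illustration.

```python
import matplotlib.pyplot as plt

demo_schedule = WarmUpCosineDecay(
    warmup_steps=100, total_steps=1000, hold=450, start_lr=0.0, target_lr=1e-2
)
demo_steps = range(1000)
plt.plot(demo_steps, [float(demo_schedule(step)) for step in demo_steps])
plt.xlabel("step")
plt.ylabel("learning rate")
plt.show()
```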

The schedule looks as we expect.
Next let's construct this optimizer:
```python
total_images = 9000
total_steps = (total_images // BATCH_SIZE) * EPOCHS
warmup_steps = int(0.1 * total_steps)
hold_steps = int(0.45 * total_steps)
schedule = WarmUpCosineDecay(
start_lr=0.05,
target_lr=1e-2,
warmup_steps=warmup_steps,
total_steps=total_steps,
hold=hold_steps,
)
optimizer = optimizers.SGD(
weight_decay=5e-4,
learning_rate=schedule,
momentum=0.9,
)
```
At long last, we can now build our model and call `fit()`!
`keras_cv.models.EfficientNetV2B0Backbone()` is a convenience alias for
`keras_cv.models.EfficientNetV2Backbone.from_preset('efficientnetv2_b0')`.
Note that this preset does not come with any pretrained weights.
```python
backbone = keras_cv.models.EfficientNetV2B0Backbone()
model = keras.Sequential(
[
backbone,
keras.layers.GlobalMaxPooling2D(),
keras.layers.Dropout(rate=0.5),
keras.layers.Dense(101, activation="softmax"),
]
)
```
Since the labels produced by MixUp() and CutMix() are somewhat artificial, we
employ label smoothing to prevent the model from overfitting to artifacts of
this augmentation process.
```python
loss = losses.CategoricalCrossentropy(label_smoothing=0.1)
```
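Concretely, a smoothing factor of 0.1 moves each one-hot target slightly toward the
uniform distribution over the 101 classes. Here is a small NumPy sketch (purely for
illustration, using an arbitrary class index):

```python
demo_one_hot = np.zeros(101)
demo_one_hot[42] = 1.0  # arbitrary "true" class
demo_smoothed = demo_one_hot * (1 - 0.1) + 0.1 / 101
print(demo_smoothed[42], demo_smoothed[0])  # ~0.901 for the true class, ~0.001 elsewhere
```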
Let's compile our model:
```python
model.compile(
loss=loss,
optimizer=optimizer,
metrics=[
metrics.CategoricalAccuracy(),
metrics.TopKCategoricalAccuracy(k=5),
],
)
```
and finally call fit().
```python
model.fit(
train_ds,
epochs=EPOCHS,
validation_data=eval_ds,
)
```
<div class="k-default-codeblock">
```
96/96 ━━━━━━━━━━━━━━━━━━━━ 65s 462ms/step - categorical_accuracy: 0.0068 - loss: 6.6096 - top_k_categorical_accuracy: 0.0497 - val_categorical_accuracy: 0.0122 - val_loss: 4.7151 - val_top_k_categorical_accuracy: 0.1596
<keras.src.callbacks.history.History at 0x7fc7142c2e80>
```
</div>
Congratulations! You now know how to train a powerful image classifier from
scratch in KerasCV.
Depending on the availability of labeled data for your application, training
from scratch may or may not be more powerful than using transfer learning in
addition to the data augmentations discussed above. For smaller datasets,
pretrained models generally produce high accuracy and faster convergence.
---
## Conclusions
While image classification is perhaps the simplest problem in computer vision,
the modern landscape has numerous complex components.
Luckily, KerasCV offers robust, production-grade APIs to make assembling most
of these components possible in one line of code.
Through the use of KerasCV's `ImageClassifier` API, pretrained weights, and
KerasCV data augmentations you can assemble everything you need to train a
powerful classifier in a few hundred lines of code!
As a follow-up exercise, give the following a try:
- Fine tune a KerasCV classifier on your own dataset
- Learn more about [KerasCV's data augmentations](https://keras.io/guides/keras_cv/cut_mix_mix_up_and_rand_augment/)
- Check out how we train our models on [ImageNet](https://github.com/keras-team/keras-cv/blob/master/examples/training/classification/imagenet/basic_training.py)
|
keras-io/guides/md/keras_cv/classification_with_keras_cv.md/0
|
{
"file_path": "keras-io/guides/md/keras_cv/classification_with_keras_cv.md",
"repo_id": "keras-io",
"token_count": 10091
}
| 138 |
# Visualize the hyperparameter tuning process
**Author:** Haifeng Jin<br>
**Date created:** 2021/06/25<br>
**Last modified:** 2021/06/05<br>
**Description:** Using TensorBoard to visualize the hyperparameter tuning process in KerasTuner.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/guides/ipynb/keras_tuner/visualize_tuning.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/guides/keras_tuner/visualize_tuning.py)
```python
!pip install keras-tuner -q
```
---
## Introduction
KerasTuner prints the logs to screen including the values of the
hyperparameters in each trial for the user to monitor the progress. However,
reading the logs is not intuitive enough to sense the influence the
hyperparameters have on the results. Therefore, we provide a method to
visualize the hyperparameter values and the corresponding evaluation results
with interactive figures using TensorBoard.
[TensorBoard](https://www.tensorflow.org/tensorboard) is a useful tool for
visualizing machine learning experiments. It can monitor the losses and
metrics during the model training and visualize the model architectures.
Running KerasTuner with TensorBoard will give you additional features for
visualizing hyperparameter tuning results using its HParams plugin.
We will use a simple example of tuning a model for the MNIST image
classification dataset to show how to use KerasTuner with TensorBoard.
The first step is to download and format the data.
```python
import numpy as np
import keras_tuner
import keras
from keras import layers
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
# Normalize the pixel values to the range of [0, 1].
x_train = x_train.astype("float32") / 255
x_test = x_test.astype("float32") / 255
# Add the channel dimension to the images.
x_train = np.expand_dims(x_train, -1)
x_test = np.expand_dims(x_test, -1)
# Print the shapes of the data.
print(x_train.shape)
print(y_train.shape)
print(x_test.shape)
print(y_test.shape)
```
<div class="k-default-codeblock">
```
(60000, 28, 28, 1)
(60000,)
(10000, 28, 28, 1)
(10000,)
```
</div>
Then, we write a `build_model` function to build the model with hyperparameters
and return the model. The hyperparameters include the type of model to use
(multi-layer perceptron or convolutional neural network), the number of layers,
the number of units or filters, and whether to use dropout.
```python
def build_model(hp):
inputs = keras.Input(shape=(28, 28, 1))
# Model type can be MLP or CNN.
model_type = hp.Choice("model_type", ["mlp", "cnn"])
x = inputs
if model_type == "mlp":
x = layers.Flatten()(x)
# Number of layers of the MLP is a hyperparameter.
for i in range(hp.Int("mlp_layers", 1, 3)):
            # The number of units of each layer is a
            # separate hyperparameter, with a different name for each layer.
x = layers.Dense(
units=hp.Int(f"units_{i}", 32, 128, step=32),
activation="relu",
)(x)
else:
# Number of layers of the CNN is also a hyperparameter.
for i in range(hp.Int("cnn_layers", 1, 3)):
x = layers.Conv2D(
hp.Int(f"filters_{i}", 32, 128, step=32),
kernel_size=(3, 3),
activation="relu",
)(x)
x = layers.MaxPooling2D(pool_size=(2, 2))(x)
x = layers.Flatten()(x)
    # A hyperparameter for whether to use a dropout layer.
if hp.Boolean("dropout"):
x = layers.Dropout(0.5)(x)
# The last layer contains 10 units,
# which is the same as the number of classes.
outputs = layers.Dense(units=10, activation="softmax")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
# Compile the model.
model.compile(
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
optimizer="adam",
)
return model
```
We can do a quick test of the models to check if they build successfully for
both the CNN and the MLP.
```python
# Initialize the `HyperParameters` and set the values.
hp = keras_tuner.HyperParameters()
hp.values["model_type"] = "cnn"
# Build the model using the `HyperParameters`.
model = build_model(hp)
# Test if the model runs with our data.
model(x_train[:100])
# Print a summary of the model.
model.summary()
# Do the same for MLP model.
hp.values["model_type"] = "mlp"
model = build_model(hp)
model(x_train[:100])
model.summary()
```
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "functional_1"</span>
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┓
┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━┩
│ input_layer (<span style="color: #0087ff; text-decoration-color: #0087ff">InputLayer</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv2d (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">26</span>, <span style="color: #00af00; text-decoration-color: #00af00">26</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">320</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ max_pooling2d (<span style="color: #0087ff; text-decoration-color: #0087ff">MaxPooling2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">13</span>, <span style="color: #00af00; text-decoration-color: #00af00">13</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ flatten (<span style="color: #0087ff; text-decoration-color: #0087ff">Flatten</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">5408</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ dense (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">10</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">54,090</span> │
└─────────────────────────────────┴───────────────────────────┴────────────┘
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">54,410</span> (212.54 KB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">54,410</span> (212.54 KB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">0</span> (0.00 B)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "functional_3"</span>
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┓
┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━┩
│ input_layer_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">InputLayer</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ flatten_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">Flatten</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">784</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ dense_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">25,120</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ dense_2 (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">10</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">330</span> │
└─────────────────────────────────┴───────────────────────────┴────────────┘
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">25,450</span> (99.41 KB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">25,450</span> (99.41 KB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">0</span> (0.00 B)
</pre>
Initialize the `RandomSearch` tuner with 10 trials, using validation
accuracy as the metric for selecting models.
```python
tuner = keras_tuner.RandomSearch(
build_model,
max_trials=10,
# Do not resume the previous search in the same directory.
overwrite=True,
objective="val_accuracy",
# Set a directory to store the intermediate results.
directory="/tmp/tb",
)
```
Start the search by calling `tuner.search(...)`. To use TensorBoard, we need
to pass a `keras.callbacks.TensorBoard` instance to the callbacks.
```python
tuner.search(
x_train,
y_train,
validation_split=0.2,
epochs=2,
# Use the TensorBoard callback.
    # The logs will be written to "/tmp/tb_logs".
callbacks=[keras.callbacks.TensorBoard("/tmp/tb_logs")],
)
```
<div class="k-default-codeblock">
```
Trial 10 Complete [00h 00m 06s]
val_accuracy: 0.9617499709129333
```
</div>
<div class="k-default-codeblock">
```
Best val_accuracy So Far: 0.9837499856948853
Total elapsed time: 00h 08m 32s
```
</div>
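Before turning to TensorBoard, note that you can also inspect the results
programmatically. The snippet below is a small optional sketch (it only uses
standard KerasTuner methods and was not part of the original run):
```python
# Retrieve the best hyperparameter values found by the search.
best_hps = tuner.get_best_hyperparameters(num_trials=1)[0]
print(best_hps.values)
# Print a summary of all trials, ranked by the objective.
tuner.results_summary()
```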
If running in Colab, the following two commands will show you the TensorBoard
inside Colab.
`%load_ext tensorboard`
`%tensorboard --logdir /tmp/tb_logs`
You have access to all the common features of TensorBoard. For example, you
can view the loss and metrics curves and visualize the computational graph of
the models in different trials.


In addition to these features, we also have an HParams tab, in which there are
three views. In the table view, you can view the 10 different trials in a
table with the different hyperparameter values and evaluation metrics.

On the left side, you can specify the filters for certain hyperparameters. For
example, you can specify to only view the MLP models without the dropout layer
and with 1 to 2 dense layers.

Besides the table view, it also provides two other views: the parallel
coordinates view and the scatter plot matrix view. They are just different visualization
methods for the same data. You can still use the panel on the left to filter
the results.
In the parallel coordinates view, each colored line is a trial.
The axes are the hyperparameters and evaluation metrics.

In the scatter plot matrix view, each dot is a trial. The plots are projections
of the trials on planes with different hyperparameters and metrics as the axes.

|
keras-io/guides/md/keras_tuner/visualize_tuning.md/0
|
{
"file_path": "keras-io/guides/md/keras_tuner/visualize_tuning.md",
"repo_id": "keras-io",
"token_count": 5755
}
| 139 |
"""
Title: The Sequential model
Author: [fchollet](https://twitter.com/fchollet)
Date created: 2020/04/12
Last modified: 2023/06/25
Description: Complete guide to the Sequential model.
Accelerator: GPU
"""
"""
## Setup
"""
import keras
from keras import layers
from keras import ops
"""
## When to use a Sequential model
A `Sequential` model is appropriate for **a plain stack of layers**
where each layer has **exactly one input tensor and one output tensor**.
Schematically, the following `Sequential` model:
"""
# Define Sequential model with 3 layers
model = keras.Sequential(
[
layers.Dense(2, activation="relu", name="layer1"),
layers.Dense(3, activation="relu", name="layer2"),
layers.Dense(4, name="layer3"),
]
)
# Call model on a test input
x = ops.ones((3, 3))
y = model(x)
"""
is equivalent to this function:
"""
# Create 3 layers
layer1 = layers.Dense(2, activation="relu", name="layer1")
layer2 = layers.Dense(3, activation="relu", name="layer2")
layer3 = layers.Dense(4, name="layer3")
# Call layers on a test input
x = ops.ones((3, 3))
y = layer3(layer2(layer1(x)))
"""
A Sequential model is **not appropriate** when:
- Your model has multiple inputs or multiple outputs
- Any of your layers has multiple inputs or multiple outputs
- You need to do layer sharing
- You want non-linear topology (e.g. a residual connection, a multi-branch
model); a minimal sketch of such a model follows right after this list
"""
"""
## Creating a Sequential model
You can create a Sequential model by passing a list of layers to the Sequential
constructor:
"""
model = keras.Sequential(
[
layers.Dense(2, activation="relu"),
layers.Dense(3, activation="relu"),
layers.Dense(4),
]
)
"""
Its layers are accessible via the `layers` attribute:
"""
model.layers
"""
You can also create a Sequential model incrementally via the `add()` method:
"""
model = keras.Sequential()
model.add(layers.Dense(2, activation="relu"))
model.add(layers.Dense(3, activation="relu"))
model.add(layers.Dense(4))
"""
Note that there's also a corresponding `pop()` method to remove layers:
a Sequential model behaves very much like a list of layers.
"""
model.pop()
print(len(model.layers)) # 2
"""
Also note that the Sequential constructor accepts a `name` argument, just like
any layer or model in Keras. This is useful to annotate TensorBoard graphs
with semantically meaningful names.
"""
model = keras.Sequential(name="my_sequential")
model.add(layers.Dense(2, activation="relu", name="layer1"))
model.add(layers.Dense(3, activation="relu", name="layer2"))
model.add(layers.Dense(4, name="layer3"))
"""
## Specifying the input shape in advance
Generally, all layers in Keras need to know the shape of their inputs
in order to be able to create their weights. So when you create a layer like
this, initially, it has no weights:
"""
layer = layers.Dense(3)
layer.weights # Empty
"""
It creates its weights the first time it is called on an input, since the shape
of the weights depends on the shape of the inputs:
"""
# Call layer on a test input
x = ops.ones((1, 4))
y = layer(x)
layer.weights # Now it has weights, of shape (4, 3) and (3,)
"""
Naturally, this also applies to Sequential models. When you instantiate a
Sequential model without an input shape, it isn't "built": it has no weights
(and calling
`model.weights` results in an error stating just this). The weights are created
when the model first sees some input data:
"""
model = keras.Sequential(
[
layers.Dense(2, activation="relu"),
layers.Dense(3, activation="relu"),
layers.Dense(4),
]
) # No weights at this stage!
# At this point, you can't do this:
# model.weights
# You also can't do this:
# model.summary()
# Call the model on a test input
x = ops.ones((1, 4))
y = model(x)
print("Number of weights after calling the model:", len(model.weights)) # 6
"""
Once a model is "built", you can call its `summary()` method to display its
contents:
"""
model.summary()
"""
However, it can be very useful when building a Sequential model incrementally
to be able to display the summary of the model so far, including the current
output shape. In this case, you should start your model by passing an `Input`
object to your model, so that it knows its input shape from the start:
"""
model = keras.Sequential()
model.add(keras.Input(shape=(4,)))
model.add(layers.Dense(2, activation="relu"))
model.summary()
"""
Note that the `Input` object is not displayed as part of `model.layers`, since
it isn't a layer:
"""
model.layers
"""
Models built with a predefined input shape like this always have weights (even
before seeing any data) and always have a defined output shape.
In general, it's a recommended best practice to always specify the input shape
of a Sequential model in advance if you know what it is.
"""
"""
## A common debugging workflow: `add()` + `summary()`
When building a new Sequential architecture, it's useful to incrementally stack
layers with `add()` and frequently print model summaries. For instance, this
enables you to monitor how a stack of `Conv2D` and `MaxPooling2D` layers is
downsampling image feature maps:
"""
model = keras.Sequential()
model.add(keras.Input(shape=(250, 250, 3))) # 250x250 RGB images
model.add(layers.Conv2D(32, 5, strides=2, activation="relu"))
model.add(layers.Conv2D(32, 3, activation="relu"))
model.add(layers.MaxPooling2D(3))
# Can you guess what the current output shape is at this point? Probably not.
# Let's just print it:
model.summary()
# The answer was: (40, 40, 32), so we can keep downsampling...
model.add(layers.Conv2D(32, 3, activation="relu"))
model.add(layers.Conv2D(32, 3, activation="relu"))
model.add(layers.MaxPooling2D(3))
model.add(layers.Conv2D(32, 3, activation="relu"))
model.add(layers.Conv2D(32, 3, activation="relu"))
model.add(layers.MaxPooling2D(2))
# And now?
model.summary()
# Now that we have 4x4 feature maps, time to apply global max pooling.
model.add(layers.GlobalMaxPooling2D())
# Finally, we add a classification layer.
model.add(layers.Dense(10))
"""
Very practical, right?
"""
"""
## What to do once you have a model
Once your model architecture is ready, you will want to:
- Train your model, evaluate it, and run inference. See our
[guide to training & evaluation with the built-in loops](
/guides/training_with_built_in_methods/)
- Save your model to disk and restore it. See our
[guide to serialization & saving](/guides/serialization_and_saving/).
"""
"""
## Feature extraction with a Sequential model
Once a Sequential model has been built, it behaves like a
[Functional API model](/guides/functional_api/).
This means that every layer has an `input`
and `output` attribute. These attributes can be used to do neat things, like
quickly creating a model that extracts the outputs of all intermediate layers in a
Sequential model:
"""
initial_model = keras.Sequential(
[
keras.Input(shape=(250, 250, 3)),
layers.Conv2D(32, 5, strides=2, activation="relu"),
layers.Conv2D(32, 3, activation="relu"),
layers.Conv2D(32, 3, activation="relu"),
]
)
feature_extractor = keras.Model(
inputs=initial_model.inputs,
outputs=[layer.output for layer in initial_model.layers],
)
# Call feature extractor on test input.
x = ops.ones((1, 250, 250, 3))
features = feature_extractor(x)
"""
Here's a similar example that only extracts features from one layer:
"""
initial_model = keras.Sequential(
[
keras.Input(shape=(250, 250, 3)),
layers.Conv2D(32, 5, strides=2, activation="relu"),
layers.Conv2D(32, 3, activation="relu", name="my_intermediate_layer"),
layers.Conv2D(32, 3, activation="relu"),
]
)
feature_extractor = keras.Model(
inputs=initial_model.inputs,
outputs=initial_model.get_layer(name="my_intermediate_layer").output,
)
# Call feature extractor on test input.
x = ops.ones((1, 250, 250, 3))
features = feature_extractor(x)
"""
## Transfer learning with a Sequential model
Transfer learning consists of freezing the bottom layers in a model and only training
the top layers. If you aren't familiar with it, make sure to read our [guide
to transfer learning](/guides/transfer_learning/).
Here are two common transfer learning blueprints involving Sequential models.
First, let's say that you have a Sequential model, and you want to freeze all
layers except the last one. In this case, you would simply iterate over
`model.layers` and set `layer.trainable = False` on each layer, except the
last one. Like this:
```python
model = keras.Sequential([
    keras.Input(shape=(784,)),
layers.Dense(32, activation='relu'),
layers.Dense(32, activation='relu'),
layers.Dense(32, activation='relu'),
layers.Dense(10),
])
# Presumably you would want to first load pre-trained weights.
model.load_weights(...)
# Freeze all layers except the last one.
for layer in model.layers[:-1]:
layer.trainable = False
# Recompile and train (this will only update the weights of the last layer).
model.compile(...)
model.fit(...)
```
Another common blueprint is to use a Sequential model to stack a pre-trained
model and some freshly initialized classification layers. Like this:
```python
# Load a convolutional base with pre-trained weights
base_model = keras.applications.Xception(
weights='imagenet',
include_top=False,
pooling='avg')
# Freeze the base model
base_model.trainable = False
# Use a Sequential model to add a trainable classifier on top
model = keras.Sequential([
base_model,
layers.Dense(1000),
])
# Compile & train
model.compile(...)
model.fit(...)
```
If you do transfer learning, you will probably find yourself frequently using
these two patterns.
"""
"""
That's about all you need to know about Sequential models!
To find out more about building models in Keras, see:
- [Guide to the Functional API](/guides/functional_api/)
- [Guide to making new Layers & Models via subclassing](/guides/making_new_layers_and_models_via_subclassing/)
"""
|
keras-io/guides/sequential_model.py/0
|
{
"file_path": "keras-io/guides/sequential_model.py",
"repo_id": "keras-io",
"token_count": 3242
}
| 140 |
<meta http-equiv="refresh" content="0; URL='https://keras.io/api/keras_nlp/preprocessing_layers/start_end_packer/'" />
|
keras-io/redirects/api/keras_nlp/layers/start_end_packer/index.html/0
|
{
"file_path": "keras-io/redirects/api/keras_nlp/layers/start_end_packer/index.html",
"repo_id": "keras-io",
"token_count": 48
}
| 141 |
<meta http-equiv="refresh" content="0; URL='https://keras.io/examples/generative/text_generation_fnet/'" />
|
keras-io/redirects/examples/nlp/text_generation_fnet/index.html/0
|
{
"file_path": "keras-io/redirects/examples/nlp/text_generation_fnet/index.html",
"repo_id": "keras-io",
"token_count": 40
}
| 142 |