from keras_core import backend
from keras_core import ops
from keras_core.api_export import keras_core_export
from keras_core.optimizers import optimizer
@keras_core_export(["keras_core.optimizers.Nadam"])
class Nadam(optimizer.Optimizer):
"""Optimizer that implements the Nadam algorithm.
Much like Adam is essentially RMSprop with momentum, Nadam is Adam with
Nesterov momentum.
Args:
learning_rate: A float, a
`keras_core.optimizers.schedules.LearningRateSchedule` instance, or
a callable that takes no arguments and returns the actual value to
use. The learning rate. Defaults to `0.001`.
beta_1: A float value or a constant float tensor, or a callable
that takes no arguments and returns the actual value to use. The
exponential decay rate for the 1st moment estimates.
Defaults to `0.9`.
beta_2: A float value or a constant float tensor, or a callable
that takes no arguments and returns the actual value to use. The
exponential decay rate for the 2nd moment estimates. Defaults to
`0.999`.
epsilon: A small constant for numerical stability. This epsilon is
"epsilon hat" in the Kingma and Ba paper (in the formula just before
Section 2.1), not the epsilon in Algorithm 1 of the paper.
Defaults to `1e-7`.
{{base_optimizer_keyword_args}}
Reference:
- [Dozat, 2015](http://cs229.stanford.edu/proj2015/054_report.pdf).
"""
def __init__(
self,
learning_rate=0.001,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-7,
weight_decay=None,
clipnorm=None,
clipvalue=None,
global_clipnorm=None,
use_ema=False,
ema_momentum=0.99,
ema_overwrite_frequency=None,
name="nadam",
**kwargs,
):
super().__init__(
learning_rate=learning_rate,
name=name,
weight_decay=weight_decay,
clipnorm=clipnorm,
clipvalue=clipvalue,
global_clipnorm=global_clipnorm,
use_ema=use_ema,
ema_momentum=ema_momentum,
ema_overwrite_frequency=ema_overwrite_frequency,
**kwargs,
)
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
def build(self, var_list):
"""Initialize optimizer variables.
Nadam optimizer has 2 types of variables: momentums and velocities.
Args:
var_list: list of model variables to build Nadam variables on.
"""
if self.built:
return
if var_list:
dtype = var_list[0].dtype
else:
dtype = backend.floatx()
super().build(var_list)
self._momentums = []
self._velocities = []
self._u_product = backend.Variable(1.0, dtype=dtype)
for var in var_list:
self._momentums.append(
self.add_variable_from_reference(
reference_variable=var, name="momentum"
)
)
self._velocities.append(
self.add_variable_from_reference(
reference_variable=var, name="velocity"
)
)
def _internal_apply_gradients(self, grads_and_vars):
dtype = self._u_product.dtype
self._u_product.assign(
self._u_product
* self.beta_1
* (
1.0
- 0.5 * ops.power(0.96, ops.cast(self.iterations + 1, dtype))
)
)
super()._internal_apply_gradients(grads_and_vars)
def update_step(self, gradient, variable, learning_rate):
"""Update step given gradient and the associated model variable."""
var_dtype = variable.dtype
lr = ops.cast(learning_rate, var_dtype)
gradient = ops.cast(gradient, var_dtype)
local_step = ops.cast(self.iterations + 1, var_dtype)
next_step = ops.cast(self.iterations + 2, var_dtype)
decay = ops.cast(0.96, var_dtype)
beta_1 = ops.cast(self.beta_1, var_dtype)
beta_2 = ops.cast(self.beta_2, var_dtype)
u_t = beta_1 * (1.0 - 0.5 * (ops.power(decay, local_step)))
u_t_1 = beta_1 * (1.0 - 0.5 * (ops.power(decay, next_step)))
u_product_t = ops.cast(self._u_product, var_dtype)
u_product_t_1 = u_product_t * u_t_1
beta_2_power = ops.power(beta_2, local_step)
m = self._momentums[self._get_variable_index(variable)]
v = self._velocities[self._get_variable_index(variable)]
m.assign(m + (gradient - m) * (1 - beta_1))
v.assign(v + (ops.square(gradient) - v) * (1 - beta_2))
m_hat = u_t_1 * m / (1 - u_product_t_1) + (1 - u_t) * gradient / (
1 - u_product_t
)
v_hat = v / (1 - beta_2_power)
variable.assign(
variable - (m_hat * lr) / (ops.sqrt(v_hat) + self.epsilon)
)
def get_config(self):
config = super().get_config()
config.update(
{
"beta_1": self.beta_1,
"beta_2": self.beta_2,
"epsilon": self.epsilon,
}
)
return config
Nadam.__doc__ = Nadam.__doc__.replace(
"{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)
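# --- Editorial sketch (not part of keras_core) ------------------------------
# A minimal NumPy transcription of one Nadam step as implemented by
# `_internal_apply_gradients` and `update_step` above, for a single parameter.
# The function name and the example defaults are illustrative only.
def _nadam_step_sketch(
    var, grad, m, v, u_product, step,
    lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-7,
):
    """One Nadam update; `step` is 1-based, mirroring `iterations + 1`."""
    import numpy as np  # local import to keep the sketch self-contained

    u_t = beta_1 * (1.0 - 0.5 * 0.96**step)
    u_t_1 = beta_1 * (1.0 - 0.5 * 0.96 ** (step + 1))
    u_product_t = u_product * u_t  # running product, updated before the step
    u_product_t_1 = u_product_t * u_t_1
    m = m + (grad - m) * (1.0 - beta_1)  # 1st-moment EMA
    v = v + (grad**2 - v) * (1.0 - beta_2)  # 2nd-moment EMA
    m_hat = u_t_1 * m / (1.0 - u_product_t_1) + (1.0 - u_t) * grad / (
        1.0 - u_product_t
    )
    v_hat = v / (1.0 - beta_2**step)
    var = var - lr * m_hat / (np.sqrt(v_hat) + epsilon)
    return var, m, v, u_product_t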
# End of file: keras-core/keras_core/optimizers/nadam.py
import math
from keras_core import ops
from keras_core.api_export import keras_core_export
from keras_core.utils.numerical_utils import normalize
@keras_core_export(
["keras_core.Regularizer", "keras_core.regularizers.Regularizer"]
)
class Regularizer:
"""Regularizer base class.
Regularizers allow you to apply penalties on layer parameters or layer
activity during optimization. These penalties are summed into the loss
function that the network optimizes.
Regularization penalties are applied on a per-layer basis. The exact API
will depend on the layer, but many layers (e.g. `Dense`, `Conv1D`, `Conv2D`
and `Conv3D`) have a unified API.
These layers expose 3 keyword arguments:
- `kernel_regularizer`: Regularizer to apply a penalty on the layer's kernel
- `bias_regularizer`: Regularizer to apply a penalty on the layer's bias
- `activity_regularizer`: Regularizer to apply a penalty on the layer's
output
All layers (including custom layers) expose `activity_regularizer` as a
settable property, whether or not it is in the constructor arguments.
The value returned by the `activity_regularizer` is divided by the input
batch size so that the relative weighting between the weight regularizers
and the activity regularizers does not change with the batch size.
You can access a layer's regularization penalties by calling `layer.losses`
after calling the layer on inputs.
## Example
>>> layer = Dense(
... 5, input_dim=5,
... kernel_initializer='ones',
... kernel_regularizer=L1(0.01),
... activity_regularizer=L2(0.01))
>>> tensor = ops.ones(shape=(5, 5)) * 2.0
>>> out = layer(tensor)
>>> # The kernel regularization term is 0.25
>>> # The activity regularization term (after dividing by the batch size)
>>> # is 5
>>> ops.sum(layer.losses)
5.25
## Available penalties
```python
L1(0.3) # L1 Regularization Penalty
L2(0.1) # L2 Regularization Penalty
L1L2(l1=0.01, l2=0.01) # L1 + L2 penalties
```
## Directly calling a regularizer
Compute a regularization loss on a tensor by directly calling a regularizer
as if it is a one-argument function.
E.g.
>>> regularizer = L2(2.)
>>> tensor = ops.ones(shape=(5, 5))
>>> regularizer(tensor)
50.0
## Developing new regularizers
Any function that takes in a weight matrix and returns a scalar
tensor can be used as a regularizer, e.g.:
>>> def l1_reg(weight_matrix):
... return 0.01 * ops.sum(ops.absolute(weight_matrix))
...
>>> layer = Dense(5, input_dim=5,
... kernel_initializer='ones', kernel_regularizer=l1_reg)
>>> tensor = ops.ones(shape=(5, 5))
>>> out = layer(tensor)
>>> layer.losses
0.25
Alternatively, you can write your custom regularizers in an
object-oriented way by extending this regularizer base class, e.g.:
>>> class L2Regularizer(Regularizer):
... def __init__(self, l2=0.):
... self.l2 = l2
...
... def __call__(self, x):
... return self.l2 * ops.sum(ops.square(x))
...
... def get_config(self):
... return {'l2': float(self.l2)}
...
>>> layer = Dense(
... 5, input_dim=5, kernel_initializer='ones',
... kernel_regularizer=L2Regularizer(l2=0.5))
>>> tensor = ops.ones(shape=(5, 5))
>>> out = layer(tensor)
>>> layer.losses
12.5
### A note on serialization and deserialization:
Registering the regularizers as serializable is optional if you are just
training and executing models, exporting to and from SavedModels, or saving
and loading weight checkpoints.
Registration is required for saving and
loading models to HDF5 format, Keras model cloning, some visualization
utilities, and exporting models to and from JSON. If using this
functionality, you must make sure any python process running your model has
also defined and registered your custom regularizer.
"""
def __call__(self, x):
"""Compute a regularization penalty from an input tensor."""
return 0.0
@classmethod
def from_config(cls, config):
"""Creates a regularizer from its config.
This method is the reverse of `get_config`,
capable of instantiating the same regularizer from the config
dictionary.
This method is used by Keras `model_to_estimator`, saving and
loading models to HDF5 formats, Keras model cloning, some visualization
utilities, and exporting models to and from JSON.
Args:
config: A Python dictionary, typically the output of get_config.
Returns:
A regularizer instance.
"""
return cls(**config)
def get_config(self):
"""Returns the config of the regularizer.
A regularizer config is a Python dictionary (serializable)
containing all configuration parameters of the regularizer.
The same regularizer can be reinstantiated later
(without any saved state) from this configuration.
This method is optional if you are just training and executing models,
exporting to and from SavedModels, or using weight checkpoints.
This method is required for Keras `model_to_estimator`, saving and
loading models to HDF5 formats, Keras model cloning, some visualization
utilities, and exporting models to and from JSON.
Returns:
Python dictionary.
"""
raise NotImplementedError(f"{self} does not implement get_config()")
@keras_core_export(
["keras_core.regularizers.L1L2", "keras_core.regularizers.l1_l2"]
)
class L1L2(Regularizer):
"""A regularizer that applies both L1 and L2 regularization penalties.
The L1 regularization penalty is computed as:
`loss = l1 * reduce_sum(abs(x))`
The L2 regularization penalty is computed as
`loss = l2 * reduce_sum(square(x))`
L1L2 may be passed to a layer as a string identifier:
>>> dense = Dense(3, kernel_regularizer='l1_l2')
In this case, the default values used are `l1=0.01` and `l2=0.01`.
Arguments:
l1: float, L1 regularization factor.
l2: float, L2 regularization factor.
"""
def __init__(self, l1=0.0, l2=0.0):
# The defaults for `l1` and `l2` differ from those of the `l1_l2`
# shortcut for backward-compatibility reasons. E.g., L1L2(l2=0.1) will
# only add an l2 penalty and no l1 penalty.
l1 = 0.0 if l1 is None else l1
l2 = 0.0 if l2 is None else l2
validate_float_arg(l1, name="l1")
validate_float_arg(l2, name="l2")
self.l1 = l1
self.l2 = l2
def __call__(self, x):
regularization = ops.convert_to_tensor(0.0, dtype=x.dtype)
if self.l1:
regularization += self.l1 * ops.sum(ops.absolute(x))
if self.l2:
regularization += self.l2 * ops.sum(ops.square(x))
return regularization
def get_config(self):
return {"l1": float(self.l1), "l2": float(self.l2)}
@keras_core_export(["keras_core.regularizers.L1", "keras_core.regularizers.l1"])
class L1(Regularizer):
"""A regularizer that applies a L1 regularization penalty.
The L1 regularization penalty is computed as:
`loss = l1 * reduce_sum(abs(x))`
L1 may be passed to a layer as a string identifier:
>>> dense = Dense(3, kernel_regularizer='l1')
In this case, the default value used is `l1=0.01`.
Arguments:
l1: float, L1 regularization factor.
"""
def __init__(self, l1=0.01):
l1 = 0.01 if l1 is None else l1
validate_float_arg(l1, name="l1")
self.l1 = ops.convert_to_tensor(l1)
def __call__(self, x):
return self.l1 * ops.sum(ops.absolute(x))
def get_config(self):
return {"l1": float(self.l1)}
@keras_core_export(["keras_core.regularizers.L2", "keras_core.regularizers.l2"])
class L2(Regularizer):
"""A regularizer that applies a L2 regularization penalty.
The L2 regularization penalty is computed as:
`loss = l2 * reduce_sum(square(x))`
L2 may be passed to a layer as a string identifier:
>>> dense = Dense(3, kernel_regularizer='l2')
In this case, the default value used is `l2=0.01`.
Arguments:
l2: float, L2 regularization factor.
"""
def __init__(self, l2=0.01):
l2 = 0.01 if l2 is None else l2
validate_float_arg(l2, name="l2")
self.l2 = l2
def __call__(self, x):
return self.l2 * ops.sum(ops.square(x))
def get_config(self):
return {"l2": float(self.l2)}
@keras_core_export(
[
"keras_core.regularizers.OrthogonalRegularizer",
"keras_core.regularizers.orthogonal_regularizer",
]
)
class OrthogonalRegularizer(Regularizer):
"""Regularizer that encourages input vectors to be orthogonal to each other.
It can be applied to either the rows of a matrix (`mode="rows"`) or its
columns (`mode="columns"`). When applied to a `Dense` kernel of shape
`(input_dim, units)`, rows mode will seek to make the feature vectors
(i.e. the basis of the output space) orthogonal to each other.
Arguments:
factor: Float. The regularization factor. The regularization penalty
will be proportional to `factor` times the mean of the dot products
between the L2-normalized rows (if `mode="rows"`, or columns if
`mode="columns"`) of the inputs, excluding the product of each
row/column with itself. Defaults to `0.01`.
mode: String, one of `{"rows", "columns"}`. Defaults to `"rows"`. In
rows mode, the regularization effect seeks to make the rows of the
input orthogonal to each other. In columns mode, it seeks to make
the columns of the input orthogonal to each other.
Example:
>>> regularizer = OrthogonalRegularizer(factor=0.01)
>>> layer = Dense(units=4, kernel_regularizer=regularizer)
"""
def __init__(self, factor=0.01, mode="rows"):
validate_float_arg(factor, name="factor")
self.factor = ops.convert_to_tensor(factor)
if mode not in {"rows", "columns"}:
raise ValueError(
"Invalid value for argument `mode`. Expected one of "
f'{{"rows", "columns"}}. Received: mode={mode}'
)
self.mode = mode
def __call__(self, inputs):
if len(inputs.shape) != 2:
raise ValueError(
"Inputs to OrthogonalRegularizer must have rank 2. Received: "
f"inputs.shape={inputs.shape}"
)
if self.mode == "rows":
inputs = normalize(inputs, axis=1)
product = ops.matmul(inputs, ops.transpose(inputs))
size = inputs.shape[0]
else:
inputs = normalize(inputs, axis=0)
product = ops.matmul(ops.transpose(inputs), inputs)
size = inputs.shape[1]
product_no_diagonal = product * (
1.0 - ops.eye(size, dtype=inputs.dtype)
)
num_pairs = size * (size - 1.0) / 2.0
return (
self.factor
* 0.5
* ops.sum(ops.absolute(product_no_diagonal))
/ num_pairs
)
def get_config(self):
return {"factor": float(self.factor), "mode": self.mode}
def validate_float_arg(value, name):
"""check penalty number availability, raise ValueError if failed."""
if not isinstance(value, (float, int)) or (
math.isinf(value) or math.isnan(value)
):
raise ValueError(
f"Invalid value for argument {name}: expected a float. "
f"Received: {name}={value}"
)
return float(value)
# End of file: keras-core/keras_core/regularizers/regularizers.py
import numpy as np
from absl.testing import parameterized
from keras_core import backend
from keras_core import losses as losses_module
from keras_core import metrics as metrics_module
from keras_core import testing
from keras_core.trainers.compile_utils import CompileLoss
from keras_core.trainers.compile_utils import CompileMetrics
class TestCompileMetrics(testing.TestCase):
def test_single_output_case(self):
compile_metrics = CompileMetrics(
metrics=[metrics_module.MeanSquaredError()],
weighted_metrics=[metrics_module.MeanSquaredError()],
)
# Test symbolic build
y_true, y_pred = backend.KerasTensor((3, 4)), backend.KerasTensor(
(3, 4)
)
compile_metrics.build(y_true, y_pred)
# Test eager build
y_true = np.array([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]])
y_pred = np.array([[0.4, 0.1], [0.2, 0.6], [0.6, 0.1]])
sample_weight = np.array([1, 0.0, 1])
compile_metrics.build(y_true, y_pred)
# Test update / result / reset flow
compile_metrics.update_state(
y_true, y_pred, sample_weight=sample_weight
)
y_pred = np.array([[0.3, 0.2], [0.1, 0.4], [0.2, 0.3]])
compile_metrics.update_state(
y_true, y_pred, sample_weight=sample_weight
)
result = compile_metrics.result()
self.assertIsInstance(result, dict)
self.assertEqual(len(result), 2)
self.assertAllClose(result["mean_squared_error"], 0.055833336)
self.assertAllClose(result["weighted_mean_squared_error"], 0.0725)
compile_metrics.reset_state()
result = compile_metrics.result()
self.assertIsInstance(result, dict)
self.assertEqual(len(result), 2)
self.assertAllClose(result["mean_squared_error"], 0.0)
self.assertAllClose(result["weighted_mean_squared_error"], 0.0)
def test_list_output_case(self):
compile_metrics = CompileMetrics(
metrics=[
[
metrics_module.MeanSquaredError(),
metrics_module.MeanSquaredError(),
],
[
metrics_module.MeanSquaredError(),
metrics_module.MeanSquaredError(),
],
],
weighted_metrics=[
[
metrics_module.MeanSquaredError(),
metrics_module.MeanSquaredError(),
],
[
metrics_module.MeanSquaredError(),
metrics_module.MeanSquaredError(),
],
],
)
# Test symbolic build
y_true = [backend.KerasTensor((3, 4)), backend.KerasTensor((3, 4))]
y_pred = [backend.KerasTensor((3, 4)), backend.KerasTensor((3, 4))]
compile_metrics.build(y_true, y_pred)
# Test eager build
y_true = [
np.array([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]]),
np.array([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]]),
]
y_pred = [
np.array([[0.4, 0.1], [0.2, 0.6], [0.6, 0.1]]),
np.array([[0.4, 0.1], [0.2, 0.6], [0.6, 0.1]]),
]
sample_weight = np.array([1, 0.0, 1])
compile_metrics.build(y_true, y_pred)
# Test update / result / reset flow
compile_metrics.update_state(
y_true, y_pred, sample_weight=sample_weight
)
y_pred = [
np.array([[0.3, 0.2], [0.1, 0.4], [0.2, 0.3]]),
np.array([[0.3, 0.2], [0.1, 0.4], [0.2, 0.3]]),
]
compile_metrics.update_state(
y_true, y_pred, sample_weight=sample_weight
)
result = compile_metrics.result()
self.assertIsInstance(result, dict)
self.assertEqual(len(result), 8)
self.assertAllClose(result["mean_squared_error"], 0.055833336)
self.assertAllClose(result["weighted_mean_squared_error"], 0.0725)
compile_metrics.reset_state()
result = compile_metrics.result()
self.assertIsInstance(result, dict)
self.assertEqual(len(result), 8)
self.assertAllClose(result["mean_squared_error"], 0.0)
self.assertAllClose(result["weighted_mean_squared_error"], 0.0)
def test_dict_output_case(self):
compile_metrics = CompileMetrics(
metrics={
"output_1": [
metrics_module.MeanSquaredError(),
metrics_module.MeanSquaredError(name="mse"),
],
"output_2": [
metrics_module.MeanSquaredError(),
metrics_module.MeanSquaredError(name="mse"),
],
},
weighted_metrics={
"output_1": [
metrics_module.MeanSquaredError(),
metrics_module.MeanSquaredError(name="mse"),
],
"output_2": [
metrics_module.MeanSquaredError(),
metrics_module.MeanSquaredError(name="mse"),
],
},
)
# Test symbolic build
y_true = {
"output_1": backend.KerasTensor((3, 4)),
"output_2": backend.KerasTensor((3, 4)),
}
y_pred = {
"output_1": backend.KerasTensor((3, 4)),
"output_2": backend.KerasTensor((3, 4)),
}
compile_metrics.build(y_true, y_pred)
# Test eager build
y_true = {
"output_1": np.array([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]]),
"output_2": np.array([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]]),
}
y_pred = {
"output_1": np.array([[0.4, 0.1], [0.2, 0.6], [0.6, 0.1]]),
"output_2": np.array([[0.4, 0.1], [0.2, 0.6], [0.6, 0.1]]),
}
sample_weight = np.array([1, 0.0, 1])
compile_metrics.build(y_true, y_pred)
# Test update / result / reset flow
compile_metrics.update_state(
y_true, y_pred, sample_weight=sample_weight
)
y_pred = {
"output_1": np.array([[0.3, 0.2], [0.1, 0.4], [0.2, 0.3]]),
"output_2": np.array([[0.3, 0.2], [0.1, 0.4], [0.2, 0.3]]),
}
compile_metrics.update_state(
y_true, y_pred, sample_weight=sample_weight
)
result = compile_metrics.result()
self.assertIsInstance(result, dict)
self.assertEqual(len(result), 8)
# Result values obtained from `tf.keras`
# m = tf.keras.metrics.MeanSquaredError()
# m.update_state(y_true, y_pred1, sample_weight=weight)
# m.update_state(y_true, y_pred2, sample_weight=weight)
# m.result().numpy()
self.assertAllClose(result["output_1_mean_squared_error"], 0.055833336)
self.assertAllClose(result["output_2_mean_squared_error"], 0.055833336)
self.assertAllClose(result["output_1_mse"], 0.055833336)
self.assertAllClose(result["output_2_mse"], 0.055833336)
self.assertAllClose(
result["output_1_weighted_mean_squared_error"], 0.0725
)
self.assertAllClose(
result["output_2_weighted_mean_squared_error"], 0.0725
)
self.assertAllClose(result["output_1_weighted_mse"], 0.0725)
self.assertAllClose(result["output_2_weighted_mse"], 0.0725)
compile_metrics.reset_state()
result = compile_metrics.result()
self.assertIsInstance(result, dict)
self.assertEqual(len(result), 8)
self.assertAllClose(result["output_1_mean_squared_error"], 0.0)
self.assertAllClose(result["output_2_mean_squared_error"], 0.0)
self.assertAllClose(result["output_1_weighted_mean_squared_error"], 0.0)
self.assertAllClose(result["output_2_weighted_mean_squared_error"], 0.0)
def test_name_conversions(self):
compile_metrics = CompileMetrics(
metrics=["acc", "accuracy", "mse"],
weighted_metrics=[],
)
y_true = np.array([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]])
y_pred = np.array([[0.4, 0.1], [0.2, 0.6], [0.6, 0.1]])
compile_metrics.build(y_true, y_pred)
compile_metrics.update_state(y_true, y_pred, sample_weight=None)
result = compile_metrics.result()
self.assertIsInstance(result, dict)
self.assertEqual(len(result), 3)
self.assertAllClose(result["acc"], 0.333333)
self.assertAllClose(result["accuracy"], 0.333333)
self.assertTrue("mse" in result)
class TestCompileLoss(testing.TestCase, parameterized.TestCase):
def test_single_output_case(self):
compile_loss = CompileLoss(
loss=losses_module.MeanSquaredError(),
)
# Test symbolic build
y_true, y_pred = backend.KerasTensor((3, 4)), backend.KerasTensor(
(3, 4)
)
compile_loss.build(y_true, y_pred)
# Test eager build
y_true = np.array([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]])
y_pred = np.array([[0.4, 0.1], [0.2, 0.6], [0.6, 0.1]])
compile_loss.build(y_true, y_pred)
value = compile_loss(y_true, y_pred)
self.assertAllClose(value, 0.068333, atol=1e-5)
@parameterized.parameters(True, False)
def test_list_output_case(self, broadcast):
if broadcast:
# Test broadcasting single loss to all outputs
compile_loss = CompileLoss(
loss="mse",
)
else:
compile_loss = CompileLoss(
loss=["mse", "mse"],
)
# Test symbolic build
y_true = [backend.KerasTensor((3, 4)), backend.KerasTensor((3, 4))]
y_pred = [backend.KerasTensor((3, 4)), backend.KerasTensor((3, 4))]
compile_loss.build(y_true, y_pred)
# Test eager build
y_true = [
np.array([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]]),
np.array([[0.7, 0.8], [0.9, 1.0], [1.1, 1.2]]),
]
y_pred = [
np.array([[1.2, 1.1], [1.0, 0.9], [0.8, 0.7]]),
np.array([[0.6, 0.5], [0.4, 0.3], [0.2, 0.1]]),
]
compile_loss.build(y_true, y_pred)
value = compile_loss(y_true, y_pred)
self.assertAllClose(value, 0.953333, atol=1e-5)
@parameterized.parameters(True, False)
def test_dict_output_case(self, broadcast):
if broadcast:
# Test broadcasting single loss to all outputs
compile_loss = CompileLoss(
loss="mse",
)
else:
compile_loss = CompileLoss(
loss={"a": "mse", "b": "mse"},
)
# Test symbolic build
y_true = {
"a": backend.KerasTensor((3, 4)),
"b": backend.KerasTensor((3, 4)),
}
y_pred = {
"a": backend.KerasTensor((3, 4)),
"b": backend.KerasTensor((3, 4)),
}
compile_loss.build(y_true, y_pred)
# Test eager build
y_true = {
"a": np.array([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]]),
"b": np.array([[0.7, 0.8], [0.9, 1.0], [1.1, 1.2]]),
}
y_pred = {
"a": np.array([[1.2, 1.1], [1.0, 0.9], [0.8, 0.7]]),
"b": np.array([[0.6, 0.5], [0.4, 0.3], [0.2, 0.1]]),
}
sample_weight = {
"a": np.array([1.0, 2.0, 3.0]),
"b": np.array([3.0, 2.0, 1.0]),
}
compile_loss.build(y_true, y_pred)
value = compile_loss(y_true, y_pred, sample_weight)
self.assertAllClose(value, 1.266666, atol=1e-5)
def test_list_loss_dict_data(self):
compile_loss = CompileLoss(loss=["mse", "mae"], output_names=["b", "a"])
y_true = [backend.KerasTensor((3, 4)), backend.KerasTensor((3, 4))]
y_pred = [backend.KerasTensor((3, 4)), backend.KerasTensor((3, 4))]
compile_loss.build(y_true, y_pred)
y_true = {
"a": np.array([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]]),
"b": np.array([[0.7, 0.8], [0.9, 1.0], [1.1, 1.2]]),
}
y_pred = {
"a": np.array([[1.2, 1.1], [1.0, 0.9], [0.8, 0.7]]),
"b": np.array([[0.6, 0.5], [0.4, 0.3], [0.2, 0.1]]),
}
value = compile_loss(y_true, y_pred)
self.assertAllClose(value, 1.07666, atol=1e-5)
# End of file: keras-core/keras_core/trainers/compile_utils_test.py
import platform
import warnings
from keras_core import backend
from keras_core import metrics as metrics_module
from keras_core import ops
from keras_core import optimizers
from keras_core.optimizers.loss_scale_optimizer import LossScaleOptimizer
from keras_core.saving import serialization_lib
from keras_core.trainers.compile_utils import CompileLoss
from keras_core.trainers.compile_utils import CompileMetrics
from keras_core.utils import traceback_utils
from keras_core.utils import tracking
class Trainer:
def __init__(self):
self._lock = False
self._run_eagerly = False
self._jit_compile = None
self.compiled = False
self.loss = None
self.steps_per_execution = 1
@traceback_utils.filter_traceback
@tracking.no_automatic_dependency_tracking
def compile(
self,
optimizer="rmsprop",
loss=None,
loss_weights=None,
metrics=None,
weighted_metrics=None,
run_eagerly=False,
steps_per_execution=1,
jit_compile="auto",
auto_scale_loss=True,
):
"""Configures the model for training.
Example:
```python
model.compile(
optimizer=keras_core.optimizers.Adam(learning_rate=1e-3),
loss=keras_core.losses.BinaryCrossentropy(),
metrics=[
keras_core.metrics.BinaryAccuracy(),
keras_core.metrics.FalseNegatives(),
],
)
```
Args:
optimizer: String (name of optimizer) or optimizer instance. See
`keras_core.optimizers`.
loss: Loss function. May be a string (name of loss function), or
a `keras_core.losses.Loss` instance. See `keras_core.losses`. A
loss function is any callable with the signature
`loss = fn(y_true, y_pred)`, where `y_true` are the ground truth
values, and `y_pred` are the model's predictions.
`y_true` should have shape `(batch_size, d0, .. dN)`
(except in the case of sparse loss functions such as
sparse categorical crossentropy which expects integer arrays of
shape `(batch_size, d0, .. dN-1)`).
`y_pred` should have shape `(batch_size, d0, .. dN)`.
The loss function should return a float tensor.
loss_weights: Optional list or dictionary specifying scalar
coefficients (Python floats) to weight the loss contributions of
different model outputs. The loss value that will be minimized
by the model will then be the *weighted sum* of all individual
losses, weighted by the `loss_weights` coefficients. If a list,
it is expected to have a 1:1 mapping to the model's outputs. If
a dict, it is expected to map output names (strings) to scalar
coefficients.
metrics: List of metrics to be evaluated by the model during
training and testing. Each of these can be a string (name of a
built-in function), a function, or a `keras_core.metrics.Metric`
instance. See `keras_core.metrics`. Typically you will use
`metrics=['accuracy']`. A function is any callable with the
signature `result = fn(y_true, y_pred)`. To specify different
metrics for different outputs of a multi-output model, you could
also pass a dictionary, such as
`metrics={'a':'accuracy', 'b':['accuracy', 'mse']}`.
You can also pass a list to specify a metric or a list of
metrics for each output, such as
`metrics=[['accuracy'], ['accuracy', 'mse']]`
or `metrics=['accuracy', ['accuracy', 'mse']]`. When you pass
the strings 'accuracy' or 'acc', we convert this to one of
`keras_core.metrics.BinaryAccuracy`,
`keras_core.metrics.CategoricalAccuracy`,
`keras_core.metrics.SparseCategoricalAccuracy` based on the
shapes of the targets and of the model output. A similar
conversion is done for the strings `"crossentropy"`
and `"ce"` as well.
The metrics passed here are evaluated without sample weighting;
if you would like sample weighting to apply, you can specify
your metrics via the `weighted_metrics` argument instead.
weighted_metrics: List of metrics to be evaluated and weighted by
`sample_weight` or `class_weight` during training and testing.
run_eagerly: Bool. If `True`, this model's forward pass
will never be compiled. It is recommended to leave this
as `False` when training (for best performance),
and to set it to `True` when debugging.
steps_per_execution: Int. The number of batches to run
during a single compiled function call. Running multiple
batches inside a single compiled function call can
greatly improve performance on TPUs or small models with a large
Python overhead. At most, one full epoch will be run each
execution. If a number larger than the size of the epoch is
passed, the execution will be truncated to the size of the
epoch. Note that if `steps_per_execution` is set to `N`,
`Callback.on_batch_begin` and `Callback.on_batch_end` methods
will only be called every `N` batches (i.e. before/after
each compiled function execution).
Not supported with the PyTorch backend.
jit_compile: Bool or `"auto"`. Whether to use XLA compilation when
compiling a model. Not supported with the PyTorch backend.
If `"auto"`, XLA compilation will be enabled if the
model supports it, and disabled otherwise.
auto_scale_loss: Bool. If `True` and the model dtype policy is
`"mixed_float16"`, the passed optimizer will be automatically
wrapped in a `LossScaleOptimizer`, which will dynamically
scale the loss to prevent underflow.
"""
self.optimizer = optimizers.get(optimizer)
if (
auto_scale_loss
and self.dtype_policy.name == "mixed_float16"
and self.optimizer
and not isinstance(self.optimizer, LossScaleOptimizer)
):
self.optimizer = LossScaleOptimizer(
self.optimizer, name="loss_scale_optimizer"
)
if hasattr(self, "output_names"):
output_names = self.output_names
else:
output_names = None
if loss is not None:
self._compile_loss = CompileLoss(
loss, loss_weights, output_names=output_names
)
self.loss = loss
else:
self._compile_loss = None
if metrics is not None or weighted_metrics is not None:
self._compile_metrics = CompileMetrics(
metrics, weighted_metrics, output_names=output_names
)
else:
self._compile_metrics = None
if jit_compile == "auto":
if run_eagerly:
jit_compile = False
else:
jit_compile = resolve_auto_jit_compile(self)
if jit_compile and run_eagerly:
jit_compile = False
warnings.warn(
"If `run_eagerly` is True, then `jit_compile` "
"cannot also be True. Disabling `jit_compile`.",
stacklevel=2,
)
if jit_compile and backend.backend() == "torch":
warnings.warn(
"`jit_compile` is not yet enabled for the PyTorch backend. "
"Proceeding with `jit_compile=False`."
)
jit_compile = False
self.jit_compile = jit_compile
self.run_eagerly = run_eagerly
self.stop_training = False
self.compiled = True
self._loss_tracker = metrics_module.Mean(name="loss")
self.steps_per_execution = steps_per_execution
self.train_function = None
self.test_function = None
self.predict_function = None
self._compile_config = serialization_lib.SerializableDict(
optimizer=optimizer,
loss=loss,
loss_weights=loss_weights,
metrics=metrics,
weighted_metrics=weighted_metrics,
run_eagerly=run_eagerly,
steps_per_execution=steps_per_execution,
jit_compile=jit_compile,
)
@property
def jit_compile(self):
if self._jit_compile is None:
# Value was never set. Resolve it now.
jit_compile = model_supports_jit(self)
self._jit_compile = jit_compile
return self._jit_compile
@jit_compile.setter
def jit_compile(self, value):
if value and not model_supports_jit(self):
warnings.warn(
"Model doesn't support `jit_compile=True`. "
"Proceeding with `jit_compile=False`."
)
self._jit_compile = False
else:
self._jit_compile = value
@property
def run_eagerly(self):
return self._run_eagerly
@run_eagerly.setter
def run_eagerly(self, value):
self._run_eagerly = value
@property
def metrics(self):
metrics = [self._loss_tracker] if self.compiled else []
metrics.extend(self._metrics[:])
if self.compiled and self._compile_metrics is not None:
metrics += [self._compile_metrics]
return metrics
@property
def metrics_names(self):
return [m.name for m in self.metrics]
@property
def metrics_variables(self):
vars = []
for metric in self.metrics:
vars.extend(metric.variables)
return vars
def reset_metrics(self):
for m in self.metrics:
m.reset_state()
def compute_loss(
self, x=None, y=None, y_pred=None, sample_weight=None, allow_empty=False
):
"""Compute the total loss, validate it, and return it.
Subclasses can optionally override this method to provide custom loss
computation logic.
Example:
```python
class MyModel(Model):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.loss_tracker = metrics.Mean(name='loss')
def compute_loss(self, x, y, y_pred, sample_weight):
loss = ops.mean((y_pred - y) ** 2)
loss += ops.sum(self.losses)
self.loss_tracker.update_state(loss)
return loss
def reset_metrics(self):
self.loss_tracker.reset_state()
@property
def metrics(self):
return [self.loss_tracker]
inputs = layers.Input(shape=(10,), name='my_input')
outputs = layers.Dense(10)(inputs)
model = MyModel(inputs, outputs)
model.add_loss(ops.sum(outputs))
optimizer = SGD()
model.compile(optimizer, loss='mse', steps_per_execution=10)
dataset = ...
model.fit(dataset, epochs=2, steps_per_epoch=10)
print(f"Custom loss: {model.loss_tracker.result()}")
```
Args:
x: Input data.
y: Target data.
y_pred: Predictions returned by the model (output of `model(x)`)
sample_weight: Sample weights for weighting the loss function.
allow_empty: If `False`, the method will error out if
no loss has been computed by the model. If `True`, then
if no loss is computed, the method returns 0.
Returns:
The total loss as a scalar tensor, or `None` if no loss results
(which is the case when called by `Model.test_step`).
"""
del x # The default implementation does not use `x`.
losses = []
if self._compile_loss is not None:
loss = self._compile_loss(y, y_pred, sample_weight)
if loss is not None:
losses.append(loss)
for loss in self.losses:
losses.append(ops.cast(loss, dtype=backend.floatx()))
if not allow_empty and len(losses) == 0:
raise ValueError(
"No loss to compute. Provide a `loss` argument in `compile()`."
)
if len(losses) == 1:
total_loss = losses[0]
elif len(losses) == 0:
total_loss = ops.zeros(())
else:
total_loss = ops.sum(losses)
return total_loss
def compute_metrics(self, x, y, y_pred, sample_weight=None):
"""Update metric states and collect all metrics to be returned.
Subclasses can optionally override this method to provide custom metric
updating and collection logic.
Example:
```python
class MyModel(Sequential):
def compute_metrics(self, x, y, y_pred, sample_weight):
# This super call updates `self.compiled_metrics` and returns
# results for all metrics listed in `self.metrics`.
metric_results = super().compute_metrics(
x, y, y_pred, sample_weight)
# Note that `self.custom_metric` is not listed
# in `self.metrics`.
self.custom_metric.update_state(x, y, y_pred, sample_weight)
metric_results['metric_name'] = self.custom_metric.result()
return metric_results
```
Args:
x: Input data.
y: Target data.
y_pred: Predictions returned by the model (output of `model.call(x)`).
sample_weight: Sample weights for weighting the loss function.
Returns:
A `dict` containing values that will be passed to
`keras_core.callbacks.CallbackList.on_train_batch_end()`. Typically,
the values of the metrics listed in `self.metrics` are returned.
Example: `{'loss': 0.2, 'accuracy': 0.7}`.
"""
del x # The default implementation does not use `x`.
if self._compile_metrics is not None:
self._compile_metrics.update_state(y, y_pred, sample_weight)
return self.get_metrics_result()
def get_metrics_result(self):
"""Returns the model's metrics values as a dict.
If any of the metric result is a dict (containing multiple metrics),
each of them gets added to the top level returned dict of this method.
Returns:
A `dict` containing values of the metrics listed in `self.metrics`.
Example: `{'loss': 0.2, 'accuracy': 0.7}`.
"""
return_metrics = {}
for metric in self.metrics:
result = metric.result()
if isinstance(result, dict):
return_metrics.update(result)
else:
return_metrics[metric.name] = result
return self._pythonify_logs(return_metrics)
def fit(
self,
x=None,
y=None,
batch_size=None,
epochs=1,
verbose="auto",
callbacks=None,
validation_split=0.0,
validation_data=None,
shuffle=True,
class_weight=None,
sample_weight=None,
initial_epoch=0,
steps_per_epoch=None,
validation_steps=None,
validation_batch_size=None,
validation_freq=1,
):
"""Trains the model for a fixed number of epochs (dataset iterations).
Args:
x: Input data. It could be:
- A NumPy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A tensor, or a list of tensors
(in case the model has multiple inputs).
- A dict mapping input names to the corresponding array/tensors,
if the model has named inputs.
- A `tf.data.Dataset`. Should return a tuple
of either `(inputs, targets)` or
`(inputs, targets, sample_weights)`.
- A `keras_core.utils.PyDataset` returning `(inputs,
targets)` or `(inputs, targets, sample_weights)`.
y: Target data. Like the input data `x`,
it could be either NumPy array(s) or backend-native tensor(s).
If `x` is a dataset, generator,
or `keras_core.utils.PyDataset` instance, `y` should
not be specified (since targets will be obtained from `x`).
batch_size: Integer or `None`.
Number of samples per gradient update.
If unspecified, `batch_size` will default to 32.
Do not specify the `batch_size` if your data is in the
form of datasets, generators, or `keras_core.utils.PyDataset`
instances (since they generate batches).
epochs: Integer. Number of epochs to train the model.
An epoch is an iteration over the entire `x` and `y`
data provided
(unless the `steps_per_epoch` flag is set to
something other than None).
Note that in conjunction with `initial_epoch`,
`epochs` is to be understood as "final epoch".
The model is not trained for a number of iterations
given by `epochs`, but merely until the epoch
of index `epochs` is reached.
verbose: `"auto"`, 0, 1, or 2. Verbosity mode.
0 = silent, 1 = progress bar, 2 = one line per epoch.
"auto" becomes 1 for most cases.
Note that the progress bar is not
particularly useful when logged to a file,
so `verbose=2` is recommended when not running interactively
(e.g., in a production environment). Defaults to `"auto"`.
callbacks: List of `keras_core.callbacks.Callback` instances.
List of callbacks to apply during training.
See `keras_core.callbacks`. Note
`keras_core.callbacks.ProgbarLogger` and
`keras_core.callbacks.History` callbacks are created
automatically and need not be passed to `model.fit()`.
`keras_core.callbacks.ProgbarLogger` is created
or not based on the `verbose` argument in `model.fit()`.
validation_split: Float between 0 and 1.
Fraction of the training data to be used as validation data.
The model will set apart this fraction of the training data,
will not train on it, and will evaluate
the loss and any model metrics
on this data at the end of each epoch.
The validation data is selected from the last samples
in the `x` and `y` data provided, before shuffling. This
argument is not supported when `x` is a dataset, generator or
`keras_core.utils.PyDataset` instance.
If both `validation_data` and `validation_split` are provided,
`validation_data` will override `validation_split`.
validation_data: Data on which to evaluate
the loss and any model metrics at the end of each epoch.
The model will not be trained on this data. Thus, note the fact
that the validation loss of data provided using
`validation_split` or `validation_data` is not affected by
regularization layers like noise and dropout.
`validation_data` will override `validation_split`.
`validation_data` could be:
- A tuple `(x_val, y_val)` of NumPy arrays or tensors.
- A tuple `(x_val, y_val, val_sample_weights)` of NumPy
arrays.
- A `tf.data.Dataset`.
- A Python generator or `keras_core.utils.PyDataset` returning
`(inputs, targets)` or `(inputs, targets, sample_weights)`.
shuffle: Boolean, whether to shuffle the training data
before each epoch. This argument is
ignored when `x` is a generator or a `tf.data.Dataset`.
class_weight: Optional dictionary mapping class indices (integers)
to a weight (float) value, used for weighting the loss function
(during training only).
This can be useful to tell the model to
"pay more attention" to samples from
an under-represented class. When `class_weight` is specified
and targets have a rank of 2 or greater, either `y` must be
one-hot encoded, or an explicit final dimension of `1` must
be included for sparse class labels.
sample_weight: Optional NumPy array of weights for
the training samples, used for weighting the loss function
(during training only). You can either pass a flat (1D)
NumPy array with the same length as the input samples
(1:1 mapping between weights and samples),
or in the case of temporal data,
you can pass a 2D array with shape
`(samples, sequence_length)`,
to apply a different weight to every timestep of every sample.
This argument is not supported when `x` is a dataset, generator,
or `keras_core.utils.PyDataset` instance, instead provide the
sample_weights as the third element of `x`.
Note that sample weighting does not apply to metrics specified
via the `metrics` argument in `compile()`. To apply sample
weighting to your metrics, you can specify them via the
`weighted_metrics` in `compile()` instead.
initial_epoch: Integer.
Epoch at which to start training
(useful for resuming a previous training run).
steps_per_epoch: Integer or `None`.
Total number of steps (batches of samples)
before declaring one epoch finished and starting the
next epoch. When training with input tensors such as
backend-native tensors, the default `None` is equal to
the number of samples in your dataset divided by
the batch size, or 1 if that cannot be determined. If `x` is a
`tf.data.Dataset`, and `steps_per_epoch`
is `None`, the epoch will run until the input dataset is
exhausted. When passing an infinitely repeating dataset, you
must specify the `steps_per_epoch` argument. If
`steps_per_epoch=-1` the training will run indefinitely with an
infinitely repeating dataset.
validation_steps: Only relevant if `validation_data` is provided.
Total number of steps (batches of
samples) to draw before stopping when performing validation
at the end of every epoch. If `validation_steps` is `None`,
validation will run until the `validation_data` dataset is
exhausted. In the case of an infinitely repeated dataset, it
will run into an infinite loop. If `validation_steps` is
specified and only part of the dataset will be consumed, the
evaluation will start from the beginning of the dataset at each
epoch. This ensures that the same validation samples are used
every time.
validation_batch_size: Integer or `None`.
Number of samples per validation batch.
If unspecified, will default to `batch_size`.
Do not specify the `validation_batch_size` if your data is in
the form of datasets or `keras_core.utils.PyDataset`
instances (since they generate batches).
validation_freq: Only relevant if validation data is provided.
Specifies how many training epochs to run
before a new validation run is performed, e.g. `validation_freq=2`
runs validation every 2 epochs.
Unpacking behavior for iterator-like inputs:
A common pattern is to pass an iterator like object such as a
`tf.data.Dataset` or a `keras_core.utils.PyDataset` to `fit()`,
which will in fact yield not only features (`x`)
but optionally targets (`y`) and sample weights (`sample_weight`).
Keras requires that the output of such iterator-likes be
unambiguous. The iterator should return a tuple
of length 1, 2, or 3, where the optional second and third elements
will be used for `y` and `sample_weight` respectively.
Any other type provided will be wrapped in
a length-one tuple, effectively treating everything as `x`. When
yielding dicts, they should still adhere to the top-level tuple
structure,
e.g. `({"x0": x0, "x1": x1}, y)`. Keras will not attempt to separate
features, targets, and weights from the keys of a single dict.
A notable unsupported data type is the `namedtuple`. The reason is
that it behaves like both an ordered datatype (tuple) and a mapping
datatype (dict). So given a namedtuple of the form:
`namedtuple("example_tuple", ["y", "x"])`
it is ambiguous whether to reverse the order of the elements when
interpreting the value. Even worse is a tuple of the form:
`namedtuple("other_tuple", ["x", "y", "z"])`
where it is unclear if the tuple was intended to be unpacked
into `x`, `y`, and `sample_weight` or passed through
as a single element to `x`.
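For example, an iterator-like yielding dict features together with
targets and sample weights would produce 3-tuples like this
(illustrative names and shapes, not part of the API):
```python
import numpy as np

def data_generator():
    while True:
        x = {"x0": np.zeros((32, 8)), "x1": np.zeros((32, 3))}
        y = np.zeros((32, 1))
        sample_weight = np.ones((32,))
        yield (x, y, sample_weight)
```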
Returns:
A `History` object. Its `History.history` attribute is
a record of training loss values and metrics values
at successive epochs, as well as validation loss values
and validation metrics values (if applicable).
"""
raise NotImplementedError
def evaluate(
self,
x=None,
y=None,
batch_size=None,
verbose="auto",
sample_weight=None,
steps=None,
callbacks=None,
return_dict=False,
**kwargs,
):
"""Returns the loss value & metrics values for the model in test mode.
Computation is done in batches (see the `batch_size` arg.)
Args:
x: Input data. It could be:
- A NumPy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A tensor, or a list of tensors
(in case the model has multiple inputs).
- A dict mapping input names to the corresponding array/tensors,
if the model has named inputs.
- A `tf.data.Dataset`. Should return a tuple
of either `(inputs, targets)` or
`(inputs, targets, sample_weights)`.
- A generator or `keras_core.utils.PyDataset` returning
`(inputs, targets)` or `(inputs, targets, sample_weights)`.
y: Target data. Like the input data `x`, it could be either NumPy
array(s) or backend-native tensor(s).
If `x` is a `tf.data.Dataset` or `keras_core.utils.PyDataset`
instance, `y` should not be specified
(since targets will be obtained from the iterator/dataset).
batch_size: Integer or `None`. Number of samples per batch of
computation. If unspecified, `batch_size` will default to 32. Do
not specify the `batch_size` if your data is in the form of a
dataset, generators, or `keras_core.utils.PyDataset` instances
(since they generate batches).
verbose: `"auto"`, 0, 1, or 2. Verbosity mode.
0 = silent, 1 = progress bar, 2 = single line.
`"auto"` becomes 1 for most cases.
Note that the progress bar is not
particularly useful when logged to a file, so `verbose=2` is
recommended when not running interactively
(e.g. in a production environment). Defaults to `"auto"`.
sample_weight: Optional NumPy array of weights for the test samples,
used for weighting the loss function. You can either pass a flat
(1D) NumPy array with the same length as the input samples
(1:1 mapping between weights and samples), or in the case of
temporal data, you can pass a 2D array with shape `(samples,
sequence_length)`, to apply a different weight to every
timestep of every sample. This argument is not supported when
`x` is a dataset, instead pass sample weights as the third
element of `x`.
steps: Integer or `None`. Total number of steps (batches of samples)
before declaring the evaluation round finished. Ignored with the
default value of `None`. If `x` is a `tf.data.Dataset` and
`steps` is `None`, evaluation will run until the dataset
is exhausted.
callbacks: List of `keras_core.callbacks.Callback` instances.
List of callbacks to apply during evaluation.
return_dict: If `True`, loss and metric results are returned as a
dict, with each key being the name of the metric.
If `False`, they are returned as a list.
Returns:
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
"""
raise NotImplementedError
def predict(
self, x, batch_size=None, verbose="auto", steps=None, callbacks=None
):
"""Generates output predictions for the input samples.
Computation is done in batches. This method is designed for batch
processing of large numbers of inputs. It is not intended for use inside
of loops that iterate over your data and process small numbers of inputs
at a time.
For small numbers of inputs that fit in one batch,
directly use `__call__()` for faster execution, e.g.,
`model(x)`, or `model(x, training=False)` if you have layers such as
`BatchNormalization` that behave differently during
inference.
Note: See [this FAQ entry](
https://keras.io/getting_started/faq/#whats-the-difference-between-model-methods-predict-and-call)
for more details about the difference between `Model` methods
`predict()` and `__call__()`.
Args:
x: Input samples. It could be:
- A NumPy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A tensor, or a list of tensors
(in case the model has multiple inputs).
- A `tf.data.Dataset`.
- A `keras_core.utils.PyDataset` instance.
batch_size: Integer or `None`.
Number of samples per batch.
If unspecified, `batch_size` will default to 32.
Do not specify the `batch_size` if your data is in the
form of dataset, generators, or `keras_core.utils.PyDataset`
instances (since they generate batches).
verbose: `"auto"`, 0, 1, or 2. Verbosity mode.
0 = silent, 1 = progress bar, 2 = single line.
`"auto"` becomes 1 for most cases. Note that the progress bar
is not particularly useful when logged to a file,
so `verbose=2` is recommended when not running interactively
(e.g. in a production environment). Defaults to `"auto"`.
steps: Total number of steps (batches of samples)
before declaring the prediction round finished.
Ignored with the default value of `None`.
If `x` is a `tf.data.Dataset` and `steps` is `None`,
`predict()` will run until the input dataset is exhausted.
callbacks: List of `keras_core.callbacks.Callback` instances.
List of callbacks to apply during prediction.
Returns:
NumPy array(s) of predictions.
"""
raise NotImplementedError
def train_on_batch(
self,
x,
y=None,
sample_weight=None,
class_weight=None,
return_dict=False,
):
"""Runs a single gradient update on a single batch of data.
Args:
x: Input data. Must be array-like.
y: Target data. Must be array-like.
sample_weight: Optional array of the same length as x, containing
weights to apply to the model's loss for each sample.
In the case of temporal data, you can pass a 2D array
with shape `(samples, sequence_length)`, to apply a different
weight to every timestep of every sample.
class_weight: Optional dictionary mapping class indices (integers)
to a weight (float) to apply to the model's loss for the samples
from this class during training. This can be useful to tell the
model to "pay more attention" to samples from an
under-represented class. When `class_weight` is specified
and targets have a rank of 2 or greater, either `y` must
be one-hot encoded, or an explicit final dimension of 1
must be included for sparse class labels.
return_dict: If `True`, loss and metric results are returned as a
dict, with each key being the name of the metric. If `False`,
they are returned as a list.
Returns:
A scalar loss value (when no metrics and `return_dict=False`),
a list of loss and metric values
(if there are metrics and `return_dict=False`), or a dict of
metric and loss values (if `return_dict=True`).
"""
raise NotImplementedError
def test_on_batch(
self,
x,
y=None,
sample_weight=None,
return_dict=False,
):
"""Test the model on a single batch of samples.
Args:
x: Input data. Must be array-like.
y: Target data. Must be array-like.
sample_weight: Optional array of the same length as x, containing
weights to apply to the model's loss for each sample.
In the case of temporal data, you can pass a 2D array
with shape `(samples, sequence_length)`, to apply a different
weight to every timestep of every sample.
return_dict: If `True`, loss and metric results are returned as a
dict, with each key being the name of the metric. If `False`,
they are returned as a list.
Returns:
A scalar loss value (when no metrics and `return_dict=False`),
a list of loss and metric values
(if there are metrics and `return_dict=False`), or a dict of
metric and loss values (if `return_dict=True`).
"""
raise NotImplementedError
def predict_on_batch(self, x):
"""Returns predictions for a single batch of samples.
Args:
x: Input data. It must be array-like.
Returns:
NumPy array(s) of predictions.
"""
raise NotImplementedError
def get_compile_config(self):
"""Returns a serialized config with information for compiling the model.
This method returns a config dictionary containing all the information
(optimizer, loss, metrics, etc.) with which the model was compiled.
Returns:
A dict containing information for compiling the model.
"""
if self.compiled and hasattr(self, "_compile_config"):
return self._compile_config.serialize()
def compile_from_config(self, config):
"""Compiles the model with the information given in config.
This method uses the information in the config (optimizer, loss,
metrics, etc.) to compile the model.
Args:
config: Dict containing information for compiling the model.
"""
has_overridden_compile = self.__class__.compile != Trainer.compile
if has_overridden_compile:
warnings.warn(
"`compile()` was not called as part of model loading "
"because the model's `compile()` method is custom. "
"All subclassed Models that have `compile()` "
"overridden should also override "
"`get_compile_config()` and `compile_from_config(config)`. "
"Alternatively, you can "
"call `compile()` manually after loading.",
stacklevel=2,
)
return
config = serialization_lib.deserialize_keras_object(config)
self.compile(**config)
if hasattr(self, "optimizer") and self.built:
# Create optimizer variables.
self.optimizer.build(self.trainable_variables)
def _should_eval(self, epoch, validation_freq):
epoch = epoch + 1 # one-index the user-facing epoch.
if isinstance(validation_freq, int):
return epoch % validation_freq == 0
elif isinstance(validation_freq, list):
return epoch in validation_freq
else:
raise ValueError(
"Expected `validation_freq` to be a list or int. "
f"Received: validation_freq={validation_freq} of the "
f"type {type(validation_freq)}."
)
def _pythonify_logs(self, logs):
result = {}
for key, value in sorted(logs.items()):
if isinstance(value, dict):
result.update(self._pythonify_logs(value))
else:
try:
value = float(value)
except:
pass
result[key] = value
return result
def _flatten_metrics_in_order(self, logs):
"""Turns `logs` dict into a list as per key order of `metrics_names`."""
metric_names = [m.name for m in self.metrics]
results = []
for name in metric_names:
if name in logs:
results.append(logs[name])
for key in sorted(logs.keys()):
if key not in metric_names:
results.append(logs[key])
if len(results) == 1:
return results[0]
return results
def _assert_compile_called(self, method_name=None):
if not self.compiled:
msg = "You must call `compile()` before "
if method_name is None:
    msg += "using the model."
else:
    msg += f"calling `{method_name}()`."
raise ValueError(msg)
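# --- Editorial sketch (not part of keras_core) ------------------------------
# Typical use of the compile-config round trip documented in
# `get_compile_config()` / `compile_from_config()` above. `model` and `clone`
# stand for any compile-capable `Trainer` subclasses (e.g. `keras_core.Model`
# instances); the compile arguments are illustrative.
def _compile_roundtrip_sketch(model, clone):
    model.compile(optimizer="rmsprop", loss="mse", metrics=["accuracy"])
    config = model.get_compile_config()  # serialized optimizer/loss/metrics
    clone.compile_from_config(config)  # re-compiles `clone` the same way
    return clone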
def resolve_auto_jit_compile(model):
if model_supports_jit(model):
if backend.backend() == "torch":
# Torch defaults to eager mode
# until torch compile is reliable
return False
return True
return False
def model_supports_jit(model):
if platform.system() == "Darwin" and "arm" in platform.processor().lower():
if backend.backend() == "tensorflow":
from keras_core.utils.module_utils import tensorflow as tf
if tf.config.list_physical_devices("GPU"):
return False
if all(x.supports_jit for x in model._flatten_layers()):
return True
return False
# End of file: keras-core/keras_core/trainers/trainer.py
import os
import numpy as np
from keras_core import testing
from keras_core.utils import image_dataset_utils
from keras_core.utils import image_utils
from keras_core.utils.module_utils import tensorflow as tf
class ImageDatasetFromDirectoryTest(testing.TestCase):
def _get_images(self, count=16, color_mode="rgb"):
width = height = 24
imgs = []
for _ in range(count):
if color_mode == "grayscale":
img = np.random.randint(0, 256, size=(height, width, 1))
elif color_mode == "rgba":
img = np.random.randint(0, 256, size=(height, width, 4))
else:
img = np.random.randint(0, 256, size=(height, width, 3))
img = image_utils.array_to_img(img)
imgs.append(img)
return imgs
def _prepare_directory(
self,
num_classes=2,
nested_dirs=False,
color_mode="rgb",
count=16,
):
# Generate paths to class subdirectories
temp_dir = self.get_temp_dir()
paths = []
for class_index in range(num_classes):
class_directory = f"class_{class_index}"
if nested_dirs:
class_paths = [
class_directory,
os.path.join(class_directory, "subfolder_1"),
os.path.join(class_directory, "subfolder_2"),
os.path.join(
class_directory, "subfolder_1", "sub-subfolder"
),
]
else:
class_paths = [class_directory]
for path in class_paths:
os.mkdir(os.path.join(temp_dir, path))
paths += class_paths
# Save images to the paths
i = 0
for img in self._get_images(color_mode=color_mode, count=count):
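            # Spread the generated images round-robin across the class (and
            # nested) directories.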
path = paths[i % len(paths)]
if color_mode == "rgb":
ext = "jpg"
else:
ext = "png"
filename = os.path.join(path, f"image_{i}.{ext}")
img.save(os.path.join(temp_dir, filename))
i += 1
return temp_dir
def test_image_dataset_from_directory_standalone(self):
# Test retrieving images without labels from a directory and its
# subdirs.
# Save a few extra images in the parent directory.
directory = self._prepare_directory(count=7, num_classes=2)
for i, img in enumerate(self._get_images(3)):
filename = f"image_{i}.jpg"
img.save(os.path.join(directory, filename))
dataset = image_dataset_utils.image_dataset_from_directory(
directory, batch_size=5, image_size=(18, 18), labels=None
)
batch = next(iter(dataset))
# We return plain images
self.assertEqual(batch.shape, (5, 18, 18, 3))
self.assertEqual(batch.dtype.name, "float32")
# Count samples
batch_count = 0
sample_count = 0
for batch in dataset:
batch_count += 1
sample_count += batch.shape[0]
self.assertEqual(batch_count, 2)
self.assertEqual(sample_count, 10)
def test_image_dataset_from_directory_binary(self):
directory = self._prepare_directory(num_classes=2)
dataset = image_dataset_utils.image_dataset_from_directory(
directory, batch_size=8, image_size=(18, 18), label_mode="int"
)
batch = next(iter(dataset))
self.assertLen(batch, 2)
self.assertEqual(batch[0].shape, (8, 18, 18, 3))
self.assertEqual(batch[0].dtype.name, "float32")
self.assertEqual(batch[1].shape, (8,))
self.assertEqual(batch[1].dtype.name, "int32")
dataset = image_dataset_utils.image_dataset_from_directory(
directory, batch_size=8, image_size=(18, 18), label_mode="binary"
)
batch = next(iter(dataset))
self.assertLen(batch, 2)
self.assertEqual(batch[0].shape, (8, 18, 18, 3))
self.assertEqual(batch[0].dtype.name, "float32")
self.assertEqual(batch[1].shape, (8, 1))
self.assertEqual(batch[1].dtype.name, "float32")
dataset = image_dataset_utils.image_dataset_from_directory(
directory,
batch_size=8,
image_size=(18, 18),
label_mode="categorical",
)
batch = next(iter(dataset))
self.assertLen(batch, 2)
self.assertEqual(batch[0].shape, (8, 18, 18, 3))
self.assertEqual(batch[0].dtype.name, "float32")
self.assertEqual(batch[1].shape, (8, 2))
self.assertEqual(batch[1].dtype.name, "float32")
def test_static_shape_in_graph(self):
directory = self._prepare_directory(num_classes=2)
dataset = image_dataset_utils.image_dataset_from_directory(
directory, batch_size=8, image_size=(18, 18), label_mode="int"
)
test_case = self
@tf.function
def symbolic_fn(ds):
for x, _ in ds.take(1):
test_case.assertListEqual(x.shape.as_list(), [None, 18, 18, 3])
symbolic_fn(dataset)
def test_sample_count(self):
directory = self._prepare_directory(num_classes=4, count=15)
dataset = image_dataset_utils.image_dataset_from_directory(
directory, batch_size=8, image_size=(18, 18), label_mode=None
)
sample_count = 0
for batch in dataset:
sample_count += batch.shape[0]
self.assertEqual(sample_count, 15)
def test_image_dataset_from_directory_multiclass(self):
directory = self._prepare_directory(num_classes=4, count=15)
dataset = image_dataset_utils.image_dataset_from_directory(
directory, batch_size=8, image_size=(18, 18), label_mode=None
)
batch = next(iter(dataset))
self.assertEqual(batch.shape, (8, 18, 18, 3))
dataset = image_dataset_utils.image_dataset_from_directory(
directory, batch_size=8, image_size=(18, 18), label_mode=None
)
        sample_count = 0
        for batch in dataset:
            sample_count += batch.shape[0]
self.assertEqual(sample_count, 15)
dataset = image_dataset_utils.image_dataset_from_directory(
directory, batch_size=8, image_size=(18, 18), label_mode="int"
)
batch = next(iter(dataset))
self.assertLen(batch, 2)
self.assertEqual(batch[0].shape, (8, 18, 18, 3))
self.assertEqual(batch[0].dtype.name, "float32")
self.assertEqual(batch[1].shape, (8,))
self.assertEqual(batch[1].dtype.name, "int32")
dataset = image_dataset_utils.image_dataset_from_directory(
directory,
batch_size=8,
image_size=(18, 18),
label_mode="categorical",
)
batch = next(iter(dataset))
self.assertLen(batch, 2)
self.assertEqual(batch[0].shape, (8, 18, 18, 3))
self.assertEqual(batch[0].dtype.name, "float32")
self.assertEqual(batch[1].shape, (8, 4))
self.assertEqual(batch[1].dtype.name, "float32")
def test_image_dataset_from_directory_color_modes(self):
directory = self._prepare_directory(num_classes=4, color_mode="rgba")
dataset = image_dataset_utils.image_dataset_from_directory(
directory, batch_size=8, image_size=(18, 18), color_mode="rgba"
)
batch = next(iter(dataset))
self.assertLen(batch, 2)
self.assertEqual(batch[0].shape, (8, 18, 18, 4))
self.assertEqual(batch[0].dtype.name, "float32")
directory = self._prepare_directory(
num_classes=4, color_mode="grayscale"
)
dataset = image_dataset_utils.image_dataset_from_directory(
directory, batch_size=8, image_size=(18, 18), color_mode="grayscale"
)
batch = next(iter(dataset))
self.assertLen(batch, 2)
self.assertEqual(batch[0].shape, (8, 18, 18, 1))
self.assertEqual(batch[0].dtype.name, "float32")
def test_image_dataset_from_directory_validation_split(self):
directory = self._prepare_directory(num_classes=2, count=10)
dataset = image_dataset_utils.image_dataset_from_directory(
directory,
batch_size=10,
image_size=(18, 18),
validation_split=0.2,
subset="training",
seed=1337,
)
batch = next(iter(dataset))
self.assertLen(batch, 2)
self.assertEqual(batch[0].shape, (8, 18, 18, 3))
dataset = image_dataset_utils.image_dataset_from_directory(
directory,
batch_size=10,
image_size=(18, 18),
validation_split=0.2,
subset="validation",
seed=1337,
)
batch = next(iter(dataset))
self.assertLen(batch, 2)
self.assertEqual(batch[0].shape, (2, 18, 18, 3))
(
train_dataset,
val_dataset,
) = image_dataset_utils.image_dataset_from_directory(
directory,
batch_size=10,
image_size=(18, 18),
validation_split=0.2,
subset="both",
seed=1337,
)
batch = next(iter(train_dataset))
self.assertLen(batch, 2)
self.assertEqual(batch[0].shape, (8, 18, 18, 3))
batch = next(iter(val_dataset))
self.assertLen(batch, 2)
self.assertEqual(batch[0].shape, (2, 18, 18, 3))
def test_image_dataset_from_directory_manual_labels(self):
directory = self._prepare_directory(num_classes=2, count=2)
dataset = image_dataset_utils.image_dataset_from_directory(
directory,
batch_size=8,
image_size=(18, 18),
labels=[0, 1],
shuffle=False,
)
batch = next(iter(dataset))
self.assertLen(batch, 2)
self.assertAllClose(batch[1], [0, 1])
def test_image_dataset_from_directory_follow_links(self):
directory = self._prepare_directory(
num_classes=2, count=25, nested_dirs=True
)
dataset = image_dataset_utils.image_dataset_from_directory(
directory,
batch_size=8,
image_size=(18, 18),
label_mode=None,
follow_links=True,
)
sample_count = 0
for batch in dataset:
sample_count += batch.shape[0]
self.assertEqual(sample_count, 25)
def test_image_dataset_from_directory_no_images(self):
directory = self._prepare_directory(num_classes=2, count=0)
with self.assertRaisesRegex(ValueError, "No images found."):
_ = image_dataset_utils.image_dataset_from_directory(directory)
def test_image_dataset_from_directory_crop_to_aspect_ratio(self):
directory = self._prepare_directory(num_classes=2, count=5)
dataset = image_dataset_utils.image_dataset_from_directory(
directory,
batch_size=5,
image_size=(18, 18),
crop_to_aspect_ratio=True,
)
batch = next(iter(dataset))
self.assertLen(batch, 2)
self.assertEqual(batch[0].shape, (5, 18, 18, 3))
def test_image_dataset_from_directory_errors(self):
directory = self._prepare_directory(num_classes=3, count=5)
with self.assertRaisesRegex(ValueError, "`labels` argument should be"):
_ = image_dataset_utils.image_dataset_from_directory(
directory, labels="other"
)
with self.assertRaisesRegex(
ValueError, "`label_mode` argument must be"
):
_ = image_dataset_utils.image_dataset_from_directory(
directory, label_mode="other"
)
with self.assertRaisesRegex(ValueError, "`color_mode` must be one of"):
_ = image_dataset_utils.image_dataset_from_directory(
directory, color_mode="other"
)
with self.assertRaisesRegex(
ValueError, 'only pass `class_names` if `labels="inferred"`'
):
_ = image_dataset_utils.image_dataset_from_directory(
directory,
labels=[0, 0, 1, 1, 1],
class_names=["class_0", "class_1", "class_2"],
)
with self.assertRaisesRegex(
ValueError,
"Expected the lengths of `labels` to match the number of files",
):
_ = image_dataset_utils.image_dataset_from_directory(
directory, labels=[0, 0, 1, 1]
)
with self.assertRaisesRegex(
ValueError, "`class_names` passed did not match"
):
_ = image_dataset_utils.image_dataset_from_directory(
directory, class_names=["class_0", "class_2"]
)
with self.assertRaisesRegex(ValueError, "there must be exactly 2"):
_ = image_dataset_utils.image_dataset_from_directory(
directory, label_mode="binary"
)
with self.assertRaisesRegex(
ValueError, "`validation_split` must be between 0 and 1"
):
_ = image_dataset_utils.image_dataset_from_directory(
directory, validation_split=2
)
with self.assertRaisesRegex(
ValueError,
'`subset` must be either "training", "validation" or "both"',
):
_ = image_dataset_utils.image_dataset_from_directory(
directory, validation_split=0.2, subset="other"
)
with self.assertRaisesRegex(
ValueError, "`validation_split` must be set"
):
_ = image_dataset_utils.image_dataset_from_directory(
directory, validation_split=0, subset="training"
)
with self.assertRaisesRegex(ValueError, "must provide a `seed`"):
_ = image_dataset_utils.image_dataset_from_directory(
directory, validation_split=0.2, subset="training"
)
def test_image_dataset_from_directory_not_batched(self):
directory = self._prepare_directory(num_classes=2, count=2)
dataset = image_dataset_utils.image_dataset_from_directory(
directory,
batch_size=None,
image_size=(18, 18),
label_mode=None,
shuffle=False,
)
sample = next(iter(dataset))
self.assertEqual(len(sample.shape), 3)
| keras-core/keras_core/utils/image_dataset_utils_test.py/0 | {
"file_path": "keras-core/keras_core/utils/image_dataset_utils_test.py",
"repo_id": "keras-core",
"token_count": 7204
} | 55 |
import numpy as np
import pytest
import tensorflow as tf
import keras_core
from keras_core import backend
from keras_core.testing import test_case
from keras_core.utils import rng_utils
class TestRandomSeedSetting(test_case.TestCase):
@pytest.mark.skipif(
backend.backend() == "numpy",
reason="Numpy backend does not support random seed setting.",
)
def test_set_random_seed(self):
def get_model_output():
model = keras_core.Sequential(
[
keras_core.layers.Dense(10),
keras_core.layers.Dropout(0.5),
keras_core.layers.Dense(10),
]
)
x = np.random.random((32, 10)).astype("float32")
ds = tf.data.Dataset.from_tensor_slices(x).shuffle(32).batch(16)
return model.predict(ds)
rng_utils.set_random_seed(42)
y1 = get_model_output()
rng_utils.set_random_seed(42)
y2 = get_model_output()
self.assertAllClose(y1, y2)
| keras-core/keras_core/utils/rng_utils_test.py/0 | {
"file_path": "keras-core/keras_core/utils/rng_utils_test.py",
"repo_id": "keras-core",
"token_count": 518
} | 56 |
# Unique source of truth for the version number.
__version__ = "0.1.7"
| keras-core/keras_core/version.py/0 | {
"file_path": "keras-core/keras_core/version.py",
"repo_id": "keras-core",
"token_count": 23
} | 57 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from keras_cv.layers import AutoContrast
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing
class OldAutoContrast(BaseImageAugmentationLayer):
"""Performs the AutoContrast operation on an image.
Auto contrast stretches the values of an image across the entire available
    `value_range`. This makes differences between pixels more obvious. For
    example, if an image only has values `[0, 1]` out of the range `[0, 255]`,
    auto contrast will change the `1` values to `255`.
Args:
value_range: the range of values the incoming images will have.
Represented as a two number tuple written [low, high].
This is typically either `[0, 1]` or `[0, 255]` depending
on how your preprocessing pipeline is set up.
"""
def __init__(
self,
value_range,
**kwargs,
):
super().__init__(**kwargs)
self.value_range = value_range
def augment_image(self, image, transformation=None, **kwargs):
original_image = image
image = preprocessing.transform_value_range(
image,
original_range=self.value_range,
target_range=(0, 255),
dtype=self.compute_dtype,
)
low = tf.reduce_min(tf.reduce_min(image, axis=0), axis=0)
high = tf.reduce_max(tf.reduce_max(image, axis=0), axis=0)
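        # Linearly stretch each channel so its minimum maps to 0 and its
        # maximum maps to 255.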
scale = 255.0 / (high - low)
offset = -low * scale
image = image * scale[None, None] + offset[None, None]
result = tf.clip_by_value(image, 0.0, 255.0)
result = preprocessing.transform_value_range(
result,
original_range=(0, 255),
target_range=self.value_range,
dtype=self.compute_dtype,
)
# don't process NaN channels
result = tf.where(tf.math.is_nan(result), original_image, result)
return result
def augment_bounding_boxes(self, bounding_boxes, **kwargs):
return bounding_boxes
def augment_label(self, label, transformation=None, **kwargs):
return label
def augment_segmentation_mask(
self, segmentation_mask, transformation, **kwargs
):
return segmentation_mask
def get_config(self):
config = super().get_config()
config.update({"value_range": self.value_range})
return config
class AutoContrastConsistencyTest(tf.test.TestCase):
def test_consistency_with_old_implementation(self):
images = tf.random.uniform(shape=(16, 32, 32, 3))
output = AutoContrast(value_range=(0, 1))(images)
old_output = OldAutoContrast(value_range=(0, 1))(images)
self.assertAllClose(old_output, output)
if __name__ == "__main__":
(x_train, _), _ = keras.datasets.cifar10.load_data()
x_train = x_train.astype(float)
images = []
num_images = [1000, 2000, 5000, 10000]
results = {}
for aug in [AutoContrast, OldAutoContrast]:
c = aug.__name__
layer = aug(value_range=(0, 255))
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
layer(x_train[:n_images])
t0 = time.time()
r1 = layer(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1 - t0}")
results[c] = runtimes
c = aug.__name__ + " Graph Mode"
layer = aug(value_range=(0, 255))
@tf.function()
def apply_aug(inputs):
return layer(inputs)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
apply_aug(x_train[:n_images])
t0 = time.time()
r1 = apply_aug(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1 - t0}")
results[c] = runtimes
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.show()
# So we can actually see more relevant margins
del results["OldAutoContrast"]
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.show()
# Compare two implementations
tf.test.main()
| keras-cv/benchmarks/vectorized_auto_contrast.py/0 | {
"file_path": "keras-cv/benchmarks/vectorized_auto_contrast.py",
"repo_id": "keras-cv",
"token_count": 2237
} | 58 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from keras import backend
from tensorflow import keras
from keras_cv.layers import RandomZoom
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing as preprocessing_utils
# In order to support both unbatched and batched inputs, the horizontal
# and vertical axis is reverse indexed
H_AXIS = -3
W_AXIS = -2
class OldRandomZoom(BaseImageAugmentationLayer):
"""A preprocessing layer which randomly zooms images during training.
This layer will randomly zoom in or out on each axis of an image
independently, filling empty space according to `fill_mode`.
Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and
of integer or floating point dtype. By default, the layer will output
floats.
Args:
height_factor: a float represented as fraction of value, or a tuple of
size 2 representing lower and upper bound for zooming vertically. When
represented as a single float, this value is used for both the upper and
lower bound. A positive value means zooming out, while a negative value
means zooming in. For instance, `height_factor=(0.2, 0.3)` result in an
output zoomed out by a random amount in the range `[+20%, +30%]`.
`height_factor=(-0.3, -0.2)` result in an output zoomed in by a random
amount in the range `[-30%, -20%]`.
width_factor: a float represented as fraction of value, or a tuple of size
2 representing lower and upper bound for zooming horizontally. When
represented as a single float, this value is used for both the upper and
        lower bound. For instance, `width_factor=(0.2, 0.3)` results in an output
        zoomed out by 20% to 30%. `width_factor=(-0.3, -0.2)` results in an
        output zoomed in by 20% to 30%. Defaults to `None`, i.e., zooming in the
        vertical and horizontal directions while preserving the aspect ratio. If
height_factor=0 and width_factor=None, it would result in images with
no zoom at all.
fill_mode: Points outside the boundaries of the input are filled according
to the given mode (one of `{"constant", "reflect", "wrap", "nearest"}`).
- *reflect*: `(d c b a | a b c d | d c b a)` The input is extended by
reflecting about the edge of the last pixel.
- *constant*: `(k k k k | a b c d | k k k k)` The input is extended by
filling all values beyond the edge with the same constant value k = 0.
- *wrap*: `(a b c d | a b c d | a b c d)` The input is extended by
wrapping around to the opposite edge.
- *nearest*: `(a a a a | a b c d | d d d d)` The input is extended by
the nearest pixel.
interpolation: Interpolation mode. Supported values: `"nearest"`,
`"bilinear"`.
seed: Integer. Used to create a random seed.
fill_value: a float represents the value to be filled outside the
boundaries when `fill_mode="constant"`.
Example:
>>> input_img = np.random.random((32, 224, 224, 3))
>>> layer = keras_cv.layers.RandomZoom(.5, .2)
>>> out_img = layer(input_img)
>>> out_img.shape
TensorShape([32, 224, 224, 3])
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format.
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format.
"""
def __init__(
self,
height_factor,
width_factor=None,
fill_mode="reflect",
interpolation="bilinear",
seed=None,
fill_value=0.0,
**kwargs,
):
super().__init__(seed=seed, **kwargs)
self.height_factor = height_factor
if isinstance(height_factor, (tuple, list)):
self.height_lower = height_factor[0]
self.height_upper = height_factor[1]
else:
self.height_lower = -height_factor
self.height_upper = height_factor
if abs(self.height_lower) > 1.0 or abs(self.height_upper) > 1.0:
raise ValueError(
"`height_factor` must have values between [-1, 1], "
f"got {height_factor}"
)
self.width_factor = width_factor
if width_factor is not None:
if isinstance(width_factor, (tuple, list)):
self.width_lower = width_factor[0]
self.width_upper = width_factor[1]
else:
self.width_lower = -width_factor
self.width_upper = width_factor
if self.width_lower < -1.0 or self.width_upper < -1.0:
raise ValueError(
"`width_factor` must have values larger than -1, "
f"got {width_factor}"
)
preprocessing_utils.check_fill_mode_and_interpolation(
fill_mode, interpolation
)
self.fill_mode = fill_mode
self.fill_value = fill_value
self.interpolation = interpolation
self.seed = seed
def get_random_transformation(self, image=None, **kwargs):
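        # Zoom factors are sampled as (1 + factor); when `width_factor` is
        # None, the height zoom is reused so the aspect ratio is preserved.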
height_zoom = self._random_generator.uniform(
shape=[1, 1],
minval=1.0 + self.height_lower,
maxval=1.0 + self.height_upper,
)
if self.width_factor is not None:
width_zoom = self._random_generator.uniform(
shape=[1, 1],
minval=1.0 + self.width_lower,
maxval=1.0 + self.width_upper,
)
else:
width_zoom = height_zoom
return {"height_zoom": height_zoom, "width_zoom": width_zoom}
def augment_image(self, image, transformation, **kwargs):
image = preprocessing_utils.ensure_tensor(image, self.compute_dtype)
original_shape = image.shape
image = tf.expand_dims(image, 0)
image_shape = tf.shape(image)
img_hd = tf.cast(image_shape[H_AXIS], tf.float32)
img_wd = tf.cast(image_shape[W_AXIS], tf.float32)
width_zoom = transformation["width_zoom"]
height_zoom = transformation["height_zoom"]
zooms = tf.cast(
tf.concat([width_zoom, height_zoom], axis=1), dtype=tf.float32
)
output = preprocessing_utils.transform(
image,
self.get_zoom_matrix(zooms, img_hd, img_wd),
fill_mode=self.fill_mode,
fill_value=self.fill_value,
interpolation=self.interpolation,
)
output = tf.squeeze(output, 0)
output.set_shape(original_shape)
return output
def augment_label(self, label, transformation, **kwargs):
return label
def get_config(self):
config = {
"height_factor": self.height_factor,
"width_factor": self.width_factor,
"fill_mode": self.fill_mode,
"fill_value": self.fill_value,
"interpolation": self.interpolation,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def get_zoom_matrix(self, zooms, image_height, image_width, name=None):
"""Returns projective transform(s) for the given zoom(s).
Args:
zooms: A matrix of 2-element lists representing `[zx, zy]` to zoom for
each image (for a batch of images).
image_height: Height of the image(s) to be transformed.
image_width: Width of the image(s) to be transformed.
name: The name of the op.
Returns:
A tensor of shape `(num_images, 8)`. Projective transforms which can be
given to operation `image_projective_transform_v2`.
If one row of transforms is
`[a0, a1, a2, b0, b1, b2, c0, c1]`, then it maps the *output* point
`(x, y)` to a transformed *input* point
`(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`,
where `k = c0 x + c1 y + 1`.
"""
with backend.name_scope(name or "zoom_matrix"):
num_zooms = tf.shape(zooms)[0]
# The zoom matrix looks like:
# [[zx 0 0]
# [0 zy 0]
# [0 0 1]]
# where the last entry is implicit.
# Zoom matrices are always float32.
x_offset = ((image_width - 1.0) / 2.0) * (1.0 - zooms[:, 0, None])
y_offset = ((image_height - 1.0) / 2.0) * (1.0 - zooms[:, 1, None])
return tf.concat(
values=[
zooms[:, 0, None],
tf.zeros((num_zooms, 1), tf.float32),
x_offset,
tf.zeros((num_zooms, 1), tf.float32),
zooms[:, 1, None],
y_offset,
tf.zeros((num_zooms, 2), tf.float32),
],
axis=1,
)
class RandomZoomTest(tf.test.TestCase):
def test_consistency_with_old_impl_in(self):
image_shape = (16, 32, 32, 3)
fixed_height_factor = (-0.5, -0.5)
fixed_width_factor = (-0.5, -0.5)
image = tf.random.uniform(shape=image_shape) * 255.0
layer = RandomZoom(
fixed_height_factor, fixed_width_factor, interpolation="nearest"
)
old_layer = OldRandomZoom(
fixed_height_factor, fixed_width_factor, interpolation="nearest"
)
output = layer(image)
old_output = old_layer(image)
self.assertAllClose(old_output, output)
def test_consistency_with_old_impl_out(self):
image_shape = (16, 32, 32, 3)
fixed_height_factor = (0.5, 0.5)
fixed_width_factor = (0.8, 0.8)
image = tf.random.uniform(shape=image_shape) * 255.0
layer = RandomZoom(
fixed_height_factor, fixed_width_factor, interpolation="nearest"
)
old_layer = OldRandomZoom(
fixed_height_factor, fixed_width_factor, interpolation="nearest"
)
output = layer(image)
old_output = old_layer(image)
self.assertAllClose(old_output, output)
if __name__ == "__main__":
# Run benchmark
(x_train, _), _ = keras.datasets.cifar10.load_data()
x_train = x_train.astype(np.float32)
num_images = [100, 200, 500, 1000]
results = {}
aug_candidates = [RandomZoom, OldRandomZoom]
aug_args = {"height_factor": 0.2, "width_factor": 0.3}
for aug in aug_candidates:
# Eager Mode
c = aug.__name__
layer = aug(**aug_args)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
layer(x_train[:n_images])
t0 = time.time()
r1 = layer(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
# Graph Mode
c = aug.__name__ + " Graph Mode"
layer = aug(**aug_args)
@tf.function()
def apply_aug(inputs):
return layer(inputs)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
apply_aug(x_train[:n_images])
t0 = time.time()
r1 = apply_aug(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
# XLA Mode
# cannot run tf.raw_ops.ImageProjectiveTransformV3 on XLA
# for more information please refer:
# https://github.com/tensorflow/tensorflow/issues/55194
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison.png")
# So we can actually see more relevant margins
del results[aug_candidates[1].__name__]
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison_no_old_eager.png")
# Run unit tests
tf.test.main()
| keras-cv/benchmarks/vectorized_random_zoom.py/0 | {
"file_path": "keras-cv/benchmarks/vectorized_random_zoom.py",
"repo_id": "keras-cv",
"token_count": 5924
} | 59 |
{
"convmixer_512_16": {
"v0": {
"accelerators": 8,
"args": {
"batch_size": "64",
"initial_learning_rate": "0.0125",
"use_ema": "True",
"weight_decay": "0.0001"
},
"contributor": "ianstenbit",
"epochs_trained": 136,
"script": {
"name": "basic_training.py",
"version": "c58b266f1bc21047a82a7ac983515d8818b9e438"
},
"tensorboard_logs": "https://tensorboard.dev/experiment/uM1t7gZJRMekAeKRPtJfrQ/",
"validation_accuracy": "0.7438"
}
},
"cspdarknet": {
"v0": {
"accelerators": 8,
"args": {
"batch_size": "64",
"epochs": "300",
"initial_learning_rate": ".0125",
"learning_rate_schedule": "CosineDecayWithWarmup",
"warmup_hold_steps_percentage": ".45",
"warmup_steps_percentage": ".01"
},
"contributor": "ianstenbit",
"epochs_trained": 299,
"script": {
"name": "basic_training.py",
"version": "dceea23c954e59c5884e98384140e0a8ad5bd320"
},
"tensorboard_logs": "https://tensorboard.dev/experiment/3eUYhaFMQ3O3fvdg1lDRMw/",
"validation_accuracy": "0.7744"
}
},
"cspdarknettiny": {
"v0": {
"accelerators": 8,
"args": {
"batch_size": "64",
"initial_learning_rate": "0.0125",
"use_ema": "True",
"weight_decay": "0.0001"
},
"contributor": "ianstenbit",
"epochs_trained": 136,
"script": {
"name": "basic_training.py",
"version": "212e67fc9acb65c699b609e4cdae54552d22e6b4"
},
"tensorboard_logs": "https://tensorboard.dev/experiment/yaJKJ934QO2i9ozLFrnfZw/",
"validation_accuracy": "0.6169"
}
},
"darknet53": {
"v0": {
"accelerators": 8,
"args": {
"batch_size": "64",
"initial_learning_rate": "0.0125"
},
"contributor": "ianstenbit",
"epochs_trained": 174,
"script": {
"name": "basic_training.py",
"version": "dceea23c954e59c5884e98384140e0a8ad5bd320"
},
"tensorboard_logs": "https://tensorboard.dev/experiment/665BYHNUQlmSpxkyyvrKng/",
"validation_accuracy": "0.7640"
}
},
"densenet121": {
"v0": {
"accelerators": 2,
"args": {
"batch_size": "64"
},
"contributor": "ianstenbit",
"epochs_trained": 84,
"script": {
"name": "basic_training.py",
"version": "90d4c3548a2e989fe52d6cf7ae7439af794f0ae6"
},
"tensorboard_logs": "https://tensorboard.dev/experiment/K5Q0gAk0RayXwP0WsLPpMA/",
"validation_accuracy": "0.6771"
}
},
"densenet169": {
"v0": {
"accelerators": 2,
"args": {
"batch_size": "64"
},
"contributor": "ianstenbit",
"epochs_trained": 50,
"script": {
"name": "basic_training.py",
"version": "90d4c3548a2e989fe52d6cf7ae7439af794f0ae6"
},
"tensorboard_logs": "https://tensorboard.dev/experiment/aQIvxQEgTqajldKxp688Nw/",
"validation_accuracy": "0.6613"
}
},
"densenet201": {
"v0": {
"accelerators": 8,
"args": {
"batch_size": "512"
},
"contributor": "ianstenbit",
"epochs_trained": 166,
"script": {
"name": "basic_training.py",
"version": "b0b349612e00ab34c25af5467ddd3b48d6fbf7a3"
},
"tensorboard_logs": "https://tensorboard.dev/experiment/6iLPGz5RSEiyPymgzJbKIQ/",
"validation_accuracy": "0.7469"
}
},
"efficientnetv2b0": {
"v0": {
"accelerators": 8,
"args": {
"batch_size": "64",
"initial_learning_rate": ".0125"
},
"contributor": "ianstenbit",
"epochs_trained": 320,
"script": {
"name": "basic_training.py",
"version": "e349ca5563b05548996f438fa03b2f34a8231ca3"
},
"tensorboard_logs": "https://tensorboard.dev/experiment/kBs9YZkwQAeVNfv8JPKCLw/",
"validation_accuracy": "0.7527"
}
},
"efficientnetv2b1": {
"v0": {
"accelerators": 8,
"args": {
"batch_size": "64",
"initial_learning_rate": ".0125"
},
"contributor": "ianstenbit",
"epochs_trained": 288,
"script": {
"name": "basic_training.py",
"version": "e349ca5563b05548996f438fa03b2f34a8231ca3"
},
"tensorboard_logs": "https://tensorboard.dev/experiment/jQAQBh6LQUep18CDayP8ww/",
"validation_accuracy": "0.7560"
}
},
"efficientnetv2b2": {
"v0": {
"accelerators": 8,
"args": {
"batch_size": "64",
"initial_learning_rate": ".0125"
},
"contributor": "ianstenbit",
"epochs_trained": 313,
"script": {
"name": "basic_training.py",
"version": "02b41ea91b972cdd29c27dbc4d79e6a0b4e90de2"
},
"tensorboard_logs": "https://tensorboard.dev/experiment/iyhN2qvIRrqj6C0Q328drg/",
"validation_accuracy": "0.7699"
}
},
"efficientnetv2s": {
"v0": {
"accelerators": 2,
"args": {
"batch_size": "64",
"initial_learning_rate": "0.05"
},
"contributor": "ianstenbit",
"epochs_trained": 305,
"script": {
"name": "basic_training.py",
"version": "02b41ea91b972cdd29c27dbc4d79e6a0b4e90de2"
},
"tensorboard_logs": "https://tensorboard.dev/experiment/Lr4LbX32T1yOAxPhQJRkAw/",
"validation_accuracy": "0.8010"
}
},
"resnet50": {
"v0": {
"accelerators": 8,
"args": {
"batch_size": "64",
"initial_learning_rate": "0.0125",
"use_ema": "True",
"weight_decay": "0.0001"
},
"contributor": "ianstenbit",
"epochs_trained": 158,
"script": {
"name": "basic_training.py",
"version": "212e67fc9acb65c699b609e4cdae54552d22e6b4"
},
"tensorboard_logs": "https://tensorboard.dev/experiment/H5kM5mYOQQq82sEEtrOq7g/",
"validation_accuracy": "0.7550"
}
},
"resnet50v2": {
"v0": {
"accelerators": 2,
"args": {
"batch_size": "64",
"initial_learning_rate": "0.005"
},
"contributor": "ianstenbit",
"epochs_trained": 132,
"script": {
"name": "basic_training.py",
"version": "3288c3ab31ce1c35fe7505e245fdfa9c593af78e"
},
"tensorboard_logs": "https://tensorboard.dev/experiment/QlkKjMkqQxm3jbzOlzBvWA/",
"validation_accuracy": "0.6337"
},
"v1": {
"accelerators": 2,
"args": {
"batch_size": "128"
},
"contributor": "ianstenbit",
"epochs_trained": 168,
"script": {
"name": "basic_training.py",
"version": "8fcffd9ee81ca9892f73d8ec3ac0ba475d2f1426"
},
"tensorboard_logs": "https://tensorboard.dev/experiment/TQ5r1EhXS4SDDagBD84rgA/",
"validation_accuracy": "0.7550"
},
"v2": {
"accelerators": 8,
"args": {
"batch_size": "64",
"initial_learning_rate": ".0125"
},
"contributor": "ianstenbit",
"epochs_trained": 150,
"script": {
"name": "basic_training.py",
"version": "02b41ea91b972cdd29c27dbc4d79e6a0b4e90de2"
},
"tensorboard_logs": "https://tensorboard.dev/experiment/ReyWQHwETwah0nqlXl8BOA/",
"validation_accuracy": "0.7612"
}
},
"script_authors": {
"basic_training.py": [
"ianstenbit",
"DavidLandup0"
]
}
}
| keras-cv/examples/training/classification/imagenet/training_history.json/0 | {
"file_path": "keras-cv/examples/training/classification/imagenet/training_history.json",
"repo_id": "keras-cv",
"token_count": 5582
} | 60 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.backend import config
if config.keras_3():
from keras.ops import * # noqa: F403, F401
from keras.preprocessing.image import smart_resize # noqa: F403, F401
from keras_cv.backend import keras
name_scope = keras.name_scope
else:
try:
from keras.src.ops import * # noqa: F403, F401
from keras.src.utils.image_utils import smart_resize # noqa: F403, F401
# Import error means Keras isn't installed, or is Keras 2.
except ImportError:
from keras_core.src.backend import vectorized_map # noqa: F403, F401
from keras_core.src.ops import * # noqa: F403, F401
from keras_core.src.utils.image_utils import ( # noqa: F403, F401
smart_resize,
)
if config.backend() == "tensorflow":
from keras_cv.backend.tf_ops import * # noqa: F403, F401
| keras-cv/keras_cv/backend/ops.py/0 | {
"file_path": "keras-cv/keras_cv/backend/ops.py",
"repo_id": "keras-cv",
"token_count": 517
} | 61 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.core.factor_sampler.factor_sampler import FactorSampler
@keras_cv_export("keras_cv.core.ConstantFactorSampler")
class ConstantFactorSampler(FactorSampler):
"""ConstantFactorSampler samples the same factor for every call to
`__call__()`.
This is useful in cases where a user wants to always ensure that an
augmentation layer performs augmentations of the same strength.
Args:
value: the value to return from `__call__()`.
Usage:
```python
constant_factor = keras_cv.ConstantFactorSampler(0.5)
random_sharpness = keras_cv.layers.RandomSharpness(factor=constant_factor)
# random_sharpness will now always use a factor of 0.5
```
"""
def __init__(self, value):
self.value = value
def __call__(self, shape=(), dtype="float32"):
return tf.ones(shape=shape, dtype=dtype) * self.value
def get_config(self):
return {"value": self.value}
@classmethod
def from_config(cls, config):
return cls(**config)
| keras-cv/keras_cv/core/factor_sampler/constant_factor_sampler.py/0 | {
"file_path": "keras-cv/keras_cv/core/factor_sampler/constant_factor_sampler.py",
"repo_id": "keras-cv",
"token_count": 553
} | 62 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.layers.regularization.drop_path import DropPath
from keras_cv.layers.segformer_multihead_attention import (
SegFormerMultiheadAttention,
)
@keras_cv_export("keras_cv.layers.HierarchicalTransformerEncoder")
class HierarchicalTransformerEncoder(keras.layers.Layer):
"""
Hierarchical transformer encoder block implementation as a Keras Layer.
The layer uses `SegFormerMultiheadAttention` as a `MultiHeadAttention`
alternative for computational efficiency, and is meant to be used
within the SegFormer architecture.
References:
- [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) (CVPR 2021) # noqa: E501
- [Official PyTorch implementation](https://github.com/NVlabs/SegFormer/blob/master/mmseg/models/backbones/mix_transformer.py) # noqa: E501
- [Ported from the TensorFlow implementation from DeepVision](https://github.com/DavidLandup0/deepvision/blob/main/deepvision/layers/hierarchical_transformer_encoder.py) # noqa: E501
Args:
project_dim: integer, the dimensionality of the projection of the
encoder, and output of the `SegFormerMultiheadAttention` layer.
Due to the residual addition the input dimensionality has to be
equal to the output dimensionality.
num_heads: integer, the number of heads for the
`SegFormerMultiheadAttention` layer.
drop_prob: float, the probability of dropping a random
sample using the `DropPath` layer. Defaults to `0.0`.
layer_norm_epsilon: float, the epsilon for
`LayerNormalization` layers. Defaults to `1e-06`
sr_ratio: integer, the ratio to use within
`SegFormerMultiheadAttention`. If set to > 1, a `Conv2D`
layer is used to reduce the length of the sequence. Defaults to `1`.
Basic usage:
```
project_dim = 1024
num_heads = 4
patch_size = 16
encoded_patches = keras_cv.layers.OverlappingPatchingAndEmbedding(
project_dim=project_dim, patch_size=patch_size)(img_batch)
trans_encoded = keras_cv.layers.HierarchicalTransformerEncoder(project_dim=project_dim,
num_heads=num_heads,
sr_ratio=1)(encoded_patches)
print(trans_encoded.shape) # (1, 3136, 1024)
```
"""
def __init__(
self,
project_dim,
num_heads,
sr_ratio=1,
drop_prob=0.0,
layer_norm_epsilon=1e-6,
**kwargs,
):
super().__init__(**kwargs)
self.project_dim = project_dim
self.num_heads = num_heads
self.drop_prop = drop_prob
self.norm1 = keras.layers.LayerNormalization(epsilon=layer_norm_epsilon)
self.attn = SegFormerMultiheadAttention(
project_dim, num_heads, sr_ratio
)
self.drop_path = DropPath(drop_prob)
self.norm2 = keras.layers.LayerNormalization(epsilon=layer_norm_epsilon)
self.mlp = self.MixFFN(
channels=project_dim,
mid_channels=int(project_dim * 4),
)
def build(self, input_shape):
super().build(input_shape)
self.H = ops.sqrt(ops.cast(input_shape[1], "float32"))
self.W = ops.sqrt(ops.cast(input_shape[2], "float32"))
def call(self, x):
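        # Pre-norm residual blocks: self-attention followed by the mix-FFN,
        # each with stochastic depth (DropPath) applied to its residual branch.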
x = x + self.drop_path(self.attn(self.norm1(x)))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
def get_config(self):
config = super().get_config()
config.update(
{
"mlp": keras.saving.serialize_keras_object(self.mlp),
"project_dim": self.project_dim,
"num_heads": self.num_heads,
"drop_prop": self.drop_prop,
}
)
return config
class MixFFN(keras.layers.Layer):
def __init__(self, channels, mid_channels):
super().__init__()
self.fc1 = keras.layers.Dense(mid_channels)
self.dwconv = keras.layers.DepthwiseConv2D(
kernel_size=3,
strides=1,
padding="same",
)
self.fc2 = keras.layers.Dense(channels)
def call(self, x):
x = self.fc1(x)
shape = ops.shape(x)
H, W = int(math.sqrt(shape[1])), int(math.sqrt(shape[1]))
B, C = shape[0], shape[2]
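            # Reshape the token sequence into an (H, W) spatial grid so the
            # depthwise convolution can mix neighboring patches, then flatten
            # back into a sequence.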
x = ops.reshape(x, (B, H, W, C))
x = self.dwconv(x)
x = ops.reshape(x, (B, -1, C))
x = ops.nn.gelu(x)
x = self.fc2(x)
return x
| keras-cv/keras_cv/layers/hierarchical_transformer_encoder.py/0 | {
"file_path": "keras-cv/keras_cv/layers/hierarchical_transformer_encoder.py",
"repo_id": "keras-cv",
"token_count": 2444
} | 63 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import tensorflow as tf
from keras_cv.layers.object_detection.roi_pool import ROIPooler
from keras_cv.tests.test_case import TestCase
@pytest.mark.tf_keras_only
class ROIPoolTest(TestCase):
def test_no_quantize(self):
roi_pooler = ROIPooler(
"rel_yxyx", target_size=[2, 2], image_shape=[224, 224, 3]
)
feature_map = tf.expand_dims(
tf.reshape(tf.range(64), [8, 8, 1]), axis=0
)
rois = tf.reshape(tf.constant([0.0, 0.0, 1.0, 1.0]), [1, 1, 4])
pooled_feature_map = roi_pooler(feature_map, rois)
# the maximum value would be at bottom-right at each block, roi sharded
# into 2x2 blocks
# | 0, 1, 2, 3 | 4, 5, 6, 7 |
# | 8, 9, 10, 11 | 12, 13, 14, 15 |
# | 16, 17, 18, 19 | 20, 21, 22, 23 |
# | 24, 25, 26, 27(max) | 28, 29, 30, 31(max) |
# --------------------------------------------
# | 32, 33, 34, 35 | 36, 37, 38, 39 |
# | 40, 41, 42, 43 | 44, 45, 46, 47 |
# | 48, 49, 50, 51 | 52, 53, 54, 55 |
# | 56, 57, 58, 59(max) | 60, 61, 62, 63(max) |
# --------------------------------------------
expected_feature_map = tf.reshape(
tf.constant([27, 31, 59, 63]), [1, 2, 2, 1]
)
self.assertAllClose(expected_feature_map, pooled_feature_map)
def test_roi_quantize_y(self):
roi_pooler = ROIPooler(
"yxyx", target_size=[2, 2], image_shape=[224, 224, 3]
)
feature_map = tf.expand_dims(
tf.reshape(tf.range(64), [8, 8, 1]), axis=0
)
rois = tf.reshape(tf.constant([0.0, 0.0, 224, 220]), [1, 1, 4])
pooled_feature_map = roi_pooler(feature_map, rois)
# the maximum value would be at bottom-right at each block, roi sharded
# into 2x2 blocks
# | 0, 1, 2 | 3, 4, 5, 6 | 7 (removed)
# | 8, 9, 10 | 11, 12, 13, 14 | 15 (removed)
# | 16, 17, 18 | 19, 20, 21, 22 | 23 (removed)
# | 24, 25, 26(max) | 27, 28, 29, 30(max) | 31 (removed)
# --------------------------------------------
# | 32, 33, 34 | 35, 36, 37, 38 | 39 (removed)
# | 40, 41, 42 | 43, 44, 45, 46 | 47 (removed)
# | 48, 49, 50 | 51, 52, 53, 54 | 55 (removed)
# | 56, 57, 58(max) | 59, 60, 61, 62(max) | 63 (removed)
# --------------------------------------------
expected_feature_map = tf.reshape(
tf.constant([26, 30, 58, 62]), [1, 2, 2, 1]
)
self.assertAllClose(expected_feature_map, pooled_feature_map)
def test_roi_quantize_x(self):
roi_pooler = ROIPooler(
"yxyx", target_size=[2, 2], image_shape=[224, 224, 3]
)
feature_map = tf.expand_dims(
tf.reshape(tf.range(64), [8, 8, 1]), axis=0
)
rois = tf.reshape(tf.constant([0.0, 0.0, 220, 224]), [1, 1, 4])
pooled_feature_map = roi_pooler(feature_map, rois)
# the maximum value would be at bottom-right at each block, roi sharded
# into 2x2 blocks
# | 0, 1, 2, 3 | 4, 5, 6, 7 |
# | 8, 9, 10, 11 | 12, 13, 14, 15 |
# | 16, 17, 18, 19(max) | 20, 21, 22, 23(max) |
# --------------------------------------------
# | 24, 25, 26, 27 | 28, 29, 30, 31 |
# | 32, 33, 34, 35 | 36, 37, 38, 39 |
# | 40, 41, 42, 43 | 44, 45, 46, 47 |
# | 48, 49, 50, 51(max) | 52, 53, 54, 55(max) |
# --------------------------------------------
expected_feature_map = tf.reshape(
tf.constant([19, 23, 51, 55]), [1, 2, 2, 1]
)
self.assertAllClose(expected_feature_map, pooled_feature_map)
def test_roi_quantize_h(self):
roi_pooler = ROIPooler(
"yxyx", target_size=[3, 2], image_shape=[224, 224, 3]
)
feature_map = tf.expand_dims(
tf.reshape(tf.range(64), [8, 8, 1]), axis=0
)
rois = tf.reshape(tf.constant([0.0, 0.0, 224, 224]), [1, 1, 4])
pooled_feature_map = roi_pooler(feature_map, rois)
# the maximum value would be at bottom-right at each block, roi sharded
# into 3x2 blocks
# | 0, 1, 2, 3 | 4, 5, 6, 7 |
# | 8, 9, 10, 11(max) | 12, 13, 14, 15(max) |
# --------------------------------------------
# | 16, 17, 18, 19 | 20, 21, 22, 23 |
# | 24, 25, 26, 27 | 28, 29, 30, 31 |
# | 32, 33, 34, 35(max) | 36, 37, 38, 39(max) |
# --------------------------------------------
# | 40, 41, 42, 43 | 44, 45, 46, 47 |
# | 48, 49, 50, 51 | 52, 53, 54, 55 |
# | 56, 57, 58, 59(max) | 60, 61, 62, 63(max) |
# --------------------------------------------
expected_feature_map = tf.reshape(
tf.constant([11, 15, 35, 39, 59, 63]), [1, 3, 2, 1]
)
self.assertAllClose(expected_feature_map, pooled_feature_map)
def test_roi_quantize_w(self):
roi_pooler = ROIPooler(
"yxyx", target_size=[2, 3], image_shape=[224, 224, 3]
)
feature_map = tf.expand_dims(
tf.reshape(tf.range(64), [8, 8, 1]), axis=0
)
rois = tf.reshape(tf.constant([0.0, 0.0, 224, 224]), [1, 1, 4])
pooled_feature_map = roi_pooler(feature_map, rois)
# the maximum value would be at bottom-right at each block, roi sharded
# into 2x3 blocks
# | 0, 1 | 2, 3, 4 | 5, 6, 7 |
# | 8, 9 | 10, 11, 12 | 13, 14, 15 |
# | 16, 17 | 18, 19, 20 | 21, 22, 23 |
# | 24, 25(max) | 26, 27, 28(max) | 29, 30, 31(max) |
# --------------------------------------------
# | 32, 33 | 34, 35, 36 | 37, 38, 39 |
# | 40, 41 | 42, 43, 44 | 45, 46, 47 |
# | 48, 49 | 50, 51, 52 | 53, 54, 55 |
# | 56, 57(max) | 58, 59, 60(max) | 61, 62, 63(max) |
# --------------------------------------------
expected_feature_map = tf.reshape(
tf.constant([25, 28, 31, 57, 60, 63]), [1, 2, 3, 1]
)
self.assertAllClose(expected_feature_map, pooled_feature_map)
def test_roi_feature_map_height_smaller_than_roi(self):
roi_pooler = ROIPooler(
"yxyx", target_size=[6, 2], image_shape=[224, 224, 3]
)
feature_map = tf.expand_dims(
tf.reshape(tf.range(16), [4, 4, 1]), axis=0
)
rois = tf.reshape(tf.constant([0.0, 0.0, 224, 224]), [1, 1, 4])
pooled_feature_map = roi_pooler(feature_map, rois)
# | 0, 1(max) | 2, 3(max) |
# ------------------repeated----------------------
# | 4, 5(max) | 6, 7(max) |
# --------------------------------------------
# | 8, 9(max) | 10, 11(max) |
# ------------------repeated----------------------
# | 12, 13(max) | 14, 15(max) |
expected_feature_map = tf.reshape(
tf.constant([1, 3, 1, 3, 5, 7, 9, 11, 9, 11, 13, 15]), [1, 6, 2, 1]
)
self.assertAllClose(expected_feature_map, pooled_feature_map)
def test_roi_feature_map_width_smaller_than_roi(self):
roi_pooler = ROIPooler(
"yxyx", target_size=[2, 6], image_shape=[224, 224, 3]
)
feature_map = tf.expand_dims(
tf.reshape(tf.range(16), [4, 4, 1]), axis=0
)
rois = tf.reshape(tf.constant([0.0, 0.0, 224, 224]), [1, 1, 4])
pooled_feature_map = roi_pooler(feature_map, rois)
# | 0 | 1 | 2 | 3 |
# | 4(max) | 5(max) | 6(max) | 7(max) |
# --------------------------------------------
# | 8 | 9 | 10 | 11 |
# | 12(max) | 13(max) | 14(max) | 15(max) |
# --------------------------------------------
expected_feature_map = tf.reshape(
tf.constant([4, 4, 5, 6, 6, 7, 12, 12, 13, 14, 14, 15]),
[1, 2, 6, 1],
)
self.assertAllClose(expected_feature_map, pooled_feature_map)
def test_roi_empty(self):
roi_pooler = ROIPooler(
"yxyx", target_size=[2, 2], image_shape=[224, 224, 3]
)
feature_map = tf.expand_dims(
tf.reshape(tf.range(1, 65), [8, 8, 1]), axis=0
)
rois = tf.reshape(tf.constant([0.0, 0.0, 0.0, 0.0]), [1, 1, 4])
pooled_feature_map = roi_pooler(feature_map, rois)
# all outputs should be top-left pixel
self.assertAllClose(tf.ones([1, 2, 2, 1]), pooled_feature_map)
def test_invalid_image_shape(self):
with self.assertRaisesRegex(ValueError, "dynamic shape"):
_ = ROIPooler(
"rel_yxyx", target_size=[2, 2], image_shape=[None, 224, 3]
)
| keras-cv/keras_cv/layers/object_detection/roi_pool_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/object_detection/roi_pool_test.py",
"repo_id": "keras-cv",
"token_count": 5018
} | 64 |
# Preprocessing Layers
KerasCV offers many preprocessing and data augmentation layers which support classification, object detection, and segmentation masks. When you use KerasCV augmentation layers to augment your training data, class labels, bounding boxes, and mask labels automatically get augmented alongside the image augmentations!
The provided table gives an overview of the different augmentation layers available and the data formats they support.
| Layer Name | Vectorized | Segmentation Masks | BBoxes | Class Labels |
| :-- | :--: | :--: | :--: | :--: |
| AugMix | ❌ | ✅ | ✅ | ✅ |
| AutoContrast | ✅ | ✅ | ✅ | ✅ |
| ChannelShuffle | ✅ | ✅ | ✅ | ✅ |
| CutMix | ❌ | ✅ | ❌ | ✅ |
| Equalization | ❌ | ✅ | ✅ | ✅ |
| FourierMix | ❌ | ✅ | ❌ | ✅ |
| Grayscale | ✅ | ✅ | ✅ | ✅ |
| GridMask | ❌ | ✅ | ✅ | ✅ |
| JitteredResize | ✅ | ✅ | ✅ | ✅ |
| MixUp | ❌ | ✅ | ✅ | ✅ |
| Mosaic | ✅ | ✅ | ✅ | ✅ |
| Posterization | ❌ | ✅ | ✅ | ✅ |
| RandAugment | ❌ | ❌ | ❌ | ❌ |
| RandomApply <sup>+</sup> | - | - | - | - |
| RandomAspectRatio | ❌ | ❌ | ✅ | ✅ |
| RandomBrightness | ✅ | ✅ | ✅ | ✅ |
| RandomChannelShift | ❌ | ✅ | ✅ | ✅ |
| RandomChoice <sup>+</sup> | - | - | - | - |
| RandomColorDegeneration | ❌ | ✅ | ✅ | ✅ |
| RandomColorJitter | ✅ | ✅ | ✅ | ✅ |
| RandomContrast | ✅ | ✅ | ✅ | ✅ |
| RandomCropAndResize | ❌ | ✅ | ✅ | ❌ |
| RandomCrop | ✅ | ❌ | ✅ | ✅ |
| RandomCutout | ❌ | ✅ | ❌ | ✅ |
| RandomFlip | ✅ | ✅ | ✅ | ✅ |
| RandomGaussianBlur | ❌ | ✅ | ✅ | ✅ |
| RandomHue | ✅ | ✅ | ✅ | ✅ |
| RandomJpegQuality | ❌ | ✅ | ✅ | ✅ |
| RandomRotation | ✅ | ✅ | ✅ | ✅ |
| RandomSaturation | ✅ | ✅ | ✅ | ✅ |
| RandomSharpness | ✅ | ✅ | ✅ | ✅ |
| RandomShear | ✅ | ✅ | ✅ | ✅ |
| RandomTranslation | ✅ | ✅ | ✅ | ✅ |
| RandomZoom | ✅ | ✅ | ❌ | ✅ |
| RepeatedAugmentation <sup>+</sup> | - | - | - | - |
| Rescaling | ❌ | ✅ | ✅ | ✅ |
| Resizing | ❌ | ✅ | ✅ | ❌ |
| Solarization | ✅ | ✅ | ✅ | ✅ |
<sup>+</sup> Meta layers; the supported data types depend on the sub-layers they wrap.
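As a minimal sketch of the shared dictionary interface (assuming the `xyxy` bounding box format and the `RandomFlip` layer; other layers in the table work the same way), images and bounding boxes are passed through a single layer as a dictionary so that both are transformed consistently:

```python
import numpy as np

import keras_cv

# A batch of four 64x64 RGB images with one box per image, in "xyxy" format.
images = np.random.uniform(size=(4, 64, 64, 3)).astype("float32")
bounding_boxes = {
    "boxes": np.array([[[10.0, 10.0, 30.0, 30.0]]] * 4, dtype="float32"),
    "classes": np.array([[0.0]] * 4, dtype="float32"),
}

augmenter = keras_cv.layers.RandomFlip(bounding_box_format="xyxy")
outputs = augmenter({"images": images, "bounding_boxes": bounding_boxes})
# outputs["images"] and outputs["bounding_boxes"] are flipped together.
```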
# Base Layers
- BaseImageAugmentationLayer
- VectorizedBaseImageAugmentationLayer
- RandomAugmentationPipeline | keras-cv/keras_cv/layers/preprocessing/README.md/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/README.md",
"repo_id": "keras-cv",
"token_count": 882
} | 65 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
VectorizedBaseImageAugmentationLayer,
)
@keras_cv_export("keras_cv.layers.Grayscale")
class Grayscale(VectorizedBaseImageAugmentationLayer):
"""Grayscale is a preprocessing layer that transforms RGB images to
Grayscale images.
Input images should have values in the range of [0, 255].
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format
Args:
        output_channels: Number of color channels present in the output image.
            The output_channels can be 1 or 3. An RGB image with shape
(..., height, width, 3) will have the following shapes
after the `Grayscale` operation:
a. (..., height, width, 1) if output_channels = 1
b. (..., height, width, 3) if output_channels = 3.
Usage:
```python
(images, labels), _ = keras.datasets.cifar10.load_data()
to_grayscale = keras_cv.layers.preprocessing.Grayscale()
augmented_images = to_grayscale(images)
```
"""
def __init__(self, output_channels=1, **kwargs):
super().__init__(**kwargs)
self.output_channels = output_channels
self._check_input_params(output_channels)
def _check_input_params(self, output_channels):
if output_channels not in [1, 3]:
raise ValueError(
"Received invalid argument output_channels. "
f"output_channels must be in 1 or 3. Got {output_channels}"
)
self.output_channels = output_channels
def compute_ragged_image_signature(self, images):
ragged_spec = tf.RaggedTensorSpec(
shape=images.shape[1:3] + (self.output_channels,),
ragged_rank=1,
dtype=self.compute_dtype,
)
return ragged_spec
def augment_ragged_image(self, image, transformation, **kwargs):
return self.augment_images(
image, transformations=transformation, **kwargs
)
def augment_images(self, images, transformations=None, **kwargs):
grayscale = tf.image.rgb_to_grayscale(images)
if self.output_channels == 1:
return grayscale
elif self.output_channels == 3:
return tf.image.grayscale_to_rgb(grayscale)
else:
raise ValueError("Unsupported value for `output_channels`.")
def augment_bounding_boxes(self, bounding_boxes, **kwargs):
return bounding_boxes
def augment_labels(self, labels, transformations=None, **kwargs):
return labels
def augment_segmentation_masks(
self, segmentation_masks, transformations, **kwargs
):
return segmentation_masks
def get_config(self):
config = {
"output_channels": self.output_channels,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
| keras-cv/keras_cv/layers/preprocessing/grayscale.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/grayscale.py",
"repo_id": "keras-cv",
"token_count": 1536
} | 66 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import tensorflow as tf
from absl.testing import parameterized
from keras_cv import layers
from keras_cv.backend import ops
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.layers.preprocessing.random_apply import RandomApply
from keras_cv.tests.test_case import TestCase
class ZeroOut(BaseImageAugmentationLayer):
"""Zero out all entries, for testing purposes."""
def __init__(self):
super(ZeroOut, self).__init__()
def augment_image(self, image, transformation=None, **kwargs):
return 0 * image
def augment_label(self, label, transformation=None, **kwargs):
return 0 * label
class RandomApplyTest(TestCase):
rng = tf.random.Generator.from_seed(seed=1234)
@parameterized.parameters([-0.5, 1.7])
def test_raises_error_on_invalid_rate_parameter(self, invalid_rate):
with self.assertRaises(ValueError):
RandomApply(rate=invalid_rate, layer=ZeroOut())
def test_works_with_batched_input(self):
batch_size = 32
dummy_inputs = self.rng.uniform(shape=(batch_size, 224, 224, 3))
layer = RandomApply(rate=0.5, layer=ZeroOut(), seed=1234)
outputs = ops.convert_to_numpy(layer(dummy_inputs))
num_zero_inputs = self._num_zero_batches(dummy_inputs)
num_zero_outputs = self._num_zero_batches(outputs)
self.assertEqual(num_zero_inputs, 0)
self.assertLess(num_zero_outputs, batch_size)
self.assertGreater(num_zero_outputs, 0)
def test_works_with_batchwise_layers(self):
batch_size = 32
dummy_inputs = self.rng.uniform(shape=(batch_size, 224, 224, 3))
dummy_outputs = self.rng.uniform(shape=(batch_size,))
inputs = {"images": dummy_inputs, "labels": dummy_outputs}
layer = layers.CutMix()
layer = layers.RandomApply(layer, rate=0.5, batchwise=True)
_ = layer(inputs)
@staticmethod
def _num_zero_batches(images):
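        # An image counts as zeroed out only when every one of its pixels is 0.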
num_batches = tf.shape(images)[0]
num_non_zero_batches = tf.math.count_nonzero(
tf.math.count_nonzero(images, axis=[1, 2, 3]), dtype=tf.int32
)
return num_batches - num_non_zero_batches
def test_inputs_unchanged_with_zero_rate(self):
dummy_inputs = self.rng.uniform(shape=(32, 224, 224, 3))
layer = RandomApply(rate=0.0, layer=ZeroOut())
outputs = layer(dummy_inputs)
self.assertAllClose(outputs, dummy_inputs)
def test_all_inputs_changed_with_rate_equal_to_one(self):
dummy_inputs = self.rng.uniform(shape=(32, 224, 224, 3))
layer = RandomApply(rate=1.0, layer=ZeroOut())
outputs = layer(dummy_inputs)
self.assertAllEqual(outputs, tf.zeros_like(dummy_inputs))
def test_works_with_single_image(self):
dummy_inputs = self.rng.uniform(shape=(224, 224, 3))
layer = RandomApply(rate=1.0, layer=ZeroOut())
outputs = layer(dummy_inputs)
self.assertAllEqual(outputs, tf.zeros_like(dummy_inputs))
def test_can_modify_label(self):
dummy_inputs = self.rng.uniform(shape=(32, 224, 224, 3))
dummy_labels = tf.ones(shape=(32, 2))
layer = RandomApply(rate=1.0, layer=ZeroOut())
outputs = layer({"images": dummy_inputs, "labels": dummy_labels})
self.assertAllEqual(outputs["labels"], tf.zeros_like(dummy_labels))
@pytest.mark.tf_only
def test_works_with_xla(self):
dummy_inputs = self.rng.uniform(shape=(32, 224, 224, 3))
# auto_vectorize=True will crash XLA
layer = RandomApply(rate=0.5, layer=ZeroOut(), auto_vectorize=False)
@tf.function(jit_compile=True)
def apply(x):
return layer(x)
apply(dummy_inputs)
| keras-cv/keras_cv/layers/preprocessing/random_apply_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_apply_test.py",
"repo_id": "keras-cv",
"token_count": 1773
} | 67 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
from keras_cv import bounding_box
from keras_cv.backend import ops
from keras_cv.layers.preprocessing.random_rotation import RandomRotation
from keras_cv.tests.test_case import TestCase
class RandomRotationTest(TestCase):
def test_random_rotation_output_shapes(self):
input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
expected_output = input_images
layer = RandomRotation(0.5)
actual_output = layer(input_images, training=True)
self.assertEqual(expected_output.shape, actual_output.shape)
def test_random_rotation_on_batched_images_independently(self):
image = tf.random.uniform((100, 100, 3))
batched_images = tf.stack((image, image), axis=0)
layer = RandomRotation(factor=0.5)
results = layer(batched_images)
self.assertNotAllClose(results[0], results[1])
def test_config_with_custom_name(self):
layer = RandomRotation(0.5, name="image_preproc")
config = layer.get_config()
layer_reconstructed = RandomRotation.from_config(config)
self.assertEqual(layer_reconstructed.name, layer.name)
def test_unbatched_image(self):
input_image = np.reshape(np.arange(0, 25), (5, 5, 1)).astype(np.float32)
# 180 rotation.
layer = RandomRotation(factor=(0.5, 0.5))
output_image = layer(input_image)
expected_output = np.asarray(
[
[24, 23, 22, 21, 20],
[19, 18, 17, 16, 15],
[14, 13, 12, 11, 10],
[9, 8, 7, 6, 5],
[4, 3, 2, 1, 0],
]
).astype(np.float32)
expected_output = np.reshape(expected_output, (5, 5, 1))
self.assertAllClose(expected_output, output_image)
def test_augment_bounding_boxes(self):
input_image = np.random.random((512, 512, 3)).astype(np.float32)
bounding_boxes = {
"boxes": np.array([[200, 200, 400, 400], [100, 100, 300, 300]]),
"classes": np.array([1, 2]),
}
input = {"images": input_image, "bounding_boxes": bounding_boxes}
# 180 rotation.
layer = RandomRotation(factor=(0.5, 0.5), bounding_box_format="xyxy")
output = layer(input)
output["bounding_boxes"] = bounding_box.to_dense(
output["bounding_boxes"]
)
expected_bounding_boxes = {
"boxes": np.array(
[[112.0, 112.0, 312.0, 312.0], [212.0, 212.0, 412.0, 412.0]],
),
"classes": np.array([1, 2]),
}
self.assertAllClose(expected_bounding_boxes, output["bounding_boxes"])
def test_output_dtypes(self):
inputs = np.array([[[1], [2]], [[3], [4]]], dtype="float64")
layer = RandomRotation(0.5)
self.assertAllEqual(
ops.convert_to_numpy(layer(inputs)).dtype, "float32"
)
layer = RandomRotation(0.5, dtype="uint8")
self.assertAllEqual(ops.convert_to_numpy(layer(inputs)).dtype, "uint8")
def test_ragged_bounding_boxes(self):
input_image = tf.random.uniform((2, 512, 512, 3))
bounding_boxes = {
"boxes": tf.ragged.constant(
[
[[200, 200, 400, 400], [100, 100, 300, 300]],
[[200, 200, 400, 400]],
],
dtype=tf.float32,
),
"classes": tf.ragged.constant(
[
[
0,
0,
],
[0],
],
dtype=tf.float32,
),
}
input = {"images": input_image, "bounding_boxes": bounding_boxes}
layer = RandomRotation(factor=(0.5, 0.5), bounding_box_format="xyxy")
output = layer(input)
expected_output = {
"boxes": tf.ragged.constant(
[
[
[112.0, 112.0, 312.0, 312.0],
[212.0, 212.0, 412.0, 412.0],
],
[[112.0, 112.0, 312.0, 312.0]],
],
dtype=tf.float32,
),
"classes": tf.ragged.constant(
[
[
0,
0,
],
[0],
],
dtype=tf.float32,
),
}
expected_output = bounding_box.to_dense(expected_output)
output["bounding_boxes"] = bounding_box.to_dense(
output["bounding_boxes"]
)
self.assertAllClose(
expected_output["boxes"], output["bounding_boxes"]["boxes"]
)
self.assertAllClose(
expected_output["classes"],
output["bounding_boxes"]["classes"],
)
def test_augment_sparse_segmentation_mask(self):
num_classes = 8
input_images = np.random.random((2, 20, 20, 3)).astype(np.float32)
        # Masks are all 0s or 7s, to verify that when we rotate we don't do
        # bad mask interpolation to any value in between
masks = np.random.randint(2, size=(2, 20, 20, 1)) * (num_classes - 1)
inputs = {"images": input_images, "segmentation_masks": masks}
# Attempting to rotate a sparse mask without specifying num_classes
# fails.
bad_layer = RandomRotation(factor=(0.25, 0.25))
with self.assertRaisesRegex(ValueError, "masks must be one-hot"):
outputs = bad_layer(inputs)
# 90 degree rotation.
layer = RandomRotation(
factor=(0.25, 0.25), segmentation_classes=num_classes
)
outputs = layer(inputs)
expected_masks = np.rot90(masks, axes=(1, 2))
self.assertAllClose(expected_masks, outputs["segmentation_masks"])
# 45-degree rotation. Only verifies that no interpolation takes place.
layer = RandomRotation(
factor=(0.125, 0.125), segmentation_classes=num_classes
)
outputs = layer(inputs)
self.assertAllInSet(
ops.convert_to_numpy(outputs["segmentation_masks"]), [0, 7]
)
def test_augment_one_hot_segmentation_mask(self):
num_classes = 8
input_images = np.random.random((2, 20, 20, 3)).astype(np.float32)
masks = np.array(
tf.one_hot(
np.random.randint(num_classes, size=(2, 20, 20)), num_classes
)
)
inputs = {"images": input_images, "segmentation_masks": masks}
# 90 rotation.
layer = RandomRotation(factor=(0.25, 0.25))
outputs = layer(inputs)
expected_masks = np.rot90(masks, axes=(1, 2))
self.assertAllClose(expected_masks, outputs["segmentation_masks"])
| keras-cv/keras_cv/layers/preprocessing/random_rotation_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_rotation_test.py",
"repo_id": "keras-cv",
"token_count": 3663
} | 68 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import tensorflow as tf
from absl.testing import parameterized
from keras_cv import layers as cv_layers
from keras_cv.backend import ops
from keras_cv.backend.config import keras_3
from keras_cv.tests.test_case import TestCase
class ResizingTest(TestCase):
def _run_output_shape_test(self, kwargs, height, width):
kwargs.update({"height": height, "width": width})
layer = cv_layers.Resizing(**kwargs)
inputs = tf.random.uniform((2, 5, 8, 3))
outputs = layer(inputs)
self.assertEqual(outputs.shape, (2, height, width, 3))
@parameterized.named_parameters(
("down_sample_bilinear_2_by_2", {"interpolation": "bilinear"}, 2, 2),
("down_sample_bilinear_3_by_2", {"interpolation": "bilinear"}, 3, 2),
("down_sample_nearest_2_by_2", {"interpolation": "nearest"}, 2, 2),
("down_sample_nearest_3_by_2", {"interpolation": "nearest"}, 3, 2),
("down_sample_area_2_by_2", {"interpolation": "area"}, 2, 2),
("down_sample_area_3_by_2", {"interpolation": "area"}, 3, 2),
(
"down_sample_crop_to_aspect_ratio_3_by_2",
{
"interpolation": "bilinear",
"crop_to_aspect_ratio": True,
},
3,
2,
),
)
def test_down_sampling(self, kwargs, height, width):
self._run_output_shape_test(kwargs, height, width)
@parameterized.named_parameters(
("up_sample_bilinear_10_by_12", {"interpolation": "bilinear"}, 10, 12),
("up_sample_bilinear_12_by_12", {"interpolation": "bilinear"}, 12, 12),
("up_sample_nearest_10_by_12", {"interpolation": "nearest"}, 10, 12),
("up_sample_nearest_12_by_12", {"interpolation": "nearest"}, 12, 12),
("up_sample_area_10_by_12", {"interpolation": "area"}, 10, 12),
("up_sample_area_12_by_12", {"interpolation": "area"}, 12, 12),
(
"up_sample_crop_to_aspect_ratio_12_by_14",
{
"interpolation": "bilinear",
"crop_to_aspect_ratio": True,
},
12,
14,
),
)
def test_up_sampling(self, kwargs, expected_height, expected_width):
self._run_output_shape_test(kwargs, expected_height, expected_width)
def test_down_sampling_numeric(self):
for dtype in (np.int64, np.float32):
input_image = np.reshape(np.arange(0, 16), (1, 4, 4, 1)).astype(
dtype
)
layer = cv_layers.Resizing(
height=2, width=2, interpolation="nearest"
)
output_image = layer(input_image)
# pyformat: disable
expected_output = np.asarray([[5, 7], [13, 15]]).astype(dtype)
# pyformat: enable
expected_output = np.reshape(expected_output, (1, 2, 2, 1))
self.assertAllEqual(expected_output, output_image)
def test_up_sampling_numeric(self):
for dtype in (np.int64, np.float32):
input_image = np.reshape(np.arange(0, 4), (1, 2, 2, 1)).astype(
dtype
)
layer = cv_layers.Resizing(
height=4, width=4, interpolation="nearest"
)
output_image = layer(input_image)
# pyformat: disable
expected_output = np.asarray(
[[0, 0, 1, 1], [0, 0, 1, 1], [2, 2, 3, 3], [2, 2, 3, 3]]
).astype(dtype)
# pyformat: enable
expected_output = np.reshape(expected_output, (1, 4, 4, 1))
self.assertAllEqual(expected_output, output_image)
@parameterized.named_parameters(
("reshape_bilinear_10_by_4", {"interpolation": "bilinear"}, 10, 4)
)
def test_reshaping(self, kwargs, expected_height, expected_width):
self._run_output_shape_test(kwargs, expected_height, expected_width)
def test_invalid_interpolation(self):
with self.assertRaises(NotImplementedError):
cv_layers.Resizing(5, 5, interpolation="invalid_interpolation")
def test_config_with_custom_name(self):
layer = cv_layers.Resizing(5, 5, name="image_preproc")
config = layer.get_config()
layer_1 = cv_layers.Resizing.from_config(config)
self.assertEqual(layer_1.name, layer.name)
def test_crop_to_aspect_ratio(self):
input_image = np.reshape(np.arange(0, 16), (1, 4, 4, 1)).astype(
"float32"
)
layer = cv_layers.Resizing(4, 2, crop_to_aspect_ratio=True)
output_image = layer(input_image)
expected_output = np.asarray(
[
[1, 2],
[5, 6],
[9, 10],
[13, 14],
]
).astype("float32")
expected_output = np.reshape(expected_output, (1, 4, 2, 1))
self.assertAllEqual(expected_output, output_image)
def test_unbatched_image(self):
input_image = np.reshape(np.arange(0, 16), (4, 4, 1)).astype("float32")
layer = cv_layers.Resizing(2, 2, interpolation="nearest")
output_image = layer(input_image)
expected_output = np.asarray(
[
[5, 7],
[13, 15],
]
).astype("float32")
expected_output = np.reshape(expected_output, (2, 2, 1))
self.assertAllEqual(expected_output, output_image)
@parameterized.named_parameters(
("crop_to_aspect_ratio_false", False),
("crop_to_aspect_ratio_true", True),
)
@pytest.mark.tf_keras_only
def test_ragged_image(self, crop_to_aspect_ratio):
inputs = tf.ragged.constant(
[
np.ones((8, 8, 1)),
np.ones((8, 4, 1)),
np.ones((4, 8, 1)),
np.ones((2, 2, 1)),
],
dtype="float32",
)
layer = cv_layers.Resizing(
2,
2,
interpolation="nearest",
crop_to_aspect_ratio=crop_to_aspect_ratio,
)
outputs = layer(inputs)
expected_output = [
[[[1.0], [1.0]], [[1.0], [1.0]]],
[[[1.0], [1.0]], [[1.0], [1.0]]],
[[[1.0], [1.0]], [[1.0], [1.0]]],
[[[1.0], [1.0]], [[1.0], [1.0]]],
]
self.assertIsInstance(outputs, tf.Tensor)
self.assertNotIsInstance(outputs, tf.RaggedTensor)
self.assertAllEqual(expected_output, outputs)
def test_output_dtypes(self):
inputs = np.array([[[1], [2]], [[3], [4]]], dtype="float64")
layer = cv_layers.Resizing(2, 2)
self.assertAllEqual(
ops.convert_to_numpy(layer(inputs)).dtype, "float32"
)
layer = cv_layers.Resizing(2, 2, dtype="uint8")
self.assertAllEqual(ops.convert_to_numpy(layer(inputs)).dtype, "uint8")
@parameterized.named_parameters(
("batch_crop_to_aspect_ratio", True, False, True),
("batch_dont_crop_to_aspect_ratio", False, False, True),
("single_sample_crop_to_aspect_ratio", True, False, False),
("single_sample_dont_crop_to_aspect_ratio", False, False, False),
("batch_pad_to_aspect_ratio", False, True, True),
("single_sample_pad_to_aspect_ratio", False, True, False),
)
@pytest.mark.skipif(
keras_3(), reason="ragged tests not yet enabled for keras 3"
)
def test_static_shape_inference(
self, crop_to_aspect_ratio, pad_to_aspect_ratio, batch
):
channels = 3
input_height = 8
input_width = 8
target_height = 4
target_width = 6
layer = cv_layers.Resizing(
target_height,
target_width,
crop_to_aspect_ratio=crop_to_aspect_ratio,
pad_to_aspect_ratio=pad_to_aspect_ratio,
)
unit_test = self
@tf.function
def tf_function(img):
unit_test.assertListEqual(
[input_height, input_width, channels], img.shape.as_list()[-3:]
)
img = layer(img)
unit_test.assertListEqual(
[target_height, target_width, channels],
img.shape.as_list()[-3:],
)
return img
if batch:
input_shape = (2, input_height, input_width, channels)
else:
input_shape = (input_height, input_width, channels)
img_data = np.random.random(size=input_shape).astype("float32")
tf_function(img_data)
@pytest.mark.tf_keras_only
def test_pad_to_size_with_bounding_boxes_ragged_images(self):
images = tf.ragged.constant(
[
np.ones((8, 8, 3)),
np.ones((8, 4, 3)),
np.ones((4, 8, 3)),
np.ones((2, 2, 3)),
],
dtype="float32",
)
boxes = {
"boxes": tf.ragged.stack(
[
np.ones((3, 4), dtype="float32"),
np.ones((5, 4), dtype="float32"),
np.ones((3, 4), dtype="float32"),
np.ones((2, 4), dtype="float32"),
],
),
"classes": tf.ragged.stack(
[
np.ones((3,), dtype="float32"),
np.ones((5,), dtype="float32"),
np.ones((3,), dtype="float32"),
np.ones((2,), dtype="float32"),
],
),
}
layer = cv_layers.Resizing(
4, 4, pad_to_aspect_ratio=True, bounding_box_format="xyxy"
)
inputs = {"images": images, "bounding_boxes": boxes}
outputs = layer(inputs)
self.assertListEqual(
[4, 4, 4, 3],
outputs["images"].shape.as_list(),
)
@pytest.mark.tf_keras_only
def test_pad_to_size_with_bounding_boxes_ragged_images_upsample(self):
images = tf.ragged.constant(
[
np.ones((8, 8, 3)),
np.ones((8, 4, 3)),
np.ones((4, 8, 3)),
np.ones((2, 2, 3)),
],
dtype="float32",
)
boxes = {
"boxes": tf.ragged.stack(
[
np.ones((3, 4), dtype="float32"),
np.ones((5, 4), dtype="float32"),
np.ones((3, 4), dtype="float32"),
np.ones((2, 4), dtype="float32"),
],
),
"classes": tf.ragged.stack(
[
np.ones((3,), dtype="float32"),
np.ones((5,), dtype="float32"),
np.ones((3,), dtype="float32"),
np.ones((2,), dtype="float32"),
],
),
}
layer = cv_layers.Resizing(
16, 16, pad_to_aspect_ratio=True, bounding_box_format="xyxy"
)
inputs = {"images": images, "bounding_boxes": boxes}
outputs = layer(inputs)
self.assertListEqual(
[4, 16, 16, 3],
outputs["images"].shape.as_list(),
)
self.assertAllEqual(outputs["images"][1][:, :8, :], np.ones((16, 8, 3)))
self.assertAllEqual(
outputs["images"][1][:, -8:, :], np.zeros((16, 8, 3))
)
def test_resize_with_mask(self):
input_images = np.random.normal(size=(2, 4, 4, 3))
seg_masks = np.random.uniform(
low=0.0, high=3.0, size=(2, 4, 4, 3)
).astype("int32")
inputs = {
"images": input_images,
"segmentation_masks": seg_masks,
}
layer = cv_layers.Resizing(2, 2)
outputs = layer(inputs)
expected_output_images = tf.image.resize(input_images, size=(2, 2))
expected_output_seg_masks = tf.image.resize(
seg_masks, size=(2, 2), method="nearest"
)
self.assertAllEqual(expected_output_images, outputs["images"])
self.assertAllEqual(
expected_output_seg_masks, outputs["segmentation_masks"]
)
| keras-cv/keras_cv/layers/preprocessing/resizing_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/resizing_test.py",
"repo_id": "keras-cv",
"token_count": 6645
} | 69 |
# Copyright 2022 Waymo LLC.
#
# Licensed under the terms in https://github.com/keras-team/keras-cv/blob/master/keras_cv/layers/preprocessing_3d/waymo/LICENSE # noqa: E501
import tensorflow as tf
from keras_cv import point_cloud
from keras_cv.api_export import keras_cv_export
from keras_cv.layers.preprocessing_3d import base_augmentation_layer_3d
POINT_CLOUDS = base_augmentation_layer_3d.POINT_CLOUDS
BOUNDING_BOXES = base_augmentation_layer_3d.BOUNDING_BOXES
POINTCLOUD_LABEL_INDEX = base_augmentation_layer_3d.POINTCLOUD_LABEL_INDEX
POINTCLOUD_FEATURE_INDEX = base_augmentation_layer_3d.POINTCLOUD_FEATURE_INDEX
@keras_cv_export("keras_cv.layers.FrustumRandomPointFeatureNoise")
class FrustumRandomPointFeatureNoise(
base_augmentation_layer_3d.BaseAugmentationLayer3D
):
"""A preprocessing layer which randomly add noise to point features within a
randomly generated frustum during training.
This layer will randomly select a point from the point cloud as the center
of a frustum then generate a frustum based on r_distance, theta_width, and
phi_width. Uniformly sampled features noise from [1-max_noise_level,
1+max_noise_level] will be multiplied to points inside the selected frustum.
Here, we perturb point features other than (x, y, z, class). The
point_clouds tensor shape must be specific and cannot be dynamic. During
inference time, the output will be identical to input. Call the layer with
`training=True` to add noise to the input points.
Input shape:
point_clouds: 3D (multi frames) float32 Tensor with shape
[num of frames, num of points, num of point features].
        The first 4 features are [x, y, z, class]; any additional point
        features follow.
bounding_boxes: 3D (multi frames) float32 Tensor with shape
[num of frames, num of boxes, num of box features]. Boxes are expected
to follow the CENTER_XYZ_DXDYDZ_PHI format. Refer to
https://github.com/keras-team/keras-cv/blob/master/keras_cv/bounding_box_3d/formats.py
for more details on supported bounding box formats.
Output shape:
A dictionary of Tensors with the same shape as input Tensors.
Arguments:
      r_distance: A float scalar that sets the starting distance of a frustum.
      theta_width: A float scalar that sets the theta width of a frustum.
      phi_width: A float scalar that sets the phi width of a frustum.
      max_noise_level: A float scalar that sets the sampled feature noise range
        [1 - max_noise_level, 1 + max_noise_level].
exclude_classes: An optional int scalar or a list of ints. Points with the
specified class(es) will not be modified.
"""
def __init__(
self,
r_distance,
theta_width,
phi_width,
max_noise_level=None,
exclude_classes=None,
**kwargs
):
super().__init__(**kwargs)
if not isinstance(exclude_classes, (tuple, list)):
exclude_classes = [exclude_classes]
if r_distance < 0:
raise ValueError("r_distance must be >=0.")
if theta_width < 0:
raise ValueError("theta_width must be >=0.")
if phi_width < 0:
raise ValueError("phi_width must be >=0.")
max_noise_level = max_noise_level if max_noise_level else 0.0
if max_noise_level < 0 or max_noise_level > 1:
raise ValueError("max_noise_level must be >=0 and <=1.")
self._r_distance = r_distance
self._theta_width = theta_width
self._phi_width = phi_width
self._max_noise_level = max_noise_level
self._exclude_classes = exclude_classes
def get_config(self):
return {
"r_distance": self._r_distance,
"theta_width": self._theta_width,
"phi_width": self._phi_width,
"max_noise_level": self._max_noise_level,
"exclude_classes": self._exclude_classes,
}
def get_random_transformation(self, point_clouds, **kwargs):
# Randomly select a point from the first frame as the center of the
# frustum.
valid_points = point_clouds[0, :, POINTCLOUD_LABEL_INDEX] > 0
num_valid_points = tf.math.reduce_sum(tf.cast(valid_points, tf.int32))
randomly_select_point_index = tf.random.uniform(
(), minval=0, maxval=num_valid_points, dtype=tf.int32
)
randomly_select_frustum_center = tf.boolean_mask(
point_clouds[0], valid_points, axis=0
)[randomly_select_point_index, :POINTCLOUD_LABEL_INDEX]
(
num_frames,
num_points,
num_features,
) = point_clouds.get_shape().as_list()
frustum_mask = []
for f in range(num_frames):
frustum_mask.append(
point_cloud.within_a_frustum(
point_clouds[f],
randomly_select_frustum_center,
self._r_distance,
self._theta_width,
self._phi_width,
)[tf.newaxis, :, tf.newaxis]
)
frustum_mask = tf.concat(frustum_mask, axis=0)
feature_noise = tf.random.uniform(
[num_frames, num_points, num_features - POINTCLOUD_FEATURE_INDEX],
minval=1 - self._max_noise_level,
maxval=1 + self._max_noise_level,
)
noise = tf.concat(
[
tf.ones([num_frames, num_points, POINTCLOUD_FEATURE_INDEX]),
feature_noise,
],
axis=-1,
)
        # Do not add feature noise outside the frustum mask (noise = 1.0 there).
random_point_noise = tf.where(~frustum_mask, 1.0, noise)
random_point_noise = tf.cast(
random_point_noise, dtype=self.compute_dtype
)
return {"point_noise": random_point_noise}
def augment_point_clouds_bounding_boxes(
self, point_clouds, bounding_boxes, transformation, **kwargs
):
point_noise = transformation["point_noise"]
# Do not add noise to points that are protected by setting the
# corresponding point_noise = 1.0.
protected_points = tf.zeros_like(point_clouds[..., -1], dtype=tf.bool)
for excluded_class in self._exclude_classes:
protected_points |= point_clouds[..., -1] == excluded_class
no_noise = tf.ones_like(point_noise, point_noise.dtype)
point_noise = tf.where(
protected_points[:, :, tf.newaxis], no_noise, point_noise
)
point_clouds *= point_noise
return (point_clouds, bounding_boxes)
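# --- Hedged usage sketch (editor addition, not part of the upstream file). ---
# Exercises the layer defined above on random data. Shapes follow the
# "Input shape" section of the docstring; all numbers are illustrative only.
if __name__ == "__main__":
    layer = FrustumRandomPointFeatureNoise(
        r_distance=10.0,
        theta_width=1.0,
        phi_width=1.0,
        max_noise_level=0.5,
    )
    # 2 frames, 50 points, 10 point features ([x, y, z, class, ...]).
    point_clouds = tf.random.uniform([2, 50, 10], minval=0.1, maxval=10.0)
    # 2 frames, 10 boxes in CENTER_XYZ_DXDYDZ_PHI format plus a class feature.
    bounding_boxes = tf.random.uniform([2, 10, 8], minval=0.1, maxval=10.0)
    inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
    outputs = layer(inputs, training=True)
    # Output shapes match the input shapes; only feature values change.
    print(outputs[POINT_CLOUDS].shape, outputs[BOUNDING_BOXES].shape)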
| keras-cv/keras_cv/layers/preprocessing_3d/waymo/frustum_random_point_feature_noise.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing_3d/waymo/frustum_random_point_feature_noise.py",
"repo_id": "keras-cv",
"token_count": 2909
} | 70 |
# Copyright 2022 Waymo LLC.
#
# Licensed under the terms in https://github.com/keras-team/keras-cv/blob/master/keras_cv/layers/preprocessing_3d/waymo/LICENSE # noqa: E501
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.layers.preprocessing_3d import base_augmentation_layer_3d
from keras_cv.point_cloud import is_within_any_box3d
POINT_CLOUDS = base_augmentation_layer_3d.POINT_CLOUDS
BOUNDING_BOXES = base_augmentation_layer_3d.BOUNDING_BOXES
BOX_LABEL_INDEX = base_augmentation_layer_3d.BOX_LABEL_INDEX
@keras_cv_export("keras_cv.layers.RandomDropBox")
class RandomDropBox(base_augmentation_layer_3d.BaseAugmentationLayer3D):
"""A preprocessing layer which randomly drops object bounding boxes and
points during training.
    This layer will randomly drop object point clouds and bounding boxes. The
    number of dropped bounding boxes is uniformly sampled between 0 and
max_drop_bounding_boxes. If label_index is set, only bounding boxes with box
class == label_index will be sampled and dropped; otherwise, all valid
bounding boxes (box class > 0) will be sampled and dropped.
Input shape:
point_clouds: 3D (multi frames) float32 Tensor with shape
[num of frames, num of points, num of point features].
The first 5 features are [x, y, z, class, range].
bounding_boxes: 3D (multi frames) float32 Tensor with shape
[num of frames, num of boxes, num of box features].
The first 8 features are [x, y, z, dx, dy, dz, phi, box class].
Output shape:
A tuple of two Tensors (point_clouds, bounding_boxes) with the same shape
as input Tensors.
Arguments:
    max_drop_bounding_boxes: A non-negative int scalar that sets the maximum
      number of dropped bounding boxes. No bounding boxes are dropped when
      max_drop_bounding_boxes = 0.
    label_index: An optional int scalar that sets the target object index.
If label index is set, randomly drop bounding boxes, where box
class == label_index.
If label index is None, randomly drop bounding boxes, where box
class > 0.
"""
def __init__(self, max_drop_bounding_boxes, label_index=None, **kwargs):
super().__init__(**kwargs)
self.auto_vectorize = False
if label_index and label_index < 0:
raise ValueError("label_index must be >=0 or None.")
if max_drop_bounding_boxes < 0:
raise ValueError("max_drop_bounding_boxes must be >=0.")
self._label_index = label_index
self._max_drop_bounding_boxes = max_drop_bounding_boxes
def get_config(self):
return {
"label_index": self._label_index,
"max_drop_bounding_boxes": self._max_drop_bounding_boxes,
}
def get_random_transformation(self, point_clouds, bounding_boxes, **kwargs):
if not self._max_drop_bounding_boxes:
return {}
del point_clouds
if self._label_index:
selected_boxes_mask = (
bounding_boxes[0, :, BOX_LABEL_INDEX] == self._label_index
)
else:
selected_boxes_mask = tf.math.greater(
bounding_boxes[0, :, BOX_LABEL_INDEX], 0
)
max_drop_bounding_boxes = tf.random.uniform(
(), maxval=self._max_drop_bounding_boxes, dtype=tf.int32
)
# Randomly remove max_drop_bounding_boxes number of bounding boxes.
num_bounding_boxes = bounding_boxes.get_shape().as_list()[1]
random_scores_for_selected_boxes = tf.random.uniform(
shape=[num_bounding_boxes]
)
random_scores_for_selected_boxes = tf.where(
selected_boxes_mask, random_scores_for_selected_boxes, 0.0
)
topk, _ = tf.math.top_k(
random_scores_for_selected_boxes, k=max_drop_bounding_boxes + 1
)
drop_bounding_boxes_mask = tf.math.greater(
random_scores_for_selected_boxes, topk[-1]
)
# Only drop selected bounding boxes.
drop_bounding_boxes_mask &= selected_boxes_mask
return {
"drop_bounding_boxes_mask": drop_bounding_boxes_mask,
}
def augment_point_clouds_bounding_boxes(
self, point_clouds, bounding_boxes, transformation, **kwargs
):
if not self._max_drop_bounding_boxes:
return (point_clouds, bounding_boxes)
drop_bounding_boxes_mask = transformation["drop_bounding_boxes_mask"]
drop_bounding_boxes = tf.boolean_mask(
bounding_boxes, drop_bounding_boxes_mask, axis=1
)
drop_points_mask = is_within_any_box3d(
point_clouds[..., :3], drop_bounding_boxes[..., :7], keepdims=True
)
return (
tf.where(~drop_points_mask, point_clouds, 0.0),
tf.where(
~drop_bounding_boxes_mask[tf.newaxis, :, tf.newaxis],
bounding_boxes,
0.0,
),
)
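# --- Hedged usage sketch (editor addition, not part of the upstream file). ---
# Exercises the layer defined above on random data; the feature layouts follow
# the "Input shape" section of the docstring and all numbers are illustrative.
if __name__ == "__main__":
    layer = RandomDropBox(max_drop_bounding_boxes=3)
    # 2 frames, 50 points with [x, y, z, class, range] features.
    point_clouds = tf.random.uniform([2, 50, 5], minval=0.1, maxval=2.0)
    # 2 frames, 10 boxes with [x, y, z, dx, dy, dz, phi, box class] features.
    bounding_boxes = tf.random.uniform([2, 10, 8], minval=0.1, maxval=2.0)
    inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
    outputs = layer(inputs, training=True)
    # Dropped boxes and the points inside them are zeroed out in place, so the
    # output shapes match the input shapes.
    print(outputs[POINT_CLOUDS].shape, outputs[BOUNDING_BOXES].shape)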
| keras-cv/keras_cv/layers/preprocessing_3d/waymo/random_drop_box.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing_3d/waymo/random_drop_box.py",
"repo_id": "keras-cv",
"token_count": 2157
} | 71 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from keras_cv import bounding_box
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
@keras_cv_export("keras_cv.losses.IoULoss")
class IoULoss(keras.losses.Loss):
"""Implements the IoU Loss
IoU loss is commonly used for object detection. This loss aims to directly
optimize the IoU score between true boxes and predicted boxes. The length of
the last dimension should be 4 to represent the bounding boxes. This loss
    uses IoUs computed per box pair and therefore, the number of boxes in both
    y_true and y_pred is expected to be equal, i.e. the i<sup>th</sup>
    y_true box in a batch will be compared with the i<sup>th</sup> y_pred box.
Args:
bounding_box_format: a case-insensitive string (for example, "xyxy").
Each bounding box is defined by these 4 values. For detailed
information on the supported formats, see the
[KerasCV bounding box documentation](https://keras.io/api/keras_cv/bounding_box/formats/).
mode: must be one of
- `"linear"`. The loss will be calculated as 1 - iou
- `"quadratic"`. The loss will be calculated as 1 - iou<sup>2</sup>
- `"log"`. The loss will be calculated as -ln(iou)
Defaults to "log".
        axis: the axis along which to average the IoUs, defaults to -1.
References:
- [UnitBox paper](https://arxiv.org/pdf/1608.01471)
Sample Usage:
```python
    y_true = np.random.uniform(size=(5, 10, 4), low=0, high=10)
    y_pred = np.random.uniform(size=(5, 10, 4), low=0, high=10)
    loss = IoULoss(bounding_box_format="xywh")
loss(y_true, y_pred)
```
Usage with the `compile()` API:
```python
model.compile(optimizer='adam', loss=keras_cv.losses.IoULoss())
```
""" # noqa: E501
def __init__(self, bounding_box_format, mode="log", axis=-1, **kwargs):
super().__init__(**kwargs)
self.bounding_box_format = bounding_box_format
self.mode = mode
self.axis = axis
if self.mode not in ["linear", "quadratic", "log"]:
raise ValueError(
"IoULoss expects mode to be one of 'linear', 'quadratic' or "
f"'log' Received mode={self.mode}, "
)
def call(self, y_true, y_pred):
y_pred = ops.convert_to_tensor(y_pred)
y_true = ops.cast(y_true, y_pred.dtype)
if y_pred.shape[-1] != 4:
raise ValueError(
"IoULoss expects y_pred.shape[-1] to be 4 to represent the "
f"bounding boxes. Received y_pred.shape[-1]={y_pred.shape[-1]}."
)
if y_true.shape[-1] != 4:
raise ValueError(
"IoULoss expects y_true.shape[-1] to be 4 to represent the "
f"bounding boxes. Received y_true.shape[-1]={y_true.shape[-1]}."
)
if y_true.shape[-2] != y_pred.shape[-2]:
raise ValueError(
"IoULoss expects number of boxes in y_pred to be equal to the "
"number of boxes in y_true. Received number of boxes in "
f"y_true={y_true.shape[-2]} and number of boxes in "
f"y_pred={y_pred.shape[-2]}."
)
iou = bounding_box.compute_iou(y_true, y_pred, self.bounding_box_format)
# pick out the diagonal for corresponding ious
iou = ops.diagonal(iou)
if self.axis == "no_reduction":
warnings.warn(
"`axis='no_reduction'` is a temporary API, and the API "
"contract will be replaced in the future with a more generic "
"solution covering all losses."
)
else:
iou = ops.mean(iou, axis=self.axis)
if self.mode == "linear":
loss = 1 - iou
elif self.mode == "quadratic":
loss = 1 - iou**2
elif self.mode == "log":
loss = -ops.log(iou)
return loss
def get_config(self):
config = super().get_config()
config.update(
{
"bounding_box_format": self.bounding_box_format,
"mode": self.mode,
"axis": self.axis,
}
)
return config
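# --- Hedged worked example (editor addition, not part of the upstream file). ---
# A quick numeric sanity check of the loss above: when each predicted box
# exactly matches its corresponding ground-truth box, the pairwise IoUs are
# ~1.0, so the "log" mode loss -ln(iou) is approximately 0.
if __name__ == "__main__":
    import numpy as np
    boxes = np.array(
        [[0.0, 0.0, 10.0, 10.0], [5.0, 5.0, 15.0, 15.0]], dtype="float32"
    )
    loss = IoULoss(bounding_box_format="xyxy", mode="log")
    print(float(loss(boxes, boxes)))  # expected: ~0.0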
| keras-cv/keras_cv/losses/iou_loss.py/0 | {
"file_path": "keras-cv/keras_cv/losses/iou_loss.py",
"repo_id": "keras-cv",
"token_count": 2165
} | 72 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for loading pretrained model presets."""
import numpy as np
import pytest
from keras_cv.backend import ops
from keras_cv.models.backbones.densenet.densenet_aliases import (
DenseNet121Backbone,
)
from keras_cv.models.backbones.densenet.densenet_backbone import (
DenseNetBackbone,
)
from keras_cv.tests.test_case import TestCase
@pytest.mark.large
class DenseNetPresetSmokeTest(TestCase):
"""
A smoke test for DenseNet presets we run continuously.
This only tests the smallest weights we have available. Run with:
`pytest keras_cv/models/backbones/densenet/densenet_backbone_presets_test.py --run_large` # noqa: E501
"""
def setUp(self):
self.input_batch = np.ones(shape=(2, 224, 224, 3))
def test_backbone_output(self):
model = DenseNetBackbone.from_preset("densenet121")
model(self.input_batch)
def test_backbone_output_with_weights(self):
model = DenseNetBackbone.from_preset("densenet121_imagenet")
# The forward pass from a preset should be stable!
# This test should catch cases where we unintentionally change our
# network code in a way that would invalidate our preset weights.
# We should only update these numbers if we are updating a weights
# file, or have found a discrepancy with the upstream source.
outputs = model(np.ones(shape=(1, 512, 512, 3)))
expected = [0.0, 0.0, 0.09920305, 0.0, 0.0]
# Keep a high tolerance, so we are robust to different hardware.
self.assertAllClose(
ops.convert_to_numpy(outputs[0, 0, 0, :5]),
expected,
atol=0.01,
rtol=0.01,
)
def test_applications_model_output(self):
model = DenseNet121Backbone()
model(self.input_batch)
def test_applications_model_output_with_preset(self):
model = DenseNet121Backbone.from_preset("densenet121_imagenet")
model(self.input_batch)
def test_preset_docstring(self):
"""Check we did our docstring formatting correctly."""
for name in DenseNetBackbone.presets:
self.assertRegex(DenseNetBackbone.from_preset.__doc__, name)
def test_unknown_preset_error(self):
# Not a preset name
with self.assertRaises(ValueError):
DenseNetBackbone.from_preset("densenet121_clowntown")
def test_load_weights_error(self):
# Try to load weights when none available
with self.assertRaises(ValueError):
DenseNetBackbone.from_preset("densenet121", load_weights=True)
@pytest.mark.extra_large
class DenseNetPresetFullTest(TestCase):
"""
Test the full enumeration of our preset.
This tests every preset for DenseNet and is only run manually.
Run with:
`pytest keras_cv/models/backbones/densenet/densenet_backbone_presets_test.py --run_extra_large` # noqa: E501
"""
def test_load_densenet(self):
input_data = np.ones(shape=(2, 224, 224, 3))
for preset in DenseNetBackbone.presets:
model = DenseNetBackbone.from_preset(preset)
model(input_data)
| keras-cv/keras_cv/models/backbones/densenet/densenet_backbone_presets_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/densenet/densenet_backbone_presets_test.py",
"repo_id": "keras-cv",
"token_count": 1419
} | 73 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import math
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.layers import FusedMBConvBlock
from keras_cv.layers import MBConvBlock
from keras_cv.models import utils
from keras_cv.models.backbones.backbone import Backbone
from keras_cv.models.backbones.efficientnet_v2.efficientnet_v2_backbone_presets import ( # noqa: E501
backbone_presets,
)
from keras_cv.models.backbones.efficientnet_v2.efficientnet_v2_backbone_presets import ( # noqa: E501
backbone_presets_with_weights,
)
from keras_cv.utils.python_utils import classproperty
@keras_cv_export("keras_cv.models.EfficientNetV2Backbone")
class EfficientNetV2Backbone(Backbone):
"""Instantiates the EfficientNetV2 architecture.
Reference:
- [EfficientNetV2: Smaller Models and Faster Training](https://arxiv.org/abs/2104.00298)
(ICML 2021)
Args:
include_rescaling: bool, whether to rescale the inputs. If set
to `True`, inputs will be passed through a `Rescaling(1/255.0)`
layer.
width_coefficient: float, scaling coefficient for network width.
depth_coefficient: float, scaling coefficient for network depth.
stackwise_kernel_sizes: list of ints, the kernel sizes used for each
conv block.
stackwise_num_repeats: list of ints, number of times to repeat each
conv block.
stackwise_input_filters: list of ints, number of input filters for
each conv block.
stackwise_output_filters: list of ints, number of output filters for
each stack in the conv blocks model.
        stackwise_expansion_ratios: list of floats, expand ratios passed to the
            MBConv and FusedMBConv blocks.
        stackwise_squeeze_and_excite_ratios: list of floats, the squeeze and
            excite ratios passed to the squeeze and excitation blocks.
        stackwise_strides: list of ints, strides for each stack of conv blocks.
        stackwise_conv_types: list of strings. Each value is either 'unfused'
            or 'fused' depending on the desired blocks. FusedMBConvBlock is
            similar to MBConvBlock, but instead of using a depthwise convolution
            and a 1x1 output convolution, fused blocks use a single 3x3
            convolution block.
skip_connection_dropout: float, dropout rate at skip connections.
depth_divisor: integer, a unit of network width.
min_depth: integer, minimum number of filters.
activation: activation function to use between each convolutional layer.
input_shape: optional shape tuple, defaults to (None, None, 3).
input_tensor: optional Keras tensor (i.e. output of `keras.layers.Input()`)
to use as image input for the model.
Example:
```python
# Construct an EfficientNetV2 from a preset:
efficientnet = keras_cv.models.EfficientNetV2Backbone.from_preset(
"efficientnetv2_s"
)
images = tf.ones((1, 256, 256, 3))
outputs = efficientnet.predict(images)
# Alternatively, you can also customize the EfficientNetV2 architecture:
model = EfficientNetV2Backbone(
stackwise_kernel_sizes=[3, 3, 3, 3, 3, 3],
stackwise_num_repeats=[2, 4, 4, 6, 9, 15],
stackwise_input_filters=[24, 24, 48, 64, 128, 160],
stackwise_output_filters=[24, 48, 64, 128, 160, 256],
stackwise_expansion_ratios=[1, 4, 4, 4, 6, 6],
stackwise_squeeze_and_excite_ratios=[0.0, 0.0, 0, 0.25, 0.25, 0.25],
stackwise_strides=[1, 2, 2, 2, 1, 2],
stackwise_conv_types=[
"fused",
"fused",
"fused",
"unfused",
"unfused",
"unfused",
],
width_coefficient=1.0,
depth_coefficient=1.0,
include_rescaling=False,
)
images = tf.ones((1, 256, 256, 3))
    outputs = model.predict(images)
```
""" # noqa: E501
def __init__(
self,
*,
include_rescaling,
width_coefficient,
depth_coefficient,
stackwise_kernel_sizes,
stackwise_num_repeats,
stackwise_input_filters,
stackwise_output_filters,
stackwise_expansion_ratios,
stackwise_squeeze_and_excite_ratios,
stackwise_strides,
stackwise_conv_types,
skip_connection_dropout=0.2,
depth_divisor=8,
min_depth=8,
activation="swish",
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Determine proper input shape
img_input = utils.parse_model_inputs(input_shape, input_tensor)
x = img_input
if include_rescaling:
x = keras.layers.Rescaling(scale=1 / 255.0)(x)
# Build stem
stem_filters = round_filters(
filters=stackwise_input_filters[0],
width_coefficient=width_coefficient,
min_depth=min_depth,
depth_divisor=depth_divisor,
)
x = keras.layers.Conv2D(
filters=stem_filters,
kernel_size=3,
strides=2,
kernel_initializer=conv_kernel_initializer(),
padding="same",
use_bias=False,
name="stem_conv",
)(x)
x = keras.layers.BatchNormalization(
momentum=0.9,
name="stem_bn",
)(x)
x = keras.layers.Activation(activation, name="stem_activation")(x)
# Build blocks
block_id = 0
blocks = float(
sum(num_repeats for num_repeats in stackwise_num_repeats)
)
pyramid_level_inputs = []
for i in range(len(stackwise_kernel_sizes)):
num_repeats = stackwise_num_repeats[i]
input_filters = stackwise_input_filters[i]
output_filters = stackwise_output_filters[i]
# Update block input and output filters based on depth multiplier.
input_filters = round_filters(
filters=input_filters,
width_coefficient=width_coefficient,
min_depth=min_depth,
depth_divisor=depth_divisor,
)
output_filters = round_filters(
filters=output_filters,
width_coefficient=width_coefficient,
min_depth=min_depth,
depth_divisor=depth_divisor,
)
repeats = round_repeats(
repeats=num_repeats,
depth_coefficient=depth_coefficient,
)
strides = stackwise_strides[i]
squeeze_and_excite_ratio = stackwise_squeeze_and_excite_ratios[i]
for j in range(repeats):
# The first block needs to take care of stride and filter size
# increase.
if j > 0:
strides = 1
input_filters = output_filters
if strides != 1:
pyramid_level_inputs.append(utils.get_tensor_input_name(x))
# 97 is the start of the lowercase alphabet.
letter_identifier = chr(j + 97)
block = get_conv_constructor(stackwise_conv_types[i])(
input_filters=input_filters,
output_filters=output_filters,
expand_ratio=stackwise_expansion_ratios[i],
kernel_size=stackwise_kernel_sizes[i],
strides=strides,
se_ratio=squeeze_and_excite_ratio,
activation=activation,
survival_probability=skip_connection_dropout
* block_id
/ blocks,
bn_momentum=0.9,
name="block{}{}_".format(i + 1, letter_identifier),
)
x = block(x)
block_id += 1
# Build top
top_filters = round_filters(
filters=1280,
width_coefficient=width_coefficient,
min_depth=min_depth,
depth_divisor=depth_divisor,
)
x = keras.layers.Conv2D(
filters=top_filters,
kernel_size=1,
strides=1,
kernel_initializer=conv_kernel_initializer(),
padding="same",
data_format="channels_last",
use_bias=False,
name="top_conv",
)(x)
x = keras.layers.BatchNormalization(
momentum=0.9,
name="top_bn",
)(x)
x = keras.layers.Activation(
activation=activation, name="top_activation"
)(x)
pyramid_level_inputs.append(utils.get_tensor_input_name(x))
# Create model.
super().__init__(inputs=img_input, outputs=x, **kwargs)
self.include_rescaling = include_rescaling
self.width_coefficient = width_coefficient
self.depth_coefficient = depth_coefficient
self.skip_connection_dropout = skip_connection_dropout
self.depth_divisor = depth_divisor
self.min_depth = min_depth
self.activation = activation
self.input_tensor = input_tensor
self.pyramid_level_inputs = {
f"P{i + 1}": name for i, name in enumerate(pyramid_level_inputs)
}
self.stackwise_kernel_sizes = stackwise_kernel_sizes
self.stackwise_num_repeats = stackwise_num_repeats
self.stackwise_input_filters = stackwise_input_filters
self.stackwise_output_filters = stackwise_output_filters
self.stackwise_expansion_ratios = stackwise_expansion_ratios
self.stackwise_squeeze_and_excite_ratios = (
stackwise_squeeze_and_excite_ratios
)
self.stackwise_strides = stackwise_strides
self.stackwise_conv_types = stackwise_conv_types
def get_config(self):
config = super().get_config()
config.update(
{
"include_rescaling": self.include_rescaling,
"width_coefficient": self.width_coefficient,
"depth_coefficient": self.depth_coefficient,
"skip_connection_dropout": self.skip_connection_dropout,
"depth_divisor": self.depth_divisor,
"min_depth": self.min_depth,
"activation": self.activation,
"input_shape": self.input_shape[1:],
"input_tensor": self.input_tensor,
"stackwise_kernel_sizes": self.stackwise_kernel_sizes,
"stackwise_num_repeats": self.stackwise_num_repeats,
"stackwise_input_filters": self.stackwise_input_filters,
"stackwise_output_filters": self.stackwise_output_filters,
"stackwise_expansion_ratios": self.stackwise_expansion_ratios,
"stackwise_squeeze_and_excite_ratios": self.stackwise_squeeze_and_excite_ratios, # noqa: E501
"stackwise_strides": self.stackwise_strides,
"stackwise_conv_types": self.stackwise_conv_types,
}
)
return config
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return copy.deepcopy(backbone_presets)
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return copy.deepcopy(backbone_presets_with_weights)
def conv_kernel_initializer(scale=2.0):
return keras.initializers.VarianceScaling(
scale=scale, mode="fan_out", distribution="truncated_normal"
)
def round_filters(filters, width_coefficient, min_depth, depth_divisor):
"""Round number of filters based on depth multiplier."""
filters *= width_coefficient
minimum_depth = min_depth or depth_divisor
new_filters = max(
minimum_depth,
int(filters + depth_divisor / 2) // depth_divisor * depth_divisor,
)
return int(new_filters)
def round_repeats(repeats, depth_coefficient):
"""Round number of repeats based on depth multiplier."""
return int(math.ceil(depth_coefficient * repeats))
def get_conv_constructor(conv_type):
if conv_type == "unfused":
return MBConvBlock
elif conv_type == "fused":
return FusedMBConvBlock
else:
raise ValueError(
"Expected `conv_type` to be "
"one of 'unfused', 'fused', but got "
f"`conv_type={conv_type}`"
)
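# --- Hedged worked example (editor addition, not part of the upstream file). ---
# Shows the arithmetic performed by the scaling helpers above with
# illustrative coefficients.
if __name__ == "__main__":
    # 24 filters * width_coefficient 1.1 = 26.4, which is rounded to the
    # nearest multiple of depth_divisor=8 (never below min_depth=8) -> 24.
    print(
        round_filters(24, width_coefficient=1.1, min_depth=8, depth_divisor=8)
    )
    # 4 repeats * depth_coefficient 1.2 = 4.8, which is ceiled -> 5.
    print(round_repeats(4, depth_coefficient=1.2))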
| keras-cv/keras_cv/models/backbones/efficientnet_v2/efficientnet_v2_backbone.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/efficientnet_v2/efficientnet_v2_backbone.py",
"repo_id": "keras-cv",
"token_count": 6095
} | 74 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.layers.vit_det_layers import AddPositionalEmbedding
from keras_cv.layers.vit_det_layers import ViTDetPatchingAndEmbedding
from keras_cv.layers.vit_det_layers import WindowedTransformerEncoder
from keras_cv.models import utils
from keras_cv.models.backbones.backbone import Backbone
from keras_cv.models.backbones.vit_det.vit_det_backbone_presets import (
backbone_presets,
)
from keras_cv.models.backbones.vit_det.vit_det_backbone_presets import (
backbone_presets_with_weights,
)
from keras_cv.utils.python_utils import classproperty
@keras_cv_export("keras_cv.models.ViTDetBackbone", package="keras_cv.models")
class ViTDetBackbone(Backbone):
"""A ViT image encoder that uses a windowed transformer encoder and
relative positional encodings.
Args:
input_shape (tuple[int], optional): The size of the input image in
`(H, W, C)` format. Defaults to `(1024, 1024, 3)`.
input_tensor (KerasTensor, optional): Output of
            `keras.layers.Input()` to use as image input for the model.
Defaults to `None`.
include_rescaling (bool, optional): Whether to rescale the inputs. If
set to `True`, inputs will be passed through a
`Rescaling(1/255.0)` layer. Defaults to `False`.
patch_size (int, optional): the patch size to be supplied to the
Patching layer to turn input images into a flattened sequence of
patches. Defaults to `16`.
embed_dim (int, optional): The latent dimensionality to be projected
into in the output of each stacked windowed transformer encoder.
Defaults to `768`.
depth (int, optional): The number of transformer encoder layers to
stack in the Vision Transformer. Defaults to `12`.
mlp_dim (int, optional): The dimensionality of the hidden Dense
layer in the transformer MLP head. Defaults to `768*4`.
num_heads (int, optional): the number of heads to use in the
`MultiHeadAttentionWithRelativePE` layer of each transformer
encoder. Defaults to `12`.
out_chans (int, optional): The number of channels (features) in the
output (image encodings). Defaults to `256`.
use_bias (bool, optional): Whether to use bias to project the keys,
queries, and values in the attention layer. Defaults to `True`.
use_abs_pos (bool, optional): Whether to add absolute positional
embeddings to the output patches. Defaults to `True`.
use_rel_pos (bool, optional): Whether to use relative positional
            encodings in the attention layer. Defaults to `True`.
window_size (int, optional): The size of the window for windowed
attention in the transformer encoder blocks. Defaults to `14`.
global_attention_indices (list, optional): Indexes for blocks using
global attention. Defaults to `[2, 5, 8, 11]`.
layer_norm_epsilon (int, optional): The epsilon to use in the layer
normalization blocks in transformer encoder. Defaults to `1e-6`.
References:
- [Segment Anything paper](https://arxiv.org/abs/2304.02643)
- [Segment Anything GitHub](https://github.com/facebookresearch/segment-anything)
- [Detectron2](https://github.com/facebookresearch/detectron2)
""" # noqa: E501
def __init__(
self,
*,
include_rescaling,
input_shape=(1024, 1024, 3),
input_tensor=None,
patch_size=16,
embed_dim=768,
depth=12,
mlp_dim=768 * 4,
num_heads=12,
out_chans=256,
use_bias=True,
use_abs_pos=True,
use_rel_pos=True,
window_size=14,
global_attention_indices=[2, 5, 8, 11],
layer_norm_epsilon=1e-6,
**kwargs
):
img_input = utils.parse_model_inputs(
input_shape, input_tensor, name="images"
)
# Check that the input image is well specified.
if img_input.shape[-3] is None or img_input.shape[-2] is None:
raise ValueError(
"Height and width of the image must be specified"
" in `input_shape`."
)
if img_input.shape[-3] != img_input.shape[-2]:
raise ValueError(
"Input image must be square i.e. the height must"
" be equal to the width in the `input_shape`"
" tuple/tensor."
)
img_size = img_input.shape[-3]
x = img_input
if include_rescaling:
# Use common rescaling strategy across keras_cv
x = keras.layers.Rescaling(1.0 / 255.0)(x)
# VITDet scales inputs based on the standard ImageNet mean/stddev.
x = (x - ops.array([0.485, 0.456, 0.406], dtype=x.dtype)) / (
ops.array([0.229, 0.224, 0.225], dtype=x.dtype)
)
x = ViTDetPatchingAndEmbedding(
kernel_size=(patch_size, patch_size),
strides=(patch_size, patch_size),
embed_dim=embed_dim,
)(x)
if use_abs_pos:
x = AddPositionalEmbedding(img_size, patch_size, embed_dim)(x)
for i in range(depth):
x = WindowedTransformerEncoder(
project_dim=embed_dim,
mlp_dim=mlp_dim,
num_heads=num_heads,
use_bias=use_bias,
use_rel_pos=use_rel_pos,
window_size=(
window_size if i not in global_attention_indices else 0
),
input_size=(img_size // patch_size, img_size // patch_size),
)(x)
x = keras.models.Sequential(
[
keras.layers.Conv2D(
filters=out_chans, kernel_size=1, use_bias=False
),
keras.layers.LayerNormalization(epsilon=1e-6),
keras.layers.Conv2D(
filters=out_chans,
kernel_size=3,
padding="same",
use_bias=False,
),
keras.layers.LayerNormalization(epsilon=1e-6),
]
)(x)
super().__init__(inputs=img_input, outputs=x, **kwargs)
self.patch_size = patch_size
self.embed_dim = embed_dim
self.depth = depth
self.mlp_dim = mlp_dim
self.num_heads = num_heads
self.out_chans = out_chans
self.use_bias = use_bias
self.use_rel_pos = use_rel_pos
self.use_abs_pos = use_abs_pos
self.window_size = window_size
self.global_attention_indices = global_attention_indices
self.layer_norm_epsilon = layer_norm_epsilon
self.input_tensor = input_tensor
self.include_rescaling = include_rescaling
@property
def pyramid_level_inputs(self):
raise NotImplementedError(
"The `ViTDetBackbone` model doesn't compute"
" pyramid level features."
)
def get_config(self):
config = super().get_config()
config.update(
{
"input_shape": self.input_shape[1:],
"input_tensor": self.input_tensor,
"include_rescaling": self.include_rescaling,
"patch_size": self.patch_size,
"embed_dim": self.embed_dim,
"depth": self.depth,
"mlp_dim": self.mlp_dim,
"num_heads": self.num_heads,
"out_chans": self.out_chans,
"use_bias": self.use_bias,
"use_abs_pos": self.use_abs_pos,
"use_rel_pos": self.use_rel_pos,
"window_size": self.window_size,
"global_attention_indices": self.global_attention_indices,
"layer_norm_epsilon": self.layer_norm_epsilon,
}
)
return config
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return copy.deepcopy(backbone_presets)
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return copy.deepcopy(backbone_presets_with_weights)
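# --- Hedged usage sketch (editor addition, not part of the upstream file). ---
# Builds a deliberately tiny, non-standard configuration so the forward pass
# is cheap; the hyperparameters below are illustrative only and do not match
# any released ViTDet/SAM preset.
if __name__ == "__main__":
    import numpy as np
    backbone = ViTDetBackbone(
        include_rescaling=True,
        input_shape=(64, 64, 3),
        patch_size=16,
        embed_dim=32,
        depth=2,
        mlp_dim=128,
        num_heads=2,
        out_chans=32,
        window_size=2,
        global_attention_indices=[1],
    )
    images = np.ones((1, 64, 64, 3), dtype="float32")
    features = backbone(images)
    # 64x64 inputs with 16x16 patches give a 4x4 grid of `out_chans` features.
    print(features.shape)  # expected: (1, 4, 4, 32)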
| keras-cv/keras_cv/models/backbones/vit_det/vit_det_backbone.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/vit_det/vit_det_backbone.py",
"repo_id": "keras-cv",
"token_count": 4128
} | 75 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.models.feature_extractor.clip.clip_encoder import CLIPEncoder
class CLIPTextEncoder(keras.Model):
def __init__(
self,
transformer_width,
transformer_layers,
transformer_heads,
vocab_size,
embed_dim,
context_length,
**kwargs,
):
super().__init__(
**kwargs,
)
self.transformer_width = transformer_width
self.transformer_layers = transformer_layers
self.transformer_heads = transformer_heads
self.vocab_size = vocab_size
self.embed_dim = embed_dim
self.context_length = context_length
self.token_embedding = keras.layers.Embedding(
vocab_size,
transformer_width,
name="token_embedding",
)
self.positional_embedding = keras.layers.Embedding(
self.context_length,
transformer_width,
name="positional_embedding",
)
self.encoder = CLIPEncoder(
width=transformer_width,
num_layers=transformer_layers,
heads=transformer_heads,
name="clip_encoder",
)
self.ln_final = keras.layers.LayerNormalization(name="ln_final")
self.text_projector = keras.layers.Dense(
embed_dim, name="text_projector", use_bias=False
)
def build(self, input_shape):
super().build(input_shape)
self.token_embedding.build(input_shape)
self.positional_embedding.build([1, self.context_length])
self.encoder.build(None)
self.ln_final.build([None, None, self.transformer_width])
self.text_projector.build([None, None, self.transformer_width])
def call(self, inputs, attention_mask=None):
token_embedding = self.token_embedding(inputs)
position_ids = ops.expand_dims(
ops.arange(self.context_length, dtype="int32"), 0
)
position_embedding = self.positional_embedding(position_ids)
position_embedding = ops.tile(
position_embedding, repeats=(inputs.shape[0], 1, 1)
)
causal_attention_mask = ops.ones(
(self.context_length, self.context_length)
)
        # Zero out the lower triangle (entries below the diagonal)
causal_attention_mask = ops.triu(causal_attention_mask)
causal_attention_mask = ops.cast(causal_attention_mask, "float32")
attention_mask = ops.cast(attention_mask, dtype="float32")
expanded_mask = ops.tile(
attention_mask[:, None, None, :], (1, 1, self.context_length, 1)
)
expanded_mask = (1.0 - expanded_mask) * (-1e8)
encoded_output = self.encoder(
token_embedding + position_embedding,
causal_attention_mask=causal_attention_mask,
attention_mask=expanded_mask,
)
layer_norm = self.ln_final(encoded_output)
indices = ops.expand_dims(
ops.cast(ops.argmax(inputs, axis=-1), "int32"), axis=-1
)
selected_features = ops.take_along_axis(
layer_norm, indices[:, :, None], axis=1
)
text_features = self.text_projector(selected_features)
output = ops.squeeze(text_features, axis=1)
return output
def get_config(self):
config = super().get_config()
config.update(
{
"transformer_width": self.transformer_width,
"transformer_layers": self.transformer_layers,
"transformer_heads": self.transformer_heads,
"vocab_size": self.vocab_size,
"embed_dim": self.embed_dim,
"context_length": self.context_length,
}
)
return config
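# --- Hedged usage sketch (editor addition, not part of the upstream file). ---
# Runs the encoder above with deliberately tiny, illustrative hyperparameters
# (not the released CLIP configuration) on a batch of random token ids.
if __name__ == "__main__":
    import numpy as np
    encoder = CLIPTextEncoder(
        transformer_width=64,
        transformer_layers=2,
        transformer_heads=2,
        vocab_size=1000,
        embed_dim=32,
        context_length=8,
    )
    token_ids = np.random.randint(0, 1000, size=(2, 8))
    # A padding mask of ones marks every position as a real (non-padded) token.
    padding_mask = np.ones((2, 8), dtype="int32")
    text_features = encoder(token_ids, attention_mask=padding_mask)
    print(text_features.shape)  # expected: (2, 32) -- one embedding per text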
| keras-cv/keras_cv/models/feature_extractor/clip/clip_text_model.py/0 | {
"file_path": "keras-cv/keras_cv/models/feature_extractor/clip/clip_text_model.py",
"repo_id": "keras-cv",
"token_count": 1964
} | 76 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import tensorflow as tf
from absl.testing import parameterized
from tensorflow import keras
from tensorflow.keras import optimizers
from keras_cv.models import ResNet18V2Backbone
from keras_cv.models.legacy.object_detection.faster_rcnn.faster_rcnn import (
FasterRCNN,
)
from keras_cv.models.object_detection.__test_utils__ import (
_create_bounding_box_dataset,
)
from keras_cv.tests.test_case import TestCase
class FasterRCNNTest(TestCase):
# TODO(ianstenbit): Make FasterRCNN support shapes that are not multiples
# of 128, perhaps by adding a flag to the anchor generator for whether to
# include anchors centered outside of the image. (RetinaNet does use those,
# while FasterRCNN doesn't). For more context on why this is the case, see
# https://github.com/keras-team/keras-cv/pull/1882
@parameterized.parameters(
((2, 640, 384, 3),),
((2, 512, 512, 3),),
((2, 128, 128, 3),),
)
def test_faster_rcnn_infer(self, batch_shape):
model = FasterRCNN(
num_classes=80,
bounding_box_format="xyxy",
backbone=ResNet18V2Backbone(),
)
images = tf.random.normal(batch_shape)
outputs = model(images, training=False)
# 1000 proposals in inference
self.assertAllEqual([2, 1000, 81], outputs[1].shape)
self.assertAllEqual([2, 1000, 4], outputs[0].shape)
@parameterized.parameters(
((2, 640, 384, 3),),
((2, 512, 512, 3),),
((2, 128, 128, 3),),
)
def test_faster_rcnn_train(self, batch_shape):
model = FasterRCNN(
num_classes=80,
bounding_box_format="xyxy",
backbone=ResNet18V2Backbone(),
)
images = tf.random.normal(batch_shape)
outputs = model(images, training=True)
self.assertAllEqual([2, 1000, 81], outputs[1].shape)
self.assertAllEqual([2, 1000, 4], outputs[0].shape)
def test_invalid_compile(self):
model = FasterRCNN(
num_classes=80,
bounding_box_format="yxyx",
backbone=ResNet18V2Backbone(),
)
with self.assertRaisesRegex(ValueError, "only accepts"):
model.compile(rpn_box_loss="binary_crossentropy")
with self.assertRaisesRegex(ValueError, "only accepts"):
model.compile(
rpn_classification_loss=keras.losses.BinaryCrossentropy(
from_logits=False
)
)
@pytest.mark.large # Fit is slow, so mark these large.
def test_faster_rcnn_with_dictionary_input_format(self):
faster_rcnn = FasterRCNN(
num_classes=20,
bounding_box_format="xywh",
backbone=ResNet18V2Backbone(),
)
images, boxes = _create_bounding_box_dataset("xywh")
dataset = tf.data.Dataset.from_tensor_slices(
{"images": images, "bounding_boxes": boxes}
).batch(5, drop_remainder=True)
faster_rcnn.compile(
optimizer=optimizers.Adam(),
box_loss="Huber",
classification_loss="SparseCategoricalCrossentropy",
rpn_box_loss="Huber",
rpn_classification_loss="BinaryCrossentropy",
)
faster_rcnn.fit(dataset, epochs=1)
faster_rcnn.evaluate(dataset)
| keras-cv/keras_cv/models/legacy/object_detection/faster_rcnn/faster_rcnn_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/legacy/object_detection/faster_rcnn/faster_rcnn_test.py",
"repo_id": "keras-cv",
"token_count": 1681
} | 77 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
try:
from keras.src.utils import tf_utils
except ImportError:
from keras.utils import tf_utils
def _minimum_control_deps(outputs):
"""Returns the minimum control dependencies to ensure step succeeded."""
if tf.executing_eagerly():
return [] # Control dependencies not needed.
outputs = tf.nest.flatten(outputs, expand_composites=True)
for out in outputs:
# Variables can't be control dependencies.
if not isinstance(out, tf.Variable):
return [out] # Return first Tensor or Op from outputs.
return [] # No viable Tensor or Op to use for control deps.
def make_predict_function(model, force=False):
if model.predict_function is not None and not force:
return model.predict_function
def step_function(iterator):
"""Runs a single evaluation step."""
def run_step(data):
outputs = model.predict_step(data)
            # Ensure counter is updated only if `predict_step` succeeds.
with tf.control_dependencies(_minimum_control_deps(outputs)):
model._predict_counter.assign_add(1)
return outputs
if model._jit_compile:
run_step = tf.function(
run_step, jit_compile=True, reduce_retracing=True
)
data = next(iterator)
outputs = model.distribute_strategy.run(run_step, args=(data,))
outputs = model.distribute_strategy.gather(outputs, axis=0)
# Note that this is the only deviation from the base keras.Model
# implementation. We add the decode_step inside of the computation
# graph but outside of the distribute_strategy (i.e on host CPU).
if not isinstance(data, tf.Tensor):
data = tf.concat(data.values, axis=0)
return model.decode_predictions(outputs, data)
# Special case if steps_per_execution is one.
if (
model._steps_per_execution is None
or model._steps_per_execution.numpy().item() == 1
):
def predict_function(iterator):
"""Runs an evaluation execution with a single step."""
return step_function(iterator)
else:
def predict_function(iterator):
"""Runs an evaluation execution with multiple steps."""
outputs = step_function(iterator)
for _ in tf.range(model._steps_per_execution - 1):
tf.autograph.experimental.set_loop_options(
shape_invariants=[
(
outputs,
tf.nest.map_structure(
lambda t: tf_utils.get_tensor_spec(
t, dynamic_batch=True
).shape,
outputs,
),
)
]
)
step_outputs = step_function(iterator)
outputs = tf.nest.map_structure(
                    lambda t1, t2: tf.concat([t1, t2], axis=0), outputs, step_outputs
)
return outputs
if not model.run_eagerly:
predict_function = tf.function(predict_function, reduce_retracing=True)
model.predict_function = predict_function
return predict_function
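# Usage sketch (added for illustration; not part of the original module).
# `make_predict_function` is intended to be attached to a `keras.Model`
# subclass that implements `decode_predictions(outputs, data)`, so that the
# decoding happens on the host CPU outside the distribution strategy:
#
#     model.predict_function = None         # clear any cached function
#     make_predict_function(model)          # build and attach the new one
#     decoded = model.predict(dataset)      # predictions are decoded per batch
#
# The concrete model class is not shown in this file, so the calls above are
# only an assumed pattern.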
| keras-cv/keras_cv/models/object_detection/predict_utils.py/0 | {
"file_path": "keras-cv/keras_cv/models/object_detection/predict_utils.py",
"repo_id": "keras-cv",
"token_count": 1702
} | 78 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.backend import keras
from keras_cv.backend import ops
BATCH_NORM_EPSILON = 1e-3
BATCH_NORM_MOMENTUM = 0.97
# TODO(ianstenbit): Remove this method once we're using CSPDarkNet backbone
# (Calls to it should be inlined in the detector head)
def apply_conv_bn(
inputs,
output_channel,
kernel_size=1,
strides=1,
activation="swish",
name="conv_bn",
):
if kernel_size > 1:
inputs = keras.layers.ZeroPadding2D(
padding=kernel_size // 2, name=f"{name}_pad"
)(inputs)
x = keras.layers.Conv2D(
filters=output_channel,
kernel_size=kernel_size,
strides=strides,
padding="valid",
use_bias=False,
name=f"{name}_conv",
)(inputs)
x = keras.layers.BatchNormalization(
momentum=BATCH_NORM_MOMENTUM,
epsilon=BATCH_NORM_EPSILON,
name=f"{name}_bn",
)(x)
x = keras.layers.Activation(activation, name=name)(x)
return x
# TODO(ianstenbit): Remove this method once we're using CSPDarkNet backbone
# Calls to it should instead call the CSP block from the DarkNet implementation.
def apply_csp_block(
inputs,
channels=-1,
depth=2,
shortcut=True,
expansion=0.5,
activation="swish",
name="csp_block",
):
channel_axis = -1
channels = channels if channels > 0 else inputs.shape[channel_axis]
hidden_channels = int(channels * expansion)
pre = apply_conv_bn(
inputs,
hidden_channels * 2,
kernel_size=1,
activation=activation,
name=f"{name}_pre",
)
short, deep = ops.split(pre, 2, axis=channel_axis)
out = [short, deep]
for id in range(depth):
deep = apply_conv_bn(
deep,
hidden_channels,
kernel_size=3,
activation=activation,
name=f"{name}_pre_{id}_1",
)
deep = apply_conv_bn(
deep,
hidden_channels,
kernel_size=3,
activation=activation,
name=f"{name}_pre_{id}_2",
)
deep = (out[-1] + deep) if shortcut else deep
out.append(deep)
out = ops.concatenate(out, axis=channel_axis)
out = apply_conv_bn(
out,
channels,
kernel_size=1,
activation=activation,
name=f"{name}_output",
)
return out
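if __name__ == "__main__":
    # Minimal usage sketch (added for illustration; not part of the original
    # module): compose the two helpers into a tiny functional model and print
    # the resulting shapes. The input size and filter counts are arbitrary.
    demo_inputs = keras.layers.Input(shape=(64, 64, 32))
    demo_x = apply_conv_bn(demo_inputs, 64, kernel_size=3, name="demo_conv_bn")
    demo_x = apply_csp_block(demo_x, channels=64, depth=2, name="demo_csp")
    keras.Model(demo_inputs, demo_x).summary()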
| keras-cv/keras_cv/models/object_detection/yolo_v8/yolo_v8_layers.py/0 | {
"file_path": "keras-cv/keras_cv/models/object_detection/yolo_v8/yolo_v8_layers.py",
"repo_id": "keras-cv",
"token_count": 1278
} | 79 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import tensorflow as tf
from keras_cv.backend import keras
from keras_cv.backend.config import keras_3
from keras_cv.layers.object_detection_3d.voxelization import DynamicVoxelization
from keras_cv.models.object_detection_3d.center_pillar import (
MultiClassDetectionHead,
)
from keras_cv.models.object_detection_3d.center_pillar import (
MultiClassHeatmapDecoder,
)
from keras_cv.models.object_detection_3d.center_pillar import (
MultiHeadCenterPillar,
)
from keras_cv.models.object_detection_3d.center_pillar_backbone import (
CenterPillarBackbone,
)
from keras_cv.tests.test_case import TestCase
@pytest.mark.skipif(
keras_3() and keras.backend.backend() == "torch",
reason="CenterPillar does not yet support PyTorch.",
)
class CenterPillarTest(TestCase):
def test_center_pillar_call(self):
voxel_net = DynamicVoxelization(
voxel_size=[0.1, 0.1, 1000],
spatial_size=[-20, 20, -20, 20, -20, 20],
)
# dimensions computed from voxel_net
backbone = CenterPillarBackbone(
stackwise_down_blocks=[1, 1],
stackwise_down_filters=[64, 128],
stackwise_up_filters=[128, 64],
input_shape=(None, None, 128),
)
decoder = MultiClassHeatmapDecoder(
num_classes=2,
num_head_bin=[2, 2],
anchor_size=[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
max_pool_size=[3, 3],
max_num_box=[3, 4],
heatmap_threshold=[0.2, 0.2],
voxel_size=voxel_net._voxel_size,
spatial_size=voxel_net._spatial_size,
)
multiclass_head = MultiClassDetectionHead(
num_classes=2,
num_head_bin=[2, 2],
)
model = MultiHeadCenterPillar(
backbone=backbone,
voxel_net=voxel_net,
multiclass_head=multiclass_head,
prediction_decoder=decoder,
)
point_xyz = tf.random.normal([2, 1000, 3])
point_feature = tf.random.normal([2, 1000, 4])
point_mask = tf.constant(True, shape=[2, 1000, 1])
outputs = model(
{
"point_xyz": point_xyz,
"point_feature": point_feature,
"point_mask": point_mask,
},
training=True,
)
self.assertEqual(outputs["class_1"].shape, (2, 400, 400, 12))
self.assertEqual(outputs["class_2"].shape, (2, 400, 400, 12))
def test_center_pillar_predict(self):
voxel_net = DynamicVoxelization(
voxel_size=[0.1, 0.1, 1000],
spatial_size=[-20, 20, -20, 20, -20, 20],
)
backbone = CenterPillarBackbone(
stackwise_down_blocks=[1, 1],
stackwise_down_filters=[64, 128],
stackwise_up_filters=[128, 64],
input_shape=(None, None, 128),
)
decoder = MultiClassHeatmapDecoder(
num_classes=2,
num_head_bin=[2, 2],
anchor_size=[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
max_pool_size=[3, 3],
max_num_box=[3, 4],
heatmap_threshold=[0.2, 0.2],
voxel_size=voxel_net._voxel_size,
spatial_size=voxel_net._spatial_size,
)
multiclass_head = MultiClassDetectionHead(
num_classes=2,
num_head_bin=[2, 2],
)
model = MultiHeadCenterPillar(
backbone=backbone,
voxel_net=voxel_net,
multiclass_head=multiclass_head,
prediction_decoder=decoder,
)
point_xyz = tf.random.normal([2, 1000, 3])
point_feature = tf.random.normal([2, 1000, 4])
point_mask = tf.constant(True, shape=[2, 1000, 1])
outputs = model.predict(
{
"point_xyz": point_xyz,
"point_feature": point_feature,
"point_mask": point_mask,
}
)
        # max_num_box is [3, 4], so up to 3 + 4 = 7 boxes total across classes
self.assertEqual(outputs["3d_boxes"]["boxes"].shape, (2, 7, 7))
self.assertEqual(outputs["3d_boxes"]["classes"].shape, (2, 7))
self.assertEqual(outputs["3d_boxes"]["confidence"].shape, (2, 7))
self.assertAllEqual(
outputs["3d_boxes"]["classes"],
tf.constant([1, 1, 1, 2, 2, 2, 2] * 2, shape=(2, 7)),
)
| keras-cv/keras_cv/models/object_detection_3d/center_pillar_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/object_detection_3d/center_pillar_test.py",
"repo_id": "keras-cv",
"token_count": 2425
} | 80 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.models.stable_diffusion.attention_block import ( # noqa: E501
AttentionBlock,
)
from keras_cv.models.stable_diffusion.padded_conv2d import PaddedConv2D
from keras_cv.models.stable_diffusion.resnet_block import ResnetBlock
@keras_cv_export("keras_cv.models.stable_diffusion.ImageEncoder")
class ImageEncoder(keras.Sequential):
"""ImageEncoder is the VAE Encoder for StableDiffusion."""
def __init__(self, download_weights=True):
super().__init__(
[
keras.layers.Input((None, None, 3)),
PaddedConv2D(128, 3, padding=1),
ResnetBlock(128),
ResnetBlock(128),
PaddedConv2D(128, 3, padding=((0, 1), (0, 1)), strides=2),
ResnetBlock(256),
ResnetBlock(256),
PaddedConv2D(256, 3, padding=((0, 1), (0, 1)), strides=2),
ResnetBlock(512),
ResnetBlock(512),
PaddedConv2D(512, 3, padding=((0, 1), (0, 1)), strides=2),
ResnetBlock(512),
ResnetBlock(512),
ResnetBlock(512),
AttentionBlock(512),
ResnetBlock(512),
keras.layers.GroupNormalization(epsilon=1e-5),
keras.layers.Activation("swish"),
PaddedConv2D(8, 3, padding=1),
PaddedConv2D(8, 1),
# TODO(lukewood): can this be refactored to be a Rescaling
# layer? Perhaps some sort of rescale and gather?
# Either way, we may need a lambda to gather the first 4
# dimensions.
keras.layers.Lambda(lambda x: x[..., :4] * 0.18215),
]
)
if download_weights:
image_encoder_weights_fpath = keras.utils.get_file(
origin="https://huggingface.co/fchollet/stable-diffusion/resolve/main/vae_encoder.h5", # noqa: E501
file_hash="c60fb220a40d090e0f86a6ab4c312d113e115c87c40ff75d11ffcf380aab7ebb", # noqa: E501
)
self.load_weights(image_encoder_weights_fpath)
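if __name__ == "__main__":
    # Usage sketch (added for illustration; not part of the original module).
    # Weights are not downloaded here, so the latents come from randomly
    # initialized parameters -- this only demonstrates input/output shapes.
    import numpy as np
    encoder = ImageEncoder(download_weights=False)
    image = np.random.uniform(size=(1, 128, 128, 3)).astype("float32")
    latents = encoder.predict(image)
    print(latents.shape)  # (1, 16, 16, 4): spatial dims / 8, 4 latent channels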
| keras-cv/keras_cv/models/stable_diffusion/image_encoder.py/0 | {
"file_path": "keras-cv/keras_cv/models/stable_diffusion/image_encoder.py",
"repo_id": "keras-cv",
"token_count": 1309
} | 81 |
# Copyright 2022 The KerasCV Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import numpy as np
import pytest
import tensorflow as tf
import keras_cv
from keras_cv.tests.test_case import TestCase
num_points = 200000
num_boxes = 1000
box_dimension = 20.0
def get_points_boxes():
points = tf.random.uniform(
shape=[num_points, 2], minval=0, maxval=box_dimension, dtype=tf.float32
)
points_z = 5.0 * np.ones(shape=[num_points, 1], dtype="float32")
points = tf.concat([points, points_z], axis=-1)
boxes_x = tf.random.uniform(
shape=[num_boxes, 1],
minval=0,
maxval=box_dimension - 1.0,
dtype=tf.float32,
)
boxes_y = tf.random.uniform(
shape=[num_boxes, 1],
minval=0,
maxval=box_dimension - 1.0,
dtype=tf.float32,
)
boxes_dx = tf.random.uniform(
shape=[num_boxes, 1], minval=0, maxval=5.0, dtype=tf.float32
)
boxes_dx = tf.math.minimum(box_dimension - boxes_x, boxes_dx)
boxes_dy = tf.random.uniform(
shape=[num_boxes, 1], minval=0, maxval=5.0, dtype=tf.float32
)
boxes_dy = tf.math.minimum(box_dimension - boxes_y, boxes_dy)
boxes_z = 5.0 * np.ones([num_boxes, 1], dtype="float32")
boxes_dz = 3.0 * np.ones([num_boxes, 1], dtype="float32")
boxes_angle = np.zeros([num_boxes, 1], dtype="float32")
boxes = tf.concat(
[boxes_x, boxes_y, boxes_z, boxes_dx, boxes_dy, boxes_dz, boxes_angle],
axis=-1,
)
return points, boxes
class WithinBox3DTest(TestCase):
@pytest.mark.skipif(
"TEST_CUSTOM_OPS" not in os.environ
or os.environ["TEST_CUSTOM_OPS"] != "true",
reason="Requires binaries compiled from source",
)
def test_unbatched_unrotated(self):
boxes = np.array(
[
[0, 0, 0, 4, 4, 4, 0],
[5, 5, 5, 1, 1, 1, 0],
]
).astype("float32")
points = np.array(
[
[0, 0, 0],
[0, 0, 2],
# this point has z value larger than box top z
[0, 0, 2.1],
[2, 0, 0],
[2.01, 0, 0],
# this point belongs to 2nd box
[5.5, 5.5, 5.5],
# this point doesn't belong to 2nd box
[5.6, 5.5, 5.5],
]
).astype("float32")
res = keras_cv.point_cloud.within_box3d_index(points, boxes)
self.assertAllEqual([0, 0, -1, 0, -1, 1, -1], res)
@pytest.mark.skipif(
"TEST_CUSTOM_OPS" not in os.environ
or os.environ["TEST_CUSTOM_OPS"] != "true",
reason="Requires binaries compiled from source",
)
def test_unbatched_rotated(self):
# a box rotated with 45 degree, the intersection with x and y axis
# is [2*sqrt(2), 0] and [0, 2*sqrt(2)]
boxes = np.array(
[
[0, 0, 0, 4, 4, 4, np.pi / 4],
]
).astype("float32")
points = np.array(
[
[0, 0, 0],
[0, 0, 2],
# this point has z value larger than box top z
[0, 0, 2.1],
[2.82, 0, 0],
# this point has x value larger than rotated box
[2.83, 0, 0],
]
).astype("float32")
res = keras_cv.point_cloud.within_box3d_index(points, boxes)
self.assertAllClose([0, 0, -1, 0, -1], res)
@pytest.mark.skipif(
"TEST_CUSTOM_OPS" not in os.environ
or os.environ["TEST_CUSTOM_OPS"] != "true",
reason="Requires binaries compiled from source",
)
def test_batched_unrotated(self):
boxes = np.array(
[
[[0, 0, 0, 4, 4, 4, 0]],
[[5, 5, 5, 1, 1, 1, 0]],
]
).astype("float32")
points = np.array(
[
[
[0, 0, 0],
[0, 0, 2],
# this point has z value larger than box top z
[0, 0, 2.1],
[2, 0, 0],
[2.01, 0, 0],
# this point belongs to 2nd box
[5.5, 5.5, 5.5],
# this point doesn't belong to 2nd box
[5.6, 5.5, 5.5],
]
]
* 2
).astype("float32")
res = keras_cv.point_cloud.within_box3d_index(points, boxes)
self.assertAllEqual(
[[0, 0, -1, 0, -1, -1, -1], [-1, -1, -1, -1, -1, 0, -1]], res
)
@pytest.mark.skipif(
"TEST_CUSTOM_OPS" not in os.environ
or os.environ["TEST_CUSTOM_OPS"] != "true",
reason="Requires binaries compiled from source",
)
def test_batched_rotated(self):
# a box rotated with 45 degree, the intersection with x and y axis
# is [2*sqrt(2), 0] and [0, 2*sqrt(2)]
boxes = np.array(
[
[[0, 0, 0, 4, 4, 4, np.pi / 4]],
[[5, 5, 5, 1, 1, 1, 0]],
]
).astype("float32")
points = np.array(
[
[
[0, 0, 0],
[0, 0, 2],
# this point has z value larger than box top z
[0, 0, 2.1],
[2.82, 0, 0],
# this point has x value larger than rotated box
[2.83, 0, 0],
]
]
* 2
).astype("float32")
res = keras_cv.point_cloud.within_box3d_index(points, boxes)
self.assertAllEqual([[0, 0, -1, 0, -1], [-1, -1, -1, -1, -1]], res)
@pytest.mark.skipif(
"TEST_CUSTOM_OPS" not in os.environ
or os.environ["TEST_CUSTOM_OPS"] != "true",
reason="Requires binaries compiled from source",
)
def test_many_points(self):
points, boxes = get_points_boxes()
for _ in range(5):
res = keras_cv.point_cloud.within_box3d_index(points, boxes)
self.assertAllClose(res.shape, points.shape[:1])
@pytest.mark.skipif(
"TEST_CUSTOM_OPS" not in os.environ
or os.environ["TEST_CUSTOM_OPS"] != "true",
reason="Requires binaries compiled from source",
)
@pytest.mark.extra_large
def test_equal(self):
for _ in range(10000):
with tf.device("cpu:0"):
box_center = tf.random.uniform(
shape=[1, 3], minval=-10.0, maxval=10.0
)
box_dim = tf.random.uniform(
shape=[1, 3], minval=0.1, maxval=10.0
)
boxes = tf.concat([box_center, box_dim, [[0.0]]], axis=-1)
points = tf.random.normal([32, 3])
res = keras_cv.point_cloud.is_within_any_box3d(points, boxes)
res_v2 = keras_cv.point_cloud.is_within_any_box3d_v2(
points, boxes
)
res_v3 = keras_cv.point_cloud.is_within_any_box3d_v3(
points, boxes
)
self.assertAllEqual(res, res_v2)
self.assertAllEqual(res, res_v3)
| keras-cv/keras_cv/point_cloud/within_box_3d_test.py/0 | {
"file_path": "keras-cv/keras_cv/point_cloud/within_box_3d_test.py",
"repo_id": "keras-cv",
"token_count": 4191
} | 82 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
from keras_cv.tests.test_case import TestCase
from keras_cv.utils import fill_utils
class BoundingBoxToMaskTest(TestCase):
def _run_test(self, corners, expected):
mask = fill_utils.corners_to_mask(corners, mask_shape=(6, 6))
mask = tf.cast(mask, dtype=tf.int32)
tf.assert_equal(mask, expected)
def test_corners_whole(self):
expected = np.array(
[
[0, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
],
dtype="int32",
)
corners = np.array([[1, 0, 4, 3]], dtype="float32")
self._run_test(corners, expected)
def test_corners_frac(self):
expected = np.array(
[
[0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
],
dtype="int32",
)
corners = np.array([[1.5, 0.5, 4.5, 3.5]], dtype="float32")
self._run_test(corners, expected)
def test_width_zero(self):
expected = np.array(
[
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
],
dtype="int32",
)
corners = np.array([[0, 0, 0, 3]], dtype="float32")
self._run_test(corners, expected)
def test_height_zero(self):
expected = np.array(
[
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
],
dtype="int32",
)
corners = np.array([[1, 0, 4, 0]], dtype="float32")
self._run_test(corners, expected)
def test_width_negative(self):
expected = np.array(
[
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
],
dtype="int32",
)
corners = np.array([[1, 0, -2, 3]], dtype="float32")
self._run_test(corners, expected)
def test_height_negative(self):
expected = np.array(
[
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
],
dtype="int32",
)
corners = np.array([[1, 0, 4, -2]], dtype="float32")
self._run_test(corners, expected)
def test_width_out_of_lower_bound(self):
expected = np.array(
[
[1, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
],
dtype="int32",
)
corners = np.array([[-2, -2, 2, 3]], dtype="float32")
self._run_test(corners, expected)
def test_width_out_of_upper_bound(self):
expected = np.array(
[
[0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
],
dtype="int32",
)
corners = np.array([[4, 0, 8, 3]], dtype="float32")
self._run_test(corners, expected)
def test_height_out_of_lower_bound(self):
expected = np.array(
[
[0, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
],
dtype="int32",
)
corners = np.array([[1, -3, 4, 2]], dtype="float32")
self._run_test(corners, expected)
def test_height_out_of_upper_bound(self):
expected = np.array(
[
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 0, 0],
],
dtype="int32",
)
corners = np.array([[1, 4, 4, 9]], dtype="float32")
self._run_test(corners, expected)
def test_start_out_of_upper_bound(self):
expected = np.array(
[
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
],
dtype="int32",
)
corners = np.array([[8, 8, 10, 12]], dtype="float32")
self._run_test(corners, expected)
class FillRectangleTest(TestCase):
def _run_test(self, img_w, img_h, cent_x, cent_y, rec_w, rec_h, expected):
batch_size = 1
batch_shape = (batch_size, img_h, img_w, 1)
images = np.ones(batch_shape, dtype="int32")
centers_x = tf.fill([batch_size], cent_x)
centers_y = tf.fill([batch_size], cent_y)
width = tf.fill([batch_size], rec_w)
height = tf.fill([batch_size], rec_h)
fill = tf.zeros_like(images)
filled_images = fill_utils.fill_rectangle(
images, centers_x, centers_y, width, height, fill
)
# remove batch dimension and channel dimension
filled_images = filled_images[0, ..., 0]
tf.assert_equal(filled_images, expected)
def test_rectangle_position(self):
img_w, img_h = 8, 8
cent_x, cent_y = 4, 3
rec_w, rec_h = 5, 3
expected = np.array(
[
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 0, 0, 0, 0, 0, 1],
[1, 1, 0, 0, 0, 0, 0, 1],
[1, 1, 0, 0, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
],
dtype="int32",
)
self._run_test(img_w, img_h, cent_x, cent_y, rec_w, rec_h, expected)
def test_width_out_of_lower_bound(self):
img_w, img_h = 8, 8
cent_x, cent_y = 1, 3
rec_w, rec_h = 5, 3
# assert width is truncated when cent_x - rec_w < 0
expected = np.array(
[
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 0, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
],
dtype="int32",
)
self._run_test(img_w, img_h, cent_x, cent_y, rec_w, rec_h, expected)
def test_width_out_of_upper_bound(self):
img_w, img_h = 8, 8
cent_x, cent_y = 6, 3
rec_w, rec_h = 5, 3
# assert width is truncated when cent_x + rec_w > img_w
expected = np.array(
[
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
],
dtype="int32",
)
self._run_test(img_w, img_h, cent_x, cent_y, rec_w, rec_h, expected)
def test_height_out_of_lower_bound(self):
img_w, img_h = 8, 8
cent_x, cent_y = 4, 1
rec_w, rec_h = 3, 5
# assert height is truncated when cent_y - rec_h < 0
expected = np.array(
[
[1, 1, 1, 0, 0, 0, 1, 1],
[1, 1, 1, 0, 0, 0, 1, 1],
[1, 1, 1, 0, 0, 0, 1, 1],
[1, 1, 1, 0, 0, 0, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
],
dtype="int32",
)
self._run_test(img_w, img_h, cent_x, cent_y, rec_w, rec_h, expected)
def test_height_out_of_upper_bound(self):
img_w, img_h = 8, 8
cent_x, cent_y = 4, 6
rec_w, rec_h = 3, 5
# assert height is truncated when cent_y + rec_h > img_h
expected = np.array(
[
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 0, 0, 0, 1, 1],
[1, 1, 1, 0, 0, 0, 1, 1],
[1, 1, 1, 0, 0, 0, 1, 1],
[1, 1, 1, 0, 0, 0, 1, 1],
],
dtype="int32",
)
self._run_test(img_w, img_h, cent_x, cent_y, rec_w, rec_h, expected)
def test_different_fill(self):
batch_size = 2
img_w, img_h = 5, 5
cent_x, cent_y = 2, 2
rec_w, rec_h = 3, 3
batch_shape = (batch_size, img_h, img_w, 1)
images = np.ones(batch_shape, dtype="int32")
centers_x = tf.fill([batch_size], cent_x)
centers_y = tf.fill([batch_size], cent_y)
width = tf.fill([batch_size], rec_w)
height = tf.fill([batch_size], rec_h)
fill = tf.stack(
[tf.fill(images[0].shape, 2), tf.fill(images[1].shape, 3)]
)
filled_images = fill_utils.fill_rectangle(
images, centers_x, centers_y, width, height, fill
)
# remove channel dimension
filled_images = filled_images[..., 0]
expected = np.array(
[
[
[1, 1, 1, 1, 1],
[1, 2, 2, 2, 1],
[1, 2, 2, 2, 1],
[1, 2, 2, 2, 1],
[1, 1, 1, 1, 1],
],
[
[1, 1, 1, 1, 1],
[1, 3, 3, 3, 1],
[1, 3, 3, 3, 1],
[1, 3, 3, 3, 1],
[1, 1, 1, 1, 1],
],
],
dtype="int32",
)
tf.assert_equal(filled_images, expected)
| keras-cv/keras_cv/utils/fill_utils_test.py/0 | {
"file_path": "keras-cv/keras_cv/utils/fill_utils_test.py",
"repo_id": "keras-cv",
"token_count": 7182
} | 83 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import cv2
except ImportError:
cv2 = None
import numpy as np
from keras_cv import bounding_box
from keras_cv import utils
from keras_cv.api_export import keras_cv_export
from keras_cv.utils import assert_cv2_installed
@keras_cv_export("keras_cv.visualization.draw_bounding_boxes")
def draw_bounding_boxes(
images,
bounding_boxes,
color,
bounding_box_format,
line_thickness=1,
text_thickness=1,
font_scale=1.0,
class_mapping=None,
):
"""Internal utility to draw bounding boxes on the target image.
Accepts a batch of images and batch of bounding boxes. The function draws
the bounding boxes onto the image, and returns a new image tensor with the
annotated images. This API is intentionally not exported, and is considered
an implementation detail.
Args:
images: a batch Tensor of images to plot bounding boxes onto.
bounding_boxes: a Tensor of batched bounding boxes to plot onto the
provided images.
color: the color in which to plot the bounding boxes
bounding_box_format: The format of bounding boxes to plot onto the
images. Refer
[to the keras.io docs](https://keras.io/api/keras_cv/bounding_box/formats/)
for more details on supported bounding box formats.
line_thickness: (Optional) line_thickness for the box and text labels.
            Defaults to `1`.
text_thickness: (Optional) the thickness for the text, defaults to
`1.0`.
font_scale: (Optional) scale of font to draw in, defaults to `1.0`.
class_mapping: (Optional) dictionary from class ID to class label.
Returns:
the input `images` with provided bounding boxes plotted on top of them
""" # noqa: E501
assert_cv2_installed("draw_bounding_boxes")
bounding_boxes = bounding_box.convert_format(
bounding_boxes, source=bounding_box_format, target="xyxy", images=images
)
text_thickness = text_thickness or line_thickness
bounding_boxes["boxes"] = utils.to_numpy(bounding_boxes["boxes"])
bounding_boxes["classes"] = utils.to_numpy(bounding_boxes["classes"])
images = utils.to_numpy(images)
image_width = images.shape[-2]
outline_factor = image_width // 100
class_mapping = class_mapping or {}
result = []
if len(images.shape) != 4:
raise ValueError(
"Images must be a batched np-like with elements of shape "
"(height, width, 3)"
)
for i in range(images.shape[0]):
bounding_box_batch = {
"boxes": bounding_boxes["boxes"][i],
"classes": bounding_boxes["classes"][i],
}
if "confidence" in bounding_boxes:
bounding_box_batch["confidence"] = bounding_boxes["confidence"][i]
image = utils.to_numpy(images[i]).astype("uint8")
for b_id in range(bounding_box_batch["boxes"].shape[0]):
x, y, x2, y2 = bounding_box_batch["boxes"][b_id].astype(int)
class_id = bounding_box_batch["classes"][b_id].astype(int)
confidence = bounding_box_batch.get("confidence", None)
if class_id == -1:
continue
            # convert numpy scalars to plain Python ints for cv2
x, y, x2, y2 = int(x), int(y), int(x2), int(y2)
cv2.rectangle(
image,
(x, y),
(x2, y2),
(0, 0, 0, 0.5),
line_thickness + outline_factor,
)
cv2.rectangle(image, (x, y), (x2, y2), color, line_thickness)
class_id = int(class_id)
if class_id in class_mapping:
label = class_mapping[class_id]
if confidence is not None:
label = f"{label} | {confidence[b_id]:.2f}"
x, y = _find_text_location(
x, y, font_scale, line_thickness, outline_factor
)
cv2.putText(
image,
label,
(x, y),
cv2.FONT_HERSHEY_SIMPLEX,
font_scale,
(0, 0, 0, 0.5),
text_thickness + outline_factor,
)
cv2.putText(
image,
label,
(x, y),
cv2.FONT_HERSHEY_SIMPLEX,
font_scale,
color,
text_thickness,
)
result.append(image)
return np.array(result).astype(int)
def _find_text_location(x, y, font_scale, line_thickness, outline_factor):
font_height = int(font_scale * 12)
target_y = y - int(8 + outline_factor)
if target_y - (2 * font_height) > 0:
return x, y - int(8 + outline_factor)
line_offset = line_thickness + outline_factor
static_offset = 3
return (
x + outline_factor + static_offset,
y + (2 * font_height) + line_offset + static_offset,
)
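if __name__ == "__main__":
    # Usage sketch (added for illustration; not part of the original module).
    # Requires OpenCV, since the drawing itself is delegated to `cv2`. The box
    # coordinates and class mapping below are arbitrary example values.
    images = np.zeros((1, 128, 128, 3), dtype="uint8")
    boxes = {
        "boxes": np.array([[[16.0, 16.0, 96.0, 96.0]]]),
        "classes": np.array([[0.0]]),
    }
    annotated = draw_bounding_boxes(
        images,
        boxes,
        color=(0, 255, 0),
        bounding_box_format="xyxy",
        class_mapping={0: "object"},
    )
    print(annotated.shape)  # (1, 128, 128, 3)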
| keras-cv/keras_cv/visualization/draw_bounding_boxes.py/0 | {
"file_path": "keras-cv/keras_cv/visualization/draw_bounding_boxes.py",
"repo_id": "keras-cv",
"token_count": 2568
} | 84 |
#!/bin/bash
isort .
black .
find . -iname '*.h' -o -iname '*.c' -o -iname '*.cpp' -o -iname '*.hpp' -o -iname '*.cc' \
    | xargs clang-format --style=google -i -fallback-style=none
| keras-cv/shell/format.sh/0 | {
"file_path": "keras-cv/shell/format.sh",
"repo_id": "keras-cv",
"token_count": 80
} | 85 |
# Japanese translation of the Keras documentation
This is the repository for the translated `.md` source files of [keras.io](http://keras.io/), maintained as a community translation project.
---
# Keras documentationの日本語訳化
## 翻訳ガイドライン
- 翻訳対象は本文とコード中のコメント
- 本文は敬体(です・ます調)
- 句読点は「,.」を用いる
- 引用符(',")は基本的にそのまま
- 記号`「,.()?!:;」`は全角
- 文中のシンタックスハイライト(syntax highlight)の前後に空白は入れない.
- 用語の訳は対訳表に従う.
※ 翻訳は英語から日本語へのただの変換作業ではなく,英文の意味を読み取り,日本語として表現する創作作業です.
英語の言い回しに引きずられることなく自然な日本語で表現しましょう.
---
## 対訳表
- 構文キーワードなどはそのまま英語表記とする.
- 検索性のため,python/numpy/keras特有の単語はそのまま英語表記とする.
| English | 日本語 |
|:-------------------|:-----------------------|
| arguments | 引数 |
| boolean | 真理値 |
| data augumentation | データ拡張 |
| deep learning | 深層学習 |
| float | 浮動小数点数 |
| Functional API | Functional API |
| Fuzz factor | 微小量 |
| input shape | 入力のshape |
| index | インデックス |
| int | 整数 |
| layer | レイヤー |
| loss function | 損失関数 |
| metrics | 評価関数(値) |
| nD tensor | n階テンソル |
| Numpy Array | Numpy 配列 |
| objective | 目的関数 |
| optimizer | 最適化(アルゴリズム) |
| output shape | 出力のshape |
| regularizer | 正則化 |
| return | 戻り値 |
| recurrent | recurrent |
| See something | ~~を参照 |
| Sequential Model | Sequentialモデル |
| shape | shape |
| str | 文字列 |
| target | ターゲット |
| testing | テスト |
| training | 訓練 |
| 1--9 | 1--9 |
| keras-docs-ja/README.md/0 | {
"file_path": "keras-docs-ja/README.md",
"repo_id": "keras-docs-ja",
"token_count": 1620
} | 86 |
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/convolutional.py#L237)</span>
### Conv1D
```python
keras.layers.Conv1D(filters, kernel_size, strides=1, padding='valid', dilation_rate=1, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None)
```
1次元の畳み込みレイヤー(例えば時間的な畳込み).
このレイヤーは畳み込みカーネルを生成します.これはレイヤーの入力を単一の空間的(または時間的)次元で畳み込んで,出力のテンソルを作ります.
`use_bias`をTrueにすると,バイアスベクトルが出力に加えられます.`activation`が`None`でない場合,指定した活性化関数が出力に適用されます.
このレイヤーを第一層に使う場合,キーワード引数として`input_shape`(整数のタプルか`None`.例えば10個の128次元ベクトルの場合ならば`(10, 128)`,任意個数の128次元ベクトルの場合は`(None, 128)`)を指定してください.
__引数__
- __filters__: 整数,出力空間の次元(つまり畳み込みにおける出力フィルタの数).
- __kernel_size__: 整数か単一の整数からなるタプル/リストで,1次元の畳み込みウィンドウの長さを指定します.
- __strides__: 整数か単一の整数からなるタプル/リストで,畳み込みのストライドの長さを指定します.
strides value != 1とすると`dilation_rate` value != 1と指定できません.
- __padding__: `"valid"`,`"same"`,`"causal"`のいずれか(大文字小文字の区別はしない).`"valid"`はパディングを行いません.`"same"`は元の入力と同じ長さを出力がもつように入力にパディングします.`"causal"`はcausal(dilated)畳み込み.例えば,output[t]はinput[t+1]に依存しません.時間的順序を無視すべきでない時系列データをモデリングする際に有効です.[WaveNet: A Generative Model for Raw Audio, section 2.1](https://arxiv.org/abs/1609.03499)を参照して下さい.
- __dilation_rate__: 整数か単一の整数からなるタプル/リストで,dilated convolutionで使われる膨張率を指定します.
現在,`dilation_rate` value != 1 とすると,`strides` value != 1を指定することはできません.
- __activation__: 使用する活性化関数の名前([activations](../activations.md)を参照),
何も指定しなければ,活性化は一切適用されません(つまり"線形"活性`a(x) = x`).
- __use_bias__: 真理値で,バイアスベクトルを加えるかどうかを指定します.
- __kernel_initializer__: `kernel`の重み行列の初期値を指定します.([initializers](../initializers.md)を参照)
- __bias_initializer__: バイアスベクトルの初期値を指定します.([initializers](../initializers.md)を参照)
- __kernel_regularizer__: `kernel`の重みに適用させるRegularizerを指定します.([regularizer](../regularizers.md)を参照)
- __bias_regularizer__: バイアスに適用させるRegularizerを指定します.([regularizer](../regularizers.md)を参照)
- __activity_regularizer__: 出力テンソルに適用させるRegularizerを指定します.([regularizer](../regularizers.md)を参照)
- __kernel_constraint__: カーネルの行列に適用させるConstraintを指定します.([constraint](../constraints.md)を参照)
- __bias_constraint__: バイアスベクトルに適用させるConstraintを指定します.([constraint](../constraints.md)を参照)
__入力のshape__
shapeが`(batch_size, steps, input_dim)`の3階テンソル.
__出力のshape__
shapeが`(batch_size, new_steps, nb_filter)`の3階テンソル.
`steps`はパディングにより変わっている可能性があります.
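__例__
以下は参考のために追加した簡単な使用例です(原文のドキュメントには含まれません).出力のshapeはデフォルトの`padding='valid'`を仮定した値です.
```python
from keras.models import Sequential
from keras.layers import Conv1D
model = Sequential()
# 10タイムステップ,128次元の入力を64個のフィルタで畳み込みます
model.add(Conv1D(64, 3, activation='relu', input_shape=(10, 128)))
# now model.output_shape == (None, 8, 64)
```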
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/convolutional.py#L347)</span>
### Conv2D
```python
keras.layers.Conv2D(filters, kernel_size, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1), activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None)
```
2次元の畳み込みレイヤー(例えばイメージに対する空間的畳み込み).
このレイヤーは畳み込みカーネルを生成します.これはレイヤーの入力を畳み込んで,出力のテンソルを作ります.
`use_bias`をTrueにすると,バイアスベクトルが出力に加えられます.`activation`が`None`でない場合,指定した活性化関数が出力に適用されます.
このレイヤーをモデルの第1層に使うときはキーワード引数`input_shape`
(整数のタプル,サンプル軸を含まない)を指定してください.
例えば,`data_format="channels_last"`のとき,128x128 RGB画像では`input_shape=(128, 128, 3)`となります.
__引数__
- __filters__: 整数で,出力空間の次元(つまり畳み込みにおける出力フィルタの数).
- __kernel_size__: 整数か2つの整数からなるタプル/リストで,2次元の畳み込みウィンドウの幅と高さを指定します.単一の整数の場合は全ての次元に対して同じ値が使われ,正方形のカーネルになります.
- __strides__: 整数か2つの整数からなるタプル/リストで畳み込みの縦と横のストライドをそれぞれ指定できます.単一の整数の場合は幅と高さが同様のストライドになります.strides value != 1とすると`dilation_rate` value != 1と指定できません.
- __padding__: `"valid"`か`"same"`のどちらかを指定します.
- __data_format__: 文字列で,`"channels_last"`(デフォルト)か`"channels_first"`のどちらかを指定します.これは入力における次元の順序です. `"channels_last"`の場合,入力のshapeは`"(batch, height, width, channels)"`となり,`"channels_first"`の場合は`"(batch, channels, height, width)"`となります.デフォルトはKerasの設定ファイル`~/.keras/keras.json`の`image_data_format`の値です.一度も値を変更していなければ,"channels_last"になります.
- __dilation_rate__: 整数か2つの整数からなるタプル/リストで,dilated convolutionで使われる膨張率を指定します.
現在,`dilation_rate` value != 1 とすると,strides value != 1を指定することはできません.
- __activation__: 使用する活性化関数の名前([activations](../activations.md)を参照),
何も指定しなければ,活性化は一切適用されません(つまり"線形"活性`a(x) = x`).
- __use_bias__: 真理値で,バイアスベクトルを加えるかどうかを指定します.
- __kernel_initializer__: `kernel`の重み行列の初期値を指定します.([initializers](../initializers.md)を参照)
- __bias_initializer__: バイアスベクトルの初期値を指定します.([initializers](../initializers.md)を参照)
- __kernel_regularizer__: `kernel`の重み行列に適用させるRegularizerを指定します.([regularizer](../regularizers.md)を参照)
- __bias_regularizer__: バイアスベクトルに適用させるRegularizerを指定します.([regularizer](../regularizers.md)を参照)
- __activity_regularizer__: 出力テンソルに適用させるRegularizerを指定します.([regularizer](../regularizers.md)を参照)
- __kernel_constraint__: カーネルの行列に適用させるConstraintを指定します.([constraint](../constraints.md)を参照)
- __bias_constraint__: バイアスベクトルに適用させるConstraintを指定します.([constraint](../constraints.md)を参照)
__入力のshape__
data_format='channels_first'の場合,
`(batch_size, channels, rows, cols)`の4階テンソル.
data_format='channels_last'の場合,
`(batch_size, rows, cols, channels)`の4階テンソルになります.
__出力のshape__
data_format='channels_first'の場合,
`(samples, channels, rows, cols)`の4階テンソル.
data_format='channels_last'の場合,
`(samples, rows, cols, channels)`の4階テンソルになります.`rows`と`cols`値はパディングにより変わっている可能性があります.
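__例__
参考として簡単な使用例を追加します(原文にはない例です).shapeの値は`padding='valid'`(デフォルト)の場合です.
```python
from keras.models import Sequential
from keras.layers import Conv2D
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(128, 128, 3)))
# now model.output_shape == (None, 126, 126, 32)
```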
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/convolutional.py#L1384)</span>
### SeparableConv2D
```python
keras.layers.SeparableConv2D(filters, kernel_size, strides=(1, 1), padding='valid', data_format=None, depth_multiplier=1, activation=None, use_bias=True, depthwise_initializer='glorot_uniform', pointwise_initializer='glorot_uniform', bias_initializer='zeros', depthwise_regularizer=None, pointwise_regularizer=None, bias_regularizer=None, activity_regularizer=None, depthwise_constraint=None, pointwise_constraint=None, bias_constraint=None)
```
Depthwiseな2次元separable畳み込み層.
separable畳み込み演算は,depthwiseの空間的な畳み込み(各入力チャネルに別々に作用する)を実行し,続いてpointwiseに畳み込みを行い,両者の出力チャネルを混合します.`depth_multiplier`は,出力チャネルを生成するための入力チャネルの数を指定します.
separable畳み込み演算はひとつのカーネルをふたつの小さなカーネルに分解する方法として,直感的に理解することができます.もしくはInception blockの極端な例として考えることもできます.
__引数__
- __filters__: 整数で,出力空間の次元(つまり畳み込みにおける出力フィルタの数).
- __kernel_size__: 整数か2つの整数からなるタプル/リストで,2次元の畳み込みウィンドウの幅と高さを指定します. 単一の整数の場合は全ての次元に対して同じ値を指定します.
- __strides__: 整数か2つの整数からなるタプル/リストで畳み込みの縦と横のストライドをそれぞれ指定できます.単一の整数の場合は幅と高さが同様のストライドになります.strides value != 1とすると`dilation_rate` value != 1と指定できません.
- __padding__: `"valid"`か`"same"`のどちらかを指定します.
- __data_format__: 文字列で,`"channels_last"`(デフォルト)か`"channels_first"`のどちらかを指定します.これは入力における次元の順序です. `"channels_last"`の場合,入力のshapeは`"(batch, height, width, channels)"`となり,`"channels_first"`の場合は`"(batch, channels, height, width)"`となります.デフォルトはKerasの設定ファイル`~/.keras/keras.json`の`image_data_format`の値です.一度も値を変更していなければ,"channels_last"になります.
- __depth_multiplier__: 各入力チャネルに対するdepthwiseな畳み込みチャネルの数.深さ方向畳み込みチャネルの出力総数は,`filters_in * depth_multiplier`に等しくなります.
- __activation__: 使用する活性化関数の名前([activations](../activations.md)を参照),
何も指定しなければ,活性化は一切適用されません(つまり"線形"活性`a(x) = x`).
- __use_bias__: 真理値で,バイアスベクトルを加えるかどうかを指定します.
- __depthwise_initializer__: カーネルの重み行列の初期値をdepthwiseに指定します.([initializers](../initializers.md)を参照)
- __pointwise_initializer__: カーネルの重み行列の初期値をpointwiseに指定します.([initializers](../initializers.md)を参照)
- __bias_initializer__: バイアスベクトルの初期値を指定します.([initializers](../initializers.md)を参照)
- __depthwise_regularizer__: 重み行列に対し,"depthwise"に適用させるRegularizerを指定します.([ regularizer](../regularizers.md)を参照)
- __pointwise_regularizer__: 重み行列に対し,"pointwise"に適用させるRegularizerを指定します.([ regularizer](../regularizers.md)を参照)
- __bias_regularizer__: バイアスベクトルに適用させるRegularizerを指定します.([regularizer](../regularizers.md)を参照)
- __activity_regularizer__: 出力テンソルに適用させるRegularizerを指定します.([regularizer](../regularizers.md)を参照)
- __depthwise_constraint__: カーネル行列に対し,"depthwise"に適用させるConstraintを指定します.([constraint](../constraints.md)を参照)
- __pointwise_constraint__: カーネル行列に対し,"pointwise"に適用させるConstraintを指定します.([constraint](../constraints.md)を参照)
- __bias_constraint__: バイアスベクトルに適用させるConstraintを指定します.([constraint](../constraints.md)を参照)
__入力のshape__
data_format='channels_first'の場合,
`(batch_size, channels, rows, cols)`の4階テンソル.
data_format='channels_last'の場合,
`(batch_size, rows, cols, channels)`の4階テンソルになります.
__出力のshape__
data_format='channels_first'の場合,
`(batch, channels, rows, cols)`の4階テンソル.
data_format='channels_last'の場合,
`(batch, rows, cols, channels)`の4階テンソルになります.`rows`と`cols`値はパディングにより変わっている可能性があります.
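__例__
参考のための簡単な使用例です(原文にはない追加例で,`depth_multiplier`などの値は説明用です).
```python
from keras.models import Sequential
from keras.layers import SeparableConv2D
model = Sequential()
model.add(SeparableConv2D(64, (3, 3), depth_multiplier=2, activation='relu',
                          input_shape=(64, 64, 3)))
# now model.output_shape == (None, 62, 62, 64)
```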
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/convolutional.py#L596)</span>
### Conv2DTranspose
```python
keras.layers.Conv2DTranspose(filters, kernel_size, strides=(1, 1), padding='valid', data_format=None, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None)
```
transposed畳み込みレイヤー(Deconvolutionとも呼ばれます).
一般的に,transposed畳み込み演算は通常の畳み込みに対して逆の演算を行いたい時に使われます.つまり,なんらかの畳み込み演算の出力を,接続パターンを保ちながら入力の形に変換する層です.
このレイヤーをモデルの第一層に使うときはキーワード引数`input_shape`
(整数のタプル,サンプル軸を含まない)を指定してください.
例えば`data_format="channels_last"`のとき,128x128 RGB画像では`input_shape=(128, 128, 3)`となります.
__引数__
- __filters__: 整数で,出力空間の次元(つまり畳み込みにおける出力フィルタの数).
- __kernel_size__: 整数か2つの整数からなるタプル/リストで,2次元の畳み込みウィンドウの幅と高さを指定します. 単一の整数の場合は全ての次元に対して同じ値を指定します.
- __strides__: 整数か2つの整数からなるタプル/リストで畳み込みの縦と横のストライドをそれぞれ指定できます.単一の整数の場合は幅と高さが同様のストライドになります.strides value != 1とすると`dilation_rate` value != 1と指定できません.
- __padding__: `"valid"`か`"same"`のどちらかを指定します.
- __data_format__: 文字列で,`"channels_last"`(デフォルト)か`"channels_first"`のどちらかを指定します.これは入力における次元の順序です. `"channels_last"`の場合,入力のshapeは`"(batch, height, width, channels)"`となり,`"channels_first"`の場合は`"(batch, channels, height, width)"`となります.デフォルトはKerasの設定ファイル`~/.keras/keras.json`の`image_data_format`の値です.一度も値を変更していなければ,"channels_last"になります.
- __dilation_rate__: 整数か2つの整数からなるタプル/リストで,dilated convolutionで使われる膨張率を指定します.
現在,`dilation_rate` value != 1 とすると,strides value != 1を指定することはできません.
- __activation__: 使用する活性化関数の名前([activations](../activations.md)を参照),
何も指定しなければ,活性化は一切適用されません(つまり"線形"活性`a(x) = x`).
- __use_bias__: 真理値で,バイアスベクトルを加えるかどうかを指定します.
- __kernel_initializer__: `kernel`の重み行列の初期値を指定します.([initializers](../initializers.md)を参照)
- __bias_initializer__: バイアスベクトルの初期値を指定します.([initializers](../initializers.md)を参照)
- __kernel_regularizer__: `kernel`の重み行列に適用させるRegularizerを指定します.([regularizer](../regularizers.md)を参照)
- __bias_regularizer__: バイアスベクトルに適用させるRegularizer関数を指定します.([regularizer](../regularizers.md)を参照)
- __activity_regularizer__: 出力テンソルに適用させるRegularizer関数を指定します.([regularizer](../regularizers.md)を参照)
- __kernel_constraint__: カーネルの行列に適用させるConstraint関数を指定します.([Constraint](../constraints.md)を参照)
- __bias_constraint__: バイアスベクトルに適用させるConstraint関数を指定します.([Constraint](../constraints.md)を参照)
__入力のshape__
data_format='channels_first'の場合,
`(batch, channels, rows, cols)`の4階テンソル.
data_format='channels_last'の場合,
`(batch, rows, cols, channels)`の4階テンソルになります.
__出力のshape__
data_format='channels_first'の場合,
`(batch, filters, new_rows, new_cols)`の4階テンソル.
data_format='channels_last'の場合,
`(batch, new_rows, new_cols, channels)`の4階テンソルになります.`rows`と`cols`値はパディングにより変わっている可能性があります.
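__例__
参考として追加した使用例です(原文には含まれません).`padding='same'`と`strides=(2, 2)`によって空間方向の大きさが2倍になることを示しています.
```python
from keras.models import Sequential
from keras.layers import Conv2DTranspose
model = Sequential()
model.add(Conv2DTranspose(16, (3, 3), strides=(2, 2), padding='same',
                          input_shape=(32, 32, 8)))
# now model.output_shape == (None, 64, 64, 16)
```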
__参考文献__
- [A guide to convolution arithmetic for deep learning](https://arxiv.org/abs/1603.07285v1)
- [Deconvolutional Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf)
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/convolutional.py#L471)</span>
### Conv3D
```python
keras.layers.Conv3D(filters, kernel_size, strides=(1, 1, 1), padding='valid', data_format=None, dilation_rate=(1, 1, 1), activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None)
```
3次元入力をフィルターする畳み込み演算(例えば高さを含めた空間の畳込み).
このレイヤーは畳み込みカーネルを生成します.これはレイヤーの入力を畳み込んで,出力のテンソルを作ります.
`use_bias`をTrueにすると,バイアスベクトルが出力に加えられます.`activation`が`None`でない場合,指定した活性化関数が出力に適用されます.
このレイヤーをモデルの第一層に使うときはキーワード引数`input_shape`
(整数のタプル,サンプル軸を含まない)を指定してください.
例えば`data_format="channels_last"`の場合,シングルチャネルの128x128x128の立体は`input_shape=(128, 128, 128, 1)`です.
__引数__
- __filters__: 整数で,出力空間の次元(つまり畳み込みにおける出力フィルタの数).
- __kernel_size__: 整数か3つの整数からなるタプル/リストで,3次元の畳み込みウィンドウの幅と高さを指定します. 単一の整数の場合は全ての次元に対して同じ値を指定します.
- __strides__: 整数か3つの整数からなるタプル/リストで畳み込みの縦と横のストライドをそれぞれ指定できます.単一の整数の場合は幅と高さが同様のストライドになります.strides value != 1とすると`dilation_rate` value != 1と指定できません.
- __padding__: `"valid"`か`"same"`のどちらかを指定します.
- __data_format__: 文字列で,`"channels_last"`(デフォルト)か`"channels_first"`のどちらかを指定します.これは入力における次元の順序です. `"channels_last"`の場合,入力のshapeは`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`となり,`"channels_first"`の場合は`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`となります.デフォルトはKerasの設定ファイル`~/.keras/keras.json`の`image_data_format`の値です.一度も値を変更していなければ,"channels_last"になります.
- __dilation_rate__: 整数か3つの整数からなるタプル/リストで,dilated convolutionで使われる膨張率を指定します.
現在,`dilation_rate` value != 1 とすると,strides value != 1を指定することはできません.
- __activation__: 使用する活性化関数の名前([activations](../activations.md)を参照),
何も指定しなければ,活性化は一切適用されません(つまり"線形"活性`a(x) = x`).
- __use_bias__: 真理値で,バイアスベクトルを加えるかどうかを指定します.
- __kernel_initializer__: `kernel`の重み行列の初期値を指定します.([initializers](../initializers.md)を参照)
- __bias_initializer__: バイアスベクトルの初期値を指定します.([initializers](../initializers.md)を参照)
- __kernel_regularizer__: `kernel`の重み行列に適用させるRegularizerを指定します.([regularizer](../regularizers.md)を参照)
- __bias_regularizer__: バイアスベクトルに適用させるRegularizer関数を指定します.([regularizer](../regularizers.md)を参照)
- __activity_regularizer__: 出力テンソルに適用させるRegularizer関数を指定します.([regularizer](../regularizers.md)を参照)
- __kernel_constraint__: カーネルの行列に適用させるConstraint関数を指定します.([Constraint](../constraints.md)を参照)
- __bias_constraint__: バイアスベクトルに適用させるConstraint関数を指定します.([Constraint](../constraints.md)を参照)
__入力のshape__
data_format='channels_first'の場合,
`(samples, channels, conv_dim1, conv_dim2, conv_dim3)`の5階テンソル.
data_format='channels_last'の場合,
`(samples, conv_dim1, conv_dim2, conv_dim3, channels)`の5階テンソルになります.
__出力のshape__
data_format='channels_first'の場合,
`(samples, channels, conv_dim1, conv_dim2, conv_dim3)`の5階テンソル.
data_format='channels_last'の場合,
`(samples, conv_dim1, conv_dim2, conv_dim3, channels)`の5階テンソルになります.`conv_dim1`,`conv_dim2`,`conv_dim3`の値はパディングにより変わっている可能性があります.
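__例__
参考のための簡単な使用例です(原文にはない追加例).shapeは`padding='valid'`を仮定しています.
```python
from keras.models import Sequential
from keras.layers import Conv3D
model = Sequential()
# シングルチャネルの32x32x32の立体データを16個のフィルタで畳み込みます
model.add(Conv3D(16, (3, 3, 3), activation='relu',
                 input_shape=(32, 32, 32, 1)))
# now model.output_shape == (None, 30, 30, 30, 16)
```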
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/convolutional.py#L2159)</span>
### Cropping1D
```python
keras.layers.Cropping1D(cropping=(1, 1))
```
一次元の入力をクロップする(切り落とす)層(例えば時間の配列).
クロップは時間軸に沿って実行されます(axis 1).
__引数__
- __cropping__: 整数か長さ2の整数のタプルで,クロップしたいユニットの数を指定します.2つの整数からなるタプルで指定した場合は,それぞれ両側からクロップします.1つの整数の場合は,両側から同数のユニットをクロップします.
__入力のShape__
`(batch, axis_to_crop, features)`の3階テンソル.
__出力のShape__
`(batch, cropped_axis, features)`の3階テンソル.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/convolutional.py#L2204)</span>
### Cropping2D
```python
keras.layers.Cropping2D(cropping=((0, 0), (0, 0)), data_format=None)
```
二次元の入力をクロップする(切り落とす)層(例えば画像).
クロップは幅と高さに対して実行されます.
__引数__
- __cropping__: 整数, タプル(2つの整数), タプル(2つの整数)のタプルのいずれか.
- 整数: 幅と高さに対称なクロップが実行されます.
- タプル(2つの整数): 幅と高さでそれぞれ対称なクロップが実行されます.
`(symmetric_height_crop, symmetric_width_crop)`
- タプルのタプル: 四辺それぞれにクロップが実行されます.
    `((top_crop, bottom_crop), (left_crop, right_crop))`
- __data_format__: `"channels_last"`(デフォルト)か`"channels_first"`を指定します.
`"channels_last"`の場合,入力は`"(batch, height, width, channels)"`.
`"channels_first"`の場合は`"(batch, channels, height, width)"`となります.
デフォルトはKerasの設定ファイル`~/.keras/keras.json`の`image_data_format`の値です.一度も値を変更していなければ,"channels_last"になります.
__入力のShape__
`data_format`が`"channels_last"`の場合,`(batch, rows, cols, channels)`.
`"channels_first"`の場合,`(batch, channels, rows, cols)`の4階テンソル.
__出力のShape__
`data_format`が`"channels_last"`の場合,`(batch, cropped_rows, cropped_cols, channels)`.
`"channels_first"`の場合,`(batch, channels, cropped_rows, cropped_cols)`の4階テンソル.
__例__
```python
# Crop the input 2D images or feature maps
model = Sequential()
model.add(Cropping2D(cropping=((2, 2), (4, 4)),
input_shape=(28, 28, 3)))
# now model.output_shape == (None, 24, 20, 3)
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Cropping2D(cropping=((2, 2), (2, 2))))
# now model.output_shape == (None, 20, 16, 64)
```
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/convolutional.py#L2347)</span>
### Cropping3D
```python
keras.layers.Cropping3D(cropping=((1, 1), (1, 1), (1, 1)), data_format=None)
```
三次元の入力をクロップする(切り落とす)層(例えば空間や時空間).
__引数__
- __cropping__: 整数,タプル(3つの整数),タプル(2つの整数)を3つ並べたタプル,のいずれか.
- 整数: 3つの軸に対して対称なクロップが実行されます.
- タプル(3つの整数): 3つの軸に対して,それぞれ対称なクロップが実行されます.
`(symmetric_dim1_crop, symmetric_dim2_crop, symmetric_dim3_crop)`
- タプルのタプル: 六面それぞれにクロップが実行されます.
`((left_dim1_crop, right_dim1_crop), (left_dim2_crop, right_dim2_crop), (left_dim3_crop, right_dim3_crop))`
- __data_format__: `"channels_last"`(デフォルト)か`"channels_first"`を指定します.
`"channels_last"`の場合,入力は`"(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)"`.`"channels_first"`の場合は`"(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)"`となります.デフォルトはKerasの設定ファイル`~/.keras/keras.json`の`image_data_format`の値です.一度も値を変更していなければ,"channels_last"になります.
__入力のShape__
`data_format`が`"channels_last"`の場合,`(batch, first_cropped_axis, second_cropped_axis, third_cropped_axis, depth)`.`"channels_first"`の場合,`(batch, depth, first_cropped_axis, second_cropped_axis, third_cropped_axis)`の5階テンソル.
__出力のShape__
`data_format`が`"channels_last"`の場合,`(batch, first_axis_to_crop, second_axis_to_crop, third_axis_to_crop, depth)`. `"channels_first"`の場合,`(batch, depth, first_axis_to_crop, second_axis_to_crop, third_axis_to_crop)`の5階テンソル.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/convolutional.py#L1711)</span>
### UpSampling1D
```python
keras.layers.UpSampling1D(size=2)
```
1次元の入力に対するアップサンプリングレイヤー.
時間軸方向にそれぞれの時間ステップを`size`回繰り返します.
__引数__
- __size__: 整数.upsampling係数.
__入力のshape__
`(batch, steps, features)`の3階テンソル.
__出力のshape__
`(batch, upsampled_steps, features)`の3階テンソル.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/convolutional.py#L1746)</span>
### UpSampling2D
```python
keras.layers.UpSampling2D(size=(2, 2), data_format=None)
```
2次元の入力に対するアップサンプリングレイヤー.
データの行と列をそれぞれsize[0]及びsize[1]回繰り返します.
__引数__
- __size__: 整数か2つの整数のタプル.行と列のupsampling係数.
- __data_format__: `"channels_last"`(デフォルト)か`"channels_first"`を指定します. `"channels_last"`の場合,入力のshapeは`"(batch, height, width, channels)"`となり,`"channels_first"`の場合は`"(batch, channels, height, width)"`となります.デフォルトはKerasの設定ファイル`~/.keras/keras.json`の`image_data_format`の値です.一度も値を変更していなければ,"channels_last"になります.
__入力のshape__
data_format='channels_last'の場合,
`(batch, rows, cols, channels)`の4階テンソル.
data_format='channels_first'の場合,
`(batch, channels, rows, cols)`の4階テンソルになります.
__出力のshape__
data_format='channels_first'の場合,
`(batch, channels, upsampled_rows, upsampled_cols)`の4階テンソル.
data_format='channels_last'の場合,
`(batch, upsampled_rows, upsampled_cols, channels)`の4階テンソルになります.
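__例__
参考として追加した簡単な例です(原文には含まれません).
```python
from keras.models import Sequential
from keras.layers import UpSampling2D
model = Sequential()
model.add(UpSampling2D(size=(2, 2), input_shape=(32, 32, 3)))
# now model.output_shape == (None, 64, 64, 3)
```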
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/convolutional.py#L1815)</span>
### UpSampling3D
```python
keras.layers.UpSampling3D(size=(2, 2, 2), data_format=None)
```
3次元の入力に対するアップサンプリングレイヤー.
データの1番目,2番目,3番目の次元をそれぞれsize[0],size[1],size[2]だけ繰り返す.
__引数__
- __size__: 3つの整数のタプル.dim1,dim2,dim3のアップサンプリング係数.
- __data_format__: `"channels_last"`(デフォルト)か`"channels_first"`を指定します. `"channels_last"`の場合,入力のshapeは`"(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)"`となり,`"channels_first"`の場合は`"(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)"`となります.デフォルトはKerasの設定ファイル`~/.keras/keras.json`の`image_data_format`の値です.一度も値を変更していなければ,"channels_last"になります.
__入力のshape__
data_format='channels_last'の場合,
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`の5階テンソル.
data_format='channels_first'の場合,
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`の5階テンソルになります.
__出力のshape__
data_format='channels_last'の場合,
`(batch, upsampled_dim1, upsampled_dim2, upsampled_dim3, channels)`の5階テンソル.
data_format='channels_first'の場合,
`(batch, channels, upsampled_dim1, upsampled_dim2, upsampled_dim3)`の5階テンソルになります.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/convolutional.py#L1889)</span>
### ZeroPadding1D
```python
keras.layers.ZeroPadding1D(padding=1)
```
一次元入力(例,時系列)に対するゼロパディングレイヤー.
__引数__
- __padding__: 整数,タプル(2つの整数)のいずれか.
- 整数: パディング次元(axis 1)の始めと終わりにいくつのゼロを加えるか.
- (長さ2の)整数のタプル: 始めと終わりにそれぞれいくつのゼロを加えるか.
`(left_pad, right_pad)`
__入力のshape__
`(batch, axis_to_pad, features)`の3階テンソル.
__出力のshape__
`(batch, padded_axis, features)`の3階テンソル.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/convolutional.py#L1931)</span>
### ZeroPadding2D
```python
keras.layers.ZeroPadding2D(padding=(1, 1), data_format=None)
```
Zero-padding layer for 2D inputs (e.g. images).
This layer can add rows and columns of zeros at the top, bottom, left and right side of an image tensor.
__Arguments__
- __padding__: Integer, tuple of 2 integers, or tuple of 2 tuples of 2 integers.
    - Integer: the same symmetric padding is applied to height and width.
    - Tuple of 2 integers: symmetric padding applied to height and width respectively,
      `(symmetric_height_pad, symmetric_width_pad)`.
    - Tuple of 2 tuples of 2 integers: padding for each of the four sides,
      `((top_pad, bottom_pad), (left_pad, right_pad))`.
- __data_format__: One of `"channels_last"` (default) or `"channels_first"`.
  With `"channels_last"`, the inputs have shape `"(batch, height, width, channels)"`;
  with `"channels_first"`, they have shape `"(batch, channels, height, width)"`.
  It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last".
__Input shape__
4D tensor with shape `(batch, rows, cols, channels)` if `data_format` is `"channels_last"`,
or `(batch, channels, rows, cols)` if `"channels_first"`.
__Output shape__
4D tensor with shape `(batch, padded_rows, padded_cols, channels)` if `data_format` is `"channels_last"`,
or `(batch, channels, padded_rows, padded_cols)` if `"channels_first"`.
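A minimal sketch of the four-sided padding form (added for illustration):
```python
from keras.models import Sequential
from keras.layers import ZeroPadding2D

model = Sequential()
# ((top, bottom), (left, right)) = ((1, 2), (3, 4)) on a 28x28 single-channel input.
model.add(ZeroPadding2D(padding=((1, 2), (3, 4)), input_shape=(28, 28, 1)))
print(model.output_shape)  # (None, 31, 35, 1)
```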
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/convolutional.py#L2041)</span>
### ZeroPadding3D
```python
keras.layers.ZeroPadding3D(padding=(1, 1, 1), data_format=None)
```
Zero-padding layer for 3D data (spatial or spatio-temporal).
__Arguments__
- __padding__: Integer, tuple of 3 integers, or tuple of 3 tuples of 2 integers.
    - Integer: the same symmetric padding is applied to all three axes.
    - Tuple of 3 integers: symmetric padding applied to each of the three axes,
      `(symmetric_dim1_pad, symmetric_dim2_pad, symmetric_dim3_pad)`.
    - Tuple of tuples: padding for each of the six faces,
      `((left_dim1_pad, right_dim1_pad), (left_dim2_pad, right_dim2_pad), (left_dim3_pad, right_dim3_pad))`.
- __data_format__: One of `"channels_last"` (default) or `"channels_first"`.
  With `"channels_last"`, the inputs have shape `"(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)"`; with `"channels_first"`, they have shape `"(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)"`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last".
__Input shape__
5D tensor with shape `(batch, first_axis_to_pad, second_axis_to_pad, third_axis_to_pad, depth)` if `data_format` is `"channels_last"`, or `(batch, depth, first_axis_to_pad, second_axis_to_pad, third_axis_to_pad)` if `"channels_first"`.
__Output shape__
5D tensor with shape `(batch, first_padded_axis, second_padded_axis, third_padded_axis, depth)` if `data_format` is `"channels_last"`, or `(batch, depth, first_padded_axis, second_padded_axis, third_padded_axis)` if `"channels_first"`.
## Usage of optimizers
An optimizer is one of the parameters required for compiling a Keras model:
```python
from keras import optimizers
model = Sequential()
model.add(Dense(64, kernel_initializer='uniform', input_shape=(10,)))
model.add(Activation('tanh'))
model.add(Activation('softmax'))
sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='mean_squared_error', optimizer=sgd)
```
As in the example above, you can either pass an optimizer instance to `model.compile()`, or you can pass the optimizer by its name. In the latter case, the default parameters of the optimizer will be used.
```python
# Pass the optimizer by name: default parameters will be used
model.compile(loss='mean_squared_error', optimizer='sgd')
```
----
## Parameters common to all Keras optimizers
The parameters `clipnorm` and `clipvalue` can be used with all optimizers to control gradient clipping:
```python
from keras import optimizers
# All parameter gradients will be clipped to
# a maximum norm of 1.
sgd = optimizers.SGD(lr=0.01, clipnorm=1.)
```
```python
from keras import optimizers
# All parameter gradients will be clipped to
# a maximum value of 0.5 and
# a minimum value of -0.5.
sgd = optimizers.SGD(lr=0.01, clipvalue=0.5)
```
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/optimizers.py#L130)</span>
### SGD
```python
keras.optimizers.SGD(lr=0.01, momentum=0.0, decay=0.0, nesterov=False)
```
Stochastic gradient descent optimizer.
Stochastic gradient descent with support for momentum, learning rate decay, and Nesterov momentum.
__Arguments__
- __lr__: float >= 0. Learning rate.
- __momentum__: float >= 0. Momentum.
- __decay__: float >= 0. Learning rate decay over each update (see the sketch below).
- __nesterov__: boolean. Whether to apply Nesterov momentum.
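As a rough illustration of how `decay` shrinks the learning rate over updates (the exact schedule is an assumption here and may differ slightly between Keras versions; time-based decay is approximately `lr / (1 + decay * iterations)`):
```python
from keras import optimizers

sgd = optimizers.SGD(lr=0.1, momentum=0.9, decay=1e-4, nesterov=True)

# Approximate effective learning rate after a given number of updates.
for iterations in [0, 1000, 10000]:
    print(iterations, 0.1 / (1 + 1e-4 * iterations))
```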
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/optimizers.py#L193)</span>
### RMSprop
```python
keras.optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=None, decay=0.0)
```
RMSProp optimizer.
It is recommended to leave the parameters of this optimizer at their default values
(except the learning rate, which can be freely tuned).
RMSProp is usually a good choice for recurrent neural networks.
__Arguments__
- __lr__: float >= 0. Learning rate.
- __rho__: float >= 0.
- __epsilon__: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
- __decay__: float >= 0. Learning rate decay over each update.
__References__
- [rmsprop: Divide the gradient by a running average of its recent magnitude](http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf)
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/optimizers.py#L260)</span>
### Adagrad
```python
keras.optimizers.Adagrad(lr=0.01, epsilon=None, decay=0.0)
```
Adagrad optimizer.
It is recommended to leave the parameters of this optimizer at their default values.
__Arguments__
- __lr__: float >= 0. Learning rate.
- __epsilon__: float >= 0. If `None`, defaults to `K.epsilon()`.
- __decay__: float >= 0. Learning rate decay over each update.
__References__
- [Adaptive Subgradient Methods for Online Learning and Stochastic Optimization](http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf)
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/optimizers.py#L319)</span>
### Adadelta
```python
keras.optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=None, decay=0.0)
```
Adadelta optimizer.
It is recommended to leave the parameters of this optimizer at their default values.
__Arguments__
- __lr__: float >= 0. Learning rate.
  It is recommended to leave it at the default value.
- __rho__: float >= 0.
- __epsilon__: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
- __decay__: float >= 0. Learning rate decay over each update.
__References__
- [Adadelta - an adaptive learning rate method](http://arxiv.org/abs/1212.5701)
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/optimizers.py#L392)</span>
### Adam
```python
keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
```
Adam optimizer.
Default parameters follow those provided in the original paper.
__Arguments__
- __lr__: float >= 0. Learning rate.
- __beta_1__: float, 0 < beta < 1. Generally close to 1.
- __beta_2__: float, 0 < beta < 1. Generally close to 1.
- __epsilon__: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
- __decay__: float >= 0. Learning rate decay over each update.
- __amsgrad__: boolean. Whether to apply the AMSGrad variant of this algorithm from the paper "On the Convergence of Adam and Beyond".
__References__
- [Adam - A Method for Stochastic Optimization](http://arxiv.org/abs/1412.6980v8)
- [On the Convergence of Adam and Beyond](https://openreview.net/forum?id=ryQu7f-RZ)
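An illustrative compile snippet (not part of the original documentation) enabling the AMSGrad variant:
```python
from keras import optimizers
from keras.models import Sequential
from keras.layers import Dense

model = Sequential()
model.add(Dense(10, activation='softmax', input_shape=(20,)))

# amsgrad=True switches to the AMSGrad variant from the second reference.
adam = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, amsgrad=True)
model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
```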
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/optimizers.py#L481)</span>
### Adamax
```python
keras.optimizers.Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0)
```
Adamax optimizer from Section 7 of the Adam paper.
It is a variant of Adam based on the infinity norm. Default parameters follow those provided in the paper.
__Arguments__
- __lr__: float >= 0. Learning rate.
- __beta_1/beta_2__: floats, 0 < beta < 1. Generally close to 1.
- __epsilon__: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
- __decay__: float >= 0. Learning rate decay over each update.
__References__
- [Adam - A Method for Stochastic Optimization](http://arxiv.org/abs/1412.6980v8)
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/optimizers.py#L558)</span>
### Nadam
```python
keras.optimizers.Nadam(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=None, schedule_decay=0.004)
```
Nesterov Adam optimizer.
Much like Adam is essentially RMSprop with momentum, Nadam is RMSprop with Nesterov momentum.
Default parameters follow those provided in the paper.
It is recommended to leave the parameters of this optimizer at their default values.
__Arguments__
- __lr__: float >= 0. Learning rate.
- __beta_1/beta_2__: floats, 0 < beta < 1. Generally close to 1.
- __epsilon__: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
__References__
- [Nadam report](http://cs229.stanford.edu/proj2015/054_report.pdf)
- [On the importance of initialization and momentum in deep learning](http://www.cs.toronto.edu/~fritz/absps/momentum.pdf)
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/optimizers.py#L644)</span>
### TFOptimizer
```python
keras.optimizers.TFOptimizer(optimizer)
```
Wrapper class for native TensorFlow optimizers.
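A minimal sketch (added for illustration, assuming a TensorFlow 1.x backend where `tf.train` optimizers are available):
```python
import tensorflow as tf
from keras import optimizers
from keras.models import Sequential
from keras.layers import Dense

model = Sequential()
model.add(Dense(1, input_shape=(10,)))

# Wrap a native TensorFlow optimizer so it can be passed to model.compile().
tf_opt = optimizers.TFOptimizer(tf.train.AdamOptimizer(learning_rate=0.001))
model.compile(loss='mean_squared_error', optimizer=tf_opt)
```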
# Keras: The Python Deep Learning library
<img src='https://s3.amazonaws.com/keras.io/img/keras-logo-2018-large-1200.png' style='max-width: 600px; width: 90%;' />
## Welcome to Keras.
Keras is a high-level neural networks API, written in Python and capable of running on top of [TensorFlow](https://github.com/tensorflow/tensorflow), [CNTK](https://github.com/Microsoft/cntk), or [Theano](https://github.com/Theano/Theano). It was developed with a focus on enabling fast experimentation. *Being able to go from idea to result with the least possible delay is key to doing good research.*
Use Keras if you need a deep learning library that:
- Allows for easy and fast prototyping (through user friendliness, modularity, and extensibility).
- Supports both convolutional networks and recurrent networks, as well as combinations of the two.
- Runs seamlessly on CPU and GPU.
Read the documentation at [Keras.io](https://keras.io).
Keras is compatible with: __Python 2.7-3.6__.
------------------
## Multi-backend Keras and tf.keras
**Users of multi-backend Keras with the TensorFlow backend are encouraged to switch to `tf.keras` in TensorFlow 2.0.** `tf.keras` is maintained with better integration with TensorFlow features (eager execution, distribution support, and more).
Keras 2.2.5 was the last release of Keras implementing the 2.2.* API. It was the last release to support TensorFlow 1 (as well as Theano and CNTK).
The current release is Keras 2.3.0, which makes significant API changes and adds support for TensorFlow 2.0. The 2.3.0 release will be the last major release of multi-backend Keras. Multi-backend Keras has been superseded by `tf.keras`.
Bugs present in multi-backend Keras will only be fixed as minor releases until April 2020.
For more information about the future of Keras, see [the Keras meeting notes](http://bit.ly/keras-meeting-notes).
------------------
## Guiding principles
- __User friendliness.__ Keras is an API designed for human beings, not machines. It puts user experience front and center. Keras follows best practices for reducing cognitive load: it offers consistent and simple APIs, it minimizes the number of user actions required for common use cases, and it provides clear and actionable feedback upon user error.
- __Modularity.__ A model is understood as a sequence or a graph of standalone, fully configurable modules that can be plugged together with as few restrictions as possible. In particular, neural layers, cost functions, optimizers, initialization schemes, activation functions, and regularization schemes are all standalone modules that you can combine to create new models.
- __Easy extensibility.__ New modules are simple to add (as new classes and functions), and existing modules provide ample examples. Being able to easily create new modules allows for total expressiveness, making Keras suitable for advanced research.
- __Work with Python.__ There are no separate model configuration files in a declarative format. Models are described in Python code, which is compact, easier to debug, and allows for ease of extensibility.
------------------
## Getting started: 30 seconds to Keras
The core data structure of Keras is a __model__, a way to organize layers. The simplest type of model is the [`Sequential`](https://keras.io/getting-started/sequential-model-guide) model, a linear stack of layers. For more complex architectures, you should use the [Keras functional API](https://keras.io/getting-started/functional-api-guide), which allows you to build arbitrary graphs of layers.
Here is the `Sequential` model:
```python
from keras.models import Sequential
model = Sequential()
```
Stacking layers is as easy as `.add()`:
```python
from keras.layers import Dense
model.add(Dense(units=64, activation='relu', input_dim=100))
model.add(Dense(units=10, activation='softmax'))
```
Once your model looks good, configure its learning process with `.compile()`:
```python
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
```
If you need to, you can further configure your optimizer. A core principle of Keras is to make things reasonably simple, while allowing the user to be fully in control when they need to be (the ultimate control being the easy extensibility of the source code).
```python
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.SGD(lr=0.01, momentum=0.9, nesterov=True))
```
You can now iterate on your training data in batches:
```python
# x_train and y_train are Numpy arrays --just like in the Scikit-Learn API.
model.fit(x_train, y_train, epochs=5, batch_size=32)
```
Alternatively, you can feed batches to your model manually:
```python
model.train_on_batch(x_batch, y_batch)
```
Evaluate your performance in one line:
```python
loss_and_metrics = model.evaluate(x_test, y_test, batch_size=128)
```
Or generate predictions on new data:
```python
classes = model.predict(x_test, batch_size=128)
```
Building a question answering system, an image classification model, a neural Turing machine, or any other model is just as fast. The ideas behind deep learning are simple, so why should their implementation be painful?
For a more in-depth tutorial about Keras, you can check out:
- [Getting started with the Sequential model](https://keras.io/getting-started/sequential-model-guide)
- [Getting started with the functional API](https://keras.io/getting-started/functional-api-guide)
In the [examples folder](https://github.com/keras-team/keras/tree/master/examples) of the repository, you will find more advanced models: question-answering with memory networks, text generation with stacked LSTMs, and more.
------------------
## Installation
Before installing Keras, please install one of its backend engines: TensorFlow, Theano, or CNTK. We recommend the TensorFlow backend.
- [TensorFlow installation instructions](https://www.tensorflow.org/install/).
- [Theano installation instructions](http://deeplearning.net/software/theano/install.html#install).
- [CNTK installation instructions](https://docs.microsoft.com/en-us/cognitive-toolkit/setup-cntk-on-your-machine).
You may also consider installing the following **optional dependencies**:
- [cuDNN](https://docs.nvidia.com/deeplearning/sdk/cudnn-install/) (recommended if you plan on running Keras on GPU).
- HDF5 and [h5py](http://docs.h5py.org/en/latest/build.html) (required if you plan on saving Keras models to disk).
- [graphviz](https://graphviz.gitlab.io/download/) and [pydot](https://github.com/erocarrera/pydot) (used by the [visualization utilities](https://keras.io/visualization/) to plot model graphs).
Then, you can install Keras itself. There are two ways to install Keras:
- **Install Keras from PyPI (recommended):**
Note: these installation steps assume that you are on a Linux or Mac environment. If you are on Windows, you will need to remove sudo from the commands below.
```sh
sudo pip install keras
```
If you are using a virtualenv, you may want to avoid using sudo:
```sh
pip install keras
```
- **Alternatively: install Keras from the GitHub source:**
First, clone Keras using `git`:
```sh
git clone https://github.com/keras-team/keras.git
```
Then, `cd` to the Keras folder and run the install command:
```sh
cd keras
sudo python setup.py install
```
------------------
## Configuring your Keras backend
By default, Keras uses TensorFlow as its tensor manipulation library. Follow [these instructions](https://keras.io/backend/) to configure the Keras backend.
------------------
## Support
You can ask questions and join the development discussion:
- On the [Keras Google group](https://groups.google.com/forum/#!forum/keras-users).
- On the [Keras Slack channel](https://kerasteam.slack.com). Use [this link](https://keras-slack-autojoin.herokuapp.com/) to request an invitation to the channel.
You can also post **bug reports and feature requests** (only) in [GitHub issues](https://github.com/keras-team/keras/issues). Make sure to read [our guidelines](https://github.com/keras-team/keras/blob/master/CONTRIBUTING.md) first.
------------------
## Why this name, Keras?
Keras (κέρας) means _horn_ in Greek. It is a reference to a literary image from ancient Greek and Latin literature, first found in the _Odyssey_, where dream spirits (_Oneiroi_, singular _Oneiros_) are divided between those who deceive men with false visions, arriving to Earth through a gate of ivory, and those who announce a future that will come to pass, arriving through a gate of horn. It's a play on the words κέρας (horn) / κραίνω (fulfill), and ἐλέφας (ivory) / ἐλεφαίρομαι (deceive).
Keras was initially developed as part of the research effort of project ONEIROS (Open-ended Neuro-Electronic Intelligent Robot Operating System).
>_"Oneiroi are beyond our unravelling --who can be sure what tale they tell? Not all that men look for comes to pass. Two gates there are that give passage to fleeting Oneiroi; one is made of horn, one of ivory. The Oneiroi that pass through sawn ivory are deceitful, bearing a message that will not be fulfilled; those that come out through polished horn have truth behind them, to be accomplished for men who see them."_ Homer, Odyssey 19. 562 ff (Shewring translation).
------------------
## Usage of metrics
A metric is a function that is used to judge the performance of your model. Metric functions are supplied via the `metrics` parameter when a model is compiled.
```python
model.compile(loss='mean_squared_error',
optimizer='sgd',
metrics=['mae', 'acc'])
```
```python
from keras import metrics
model.compile(loss='mean_squared_error',
optimizer='sgd',
metrics=[metrics.mae, metrics.categorical_accuracy])
```
A metric function is similar to a [loss function](/losses), except that the results from evaluating a metric are not used when training the model. Any loss function may be used as a metric.
You can either pass the name of an existing metric, or pass a Theano/TensorFlow symbolic function (see [Custom metrics](#custom-metrics)).
#### Arguments
- __y_true__: True labels. Theano/TensorFlow tensor.
- __y_pred__: Predictions. Theano/TensorFlow tensor of the same shape as y_true.
#### Returns
Single tensor value representing the mean of the output array across all
datapoints.
----
## Available metrics
### binary_accuracy
```python
keras.metrics.binary_accuracy(y_true, y_pred)
```
----
### categorical_accuracy
```python
keras.metrics.categorical_accuracy(y_true, y_pred)
```
----
### sparse_categorical_accuracy
```python
keras.metrics.sparse_categorical_accuracy(y_true, y_pred)
```
----
### top_k_categorical_accuracy
```python
keras.metrics.top_k_categorical_accuracy(y_true, y_pred, k=5)
```
----
### sparse_top_k_categorical_accuracy
```python
keras.metrics.sparse_top_k_categorical_accuracy(y_true, y_pred, k=5)
```
In addition to the metrics above, you may use any of the loss functions described in the [loss functions](/losses) page as a metric.
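For example, `top_k_categorical_accuracy` can be used with a custom `k` by wrapping it (an illustrative sketch, not from the original documentation; setting `__name__` only gives the metric a readable name in training logs):
```python
import functools
from keras import metrics
from keras.models import Sequential
from keras.layers import Dense

top_3_accuracy = functools.partial(metrics.top_k_categorical_accuracy, k=3)
top_3_accuracy.__name__ = 'top_3_accuracy'

model = Sequential()
model.add(Dense(10, activation='softmax', input_shape=(20,)))
model.compile(optimizer='sgd',
              loss='categorical_crossentropy',
              metrics=[top_3_accuracy])
```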
----
## Custom metrics
Custom metrics can be passed at the compilation step.
The function would need to take `(y_true, y_pred)` as arguments and return
a single tensor value.
```python
import keras.backend as K
def mean_pred(y_true, y_pred):
return K.mean(y_pred)
model.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['accuracy', mean_pred])
```
# Datasets
## CIFAR10 small image classification
Dataset of 50,000 32x32 color training images, labeled over 10 categories, and 10,000 test images.
### Usage:
```python
from keras.datasets import cifar10
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
```
- __Returns:__
    - 2 tuples:
        - __x_train, x_test__: uint8 array of RGB image data with shape (num_samples, 3, 32, 32) or (num_samples, 32, 32, 3), based on the `image_data_format` backend setting of either `channels_first` or `channels_last` respectively.
        - __y_train, y_test__: uint8 array of category labels (integers in range 0-9) with shape (num_samples, 1).
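For example (an illustrative snippet, assuming the default `channels_last` setting):
```python
from keras.datasets import cifar10

(x_train, y_train), (x_test, y_test) = cifar10.load_data()
print(x_train.shape, y_train.shape)  # (50000, 32, 32, 3) (50000, 1)
print(x_test.shape, y_test.shape)    # (10000, 32, 32, 3) (10000, 1)
```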
---
## CIFAR100 small image classification
Dataset of 50,000 32x32 color training images, labeled over 100 categories, and 10,000 test images.
### Usage:
```python
from keras.datasets import cifar100
(x_train, y_train), (x_test, y_test) = cifar100.load_data(label_mode='fine')
```
- __Returns:__
    - 2 tuples:
        - __x_train, x_test__: uint8 array of RGB image data with shape (num_samples, 3, 32, 32) or (num_samples, 32, 32, 3), based on the `image_data_format` backend setting of either `channels_first` or `channels_last` respectively.
        - __y_train, y_test__: uint8 array of category labels with shape (num_samples, 1).
- __Arguments:__
    - __label_mode__: "fine" or "coarse".
---
## IMDB Movie reviews sentiment classification
Dataset of 25,000 movie reviews from IMDB, labeled by sentiment (positive/negative). Reviews have been preprocessed, and each review is encoded as a [sequence](preprocessing/sequence.md) of word indexes (integers). For convenience, words are indexed by overall frequency in the dataset, so that for instance the integer 3 encodes the 3rd most frequent word in the data. This allows for quick filtering operations such as: "only consider the top 10,000 most common words, but eliminate the top 20 most common words".
As a convention, 0 does not stand for a specific word, but instead is used to encode any unknown word.
### Usage
```python
from keras.datasets import imdb
(x_train, y_train), (x_test, y_test) = imdb.load_data(path="imdb.npz",
num_words=None,
skip_top=0,
maxlen=None,
seed=113,
start_char=1,
oov_char=2,
index_from=3)
```
- __Returns:__
    - 2 tuples:
        - __x_train, x_test__: list of sequences, which are lists of word indexes. If the `num_words` argument was specified, the maximum possible index value is `num_words-1`. If the `maxlen` argument was specified, the largest possible sequence length is `maxlen`.
        - __y_train, y_test__: list of integer labels (1 or 0).
- __Arguments:__
    - __path__: if you do not have the data locally (at `'~/.keras/datasets/' + path`), it will be downloaded to this location.
    - __num_words__: integer or None. Top most frequent words to consider. Any less frequent word will appear as the `oov_char` value in the sequence data.
    - __skip_top__: integer. Top most frequent words to ignore (they will appear as the `oov_char` value in the sequence data).
    - __maxlen__: integer. Maximum sequence length. Any longer sequence will be truncated.
    - __seed__: integer. Seed for reproducible data shuffling.
    - __start_char__: integer. The start of a sequence will be marked with this character. Set to 1 because 0 is usually the padding character.
    - __oov_char__: integer. Words that were cut out because of the `num_words` or `skip_top` limits will be replaced with this character.
    - __index_from__: integer. Index actual words with this index and higher (see the decoding sketch below).
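As an illustrative sketch of how these index conventions fit together (indices 0, 1 and 2 are reserved for padding, start and out-of-vocabulary markers because of the default `start_char`, `oov_char` and `index_from` values):
```python
from keras.datasets import imdb

(x_train, y_train), _ = imdb.load_data(num_words=10000)
word_index = imdb.get_word_index()
reverse_word_index = {index: word for word, index in word_index.items()}

# Shift indices back by index_from (3) when decoding; unknown indices map to '?'.
decoded_review = ' '.join(reverse_word_index.get(i - 3, '?') for i in x_train[0])
print(decoded_review[:200])
```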
---
## Reuters newswire topics classification
Dataset of 11,228 newswires from Reuters, labeled over 46 topics. As with the IMDB dataset, each wire is encoded as a sequence of word indexes (same conventions).
### Usage:
```python
from keras.datasets import reuters
(x_train, y_train), (x_test, y_test) = reuters.load_data(path="reuters.npz",
num_words=None,
skip_top=0,
maxlen=None,
test_split=0.2,
seed=113,
start_char=1,
oov_char=2,
index_from=3)
```
The specifications are the same as those of the IMDB dataset, with the addition of:
- __test_split__: float. Fraction of the dataset to be used as test data.
This dataset also makes available the word index used for encoding the sequences:
```python
word_index = reuters.get_word_index(path="reuters_word_index.json")
```
- __Returns:__ A dictionary where keys are words (str) and values are indexes (integer). E.g. `word_index["giraffe"]` might return `1234`.
- __Arguments:__
    - __path__: if you do not have the index file locally (at `'~/.keras/datasets/' + path`), it will be downloaded to this location.
---
## MNIST database of handwritten digits
Dataset of 60,000 28x28 grayscale images of the 10 digits for training, along with a test set of 10,000 images.
### Usage:
```python
from keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
```
- __Returns:__
    - 2 tuples:
        - __x_train, x_test__: uint8 array of grayscale image data with shape (num_samples, 28, 28).
        - __y_train, y_test__: uint8 array of digit labels (integers in range 0-9) with shape (num_samples,).
- __Arguments:__
    - __path__: if you do not have the index file locally (at `'~/.keras/datasets/' + path`), it will be downloaded to this location.
---
## Fashion-MNIST database of fashion articles
Dataset of 60,000 28x28 grayscale images of 10 fashion categories for training, along with a test set of 10,000 images. This dataset can be used as a drop-in replacement for MNIST. The class labels are:
| Label | Description | Chinese |
| --- | --- | --- |
| 0 | T-shirt/top | T恤/上衣 |
| 1 | Trouser | 裤子 |
| 2 | Pullover | 套头衫 |
| 3 | Dress | 连衣裙 |
| 4 | Coat | 外套 |
| 5 | Sandal | 凉鞋 |
| 6 | Shirt | 衬衫 |
| 7 | Sneaker | 运动鞋 |
| 8 | Bag | 背包 |
| 9 | Ankle boot | 短靴 |
### Usage:
```python
from keras.datasets import fashion_mnist
(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
```
- __Returns:__
    - 2 tuples:
        - __x_train, x_test__: uint8 array of grayscale image data with shape (num_samples, 28, 28).
        - __y_train, y_test__: uint8 array of labels (integers in range 0-9) with shape (num_samples,).
---
## Boston housing price regression dataset
Dataset taken from the StatLib library which is maintained at Carnegie Mellon University.
Samples contain 13 attributes of houses at different locations around the Boston suburbs in the late 1970s.
Targets are the median values of the houses at a location (in k$).
### Usage:
```python
from keras.datasets import boston_housing
(x_train, y_train), (x_test, y_test) = boston_housing.load_data()
```
- __Arguments:__
    - __path__: path where to cache the dataset locally
      (relative to ~/.keras/datasets).
    - __seed__: random seed for shuffling the data before computing the test split.
    - __test_split__: fraction of the data to reserve as the test set.
- __Returns:__ Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
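Because the 13 attributes have very different scales, a common follow-up step is feature-wise standardization (an illustrative sketch, not part of the original documentation):
```python
from keras.datasets import boston_housing

(x_train, y_train), (x_test, y_test) = boston_housing.load_data()

# Standardize using statistics computed on the training set only.
mean = x_train.mean(axis=0)
std = x_train.std(axis=0)
x_train = (x_train - mean) / std
x_test = (x_test - mean) / std
print(x_train.shape, y_train.shape)  # e.g. (404, 13) (404,) with the default test_split
```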
# This example demonstrates the use of fasttext for text classification
Based on Joulin et al.'s paper:
[Bags of Tricks for Efficient Text Classification](https://arxiv.org/abs/1607.01759)
Results on the IMDB dataset with uni-gram and bi-gram embeddings:
Embedding|Accuracy, 5 epochs|Speed (s/epoch)|Hardware
:--------|-----------------:|----:|:-------
Uni-gram | 0.8813| 8|i7 CPU
Bi-gram | 0.9056| 2|GTx 980M GPU
```python
from __future__ import print_function
import numpy as np
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Embedding
from keras.layers import GlobalAveragePooling1D
from keras.datasets import imdb
def create_ngram_set(input_list, ngram_value=2):
"""
    Extract a set of n-grams from a list of integers.
>>> create_ngram_set([1, 4, 9, 4, 1, 4], ngram_value=2)
{(4, 9), (4, 1), (1, 4), (9, 4)}
>>> create_ngram_set([1, 4, 9, 4, 1, 4], ngram_value=3)
[(1, 4, 9), (4, 9, 4), (9, 4, 1), (4, 1, 4)]
"""
return set(zip(*[input_list[i:] for i in range(ngram_value)]))
def add_ngram(sequences, token_indice, ngram_range=2):
"""
    Augment the input list of lists (sequences) by appending n-gram values.
    Example: adding bi-grams
>>> sequences = [[1, 3, 4, 5], [1, 3, 7, 9, 2]]
>>> token_indice = {(1, 3): 1337, (9, 2): 42, (4, 5): 2017}
>>> add_ngram(sequences, token_indice, ngram_range=2)
[[1, 3, 4, 5, 1337, 2017], [1, 3, 7, 9, 2, 1337, 42]]
    Example: adding tri-grams
>>> sequences = [[1, 3, 4, 5], [1, 3, 7, 9, 2]]
>>> token_indice = {(1, 3): 1337, (9, 2): 42, (4, 5): 2017, (7, 9, 2): 2018}
>>> add_ngram(sequences, token_indice, ngram_range=3)
[[1, 3, 4, 5, 1337, 2017], [1, 3, 7, 9, 2, 1337, 42, 2018]]
"""
new_sequences = []
for input_list in sequences:
new_list = input_list[:]
for ngram_value in range(2, ngram_range + 1):
for i in range(len(new_list) - ngram_value + 1):
ngram = tuple(new_list[i:i + ngram_value])
if ngram in token_indice:
new_list.append(token_indice[ngram])
new_sequences.append(new_list)
return new_sequences
# Set parameters:
# ngram_range = 2 will add bi-gram features
ngram_range = 1
max_features = 20000
maxlen = 400
batch_size = 32
embedding_dims = 50
epochs = 5
print('Loading data...')
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
print(len(x_train), 'train sequences')
print(len(x_test), 'test sequences')
print('Average train sequence length: {}'.format(
np.mean(list(map(len, x_train)), dtype=int)))
print('Average test sequence length: {}'.format(
np.mean(list(map(len, x_test)), dtype=int)))
if ngram_range > 1:
print('Adding {}-gram features'.format(ngram_range))
    # Create a set of unique n-grams from the training set.
ngram_set = set()
for input_list in x_train:
for i in range(2, ngram_range + 1):
set_of_ngram = create_ngram_set(input_list, ngram_value=i)
ngram_set.update(set_of_ngram)
    # Dictionary mapping n-gram tokens to unique integers.
    # Integer values are greater than max_features
    # in order to avoid collision with existing features.
start_index = max_features + 1
token_indice = {v: k + start_index for k, v in enumerate(ngram_set)}
indice_token = {token_indice[k]: k for k in token_indice}
    # max_features is the highest integer that can be found in the dataset.
max_features = np.max(list(indice_token.keys())) + 1
    # Augment x_train and x_test with n-gram features
x_train = add_ngram(x_train, token_indice, ngram_range)
x_test = add_ngram(x_test, token_indice, ngram_range)
print('Average train sequence length: {}'.format(
np.mean(list(map(len, x_train)), dtype=int)))
print('Average test sequence length: {}'.format(
np.mean(list(map(len, x_test)), dtype=int)))
print('Pad sequences (samples x time)')
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
print('Build model...')
model = Sequential()
# We start off with an efficient embedding layer which maps our vocab indices into embedding_dims dimensions
model.add(Embedding(max_features,
embedding_dims,
input_length=maxlen))
# We add a GlobalAveragePooling1D, which will average the embeddings of all words in the document
model.add(GlobalAveragePooling1D())
# We project onto a single-unit output layer, and squash it with a sigmoid:
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test))
# Transfer learning toy example.
1 - Train a simple convnet on the MNIST dataset for the first 5 digits [0..4].
2 - Freeze convolutional layers and fine-tune dense layers for the classification of digits [5..9].
The first-five-digit classifier reaches 99.8% test accuracy after 5 epochs, and 99.2% for the last five digits after transfer + fine-tuning.
```python
from __future__ import print_function
import datetime
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
now = datetime.datetime.now
batch_size = 128
num_classes = 5
epochs = 5
# input image dimensions
img_rows, img_cols = 28, 28
# number of convolutional filters to use
filters = 32
# size of pooling area for max pooling
pool_size = 2
# convolution kernel size
kernel_size = 3
if K.image_data_format() == 'channels_first':
input_shape = (1, img_rows, img_cols)
else:
input_shape = (img_rows, img_cols, 1)
def train_model(model, train, test, num_classes):
x_train = train[0].reshape((train[0].shape[0],) + input_shape)
x_test = test[0].reshape((test[0].shape[0],) + input_shape)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
    # convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(train[1], num_classes)
y_test = keras.utils.to_categorical(test[1], num_classes)
model.compile(loss='categorical_crossentropy',
optimizer='adadelta',
metrics=['accuracy'])
t = now()
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_test, y_test))
print('Training time: %s' % (now() - t))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# create two datasets: one with digits below 5 and one with digits 5 and above
x_train_lt5 = x_train[y_train < 5]
y_train_lt5 = y_train[y_train < 5]
x_test_lt5 = x_test[y_test < 5]
y_test_lt5 = y_test[y_test < 5]
x_train_gte5 = x_train[y_train >= 5]
y_train_gte5 = y_train[y_train >= 5] - 5
x_test_gte5 = x_test[y_test >= 5]
y_test_gte5 = y_test[y_test >= 5] - 5
# define two groups of layers: feature (convolutions) and classification (dense)
feature_layers = [
Conv2D(filters, kernel_size,
padding='valid',
input_shape=input_shape),
Activation('relu'),
Conv2D(filters, kernel_size),
Activation('relu'),
MaxPooling2D(pool_size=pool_size),
Dropout(0.25),
Flatten(),
]
classification_layers = [
Dense(128),
Activation('relu'),
Dropout(0.5),
Dense(num_classes),
Activation('softmax')
]
# create the complete model
model = Sequential(feature_layers + classification_layers)
# train the model for 5-digit classification [0..4]
train_model(model,
(x_train_lt5, y_train_lt5),
(x_test_lt5, y_test_lt5), num_classes)
# freeze feature layers and rebuild the model
for l in feature_layers:
l.trainable = False
# transfer: train dense layers for the new classification task [5..9]
train_model(model,
(x_train_gte5, y_train_gte5),
(x_test_gte5, y_test_gte5), num_classes)
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/wrappers.py#L116)</span>
### TimeDistributed
```python
keras.layers.TimeDistributed(layer)
```
This wrapper applies a layer to every temporal slice of an input.
The input should be at least 3D, and the dimension at index one will be considered to be the temporal dimension.
Consider a batch of 32 samples,
where each sample is a sequence of 10 vectors of 16 dimensions.
The batch input shape is then `(32, 10, 16)`,
and the `input_shape`, which does not include the samples dimension, is `(10, 16)`.
You can then use `TimeDistributed` to apply a `Dense` layer
to each of the 10 timesteps, independently:
```python
# as the first layer in a model
model = Sequential()
model.add(TimeDistributed(Dense(8), input_shape=(10, 16)))
# now model.output_shape == (None, 10, 8)
```
The output will then have shape `(32, 10, 8)`.
In subsequent layers, there is no need for the `input_shape`:
```python
model.add(TimeDistributed(Dense(32)))
# now model.output_shape == (None, 10, 32)
```
The output will then have shape `(32, 10, 32)`.
`TimeDistributed` can be used with arbitrary layers, not just `Dense`,
for instance with a `Conv2D` layer:
```python
model = Sequential()
model.add(TimeDistributed(Conv2D(64, (3, 3)),
input_shape=(10, 299, 299, 3)))
```
__Arguments__
- __layer__: a layer instance.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/wrappers.py#L335)</span>
### Bidirectional
```python
keras.layers.Bidirectional(layer, merge_mode='concat', weights=None)
```
Bidirectional wrapper for RNNs, which runs the sequence both forwards and backwards.
__Arguments__
- __layer__: `Recurrent` instance.
- __merge_mode__: Mode by which outputs of the forward and backward RNNs will be combined.
  One of {'sum', 'mul', 'concat', 'ave', None}.
  If None, the outputs will not be combined; they will be returned as a list.
- __weights__: Initial weights to load in the Bidirectional model.
__Raises__
- __ValueError__: In case of an invalid `merge_mode` argument.
__Example__
```python
model = Sequential()
model.add(Bidirectional(LSTM(10, return_sequences=True),
input_shape=(5, 10)))
model.add(Bidirectional(LSTM(10)))
model.add(Dense(5))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
```
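A sketch of the effect of `merge_mode` (added for illustration):
```python
from keras.models import Sequential
from keras.layers import Bidirectional, LSTM, Dense

model = Sequential()
# With merge_mode='sum' the forward and backward outputs are added element-wise,
# so the wrapper outputs 10 features instead of 20 with the default 'concat'.
model.add(Bidirectional(LSTM(10), merge_mode='sum', input_shape=(5, 10)))
model.add(Dense(5, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
print(model.output_shape)  # (None, 5)
```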
.git
.gitignore
tmp/*
sources/*
site/*
scripts/upload.py
*.pyc
templates/examples/generative/*
templates/examples/nlp/*
templates/examples/vision/*
templates/examples/structured_data/*
templates/examples/keras_recipes/*
"""
Title: GPT2 Text Generation with KerasNLP
Author: Chen Qian
Date created: 04/17/2023
Last modified: 04/17/2023
Description: Use KerasNLP GPT2 model and `samplers` to do text generation.
Accelerator: GPU
"""
"""
In this tutorial, you will learn to use [KerasNLP](https://keras.io/keras_nlp/) to load a
pre-trained Large Language Model (LLM) - [GPT-2 model](https://openai.com/research/better-language-models)
(originally invented by OpenAI), finetune it to a specific text style, and
generate text based on users' input (also known as prompt). You will also learn
how GPT2 adapts quickly to non-English languages, such as Chinese.
"""
"""
## Before we begin
Colab offers different kinds of runtimes. Make sure to go to **Runtime ->
Change runtime type** and choose the GPU Hardware Accelerator runtime
(which should have >12G host RAM and ~15G GPU RAM) since you will finetune the
GPT-2 model. Running this tutorial on CPU runtime will take hours.
"""
"""
## Install KerasNLP, Choose Backend and Import Dependencies
This example uses [Keras Core](https://keras.io/keras_core/) to work in any of
`"tensorflow"`, `"jax"` or `"torch"`. Support for Keras Core is baked into
KerasNLP, simply change the `"KERAS_BACKEND"` environment variable to select
the backend of your choice. We select the JAX backend below.
"""
"""shell
pip install git+https://github.com/keras-team/keras-nlp.git -q
"""
import os
os.environ["KERAS_BACKEND"] = "jax" # or "tensorflow" or "torch"
import keras_nlp
import keras
import tensorflow as tf
import time
keras.mixed_precision.set_global_policy("mixed_float16")
"""
## Introduction to Generative Large Language Models (LLMs)
Large language models (LLMs) are a type of machine learning models that are
trained on a large corpus of text data to generate outputs for various natural
language processing (NLP) tasks, such as text generation, question answering,
and machine translation.
Generative LLMs are typically based on deep learning neural networks, such as
the [Transformer architecture](https://arxiv.org/abs/1706.03762) invented by
Google researchers in 2017, and are trained on massive amounts of text data,
often involving billions of words. These models, such as Google [LaMDA](https://blog.google/technology/ai/lamda/)
and [PaLM](https://ai.googleblog.com/2022/04/pathways-language-model-palm-scaling-to.html),
are trained with a large dataset from various data sources which allows them to
generate output for many tasks. The core of Generative LLMs is predicting the
next word in a sentence, often referred as **Causal LM Pretraining**. In this
way LLMs can generate coherent text based on user prompts. For a more
pedagogical discussion on language models, you can refer to the
[Stanford CS324 LLM class](https://stanford-cs324.github.io/winter2022/lectures/introduction/).
"""
"""
## Introduction to KerasNLP
Large Language Models are complex to build and expensive to train from scratch.
Luckily there are pretrained LLMs available for use right away. [KerasNLP](https://keras.io/keras_nlp/)
provides a large number of pre-trained checkpoints that allow you to experiment
with SOTA models without needing to train them yourself.
KerasNLP is a natural language processing library that supports users through
their entire development cycle. KerasNLP offers both pretrained models and
modularized building blocks, so developers could easily reuse pretrained models
or stack their own LLM.
In a nutshell, for generative LLM, KerasNLP offers:
- Pretrained models with `generate()` method, e.g.,
`keras_nlp.models.GPT2CausalLM` and `keras_nlp.models.OPTCausalLM`.
- Sampler class that implements generation algorithms such as Top-K, Beam and
contrastive search. These samplers can be used to generate text with
custom models.
"""
"""
## Load a pre-trained GPT-2 model and generate some text
KerasNLP provides a number of pre-trained models, such as [Google
Bert](https://ai.googleblog.com/2018/11/open-sourcing-bert-state-of-art-pre.html)
and [GPT-2](https://openai.com/research/better-language-models). You can see
the list of models available in the [KerasNLP repository](https://github.com/keras-team/keras-nlp/tree/master/keras_nlp/models).
It's very easy to load the GPT-2 model as you can see below:
"""
# To speed up training and generation, we use preprocessor of length 128
# instead of full length 1024.
preprocessor = keras_nlp.models.GPT2CausalLMPreprocessor.from_preset(
"gpt2_base_en",
sequence_length=128,
)
gpt2_lm = keras_nlp.models.GPT2CausalLM.from_preset(
"gpt2_base_en", preprocessor=preprocessor
)
"""
Once the model is loaded, you can use it to generate some text right away. Run
the cells below to give it a try. It's as simple as calling a single function
*generate()*:
"""
start = time.time()
output = gpt2_lm.generate("My trip to Yosemite was", max_length=200)
print("\nGPT-2 output:")
print(output)
end = time.time()
print(f"TOTAL TIME ELAPSED: {end - start:.2f}s")
"""
Try another one:
"""
start = time.time()
output = gpt2_lm.generate("That Italian restaurant is", max_length=200)
print("\nGPT-2 output:")
print(output)
end = time.time()
print(f"TOTAL TIME ELAPSED: {end - start:.2f}s")
"""
Notice how much faster the second call is. This is because the computational
graph is [XLA compiled](https://www.tensorflow.org/xla) in the 1st run and
re-used in the 2nd behind the scenes.
The quality of the generated text looks OK, but we can improve it via
fine-tuning.
"""
"""
## More on the GPT-2 model from KerasNLP
Next up, we will actually fine-tune the model to update its parameters, but
before we do, let's take a look at the full set of tools we have for working
with GPT2.
The code of GPT2 can be found
[here](https://github.com/keras-team/keras-nlp/blob/master/keras_nlp/models/gpt2/).
Conceptually the `GPT2CausalLM` can be hierarchically broken down into several
modules in KerasNLP, all of which have a *from_preset()* function that loads a
pretrained model:
- `keras_nlp.models.GPT2Tokenizer`: The tokenizer used by GPT2 model, which is a
[byte-pair encoder](https://huggingface.co/course/chapter6/5?fw=pt).
- `keras_nlp.models.GPT2CausalLMPreprocessor`: the preprocessor used by GPT2
causal LM training. It does the tokenization along with other preprocessing
works such as creating the label and appending the end token.
- `keras_nlp.models.GPT2Backbone`: the GPT2 model, which is a stack of
  `keras_nlp.layers.TransformerDecoder`. This is usually just referred to as
`GPT2`.
- `keras_nlp.models.GPT2CausalLM`: wraps `GPT2Backbone`, it multiplies the
output of `GPT2Backbone` by embedding matrix to generate logits over
vocab tokens.
"""
"""
## Finetune on Reddit dataset
Now that you have some knowledge of the GPT-2 model from KerasNLP, you can take one
step further to finetune the model so that it generates text in a specific
style, short or long, strict or casual. In this tutorial, we will use the Reddit
dataset as an example.
"""
import tensorflow_datasets as tfds
reddit_ds = tfds.load("reddit_tifu", split="train", as_supervised=True)
"""
Let's take a look inside sample data from the reddit TensorFlow Dataset. There
are two features:
- **__document__**: text of the post.
- **__title__**: the title.
"""
for document, title in reddit_ds:
print(document.numpy())
print(title.numpy())
break
"""
In our case, we are performing next word prediction in a language model, so we
only need the 'document' feature.
"""
train_ds = (
reddit_ds.map(lambda document, _: document)
.batch(32)
.cache()
.prefetch(tf.data.AUTOTUNE)
)
"""
Now you can finetune the model using the familiar *fit()* function. Note that
`preprocessor` will be automatically called inside the `fit` method since
`GPT2CausalLM` is a `keras_nlp.models.Task` instance.
This step takes quite a bit of GPU memory and a long time if we were to train
it all the way to a fully trained state. Here we just use part of the dataset
for demo purposes.
"""
train_ds = train_ds.take(500)
num_epochs = 1
# Linearly decaying learning rate.
learning_rate = keras.optimizers.schedules.PolynomialDecay(
5e-5,
decay_steps=train_ds.cardinality() * num_epochs,
end_learning_rate=0.0,
)
loss = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
gpt2_lm.compile(
optimizer=keras.optimizers.Adam(learning_rate),
loss=loss,
weighted_metrics=["accuracy"],
)
gpt2_lm.fit(train_ds, epochs=num_epochs)
"""
After fine-tuning is finished, you can again generate text using the same
*generate()* function. This time, the text will be closer to Reddit writing
style, and the generated length will be close to our preset length in the
training set.
"""
start = time.time()
output = gpt2_lm.generate("I like basketball", max_length=200)
print("\nGPT-2 output:")
print(output)
end = time.time()
print(f"TOTAL TIME ELAPSED: {end - start:.2f}s")
"""
## Into the Sampling Method
In KerasNLP, we offer a few sampling methods, e.g., contrastive search,
Top-K and beam sampling. By default, our `GPT2CausalLM` uses Top-k search, but
you can choose your own sampling method.
Much like optimizer and activations, there are two ways to specify your custom
sampler:
- Use a string identifier, such as `"greedy"`; in this case, the default
  configuration is used.
- Pass a `keras_nlp.samplers.Sampler` instance; this way, you can use a custom
  configuration.
"""
# Use a string identifier.
gpt2_lm.compile(sampler="top_k")
output = gpt2_lm.generate("I like basketball", max_length=200)
print("\nGPT-2 output:")
print(output)
# Use a `Sampler` instance. Note that `GreedySampler` tends to repeat itself.
greedy_sampler = keras_nlp.samplers.GreedySampler()
gpt2_lm.compile(sampler=greedy_sampler)
output = gpt2_lm.generate("I like basketball", max_length=200)
print("\nGPT-2 output:")
print(output)
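# As a sketch of passing custom arguments to a sampler instance (the `k`
# argument of `TopKSampler` shown here is an assumption and may differ
# between KerasNLP versions):
top_k_sampler = keras_nlp.samplers.TopKSampler(k=10)
gpt2_lm.compile(sampler=top_k_sampler)
output = gpt2_lm.generate("I like basketball", max_length=200)
print("\nGPT-2 output:")
print(output)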
"""
For more details on KerasNLP `Sampler` class, you can check the code
[here](https://github.com/keras-team/keras-nlp/tree/master/keras_nlp/samplers).
"""
"""
## Finetune on Chinese Poem Dataset
We can also finetune GPT2 on non-English datasets. For readers knowing Chinese,
this part illustrates how to fine-tune GPT2 on a Chinese poem dataset to teach our
model to become a poet!
Because GPT2 uses byte-pair encoder, and the original pretraining dataset
contains some Chinese characters, we can use the original vocab to finetune on
Chinese dataset.
"""
"""shell
# Load chinese poetry dataset.
git clone https://github.com/chinese-poetry/chinese-poetry.git
"""
"""
Load text from the json file. We only use《全唐诗》for demo purposes.
"""
import os
import json
poem_collection = []
for file in os.listdir("chinese-poetry/全唐诗"):
if ".json" not in file or "poet" not in file:
continue
full_filename = "%s/%s" % ("chinese-poetry/全唐诗", file)
with open(full_filename, "r") as f:
content = json.load(f)
poem_collection.extend(content)
paragraphs = ["".join(data["paragraphs"]) for data in poem_collection]
"""
Let's take a look at sample data.
"""
print(paragraphs[0])
"""
Similar as Reddit example, we convert to TF dataset, and only use partial data
to train.
"""
train_ds = (
tf.data.Dataset.from_tensor_slices(paragraphs)
.batch(16)
.cache()
.prefetch(tf.data.AUTOTUNE)
)
# Running through the whole dataset takes long, only take `500` and run 1
# epochs for demo purposes.
train_ds = train_ds.take(500)
num_epochs = 1
learning_rate = keras.optimizers.schedules.PolynomialDecay(
5e-4,
decay_steps=train_ds.cardinality() * num_epochs,
end_learning_rate=0.0,
)
loss = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
gpt2_lm.compile(
optimizer=keras.optimizers.Adam(learning_rate),
loss=loss,
weighted_metrics=["accuracy"],
)
gpt2_lm.fit(train_ds, epochs=num_epochs)
"""
Let's check the result!
"""
output = gpt2_lm.generate("昨夜雨疏风骤", max_length=200)
print(output)
"""
Not bad 😀
"""
<jupyter_start><jupyter_text>GauGAN for conditional image generation**Author:** [Soumik Rakshit](https://github.com/soumik12345), [Sayak Paul](https://twitter.com/RisingSayak)**Date created:** 2021/12/26**Last modified:** 2022/01/03**Description:** Implementing a GauGAN for conditional image generation. IntroductionIn this example, we present an implementation of the GauGAN architecture proposed in[Semantic Image Synthesis with Spatially-Adaptive Normalization](https://arxiv.org/abs/1903.07291).Briefly, GauGAN uses a Generative Adversarial Network (GAN) to generate realistic imagesthat are conditioned on cue images and segmentation maps, as shown below([image source](https://nvlabs.github.io/SPADE/)):The main components of a GauGAN are:- **SPADE (aka spatially-adaptive normalization)** : The authors of GauGAN argue that themore conventional normalization layers (such as[Batch Normalization](https://arxiv.org/abs/1502.03167))destroy the semantic information obtained from segmentation maps thatare provided as inputs. To address this problem, the authors introduce SPADE, anormalization layer particularly suitable for learning affine parameters (scale and bias)that are spatially adaptive. This is done by learning different sets of scaling andbias parameters for each semantic label.- **Variational encoder**: Inspired by[Variational Autoencoders](https://arxiv.org/abs/1312.6114), GauGAN uses avariational formulation wherein an encoder learns the mean and variance of anormal (Gaussian) distribution from the cue images. This is where GauGAN gets its namefrom. The generator of GauGAN takes as inputs the latents sampled from the Gaussiandistribution as well as the one-hot encoded semantic segmentation label maps. The cueimages act as style images that guide the generator to stylistic generation. Thisvariational formulation helps GauGAN achieve image diversity as well as fidelity.- **Multi-scale patch discriminator** : Inspired by the[PatchGAN](https://paperswithcode.com/method/patchgan) model,GauGAN uses a discriminator that assesses a given image on a patch basisand produces an averaged score.As we proceed with the example, we will discuss each of the differentcomponents in further detail.For a thorough review of GauGAN, please refer to[this article](https://blog.paperspace.com/nvidia-gaugan-introduction/).We also encourage you to check out[the official GauGAN website](https://nvlabs.github.io/SPADE/), whichhas many creative applications of GauGAN. This example assumes that the reader is alreadyfamiliar with the fundamental concepts of GANs. If you need a refresher, the followingresources might be useful:* [Chapter on GANs](https://livebook.manning.com/book/deep-learning-with-python/chapter-8)from the Deep Learning with Python book by François Chollet.* GAN implementations on keras.io: * [Data efficient GANs](https://keras.io/examples/generative/gan_ada) * [CycleGAN](https://keras.io/examples/generative/cyclegan) * [Conditional GAN](https://keras.io/examples/generative/conditional_gan) Data collectionWe will be using the[Facades dataset](https://cmp.felk.cvut.cz/~tylecr1/facade/)for training our GauGAN model. Let's first download it.<jupyter_code>!wget https://drive.google.com/uc?id=1q4FEjQg1YSb4mPx2VdxL7LXKYu3voTMj -O facades_data.zip
!unzip -q facades_data.zip<jupyter_output><empty_output><jupyter_text>Imports<jupyter_code>import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import keras
from keras import ops
from keras import layers
from glob import glob<jupyter_output><empty_output><jupyter_text>Data splitting<jupyter_code>PATH = "./facades_data/"
SPLIT = 0.2
files = glob(PATH + "*.jpg")
np.random.shuffle(files)
split_index = int(len(files) * (1 - SPLIT))
train_files = files[:split_index]
val_files = files[split_index:]
print(f"Total samples: {len(files)}.")
print(f"Total training samples: {len(train_files)}.")
print(f"Total validation samples: {len(val_files)}.")<jupyter_output><empty_output><jupyter_text>Data loader<jupyter_code>BATCH_SIZE = 4
IMG_HEIGHT = IMG_WIDTH = 256
NUM_CLASSES = 12
AUTOTUNE = tf.data.AUTOTUNE
def load(image_files, batch_size, is_train=True):
def _random_crop(
segmentation_map,
image,
labels,
crop_size=(IMG_HEIGHT, IMG_WIDTH),
):
crop_size = tf.convert_to_tensor(crop_size)
image_shape = tf.shape(image)[:2]
margins = image_shape - crop_size
y1 = tf.random.uniform(shape=(), maxval=margins[0], dtype=tf.int32)
x1 = tf.random.uniform(shape=(), maxval=margins[1], dtype=tf.int32)
y2 = y1 + crop_size[0]
x2 = x1 + crop_size[1]
cropped_images = []
images = [segmentation_map, image, labels]
for img in images:
cropped_images.append(img[y1:y2, x1:x2])
return cropped_images
def _load_data_tf(image_file, segmentation_map_file, label_file):
image = tf.image.decode_png(tf.io.read_file(image_file), channels=3)
segmentation_map = tf.image.decode_png(
tf.io.read_file(segmentation_map_file), channels=3
)
labels = tf.image.decode_bmp(tf.io.read_file(label_file), channels=0)
labels = tf.squeeze(labels)
image = tf.cast(image, tf.float32) / 127.5 - 1
segmentation_map = tf.cast(segmentation_map, tf.float32) / 127.5 - 1
return segmentation_map, image, labels
def _one_hot(segmentation_maps, real_images, labels):
labels = tf.one_hot(labels, NUM_CLASSES)
labels.set_shape((None, None, NUM_CLASSES))
return segmentation_maps, real_images, labels
segmentation_map_files = [
image_file.replace("images", "segmentation_map").replace("jpg", "png")
for image_file in image_files
]
label_files = [
image_file.replace("images", "segmentation_labels").replace("jpg", "bmp")
for image_file in image_files
]
dataset = tf.data.Dataset.from_tensor_slices(
(image_files, segmentation_map_files, label_files)
)
dataset = dataset.shuffle(batch_size * 10) if is_train else dataset
dataset = dataset.map(_load_data_tf, num_parallel_calls=AUTOTUNE)
dataset = dataset.map(_random_crop, num_parallel_calls=AUTOTUNE)
dataset = dataset.map(_one_hot, num_parallel_calls=AUTOTUNE)
dataset = dataset.batch(batch_size, drop_remainder=True)
return dataset
train_dataset = load(train_files, batch_size=BATCH_SIZE, is_train=True)
val_dataset = load(val_files, batch_size=BATCH_SIZE, is_train=False)<jupyter_output><empty_output><jupyter_text>Now, let's visualize a few samples from the training set.<jupyter_code>sample_train_batch = next(iter(train_dataset))
print(f"Segmentation map batch shape: {sample_train_batch[0].shape}.")
print(f"Image batch shape: {sample_train_batch[1].shape}.")
print(f"One-hot encoded label map shape: {sample_train_batch[2].shape}.")
# Plot a view samples from the training set.
for segmentation_map, real_image in zip(sample_train_batch[0], sample_train_batch[1]):
fig = plt.figure(figsize=(10, 10))
fig.add_subplot(1, 2, 1).set_title("Segmentation Map")
plt.imshow((segmentation_map + 1) / 2)
fig.add_subplot(1, 2, 2).set_title("Real Image")
plt.imshow((real_image + 1) / 2)
plt.show()<jupyter_output><empty_output><jupyter_text>Note that in the rest of this example, we use a couple of figures from the[original GauGAN paper](https://arxiv.org/abs/1903.07291) for convenience. Custom layersIn the following section, we implement the following layers:* SPADE* Residual block including SPADE* Gaussian sampler Some more notes on SPADE**SPatially-Adaptive (DE) normalization** or **SPADE** is a simple but effective layerfor synthesizing photorealistic images given an input semantic layout. Previous methodsfor conditional image generation from semantic input such asPix2Pix ([Isola et al.](https://arxiv.org/abs/1611.07004))or Pix2PixHD ([Wang et al.](https://arxiv.org/abs/1711.11585))directly feed the semantic layout as input to the deep network, which is then processedthrough stacks of convolution, normalization, and nonlinearity layers. This is oftensuboptimal as the normalization layers have a tendency to wash away semantic information.In SPADE, the segmentation mask is first projected onto an embedding space, and thenconvolved to produce the modulation parameters `γ` and `β`. Unlike prior conditionalnormalization methods, `γ` and `β` are not vectors, but tensors with spatial dimensions.The produced `γ` and `β` are multiplied and added to the normalized activationelement-wise. As the modulation parameters are adaptive to the input segmentation mask,SPADE is better suited for semantic image synthesis.<jupyter_code>class SPADE(layers.Layer):
def __init__(self, filters, epsilon=1e-5, **kwargs):
super().__init__(**kwargs)
self.epsilon = epsilon
self.conv = layers.Conv2D(128, 3, padding="same", activation="relu")
self.conv_gamma = layers.Conv2D(filters, 3, padding="same")
self.conv_beta = layers.Conv2D(filters, 3, padding="same")
def build(self, input_shape):
self.resize_shape = input_shape[1:3]
def call(self, input_tensor, raw_mask):
mask = ops.image.resize(raw_mask, self.resize_shape, interpolation="nearest")
x = self.conv(mask)
gamma = self.conv_gamma(x)
beta = self.conv_beta(x)
mean, var = ops.moments(input_tensor, axes=(0, 1, 2), keepdims=True)
std = ops.sqrt(var + self.epsilon)
normalized = (input_tensor - mean) / std
output = gamma * normalized + beta
return output
class ResBlock(layers.Layer):
def __init__(self, filters, **kwargs):
super().__init__(**kwargs)
self.filters = filters
def build(self, input_shape):
input_filter = input_shape[-1]
self.spade_1 = SPADE(input_filter)
self.spade_2 = SPADE(self.filters)
self.conv_1 = layers.Conv2D(self.filters, 3, padding="same")
self.conv_2 = layers.Conv2D(self.filters, 3, padding="same")
self.learned_skip = False
if self.filters != input_filter:
self.learned_skip = True
self.spade_3 = SPADE(input_filter)
self.conv_3 = layers.Conv2D(self.filters, 3, padding="same")
def call(self, input_tensor, mask):
x = self.spade_1(input_tensor, mask)
x = self.conv_1(keras.activations.leaky_relu(x, 0.2))
x = self.spade_2(x, mask)
x = self.conv_2(keras.activations.leaky_relu(x, 0.2))
skip = (
self.conv_3(
keras.activations.leaky_relu(self.spade_3(input_tensor, mask), 0.2)
)
if self.learned_skip
else input_tensor
)
output = skip + x
return output
class GaussianSampler(layers.Layer):
def __init__(self, batch_size, latent_dim, **kwargs):
super().__init__(**kwargs)
self.batch_size = batch_size
self.latent_dim = latent_dim
self.seed_generator = keras.random.SeedGenerator(1337)
def call(self, inputs):
means, variance = inputs
epsilon = keras.random.normal(
shape=(self.batch_size, self.latent_dim),
mean=0.0,
stddev=1.0,
seed=self.seed_generator,
)
samples = means + ops.exp(0.5 * variance) * epsilon
return samples<jupyter_output><empty_output><jupyter_text>Next, we implement the downsampling block for the encoder.<jupyter_code>def downsample(
channels,
kernels,
strides=2,
apply_norm=True,
apply_activation=True,
apply_dropout=False,
):
block = keras.Sequential()
block.add(
layers.Conv2D(
channels,
kernels,
strides=strides,
padding="same",
use_bias=False,
kernel_initializer=keras.initializers.GlorotNormal(),
)
)
if apply_norm:
block.add(layers.GroupNormalization(groups=-1))
if apply_activation:
block.add(layers.LeakyReLU(0.2))
if apply_dropout:
block.add(layers.Dropout(0.5))
return block<jupyter_output><empty_output><jupyter_text>The GauGAN encoder consists of a few downsampling blocks. It outputs the mean andvariance of a distribution.<jupyter_code>def build_encoder(image_shape, encoder_downsample_factor=64, latent_dim=256):
input_image = keras.Input(shape=image_shape)
x = downsample(encoder_downsample_factor, 3, apply_norm=False)(input_image)
x = downsample(2 * encoder_downsample_factor, 3)(x)
x = downsample(4 * encoder_downsample_factor, 3)(x)
x = downsample(8 * encoder_downsample_factor, 3)(x)
x = downsample(8 * encoder_downsample_factor, 3)(x)
x = layers.Flatten()(x)
mean = layers.Dense(latent_dim, name="mean")(x)
variance = layers.Dense(latent_dim, name="variance")(x)
return keras.Model(input_image, [mean, variance], name="encoder")<jupyter_output><empty_output><jupyter_text>Next, we implement the generator, which consists of the modified residual blocks andupsampling blocks. It takes latent vectors and one-hot encoded segmentation labels, andproduces new images.With SPADE, there is no need to feed the segmentation map to the first layer of thegenerator, since the latent inputs have enough structural information about the style wewant the generator to emulate. We also discard the encoder part of the generator, which iscommonly used in prior architectures. This results in a more lightweightgenerator network, which can also take a random vector as input, enabling a simple andnatural path to multi-modal synthesis.<jupyter_code>def build_generator(mask_shape, latent_dim=256):
latent = keras.Input(shape=(latent_dim,))
mask = keras.Input(shape=mask_shape)
x = layers.Dense(16384)(latent)
x = layers.Reshape((4, 4, 1024))(x)
x = ResBlock(filters=1024)(x, mask)
x = layers.UpSampling2D((2, 2))(x)
x = ResBlock(filters=1024)(x, mask)
x = layers.UpSampling2D((2, 2))(x)
x = ResBlock(filters=1024)(x, mask)
x = layers.UpSampling2D((2, 2))(x)
x = ResBlock(filters=512)(x, mask)
x = layers.UpSampling2D((2, 2))(x)
x = ResBlock(filters=256)(x, mask)
x = layers.UpSampling2D((2, 2))(x)
x = ResBlock(filters=128)(x, mask)
x = layers.UpSampling2D((2, 2))(x)
x = keras.activations.leaky_relu(x, 0.2)
output_image = keras.activations.tanh(layers.Conv2D(3, 4, padding="same")(x))
return keras.Model([latent, mask], output_image, name="generator")<jupyter_output><empty_output><jupyter_text>The discriminator takes a segmentation map and an image and concatenates them. Itthen predicts if patches of the concatenated image are real or fake.<jupyter_code>def build_discriminator(image_shape, downsample_factor=64):
input_image_A = keras.Input(shape=image_shape, name="discriminator_image_A")
input_image_B = keras.Input(shape=image_shape, name="discriminator_image_B")
x = layers.Concatenate()([input_image_A, input_image_B])
x1 = downsample(downsample_factor, 4, apply_norm=False)(x)
x2 = downsample(2 * downsample_factor, 4)(x1)
x3 = downsample(4 * downsample_factor, 4)(x2)
x4 = downsample(8 * downsample_factor, 4, strides=1)(x3)
x5 = layers.Conv2D(1, 4)(x4)
outputs = [x1, x2, x3, x4, x5]
return keras.Model([input_image_A, input_image_B], outputs)<jupyter_output><empty_output><jupyter_text>Loss functionsGauGAN uses the following loss functions:* Generator: * Expectation over the discriminator predictions. * [KL divergence](https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence) for learning the mean and variance predicted by the encoder. * Minimization between the discriminator predictions on original and generated images to align the feature space of the generator. * [Perceptual loss](https://arxiv.org/abs/1603.08155) for encouraging the generated images to have perceptual quality.* Discriminator: * [Hinge loss](https://en.wikipedia.org/wiki/Hinge_loss).<jupyter_code>def generator_loss(y):
return -ops.mean(y)
def kl_divergence_loss(mean, variance):
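    # `variance` is the log-variance predicted by the encoder (note the
    # `ops.exp` below), so this is the standard closed-form KL(q(z|x) || N(0, I)) term.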
return -0.5 * ops.sum(1 + variance - ops.square(mean) - ops.exp(variance))
class FeatureMatchingLoss(keras.losses.Loss):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.mae = keras.losses.MeanAbsoluteError()
def call(self, y_true, y_pred):
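        # The last element of each list is the discriminator's final patch
        # prediction, so `len(y_true) - 1` matches only the intermediate feature maps.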
loss = 0
for i in range(len(y_true) - 1):
loss += self.mae(y_true[i], y_pred[i])
return loss
class VGGFeatureMatchingLoss(keras.losses.Loss):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.encoder_layers = [
"block1_conv1",
"block2_conv1",
"block3_conv1",
"block4_conv1",
"block5_conv1",
]
self.weights = [1.0 / 32, 1.0 / 16, 1.0 / 8, 1.0 / 4, 1.0]
vgg = keras.applications.VGG19(include_top=False, weights="imagenet")
layer_outputs = [vgg.get_layer(x).output for x in self.encoder_layers]
self.vgg_model = keras.Model(vgg.input, layer_outputs, name="VGG")
self.mae = keras.losses.MeanAbsoluteError()
def call(self, y_true, y_pred):
y_true = keras.applications.vgg19.preprocess_input(127.5 * (y_true + 1))
y_pred = keras.applications.vgg19.preprocess_input(127.5 * (y_pred + 1))
real_features = self.vgg_model(y_true)
fake_features = self.vgg_model(y_pred)
loss = 0
for i in range(len(real_features)):
loss += self.weights[i] * self.mae(real_features[i], fake_features[i])
return loss
class DiscriminatorLoss(keras.losses.Loss):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.hinge_loss = keras.losses.Hinge()
def call(self, y, is_real):
return self.hinge_loss(is_real, y)<jupyter_output><empty_output><jupyter_text>GAN monitor callback. Next, we implement a callback to monitor the GauGAN results while it is training.<jupyter_code>class GanMonitor(keras.callbacks.Callback):
def __init__(self, val_dataset, n_samples, epoch_interval=5):
self.val_images = next(iter(val_dataset))
self.n_samples = n_samples
self.epoch_interval = epoch_interval
self.seed_generator = keras.random.SeedGenerator(42)
def infer(self):
latent_vector = keras.random.normal(
shape=(self.model.batch_size, self.model.latent_dim),
mean=0.0,
stddev=2.0,
seed=self.seed_generator,
)
return self.model.predict([latent_vector, self.val_images[2]])
def on_epoch_end(self, epoch, logs=None):
if epoch % self.epoch_interval == 0:
generated_images = self.infer()
for _ in range(self.n_samples):
grid_row = min(generated_images.shape[0], 3)
f, axarr = plt.subplots(grid_row, 3, figsize=(18, grid_row * 6))
for row in range(grid_row):
ax = axarr if grid_row == 1 else axarr[row]
ax[0].imshow((self.val_images[0][row] + 1) / 2)
ax[0].axis("off")
ax[0].set_title("Mask", fontsize=20)
ax[1].imshow((self.val_images[1][row] + 1) / 2)
ax[1].axis("off")
ax[1].set_title("Ground Truth", fontsize=20)
ax[2].imshow((generated_images[row] + 1) / 2)
ax[2].axis("off")
ax[2].set_title("Generated", fontsize=20)
plt.show()<jupyter_output><empty_output><jupyter_text>Subclassed GauGAN model. Finally, we put everything together inside a subclassed model (from `keras.Model`), overriding its `train_step()` method.<jupyter_code>class GauGAN(keras.Model):
def __init__(
self,
image_size,
num_classes,
batch_size,
latent_dim,
feature_loss_coeff=10,
vgg_feature_loss_coeff=0.1,
kl_divergence_loss_coeff=0.1,
**kwargs,
):
super().__init__(**kwargs)
self.image_size = image_size
self.latent_dim = latent_dim
self.batch_size = batch_size
self.num_classes = num_classes
self.image_shape = (image_size, image_size, 3)
self.mask_shape = (image_size, image_size, num_classes)
self.feature_loss_coeff = feature_loss_coeff
self.vgg_feature_loss_coeff = vgg_feature_loss_coeff
self.kl_divergence_loss_coeff = kl_divergence_loss_coeff
self.discriminator = build_discriminator(self.image_shape)
self.generator = build_generator(self.mask_shape)
self.encoder = build_encoder(self.image_shape)
self.sampler = GaussianSampler(batch_size, latent_dim)
self.patch_size, self.combined_model = self.build_combined_generator()
self.disc_loss_tracker = keras.metrics.Mean(name="disc_loss")
self.gen_loss_tracker = keras.metrics.Mean(name="gen_loss")
self.feat_loss_tracker = keras.metrics.Mean(name="feat_loss")
self.vgg_loss_tracker = keras.metrics.Mean(name="vgg_loss")
self.kl_loss_tracker = keras.metrics.Mean(name="kl_loss")
@property
def metrics(self):
return [
self.disc_loss_tracker,
self.gen_loss_tracker,
self.feat_loss_tracker,
self.vgg_loss_tracker,
self.kl_loss_tracker,
]
def build_combined_generator(self):
# This method builds a model that takes as inputs the following:
# latent vector, one-hot encoded segmentation label map, and
# a segmentation map. It then (i) generates an image with the generator,
# (ii) passes the generated images and segmentation map to the discriminator.
# Finally, the model produces the following outputs: (a) discriminator outputs,
# (b) generated image.
# We will be using this model to simplify the implementation.
self.discriminator.trainable = False
mask_input = keras.Input(shape=self.mask_shape, name="mask")
image_input = keras.Input(shape=self.image_shape, name="image")
latent_input = keras.Input(shape=(self.latent_dim,), name="latent")
generated_image = self.generator([latent_input, mask_input])
discriminator_output = self.discriminator([image_input, generated_image])
combined_outputs = discriminator_output + [generated_image]
patch_size = discriminator_output[-1].shape[1]
combined_model = keras.Model(
[latent_input, mask_input, image_input], combined_outputs
)
return patch_size, combined_model
def compile(self, gen_lr=1e-4, disc_lr=4e-4, **kwargs):
super().compile(**kwargs)
self.generator_optimizer = keras.optimizers.Adam(
gen_lr, beta_1=0.0, beta_2=0.999
)
self.discriminator_optimizer = keras.optimizers.Adam(
disc_lr, beta_1=0.0, beta_2=0.999
)
self.discriminator_loss = DiscriminatorLoss()
self.feature_matching_loss = FeatureMatchingLoss()
self.vgg_loss = VGGFeatureMatchingLoss()
def train_discriminator(self, latent_vector, segmentation_map, real_image, labels):
fake_images = self.generator([latent_vector, labels])
with tf.GradientTape() as gradient_tape:
pred_fake = self.discriminator([segmentation_map, fake_images])[-1]
pred_real = self.discriminator([segmentation_map, real_image])[-1]
loss_fake = self.discriminator_loss(pred_fake, -1.0)
loss_real = self.discriminator_loss(pred_real, 1.0)
total_loss = 0.5 * (loss_fake + loss_real)
self.discriminator.trainable = True
gradients = gradient_tape.gradient(
total_loss, self.discriminator.trainable_variables
)
self.discriminator_optimizer.apply_gradients(
zip(gradients, self.discriminator.trainable_variables)
)
return total_loss
def train_generator(
self, latent_vector, segmentation_map, labels, image, mean, variance
):
# Generator learns through the signal provided by the discriminator. During
# backpropagation, we only update the generator parameters.
self.discriminator.trainable = False
with tf.GradientTape() as tape:
real_d_output = self.discriminator([segmentation_map, image])
combined_outputs = self.combined_model(
[latent_vector, labels, segmentation_map]
)
fake_d_output, fake_image = combined_outputs[:-1], combined_outputs[-1]
pred = fake_d_output[-1]
# Compute generator losses.
g_loss = generator_loss(pred)
kl_loss = self.kl_divergence_loss_coeff * kl_divergence_loss(mean, variance)
vgg_loss = self.vgg_feature_loss_coeff * self.vgg_loss(image, fake_image)
feature_loss = self.feature_loss_coeff * self.feature_matching_loss(
real_d_output, fake_d_output
)
total_loss = g_loss + kl_loss + vgg_loss + feature_loss
all_trainable_variables = (
self.combined_model.trainable_variables + self.encoder.trainable_variables
)
gradients = tape.gradient(total_loss, all_trainable_variables)
self.generator_optimizer.apply_gradients(
zip(gradients, all_trainable_variables)
)
return total_loss, feature_loss, vgg_loss, kl_loss
def train_step(self, data):
segmentation_map, image, labels = data
mean, variance = self.encoder(image)
latent_vector = self.sampler([mean, variance])
discriminator_loss = self.train_discriminator(
latent_vector, segmentation_map, image, labels
)
(generator_loss, feature_loss, vgg_loss, kl_loss) = self.train_generator(
latent_vector, segmentation_map, labels, image, mean, variance
)
# Report progress.
self.disc_loss_tracker.update_state(discriminator_loss)
self.gen_loss_tracker.update_state(generator_loss)
self.feat_loss_tracker.update_state(feature_loss)
self.vgg_loss_tracker.update_state(vgg_loss)
self.kl_loss_tracker.update_state(kl_loss)
results = {m.name: m.result() for m in self.metrics}
return results
def test_step(self, data):
segmentation_map, image, labels = data
# Obtain the learned moments of the real image distribution.
mean, variance = self.encoder(image)
# Sample a latent from the distribution defined by the learned moments.
latent_vector = self.sampler([mean, variance])
# Generate the fake images.
fake_images = self.generator([latent_vector, labels])
# Calculate the losses.
pred_fake = self.discriminator([segmentation_map, fake_images])[-1]
pred_real = self.discriminator([segmentation_map, image])[-1]
loss_fake = self.discriminator_loss(pred_fake, -1.0)
loss_real = self.discriminator_loss(pred_real, 1.0)
total_discriminator_loss = 0.5 * (loss_fake + loss_real)
real_d_output = self.discriminator([segmentation_map, image])
combined_outputs = self.combined_model(
[latent_vector, labels, segmentation_map]
)
fake_d_output, fake_image = combined_outputs[:-1], combined_outputs[-1]
pred = fake_d_output[-1]
g_loss = generator_loss(pred)
kl_loss = self.kl_divergence_loss_coeff * kl_divergence_loss(mean, variance)
vgg_loss = self.vgg_feature_loss_coeff * self.vgg_loss(image, fake_image)
feature_loss = self.feature_loss_coeff * self.feature_matching_loss(
real_d_output, fake_d_output
)
total_generator_loss = g_loss + kl_loss + vgg_loss + feature_loss
# Report progress.
self.disc_loss_tracker.update_state(total_discriminator_loss)
self.gen_loss_tracker.update_state(total_generator_loss)
self.feat_loss_tracker.update_state(feature_loss)
self.vgg_loss_tracker.update_state(vgg_loss)
self.kl_loss_tracker.update_state(kl_loss)
results = {m.name: m.result() for m in self.metrics}
return results
def call(self, inputs):
latent_vectors, labels = inputs
return self.generator([latent_vectors, labels])<jupyter_output><empty_output><jupyter_text>GauGAN training<jupyter_code>gaugan = GauGAN(IMG_HEIGHT, NUM_CLASSES, BATCH_SIZE, latent_dim=256)
gaugan.compile()
history = gaugan.fit(
train_dataset,
validation_data=val_dataset,
epochs=15,
callbacks=[GanMonitor(val_dataset, BATCH_SIZE)],
)
def plot_history(item):
plt.plot(history.history[item], label=item)
plt.plot(history.history["val_" + item], label="val_" + item)
plt.xlabel("Epochs")
plt.ylabel(item)
plt.title("Train and Validation {} Over Epochs".format(item), fontsize=14)
plt.legend()
plt.grid()
plt.show()
plot_history("disc_loss")
plot_history("gen_loss")
plot_history("feat_loss")
plot_history("vgg_loss")
plot_history("kl_loss")<jupyter_output><empty_output><jupyter_text>Inference<jupyter_code>val_iterator = iter(val_dataset)
for _ in range(5):
val_images = next(val_iterator)
# Sample latent from a normal distribution.
latent_vector = keras.random.normal(
shape=(gaugan.batch_size, gaugan.latent_dim), mean=0.0, stddev=2.0
)
# Generate fake images.
fake_images = gaugan.predict([latent_vector, val_images[2]])
real_images = val_images
grid_row = min(fake_images.shape[0], 3)
grid_col = 3
f, axarr = plt.subplots(grid_row, grid_col, figsize=(grid_col * 6, grid_row * 6))
for row in range(grid_row):
ax = axarr if grid_row == 1 else axarr[row]
ax[0].imshow((real_images[0][row] + 1) / 2)
ax[0].axis("off")
ax[0].set_title("Mask", fontsize=20)
ax[1].imshow((real_images[1][row] + 1) / 2)
ax[1].axis("off")
ax[1].set_title("Ground Truth", fontsize=20)
ax[2].imshow((fake_images[row] + 1) / 2)
ax[2].axis("off")
ax[2].set_title("Generated", fontsize=20)
plt.show()<jupyter_output><empty_output> | keras-io/examples/generative/ipynb/gaugan.ipynb/0 | {
"file_path": "keras-io/examples/generative/ipynb/gaugan.ipynb",
"repo_id": "keras-io",
"token_count": 12513
} | 97 |
"""
Title: Character-level text generation with LSTM
Author: [fchollet](https://twitter.com/fchollet)
Date created: 2015/06/15
Last modified: 2020/04/30
Description: Generate text from Nietzsche's writings with a character-level LSTM.
Accelerator: GPU
"""
"""
## Introduction
This example demonstrates how to use an LSTM model to generate
text character-by-character.
At least 20 epochs are required before the generated text
starts sounding locally coherent.
It is recommended to run this script on GPU, as recurrent
networks are quite computationally intensive.
If you try this script on new data, make sure your corpus
has at least ~100k characters. ~1M is better.
"""
"""
## Setup
"""
import keras
from keras import layers
import numpy as np
import random
import io
"""
## Prepare the data
"""
path = keras.utils.get_file(
"nietzsche.txt",
origin="https://s3.amazonaws.com/text-datasets/nietzsche.txt",
)
with io.open(path, encoding="utf-8") as f:
text = f.read().lower()
text = text.replace("\n", " ")  # We remove newline chars for nicer display
print("Corpus length:", len(text))
chars = sorted(list(set(text)))
print("Total chars:", len(chars))
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
# cut the text in semi-redundant sequences of maxlen characters
maxlen = 40
step = 3
sentences = []
next_chars = []
for i in range(0, len(text) - maxlen, step):
sentences.append(text[i : i + maxlen])
next_chars.append(text[i + maxlen])
print("Number of sequences:", len(sentences))
x = np.zeros((len(sentences), maxlen, len(chars)), dtype="bool")
y = np.zeros((len(sentences), len(chars)), dtype="bool")
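# x holds the one-hot encoded input windows, shaped (num_sequences, maxlen, num_chars);
# y holds the one-hot encoded next character for each window.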
for i, sentence in enumerate(sentences):
for t, char in enumerate(sentence):
x[i, t, char_indices[char]] = 1
y[i, char_indices[next_chars[i]]] = 1
"""
## Build the model: a single LSTM layer
"""
model = keras.Sequential(
[
keras.Input(shape=(maxlen, len(chars))),
layers.LSTM(128),
layers.Dense(len(chars), activation="softmax"),
]
)
optimizer = keras.optimizers.RMSprop(learning_rate=0.01)
model.compile(loss="categorical_crossentropy", optimizer=optimizer)
"""
## Prepare the text sampling function
"""
def sample(preds, temperature=1.0):
# helper function to sample an index from a probability array
preds = np.asarray(preds).astype("float64")
preds = np.log(preds) / temperature
exp_preds = np.exp(preds)
preds = exp_preds / np.sum(exp_preds)
probas = np.random.multinomial(1, preds, 1)
return np.argmax(probas)
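"""
As a quick, hypothetical illustration (not part of the original script): with a
low temperature the most likely character is picked almost every time, while
higher temperatures make the sampling closer to uniform.
"""

toy_preds = np.array([0.1, 0.2, 0.7])
for temperature in [0.2, 1.0, 1.2]:
    print(temperature, [sample(toy_preds, temperature) for _ in range(5)])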
"""
## Train the model
"""
epochs = 40
batch_size = 128
for epoch in range(epochs):
model.fit(x, y, batch_size=batch_size, epochs=1)
print()
print("Generating text after epoch: %d" % epoch)
start_index = random.randint(0, len(text) - maxlen - 1)
for diversity in [0.2, 0.5, 1.0, 1.2]:
print("...Diversity:", diversity)
generated = ""
sentence = text[start_index : start_index + maxlen]
print('...Generating with seed: "' + sentence + '"')
for i in range(400):
x_pred = np.zeros((1, maxlen, len(chars)))
for t, char in enumerate(sentence):
x_pred[0, t, char_indices[char]] = 1.0
preds = model.predict(x_pred, verbose=0)[0]
next_index = sample(preds, diversity)
next_char = indices_char[next_index]
sentence = sentence[1:] + next_char
generated += next_char
print("...Generated: ", generated)
print("-")
| keras-io/examples/generative/lstm_character_level_text_generation.py/0 | {
"file_path": "keras-io/examples/generative/lstm_character_level_text_generation.py",
"repo_id": "keras-io",
"token_count": 1392
} | 98 |
"""
Title: Density estimation using Real NVP
Authors: [Mandolini Giorgio Maria](https://www.linkedin.com/in/giorgio-maria-mandolini-a2a1b71b4/), [Sanna Daniele](https://www.linkedin.com/in/daniele-sanna-338629bb/), [Zannini Quirini Giorgio](https://www.linkedin.com/in/giorgio-zannini-quirini-16ab181a0/)
Date created: 2020/08/10
Last modified: 2020/08/10
Description: Estimating the density distribution of the "double moon" dataset.
Accelerator: GPU
"""
"""
## Introduction
The aim of this work is to map a simple distribution - which is easy to sample
and whose density is simple to estimate - to a more complex one learned from the data.
This kind of generative model is also known as a "normalizing flow".
In order to do this, the model is trained via the maximum
likelihood principle, using the "change of variable" formula.
We will use an affine coupling function. We create it such that its inverse, as well as
the determinant of the Jacobian, are easy to obtain (more details in the referenced paper).
**Requirements:**
* Tensorflow 2.9.1
* Tensorflow probability 0.17.0
**Reference:**
[Density estimation using Real NVP](https://arxiv.org/abs/1605.08803)
"""
"""
## Setup
"""
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import regularizers
from sklearn.datasets import make_moons
import numpy as np
import matplotlib.pyplot as plt
import tensorflow_probability as tfp
"""
## Load the data
"""
data = make_moons(3000, noise=0.05)[0].astype("float32")
norm = layers.Normalization()
norm.adapt(data)
normalized_data = norm(data)
"""
## Affine coupling layer
"""
# Creating a custom layer with keras API.
output_dim = 256
reg = 0.01
def Coupling(input_shape):
input = keras.layers.Input(shape=input_shape)
t_layer_1 = keras.layers.Dense(
output_dim, activation="relu", kernel_regularizer=regularizers.l2(reg)
)(input)
t_layer_2 = keras.layers.Dense(
output_dim, activation="relu", kernel_regularizer=regularizers.l2(reg)
)(t_layer_1)
t_layer_3 = keras.layers.Dense(
output_dim, activation="relu", kernel_regularizer=regularizers.l2(reg)
)(t_layer_2)
t_layer_4 = keras.layers.Dense(
output_dim, activation="relu", kernel_regularizer=regularizers.l2(reg)
)(t_layer_3)
t_layer_5 = keras.layers.Dense(
input_shape, activation="linear", kernel_regularizer=regularizers.l2(reg)
)(t_layer_4)
s_layer_1 = keras.layers.Dense(
output_dim, activation="relu", kernel_regularizer=regularizers.l2(reg)
)(input)
s_layer_2 = keras.layers.Dense(
output_dim, activation="relu", kernel_regularizer=regularizers.l2(reg)
)(s_layer_1)
s_layer_3 = keras.layers.Dense(
output_dim, activation="relu", kernel_regularizer=regularizers.l2(reg)
)(s_layer_2)
s_layer_4 = keras.layers.Dense(
output_dim, activation="relu", kernel_regularizer=regularizers.l2(reg)
)(s_layer_3)
s_layer_5 = keras.layers.Dense(
input_shape, activation="tanh", kernel_regularizer=regularizers.l2(reg)
)(s_layer_4)
return keras.Model(inputs=input, outputs=[s_layer_5, t_layer_5])
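"""
As a quick illustration of why affine coupling is convenient (a hypothetical
check, not part of the original example): the forward map `y = x * exp(s) + t`
is trivially inverted by `x = (y - t) * exp(-s)`, and the log-determinant of its
Jacobian is simply `sum(s)`.
"""

s_toy, t_toy = np.array([0.3, -1.2]), np.array([0.5, 2.0])
x_toy = np.array([1.0, -0.7])
y_toy = x_toy * np.exp(s_toy) + t_toy
x_rec = (y_toy - t_toy) * np.exp(-s_toy)
print(np.allclose(x_toy, x_rec), np.sum(s_toy))  # True, log|det J| = sum(s)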
"""
## Real NVP
"""
class RealNVP(keras.Model):
def __init__(self, num_coupling_layers):
super().__init__()
self.num_coupling_layers = num_coupling_layers
# Distribution of the latent space.
self.distribution = tfp.distributions.MultivariateNormalDiag(
loc=[0.0, 0.0], scale_diag=[1.0, 1.0]
)
self.masks = np.array(
[[0, 1], [1, 0]] * (num_coupling_layers // 2), dtype="float32"
)
self.loss_tracker = keras.metrics.Mean(name="loss")
self.layers_list = [Coupling(2) for i in range(num_coupling_layers)]
@property
def metrics(self):
"""List of the model's metrics.
We make sure the loss tracker is listed as part of `model.metrics`
so that `fit()` and `evaluate()` are able to `reset()` the loss tracker
at the start of each epoch and at the start of an `evaluate()` call.
"""
return [self.loss_tracker]
def call(self, x, training=True):
log_det_inv = 0
direction = 1
if training:
direction = -1
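            # Training runs the flow in the inverse direction (data -> latent);
            # sampling/inference runs it forward (latent -> data).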
for i in range(self.num_coupling_layers)[::direction]:
x_masked = x * self.masks[i]
reversed_mask = 1 - self.masks[i]
s, t = self.layers_list[i](x_masked)
s *= reversed_mask
t *= reversed_mask
gate = (direction - 1) / 2
x = (
reversed_mask
* (x * tf.exp(direction * s) + direction * t * tf.exp(gate * s))
+ x_masked
)
log_det_inv += gate * tf.reduce_sum(s, [1])
return x, log_det_inv
# Log likelihood of the normal distribution plus the log determinant of the jacobian.
def log_loss(self, x):
y, logdet = self(x)
log_likelihood = self.distribution.log_prob(y) + logdet
return -tf.reduce_mean(log_likelihood)
def train_step(self, data):
with tf.GradientTape() as tape:
loss = self.log_loss(data)
g = tape.gradient(loss, self.trainable_variables)
self.optimizer.apply_gradients(zip(g, self.trainable_variables))
self.loss_tracker.update_state(loss)
return {"loss": self.loss_tracker.result()}
def test_step(self, data):
loss = self.log_loss(data)
self.loss_tracker.update_state(loss)
return {"loss": self.loss_tracker.result()}
"""
## Model training
"""
model = RealNVP(num_coupling_layers=6)
model.compile(optimizer=keras.optimizers.Adam(learning_rate=0.0001))
history = model.fit(
normalized_data, batch_size=256, epochs=300, verbose=2, validation_split=0.2
)
"""
## Performance evaluation
"""
plt.figure(figsize=(15, 10))
plt.plot(history.history["loss"])
plt.plot(history.history["val_loss"])
plt.title("model loss")
plt.legend(["train", "validation"], loc="upper right")
plt.ylabel("loss")
plt.xlabel("epoch")
# From data to latent space.
z, _ = model(normalized_data)
# From latent space to data.
samples = model.distribution.sample(3000)
x, _ = model.predict(samples)
f, axes = plt.subplots(2, 2)
f.set_size_inches(20, 15)
axes[0, 0].scatter(normalized_data[:, 0], normalized_data[:, 1], color="r")
axes[0, 0].set(title="Inference data space X", xlabel="x", ylabel="y")
axes[0, 1].scatter(z[:, 0], z[:, 1], color="r")
axes[0, 1].set(title="Inference latent space Z", xlabel="x", ylabel="y")
axes[0, 1].set_xlim([-3.5, 4])
axes[0, 1].set_ylim([-4, 4])
axes[1, 0].scatter(samples[:, 0], samples[:, 1], color="g")
axes[1, 0].set(title="Generated latent space Z", xlabel="x", ylabel="y")
axes[1, 1].scatter(x[:, 0], x[:, 1], color="g")
axes[1, 1].set(title="Generated data space X", label="x", ylabel="y")
axes[1, 1].set_xlim([-2, 2])
axes[1, 1].set_ylim([-2, 2])
| keras-io/examples/generative/real_nvp.py/0 | {
"file_path": "keras-io/examples/generative/real_nvp.py",
"repo_id": "keras-io",
"token_count": 2930
} | 99 |
"""
Title: Approximating non-Function Mappings with Mixture Density Networks
Author: [lukewood](https://twitter.com/luke_wood_ml)
Date created: 2023/07/15
Last modified: 2023/07/15
Description: Approximate non one to one mapping using mixture density networks.
Accelerator: None
"""
"""
## Approximating NonFunctions
Neural networks are universal function approximators. Key word: function!
While powerful function approximators, neural networks are not able to
approximate non-functions.
One important characteristic of functions is that they map one input to a
unique output.
Neural networks do not perform well when the training set has multiple values of
Y for a single X.
Instead of learning the proper distribution, a naive neural network will
interpret the problem as a function and learn the geometric mean of all `Y` in
the training set.
In this guide I'll show you how to approximate the class of non-functions
consisting of mappings from `x -> y` such that multiple `y` may exist for a
given `x`. We'll use a class of neural networks called
"Mixture Density Networks".
I'm going to use the new
[multibackend Keras Core project](https://github.com/keras-team/keras-core) to
build my Mixture Density networks.
Great job to the Keras team on the project - it's awesome to be able to swap
frameworks in one line of code.
Some bad news: I use TensorFlow probability in this guide... so it doesn't
actually work with other backends.
Anyways, let's start by installing dependencies and sorting out imports:
"""
"""shell
pip install -q --upgrade tensorflow-probability keras-core
"""
import numpy as np
import matplotlib.pyplot as plt
import math
import random
from keras_core import callbacks
import keras_core
import tensorflow as tf
from keras_core import layers
from keras_core import optimizers
from tensorflow_probability import distributions as tfd
"""
Next, let's generate a noisy spiral that we're going to attempt to approximate.
I've defined a few functions below to do this:
"""
def normalize(x):
return (x - np.min(x)) / (np.max(x) - np.min(x))
def create_noisy_spiral(n, jitter_std=0.2, revolutions=2):
angle = np.random.uniform(0, 2 * np.pi * revolutions, [n])
r = angle
x = r * np.cos(angle)
y = r * np.sin(angle)
result = np.stack([x, y], axis=1)
result = result + np.random.normal(scale=jitter_std, size=[n, 2])
result = 5 * normalize(result)
return result
"""
Next, let's invoke this function many times to construct a sample dataset:
"""
xy = create_noisy_spiral(10000)
x, y = xy[:, 0:1], xy[:, 1:]
plt.scatter(x, y)
plt.show()
"""
As you can see, there's multiple possible values for Y with respect to a given
X.
Normal neural networks will simply learn the mean of these points with
respect to geometric space.
In the context of our spiral, however, the geometric mean of each Y occurs
with a probability of zero.
We can quickly show this with a simple linear model:
"""
N_HIDDEN = 128
model = keras_core.Sequential(
[
layers.Dense(N_HIDDEN, activation="relu"),
layers.Dense(N_HIDDEN, activation="relu"),
layers.Dense(1),
]
)
"""
Let's use mean squared error as well as the adam optimizer.
These tend to be reasonable prototyping choices:
"""
model.compile(optimizer="adam", loss="mse")
"""
We can fit this model quite easily:
"""
model.fit(
x,
y,
epochs=300,
batch_size=128,
validation_split=0.15,
callbacks=[callbacks.EarlyStopping(monitor="val_loss", patience=10)],
)
"""
And let's check out the result:
"""
y_pred = model.predict(x)
"""
As expected, the model learns the geometric mean of all points in `y` for a
given `x`.
"""
plt.scatter(x, y)
plt.scatter(x, y_pred)
plt.show()
"""
## Mixture Density Networks
Mixture Density networks can alleviate this problem.
A mixture density is a class of complicated densities expressible in terms of simpler densities.
Effectively, a mixture density is the sum of various probability distributions.
By summing various distributions, mixture density distributions can
model arbitrarily complex distributions.
Mixture Density networks learn to parameterize a mixture density distribution
based on a given training set.
As a practitioner, all you need to know, is that Mixture Density Networks solve
the problem of multiple values of Y for a given X.
I'm hoping to add a tool to your kit, but I'm not going to formally explain the
derivation of Mixture Density networks in this guide.
The most important thing to know is that a Mixture Density network learns to
parameterize a mixture density distribution.
This is done by computing a special loss with respect to both the provided
`y_i` label as well as the predicted distribution for the corresponding `x_i`.
This loss function operates by computing the probability that `y_i` would be
drawn from the predicted mixture distribution.
Let's implement a Mixture density network.
Below, a ton of helper functions are defined based on an old Keras library
[`Keras Mixture Density Network Layer`](https://github.com/cpmpercussion/keras-mdn-layer).
I've adapted the code for use with Keras core.
Let's start writing a Mixture Density Network!
First, we need a special activation function: ELU plus a tiny epsilon.
This helps prevent ELU from outputting 0 which causes NaNs in Mixture Density
Network loss evaluation.
"""
def elu_plus_one_plus_epsilon(x):
return keras_core.activations.elu(x) + 1 + keras_core.backend.epsilon()
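"""
A tiny, hypothetical sanity check (not part of the original guide): plain ELU
saturates at -1 for very negative inputs, so the shifted activation stays
strictly positive and is safe to use as a standard deviation.
"""

print(elu_plus_one_plus_epsilon(np.array([-10.0, 0.0, 2.0], dtype="float32")))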
"""
Next, let's actually define a MixtureDensity layer that outputs all values needed
to sample from the learned mixture distribution:
"""
class MixtureDensityOutput(layers.Layer):
def __init__(self, output_dimension, num_mixtures, **kwargs):
super().__init__(**kwargs)
self.output_dim = output_dimension
self.num_mix = num_mixtures
self.mdn_mus = layers.Dense(
self.num_mix * self.output_dim, name="mdn_mus"
) # mix*output vals, no activation
self.mdn_sigmas = layers.Dense(
self.num_mix * self.output_dim,
activation=elu_plus_one_plus_epsilon,
name="mdn_sigmas",
) # mix*output vals exp activation
self.mdn_pi = layers.Dense(self.num_mix, name="mdn_pi") # mix vals, logits
def build(self, input_shape):
self.mdn_mus.build(input_shape)
self.mdn_sigmas.build(input_shape)
self.mdn_pi.build(input_shape)
super().build(input_shape)
@property
def trainable_weights(self):
return (
self.mdn_mus.trainable_weights
+ self.mdn_sigmas.trainable_weights
+ self.mdn_pi.trainable_weights
)
@property
def non_trainable_weights(self):
return (
self.mdn_mus.non_trainable_weights
+ self.mdn_sigmas.non_trainable_weights
+ self.mdn_pi.non_trainable_weights
)
def call(self, x, mask=None):
return layers.concatenate(
[self.mdn_mus(x), self.mdn_sigmas(x), self.mdn_pi(x)], name="mdn_outputs"
)
"""
Let's construct a Mixture Density Network using our new layer:
"""
OUTPUT_DIMS = 1
N_MIXES = 20
mdn_network = keras_core.Sequential(
[
layers.Dense(N_HIDDEN, activation="relu"),
layers.Dense(N_HIDDEN, activation="relu"),
MixtureDensityOutput(OUTPUT_DIMS, N_MIXES),
]
)
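"""
As a quick, hypothetical shape check (not part of the original guide): the
output layer emits `2 * N_MIXES * OUTPUT_DIMS + N_MIXES` values per sample,
which with the settings above is 20 means, 20 standard deviations, and 20
mixture logits.
"""

print(mdn_network(np.zeros((4, 1), dtype="float32")).shape)  # (4, 60)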
"""
Next, let's implement a custom loss function to train the Mixture Density
Network layer based on the true values and our expected outputs:
"""
def get_mixture_loss_func(output_dim, num_mixes):
def mdn_loss_func(y_true, y_pred):
# Reshape inputs in case this is used in a TimeDistribued layer
y_pred = tf.reshape(
y_pred,
[-1, (2 * num_mixes * output_dim) + num_mixes],
name="reshape_ypreds",
)
y_true = tf.reshape(y_true, [-1, output_dim], name="reshape_ytrue")
        # Split the inputs into parameters
out_mu, out_sigma, out_pi = tf.split(
y_pred,
num_or_size_splits=[
num_mixes * output_dim,
num_mixes * output_dim,
num_mixes,
],
axis=-1,
name="mdn_coef_split",
)
# Construct the mixture models
cat = tfd.Categorical(logits=out_pi)
component_splits = [output_dim] * num_mixes
mus = tf.split(out_mu, num_or_size_splits=component_splits, axis=1)
sigs = tf.split(out_sigma, num_or_size_splits=component_splits, axis=1)
coll = [
tfd.MultivariateNormalDiag(loc=loc, scale_diag=scale)
for loc, scale in zip(mus, sigs)
]
mixture = tfd.Mixture(cat=cat, components=coll)
loss = mixture.log_prob(y_true)
loss = tf.negative(loss)
loss = tf.reduce_mean(loss)
return loss
return mdn_loss_func
mdn_network.compile(loss=get_mixture_loss_func(OUTPUT_DIMS, N_MIXES), optimizer="adam")
"""
Finally, we can call `model.fit()` like any other Keras model.
"""
mdn_network.fit(
x,
y,
epochs=300,
batch_size=128,
validation_split=0.15,
callbacks=[
callbacks.EarlyStopping(monitor="loss", patience=10, restore_best_weights=True),
callbacks.ReduceLROnPlateau(monitor="loss", patience=5),
],
)
"""
Let's make some predictions!
"""
y_pred_mixture = mdn_network.predict(x)
print(y_pred_mixture.shape)
"""
The MDN does not output a single value; instead it outputs values to
parameterize a mixture distribution.
To visualize these outputs, let's sample from the distribution.
Note that sampling is a lossy process.
If you want to preserve all information as part of a greater latent
representation (i.e. for downstream processing) I recommend you simply keep the
distribution parameters in place.
"""
def split_mixture_params(params, output_dim, num_mixes):
mus = params[: num_mixes * output_dim]
sigs = params[num_mixes * output_dim : 2 * num_mixes * output_dim]
pi_logits = params[-num_mixes:]
return mus, sigs, pi_logits
def softmax(w, t=1.0):
e = np.array(w) / t # adjust temperature
e -= e.max() # subtract max to protect from exploding exp values.
e = np.exp(e)
dist = e / np.sum(e)
return dist
def sample_from_categorical(dist):
r = np.random.rand(1) # uniform random number in [0,1]
accumulate = 0
for i in range(0, dist.size):
accumulate += dist[i]
if accumulate >= r:
return i
    tf.get_logger().info("Error sampling categorical model.")
return -1
def sample_from_output(params, output_dim, num_mixes, temp=1.0, sigma_temp=1.0):
mus, sigs, pi_logits = split_mixture_params(params, output_dim, num_mixes)
pis = softmax(pi_logits, t=temp)
m = sample_from_categorical(pis)
# Alternative way to sample from categorical:
# m = np.random.choice(range(len(pis)), p=pis)
mus_vector = mus[m * output_dim : (m + 1) * output_dim]
sig_vector = sigs[m * output_dim : (m + 1) * output_dim]
scale_matrix = np.identity(output_dim) * sig_vector # scale matrix from diag
cov_matrix = np.matmul(scale_matrix, scale_matrix.T) # cov is scale squared.
cov_matrix = cov_matrix * sigma_temp # adjust for sigma temperature
sample = np.random.multivariate_normal(mus_vector, cov_matrix, 1)
return sample
"""
Next, let's use our sampling function:
"""
# Sample from the predicted distributions
y_samples = np.apply_along_axis(
sample_from_output, 1, y_pred_mixture, 1, N_MIXES, temp=1.0
)
"""
Finally, we can visualize our network outputs
"""
plt.scatter(x, y, alpha=0.05, color="blue", label="Ground Truth")
plt.scatter(
x,
y_samples[:, :, 0],
color="green",
alpha=0.05,
label="Mixture Density Network prediction",
)
plt.show()
"""
Beautiful. Love to see it
# Conclusions
Neural Networks are universal function approximators - but they can only
approximate functions. Mixture Density networks can approximate arbitrary
x->y mappings using some neat probability tricks.
For more examples with `tensorflow_probability`
[start here](https://www.tensorflow.org/probability/examples/Probabilistic_Layers_Regression).
One more pretty graphic for the road:
"""
fig, axs = plt.subplots(1, 3)
fig.set_figheight(3)
fig.set_figwidth(12)
axs[0].set_title("Ground Truth")
axs[0].scatter(x, y, alpha=0.05, color="blue")
xlim = axs[0].get_xlim()
ylim = axs[0].get_ylim()
axs[1].set_title("Normal Model prediction")
axs[1].scatter(x, y_pred, alpha=0.05, color="red")
axs[1].set_xlim(xlim)
axs[1].set_ylim(ylim)
axs[2].scatter(
x,
y_samples[:, :, 0],
color="green",
alpha=0.05,
label="Mixture Density Network prediction",
)
axs[2].set_title("Mixture Density Network prediction")
axs[2].set_xlim(xlim)
axs[2].set_ylim(ylim)
plt.show()
| keras-io/examples/keras_recipes/approximating_non_function_mappings.py/0 | {
"file_path": "keras-io/examples/keras_recipes/approximating_non_function_mappings.py",
"repo_id": "keras-io",
"token_count": 4810
} | 100 |
<jupyter_start><jupyter_text>Memory-efficient embeddings for recommendation systems**Author:** [Khalid Salama](https://www.linkedin.com/in/khalid-salama-24403144/)**Date created:** 2021/02/15**Last modified:** 2023/11/15**Description:** Using compositional & mixed-dimension embeddings for memory-efficient recommendation models. Introduction. This example demonstrates two techniques for building memory-efficient recommendation models by reducing the size of the embedding tables, without sacrificing model effectiveness: 1. [Quotient-remainder trick](https://arxiv.org/abs/1909.02107), by Hao-Jun Michael Shi et al., which reduces the number of embedding vectors to store, yet produces a unique embedding vector for each item without explicit definition. 2. [Mixed Dimension embeddings](https://arxiv.org/abs/1909.11810), by Antonio Ginart et al., which stores embedding vectors with mixed dimensions, where less popular items have reduced-dimension embeddings. We use the [1M version of the Movielens dataset](https://grouplens.org/datasets/movielens/1m/). The dataset includes around 1 million ratings from 6,000 users on 4,000 movies. Setup<jupyter_code>import os
os.environ["KERAS_BACKEND"] = "tensorflow"
from zipfile import ZipFile
from urllib.request import urlretrieve
import numpy as np
import pandas as pd
import tensorflow as tf
import keras
from keras import layers
from keras.layers import StringLookup
import matplotlib.pyplot as plt<jupyter_output><empty_output><jupyter_text>Prepare the data Download and process data<jupyter_code>urlretrieve("http://files.grouplens.org/datasets/movielens/ml-1m.zip", "movielens.zip")
ZipFile("movielens.zip", "r").extractall()
ratings_data = pd.read_csv(
"ml-1m/ratings.dat",
sep="::",
names=["user_id", "movie_id", "rating", "unix_timestamp"],
)
ratings_data["movie_id"] = ratings_data["movie_id"].apply(lambda x: f"movie_{x}")
ratings_data["user_id"] = ratings_data["user_id"].apply(lambda x: f"user_{x}")
ratings_data["rating"] = ratings_data["rating"].apply(lambda x: float(x))
del ratings_data["unix_timestamp"]
print(f"Number of users: {len(ratings_data.user_id.unique())}")
print(f"Number of movies: {len(ratings_data.movie_id.unique())}")
print(f"Number of ratings: {len(ratings_data.index)}")<jupyter_output><empty_output><jupyter_text>Create train and eval data splits<jupyter_code>random_selection = np.random.rand(len(ratings_data.index)) <= 0.85
train_data = ratings_data[random_selection]
eval_data = ratings_data[~random_selection]
train_data.to_csv("train_data.csv", index=False, sep="|", header=False)
eval_data.to_csv("eval_data.csv", index=False, sep="|", header=False)
print(f"Train data split: {len(train_data.index)}")
print(f"Eval data split: {len(eval_data.index)}")
print("Train and eval data files are saved.")<jupyter_output><empty_output><jupyter_text>Define dataset metadata and hyperparameters<jupyter_code>csv_header = list(ratings_data.columns)
user_vocabulary = list(ratings_data.user_id.unique())
movie_vocabulary = list(ratings_data.movie_id.unique())
target_feature_name = "rating"
learning_rate = 0.001
batch_size = 128
num_epochs = 3
base_embedding_dim = 64<jupyter_output><empty_output><jupyter_text>Train and evaluate the model<jupyter_code>def get_dataset_from_csv(csv_file_path, batch_size=128, shuffle=True):
return tf.data.experimental.make_csv_dataset(
csv_file_path,
batch_size=batch_size,
column_names=csv_header,
label_name=target_feature_name,
num_epochs=1,
header=False,
field_delim="|",
shuffle=shuffle,
)
def run_experiment(model):
# Compile the model.
model.compile(
optimizer=keras.optimizers.Adam(learning_rate),
loss=keras.losses.MeanSquaredError(),
metrics=[keras.metrics.MeanAbsoluteError(name="mae")],
)
# Read the training data.
train_dataset = get_dataset_from_csv("train_data.csv", batch_size)
# Read the test data.
eval_dataset = get_dataset_from_csv("eval_data.csv", batch_size, shuffle=False)
# Fit the model with the training data.
history = model.fit(
train_dataset,
epochs=num_epochs,
validation_data=eval_dataset,
)
return history<jupyter_output><empty_output><jupyter_text>Experiment 1: baseline collaborative filtering model Implement embedding encoder<jupyter_code>def embedding_encoder(vocabulary, embedding_dim, num_oov_indices=0, name=None):
return keras.Sequential(
[
StringLookup(
vocabulary=vocabulary, mask_token=None, num_oov_indices=num_oov_indices
),
layers.Embedding(
input_dim=len(vocabulary) + num_oov_indices, output_dim=embedding_dim
),
],
name=f"{name}_embedding" if name else None,
)<jupyter_output><empty_output><jupyter_text>Implement the baseline model<jupyter_code>def create_baseline_model():
# Receive the user as an input.
user_input = layers.Input(name="user_id", shape=(), dtype=tf.string)
# Get user embedding.
user_embedding = embedding_encoder(
vocabulary=user_vocabulary, embedding_dim=base_embedding_dim, name="user"
)(user_input)
# Receive the movie as an input.
movie_input = layers.Input(name="movie_id", shape=(), dtype=tf.string)
# Get embedding.
movie_embedding = embedding_encoder(
vocabulary=movie_vocabulary, embedding_dim=base_embedding_dim, name="movie"
)(movie_input)
# Compute dot product similarity between user and movie embeddings.
logits = layers.Dot(axes=1, name="dot_similarity")(
[user_embedding, movie_embedding]
)
# Convert to rating scale.
prediction = keras.activations.sigmoid(logits) * 5
# Create the model.
model = keras.Model(
inputs=[user_input, movie_input], outputs=prediction, name="baseline_model"
)
return model
baseline_model = create_baseline_model()
baseline_model.summary()<jupyter_output><empty_output><jupyter_text>Notice that the number of trainable parameters is 623,744<jupyter_code>history = run_experiment(baseline_model)
plt.plot(history.history["loss"])
plt.plot(history.history["val_loss"])
plt.title("model loss")
plt.ylabel("loss")
plt.xlabel("epoch")
plt.legend(["train", "eval"], loc="upper left")
plt.show()<jupyter_output><empty_output><jupyter_text>Experiment 2: memory-efficient model Implement Quotient-Remainder embedding as a layer. The Quotient-Remainder technique works as follows. For a given vocabulary and embedding size `embedding_dim`, instead of creating a `vocabulary_size X embedding_dim` embedding table, we create *two* `num_buckets X embedding_dim` embedding tables, where `num_buckets` is much smaller than `vocabulary_size`. An embedding for a given item `index` is generated via the following steps: 1. Compute the `quotient_index` as `index // num_buckets`. 2. Compute the `remainder_index` as `index % num_buckets`. 3. Lookup `quotient_embedding` from the first embedding table using `quotient_index`. 4. Lookup `remainder_embedding` from the second embedding table using `remainder_index`. 5. Return `quotient_embedding` * `remainder_embedding`. This technique not only reduces the number of embedding vectors that need to be stored and trained, but also generates a *unique* embedding vector for each item of size `embedding_dim`. Note that `q_embedding` and `r_embedding` can be combined using other operations, like `Add` and `Concatenate`.<jupyter_code>class QREmbedding(keras.layers.Layer):
def __init__(self, vocabulary, embedding_dim, num_buckets, name=None):
super().__init__(name=name)
self.num_buckets = num_buckets
self.index_lookup = StringLookup(
vocabulary=vocabulary, mask_token=None, num_oov_indices=0
)
self.q_embeddings = layers.Embedding(
num_buckets,
embedding_dim,
)
self.r_embeddings = layers.Embedding(
num_buckets,
embedding_dim,
)
def call(self, inputs):
# Get the item index.
embedding_index = self.index_lookup(inputs)
# Get the quotient index.
quotient_index = tf.math.floordiv(embedding_index, self.num_buckets)
        # Get the remainder index.
remainder_index = tf.math.floormod(embedding_index, self.num_buckets)
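        # Hypothetical illustration: with num_buckets = 100, item index 1234 maps
        # to quotient 12 and remainder 34, so its final embedding is
        # q_embeddings[12] * r_embeddings[34].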
# Lookup the quotient_embedding using the quotient_index.
quotient_embedding = self.q_embeddings(quotient_index)
# Lookup the remainder_embedding using the remainder_index.
remainder_embedding = self.r_embeddings(remainder_index)
# Use multiplication as a combiner operation
return quotient_embedding * remainder_embedding<jupyter_output><empty_output><jupyter_text>Implement Mixed Dimension embedding as a layer. In the mixed dimension embedding technique, we train embedding vectors with full dimensions for the frequently queried items, while training embedding vectors with *reduced dimensions* for less frequent items, plus a *projection weights matrix* to bring low-dimension embeddings to the full dimensions. More precisely, we define *blocks* of items of similar frequencies. For each block, a `block_vocab_size X block_embedding_dim` embedding table and a `block_embedding_dim X full_embedding_dim` projection weights matrix are created. Note that, if `block_embedding_dim` equals `full_embedding_dim`, the projection weights matrix becomes an *identity* matrix. Embeddings for a given batch of item `indices` are generated via the following steps: 1. For each block, lookup the `block_embedding_dim` embedding vectors using `indices`, and project them to the `full_embedding_dim`. 2. If an item index does not belong to a given block, an out-of-vocabulary embedding is returned. Each block will return a `batch_size X full_embedding_dim` tensor. 3. A mask is applied to the embeddings returned from each block in order to convert the out-of-vocabulary embeddings to vectors of zeros. That is, for each item in the batch, a single non-zero embedding vector is returned from all the block embeddings. 4. Embeddings retrieved from the blocks are combined using *sum* to produce the final `batch_size X full_embedding_dim` tensor.<jupyter_code>class MDEmbedding(keras.layers.Layer):
def __init__(
self, blocks_vocabulary, blocks_embedding_dims, base_embedding_dim, name=None
):
super().__init__(name=name)
self.num_blocks = len(blocks_vocabulary)
# Create vocab to block lookup.
keys = []
values = []
for block_idx, block_vocab in enumerate(blocks_vocabulary):
keys.extend(block_vocab)
values.extend([block_idx] * len(block_vocab))
self.vocab_to_block = tf.lookup.StaticHashTable(
tf.lookup.KeyValueTensorInitializer(keys, values), default_value=-1
)
self.block_embedding_encoders = []
self.block_embedding_projectors = []
# Create block embedding encoders and projectors.
for idx in range(self.num_blocks):
vocabulary = blocks_vocabulary[idx]
embedding_dim = blocks_embedding_dims[idx]
block_embedding_encoder = embedding_encoder(
vocabulary, embedding_dim, num_oov_indices=1
)
self.block_embedding_encoders.append(block_embedding_encoder)
if embedding_dim == base_embedding_dim:
self.block_embedding_projectors.append(layers.Lambda(lambda x: x))
else:
self.block_embedding_projectors.append(
layers.Dense(units=base_embedding_dim)
)
def call(self, inputs):
# Get block index for each input item.
block_indicies = self.vocab_to_block.lookup(inputs)
# Initialize output embeddings to zeros.
embeddings = tf.zeros(shape=(tf.shape(inputs)[0], base_embedding_dim))
# Generate embeddings from blocks.
for idx in range(self.num_blocks):
# Lookup embeddings from the current block.
block_embeddings = self.block_embedding_encoders[idx](inputs)
# Project embeddings to base_embedding_dim.
block_embeddings = self.block_embedding_projectors[idx](block_embeddings)
# Create a mask to filter out embeddings of items that do not belong to the current block.
mask = tf.expand_dims(tf.cast(block_indicies == idx, tf.dtypes.float32), 1)
# Set the embeddings for the items not belonging to the current block to zeros.
block_embeddings = block_embeddings * mask
# Add the block embeddings to the final embeddings.
embeddings += block_embeddings
return embeddings<jupyter_output><empty_output><jupyter_text>Implement the memory-efficient model. In this experiment, we are going to use the **Quotient-Remainder** technique to reduce the size of the user embeddings, and the **Mixed Dimension** technique to reduce the size of the movie embeddings. While in the [paper](https://arxiv.org/abs/1909.11810), an alpha-power rule is used to determine the dimensions of the embedding of each block, we simply set the number of blocks and the dimensions of the embeddings of each block based on the histogram visualization of movie popularity.<jupyter_code>movie_frequencies = ratings_data["movie_id"].value_counts()
movie_frequencies.hist(bins=10)<jupyter_output><empty_output><jupyter_text>You can see that we can group the movies into three blocks, and assign them 64, 32, and 16 embedding dimensions, respectively. Feel free to experiment with a different number of blocks and dimensions.<jupyter_code>sorted_movie_vocabulary = list(movie_frequencies.keys())
movie_blocks_vocabulary = [
sorted_movie_vocabulary[:400], # high popularity movies block
sorted_movie_vocabulary[400:1700], # normal popularity movies block
sorted_movie_vocabulary[1700:], # low popularity movies block
]
movie_blocks_embedding_dims = [64, 32, 16]
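# The three blocks above get 64-, 32- and 16-dimensional embeddings; inside
# `MDEmbedding`, the two smaller blocks are projected back up to
# `base_embedding_dim` by Dense projectors.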
user_embedding_num_buckets = len(user_vocabulary) // 50
def create_memory_efficient_model():
# Take the user as an input.
user_input = layers.Input(name="user_id", shape=(), dtype="string")
# Get user embedding.
user_embedding = QREmbedding(
vocabulary=user_vocabulary,
embedding_dim=base_embedding_dim,
num_buckets=user_embedding_num_buckets,
name="user_embedding",
)(user_input)
# Take the movie as an input.
movie_input = layers.Input(name="movie_id", shape=(), dtype="string")
# Get embedding.
movie_embedding = MDEmbedding(
blocks_vocabulary=movie_blocks_vocabulary,
blocks_embedding_dims=movie_blocks_embedding_dims,
base_embedding_dim=base_embedding_dim,
name="movie_embedding",
)(movie_input)
# Compute dot product similarity between user and movie embeddings.
logits = layers.Dot(axes=1, name="dot_similarity")(
[user_embedding, movie_embedding]
)
# Convert to rating scale.
prediction = keras.activations.sigmoid(logits) * 5
# Create the model.
model = keras.Model(
inputs=[user_input, movie_input], outputs=prediction, name="baseline_model"
)
return model
memory_efficient_model = create_memory_efficient_model()
memory_efficient_model.summary()<jupyter_output><empty_output><jupyter_text>Notice that the number of trainable parameters is 117,968, which is more than 5x smaller than the number of parameters in the baseline model.<jupyter_code>history = run_experiment(memory_efficient_model)
plt.plot(history.history["loss"])
plt.plot(history.history["val_loss"])
plt.title("model loss")
plt.ylabel("loss")
plt.xlabel("epoch")
plt.legend(["train", "eval"], loc="upper left")
plt.show()<jupyter_output><empty_output> | keras-io/examples/keras_recipes/ipynb/memory_efficient_embeddings.ipynb/0 | {
"file_path": "keras-io/examples/keras_recipes/ipynb/memory_efficient_embeddings.ipynb",
"repo_id": "keras-io",
"token_count": 5816
} | 101 |
# Endpoint layer pattern
**Author:** [fchollet](https://twitter.com/fchollet)<br>
**Date created:** 2019/05/10<br>
**Last modified:** 2023/11/22<br>
**Description:** Demonstration of the "endpoint layer" pattern (layer that handles loss management).
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/keras_recipes/ipynb/endpoint_layer_pattern.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/keras_recipes/endpoint_layer_pattern.py)
---
## Setup
```python
import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import tensorflow as tf
import keras
import numpy as np
```
---
## Usage of endpoint layers in the Functional API
An "endpoint layer" has access to the model's targets, and creates arbitrary losses
in `call()` using `self.add_loss()` and `Metric.update_state()`.
This enables you to define losses and
metrics that don't match the usual signature `fn(y_true, y_pred, sample_weight=None)`.
Note that you could have separate metrics for training and eval with this pattern.
```python
class LogisticEndpoint(keras.layers.Layer):
def __init__(self, name=None):
super().__init__(name=name)
self.loss_fn = keras.losses.BinaryCrossentropy(from_logits=True)
self.accuracy_metric = keras.metrics.BinaryAccuracy(name="accuracy")
def call(self, logits, targets=None, sample_weight=None):
if targets is not None:
# Compute the training-time loss value and add it
# to the layer using `self.add_loss()`.
loss = self.loss_fn(targets, logits, sample_weight)
self.add_loss(loss)
# Log the accuracy as a metric (we could log arbitrary metrics,
# including different metrics for training and inference.)
self.accuracy_metric.update_state(targets, logits, sample_weight)
# Return the inference-time prediction tensor (for `.predict()`).
return tf.nn.softmax(logits)
inputs = keras.Input((764,), name="inputs")
logits = keras.layers.Dense(1)(inputs)
targets = keras.Input((1,), name="targets")
sample_weight = keras.Input((1,), name="sample_weight")
preds = LogisticEndpoint()(logits, targets, sample_weight)
model = keras.Model([inputs, targets, sample_weight], preds)
data = {
"inputs": np.random.random((1000, 764)),
"targets": np.random.random((1000, 1)),
"sample_weight": np.random.random((1000, 1)),
}
model.compile(keras.optimizers.Adam(1e-3))
model.fit(data, epochs=2)
```
<div class="k-default-codeblock">
```
Epoch 1/2
27/32 ━━━━━━━━━━━━━━━━[37m━━━━ 0s 2ms/step - loss: 0.3664
WARNING: All log messages before absl::InitializeLog() is called are written to STDERR
I0000 00:00:1700705222.380735 3351467 device_compiler.h:186] Compiled cluster using XLA! This line is logged at most once for the lifetime of the process.
32/32 ━━━━━━━━━━━━━━━━━━━━ 2s 31ms/step - loss: 0.3663
Epoch 2/2
32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.3627
<keras.src.callbacks.history.History at 0x7f13401b1e10>
```
</div>
---
## Exporting an inference-only model
Simply don't include `targets` in the model. The weights stay the same.
```python
inputs = keras.Input((764,), name="inputs")
logits = keras.layers.Dense(1)(inputs)
preds = LogisticEndpoint()(logits, targets=None, sample_weight=None)
inference_model = keras.Model(inputs, preds)
inference_model.set_weights(model.get_weights())
preds = inference_model.predict(np.random.random((1000, 764)))
```
<div class="k-default-codeblock">
```
32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step
```
</div>
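As a quick, hypothetical check (not part of the original example), you can
confirm that the training model and the inference-only model now share
identical weights:

```python
for w_train, w_infer in zip(model.get_weights(), inference_model.get_weights()):
    assert (w_train == w_infer).all()
```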
---
## Usage of loss endpoint layers in subclassed models
```python
class LogReg(keras.Model):
def __init__(self):
super().__init__()
self.dense = keras.layers.Dense(1)
self.logistic_endpoint = LogisticEndpoint()
def call(self, inputs):
# Note that all inputs should be in the first argument
# since we want to be able to call `model.fit(inputs)`.
logits = self.dense(inputs["inputs"])
preds = self.logistic_endpoint(
logits=logits,
targets=inputs["targets"],
sample_weight=inputs["sample_weight"],
)
return preds
model = LogReg()
data = {
"inputs": np.random.random((1000, 764)),
"targets": np.random.random((1000, 1)),
"sample_weight": np.random.random((1000, 1)),
}
model.compile(keras.optimizers.Adam(1e-3))
model.fit(data, epochs=2)
```
<div class="k-default-codeblock">
```
Epoch 1/2
32/32 ━━━━━━━━━━━━━━━━━━━━ 1s 9ms/step - loss: 0.3529
Epoch 2/2
32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step - loss: 0.3509
<keras.src.callbacks.history.History at 0x7f132c1d1450>
```
</div> | keras-io/examples/keras_recipes/md/endpoint_layer_pattern.md/0 | {
"file_path": "keras-io/examples/keras_recipes/md/endpoint_layer_pattern.md",
"repo_id": "keras-io",
"token_count": 2012
} | 102 |
"""
Title: Customizing the convolution operation of a Conv2D layer
Author: [lukewood](https://lukewood.xyz)
Date created: 11/03/2021
Last modified: 11/03/2021
Description: This example shows how to implement custom convolution layers using the `Conv.convolution_op()` API.
Accelerator: GPU
"""
"""
## Introduction
You may sometimes need to implement custom versions of convolution layers like `Conv1D` and `Conv2D`.
Keras enables you to do this without implementing the entire layer from scratch: you can reuse
most of the base convolution layer and just customize the convolution op itself via the
`convolution_op()` method.
This method was introduced in Keras 2.7. So before using the
`convolution_op()` API, ensure that you are running Keras version 2.7.0 or greater.
"""
"""
## A Simple `StandardizedConv2D` implementation
There are two ways to use the `Conv.convolution_op()` API. The first way
is to override the `convolution_op()` method on a convolution layer subclass.
Using this approach, we can quickly implement a
[StandardizedConv2D](https://arxiv.org/abs/1903.10520) as shown below.
"""
import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import tensorflow as tf
import keras
from keras import layers
import numpy as np
class StandardizedConv2DWithOverride(layers.Conv2D):
def convolution_op(self, inputs, kernel):
mean, var = tf.nn.moments(kernel, axes=[0, 1, 2], keepdims=True)
return tf.nn.conv2d(
inputs,
(kernel - mean) / tf.sqrt(var + 1e-10),
padding="VALID",
strides=list(self.strides),
name=self.__class__.__name__,
)
"""
The other way to use the `Conv.convolution_op()` API is to directly call the
`convolution_op()` method from the `call()` method of a convolution layer subclass.
A comparable class implemented using this approach is shown below.
"""
class StandardizedConv2DWithCall(layers.Conv2D):
def call(self, inputs):
mean, var = tf.nn.moments(self.kernel, axes=[0, 1, 2], keepdims=True)
result = self.convolution_op(
inputs, (self.kernel - mean) / tf.sqrt(var + 1e-10)
)
if self.use_bias:
result = result + self.bias
return result
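"""
As a quick sanity check (a hypothetical snippet, not part of the original
example), the two implementations should agree when they share the same weights:
"""

x_demo = tf.random.normal((1, 8, 8, 3))
conv_a = StandardizedConv2DWithOverride(4, kernel_size=3)
conv_b = StandardizedConv2DWithCall(4, kernel_size=3)
_ = conv_a(x_demo), conv_b(x_demo)  # build both layers
conv_b.set_weights(conv_a.get_weights())
print(np.allclose(conv_a(x_demo), conv_b(x_demo), atol=1e-6))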
"""
## Example Usage
Both of these layers work as drop-in replacements for `Conv2D`. The following
demonstration performs classification on the MNIST dataset.
"""
# Model / data parameters
num_classes = 10
input_shape = (28, 28, 1)
# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
# Scale images to the [0, 1] range
x_train = x_train.astype("float32") / 255
x_test = x_test.astype("float32") / 255
# Make sure images have shape (28, 28, 1)
x_train = np.expand_dims(x_train, -1)
x_test = np.expand_dims(x_test, -1)
print("x_train shape:", x_train.shape)
print(x_train.shape[0], "train samples")
print(x_test.shape[0], "test samples")
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = keras.Sequential(
[
keras.layers.Input(shape=input_shape),
StandardizedConv2DWithCall(32, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
StandardizedConv2DWithOverride(64, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Flatten(),
layers.Dropout(0.5),
layers.Dense(num_classes, activation="softmax"),
]
)
model.summary()
"""
"""
batch_size = 128
epochs = 5
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
model.fit(x_train, y_train, batch_size=batch_size, epochs=5, validation_split=0.1)
"""
## Conclusion
The `Conv.convolution_op()` API provides an easy and readable way to implement custom
convolution layers. A `StandardizedConvolution` implementation using the API is quite
terse, consisting of only four lines of code.
"""
| keras-io/examples/keras_recipes/subclassing_conv_layers.py/0 | {
"file_path": "keras-io/examples/keras_recipes/subclassing_conv_layers.py",
"repo_id": "keras-io",
"token_count": 1477
} | 103 |
<jupyter_start><jupyter_text>Character-level recurrent sequence-to-sequence model**Author:** [fchollet](https://twitter.com/fchollet)**Date created:** 2017/09/29**Last modified:** 2023/11/22**Description:** Character-level recurrent sequence-to-sequence model. IntroductionThis example demonstrates how to implement a basic character-levelrecurrent sequence-to-sequence model. We apply it to translatingshort English sentences into short French sentences,character-by-character. Note that it is fairly unusual todo character-level machine translation, as word-levelmodels are more common in this domain.**Summary of the algorithm**- We start with input sequences from a domain (e.g. English sentences) and corresponding target sequences from another domain (e.g. French sentences).- An encoder LSTM turns input sequences to 2 state vectors (we keep the last LSTM state and discard the outputs).- A decoder LSTM is trained to turn the target sequences into the same sequence but offset by one timestep in the future, a training process called "teacher forcing" in this context. It uses as initial state the state vectors from the encoder. Effectively, the decoder learns to generate `targets[t+1...]` given `targets[...t]`, conditioned on the input sequence.- In inference mode, when we want to decode unknown input sequences, we: - Encode the input sequence into state vectors - Start with a target sequence of size 1 (just the start-of-sequence character) - Feed the state vectors and 1-char target sequence to the decoder to produce predictions for the next character - Sample the next character using these predictions (we simply use argmax). - Append the sampled character to the target sequence - Repeat until we generate the end-of-sequence character or we hit the character limit. Setup<jupyter_code>import numpy as np
import keras
import os
from pathlib import Path<jupyter_output><empty_output><jupyter_text>Download the data<jupyter_code>fpath = keras.utils.get_file(origin="http://www.manythings.org/anki/fra-eng.zip")
dirpath = Path(fpath).parent.absolute()
os.system(f"unzip -q {fpath} -d {dirpath}")<jupyter_output><empty_output><jupyter_text>Configuration<jupyter_code>batch_size = 64 # Batch size for training.
epochs = 100 # Number of epochs to train for.
latent_dim = 256 # Latent dimensionality of the encoding space.
num_samples = 10000 # Number of samples to train on.
# Path to the data txt file on disk.
data_path = os.path.join(dirpath, "fra.txt")<jupyter_output><empty_output><jupyter_text>Prepare the data<jupyter_code># Vectorize the data.
input_texts = []
target_texts = []
input_characters = set()
target_characters = set()
with open(data_path, "r", encoding="utf-8") as f:
lines = f.read().split("\n")
for line in lines[: min(num_samples, len(lines) - 1)]:
input_text, target_text, _ = line.split("\t")
# We use "tab" as the "start sequence" character
# for the targets, and "\n" as "end sequence" character.
target_text = "\t" + target_text + "\n"
input_texts.append(input_text)
target_texts.append(target_text)
for char in input_text:
if char not in input_characters:
input_characters.add(char)
for char in target_text:
if char not in target_characters:
target_characters.add(char)
input_characters = sorted(list(input_characters))
target_characters = sorted(list(target_characters))
num_encoder_tokens = len(input_characters)
num_decoder_tokens = len(target_characters)
max_encoder_seq_length = max([len(txt) for txt in input_texts])
max_decoder_seq_length = max([len(txt) for txt in target_texts])
print("Number of samples:", len(input_texts))
print("Number of unique input tokens:", num_encoder_tokens)
print("Number of unique output tokens:", num_decoder_tokens)
print("Max sequence length for inputs:", max_encoder_seq_length)
print("Max sequence length for outputs:", max_decoder_seq_length)
input_token_index = dict([(char, i) for i, char in enumerate(input_characters)])
target_token_index = dict([(char, i) for i, char in enumerate(target_characters)])
encoder_input_data = np.zeros(
(len(input_texts), max_encoder_seq_length, num_encoder_tokens),
dtype="float32",
)
decoder_input_data = np.zeros(
(len(input_texts), max_decoder_seq_length, num_decoder_tokens),
dtype="float32",
)
decoder_target_data = np.zeros(
(len(input_texts), max_decoder_seq_length, num_decoder_tokens),
dtype="float32",
)
for i, (input_text, target_text) in enumerate(zip(input_texts, target_texts)):
for t, char in enumerate(input_text):
encoder_input_data[i, t, input_token_index[char]] = 1.0
encoder_input_data[i, t + 1 :, input_token_index[" "]] = 1.0
for t, char in enumerate(target_text):
# decoder_target_data is ahead of decoder_input_data by one timestep
decoder_input_data[i, t, target_token_index[char]] = 1.0
if t > 0:
# decoder_target_data will be ahead by one timestep
# and will not include the start character.
decoder_target_data[i, t - 1, target_token_index[char]] = 1.0
decoder_input_data[i, t + 1 :, target_token_index[" "]] = 1.0
decoder_target_data[i, t:, target_token_index[" "]] = 1.0<jupyter_output><empty_output><jupyter_text>Build the model<jupyter_code># Define an input sequence and process it.
encoder_inputs = keras.Input(shape=(None, num_encoder_tokens))
encoder = keras.layers.LSTM(latent_dim, return_state=True)
encoder_outputs, state_h, state_c = encoder(encoder_inputs)
# We discard `encoder_outputs` and only keep the states.
encoder_states = [state_h, state_c]
# Set up the decoder, using `encoder_states` as initial state.
decoder_inputs = keras.Input(shape=(None, num_decoder_tokens))
# We set up our decoder to return full output sequences,
# and to return internal states as well. We don't use the
# return states in the training model, but we will use them in inference.
decoder_lstm = keras.layers.LSTM(latent_dim, return_sequences=True, return_state=True)
decoder_outputs, _, _ = decoder_lstm(decoder_inputs, initial_state=encoder_states)
decoder_dense = keras.layers.Dense(num_decoder_tokens, activation="softmax")
decoder_outputs = decoder_dense(decoder_outputs)
# Define the model that will turn
# `encoder_input_data` & `decoder_input_data` into `decoder_target_data`
model = keras.Model([encoder_inputs, decoder_inputs], decoder_outputs)<jupyter_output><empty_output><jupyter_text>Train the model<jupyter_code>model.compile(
optimizer="rmsprop", loss="categorical_crossentropy", metrics=["accuracy"]
)
model.fit(
[encoder_input_data, decoder_input_data],
decoder_target_data,
batch_size=batch_size,
epochs=epochs,
validation_split=0.2,
)
# Save model
model.save("s2s_model.keras")<jupyter_output><empty_output><jupyter_text>Run inference (sampling)1. encode input and retrieve initial decoder state2. run one step of decoder with this initial stateand a "start of sequence" token as target.Output will be the next target token.3. Repeat with the current target token and current states<jupyter_code># Define sampling models
# Restore the model and construct the encoder and decoder.
model = keras.models.load_model("s2s_model.keras")
encoder_inputs = model.input[0] # input_1
encoder_outputs, state_h_enc, state_c_enc = model.layers[2].output # lstm_1
encoder_states = [state_h_enc, state_c_enc]
encoder_model = keras.Model(encoder_inputs, encoder_states)
decoder_inputs = model.input[1] # input_2
decoder_state_input_h = keras.Input(shape=(latent_dim,))
decoder_state_input_c = keras.Input(shape=(latent_dim,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
decoder_lstm = model.layers[3]
decoder_outputs, state_h_dec, state_c_dec = decoder_lstm(
decoder_inputs, initial_state=decoder_states_inputs
)
decoder_states = [state_h_dec, state_c_dec]
decoder_dense = model.layers[4]
decoder_outputs = decoder_dense(decoder_outputs)
decoder_model = keras.Model(
[decoder_inputs] + decoder_states_inputs, [decoder_outputs] + decoder_states
)
# Reverse-lookup token index to decode sequences back to
# something readable.
reverse_input_char_index = dict((i, char) for char, i in input_token_index.items())
reverse_target_char_index = dict((i, char) for char, i in target_token_index.items())
def decode_sequence(input_seq):
# Encode the input as state vectors.
states_value = encoder_model.predict(input_seq, verbose=0)
# Generate empty target sequence of length 1.
target_seq = np.zeros((1, 1, num_decoder_tokens))
# Populate the first character of target sequence with the start character.
target_seq[0, 0, target_token_index["\t"]] = 1.0
# Sampling loop for a batch of sequences
# (to simplify, here we assume a batch of size 1).
stop_condition = False
decoded_sentence = ""
while not stop_condition:
output_tokens, h, c = decoder_model.predict(
[target_seq] + states_value, verbose=0
)
# Sample a token
sampled_token_index = np.argmax(output_tokens[0, -1, :])
sampled_char = reverse_target_char_index[sampled_token_index]
decoded_sentence += sampled_char
# Exit condition: either hit max length
# or find stop character.
if sampled_char == "\n" or len(decoded_sentence) > max_decoder_seq_length:
stop_condition = True
# Update the target sequence (of length 1).
target_seq = np.zeros((1, 1, num_decoder_tokens))
target_seq[0, 0, sampled_token_index] = 1.0
# Update states
states_value = [h, c]
return decoded_sentence<jupyter_output><empty_output><jupyter_text>You can now generate decoded sentences as such:<jupyter_code>for seq_index in range(20):
# Take one sequence (part of the training set)
# for trying out decoding.
input_seq = encoder_input_data[seq_index : seq_index + 1]
decoded_sentence = decode_sequence(input_seq)
print("-")
print("Input sentence:", input_texts[seq_index])
print("Decoded sentence:", decoded_sentence)<jupyter_output><empty_output> | keras-io/examples/nlp/ipynb/lstm_seq2seq.ipynb/0 | {
"file_path": "keras-io/examples/nlp/ipynb/lstm_seq2seq.ipynb",
"repo_id": "keras-io",
"token_count": 3665
} | 104 |
<jupyter_start><jupyter_text>Abstractive Summarization with Hugging Face Transformers**Author:** Sreyan Ghosh**Date created:** 2022/07/04**Last modified:** 2022/08/28**Description:** Training T5 using Hugging Face Transformers for Abstractive Summarization. IntroductionAutomatic summarization is one of the central problems inNatural Language Processing (NLP). It poses several challenges relating to languageunderstanding (e.g. identifying important content)and generation (e.g. aggregating and rewording the identified content into a summary).In this tutorial, we tackle the single-document summarization taskwith an abstractive modeling approach. The primary idea here is to generate a short,single-sentence news summary answering the question “What is the news article about?”.This approach to summarization is also known as *Abstractive Summarization* and hasseen growing interest among researchers in various disciplines.Following prior work, we aim to tackle this problem using asequence-to-sequence model. [Text-to-Text Transfer Transformer (`T5`)](https://arxiv.org/abs/1910.10683)is a [Transformer-based](https://arxiv.org/abs/1706.03762) model built on the encoder-decoderarchitecture, pretrained on a multi-task mixture of unsupervised and supervised tasks where each taskis converted into a text-to-text format. T5 shows impressive results in a variety of sequence-to-sequence(sequence in this notebook refers to text) like summarization, translation, etc.In this notebook, we will fine-tune the pretrained T5 on the Abstractive Summarizationtask using Hugging Face Transformers on the `XSum` dataset loaded from Hugging Face Datasets. Setup Installing the requirements<jupyter_code>!pip install transformers==4.20.0
!pip install keras_nlp==0.3.0
!pip install datasets
!pip install huggingface-hub
!pip install nltk
!pip install rouge-score<jupyter_output><empty_output><jupyter_text>Importing the necessary libraries<jupyter_code>import os
import logging
import nltk
import numpy as np
import tensorflow as tf
from tensorflow import keras
# Only log error messages
tf.get_logger().setLevel(logging.ERROR)
os.environ["TOKENIZERS_PARALLELISM"] = "false"<jupyter_output><empty_output><jupyter_text>Define certain variables<jupyter_code># The percentage of the dataset you want to split as train and test
TRAIN_TEST_SPLIT = 0.1
MAX_INPUT_LENGTH = 1024 # Maximum length of the input to the model
MIN_TARGET_LENGTH = 5 # Minimum length of the output by the model
MAX_TARGET_LENGTH = 128 # Maximum length of the output by the model
BATCH_SIZE = 8 # Batch-size for training our model
LEARNING_RATE = 2e-5 # Learning-rate for training our model
MAX_EPOCHS = 1 # Maximum number of epochs we will train the model for
# This notebook is built on the t5-small checkpoint from the Hugging Face Model Hub
MODEL_CHECKPOINT = "t5-small"<jupyter_output><empty_output><jupyter_text>Load the datasetWe will now download the [Extreme Summarization (XSum)](https://arxiv.org/abs/1808.08745).The dataset consists of BBC articles and accompanying single sentence summaries.Specifically, each article is prefaced with an introductory sentence (aka summary) which isprofessionally written, typically by the author of the article. That dataset has 226,711 articlesdivided into training (90%, 204,045), validation (5%, 11,332), and test (5%, 11,334) sets.Following much of literature, we use the Recall-Oriented Understudy for Gisting Evaluation(ROUGE) metric to evaluate our sequence-to-sequence abstrative summarization approach.We will use the [Hugging Face Datasets](https://github.com/huggingface/datasets) library to downloadthe data we need to use for training and evaluation. This can be easily done with the`load_dataset` function.<jupyter_code>from datasets import load_dataset
raw_datasets = load_dataset("xsum", split="train")<jupyter_output><empty_output><jupyter_text>The dataset has the following fields:- **document**: the original BBC article to be summarized- **summary**: the single sentence summary of the BBC article- **id**: ID of the document-summary pair<jupyter_code>print(raw_datasets)<jupyter_output><empty_output><jupyter_text>We will now see how the data looks like:<jupyter_code>print(raw_datasets[0])<jupyter_output><empty_output><jupyter_text>For the sake of demonstrating the workflow, in this notebook we will only takesmall stratified balanced splits (10%) of the train as our training and test sets.We can easily split the dataset using the `train_test_split` method which expectsthe split size and the name of the column relative to which you want to stratify.<jupyter_code>raw_datasets = raw_datasets.train_test_split(
train_size=TRAIN_TEST_SPLIT, test_size=TRAIN_TEST_SPLIT
)<jupyter_output><empty_output><jupyter_text>Data Pre-processingBefore we can feed those texts to our model, we need to pre-process them and get themready for the task. This is done by a Hugging Face Transformers `Tokenizer` which will tokenizethe inputs (including converting the tokens to their corresponding IDs in the pretrainedvocabulary) and put it in a format the model expects, as well as generate the other inputsthat model requires.The `from_pretrained()` method expects the name of a model from the Hugging Face Model Hub. This isexactly similar to MODEL_CHECKPOINT declared earlier and we will just pass that.<jupyter_code>from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(MODEL_CHECKPOINT)<jupyter_output><empty_output><jupyter_text>If you are using one of the five T5 checkpoints we have to prefix the inputs with"summarize:" (the model can also translate and it needs the prefix to know which task ithas to perform).<jupyter_code>if MODEL_CHECKPOINT in ["t5-small", "t5-base", "t5-large", "t5-3b", "t5-11b"]:
prefix = "summarize: "
else:
prefix = ""<jupyter_output><empty_output><jupyter_text>We will write a simple function that helps us in the pre-processing that is compatiblewith Hugging Face Datasets. To summarize, our pre-processing function should:- Tokenize the text dataset (input and targets) into it's corresponding token ids thatwill be used for embedding look-up in BERT- Add the prefix to the tokens- Create additional inputs for the model like `token_type_ids`, `attention_mask`, etc.<jupyter_code>def preprocess_function(examples):
inputs = [prefix + doc for doc in examples["document"]]
model_inputs = tokenizer(inputs, max_length=MAX_INPUT_LENGTH, truncation=True)
# Setup the tokenizer for targets
with tokenizer.as_target_tokenizer():
labels = tokenizer(
examples["summary"], max_length=MAX_TARGET_LENGTH, truncation=True
)
model_inputs["labels"] = labels["input_ids"]
return model_inputs<jupyter_output><empty_output><jupyter_text>To apply this function on all the pairs of sentences in our dataset, we just use the`map` method of our `dataset` object we created earlier. This will apply the function onall the elements of all the splits in `dataset`, so our training and testingdata will be preprocessed in one single command.<jupyter_code>tokenized_datasets = raw_datasets.map(preprocess_function, batched=True)<jupyter_output><empty_output><jupyter_text>Defining the modelNow we can download the pretrained model and fine-tune it. Since our task issequence-to-sequence (both the input and output are text sequences), we use the`TFAutoModelForSeq2SeqLM` class from the Hugging Face Transformers library. Like with thetokenizer, the `from_pretrained` method will download and cache the model for us.The `from_pretrained()` method expects the name of a model from the Hugging Face Model Hub. Asmentioned earlier, we will use the `t5-small` model checkpoint.<jupyter_code>from transformers import TFAutoModelForSeq2SeqLM, DataCollatorForSeq2Seq
model = TFAutoModelForSeq2SeqLM.from_pretrained(MODEL_CHECKPOINT)<jupyter_output><empty_output><jupyter_text>For training Sequence to Sequence models, we need a special kind of data collator,which will not only pad the inputs to the maximum length in the batch, but also thelabels. Thus, we use the `DataCollatorForSeq2Seq` provided by the Hugging Face Transformerslibrary on our dataset. The `return_tensors='tf'` ensures that we get `tf.Tensor`objects back.<jupyter_code>from transformers import DataCollatorForSeq2Seq
data_collator = DataCollatorForSeq2Seq(tokenizer, model=model, return_tensors="tf")<jupyter_output><empty_output><jupyter_text>Next we define our training and testing sets with which we will train our model. Again, Hugging FaceDatasets provides us with the `to_tf_dataset` method which will help us integrate ourdataset with the `collator` defined above. The method expects certain parameters:- **columns**: the columns which will serve as our independant variables- **batch_size**: our batch size for training- **shuffle**: whether we want to shuffle our dataset- **collate_fn**: our collator functionAdditionally, we also define a relatively smaller `generation_dataset` to calculate`ROUGE` scores on the fly while training.<jupyter_code>train_dataset = tokenized_datasets["train"].to_tf_dataset(
batch_size=BATCH_SIZE,
columns=["input_ids", "attention_mask", "labels"],
shuffle=True,
collate_fn=data_collator,
)
test_dataset = tokenized_datasets["test"].to_tf_dataset(
batch_size=BATCH_SIZE,
columns=["input_ids", "attention_mask", "labels"],
shuffle=False,
collate_fn=data_collator,
)
generation_dataset = (
tokenized_datasets["test"]
.shuffle()
.select(list(range(200)))
.to_tf_dataset(
batch_size=BATCH_SIZE,
columns=["input_ids", "attention_mask", "labels"],
shuffle=False,
collate_fn=data_collator,
)
)<jupyter_output><empty_output><jupyter_text>Building and Compiling the modelNow we will define our optimizer and compile the model. The loss calculation is handledinternally and so we need not worry about that!<jupyter_code>optimizer = keras.optimizers.Adam(learning_rate=LEARNING_RATE)
model.compile(optimizer=optimizer)<jupyter_output><empty_output><jupyter_text>Training and Evaluating the modelTo evaluate our model on-the-fly while training, we will define `metric_fn` which willcalculate the `ROUGE` score between the ground-truth and predictions.<jupyter_code>import keras_nlp
rouge_l = keras_nlp.metrics.RougeL()
def metric_fn(eval_predictions):
predictions, labels = eval_predictions
decoded_predictions = tokenizer.batch_decode(predictions, skip_special_tokens=True)
for label in labels:
label[label < 0] = tokenizer.pad_token_id # Replace masked label tokens
decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
result = rouge_l(decoded_labels, decoded_predictions)
# We will print only the F1 score, you can use other aggregation metrics as well
result = {"RougeL": result["f1_score"]}
return result<jupyter_output><empty_output><jupyter_text>Now we can finally start training our model!<jupyter_code>from transformers.keras_callbacks import KerasMetricCallback
metric_callback = KerasMetricCallback(
metric_fn, eval_dataset=generation_dataset, predict_with_generate=True
)
callbacks = [metric_callback]
# For now we will use our test set as our validation_data
model.fit(
train_dataset, validation_data=test_dataset, epochs=MAX_EPOCHS, callbacks=callbacks
)<jupyter_output><empty_output><jupyter_text>For best results, we recommend training the model for at least 5 epochs on the entiretraining dataset! InferenceNow we will try to infer the model we trained on an arbitrary article. To do so,we will use the `pipeline` method from Hugging Face Transformers. Hugging Face Transformers providesus with a variety of pipelines to choose from. For our task, we use the `summarization`pipeline.The `pipeline` method takes in the trained model and tokenizer as arguments. The`framework="tf"` argument ensures that you are passing a model that was trained with TF.<jupyter_code>from transformers import pipeline
summarizer = pipeline("summarization", model=model, tokenizer=tokenizer, framework="tf")
summarizer(
raw_datasets["test"][0]["document"],
min_length=MIN_TARGET_LENGTH,
max_length=MAX_TARGET_LENGTH,
)<jupyter_output><empty_output> | keras-io/examples/nlp/ipynb/t5_hf_summarization.ipynb/0 | {
"file_path": "keras-io/examples/nlp/ipynb/t5_hf_summarization.ipynb",
"repo_id": "keras-io",
"token_count": 3716
} | 105 |
# Training a language model from scratch with 🤗 Transformers and TPUs
**Authors:** [Matthew Carrigan](https://twitter.com/carrigmat), [Sayak Paul](https://twitter.com/RisingSayak)<br>
**Date created:** 2023/05/21<br>
**Last modified:** 2023/05/21<br>
**Description:** Train a masked language model on TPUs using 🤗 Transformers.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/nlp/ipynb/mlm_training_tpus.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/nlp/mlm_training_tpus.py)
---
## Introduction
In this example, we cover how to train a masked language model using TensorFlow,
[🤗 Transformers](https://huggingface.co/transformers/index),
and TPUs.
TPU training is a useful skill to have: TPU pods are high-performance and extremely
scalable, making it easy to train models at any scale from a few tens of millions of
parameters up to truly enormous sizes: Google's PaLM model
(over 500 billion parameters!) was trained entirely on TPU pods.
We've previously written a
[**tutorial**](https://huggingface.co/docs/transformers/main/perf_train_tpu_tf)
and a
[**Colab example**](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/tpu_training-tf.ipynb)
showing small-scale TPU training with TensorFlow and introducing the core concepts you
need to understand to get your model working on TPU. However, our Colab example doesn't
contain all the steps needed to train a language model from scratch such as
training the tokenizer. So, we wanted to provide a consolidated example of
walking you through every critical step involved there.
As in our Colab example, we're taking advantage of TensorFlow's very clean TPU support
via XLA and `TPUStrategy`. We'll also be benefiting from the fact that the majority of
the TensorFlow models in 🤗 Transformers are fully
[XLA-compatible](https://huggingface.co/blog/tf-xla-generate).
So, surprisingly little work is needed to get them to run on TPU.
This example is designed to be **scalable** and much closer to a realistic training run
-- although we only use a BERT-sized model by default, the code could be expanded to a
much larger model and a much more powerful TPU pod slice by changing a few configuration
options.
The following diagram gives you a pictorial overview of the steps involved in training a
language model with 🤗 Transformers using TensorFlow and TPUs:
![https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/tf_tpu/tf_tpu_steps.png](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/tf_tpu/tf_tpu_steps.png)
*(Contents of this example overlap with
[this blog post](https://huggingface.co/blog/tf_tpu)).*
---
## Data
We use the
[WikiText dataset (v1)](https://huggingface.co/datasets/wikitext).
You can head over to the
[dataset page on the Hugging Face Hub](https://huggingface.co/datasets/wikitext)
to explore the dataset.
![data_preview_wikitext](https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/data_preview_wikitext.png)
Since the dataset is already available on the Hub in a compatible format, we can easily
load and interact with it using
[🤗 datasets](https://hf.co/docs/datasets).
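For instance, a minimal sketch of loading and inspecting the raw training text looks
like this (the exact configuration name, `wikitext-103-raw-v1`, is an assumption here;
any WikiText configuration on the Hub can be loaded the same way):
```python
from datasets import load_dataset
# Load the raw WikiText training split from the Hugging Face Hub.
wikitext_train = load_dataset("wikitext", "wikitext-103-raw-v1", split="train")
print(wikitext_train)
print(wikitext_train[10]["text"][:200])
```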
However, training a language model from scratch also requires a separate
tokenizer training step. We skip that part in this example for brevity, but
here's the gist of what we can do to train a tokenizer from scratch (a minimal
sketch follows the list below):
- Load the `train` split of the WikiText using 🤗 datasets.
- Leverage
[🤗 tokenizers](https://huggingface.co/docs/tokenizers/index)
to train a
[**Unigram model**](https://huggingface.co/course/chapter6/7?fw=pt).
- Upload the trained tokenizer on the Hub.
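A minimal sketch of those three steps is shown below. The vocabulary size, special
tokens, and batching scheme are illustrative assumptions, not the exact settings used
for the hosted tokenizer:
```python
from datasets import load_dataset
from tokenizers import Tokenizer, models, pre_tokenizers, trainers
raw_train = load_dataset("wikitext", "wikitext-103-raw-v1", split="train")
# Build a Unigram tokenizer and train it on the raw training text.
tokenizer = Tokenizer(models.Unigram())
tokenizer.pre_tokenizer = pre_tokenizers.Whitespace()
trainer = trainers.UnigramTrainer(
    vocab_size=25000,
    special_tokens=["<pad>", "<unk>", "<cls>", "<sep>", "<mask>"],
    unk_token="<unk>",
)
def text_batches(batch_size=1000):
    # Yield lists of raw strings so training can stream over the dataset.
    for start in range(0, len(raw_train), batch_size):
        yield raw_train[start : start + batch_size]["text"]
tokenizer.train_from_iterator(text_batches(), trainer=trainer, length=len(raw_train))
tokenizer.save("unigram-tokenizer-wikitext.json")
# The saved tokenizer file can then be uploaded to the Hub, for example with the
# `huggingface_hub` Python API.
```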
You can find the tokenizer training
code
[**here**](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/language-modeling-tpu#training-a-tokenizer)
and the tokenizer
[**here**](https://huggingface.co/tf-tpu/unigram-tokenizer-wikitext).
This script also allows you to run it with
[**any compatible dataset**](https://huggingface.co/datasets?task_ids=task_ids:language-modeling)
from the Hub.
---
## Tokenizing the data and creating TFRecords
Once the tokenizer is trained, we can use it on all the dataset splits
(`train`, `validation`, and `test` in this case) and create TFRecord shards out of them.
Having the data splits spread across multiple TFRecord shards helps with massively
parallel processing as opposed to having each split in a single TFRecord file.
We tokenize the samples individually. We then take a batch of samples, concatenate them
together, and split them into several chunks of a fixed size (128 in our case). We follow
this strategy rather than tokenizing a batch of samples with a fixed length to avoid
aggressively discarding text content (because of truncation).
We then take these tokenized samples in batches and serialize those batches as multiple
TFRecord shards, where the total dataset length and individual shard size determine the
number of shards. Finally, these shards are pushed to a
[Google Cloud Storage (GCS) bucket](https://cloud.google.com/storage/docs/json_api/v1/buckets).
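To make the chunk-and-serialize idea concrete, here is a heavily simplified sketch
(the helper names, the `chunk_size` default, and the example bucket path are
hypothetical; the full version lives in the script linked below):
```python
import tensorflow as tf
def tokenize_and_chunk(texts, tokenizer, chunk_size=128):
    # Tokenize each sample individually (no truncation, no padding).
    token_ids = tokenizer(texts)["input_ids"]
    # Concatenate the batch into one long stream of token ids.
    stream = [tok for sample in token_ids for tok in sample]
    # Split the stream into fixed-size chunks, dropping the ragged tail.
    usable = (len(stream) // chunk_size) * chunk_size
    return [stream[i : i + chunk_size] for i in range(0, usable, chunk_size)]
def to_tf_example(chunk):
    # Serialize one chunk as a `tf.train.Example` for a TFRecord shard.
    features = {
        "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=chunk)),
        "attention_mask": tf.train.Feature(
            int64_list=tf.train.Int64List(value=[1] * len(chunk))
        ),
    }
    return tf.train.Example(features=tf.train.Features(feature=features))
def write_shard(chunks, path):
    # Write a list of chunks into one TFRecord shard; `path` may point to GCS,
    # e.g. "gs://my-bucket/train/shard-000.tfrecord".
    with tf.io.TFRecordWriter(path) as writer:
        for chunk in chunks:
            writer.write(to_tf_example(chunk).SerializeToString())
```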
If you're using a TPU node for training, then the data needs to be streamed from a GCS
bucket since the node host memory is very small. But for TPU VMs, we can use datasets
locally or even attach persistent storage to those VMs. Since TPU nodes (which is what we
have in a Colab) are still quite heavily used, we based our example on using a GCS bucket
for data storage.
You can see all of this in code in
[this script](https://github.com/huggingface/transformers/blob/main/examples/tensorflow/language-modeling-tpu/prepare_tfrecord_shards.py).
For convenience, we have also hosted the resultant TFRecord shards in
[this repository](https://huggingface.co/datasets/tf-tpu/wikitext-v1-tfrecords)
on the Hub.
Once the data is tokenized and serialized into TFRecord shards, we can proceed toward
training.
---
## Training
### Setup and imports
Let's start by installing 🤗 Transformers.
```python
!pip install transformers -q
```
Then, let's import the modules we need.
```python
import os
import re
import tensorflow as tf
import transformers
```
### Initialize TPUs
Then let's connect to our TPU and determine the distribution strategy:
```python
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.TPUStrategy(tpu)
print(f"Available number of replicas: {strategy.num_replicas_in_sync}")
```
<div class="k-default-codeblock">
```
Available number of replicas: 8
```
</div>
We then load the tokenizer. For more details on the tokenizer, check out
[its repository](https://huggingface.co/tf-tpu/unigram-tokenizer-wikitext).
For the model, we use RoBERTa (the base variant), introduced in
[this paper](https://arxiv.org/abs/1907.11692).
### Initialize the tokenizer
```python
tokenizer = "tf-tpu/unigram-tokenizer-wikitext"
pretrained_model_config = "roberta-base"
tokenizer = transformers.AutoTokenizer.from_pretrained(tokenizer)
config = transformers.AutoConfig.from_pretrained(pretrained_model_config)
config.vocab_size = tokenizer.vocab_size
```
<div class="k-default-codeblock">
```
Downloading (…)okenizer_config.json: 0%| | 0.00/483 [00:00<?, ?B/s]
Downloading (…)/main/tokenizer.json: 0%| | 0.00/1.61M [00:00<?, ?B/s]
Downloading (…)cial_tokens_map.json: 0%| | 0.00/286 [00:00<?, ?B/s]
Downloading (…)lve/main/config.json: 0%| | 0.00/481 [00:00<?, ?B/s]
```
</div>
### Prepare the datasets
We now load the TFRecord shards of the WikiText dataset (which the Hugging Face team
prepared beforehand for this example):
```python
train_dataset_path = "gs://tf-tpu-training-resources/train"
eval_dataset_path = "gs://tf-tpu-training-resources/validation"
training_records = tf.io.gfile.glob(os.path.join(train_dataset_path, "*.tfrecord"))
eval_records = tf.io.gfile.glob(os.path.join(eval_dataset_path, "*.tfrecord"))
```
Now, we will write a utility to count the number of training samples we have. We need to
know this value in order to properly initialize our optimizer later:
```python
def count_samples(file_list):
num_samples = 0
for file in file_list:
filename = file.split("/")[-1]
sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
sample_count = int(sample_count)
num_samples += sample_count
return num_samples
num_train_samples = count_samples(training_records)
print(f"Number of total training samples: {num_train_samples}")
```
<div class="k-default-codeblock">
```
Number of total training samples: 300917
```
</div>
Let's now prepare our datasets for training and evaluation. We start by writing our
utilities. First, we need to be able to decode the TFRecords:
```python
max_sequence_length = 512
def decode_fn(example):
features = {
"input_ids": tf.io.FixedLenFeature(
dtype=tf.int64, shape=(max_sequence_length,)
),
"attention_mask": tf.io.FixedLenFeature(
dtype=tf.int64, shape=(max_sequence_length,)
),
}
return tf.io.parse_single_example(example, features)
```
Here, `max_sequence_length` needs to be the same as the one used when preparing the
TFRecord shards. Refer to
[this script](https://github.com/huggingface/transformers/blob/main/examples/tensorflow/language-modeling-tpu/prepare_tfrecord_shards.py)
for more details.
Next up, we have our masking utility that is responsible for masking parts of the inputs
and preparing labels for the masked language model to learn from. We leverage the
[`DataCollatorForLanguageModeling`](https://huggingface.co/docs/transformers/v4.29.1/en/main_classes/data_collator#transformers.DataCollatorForLanguageModeling)
for this purpose.
```python
# We use a standard masking probability of 0.15. `mlm_probability` denotes
# probability with which we mask the input tokens in a sequence.
mlm_probability = 0.15
data_collator = transformers.DataCollatorForLanguageModeling(
tokenizer=tokenizer, mlm_probability=mlm_probability, mlm=True, return_tensors="tf"
)
def mask_with_collator(batch):
special_tokens_mask = (
~tf.cast(batch["attention_mask"], tf.bool)
| (batch["input_ids"] == tokenizer.cls_token_id)
| (batch["input_ids"] == tokenizer.sep_token_id)
)
batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
batch["input_ids"],
vocab_size=len(tokenizer),
mask_token_id=tokenizer.mask_token_id,
special_tokens_mask=special_tokens_mask,
)
return batch
```
And now is the time to write the final data preparation utility to put it all together in
a `tf.data.Dataset` object:
```python
auto = tf.data.AUTOTUNE
shuffle_buffer_size = 2**18
def prepare_dataset(
records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None
):
num_samples = count_samples(records)
dataset = tf.data.Dataset.from_tensor_slices(records)
if shuffle:
dataset = dataset.shuffle(len(dataset))
dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=auto)
# TF can't infer the total sample count because it doesn't read
# all the records yet, so we assert it here.
dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
dataset = dataset.map(decode_fn, num_parallel_calls=auto)
if shuffle:
assert shuffle_buffer_size is not None
dataset = dataset.shuffle(shuffle_buffer_size)
dataset = dataset.batch(batch_size, drop_remainder=True)
dataset = dataset.map(mask_fn, num_parallel_calls=auto)
dataset = dataset.prefetch(auto)
return dataset
```
Let's prepare our datasets with these utilities:
```python
per_replica_batch_size = 16 # Change as needed.
batch_size = per_replica_batch_size * strategy.num_replicas_in_sync
shuffle_buffer_size = 2**18 # Default corresponds to a 1GB buffer for seq_len 512
train_dataset = prepare_dataset(
training_records,
decode_fn=decode_fn,
mask_fn=mask_with_collator,
batch_size=batch_size,
shuffle=True,
shuffle_buffer_size=shuffle_buffer_size,
)
eval_dataset = prepare_dataset(
eval_records,
decode_fn=decode_fn,
mask_fn=mask_with_collator,
batch_size=batch_size,
shuffle=False,
)
```
Let's now investigate what a single batch of the dataset looks like.
```python
single_batch = next(iter(train_dataset))
print(single_batch.keys())
```
<div class="k-default-codeblock">
```
dict_keys(['attention_mask', 'input_ids', 'labels'])
```
</div>
* `input_ids` denotes the tokenized versions of the input samples containing the mask
tokens as well.
* `attention_mask` denotes the mask to be used when performing attention operations.
* `labels` denotes the actual values of masked tokens the model is supposed to learn from.
```python
for k in single_batch:
if k == "input_ids":
input_ids = single_batch[k]
print(f"Input shape: {input_ids.shape}")
if k == "labels":
labels = single_batch[k]
print(f"Label shape: {labels.shape}")
```
<div class="k-default-codeblock">
```
Input shape: (128, 512)
Label shape: (128, 512)
```
</div>
Now, we can leverage our `tokenizer` to investigate the values of the tokens. Let's start
with `input_ids`:
```python
idx = 0
print("Taking the first sample:\n")
print(tokenizer.decode(input_ids[idx].numpy()))
```
<div class="k-default-codeblock">
```
Taking the first sample:
```
</div>
<div class="k-default-codeblock">
```
they called the character of Tsugum[MASK] one of the[MASK] tragic heroines[MASK] had encountered in a game. Chandran ranked the game as the third best role @[MASK][MASK] playing game from the sixth generation of video[MASK] consoles, saying that it was his favorite in the[MASK]Infinity[MASK], and one his favorite[MASK] games overall[MASK].[MASK]
[SEP][CLS][SEP][CLS][SEP][CLS] =[MASK] Sea party 1914[MASK]– 16 =
[SEP][CLS][SEP][CLS] The Ross Sea party was a component of Sir[MASK] Shackleton's Imperial Trans @-@ Antarctic Expedition 1914 garde 17.[MASK] task was to lay a series of supply depots across the Great Ice Barrier from the Ross Sea to the Beardmore Glacier, along the[MASK] route established by earlier Antarctic expeditions[MASK]. The expedition's main party, under[MASK], was to land[MASK]on the opposite, Weddell Sea coast of Antarctica [MASK] and to march across the continent via the South[MASK] to the Ross Sea. As the main party would be un[MASK] to carry[MASK] fuel and supplies for the whole distance[MASK], their survival depended on the Ross Sea party's depots[MASK][MASK][MASK] would cover the[MASK] quarter of their journey.
[SEP][CLS][MASK] set sail from London on[MASK] ship Endurance, bound[MASK] the Weddell Sea in August 1914. Meanwhile, the Ross Sea party[MASK] gathered in Australia, prior[MASK] Probabl for the Ross Sea in[MASK] second expedition ship, SY Aurora. Organisational and financial problems[MASK]ed their[MASK] until December 1914, which shortened their first depot @-@[MASK] season.[MASK][MASK] arrival the inexperienced party struggle[MASK] to master the art of Antarctic travel, in the[MASK] losing most of their sledge dogs [MASK]อ greater misfortune[MASK]ed when, at the onset of the southern winter, Aurora[MASK] torn from its [MASK]ings during [MASK] severe storm and was un[MASK] to return, leaving the shore party stranded.
[SEP][CLS] Crossroadspite[MASK] setbacks, the Ross Sea party survived inter @-@ personnel disputes, extreme weather[MASK], illness, and Pay deaths of three of its members to carry[MASK] its[MASK] in full during its[MASK] Antarctic season. This success proved ultimate[MASK] without purpose, because Shackleton's Grimaldi expedition was un
```
</div>
As expected, the decoded tokens contain the special tokens including the mask tokens as
well. Let's now investigate the mask tokens:
```python
# Taking the first 30 tokens of the first sequence.
print(labels[0].numpy()[:30])
```
<div class="k-default-codeblock">
```
[-100 -100 -100 -100 -100 -100 -100 -100 -100 43 -100 -100 -100 -100
351 -100 -100 -100 99 -100 -100 -100 -100 -100 -100 -100 -100 -100
-100 -100]
```
</div>
Here, `-100` means that the corresponding tokens in the `input_ids` are NOT masked and
non `-100` values denote the actual values of the masked tokens.
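To make that convention concrete, here is a small optional sanity check (it simply
reuses the `labels` batch and the `tokenizer` from above and is not part of the
original pipeline):
```python
masked_positions = tf.where(labels[0] != -100)[:, 0]
masked_token_ids = tf.gather(labels[0], masked_positions)
print("Number of masked tokens:", int(masked_positions.shape[0]))
print("First masked positions:", masked_positions.numpy()[:10])
print("Their original tokens:", tokenizer.decode(masked_token_ids.numpy()[:10]))
```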
---
## Initialize the model and the optimizer
With the datasets prepared, we now initialize and compile our model and optimizer within
the `strategy.scope()`:
```python
# For this example, we keep this value to 10. But for a realistic run, start with 500.
num_epochs = 10
steps_per_epoch = num_train_samples // (
per_replica_batch_size * strategy.num_replicas_in_sync
)
total_train_steps = steps_per_epoch * num_epochs
learning_rate = 0.0001
weight_decay_rate = 1e-3
with strategy.scope():
model = transformers.TFAutoModelForMaskedLM.from_config(config)
model(
model.dummy_inputs
) # Pass some dummy inputs through the model to ensure all the weights are built
optimizer, schedule = transformers.create_optimizer(
num_train_steps=total_train_steps,
num_warmup_steps=total_train_steps // 20,
init_lr=learning_rate,
weight_decay_rate=weight_decay_rate,
)
model.compile(optimizer=optimizer, metrics=["accuracy"])
```
<div class="k-default-codeblock">
```
No loss specified in compile() - the model's internal loss computation will be used as the loss. Don't panic - this is a common way to train TensorFlow models in Transformers! To disable this behaviour please pass a loss argument, or explicitly pass `loss=None` if you do not want your model to compute a loss.
```
</div>
A couple of things to note here:
* The
[`create_optimizer()`](https://huggingface.co/docs/transformers/main_classes/optimizer_schedules#transformers.create_optimizer)
function creates an Adam optimizer with a learning rate schedule using a warmup phase
followed by a linear decay. Since we're using weight decay here, under the hood,
`create_optimizer()` instantiates
[the right variant of Adam](https://github.com/huggingface/transformers/blob/118e9810687dd713b6be07af79e80eeb1d916908/src/transformers/optimization_tf.py#L172)
to enable weight decay (a rough sketch of an equivalent schedule follows this list).
* While compiling the model, we're NOT using any `loss` argument. This is because
the TensorFlow models internally compute the loss when expected labels are provided.
Based on the model type and the labels being used, `transformers` will automatically
infer the loss to use.
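For intuition, here is a rough, self-contained sketch of a warmup-plus-linear-decay
schedule similar in spirit to what `create_optimizer()` builds. The class below is
hypothetical and simplified (the real implementation also wires in weight decay and a
few edge cases):
```python
import tensorflow as tf
class WarmUpLinearDecay(tf.keras.optimizers.schedules.LearningRateSchedule):
    def __init__(self, peak_lr, warmup_steps, total_steps):
        super().__init__()
        self.peak_lr = peak_lr
        self.warmup_steps = float(warmup_steps)
        self.total_steps = float(total_steps)
    def __call__(self, step):
        step = tf.cast(step, tf.float32)
        # Ramp up linearly to `peak_lr` during warmup...
        warmup_lr = self.peak_lr * step / tf.maximum(1.0, self.warmup_steps)
        # ...then decay linearly towards zero over the remaining steps.
        decay_lr = self.peak_lr * tf.maximum(
            0.0,
            (self.total_steps - step)
            / tf.maximum(1.0, self.total_steps - self.warmup_steps),
        )
        return tf.where(step < self.warmup_steps, warmup_lr, decay_lr)
sketch_schedule = WarmUpLinearDecay(
    peak_lr=learning_rate,
    warmup_steps=total_train_steps // 20,
    total_steps=total_train_steps,
)
```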
### Start training!
Next, we set up a handy callback to push the intermediate training checkpoints to the
Hugging Face Hub. To be able to operationalize this callback, we need to log in to our
Hugging Face account (if you don't have one, you can create one
[here](https://huggingface.co/join) for free). Execute the code below for logging in:
```python
from huggingface_hub import notebook_login
notebook_login()
```
Let's now define the
[`PushToHubCallback`](https://huggingface.co/docs/transformers/main_classes/keras_callbacks#transformers.PushToHubCallback):
```python
hub_model_id = output_dir = "masked-lm-tpu"
callbacks = []
callbacks.append(
transformers.PushToHubCallback(
output_dir=output_dir, hub_model_id=hub_model_id, tokenizer=tokenizer
)
)
```
<div class="k-default-codeblock">
```
Cloning https://huggingface.co/sayakpaul/masked-lm-tpu into local empty directory.
WARNING:huggingface_hub.repository:Cloning https://huggingface.co/sayakpaul/masked-lm-tpu into local empty directory.
Download file tf_model.h5: 0%| | 15.4k/477M [00:00<?, ?B/s]
Clean file tf_model.h5: 0%| | 1.00k/477M [00:00<?, ?B/s]
```
</div>
And now, we're ready to chug the TPUs:
```python
# In the interest of the runtime of this example,
# we limit the number of batches to just 2.
model.fit(
train_dataset.take(2),
validation_data=eval_dataset.take(2),
epochs=num_epochs,
callbacks=callbacks,
)
# After training we also serialize the final model.
model.save_pretrained(output_dir)
```
<div class="k-default-codeblock">
```
Epoch 1/10
2/2 [==============================] - 96s 35s/step - loss: 10.2116 - accuracy: 0.0000e+00 - val_loss: 10.1957 - val_accuracy: 2.2888e-05
Epoch 2/10
2/2 [==============================] - 9s 2s/step - loss: 10.2017 - accuracy: 0.0000e+00 - val_loss: 10.1798 - val_accuracy: 0.0000e+00
Epoch 3/10
2/2 [==============================] - ETA: 0s - loss: 10.1890 - accuracy: 7.6294e-06
WARNING:tensorflow:Callback method `on_train_batch_end` is slow compared to the batch time (batch time: 0.0045s vs `on_train_batch_end` time: 9.1679s). Check your callbacks.
2/2 [==============================] - 35s 27s/step - loss: 10.1890 - accuracy: 7.6294e-06 - val_loss: 10.1604 - val_accuracy: 1.5259e-05
Epoch 4/10
2/2 [==============================] - 8s 2s/step - loss: 10.1733 - accuracy: 1.5259e-05 - val_loss: 10.1145 - val_accuracy: 7.6294e-06
Epoch 5/10
2/2 [==============================] - 34s 26s/step - loss: 10.1336 - accuracy: 1.5259e-05 - val_loss: 10.0666 - val_accuracy: 7.6294e-06
Epoch 6/10
2/2 [==============================] - 10s 2s/step - loss: 10.0906 - accuracy: 6.1035e-05 - val_loss: 10.0200 - val_accuracy: 5.4169e-04
Epoch 7/10
2/2 [==============================] - 33s 25s/step - loss: 10.0360 - accuracy: 6.1035e-04 - val_loss: 9.9646 - val_accuracy: 0.0049
Epoch 8/10
2/2 [==============================] - 8s 2s/step - loss: 9.9830 - accuracy: 0.0038 - val_loss: 9.8938 - val_accuracy: 0.0155
Epoch 9/10
2/2 [==============================] - 33s 26s/step - loss: 9.9067 - accuracy: 0.0116 - val_loss: 9.8225 - val_accuracy: 0.0198
Epoch 10/10
2/2 [==============================] - 8s 2s/step - loss: 9.8302 - accuracy: 0.0196 - val_loss: 9.7454 - val_accuracy: 0.0215
```
</div>
Once your training is complete, you can easily perform inference like so:
```python
from transformers import pipeline
# Replace your `model_id` here.
# Here, we're using a model that the Hugging Face team trained for longer.
model_id = "tf-tpu/roberta-base-epochs-500-no-wd"
unmasker = pipeline("fill-mask", model=model_id, framework="tf")
print(unmasker("Goal of my life is to [MASK]."))
```
<div class="k-default-codeblock">
```
Downloading (…)lve/main/config.json: 0%| | 0.00/649 [00:00<?, ?B/s]
Downloading tf_model.h5: 0%| | 0.00/500M [00:00<?, ?B/s]
All model checkpoint layers were used when initializing TFRobertaForMaskedLM.
```
</div>
<div class="k-default-codeblock">
```
All the layers of TFRobertaForMaskedLM were initialized from the model checkpoint at tf-tpu/roberta-base-epochs-500-no-wd.
If your task is similar to the task the model of the checkpoint was trained on, you can already use TFRobertaForMaskedLM for predictions without further training.
Downloading (…)okenizer_config.json: 0%| | 0.00/683 [00:00<?, ?B/s]
Downloading (…)/main/tokenizer.json: 0%| | 0.00/1.61M [00:00<?, ?B/s]
Downloading (…)cial_tokens_map.json: 0%| | 0.00/286 [00:00<?, ?B/s]
[{'score': 0.10031876713037491, 'token': 52, 'token_str': 'be', 'sequence': 'Goal of my life is to be.'}, {'score': 0.032648470252752304, 'token': 5, 'token_str': '', 'sequence': 'Goal of my life is to .'}, {'score': 0.02152678370475769, 'token': 138, 'token_str': 'work', 'sequence': 'Goal of my life is to work.'}, {'score': 0.019547568634152412, 'token': 984, 'token_str': 'act', 'sequence': 'Goal of my life is to act.'}, {'score': 0.01939115859568119, 'token': 73, 'token_str': 'have', 'sequence': 'Goal of my life is to have.'}]
```
</div>
And that's it!
If you enjoyed this example, we encourage you to check out the full codebase
[here](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/language-modeling-tpu)
and the accompanying blog post
[here](https://huggingface.co/blog/tf_tpu).
| keras-io/examples/nlp/md/mlm_training_tpus.md/0 | {
"file_path": "keras-io/examples/nlp/md/mlm_training_tpus.md",
"repo_id": "keras-io",
"token_count": 8307
} | 106 |
# Text classification with Switch Transformer
**Author:** [Khalid Salama](https://www.linkedin.com/in/khalid-salama-24403144/)<br>
**Date created:** 2020/05/10<br>
**Last modified:** 2021/02/15<br>
**Description:** Implement a Switch Transformer for text classification.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/nlp/ipynb/text_classification_with_switch_transformer.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/nlp/text_classification_with_switch_transformer.py)
---
## Introduction
This example demonstrates the implementation of the
[Switch Transformer](https://arxiv.org/abs/2101.03961) model for text
classification.
The Switch Transformer replaces the feedforward network (FFN) layer in the standard
Transformer with a Mixture of Expert (MoE) routing layer, where each expert operates
independently on the tokens in the sequence. This allows increasing the model size without
increasing the computation needed to process each example.
Note that, for training the Switch Transformer efficiently, data and model parallelism
need to be applied, so that expert modules can run simultaneously, each on its own accelerator.
While the implementation described in the paper uses the
[TensorFlow Mesh](https://github.com/tensorflow/mesh) framework for distributed training,
this example presents a simple, non-distributed implementation of the Switch Transformer
model for demonstration purposes.
---
## Setup
```python
import keras
from keras import ops
from keras import layers
```
---
## Download and prepare dataset
```python
vocab_size = 20000 # Only consider the top 20k words
num_tokens_per_example = 200 # Only consider the first 200 words of each movie review
(x_train, y_train), (x_val, y_val) = keras.datasets.imdb.load_data(num_words=vocab_size)
print(len(x_train), "Training sequences")
print(len(x_val), "Validation sequences")
x_train = keras.utils.pad_sequences(x_train, maxlen=num_tokens_per_example)
x_val = keras.utils.pad_sequences(x_val, maxlen=num_tokens_per_example)
```
<div class="k-default-codeblock">
```
25000 Training sequences
25000 Validation sequences
```
</div>
---
## Define hyperparameters
```python
embed_dim = 32 # Embedding size for each token.
num_heads = 2 # Number of attention heads
ff_dim = 32 # Hidden layer size in feedforward network.
num_experts = 10 # Number of experts used in the Switch Transformer.
batch_size = 50 # Batch size.
learning_rate = 0.001 # Learning rate.
dropout_rate = 0.25 # Dropout rate.
num_epochs = 3 # Number of epochs.
num_tokens_per_batch = (
batch_size * num_tokens_per_example
) # Total number of tokens per batch.
print(f"Number of tokens per batch: {num_tokens_per_batch}")
```
<div class="k-default-codeblock">
```
Number of tokens per batch: 10000
```
</div>
---
## Implement token & position embedding layer
It consists of two separate embedding layers, one for tokens, one for token index (positions).
```python
class TokenAndPositionEmbedding(layers.Layer):
def __init__(self, maxlen, vocab_size, embed_dim):
super().__init__()
self.token_emb = layers.Embedding(input_dim=vocab_size, output_dim=embed_dim)
self.pos_emb = layers.Embedding(input_dim=maxlen, output_dim=embed_dim)
def call(self, x):
maxlen = ops.shape(x)[-1]
positions = ops.arange(start=0, stop=maxlen, step=1)
positions = self.pos_emb(positions)
x = self.token_emb(x)
return x + positions
```
---
## Implement the feedforward network
This is used as the Mixture of Experts in the Switch Transformer.
```python
def create_feedforward_network(ff_dim, embed_dim, name=None):
return keras.Sequential(
[layers.Dense(ff_dim, activation="relu"), layers.Dense(embed_dim)], name=name
)
```
---
## Implement the load-balanced loss
This is an auxiliary loss to encourage a balanced load across experts.
```python
def load_balanced_loss(router_probs, expert_mask):
# router_probs [tokens_per_batch, num_experts] is the probability assigned for
# each expert per token. expert_mask [tokens_per_batch, num_experts] contains
    # the expert with the highest router probability in one-hot format.
num_experts = ops.shape(expert_mask)[-1]
# Get the fraction of tokens routed to each expert.
# density is a vector of length num experts that sums to 1.
density = ops.mean(expert_mask, axis=0)
# Get fraction of probability mass assigned to each expert from the router
# across all tokens. density_proxy is a vector of length num experts that sums to 1.
density_proxy = ops.mean(router_probs, axis=0)
# Want both vectors to have uniform allocation (1/num experts) across all
# num_expert elements. The two vectors will be pushed towards uniform allocation
# when the dot product is minimized.
loss = ops.mean(density_proxy * density) * ops.cast((num_experts**2), "float32")
return loss
```
### Implement the router as a layer
```python
class Router(layers.Layer):
def __init__(self, num_experts, expert_capacity):
self.num_experts = num_experts
self.route = layers.Dense(units=num_experts)
self.expert_capacity = expert_capacity
super().__init__()
def call(self, inputs, training=False):
# inputs shape: [tokens_per_batch, embed_dim]
# router_logits shape: [tokens_per_batch, num_experts]
router_logits = self.route(inputs)
if training:
# Add noise for exploration across experts.
router_logits += keras.random.uniform(
shape=router_logits.shape, minval=0.9, maxval=1.1
)
# Probabilities for each token of what expert it should be sent to.
router_probs = keras.activations.softmax(router_logits, axis=-1)
        # Get the top-1 expert for each token. expert_gate is the top-1 probability
# from the router for each token. expert_index is what expert each token
# is going to be routed to.
expert_gate, expert_index = ops.top_k(router_probs, k=1)
# expert_mask shape: [tokens_per_batch, num_experts]
expert_mask = ops.one_hot(expert_index, self.num_experts)
# Compute load balancing loss.
aux_loss = load_balanced_loss(router_probs, expert_mask)
self.add_loss(aux_loss)
        # Experts have a fixed capacity; ensure we do not exceed it. Construct
        # each token's position within its assigned expert so that no more than
        # `expert_capacity` examples are routed to each expert.
position_in_expert = ops.cast(
ops.cumsum(expert_mask, axis=0) * expert_mask, "int32"
)
# Keep only tokens that fit within expert capacity.
expert_mask *= ops.cast(
ops.less(ops.cast(position_in_expert, "int32"), self.expert_capacity),
"float32",
)
expert_mask_flat = ops.sum(expert_mask, axis=-1)
# Mask out the experts that have overflowed the expert capacity.
expert_gate *= expert_mask_flat
# Combine expert outputs and scaling with router probability.
# combine_tensor shape: [tokens_per_batch, num_experts, expert_capacity]
combined_tensor = ops.expand_dims(
expert_gate
* expert_mask_flat
* ops.squeeze(ops.one_hot(expert_index, self.num_experts), 1),
-1,
) * ops.squeeze(ops.one_hot(position_in_expert, self.expert_capacity), 1)
# Create binary dispatch_tensor [tokens_per_batch, num_experts, expert_capacity]
# that is 1 if the token gets routed to the corresponding expert.
dispatch_tensor = ops.cast(combined_tensor, "float32")
return dispatch_tensor, combined_tensor
```
### Implement a Switch layer
```python
class Switch(layers.Layer):
def __init__(
self, num_experts, embed_dim, ff_dim, num_tokens_per_batch, capacity_factor=1
):
self.num_experts = num_experts
self.embed_dim = embed_dim
self.experts = [
create_feedforward_network(ff_dim, embed_dim) for _ in range(num_experts)
]
self.expert_capacity = num_tokens_per_batch // self.num_experts
self.router = Router(self.num_experts, self.expert_capacity)
super().__init__()
def call(self, inputs):
batch_size = ops.shape(inputs)[0]
num_tokens_per_example = ops.shape(inputs)[1]
# inputs shape: [num_tokens_per_batch, embed_dim]
inputs = ops.reshape(inputs, [num_tokens_per_batch, self.embed_dim])
# dispatch_tensor shape: [expert_capacity, num_experts, tokens_per_batch]
# combine_tensor shape: [tokens_per_batch, num_experts, expert_capacity]
dispatch_tensor, combine_tensor = self.router(inputs)
# expert_inputs shape: [num_experts, expert_capacity, embed_dim]
expert_inputs = ops.einsum("ab,acd->cdb", inputs, dispatch_tensor)
expert_inputs = ops.reshape(
expert_inputs, [self.num_experts, self.expert_capacity, self.embed_dim]
)
# Dispatch to experts
expert_input_list = ops.unstack(expert_inputs, axis=0)
expert_output_list = [
self.experts[idx](expert_input)
for idx, expert_input in enumerate(expert_input_list)
]
# expert_outputs shape: [expert_capacity, num_experts, embed_dim]
expert_outputs = ops.stack(expert_output_list, axis=1)
# expert_outputs_combined shape: [tokens_per_batch, embed_dim]
expert_outputs_combined = ops.einsum(
"abc,xba->xc", expert_outputs, combine_tensor
)
# output shape: [batch_size, num_tokens_per_example, embed_dim]
outputs = ops.reshape(
expert_outputs_combined,
[batch_size, num_tokens_per_example, self.embed_dim],
)
return outputs
```
---
## Implement a Transformer block layer
```python
class TransformerBlock(layers.Layer):
def __init__(self, embed_dim, num_heads, ffn, dropout_rate=0.1):
super().__init__()
self.att = layers.MultiHeadAttention(num_heads=num_heads, key_dim=embed_dim)
# The ffn can be either a standard feedforward network or a switch
# layer with a Mixture of Experts.
self.ffn = ffn
self.layernorm1 = layers.LayerNormalization(epsilon=1e-6)
self.layernorm2 = layers.LayerNormalization(epsilon=1e-6)
self.dropout1 = layers.Dropout(dropout_rate)
self.dropout2 = layers.Dropout(dropout_rate)
def call(self, inputs, training=False):
attn_output = self.att(inputs, inputs)
attn_output = self.dropout1(attn_output, training=training)
out1 = self.layernorm1(inputs + attn_output)
ffn_output = self.ffn(out1)
ffn_output = self.dropout2(ffn_output, training=training)
return self.layernorm2(out1 + ffn_output)
```
---
## Implement the classifier
The `TransformerBlock` layer outputs one vector for each time step of our input sequence.
Here, we take the mean across all time steps and use a feedforward network on top
of it to classify text.
```python
def create_classifier():
switch = Switch(num_experts, embed_dim, ff_dim, num_tokens_per_batch)
transformer_block = TransformerBlock(embed_dim // num_heads, num_heads, switch)
inputs = layers.Input(shape=(num_tokens_per_example,))
embedding_layer = TokenAndPositionEmbedding(
num_tokens_per_example, vocab_size, embed_dim
)
x = embedding_layer(inputs)
x = transformer_block(x)
x = layers.GlobalAveragePooling1D()(x)
x = layers.Dropout(dropout_rate)(x)
x = layers.Dense(ff_dim, activation="relu")(x)
x = layers.Dropout(dropout_rate)(x)
outputs = layers.Dense(2, activation="softmax")(x)
classifier = keras.Model(inputs=inputs, outputs=outputs)
return classifier
```
---
## Train and evaluate the model
```python
def run_experiment(classifier):
classifier.compile(
optimizer=keras.optimizers.Adam(learning_rate),
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
)
history = classifier.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=num_epochs,
validation_data=(x_val, y_val),
)
return history
classifier = create_classifier()
run_experiment(classifier)
```
<div class="k-default-codeblock">
```
Epoch 1/3
500/500 ━━━━━━━━━━━━━━━━━━━━ 251s 485ms/step - accuracy: 0.7121 - loss: 1.5394 - val_accuracy: 0.8748 - val_loss: 1.2891
Epoch 2/3
500/500 ━━━━━━━━━━━━━━━━━━━━ 240s 480ms/step - accuracy: 0.9243 - loss: 1.2063 - val_accuracy: 0.8752 - val_loss: 1.3090
Epoch 3/3
500/500 ━━━━━━━━━━━━━━━━━━━━ 242s 485ms/step - accuracy: 0.9572 - loss: 1.1222 - val_accuracy: 0.8614 - val_loss: 1.3744
<keras.src.callbacks.history.History at 0x7efb79d82a90>
```
</div>
---
## Conclusion
Compared to the standard Transformer architecture, the Switch Transformer can have a much
larger number of parameters, leading to increased model
capacity, while maintaining a reasonable computational cost.
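As a rough, back-of-the-envelope sketch of why this is the case (the values below are placeholders,
not numbers measured from the model trained above; substitute the `embed_dim`, `ff_dim` and
`num_experts` used earlier in this example):
```python
# Parameter count of one two-layer feedforward expert: Dense(ff_dim) + Dense(embed_dim).
def ffn_param_count(embed_dim, ff_dim):
    return (embed_dim * ff_dim + ff_dim) + (ff_dim * embed_dim + embed_dim)

embed_dim, ff_dim, num_experts = 32, 32, 10  # placeholder values
print("Dense FFN parameters:                   ", ffn_param_count(embed_dim, ff_dim))
print("Switch FFN parameters (router excluded):", num_experts * ffn_param_count(embed_dim, ff_dim))
# The Switch layer stores `num_experts` times more feedforward parameters, but each
# token is routed to a single expert, so the per-token compute stays close to that
# of a single dense FFN.
```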
| keras-io/examples/nlp/md/text_classification_with_switch_transformer.md/0 | {
"file_path": "keras-io/examples/nlp/md/text_classification_with_switch_transformer.md",
"repo_id": "keras-io",
"token_count": 5189
} | 107 |
"""
Title: Semantic Similarity with KerasNLP
Author: [Anshuman Mishra](https://github.com/shivance/)
Date created: 2023/02/25
Last modified: 2023/02/25
Description: Use pretrained models from KerasNLP for the Semantic Similarity Task.
Accelerator: GPU
"""
"""
## Introduction
Semantic similarity refers to the task of determining the degree of similarity between two
sentences in terms of their meaning. We already saw in [this](https://keras.io/examples/nlp/semantic_similarity_with_bert/)
example how to use SNLI (Stanford Natural Language Inference) corpus to predict sentence
semantic similarity with the HuggingFace Transformers library. In this tutorial we will
learn how to use [KerasNLP](https://keras.io/keras_nlp/), an extension of the core Keras API,
for the same task. Furthermore, we will discover how KerasNLP effectively reduces boilerplate
code and simplifies the process of building and utilizing models. For more information on KerasNLP,
please refer to [KerasNLP's official documentation](https://keras.io/keras_nlp/).
This guide is broken down into the following parts:
1. *Setup*, task definition, and establishing a baseline.
2. *Establishing baseline* with BERT.
3. *Saving and Reloading* the model.
4. *Performing inference* with the model.
5. *Improving accuracy* with RoBERTa.
## Setup
The following guide uses [Keras Core](https://keras.io/keras_core/) to work in
any of `tensorflow`, `jax` or `torch`. Support for Keras Core is baked into
KerasNLP, simply change the `KERAS_BACKEND` environment variable below to change
the backend you would like to use. We select the `jax` backend below, which will
give us a particularly fast train step.
"""
"""shell
pip install -q --upgrade keras-nlp
pip install -q --upgrade keras # Upgrade to Keras 3.
"""
import os
# Select the backend referred to above; this must be set before importing Keras.
os.environ["KERAS_BACKEND"] = "jax"  # or "tensorflow", or "torch"
import numpy as np
import tensorflow as tf
import keras
import keras_nlp
import tensorflow_datasets as tfds
"""
To load the SNLI dataset, we use the tensorflow-datasets library, which
contains over 550,000 samples in total. However, to ensure that this example runs
quickly, we use only 20% of the training samples.
## Overview of SNLI Dataset
Every sample in the dataset contains three components: `hypothesis`, `premise`,
and `label`. The premise represents the original caption provided to the author of the pair,
while the hypothesis refers to the hypothesis caption created by the author of
the pair. The label is assigned by annotators to indicate the similarity between
the two sentences.
The dataset contains three possible similarity label values: Contradiction, Entailment,
and Neutral. Contradiction represents completely dissimilar sentences, while Entailment
denotes similar meaning sentences. Lastly, Neutral refers to sentences where no clear
similarity or dissimilarity can be established between them.
"""
snli_train = tfds.load("snli", split="train[:20%]")
snli_val = tfds.load("snli", split="validation")
snli_test = tfds.load("snli", split="test")
# Here's what our samples look like; we take a batch of four examples:
sample = snli_test.batch(4).take(1).get_single_element()
sample
"""
### Preprocessing
In our dataset, we have identified that some samples have missing or incorrectly labeled
data, which is denoted by a value of -1. To ensure the accuracy and reliability of our model,
we simply filter out these samples from our dataset.
"""
def filter_labels(sample):
return sample["label"] >= 0
"""
Here's a utility function that splits the example into an `(x, y)` tuple that is suitable
for `model.fit()`. By default, `keras_nlp.models.BertClassifier` will tokenize and pack
together raw strings using a `"[SEP]"` token during training. Therefore, this label
splitting is all the data preparation that we need to perform.
"""
def split_labels(sample):
x = (sample["hypothesis"], sample["premise"])
y = sample["label"]
return x, y
train_ds = (
snli_train.filter(filter_labels)
.map(split_labels, num_parallel_calls=tf.data.AUTOTUNE)
.batch(16)
)
val_ds = (
snli_val.filter(filter_labels)
.map(split_labels, num_parallel_calls=tf.data.AUTOTUNE)
.batch(16)
)
test_ds = (
snli_test.filter(filter_labels)
.map(split_labels, num_parallel_calls=tf.data.AUTOTUNE)
.batch(16)
)
"""
## Establishing baseline with BERT.
We use the BERT model from KerasNLP to establish a baseline for our semantic similarity
task. The `keras_nlp.models.BertClassifier` class attaches a classification head to the BERT
Backbone, mapping the backbone outputs to a logit output suitable for a classification task.
This significantly reduces the need for custom code.
KerasNLP models have built-in tokenization capabilities that handle tokenization by default
based on the selected model. However, users can also use custom preprocessing techniques
as per their specific needs. If we pass a tuple as input, the model will tokenize all the
strings and concatenate them with a `"[SEP]"` separator.
We load this model with pretrained weights via the `from_preset()` method, which also
lets us plug in our own preprocessor if needed. For the SNLI dataset, we set
`num_classes` to 3.
"""
bert_classifier = keras_nlp.models.BertClassifier.from_preset(
"bert_tiny_en_uncased", num_classes=3
)
"""
Please note that the BERT Tiny model has only 4,386,307 trainable parameters.
KerasNLP task models come with compilation defaults. We can now train the model we just
instantiated by calling the `fit()` method.
"""
bert_classifier.fit(train_ds, validation_data=val_ds, epochs=1)
"""
Our BERT classifier achieved an accuracy of around 76% on the validation split. Now,
let's evaluate its performance on the test split.
### Evaluate the performance of the trained model on test data.
"""
bert_classifier.evaluate(test_ds)
"""
Our baseline BERT model achieved a similar accuracy of around 76% on the test split.
Now, let's try to improve its performance by recompiling the model with a slightly
higher learning rate.
"""
bert_classifier = keras_nlp.models.BertClassifier.from_preset(
"bert_tiny_en_uncased", num_classes=3
)
bert_classifier.compile(
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer=keras.optimizers.Adam(5e-5),
metrics=["accuracy"],
)
bert_classifier.fit(train_ds, validation_data=val_ds, epochs=1)
bert_classifier.evaluate(test_ds)
"""
Tweaking the learning rate alone was not enough to boost performance, which
stayed right around 76%. Let's try again, but this time with
`keras.optimizers.AdamW`, and a learning rate schedule.
"""
class TriangularSchedule(keras.optimizers.schedules.LearningRateSchedule):
"""Linear ramp up for `warmup` steps, then linear decay to zero at `total` steps."""
def __init__(self, rate, warmup, total):
self.rate = rate
self.warmup = warmup
self.total = total
def get_config(self):
config = {"rate": self.rate, "warmup": self.warmup, "total": self.total}
return config
def __call__(self, step):
step = keras.ops.cast(step, dtype="float32")
rate = keras.ops.cast(self.rate, dtype="float32")
warmup = keras.ops.cast(self.warmup, dtype="float32")
total = keras.ops.cast(self.total, dtype="float32")
        warmup_rate = rate * step / warmup
cooldown_rate = rate * (total - step) / (total - warmup)
triangular_rate = keras.ops.minimum(warmup_rate, cooldown_rate)
return keras.ops.maximum(triangular_rate, 0.0)
bert_classifier = keras_nlp.models.BertClassifier.from_preset(
"bert_tiny_en_uncased", num_classes=3
)
# Get the total count of training batches.
# This requires walking the dataset to filter all -1 labels.
epochs = 3
total_steps = sum(1 for _ in train_ds.as_numpy_iterator()) * epochs
warmup_steps = int(total_steps * 0.2)
bert_classifier.compile(
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer=keras.optimizers.AdamW(
TriangularSchedule(1e-4, warmup_steps, total_steps)
),
metrics=["accuracy"],
)
bert_classifier.fit(train_ds, validation_data=val_ds, epochs=epochs)
"""
Success! With the learning rate scheduler and the `AdamW` optimizer, our validation
accuracy improved to around 79%.
Now, let's evaluate our final model on the test set and see how it performs.
"""
bert_classifier.evaluate(test_ds)
"""
Our Tiny BERT model achieved an accuracy of approximately 79% on the test set
with the use of a learning rate scheduler. This is a significant improvement over
our previous results. Fine-tuning a pretrained BERT
model can be a powerful tool in natural language processing tasks, and even a
small model like Tiny BERT can achieve impressive results.
Let's save our model for now
and move on to learning how to perform inference with it.
## Save and Reload the model
"""
bert_classifier.save("bert_classifier.keras")
restored_model = keras.models.load_model("bert_classifier.keras")
restored_model.evaluate(test_ds)
"""
## Performing inference with the model.
Let's see how to perform inference with KerasNLP models
"""
# Convert to Hypothesis-Premise pair, for forward pass through model
sample = (sample["hypothesis"], sample["premise"])
sample
"""
The default preprocessor in KerasNLP models handles input tokenization automatically,
so we don't need to perform tokenization explicitly.
"""
predictions = bert_classifier.predict(sample)
def softmax(x):
    # Normalize along the class axis so each row of predictions sums to 1.
    return np.exp(x) / np.exp(x).sum(axis=-1, keepdims=True)
# Convert the logits into class probabilities.
predictions = softmax(predictions)
"""
## Improving accuracy with RoBERTa
Now that we have established a baseline, we can attempt to improve our results
by experimenting with different models. Thanks to KerasNLP, fine-tuning a RoBERTa
checkpoint on the same dataset is easy with just a few lines of code.
"""
# Initializing a RoBERTa classifier from a preset
roberta_classifier = keras_nlp.models.RobertaClassifier.from_preset(
"roberta_base_en", num_classes=3
)
roberta_classifier.fit(train_ds, validation_data=val_ds, epochs=1)
roberta_classifier.evaluate(test_ds)
"""
The RoBERTa base model has significantly more trainable parameters than the BERT
Tiny model, with almost 30 times as many at 124,645,635 parameters. As a result, it took
approximately 1.5 hours to train on a P100 GPU. However, the performance
improvement was substantial, with accuracy increasing to 88% on both the validation
and test splits. With RoBERTa, we were able to fit a maximum batch size of 16 on
our P100 GPU.
Despite using a different model, the steps to perform inference with RoBERTa are
the same as with BERT!
"""
predictions = roberta_classifier.predict(sample)
print(tf.math.argmax(predictions, axis=1).numpy())
"""
We hope this tutorial has been helpful in demonstrating the ease and effectiveness
of using KerasNLP and BERT for semantic similarity tasks.
Throughout this tutorial, we demonstrated how to use a pretrained BERT model to
establish a baseline and improve performance by training a larger RoBERTa model
using just a few lines of code.
The KerasNLP toolbox provides a range of modular building blocks for preprocessing
text, including pretrained state-of-the-art models and low-level Transformer Encoder
layers. We believe that this makes experimenting with natural language solutions
more accessible and efficient.
"""
| keras-io/examples/nlp/semantic_similarity_with_keras_nlp.py/0 | {
"file_path": "keras-io/examples/nlp/semantic_similarity_with_keras_nlp.py",
"repo_id": "keras-io",
"token_count": 3440
} | 108 |
# Actor Critic Method
**Author:** [Apoorv Nandan](https://twitter.com/NandanApoorv)<br>
**Date created:** 2020/05/13<br>
**Last modified:** 2020/05/13<br>
**Description:** Implement Actor Critic Method in CartPole environment.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/rl/ipynb/actor_critic_cartpole.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/rl/actor_critic_cartpole.py)
---
## Introduction
This script shows an implementation of the Actor Critic method on the CartPole-V0 environment.
### Actor Critic Method
As an agent takes actions and moves through an environment, it learns to map
the observed state of the environment to two possible outputs:
1. Recommended action: A probability value for each action in the action space.
The part of the agent responsible for this output is called the **actor**.
2. Estimated rewards in the future: Sum of all rewards it expects to receive in the
future. The part of the agent responsible for this output is the **critic**.
Agent and Critic learn to perform their tasks, such that the recommended actions
from the actor maximize the rewards.
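As a rough sketch of how these two outputs are turned into training signals later in this
example (the numbers below are illustrative stand-ins for quantities computed in the training
loop, not values produced by the agent):
```python
import numpy as np

ret = 12.0               # discounted return actually observed after a step
value = 10.0             # critic's estimate of that return
log_prob = np.log(0.6)   # log-probability of the action the actor sampled

advantage = ret - value                 # how much better the outcome was than predicted
actor_loss = -log_prob * advantage      # push up the probability of better-than-expected actions
critic_loss = 0.5 * (ret - value) ** 2  # regress the value estimate toward the observed return
                                        # (the training loop below uses a Huber loss instead)
print(actor_loss, critic_loss)
```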
### CartPole-V0
A pole is attached to a cart placed on a frictionless track. The agent has to apply
force to move the cart. It is rewarded for every time step the pole
remains upright. The agent, therefore, must learn to keep the pole from falling over.
### References
- [CartPole](http://www.derongliu.org/adp/adp-cdrom/Barto1983.pdf)
- [Actor Critic Method](https://hal.inria.fr/hal-00840470/document)
---
## Setup
```python
import gym
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
# Configuration parameters for the whole setup
seed = 42
gamma = 0.99 # Discount factor for past rewards
max_steps_per_episode = 10000
env = gym.make("CartPole-v0") # Create the environment
env.seed(seed)
eps = np.finfo(np.float32).eps.item() # Smallest number such that 1.0 + eps != 1.0
```
---
## Implement Actor Critic network
This network learns two functions:
1. Actor: This takes as input the state of our environment and returns a
probability value for each action in its action space.
2. Critic: This takes as input the state of our environment and returns
an estimate of total rewards in the future.
In our implementation, they share the initial layer.
```python
num_inputs = 4
num_actions = 2
num_hidden = 128
inputs = layers.Input(shape=(num_inputs,))
common = layers.Dense(num_hidden, activation="relu")(inputs)
action = layers.Dense(num_actions, activation="softmax")(common)
critic = layers.Dense(1)(common)
model = keras.Model(inputs=inputs, outputs=[action, critic])
```
---
## Train
```python
optimizer = keras.optimizers.Adam(learning_rate=0.01)
huber_loss = keras.losses.Huber()
action_probs_history = []
critic_value_history = []
rewards_history = []
running_reward = 0
episode_count = 0
while True: # Run until solved
state = env.reset()
episode_reward = 0
with tf.GradientTape() as tape:
for timestep in range(1, max_steps_per_episode):
# env.render(); Adding this line would show the attempts
# of the agent in a pop up window.
state = tf.convert_to_tensor(state)
state = tf.expand_dims(state, 0)
# Predict action probabilities and estimated future rewards
# from environment state
action_probs, critic_value = model(state)
critic_value_history.append(critic_value[0, 0])
# Sample action from action probability distribution
action = np.random.choice(num_actions, p=np.squeeze(action_probs))
action_probs_history.append(tf.math.log(action_probs[0, action]))
# Apply the sampled action in our environment
state, reward, done, _ = env.step(action)
rewards_history.append(reward)
episode_reward += reward
if done:
break
# Update running reward to check condition for solving
running_reward = 0.05 * episode_reward + (1 - 0.05) * running_reward
# Calculate expected value from rewards
# - At each timestep what was the total reward received after that timestep
# - Rewards in the past are discounted by multiplying them with gamma
# - These are the labels for our critic
returns = []
discounted_sum = 0
for r in rewards_history[::-1]:
discounted_sum = r + gamma * discounted_sum
returns.insert(0, discounted_sum)
# Normalize
returns = np.array(returns)
returns = (returns - np.mean(returns)) / (np.std(returns) + eps)
returns = returns.tolist()
# Calculating loss values to update our network
history = zip(action_probs_history, critic_value_history, returns)
actor_losses = []
critic_losses = []
for log_prob, value, ret in history:
# At this point in history, the critic estimated that we would get a
# total reward = `value` in the future. We took an action with log probability
            # of `log_prob` and ended up receiving a total reward = `ret`.
# The actor must be updated so that it predicts an action that leads to
# high rewards (compared to critic's estimate) with high probability.
diff = ret - value
actor_losses.append(-log_prob * diff) # actor loss
# The critic must be updated so that it predicts a better estimate of
# the future rewards.
critic_losses.append(
huber_loss(tf.expand_dims(value, 0), tf.expand_dims(ret, 0))
)
# Backpropagation
loss_value = sum(actor_losses) + sum(critic_losses)
grads = tape.gradient(loss_value, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
# Clear the loss and reward history
action_probs_history.clear()
critic_value_history.clear()
rewards_history.clear()
# Log details
episode_count += 1
if episode_count % 10 == 0:
template = "running reward: {:.2f} at episode {}"
print(template.format(running_reward, episode_count))
if running_reward > 195: # Condition to consider the task solved
print("Solved at episode {}!".format(episode_count))
break
```
<div class="k-default-codeblock">
```
running reward: 8.82 at episode 10
running reward: 23.04 at episode 20
running reward: 28.41 at episode 30
running reward: 53.59 at episode 40
running reward: 53.71 at episode 50
running reward: 77.35 at episode 60
running reward: 74.76 at episode 70
running reward: 57.89 at episode 80
running reward: 46.59 at episode 90
running reward: 43.48 at episode 100
running reward: 63.77 at episode 110
running reward: 111.13 at episode 120
running reward: 142.77 at episode 130
running reward: 127.96 at episode 140
running reward: 113.92 at episode 150
running reward: 128.57 at episode 160
running reward: 139.95 at episode 170
running reward: 154.95 at episode 180
running reward: 171.45 at episode 190
running reward: 171.33 at episode 200
running reward: 177.74 at episode 210
running reward: 184.76 at episode 220
running reward: 190.88 at episode 230
running reward: 154.78 at episode 240
running reward: 114.38 at episode 250
running reward: 107.51 at episode 260
running reward: 128.99 at episode 270
running reward: 157.48 at episode 280
running reward: 174.54 at episode 290
running reward: 184.76 at episode 300
running reward: 190.87 at episode 310
running reward: 194.54 at episode 320
Solved at episode 322!
```
</div>
---
## Visualizations
In early stages of training:
![Imgur](https://i.imgur.com/5gCs5kH.gif)
In later stages of training:
![Imgur](https://i.imgur.com/5ziiZUD.gif)
| keras-io/examples/rl/md/actor_critic_cartpole.md/0 | {
"file_path": "keras-io/examples/rl/md/actor_critic_cartpole.md",
"repo_id": "keras-io",
"token_count": 2860
} | 109 |
<jupyter_start><jupyter_text>Timeseries classification with a Transformer model**Author:** [Theodoros Ntakouris](https://github.com/ntakouris)**Date created:** 2021/06/25**Last modified:** 2021/08/05**Description:** This notebook demonstrates how to do timeseries classification using a Transformer model. IntroductionThis is the Transformer architecture from[Attention Is All You Need](https://arxiv.org/abs/1706.03762),applied to timeseries instead of natural language.This example requires TensorFlow 2.4 or higher. Load the datasetWe are going to use the same dataset and preprocessing as the[TimeSeries Classification from Scratch](https://keras.io/examples/timeseries/timeseries_classification_from_scratch)example.<jupyter_code>import numpy as np
import keras
from keras import layers
def readucr(filename):
data = np.loadtxt(filename, delimiter="\t")
y = data[:, 0]
x = data[:, 1:]
return x, y.astype(int)
root_url = "https://raw.githubusercontent.com/hfawaz/cd-diagram/master/FordA/"
x_train, y_train = readucr(root_url + "FordA_TRAIN.tsv")
x_test, y_test = readucr(root_url + "FordA_TEST.tsv")
x_train = x_train.reshape((x_train.shape[0], x_train.shape[1], 1))
x_test = x_test.reshape((x_test.shape[0], x_test.shape[1], 1))
n_classes = len(np.unique(y_train))
idx = np.random.permutation(len(x_train))
x_train = x_train[idx]
y_train = y_train[idx]
y_train[y_train == -1] = 0
y_test[y_test == -1] = 0<jupyter_output><empty_output><jupyter_text>Build the modelOur model processes a tensor of shape `(batch size, sequence length, features)`,where `sequence length` is the number of time steps and `features` is each inputtimeseries.You can replace your classification RNN layers with this one: theinputs are fully compatible!We include residual connections, layer normalization, and dropout.The resulting layer can be stacked multiple times.The projection layers are implemented through `keras.layers.Conv1D`.<jupyter_code>def transformer_encoder(inputs, head_size, num_heads, ff_dim, dropout=0):
# Attention and Normalization
x = layers.MultiHeadAttention(
key_dim=head_size, num_heads=num_heads, dropout=dropout
)(inputs, inputs)
x = layers.Dropout(dropout)(x)
x = layers.LayerNormalization(epsilon=1e-6)(x)
res = x + inputs
# Feed Forward Part
x = layers.Conv1D(filters=ff_dim, kernel_size=1, activation="relu")(res)
x = layers.Dropout(dropout)(x)
x = layers.Conv1D(filters=inputs.shape[-1], kernel_size=1)(x)
x = layers.LayerNormalization(epsilon=1e-6)(x)
return x + res<jupyter_output><empty_output><jupyter_text>The main part of our model is now complete. We can stack multiple of those`transformer_encoder` blocks and we can also proceed to add the finalMulti-Layer Perceptron classification head. Apart from a stack of `Dense`layers, we need to reduce the output tensor of the `TransformerEncoder` part ofour model down to a vector of features for each data point in the currentbatch. A common way to achieve this is to use a pooling layer. Forthis example, a `GlobalAveragePooling1D` layer is sufficient.<jupyter_code>def build_model(
input_shape,
head_size,
num_heads,
ff_dim,
num_transformer_blocks,
mlp_units,
dropout=0,
mlp_dropout=0,
):
inputs = keras.Input(shape=input_shape)
x = inputs
for _ in range(num_transformer_blocks):
x = transformer_encoder(x, head_size, num_heads, ff_dim, dropout)
x = layers.GlobalAveragePooling1D(data_format="channels_last")(x)
for dim in mlp_units:
x = layers.Dense(dim, activation="relu")(x)
x = layers.Dropout(mlp_dropout)(x)
outputs = layers.Dense(n_classes, activation="softmax")(x)
return keras.Model(inputs, outputs)<jupyter_output><empty_output><jupyter_text>Train and evaluate<jupyter_code>input_shape = x_train.shape[1:]
model = build_model(
input_shape,
head_size=256,
num_heads=4,
ff_dim=4,
num_transformer_blocks=4,
mlp_units=[128],
mlp_dropout=0.4,
dropout=0.25,
)
model.compile(
loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.Adam(learning_rate=1e-4),
metrics=["sparse_categorical_accuracy"],
)
model.summary()
callbacks = [keras.callbacks.EarlyStopping(patience=10, restore_best_weights=True)]
model.fit(
x_train,
y_train,
validation_split=0.2,
epochs=150,
batch_size=64,
callbacks=callbacks,
)
model.evaluate(x_test, y_test, verbose=1)<jupyter_output><empty_output> | keras-io/examples/timeseries/ipynb/timeseries_classification_transformer.ipynb/0 | {
"file_path": "keras-io/examples/timeseries/ipynb/timeseries_classification_transformer.ipynb",
"repo_id": "keras-io",
"token_count": 1659
} | 110 |
"""
Title: Semi-supervision and domain adaptation with AdaMatch
Author: [Sayak Paul](https://twitter.com/RisingSayak)
Date created: 2021/06/19
Last modified: 2021/06/19
Description: Unifying semi-supervised learning and unsupervised domain adaptation with AdaMatch.
Accelerator: GPU
"""
"""
## Introduction
In this example, we will implement the AdaMatch algorithm, proposed in
[AdaMatch: A Unified Approach to Semi-Supervised Learning and Domain Adaptation](https://arxiv.org/abs/2106.04732)
by Berthelot et al. It sets a new state-of-the-art in unsupervised domain adaptation (as of
June 2021). AdaMatch is particularly interesting because it
unifies semi-supervised learning (SSL) and unsupervised domain adaptation
(UDA) under one framework. It thereby provides a way to perform semi-supervised domain
adaptation (SSDA).
This example requires TensorFlow 2.5 or higher, as well as TensorFlow Models, which can
be installed using the following command:
"""
"""shell
pip install -q tf-models-official==2.9.2
"""
"""
Before we proceed, let's review a few preliminary concepts underlying this example.
"""
"""
## Preliminaries
In **semi-supervised learning (SSL)**, we use a small amount of labeled data to
train models on a bigger unlabeled dataset. Popular semi-supervised learning methods
for computer vision include [FixMatch](https://arxiv.org/abs/2001.07685),
[MixMatch](https://arxiv.org/abs/1905.02249),
[Noisy Student Training](https://arxiv.org/abs/1911.04252), etc. You can refer to
[this example](https://keras.io/examples/vision/consistency_training/) to get an idea
of what a standard SSL workflow looks like.
In **unsupervised domain adaptation**, we have access to a source labeled dataset and
a target *unlabeled* dataset. Then the task is to learn a model that can generalize well
to the target dataset. The source and the target datasets vary in terms of distribution.
The following figure provides an illustration of this idea. In the present example, we use the
[MNIST dataset](http://yann.lecun.com/exdb/mnist/) as the source dataset, while the target dataset is
[SVHN](http://ufldl.stanford.edu/housenumbers/), which consists of images of house
numbers. The two datasets differ in terms of texture, viewpoint,
appearance, etc.: their domains, or distributions, are different from one
another.
![](https://i.imgur.com/dJFSJuT.png)
Popular domain adaptation algorithms in deep learning include
[Deep CORAL](https://arxiv.org/abs/1612.01939),
[Moment Matching](https://arxiv.org/abs/1812.01754), etc.
"""
"""
## Setup
"""
import tensorflow as tf
tf.random.set_seed(42)
import numpy as np
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import regularizers
from keras_cv.layers import RandAugment
import tensorflow_datasets as tfds
tfds.disable_progress_bar()
"""
## Prepare the data
"""
# MNIST
(
(mnist_x_train, mnist_y_train),
(mnist_x_test, mnist_y_test),
) = keras.datasets.mnist.load_data()
# Add a channel dimension
mnist_x_train = tf.expand_dims(mnist_x_train, -1)
mnist_x_test = tf.expand_dims(mnist_x_test, -1)
# Convert the labels to one-hot encoded vectors
mnist_y_train = tf.one_hot(mnist_y_train, 10).numpy()
# SVHN
svhn_train, svhn_test = tfds.load(
"svhn_cropped", split=["train", "test"], as_supervised=True
)
"""
## Define constants and hyperparameters
"""
RESIZE_TO = 32
SOURCE_BATCH_SIZE = 64
TARGET_BATCH_SIZE = 3 * SOURCE_BATCH_SIZE # Reference: Section 3.2
EPOCHS = 10
STEPS_PER_EPOCH = len(mnist_x_train) // SOURCE_BATCH_SIZE
TOTAL_STEPS = EPOCHS * STEPS_PER_EPOCH
AUTO = tf.data.AUTOTUNE
LEARNING_RATE = 0.03
WEIGHT_DECAY = 0.0005
INIT = "he_normal"
DEPTH = 28
WIDTH_MULT = 2
"""
## Data augmentation utilities
A standard element of SSL algorithms is to feed weakly and strongly augmented versions of
the same images to the learning model to make its predictions consistent. For strong
augmentation, [RandAugment](https://arxiv.org/abs/1909.13719) is a standard choice. For
weak augmentation, we will use horizontal flipping and random cropping.
"""
# Initialize `RandAugment` object with 2 layers of
# augmentation transforms and strength of 5.
augmenter = RandAugment(value_range=(0, 255), augmentations_per_image=2, magnitude=0.5)
def weak_augment(image, source=True):
if image.dtype != tf.float32:
image = tf.cast(image, tf.float32)
# MNIST images are grayscale, this is why we first convert them to
# RGB images.
if source:
image = tf.image.resize_with_pad(image, RESIZE_TO, RESIZE_TO)
image = tf.tile(image, [1, 1, 3])
image = tf.image.random_flip_left_right(image)
image = tf.image.random_crop(image, (RESIZE_TO, RESIZE_TO, 3))
return image
def strong_augment(image, source=True):
if image.dtype != tf.float32:
image = tf.cast(image, tf.float32)
if source:
image = tf.image.resize_with_pad(image, RESIZE_TO, RESIZE_TO)
image = tf.tile(image, [1, 1, 3])
image = augmenter(image)
return image
"""
## Data loading utilities
"""
def create_individual_ds(ds, aug_func, source=True):
if source:
batch_size = SOURCE_BATCH_SIZE
else:
# During training 3x more target unlabeled samples are shown
# to the model in AdaMatch (Section 3.2 of the paper).
batch_size = TARGET_BATCH_SIZE
ds = ds.shuffle(batch_size * 10, seed=42)
if source:
ds = ds.map(lambda x, y: (aug_func(x), y), num_parallel_calls=AUTO)
else:
ds = ds.map(lambda x, y: (aug_func(x, False), y), num_parallel_calls=AUTO)
ds = ds.batch(batch_size).prefetch(AUTO)
return ds
"""
`_w` and `_s` suffixes denote weak and strong respectively.
"""
source_ds = tf.data.Dataset.from_tensor_slices((mnist_x_train, mnist_y_train))
source_ds_w = create_individual_ds(source_ds, weak_augment)
source_ds_s = create_individual_ds(source_ds, strong_augment)
final_source_ds = tf.data.Dataset.zip((source_ds_w, source_ds_s))
target_ds_w = create_individual_ds(svhn_train, weak_augment, source=False)
target_ds_s = create_individual_ds(svhn_train, strong_augment, source=False)
final_target_ds = tf.data.Dataset.zip((target_ds_w, target_ds_s))
"""
Here's what a single image batch looks like:
![](https://i.imgur.com/aver8cG.png)
"""
"""
## Loss computation utilities
"""
def compute_loss_source(source_labels, logits_source_w, logits_source_s):
loss_func = keras.losses.CategoricalCrossentropy(from_logits=True)
# First compute the losses between original source labels and
# predictions made on the weakly and strongly augmented versions
# of the same images.
w_loss = loss_func(source_labels, logits_source_w)
s_loss = loss_func(source_labels, logits_source_s)
return w_loss + s_loss
def compute_loss_target(target_pseudo_labels_w, logits_target_s, mask):
loss_func = keras.losses.CategoricalCrossentropy(from_logits=True, reduction="none")
target_pseudo_labels_w = tf.stop_gradient(target_pseudo_labels_w)
# For calculating loss for the target samples, we treat the pseudo labels
# as the ground-truth. These are not considered during backpropagation
# which is a standard SSL practice.
target_loss = loss_func(target_pseudo_labels_w, logits_target_s)
# More on `mask` later.
mask = tf.cast(mask, target_loss.dtype)
target_loss *= mask
return tf.reduce_mean(target_loss, 0)
"""
## Subclassed model for AdaMatch training
The figure below presents the overall workflow of AdaMatch (taken from the
[original paper](https://arxiv.org/abs/2106.04732)):
![](https://i.imgur.com/1QsEm2M.png)
Here's a brief step-by-step breakdown of the workflow:
1. We first retrieve the weakly and strongly augmented pairs of images from the source and
target datasets.
2. We prepare two concatenated copies:
i. One where both pairs are concatenated.
ii. One where only the source data image pair is concatenated.
3. We run two forward passes through the model:
i. The first forward pass uses the concatenated copy obtained from **2.i**. In
this forward pass, the [Batch Normalization](https://arxiv.org/abs/1502.03167) statistics
are updated.
ii. In the second forward pass, we only use the concatenated copy obtained from **2.ii**.
Batch Normalization layers are run in inference mode.
4. The respective logits are computed for both the forward passes.
5. The logits go through a series of transformations, introduced in the paper (which
we will discuss shortly).
6. We compute the loss and update the gradients of the underlying model.
"""
class AdaMatch(keras.Model):
def __init__(self, model, total_steps, tau=0.9):
super().__init__()
self.model = model
self.tau = tau # Denotes the confidence threshold
self.loss_tracker = tf.keras.metrics.Mean(name="loss")
self.total_steps = total_steps
self.current_step = tf.Variable(0, dtype="int64")
@property
def metrics(self):
return [self.loss_tracker]
# This is a warmup schedule to update the weight of the
# loss contributed by the target unlabeled samples. More
# on this in the text.
def compute_mu(self):
pi = tf.constant(np.pi, dtype="float32")
step = tf.cast(self.current_step, dtype="float32")
return 0.5 - tf.cos(tf.math.minimum(pi, (2 * pi * step) / self.total_steps)) / 2
def train_step(self, data):
## Unpack and organize the data ##
source_ds, target_ds = data
(source_w, source_labels), (source_s, _) = source_ds
(
(target_w, _),
(target_s, _),
) = target_ds # Notice that we are NOT using any labels here.
combined_images = tf.concat([source_w, source_s, target_w, target_s], 0)
combined_source = tf.concat([source_w, source_s], 0)
total_source = tf.shape(combined_source)[0]
total_target = tf.shape(tf.concat([target_w, target_s], 0))[0]
with tf.GradientTape() as tape:
## Forward passes ##
combined_logits = self.model(combined_images, training=True)
z_d_prime_source = self.model(
combined_source, training=False
) # No BatchNorm update.
z_prime_source = combined_logits[:total_source]
## 1. Random logit interpolation for the source images ##
lambd = tf.random.uniform((total_source, 10), 0, 1)
final_source_logits = (lambd * z_prime_source) + (
(1 - lambd) * z_d_prime_source
)
## 2. Distribution alignment (only consider weakly augmented images) ##
# Compute softmax for logits of the WEAKLY augmented SOURCE images.
y_hat_source_w = tf.nn.softmax(final_source_logits[: tf.shape(source_w)[0]])
# Extract logits for the WEAKLY augmented TARGET images and compute softmax.
logits_target = combined_logits[total_source:]
logits_target_w = logits_target[: tf.shape(target_w)[0]]
y_hat_target_w = tf.nn.softmax(logits_target_w)
# Align the target label distribution to that of the source.
expectation_ratio = tf.reduce_mean(y_hat_source_w) / tf.reduce_mean(
y_hat_target_w
)
y_tilde_target_w = tf.math.l2_normalize(
y_hat_target_w * expectation_ratio, 1
)
## 3. Relative confidence thresholding ##
row_wise_max = tf.reduce_max(y_hat_source_w, axis=-1)
final_sum = tf.reduce_mean(row_wise_max, 0)
c_tau = self.tau * final_sum
mask = tf.reduce_max(y_tilde_target_w, axis=-1) >= c_tau
## Compute losses (pay attention to the indexing) ##
source_loss = compute_loss_source(
source_labels,
final_source_logits[: tf.shape(source_w)[0]],
final_source_logits[tf.shape(source_w)[0] :],
)
target_loss = compute_loss_target(
y_tilde_target_w, logits_target[tf.shape(target_w)[0] :], mask
)
t = self.compute_mu() # Compute weight for the target loss
total_loss = source_loss + (t * target_loss)
self.current_step.assign_add(
1
) # Update current training step for the scheduler
gradients = tape.gradient(total_loss, self.model.trainable_variables)
self.optimizer.apply_gradients(zip(gradients, self.model.trainable_variables))
self.loss_tracker.update_state(total_loss)
return {"loss": self.loss_tracker.result()}
"""
The authors introduce three improvements in the paper:
* In AdaMatch, we perform two forward passes, and only one of them is responsible for
updating the Batch Normalization statistics. This is done to account for distribution
shifts in the target dataset. In the other forward pass, we only use the source sample,
and the Batch Normalization layers are run in inference mode. Logits for the source
samples (weakly and strongly augmented versions) from these two passes are slightly
different from one another because of how Batch Normalization layers are run. Final
logits for the source samples are computed by linearly interpolating between these two
different pairs of logits. This induces a form of consistency regularization. This step
is referred to as **random logit interpolation**.
* **Distribution alignment** is used to align the source and target label distributions.
This further helps the underlying model learn *domain-invariant representations*. In case
of unsupervised domain adaptation, we don't have access to any labels of the target
dataset. This is why pseudo labels are generated from the underlying model.
* The underlying model generates pseudo-labels for the target samples. It's likely that
the model would make faulty predictions. Those can propagate back as we make progress in
the training, and hurt the overall performance. To compensate for that, we filter the
high-confidence predictions based on a threshold (hence the use of `mask` inside
`compute_loss_target()`). In AdaMatch, this threshold is relatively adjusted which is why
it is called **relative confidence thresholding**.
For more details on these methods and to know how each of them contribute please refer to
[the paper](https://arxiv.org/abs/2106.04732).
**About `compute_mu()`**:
Rather than using a fixed scalar quantity, a varying scalar is used in AdaMatch. It
denotes the weight of the loss contributed by the target samples. Visually, the weight
scheduler looks like so:
![](https://i.imgur.com/dG7i9uH.png)
This scheduler increases the weight of the target domain loss from 0 to 1 for the first
half of the training. Then it keeps that weight at 1 for the second half of the training.
"""
"""
## Instantiate a Wide-ResNet-28-2
The authors use a [WideResNet-28-2](https://arxiv.org/abs/1605.07146) for the dataset
pairs we are using in this example. Most of the following code has been referred from
[this script](https://github.com/asmith26/wide_resnets_keras/blob/master/main.py). Note
that the following model has a scaling layer inside it that scales the pixel values to
[0, 1].
"""
def wide_basic(x, n_input_plane, n_output_plane, stride):
conv_params = [[3, 3, stride, "same"], [3, 3, (1, 1), "same"]]
n_bottleneck_plane = n_output_plane
# Residual block
for i, v in enumerate(conv_params):
if i == 0:
if n_input_plane != n_output_plane:
x = layers.BatchNormalization()(x)
x = layers.Activation("relu")(x)
convs = x
else:
convs = layers.BatchNormalization()(x)
convs = layers.Activation("relu")(convs)
convs = layers.Conv2D(
n_bottleneck_plane,
(v[0], v[1]),
strides=v[2],
padding=v[3],
kernel_initializer=INIT,
kernel_regularizer=regularizers.l2(WEIGHT_DECAY),
use_bias=False,
)(convs)
else:
convs = layers.BatchNormalization()(convs)
convs = layers.Activation("relu")(convs)
convs = layers.Conv2D(
n_bottleneck_plane,
(v[0], v[1]),
strides=v[2],
padding=v[3],
kernel_initializer=INIT,
kernel_regularizer=regularizers.l2(WEIGHT_DECAY),
use_bias=False,
)(convs)
    # Shortcut connection: identity function or 1x1 convolutional
    # (depends on difference between input & output shape - this
    # corresponds to whether we are using the first block in each
    # group; see `block_series()`).
if n_input_plane != n_output_plane:
shortcut = layers.Conv2D(
n_output_plane,
(1, 1),
strides=stride,
padding="same",
kernel_initializer=INIT,
kernel_regularizer=regularizers.l2(WEIGHT_DECAY),
use_bias=False,
)(x)
else:
shortcut = x
return layers.Add()([convs, shortcut])
# Stacking residual units on the same stage
def block_series(x, n_input_plane, n_output_plane, count, stride):
x = wide_basic(x, n_input_plane, n_output_plane, stride)
for i in range(2, int(count + 1)):
x = wide_basic(x, n_output_plane, n_output_plane, stride=1)
return x
def get_network(image_size=32, num_classes=10):
n = (DEPTH - 4) / 6
n_stages = [16, 16 * WIDTH_MULT, 32 * WIDTH_MULT, 64 * WIDTH_MULT]
inputs = keras.Input(shape=(image_size, image_size, 3))
x = layers.Rescaling(scale=1.0 / 255)(inputs)
conv1 = layers.Conv2D(
n_stages[0],
(3, 3),
strides=1,
padding="same",
kernel_initializer=INIT,
kernel_regularizer=regularizers.l2(WEIGHT_DECAY),
use_bias=False,
)(x)
## Add wide residual blocks ##
conv2 = block_series(
conv1,
n_input_plane=n_stages[0],
n_output_plane=n_stages[1],
count=n,
stride=(1, 1),
) # Stage 1
conv3 = block_series(
conv2,
n_input_plane=n_stages[1],
n_output_plane=n_stages[2],
count=n,
stride=(2, 2),
) # Stage 2
conv4 = block_series(
conv3,
n_input_plane=n_stages[2],
n_output_plane=n_stages[3],
count=n,
stride=(2, 2),
) # Stage 3
batch_norm = layers.BatchNormalization()(conv4)
relu = layers.Activation("relu")(batch_norm)
# Classifier
trunk_outputs = layers.GlobalAveragePooling2D()(relu)
outputs = layers.Dense(
num_classes, kernel_regularizer=regularizers.l2(WEIGHT_DECAY)
)(trunk_outputs)
return keras.Model(inputs, outputs)
"""
We can now instantiate a Wide ResNet model like so. Note that the purpose of using a
Wide ResNet here is to keep the implementation as close to the original one
as possible.
"""
wrn_model = get_network()
print(f"Model has {wrn_model.count_params()/1e6} Million parameters.")
"""
## Instantiate AdaMatch model and compile it
"""
reduce_lr = keras.optimizers.schedules.CosineDecay(LEARNING_RATE, TOTAL_STEPS, 0.25)
optimizer = keras.optimizers.Adam(reduce_lr)
adamatch_trainer = AdaMatch(model=wrn_model, total_steps=TOTAL_STEPS)
adamatch_trainer.compile(optimizer=optimizer)
"""
## Model training
"""
total_ds = tf.data.Dataset.zip((final_source_ds, final_target_ds))
adamatch_trainer.fit(total_ds, epochs=EPOCHS)
"""
## Evaluation on the target and source test sets
"""
# Compile the AdaMatch model to yield accuracy.
adamatch_trained_model = adamatch_trainer.model
adamatch_trained_model.compile(metrics=keras.metrics.SparseCategoricalAccuracy())
# Score on the target test set.
svhn_test = svhn_test.batch(TARGET_BATCH_SIZE).prefetch(AUTO)
_, accuracy = adamatch_trained_model.evaluate(svhn_test)
print(f"Accuracy on target test set: {accuracy * 100:.2f}%")
"""
With more training, this score improves. When this same network is trained with
standard classification objective, it yields an accuracy of **7.20%** which is
significantly lower than what we got with AdaMatch. You can check out
[this notebook](https://colab.research.google.com/github/sayakpaul/AdaMatch-TF/blob/main/Vanilla_WideResNet.ipynb)
to learn more about the hyperparameters and other experimental details.
"""
# Utility function for preprocessing the source test set.
def prepare_test_ds_source(image, label):
image = tf.image.resize_with_pad(image, RESIZE_TO, RESIZE_TO)
image = tf.tile(image, [1, 1, 3])
return image, label
source_test_ds = tf.data.Dataset.from_tensor_slices((mnist_x_test, mnist_y_test))
source_test_ds = (
source_test_ds.map(prepare_test_ds_source, num_parallel_calls=AUTO)
.batch(TARGET_BATCH_SIZE)
.prefetch(AUTO)
)
# Evaluation on the source test set.
_, accuracy = adamatch_trained_model.evaluate(source_test_ds)
print(f"Accuracy on source test set: {accuracy * 100:.2f}%")
"""
You can reproduce the results by using these
[model weights](https://github.com/sayakpaul/AdaMatch-TF/releases/tag/v1.0.0).
"""
"""
**Example available on HuggingFace**
| Trained Model | Demo |
| :--: | :--: |
| [![Generic badge](https://img.shields.io/badge/%F0%9F%A4%97%20Model-AdaMatch%20Domain%20Adaption-black.svg)](https://huggingface.co/keras-io/adamatch-domain-adaption) | [![Generic badge](https://img.shields.io/badge/%F0%9F%A4%97%20Spaces-AdaMatch%20Domain%20Adaption-black.svg)](https://huggingface.co/spaces/keras-io/adamatch-domain-adaption) |
"""
| keras-io/examples/vision/adamatch.py/0 | {
"file_path": "keras-io/examples/vision/adamatch.py",
"repo_id": "keras-io",
"token_count": 8380
} | 111 |
"""
Title: Image classification with EANet (External Attention Transformer)
Author: [ZhiYong Chang](https://github.com/czy00000)
Date created: 2021/10/19
Last modified: 2023/07/18
Description: Image classification with a Transformer that leverages external attention.
Accelerator: GPU
Converted to Keras 3: [Muhammad Anas Raza](https://anasrz.com)
"""
"""
## Introduction
This example implements the [EANet](https://arxiv.org/abs/2105.02358)
model for image classification, and demonstrates it on the CIFAR-100 dataset.
EANet introduces a novel attention mechanism
named ***external attention***, based on two external, small, learnable, and
shared memories, which can be implemented easily by simply using two cascaded
linear layers and two normalization layers. It conveniently replaces self-attention
as used in existing architectures. External attention has linear complexity, as it only
implicitly considers the correlations between all samples.
"""
"""
## Setup
"""
import keras
from keras import layers
from keras import ops
import matplotlib.pyplot as plt
"""
## Prepare the data
"""
num_classes = 100
input_shape = (32, 32, 3)
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar100.load_data()
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
print(f"x_train shape: {x_train.shape} - y_train shape: {y_train.shape}")
print(f"x_test shape: {x_test.shape} - y_test shape: {y_test.shape}")
"""
## Configure the hyperparameters
"""
weight_decay = 0.0001
learning_rate = 0.001
label_smoothing = 0.1
validation_split = 0.2
batch_size = 128
num_epochs = 50
patch_size = 2 # Size of the patches to be extracted from the input images.
num_patches = (input_shape[0] // patch_size) ** 2 # Number of patches
embedding_dim = 64 # Number of hidden units.
mlp_dim = 64
dim_coefficient = 4
num_heads = 4
attention_dropout = 0.2
projection_dropout = 0.2
num_transformer_blocks = 8 # Number of repetitions of the transformer layer
print(f"Patch size: {patch_size} X {patch_size} = {patch_size ** 2} ")
print(f"Patches per image: {num_patches}")
"""
## Use data augmentation
"""
data_augmentation = keras.Sequential(
[
layers.Normalization(),
layers.RandomFlip("horizontal"),
layers.RandomRotation(factor=0.1),
layers.RandomContrast(factor=0.1),
layers.RandomZoom(height_factor=0.2, width_factor=0.2),
],
name="data_augmentation",
)
# Compute the mean and the variance of the training data for normalization.
data_augmentation.layers[0].adapt(x_train)
"""
## Implement the patch extraction and encoding layer
"""
class PatchExtract(layers.Layer):
def __init__(self, patch_size, **kwargs):
super().__init__(**kwargs)
self.patch_size = patch_size
def call(self, x):
B, C = ops.shape(x)[0], ops.shape(x)[-1]
x = ops.image.extract_patches(x, self.patch_size)
x = ops.reshape(x, (B, -1, self.patch_size * self.patch_size * C))
return x
class PatchEmbedding(layers.Layer):
def __init__(self, num_patch, embed_dim, **kwargs):
super().__init__(**kwargs)
self.num_patch = num_patch
self.proj = layers.Dense(embed_dim)
self.pos_embed = layers.Embedding(input_dim=num_patch, output_dim=embed_dim)
def call(self, patch):
pos = ops.arange(start=0, stop=self.num_patch, step=1)
return self.proj(patch) + self.pos_embed(pos)
"""
## Implement the external attention block
"""
def external_attention(
x,
dim,
num_heads,
dim_coefficient=4,
attention_dropout=0,
projection_dropout=0,
):
_, num_patch, channel = x.shape
assert dim % num_heads == 0
num_heads = num_heads * dim_coefficient
x = layers.Dense(dim * dim_coefficient)(x)
# create tensor [batch_size, num_patches, num_heads, dim*dim_coefficient//num_heads]
x = ops.reshape(x, (-1, num_patch, num_heads, dim * dim_coefficient // num_heads))
x = ops.transpose(x, axes=[0, 2, 1, 3])
# a linear layer M_k
attn = layers.Dense(dim // dim_coefficient)(x)
# normalize attention map
attn = layers.Softmax(axis=2)(attn)
    # double normalization
attn = layers.Lambda(
lambda attn: ops.divide(
attn,
ops.convert_to_tensor(1e-9) + ops.sum(attn, axis=-1, keepdims=True),
)
)(attn)
attn = layers.Dropout(attention_dropout)(attn)
# a linear layer M_v
x = layers.Dense(dim * dim_coefficient // num_heads)(attn)
x = ops.transpose(x, axes=[0, 2, 1, 3])
x = ops.reshape(x, [-1, num_patch, dim * dim_coefficient])
# a linear layer to project original dim
x = layers.Dense(dim)(x)
x = layers.Dropout(projection_dropout)(x)
return x
"""
## Implement the MLP block
"""
def mlp(x, embedding_dim, mlp_dim, drop_rate=0.2):
x = layers.Dense(mlp_dim, activation=ops.gelu)(x)
x = layers.Dropout(drop_rate)(x)
x = layers.Dense(embedding_dim)(x)
x = layers.Dropout(drop_rate)(x)
return x
"""
## Implement the Transformer block
"""
def transformer_encoder(
x,
embedding_dim,
mlp_dim,
num_heads,
dim_coefficient,
attention_dropout,
projection_dropout,
attention_type="external_attention",
):
residual_1 = x
x = layers.LayerNormalization(epsilon=1e-5)(x)
if attention_type == "external_attention":
x = external_attention(
x,
embedding_dim,
num_heads,
dim_coefficient,
attention_dropout,
projection_dropout,
)
elif attention_type == "self_attention":
x = layers.MultiHeadAttention(
num_heads=num_heads,
key_dim=embedding_dim,
dropout=attention_dropout,
)(x, x)
x = layers.add([x, residual_1])
residual_2 = x
x = layers.LayerNormalization(epsilon=1e-5)(x)
x = mlp(x, embedding_dim, mlp_dim)
x = layers.add([x, residual_2])
return x
"""
## Implement the EANet model
"""
"""
The EANet model leverages external attention.
The computational complexity of traditional self attention is `O(d * N ** 2)`,
where `d` is the embedding size, and `N` is the number of patches.
The authors find that most pixels are closely related to just a few other
pixels, and an `N`-to-`N` attention matrix may be redundant.
So, they propose an external attention module as an alternative, whose
computational complexity is `O(d * S * N)`, where `S` is the size of the external memory.
As `d` and `S` are hyper-parameters,
the proposed algorithm is linear in the number of pixels. In fact, this is equivalent
to a drop patch operation, because a lot of information contained in a patch
in an image is redundant and unimportant.
"""
def get_model(attention_type="external_attention"):
inputs = layers.Input(shape=input_shape)
# Image augment
x = data_augmentation(inputs)
# Extract patches.
x = PatchExtract(patch_size)(x)
# Create patch embedding.
x = PatchEmbedding(num_patches, embedding_dim)(x)
# Create Transformer block.
for _ in range(num_transformer_blocks):
x = transformer_encoder(
x,
embedding_dim,
mlp_dim,
num_heads,
dim_coefficient,
attention_dropout,
projection_dropout,
attention_type,
)
x = layers.GlobalAveragePooling1D()(x)
outputs = layers.Dense(num_classes, activation="softmax")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
return model
"""
## Train on CIFAR-100
"""
model = get_model(attention_type="external_attention")
model.compile(
loss=keras.losses.CategoricalCrossentropy(label_smoothing=label_smoothing),
optimizer=keras.optimizers.AdamW(
learning_rate=learning_rate, weight_decay=weight_decay
),
metrics=[
keras.metrics.CategoricalAccuracy(name="accuracy"),
keras.metrics.TopKCategoricalAccuracy(5, name="top-5-accuracy"),
],
)
history = model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=num_epochs,
validation_split=validation_split,
)
"""
### Let's visualize the training progress of the model.
"""
plt.plot(history.history["loss"], label="train_loss")
plt.plot(history.history["val_loss"], label="val_loss")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.title("Train and Validation Losses Over Epochs", fontsize=14)
plt.legend()
plt.grid()
plt.show()
"""
### Let's display the final results of the test on CIFAR-100.
"""
loss, accuracy, top_5_accuracy = model.evaluate(x_test, y_test)
print(f"Test loss: {round(loss, 2)}")
print(f"Test accuracy: {round(accuracy * 100, 2)}%")
print(f"Test top 5 accuracy: {round(top_5_accuracy * 100, 2)}%")
"""
EANet simply replaces the self attention in ViT with external attention.
The traditional ViT achieved ~73% test top-5 accuracy and ~41% top-1 accuracy after
training for 50 epochs, but with 0.6M parameters. Under the same experimental environment
and the same hyperparameters, the EANet model we just trained has just 0.3M parameters,
and it gets us to ~73% test top-5 accuracy and ~43% top-1 accuracy. This fully demonstrates the
effectiveness of external attention.
We only show the training process of EANet here; you can train ViT under the same
experimental conditions and observe the test results.
"""
| keras-io/examples/vision/eanet.py/0 | {
"file_path": "keras-io/examples/vision/eanet.py",
"repo_id": "keras-io",
"token_count": 3597
} | 112 |
<jupyter_start><jupyter_text>Image classification with ConvMixer**Author:** [Sayak Paul](https://twitter.com/RisingSayak)**Date created:** 2021/10/12**Last modified:** 2021/10/12**Description:** An all-convolutional network applied to patches of images. IntroductionVision Transformers (ViT; [Dosovitskiy et al.](https://arxiv.org/abs/2010.11929)) extractsmall patches from the input images, linearly project them, and then apply theTransformer ([Vaswani et al.](https://arxiv.org/abs/1706.03762)) blocks. The applicationof ViTs to image recognition tasks is quickly becoming a promising area of research,because ViTs eliminate the need to have strong inductive biases (such as convolutions) formodeling locality. This presents them as a general computation primitive capable oflearning just from the training data with as minimal inductive priors as possible. ViTsyield great downstream performance when trained with proper regularization, dataaugmentation, and relatively large datasets.In the [Patches Are All You Need](https://openreview.net/pdf?id=TVHS5Y4dNvM) paper (note: at the time of writing, it is a submission to the ICLR 2022 conference), the authors extendthe idea of using patches to train an all-convolutional network and demonstratecompetitive results. Their architecture namely **ConvMixer** uses recipes from the recent isotropic architectures like ViT, MLP-Mixer([Tolstikhin et al.](https://arxiv.org/abs/2105.01601)), such as using the samedepth and resolution across different layers in the network, residual connections,and so on.In this example, we will implement the ConvMixer model and demonstrate its performance onthe CIFAR-10 dataset. Imports<jupyter_code>import keras
from keras import layers
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np<jupyter_output><empty_output><jupyter_text>HyperparametersTo keep run time short, we will train the model for only 10 epochs. To focus onthe core ideas of ConvMixer, we will not use other training-specific elements likeRandAugment ([Cubuk et al.](https://arxiv.org/abs/1909.13719)). If you are interested inlearning more about those details, please refer to the[original paper](https://openreview.net/pdf?id=TVHS5Y4dNvM).<jupyter_code>learning_rate = 0.001
weight_decay = 0.0001
batch_size = 128
num_epochs = 10<jupyter_output><empty_output><jupyter_text>Load the CIFAR-10 dataset<jupyter_code>(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
val_split = 0.1
val_indices = int(len(x_train) * val_split)
new_x_train, new_y_train = x_train[val_indices:], y_train[val_indices:]
x_val, y_val = x_train[:val_indices], y_train[:val_indices]
print(f"Training data samples: {len(new_x_train)}")
print(f"Validation data samples: {len(x_val)}")
print(f"Test data samples: {len(x_test)}")<jupyter_output><empty_output><jupyter_text>Prepare `tf.data.Dataset` objectsOur data augmentation pipeline is different from what the authors used for the CIFAR-10dataset, which is fine for the purpose of the example.Note that, it's ok to use **TF APIs for data I/O and preprocessing** with other backends(jax, torch) as it is feature-complete framework when it comes to data preprocessing.<jupyter_code>image_size = 32
auto = tf.data.AUTOTUNE
augmentation_layers = [
keras.layers.RandomCrop(image_size, image_size),
keras.layers.RandomFlip("horizontal"),
]
def augment_images(images):
for layer in augmentation_layers:
images = layer(images, training=True)
return images
def make_datasets(images, labels, is_train=False):
dataset = tf.data.Dataset.from_tensor_slices((images, labels))
if is_train:
dataset = dataset.shuffle(batch_size * 10)
dataset = dataset.batch(batch_size)
if is_train:
dataset = dataset.map(
lambda x, y: (augment_images(x), y), num_parallel_calls=auto
)
return dataset.prefetch(auto)
train_dataset = make_datasets(new_x_train, new_y_train, is_train=True)
val_dataset = make_datasets(x_val, y_val)
test_dataset = make_datasets(x_test, y_test)<jupyter_output><empty_output><jupyter_text>ConvMixer utilitiesThe following figure (taken from the original paper) depicts the ConvMixer model:ConvMixer is very similar to the MLP-Mixer model, with the following key differences:* Instead of using fully-connected layers, it uses standard convolution layers.* Instead of LayerNorm (which is typical for ViTs and MLP-Mixers), it uses BatchNorm.Two types of convolution layers are used in ConvMixer. **(1)**: Depthwise convolutions, for mixing spatial locations of the images; **(2)**: Pointwise convolutions (which follow the depthwise convolutions), for mixing channel-wise information across the patches. Another key point is the use of *larger kernel sizes* to allow a larger receptive field.<jupyter_code>def activation_block(x):
x = layers.Activation("gelu")(x)
return layers.BatchNormalization()(x)
def conv_stem(x, filters: int, patch_size: int):
x = layers.Conv2D(filters, kernel_size=patch_size, strides=patch_size)(x)
return activation_block(x)
def conv_mixer_block(x, filters: int, kernel_size: int):
# Depthwise convolution.
x0 = x
x = layers.DepthwiseConv2D(kernel_size=kernel_size, padding="same")(x)
x = layers.Add()([activation_block(x), x0]) # Residual.
# Pointwise convolution.
x = layers.Conv2D(filters, kernel_size=1)(x)
x = activation_block(x)
return x
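# Quick symbolic check (hedged sketch): a ConvMixer block preserves both the spatial
# resolution and the channel count, which is what lets the model stack `depth`
# identical blocks back to back.
_probe = keras.Input(shape=(16, 16, 256))
print("ConvMixer block output shape:", conv_mixer_block(_probe, filters=256, kernel_size=5).shape)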
def get_conv_mixer_256_8(
image_size=32, filters=256, depth=8, kernel_size=5, patch_size=2, num_classes=10
):
"""ConvMixer-256/8: https://openreview.net/pdf?id=TVHS5Y4dNvM.
The hyperparameter values are taken from the paper.
"""
inputs = keras.Input((image_size, image_size, 3))
x = layers.Rescaling(scale=1.0 / 255)(inputs)
# Extract patch embeddings.
x = conv_stem(x, filters, patch_size)
# ConvMixer blocks.
for _ in range(depth):
x = conv_mixer_block(x, filters, kernel_size)
# Classification block.
x = layers.GlobalAvgPool2D()(x)
outputs = layers.Dense(num_classes, activation="softmax")(x)
    return keras.Model(inputs, outputs)<jupyter_output><empty_output><jupyter_text>The model used in this experiment is termed **ConvMixer-256/8**, where 256 denotes the number of channels and 8 denotes the depth. The resulting model only has 0.8 million parameters. Model training and evaluation utility<jupyter_code># Code reference:
# https://keras.io/examples/vision/image_classification_with_vision_transformer/.
def run_experiment(model):
optimizer = keras.optimizers.AdamW(
learning_rate=learning_rate, weight_decay=weight_decay
)
model.compile(
optimizer=optimizer,
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
)
checkpoint_filepath = "/tmp/checkpoint.keras"
checkpoint_callback = keras.callbacks.ModelCheckpoint(
checkpoint_filepath,
monitor="val_accuracy",
save_best_only=True,
save_weights_only=False,
)
history = model.fit(
train_dataset,
validation_data=val_dataset,
epochs=num_epochs,
callbacks=[checkpoint_callback],
)
model.load_weights(checkpoint_filepath)
_, accuracy = model.evaluate(test_dataset)
print(f"Test accuracy: {round(accuracy * 100, 2)}%")
return history, model<jupyter_output><empty_output><jupyter_text>Train and evaluate model<jupyter_code>conv_mixer_model = get_conv_mixer_256_8()
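# The text above quotes roughly 0.8 million parameters for ConvMixer-256/8; quick
# sanity check (hedged -- the exact count can vary slightly across Keras versions):
print("ConvMixer-256/8 parameters:", conv_mixer_model.count_params())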
history, conv_mixer_model = run_experiment(conv_mixer_model)<jupyter_output><empty_output><jupyter_text>The gap in training and validation performance can be mitigated by using additionalregularization techniques. Nevertheless, being able to get to ~83% accuracy within 10epochs with 0.8 million parameters is a strong result. Visualizing the internals of ConvMixerWe can visualize the patch embeddings and the learned convolution filters. Recallthat each patch embedding and intermediate feature map have the same number of channels(256 in this case). This will make our visualization utility easier to implement.<jupyter_code># Code reference: https://bit.ly/3awIRbP.
def visualization_plot(weights, idx=1):
# First, apply min-max normalization to the
    # given weights to avoid isotropic scaling.
p_min, p_max = weights.min(), weights.max()
weights = (weights - p_min) / (p_max - p_min)
# Visualize all the filters.
num_filters = 256
plt.figure(figsize=(8, 8))
for i in range(num_filters):
current_weight = weights[:, :, :, i]
if current_weight.shape[-1] == 1:
current_weight = current_weight.squeeze()
ax = plt.subplot(16, 16, idx)
ax.set_xticks([])
ax.set_yticks([])
plt.imshow(current_weight)
idx += 1
# We first visualize the learned patch embeddings.
patch_embeddings = conv_mixer_model.layers[2].get_weights()[0]
visualization_plot(patch_embeddings)<jupyter_output><empty_output><jupyter_text>Even though we did not train the network to convergence, we can notice that differentpatches show different patterns. Some share similarity with others while some are verydifferent. These visualizations are more salient with larger image sizes.Similarly, we can visualize the raw convolution kernels. This can help us understandthe patterns to which a given kernel is receptive.<jupyter_code># First, print the indices of the convolution layers that are not
# pointwise convolutions.
for i, layer in enumerate(conv_mixer_model.layers):
if isinstance(layer, layers.DepthwiseConv2D):
if layer.get_config()["kernel_size"] == (5, 5):
print(i, layer)
idx = 26 # Taking a kernel from the middle of the network.
kernel = conv_mixer_model.layers[idx].get_weights()[0]
kernel = np.expand_dims(kernel.squeeze(), axis=2)
visualization_plot(kernel)<jupyter_output><empty_output> | keras-io/examples/vision/ipynb/convmixer.ipynb/0 | {
"file_path": "keras-io/examples/vision/ipynb/convmixer.ipynb",
"repo_id": "keras-io",
"token_count": 3362
} | 113 |
<jupyter_start><jupyter_text>Image classification from scratch**Author:** [fchollet](https://twitter.com/fchollet)**Date created:** 2020/04/27**Last modified:** 2023/11/09**Description:** Training an image classifier from scratch on the Kaggle Cats vs Dogs dataset. IntroductionThis example shows how to do image classification from scratch, starting from JPEGimage files on disk, without leveraging pre-trained weights or a pre-made KerasApplication model. We demonstrate the workflow on the Kaggle Cats vs Dogs binaryclassification dataset.We use the `image_dataset_from_directory` utility to generate the datasets, andwe use Keras image preprocessing layers for image standardization and data augmentation. Setup<jupyter_code>import os
import numpy as np
import keras
from keras import layers
from tensorflow import data as tf_data
import matplotlib.pyplot as plt<jupyter_output><empty_output><jupyter_text>Load the data: the Cats vs Dogs dataset Raw data downloadFirst, let's download the 786M ZIP archive of the raw data:<jupyter_code>!curl -O https://download.microsoft.com/download/3/E/1/3E1C3F21-ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_5340.zip
!unzip -q kagglecatsanddogs_5340.zip
!ls<jupyter_output><empty_output><jupyter_text>Now we have a `PetImages` folder which contains two subfolders, `Cat` and `Dog`. Each subfolder contains image files for each category.<jupyter_code>!ls PetImages<jupyter_output><empty_output><jupyter_text>Filter out corrupted imagesWhen working with lots of real-world image data, corrupted images are a common occurrence. Let's filter out badly-encoded images that do not feature the string "JFIF" in their header.<jupyter_code>num_skipped = 0
for folder_name in ("Cat", "Dog"):
folder_path = os.path.join("PetImages", folder_name)
for fname in os.listdir(folder_path):
fpath = os.path.join(folder_path, fname)
try:
fobj = open(fpath, "rb")
is_jfif = b"JFIF" in fobj.peek(10)
finally:
fobj.close()
if not is_jfif:
num_skipped += 1
# Delete corrupted image
os.remove(fpath)
print(f"Deleted {num_skipped} images.")<jupyter_output><empty_output><jupyter_text>Generate a `Dataset`<jupyter_code>image_size = (180, 180)
batch_size = 128
train_ds, val_ds = keras.utils.image_dataset_from_directory(
"PetImages",
validation_split=0.2,
subset="both",
seed=1337,
image_size=image_size,
batch_size=batch_size,
)<jupyter_output><empty_output><jupyter_text>Visualize the dataHere are the first 9 images in the training dataset.<jupyter_code>plt.figure(figsize=(10, 10))
for images, labels in train_ds.take(1):
for i in range(9):
ax = plt.subplot(3, 3, i + 1)
plt.imshow(np.array(images[i]).astype("uint8"))
plt.title(int(labels[i]))
plt.axis("off")<jupyter_output><empty_output><jupyter_text>Using image data augmentationWhen you don't have a large image dataset, it's a good practice to artificiallyintroduce sample diversity by applying random yet realistic transformations to thetraining images, such as random horizontal flipping or small random rotations. Thishelps expose the model to different aspects of the training data while slowing downoverfitting.<jupyter_code>data_augmentation_layers = [
layers.RandomFlip("horizontal"),
layers.RandomRotation(0.1),
]
def data_augmentation(images):
for layer in data_augmentation_layers:
images = layer(images)
return images<jupyter_output><empty_output><jupyter_text>Let's visualize what the augmented samples look like, by applying `data_augmentation`repeatedly to the first few images in the dataset:<jupyter_code>plt.figure(figsize=(10, 10))
for images, _ in train_ds.take(1):
for i in range(9):
augmented_images = data_augmentation(images)
ax = plt.subplot(3, 3, i + 1)
plt.imshow(np.array(augmented_images[0]).astype("uint8"))
plt.axis("off")<jupyter_output><empty_output><jupyter_text>Standardizing the dataOur image are already in a standard size (180x180), as they are being yielded ascontiguous `float32` batches by our dataset. However, their RGB channel values are inthe `[0, 255]` range. This is not ideal for a neural network;in general you should seek to make your input values small. Here, we willstandardize values to be in the `[0, 1]` by using a `Rescaling` layer at the start ofour model. Two options to preprocess the dataThere are two ways you could be using the `data_augmentation` preprocessor:**Option 1: Make it part of the model**, like this:```pythoninputs = keras.Input(shape=input_shape)x = data_augmentation(inputs)x = layers.Rescaling(1./255)(x)... Rest of the model```With this option, your data augmentation will happen *on device*, synchronouslywith the rest of the model execution, meaning that it will benefit from GPUacceleration.Note that data augmentation is inactive at test time, so the input samples will only beaugmented during `fit()`, not when calling `evaluate()` or `predict()`.If you're training on GPU, this may be a good option.**Option 2: apply it to the dataset**, so as to obtain a dataset that yields batches ofaugmented images, like this:```pythonaugmented_train_ds = train_ds.map( lambda x, y: (data_augmentation(x, training=True), y))```With this option, your data augmentation will happen **on CPU**, asynchronously, and willbe buffered before going into the model.If you're training on CPU, this is the better option, since it makes data augmentationasynchronous and non-blocking.In our case, we'll go with the second option. If you're not surewhich one to pick, this second option (asynchronous preprocessing) is always a solid choice. Configure the dataset for performanceLet's apply data augmentation to our training dataset,and let's make sure to use buffered prefetching so we can yield data from disk withouthaving I/O becoming blocking:<jupyter_code># Apply `data_augmentation` to the training images.
train_ds = train_ds.map(
lambda img, label: (data_augmentation(img), label),
num_parallel_calls=tf_data.AUTOTUNE,
)
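# For reference, a minimal runnable sketch of "Option 1" from the discussion above:
# make the augmentation part of the model itself so it runs on-device. This tiny model
# is illustrative only (hedged sketch) -- the rest of this example keeps the
# dataset-level approach ("Option 2") applied just above.
_opt1_inputs = keras.Input(shape=image_size + (3,))
_opt1_x = data_augmentation(_opt1_inputs)
_opt1_x = layers.Rescaling(1.0 / 255)(_opt1_x)
_opt1_outputs = layers.Dense(1)(layers.GlobalAveragePooling2D()(_opt1_x))
_opt1_model = keras.Model(_opt1_inputs, _opt1_outputs)  # Not trained; Option 1 made concrete.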
# Prefetching samples in GPU memory helps maximize GPU utilization.
train_ds = train_ds.prefetch(tf_data.AUTOTUNE)
val_ds = val_ds.prefetch(tf_data.AUTOTUNE)<jupyter_output><empty_output><jupyter_text>Build a modelWe'll build a small version of the Xception network. We haven't particularly tried tooptimize the architecture; if you want to do a systematic search for the best modelconfiguration, consider using[KerasTuner](https://github.com/keras-team/keras-tuner).Note that:- We start the model with the `data_augmentation` preprocessor, followed by a `Rescaling` layer.- We include a `Dropout` layer before the final classification layer.<jupyter_code>def make_model(input_shape, num_classes):
inputs = keras.Input(shape=input_shape)
# Entry block
x = layers.Rescaling(1.0 / 255)(inputs)
x = layers.Conv2D(128, 3, strides=2, padding="same")(x)
x = layers.BatchNormalization()(x)
x = layers.Activation("relu")(x)
previous_block_activation = x # Set aside residual
for size in [256, 512, 728]:
x = layers.Activation("relu")(x)
x = layers.SeparableConv2D(size, 3, padding="same")(x)
x = layers.BatchNormalization()(x)
x = layers.Activation("relu")(x)
x = layers.SeparableConv2D(size, 3, padding="same")(x)
x = layers.BatchNormalization()(x)
x = layers.MaxPooling2D(3, strides=2, padding="same")(x)
# Project residual
residual = layers.Conv2D(size, 1, strides=2, padding="same")(
previous_block_activation
)
x = layers.add([x, residual]) # Add back residual
previous_block_activation = x # Set aside next residual
x = layers.SeparableConv2D(1024, 3, padding="same")(x)
x = layers.BatchNormalization()(x)
x = layers.Activation("relu")(x)
x = layers.GlobalAveragePooling2D()(x)
if num_classes == 2:
units = 1
else:
units = num_classes
x = layers.Dropout(0.25)(x)
# We specify activation=None so as to return logits
outputs = layers.Dense(units, activation=None)(x)
return keras.Model(inputs, outputs)
model = make_model(input_shape=image_size + (3,), num_classes=2)
keras.utils.plot_model(model, show_shapes=True)<jupyter_output><empty_output><jupyter_text>Train the model<jupyter_code>epochs = 25
callbacks = [
keras.callbacks.ModelCheckpoint("save_at_{epoch}.keras"),
]
model.compile(
optimizer=keras.optimizers.Adam(3e-4),
loss=keras.losses.BinaryCrossentropy(from_logits=True),
metrics=[keras.metrics.BinaryAccuracy(name="acc")],
)
model.fit(
train_ds,
epochs=epochs,
callbacks=callbacks,
validation_data=val_ds,
)<jupyter_output><empty_output><jupyter_text>We get to >90% validation accuracy after training for 25 epochs on the full dataset(in practice, you can train for 50+ epochs before validation performance starts degrading). Run inference on new dataNote that data augmentation and dropout are inactive at inference time.<jupyter_code>img = keras.utils.load_img("PetImages/Cat/6779.jpg", target_size=image_size)
plt.imshow(img)
img_array = keras.utils.img_to_array(img)
img_array = keras.ops.expand_dims(img_array, 0) # Create batch axis
predictions = model.predict(img_array)
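# The final Dense layer was built with `activation=None`, so `predictions` holds raw
# logits; the sigmoid below maps them to a probability for class 1 ("dog").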
score = float(keras.ops.sigmoid(predictions[0][0]))
print(f"This image is {100 * (1 - score):.2f}% cat and {100 * score:.2f}% dog.")<jupyter_output><empty_output> | keras-io/examples/vision/ipynb/image_classification_from_scratch.ipynb/0 | {
"file_path": "keras-io/examples/vision/ipynb/image_classification_from_scratch.ipynb",
"repo_id": "keras-io",
"token_count": 3308
} | 114 |
<jupyter_start><jupyter_text>Near-duplicate image search**Author:** [Sayak Paul](https://twitter.com/RisingSayak)**Date created:** 2021/09/10**Last modified:** 2023/08/30**Description:** Building a near-duplicate image search utility using deep learning and locality-sensitive hashing. IntroductionFetching similar images in (near) real time is an important use case of informationretrieval systems. Some popular products utilizing it include Pinterest, Google ImageSearch, etc. In this example, we will build a similar image search utility using[Locality Sensitive Hashing](https://towardsdatascience.com/understanding-locality-sensitive-hashing-49f6d1f6134)(LSH) and [random projection](https://en.wikipedia.org/wiki/Random_projection) on topof the image representations computed by a pretrained image classifier.This kind of search engine is also knownas a _near-duplicate (or near-dup) image detector_.We will also look into optimizing the inference performance ofour search utility on GPU using [TensorRT](https://developer.nvidia.com/tensorrt).There are other examples under [keras.io/examples/vision](https://keras.io/examples/vision)that are worth checking out in this regard:* [Metric learning for image similarity search](https://keras.io/examples/vision/metric_learning)* [Image similarity estimation using a Siamese Network with a triplet loss](https://keras.io/examples/vision/siamese_network)Finally, this example uses the following resource as a reference and as such reuses someof its code:[Locality Sensitive Hashing for Similar Item Search](https://towardsdatascience.com/locality-sensitive-hashing-for-music-search-f2f1940ace23)._Note that in order to optimize the performance of our parser,you should have a GPU runtime available._ Setup<jupyter_code>!pip install tensorrt<jupyter_output><empty_output><jupyter_text>Imports<jupyter_code>import matplotlib.pyplot as plt
import tensorflow as tf
import tensorrt
import numpy as np
import time
import tensorflow_datasets as tfds
tfds.disable_progress_bar()<jupyter_output><empty_output><jupyter_text>Load the dataset and create a training set of 1,000 imagesTo keep the run time of the example short, we will be using a subset of 1,000 images fromthe `tf_flowers` dataset (available through[TensorFlow Datasets](https://www.tensorflow.org/datasets/catalog/tf_flowers))to build our vocabulary.<jupyter_code>train_ds, validation_ds = tfds.load(
"tf_flowers", split=["train[:85%]", "train[85%:]"], as_supervised=True
)
IMAGE_SIZE = 224
NUM_IMAGES = 1000
images = []
labels = []
for (image, label) in train_ds.take(NUM_IMAGES):
image = tf.image.resize(image, (IMAGE_SIZE, IMAGE_SIZE))
images.append(image.numpy())
labels.append(label.numpy())
images = np.array(images)
labels = np.array(labels)<jupyter_output><empty_output><jupyter_text>Load a pre-trained model In this section, we load an image classification model that was trained on the`tf_flowers` dataset. 85% of the total images were used to build the training set. Formore details on the training, refer to[this notebook](https://github.com/sayakpaul/near-dup-parser/blob/main/bit-supervised-training.ipynb).The underlying model is a BiT-ResNet (proposed in[Big Transfer (BiT): General Visual Representation Learning](https://arxiv.org/abs/1912.11370)).The BiT-ResNet family of models is known to provide excellent transfer performance acrossa wide variety of different downstream tasks.<jupyter_code>!wget -q https://github.com/sayakpaul/near-dup-parser/releases/download/v0.1.0/flower_model_bit_0.96875.zip
!unzip -qq flower_model_bit_0.96875.zip
bit_model = tf.keras.models.load_model("flower_model_bit_0.96875")
bit_model.count_params()<jupyter_output><empty_output><jupyter_text>Create an embedding modelTo retrieve similar images given a query image, we need to first generate vectorrepresentations of all the images involved. We do this via anembedding model that extracts output features from our pretrained classifier andnormalizes the resulting feature vectors.<jupyter_code>embedding_model = tf.keras.Sequential(
[
tf.keras.layers.Input((IMAGE_SIZE, IMAGE_SIZE, 3)),
tf.keras.layers.Rescaling(scale=1.0 / 255),
bit_model.layers[1],
tf.keras.layers.Normalization(mean=0, variance=1),
],
name="embedding_model",
)
embedding_model.summary()<jupyter_output><empty_output><jupyter_text>Take note of the normalization layer inside the model. It is used to project therepresentation vectors to the space of unit-spheres. Hashing utilities<jupyter_code>def hash_func(embedding, random_vectors):
embedding = np.array(embedding)
# Random projection.
bools = np.dot(embedding, random_vectors) > 0
return [bool2int(bool_vec) for bool_vec in bools]
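# Self-contained illustration of the idea (hedged sketch): project a dummy 2048-d
# embedding onto 8 random hyperplanes and pack the signs into a single integer -- the
# same thing `hash_func` and `bool2int` (defined below) do for the real embeddings.
_demo_embedding = np.random.randn(1, 2048)
_demo_planes = np.random.randn(8, 2048).T
_demo_bits = (np.dot(_demo_embedding, _demo_planes) > 0)[0]
_demo_bucket = sum(int(b) << i for i, b in enumerate(_demo_bits))
print("Demo hash bucket (0-255):", _demo_bucket)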
def bool2int(x):
y = 0
for i, j in enumerate(x):
if j:
y += 1 << i
    return y<jupyter_output><empty_output><jupyter_text>The shape of the vectors coming out of `embedding_model` is `(2048,)`, and considering practical aspects (storage, retrieval performance, etc.) it is quite large. So, there arises a need to reduce the dimensionality of the embedding vectors without reducing their information content. This is where *random projection* comes into the picture.It is based on the principle that if the distance between a group of points on a given plane is _approximately_ preserved, the dimensionality of that plane can further be reduced.Inside `hash_func()`, we first reduce the dimensionality of the embedding vectors. Then we compute the bitwise hash values of the images to determine their hash buckets. Images having the same hash values are likely to go into the same hash bucket. From a deployment perspective, bitwise hash values are cheaper to store and operate on. Query utilitiesThe `Table` class is responsible for building a single hash table. Each entry in the hash table is a mapping between the reduced embedding of an image from our dataset and a unique identifier. Because our dimensionality reduction technique involves randomness, it can so happen that similar images are not mapped to the same hash bucket every time the process runs. To reduce this effect, we will take results from multiple tables into consideration -- the number of tables and the reduction dimensionality are the key hyperparameters here.Crucially, you wouldn't reimplement locality-sensitive hashing yourself when working with real-world applications. Instead, you'd likely use one of the following popular libraries:* [ScaNN](https://github.com/google-research/google-research/tree/master/scann)* [Annoy](https://github.com/spotify/annoy)* [Vald](https://github.com/vdaas/vald)<jupyter_code>class Table:
def __init__(self, hash_size, dim):
self.table = {}
self.hash_size = hash_size
self.random_vectors = np.random.randn(hash_size, dim).T
def add(self, id, vectors, label):
        # Create a unique identifier.
entry = {"id_label": str(id) + "_" + str(label)}
# Compute the hash values.
hashes = hash_func(vectors, self.random_vectors)
# Add the hash values to the current table.
for h in hashes:
if h in self.table:
self.table[h].append(entry)
else:
self.table[h] = [entry]
def query(self, vectors):
# Compute hash value for the query vector.
hashes = hash_func(vectors, self.random_vectors)
results = []
# Loop over the query hashes and determine if they exist in
# the current table.
for h in hashes:
if h in self.table:
results.extend(self.table[h])
return results<jupyter_output><empty_output><jupyter_text>In the following `LSH` class we will pack the utilities to have multiple hash tables.<jupyter_code>class LSH:
def __init__(self, hash_size, dim, num_tables):
self.num_tables = num_tables
self.tables = []
for i in range(self.num_tables):
self.tables.append(Table(hash_size, dim))
def add(self, id, vectors, label):
for table in self.tables:
table.add(id, vectors, label)
def query(self, vectors):
results = []
for table in self.tables:
results.extend(table.query(vectors))
return results<jupyter_output><empty_output><jupyter_text>Now we can encapsulate the logic for building and operating with the master LSH table (acollection of many tables) inside a class. It has two methods:* `train()`: Responsible for building the final LSH table.* `query()`: Computes the number of matches given a query image and also quantifies thesimilarity score.<jupyter_code>class BuildLSHTable:
def __init__(
self,
prediction_model,
concrete_function=False,
hash_size=8,
dim=2048,
num_tables=10,
):
self.hash_size = hash_size
self.dim = dim
self.num_tables = num_tables
self.lsh = LSH(self.hash_size, self.dim, self.num_tables)
self.prediction_model = prediction_model
self.concrete_function = concrete_function
def train(self, training_files):
for id, training_file in enumerate(training_files):
# Unpack the data.
image, label = training_file
if len(image.shape) < 4:
image = image[None, ...]
# Compute embeddings and update the LSH tables.
# More on `self.concrete_function()` later.
if self.concrete_function:
features = self.prediction_model(tf.constant(image))[
"normalization"
].numpy()
else:
features = self.prediction_model.predict(image)
self.lsh.add(id, features, label)
def query(self, image, verbose=True):
# Compute the embeddings of the query image and fetch the results.
if len(image.shape) < 4:
image = image[None, ...]
if self.concrete_function:
features = self.prediction_model(tf.constant(image))[
"normalization"
].numpy()
else:
features = self.prediction_model.predict(image)
results = self.lsh.query(features)
if verbose:
print("Matches:", len(results))
# Calculate Jaccard index to quantify the similarity.
counts = {}
for r in results:
if r["id_label"] in counts:
counts[r["id_label"]] += 1
else:
counts[r["id_label"]] = 1
for k in counts:
counts[k] = float(counts[k]) / self.dim
return counts<jupyter_output><empty_output><jupyter_text>Create LSH tablesWith our helper utilities and classes implemented, we can now build our LSH table. Sincewe will be benchmarking performance between optimized and unoptimized embedding models, wewill also warm up our GPU to avoid any unfair comparison.<jupyter_code># Utility to warm up the GPU.
def warmup():
dummy_sample = tf.ones((1, IMAGE_SIZE, IMAGE_SIZE, 3))
for _ in range(100):
        _ = embedding_model.predict(dummy_sample)<jupyter_output><empty_output><jupyter_text>Now we can first do the GPU warm-up and proceed to build the master LSH table with `embedding_model`.<jupyter_code>warmup()
training_files = zip(images, labels)
lsh_builder = BuildLSHTable(embedding_model)
lsh_builder.train(training_files)<jupyter_output><empty_output><jupyter_text>At the time of writing, the wall time was 54.1 seconds on a Tesla T4 GPU. This timing mayvary based on the GPU you are using. Optimize the model with TensorRTFor NVIDIA-based GPUs, the[TensorRT framework](https://docs.nvidia.com/deeplearning/frameworks/tf-trt-user-guide/index.html)can be used to dramatically enhance the inference latency by using various modeloptimization techniques like pruning, constant folding, layer fusion, and so on. Here wewill use the[`tf.experimental.tensorrt`](https://www.tensorflow.org/api_docs/python/tf/experimental/tensorrt)module to optimize our embedding model.<jupyter_code># First serialize the embedding model as a SavedModel.
embedding_model.save("embedding_model")
# Initialize the conversion parameters.
params = tf.experimental.tensorrt.ConversionParams(
precision_mode="FP16", maximum_cached_engines=16
)
# Run the conversion.
converter = tf.experimental.tensorrt.Converter(
input_saved_model_dir="embedding_model", conversion_params=params
)
converter.convert()
converter.save("tensorrt_embedding_model")<jupyter_output><empty_output><jupyter_text>**Notes on the parameters inside of `tf.experimental.tensorrt.ConversionParams()`**:* `precision_mode` defines the numerical precision of the operations in theto-be-converted model.* `maximum_cached_engines` specifies the maximum number of TRT engines that will becached to handle dynamic operations (operations with unknown shapes).To learn more about the other options, refer to the[official documentation](https://www.tensorflow.org/api_docs/python/tf/experimental/tensorrt/ConversionParams).You can also explore the different quantization options provided by the`tf.experimental.tensorrt` module.<jupyter_code># Load the converted model.
root = tf.saved_model.load("tensorrt_embedding_model")
trt_model_function = root.signatures["serving_default"]<jupyter_output><empty_output><jupyter_text>Build LSH tables with optimized model<jupyter_code>warmup()
training_files = zip(images, labels)
lsh_builder_trt = BuildLSHTable(trt_model_function, concrete_function=True)
lsh_builder_trt.train(training_files)<jupyter_output><empty_output><jupyter_text>Notice the difference in the wall time which is **13.1 seconds**. Earlier, with theunoptimized model it was **54.1 seconds**.We can take a closer look into one of the hash tables and get an idea of how they arerepresented.<jupyter_code>idx = 0
for hash, entry in lsh_builder_trt.lsh.tables[0].table.items():
if idx == 5:
break
if len(entry) < 5:
print(hash, entry)
    idx += 1<jupyter_output><empty_output><jupyter_text>Visualize results on validation imagesIn this section we will first write a couple of utility functions to visualize the similar image parsing process. Then we will benchmark the query performance of the models with and without optimization. First, we take 100 images from the validation set for testing purposes.<jupyter_code>validation_images = []
validation_labels = []
for image, label in validation_ds.take(100):
image = tf.image.resize(image, (224, 224))
validation_images.append(image.numpy())
validation_labels.append(label.numpy())
validation_images = np.array(validation_images)
validation_labels = np.array(validation_labels)
validation_images.shape, validation_labels.shape<jupyter_output><empty_output><jupyter_text>Now we write our visualization utilities.<jupyter_code>def plot_images(images, labels):
plt.figure(figsize=(20, 10))
columns = 5
for (i, image) in enumerate(images):
ax = plt.subplot(len(images) // columns + 1, columns, i + 1)
if i == 0:
ax.set_title("Query Image\n" + "Label: {}".format(labels[i]))
else:
ax.set_title("Similar Image # " + str(i) + "\nLabel: {}".format(labels[i]))
plt.imshow(image.astype("int"))
plt.axis("off")
def visualize_lsh(lsh_class):
idx = np.random.choice(len(validation_images))
image = validation_images[idx]
label = validation_labels[idx]
results = lsh_class.query(image)
candidates = []
labels = []
overlaps = []
for idx, r in enumerate(sorted(results, key=results.get, reverse=True)):
if idx == 4:
break
image_id, label = r.split("_")[0], r.split("_")[1]
candidates.append(images[int(image_id)])
labels.append(label)
overlaps.append(results[r])
candidates.insert(0, image)
labels.insert(0, label)
plot_images(candidates, labels)<jupyter_output><empty_output><jupyter_text>Non-TRT model<jupyter_code>for _ in range(5):
visualize_lsh(lsh_builder)
visualize_lsh(lsh_builder)<jupyter_output><empty_output><jupyter_text>TRT model<jupyter_code>for _ in range(5):
visualize_lsh(lsh_builder_trt)<jupyter_output><empty_output><jupyter_text>As you may have noticed, there are a couple of incorrect results. This can be mitigated ina few ways:* Better models for generating the initial embeddings especially for noisy samples. We canuse techniques like [ArcFace](https://arxiv.org/abs/1801.07698),[Supervised Contrastive Learning](https://arxiv.org/abs/2004.11362), etc.that implicitly encourage better learning of representations for retrieval purposes.* The trade-off between the number of tables and the reduction dimensionality is crucialand helps set the right recall required for your application. Benchmarking query performance<jupyter_code>def benchmark(lsh_class):
warmup()
start_time = time.time()
for _ in range(1000):
image = np.ones((1, 224, 224, 3)).astype("float32")
_ = lsh_class.query(image, verbose=False)
end_time = time.time() - start_time
print(f"Time taken: {end_time:.3f}")
benchmark(lsh_builder)
benchmark(lsh_builder_trt)<jupyter_output><empty_output> | keras-io/examples/vision/ipynb/near_dup_search.ipynb/0 | {
"file_path": "keras-io/examples/vision/ipynb/near_dup_search.ipynb",
"repo_id": "keras-io",
"token_count": 6009
} | 115 |
<jupyter_start><jupyter_text>Semantic Image Clustering**Author:** [Khalid Salama](https://www.linkedin.com/in/khalid-salama-24403144/)**Date created:** 2021/02/28**Last modified:** 2021/02/28**Description:** Semantic Clustering by Adopting Nearest neighbors (SCAN) algorithm. IntroductionThis example demonstrates how to apply the [Semantic Clustering by Adopting Nearest neighbors(SCAN)](https://arxiv.org/abs/2005.12320) algorithm (Van Gansbeke et al., 2020) on the[CIFAR-10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset. The algorithm consists oftwo phases:1. Self-supervised visual representation learning of images, in which we use the[simCLR](https://arxiv.org/abs/2002.05709) technique.2. Clustering of the learned visual representation vectors to maximize the agreementbetween the cluster assignments of neighboring vectors. Setup<jupyter_code>import os
os.environ["KERAS_BACKEND"] = "tensorflow"
from collections import defaultdict
import numpy as np
import tensorflow as tf
import keras
from keras import layers
import matplotlib.pyplot as plt
from tqdm import tqdm<jupyter_output><empty_output><jupyter_text>Prepare the data<jupyter_code>num_classes = 10
input_shape = (32, 32, 3)
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
x_data = np.concatenate([x_train, x_test])
y_data = np.concatenate([y_train, y_test])
print("x_data shape:", x_data.shape, "- y_data shape:", y_data.shape)
classes = [
"airplane",
"automobile",
"bird",
"cat",
"deer",
"dog",
"frog",
"horse",
"ship",
"truck",
]<jupyter_output><empty_output><jupyter_text>Define hyperparameters<jupyter_code>target_size = 32 # Resize the input images.
representation_dim = 512 # The dimensions of the features vector.
projection_units = 128 # The projection head of the representation learner.
num_clusters = 20 # Number of clusters.
k_neighbours = 5 # Number of neighbours to consider during cluster learning.
tune_encoder_during_clustering = False # Freeze the encoder in the cluster learning.<jupyter_output><empty_output><jupyter_text>Implement data preprocessingThe data preprocessing step resizes the input images to the desired `target_size` and appliesfeature-wise normalization. Note that, when using `keras.applications.ResNet50V2` as thevisual encoder, resizing the images into 255 x 255 inputs would lead to more accurate resultsbut require a longer time to train.<jupyter_code>data_preprocessing = keras.Sequential(
[
layers.Resizing(target_size, target_size),
layers.Normalization(),
]
)
# Compute the mean and the variance from the data for normalization.
data_preprocessing.layers[-1].adapt(x_data)<jupyter_output><empty_output><jupyter_text>Data augmentationUnlike simCLR, which randomly picks a single data augmentation function to apply to an inputimage, we apply a set of data augmentation functions randomly to the input image.(You can experiment with other image augmentation techniques by followingthe [data augmentation tutorial](https://www.tensorflow.org/tutorials/images/data_augmentation).)<jupyter_code>data_augmentation = keras.Sequential(
[
layers.RandomTranslation(
height_factor=(-0.2, 0.2), width_factor=(-0.2, 0.2), fill_mode="nearest"
),
layers.RandomFlip(mode="horizontal"),
layers.RandomRotation(factor=0.15, fill_mode="nearest"),
layers.RandomZoom(
height_factor=(-0.3, 0.1), width_factor=(-0.3, 0.1), fill_mode="nearest"
),
]
)<jupyter_output><empty_output><jupyter_text>Display a random image<jupyter_code>image_idx = np.random.choice(range(x_data.shape[0]))
image = x_data[image_idx]
image_class = classes[y_data[image_idx][0]]
plt.figure(figsize=(3, 3))
plt.imshow(x_data[image_idx].astype("uint8"))
plt.title(image_class)
_ = plt.axis("off")<jupyter_output><empty_output><jupyter_text>Display a sample of augmented versions of the image<jupyter_code>plt.figure(figsize=(10, 10))
for i in range(9):
augmented_images = data_augmentation(np.array([image]))
ax = plt.subplot(3, 3, i + 1)
plt.imshow(augmented_images[0].numpy().astype("uint8"))
plt.axis("off")<jupyter_output><empty_output><jupyter_text>Self-supervised representation learning Implement the vision encoder<jupyter_code>def create_encoder(representation_dim):
encoder = keras.Sequential(
[
keras.applications.ResNet50V2(
include_top=False, weights=None, pooling="avg"
),
layers.Dense(representation_dim),
]
)
return encoder<jupyter_output><empty_output><jupyter_text>Implement the unsupervised contrastive loss<jupyter_code>class RepresentationLearner(keras.Model):
def __init__(
self,
encoder,
projection_units,
num_augmentations,
temperature=1.0,
dropout_rate=0.1,
l2_normalize=False,
**kwargs
):
super().__init__(**kwargs)
self.encoder = encoder
# Create projection head.
self.projector = keras.Sequential(
[
layers.Dropout(dropout_rate),
layers.Dense(units=projection_units, use_bias=False),
layers.BatchNormalization(),
layers.ReLU(),
]
)
self.num_augmentations = num_augmentations
self.temperature = temperature
self.l2_normalize = l2_normalize
self.loss_tracker = keras.metrics.Mean(name="loss")
@property
def metrics(self):
return [self.loss_tracker]
def compute_contrastive_loss(self, feature_vectors, batch_size):
num_augmentations = keras.ops.shape(feature_vectors)[0] // batch_size
if self.l2_normalize:
feature_vectors = keras.utils.normalize(feature_vectors)
# The logits shape is [num_augmentations * batch_size, num_augmentations * batch_size].
logits = (
tf.linalg.matmul(feature_vectors, feature_vectors, transpose_b=True)
/ self.temperature
)
# Apply log-max trick for numerical stability.
logits_max = keras.ops.max(logits, axis=1)
logits = logits - logits_max
# The shape of targets is [num_augmentations * batch_size, num_augmentations * batch_size].
        # targets is a matrix consisting of num_augmentations submatrices of shape [batch_size * batch_size].
# Each [batch_size * batch_size] submatrix is an identity matrix (diagonal entries are ones).
targets = keras.ops.tile(
tf.eye(batch_size), [num_augmentations, num_augmentations]
)
# Compute cross entropy loss
return keras.losses.categorical_crossentropy(
y_true=targets, y_pred=logits, from_logits=True
)
def call(self, inputs):
# Preprocess the input images.
preprocessed = data_preprocessing(inputs)
# Create augmented versions of the images.
augmented = []
for _ in range(self.num_augmentations):
augmented.append(data_augmentation(preprocessed))
augmented = layers.Concatenate(axis=0)(augmented)
# Generate embedding representations of the images.
features = self.encoder(augmented)
# Apply projection head.
return self.projector(features)
def train_step(self, inputs):
batch_size = keras.ops.shape(inputs)[0]
# Run the forward pass and compute the contrastive loss
with tf.GradientTape() as tape:
feature_vectors = self(inputs, training=True)
loss = self.compute_contrastive_loss(feature_vectors, batch_size)
# Compute gradients
trainable_vars = self.trainable_variables
gradients = tape.gradient(loss, trainable_vars)
# Update weights
self.optimizer.apply_gradients(zip(gradients, trainable_vars))
# Update loss tracker metric
self.loss_tracker.update_state(loss)
# Return a dict mapping metric names to current value
return {m.name: m.result() for m in self.metrics}
def test_step(self, inputs):
batch_size = keras.ops.shape(inputs)[0]
feature_vectors = self(inputs, training=False)
loss = self.compute_contrastive_loss(feature_vectors, batch_size)
self.loss_tracker.update_state(loss)
return {"loss": self.loss_tracker.result()}<jupyter_output><empty_output><jupyter_text>Train the model<jupyter_code># Create vision encoder.
encoder = create_encoder(representation_dim)
# Create representation learner.
representation_learner = RepresentationLearner(
encoder, projection_units, num_augmentations=2, temperature=0.1
)
# Create a cosine decay learning rate scheduler.
lr_scheduler = keras.optimizers.schedules.CosineDecay(
initial_learning_rate=0.001, decay_steps=500, alpha=0.1
)
# Compile the model.
representation_learner.compile(
optimizer=keras.optimizers.AdamW(learning_rate=lr_scheduler, weight_decay=0.0001),
jit_compile=False,
)
# Fit the model.
history = representation_learner.fit(
x=x_data,
batch_size=512,
epochs=50, # for better results, increase the number of epochs to 500.
)<jupyter_output><empty_output><jupyter_text>Plot training loss<jupyter_code>plt.plot(history.history["loss"])
plt.ylabel("loss")
plt.xlabel("epoch")
plt.show()<jupyter_output><empty_output><jupyter_text>Compute the nearest neighbors Generate the embeddings for the images<jupyter_code>batch_size = 500
# Get the feature vector representations of the images.
feature_vectors = encoder.predict(x_data, batch_size=batch_size, verbose=1)
# Normalize the feature vectors.
feature_vectors = keras.utils.normalize(feature_vectors)<jupyter_output><empty_output><jupyter_text>Find the *k* nearest neighbours for each embedding<jupyter_code>neighbours = []
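# Because the vectors were just L2-normalized, a plain dot product between any two of
# them equals their cosine similarity, which is what the matmul below relies on.
# Quick numeric check on the first two vectors (hedged sketch):
_v0, _v1 = np.asarray(feature_vectors[0]), np.asarray(feature_vectors[1])
print("dot product:", float(np.dot(_v0, _v1)), "- norms:", float(np.linalg.norm(_v0)), float(np.linalg.norm(_v1)))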
num_batches = feature_vectors.shape[0] // batch_size
for batch_idx in tqdm(range(num_batches)):
start_idx = batch_idx * batch_size
end_idx = start_idx + batch_size
current_batch = feature_vectors[start_idx:end_idx]
# Compute the dot similarity.
similarities = tf.linalg.matmul(current_batch, feature_vectors, transpose_b=True)
# Get the indices of most similar vectors.
_, indices = keras.ops.top_k(similarities, k=k_neighbours + 1, sorted=True)
# Add the indices to the neighbours.
neighbours.append(indices[..., 1:])
neighbours = np.reshape(np.array(neighbours), (-1, k_neighbours))<jupyter_output><empty_output><jupyter_text>Let's display some neighbors on each row<jupyter_code>nrows = 4
ncols = k_neighbours + 1
plt.figure(figsize=(12, 12))
position = 1
for _ in range(nrows):
anchor_idx = np.random.choice(range(x_data.shape[0]))
neighbour_indicies = neighbours[anchor_idx]
indices = [anchor_idx] + neighbour_indicies.tolist()
for j in range(ncols):
plt.subplot(nrows, ncols, position)
plt.imshow(x_data[indices[j]].astype("uint8"))
plt.title(classes[y_data[indices[j]][0]])
plt.axis("off")
position += 1<jupyter_output><empty_output><jupyter_text>You notice that images on each row are visually similar, and belong to similar classes. Semantic clustering with nearest neighbours Implement clustering consistency lossThis loss tries to make sure that neighbours have the same clustering assignments.<jupyter_code>class ClustersConsistencyLoss(keras.losses.Loss):
def __init__(self):
super().__init__()
def __call__(self, target, similarity, sample_weight=None):
# Set targets to be ones.
target = keras.ops.ones_like(similarity)
# Compute cross entropy loss.
loss = keras.losses.binary_crossentropy(
y_true=target, y_pred=similarity, from_logits=True
)
        return keras.ops.mean(loss)<jupyter_output><empty_output><jupyter_text>Implement the clusters entropy lossThis loss tries to make sure that the cluster distribution is roughly uniform, to avoid assigning most of the instances to one cluster.<jupyter_code>class ClustersEntropyLoss(keras.losses.Loss):
def __init__(self, entropy_loss_weight=1.0):
super().__init__()
self.entropy_loss_weight = entropy_loss_weight
def __call__(self, target, cluster_probabilities, sample_weight=None):
# Ideal entropy = log(num_clusters).
num_clusters = keras.ops.cast(
keras.ops.shape(cluster_probabilities)[-1], "float32"
)
target = keras.ops.log(num_clusters)
# Compute the overall clusters distribution.
cluster_probabilities = keras.ops.mean(cluster_probabilities, axis=0)
# Replacing zero probabilities - if any - with a very small value.
cluster_probabilities = keras.ops.clip(cluster_probabilities, 1e-8, 1.0)
# Compute the entropy over the clusters.
entropy = -keras.ops.sum(
cluster_probabilities * keras.ops.log(cluster_probabilities)
)
# Compute the difference between the target and the actual.
loss = target - entropy
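        # With num_clusters == 20, the target entropy is log(20) ~= 3.0 (natural log);
        # the loss reaches zero only when cluster usage over the batch is perfectly uniform.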
        return loss<jupyter_output><empty_output><jupyter_text>Implement clustering modelThis model takes a raw image as an input, generates its feature vector using the trained encoder, and produces a probability distribution over the clusters given the feature vector as the cluster assignments.<jupyter_code>def create_clustering_model(encoder, num_clusters, name=None):
inputs = keras.Input(shape=input_shape)
# Preprocess the input images.
preprocessed = data_preprocessing(inputs)
# Apply data augmentation to the images.
augmented = data_augmentation(preprocessed)
# Generate embedding representations of the images.
features = encoder(augmented)
# Assign the images to clusters.
outputs = layers.Dense(units=num_clusters, activation="softmax")(features)
# Create the model.
model = keras.Model(inputs=inputs, outputs=outputs, name=name)
return model<jupyter_output><empty_output><jupyter_text>Implement clustering learnerThis model receives the input `anchor` image and its `neighbours`, produces the clustersassignments for them using the `clustering_model`, and produces two outputs:1. `similarity`: the similarity between the cluster assignments of the `anchor` image andits `neighbours`. This output is fed to the `ClustersConsistencyLoss`.2. `anchor_clustering`: cluster assignments of the `anchor` images. This is fed to the `ClustersEntropyLoss`.<jupyter_code>def create_clustering_learner(clustering_model):
anchor = keras.Input(shape=input_shape, name="anchors")
neighbours = keras.Input(
shape=tuple([k_neighbours]) + input_shape, name="neighbours"
)
# Changes neighbours shape to [batch_size * k_neighbours, width, height, channels]
neighbours_reshaped = keras.ops.reshape(neighbours, tuple([-1]) + input_shape)
# anchor_clustering shape: [batch_size, num_clusters]
anchor_clustering = clustering_model(anchor)
# neighbours_clustering shape: [batch_size * k_neighbours, num_clusters]
neighbours_clustering = clustering_model(neighbours_reshaped)
# Convert neighbours_clustering shape to [batch_size, k_neighbours, num_clusters]
neighbours_clustering = keras.ops.reshape(
neighbours_clustering,
(-1, k_neighbours, keras.ops.shape(neighbours_clustering)[-1]),
)
# similarity shape: [batch_size, 1, k_neighbours]
similarity = keras.ops.einsum(
"bij,bkj->bik",
keras.ops.expand_dims(anchor_clustering, axis=1),
neighbours_clustering,
)
# similarity shape: [batch_size, k_neighbours]
similarity = layers.Lambda(
lambda x: keras.ops.squeeze(x, axis=1), name="similarity"
)(similarity)
# Create the model.
model = keras.Model(
inputs=[anchor, neighbours],
outputs=[similarity, anchor_clustering],
name="clustering_learner",
)
return model<jupyter_output><empty_output><jupyter_text>Train model<jupyter_code># If tune_encoder_during_clustering is set to False,
# then freeze the encoder weights.
for layer in encoder.layers:
layer.trainable = tune_encoder_during_clustering
# Create the clustering model and learner.
clustering_model = create_clustering_model(encoder, num_clusters, name="clustering")
clustering_learner = create_clustering_learner(clustering_model)
# Instantiate the model losses.
losses = [ClustersConsistencyLoss(), ClustersEntropyLoss(entropy_loss_weight=5)]
# Create the model inputs and labels.
inputs = {"anchors": x_data, "neighbours": tf.gather(x_data, neighbours)}
labels = np.ones(shape=(x_data.shape[0]))
# Compile the model.
clustering_learner.compile(
optimizer=keras.optimizers.AdamW(learning_rate=0.0005, weight_decay=0.0001),
loss=losses,
jit_compile=False,
)
# Begin training the model.
history = clustering_learner.fit(x=inputs, y=labels, batch_size=512, epochs=50)<jupyter_output><empty_output><jupyter_text>Plot training loss<jupyter_code>plt.plot(history.history["loss"])
plt.ylabel("loss")
plt.xlabel("epoch")
plt.show()<jupyter_output><empty_output><jupyter_text>Cluster analysis Assign images to clusters<jupyter_code># Get the cluster probability distribution of the input images.
clustering_probs = clustering_model.predict(x_data, batch_size=batch_size, verbose=1)
# Get the cluster of the highest probability.
cluster_assignments = keras.ops.argmax(clustering_probs, axis=-1).numpy()
# Store the clustering confidence.
# Images with the highest clustering confidence are considered the 'prototypes'
# of the clusters.
cluster_confidence = keras.ops.max(clustering_probs, axis=-1).numpy()<jupyter_output><empty_output><jupyter_text>Let's compute the cluster sizes<jupyter_code>clusters = defaultdict(list)
for idx, c in enumerate(cluster_assignments):
clusters[c].append((idx, cluster_confidence[idx]))
non_empty_clusters = defaultdict(list)
for c in clusters.keys():
if clusters[c]:
non_empty_clusters[c] = clusters[c]
for c in range(num_clusters):
print("cluster", c, ":", len(clusters[c]))<jupyter_output><empty_output><jupyter_text>Visualize cluster imagesDisplay the *prototypes*—instances with the highest clustering confidence—of each cluster:<jupyter_code>num_images = 8
plt.figure(figsize=(15, 15))
position = 1
for c in non_empty_clusters.keys():
cluster_instances = sorted(
non_empty_clusters[c], key=lambda kv: kv[1], reverse=True
)
for j in range(num_images):
image_idx = cluster_instances[j][0]
plt.subplot(len(non_empty_clusters), num_images, position)
plt.imshow(x_data[image_idx].astype("uint8"))
plt.title(classes[y_data[image_idx][0]])
plt.axis("off")
        position += 1<jupyter_output><empty_output><jupyter_text>Compute clustering accuracyFirst, we assign a label to each cluster based on the majority label of its images. Then, we compute the accuracy of each cluster by dividing the number of images with the majority label by the size of the cluster.<jupyter_code>cluster_label_counts = dict()
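# Toy illustration of the procedure described above (hedged): a cluster whose images
# carry labels [cat, cat, dog, cat] gets the majority label "cat" with accuracy
# 3 / 4 = 75%. The loop below applies this majority vote to every learned cluster.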
for c in range(num_clusters):
cluster_label_counts[c] = [0] * num_classes
instances = clusters[c]
for i, _ in instances:
cluster_label_counts[c][y_data[i][0]] += 1
cluster_label_idx = np.argmax(cluster_label_counts[c])
correct_count = np.max(cluster_label_counts[c])
cluster_size = len(clusters[c])
accuracy = (
np.round((correct_count / cluster_size) * 100, 2) if cluster_size > 0 else 0
)
cluster_label = classes[cluster_label_idx]
print("cluster", c, "label is:", cluster_label, " - accuracy:", accuracy, "%")<jupyter_output><empty_output> | keras-io/examples/vision/ipynb/semantic_image_clustering.ipynb/0 | {
"file_path": "keras-io/examples/vision/ipynb/semantic_image_clustering.ipynb",
"repo_id": "keras-io",
"token_count": 7503
} | 116 |
<jupyter_start><jupyter_text>Pneumonia Classification on TPU**Author:** Amy MiHyun Jang**Date created:** 2020/07/28**Last modified:** 2024/02/12**Description:** Medical image classification on TPU. Introduction + Set-upThis tutorial will explain how to build an X-ray image classification modelto predict whether an X-ray scan shows presence of pneumonia.<jupyter_code>import re
import os
import random
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver.connect()
print("Device:", tpu.master())
strategy = tf.distribute.TPUStrategy(tpu)
except:
strategy = tf.distribute.get_strategy()
print("Number of replicas:", strategy.num_replicas_in_sync)<jupyter_output><empty_output><jupyter_text>We need a Google Cloud link to our data to load the data using a TPU.Below, we define key configuration parameters we'll use in this example.To run on TPU, this example must be on Colab with the TPU runtime selected.<jupyter_code>AUTOTUNE = tf.data.AUTOTUNE
BATCH_SIZE = 25 * strategy.num_replicas_in_sync
IMAGE_SIZE = [180, 180]
CLASS_NAMES = ["NORMAL", "PNEUMONIA"]<jupyter_output><empty_output><jupyter_text>Load the dataThe Chest X-ray data we are using from[*Cell*](https://www.cell.com/cell/fulltext/S0092-8674(18)30154-5) divides the data intotraining and test files. Let's first load in the training TFRecords.<jupyter_code>train_images = tf.data.TFRecordDataset(
"gs://download.tensorflow.org/data/ChestXRay2017/train/images.tfrec"
)
train_paths = tf.data.TFRecordDataset(
"gs://download.tensorflow.org/data/ChestXRay2017/train/paths.tfrec"
)
ds = tf.data.Dataset.zip((train_images, train_paths))<jupyter_output><empty_output><jupyter_text>Let's count how many healthy/normal chest X-rays we have and how manypneumonia chest X-rays we have:<jupyter_code>COUNT_NORMAL = len(
[
filename
for filename in train_paths
if "NORMAL" in filename.numpy().decode("utf-8")
]
)
print("Normal images count in training set: " + str(COUNT_NORMAL))
COUNT_PNEUMONIA = len(
[
filename
for filename in train_paths
if "PNEUMONIA" in filename.numpy().decode("utf-8")
]
)
print("Pneumonia images count in training set: " + str(COUNT_PNEUMONIA))<jupyter_output><empty_output><jupyter_text>Notice that there are way more images that are classified as pneumonia than normal. Thisshows that we have an imbalance in our data. We will correct for this imbalance later onin our notebook. We want to map each filename to the corresponding (image, label) pair. The followingmethods will help us do that.As we only have two labels, we will encode the label so that `1` or `True` indicatespneumonia and `0` or `False` indicates normal.<jupyter_code>def get_label(file_path):
# convert the path to a list of path components
parts = tf.strings.split(file_path, "/")
# The second to last is the class-directory
if parts[-2] == "PNEUMONIA":
return 1
else:
return 0
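# Quick check of the label encoding on made-up example paths (hedged sketch; the real
# paths come from the `paths.tfrec` records loaded above):
print(get_label(tf.constant("train/PNEUMONIA/person1_bacteria_1.jpeg")))  # -> 1
print(get_label(tf.constant("train/NORMAL/IM-0001-0001.jpeg")))  # -> 0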
def decode_img(img):
# convert the compressed string to a 3D uint8 tensor
img = tf.image.decode_jpeg(img, channels=3)
# resize the image to the desired size.
return tf.image.resize(img, IMAGE_SIZE)
def process_path(image, path):
label = get_label(path)
# load the raw data from the file as a string
img = decode_img(image)
return img, label
ds = ds.map(process_path, num_parallel_calls=AUTOTUNE)<jupyter_output><empty_output><jupyter_text>Let's split the data into a training and validation datasets.<jupyter_code>ds = ds.shuffle(10000)
train_ds = ds.take(4200)
val_ds = ds.skip(4200)<jupyter_output><empty_output><jupyter_text>Let's visualize the shape of an (image, label) pair.<jupyter_code>for image, label in train_ds.take(1):
print("Image shape: ", image.numpy().shape)
print("Label: ", label.numpy())<jupyter_output><empty_output><jupyter_text>Load and format the test data as well.<jupyter_code>test_images = tf.data.TFRecordDataset(
"gs://download.tensorflow.org/data/ChestXRay2017/test/images.tfrec"
)
test_paths = tf.data.TFRecordDataset(
"gs://download.tensorflow.org/data/ChestXRay2017/test/paths.tfrec"
)
test_ds = tf.data.Dataset.zip((test_images, test_paths))
test_ds = test_ds.map(process_path, num_parallel_calls=AUTOTUNE)
test_ds = test_ds.batch(BATCH_SIZE)<jupyter_output><empty_output><jupyter_text>Visualize the datasetFirst, let's use buffered prefetching so we can yield data from disk without having I/Obecome blocking.Please note that large image datasets should not be cached in memory. We do it herebecause the dataset is not very large and we want to train on TPU.<jupyter_code>def prepare_for_training(ds, cache=True):
# This is a small dataset, only load it once, and keep it in memory.
# use `.cache(filename)` to cache preprocessing work for datasets that don't
# fit in memory.
if cache:
if isinstance(cache, str):
ds = ds.cache(cache)
else:
ds = ds.cache()
ds = ds.batch(BATCH_SIZE)
# `prefetch` lets the dataset fetch batches in the background while the model
# is training.
ds = ds.prefetch(buffer_size=AUTOTUNE)
return ds<jupyter_output><empty_output><jupyter_text>Call the next batch iteration of the training data.<jupyter_code>train_ds = prepare_for_training(train_ds)
val_ds = prepare_for_training(val_ds)
image_batch, label_batch = next(iter(train_ds))<jupyter_output><empty_output><jupyter_text>Define the method to show the images in the batch.<jupyter_code>def show_batch(image_batch, label_batch):
plt.figure(figsize=(10, 10))
for n in range(25):
ax = plt.subplot(5, 5, n + 1)
plt.imshow(image_batch[n] / 255)
if label_batch[n]:
plt.title("PNEUMONIA")
else:
plt.title("NORMAL")
plt.axis("off")<jupyter_output><empty_output><jupyter_text>As the method takes in NumPy arrays as its parameters, call the numpy function on thebatches to return the tensor in NumPy array form.<jupyter_code>show_batch(image_batch.numpy(), label_batch.numpy())<jupyter_output><empty_output><jupyter_text>Build the CNNTo make our model more modular and easier to understand, let's define some blocks. Aswe're building a convolution neural network, we'll create a convolution block and a denselayer block.The architecture for this CNN has been inspired by this[article](https://towardsdatascience.com/deep-learning-for-detecting-pneumonia-from-x-ray-images-fc9a3d9fdba8).<jupyter_code>import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import keras
from keras import layers
def conv_block(filters, inputs):
x = layers.SeparableConv2D(filters, 3, activation="relu", padding="same")(inputs)
x = layers.SeparableConv2D(filters, 3, activation="relu", padding="same")(x)
x = layers.BatchNormalization()(x)
outputs = layers.MaxPool2D()(x)
return outputs
def dense_block(units, dropout_rate, inputs):
x = layers.Dense(units, activation="relu")(inputs)
x = layers.BatchNormalization()(x)
outputs = layers.Dropout(dropout_rate)(x)
    return outputs<jupyter_output><empty_output><jupyter_text>The following method will define the function to build our model for us.The images originally have values that range from [0, 255]. CNNs work better with smaller numbers, so we will scale this down for our input.The Dropout layers are important, as they reduce the likelihood of the model overfitting. We want to end the model with a `Dense` layer with one node, as this will be the binary output that determines if an X-ray shows presence of pneumonia.<jupyter_code>def build_model():
inputs = keras.Input(shape=(IMAGE_SIZE[0], IMAGE_SIZE[1], 3))
x = layers.Rescaling(1.0 / 255)(inputs)
x = layers.Conv2D(16, 3, activation="relu", padding="same")(x)
x = layers.Conv2D(16, 3, activation="relu", padding="same")(x)
x = layers.MaxPool2D()(x)
x = conv_block(32, x)
x = conv_block(64, x)
x = conv_block(128, x)
x = layers.Dropout(0.2)(x)
x = conv_block(256, x)
x = layers.Dropout(0.2)(x)
x = layers.Flatten()(x)
x = dense_block(512, 0.7, x)
x = dense_block(128, 0.5, x)
x = dense_block(64, 0.3, x)
outputs = layers.Dense(1, activation="sigmoid")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
    return model<jupyter_output><empty_output><jupyter_text>Correct for data imbalance. We saw earlier in this example that the data was imbalanced, with more images classified as pneumonia than normal. We will correct for that by using class weighting:<jupyter_code>initial_bias = np.log([COUNT_PNEUMONIA / COUNT_NORMAL])
print("Initial bias: {:.5f}".format(initial_bias[0]))
TRAIN_IMG_COUNT = COUNT_NORMAL + COUNT_PNEUMONIA
weight_for_0 = (1 / COUNT_NORMAL) * (TRAIN_IMG_COUNT) / 2.0
weight_for_1 = (1 / COUNT_PNEUMONIA) * (TRAIN_IMG_COUNT) / 2.0
class_weight = {0: weight_for_0, 1: weight_for_1}
print("Weight for class 0: {:.2f}".format(weight_for_0))
print("Weight for class 1: {:.2f}".format(weight_for_1))<jupyter_output><empty_output><jupyter_text>The weight for class `0` (Normal) is a lot higher than the weight for class `1`(Pneumonia). Because there are less normal images, each normal image will be weightedmore to balance the data as the CNN works best when the training data is balanced. Train the model Defining callbacksThe checkpoint callback saves the best weights of the model, so next time we want to usethe model, we do not have to spend time training it. The early stopping callback stopsthe training process when the model starts becoming stagnant, or even worse, when themodel starts overfitting.<jupyter_code>checkpoint_cb = keras.callbacks.ModelCheckpoint("xray_model.keras", save_best_only=True)
early_stopping_cb = keras.callbacks.EarlyStopping(
patience=10, restore_best_weights=True
)<jupyter_output><empty_output><jupyter_text>We also want to tune our learning rate. Too high a learning rate will cause the model to diverge; too small a learning rate will make training too slow. We implement exponential learning rate scheduling below.<jupyter_code>initial_learning_rate = 0.015
lr_schedule = keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate, decay_steps=100000, decay_rate=0.96, staircase=True
)<jupyter_output><empty_output><jupyter_text>Fit the model. For our metrics, we want to include precision and recall as they will provide us with a more informed picture of how good our model is. Accuracy tells us what fraction of the labels is correct. Since our data is not balanced, accuracy might give a skewed sense of a good model (i.e. a model that always predicts PNEUMONIA will be 74% accurate but is not a good model). Precision is the number of true positives (TP) over the sum of TP and false positives (FP). It shows what fraction of predicted positives are actually correct. Recall is the number of TP over the sum of TP and false negatives (FN). It shows what fraction of actual positives are correctly identified. Since there are only two possible labels for the image, we will be using the binary crossentropy loss. When we fit the model, remember to specify the class weights, which we defined earlier. Because we are using a TPU, training will be quick - less than 2 minutes.<jupyter_code>with strategy.scope():
model = build_model()
METRICS = [
keras.metrics.BinaryAccuracy(),
keras.metrics.Precision(name="precision"),
keras.metrics.Recall(name="recall"),
]
model.compile(
optimizer=keras.optimizers.Adam(learning_rate=lr_schedule),
loss="binary_crossentropy",
metrics=METRICS,
)
history = model.fit(
train_ds,
epochs=100,
validation_data=val_ds,
class_weight=class_weight,
callbacks=[checkpoint_cb, early_stopping_cb],
    )<jupyter_output><empty_output><jupyter_text>Visualizing model performance. Let's plot the model accuracy and loss for the training and the validation set. Note that no random seed is specified for this notebook. For your notebook, there might be slight variance.<jupyter_code>fig, ax = plt.subplots(1, 4, figsize=(20, 3))
ax = ax.ravel()
for i, met in enumerate(["precision", "recall", "binary_accuracy", "loss"]):
ax[i].plot(history.history[met])
ax[i].plot(history.history["val_" + met])
ax[i].set_title("Model {}".format(met))
ax[i].set_xlabel("epochs")
ax[i].set_ylabel(met)
    ax[i].legend(["train", "val"])<jupyter_output><empty_output><jupyter_text>We see that the accuracy for our model is around 95%. Predict and evaluate results. Let's evaluate the model on our test data!<jupyter_code>model.evaluate(test_ds, return_dict=True)<jupyter_output><empty_output><jupyter_text>We see that our accuracy on our test data is lower than the accuracy for our validation set. This may indicate overfitting. Our recall is greater than our precision, indicating that almost all pneumonia images are correctly identified but some normal images are falsely identified. We should aim to increase our precision.<jupyter_code>for image, label in test_ds.take(1):
plt.imshow(image[0] / 255.0)
plt.title(CLASS_NAMES[label[0].numpy()])
prediction = model.predict(test_ds.take(1))[0]
scores = [1 - prediction, prediction]
for score, name in zip(scores, CLASS_NAMES):
print("This image is %.2f percent %s" % ((100 * score), name))<jupyter_output><empty_output> | keras-io/examples/vision/ipynb/xray_classification_with_tpus.ipynb/0 | {
"file_path": "keras-io/examples/vision/ipynb/xray_classification_with_tpus.ipynb",
"repo_id": "keras-io",
"token_count": 4692
} | 117 |
# Compact Convolutional Transformers
**Author:** [Sayak Paul](https://twitter.com/RisingSayak)<br>
**Date created:** 2021/06/30<br>
**Last modified:** 2023/08/07<br>
**Description:** Compact Convolutional Transformers for efficient image classification.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/vision/ipynb/cct.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/vision/cct.py)
As discussed in the [Vision Transformers (ViT)](https://arxiv.org/abs/2010.11929) paper,
a Transformer-based architecture for vision typically requires a larger dataset than
usual, as well as a longer pre-training schedule. [ImageNet-1k](http://imagenet.org/)
(which has about a million images) is considered to fall under the medium-sized data regime with
respect to ViTs. This is primarily because, unlike CNNs, ViTs (or a typical
Transformer-based architecture) do not have well-informed inductive biases (such as
convolutions for processing images). This raises the question: can't we combine the
benefits of convolutions and the benefits of Transformers
in a single network architecture? These benefits include parameter efficiency, and
self-attention to process long-range and global dependencies (interactions between
different regions in an image).
In [Escaping the Big Data Paradigm with Compact Transformers](https://arxiv.org/abs/2104.05704),
Hassani et al. present an approach for doing exactly this. They proposed the
**Compact Convolutional Transformer** (CCT) architecture. In this example, we will work on an
implementation of CCT and we will see how well it performs on the CIFAR-10 dataset.
If you are unfamiliar with the concept of self-attention or Transformers, you can read
[this chapter](https://livebook.manning.com/book/deep-learning-with-python-second-edition/chapter-11/r-3/312)
from François Chollet's book *Deep Learning with Python*. This example uses
code snippets from another example,
[Image classification with Vision Transformer](https://keras.io/examples/vision/image_classification_with_vision_transformer/).
---
## Imports
```python
from keras import layers
import keras
import matplotlib.pyplot as plt
import numpy as np
```
---
## Hyperparameters and constants
```python
positional_emb = True
conv_layers = 2
projection_dim = 128
num_heads = 2
transformer_units = [
projection_dim,
projection_dim,
]
transformer_layers = 2
stochastic_depth_rate = 0.1
learning_rate = 0.001
weight_decay = 0.0001
batch_size = 128
num_epochs = 30
image_size = 32
```
---
## Load CIFAR-10 dataset
```python
num_classes = 10
input_shape = (32, 32, 3)
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
print(f"x_train shape: {x_train.shape} - y_train shape: {y_train.shape}")
print(f"x_test shape: {x_test.shape} - y_test shape: {y_test.shape}")
```
<div class="k-default-codeblock">
```
x_train shape: (50000, 32, 32, 3) - y_train shape: (50000, 10)
x_test shape: (10000, 32, 32, 3) - y_test shape: (10000, 10)
```
</div>
---
## The CCT tokenizer
The first recipe introduced by the CCT authors is the tokenizer for processing the
images. In a standard ViT, images are organized into uniform *non-overlapping* patches.
This eliminates the boundary-level information present in between different patches. This
is important for a neural network to effectively exploit the locality information. The
figure below presents an illustration of how images are organized into patches.
![](https://i.imgur.com/IkBK9oY.png)
We already know that convolutions are quite good at exploiting locality information. So,
based on this, the authors introduce an all-convolution mini-network to produce image
patches.
```python
class CCTTokenizer(layers.Layer):
def __init__(
self,
kernel_size=3,
stride=1,
padding=1,
pooling_kernel_size=3,
pooling_stride=2,
num_conv_layers=conv_layers,
num_output_channels=[64, 128],
positional_emb=positional_emb,
**kwargs,
):
super().__init__(**kwargs)
# This is our tokenizer.
self.conv_model = keras.Sequential()
for i in range(num_conv_layers):
self.conv_model.add(
layers.Conv2D(
num_output_channels[i],
kernel_size,
stride,
padding="valid",
use_bias=False,
activation="relu",
kernel_initializer="he_normal",
)
)
self.conv_model.add(layers.ZeroPadding2D(padding))
self.conv_model.add(
layers.MaxPooling2D(pooling_kernel_size, pooling_stride, "same")
)
self.positional_emb = positional_emb
def call(self, images):
outputs = self.conv_model(images)
# After passing the images through our mini-network the spatial dimensions
# are flattened to form sequences.
reshaped = keras.ops.reshape(
outputs,
(
-1,
keras.ops.shape(outputs)[1] * keras.ops.shape(outputs)[2],
keras.ops.shape(outputs)[-1],
),
)
return reshaped
```
Positional embeddings are optional in CCT. If we want to use them, we can use
the Layer defined below.
```python
class PositionEmbedding(keras.layers.Layer):
def __init__(
self,
sequence_length,
initializer="glorot_uniform",
**kwargs,
):
super().__init__(**kwargs)
if sequence_length is None:
raise ValueError("`sequence_length` must be an Integer, received `None`.")
self.sequence_length = int(sequence_length)
self.initializer = keras.initializers.get(initializer)
def get_config(self):
config = super().get_config()
config.update(
{
"sequence_length": self.sequence_length,
"initializer": keras.initializers.serialize(self.initializer),
}
)
return config
def build(self, input_shape):
feature_size = input_shape[-1]
self.position_embeddings = self.add_weight(
name="embeddings",
shape=[self.sequence_length, feature_size],
initializer=self.initializer,
trainable=True,
)
super().build(input_shape)
def call(self, inputs, start_index=0):
shape = keras.ops.shape(inputs)
feature_length = shape[-1]
sequence_length = shape[-2]
# trim to match the length of the input sequence, which might be less
# than the sequence_length of the layer.
position_embeddings = keras.ops.convert_to_tensor(self.position_embeddings)
position_embeddings = keras.ops.slice(
position_embeddings,
(start_index, 0),
(sequence_length, feature_length),
)
return keras.ops.broadcast_to(position_embeddings, shape)
def compute_output_shape(self, input_shape):
return input_shape
```
---
## Sequence Pooling
Another recipe introduced in CCT is attention pooling or sequence pooling. In ViT, only
the feature map corresponding to the class token is pooled and is then used for the
subsequent classification task (or any other downstream task).
```python
class SequencePooling(layers.Layer):
def __init__(self):
super().__init__()
self.attention = layers.Dense(1)
def call(self, x):
attention_weights = keras.ops.softmax(self.attention(x), axis=1)
attention_weights = keras.ops.transpose(attention_weights, axes=(0, 2, 1))
weighted_representation = keras.ops.matmul(attention_weights, x)
return keras.ops.squeeze(weighted_representation, -2)
```
---
## Stochastic depth for regularization
[Stochastic depth](https://arxiv.org/abs/1603.09382) is a regularization technique that
randomly drops a set of layers. During inference, the layers are kept as they are. It is
very similar to [Dropout](https://jmlr.org/papers/v15/srivastava14a.html), except
that it operates on a block of layers rather than on individual nodes inside a
layer. In CCT, stochastic depth is used just before the residual blocks of a Transformer
encoder.
```python
# Referred from: github.com:rwightman/pytorch-image-models.
class StochasticDepth(layers.Layer):
def __init__(self, drop_prop, **kwargs):
super().__init__(**kwargs)
self.drop_prob = drop_prop
self.seed_generator = keras.random.SeedGenerator(1337)
def call(self, x, training=None):
if training:
keep_prob = 1 - self.drop_prob
shape = (keras.ops.shape(x)[0],) + (1,) * (len(x.shape) - 1)
random_tensor = keep_prob + keras.random.uniform(
shape, 0, 1, seed=self.seed_generator
)
random_tensor = keras.ops.floor(random_tensor)
return (x / keep_prob) * random_tensor
return x
```
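To build intuition, here is a quick, illustrative sanity check (not part of the original example): in training mode the layer either zeroes a whole example in the batch or rescales it by `1 / keep_prob`, so the expected output matches the input.

```python
# Illustrative only: with a drop probability of 0.5, each row is either dropped
# (all zeros) or kept and rescaled by 1 / 0.5 = 2, so the expectation is unchanged.
sd = StochasticDepth(0.5)
x = keras.ops.ones((4, 3))
print(sd(x, training=True))   # rows of 0.0 or 2.0
print(sd(x, training=False))  # identity at inference time
```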
---
## MLP for the Transformers encoder
```python
def mlp(x, hidden_units, dropout_rate):
for units in hidden_units:
x = layers.Dense(units, activation=keras.ops.gelu)(x)
x = layers.Dropout(dropout_rate)(x)
return x
```
---
## Data augmentation
In the [original paper](https://arxiv.org/abs/2104.05704), the authors use
[AutoAugment](https://arxiv.org/abs/1805.09501) to induce stronger regularization. For
this example, we will be using the standard geometric augmentations like random cropping
and flipping.
```python
# Note the rescaling layer. These layers have pre-defined inference behavior.
data_augmentation = keras.Sequential(
[
layers.Rescaling(scale=1.0 / 255),
layers.RandomCrop(image_size, image_size),
layers.RandomFlip("horizontal"),
],
name="data_augmentation",
)
```
---
## The final CCT model
In CCT, outputs from the Transformers encoder are weighted and then passed on to the final task-specific layer (in
this example, we do classification).
```python
def create_cct_model(
image_size=image_size,
input_shape=input_shape,
num_heads=num_heads,
projection_dim=projection_dim,
transformer_units=transformer_units,
):
inputs = layers.Input(input_shape)
# Augment data.
augmented = data_augmentation(inputs)
# Encode patches.
cct_tokenizer = CCTTokenizer()
encoded_patches = cct_tokenizer(augmented)
# Apply positional embedding.
if positional_emb:
sequence_length = encoded_patches.shape[1]
encoded_patches += PositionEmbedding(sequence_length=sequence_length)(
encoded_patches
)
# Calculate Stochastic Depth probabilities.
dpr = [x for x in np.linspace(0, stochastic_depth_rate, transformer_layers)]
# Create multiple layers of the Transformer block.
for i in range(transformer_layers):
# Layer normalization 1.
x1 = layers.LayerNormalization(epsilon=1e-5)(encoded_patches)
# Create a multi-head attention layer.
attention_output = layers.MultiHeadAttention(
num_heads=num_heads, key_dim=projection_dim, dropout=0.1
)(x1, x1)
# Skip connection 1.
attention_output = StochasticDepth(dpr[i])(attention_output)
x2 = layers.Add()([attention_output, encoded_patches])
# Layer normalization 2.
x3 = layers.LayerNormalization(epsilon=1e-5)(x2)
# MLP.
x3 = mlp(x3, hidden_units=transformer_units, dropout_rate=0.1)
# Skip connection 2.
x3 = StochasticDepth(dpr[i])(x3)
encoded_patches = layers.Add()([x3, x2])
# Apply sequence pooling.
representation = layers.LayerNormalization(epsilon=1e-5)(encoded_patches)
weighted_representation = SequencePooling()(representation)
# Classify outputs.
logits = layers.Dense(num_classes)(weighted_representation)
# Create the Keras model.
model = keras.Model(inputs=inputs, outputs=logits)
return model
```
---
## Model training and evaluation
```python
def run_experiment(model):
optimizer = keras.optimizers.AdamW(learning_rate=0.001, weight_decay=0.0001)
model.compile(
optimizer=optimizer,
loss=keras.losses.CategoricalCrossentropy(
from_logits=True, label_smoothing=0.1
),
metrics=[
keras.metrics.CategoricalAccuracy(name="accuracy"),
keras.metrics.TopKCategoricalAccuracy(5, name="top-5-accuracy"),
],
)
checkpoint_filepath = "/tmp/checkpoint.weights.h5"
checkpoint_callback = keras.callbacks.ModelCheckpoint(
checkpoint_filepath,
monitor="val_accuracy",
save_best_only=True,
save_weights_only=True,
)
history = model.fit(
x=x_train,
y=y_train,
batch_size=batch_size,
epochs=num_epochs,
validation_split=0.1,
callbacks=[checkpoint_callback],
)
model.load_weights(checkpoint_filepath)
_, accuracy, top_5_accuracy = model.evaluate(x_test, y_test)
print(f"Test accuracy: {round(accuracy * 100, 2)}%")
print(f"Test top 5 accuracy: {round(top_5_accuracy * 100, 2)}%")
return history
cct_model = create_cct_model()
history = run_experiment(cct_model)
```
<div class="k-default-codeblock">
```
Epoch 1/30
352/352 ━━━━━━━━━━━━━━━━━━━━ 90s 248ms/step - accuracy: 0.2578 - loss: 2.0882 - top-5-accuracy: 0.7553 - val_accuracy: 0.4438 - val_loss: 1.6872 - val_top-5-accuracy: 0.9046
Epoch 2/30
352/352 ━━━━━━━━━━━━━━━━━━━━ 91s 258ms/step - accuracy: 0.4779 - loss: 1.6074 - top-5-accuracy: 0.9261 - val_accuracy: 0.5730 - val_loss: 1.4462 - val_top-5-accuracy: 0.9562
Epoch 3/30
352/352 ━━━━━━━━━━━━━━━━━━━━ 92s 260ms/step - accuracy: 0.5655 - loss: 1.4371 - top-5-accuracy: 0.9501 - val_accuracy: 0.6178 - val_loss: 1.3458 - val_top-5-accuracy: 0.9626
Epoch 4/30
352/352 ━━━━━━━━━━━━━━━━━━━━ 92s 261ms/step - accuracy: 0.6166 - loss: 1.3343 - top-5-accuracy: 0.9613 - val_accuracy: 0.6610 - val_loss: 1.2695 - val_top-5-accuracy: 0.9706
Epoch 5/30
352/352 ━━━━━━━━━━━━━━━━━━━━ 92s 261ms/step - accuracy: 0.6468 - loss: 1.2814 - top-5-accuracy: 0.9672 - val_accuracy: 0.6834 - val_loss: 1.2231 - val_top-5-accuracy: 0.9716
Epoch 6/30
352/352 ━━━━━━━━━━━━━━━━━━━━ 92s 261ms/step - accuracy: 0.6619 - loss: 1.2412 - top-5-accuracy: 0.9708 - val_accuracy: 0.6842 - val_loss: 1.2018 - val_top-5-accuracy: 0.9744
Epoch 7/30
352/352 ━━━━━━━━━━━━━━━━━━━━ 93s 263ms/step - accuracy: 0.6976 - loss: 1.1775 - top-5-accuracy: 0.9752 - val_accuracy: 0.6988 - val_loss: 1.1988 - val_top-5-accuracy: 0.9752
Epoch 8/30
352/352 ━━━━━━━━━━━━━━━━━━━━ 93s 263ms/step - accuracy: 0.7070 - loss: 1.1579 - top-5-accuracy: 0.9774 - val_accuracy: 0.7010 - val_loss: 1.1780 - val_top-5-accuracy: 0.9732
Epoch 9/30
352/352 ━━━━━━━━━━━━━━━━━━━━ 95s 269ms/step - accuracy: 0.7219 - loss: 1.1255 - top-5-accuracy: 0.9795 - val_accuracy: 0.7166 - val_loss: 1.1375 - val_top-5-accuracy: 0.9784
Epoch 10/30
352/352 ━━━━━━━━━━━━━━━━━━━━ 93s 264ms/step - accuracy: 0.7273 - loss: 1.1087 - top-5-accuracy: 0.9801 - val_accuracy: 0.7258 - val_loss: 1.1286 - val_top-5-accuracy: 0.9814
Epoch 11/30
352/352 ━━━━━━━━━━━━━━━━━━━━ 93s 265ms/step - accuracy: 0.7361 - loss: 1.0863 - top-5-accuracy: 0.9828 - val_accuracy: 0.7222 - val_loss: 1.1412 - val_top-5-accuracy: 0.9766
Epoch 12/30
352/352 ━━━━━━━━━━━━━━━━━━━━ 93s 264ms/step - accuracy: 0.7504 - loss: 1.0644 - top-5-accuracy: 0.9834 - val_accuracy: 0.7418 - val_loss: 1.0943 - val_top-5-accuracy: 0.9812
Epoch 13/30
352/352 ━━━━━━━━━━━━━━━━━━━━ 94s 266ms/step - accuracy: 0.7593 - loss: 1.0422 - top-5-accuracy: 0.9856 - val_accuracy: 0.7468 - val_loss: 1.0834 - val_top-5-accuracy: 0.9818
Epoch 14/30
352/352 ━━━━━━━━━━━━━━━━━━━━ 93s 265ms/step - accuracy: 0.7647 - loss: 1.0307 - top-5-accuracy: 0.9868 - val_accuracy: 0.7526 - val_loss: 1.0863 - val_top-5-accuracy: 0.9822
Epoch 15/30
352/352 ━━━━━━━━━━━━━━━━━━━━ 93s 263ms/step - accuracy: 0.7684 - loss: 1.0231 - top-5-accuracy: 0.9863 - val_accuracy: 0.7666 - val_loss: 1.0454 - val_top-5-accuracy: 0.9834
Epoch 16/30
352/352 ━━━━━━━━━━━━━━━━━━━━ 94s 268ms/step - accuracy: 0.7809 - loss: 1.0007 - top-5-accuracy: 0.9859 - val_accuracy: 0.7670 - val_loss: 1.0469 - val_top-5-accuracy: 0.9838
Epoch 17/30
352/352 ━━━━━━━━━━━━━━━━━━━━ 94s 268ms/step - accuracy: 0.7902 - loss: 0.9795 - top-5-accuracy: 0.9895 - val_accuracy: 0.7676 - val_loss: 1.0396 - val_top-5-accuracy: 0.9836
Epoch 18/30
352/352 ━━━━━━━━━━━━━━━━━━━━ 106s 301ms/step - accuracy: 0.7920 - loss: 0.9693 - top-5-accuracy: 0.9889 - val_accuracy: 0.7616 - val_loss: 1.0791 - val_top-5-accuracy: 0.9828
Epoch 19/30
352/352 ━━━━━━━━━━━━━━━━━━━━ 93s 264ms/step - accuracy: 0.7965 - loss: 0.9631 - top-5-accuracy: 0.9893 - val_accuracy: 0.7850 - val_loss: 1.0149 - val_top-5-accuracy: 0.9842
Epoch 20/30
352/352 ━━━━━━━━━━━━━━━━━━━━ 93s 265ms/step - accuracy: 0.8030 - loss: 0.9529 - top-5-accuracy: 0.9899 - val_accuracy: 0.7898 - val_loss: 1.0029 - val_top-5-accuracy: 0.9852
Epoch 21/30
352/352 ━━━━━━━━━━━━━━━━━━━━ 92s 261ms/step - accuracy: 0.8118 - loss: 0.9322 - top-5-accuracy: 0.9903 - val_accuracy: 0.7728 - val_loss: 1.0529 - val_top-5-accuracy: 0.9850
Epoch 22/30
352/352 ━━━━━━━━━━━━━━━━━━━━ 91s 259ms/step - accuracy: 0.8104 - loss: 0.9308 - top-5-accuracy: 0.9906 - val_accuracy: 0.7874 - val_loss: 1.0090 - val_top-5-accuracy: 0.9876
Epoch 23/30
352/352 ━━━━━━━━━━━━━━━━━━━━ 92s 263ms/step - accuracy: 0.8164 - loss: 0.9193 - top-5-accuracy: 0.9911 - val_accuracy: 0.7800 - val_loss: 1.0091 - val_top-5-accuracy: 0.9844
Epoch 24/30
352/352 ━━━━━━━━━━━━━━━━━━━━ 94s 268ms/step - accuracy: 0.8147 - loss: 0.9184 - top-5-accuracy: 0.9919 - val_accuracy: 0.7854 - val_loss: 1.0260 - val_top-5-accuracy: 0.9856
Epoch 25/30
352/352 ━━━━━━━━━━━━━━━━━━━━ 92s 262ms/step - accuracy: 0.8255 - loss: 0.9000 - top-5-accuracy: 0.9914 - val_accuracy: 0.7918 - val_loss: 1.0014 - val_top-5-accuracy: 0.9842
Epoch 26/30
352/352 ━━━━━━━━━━━━━━━━━━━━ 90s 257ms/step - accuracy: 0.8297 - loss: 0.8865 - top-5-accuracy: 0.9933 - val_accuracy: 0.7924 - val_loss: 1.0065 - val_top-5-accuracy: 0.9834
Epoch 27/30
352/352 ━━━━━━━━━━━━━━━━━━━━ 92s 262ms/step - accuracy: 0.8339 - loss: 0.8837 - top-5-accuracy: 0.9931 - val_accuracy: 0.7906 - val_loss: 1.0035 - val_top-5-accuracy: 0.9870
Epoch 28/30
352/352 ━━━━━━━━━━━━━━━━━━━━ 92s 260ms/step - accuracy: 0.8362 - loss: 0.8781 - top-5-accuracy: 0.9934 - val_accuracy: 0.7878 - val_loss: 1.0041 - val_top-5-accuracy: 0.9850
Epoch 29/30
352/352 ━━━━━━━━━━━━━━━━━━━━ 92s 260ms/step - accuracy: 0.8398 - loss: 0.8707 - top-5-accuracy: 0.9942 - val_accuracy: 0.7854 - val_loss: 1.0186 - val_top-5-accuracy: 0.9858
Epoch 30/30
352/352 ━━━━━━━━━━━━━━━━━━━━ 92s 263ms/step - accuracy: 0.8438 - loss: 0.8614 - top-5-accuracy: 0.9933 - val_accuracy: 0.7892 - val_loss: 1.0123 - val_top-5-accuracy: 0.9846
313/313 ━━━━━━━━━━━━━━━━━━━━ 14s 44ms/step - accuracy: 0.7752 - loss: 1.0370 - top-5-accuracy: 0.9824
Test accuracy: 77.82%
Test top 5 accuracy: 98.42%
```
</div>
Let's now visualize the training progress of the model.
```python
plt.plot(history.history["loss"], label="train_loss")
plt.plot(history.history["val_loss"], label="val_loss")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.title("Train and Validation Losses Over Epochs", fontsize=14)
plt.legend()
plt.grid()
plt.show()
```
![png](/img/examples/vision/cct/cct_25_0.png)
The CCT model we just trained has just **0.4 million** parameters, and it gets us to
~79% top-1 validation accuracy within 30 epochs. The plot above shows no signs of overfitting as
well. This means we can train this network for longer (perhaps with a bit more
regularization) and may obtain even better performance. This performance can further be
improved by additional recipes like cosine decay learning rate schedule, other data augmentation
techniques like [AutoAugment](https://arxiv.org/abs/1805.09501),
[MixUp](https://arxiv.org/abs/1710.09412) or
[Cutmix](https://arxiv.org/abs/1905.04899). With these modifications, the authors present
95.1% top-1 accuracy on the CIFAR-10 dataset. The authors also present a number of
experiments to study how the number of convolution blocks, Transformers layers, etc.
affect the final performance of CCTs.
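As an illustration of one such recipe, the sketch below shows how a cosine decay learning rate schedule could be plugged into `run_experiment` in place of the fixed learning rate. The decay horizon is only a rough assumption (steps per epoch times `num_epochs`, given the 10% validation split) and would need tuning.

```python
# Sketch only: a cosine decay schedule that could replace the fixed learning rate
# passed to AdamW inside `run_experiment`.
steps_per_epoch = int(0.9 * x_train.shape[0]) // batch_size
cosine_schedule = keras.optimizers.schedules.CosineDecay(
    initial_learning_rate=learning_rate,
    decay_steps=steps_per_epoch * num_epochs,
)
optimizer = keras.optimizers.AdamW(
    learning_rate=cosine_schedule, weight_decay=weight_decay
)
```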
For a comparison, a ViT model takes about **4.7 million** parameters and **100
epochs** of training to reach a top-1 accuracy of 78.22% on the CIFAR-10 dataset. You can
refer to
[this notebook](https://colab.research.google.com/gist/sayakpaul/1a80d9f582b044354a1a26c5cb3d69e5/image_classification_with_vision_transformer.ipynb)
to know about the experimental setup.
The authors also demonstrate the performance of Compact Convolutional Transformers on
NLP tasks and they report competitive results there.
| keras-io/examples/vision/md/cct.md/0 | {
"file_path": "keras-io/examples/vision/md/cct.md",
"repo_id": "keras-io",
"token_count": 8939
} | 118 |
# Handwriting recognition
**Authors:** [A_K_Nain](https://twitter.com/A_K_Nain), [Sayak Paul](https://twitter.com/RisingSayak)<br>
**Date created:** 2021/08/16<br>
**Last modified:** 2023/07/06<br>
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/vision/ipynb/handwriting_recognition.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/vision/handwriting_recognition.py)
**Description:** Training a handwriting recognition model with variable-length sequences.
---
## Introduction
This example shows how the [Captcha OCR](https://keras.io/examples/vision/captcha_ocr/)
example can be extended to the
[IAM Dataset](https://fki.tic.heia-fr.ch/databases/iam-handwriting-database),
which has variable length ground-truth targets. Each sample in the dataset is an image of some
handwritten text, and its corresponding target is the string present in the image.
The IAM Dataset is widely used across many OCR benchmarks, so we hope this example can serve as a
good starting point for building OCR systems.
---
## Data collection
```python
!wget -q https://github.com/sayakpaul/Handwriting-Recognizer-in-Keras/releases/download/v1.0.0/IAM_Words.zip
!unzip -qq IAM_Words.zip
!
!mkdir data
!mkdir data/words
!tar -xf IAM_Words/words.tgz -C data/words
!mv IAM_Words/words.txt data
```
Preview how the dataset is organized. Lines prepended by "#" are just metadata information.
```python
!head -20 data/words.txt
```
<div class="k-default-codeblock">
```
#--- words.txt ---------------------------------------------------------------#
#
# iam database word information
#
# format: a01-000u-00-00 ok 154 1 408 768 27 51 AT A
#
# a01-000u-00-00 -> word id for line 00 in form a01-000u
# ok -> result of word segmentation
# ok: word was correctly
# er: segmentation of word can be bad
#
# 154 -> graylevel to binarize the line containing this word
# 1 -> number of components for this word
# 408 768 27 51 -> bounding box around this word in x,y,w,h format
# AT -> the grammatical tag for this word, see the
# file tagset.txt for an explanation
# A -> the transcription for this word
#
a01-000u-00-00 ok 154 408 768 27 51 AT A
a01-000u-00-01 ok 154 507 766 213 48 NN MOVE
```
</div>
---
## Imports
```python
from tensorflow.keras.layers import StringLookup
from tensorflow import keras
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
import os
np.random.seed(42)
tf.random.set_seed(42)
```
---
## Dataset splitting
```python
base_path = "data"
words_list = []
words = open(f"{base_path}/words.txt", "r").readlines()
for line in words:
if line[0] == "#":
continue
if line.split(" ")[1] != "err": # We don't need to deal with errored entries.
words_list.append(line)
len(words_list)
np.random.shuffle(words_list)
```
We will split the dataset into three subsets with a 90:5:5 ratio (train:validation:test).
```python
split_idx = int(0.9 * len(words_list))
train_samples = words_list[:split_idx]
test_samples = words_list[split_idx:]
val_split_idx = int(0.5 * len(test_samples))
validation_samples = test_samples[:val_split_idx]
test_samples = test_samples[val_split_idx:]
assert len(words_list) == len(train_samples) + len(validation_samples) + len(
test_samples
)
print(f"Total training samples: {len(train_samples)}")
print(f"Total validation samples: {len(validation_samples)}")
print(f"Total test samples: {len(test_samples)}")
```
<div class="k-default-codeblock">
```
Total training samples: 86810
Total validation samples: 4823
Total test samples: 4823
```
</div>
---
## Data input pipeline
We start building our data input pipeline by first preparing the image paths.
```python
base_image_path = os.path.join(base_path, "words")
def get_image_paths_and_labels(samples):
paths = []
corrected_samples = []
for (i, file_line) in enumerate(samples):
line_split = file_line.strip()
line_split = line_split.split(" ")
# Each line split will have this format for the corresponding image:
# part1/part1-part2/part1-part2-part3.png
image_name = line_split[0]
partI = image_name.split("-")[0]
partII = image_name.split("-")[1]
img_path = os.path.join(
base_image_path, partI, partI + "-" + partII, image_name + ".png"
)
if os.path.getsize(img_path):
paths.append(img_path)
corrected_samples.append(file_line.split("\n")[0])
return paths, corrected_samples
train_img_paths, train_labels = get_image_paths_and_labels(train_samples)
validation_img_paths, validation_labels = get_image_paths_and_labels(validation_samples)
test_img_paths, test_labels = get_image_paths_and_labels(test_samples)
```
Then we prepare the ground-truth labels.
```python
# Find maximum length and the size of the vocabulary in the training data.
train_labels_cleaned = []
characters = set()
max_len = 0
for label in train_labels:
label = label.split(" ")[-1].strip()
for char in label:
characters.add(char)
max_len = max(max_len, len(label))
train_labels_cleaned.append(label)
characters = sorted(list(characters))
print("Maximum length: ", max_len)
print("Vocab size: ", len(characters))
# Check some label samples.
train_labels_cleaned[:10]
```
<div class="k-default-codeblock">
```
Maximum length: 21
Vocab size: 78
['sure',
'he',
'during',
'of',
'booty',
'gastronomy',
'boy',
'The',
'and',
'in']
```
</div>
Now we clean the validation and the test labels as well.
```python
def clean_labels(labels):
cleaned_labels = []
for label in labels:
label = label.split(" ")[-1].strip()
cleaned_labels.append(label)
return cleaned_labels
validation_labels_cleaned = clean_labels(validation_labels)
test_labels_cleaned = clean_labels(test_labels)
```
### Building the character vocabulary
Keras provides different preprocessing layers to deal with different modalities of data.
[This guide](https://keras.io/api/layers/preprocessing_layers/) provides a comprehensive introduction.
Our example involves preprocessing labels at the character
level. This means that if there are two labels, e.g. "cat" and "dog", then our character
vocabulary should be {a, c, d, g, o, t} (without any special tokens). We use the
[`StringLookup`](https://keras.io/api/layers/preprocessing_layers/categorical/string_lookup/)
layer for this purpose.
```python
AUTOTUNE = tf.data.AUTOTUNE
# Mapping characters to integers.
char_to_num = StringLookup(vocabulary=list(characters), mask_token=None)
# Mapping integers back to original characters.
num_to_char = StringLookup(
vocabulary=char_to_num.get_vocabulary(), mask_token=None, invert=True
)
```
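As a quick, illustrative check (not part of the original pipeline), the two lookups round-trip a label at the character level:

```python
# Illustrative round-trip: characters -> integer ids -> characters.
example = tf.strings.unicode_split("sure", input_encoding="UTF-8")
ids = char_to_num(example)
decoded = tf.strings.reduce_join(num_to_char(ids)).numpy().decode("utf-8")
print(ids.numpy(), decoded)  # integer ids from the vocabulary, then "sure"
```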
### Resizing images without distortion
Instead of square images, many OCR models work with rectangular images. This will become
clearer in a moment when we visualize a few samples from the dataset. While
aspect-unaware resizing of square images does not introduce a significant amount of
distortion, this is not the case for rectangular images. But resizing images to a uniform
size is a requirement for mini-batching. So we need to perform our resizing such that
the following criteria are met:
* Aspect ratio is preserved.
* Content of the images is not affected.
```python
def distortion_free_resize(image, img_size):
w, h = img_size
image = tf.image.resize(image, size=(h, w), preserve_aspect_ratio=True)
    # Check the amount of padding needed.
pad_height = h - tf.shape(image)[0]
pad_width = w - tf.shape(image)[1]
    # Only necessary if you want to do the same amount of padding on both sides.
if pad_height % 2 != 0:
height = pad_height // 2
pad_height_top = height + 1
pad_height_bottom = height
else:
pad_height_top = pad_height_bottom = pad_height // 2
if pad_width % 2 != 0:
width = pad_width // 2
pad_width_left = width + 1
pad_width_right = width
else:
pad_width_left = pad_width_right = pad_width // 2
image = tf.pad(
image,
paddings=[
[pad_height_top, pad_height_bottom],
[pad_width_left, pad_width_right],
[0, 0],
],
)
image = tf.transpose(image, perm=[1, 0, 2])
image = tf.image.flip_left_right(image)
return image
```
If we just go with the plain resizing then the images would look like so:
![](https://i.imgur.com/eqq3s4N.png)
Notice how this resizing would have introduced unnecessary stretching.
### Putting the utilities together
```python
batch_size = 64
padding_token = 99
image_width = 128
image_height = 32
def preprocess_image(image_path, img_size=(image_width, image_height)):
image = tf.io.read_file(image_path)
image = tf.image.decode_png(image, 1)
image = distortion_free_resize(image, img_size)
image = tf.cast(image, tf.float32) / 255.0
return image
def vectorize_label(label):
label = char_to_num(tf.strings.unicode_split(label, input_encoding="UTF-8"))
length = tf.shape(label)[0]
pad_amount = max_len - length
label = tf.pad(label, paddings=[[0, pad_amount]], constant_values=padding_token)
return label
def process_images_labels(image_path, label):
image = preprocess_image(image_path)
label = vectorize_label(label)
return {"image": image, "label": label}
def prepare_dataset(image_paths, labels):
dataset = tf.data.Dataset.from_tensor_slices((image_paths, labels)).map(
process_images_labels, num_parallel_calls=AUTOTUNE
)
return dataset.batch(batch_size).cache().prefetch(AUTOTUNE)
```
---
## Prepare `tf.data.Dataset` objects
```python
train_ds = prepare_dataset(train_img_paths, train_labels_cleaned)
validation_ds = prepare_dataset(validation_img_paths, validation_labels_cleaned)
test_ds = prepare_dataset(test_img_paths, test_labels_cleaned)
```
---
## Visualize a few samples
```python
for data in train_ds.take(1):
images, labels = data["image"], data["label"]
_, ax = plt.subplots(4, 4, figsize=(15, 8))
for i in range(16):
img = images[i]
img = tf.image.flip_left_right(img)
img = tf.transpose(img, perm=[1, 0, 2])
img = (img * 255.0).numpy().clip(0, 255).astype(np.uint8)
img = img[:, :, 0]
        # Gather indices where label != padding_token.
label = labels[i]
indices = tf.gather(label, tf.where(tf.math.not_equal(label, padding_token)))
# Convert to string.
label = tf.strings.reduce_join(num_to_char(indices))
label = label.numpy().decode("utf-8")
ax[i // 4, i % 4].imshow(img, cmap="gray")
ax[i // 4, i % 4].set_title(label)
ax[i // 4, i % 4].axis("off")
plt.show()
```
![png](/img/examples/vision/handwriting_recognition/handwriting_recognition_28_0.png)
You will notice that the content of the original image is kept as faithful as possible and has
been padded accordingly.
---
## Model
Our model will use the CTC loss as an endpoint layer. For a detailed understanding of the
CTC loss, refer to [this post](https://distill.pub/2017/ctc/).
```python
class CTCLayer(keras.layers.Layer):
def __init__(self, name=None):
super().__init__(name=name)
self.loss_fn = keras.backend.ctc_batch_cost
def call(self, y_true, y_pred):
batch_len = tf.cast(tf.shape(y_true)[0], dtype="int64")
input_length = tf.cast(tf.shape(y_pred)[1], dtype="int64")
label_length = tf.cast(tf.shape(y_true)[1], dtype="int64")
input_length = input_length * tf.ones(shape=(batch_len, 1), dtype="int64")
label_length = label_length * tf.ones(shape=(batch_len, 1), dtype="int64")
loss = self.loss_fn(y_true, y_pred, input_length, label_length)
self.add_loss(loss)
# At test time, just return the computed predictions.
return y_pred
def build_model():
# Inputs to the model
input_img = keras.Input(shape=(image_width, image_height, 1), name="image")
labels = keras.layers.Input(name="label", shape=(None,))
# First conv block.
x = keras.layers.Conv2D(
32,
(3, 3),
activation="relu",
kernel_initializer="he_normal",
padding="same",
name="Conv1",
)(input_img)
x = keras.layers.MaxPooling2D((2, 2), name="pool1")(x)
# Second conv block.
x = keras.layers.Conv2D(
64,
(3, 3),
activation="relu",
kernel_initializer="he_normal",
padding="same",
name="Conv2",
)(x)
x = keras.layers.MaxPooling2D((2, 2), name="pool2")(x)
# We have used two max pool with pool size and strides 2.
# Hence, downsampled feature maps are 4x smaller. The number of
# filters in the last layer is 64. Reshape accordingly before
# passing the output to the RNN part of the model.
new_shape = ((image_width // 4), (image_height // 4) * 64)
x = keras.layers.Reshape(target_shape=new_shape, name="reshape")(x)
x = keras.layers.Dense(64, activation="relu", name="dense1")(x)
x = keras.layers.Dropout(0.2)(x)
# RNNs.
x = keras.layers.Bidirectional(
keras.layers.LSTM(128, return_sequences=True, dropout=0.25)
)(x)
x = keras.layers.Bidirectional(
keras.layers.LSTM(64, return_sequences=True, dropout=0.25)
)(x)
# +2 is to account for the two special tokens introduced by the CTC loss.
# The recommendation comes here: https://git.io/J0eXP.
x = keras.layers.Dense(
len(char_to_num.get_vocabulary()) + 2, activation="softmax", name="dense2"
)(x)
# Add CTC layer for calculating CTC loss at each step.
output = CTCLayer(name="ctc_loss")(labels, x)
# Define the model.
model = keras.models.Model(
inputs=[input_img, labels], outputs=output, name="handwriting_recognizer"
)
# Optimizer.
opt = keras.optimizers.Adam()
# Compile the model and return.
model.compile(optimizer=opt)
return model
# Get the model.
model = build_model()
model.summary()
```
<div class="k-default-codeblock">
```
Model: "handwriting_recognizer"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
image (InputLayer) [(None, 128, 32, 1)] 0
__________________________________________________________________________________________________
Conv1 (Conv2D) (None, 128, 32, 32) 320 image[0][0]
__________________________________________________________________________________________________
pool1 (MaxPooling2D) (None, 64, 16, 32) 0 Conv1[0][0]
__________________________________________________________________________________________________
Conv2 (Conv2D) (None, 64, 16, 64) 18496 pool1[0][0]
__________________________________________________________________________________________________
pool2 (MaxPooling2D) (None, 32, 8, 64) 0 Conv2[0][0]
__________________________________________________________________________________________________
reshape (Reshape) (None, 32, 512) 0 pool2[0][0]
__________________________________________________________________________________________________
dense1 (Dense) (None, 32, 64) 32832 reshape[0][0]
__________________________________________________________________________________________________
dropout (Dropout) (None, 32, 64) 0 dense1[0][0]
__________________________________________________________________________________________________
bidirectional (Bidirectional) (None, 32, 256) 197632 dropout[0][0]
__________________________________________________________________________________________________
bidirectional_1 (Bidirectional) (None, 32, 128) 164352 bidirectional[0][0]
__________________________________________________________________________________________________
label (InputLayer) [(None, None)] 0
__________________________________________________________________________________________________
dense2 (Dense) (None, 32, 81) 10449 bidirectional_1[0][0]
__________________________________________________________________________________________________
ctc_loss (CTCLayer) (None, 32, 81) 0 label[0][0]
dense2[0][0]
==================================================================================================
Total params: 424,081
Trainable params: 424,081
Non-trainable params: 0
__________________________________________________________________________________________________
```
</div>
---
## Evaluation metric
[Edit Distance](https://en.wikipedia.org/wiki/Edit_distance)
is the most widely used metric for evaluating OCR models. In this section, we will
implement it and use it as a callback to monitor our model.
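For intuition, here is a small, self-contained illustration (not part of the training pipeline): the edit distance between a prediction and its target is the number of character insertions, deletions and substitutions needed to turn one into the other, computed on sparse tensors just as in the callback below.

```python
# Illustrative only: "druing" can be turned into "during" with two
# single-character substitutions, so the (unnormalized) edit distance is 2.
def to_sparse(word):
    ids = char_to_num(tf.strings.unicode_split(word, input_encoding="UTF-8"))
    return tf.sparse.from_dense(tf.expand_dims(ids, 0))

print(tf.edit_distance(to_sparse("druing"), to_sparse("during"), normalize=False).numpy())  # [2.]
```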
We first segregate the validation images and their labels for convenience.
```python
validation_images = []
validation_labels = []
for batch in validation_ds:
validation_images.append(batch["image"])
validation_labels.append(batch["label"])
```
Now, we create a callback to monitor the edit distances.
```python
def calculate_edit_distance(labels, predictions):
# Get a single batch and convert its labels to sparse tensors.
    sparse_labels = tf.cast(tf.sparse.from_dense(labels), dtype=tf.int64)
# Make predictions and convert them to sparse tensors.
input_len = np.ones(predictions.shape[0]) * predictions.shape[1]
predictions_decoded = keras.backend.ctc_decode(
predictions, input_length=input_len, greedy=True
)[0][0][:, :max_len]
sparse_predictions = tf.cast(
tf.sparse.from_dense(predictions_decoded), dtype=tf.int64
)
# Compute individual edit distances and average them out.
edit_distances = tf.edit_distance(
        sparse_predictions, sparse_labels, normalize=False
)
return tf.reduce_mean(edit_distances)
class EditDistanceCallback(keras.callbacks.Callback):
def __init__(self, pred_model):
super().__init__()
self.prediction_model = pred_model
def on_epoch_end(self, epoch, logs=None):
edit_distances = []
for i in range(len(validation_images)):
labels = validation_labels[i]
predictions = self.prediction_model.predict(validation_images[i])
edit_distances.append(calculate_edit_distance(labels, predictions).numpy())
print(
f"Mean edit distance for epoch {epoch + 1}: {np.mean(edit_distances):.4f}"
)
```
---
## Training
Now we are ready to kick off model training.
```python
epochs = 10 # To get good results this should be at least 50.
model = build_model()
prediction_model = keras.models.Model(
model.get_layer(name="image").input, model.get_layer(name="dense2").output
)
edit_distance_callback = EditDistanceCallback(prediction_model)
# Train the model.
history = model.fit(
train_ds,
validation_data=validation_ds,
epochs=epochs,
callbacks=[edit_distance_callback],
)
```
<div class="k-default-codeblock">
```
Epoch 1/10
1357/1357 [==============================] - 89s 51ms/step - loss: 13.6670 - val_loss: 11.8041
Mean edit distance for epoch 1: 20.5117
Epoch 2/10
1357/1357 [==============================] - 48s 36ms/step - loss: 10.6864 - val_loss: 9.6994
Mean edit distance for epoch 2: 20.1167
Epoch 3/10
1357/1357 [==============================] - 48s 35ms/step - loss: 9.0437 - val_loss: 8.0355
Mean edit distance for epoch 3: 19.7270
Epoch 4/10
1357/1357 [==============================] - 48s 35ms/step - loss: 7.6098 - val_loss: 6.4239
Mean edit distance for epoch 4: 19.1106
Epoch 5/10
1357/1357 [==============================] - 48s 35ms/step - loss: 6.3194 - val_loss: 4.9814
Mean edit distance for epoch 5: 18.4894
Epoch 6/10
1357/1357 [==============================] - 48s 35ms/step - loss: 5.3417 - val_loss: 4.1307
Mean edit distance for epoch 6: 18.1909
Epoch 7/10
1357/1357 [==============================] - 48s 35ms/step - loss: 4.6396 - val_loss: 3.7706
Mean edit distance for epoch 7: 18.1224
Epoch 8/10
1357/1357 [==============================] - 48s 35ms/step - loss: 4.1926 - val_loss: 3.3682
Mean edit distance for epoch 8: 17.9387
Epoch 9/10
1357/1357 [==============================] - 48s 36ms/step - loss: 3.8532 - val_loss: 3.1829
Mean edit distance for epoch 9: 17.9074
Epoch 10/10
1357/1357 [==============================] - 49s 36ms/step - loss: 3.5769 - val_loss: 2.9221
Mean edit distance for epoch 10: 17.7960
```
</div>
---
## Inference
```python
# A utility function to decode the output of the network.
def decode_batch_predictions(pred):
input_len = np.ones(pred.shape[0]) * pred.shape[1]
# Use greedy search. For complex tasks, you can use beam search.
results = keras.backend.ctc_decode(pred, input_length=input_len, greedy=True)[0][0][
:, :max_len
]
# Iterate over the results and get back the text.
output_text = []
for res in results:
res = tf.gather(res, tf.where(tf.math.not_equal(res, -1)))
res = tf.strings.reduce_join(num_to_char(res)).numpy().decode("utf-8")
output_text.append(res)
return output_text
# Let's check results on some test samples.
for batch in test_ds.take(1):
batch_images = batch["image"]
_, ax = plt.subplots(4, 4, figsize=(15, 8))
preds = prediction_model.predict(batch_images)
pred_texts = decode_batch_predictions(preds)
for i in range(16):
img = batch_images[i]
img = tf.image.flip_left_right(img)
img = tf.transpose(img, perm=[1, 0, 2])
img = (img * 255.0).numpy().clip(0, 255).astype(np.uint8)
img = img[:, :, 0]
title = f"Prediction: {pred_texts[i]}"
ax[i // 4, i % 4].imshow(img, cmap="gray")
ax[i // 4, i % 4].set_title(title)
ax[i // 4, i % 4].axis("off")
plt.show()
```
![png](/img/examples/vision/handwriting_recognition/handwriting_recognition_40_0.png)
To get better results the model should be trained for at least 50 epochs.
---
## Final remarks
* The `prediction_model` is fully compatible with TensorFlow Lite. If you are interested,
you can use it inside a mobile application. You may find
[this notebook](https://github.com/tulasiram58827/ocr_tflite/blob/main/colabs/captcha_ocr_tflite.ipynb)
to be useful in this regard. A minimal conversion sketch is shown after this list.
* Not all the training examples are perfectly aligned as observed in this example. This
can hurt model performance for complex sequences. To this end, we can leverage
Spatial Transformer Networks ([Jaderberg et al.](https://arxiv.org/abs/1506.02025))
that can help the model learn affine transformations that maximize its performance.
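As a rough sketch of the first point (assuming the standard TensorFlow Lite converter API with default settings; the output filename is arbitrary, and depending on your TensorFlow version you may need to enable select TF ops for the recurrent layers):

```python
# Sketch only: convert the CTC-free prediction model to TensorFlow Lite.
converter = tf.lite.TFLiteConverter.from_keras_model(prediction_model)
tflite_model = converter.convert()
with open("handwriting_recognizer.tflite", "wb") as f:
    f.write(tflite_model)
```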
| keras-io/examples/vision/md/handwriting_recognition.md/0 | {
"file_path": "keras-io/examples/vision/md/handwriting_recognition.md",
"repo_id": "keras-io",
"token_count": 9127
} | 119 |
# Image classification with modern MLP models
**Author:** [Khalid Salama](https://www.linkedin.com/in/khalid-salama-24403144/)<br>
**Date created:** 2021/05/30<br>
**Last modified:** 2023/08/03<br>
**Description:** Implementing the MLP-Mixer, FNet, and gMLP models for CIFAR-100 image classification.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/vision/ipynb/mlp_image_classification.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/vision/mlp_image_classification.py)
---
## Introduction
This example implements three modern attention-free, multi-layer perceptron (MLP) based models for image
classification, demonstrated on the CIFAR-100 dataset:
1. The [MLP-Mixer](https://arxiv.org/abs/2105.01601) model, by Ilya Tolstikhin et al., based on two types of MLPs.
2. The [FNet](https://arxiv.org/abs/2105.03824) model, by James Lee-Thorp et al., based on unparameterized
Fourier Transform.
3. The [gMLP](https://arxiv.org/abs/2105.08050) model, by Hanxiao Liu et al., based on MLP with gating.
The purpose of the example is not to compare these models, as they might perform differently on
different datasets with well-tuned hyperparameters. Rather, it is to show simple implementations of their
main building blocks.
---
## Setup
```python
import numpy as np
import keras
from keras import layers
```
---
## Prepare the data
```python
num_classes = 100
input_shape = (32, 32, 3)
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar100.load_data()
print(f"x_train shape: {x_train.shape} - y_train shape: {y_train.shape}")
print(f"x_test shape: {x_test.shape} - y_test shape: {y_test.shape}")
```
<div class="k-default-codeblock">
```
x_train shape: (50000, 32, 32, 3) - y_train shape: (50000, 1)
x_test shape: (10000, 32, 32, 3) - y_test shape: (10000, 1)
```
</div>
---
## Configure the hyperparameters
```python
weight_decay = 0.0001
batch_size = 128
num_epochs = 1 # Recommended num_epochs = 50
dropout_rate = 0.2
image_size = 64 # We'll resize input images to this size.
patch_size = 8 # Size of the patches to be extracted from the input images.
num_patches = (image_size // patch_size) ** 2 # Size of the data array.
embedding_dim = 256 # Number of hidden units.
num_blocks = 4 # Number of blocks.
print(f"Image size: {image_size} X {image_size} = {image_size ** 2}")
print(f"Patch size: {patch_size} X {patch_size} = {patch_size ** 2} ")
print(f"Patches per image: {num_patches}")
print(f"Elements per patch (3 channels): {(patch_size ** 2) * 3}")
```
<div class="k-default-codeblock">
```
Image size: 64 X 64 = 4096
Patch size: 8 X 8 = 64
Patches per image: 64
Elements per patch (3 channels): 192
```
</div>
---
## Build a classification model
We implement a method that builds a classifier given the processing blocks.
```python
def build_classifier(blocks, positional_encoding=False):
inputs = layers.Input(shape=input_shape)
# Augment data.
augmented = data_augmentation(inputs)
# Create patches.
patches = Patches(patch_size)(augmented)
# Encode patches to generate a [batch_size, num_patches, embedding_dim] tensor.
x = layers.Dense(units=embedding_dim)(patches)
if positional_encoding:
x = x + PositionEmbedding(sequence_length=num_patches)(x)
# Process x using the module blocks.
x = blocks(x)
# Apply global average pooling to generate a [batch_size, embedding_dim] representation tensor.
representation = layers.GlobalAveragePooling1D()(x)
# Apply dropout.
representation = layers.Dropout(rate=dropout_rate)(representation)
# Compute logits outputs.
logits = layers.Dense(num_classes)(representation)
# Create the Keras model.
return keras.Model(inputs=inputs, outputs=logits)
```
---
## Define an experiment
We implement a utility function to compile, train, and evaluate a given model.
```python
def run_experiment(model):
# Create Adam optimizer with weight decay.
optimizer = keras.optimizers.AdamW(
learning_rate=learning_rate,
weight_decay=weight_decay,
)
# Compile the model.
model.compile(
optimizer=optimizer,
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[
keras.metrics.SparseCategoricalAccuracy(name="acc"),
keras.metrics.SparseTopKCategoricalAccuracy(5, name="top5-acc"),
],
)
# Create a learning rate scheduler callback.
reduce_lr = keras.callbacks.ReduceLROnPlateau(
monitor="val_loss", factor=0.5, patience=5
)
# Create an early stopping callback.
early_stopping = keras.callbacks.EarlyStopping(
monitor="val_loss", patience=10, restore_best_weights=True
)
# Fit the model.
history = model.fit(
x=x_train,
y=y_train,
batch_size=batch_size,
epochs=num_epochs,
validation_split=0.1,
callbacks=[early_stopping, reduce_lr],
verbose=0,
)
_, accuracy, top_5_accuracy = model.evaluate(x_test, y_test)
print(f"Test accuracy: {round(accuracy * 100, 2)}%")
print(f"Test top 5 accuracy: {round(top_5_accuracy * 100, 2)}%")
# Return history to plot learning curves.
return history
```
---
## Use data augmentation
```python
data_augmentation = keras.Sequential(
[
layers.Normalization(),
layers.Resizing(image_size, image_size),
layers.RandomFlip("horizontal"),
layers.RandomZoom(height_factor=0.2, width_factor=0.2),
],
name="data_augmentation",
)
# Compute the mean and the variance of the training data for normalization.
data_augmentation.layers[0].adapt(x_train)
```
---
## Implement patch extraction as a layer
```python
class Patches(layers.Layer):
def __init__(self, patch_size, **kwargs):
super().__init__(**kwargs)
self.patch_size = patch_size
def call(self, x):
patches = keras.ops.image.extract_patches(x, self.patch_size)
batch_size = keras.ops.shape(patches)[0]
num_patches = keras.ops.shape(patches)[1] * keras.ops.shape(patches)[2]
patch_dim = keras.ops.shape(patches)[3]
out = keras.ops.reshape(patches, (batch_size, num_patches, patch_dim))
return out
```
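As a quick shape check (illustrative only), a batch of `image_size x image_size` images should yield `num_patches` flattened patches of length `patch_size * patch_size * 3`:

```python
# Illustrative shape check: 64 patches of 8 * 8 * 3 = 192 values each.
dummy = keras.ops.ones((1, image_size, image_size, 3))
print(Patches(patch_size)(dummy).shape)  # (1, 64, 192)
```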
---
## Implement position embedding as a layer
```python
class PositionEmbedding(keras.layers.Layer):
def __init__(
self,
sequence_length,
initializer="glorot_uniform",
**kwargs,
):
super().__init__(**kwargs)
if sequence_length is None:
raise ValueError("`sequence_length` must be an Integer, received `None`.")
self.sequence_length = int(sequence_length)
self.initializer = keras.initializers.get(initializer)
def get_config(self):
config = super().get_config()
config.update(
{
"sequence_length": self.sequence_length,
"initializer": keras.initializers.serialize(self.initializer),
}
)
return config
def build(self, input_shape):
feature_size = input_shape[-1]
self.position_embeddings = self.add_weight(
name="embeddings",
shape=[self.sequence_length, feature_size],
initializer=self.initializer,
trainable=True,
)
super().build(input_shape)
def call(self, inputs, start_index=0):
shape = keras.ops.shape(inputs)
feature_length = shape[-1]
sequence_length = shape[-2]
# trim to match the length of the input sequence, which might be less
# than the sequence_length of the layer.
position_embeddings = keras.ops.convert_to_tensor(self.position_embeddings)
position_embeddings = keras.ops.slice(
position_embeddings,
(start_index, 0),
(sequence_length, feature_length),
)
return keras.ops.broadcast_to(position_embeddings, shape)
def compute_output_shape(self, input_shape):
return input_shape
```
---
## The MLP-Mixer model
The MLP-Mixer is an architecture based exclusively on
multi-layer perceptrons (MLPs), that contains two types of MLP layers:
1. One applied independently to image patches, which mixes the per-location features.
2. The other applied across patches (along channels), which mixes spatial information.
This is similar to a [depthwise separable convolution based model](https://arxiv.org/abs/1610.02357)
such as the Xception model, but with two chained dense transforms, no max pooling, and layer normalization
instead of batch normalization.
### Implement the MLP-Mixer module
```python
class MLPMixerLayer(layers.Layer):
def __init__(self, num_patches, hidden_units, dropout_rate, *args, **kwargs):
super().__init__(*args, **kwargs)
self.mlp1 = keras.Sequential(
[
layers.Dense(units=num_patches, activation="gelu"),
layers.Dense(units=num_patches),
layers.Dropout(rate=dropout_rate),
]
)
self.mlp2 = keras.Sequential(
[
layers.Dense(units=num_patches, activation="gelu"),
layers.Dense(units=hidden_units),
layers.Dropout(rate=dropout_rate),
]
)
self.normalize = layers.LayerNormalization(epsilon=1e-6)
def build(self, input_shape):
return super().build(input_shape)
def call(self, inputs):
# Apply layer normalization.
x = self.normalize(inputs)
# Transpose inputs from [num_batches, num_patches, hidden_units] to [num_batches, hidden_units, num_patches].
x_channels = keras.ops.transpose(x, axes=(0, 2, 1))
# Apply mlp1 on each channel independently.
mlp1_outputs = self.mlp1(x_channels)
# Transpose mlp1_outputs from [num_batches, hidden_dim, num_patches] to [num_batches, num_patches, hidden_units].
mlp1_outputs = keras.ops.transpose(mlp1_outputs, axes=(0, 2, 1))
# Add skip connection.
x = mlp1_outputs + inputs
# Apply layer normalization.
x_patches = self.normalize(x)
        # Apply mlp2 on each patch independently.
mlp2_outputs = self.mlp2(x_patches)
# Add skip connection.
x = x + mlp2_outputs
return x
```
### Build, train, and evaluate the MLP-Mixer model
Note that training the model with the current settings on a V100 GPU
takes around 8 seconds per epoch.
```python
mlpmixer_blocks = keras.Sequential(
[MLPMixerLayer(num_patches, embedding_dim, dropout_rate) for _ in range(num_blocks)]
)
learning_rate = 0.005
mlpmixer_classifier = build_classifier(mlpmixer_blocks)
history = run_experiment(mlpmixer_classifier)
```
<div class="k-default-codeblock">
```
Test accuracy: 9.76%
Test top 5 accuracy: 30.8%
```
</div>
The MLP-Mixer model tends to have far fewer parameters compared
to convolutional and Transformer-based models, which leads to lower training and
serving computational cost.
As mentioned in the [MLP-Mixer](https://arxiv.org/abs/2105.01601) paper,
when pre-trained on large datasets, or with modern regularization schemes,
the MLP-Mixer attains competitive scores to state-of-the-art models.
You can obtain better results by increasing the embedding dimensions,
increasing the number of mixer blocks, and training the model for longer.
You may also try to increase the size of the input images and use different patch sizes.
---
## The FNet model
The FNet uses a similar block to the Transformer block. However, FNet replaces the self-attention layer
in the Transformer block with a parameter-free 2D Fourier transformation layer:
1. One 1D Fourier Transform is applied along the patches.
2. One 1D Fourier Transform is applied along the channels.
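As a minimal sketch of this idea (illustrative only, not part of the original example, and assuming the `keras` import used throughout this guide), the module below simply keeps the real part of a 2D FFT computed over the (patches, channels) axes; the transform is parameter-free and preserves the input shape:
```python
import numpy as np

x = np.random.rand(2, 64, 128).astype("float32")    # (batch, patches, channels)
real, imag = keras.ops.fft2((x, np.zeros_like(x)))  # FFT over the last two axes
print(real.shape)  # (2, 64, 128) -- same shape, no trainable parameters
```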
### Implement the FNet module
```python
class FNetLayer(layers.Layer):
def __init__(self, embedding_dim, dropout_rate, *args, **kwargs):
super().__init__(*args, **kwargs)
self.ffn = keras.Sequential(
[
layers.Dense(units=embedding_dim, activation="gelu"),
layers.Dropout(rate=dropout_rate),
layers.Dense(units=embedding_dim),
]
)
self.normalize1 = layers.LayerNormalization(epsilon=1e-6)
self.normalize2 = layers.LayerNormalization(epsilon=1e-6)
def call(self, inputs):
        # Apply Fourier transformations.
real_part = inputs
im_part = keras.ops.zeros_like(inputs)
x = keras.ops.fft2((real_part, im_part))[0]
# Add skip connection.
x = x + inputs
# Apply layer normalization.
x = self.normalize1(x)
        # Apply the feedforward network.
x_ffn = self.ffn(x)
# Add skip connection.
x = x + x_ffn
# Apply layer normalization.
return self.normalize2(x)
```
### Build, train, and evaluate the FNet model
Note that training the model with the current settings on a V100 GPU
takes around 8 seconds per epoch.
```python
fnet_blocks = keras.Sequential(
[FNetLayer(embedding_dim, dropout_rate) for _ in range(num_blocks)]
)
learning_rate = 0.001
fnet_classifier = build_classifier(fnet_blocks, positional_encoding=True)
history = run_experiment(fnet_classifier)
```
<div class="k-default-codeblock">
```
Test accuracy: 13.82%
Test top 5 accuracy: 36.15%
```
</div>
As shown in the [FNet](https://arxiv.org/abs/2105.03824) paper,
better results can be achieved by increasing the embedding dimensions,
increasing the number of FNet blocks, and training the model for longer.
You may also try to increase the size of the input images and use different patch sizes.
The FNet scales very efficiently to long inputs, runs much faster than attention-based
Transformer models, and produces competitive accuracy results.
---
## The gMLP model
The gMLP is an MLP architecture that features a Spatial Gating Unit (SGU).
The SGU enables cross-patch interactions across the spatial (channel) dimension, by:
1. Transforming the input spatially by applying linear projection across patches (along channels).
2. Applying element-wise multiplication of the input and its spatial transformation.
### Implement the gMLP module
```python
class gMLPLayer(layers.Layer):
def __init__(self, num_patches, embedding_dim, dropout_rate, *args, **kwargs):
super().__init__(*args, **kwargs)
self.channel_projection1 = keras.Sequential(
[
layers.Dense(units=embedding_dim * 2, activation="gelu"),
layers.Dropout(rate=dropout_rate),
]
)
self.channel_projection2 = layers.Dense(units=embedding_dim)
self.spatial_projection = layers.Dense(
units=num_patches, bias_initializer="Ones"
)
self.normalize1 = layers.LayerNormalization(epsilon=1e-6)
self.normalize2 = layers.LayerNormalization(epsilon=1e-6)
def spatial_gating_unit(self, x):
        # Split x along the channel dimension.
        # Tensors u and v will be of shape [batch_size, num_patches, embedding_dim].
u, v = keras.ops.split(x, indices_or_sections=2, axis=2)
# Apply layer normalization.
v = self.normalize2(v)
# Apply spatial projection.
v_channels = keras.ops.transpose(v, axes=(0, 2, 1))
v_projected = self.spatial_projection(v_channels)
v_projected = keras.ops.transpose(v_projected, axes=(0, 2, 1))
# Apply element-wise multiplication.
return u * v_projected
def call(self, inputs):
# Apply layer normalization.
x = self.normalize1(inputs)
# Apply the first channel projection. x_projected shape: [batch_size, num_patches, embedding_dim * 2].
x_projected = self.channel_projection1(x)
# Apply the spatial gating unit. x_spatial shape: [batch_size, num_patches, embedding_dim].
x_spatial = self.spatial_gating_unit(x_projected)
# Apply the second channel projection. x_projected shape: [batch_size, num_patches, embedding_dim].
x_projected = self.channel_projection2(x_spatial)
# Add skip connection.
return x + x_projected
```
### Build, train, and evaluate the gMLP model
Note that training the model with the current settings on a V100 GPU
takes around 9 seconds per epoch.
```python
gmlp_blocks = keras.Sequential(
[gMLPLayer(num_patches, embedding_dim, dropout_rate) for _ in range(num_blocks)]
)
learning_rate = 0.003
gmlp_classifier = build_classifier(gmlp_blocks)
history = run_experiment(gmlp_classifier)
```
<div class="k-default-codeblock">
```
Test accuracy: 17.05%
Test top 5 accuracy: 42.57%
```
</div>
As shown in the [gMLP](https://arxiv.org/abs/2105.08050) paper,
better results can be achieved by increasing the embedding dimensions,
increasing the number of gMLP blocks, and training the model for longer.
You may also try to increase the size of the input images and use different patch sizes.
Note that the paper used advanced regularization strategies, such as MixUp and CutMix,
as well as AutoAugment.
| keras-io/examples/vision/md/mlp_image_classification.md/0 | {
"file_path": "keras-io/examples/vision/md/mlp_image_classification.md",
"repo_id": "keras-io",
"token_count": 6733
} | 120 |
# Object Detection with RetinaNet
**Author:** [Srihari Humbarwadi](https://twitter.com/srihari_rh)<br>
**Date created:** 2020/05/17<br>
**Last modified:** 2023/07/10<br>
**Description:** Implementing RetinaNet: Focal Loss for Dense Object Detection.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/vision/ipynb/retinanet.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/vision/retinanet.py)
---
## Introduction
Object detection is a very important problem in computer
vision. Here the model is tasked with localizing the objects present in an
image, and at the same time, classifying them into different categories.
Object detection models can be broadly classified into "single-stage" and
"two-stage" detectors. Two-stage detectors are often more accurate but at the
cost of being slower. In this example, we will implement RetinaNet,
a popular single-stage detector, which is accurate and runs fast.
RetinaNet uses a feature pyramid network to efficiently detect objects at
multiple scales and introduces a new loss, the Focal loss function, to alleviate
the problem of the extreme foreground-background class imbalance.
**References:**
- [RetinaNet Paper](https://arxiv.org/abs/1708.02002)
- [Feature Pyramid Network Paper](https://arxiv.org/abs/1612.03144)
```python
import os
import re
import zipfile
import numpy as np
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
import tensorflow_datasets as tfds
```
---
## Downloading the COCO2017 dataset
Training on the entire COCO2017 dataset, which has around 118k images, takes a
lot of time, so we will be using a smaller subset of ~500 images for
training in this example.
```python
url = "https://github.com/srihari-humbarwadi/datasets/releases/download/v0.1.0/data.zip"
filename = os.path.join(os.getcwd(), "data.zip")
keras.utils.get_file(filename, url)
with zipfile.ZipFile("data.zip", "r") as z_fp:
z_fp.extractall("./")
```
<div class="k-default-codeblock">
```
Downloading data from https://github.com/srihari-humbarwadi/datasets/releases/download/v0.1.0/data.zip
560529408/560525318 [==============================] - 7s 0us/step
560537600/560525318 [==============================] - 7s 0us/step
```
</div>
---
## Implementing utility functions
Bounding boxes can be represented in multiple ways; the most common formats are:
- Storing the coordinates of the corners `[xmin, ymin, xmax, ymax]`
- Storing the coordinates of the center and the box dimensions
`[x, y, width, height]`
Since we require both formats, we will be implementing functions for converting
between the formats.
```python
def swap_xy(boxes):
"""Swaps order the of x and y coordinates of the boxes.
Arguments:
boxes: A tensor with shape `(num_boxes, 4)` representing bounding boxes.
Returns:
swapped boxes with shape same as that of boxes.
"""
return tf.stack([boxes[:, 1], boxes[:, 0], boxes[:, 3], boxes[:, 2]], axis=-1)
def convert_to_xywh(boxes):
"""Changes the box format to center, width and height.
Arguments:
boxes: A tensor of rank 2 or higher with a shape of `(..., num_boxes, 4)`
representing bounding boxes where each box is of the format
`[xmin, ymin, xmax, ymax]`.
Returns:
converted boxes with shape same as that of boxes.
"""
return tf.concat(
[(boxes[..., :2] + boxes[..., 2:]) / 2.0, boxes[..., 2:] - boxes[..., :2]],
axis=-1,
)
def convert_to_corners(boxes):
"""Changes the box format to corner coordinates
Arguments:
boxes: A tensor of rank 2 or higher with a shape of `(..., num_boxes, 4)`
representing bounding boxes where each box is of the format
`[x, y, width, height]`.
Returns:
converted boxes with shape same as that of boxes.
"""
return tf.concat(
[boxes[..., :2] - boxes[..., 2:] / 2.0, boxes[..., :2] + boxes[..., 2:] / 2.0],
axis=-1,
)
```
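A quick round-trip check (illustrative only, not part of the original example) shows that the two converters are inverses of each other:
```python
corner_box = tf.constant([[10.0, 20.0, 110.0, 220.0]])  # [xmin, ymin, xmax, ymax]
center_box = convert_to_xywh(corner_box)                # [[ 60. 120. 100. 200.]]
print(convert_to_corners(center_box).numpy())           # [[ 10.  20. 110. 220.]]
```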
---
## Computing pairwise Intersection Over Union (IOU)
As we will see later in the example, we will be assigning ground truth boxes
to anchor boxes based on the extent of overlap. This requires us to
calculate the Intersection Over Union (IOU) between all pairs of anchor
boxes and ground truth boxes.
```python
def compute_iou(boxes1, boxes2):
"""Computes pairwise IOU matrix for given two sets of boxes
Arguments:
boxes1: A tensor with shape `(N, 4)` representing bounding boxes
where each box is of the format `[x, y, width, height]`.
boxes2: A tensor with shape `(M, 4)` representing bounding boxes
where each box is of the format `[x, y, width, height]`.
Returns:
pairwise IOU matrix with shape `(N, M)`, where the value at ith row
jth column holds the IOU between ith box and jth box from
boxes1 and boxes2 respectively.
"""
boxes1_corners = convert_to_corners(boxes1)
boxes2_corners = convert_to_corners(boxes2)
lu = tf.maximum(boxes1_corners[:, None, :2], boxes2_corners[:, :2])
rd = tf.minimum(boxes1_corners[:, None, 2:], boxes2_corners[:, 2:])
intersection = tf.maximum(0.0, rd - lu)
intersection_area = intersection[:, :, 0] * intersection[:, :, 1]
boxes1_area = boxes1[:, 2] * boxes1[:, 3]
boxes2_area = boxes2[:, 2] * boxes2[:, 3]
union_area = tf.maximum(
boxes1_area[:, None] + boxes2_area - intersection_area, 1e-8
)
return tf.clip_by_value(intersection_area / union_area, 0.0, 1.0)
def visualize_detections(
image, boxes, classes, scores, figsize=(7, 7), linewidth=1, color=[0, 0, 1]
):
"""Visualize Detections"""
image = np.array(image, dtype=np.uint8)
plt.figure(figsize=figsize)
plt.axis("off")
plt.imshow(image)
ax = plt.gca()
for box, _cls, score in zip(boxes, classes, scores):
text = "{}: {:.2f}".format(_cls, score)
x1, y1, x2, y2 = box
w, h = x2 - x1, y2 - y1
patch = plt.Rectangle(
[x1, y1], w, h, fill=False, edgecolor=color, linewidth=linewidth
)
ax.add_patch(patch)
ax.text(
x1,
y1,
text,
bbox={"facecolor": color, "alpha": 0.4},
clip_box=ax.clipbox,
clip_on=True,
)
plt.show()
return ax
```
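A small sanity check (illustrative only): a box has an IOU of 1 with itself and 0 with a disjoint box, where boxes are given in the `[x, y, width, height]` format:
```python
boxes_a = tf.constant([[50.0, 50.0, 100.0, 100.0]])
boxes_b = tf.constant([[50.0, 50.0, 100.0, 100.0], [500.0, 500.0, 10.0, 10.0]])
print(compute_iou(boxes_a, boxes_b).numpy())  # [[1. 0.]]
```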
---
## Implementing Anchor generator
Anchor boxes are fixed-size boxes that the model uses to predict the bounding
box for an object. It does this by regressing the offset between the location
of the object's center and the center of an anchor box, and then uses the width
and height of the anchor box to predict a relative scale of the object. In the
case of RetinaNet, each location on a given feature map has nine anchor boxes
(at three scales and three ratios).
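To make the anchor budget concrete, here is a small back-of-the-envelope count (illustrative only, assuming a 640x640 input); the `AnchorBox` class below generates exactly this many anchors for such an image:
```python
import math

strides = [8, 16, 32, 64, 128]
total_anchors = sum(math.ceil(640 / s) ** 2 * 9 for s in strides)
print(total_anchors)  # 76725
```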
```python
class AnchorBox:
"""Generates anchor boxes.
This class has operations to generate anchor boxes for feature maps at
    strides `[8, 16, 32, 64, 128]`, where each anchor box is of the
    format `[x, y, width, height]`.
Attributes:
aspect_ratios: A list of float values representing the aspect ratios of
the anchor boxes at each location on the feature map
scales: A list of float values representing the scale of the anchor boxes
at each location on the feature map.
      num_anchors: The number of anchor boxes at each location on the feature map
areas: A list of float values representing the areas of the anchor
boxes for each feature map in the feature pyramid.
      strides: A list of float values representing the strides for each feature
map in the feature pyramid.
"""
def __init__(self):
self.aspect_ratios = [0.5, 1.0, 2.0]
self.scales = [2 ** x for x in [0, 1 / 3, 2 / 3]]
self._num_anchors = len(self.aspect_ratios) * len(self.scales)
self._strides = [2 ** i for i in range(3, 8)]
self._areas = [x ** 2 for x in [32.0, 64.0, 128.0, 256.0, 512.0]]
self._anchor_dims = self._compute_dims()
def _compute_dims(self):
"""Computes anchor box dimensions for all ratios and scales at all levels
of the feature pyramid.
"""
anchor_dims_all = []
for area in self._areas:
anchor_dims = []
for ratio in self.aspect_ratios:
anchor_height = tf.math.sqrt(area / ratio)
anchor_width = area / anchor_height
dims = tf.reshape(
tf.stack([anchor_width, anchor_height], axis=-1), [1, 1, 2]
)
for scale in self.scales:
anchor_dims.append(scale * dims)
anchor_dims_all.append(tf.stack(anchor_dims, axis=-2))
return anchor_dims_all
def _get_anchors(self, feature_height, feature_width, level):
"""Generates anchor boxes for a given feature map size and level
Arguments:
feature_height: An integer representing the height of the feature map.
feature_width: An integer representing the width of the feature map.
level: An integer representing the level of the feature map in the
feature pyramid.
Returns:
anchor boxes with the shape
`(feature_height * feature_width * num_anchors, 4)`
"""
rx = tf.range(feature_width, dtype=tf.float32) + 0.5
ry = tf.range(feature_height, dtype=tf.float32) + 0.5
centers = tf.stack(tf.meshgrid(rx, ry), axis=-1) * self._strides[level - 3]
centers = tf.expand_dims(centers, axis=-2)
centers = tf.tile(centers, [1, 1, self._num_anchors, 1])
dims = tf.tile(
self._anchor_dims[level - 3], [feature_height, feature_width, 1, 1]
)
anchors = tf.concat([centers, dims], axis=-1)
return tf.reshape(
anchors, [feature_height * feature_width * self._num_anchors, 4]
)
def get_anchors(self, image_height, image_width):
"""Generates anchor boxes for all the feature maps of the feature pyramid.
Arguments:
image_height: Height of the input image.
image_width: Width of the input image.
Returns:
anchor boxes for all the feature maps, stacked as a single tensor
with shape `(total_anchors, 4)`
"""
anchors = [
self._get_anchors(
tf.math.ceil(image_height / 2 ** i),
tf.math.ceil(image_width / 2 ** i),
i,
)
for i in range(3, 8)
]
return tf.concat(anchors, axis=0)
```
---
## Preprocessing data
Preprocessing the images involves two steps:
- Resizing the image: images are resized such that the shortest side is equal
to 800 px. If, after resizing, the longest side of the image exceeds 1333 px,
the image is resized again such that the longest side is capped at 1333 px.
- Applying augmentation: Random scale jittering and random horizontal flipping
are the only augmentations applied to the images.
Along with the images, bounding boxes are rescaled and flipped if required.
```python
def random_flip_horizontal(image, boxes):
"""Flips image and boxes horizontally with 50% chance
Arguments:
image: A 3-D tensor of shape `(height, width, channels)` representing an
image.
boxes: A tensor with shape `(num_boxes, 4)` representing bounding boxes,
having normalized coordinates.
Returns:
Randomly flipped image and boxes
"""
if tf.random.uniform(()) > 0.5:
image = tf.image.flip_left_right(image)
boxes = tf.stack(
[1 - boxes[:, 2], boxes[:, 1], 1 - boxes[:, 0], boxes[:, 3]], axis=-1
)
return image, boxes
def resize_and_pad_image(
image, min_side=800.0, max_side=1333.0, jitter=[640, 1024], stride=128.0
):
"""Resizes and pads image while preserving aspect ratio.
1. Resizes images so that the shorter side is equal to `min_side`
2. If the longer side is greater than `max_side`, then resize the image
with longer side equal to `max_side`
3. Pad with zeros on right and bottom to make the image shape divisible by
`stride`
Arguments:
image: A 3-D tensor of shape `(height, width, channels)` representing an
image.
min_side: The shorter side of the image is resized to this value, if
`jitter` is set to None.
max_side: If the longer side of the image exceeds this value after
        resizing, the image is resized such that the longer side now equals
this value.
jitter: A list of floats containing minimum and maximum size for scale
jittering. If available, the shorter side of the image will be
resized to a random value in this range.
stride: The stride of the smallest feature map in the feature pyramid.
Can be calculated using `image_size / feature_map_size`.
Returns:
image: Resized and padded image.
image_shape: Shape of the image before padding.
ratio: The scaling factor used to resize the image
"""
image_shape = tf.cast(tf.shape(image)[:2], dtype=tf.float32)
if jitter is not None:
min_side = tf.random.uniform((), jitter[0], jitter[1], dtype=tf.float32)
ratio = min_side / tf.reduce_min(image_shape)
if ratio * tf.reduce_max(image_shape) > max_side:
ratio = max_side / tf.reduce_max(image_shape)
image_shape = ratio * image_shape
image = tf.image.resize(image, tf.cast(image_shape, dtype=tf.int32))
padded_image_shape = tf.cast(
tf.math.ceil(image_shape / stride) * stride, dtype=tf.int32
)
image = tf.image.pad_to_bounding_box(
image, 0, 0, padded_image_shape[0], padded_image_shape[1]
)
return image, image_shape, ratio
def preprocess_data(sample):
"""Applies preprocessing step to a single sample
Arguments:
sample: A dict representing a single training sample.
Returns:
image: Resized and padded image with random horizontal flipping applied.
bbox: Bounding boxes with the shape `(num_objects, 4)` where each box is
of the format `[x, y, width, height]`.
      class_id: A tensor representing the class ids of the objects, having
shape `(num_objects,)`.
"""
image = sample["image"]
bbox = swap_xy(sample["objects"]["bbox"])
class_id = tf.cast(sample["objects"]["label"], dtype=tf.int32)
image, bbox = random_flip_horizontal(image, bbox)
image, image_shape, _ = resize_and_pad_image(image)
bbox = tf.stack(
[
bbox[:, 0] * image_shape[1],
bbox[:, 1] * image_shape[0],
bbox[:, 2] * image_shape[1],
bbox[:, 3] * image_shape[0],
],
axis=-1,
)
bbox = convert_to_xywh(bbox)
return image, bbox, class_id
```
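As a quick shape check (illustrative only, using a dummy tensor), a 500x750 image with jittering disabled is scaled by `800 / 500 = 1.6` and then padded so that both sides become multiples of the stride (128):
```python
dummy_image = tf.zeros([500, 750, 3])
image, image_shape, ratio = resize_and_pad_image(dummy_image, jitter=None)
print(image.shape, image_shape.numpy(), ratio.numpy())
# (896, 1280, 3) [ 800. 1200.] 1.6
```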
---
## Encoding labels
The raw labels, consisting of bounding boxes and class ids, need to be
transformed into targets for training. This transformation consists of
the following steps:
- Generating anchor boxes for the given image dimensions
- Assigning ground truth boxes to the anchor boxes
- The anchor boxes that are not assigned any objects are either assigned the
background class or ignored, depending on the IOU
- Generating the classification and regression targets using anchor boxes
```python
class LabelEncoder:
"""Transforms the raw labels into targets for training.
This class has operations to generate targets for a batch of samples which
is made up of the input images, bounding boxes for the objects present and
their class ids.
Attributes:
anchor_box: Anchor box generator to encode the bounding boxes.
box_variance: The scaling factors used to scale the bounding box targets.
"""
def __init__(self):
self._anchor_box = AnchorBox()
self._box_variance = tf.convert_to_tensor(
[0.1, 0.1, 0.2, 0.2], dtype=tf.float32
)
def _match_anchor_boxes(
self, anchor_boxes, gt_boxes, match_iou=0.5, ignore_iou=0.4
):
"""Matches ground truth boxes to anchor boxes based on IOU.
1. Calculates the pairwise IOU for the M `anchor_boxes` and N `gt_boxes`
to get a `(M, N)` shaped matrix.
2. The ground truth box with the maximum IOU in each row is assigned to
the anchor box provided the IOU is greater than `match_iou`.
3. If the maximum IOU in a row is less than `ignore_iou`, the anchor
box is assigned with the background class.
4. The remaining anchor boxes that do not have any class assigned are
ignored during training.
Arguments:
anchor_boxes: A float tensor with the shape `(total_anchors, 4)`
representing all the anchor boxes for a given input image shape,
where each anchor box is of the format `[x, y, width, height]`.
gt_boxes: A float tensor with shape `(num_objects, 4)` representing
the ground truth boxes, where each box is of the format
`[x, y, width, height]`.
match_iou: A float value representing the minimum IOU threshold for
determining if a ground truth box can be assigned to an anchor box.
ignore_iou: A float value representing the IOU threshold under which
an anchor box is assigned to the background class.
Returns:
matched_gt_idx: Index of the matched object
positive_mask: A mask for anchor boxes that have been assigned ground
truth boxes.
      ignore_mask: A mask for anchor boxes that need to be ignored during
training
"""
iou_matrix = compute_iou(anchor_boxes, gt_boxes)
max_iou = tf.reduce_max(iou_matrix, axis=1)
matched_gt_idx = tf.argmax(iou_matrix, axis=1)
positive_mask = tf.greater_equal(max_iou, match_iou)
negative_mask = tf.less(max_iou, ignore_iou)
ignore_mask = tf.logical_not(tf.logical_or(positive_mask, negative_mask))
return (
matched_gt_idx,
tf.cast(positive_mask, dtype=tf.float32),
tf.cast(ignore_mask, dtype=tf.float32),
)
def _compute_box_target(self, anchor_boxes, matched_gt_boxes):
"""Transforms the ground truth boxes into targets for training"""
box_target = tf.concat(
[
(matched_gt_boxes[:, :2] - anchor_boxes[:, :2]) / anchor_boxes[:, 2:],
tf.math.log(matched_gt_boxes[:, 2:] / anchor_boxes[:, 2:]),
],
axis=-1,
)
box_target = box_target / self._box_variance
return box_target
def _encode_sample(self, image_shape, gt_boxes, cls_ids):
"""Creates box and classification targets for a single sample"""
anchor_boxes = self._anchor_box.get_anchors(image_shape[1], image_shape[2])
cls_ids = tf.cast(cls_ids, dtype=tf.float32)
matched_gt_idx, positive_mask, ignore_mask = self._match_anchor_boxes(
anchor_boxes, gt_boxes
)
matched_gt_boxes = tf.gather(gt_boxes, matched_gt_idx)
box_target = self._compute_box_target(anchor_boxes, matched_gt_boxes)
matched_gt_cls_ids = tf.gather(cls_ids, matched_gt_idx)
cls_target = tf.where(
tf.not_equal(positive_mask, 1.0), -1.0, matched_gt_cls_ids
)
cls_target = tf.where(tf.equal(ignore_mask, 1.0), -2.0, cls_target)
cls_target = tf.expand_dims(cls_target, axis=-1)
label = tf.concat([box_target, cls_target], axis=-1)
return label
def encode_batch(self, batch_images, gt_boxes, cls_ids):
"""Creates box and classification targets for a batch"""
images_shape = tf.shape(batch_images)
batch_size = images_shape[0]
labels = tf.TensorArray(dtype=tf.float32, size=batch_size, dynamic_size=True)
for i in range(batch_size):
label = self._encode_sample(images_shape, gt_boxes[i], cls_ids[i])
labels = labels.write(i, label)
batch_images = tf.keras.applications.resnet.preprocess_input(batch_images)
return batch_images, labels.stack()
```
---
## Building the ResNet50 backbone
RetinaNet uses a ResNet-based backbone, from which a feature pyramid network
is constructed. In this example we use ResNet50 as the backbone, and return the
feature maps at strides 8, 16 and 32.
```python
def get_backbone():
"""Builds ResNet50 with pre-trained imagenet weights"""
backbone = keras.applications.ResNet50(
include_top=False, input_shape=[None, None, 3]
)
c3_output, c4_output, c5_output = [
backbone.get_layer(layer_name).output
for layer_name in ["conv3_block4_out", "conv4_block6_out", "conv5_block3_out"]
]
return keras.Model(
inputs=[backbone.inputs], outputs=[c3_output, c4_output, c5_output]
)
```
---
## Building Feature Pyramid Network as a custom layer
```python
class FeaturePyramid(keras.layers.Layer):
"""Builds the Feature Pyramid with the feature maps from the backbone.
Attributes:
num_classes: Number of classes in the dataset.
backbone: The backbone to build the feature pyramid from.
Currently supports ResNet50 only.
"""
def __init__(self, backbone=None, **kwargs):
super().__init__(name="FeaturePyramid", **kwargs)
self.backbone = backbone if backbone else get_backbone()
self.conv_c3_1x1 = keras.layers.Conv2D(256, 1, 1, "same")
self.conv_c4_1x1 = keras.layers.Conv2D(256, 1, 1, "same")
self.conv_c5_1x1 = keras.layers.Conv2D(256, 1, 1, "same")
self.conv_c3_3x3 = keras.layers.Conv2D(256, 3, 1, "same")
self.conv_c4_3x3 = keras.layers.Conv2D(256, 3, 1, "same")
self.conv_c5_3x3 = keras.layers.Conv2D(256, 3, 1, "same")
self.conv_c6_3x3 = keras.layers.Conv2D(256, 3, 2, "same")
self.conv_c7_3x3 = keras.layers.Conv2D(256, 3, 2, "same")
self.upsample_2x = keras.layers.UpSampling2D(2)
def call(self, images, training=False):
c3_output, c4_output, c5_output = self.backbone(images, training=training)
p3_output = self.conv_c3_1x1(c3_output)
p4_output = self.conv_c4_1x1(c4_output)
p5_output = self.conv_c5_1x1(c5_output)
p4_output = p4_output + self.upsample_2x(p5_output)
p3_output = p3_output + self.upsample_2x(p4_output)
p3_output = self.conv_c3_3x3(p3_output)
p4_output = self.conv_c4_3x3(p4_output)
p5_output = self.conv_c5_3x3(p5_output)
p6_output = self.conv_c6_3x3(c5_output)
p7_output = self.conv_c7_3x3(tf.nn.relu(p6_output))
return p3_output, p4_output, p5_output, p6_output, p7_output
```
---
## Building the classification and box regression heads
The RetinaNet model has separate heads for bounding box regression and
for predicting class probabilities for the objects. These heads are shared
between all the feature maps of the feature pyramid.
```python
def build_head(output_filters, bias_init):
"""Builds the class/box predictions head.
Arguments:
output_filters: Number of convolution filters in the final layer.
bias_init: Bias Initializer for the final convolution layer.
Returns:
A keras sequential model representing either the classification
or the box regression head depending on `output_filters`.
"""
head = keras.Sequential([keras.Input(shape=[None, None, 256])])
kernel_init = tf.initializers.RandomNormal(0.0, 0.01)
for _ in range(4):
head.add(
keras.layers.Conv2D(256, 3, padding="same", kernel_initializer=kernel_init)
)
head.add(keras.layers.ReLU())
head.add(
keras.layers.Conv2D(
output_filters,
3,
1,
padding="same",
kernel_initializer=kernel_init,
bias_initializer=bias_init,
)
)
return head
```
---
## Building RetinaNet using a subclassed model
```python
class RetinaNet(keras.Model):
"""A subclassed Keras model implementing the RetinaNet architecture.
Attributes:
num_classes: Number of classes in the dataset.
backbone: The backbone to build the feature pyramid from.
Currently supports ResNet50 only.
"""
def __init__(self, num_classes, backbone=None, **kwargs):
super().__init__(name="RetinaNet", **kwargs)
self.fpn = FeaturePyramid(backbone)
self.num_classes = num_classes
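        # Bias initialization from the Focal Loss paper: the classification head
        # starts out predicting a foreground probability of ~0.01 for every anchor.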
prior_probability = tf.constant_initializer(-np.log((1 - 0.01) / 0.01))
self.cls_head = build_head(9 * num_classes, prior_probability)
self.box_head = build_head(9 * 4, "zeros")
def call(self, image, training=False):
features = self.fpn(image, training=training)
N = tf.shape(image)[0]
cls_outputs = []
box_outputs = []
for feature in features:
box_outputs.append(tf.reshape(self.box_head(feature), [N, -1, 4]))
cls_outputs.append(
tf.reshape(self.cls_head(feature), [N, -1, self.num_classes])
)
cls_outputs = tf.concat(cls_outputs, axis=1)
box_outputs = tf.concat(box_outputs, axis=1)
return tf.concat([box_outputs, cls_outputs], axis=-1)
```
---
## Implementing a custom layer to decode predictions
```python
class DecodePredictions(tf.keras.layers.Layer):
"""A Keras layer that decodes predictions of the RetinaNet model.
Attributes:
num_classes: Number of classes in the dataset
confidence_threshold: Minimum class probability, below which detections
are pruned.
nms_iou_threshold: IOU threshold for the NMS operation
max_detections_per_class: Maximum number of detections to retain per
class.
max_detections: Maximum number of detections to retain across all
classes.
box_variance: The scaling factors used to scale the bounding box
predictions.
"""
def __init__(
self,
num_classes=80,
confidence_threshold=0.05,
nms_iou_threshold=0.5,
max_detections_per_class=100,
max_detections=100,
box_variance=[0.1, 0.1, 0.2, 0.2],
**kwargs
):
super().__init__(**kwargs)
self.num_classes = num_classes
self.confidence_threshold = confidence_threshold
self.nms_iou_threshold = nms_iou_threshold
self.max_detections_per_class = max_detections_per_class
self.max_detections = max_detections
self._anchor_box = AnchorBox()
self._box_variance = tf.convert_to_tensor(
[0.1, 0.1, 0.2, 0.2], dtype=tf.float32
)
def _decode_box_predictions(self, anchor_boxes, box_predictions):
boxes = box_predictions * self._box_variance
boxes = tf.concat(
[
boxes[:, :, :2] * anchor_boxes[:, :, 2:] + anchor_boxes[:, :, :2],
tf.math.exp(boxes[:, :, 2:]) * anchor_boxes[:, :, 2:],
],
axis=-1,
)
boxes_transformed = convert_to_corners(boxes)
return boxes_transformed
def call(self, images, predictions):
image_shape = tf.cast(tf.shape(images), dtype=tf.float32)
anchor_boxes = self._anchor_box.get_anchors(image_shape[1], image_shape[2])
box_predictions = predictions[:, :, :4]
cls_predictions = tf.nn.sigmoid(predictions[:, :, 4:])
boxes = self._decode_box_predictions(anchor_boxes[None, ...], box_predictions)
return tf.image.combined_non_max_suppression(
tf.expand_dims(boxes, axis=2),
cls_predictions,
self.max_detections_per_class,
self.max_detections,
self.nms_iou_threshold,
self.confidence_threshold,
clip_boxes=False,
)
```
---
## Implementing Smooth L1 loss and Focal Loss as keras custom losses
```python
class RetinaNetBoxLoss(tf.losses.Loss):
"""Implements Smooth L1 loss"""
def __init__(self, delta):
super().__init__(
reduction="none", name="RetinaNetBoxLoss"
)
self._delta = delta
def call(self, y_true, y_pred):
difference = y_true - y_pred
absolute_difference = tf.abs(difference)
squared_difference = difference ** 2
loss = tf.where(
tf.less(absolute_difference, self._delta),
0.5 * squared_difference,
absolute_difference - 0.5,
)
return tf.reduce_sum(loss, axis=-1)
class RetinaNetClassificationLoss(tf.losses.Loss):
"""Implements Focal loss"""
def __init__(self, alpha, gamma):
super().__init__(
reduction="none", name="RetinaNetClassificationLoss"
)
self._alpha = alpha
self._gamma = gamma
def call(self, y_true, y_pred):
cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(
labels=y_true, logits=y_pred
)
probs = tf.nn.sigmoid(y_pred)
alpha = tf.where(tf.equal(y_true, 1.0), self._alpha, (1.0 - self._alpha))
pt = tf.where(tf.equal(y_true, 1.0), probs, 1 - probs)
loss = alpha * tf.pow(1.0 - pt, self._gamma) * cross_entropy
return tf.reduce_sum(loss, axis=-1)
class RetinaNetLoss(tf.losses.Loss):
"""Wrapper to combine both the losses"""
def __init__(self, num_classes=80, alpha=0.25, gamma=2.0, delta=1.0):
super().__init__(reduction="auto", name="RetinaNetLoss")
self._clf_loss = RetinaNetClassificationLoss(alpha, gamma)
self._box_loss = RetinaNetBoxLoss(delta)
self._num_classes = num_classes
def call(self, y_true, y_pred):
y_pred = tf.cast(y_pred, dtype=tf.float32)
box_labels = y_true[:, :, :4]
box_predictions = y_pred[:, :, :4]
cls_labels = tf.one_hot(
tf.cast(y_true[:, :, 4], dtype=tf.int32),
depth=self._num_classes,
dtype=tf.float32,
)
cls_predictions = y_pred[:, :, 4:]
positive_mask = tf.cast(tf.greater(y_true[:, :, 4], -1.0), dtype=tf.float32)
ignore_mask = tf.cast(tf.equal(y_true[:, :, 4], -2.0), dtype=tf.float32)
clf_loss = self._clf_loss(cls_labels, cls_predictions)
box_loss = self._box_loss(box_labels, box_predictions)
clf_loss = tf.where(tf.equal(ignore_mask, 1.0), 0.0, clf_loss)
box_loss = tf.where(tf.equal(positive_mask, 1.0), box_loss, 0.0)
normalizer = tf.reduce_sum(positive_mask, axis=-1)
clf_loss = tf.math.divide_no_nan(tf.reduce_sum(clf_loss, axis=-1), normalizer)
box_loss = tf.math.divide_no_nan(tf.reduce_sum(box_loss, axis=-1), normalizer)
loss = clf_loss + box_loss
return loss
```
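As a quick numerical illustration (not part of the original code), the focal modulating factor `(1 - p_t)^gamma` is what down-weights easy, well-classified anchors relative to hard ones:
```python
gamma = 2.0
for p_t in [0.1, 0.5, 0.9]:
    print(p_t, (1.0 - p_t) ** gamma)  # prints roughly 0.81, 0.25, and 0.01
```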
---
## Setting up training parameters
```python
model_dir = "retinanet/"
label_encoder = LabelEncoder()
num_classes = 80
batch_size = 2
learning_rates = [2.5e-06, 0.000625, 0.00125, 0.0025, 0.00025, 2.5e-05]
learning_rate_boundaries = [125, 250, 500, 240000, 360000]
learning_rate_fn = tf.optimizers.schedules.PiecewiseConstantDecay(
boundaries=learning_rate_boundaries, values=learning_rates
)
```
---
## Initializing and compiling model
```python
resnet50_backbone = get_backbone()
loss_fn = RetinaNetLoss(num_classes)
model = RetinaNet(num_classes, resnet50_backbone)
optimizer = tf.keras.optimizers.legacy.SGD(learning_rate=learning_rate_fn, momentum=0.9)
model.compile(loss=loss_fn, optimizer=optimizer)
```
<div class="k-default-codeblock">
```
Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/resnet/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5
94773248/94765736 [==============================] - 0s 0us/step
94781440/94765736 [==============================] - 0s 0us/step
```
</div>
---
## Setting up callbacks
```python
callbacks_list = [
tf.keras.callbacks.ModelCheckpoint(
filepath=os.path.join(model_dir, "weights" + "_epoch_{epoch}"),
monitor="loss",
save_best_only=False,
save_weights_only=True,
verbose=1,
)
]
```
---
## Load the COCO2017 dataset using TensorFlow Datasets
```python
# set `data_dir=None` to load the complete dataset
(train_dataset, val_dataset), dataset_info = tfds.load(
"coco/2017", split=["train", "validation"], with_info=True, data_dir="data"
)
```
---
## Setting up a `tf.data` pipeline
To ensure that the model is fed with data efficiently, we will be using the
`tf.data` API to create our input pipeline. The input pipeline
consists of the following major processing steps:
- Apply the preprocessing function to the samples
- Create batches with a fixed batch size. Since images in the batch can
have different dimensions, and can also have a different number of
objects, we use `padded_batch` to add the necessary padding to create
rectangular tensors
- Create targets for each sample in the batch using `LabelEncoder`
```python
autotune = tf.data.AUTOTUNE
train_dataset = train_dataset.map(preprocess_data, num_parallel_calls=autotune)
train_dataset = train_dataset.shuffle(8 * batch_size)
train_dataset = train_dataset.padded_batch(
batch_size=batch_size, padding_values=(0.0, 1e-8, -1), drop_remainder=True
)
train_dataset = train_dataset.map(
label_encoder.encode_batch, num_parallel_calls=autotune
)
train_dataset = train_dataset.apply(tf.data.experimental.ignore_errors())
train_dataset = train_dataset.prefetch(autotune)
val_dataset = val_dataset.map(preprocess_data, num_parallel_calls=autotune)
val_dataset = val_dataset.padded_batch(
batch_size=1, padding_values=(0.0, 1e-8, -1), drop_remainder=True
)
val_dataset = val_dataset.map(label_encoder.encode_batch, num_parallel_calls=autotune)
val_dataset = val_dataset.apply(tf.data.experimental.ignore_errors())
val_dataset = val_dataset.prefetch(autotune)
```
---
## Training the model
```python
# Uncomment the following lines when training on the full dataset
# train_steps_per_epoch = dataset_info.splits["train"].num_examples // batch_size
# val_steps_per_epoch = \
# dataset_info.splits["validation"].num_examples // batch_size
# train_steps = 4 * 100000
# epochs = train_steps // train_steps_per_epoch
epochs = 1
# Running 100 training and 50 validation steps,
# remove `.take` when training on the full dataset
model.fit(
train_dataset.take(100),
validation_data=val_dataset.take(50),
epochs=epochs,
callbacks=callbacks_list,
verbose=1,
)
```
<div class="k-default-codeblock">
```
100/Unknown - 290s 3s/step - loss: 4.0817
Epoch 1: saving model to retinanet/weights_epoch_1
100/100 [==============================] - 336s 3s/step - loss: 4.0817 - val_loss: 4.1082
<keras.callbacks.History at 0x7f4c7e0428d0>
```
</div>
---
## Loading weights
```python
# Change this to `model_dir` when not using the downloaded weights
weights_dir = "data"
latest_checkpoint = tf.train.latest_checkpoint(weights_dir)
model.load_weights(latest_checkpoint)
```
<div class="k-default-codeblock">
```
<tensorflow.python.training.tracking.util.CheckpointLoadStatus at 0x7f4c6823d0d0>
```
</div>
---
## Building inference model
```python
image = tf.keras.Input(shape=[None, None, 3], name="image")
predictions = model(image, training=False)
detections = DecodePredictions(confidence_threshold=0.5)(image, predictions)
inference_model = tf.keras.Model(inputs=image, outputs=detections)
```
---
## Generating detections
```python
def prepare_image(image):
image, _, ratio = resize_and_pad_image(image, jitter=None)
image = tf.keras.applications.resnet.preprocess_input(image)
return tf.expand_dims(image, axis=0), ratio
val_dataset = tfds.load("coco/2017", split="validation", data_dir="data")
int2str = dataset_info.features["objects"]["label"].int2str
for sample in val_dataset.take(2):
image = tf.cast(sample["image"], dtype=tf.float32)
input_image, ratio = prepare_image(image)
detections = inference_model.predict(input_image)
num_detections = detections.valid_detections[0]
class_names = [
int2str(int(x)) for x in detections.nmsed_classes[0][:num_detections]
]
visualize_detections(
image,
detections.nmsed_boxes[0][:num_detections] / ratio,
class_names,
detections.nmsed_scores[0][:num_detections],
)
```
![png](/img/examples/vision/retinanet/retinanet_44_0.png)
![png](/img/examples/vision/retinanet/retinanet_44_1.png)
Example available on HuggingFace.
| Trained Model | Demo |
| :--: | :--: |
| [![Generic badge](https://img.shields.io/badge/%F0%9F%A4%97%20Model-Object%20Detection%20With%20Retinanet-black.svg)](https://huggingface.co/keras-io/Object-Detection-RetinaNet) | [![Generic badge](https://img.shields.io/badge/%F0%9F%A4%97%20Spaces-Object%20Detection%20With%20Retinanet-black.svg)](https://huggingface.co/spaces/keras-io/Object-Detection-Using-RetinaNet) |
| keras-io/examples/vision/md/retinanet.md/0 | {
"file_path": "keras-io/examples/vision/md/retinanet.md",
"repo_id": "keras-io",
"token_count": 15216
} | 121 |
"""
Title: Self-supervised contrastive learning with NNCLR
Author: [Rishit Dagli](https://twitter.com/rishit_dagli)
Date created: 2021/09/13
Last modified: 2024/01/22
Description: Implementation of NNCLR, a self-supervised learning method for computer vision.
Accelerator: GPU
"""
"""
## Introduction
### Self-supervised learning
Self-supervised representation learning aims to obtain robust representations of samples
from raw data without expensive labels or annotations. Early methods in this field
focused on defining pretraining tasks which involved a surrogate task on a domain with ample
weak supervision labels. Encoders trained to solve such tasks are expected to
learn general features that might be useful for other downstream tasks requiring
expensive annotations like image classification.
### Contrastive Learning
A broad category of self-supervised learning techniques are those that use *contrastive
losses*, which have been used in a wide range of computer vision applications like
[image similarity](https://www.jmlr.org/papers/v11/chechik10a.html),
[dimensionality reduction (DrLIM)](http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf)
and [face verification/identification](https://openaccess.thecvf.com/content_cvpr_2015/html/Schroff_FaceNet_A_Unified_2015_CVPR_paper.html).
These methods learn a latent space that clusters positive samples together while
pushing apart negative samples.
### NNCLR
In this example, we implement NNCLR as proposed in the paper
[With a Little Help from My Friends: Nearest-Neighbor Contrastive Learning of Visual Representations](https://arxiv.org/abs/2104.14548),
by Google Research and DeepMind.
NNCLR learns self-supervised representations that go beyond single-instance positives, which
allows for learning better features that are invariant to different viewpoints, deformations,
and even intra-class variations.
Clustering based methods offer a great approach to go beyond single instance positives,
but assuming the entire cluster to be positives could hurt performance due to early
over-generalization. Instead, NNCLR uses nearest neighbors in the learned representation
space as positives.
In addition, NNCLR increases the performance of existing contrastive learning methods like
[SimCLR](https://arxiv.org/abs/2002.05709)([Keras Example](https://keras.io/examples/vision/semisupervised_simclr))
and reduces the reliance of self-supervised methods on data augmentation strategies.
Here is a great visualization by the paper authors showing how NNCLR builds on ideas from
SimCLR:
![](https://i.imgur.com/p2DbZJJ.png)
We can see that SimCLR uses two views of the same image as the positive pair. These two
views, which are produced using random data augmentations, are fed through an encoder to
obtain the positive embedding pair, so we end up using two augmentations. NNCLR instead
keeps a _support set_ of embeddings representing the full data distribution, and forms
the positive pairs using nearest-neighbours. A support set is used as memory during
training, similar to a queue (i.e. first-in-first-out) as in
[MoCo](https://arxiv.org/abs/1911.05722).
This example requires `tensorflow_datasets`, which can
be installed with this command:
"""
"""shell
pip install tensorflow-datasets
"""
"""
## Setup
"""
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow_datasets as tfds
import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import keras
import keras_cv
from keras import ops
from keras import layers
"""
## Hyperparameters
A greater `queue_size` most likely means better performance as shown in the original
paper, but introduces significant computational overhead. The authors show that the best
results of NNCLR are achieved with a queue size of 98,304 (the largest `queue_size` they
experimented on). We here use 10,000 to show a working example.
"""
AUTOTUNE = tf.data.AUTOTUNE
shuffle_buffer = 5000
# The below two values are taken from https://www.tensorflow.org/datasets/catalog/stl10
labelled_train_images = 5000
unlabelled_images = 100000
temperature = 0.1
queue_size = 10000
contrastive_augmenter = {
"brightness": 0.5,
"name": "contrastive_augmenter",
"scale": (0.2, 1.0),
}
classification_augmenter = {
"brightness": 0.2,
"name": "classification_augmenter",
"scale": (0.5, 1.0),
}
input_shape = (96, 96, 3)
width = 128
num_epochs = 5 # Use 25 for better results
steps_per_epoch = 50 # Use 200 for better results
"""
## Load the Dataset
We load the [STL-10](http://ai.stanford.edu/~acoates/stl10/) dataset from
TensorFlow Datasets, an image recognition dataset for developing unsupervised
feature learning, deep learning, self-taught learning algorithms. It is inspired by the
CIFAR-10 dataset, with some modifications.
"""
dataset_name = "stl10"
def prepare_dataset():
unlabeled_batch_size = unlabelled_images // steps_per_epoch
labeled_batch_size = labelled_train_images // steps_per_epoch
batch_size = unlabeled_batch_size + labeled_batch_size
unlabeled_train_dataset = (
tfds.load(
dataset_name, split="unlabelled", as_supervised=True, shuffle_files=True
)
.shuffle(buffer_size=shuffle_buffer)
.batch(unlabeled_batch_size, drop_remainder=True)
)
labeled_train_dataset = (
tfds.load(dataset_name, split="train", as_supervised=True, shuffle_files=True)
.shuffle(buffer_size=shuffle_buffer)
.batch(labeled_batch_size, drop_remainder=True)
)
test_dataset = (
tfds.load(dataset_name, split="test", as_supervised=True)
.batch(batch_size)
.prefetch(buffer_size=AUTOTUNE)
)
train_dataset = tf.data.Dataset.zip(
(unlabeled_train_dataset, labeled_train_dataset)
).prefetch(buffer_size=AUTOTUNE)
return batch_size, train_dataset, labeled_train_dataset, test_dataset
batch_size, train_dataset, labeled_train_dataset, test_dataset = prepare_dataset()
"""
## Augmentations
Other self-supervised techniques like [SimCLR](https://arxiv.org/abs/2002.05709),
[BYOL](https://arxiv.org/abs/2006.07733), [SwAV](https://arxiv.org/abs/2006.09882) etc.
rely heavily on a well-designed data augmentation pipeline to get the best performance.
However, NNCLR is _less_ dependent on complex augmentations as nearest-neighbors already
provide richness in sample variations. A few common techniques often included
in augmentation pipelines are:
- Random resized crops
- Multiple color distortions
- Gaussian blur
Since NNCLR is less dependent on complex augmentations, we will only use random
crops and random brightness for augmenting the input images.
"""
"""
### Prepare augmentation module
"""
def augmenter(brightness, name, scale):
return keras.Sequential(
[
layers.Input(shape=input_shape),
layers.Rescaling(1 / 255),
layers.RandomFlip("horizontal"),
keras_cv.layers.RandomCropAndResize(
target_size=(input_shape[0], input_shape[1]),
crop_area_factor=scale,
aspect_ratio_factor=(3 / 4, 4 / 3),
),
keras_cv.layers.RandomBrightness(factor=brightness, value_range=(0.0, 1.0)),
],
name=name,
)
"""
### Encoder architecture
Using a ResNet-50 as the encoder architecture
is standard in the literature. In the original paper, the authors use ResNet-50 as
the encoder architecture and spatially average the outputs of ResNet-50. However, keep in
mind that more powerful models will not only increase training time but will also
require more memory and will limit the maximal batch size you can use. For the purpose of
this example, we just use four convolutional layers.
"""
def encoder():
return keras.Sequential(
[
layers.Input(shape=input_shape),
layers.Conv2D(width, kernel_size=3, strides=2, activation="relu"),
layers.Conv2D(width, kernel_size=3, strides=2, activation="relu"),
layers.Conv2D(width, kernel_size=3, strides=2, activation="relu"),
layers.Conv2D(width, kernel_size=3, strides=2, activation="relu"),
layers.Flatten(),
layers.Dense(width, activation="relu"),
],
name="encoder",
)
"""
## The NNCLR model for contrastive pre-training
We train an encoder on unlabeled images with a contrastive loss. A nonlinear projection
head is attached to the top of the encoder, as it improves the quality of representations
of the encoder.
"""
class NNCLR(keras.Model):
def __init__(
self,
temperature,
queue_size,
):
super().__init__()
self.probe_accuracy = keras.metrics.SparseCategoricalAccuracy()
self.correlation_accuracy = keras.metrics.SparseCategoricalAccuracy()
self.contrastive_accuracy = keras.metrics.SparseCategoricalAccuracy()
self.probe_loss = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
self.contrastive_augmenter = augmenter(**contrastive_augmenter)
self.classification_augmenter = augmenter(**classification_augmenter)
self.encoder = encoder()
self.projection_head = keras.Sequential(
[
layers.Input(shape=(width,)),
layers.Dense(width, activation="relu"),
layers.Dense(width),
],
name="projection_head",
)
self.linear_probe = keras.Sequential(
[layers.Input(shape=(width,)), layers.Dense(10)], name="linear_probe"
)
self.temperature = temperature
feature_dimensions = self.encoder.output_shape[1]
self.feature_queue = keras.Variable(
keras.utils.normalize(
keras.random.normal(shape=(queue_size, feature_dimensions)),
axis=1,
order=2,
),
trainable=False,
)
def compile(self, contrastive_optimizer, probe_optimizer, **kwargs):
super().compile(**kwargs)
self.contrastive_optimizer = contrastive_optimizer
self.probe_optimizer = probe_optimizer
def nearest_neighbour(self, projections):
support_similarities = ops.matmul(
projections, ops.transpose(self.feature_queue)
)
nn_projections = ops.take(
self.feature_queue, ops.argmax(support_similarities, axis=1), axis=0
)
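        # Straight-through trick: the forward pass uses the nearest-neighbour
        # embedding, while gradients flow through `projections` unchanged.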
return projections + ops.stop_gradient(nn_projections - projections)
def update_contrastive_accuracy(self, features_1, features_2):
features_1 = keras.utils.normalize(features_1, axis=1, order=2)
features_2 = keras.utils.normalize(features_2, axis=1, order=2)
similarities = ops.matmul(features_1, ops.transpose(features_2))
batch_size = ops.shape(features_1)[0]
contrastive_labels = ops.arange(batch_size)
self.contrastive_accuracy.update_state(
ops.concatenate([contrastive_labels, contrastive_labels], axis=0),
ops.concatenate([similarities, ops.transpose(similarities)], axis=0),
)
def update_correlation_accuracy(self, features_1, features_2):
features_1 = (features_1 - ops.mean(features_1, axis=0)) / ops.std(
features_1, axis=0
)
features_2 = (features_2 - ops.mean(features_2, axis=0)) / ops.std(
features_2, axis=0
)
batch_size = ops.shape(features_1)[0]
cross_correlation = (
ops.matmul(ops.transpose(features_1), features_2) / batch_size
)
feature_dim = ops.shape(features_1)[1]
correlation_labels = ops.arange(feature_dim)
self.correlation_accuracy.update_state(
ops.concatenate([correlation_labels, correlation_labels], axis=0),
ops.concatenate(
[cross_correlation, ops.transpose(cross_correlation)], axis=0
),
)
def contrastive_loss(self, projections_1, projections_2):
projections_1 = keras.utils.normalize(projections_1, axis=1, order=2)
projections_2 = keras.utils.normalize(projections_2, axis=1, order=2)
similarities_1_2_1 = (
ops.matmul(
self.nearest_neighbour(projections_1), ops.transpose(projections_2)
)
/ self.temperature
)
similarities_1_2_2 = (
ops.matmul(
projections_2, ops.transpose(self.nearest_neighbour(projections_1))
)
/ self.temperature
)
        similarities_2_1_1 = (
ops.matmul(
self.nearest_neighbour(projections_2), ops.transpose(projections_1)
)
/ self.temperature
)
similarities_2_1_2 = (
ops.matmul(
projections_1, ops.transpose(self.nearest_neighbour(projections_2))
)
/ self.temperature
)
batch_size = ops.shape(projections_1)[0]
contrastive_labels = ops.arange(batch_size)
loss = keras.losses.sparse_categorical_crossentropy(
ops.concatenate(
[
contrastive_labels,
contrastive_labels,
contrastive_labels,
contrastive_labels,
],
axis=0,
),
ops.concatenate(
[
similarities_1_2_1,
similarities_1_2_2,
similarities_2_1_1,
similarities_2_1_2,
],
axis=0,
),
from_logits=True,
)
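        # FIFO update of the support set: push the newest projections to the
        # front of the queue and drop the oldest entries.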
self.feature_queue.assign(
ops.concatenate([projections_1, self.feature_queue[:-batch_size]], axis=0)
)
return loss
def train_step(self, data):
(unlabeled_images, _), (labeled_images, labels) = data
images = ops.concatenate((unlabeled_images, labeled_images), axis=0)
augmented_images_1 = self.contrastive_augmenter(images)
augmented_images_2 = self.contrastive_augmenter(images)
with tf.GradientTape() as tape:
features_1 = self.encoder(augmented_images_1)
features_2 = self.encoder(augmented_images_2)
projections_1 = self.projection_head(features_1)
projections_2 = self.projection_head(features_2)
contrastive_loss = self.contrastive_loss(projections_1, projections_2)
gradients = tape.gradient(
contrastive_loss,
self.encoder.trainable_weights + self.projection_head.trainable_weights,
)
self.contrastive_optimizer.apply_gradients(
zip(
gradients,
self.encoder.trainable_weights + self.projection_head.trainable_weights,
)
)
self.update_contrastive_accuracy(features_1, features_2)
self.update_correlation_accuracy(features_1, features_2)
preprocessed_images = self.classification_augmenter(labeled_images)
with tf.GradientTape() as tape:
features = self.encoder(preprocessed_images)
class_logits = self.linear_probe(features)
probe_loss = self.probe_loss(labels, class_logits)
gradients = tape.gradient(probe_loss, self.linear_probe.trainable_weights)
self.probe_optimizer.apply_gradients(
zip(gradients, self.linear_probe.trainable_weights)
)
self.probe_accuracy.update_state(labels, class_logits)
return {
"c_loss": contrastive_loss,
"c_acc": self.contrastive_accuracy.result(),
"r_acc": self.correlation_accuracy.result(),
"p_loss": probe_loss,
"p_acc": self.probe_accuracy.result(),
}
def test_step(self, data):
labeled_images, labels = data
preprocessed_images = self.classification_augmenter(
labeled_images, training=False
)
features = self.encoder(preprocessed_images, training=False)
class_logits = self.linear_probe(features, training=False)
probe_loss = self.probe_loss(labels, class_logits)
self.probe_accuracy.update_state(labels, class_logits)
return {"p_loss": probe_loss, "p_acc": self.probe_accuracy.result()}
"""
## Pre-train NNCLR
We train the network using a `temperature` of 0.1 as suggested in the paper and
a `queue_size` of 10,000 as explained earlier. We use Adam as our contrastive and probe
optimizer. For this example we train the model for only a few epochs, but it should be
trained for more epochs for better performance.
The following two metrics can be used for monitoring the pretraining performance
which we also log (taken from
[this Keras example](https://keras.io/examples/vision/semisupervised_simclr/#selfsupervised-model-for-contrastive-pretraining)):
- Contrastive accuracy: self-supervised metric, the ratio of cases in which the
representation of an image is more similar to its differently augmented version's one,
than to the representation of any other image in the current batch. Self-supervised
metrics can be used for hyperparameter tuning even in the case when there are no labeled
examples.
- Linear probing accuracy: linear probing is a popular metric to evaluate self-supervised
classifiers. It is computed as the accuracy of a logistic regression classifier trained
on top of the encoder's features. In our case, this is done by training a single dense
layer on top of the frozen encoder. Note that contrary to traditional approach where the
classifier is trained after the pretraining phase, in this example we train it during
pretraining. This might slightly decrease its accuracy, but that way we can monitor its
value during training, which helps with experimentation and debugging.
"""
model = NNCLR(temperature=temperature, queue_size=queue_size)
model.compile(
contrastive_optimizer=keras.optimizers.Adam(),
probe_optimizer=keras.optimizers.Adam(),
jit_compile=False,
)
pretrain_history = model.fit(
train_dataset, epochs=num_epochs, validation_data=test_dataset
)
"""
## Evaluate our model
A popular way to evaluate an SSL method in computer vision (or, for that matter, any other
pre-training method) is to learn a linear classifier on the frozen features of the
trained backbone model and evaluate the classifier on unseen images.
include fine-tuning on the source dataset or even a target dataset with 5% or 10% labels
present. You can use the backbone we just trained for any downstream task such as image
classification (like we do here) or segmentation or detection, where the backbone models
are usually pre-trained with supervised learning.
"""
finetuning_model = keras.Sequential(
[
layers.Input(shape=input_shape),
augmenter(**classification_augmenter),
model.encoder,
layers.Dense(10),
],
name="finetuning_model",
)
finetuning_model.compile(
optimizer=keras.optimizers.Adam(),
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[keras.metrics.SparseCategoricalAccuracy(name="acc")],
jit_compile=False,
)
finetuning_history = finetuning_model.fit(
labeled_train_dataset, epochs=num_epochs, validation_data=test_dataset
)
"""
Self-supervised learning is particularly helpful when you only have access to very
limited labeled training data but can manage to build a large corpus of unlabeled
data, as shown by previous methods like [SEER](https://arxiv.org/abs/2103.01988),
[SimCLR](https://arxiv.org/abs/2002.05709), [SwAV](https://arxiv.org/abs/2006.09882) and
more.
You should also take a look at the blog posts for these papers which neatly show that it is
possible to achieve good results with few class labels by first pretraining on a large
unlabeled dataset and then fine-tuning on a smaller labeled dataset:
- [Advancing Self-Supervised and Semi-Supervised Learning with SimCLR](https://ai.googleblog.com/2020/04/advancing-self-supervised-and-semi.html)
- [High-performance self-supervised image classification with contrastive clustering](https://ai.facebook.com/blog/high-performance-self-supervised-image-classification-with-contrastive-clustering/)
- [Self-supervised learning: The dark matter of intelligence](https://ai.facebook.com/blog/self-supervised-learning-the-dark-matter-of-intelligence/)
You are also advised to check out the [original paper](https://arxiv.org/abs/2104.14548).
*Many thanks to [Debidatta Dwibedi](https://twitter.com/debidatta) (Google Research),
primary author of the NNCLR paper for his super-insightful reviews for this example.
This example also takes inspiration from the [SimCLR Keras Example](https://keras.io/examples/vision/semisupervised_simclr/).*
"""
| keras-io/examples/vision/nnclr.py/0 | {
"file_path": "keras-io/examples/vision/nnclr.py",
"repo_id": "keras-io",
"token_count": 7932
} | 122 |
"""
Title: Image similarity estimation using a Siamese Network with a contrastive loss
Author: Mehdi
Date created: 2021/05/06
Last modified: 2022/09/10
Description: Similarity learning using a siamese network trained with a contrastive loss.
Accelerator: GPU
"""
"""
## Introduction
[Siamese Networks](https://en.wikipedia.org/wiki/Siamese_neural_network)
are neural networks which share weights between two or more sister networks,
each producing embedding vectors of its respective inputs.
In supervised similarity learning, the networks are then trained to maximize the
contrast (distance) between embeddings of inputs of different classes, while minimizing
the distance between embeddings of inputs from the same class, resulting in embedding
spaces that reflect the class separation of the training inputs.
"""
"""
## Setup
"""
import random
import numpy as np
import keras
from keras import ops
import matplotlib.pyplot as plt
"""
## Hyperparameters
"""
epochs = 10
batch_size = 16
margin = 1 # Margin for contrastive loss.
"""
## Load the MNIST dataset
"""
(x_train_val, y_train_val), (x_test, y_test) = keras.datasets.mnist.load_data()
# Change the data type to a floating point format
x_train_val = x_train_val.astype("float32")
x_test = x_test.astype("float32")
"""
## Define training and validation sets
"""
# Keep 50% of train_val in validation set
x_train, x_val = x_train_val[:30000], x_train_val[30000:]
y_train, y_val = y_train_val[:30000], y_train_val[30000:]
del x_train_val, y_train_val
"""
## Create pairs of images
We will train the model to differentiate between digits of different classes. For
example, digit `0` needs to be differentiated from the rest of the
digits (`1` through `9`), digit `1` - from `0` and `2` through `9`, and so on.
To carry this out, we will select N random images from class A (for example,
for digit `0`) and pair them with N random images from another class B
(for example, for digit `1`). Then, we can repeat this process for all classes
of digits (until digit `9`). Once we have paired digit `0` with other digits,
we can repeat this process for the remaining classes for the rest of the digits
(from `1` until `9`).
"""
def make_pairs(x, y):
"""Creates a tuple containing image pairs with corresponding label.
Arguments:
x: List containing images, each index in this list corresponds to one image.
y: List containing labels, each label with datatype of `int`.
Returns:
        Tuple containing two numpy arrays as (pairs_of_samples, labels),
        where pairs_of_samples has shape (2 * len(x), 2, n_feature_dims) and
        labels is a binary array of shape (2 * len(x),) in which `0` marks a
        matching (same-class) pair and `1` marks a non-matching pair.
"""
num_classes = max(y) + 1
digit_indices = [np.where(y == i)[0] for i in range(num_classes)]
pairs = []
labels = []
for idx1 in range(len(x)):
# add a matching example
x1 = x[idx1]
label1 = y[idx1]
idx2 = random.choice(digit_indices[label1])
x2 = x[idx2]
pairs += [[x1, x2]]
labels += [0]
# add a non-matching example
label2 = random.randint(0, num_classes - 1)
while label2 == label1:
label2 = random.randint(0, num_classes - 1)
idx2 = random.choice(digit_indices[label2])
x2 = x[idx2]
pairs += [[x1, x2]]
labels += [1]
return np.array(pairs), np.array(labels).astype("float32")
# make train pairs
pairs_train, labels_train = make_pairs(x_train, y_train)
# make validation pairs
pairs_val, labels_val = make_pairs(x_val, y_val)
# make test pairs
pairs_test, labels_test = make_pairs(x_test, y_test)
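# Optional sanity check of the generated pair shapes (discussed below).
print(pairs_train.shape)  # (60000, 2, 28, 28)
print(labels_train.shape)  # (60000,)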
"""
We get:
**pairs_train.shape = (60000, 2, 28, 28)**
- We have 60,000 pairs: each of the 30,000 training images contributes one matching
and one non-matching pair
- Each pair contains 2 images
- Each image has shape `(28, 28)`
"""
"""
Split the training pairs
"""
x_train_1 = pairs_train[:, 0] # x_train_1.shape is (60000, 28, 28)
x_train_2 = pairs_train[:, 1]
"""
Split the validation pairs
"""
x_val_1 = pairs_val[:, 0] # x_val_1.shape = (60000, 28, 28)
x_val_2 = pairs_val[:, 1]
"""
Split the test pairs
"""
x_test_1 = pairs_test[:, 0] # x_test_1.shape = (20000, 28, 28)
x_test_2 = pairs_test[:, 1]
"""
## Visualize pairs and their labels
"""
def visualize(pairs, labels, to_show=6, num_col=3, predictions=None, test=False):
    """Creates a plot of pairs and labels, and predictions if it's a test dataset.
    Arguments:
        pairs: Numpy Array, of pairs to visualize, having shape
            (Number of pairs, 2, 28, 28).
        labels: Numpy Array, of labels for the pairs, having shape
            (Number of pairs,).
        to_show: Int, number of examples to visualize (default is 6).
            `to_show` must be an integral multiple of `num_col`.
            Otherwise it will be trimmed if it is greater than num_col,
            and incremented if it is less than num_col.
        num_col: Int, number of images in one row - (default is 3).
            For test and train respectively, it should not exceed 3 and 7.
        predictions: Numpy Array of predictions with shape (to_show, 1) -
            (default is None). Must be passed when test=True.
        test: Boolean telling whether the dataset being visualized is
            a train dataset or a test dataset - (default False).
    Returns:
        None.
"""
    # Define num_row:
    # - if to_show is not an exact multiple of num_col, to_show will be trimmed
    #   down to num_row * num_col below;
    # - if to_show // num_col == 0, num_col is greater than to_show, so num_row
    #   is set to 1 and to_show will be bumped up to num_col below.
num_row = to_show // num_col if to_show // num_col != 0 else 1
    # `to_show` must be an integral multiple of `num_col`:
    # now that num_row and num_col are known, simply set to_show to
    # num_row * num_col to enforce this.
to_show = num_row * num_col
# Plot the images
fig, axes = plt.subplots(num_row, num_col, figsize=(5, 5))
for i in range(to_show):
# If the number of rows is 1, the axes array is one-dimensional
if num_row == 1:
ax = axes[i % num_col]
else:
ax = axes[i // num_col, i % num_col]
ax.imshow(ops.concatenate([pairs[i][0], pairs[i][1]], axis=1), cmap="gray")
ax.set_axis_off()
if test:
ax.set_title("True: {} | Pred: {:.5f}".format(labels[i], predictions[i][0]))
else:
ax.set_title("Label: {}".format(labels[i]))
if test:
plt.tight_layout(rect=(0, 0, 1.9, 1.9), w_pad=0.0)
else:
plt.tight_layout(rect=(0, 0, 1.5, 1.5))
plt.show()
"""
Inspect training pairs
"""
visualize(pairs_train[:-1], labels_train[:-1], to_show=4, num_col=4)
"""
Inspect validation pairs
"""
visualize(pairs_val[:-1], labels_val[:-1], to_show=4, num_col=4)
"""
Inspect test pairs
"""
visualize(pairs_test[:-1], labels_test[:-1], to_show=4, num_col=4)
"""
## Define the model
There are two input layers, each leading to its own network, which
produces embeddings. A `Lambda` layer then merges them using the
[Euclidean distance](https://en.wikipedia.org/wiki/Euclidean_distance) between them, and the
merged output is fed to the final network.
"""
# Provided two tensors t1 and t2
# Euclidean distance = sqrt(sum(square(t1-t2)))
def euclidean_distance(vects):
"""Find the Euclidean distance between two vectors.
Arguments:
        vects: List containing two tensors of the same length.
    Returns:
        Tensor containing the Euclidean distance
        (as a floating point value) between the vectors.
"""
x, y = vects
sum_square = ops.sum(ops.square(x - y), axis=1, keepdims=True)
return ops.sqrt(ops.maximum(sum_square, keras.backend.epsilon()))
input = keras.layers.Input((28, 28, 1))
x = keras.layers.BatchNormalization()(input)
x = keras.layers.Conv2D(4, (5, 5), activation="tanh")(x)
x = keras.layers.AveragePooling2D(pool_size=(2, 2))(x)
x = keras.layers.Conv2D(16, (5, 5), activation="tanh")(x)
x = keras.layers.AveragePooling2D(pool_size=(2, 2))(x)
x = keras.layers.Flatten()(x)
x = keras.layers.BatchNormalization()(x)
x = keras.layers.Dense(10, activation="tanh")(x)
embedding_network = keras.Model(input, x)
input_1 = keras.layers.Input((28, 28, 1))
input_2 = keras.layers.Input((28, 28, 1))
# As mentioned above, Siamese Network share weights between
# tower networks (sister networks). To allow this, we will use
# same embedding network for both tower networks.
tower_1 = embedding_network(input_1)
tower_2 = embedding_network(input_2)
merge_layer = keras.layers.Lambda(euclidean_distance, output_shape=(1,))(
[tower_1, tower_2]
)
normal_layer = keras.layers.BatchNormalization()(merge_layer)
output_layer = keras.layers.Dense(1, activation="sigmoid")(normal_layer)
siamese = keras.Model(inputs=[input_1, input_2], outputs=output_layer)
"""
## Define the contrastive loss
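Written out, for a prediction `p` (the output of the model above) and a label `y`
(`0` = similar pair, `1` = dissimilar pair, following the pairing convention used earlier),
the loss implemented below is

`loss(y, p) = (1 - y) * p^2 + y * max(margin - p, 0)^2`

so similar pairs are pushed towards a prediction of `0`, while dissimilar pairs are only
penalized when their prediction falls below `margin`.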
"""
def loss(margin=1):
    """Provides 'contrastive_loss' with an enclosing scope containing the variable 'margin'.
    Arguments:
        margin: Integer, defines the baseline distance for which pairs
            should be classified as dissimilar - (default is 1).
    Returns:
        'contrastive_loss' function with data ('margin') attached.
"""
# Contrastive loss = mean( (1-true_value) * square(prediction) +
# true_value * square( max(margin-prediction, 0) ))
    def contrastive_loss(y_true, y_pred):
        """Calculates the contrastive loss.
        Arguments:
            y_true: List of labels, each label is of type float32.
            y_pred: List of predictions of the same length as y_true,
                each prediction is of type float32.
        Returns:
            A tensor containing the contrastive loss as a floating point value.
"""
square_pred = ops.square(y_pred)
margin_square = ops.square(ops.maximum(margin - (y_pred), 0))
return ops.mean((1 - y_true) * square_pred + (y_true) * margin_square)
return contrastive_loss
"""
## Compile the model with the contrastive loss
"""
siamese.compile(loss=loss(margin=margin), optimizer="RMSprop", metrics=["accuracy"])
siamese.summary()
"""
## Train the model
"""
history = siamese.fit(
[x_train_1, x_train_2],
labels_train,
validation_data=([x_val_1, x_val_2], labels_val),
batch_size=batch_size,
epochs=epochs,
)
"""
## Visualize results
"""
def plt_metric(history, metric, title, has_valid=True):
"""Plots the given 'metric' from 'history'.
Arguments:
history: history attribute of History object returned from Model.fit.
metric: Metric to plot, a string value present as key in 'history'.
title: A string to be used as title of plot.
        has_valid: Boolean, true if validation data was passed to Model.fit, else false.
Returns:
None.
"""
plt.plot(history[metric])
if has_valid:
plt.plot(history["val_" + metric])
plt.legend(["train", "validation"], loc="upper left")
plt.title(title)
plt.ylabel(metric)
plt.xlabel("epoch")
plt.show()
# Plot the accuracy
plt_metric(history=history.history, metric="accuracy", title="Model accuracy")
# Plot the contrastive loss
plt_metric(history=history.history, metric="loss", title="Contrastive Loss")
"""
## Evaluate the model
"""
results = siamese.evaluate([x_test_1, x_test_2], labels_test)
print("test loss, test acc:", results)
"""
## Visualize the predictions
"""
predictions = siamese.predict([x_test_1, x_test_2])
visualize(pairs_test, labels_test, to_show=3, predictions=predictions, test=True)
| keras-io/examples/vision/siamese_contrastive.py/0 | {
"file_path": "keras-io/examples/vision/siamese_contrastive.py",
"repo_id": "keras-io",
"token_count": 4553
} | 123 |
<jupyter_start><jupyter_text>Multi-GPU distributed training with TensorFlow**Author:** [fchollet](https://twitter.com/fchollet)**Date created:** 2020/04/28**Last modified:** 2023/06/29**Description:** Guide to multi-GPU training for Keras models with TensorFlow. IntroductionThere are generally two ways to distribute computation across multiple devices:**Data parallelism**, where a single model gets replicated on multiple devices ormultiple machines. Each of them processes different batches of data, then they mergetheir results. There exist many variants of this setup, that differ in how the differentmodel replicas merge results, in whether they stay in sync at every batch or whether theyare more loosely coupled, etc.**Model parallelism**, where different parts of a single model run on different devices,processing a single batch of data together. This works best with models that have anaturally-parallel architecture, such as models that feature multiple branches.This guide focuses on data parallelism, in particular **synchronous data parallelism**,where the different replicas of the model stay in sync after each batch they process.Synchronicity keeps the model convergence behavior identical to what you would see forsingle-device training.Specifically, this guide teaches you how to use the `tf.distribute` API to train Kerasmodels on multiple GPUs, with minimal changes to your code,on multiple GPUs (typically 2 to 16) installed on a single machine (single host,multi-device training). This is the most common setup for researchers and small-scaleindustry workflows. Setup<jupyter_code>import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import tensorflow as tf
import keras<jupyter_output><empty_output><jupyter_text>Single-host, multi-device synchronous trainingIn this setup, you have one machine with several GPUs on it (typically 2 to 16). Eachdevice will run a copy of your model (called a **replica**). For simplicity, in whatfollows, we'll assume we're dealing with 8 GPUs, at no loss of generality.**How it works**At each step of training:- The current batch of data (called **global batch**) is split into 8 differentsub-batches (called **local batches**). For instance, if the global batch has 512samples, each of the 8 local batches will have 64 samples.- Each of the 8 replicas independently processes a local batch: they run a forward pass,then a backward pass, outputting the gradient of the weights with respect to the loss ofthe model on the local batch.- The weight updates originating from local gradients are efficiently merged across the 8replicas. Because this is done at the end of every step, the replicas always stay insync.In practice, the process of synchronously updating the weights of the model replicas ishandled at the level of each individual weight variable. This is done through a **mirroredvariable** object.**How to use it**To do single-host, multi-device synchronous training with a Keras model, you would usethe [`tf.distribute.MirroredStrategy` API]( https://www.tensorflow.org/api_docs/python/tf/distribute/MirroredStrategy).Here's how it works:- Instantiate a `MirroredStrategy`, optionally configuring which specific devices youwant to use (by default the strategy will use all GPUs available).- Use the strategy object to open a scope, and within this scope, create all the Kerasobjects you need that contain variables. Typically, that means **creating & compiling themodel** inside the distribution scope. In some cases, the first call to `fit()` may alsocreate variables, so it's a good idea to put your `fit()` call in the scope as well.- Train the model via `fit()` as usual.Importantly, we recommend that you use `tf.data.Dataset` objects to load datain a multi-device or distributed workflow.Schematically, it looks like this:```python Create a MirroredStrategy.strategy = tf.distribute.MirroredStrategy()print('Number of devices: {}'.format(strategy.num_replicas_in_sync)) Open a strategy scope.with strategy.scope(): Everything that creates variables should be under the strategy scope. In general this is only model construction & `compile()`. model = Model(...) model.compile(...) Train the model on all available devices. model.fit(train_dataset, validation_data=val_dataset, ...) Test the model on all available devices. model.evaluate(test_dataset)```Here's a simple end-to-end runnable example:<jupyter_code>def get_compiled_model():
# Make a simple 2-layer densely-connected neural network.
inputs = keras.Input(shape=(784,))
x = keras.layers.Dense(256, activation="relu")(inputs)
x = keras.layers.Dense(256, activation="relu")(x)
outputs = keras.layers.Dense(10)(x)
model = keras.Model(inputs, outputs)
model.compile(
optimizer=keras.optimizers.Adam(),
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[keras.metrics.SparseCategoricalAccuracy()],
)
return model
def get_dataset():
batch_size = 32
num_val_samples = 10000
# Return the MNIST dataset in the form of a `tf.data.Dataset`.
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
# Preprocess the data (these are Numpy arrays)
x_train = x_train.reshape(-1, 784).astype("float32") / 255
x_test = x_test.reshape(-1, 784).astype("float32") / 255
y_train = y_train.astype("float32")
y_test = y_test.astype("float32")
# Reserve num_val_samples samples for validation
x_val = x_train[-num_val_samples:]
y_val = y_train[-num_val_samples:]
x_train = x_train[:-num_val_samples]
y_train = y_train[:-num_val_samples]
return (
tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(batch_size),
tf.data.Dataset.from_tensor_slices((x_val, y_val)).batch(batch_size),
tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(batch_size),
)
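# Note: when `fit()` runs under a `tf.distribute` strategy, the `batch_size` used
# in the `tf.data` pipelines above is the *global* batch size; each replica then
# processes `batch_size / num_replicas_in_sync` samples per step, as described in
# the introduction.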
# Create a MirroredStrategy.
strategy = tf.distribute.MirroredStrategy()
print("Number of devices: {}".format(strategy.num_replicas_in_sync))
# Open a strategy scope.
with strategy.scope():
# Everything that creates variables should be under the strategy scope.
# In general this is only model construction & `compile()`.
model = get_compiled_model()
# Train the model on all available devices.
train_dataset, val_dataset, test_dataset = get_dataset()
model.fit(train_dataset, epochs=2, validation_data=val_dataset)
# Test the model on all available devices.
model.evaluate(test_dataset)<jupyter_output><empty_output><jupyter_text>Using callbacks to ensure fault toleranceWhen using distributed training, you should always make sure you have a strategy torecover from failure (fault tolerance). The simplest way to handle this is to pass`ModelCheckpoint` callback to `fit()`, to save your modelat regular intervals (e.g. every 100 batches or every epoch). You can then restarttraining from your saved model.Here's a simple example:<jupyter_code># Prepare a directory to store all the checkpoints.
checkpoint_dir = "./ckpt"
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
def make_or_restore_model():
# Either restore the latest model, or create a fresh one
# if there is no checkpoint available.
checkpoints = [checkpoint_dir + "/" + name for name in os.listdir(checkpoint_dir)]
if checkpoints:
latest_checkpoint = max(checkpoints, key=os.path.getctime)
print("Restoring from", latest_checkpoint)
return keras.models.load_model(latest_checkpoint)
print("Creating a new model")
return get_compiled_model()
def run_training(epochs=1):
# Create a MirroredStrategy.
strategy = tf.distribute.MirroredStrategy()
# Open a strategy scope and create/restore the model
with strategy.scope():
model = make_or_restore_model()
callbacks = [
        # This callback saves the whole model (a `.keras` file) every epoch.
        # We include the current epoch in the file name.
keras.callbacks.ModelCheckpoint(
filepath=checkpoint_dir + "/ckpt-{epoch}.keras",
save_freq="epoch",
)
]
model.fit(
train_dataset,
epochs=epochs,
callbacks=callbacks,
validation_data=val_dataset,
verbose=2,
)
# Running the first time creates the model
run_training(epochs=1)
# Calling the same function again will resume from where we left off
run_training(epochs=1)<jupyter_output><empty_output> | keras-io/guides/ipynb/keras_core/distributed_training_with_tensorflow.ipynb/0 | {
"file_path": "keras-io/guides/ipynb/keras_core/distributed_training_with_tensorflow.ipynb",
"repo_id": "keras-io",
"token_count": 2697
} | 124 |
<jupyter_start><jupyter_text>Custom Image Augmentations with BaseImageAugmentationLayer**Author:** [lukewood](https://twitter.com/luke_wood_ml)**Date created:** 2022/04/26**Last modified:** 2023/11/29**Description:** Use BaseImageAugmentationLayer to implement custom data augmentations. OverviewData augmentation is an integral part of training any robust computer vision model. While KerasCV offers a plethora of prebuilt high-quality data augmentation techniques, you may still want to implement your own custom technique. KerasCV offers a helpful base class for writing data augmentation layers: `BaseImageAugmentationLayer`. Any augmentation layer built with `BaseImageAugmentationLayer` will automatically be compatible with the KerasCV `RandomAugmentationPipeline` class. This guide will show you how to implement your own custom augmentation layers using `BaseImageAugmentationLayer`. As an example, we will implement a layer that tints all images blue. Currently, KerasCV's preprocessing layers only support the TensorFlow backend with Keras 3.<jupyter_code>!pip install -q --upgrade keras-cv
!pip install -q --upgrade keras # Upgrade to Keras 3
import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import keras
from keras import ops
from keras import layers
import keras_cv
import matplotlib.pyplot as plt<jupyter_output><empty_output><jupyter_text>First, let's implement some helper functions for visualization and some transformations.<jupyter_code>def imshow(img):
img = img.astype(int)
plt.axis("off")
plt.imshow(img)
plt.show()
def gallery_show(images):
images = images.astype(int)
for i in range(9):
image = images[i]
plt.subplot(3, 3, i + 1)
plt.imshow(image.astype("uint8"))
plt.axis("off")
plt.show()
def transform_value_range(images, original_range, target_range):
images = (images - original_range[0]) / (original_range[1] - original_range[0])
scale_factor = target_range[1] - target_range[0]
return (images * scale_factor) + target_range[0]
def parse_factor(param, min_value=0.0, max_value=1.0, seed=None):
if isinstance(param, keras_cv.core.FactorSampler):
return param
if isinstance(param, float) or isinstance(param, int):
param = (min_value, param)
if param[0] == param[1]:
return keras_cv.core.ConstantFactorSampler(param[0])
    return keras_cv.core.UniformFactorSampler(param[0], param[1], seed=seed)<jupyter_output><empty_output><jupyter_text>BaseImageAugmentationLayer IntroductionImage augmentation should operate on a sample-wise basis, not batch-wise. This is a common mistake many machine learning practitioners make when implementing custom techniques. `BaseImageAugmentationLayer` offers a set of clean abstractions to make implementing image augmentation techniques on a sample-wise basis much easier. This is done by allowing the end user to override an `augment_image()` method and then performing automatic vectorization under the hood. Most augmentation techniques also must sample from one or more random distributions. KerasCV offers an abstraction to make random sampling end-user configurable: the `FactorSampler` API. Finally, many augmentation techniques require some information about the pixel values present in the input images. KerasCV offers the `value_range` API to simplify the handling of this. In our example, we will use the `FactorSampler` API, the `value_range` API, and `BaseImageAugmentationLayer` to implement a robust, configurable, and correct `RandomBlueTint` layer. Overriding `augment_image()`Let's start off with the minimum:<jupyter_code>class RandomBlueTint(keras_cv.layers.BaseImageAugmentationLayer):
def augment_image(self, image, *args, transformation=None, **kwargs):
# image is of shape (height, width, channels)
[*others, blue] = ops.unstack(image, axis=-1)
blue = ops.clip(blue + 100, 0.0, 255.0)
return ops.stack([*others, blue], axis=-1)<jupyter_output><empty_output><jupyter_text>Our layer overrides `BaseImageAugmentationLayer.augment_image()`. This method isused to augment images given to the layer. By default, using`BaseImageAugmentationLayer` gives you a few nice features for free:- support for unbatched inputs (HWC Tensor)- support for batched inputs (BHWC Tensor)- automatic vectorization on batched inputs (more information on this in automatic vectorization performance)Let's check out the result. First, let's download a sample image:<jupyter_code>SIZE = (300, 300)
elephants = keras.utils.get_file(
"african_elephant.jpg", "https://i.imgur.com/Bvro0YD.png"
)
elephants = keras.utils.load_img(elephants, target_size=SIZE)
elephants = keras.utils.img_to_array(elephants)
imshow(elephants)<jupyter_output><empty_output><jupyter_text>Next, let's augment it and visualize the result:<jupyter_code>layer = RandomBlueTint()
augmented = layer(elephants)
imshow(ops.convert_to_numpy(augmented))<jupyter_output><empty_output><jupyter_text>Looks great! We can also call our layer on batched inputs:<jupyter_code>layer = RandomBlueTint()
augmented = layer(ops.expand_dims(elephants, axis=0))
imshow(ops.convert_to_numpy(augmented)[0])<jupyter_output><empty_output><jupyter_text>Adding Random Behavior with the `FactorSampler` API.Usually an image augmentation technique should not do the same thing on everyinvocation of the layer's `__call__` method.KerasCV offers the `FactorSampler` API to allow users to provide configurable randomdistributions.<jupyter_code>class RandomBlueTint(keras_cv.layers.BaseImageAugmentationLayer):
"""RandomBlueTint randomly applies a blue tint to images.
Args:
factor: A tuple of two floats, a single float or a
`keras_cv.FactorSampler`. `factor` controls the extent to which the
image is blue shifted. `factor=0.0` makes this layer perform a no-op
operation, while a value of 1.0 uses the degenerated result entirely.
Values between 0 and 1 result in linear interpolation between the original
image and a fully blue image.
Values should be between `0.0` and `1.0`. If a tuple is used, a `factor` is
sampled between the two values for every image augmented. If a single float
is used, a value between `0.0` and the passed float is sampled. In order to
ensure the value is always the same, please pass a tuple with two identical
floats: `(0.5, 0.5)`.
"""
def __init__(self, factor, **kwargs):
super().__init__(**kwargs)
self.factor = parse_factor(factor)
def augment_image(self, image, *args, transformation=None, **kwargs):
[*others, blue] = ops.unstack(image, axis=-1)
blue_shift = self.factor() * 255
blue = ops.clip(blue + blue_shift, 0.0, 255.0)
        return ops.stack([*others, blue], axis=-1)<jupyter_output><empty_output><jupyter_text>Now, we can configure the random behavior of our `RandomBlueTint` layer. We can give it a range of values to sample from:<jupyter_code>many_elephants = ops.repeat(ops.expand_dims(elephants, axis=0), 9, axis=0)
layer = RandomBlueTint(factor=0.5)
augmented = layer(many_elephants)
gallery_show(ops.convert_to_numpy(augmented))<jupyter_output><empty_output><jupyter_text>Each image is augmented differently with a random factor sampled from the range`(0, 0.5)`.We can also configure the layer to draw from a normal distribution:<jupyter_code>many_elephants = ops.repeat(ops.expand_dims(elephants, axis=0), 9, axis=0)
factor = keras_cv.core.NormalFactorSampler(
mean=0.3, stddev=0.1, min_value=0.0, max_value=1.0
)
layer = RandomBlueTint(factor=factor)
augmented = layer(many_elephants)
gallery_show(ops.convert_to_numpy(augmented))<jupyter_output><empty_output><jupyter_text>As you can see, the augmentations are now drawn from a normal distribution. There are various types of `FactorSamplers`, including `UniformFactorSampler`, `NormalFactorSampler`, and `ConstantFactorSampler`. You can also implement your own. Overriding `get_random_transformation()`Now, suppose that your layer impacts the prediction targets: whether they are bounding boxes, classification labels, or regression targets. Your layer will need to have information about what augmentations were applied to the image when augmenting the label. Luckily, `BaseImageAugmentationLayer` was designed with this in mind. To handle this issue, `BaseImageAugmentationLayer` has an overrideable `get_random_transformation()` method alongside `augment_label()`, `augment_target()` and `augment_bounding_boxes()`. `augment_segmentation_map()` and others will be added in the future. Let's add this to our layer.<jupyter_code>class RandomBlueTint(keras_cv.layers.BaseImageAugmentationLayer):
"""RandomBlueTint randomly applies a blue tint to images.
Args:
factor: A tuple of two floats, a single float or a
`keras_cv.FactorSampler`. `factor` controls the extent to which the
image is blue shifted. `factor=0.0` makes this layer perform a no-op
operation, while a value of 1.0 uses the degenerated result entirely.
Values between 0 and 1 result in linear interpolation between the original
image and a fully blue image.
Values should be between `0.0` and `1.0`. If a tuple is used, a `factor` is
sampled between the two values for every image augmented. If a single float
is used, a value between `0.0` and the passed float is sampled. In order to
ensure the value is always the same, please pass a tuple with two identical
floats: `(0.5, 0.5)`.
"""
def __init__(self, factor, **kwargs):
super().__init__(**kwargs)
self.factor = parse_factor(factor)
def get_random_transformation(self, **kwargs):
# kwargs holds {"images": image, "labels": label, etc...}
return self.factor() * 255
def augment_image(self, image, transformation=None, **kwargs):
[*others, blue] = ops.unstack(image, axis=-1)
blue = ops.clip(blue + transformation, 0.0, 255.0)
return ops.stack([*others, blue], axis=-1)
def augment_label(self, label, transformation=None, **kwargs):
# you can use transformation somehow if you want
if transformation > 100:
# i.e. maybe class 2 corresponds to blue images
return 2.0
return label
def augment_bounding_boxes(self, bounding_boxes, transformation=None, **kwargs):
# you can also perform no-op augmentations on label types to support them in
# your pipeline.
return bounding_boxes<jupyter_output><empty_output><jupyter_text>To make use of these new methods, you will need to feed your inputs in with adictionary maintaining a mapping from images to targets.As of now, KerasCV supports the following label types:- labels via `augment_label()`.- bounding_boxes via `augment_bounding_boxes()`.In order to use augmention layers alongside your prediction targets, you must packageyour inputs as follows:<jupyter_code>labels = ops.array([[1, 0]])
inputs = {"images": ops.convert_to_tensor(elephants), "labels": labels}<jupyter_output><empty_output><jupyter_text>Now if we call our layer on the inputs:<jupyter_code>layer = RandomBlueTint(factor=(0.6, 0.6))
augmented = layer(inputs)
print(augmented["labels"])<jupyter_output><empty_output><jupyter_text>Both the inputs and labels are augmented.Note how when `transformation` is > 100 the label is modified to contain 2.0 asspecified in the layer above. `value_range` supportImagine you are using your new augmentation layer in many pipelines.Some pipelines have values in the range `[0, 255]`, some pipelines have normalized their images to the range `[-1, 1]`, and some use a value range of `[0, 1]`.If a user calls your layer with an image in value range `[0, 1]`, the outputs will benonsense!<jupyter_code>layer = RandomBlueTint(factor=(0.1, 0.1))
elephants_0_1 = elephants / 255
print("min and max before augmentation:", elephants_0_1.min(), elephants_0_1.max())
augmented = layer(elephants_0_1)
print(
"min and max after augmentation:",
ops.convert_to_numpy(augmented).min(),
ops.convert_to_numpy(augmented).max(),
)
imshow(ops.convert_to_numpy(augmented * 255).astype(int))<jupyter_output><empty_output><jupyter_text>Note that this is an incredibly weak augmentation!Factor is only set to 0.1.Let's resolve this issue with KerasCV's `value_range` API.<jupyter_code>class RandomBlueTint(keras_cv.layers.BaseImageAugmentationLayer):
"""RandomBlueTint randomly applies a blue tint to images.
Args:
value_range: value_range: a tuple or a list of two elements. The first value
represents the lower bound for values in passed images, the second represents
the upper bound. Images passed to the layer should have values within
`value_range`.
factor: A tuple of two floats, a single float or a
`keras_cv.FactorSampler`. `factor` controls the extent to which the
image is blue shifted. `factor=0.0` makes this layer perform a no-op
operation, while a value of 1.0 uses the degenerated result entirely.
Values between 0 and 1 result in linear interpolation between the original
image and a fully blue image.
Values should be between `0.0` and `1.0`. If a tuple is used, a `factor` is
sampled between the two values for every image augmented. If a single float
is used, a value between `0.0` and the passed float is sampled. In order to
ensure the value is always the same, please pass a tuple with two identical
floats: `(0.5, 0.5)`.
"""
def __init__(self, value_range, factor, **kwargs):
super().__init__(**kwargs)
self.value_range = value_range
self.factor = parse_factor(factor)
def get_random_transformation(self, **kwargs):
# kwargs holds {"images": image, "labels": label, etc...}
return self.factor() * 255
def augment_image(self, image, transformation=None, **kwargs):
image = transform_value_range(image, self.value_range, (0, 255))
[*others, blue] = ops.unstack(image, axis=-1)
blue = ops.clip(blue + transformation, 0.0, 255.0)
result = ops.stack([*others, blue], axis=-1)
result = transform_value_range(result, (0, 255), self.value_range)
return result
def augment_label(self, label, transformation=None, **kwargs):
# you can use transformation somehow if you want
if transformation > 100:
# i.e. maybe class 2 corresponds to blue images
return 2.0
return label
def augment_bounding_boxes(self, bounding_boxes, transformation=None, **kwargs):
# you can also perform no-op augmentations on label types to support them in
# your pipeline.
return bounding_boxes
layer = RandomBlueTint(value_range=(0, 1), factor=(0.1, 0.1))
elephants_0_1 = elephants / 255
print("min and max before augmentation:", elephants_0_1.min(), elephants_0_1.max())
augmented = layer(elephants_0_1)
print(
"min and max after augmentation:",
ops.convert_to_numpy(augmented).min(),
ops.convert_to_numpy(augmented).max(),
)
imshow(ops.convert_to_numpy(augmented * 255).astype(int))<jupyter_output><empty_output><jupyter_text>Now our elephants are only slightly blue tinted. This is the expected behavior when using a factor of `0.1`. Great!Now users can configure the layer to support any value range they may need. Note that only layers that interact with color information should use the value range API. Many augmentation techniques, such as `RandomRotation`, will not need this. Auto vectorization performanceIf you are wondering:> Does implementing my augmentations on a sample-wise basis carry performance implications?You are not alone!Luckily, I have performed extensive analysis on the performance of automatic vectorization, manual vectorization, and unvectorized implementations. In this benchmark, I implemented a RandomCutout layer using auto vectorization, no auto vectorization, and manual vectorization. All of these were benchmarked inside of an `@tf.function` annotation. They were also each benchmarked with the `jit_compile` argument. The following chart shows the results of this benchmark:_The primary takeaway should be that the difference between manual vectorization and automatic vectorization is marginal!_Please note that Eager mode performance will be drastically different. Common gotchasSome layers are not able to be automatically vectorized. An example of this is [GridMask](https://tinyurl.com/ffb5zzf7).If you receive an error when invoking your layer, try adding the following to your constructor:<jupyter_code>class UnVectorizable(keras_cv.layers.BaseImageAugmentationLayer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# this disables BaseImageAugmentationLayer's Auto Vectorization
self.auto_vectorize = False<jupyter_output><empty_output> | keras-io/guides/ipynb/keras_cv/custom_image_augmentations.ipynb/0 | {
"file_path": "keras-io/guides/ipynb/keras_cv/custom_image_augmentations.ipynb",
"repo_id": "keras-io",
"token_count": 5522
} | 125 |
<jupyter_start><jupyter_text>Migrating Keras 2 code to multi-backend Keras 3**Author:** [Divyashree Sreepathihalli](https://github.com/divyashreepathihalli)**Date created:** 2023/10/23**Last modified:** 2023/10/30**Description:** Instructions & troubleshooting for migrating your Keras 2 code to multi-backend Keras 3. This guide will help you migrate TensorFlow-only Keras 2 code to multi-backend Keras3 code. The overhead for the migration is minimal. Once you have migrated,you can run Keras workflows on top of either JAX, TensorFlow, or PyTorch.This guide has two parts:1. Migrating your legacy Keras 2 code to Keras 3, running on top of the TensorFlow backend.This is generally very easy, though there are minor issues to be mindful of, that we will go overin detail.2. Further migrating your Keras 3 + TensorFlow code to multi-backend Keras 3, so that it can run onJAX and PyTorch.Let's get started.<jupyter_code>!pip install keras==3.0.0 --upgrade --quiet<jupyter_output><empty_output><jupyter_text>SetupFirst, lets install `keras-nightly`.This example uses the TensorFlow backend (`os.environ["KERAS_BACKEND"] = "tensorflow"`).After you've migrated your code, you can change the `"tensorflow"` string to `"jax"` or `"torch"`and click "Restart runtime" in Colab, and your code will run on the JAX or PyTorch backend.<jupyter_code>!pip install -q keras-nightly
import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import keras
import tensorflow as tf
import numpy as np<jupyter_output><empty_output><jupyter_text>Going from Keras 2 to Keras 3 with the TensorFlow backendFirst, replace your imports:1. Replace `from tensorflow import keras` to `import keras`2. Replace `from tensorflow.keras import xyz` (e.g. `from tensorflow.keras import layers`)to `from keras import xyz` (e.g. `from keras import layers`)3. Replace `tf.keras.*` to `keras.*`Next, start running your tests. Most of the time, your code will execute on Keras 3 just fine.All issues you might encouter are detailed below, with their fixes. `jit_compile` is set to `True` by default on GPU.The default value of the `jit_compile` argument to the `Model` constructor has been set to`True` on GPU in Keras 3. This means that models will be compiled with Just-In-Time (JIT)compilation by default on GPU.JIT compilation can improve the performance of some models. However, it may not work withall TensorFlow operations. If you are using a custom model or layer and you see anXLA-related error, you may need to set the `jit_compile` argument to `False`. Here is a listof [known issues](https://www.tensorflow.org/xla/known_issues) encountered whenusing XLA with TensorFlow. In addition to these issues, there are someops that are not supported by XLA.The error message you could encounter would be as follows:```Detected unsupported operations when trying to compile graph__inference_one_step_on_data_125[] on XLA_GPU_JIT```For example, the following snippet of code will reproduce the above error:```pythonclass MyModel(keras.Model): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def call(self, inputs): string_input = tf.strings.as_string(inputs) return tf.strings.to_number(string_input)subclass_model = MyModel()x_train = np.array([[1, 2, 3], [4, 5, 6]])subclass_model.compile(optimizer="sgd", loss="mse")subclass_model.predict(x_train)``` **How to fix it:** set `jit_compile=False` in `model.compile(..., jit_compile=False)`,or set the `jit_compile` attribute to `False`, like this:<jupyter_code>class MyModel(keras.Model):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def call(self, inputs):
# tf.strings ops aren't support by XLA
string_input = tf.strings.as_string(inputs)
return tf.strings.to_number(string_input)
subclass_model = MyModel()
x_train = np.array([[1, 2, 3], [4, 5, 6]])
subclass_model.jit_compile = False
subclass_model.predict(x_train)<jupyter_output><empty_output><jupyter_text>Saving a model in the TF SavedModel formatSaving to the TF SavedModel format via `model.save()` is no longer supported in Keras 3.The error message you could encounter would be as follows:```>>> model.save("mymodel")ValueError: Invalid filepath extension for saving. Please add either a `.keras` extensionfor the native Keras format (recommended) or a `.h5` extension. Use`tf.saved_model.save()` if you want to export a SavedModel for use withTFLite/TFServing/etc. Received: filepath=saved_model.```The following snippet of code will reproduce the above error:```pythonsequential_model = keras.Sequential([ keras.layers.Dense(2)])sequential_model.save("saved_model")``` **How to fix it:** use `tf.saved_model.save` instead of `model.save`<jupyter_code>sequential_model = keras.Sequential([keras.layers.Dense(2)])
sequential_model(np.random.rand(3, 5))
tf.saved_model.save(sequential_model, "saved_model")<jupyter_output><empty_output><jupyter_text>Loading a TF SavedModelLoading a TF SavedModel file via `keras.models.load_model()` is no longer supportedIf you try to use `keras.models.load_model()` with a TF SavedModel, you will get the following error:```pythonValueError: File format not supported: filepath=saved_model. Keras 3 only supports V3`.keras` files and legacy H5 format files (`.h5` extension). Note that the legacySavedModel format is not supported by `load_model()` in Keras 3. In order to reload aTensorFlow SavedModel as an inference-only layer in Keras 3, use`keras.layers.TFSMLayer(saved_model, call_endpoint='serving_default')` (note that your`call_endpoint` might have a different name).```The following snippet of code will reproduce the above error:```pythonkeras.models.load_model("saved_model")``` **How to fix it:** Use `keras.layers.TFSMLayer(filepath, call_endpoint="serving_default")` to reload a TFSavedModel as a Keras layer. This is not limited to SavedModels that originate from Keras -- it will workwith any SavedModel, e.g. TF-Hub models.<jupyter_code>keras.layers.TFSMLayer("saved_model", call_endpoint="serving_default")<jupyter_output><empty_output><jupyter_text>Using deeply nested inputs in Functional Models`Model()` can no longer be passed deeply nested inputs/outputs (nested more than 1 leveldeep, e.g. lists of lists of tensors).You would encounter errors as follows:```ValueError: When providing `inputs` as a dict, all values in the dict must beKerasTensors. Received: inputs={'foo': <KerasTensor shape=(None, 1), dtype=float32,sparse=None, name=foo>, 'bar': {'baz': <KerasTensor shape=(None, 1), dtype=float32,sparse=None, name=bar>}} including invalid value {'baz': <KerasTensor shape=(None, 1),dtype=float32, sparse=None, name=bar>} of type ```The following snippet of code will reproduce the above error:```pythoninputs = { "foo": keras.Input(shape=(1,), name="foo"), "bar": { "baz": keras.Input(shape=(1,), name="bar"), },}outputs = inputs["foo"] + inputs["bar"]["baz"]keras.Model(inputs, outputs)``` **How to fix it:** replace nested input with either dicts, lists, and tuplesof input tensors.<jupyter_code>inputs = {
"foo": keras.Input(shape=(1,), name="foo"),
"bar": keras.Input(shape=(1,), name="bar"),
}
outputs = inputs["foo"] + inputs["bar"]
keras.Model(inputs, outputs)<jupyter_output><empty_output><jupyter_text>TF autographIn Keras 2, TF autograph is enabled by default on the `call()` method of customlayers. In Keras 3, it is not. This means you may have to use cond ops if you're usingcontrol flow, or alternatively you can decorate your `call()` method with `@tf.function`.You would encounter an error as follows:```OperatorNotAllowedInGraphError: Exception encountered when calling MyCustomLayer.call().Using a symbolic `tf.Tensor` as a Python `bool` is not allowed. You can attempt thefollowing resolutions to the problem: If you are running in Graph mode, use Eagerexecution mode or decorate this function with @tf.function. If you are using AutoGraph,you can try decorating this function with @tf.function. If that does not work, then youmay be using an unsupported feature or your source code may not be visible to AutoGraph.Here is a [link for more information](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/autograph/g3doc/reference/limitations.mdaccess-to-source-code).```The following snippet of code will reproduce the above error:```pythonclass MyCustomLayer(keras.layers.Layer): def call(self, inputs): if tf.random.uniform(()) > 0.5: return inputs * 2 else: return inputs / 2layer = MyCustomLayer()data = np.random.uniform(size=[3, 3])model = keras.models.Sequential([layer])model.compile(optimizer="adam", loss="mse")model.predict(data)``` **How to fix it:** decorate your `call()` method with `@tf.function`<jupyter_code>class MyCustomLayer(keras.layers.Layer):
@tf.function()
def call(self, inputs):
if tf.random.uniform(()) > 0.5:
return inputs * 2
else:
return inputs / 2
layer = MyCustomLayer()
data = np.random.uniform(size=[3, 3])
model = keras.models.Sequential([layer])
model.compile(optimizer="adam", loss="mse")
model.predict(data)<jupyter_output><empty_output><jupyter_text>Calling TF ops with a `KerasTensor`Using a TF op on a Keras tensor during functional model construction is disallowed: "AKerasTensor cannot be used as input to a TensorFlow function".The error you would encounter would be as follows:```ValueError: A KerasTensor cannot be used as input to a TensorFlow function. A KerasTensoris a symbolic placeholder for a shape and dtype, used when constructing Keras Functionalmodels or Keras Functions. You can only use it as input to a Keras layer or a Kerasoperation (from the namespaces `keras.layers` and `keras.operations`).```The following snippet of code will reproduce the error:```pythoninput = keras.layers.Input([2, 2, 1])tf.squeeze(input)``` **How to fix it:** use an equivalent op from `keras.ops`.<jupyter_code>input = keras.layers.Input([2, 2, 1])
keras.ops.squeeze(input)<jupyter_output><empty_output><jupyter_text>Multi-output model `evaluate()`The `evaluate()` method of a multi-output model no longer returns individual outputlosses separately. Instead, you should utilize the `metrics` argument in the `compile()`method to keep track of these losses.When dealing with multiple named outputs, such as output_a and output_b, the legacy`tf.keras` would include _loss, _loss, and similar entries inmetrics. However, in keras 3.0, these entries are not automatically added to metrics.They must be explicitly provided in the metrics list for each individual output.The following snippet of code will reproduce the above behavior:```pythonfrom keras import layers A functional model with multiple outputsinputs = layers.Input(shape=(10,))x1 = layers.Dense(5, activation='relu')(inputs)x2 = layers.Dense(5, activation='relu')(x1)output_1 = layers.Dense(5, activation='softmax', name="output_1")(x1)output_2 = layers.Dense(5, activation='softmax', name="output_2")(x2)model = keras.Model(inputs=inputs, outputs=[output_1, output_2])model.compile(optimizer='adam', loss='categorical_crossentropy') dummy datax_test = np.random.uniform(size=[10, 10])y_test = np.random.uniform(size=[10, 5])model.evaluate(x_test, y_test)```<jupyter_code>from keras import layers
# A functional model with multiple outputs
inputs = layers.Input(shape=(10,))
x1 = layers.Dense(5, activation="relu")(inputs)
x2 = layers.Dense(5, activation="relu")(x1)
output_1 = layers.Dense(5, activation="softmax", name="output_1")(x1)
output_2 = layers.Dense(5, activation="softmax", name="output_2")(x2)
# dummy data
x_test = np.random.uniform(size=[10, 10])
y_test = np.random.uniform(size=[10, 5])
multi_output_model = keras.Model(inputs=inputs, outputs=[output_1, output_2])
multi_output_model.compile(
optimizer="adam",
loss="categorical_crossentropy",
metrics=["categorical_crossentropy", "categorical_crossentropy"],
)
multi_output_model.evaluate(x_test, y_test)<jupyter_output><empty_output><jupyter_text>TensorFlow variables trackingSetting a `tf.Variable` as an attribute of a Keras 3 layer or model will not automaticallytrack the variable, unlike in Keras 2. The following snippet of code will show that the `tf.Variables`are not being tracked.```pythonclass MyCustomLayer(keras.layers.Layer): def __init__(self, units): super().__init__() self.units = units def build(self, input_shape): input_dim = input_shape[-1] self.w = tf.Variable(initial_value=tf.zeros([input_dim, self.units])) self.b = tf.Variable(initial_value=tf.zeros([self.units,])) def call(self, inputs): return keras.ops.matmul(inputs, self.w) + self.blayer = MyCustomLayer(3)data = np.random.uniform(size=[3, 3])model = keras.models.Sequential([layer])model.compile(optimizer="adam", loss="mse")model.predict(data) The model does not have any trainable variablesfor layer in model.layers: print(layer.trainable_variables)```You will see the following warning:```UserWarning: The model does not have any trainable weights. warnings.warn("The model does not have any trainable weights.")```**How to fix it:** use `self.add_weight()` method or opt for a `keras.Variable` instead. If youare currently using `tf.variable`, you can switch to `keras.Variable`.<jupyter_code>class MyCustomLayer(keras.layers.Layer):
def __init__(self, units):
super().__init__()
self.units = units
def build(self, input_shape):
input_dim = input_shape[-1]
self.w = self.add_weight(
shape=[input_dim, self.units],
initializer="zeros",
)
self.b = self.add_weight(
shape=[
self.units,
],
initializer="zeros",
)
def call(self, inputs):
return keras.ops.matmul(inputs, self.w) + self.b
layer = MyCustomLayer(3)
data = np.random.uniform(size=[3, 3])
model = keras.models.Sequential([layer])
model.compile(optimizer="adam", loss="mse")
model.predict(data)
# Verify that the variables are now being tracked
for layer in model.layers:
print(layer.trainable_variables)<jupyter_output><empty_output><jupyter_text>`None` entries in nested `call()` arguments`None` entries are not allowed as part of nested (e.g. list/tuples) tensorarguments in `Layer.call()`, nor as part of `call()`'s nested return values.If the `None` in the argument is intentional and serves a specific purpose,ensure that the argument is optional and structure it as a separate parameter.For example, consider defining the `call` method with optional argument.The following snippet of code will reproduce the error.```pythonclass CustomLayer(keras.layers.Layer): def __init__(self): super().__init__() def call(self, inputs): foo = inputs["foo"] baz = inputs["bar"]["baz"] if baz is not None: return foo + baz return foolayer = CustomLayer()inputs = { "foo": keras.Input(shape=(1,), name="foo"), "bar": { "baz": None, },}layer(inputs)``` **How to fix it:****Solution 1:** Replace `None` with a value, like this:<jupyter_code>class CustomLayer(keras.layers.Layer):
def __init__(self):
super().__init__()
def call(self, inputs):
foo = inputs["foo"]
baz = inputs["bar"]["baz"]
return foo + baz
layer = CustomLayer()
inputs = {
"foo": keras.Input(shape=(1,), name="foo"),
"bar": {
"baz": keras.Input(shape=(1,), name="bar"),
},
}
layer(inputs)<jupyter_output><empty_output><jupyter_text>**Solution 2:** Define the call method with an optional argument.Here is an example of this fix:<jupyter_code>class CustomLayer(keras.layers.Layer):
def __init__(self):
super().__init__()
def call(self, foo, baz=None):
if baz is not None:
return foo + baz
return foo
layer = CustomLayer()
foo = keras.Input(shape=(1,), name="foo")
baz = None
layer(foo, baz=baz)<jupyter_output><empty_output><jupyter_text>State-building issuesKeras 3 is significantly stricter than Keras 2 about when state (e.g. numerical weight variables)can be created. Keras 3 wants all state to be created before the model can be trained. This is a requirementfor using JAX (whereas TensorFlow was very lenient about state creation timing).Keras layers should create their state either in their constructor (`__init__()` method) or in their `build()` method.They should avoid creating state in `call()`.If you ignore this recommendation and create state in `call()`anyway (e.g. by calling a previously unbuilt layer), then Keras will attempt to build the layer automaticallyby calling the `call()` method on symbolic inputs before training.However, this attempt at automatic state creation may fail in certain cases.This will cause an error that looks like like this:```Layer 'frame_position_embedding' looks like it has unbuilt state,but Keras is not able to trace the layer `call()` in order to build it automatically.Possible causes:1. The `call()` method of your layer may be crashing.Try to `__call__()` the layer eagerly on some test input first to see if it works.E.g. `x = np.random.random((3, 4)); y = layer(x)`2. If the `call()` method is correct, then you may need to implementthe `def build(self, input_shape)` method on your layer.It should create all variables used by the layer(e.g. by calling `layer.build()` on all its children layers).```You could reproduce this error with the following layer, when used with the JAX backend:```pythonclass PositionalEmbedding(keras.layers.Layer): def __init__(self, sequence_length, output_dim, **kwargs): super().__init__(**kwargs) self.position_embeddings = layers.Embedding( input_dim=sequence_length, output_dim=output_dim ) self.sequence_length = sequence_length self.output_dim = output_dim def call(self, inputs): inputs = keras.ops.cast(inputs, self.compute_dtype) length = keras.ops.shape(inputs)[1] positions = keras.ops.arange(start=0, stop=length, step=1) embedded_positions = self.position_embeddings(positions) return inputs + embedded_positions```**How to fix it:** Do exactly what the error message asks. First, try to run the layer eagerlyto see if the `call()` method is in fact correct (note: if it was working in Keras 2, then it is correctand does not need to be changed). If it is indeed correct, then you should implement a `build(self, input_shape)`method that creates all of the layer's state, including the state of sublayers. Here's the fix as applied for the layer above(note the `build()` method):```pythonclass PositionalEmbedding(keras.layers.Layer): def __init__(self, sequence_length, output_dim, **kwargs): super().__init__(**kwargs) self.position_embeddings = layers.Embedding( input_dim=sequence_length, output_dim=output_dim ) self.sequence_length = sequence_length self.output_dim = output_dim def build(self, input_shape): self.position_embeddings.build(input_shape) def call(self, inputs): inputs = keras.ops.cast(inputs, self.compute_dtype) length = keras.ops.shape(inputs)[1] positions = keras.ops.arange(start=0, stop=length, step=1) embedded_positions = self.position_embeddings(positions) return inputs + embedded_positions``` Removed featuresA small number of legacy features with very low usage were removed from Keras 3 as a cleanup measure:* `keras.layers.ThresholdedReLU` is removed. 
Instead, you can simply use the `ReLU` layerwith the argument `threshold`.* Symbolic `Layer.add_loss()`: Symbolic `add_loss()` is removed (you can still use`add_loss()` inside the `call()` method of a layer/model).* Locally connected layers (`LocallyConnected1D`, `LocallyConnected2D`are removed due to very low usage. Touse locally connected layers, copy the layer implementation into your own codebase.* `keras.layers.experimental.RandomFourierFeatures` is removed due to very low usage.To use it, copy the layer implementation into your own codebase.* Removed layer attributes: Layer attributes `metrics`, `dynamic` are removed. `metrics` is stillavailable on the `Model` class.* The `constants` and `time_major` arguments in RNN layers are removed.The `constants` argument was a remnant of Theano and had very low usage. The `time_major`argument also had very low usage.* `reset_metrics` argument: The `reset_metrics` argument is removed from `model.*_on_batch()`methods. This argument had very low usage.* The `keras.constraints.RadialConstraint` object is removed. This object had very low usage. Transitioning to backend-agnostic Keras 3Keras 3 code with the TensorFlow backend will work with native TensorFlow APIs.However, if you want your code to be backend-agnostic, you will need to:- Replace all of the `tf.*` API calls with their equivalent Keras APIs.- Convert your custom `train_step`/`test_step` methods to a multi-frameworkimplementation.- Make sure you're using stateless `keras.random` ops correctly in your layers.Let's go over each point in detail. Switching to Keras opsIn many cases, this is the only thing you need to do to start being able to runyour custom layers and metrics with JAX and PyTorch:replace any `tf.*`, `tf.math*`, `tf.linalg.*`, etc. with `keras.ops.*`. Most TF opsshould be consistent with Keras 3. If the names different, they will behighlighted in this guide. NumPy opsKeras implements the NumPy API as part of `keras.ops`.The table below only lists a small subset of TensorFlow and Keras ops; ops not listedare usually named the same in both frameworks (e.g. 
`reshape`, `matmul`, `cast`, etc.)| TensorFlow | Keras 3.0 ||--------------------------------------------|-------------------------------------------|| `tf.abs` | `keras.ops.absolute` || `tf.reduce_all` | `keras.ops.all` || `tf.reduce_max` | `keras.ops.amax` || `tf.reduce_min` | `keras.ops.amin` || `tf.reduce_any` | `keras.ops.any` || `tf.concat` | `keras.ops.concatenate` || `tf.range` | `keras.ops.arange` || `tf.acos` | `keras.ops.arccos` || `tf.asin` | `keras.ops.arcsin` || `tf.asinh` | `keras.ops.arcsinh` || `tf.atan` | `keras.ops.arctan` || `tf.atan2` | `keras.ops.arctan2` || `tf.atanh` | `keras.ops.arctanh` || `tf.convert_to_tensor` | `keras.ops.convert_to_tensor` || `tf.reduce_mean` | `keras.ops.mean` || `tf.clip_by_value` | `keras.ops.clip` || `tf.math.conj` | `keras.ops.conjugate` || `tf.linalg.diag_part` | `keras.ops.diagonal` || `tf.reverse` | `keras.ops.flip` || `tf.gather` | `keras.ops.take` || `tf.math.is_finite` | `keras.ops.isfinite` || `tf.math.is_inf` | `keras.ops.isinf` || `tf.math.is_nan` | `keras.ops.isnan` || `tf.reduce_max` | `keras.ops.max` || `tf.reduce_mean` | `keras.ops.mean` || `tf.reduce_min` | `keras.ops.min` || `tf.rank` | `keras.ops.ndim` || `tf.math.pow` | `keras.ops.power` || `tf.reduce_prod` | `keras.ops.prod` || `tf.math.reduce_std` | `keras.ops.std` || `tf.reduce_sum` | `keras.ops.sum` || `tf.gather` | `keras.ops.take` || `tf.gather_nd` | `keras.ops.take_along_axis` || `tf.math.reduce_variance` | `keras.ops.var` | Others ops| TensorFlow | Keras 3.0 ||----------------------------------------------------|-------------------------------------------------------------------|| `tf.nn.sigmoid_cross_entropy_with_logits` | `keras.ops.binary_crossentropy` (mind the `from_logits` argument) || `tf.nn.sparse_softmax_cross_entropy_with_logits` | `keras.ops.sparse_categorical_crossentropy` (mind the `from_logits` argument)|| `tf.nn.sparse_softmax_cross_entropy_with_logits` | `keras.ops.categorical_crossentropy(target, output, from_logits=False, axis=-1)`|| `tf.nn.conv1d`, `tf.nn.conv2d`, `tf.nn.conv3d`, `tf.nn.convolution` | `keras.ops.conv` || `tf.nn.conv_transpose`, `tf.nn.conv1d_transpose`, `tf.nn.conv2d_transpose`, `tf.nn.conv3d_transpose` | `keras.ops.conv_transpose` || `tf.nn.depthwise_conv2d` | `keras.ops.depthwise_conv` || `tf.nn.separable_conv2d` | `keras.ops.separable_conv` || `tf.nn.batch_normalization` | No direct equivalent; use `keras.layers.BatchNormalization` || `tf.nn.dropout` | `keras.random.dropout` || `tf.nn.embedding_lookup` | `keras.ops.take` || `tf.nn.l2_normalize` | `keras.utils.normalize` (not an op) || `x.numpy` | `keras.ops.convert_to_numpy` || `tf.scatter_nd_update` | `keras.ops.scatter_update` || `tf.tensor_scatter_nd_update` | `keras.ops.slice_update` || `tf.signal.fft2d` | `keras.ops.fft2` || `tf.signal.inverse_stft` | `keras.ops.istft` | Custom `train_step()` methodsYour models may include a custom `train_step()` or `test_step()` method, which relyon TensorFlow-only APIs -- for instance, your `train_step()` method may leverage TensorFlow's `tf.GradientTape`.To convert such models to run on JAX or PyTorch, you will have a write a different `train_step()` implementationfor each backend you want to support.In some cases, you might be able to simply override the `Model.compute_loss()` method and make it fully backend-agnostic,instead of overriding `train_step()`. Here's an example of a layer with a custom `compute_loss()` method which worksacross JAX, TensorFlow, and PyTorch:<jupyter_code>class MyModel(keras.Model):
def compute_loss(self, x=None, y=None, y_pred=None, sample_weight=None):
loss = keras.ops.sum(keras.losses.mean_squared_error(y, y_pred, sample_weight))
        return loss<jupyter_output><empty_output><jupyter_text>If you need to modify the optimization mechanism itself, beyond the loss computation, then you will need to override `train_step()`, and implement one `train_step` method per backend, like below. See the following guides for details on how each backend should be handled:

- [Customizing what happens in `fit()` with JAX](https://keras.io/guides/custom_train_step_in_jax/)
- [Customizing what happens in `fit()` with TensorFlow](https://keras.io/guides/custom_train_step_in_tensorflow/)
- [Customizing what happens in `fit()` with PyTorch](https://keras.io/guides/custom_train_step_in_torch/)

<jupyter_code>class MyModel(keras.Model):
def train_step(self, *args, **kwargs):
if keras.backend.backend() == "jax":
return self._jax_train_step(*args, **kwargs)
elif keras.backend.backend() == "tensorflow":
return self._tensorflow_train_step(*args, **kwargs)
elif keras.backend.backend() == "torch":
return self._torch_train_step(*args, **kwargs)
def _jax_train_step(self, state, data):
pass # See guide: keras.io/guides/custom_train_step_in_jax/
def _tensorflow_train_step(self, data):
pass # See guide: keras.io/guides/custom_train_step_in_tensorflow/
def _torch_train_step(self, data):
        pass  # See guide: keras.io/guides/custom_train_step_in_torch/<jupyter_output><empty_output><jupyter_text>RNG-using layers

Keras 3 has a new `keras.random` namespace, containing:

- `keras.random.normal`
- `keras.random.uniform`
- `keras.random.shuffle`
- etc.

These operations are **stateless**, which means that if you pass a `seed` argument, they will return the same result every time. Like this:

<jupyter_code>print(keras.random.normal(shape=(), seed=123))
print(keras.random.normal(shape=(), seed=123))<jupyter_output><empty_output><jupyter_text>Crucially, this differs from the behavior of stateful `tf.random` ops:<jupyter_code>print(tf.random.normal(shape=(), seed=123))
print(tf.random.normal(shape=(), seed=123))<jupyter_output><empty_output><jupyter_text>When you write an RNG-using layer, such as a custom dropout layer, you are going to want to use a different seed value at each layer call. However, you cannot just increment a Python integer and pass it, because while this would work fine when executed eagerly, it would not work as expected when using compilation (which is available with JAX, TensorFlow, and PyTorch). When compiling the layer, the first Python integer seed value seen by the layer would be hardcoded into the compiled graph. To address this, you should pass as the `seed` argument an instance of a stateful `keras.random.SeedGenerator` object, like this:

<jupyter_code>seed_generator = keras.random.SeedGenerator(1337)
print(keras.random.normal(shape=(), seed=seed_generator))
print(keras.random.normal(shape=(), seed=seed_generator))<jupyter_output><empty_output><jupyter_text>So when writing an RNG-using layer, you would use the following pattern:

<jupyter_code>class RandomNoiseLayer(keras.layers.Layer):
def __init__(self, noise_rate, **kwargs):
super().__init__(**kwargs)
self.noise_rate = noise_rate
self.seed_generator = keras.random.SeedGenerator(1337)
def call(self, inputs):
        noise = keras.random.uniform(
            shape=keras.ops.shape(inputs),  # generate noise with the same shape as the inputs
            minval=0,
            maxval=self.noise_rate,
            seed=self.seed_generator,
        )
return inputs + noise<jupyter_output><empty_output> | keras-io/guides/ipynb/migrating_to_keras_3.ipynb/0 | {
"file_path": "keras-io/guides/ipynb/migrating_to_keras_3.ipynb",
"repo_id": "keras-io",
"token_count": 12439
} | 126 |
# Customizing what happens in `fit()` with JAX
**Author:** [fchollet](https://twitter.com/fchollet)<br>
**Date created:** 2023/06/27<br>
**Last modified:** 2023/06/27<br>
**Description:** Overriding the training step of the Model class with JAX.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/guides/ipynb/custom_train_step_in_jax.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/guides/custom_train_step_in_jax.py)
---
## Introduction
When you're doing supervised learning, you can use `fit()` and everything works
smoothly.
When you need to take control of every little detail, you can write your own training
loop entirely from scratch.
But what if you need a custom training algorithm, but you still want to benefit from
the convenient features of `fit()`, such as callbacks, built-in distribution support,
or step fusing?
A core principle of Keras is **progressive disclosure of complexity**. You should
always be able to get into lower-level workflows in a gradual way. You shouldn't fall
off a cliff if the high-level functionality doesn't exactly match your use case. You
should be able to gain more control over the small details while retaining a
commensurate amount of high-level convenience.
When you need to customize what `fit()` does, you should **override the training step
function of the `Model` class**. This is the function that is called by `fit()` for
every batch of data. You will then be able to call `fit()` as usual -- and it will be
running your own learning algorithm.
Note that this pattern does not prevent you from building models with the Functional
API. You can do this whether you're building `Sequential` models, Functional API
models, or subclassed models.
Let's see how that works.
---
## Setup
```python
import os
# This guide can only be run with the JAX backend.
os.environ["KERAS_BACKEND"] = "jax"
import jax
import keras
import numpy as np
```
---
## A first simple example
Let's start from a simple example:
- We create a new class that subclasses `keras.Model`.
- We implement a fully-stateless `compute_loss_and_updates()` method
to compute the loss as well as the updated values for the non-trainable
variables of the model. Internally, it calls `stateless_call()` and
the built-in `compute_loss()`.
- We implement a fully-stateless `train_step()` method to compute current
metric values (including the loss) as well as updated values for the
trainable variables, the optimizer variables, and the metric variables.
Note that you can also take into account the `sample_weight` argument by:
- Unpacking the data as `x, y, sample_weight = data`
- Passing `sample_weight` to `compute_loss()`
- Passing `sample_weight` alongside `y` and `y_pred`
to metrics in `stateless_update_state()`
```python
class CustomModel(keras.Model):
def compute_loss_and_updates(
self,
trainable_variables,
non_trainable_variables,
x,
y,
training=False,
):
y_pred, non_trainable_variables = self.stateless_call(
trainable_variables,
non_trainable_variables,
x,
training=training,
)
loss = self.compute_loss(x, y, y_pred)
return loss, (y_pred, non_trainable_variables)
def train_step(self, state, data):
(
trainable_variables,
non_trainable_variables,
optimizer_variables,
metrics_variables,
) = state
x, y = data
# Get the gradient function.
grad_fn = jax.value_and_grad(self.compute_loss_and_updates, has_aux=True)
# Compute the gradients.
(loss, (y_pred, non_trainable_variables)), grads = grad_fn(
trainable_variables,
non_trainable_variables,
x,
y,
training=True,
)
# Update trainable variables and optimizer variables.
(
trainable_variables,
optimizer_variables,
) = self.optimizer.stateless_apply(
optimizer_variables, grads, trainable_variables
)
# Update metrics.
new_metrics_vars = []
for metric in self.metrics:
this_metric_vars = metrics_variables[
len(new_metrics_vars) : len(new_metrics_vars) + len(metric.variables)
]
if metric.name == "loss":
this_metric_vars = metric.stateless_update_state(this_metric_vars, loss)
else:
this_metric_vars = metric.stateless_update_state(
this_metric_vars, y, y_pred
)
logs = metric.stateless_result(this_metric_vars)
new_metrics_vars += this_metric_vars
# Return metric logs and updated state variables.
state = (
trainable_variables,
non_trainable_variables,
optimizer_variables,
new_metrics_vars,
)
return logs, state
```
Let's try this out:
```python
# Construct and compile an instance of CustomModel
inputs = keras.Input(shape=(32,))
outputs = keras.layers.Dense(1)(inputs)
model = CustomModel(inputs, outputs)
model.compile(optimizer="adam", loss="mse", metrics=["mae"])
# Just use `fit` as usual
x = np.random.random((1000, 32))
y = np.random.random((1000, 1))
model.fit(x, y, epochs=3)
```
<div class="k-default-codeblock">
```
Epoch 1/3
32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - mae: 0.8205 - loss: 0.7613
Epoch 2/3
32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 119us/step - mae: 0.5123 - loss: 0.3576
Epoch 3/3
32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 101us/step - mae: 0.4244 - loss: 0.2784
<keras.src.callbacks.history.History at 0x14a8e41f0>
```
</div>
---
## Going lower-level
Naturally, you could just skip passing a loss function in `compile()`, and instead do
everything *manually* in `train_step`. Likewise for metrics.
Here's a lower-level example, that only uses `compile()` to configure the optimizer:
```python
class CustomModel(keras.Model):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.loss_tracker = keras.metrics.Mean(name="loss")
self.mae_metric = keras.metrics.MeanAbsoluteError(name="mae")
self.loss_fn = keras.losses.MeanSquaredError()
def compute_loss_and_updates(
self,
trainable_variables,
non_trainable_variables,
x,
y,
training=False,
):
y_pred, non_trainable_variables = self.stateless_call(
trainable_variables,
non_trainable_variables,
x,
training=training,
)
loss = self.loss_fn(y, y_pred)
return loss, (y_pred, non_trainable_variables)
def train_step(self, state, data):
(
trainable_variables,
non_trainable_variables,
optimizer_variables,
metrics_variables,
) = state
x, y = data
# Get the gradient function.
grad_fn = jax.value_and_grad(self.compute_loss_and_updates, has_aux=True)
# Compute the gradients.
(loss, (y_pred, non_trainable_variables)), grads = grad_fn(
trainable_variables,
non_trainable_variables,
x,
y,
training=True,
)
# Update trainable variables and optimizer variables.
(
trainable_variables,
optimizer_variables,
) = self.optimizer.stateless_apply(
optimizer_variables, grads, trainable_variables
)
# Update metrics.
loss_tracker_vars = metrics_variables[: len(self.loss_tracker.variables)]
mae_metric_vars = metrics_variables[len(self.loss_tracker.variables) :]
loss_tracker_vars = self.loss_tracker.stateless_update_state(
loss_tracker_vars, loss
)
mae_metric_vars = self.mae_metric.stateless_update_state(
mae_metric_vars, y, y_pred
)
logs = {}
logs[self.loss_tracker.name] = self.loss_tracker.stateless_result(
loss_tracker_vars
)
logs[self.mae_metric.name] = self.mae_metric.stateless_result(mae_metric_vars)
new_metrics_vars = loss_tracker_vars + mae_metric_vars
# Return metric logs and updated state variables.
state = (
trainable_variables,
non_trainable_variables,
optimizer_variables,
new_metrics_vars,
)
return logs, state
@property
def metrics(self):
# We list our `Metric` objects here so that `reset_states()` can be
# called automatically at the start of each epoch
# or at the start of `evaluate()`.
return [self.loss_tracker, self.mae_metric]
# Construct an instance of CustomModel
inputs = keras.Input(shape=(32,))
outputs = keras.layers.Dense(1)(inputs)
model = CustomModel(inputs, outputs)
# We don't pass a loss or metrics here.
model.compile(optimizer="adam")
# Just use `fit` as usual -- you can use callbacks, etc.
x = np.random.random((1000, 32))
y = np.random.random((1000, 1))
model.fit(x, y, epochs=5)
```
<div class="k-default-codeblock">
```
Epoch 1/5
32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.2970 - mae: 0.4350
Epoch 2/5
32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 102us/step - loss: 0.2574 - mae: 0.3989
Epoch 3/5
32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 96us/step - loss: 0.2366 - mae: 0.3871
Epoch 4/5
32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 91us/step - loss: 0.2171 - mae: 0.3647
Epoch 5/5
32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 91us/step - loss: 0.2122 - mae: 0.3634
<keras.src.callbacks.history.History at 0x14c8e5de0>
```
</div>
---
## Providing your own evaluation step
What if you want to do the same for calls to `model.evaluate()`? Then you would
override `test_step` in exactly the same way. Here's what it looks like:
```python
class CustomModel(keras.Model):
def test_step(self, state, data):
# Unpack the data.
x, y = data
(
trainable_variables,
non_trainable_variables,
metrics_variables,
) = state
# Compute predictions and loss.
y_pred, non_trainable_variables = self.stateless_call(
trainable_variables,
non_trainable_variables,
x,
training=False,
)
loss = self.compute_loss(x, y, y_pred)
# Update metrics.
new_metrics_vars = []
for metric in self.metrics:
this_metric_vars = metrics_variables[
len(new_metrics_vars) : len(new_metrics_vars) + len(metric.variables)
]
if metric.name == "loss":
this_metric_vars = metric.stateless_update_state(this_metric_vars, loss)
else:
this_metric_vars = metric.stateless_update_state(
this_metric_vars, y, y_pred
)
logs = metric.stateless_result(this_metric_vars)
new_metrics_vars += this_metric_vars
# Return metric logs and updated state variables.
state = (
trainable_variables,
non_trainable_variables,
new_metrics_vars,
)
return logs, state
# Construct an instance of CustomModel
inputs = keras.Input(shape=(32,))
outputs = keras.layers.Dense(1)(inputs)
model = CustomModel(inputs, outputs)
model.compile(loss="mse", metrics=["mae"])
# Evaluate with our custom test_step
x = np.random.random((1000, 32))
y = np.random.random((1000, 1))
model.evaluate(x, y)
```
<div class="k-default-codeblock">
```
32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 777us/step - mae: 0.6349 - loss: 0.5714
[0.5714115500450134, 0.6256848573684692]
```
</div>
That's it!
| keras-io/guides/md/custom_train_step_in_jax.md/0 | {
"file_path": "keras-io/guides/md/custom_train_step_in_jax.md",
"repo_id": "keras-io",
"token_count": 5313
} | 127 |
# CutMix, MixUp, and RandAugment image augmentation with KerasCV
**Author:** [lukewood](https://twitter.com/luke_wood_ml)<br>
**Date created:** 2022/04/08<br>
**Last modified:** 2022/04/08<br>
**Description:** Use KerasCV to augment images with CutMix, MixUp, RandAugment, and more.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/guides/ipynb/keras_cv/cut_mix_mix_up_and_rand_augment.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/guides/keras_cv/cut_mix_mix_up_and_rand_augment.py)
---
## Overview
KerasCV makes it easy to assemble state-of-the-art, industry-grade data augmentation
pipelines for image classification and object detection tasks. KerasCV offers a wide
suite of preprocessing layers implementing common data augmentation techniques.
Perhaps three of the most useful layers are `keras_cv.layers.CutMix`,
`keras_cv.layers.MixUp`, and `keras_cv.layers.RandAugment`. These
layers are used in nearly all state-of-the-art image classification pipelines.
This guide will show you how to compose these layers into your own data
augmentation pipeline for image classification tasks. This guide will also walk you
through the process of customizing a KerasCV data augmentation pipeline.
---
## Imports & setup
KerasCV uses Keras 3 to work with any of TensorFlow, PyTorch, or JAX. In the
guide below, we will use the `jax` backend. This guide runs on the
TensorFlow or PyTorch backends with zero changes; simply update the
`KERAS_BACKEND` below.
```python
!pip install -q --upgrade keras-cv
!pip install -q --upgrade keras # Upgrade to Keras 3.
```
We begin by importing all required packages:
```python
import os
os.environ["KERAS_BACKEND"] = "jax" # @param ["tensorflow", "jax", "torch"]
import matplotlib.pyplot as plt
# Import tensorflow for `tf.data` and its preprocessing map functions
import tensorflow as tf
import tensorflow_datasets as tfds
import keras
import keras_cv
```
---
## Data loading
This guide uses the
[102 Category Flower Dataset](https://www.robots.ox.ac.uk/~vgg/data/flowers/102/)
for demonstration purposes.
To get started, we first load the dataset:
```python
BATCH_SIZE = 32
AUTOTUNE = tf.data.AUTOTUNE
tfds.disable_progress_bar()
data, dataset_info = tfds.load("oxford_flowers102", with_info=True, as_supervised=True)
train_steps_per_epoch = dataset_info.splits["train"].num_examples // BATCH_SIZE
val_steps_per_epoch = dataset_info.splits["test"].num_examples // BATCH_SIZE
```
<div class="k-default-codeblock">
```
Downloading and preparing dataset 328.90 MiB (download: 328.90 MiB, generated: 331.34 MiB, total: 660.25 MiB) to /usr/local/google/home/rameshsampath/tensorflow_datasets/oxford_flowers102/2.1.1...
Dataset oxford_flowers102 downloaded and prepared to /usr/local/google/home/rameshsampath/tensorflow_datasets/oxford_flowers102/2.1.1. Subsequent calls will reuse this data.
```
</div>
Next, we resize the images to a constant size, `(224, 224)`, and one-hot encode the
labels. Please note that `keras_cv.layers.CutMix` and `keras_cv.layers.MixUp` expect
targets to be one-hot encoded. This is because they modify the values of the targets
in a way that is not possible with a sparse label representation.
```python
IMAGE_SIZE = (224, 224)
num_classes = dataset_info.features["label"].num_classes
def to_dict(image, label):
image = tf.image.resize(image, IMAGE_SIZE)
image = tf.cast(image, tf.float32)
label = tf.one_hot(label, num_classes)
return {"images": image, "labels": label}
def prepare_dataset(dataset, split):
if split == "train":
return (
dataset.shuffle(10 * BATCH_SIZE)
.map(to_dict, num_parallel_calls=AUTOTUNE)
.batch(BATCH_SIZE)
)
if split == "test":
return dataset.map(to_dict, num_parallel_calls=AUTOTUNE).batch(BATCH_SIZE)
def load_dataset(split="train"):
dataset = data[split]
return prepare_dataset(dataset, split)
train_dataset = load_dataset()
```
Let's inspect some samples from our dataset:
```python
def visualize_dataset(dataset, title):
plt.figure(figsize=(6, 6)).suptitle(title, fontsize=18)
for i, samples in enumerate(iter(dataset.take(9))):
images = samples["images"]
plt.subplot(3, 3, i + 1)
plt.imshow(images[0].numpy().astype("uint8"))
plt.axis("off")
plt.show()
visualize_dataset(train_dataset, title="Before Augmentation")
```
![png](/img/guides/cut_mix_mix_up_and_rand_augment/cut_mix_mix_up_and_rand_augment_11_0.png)
Great! Now we can move onto the augmentation step.
---
## RandAugment
[RandAugment](https://arxiv.org/abs/1909.13719)
has been shown to provide improved image
classification results across numerous datasets.
It performs a standard set of augmentations on an image.
To use RandAugment in KerasCV, you need to provide a few values:
- `value_range` describes the range of values covered in your images
- `magnitude` is a value between 0 and 1, describing the strength of the perturbations
applied
- `augmentations_per_image` is an integer telling the layer how many augmentations to apply to each
individual image
- (Optional) `magnitude_stddev` allows `magnitude` to be randomly sampled
from a distribution with a standard deviation of `magnitude_stddev`
- (Optional) `rate` indicates the probability of applying the augmentation
at each layer.
You can read more about these
parameters in the
[`RandAugment` API documentation](/api/keras_cv/layers/preprocessing/rand_augment/).
Let's use KerasCV's RandAugment implementation.
```python
rand_augment = keras_cv.layers.RandAugment(
value_range=(0, 255),
augmentations_per_image=3,
magnitude=0.3,
magnitude_stddev=0.2,
rate=1.0,
)
def apply_rand_augment(inputs):
inputs["images"] = rand_augment(inputs["images"])
return inputs
train_dataset = load_dataset().map(apply_rand_augment, num_parallel_calls=AUTOTUNE)
```
Finally, let's inspect some of the results:
```python
visualize_dataset(train_dataset, title="After RandAugment")
```
![png](/img/guides/cut_mix_mix_up_and_rand_augment/cut_mix_mix_up_and_rand_augment_17_0.png)
Try tweaking the magnitude settings to see a wider variety of results.
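For instance, here is a quick sketch of a stronger policy, reusing the helpers defined above; the values are purely illustrative, not tuned:

```python
# Illustrative only: a stronger (hypothetical) RandAugment configuration.
strong_augment = keras_cv.layers.RandAugment(
    value_range=(0, 255),
    augmentations_per_image=3,
    magnitude=0.8,
    magnitude_stddev=0.2,
    rate=1.0,
)


def apply_strong_augment(inputs):
    inputs["images"] = strong_augment(inputs["images"])
    return inputs


visualize_dataset(
    load_dataset().map(apply_strong_augment, num_parallel_calls=AUTOTUNE),
    title="After stronger RandAugment",
)
```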
---
## CutMix and MixUp: generate high-quality inter-class examples
`CutMix` and `MixUp` allow us to produce inter-class examples. `CutMix` randomly cuts out
portions of one image and places them over another, and `MixUp` interpolates the pixel
values between two images. Both of these prevent the model from overfitting the
training distribution and improve the likelihood that the model can generalize to out of
distribution examples. Additionally, `CutMix` prevents your model from over-relying on
any particular feature to perform its classifications. You can read more about these
techniques in their respective papers:
- [CutMix: Train Strong Classifiers](https://arxiv.org/abs/1905.04899)
- [MixUp: Beyond Empirical Risk Minimization](https://arxiv.org/abs/1710.09412)
In this example, we will use `CutMix` and `MixUp` independently in a manually created
preprocessing pipeline. In most state of the art pipelines images are randomly
augmented by either `CutMix`, `MixUp`, or neither. The function below implements both.
```python
cut_mix = keras_cv.layers.CutMix()
mix_up = keras_cv.layers.MixUp()
def cut_mix_and_mix_up(samples):
samples = cut_mix(samples, training=True)
samples = mix_up(samples, training=True)
return samples
train_dataset = load_dataset().map(cut_mix_and_mix_up, num_parallel_calls=AUTOTUNE)
visualize_dataset(train_dataset, title="After CutMix and MixUp")
```
![png](/img/guides/cut_mix_mix_up_and_rand_augment/cut_mix_mix_up_and_rand_augment_20_0.png)
Great! Looks like we have successfully added `CutMix` and `MixUp` to our preprocessing
pipeline.
---
## Customizing your augmentation pipeline
Perhaps you want to exclude an augmentation from `RandAugment`, or perhaps you want to
include the `keras_cv.layers.GridMask` as an option alongside the default `RandAugment`
augmentations.
KerasCV allows you to construct production grade custom data augmentation pipelines using
the `keras_cv.layers.RandomAugmentationPipeline` layer. This class operates similarly to
`RandAugment`; selecting a random layer to apply to each image `augmentations_per_image`
times. `RandAugment` can be thought of as a specific case of
`RandomAugmentationPipeline`. In fact, our `RandAugment` implementation inherits from
`RandomAugmentationPipeline` internally.
In this example, we will create a custom `RandomAugmentationPipeline` by removing
`RandomRotation` layers from the standard `RandAugment` policy, and substituting a
`GridMask` layer in its place.
As a first step, let's use the helper method `RandAugment.get_standard_policy()` to
create a base pipeline.
```python
layers = keras_cv.layers.RandAugment.get_standard_policy(
value_range=(0, 255), magnitude=0.75, magnitude_stddev=0.3
)
```
First, let's filter out `RandomRotation` layers
```python
layers = [
layer for layer in layers if not isinstance(layer, keras_cv.layers.RandomRotation)
]
```
Next, let's add `keras_cv.layers.GridMask` to our layers:
```python
layers = layers + [keras_cv.layers.GridMask()]
```
Finally, we can put together our pipeline
```python
pipeline = keras_cv.layers.RandomAugmentationPipeline(
layers=layers, augmentations_per_image=3
)
def apply_pipeline(inputs):
inputs["images"] = pipeline(inputs["images"])
return inputs
```
Let's check out the results!
```python
train_dataset = load_dataset().map(apply_pipeline, num_parallel_calls=AUTOTUNE)
visualize_dataset(train_dataset, title="After custom pipeline")
```
![png](/img/guides/cut_mix_mix_up_and_rand_augment/cut_mix_mix_up_and_rand_augment_32_0.png)
Awesome! As you can see, no images were randomly rotated. You can customize the
pipeline however you like:
```python
pipeline = keras_cv.layers.RandomAugmentationPipeline(
layers=[keras_cv.layers.GridMask(), keras_cv.layers.Grayscale(output_channels=3)],
augmentations_per_image=1,
)
```
This pipeline will apply either `Grayscale` or `GridMask`:
```python
train_dataset = load_dataset().map(apply_pipeline, num_parallel_calls=AUTOTUNE)
visualize_dataset(train_dataset, title="After custom pipeline")
```
![png](/img/guides/cut_mix_mix_up_and_rand_augment/cut_mix_mix_up_and_rand_augment_36_0.png)
Looks great! You can use `RandomAugmentationPipeline` however you want.
---
## Training a CNN
As a final exercise, let's take some of these layers for a spin. In this section, we
will use `CutMix`, `MixUp`, and `RandAugment` to train a state of the art `EfficientNetV2`
image classifier on the Oxford flowers dataset.
```python
def preprocess_for_model(inputs):
images, labels = inputs["images"], inputs["labels"]
images = tf.cast(images, tf.float32)
return images, labels
train_dataset = (
load_dataset()
.map(apply_rand_augment, num_parallel_calls=AUTOTUNE)
.map(cut_mix_and_mix_up, num_parallel_calls=AUTOTUNE)
)
visualize_dataset(train_dataset, "CutMix, MixUp and RandAugment")
train_dataset = train_dataset.map(preprocess_for_model, num_parallel_calls=AUTOTUNE)
test_dataset = load_dataset(split="test")
test_dataset = test_dataset.map(preprocess_for_model, num_parallel_calls=AUTOTUNE)
train_dataset = train_dataset.prefetch(AUTOTUNE)
test_dataset = test_dataset.prefetch(AUTOTUNE)
```
![png](/img/guides/cut_mix_mix_up_and_rand_augment/cut_mix_mix_up_and_rand_augment_39_0.png)
Next we should create the model itself. Notice that we use `label_smoothing=0.1` in
the loss function. When using `MixUp`, label smoothing is _highly_ recommended.
```python
input_shape = IMAGE_SIZE + (3,)
def get_model():
model = keras_cv.models.ImageClassifier.from_preset(
"efficientnetv2_s", num_classes=num_classes
)
model.compile(
loss=keras.losses.CategoricalCrossentropy(label_smoothing=0.1),
optimizer=keras.optimizers.SGD(momentum=0.9),
metrics=["accuracy"],
)
return model
```
Finally we train the model:
```python
model = get_model()
model.fit(
train_dataset,
epochs=1,
validation_data=test_dataset,
)
```
<div class="k-default-codeblock">
```
32/32 ━━━━━━━━━━━━━━━━━━━━ 103s 2s/step - accuracy: 0.0059 - loss: 4.6941 - val_accuracy: 0.0114 - val_loss: 10.4028
<keras.src.callbacks.history.History at 0x7fd0d00e07c0>
```
</div>
---
## Conclusion & next steps
That's all it takes to assemble state of the art image augmentation pipelines with
KerasCV!
As an additional exercise for readers, you can:
- Perform a hyperparameter search over the `RandAugment` parameters to improve the
classifier accuracy
- Substitute the Oxford Flowers dataset with your own dataset
- Experiment with custom `RandomAugmentationPipeline` objects.
Currently, between Keras core and KerasCV there are
[_28 image augmentation layers_](https://keras.io/api/keras_cv/layers/preprocessing)!
Each of these can be used independently, or in a pipeline. Check them out, and if you
find an augmentation technique you need is missing, please file a
[GitHub issue on KerasCV](https://github.com/keras-team/keras-cv/issues).
| keras-io/guides/md/keras_cv/cut_mix_mix_up_and_rand_augment.md/0 | {
"file_path": "keras-io/guides/md/keras_cv/cut_mix_mix_up_and_rand_augment.md",
"repo_id": "keras-io",
"token_count": 4745
} | 128 |
# Working with preprocessing layers
**Authors:** Francois Chollet, Mark Omernick<br>
**Date created:** 2020/07/25<br>
**Last modified:** 2021/04/23<br>
**Description:** Overview of how to leverage preprocessing layers to create end-to-end models.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/guides/ipynb/preprocessing_layers.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/guides/preprocessing_layers.py)
---
## Keras preprocessing
The Keras preprocessing layers API allows developers to build Keras-native input
processing pipelines. These input processing pipelines can be used as independent
preprocessing code in non-Keras workflows, combined directly with Keras models, and
exported as part of a Keras SavedModel.
With Keras preprocessing layers, you can build and export models that are truly
end-to-end: models that accept raw images or raw structured data as input; models that
handle feature normalization or feature value indexing on their own.
---
## Available preprocessing
### Text preprocessing
- `tf.keras.layers.TextVectorization`: turns raw strings into an encoded
representation that can be read by an `Embedding` layer or `Dense` layer.
### Numerical features preprocessing
- `tf.keras.layers.Normalization`: performs feature-wise normalization of
input features.
- `tf.keras.layers.Discretization`: turns continuous numerical features
into integer categorical features.
### Categorical features preprocessing
- `tf.keras.layers.CategoryEncoding`: turns integer categorical features
into one-hot, multi-hot, or count dense representations.
- `tf.keras.layers.Hashing`: performs categorical feature hashing, also known as
the "hashing trick".
- `tf.keras.layers.StringLookup`: turns string categorical values into an encoded
representation that can be read by an `Embedding` layer or `Dense` layer.
- `tf.keras.layers.IntegerLookup`: turns integer categorical values into an
encoded representation that can be read by an `Embedding` layer or `Dense`
layer.
### Image preprocessing
These layers are for standardizing the inputs of an image model.
- `tf.keras.layers.Resizing`: resizes a batch of images to a target size.
- `tf.keras.layers.Rescaling`: rescales and offsets the values of a batch of
images (e.g. go from inputs in the `[0, 255]` range to inputs in the `[0, 1]`
range).
- `tf.keras.layers.CenterCrop`: returns a center crop of a batch of images.
### Image data augmentation
These layers apply random augmentation transforms to a batch of images. They
are only active during training.
- `tf.keras.layers.RandomCrop`
- `tf.keras.layers.RandomFlip`
- `tf.keras.layers.RandomTranslation`
- `tf.keras.layers.RandomRotation`
- `tf.keras.layers.RandomZoom`
- `tf.keras.layers.RandomContrast`
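As a rough sketch of how a few of these layers fit together (using randomly generated placeholder images; the sizes and rates below are arbitrary):

```python
import numpy as np
import keras
from keras import layers

# Placeholder batch of 8 RGB images with values in [0, 255].
images = np.random.randint(0, 256, size=(8, 256, 256, 3)).astype("float32")

# Standardization: resize to a fixed size, then rescale to the [0, 1] range.
standardize = keras.Sequential(
    [
        layers.Resizing(224, 224),
        layers.Rescaling(1.0 / 255),
    ]
)

# Augmentation: random flips and rotations (only active when training=True).
augment = keras.Sequential(
    [
        layers.RandomFlip("horizontal"),
        layers.RandomRotation(0.1),
    ]
)

x = augment(standardize(images), training=True)
print(x.shape)  # (8, 224, 224, 3)
```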
---
## The `adapt()` method
Some preprocessing layers have an internal state that can be computed based on
a sample of the training data. The list of stateful preprocessing layers is:
- `TextVectorization`: holds a mapping between string tokens and integer indices
- `StringLookup` and `IntegerLookup`: hold a mapping between input values and integer
indices.
- `Normalization`: holds the mean and standard deviation of the features.
- `Discretization`: holds information about value bucket boundaries.
Crucially, these layers are **non-trainable**. Their state is not set during training; it
must be set **before training**, either by initializing them from a precomputed constant,
or by "adapting" them on data.
You set the state of a preprocessing layer by exposing it to training data, via the
`adapt()` method:
```python
import numpy as np
import tensorflow as tf
import keras
from keras import layers
data = np.array(
[
[0.1, 0.2, 0.3],
[0.8, 0.9, 1.0],
[1.5, 1.6, 1.7],
]
)
layer = layers.Normalization()
layer.adapt(data)
normalized_data = layer(data)
print("Features mean: %.2f" % (normalized_data.numpy().mean()))
print("Features std: %.2f" % (normalized_data.numpy().std()))
```
<div class="k-default-codeblock">
```
Features mean: -0.00
Features std: 1.00
```
</div>
The `adapt()` method takes either a Numpy array or a `tf.data.Dataset` object. In the
case of `StringLookup` and `TextVectorization`, you can also pass a list of strings:
```python
data = [
"ξεῖν᾽, ἦ τοι μὲν ὄνειροι ἀμήχανοι ἀκριτόμυθοι",
"γίγνοντ᾽, οὐδέ τι πάντα τελείεται ἀνθρώποισι.",
"δοιαὶ γάρ τε πύλαι ἀμενηνῶν εἰσὶν ὀνείρων:",
"αἱ μὲν γὰρ κεράεσσι τετεύχαται, αἱ δ᾽ ἐλέφαντι:",
"τῶν οἳ μέν κ᾽ ἔλθωσι διὰ πριστοῦ ἐλέφαντος,",
"οἵ ῥ᾽ ἐλεφαίρονται, ἔπε᾽ ἀκράαντα φέροντες:",
"οἱ δὲ διὰ ξεστῶν κεράων ἔλθωσι θύραζε,",
"οἵ ῥ᾽ ἔτυμα κραίνουσι, βροτῶν ὅτε κέν τις ἴδηται.",
]
layer = layers.TextVectorization()
layer.adapt(data)
vectorized_text = layer(data)
print(vectorized_text)
```
<div class="k-default-codeblock">
```
tf.Tensor(
[[37 12 25 5 9 20 21 0 0]
[51 34 27 33 29 18 0 0 0]
[49 52 30 31 19 46 10 0 0]
[ 7 5 50 43 28 7 47 17 0]
[24 35 39 40 3 6 32 16 0]
[ 4 2 15 14 22 23 0 0 0]
[36 48 6 38 42 3 45 0 0]
[ 4 2 13 41 53 8 44 26 11]], shape=(8, 9), dtype=int64)
```
</div>
In addition, adaptable layers always expose an option to directly set state via
constructor arguments or weight assignment. If the intended state values are known at
layer construction time, or are calculated outside of the `adapt()` call, they can be set
without relying on the layer's internal computation. For instance, if external vocabulary
files for the `TextVectorization`, `StringLookup`, or `IntegerLookup` layers already
exist, those can be loaded directly into the lookup tables by passing a path to the
vocabulary file in the layer's constructor arguments.
Here's an example where you instantiate a `StringLookup` layer with precomputed vocabulary:
```python
vocab = ["a", "b", "c", "d"]
data = tf.constant([["a", "c", "d"], ["d", "z", "b"]])
layer = layers.StringLookup(vocabulary=vocab)
vectorized_data = layer(data)
print(vectorized_data)
```
<div class="k-default-codeblock">
```
tf.Tensor(
[[1 3 4]
[4 0 2]], shape=(2, 3), dtype=int64)
```
</div>
---
## Preprocessing data before the model or inside the model
There are two ways you could be using preprocessing layers:
**Option 1:** Make them part of the model, like this:
```python
inputs = keras.Input(shape=input_shape)
x = preprocessing_layer(inputs)
outputs = rest_of_the_model(x)
model = keras.Model(inputs, outputs)
```
With this option, preprocessing will happen on device, synchronously with the rest of the
model execution, meaning that it will benefit from GPU acceleration.
If you're training on a GPU, this is the best option for the `Normalization` layer, and for
all image preprocessing and data augmentation layers.
**Option 2:** apply it to your `tf.data.Dataset`, so as to obtain a dataset that yields
batches of preprocessed data, like this:
```python
dataset = dataset.map(lambda x, y: (preprocessing_layer(x), y))
```
With this option, your preprocessing will happen on a CPU, asynchronously, and will be
buffered before going into the model.
In addition, if you call `dataset.prefetch(tf.data.AUTOTUNE)` on your dataset,
the preprocessing will happen efficiently in parallel with training:
```python
dataset = dataset.map(lambda x, y: (preprocessing_layer(x), y))
dataset = dataset.prefetch(tf.data.AUTOTUNE)
model.fit(dataset, ...)
```
This is the best option for `TextVectorization`, and all structured data preprocessing
layers. It can also be a good option if you're training on a CPU and you use image preprocessing
layers.
Note that the `TextVectorization` layer can only be executed on a CPU, as it is mostly a
dictionary lookup operation. Therefore, if you are training your model on a GPU or a TPU,
you should put the `TextVectorization` layer in the `tf.data` pipeline to get the best performance.
**When running on a TPU, you should always place preprocessing layers in the `tf.data` pipeline**
(with the exception of `Normalization` and `Rescaling`, which run fine on a TPU and are commonly
used as the first layer in an image model).
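For example, here is a minimal sketch of that recommendation, using a tiny made-up text dataset and the `tf`/`layers` imports from above:

```python
# Tiny made-up labeled text dataset, just for illustration.
text_dataset = tf.data.Dataset.from_tensor_slices(
    (["hello world", "keras preprocessing layers", "hello keras"], [0, 1, 0])
)

# The dictionary-lookup-heavy TextVectorization layer stays on the CPU,
# inside the tf.data pipeline.
text_vectorizer = layers.TextVectorization(output_mode="int")
text_vectorizer.adapt(text_dataset.map(lambda x, y: x))

train_ds = (
    text_dataset.batch(2)
    .map(lambda x, y: (text_vectorizer(x), y), num_parallel_calls=tf.data.AUTOTUNE)
    .prefetch(tf.data.AUTOTUNE)
)
```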
---
## Benefits of doing preprocessing inside the model at inference time
Even if you go with option 2, you may later want to export an inference-only end-to-end
model that will include the preprocessing layers. The key benefit to doing this is that
**it makes your model portable** and it **helps reduce the
[training/serving skew](https://developers.google.com/machine-learning/guides/rules-of-ml#training-serving_skew)**.
When all data preprocessing is part of the model, other people can load and use your
model without having to be aware of how each feature is expected to be encoded &
normalized. Your inference model will be able to process raw images or raw structured
data, and will not require users of the model to be aware of the details of e.g. the
tokenization scheme used for text, the indexing scheme used for categorical features,
whether image pixel values are normalized to `[-1, +1]` or to `[0, 1]`, etc. This is
especially powerful if you're exporting
your model to another runtime, such as TensorFlow.js: you won't have to
reimplement your preprocessing pipeline in JavaScript.
If you initially put your preprocessing layers in your `tf.data` pipeline,
you can export an inference model that packages the preprocessing.
Simply instantiate a new model that chains
your preprocessing layers and your training model:
```python
inputs = keras.Input(shape=input_shape)
x = preprocessing_layer(inputs)
outputs = training_model(x)
inference_model = keras.Model(inputs, outputs)
```
---
## Preprocessing during multi-worker training
Preprocessing layers are compatible with the
[tf.distribute](https://www.tensorflow.org/api_docs/python/tf/distribute) API
for running training across multiple machines.
In general, preprocessing layers should be placed inside a `tf.distribute.Strategy.scope()`
and called either inside or before the model as discussed above.
```python
with strategy.scope():
inputs = keras.Input(shape=input_shape)
preprocessing_layer = tf.keras.layers.Hashing(10)
dense_layer = tf.keras.layers.Dense(16)
```
For more details, refer to the _Data preprocessing_ section
of the [Distributed input](https://www.tensorflow.org/tutorials/distribute/input)
tutorial.
---
## Quick recipes
### Image data augmentation
Note that image data augmentation layers are only active during training (similarly to
the `Dropout` layer).
```python
from tensorflow import keras
from tensorflow.keras import layers
# Create a data augmentation stage with horizontal flipping, rotations, zooms
data_augmentation = keras.Sequential(
[
layers.RandomFlip("horizontal"),
layers.RandomRotation(0.1),
layers.RandomZoom(0.1),
]
)
# Load some data
(x_train, y_train), _ = keras.datasets.cifar10.load_data()
input_shape = x_train.shape[1:]
classes = 10
# Create a tf.data pipeline of augmented images (and their labels)
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.batch(16).map(lambda x, y: (data_augmentation(x), y))
# Create a model and train it on the augmented image data
inputs = keras.Input(shape=input_shape)
x = layers.Rescaling(1.0 / 255)(inputs) # Rescale inputs
outputs = keras.applications.ResNet50( # Add the rest of the model
weights=None, input_shape=input_shape, classes=classes
)(x)
model = keras.Model(inputs, outputs)
model.compile(optimizer="rmsprop", loss="sparse_categorical_crossentropy")
model.fit(train_dataset, steps_per_epoch=5)
```
<div class="k-default-codeblock">
```
5/5 [==============================] - 9s 124ms/step - loss: 9.9572
<keras.src.callbacks.History at 0x7f749c4f5010>
```
</div>
You can see a similar setup in action in the example
[image classification from scratch](https://keras.io/examples/vision/image_classification_from_scratch/).
### Normalizing numerical features
```python
# Load some data
(x_train, y_train), _ = keras.datasets.cifar10.load_data()
x_train = x_train.reshape((len(x_train), -1))
input_shape = x_train.shape[1:]
classes = 10
# Create a Normalization layer and set its internal state using the training data
normalizer = layers.Normalization()
normalizer.adapt(x_train)
# Create a model that includes the normalization layer
inputs = keras.Input(shape=input_shape)
x = normalizer(inputs)
outputs = layers.Dense(classes, activation="softmax")(x)
model = keras.Model(inputs, outputs)
# Train the model
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy")
model.fit(x_train, y_train)
```
<div class="k-default-codeblock">
```
1563/1563 [==============================] - 3s 2ms/step - loss: 2.1200
<keras.src.callbacks.History at 0x7f749c3bd790>
```
</div>
### Encoding string categorical features via one-hot encoding
```python
# Define some toy data
data = tf.constant([["a"], ["b"], ["c"], ["b"], ["c"], ["a"]])
# Use StringLookup to build an index of the feature values and encode output.
lookup = layers.StringLookup(output_mode="one_hot")
lookup.adapt(data)
# Convert new test data (which includes unknown feature values)
test_data = tf.constant([["a"], ["b"], ["c"], ["d"], ["e"], [""]])
encoded_data = lookup(test_data)
print(encoded_data)
```
<div class="k-default-codeblock">
```
tf.Tensor(
[[0. 0. 0. 1.]
[0. 0. 1. 0.]
[0. 1. 0. 0.]
[1. 0. 0. 0.]
[1. 0. 0. 0.]
[1. 0. 0. 0.]], shape=(6, 4), dtype=float32)
```
</div>
Note that, here, index 0 is reserved for out-of-vocabulary values
(values that were not seen during `adapt()`).
You can see the `StringLookup` in action in the
[Structured data classification from scratch](https://keras.io/examples/structured_data/structured_data_classification_from_scratch/)
example.
### Encoding integer categorical features via one-hot encoding
```python
# Define some toy data
data = tf.constant([[10], [20], [20], [10], [30], [0]])
# Use IntegerLookup to build an index of the feature values and encode output.
lookup = layers.IntegerLookup(output_mode="one_hot")
lookup.adapt(data)
# Convert new test data (which includes unknown feature values)
test_data = tf.constant([[10], [10], [20], [50], [60], [0]])
encoded_data = lookup(test_data)
print(encoded_data)
```
<div class="k-default-codeblock">
```
tf.Tensor(
[[0. 0. 1. 0. 0.]
[0. 0. 1. 0. 0.]
[0. 1. 0. 0. 0.]
[1. 0. 0. 0. 0.]
[1. 0. 0. 0. 0.]
[0. 0. 0. 0. 1.]], shape=(6, 5), dtype=float32)
```
</div>
Note that index 0 is reserved for missing values (which you should specify as the value
0), and index 1 is reserved for out-of-vocabulary values (values that were not seen
during `adapt()`). You can configure this by using the `mask_token` and `oov_token`
constructor arguments of `IntegerLookup`.
You can see the `IntegerLookup` in action in the example
[structured data classification from scratch](https://keras.io/examples/structured_data/structured_data_classification_from_scratch/).
### Applying the hashing trick to an integer categorical feature
If you have a categorical feature that can take many different values (on the order of
10e3 or higher), where each value only appears a few times in the data,
it becomes impractical and ineffective to index and one-hot encode the feature values.
Instead, it can be a good idea to apply the "hashing trick": hash the values to a vector
of fixed size. This keeps the size of the feature space manageable, and removes the need
for explicit indexing.
```python
# Sample data: 10,000 random integers with values between 0 and 100,000
data = np.random.randint(0, 100000, size=(10000, 1))
# Use the Hashing layer to hash the values to the range [0, 64)
hasher = layers.Hashing(num_bins=64, salt=1337)
# Use the CategoryEncoding layer to multi-hot encode the hashed values
encoder = layers.CategoryEncoding(num_tokens=64, output_mode="multi_hot")
encoded_data = encoder(hasher(data))
print(encoded_data.shape)
```
<div class="k-default-codeblock">
```
(10000, 64)
```
</div>
### Encoding text as a sequence of token indices
This is how you should preprocess text to be passed to an `Embedding` layer.
```python
# Define some text data to adapt the layer
adapt_data = tf.constant(
[
"The Brain is wider than the Sky",
"For put them side by side",
"The one the other will contain",
"With ease and You beside",
]
)
# Create a TextVectorization layer
text_vectorizer = layers.TextVectorization(output_mode="int")
# Index the vocabulary via `adapt()`
text_vectorizer.adapt(adapt_data)
# Try out the layer
print(
"Encoded text:\n",
text_vectorizer(["The Brain is deeper than the sea"]).numpy(),
)
# Create a simple model
inputs = keras.Input(shape=(None,), dtype="int64")
x = layers.Embedding(input_dim=text_vectorizer.vocabulary_size(), output_dim=16)(inputs)
x = layers.GRU(8)(x)
outputs = layers.Dense(1)(x)
model = keras.Model(inputs, outputs)
# Create a labeled dataset (which includes unknown tokens)
train_dataset = tf.data.Dataset.from_tensor_slices(
(["The Brain is deeper than the sea", "for if they are held Blue to Blue"], [1, 0])
)
# Preprocess the string inputs, turning them into int sequences
train_dataset = train_dataset.batch(2).map(lambda x, y: (text_vectorizer(x), y))
# Train the model on the int sequences
print("\nTraining model...")
model.compile(optimizer="rmsprop", loss="mse")
model.fit(train_dataset)
# For inference, you can export a model that accepts strings as input
inputs = keras.Input(shape=(1,), dtype="string")
x = text_vectorizer(inputs)
outputs = model(x)
end_to_end_model = keras.Model(inputs, outputs)
# Call the end-to-end model on test data (which includes unknown tokens)
print("\nCalling end-to-end model on test string...")
test_data = tf.constant(["The one the other will absorb"])
test_output = end_to_end_model(test_data)
print("Model output:", test_output)
```
<div class="k-default-codeblock">
```
Encoded text:
[[ 2 19 14 1 9 2 1]]
```
</div>
<div class="k-default-codeblock">
```
Training model...
1/1 [==============================] - 2s 2s/step - loss: 0.5227
```
</div>
<div class="k-default-codeblock">
```
Calling end-to-end model on test string...
Model output: tf.Tensor([[-0.00107805]], shape=(1, 1), dtype=float32)
```
</div>
You can see the `TextVectorization` layer in action, combined with an `Embedding` layer,
in the example
[text classification from scratch](https://keras.io/examples/nlp/text_classification_from_scratch/).
Note that when training such a model, for best performance, you should always
use the `TextVectorization` layer as part of the input pipeline.
### Encoding text as a dense matrix of N-grams with multi-hot encoding
This is how you should preprocess text to be passed to a `Dense` layer.
```python
# Define some text data to adapt the layer
adapt_data = tf.constant(
[
"The Brain is wider than the Sky",
"For put them side by side",
"The one the other will contain",
"With ease and You beside",
]
)
# Instantiate TextVectorization with "multi_hot" output_mode
# and ngrams=2 (index all bigrams)
text_vectorizer = layers.TextVectorization(output_mode="multi_hot", ngrams=2)
# Index the bigrams via `adapt()`
text_vectorizer.adapt(adapt_data)
# Try out the layer
print(
"Encoded text:\n",
text_vectorizer(["The Brain is deeper than the sea"]).numpy(),
)
# Create a simple model
inputs = keras.Input(shape=(text_vectorizer.vocabulary_size(),))
outputs = layers.Dense(1)(inputs)
model = keras.Model(inputs, outputs)
# Create a labeled dataset (which includes unknown tokens)
train_dataset = tf.data.Dataset.from_tensor_slices(
(["The Brain is deeper than the sea", "for if they are held Blue to Blue"], [1, 0])
)
# Preprocess the string inputs, turning them into int sequences
train_dataset = train_dataset.batch(2).map(lambda x, y: (text_vectorizer(x), y))
# Train the model on the int sequences
print("\nTraining model...")
model.compile(optimizer="rmsprop", loss="mse")
model.fit(train_dataset)
# For inference, you can export a model that accepts strings as input
inputs = keras.Input(shape=(1,), dtype="string")
x = text_vectorizer(inputs)
outputs = model(x)
end_to_end_model = keras.Model(inputs, outputs)
# Call the end-to-end model on test data (which includes unknown tokens)
print("\nCalling end-to-end model on test string...")
test_data = tf.constant(["The one the other will absorb"])
test_output = end_to_end_model(test_data)
print("Model output:", test_output)
```
<div class="k-default-codeblock">
```
WARNING:tensorflow:5 out of the last 1567 calls to <function PreprocessingLayer.make_adapt_function.<locals>.adapt_step at 0x7f73dc15eac0> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has reduce_retracing=True option that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/guide/function#controlling_retracing and https://www.tensorflow.org/api_docs/python/tf/function for more details.
Encoded text:
[[1. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 1. 1. 0. 0. 0. 0. 0.
0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 1. 1. 0. 0. 0.]]
```
</div>
<div class="k-default-codeblock">
```
Training model...
1/1 [==============================] - 0s 204ms/step - loss: 1.1430
```
</div>
<div class="k-default-codeblock">
```
Calling end-to-end model on test string...
Model output: tf.Tensor([[0.64093614]], shape=(1, 1), dtype=float32)
```
</div>
### Encoding text as a dense matrix of N-grams with TF-IDF weighting
This is an alternative way of preprocessing text before passing it to a `Dense` layer.
```python
# Define some text data to adapt the layer
adapt_data = tf.constant(
[
"The Brain is wider than the Sky",
"For put them side by side",
"The one the other will contain",
"With ease and You beside",
]
)
# Instantiate TextVectorization with "tf-idf" output_mode
# (multi-hot with TF-IDF weighting) and ngrams=2 (index all bigrams)
text_vectorizer = layers.TextVectorization(output_mode="tf-idf", ngrams=2)
# Index the bigrams and learn the TF-IDF weights via `adapt()`
text_vectorizer.adapt(adapt_data)
# Try out the layer
print(
"Encoded text:\n",
text_vectorizer(["The Brain is deeper than the sea"]).numpy(),
)
# Create a simple model
inputs = keras.Input(shape=(text_vectorizer.vocabulary_size(),))
outputs = layers.Dense(1)(inputs)
model = keras.Model(inputs, outputs)
# Create a labeled dataset (which includes unknown tokens)
train_dataset = tf.data.Dataset.from_tensor_slices(
(["The Brain is deeper than the sea", "for if they are held Blue to Blue"], [1, 0])
)
# Preprocess the string inputs, turning them into int sequences
train_dataset = train_dataset.batch(2).map(lambda x, y: (text_vectorizer(x), y))
# Train the model on the int sequences
print("\nTraining model...")
model.compile(optimizer="rmsprop", loss="mse")
model.fit(train_dataset)
# For inference, you can export a model that accepts strings as input
inputs = keras.Input(shape=(1,), dtype="string")
x = text_vectorizer(inputs)
outputs = model(x)
end_to_end_model = keras.Model(inputs, outputs)
# Call the end-to-end model on test data (which includes unknown tokens)
print("\nCalling end-to-end model on test string...")
test_data = tf.constant(["The one the other will absorb"])
test_output = end_to_end_model(test_data)
print("Model output:", test_output)
```
<div class="k-default-codeblock">
```
WARNING:tensorflow:6 out of the last 1568 calls to <function PreprocessingLayer.make_adapt_function.<locals>.adapt_step at 0x7f73bc6bf6a0> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has reduce_retracing=True option that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/guide/function#controlling_retracing and https://www.tensorflow.org/api_docs/python/tf/function for more details.
Encoded text:
[[5.461647 1.6945957 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0.
0. 0. 1.0986123 1.0986123 1.0986123 0. 0.
0. 0. 0. 0. 0. 0. 0.
1.0986123 0. 0. 0. 0. 0. 0.
0. 1.0986123 1.0986123 0. 0. 0. ]]
```
</div>
<div class="k-default-codeblock">
```
Training model...
1/1 [==============================] - 1s 567ms/step - loss: 16.3522
```
</div>
<div class="k-default-codeblock">
```
Calling end-to-end model on test string...
Model output: tf.Tensor([[-0.20062147]], shape=(1, 1), dtype=float32)
```
</div>
---
## Important gotchas
### Working with lookup layers with very large vocabularies
You may find yourself working with a very large vocabulary in a `TextVectorization`, a `StringLookup` layer,
or an `IntegerLookup` layer. Typically, a vocabulary larger than 500MB would be considered "very large".
In such a case, for best performance, you should avoid using `adapt()`.
Instead, pre-compute your vocabulary in advance
(you could use Apache Beam or TF Transform for this)
and store it in a file. Then load the vocabulary into the layer at construction
time by passing the file path as the `vocabulary` argument.
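For example, here is a minimal sketch, with a toy vocabulary file written inline purely for illustration (a real vocabulary would be precomputed offline):

```python
# Write a tiny stand-in vocabulary file, one token per line.
# In practice this file would be precomputed (e.g. with Apache Beam or TF Transform).
with open("vocabulary.txt", "w") as f:
    f.write("the\nto\nand\nof\n")

# Load the precomputed vocabulary at construction time -- no `adapt()` call needed.
lookup = layers.StringLookup(vocabulary="vocabulary.txt")
print(lookup(tf.constant([["the", "of", "never_seen_before"]])))
```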
| keras-io/guides/md/preprocessing_layers.md/0 | {
"file_path": "keras-io/guides/md/preprocessing_layers.md",
"repo_id": "keras-io",
"token_count": 8972
} | 129 |
<meta http-equiv="refresh" content="0; URL='https://keras.io/api/keras_nlp/modeling_layers/transformer_encoder/'" />
| keras-io/redirects/api/keras_nlp/layers/transformer_encoder/index.html/0 | {
"file_path": "keras-io/redirects/api/keras_nlp/layers/transformer_encoder/index.html",
"repo_id": "keras-io",
"token_count": 47
} | 130 |
<meta http-equiv="refresh" content="0; URL='https://keras.io/guides/functional_api/'" />
| keras-io/redirects/getting-started/functional-api-guide/index.html/0 | {
"file_path": "keras-io/redirects/getting-started/functional-api-guide/index.html",
"repo_id": "keras-io",
"token_count": 34
} | 131 |
<meta http-equiv="refresh" content="0; URL='https://keras.io/api/layers/recurrent_layers/'" />
| keras-io/redirects/layers/wrappers/index.html/0 | {
"file_path": "keras-io/redirects/layers/wrappers/index.html",
"repo_id": "keras-io",
"token_count": 38
} | 132 |
<meta http-equiv="refresh" content="0; URL='https://keras.io/why_keras/'" />
| keras-io/redirects/why-use-keras/index.html/0 | {
"file_path": "keras-io/redirects/why-use-keras/index.html",
"repo_id": "keras-io",
"token_count": 32
} | 133 |
"""Keras tutobooks implementation.
A tutobook is a tutorial available simultaneously as a notebook,
as a Python script, and as a nicely rendered webpage.
Its source-of-truth (for manual edition and version control) is
its Python script form, but you can also create one by starting
from a notebook and converting it with the command `nb2py`.
Text cells are stored in markdown-formatted comment blocks.
The first line (starting with " * 3) may optionally contain a special
annotation, one of:
- invisible: do not render this block.
- shell: execute this block while prefixing each line with `!`.
The script form should start with a header with the following fields:
Title:
Author: (could be `Authors`: as well, and may contain markdown links)
Date created: (date in yyyy/mm/dd format)
Last modified: (date in yyyy/mm/dd format)
Description: (one-line text description)
## How to add a new code example to Keras.io
You would typically start from an existing notebook.
Save it to disk (let's say as `path_to_your_nb.ipynb`).
`cd` to the `keras-io/scripts/` directory.
Then run:
```
python tutobooks nb2py path_to_your_nb.ipynb ../examples/your_example.py
```
This will create the file `examples/your_example.py`. Open it,
fill in the headers, and generally edit it so that it looks nice.
NOTE THAT THE CONVERSION SCRIPT MAY MAKE MISTAKES IN ITS ATTEMPTS
TO SHORTEN LINES. MAKE SURE TO PROOFREAD THE GENERATED .py IN FULL.
Or alternatively, make sure to keep your lines reasonably-sized (<90 char)
to start with, so that the script won't have to shorten them.
You can then preview what it looks like when converted back again
to ipynb by running:
```
python tutobooks py2nb ../examples/your_example.py preview.ipynb
```
NOTE THAT THIS COMMAND WILL ERROR OUT IF ANY CELLS TAKES TOO LONG
TO EXECUTE. In that case, make your code lighter/faster.
Remember that examples are meant to demonstrate workflows, not
train state-of-the-art models. They should
stay very lightweight.
Open the generated `preview.ipynb` and make sure it looks like what
you expect. If not, keep editing `your_example.py` until it does.
Finally, submit a PR adding `examples/your_example.py`.
"""
import os
import sys
import json
import copy
import random
import shutil
import tempfile
from pathlib import Path
TIMEOUT = 12 * 60 * 60 # 12 hours
MAX_LOC = 350
def nb_to_py(nb_path, py_path):
f = open(nb_path)
content = f.read()
f.close()
nb = json.loads(content)
py = '"""\n'
py += "Title: FILLME\n"
py += "Author: FILLME\n"
py += "Date created: FILLME\n"
py += "Last modified: FILLME\n"
py += "Description: FILLME\n"
py += '"""\n'
for cell in nb["cells"]:
if cell["cell_type"] == "code":
# Is it a shell cell?
if cell["source"] and cell["source"][0] and cell["source"][0][0] == "!":
# It's a shell cell
py += '"""shell\n'
py += "".join(cell["source"]) + "\n"
py += '"""\n\n'
else:
# It's a Python cell
py += "".join(cell["source"]) + "\n\n"
elif cell["cell_type"] == "markdown":
py += '"""\n'
py += "".join(cell["source"]) + "\n"
py += '"""\n\n'
# Save file
f = open(py_path, "w")
f.write(py)
f.close()
# Format file with Black
os.system("black " + py_path)
# Shorten lines
py = open(py_path).read()
try:
py = _shorten_lines(py)
finally:
f = open(py_path, "w")
f.write(py)
f.close()
def py_to_nb(py_path, nb_path, fill_outputs=False):
f = open(py_path)
py = f.read()
f.close()
validate(py)
header, _, py, tag = _get_next_script_element(py)
attributes = _parse_header(header)
cells = []
loc = 0
# Write first header cell
header_cell = {
"cell_type": "markdown",
"source": [
"# " + attributes["title"] + "\n",
"\n",
"**" + attributes["auth_field"] + ":** " + attributes["author"] + "<br>\n",
"**Date created:** " + attributes["date_created"] + "<br>\n",
"**Last modified:** " + attributes["last_modified"] + "<br>\n",
"**Description:** " + attributes["description"],
],
"metadata": {"colab_type": "text"},
}
cells.append(header_cell)
while py:
e, cell_type, py, tag = _get_next_script_element(py)
lines = e.split("\n")
if all(l == "" for l in lines):
continue
if lines and not lines[0]:
lines = lines[1:]
source = [l + "\n" for l in lines]
# Drop last newline char
if source and not source[-1].strip():
source = source[:-1]
if source:
source[-1] = source[-1].rstrip()
if tag == "shell":
source = ["!" + l for l in source]
cell_type = "code"
if tag != "invisible" and source:
cell = {"cell_type": cell_type, "source": source}
if cell_type == "code":
cell["outputs"] = []
cell["metadata"] = {"colab_type": "code"}
cell["execution_count"] = 0
loc += _count_locs(source)
else:
cell["metadata"] = {"colab_type": "text"}
cells.append(cell)
notebook = {}
for key in NB_BASE.keys():
notebook[key] = copy.deepcopy(NB_BASE[key])
notebook["metadata"]["colab"]["name"] = str(py_path).split("/")[-1][:-3]
notebook["metadata"]["accelerator"] = attributes["accelerator"]
notebook["cells"] = cells
if loc > MAX_LOC:
raise ValueError(
f"Found {loc} lines of code, but expected fewer than {MAX_LOC}"
)
f = open(nb_path, "w")
f.write(json.dumps(notebook, indent=1, sort_keys=True))
f.close()
if fill_outputs:
print("Generating ipynb")
parent_dir = Path(nb_path).parent
current_files = os.listdir(parent_dir)
try:
os.system(
"jupyter nbconvert --to notebook --execute --debug "
+ str(nb_path)
+ " --inplace"
+ " --ExecutePreprocessor.timeout="
+ str(TIMEOUT)
)
finally:
new_files = os.listdir(parent_dir)
for fname in new_files:
if fname not in current_files:
fpath = parent_dir / fname
if os.path.isdir(fpath):
print("Removing created folder:", fname)
shutil.rmtree(fpath)
else:
print("Removing created file:", fname)
os.remove(fpath)
def nb_to_md(nb_path, md_path, img_dir, working_dir=None):
img_exts = ("png", "jpg", "jpeg")
# Assumes an already populated notebook.
assert str(md_path).endswith(".md")
current_dir = os.getcwd()
original_img_dir = str(img_dir)
if original_img_dir.endswith("/"):
original_img_dir = original_img_dir[:-1]
img_dir = os.path.abspath(img_dir)
nb_path = os.path.abspath(nb_path)
nb_fname = str(nb_path).split(os.path.sep)[-1]
del_working_dir = False
if working_dir is None:
del_working_dir = True
working_dir = "tmp_" + str(random.randint(1e6, 1e7))
if not os.path.exists(working_dir):
os.makedirs(working_dir)
print("Using working_dir:", working_dir)
os.chdir(working_dir)
shutil.copyfile(nb_path, nb_fname)
md_name = str(md_path).split("/")[-1][:-3]
target_md = md_name + ".md"
img_dir = Path(img_dir) / md_name
if not os.path.exists(img_dir):
os.makedirs(img_dir)
os.system(
"jupyter nbconvert --to markdown --execute --debug "
+ nb_fname
+ " --output "
+ target_md
+ " --ExecutePreprocessor.timeout="
+ str(TIMEOUT)
)
if os.path.exists(md_name + ".md"):
success = True
tmp_img_dir = md_name + "_files"
if os.path.exists(tmp_img_dir):
for fname in os.listdir(tmp_img_dir):
if fname.endswith(img_exts):
src = Path(tmp_img_dir) / fname
target = Path(img_dir) / fname
print("copy", src, "to", target)
shutil.copyfile(src, target)
os.chdir(current_dir)
md_content = open(Path(working_dir) / (md_name + ".md")).read()
for ext in img_exts:
md_content = md_content.replace(
"![" + ext + "](" + md_name + "_files",
"![" + ext + "](" + original_img_dir + "/" + md_name,
)
md_content = _make_output_code_blocks(md_content)
open(md_path, "w").write(md_content)
else:
success = False
os.chdir(current_dir)
if del_working_dir:
shutil.rmtree(working_dir)
if not success:
raise RuntimeError(
"An error was encountered when attempting to run the notebook. "
"See logs for details."
)
def py_to_md(py_path, nb_path, md_path, img_dir, working_dir=None):
py_to_nb(py_path, nb_path, fill_outputs=False)
nb_to_md(nb_path, md_path, img_dir, working_dir=working_dir)
def validate(py):
"""Validate the format of a tutobook script.
Specifically:
- validate headers
- validate style with black
"""
lines = py.split("\n")
if not lines[0].startswith('"""'):
raise ValueError('Missing `"""`-fenced header at top of script.')
if not lines[1].startswith("Title: "):
raise ValueError("Missing `Title:` field.")
if not lines[2].startswith("Author: ") and not lines[2].startswith("Authors: "):
raise ValueError("Missing `Author:` field.")
if not lines[3].startswith("Date created: "):
raise ValueError("Missing `Date created:` field.")
if not lines[4].startswith("Last modified: "):
raise ValueError("Missing `Last modified:` field.")
if not lines[5].startswith("Description: "):
raise ValueError("Missing `Description:` field.")
if not lines[6].startswith("Accelerator: "):
raise ValueError("Missing `Accelerator:` field.")
description = lines[5][len("Description: ") :]
if not description:
raise ValueError("Missing `Description:` field content.")
if not description[0] == description[0].upper():
raise ValueError("Description field content must be capitalized.")
if not description[-1] == ".":
raise ValueError("Description field content must end with a period.")
if len(description) > 100:
raise ValueError("Description field content must be less than 100 chars.")
accelerator = lines[6][len("Accelerator: ") :]
accelerator_options = ["GPU", "TPU", "None"]
if accelerator not in accelerator_options:
raise ValueError(
f"Accelerator field content must be one of: {accelerator_options}. "
f"Received: accelerator={accelerator}"
)
for i, line in enumerate(lines):
if line.startswith('"""') and line.endswith('"""') and len(line) > 3:
raise ValueError(
'Do not use single line `"""`-fenced comments. '
"Encountered at line %d" % (i,)
)
for i, line in enumerate(lines):
if line.endswith(" "):
raise ValueError("Found trailing space on line %d; line: `%s`" % (i, line))
# Validate style with black
tmp = tempfile.gettempdir()
fpath = os.path.join(tmp, str(random.randint(1e6, 1e7)) + ".py")
f = open(fpath, "w")
pre_formatting = "\n".join(lines)
f.write(pre_formatting)
f.close()
os.system("black " + fpath)
f = open(fpath)
formatted = f.read()
f.close()
os.remove(fpath)
if formatted != pre_formatting:
raise ValueError(
"Your python file did not follow `black` conventions. "
"Run `black your_file.py` to autoformat it."
)
# Extra checks.
if "//arxiv.org/pdf/" in py:
raise ValueError(
"Do not link to arXiv PDFs directly. " "Instead, link to the abstract page."
)
def count_locs_in_file(py_path):
f = open(py_path)
py = f.read()
f.close()
_get_next_script_element(py) # Header
loc = 0
while py:
e, cell_type, py, _ = _get_next_script_element(py)
lines = e.split("\n")
if cell_type == "code":
loc += _count_locs(lines)
return loc
def _count_locs(lines):
loc = 0
string_open = False
for line in lines:
line = line.strip()
if not line or line.startswith("#"):
continue
if not string_open:
if not line.startswith('"""'):
loc += 1
else:
if not line.endswith('"""'):
string_open = True
else:
if line.startswith('"""'):
string_open = False
return loc
def _shorten_lines(py):
max_len = 90
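    # Greedy line-wrapping: any line longer than `max_len` characters is split
    # at the last space before the limit (when one exists), and the remainder
    # is processed again on the next iteration.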
lines = []
for line in py.split("\n"):
if len(line) <= max_len:
lines.append(line)
continue
i = 0
while len(line) > max_len:
line = line.lstrip()
if " " not in line[1:]:
lines.append(line)
break
else:
short_line = line[:max_len]
line = line[max_len:]
if " " in short_line:
reversed_short_line = short_line[::-1]
index = reversed_short_line.find(" ") + 1
line = short_line[-index:] + line
short_line = short_line[:-index]
lines.append(short_line.lstrip())
i += 1
            if i > 10:
                raise RuntimeError("Unable to shorten line: " + line)
lines.append(line.lstrip())
return "\n".join(lines)
def _get_next_script_element(py):
lines = py.split("\n")
assert lines
elines = []
i = 0
tag = None
if lines[0].startswith('"""'):
assert len(lines) >= 2
etype = "markdown"
if len(lines[0]) > 3:
tag = lines[0][3:]
if tag not in ["shell", "invisible"]:
raise ValueError("Found unknown cell tag:", tag)
lines = lines[1:]
else:
etype = "code"
for i, line in enumerate(lines):
if line.startswith('"""'):
break
else:
elines.append(line)
if etype == "markdown":
py = "\n".join(lines[i + 1 :])
else:
py = "\n".join(lines[i:])
e = "\n".join(elines)
return e, etype, py, tag
def _parse_header(header):
lines = header.split("\n")
if len(lines) not in (6, 7):
raise ValueError("Invalid header, it should be exactly 6 or 7 lines.")
title = lines[0][len("Title: ") :]
author_line = lines[1]
if author_line.startswith("Authors"):
author = author_line[len("Authors: ") :]
auth_field = "Authors"
else:
author = author_line[len("Author: ") :]
auth_field = "Author"
date_created = lines[2][len("Date created: ") :]
last_modified = lines[3][len("Last modified: ") :]
description = lines[4][len("Description: ") :]
accelerator = lines[5][len("Accelerator: ") :]
return {
"title": title,
"author": author,
"auth_field": auth_field,
"date_created": date_created,
"last_modified": last_modified,
"description": description,
"accelerator": accelerator,
}
def _make_output_code_blocks(md):
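    # nbconvert renders cell outputs as lines indented by four spaces. Collect
    # each run of such lines and wrap it in a `<div class="k-default-codeblock">`
    # fenced code block.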
lines = md.split("\n")
output_lines = []
final_lines = []
is_inside_backticks = False
def is_output_line(line, prev_line, output_lines):
if line.startswith(" ") and len(line) >= 5:
            if output_lines or (prev_line.strip() == "" and line.strip()):
return True
return False
def flush(output_lines, final_lines):
final_lines.append('<div class="k-default-codeblock">')
final_lines.append("```")
if len(output_lines) == 1:
line = output_lines[0]
final_lines.append(line[4:])
else:
for line in output_lines:
final_lines.append(line[4:])
final_lines.append("```")
final_lines.append("</div>")
for i, line in enumerate(lines):
if line.startswith("```"):
is_inside_backticks = not is_inside_backticks
final_lines.append(line)
continue
if is_inside_backticks:
final_lines.append(line)
continue
        if i > 0 and is_output_line(line, lines[i - 1], output_lines):
output_lines.append(line)
elif not line:
if output_lines:
if output_lines[-1]:
output_lines.append(line)
else:
final_lines.append(line)
else:
if output_lines:
flush(output_lines, final_lines)
output_lines = []
final_lines.append(line)
if output_lines:
flush(output_lines, final_lines)
return "\n".join(final_lines)
NB_BASE = {
"metadata": {
"accelerator": "GPU",
"colab": {
"collapsed_sections": [],
"name": "", # FILL ME
"private_outputs": False,
"provenance": [],
"toc_visible": True,
},
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3",
},
"language_info": {
"codemirror_mode": {"name": "ipython", "version": 3},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.0",
},
},
"nbformat": 4,
"nbformat_minor": 0,
}
if __name__ == "__main__":
cmd = sys.argv[1]
if cmd not in {"nb2py", "py2nb", "count_loc"}:
raise ValueError(
"Specify a command: either "
"`nb2py source_filename.ipynb target_filename.py` or "
"`py2nb source_filename.py target_file name.ipynb` or "
"`count_loc source_filename.py`."
)
if cmd == "count_loc":
source = sys.argv[2]
loc = count_locs_in_file(source)
print(f"Counted {loc} lines of code in {source}.")
else:
if len(sys.argv) < 4:
raise ValueError("Specify a source filename and a target filename")
source = sys.argv[2]
target = sys.argv[3]
if cmd == "py2nb":
if not source.endswith(".py"):
raise ValueError(
f"The source filename should be a Python file. Got: {source}"
)
if not target.endswith(".ipynb"):
raise ValueError(
f"The target filename should be a notebook file. Got: {target}"
)
py_to_nb(source, target)
if cmd == "nb2py":
if not source.endswith(".ipynb"):
raise ValueError(
f"The source filename should be a notebook file. Got: {source}"
)
if not target.endswith(".py"):
raise ValueError(
f"The target filename should be a Python file. Got: {target}"
)
nb_to_py(source, target)
| keras-io/scripts/tutobooks.py/0 | {
"file_path": "keras-io/scripts/tutobooks.py",
"repo_id": "keras-io",
"token_count": 9254
} | 134 |
# KerasNLP Metrics
KerasNLP metrics are `keras.Metric` subclasses for NLP-specific use cases.
{{toc}}
| keras-io/templates/api/keras_nlp/metrics/index.md/0 | {
"file_path": "keras-io/templates/api/keras_nlp/metrics/index.md",
"repo_id": "keras-io",
"token_count": 39
} | 135 |
# Preprocessing layers
{{toc}} | keras-io/templates/api/layers/preprocessing_layers/index.md/0 | {
"file_path": "keras-io/templates/api/layers/preprocessing_layers/index.md",
"repo_id": "keras-io",
"token_count": 9
} | 136 |
# Code examples
Our code examples are short (less than 300 lines of code), focused demonstrations of vertical deep learning workflows.
All of our examples are written as Jupyter notebooks and can be run in one click in [Google Colab](https://colab.research.google.com/notebooks/welcome.ipynb),
a hosted notebook environment that requires no setup and runs in the cloud. Google Colab includes GPU and TPU runtimes.
{{examples_list}}
## Adding a new code example
We welcome new code examples! Here are our rules:
- They should be shorter than 300 lines of code (comments may be as long as you want).
- They should demonstrate modern Keras best practices.
- They should be substantially different in topic from all examples listed above.
- They should be extensively documented & commented.
New examples are added via Pull Requests to the [keras.io repository](https://github.com/keras-team/keras-io).
They must be submitted as a `.py` file that follows a specific format. They are usually generated from Jupyter notebooks.
See the [`tutobooks` documentation](https://github.com/keras-team/keras-io/blob/master/README.md) for more details.
If you would like to convert a Keras 2 example to Keras 3, please open a Pull Request to the [keras.io repository](https://github.com/keras-team/keras-io). | keras-io/templates/examples/index.md/0 | {
"file_path": "keras-io/templates/examples/index.md",
"repo_id": "keras-io",
"token_count": 343
} | 137 |
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
<meta name="description" content="Keras documentation">
<meta name="author" content="Keras Team">
<title>Keras: Deep Learning for humans</title>
<!-- Bootstrap core CSS -->
<link href="css/bootstrap.min.css" rel="stylesheet">
<!-- Custom fonts for this template -->
<link href="https://fonts.googleapis.com/css?family=Open+Sans:wght@300;400;600;800&display=swap" rel="stylesheet">
<!-- Custom styles for this template -->
<link href="css/landing.css" rel="stylesheet">
<!-- Google Tag Manager -->
<script>(function(w,d,s,l,i){w[l]=w[l]||[];w[l].push({'gtm.start':
new Date().getTime(),event:'gtm.js'});var f=d.getElementsByTagName(s)[0],
j=d.createElement(s),dl=l!='dataLayer'?'&l='+l:'';j.async=true;j.src=
'https://www.googletagmanager.com/gtm.js?id='+i+dl;f.parentNode.insertBefore(j,f);
})(window,document,'script','dataLayer','GTM-5DNGF4N');
</script>
<script>
(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
})(window,document,'script','https://www.google-analytics.com/analytics.js','ga');
ga('create', 'UA-175165319-128', 'auto');
ga('send', 'pageview');
</script>
<!-- End Google Tag Manager -->
</head>
<body>
<!-- Google Tag Manager (noscript) -->
<noscript><iframe src="https://www.googletagmanager.com/ns.html?id=GTM-5DNGF4N"
height="0" width="0" style="display:none;visibility:hidden"></iframe></noscript>
<!-- End Google Tag Manager (noscript) -->
<!-- Masthead -->
<header class="masthead smooth-white-bg text-center">
<div class="container">
<img src='img/logo.png' class='logo' />
<div class="row">
<div class="col-xl-8 mx-auto">
<h1 class="mb-5">Simple. Flexible. Powerful.</h1>
<div class="row mx-auto">
<div class="col-md px-1">
<a href='{{base_url}}getting_started/' class="btn btn-block btn-lg btn-primary">Get started</a>
</div>
<div class="col-md px-1">
<a href='{{base_url}}api/' class="btn btn-block btn-lg btn-secondary">API docs</a>
</div>
<div class="col-md px-1">
<a href='{{base_url}}guides/' class="btn btn-block btn-lg btn-secondary">Guides</a>
</div>
<div class="col-md px-1">
<a href='{{base_url}}examples/' class="btn btn-block btn-lg btn-secondary">Examples</a>
</div>
</div>
</div>
</div>
    </div>
  </header>
<div class="masthead text-center smooth-black-bg" style="padding: 1em;">
<div class="row">
<div class="col-xl-8 mx-auto" id="announcement-box">
Keras is now available for<br>
JAX, TensorFlow, and PyTorch!<br>
<a href="/keras_3/" id="announcement-link">Read the Keras 3.0 release announcement</a>
</div>
</div>
</div>
<!-- Testimonials -->
<section class="testimonials text-center smooth-white-bg">
<div class="container">
<div class="row">
<div class="col-lg-4">
<div class="testimonial-item mx-auto mb-5">
<p class="font-weight-light mb-0 quote-content">
"Keras is one of the key building blocks in YouTube Discovery's new modeling infrastructure. It brings a clear, consistent API and a common way of expressing modeling ideas to 8 teams across the major surfaces of YouTube recommendations."
</p>
<h5><span class="quote-name">Maciej Kula</span><br><span class="quote-title">Staff Software Engineer - Google</span></h5>
</div>
</div>
<div class="col-lg-4">
<div class="testimonial-item mx-auto mb-5">
<p class="font-weight-light mb-0 quote-content">
"Keras has tremendously simplified the development workflow of Waymo's ML practitioners, with the benefits of a significantly simplified API, standardized interface and behaviors, easily shareable model building components, and highly improved debuggability."
</p>
<h5><span class="quote-name">Yiming Chen</span><br><span class="quote-title">Senior Software Engineer - Waymo</span></h5>
</div>
</div>
<div class="col-lg-4">
<div class="testimonial-item mx-auto mb-5">
<p class="font-weight-light mb-0 quote-content">
"The best thing you can say about any software library is that the abstractions it chooses feel completely natural, such that there is zero friction between thinking about what you want to do and thinking about how you want to code it. That's exactly what you get with Keras."
</p>
<h5><span class="quote-name">Matthew Carrigan</span><br><span class="quote-title">Machine Learning Engineer - Hugging Face</span></h5>
</div>
</div>
</div>
<div class="row">
<div class="col-lg-4">
<div class="testimonial-item mx-auto mb-5">
<p class="font-weight-light mb-0 quote-content">
"Keras allows us to prototype, research and deploy deep learning models in an intuitive and streamlined manner. The functional API makes code comprehensible and stylistic, allowing for effective knowledge transfer between scientists on my team."
</p>
<h5><span class="quote-name">Aiden Arnold, PhD</span><br><span class="quote-title">Lead Data Scientist - Rune Labs</span></h5>
</div>
</div>
<div class="col-lg-4">
<div class="testimonial-item mx-auto mb-5">
<p class="font-weight-light mb-0 quote-content">
"Keras has something for every user: easy customisability for the academic; out-of-the-box, performant models and pipelines for use by the industry, and readable, modular code for the student. Keras has made it very simple to quickly iterate over experiments without worrying about low-level details."
</p>
<h5><span class="quote-name">Abheesht Sharma</span><br><span class="quote-title">Research Scientist - Amazon</span></h5>
</div>
</div>
<div class="col-lg-4">
<div class="testimonial-item mx-auto mb-5">
<p class="font-weight-light mb-0 quote-content">
"Keras is the perfect abstraction layer to build and operationalize Deep Learning models. I've been using it since 2018 to develop and deploy models for some of the largest companies in the world [...] a combination of Keras, TensorFlow, and TFX has no rival."
</p>
<h5><span class="quote-name">Santiago L. Valdarrama</span><br><span class="quote-title">Machine Learning Consultant</span></h5>
</div>
</div>
</div>
<div class="row">
<div class="col-lg-4">
<div class="testimonial-item mx-auto mb-5">
<p class="font-weight-light mb-0 quote-content">
"What I personally like the most about Keras (aside from its intuitive APIs), is the ease of transitioning from research to production. I can train a Keras model, convert it to TF Lite and deploy it to mobile & edge devices."
</p>
<h5><span class="quote-name">Margaret Maynard-Reid</span><br><span class="quote-title">Machine Learning Engineer</span></h5>
</div>
</div>
<div class="col-lg-4">
<div class="testimonial-item mx-auto mb-5">
<p class="font-weight-light mb-0 quote-content">
"Keras is that sweet spot where you get flexibility for research and consistency for deployment. Keras is to Deep Learning what Ubuntu is to Operating Systems."
</p>
<h5><span class="quote-name">Aakash Nain</span><br><span class="quote-title">Research Engineer</span></h5>
</div>
</div>
<div class="col-lg-4">
<div class="testimonial-item mx-auto mb-5">
<p class="font-weight-light mb-0 quote-content">
"Keras's user-friendly design means it's easy to learn and easy to use [...] it allows for the rapid prototyping and deployment of models across a variety of platforms."
</p>
<h5><span class="quote-name">Gareth Collins</span><br><span class="quote-title">Machine Learning Engineer</span></h5>
</div>
</div>
</div>
</div>
</section>
<!-- Image Showcases -->
<section class="showcase">
<div class="container-fluid p-0">
<div class="row no-gutters bottom-border smooth-black-bg">
<div class="col-lg-6 order-lg-2 text-white showcase-img" style="background-image: url('img/showcase-superpower.png');"></div>
<div class="col-lg-6 order-lg-1 my-auto showcase-text">
<h2>A superpower for developers.</h2>
<p class="lead mb-0">
The purpose of Keras is to give an <b>unfair advantage</b> to any developer looking to ship Machine Learning-powered apps.
Keras focuses on debugging speed, code elegance & conciseness, maintainability, and deployability.
When you choose Keras, your codebase is smaller, more readable, easier to iterate on. Your models run faster
thanks to XLA compilation with JAX and TensorFlow, and are easier to deploy across every surface (server, mobile, browser, embedded) thanks to
the serving components from the TensorFlow and PyTorch ecosystems, such as TF Serving, TorchServe, TF Lite, TF.js, and more.
</p>
</div>
</div>
<div class="row no-gutters bottom-border smooth-white-bg">
<div class="col-lg-6 text-white showcase-img" style="background-image: url('img/showcase-api-2.png');"></div>
<div class="col-lg-6 my-auto showcase-text">
<h2>Deep learning for humans.</h2>
<p class="lead mb-0">
Keras is an API designed for human beings, not machines.
Keras follows best practices for <b>reducing cognitive load</b>: it offers consistent & simple APIs,
it minimizes the number of user actions required for common use cases,
and it provides clear & actionable error messages.
Keras also gives the highest priority to crafting great documentation and developer guides.
</p>
</div>
</div>
<div class="row no-gutters bottom-border smooth-black-bg">
<div class="col-lg-6 order-lg-2 text-white showcase-img" style="background-image: url('img/framework-optionality.png');"></div>
<div class="col-lg-6 order-lg-1 my-auto showcase-text">
<h2>Unlock framework optionality.</h2>
<p class="lead mb-0">
Keras works with JAX, TensorFlow, and PyTorch. It enables you to create models that can move across framework
boundaries and that can benefit from the ecosystem of all three of these frameworks.
</p>
</div>
</div>
<div class="row no-gutters bottom-border smooth-white-bg">
<div class="col-lg-6 text-white showcase-img" style="background-image: url('img/showcase-tpu.jpg');"></div>
<div class="col-lg-6 my-auto showcase-text">
<h2>Exascale machine learning.</h2>
<p class="lead mb-0">
Keras is an industry-strength framework
that can scale to large clusters of GPUs or an entire <a href='https://cloud.google.com/tpu'>TPU pod</a>.
It's not only possible; it's easy.
</p>
</div>
</div>
<div class="row no-gutters bottom-border smooth-black-bg">
<div class="col-lg-6 order-lg-2 text-white showcase-img" style="background-image: url('img/showcase-lhc.jpg');"></div>
<div class="col-lg-6 order-lg-1 my-auto showcase-text">
<h2>State-of-the-art research.</h2>
<p class="lead mb-0">
Keras is used by CERN, NASA, NIH, and many more scientific organizations around the world
(and yes, Keras is used at the LHC).
Keras has the low-level flexibility to implement arbitrary research ideas while
offering optional high-level convenience features to speed up experimentation cycles.
</p>
</div>
</div>
</div>
</section>
</body>
<footer>
<a href="https://policies.google.com/terms">Terms</a> | <a href="https://policies.google.com/privacy">Privacy</a>
</footer>
</html>
| keras-io/theme/landing.html/0 | {
"file_path": "keras-io/theme/landing.html",
"repo_id": "keras-io",
"token_count": 5212
} | 138 |
# KerasNLP Examples
The `examples/` directory contains scripts built on top of the library that do not fit well into
the Colab format used on [keras.io](https://keras.io/examples/). This includes recipes for
pre-training models and evaluating models on benchmarks such as GLUE.
| keras-nlp/examples/README.md/0 | {
"file_path": "keras-nlp/examples/README.md",
"repo_id": "keras-nlp",
"token_count": 73
} | 139 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl import app
from absl import flags
from tensorflow import keras
from examples.machine_translation.data import get_dataset_and_tokenizer
from examples.machine_translation.model import TranslationModel
FLAGS = flags.FLAGS
flags.DEFINE_integer("num_epochs", 1, "Number of epochs to train.")
flags.DEFINE_integer("steps_per_epoch", None, "Number of steps per epoch.")
flags.DEFINE_integer("num_encoders", 2, "Number of Transformer encoder layers.")
flags.DEFINE_integer("num_decoders", 2, "Number of Transformer decoder layers.")
flags.DEFINE_integer("batch_size", 64, "The training batch size.")
flags.DEFINE_float("learning_rate", 0.001, "The initial learning rate.")
flags.DEFINE_integer("model_dim", 64, "Embedding size.")
flags.DEFINE_integer(
"intermediate_dim",
128,
"Intermediate dimension (feedforward network) of transformer.",
)
flags.DEFINE_integer(
"num_heads",
8,
"Number of head of the multihead attention.",
)
flags.DEFINE_integer(
"sequence_length",
20,
"Input and output sequence length.",
)
flags.DEFINE_integer(
"vocab_size",
15000,
"Vocabulary size, required by tokenizer.",
)
flags.DEFINE_string(
"saved_model_path",
"saved_models/machine_translation_model",
"The path to saved model",
)
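# Example invocation (illustrative flag values; any of the flags defined above
# can be overridden on the command line):
#
#   python examples/machine_translation/train.py \
#       --num_epochs=5 --batch_size=64 --num_heads=8 \
#       --saved_model_path=saved_models/machine_translation_model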
def run_training(model, train_ds, val_ds):
learning_rate = keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate=FLAGS.learning_rate,
decay_steps=20,
decay_rate=0.98,
)
optimizer = keras.optimizers.Adam(learning_rate)
loss_fn = keras.losses.SparseCategoricalCrossentropy(
reduction=keras.losses.Reduction.NONE
)
metrics = keras.metrics.SparseCategoricalAccuracy()
model.compile(optimizer=optimizer, metrics=[metrics], loss=loss_fn)
model.fit(
train_ds,
epochs=FLAGS.num_epochs,
validation_data=val_ds,
steps_per_epoch=FLAGS.steps_per_epoch,
)
def main(_):
(
(train_ds, val_ds, test_ds),
(
eng_tokenizer,
spa_tokenizer,
),
) = get_dataset_and_tokenizer(
FLAGS.sequence_length, FLAGS.vocab_size, FLAGS.batch_size
)
english_vocab_size = eng_tokenizer.vocabulary_size()
spanish_vocab_size = spa_tokenizer.vocabulary_size()
model = TranslationModel(
encoder_tokenizer=eng_tokenizer,
decoder_tokenizer=spa_tokenizer,
num_encoders=FLAGS.num_encoders,
num_decoders=FLAGS.num_decoders,
num_heads=FLAGS.num_heads,
transformer_intermediate_dim=FLAGS.intermediate_dim,
encoder_vocab_size=english_vocab_size,
decoder_vocab_size=spanish_vocab_size,
embed_dim=FLAGS.model_dim,
sequence_length=FLAGS.sequence_length,
)
run_training(model, train_ds, val_ds)
print(f"Saving to {FLAGS.saved_model_path}")
model.save(FLAGS.saved_model_path)
print(f"Successfully saved model to {FLAGS.saved_model_path}")
if __name__ == "__main__":
app.run(main)
| keras-nlp/examples/machine_translation/train.py/0 | {
"file_path": "keras-nlp/examples/machine_translation/train.py",
"repo_id": "keras-nlp",
"token_count": 1395
} | 140 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_nlp.backend import config
if config.keras_3():
from keras.ops import * # noqa: F403, F401
else:
from keras_core.ops import * # noqa: F403, F401
if config.backend() == "tensorflow":
import tensorflow as tf
from tensorflow.experimental import numpy as tfnp
def take_along_axis(x, indices, axis=None):
# TODO: move this workaround for dynamic shapes into keras-core.
if axis < 0:
axis = axis + indices.shape.rank
# If all shapes after axis are 1, squeeze them off and use tf.gather.
# tf.gather plays nicer with dynamic shapes in compiled functions.
leftover_axes = list(range(axis + 1, indices.shape.rank))
static_shape = indices.shape.as_list()
squeezable = True
for i in leftover_axes:
if static_shape[i] != 1:
squeezable = False
if squeezable:
if leftover_axes:
indices = tf.squeeze(indices, leftover_axes)
return tf.gather(x, indices, batch_dims=axis)
# Otherwise, fall back to the tfnp call.
return tfnp.take_along_axis(x, indices, axis=axis)
| keras-nlp/keras_nlp/backend/ops.py/0 | {
"file_path": "keras-nlp/keras_nlp/backend/ops.py",
"repo_id": "keras-nlp",
"token_count": 649
} | 141 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
from absl.testing import parameterized
from keras_nlp.backend import config
from keras_nlp.backend import keras
from keras_nlp.backend import ops
from keras_nlp.backend import random
from keras_nlp.layers.modeling.reversible_embedding import ReversibleEmbedding
from keras_nlp.tests.test_case import TestCase
class ReversibleEmbeddingTest(TestCase):
@parameterized.named_parameters(
("tie_weights", True),
("untie_weights", False),
)
def test_layer_behaviors_tied(self, tie_weights):
self.run_layer_test(
cls=ReversibleEmbedding,
init_kwargs={
"input_dim": 100,
"output_dim": 32,
"tie_weights": tie_weights,
"embeddings_initializer": "HeNormal",
},
input_data=random.randint(minval=0, maxval=100, shape=(4, 10)),
expected_output_shape=(4, 10, 32),
expected_num_trainable_weights=1 if tie_weights else 2,
)
def test_correctness(self):
layer = ReversibleEmbedding(input_dim=3, output_dim=2)
layer.build()
layer.embeddings.assign(np.array([[0.0, 0.0], [2.0, 2.0], [3.0, 3.0]]))
out = layer(np.array(([2, 1, 0])))
self.assertAllClose(out, np.array([[3.0, 3.0], [2.0, 2.0], [0.0, 0.0]]))
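        # With `reverse=True`, the layer projects inputs against the transposed
        # embedding matrix: [1, 1] . [0, 0] = 0, [1, 1] . [2, 2] = 4,
        # [1, 1] . [3, 3] = 6.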
layer = ReversibleEmbedding(input_dim=3, output_dim=2)
layer.build()
layer.embeddings.assign(np.array([[0.0, 0.0], [2.0, 2.0], [3.0, 3.0]]))
out = layer(np.array(([[1.0, 1.0]])), reverse=True)
self.assertAllClose(out, np.array([[0.0, 4.0, 6.0]]))
def test_tied_checkpoint_untied_weights(self):
embedding = ReversibleEmbedding(100, 16, tie_weights=True)
inputs = keras.Input(shape=(10,), dtype="int32")
hidden_states = embedding(inputs)
outputs = embedding(hidden_states, reverse=True)
tied_model = keras.Model(inputs, outputs)
path = os.path.join(self.get_temp_dir(), "checkpoint.weights.h5")
tied_model.save_weights(path)
embedding = ReversibleEmbedding(100, 16, tie_weights=False)
inputs = keras.Input(shape=(10,), dtype="int32")
hidden_states = embedding(inputs)
outputs = embedding(hidden_states, reverse=True)
untied_model = keras.Model(inputs, outputs)
untied_model.load_weights(path)
input_data = ops.ones(shape=(4, 10), dtype="int32")
self.assertAllClose(untied_model(input_data), tied_model(input_data))
def test_reverse_dtype(self):
embedding = ReversibleEmbedding(100, 16, reverse_dtype="float32")
input_data = ops.ones(shape=(4, 10, 16))
output_data = embedding(input_data, reverse=True)
self.assertEqual(output_data.shape, (4, 10, 100))
self.assertDTypeEqual(output_data, "float32")
if config.backend() == "torch":
import torch
if not torch.cuda.is_available():
self.skipTest("Torch CPU does not support float16")
embedding = ReversibleEmbedding(100, 16, reverse_dtype="float16")
input_data = ops.ones(shape=(4, 10, 16))
output_data = embedding(input_data, reverse=True)
self.assertEqual(output_data.shape, (4, 10, 100))
self.assertDTypeEqual(output_data, "float16")
| keras-nlp/keras_nlp/layers/modeling/reversible_embedding_test.py/0 | {
"file_path": "keras-nlp/keras_nlp/layers/modeling/reversible_embedding_test.py",
"repo_id": "keras-nlp",
"token_count": 1672
} | 142 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_nlp.api_export import keras_nlp_export
from keras_nlp.layers.preprocessing.preprocessing_layer import (
PreprocessingLayer,
)
from keras_nlp.utils.tensor_utils import assert_tf_text_installed
from keras_nlp.utils.tensor_utils import convert_to_ragged_batch
try:
import tensorflow_text as tf_text
except ImportError:
tf_text = None
@keras_nlp_export("keras_nlp.layers.MultiSegmentPacker")
class MultiSegmentPacker(PreprocessingLayer):
"""Packs multiple sequences into a single fixed width model input.
This layer packs multiple input sequences into a single fixed width sequence
    containing start and end delimiters, forming a dense input suitable for a
classification task for BERT and BERT-like models.
Takes as input a tuple of token segments. Each tuple element should contain
the tokens for a segment, passed as tensors, `tf.RaggedTensor`s, or lists.
For batched input, each element in the tuple of segments should be a list of
lists or a rank two tensor. For unbatched inputs, each element should be a
list or rank one tensor.
The layer will process inputs as follows:
- Truncate all input segments to fit within `sequence_length` according to
the `truncate` strategy.
- Concatenate all input segments, adding a single `start_value` at the
start of the entire sequence, and multiple `end_value`s at the end of
each segment.
- Pad the resulting sequence to `sequence_length` using `pad_tokens`.
- Calculate a separate tensor of "segment ids", with integer type and the
      same shape as the packed token output, where each integer is the index
      of the segment the token originated from. The segment id of the `start_value`
is always 0, and the segment id of each `end_value` is the segment that
precedes it.
Args:
sequence_length: int. The desired output length.
start_value: int/str/list/tuple. The id(s) or token(s) that are to be
placed at the start of each sequence (called "[CLS]" for BERT). The
dtype must match the dtype of the input tensors to the layer.
end_value: int/str/list/tuple. The id(s) or token(s) that are to be
placed at the end of the last input segment (called "[SEP]" for
BERT). The dtype must match the dtype of the input tensors to the
layer.
sep_value: int/str/list/tuple. The id(s) or token(s) that are to be
placed at the end of every segment, except the last segment (called
"[SEP]" for BERT). If `None`, `end_value` is used. The dtype must
match the dtype of the input tensors to the layer.
pad_value: int/str. The id or token that is to be placed into the unused
positions after the last segment in the sequence
(called "[PAD]" for BERT).
truncate: str. The algorithm to truncate a list of batched segments to
fit a per-example length limit. The value can be either
`"round_robin"` or `"waterfall"`:
- `"round_robin"`: Available space is assigned one token at a
time in a round-robin fashion to the inputs that still need
some, until the limit is reached.
- `"waterfall"`: The allocation of the budget is done using a
"waterfall" algorithm that allocates quota in a
left-to-right manner and fills up the buckets until we run
                out of budget. It supports an arbitrary number of segments.
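            For example, with a post-special-token budget of 5 tokens and
            segments `[1, 2, 3, 4]` and `[5, 6, 7, 8]`, `"round_robin"`
            keeps `[1, 2, 3]` and `[5, 6]`, while `"waterfall"` keeps
            `[1, 2, 3, 4]` and `[5]`.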
Returns:
A tuple with two elements. The first is the dense, packed token
sequence. The second is an integer tensor of the same shape, containing
the segment ids.
Examples:
*Pack a single input for classification.*
>>> seq1 = [1, 2, 3, 4]
>>> packer = keras_nlp.layers.MultiSegmentPacker(
... sequence_length=8, start_value=101, end_value=102
... )
>>> token_ids, segment_ids = packer((seq1,))
>>> np.array(token_ids)
array([101, 1, 2, 3, 4, 102, 0, 0], dtype=int32)
>>> np.array(segment_ids)
array([0, 0, 0, 0, 0, 0, 0, 0], dtype=int32)
*Pack multiple inputs for classification.*
>>> seq1 = [1, 2, 3, 4]
>>> seq2 = [11, 12, 13, 14]
>>> packer = keras_nlp.layers.MultiSegmentPacker(
... sequence_length=8, start_value=101, end_value=102
... )
>>> token_ids, segment_ids = packer((seq1, seq2))
>>> np.array(token_ids)
array([101, 1, 2, 3, 102, 11, 12, 102], dtype=int32)
>>> np.array(segment_ids)
array([0, 0, 0, 0, 0, 1, 1, 1], dtype=int32)
*Pack multiple inputs for classification with different sep tokens.*
>>> seq1 = [1, 2, 3, 4]
>>> seq2 = [11, 12, 13, 14]
>>> packer = keras_nlp.layers.MultiSegmentPacker(
... sequence_length=8,
... start_value=101,
... end_value=102,
... sep_value=[102, 102],
... )
>>> token_ids, segment_ids = packer((seq1, seq2))
>>> np.array(token_ids)
array([101, 1, 2, 102, 102, 11, 12, 102], dtype=int32)
>>> np.array(segment_ids)
array([0, 0, 0, 0, 0, 1, 1, 1], dtype=int32)
Reference:
[Devlin et al., 2018](https://arxiv.org/abs/1810.04805).
"""
def __init__(
self,
sequence_length,
start_value,
end_value,
sep_value=None,
pad_value=None,
truncate="round_robin",
**kwargs,
):
assert_tf_text_installed(self.__class__.__name__)
super().__init__(**kwargs)
self.sequence_length = sequence_length
if truncate not in ("round_robin", "waterfall"):
raise ValueError(
"Only 'round_robin' and 'waterfall' algorithms are "
"supported. Received %s" % truncate
)
self.truncate = truncate
# Maintain private copies of start/end values for config purposes.
self._start_value = start_value
self._sep_value = sep_value
self._end_value = end_value
def check_special_value_type(value, value_name):
if isinstance(value, (int, str)):
return [value]
if value and not isinstance(value, (list, tuple)):
raise ValueError(
f"{value_name} should be of type int/str/list/tuple."
f"Received type: `{type(value)}`."
)
return value
start_value = check_special_value_type(start_value, "start_value")
if sep_value is None:
sep_value = end_value
sep_value = check_special_value_type(sep_value, "sep_value")
end_value = check_special_value_type(end_value, "end_value")
self.start_value = start_value
self.sep_value = sep_value
self.end_value = end_value
self.pad_value = pad_value
def get_config(self):
config = super().get_config()
config.update(
{
"sequence_length": self.sequence_length,
"start_value": self._start_value,
"end_value": self._end_value,
"sep_value": self._sep_value,
"pad_value": self.pad_value,
"truncate": self.truncate,
}
)
return config
def _sanitize_inputs(self, inputs):
"""Force inputs to a list of rank 2 ragged tensors."""
# Sanitize inputs.
if not isinstance(inputs, (list, tuple)):
inputs = (inputs,)
if not inputs:
raise ValueError(
"At least one input is required for packing. "
f"Received: `inputs={inputs}`"
)
inputs, unbatched_list, _ = list(
zip(*(convert_to_ragged_batch(x) for x in inputs))
)
if len(set(unbatched_list)) != 1:
ranks = [1 if unbatched else 2 for unbatched in unbatched_list]
raise ValueError(
"All inputs for packing must have the same rank. "
f"Received: `inputs={inputs}` with ranks {ranks}"
)
return inputs, unbatched_list[0]
def _trim_inputs(self, inputs):
"""Trim inputs to desired length."""
num_segments = len(inputs)
num_special_tokens = (
len(self.start_value)
+ (num_segments - 1) * len(self.sep_value)
+ len(self.end_value)
)
if self.truncate == "round_robin":
return tf_text.RoundRobinTrimmer(
self.sequence_length - num_special_tokens
).trim(inputs)
elif self.truncate == "waterfall":
return tf_text.WaterfallTrimmer(
self.sequence_length - num_special_tokens
).trim(inputs)
else:
raise ValueError("Unsupported truncate: %s" % self.truncate)
def _combine_inputs(self, segments):
"""Combine inputs with start and end values added."""
dtype = segments[0].dtype
batch_size = segments[0].nrows()
start_value = tf.convert_to_tensor(self.start_value, dtype=dtype)
sep_value = tf.convert_to_tensor(self.sep_value, dtype=dtype)
end_value = tf.convert_to_tensor(self.end_value, dtype=dtype)
start_columns = tf.repeat(
start_value[tf.newaxis, :], repeats=batch_size, axis=0
)
sep_columns = tf.repeat(
sep_value[tf.newaxis, :], repeats=batch_size, axis=0
)
end_columns = tf.repeat(
end_value[tf.newaxis, :], repeats=batch_size, axis=0
)
ones_sep_columns = tf.ones_like(sep_columns, dtype="int32")
ones_end_columns = tf.ones_like(end_columns, dtype="int32")
segments_to_combine = [start_columns]
segment_ids_to_combine = [
tf.ones_like(start_columns, dtype="int32") * 0
]
for i, seg in enumerate(segments):
# Combine all segments.
segments_to_combine.append(seg)
# Combine segment ids.
segment_ids_to_combine.append(tf.ones_like(seg, dtype="int32") * i)
# Account for the sep/end tokens here.
if i == len(segments) - 1:
segments_to_combine.append(end_columns)
segment_ids_to_combine.append(ones_end_columns * i)
else:
segments_to_combine.append(sep_columns)
segment_ids_to_combine.append(ones_sep_columns * i)
token_ids = tf.concat(segments_to_combine, 1)
segment_ids = tf.concat(segment_ids_to_combine, 1)
return token_ids, segment_ids
def call(self, inputs):
inputs, unbatched = self._sanitize_inputs(inputs)
segments = self._trim_inputs(inputs)
token_ids, segment_ids = self._combine_inputs(segments)
# Pad to dense tensor output.
shape = tf.cast([-1, self.sequence_length], "int64")
token_ids = token_ids.to_tensor(
shape=shape, default_value=self.pad_value
)
segment_ids = segment_ids.to_tensor(shape=shape)
# Remove the batch dim if added.
if unbatched:
token_ids = tf.squeeze(token_ids, 0)
segment_ids = tf.squeeze(segment_ids, 0)
return (token_ids, segment_ids)
def compute_output_shape(self, inputs_shape):
if isinstance(inputs_shape[0], tuple):
inputs_shape = inputs_shape[0]
inputs_shape = list(inputs_shape)
inputs_shape[-1] = self.sequence_length
return tuple(inputs_shape)
| keras-nlp/keras_nlp/layers/preprocessing/multi_segment_packer.py/0 | {
"file_path": "keras-nlp/keras_nlp/layers/preprocessing/multi_segment_packer.py",
"repo_id": "keras-nlp",
"token_count": 5344
} | 143 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_nlp.backend import keras
from keras_nlp.backend import ops
from keras_nlp.utils.tensor_utils import is_float_dtype
from keras_nlp.utils.tensor_utils import tensor_to_list
try:
from rouge_score import rouge_scorer
except ImportError:
rouge_scorer = None
class RougeBase(keras.metrics.Metric):
"""ROUGE metric.
This class implements two variants of the ROUGE metric - ROUGE-N,
and ROUGE-L.
Note on input shapes:
For `y_true` and `y_pred`, this class supports scalar values and batch
inputs of shapes `()`, `(batch_size,)` and `(batch_size, 1)`.
Args:
variant: string. One of "rougeN", "rougeL". For "rougeN", N lies in
the range [1, 9]. Defaults to `"rouge2"`.
use_stemmer: bool. Whether Porter Stemmer should be used to strip word
suffixes to improve matching. Defaults to `False`.
dtype: string or tf.dtypes.Dtype. Precision of metric computation. If
not specified, it defaults to `"float32"`.
name: string. Name of the metric instance.
**kwargs: Other keyword arguments.
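    Example:

    A minimal usage sketch (assumes the `rouge_score` package is installed;
    exact score values depend on the chosen variant):

    ```python
    rouge = RougeBase(variant="rouge2")
    rouge.update_state(
        ["the quick brown fox jumped over the lazy dog"],
        ["the quick brown fox jumped over the dog"],
    )
    rouge.result()  # {"precision": ..., "recall": ..., "f1_score": ...}
    ```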
References:
- [Lin et al., 2004](https://aclanthology.org/W04-1013/)
"""
def __init__(
self,
variant="rouge2",
use_stemmer=False,
dtype="float32",
name="rouge",
**kwargs,
):
super().__init__(name=name, dtype=dtype, **kwargs)
if rouge_scorer is None:
raise ImportError(
f"{self.__class__.__name__} requires the `rouge_score` "
"package. Please install it with `pip install rouge-score`."
)
if not is_float_dtype(dtype):
raise ValueError(
"`dtype` must be a floating point type. "
f"Received: dtype={dtype}"
)
if variant not in tuple(
("rouge" + str(order) for order in range(1, 10))
) + ("rougeL",):
raise ValueError(
"Invalid variant of ROUGE. Should be one of: rougeN, rougeL, "
"with N ranging from 1 to 9. Received: "
f"variant={variant}"
)
self.variant = variant
self.use_stemmer = use_stemmer
# To-do: Add split_summaries and tokenizer options after the maintainers
# of rouge_scorer have released a new version.
self._rouge_scorer = rouge_scorer.RougeScorer(
rouge_types=[self.variant],
use_stemmer=use_stemmer,
)
self._rouge_precision = self.add_weight(
shape=(),
initializer="zeros",
dtype=self.dtype,
name="rouge_precision",
)
self._rouge_recall = self.add_weight(
shape=(),
initializer="zeros",
dtype=self.dtype,
name="rouge_recall",
)
self._rouge_f1_score = self.add_weight(
shape=(),
initializer="zeros",
dtype=self.dtype,
name="rouge_f1_score",
)
self._number_of_samples = self.add_weight(
shape=(),
initializer="zeros",
dtype=self.dtype,
name="number_of_samples",
)
def update_state(self, y_true, y_pred, sample_weight=None):
# Three possible shapes for y_true and y_pred: Python string,
# [batch_size] and [batch_size, 1]. In the latter two cases, we have
# strings in the tensor/list.
def validate_and_fix_rank(inputs, tensor_name):
if not isinstance(inputs, tf.Tensor):
inputs = tf.convert_to_tensor(inputs)
if inputs.shape.rank == 0:
return inputs[tf.newaxis]
elif inputs.shape.rank == 1:
return inputs
elif inputs.shape.rank == 2:
if inputs.shape[1] != 1:
raise ValueError(
f"{tensor_name} must be of shape `[batch_size, 1]`. "
f"Found shape: {inputs.shape}"
)
else:
return tf.squeeze(inputs, axis=1)
else:
raise ValueError(
f"{tensor_name} must be of rank 0 (scalar input), 1 or 2. "
f"Found rank: {inputs.shape.rank}"
)
y_true = validate_and_fix_rank(y_true, "y_true")
y_pred = validate_and_fix_rank(y_pred, "y_pred")
batch_size = tf.shape(y_true)[0]
def calculate_rouge_score(reference, hypothesis):
reference = tensor_to_list(reference)
hypothesis = tensor_to_list(hypothesis)
score = self._rouge_scorer.score(reference, hypothesis)[
self.variant
]
return score.precision, score.recall, score.fmeasure
for batch_idx in range(batch_size):
score = calculate_rouge_score(y_true[batch_idx], y_pred[batch_idx])
self._rouge_precision.assign_add(score[0])
self._rouge_recall.assign_add(score[1])
self._rouge_f1_score.assign_add(score[2])
self._number_of_samples.assign_add(
ops.cast(batch_size, dtype=self.dtype)
)
def result(self):
if self._number_of_samples == 0:
return {
"precision": 0.0,
"recall": 0.0,
"f1_score": 0.0,
}
rouge_precision = self._rouge_precision / self._number_of_samples
rouge_recall = self._rouge_recall / self._number_of_samples
rouge_f1_score = self._rouge_f1_score / self._number_of_samples
return {
"precision": rouge_precision,
"recall": rouge_recall,
"f1_score": rouge_f1_score,
}
def reset_state(self):
self._rouge_precision.assign(0.0)
self._rouge_recall.assign(0.0)
self._rouge_f1_score.assign(0.0)
self._number_of_samples.assign(0.0)
def get_config(self):
config = super().get_config()
config.update(
{
"variant": self.variant,
"use_stemmer": self.use_stemmer,
}
)
return config
| keras-nlp/keras_nlp/metrics/rouge_base.py/0 | {
"file_path": "keras-nlp/keras_nlp/metrics/rouge_base.py",
"repo_id": "keras-nlp",
"token_count": 3350
} | 144 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from keras_nlp.models.bart.bart_tokenizer import BartTokenizer
from keras_nlp.tests.test_case import TestCase
class BartTokenizerTest(TestCase):
def setUp(self):
self.vocab = ["<s>", "<pad>", "</s>", "air", "Ġair", "plane", "Ġat"]
self.vocab += ["port", "<mask>"]
self.vocab = dict([(token, i) for i, token in enumerate(self.vocab)])
self.merges = ["Ġ a", "Ġ t", "Ġ i", "Ġ b", "a i", "p l", "n e"]
self.merges += ["Ġa t", "p o", "r t", "Ġt h", "ai r", "pl a", "po rt"]
self.merges += ["Ġai r", "Ġa i", "pla ne"]
self.init_kwargs = {"vocabulary": self.vocab, "merges": self.merges}
self.input_data = [
"<s> airplane at airport</s><pad>",
" airplane airport",
]
def test_tokenizer_basics(self):
self.run_preprocessing_layer_test(
cls=BartTokenizer,
init_kwargs=self.init_kwargs,
input_data=self.input_data,
# TODO: </s> should not get tokenized as <s>
expected_output=[[0, 4, 5, 6, 4, 7, 0, 1], [4, 5, 4, 7]],
expected_detokenize_output=[
"<s> airplane at airport<s><pad>",
" airplane airport",
],
)
def test_errors_missing_special_tokens(self):
with self.assertRaises(ValueError):
BartTokenizer(vocabulary=["a", "b", "c"], merges=[])
@pytest.mark.large
def test_smallest_preset(self):
self.run_preset_test(
cls=BartTokenizer,
preset="bart_base_en",
input_data=["The quick brown fox."],
expected_output=[[133, 2119, 6219, 23602, 4]],
)
@pytest.mark.extra_large
def test_all_presets(self):
for preset in BartTokenizer.presets:
self.run_preset_test(
cls=BartTokenizer,
preset=preset,
input_data=self.input_data,
)
| keras-nlp/keras_nlp/models/bart/bart_tokenizer_test.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/bart/bart_tokenizer_test.py",
"repo_id": "keras-nlp",
"token_count": 1157
} | 145 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from keras_nlp.backend import keras
from keras_nlp.backend import ops
from keras_nlp.layers.modeling.alibi_bias import AlibiBias
from keras_nlp.utils.keras_utils import clone_initializer
class BloomAttention(keras.layers.Layer):
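    """Multi-head self-attention used by the BLOOM backbone.

    The layer computes scaled dot-product attention over the input hidden
    states, adds ALiBi positional biases to the attention scores (via
    `AlibiBias`), and can optionally read from and write to a key/value
    `cache` for autoregressive decoding.

    Args:
        num_heads: int. Number of attention heads.
        dropout: float. Dropout probability applied to the attention scores
            and to the output projection.
        kernel_initializer: Initializer for the dense projection kernels.
        bias_initializer: Initializer for the dense projection biases.
    """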
def __init__(
self,
num_heads,
dropout=0.0,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
**kwargs,
):
super().__init__(**kwargs)
self.num_heads = num_heads
self.dropout = dropout
self.kernel_initializer = keras.initializers.get(kernel_initializer)
self.bias_initializer = keras.initializers.get(bias_initializer)
def build(self, inputs_shape):
batch_size, seq_length, hidden_dim = inputs_shape
self.head_dim = hidden_dim // self.num_heads
# Layer-wise attention scaling
self.inv_norm_factor = 1.0 / math.sqrt(self.head_dim)
self._query_dense = keras.layers.EinsumDense(
equation="btm,mnh->btnh",
output_shape=(None, self.num_heads, self.head_dim),
bias_axes="nh",
kernel_initializer=clone_initializer(self.kernel_initializer),
bias_initializer=clone_initializer(self.bias_initializer),
dtype=self.dtype_policy,
name="query_dense",
)
self._query_dense.build(inputs_shape)
self._key_dense = keras.layers.EinsumDense(
equation="bsm,mnh->bsnh",
output_shape=(None, self.num_heads, self.head_dim),
bias_axes="nh",
kernel_initializer=clone_initializer(self.kernel_initializer),
bias_initializer=clone_initializer(self.bias_initializer),
dtype=self.dtype_policy,
name="key_dense",
)
self._key_dense.build(inputs_shape)
self._value_dense = keras.layers.EinsumDense(
equation="bsm,mnh->bsnh",
output_shape=(None, self.num_heads, self.head_dim),
bias_axes="nh",
kernel_initializer=clone_initializer(self.kernel_initializer),
bias_initializer=clone_initializer(self.bias_initializer),
dtype=self.dtype_policy,
name="value_dense",
)
self._value_dense.build(inputs_shape)
self._alibi_layer = AlibiBias(
dtype=self.dtype_policy,
)
self._output_dense = keras.layers.Dense(
hidden_dim,
kernel_initializer=clone_initializer(self.kernel_initializer),
bias_initializer=clone_initializer(self.bias_initializer),
dtype=self.dtype_policy,
name="output_dense",
)
self._output_dense.build(inputs_shape)
self._dropout_layer = keras.layers.Dropout(
rate=self.dropout,
dtype=self.dtype_policy,
name="dropout",
)
self._softmax = keras.layers.Softmax(
dtype="float32",
name="softmax",
)
self.built = True
def call(
self,
hidden_states,
attention_mask=None,
cache=None,
cache_update_index=None,
):
batch_size, seq_length, hidden_dim = ops.shape(hidden_states)
query = self._query_dense(hidden_states)
key = self._key_dense(hidden_states)
value = self._value_dense(hidden_states)
if cache is not None:
key_cache = cache[:, 0, ...]
value_cache = cache[:, 1, ...]
if cache_update_index is None:
key = key_cache
value = value_cache
else:
start = [0, cache_update_index, 0, 0]
key = ops.slice_update(key_cache, start, key)
value = ops.slice_update(value_cache, start, value)
cache = ops.stack((key, value), axis=1)
else:
if cache_update_index is not None:
raise ValueError(
"`cache_update_index` should not be set if `cache` is "
f"`None`. Received: cache={cache}, "
f"cache_update_index={cache_update_index}"
)
# query (batch_size, num_heads, query_length, head_dim)
query = ops.transpose(query, [0, 2, 1, 3])
# value (batch_size, num_heads, kv_length, head_dim)
value = ops.transpose(value, [0, 2, 1, 3])
# key (batch_size, num_heads, head_dim, kv_length)
key = ops.transpose(key, [0, 2, 3, 1])
attention_scores = (
ops.matmul(query, key) * self.inv_norm_factor
) # [batch_size, num_heads, query_length, kv_length]
attention_scores = self._alibi_layer(attention_scores)
attention_scores = self._softmax(
attention_scores, ops.expand_dims(attention_mask, 1)
)
attention_scores = self._dropout_layer(attention_scores)
attention_output = ops.matmul(
attention_scores, value
) # [batch_size, num_heads, query_length, head_dim]
attention_output = ops.transpose(
attention_output, [0, 2, 1, 3]
) # [batch_size, query_length, num_heads, head_dim]
attention_output = ops.reshape(
attention_output,
[batch_size, seq_length, self.num_heads * self.head_dim],
) # [batch_size, query_length, hidden_dim]
attention_output = self._output_dense(attention_output)
attention_output = self._dropout_layer(attention_output)
if cache is not None:
return attention_output, cache
return attention_output
def get_config(self):
config = super().get_config()
config.update(
{
"num_heads": self.num_heads,
"dropout": self.dropout,
"kernel_initializer": keras.initializers.serialize(
self.kernel_initializer
),
"bias_initializer": keras.initializers.serialize(
self.bias_initializer
),
}
)
return config
| keras-nlp/keras_nlp/models/bloom/bloom_attention.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/bloom/bloom_attention.py",
"repo_id": "keras-nlp",
"token_count": 3189
} | 146 |
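A minimal usage sketch (not part of the repository) of the key/value cache update pattern used in `BloomAttention.call` above, written against the Keras 3 `keras.ops` API; the shapes and the single decoded token are illustrative assumptions.
from keras import ops

batch_size, max_length, num_heads, head_dim = 1, 8, 2, 4
# The combined cache stores keys at index 0 and values at index 1 on axis 1,
# matching the `ops.stack((key, value), axis=1)` layout in `call`.
cache = ops.zeros((batch_size, 2, max_length, num_heads, head_dim))
# Key/value projections for a single newly decoded token.
key_update = ops.ones((batch_size, 1, num_heads, head_dim))
value_update = ops.ones((batch_size, 1, num_heads, head_dim))
cache_update_index = 3  # position of the token being decoded
start = [0, cache_update_index, 0, 0]
key = ops.slice_update(cache[:, 0, ...], start, key_update)
value = ops.slice_update(cache[:, 1, ...], start, value_update)
cache = ops.stack((key, value), axis=1)
# Only the slot at `cache_update_index` has been written.
print(ops.convert_to_numpy(cache)[0, 0, cache_update_index, 0])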
# Copyright 2022 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from keras_nlp.api_export import keras_nlp_export
from keras_nlp.backend import keras
from keras_nlp.layers.modeling.masked_lm_head import MaskedLMHead
from keras_nlp.models.deberta_v3.deberta_v3_backbone import DebertaV3Backbone
from keras_nlp.models.deberta_v3.deberta_v3_backbone import (
deberta_kernel_initializer,
)
from keras_nlp.models.deberta_v3.deberta_v3_masked_lm_preprocessor import (
DebertaV3MaskedLMPreprocessor,
)
from keras_nlp.models.deberta_v3.deberta_v3_presets import backbone_presets
from keras_nlp.models.task import Task
from keras_nlp.utils.python_utils import classproperty
@keras_nlp_export("keras_nlp.models.DebertaV3MaskedLM")
class DebertaV3MaskedLM(Task):
"""An end-to-end DeBERTaV3 model for the masked language modeling task.
This model will train DeBERTaV3 on a masked language modeling task.
The model will predict labels for a number of masked tokens in the
input data. For usage of this model with pre-trained weights, see the
`from_preset()` method.
This model can optionally be configured with a `preprocessor` layer, in
which case inputs can be raw string features during `fit()`, `predict()`,
and `evaluate()`. Inputs will be tokenized and dynamically masked during
training and evaluation. This is done by default when creating the model
with `from_preset()`.
Disclaimer: Pre-trained models are provided on an "as is" basis, without
warranties or conditions of any kind. The underlying model is provided by a
third party and subject to a separate license, available
[here](https://github.com/microsoft/DeBERTa).
Args:
backbone: A `keras_nlp.models.DebertaV3Backbone` instance.
preprocessor: A `keras_nlp.models.DebertaV3MaskedLMPreprocessor` or
`None`. If `None`, this model will not apply preprocessing, and
inputs should be preprocessed before calling the model.
Example usage:
Raw string data.
```python
features = ["The quick brown fox jumped.", "I forgot my homework."]
# Pretrained language model.
masked_lm = keras_nlp.models.DebertaV3MaskedLM.from_preset(
"deberta_v3_base_en",
)
masked_lm.fit(x=features, batch_size=2)
# Re-compile (e.g., with a new learning rate).
masked_lm.compile(
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer=keras.optimizers.Adam(5e-5),
jit_compile=True,
)
# Access backbone programmatically (e.g., to change `trainable`).
masked_lm.backbone.trainable = False
# Fit again.
masked_lm.fit(x=features, batch_size=2)
```
Preprocessed integer data.
```python
# Create preprocessed batch where 0 is the mask token.
features = {
"token_ids": np.array([[1, 2, 0, 4, 0, 6, 7, 8]] * 2),
"padding_mask": np.array([[1, 1, 1, 1, 1, 1, 1, 1]] * 2),
"mask_positions": np.array([[2, 4]] * 2),
}
# Labels are the original masked values.
labels = [[3, 5]] * 2
masked_lm = keras_nlp.models.DebertaV3MaskedLM.from_preset(
"deberta_v3_base_en",
preprocessor=None,
)
masked_lm.fit(x=features, y=labels, batch_size=2)
```
"""
def __init__(
self,
backbone,
preprocessor=None,
**kwargs,
):
# === Layers ===
self.backbone = backbone
self.preprocessor = preprocessor
self.masked_lm_head = MaskedLMHead(
vocabulary_size=backbone.vocabulary_size,
token_embedding=backbone.token_embedding,
intermediate_activation=keras.activations.gelu,
kernel_initializer=deberta_kernel_initializer(),
dtype=backbone.dtype_policy,
name="mlm_head",
)
# === Functional Model ===
inputs = {
**backbone.input,
"mask_positions": keras.Input(
shape=(None,), dtype="int32", name="mask_positions"
),
}
x = backbone(backbone.input)
outputs = self.masked_lm_head(x, inputs["mask_positions"])
super().__init__(
inputs=inputs,
outputs=outputs,
**kwargs,
)
# === Default compilation ===
self.compile(
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer=keras.optimizers.Adam(5e-5),
weighted_metrics=[keras.metrics.SparseCategoricalAccuracy()],
jit_compile=True,
)
@classproperty
def backbone_cls(cls):
return DebertaV3Backbone
@classproperty
def preprocessor_cls(cls):
return DebertaV3MaskedLMPreprocessor
@classproperty
def presets(cls):
return copy.deepcopy(backbone_presets)
| keras-nlp/keras_nlp/models/deberta_v3/deberta_v3_masked_lm.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/deberta_v3/deberta_v3_masked_lm.py",
"repo_id": "keras-nlp",
"token_count": 2189
} | 147 |
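A minimal sketch (not from the repository; toy dimensions assumed, keras-nlp with Keras 3 assumed installed) of the `keras_nlp.layers.MaskedLMHead` wiring used by the task above: the head gathers encoder outputs at `mask_positions` and returns per-position vocabulary logits.
import numpy as np
import keras
import keras_nlp

vocab_size, seq_len, hidden_dim = 100, 8, 16
head = keras_nlp.layers.MaskedLMHead(
    vocabulary_size=vocab_size,
    intermediate_activation="gelu",
)
sequence_output = keras.random.normal((2, seq_len, hidden_dim))
mask_positions = np.array([[2, 4]] * 2)
logits = head(sequence_output, mask_positions)
print(logits.shape)  # (2, 2, 100): one distribution per masked position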
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from keras_nlp.api_export import keras_nlp_export
from keras_nlp.backend import keras
from keras_nlp.layers.modeling.f_net_encoder import FNetEncoder
from keras_nlp.layers.modeling.position_embedding import PositionEmbedding
from keras_nlp.layers.modeling.reversible_embedding import ReversibleEmbedding
from keras_nlp.models.backbone import Backbone
from keras_nlp.models.f_net.f_net_presets import backbone_presets
from keras_nlp.utils.keras_utils import gelu_approximate
from keras_nlp.utils.python_utils import classproperty
def f_net_kernel_initializer(stddev=0.02):
return keras.initializers.RandomNormal(stddev=stddev)
def f_net_bias_initializer(stddev=0.02):
return keras.initializers.RandomNormal(stddev=stddev)
@keras_nlp_export("keras_nlp.models.FNetBackbone")
class FNetBackbone(Backbone):
"""A FNet encoder network.
This class implements a bi-directional Fourier Transform-based encoder as
described in ["FNet: Mixing Tokens with Fourier Transforms"](https://arxiv.org/abs/2105.03824).
It includes the embedding lookups and `keras_nlp.layers.FNetEncoder` layers,
but not the masked language model or next sentence prediction heads.
The default constructor gives a fully customizable, randomly initialized
FNet encoder with any number of layers and embedding dimensions. To
load preset architectures and weights, use the `from_preset()` constructor.
    Note: unlike other models, FNet does not take in a `"padding_mask"` input;
    the `"<pad>"` token is handled equivalently to all other tokens in the input
    sequence.
Disclaimer: Pre-trained models are provided on an "as is" basis, without
warranties or conditions of any kind.
Args:
vocabulary_size: int. The size of the token vocabulary.
num_layers: int. The number of FNet layers.
hidden_dim: int. The size of the FNet encoding and pooler layers.
intermediate_dim: int. The output dimension of the first Dense layer in
a two-layer feedforward network for each FNet layer.
dropout: float. Dropout probability for the embeddings and FNet encoder.
max_sequence_length: int. The maximum sequence length that this encoder
            can consume. If `None`, it defaults to the length of the input
            sequence. This determines the variable shape for positional
embeddings.
num_segments: int. The number of types that the 'segment_ids' input can
take.
dtype: string or `keras.mixed_precision.DTypePolicy`. The dtype to use
for model computations and weights. Note that some computations,
such as softmax and layer normalization, will always be done at
float32 precision regardless of dtype.
Examples:
```python
input_data = {
"token_ids": np.ones(shape=(1, 12), dtype="int32"),
"segment_ids": np.array([[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0]]),
}
    # Pretrained FNet encoder.
model = keras_nlp.models.FNetBackbone.from_preset("f_net_base_en")
model(input_data)
# Randomly initialized FNet encoder with a custom config.
model = keras_nlp.models.FNetBackbone(
vocabulary_size=32000,
num_layers=4,
hidden_dim=256,
intermediate_dim=512,
max_sequence_length=128,
)
model(input_data)
```
"""
def __init__(
self,
vocabulary_size,
num_layers,
hidden_dim,
intermediate_dim,
dropout=0.1,
max_sequence_length=512,
num_segments=4,
dtype=None,
**kwargs,
):
# === Layers ===
self.token_embedding = ReversibleEmbedding(
input_dim=vocabulary_size,
output_dim=hidden_dim,
embeddings_initializer=f_net_kernel_initializer(),
dtype=dtype,
name="token_embedding",
)
self.position_embedding = PositionEmbedding(
initializer=f_net_kernel_initializer(),
sequence_length=max_sequence_length,
dtype=dtype,
name="position_embedding",
)
self.segment_embedding = keras.layers.Embedding(
input_dim=num_segments,
output_dim=hidden_dim,
embeddings_initializer=f_net_kernel_initializer(),
dtype=dtype,
name="segment_embedding",
)
self.embeddings_add = keras.layers.Add(
dtype=dtype,
name="embeddings_add",
)
self.embeddings_layer_norm = keras.layers.LayerNormalization(
axis=-1,
epsilon=1e-12,
dtype=dtype,
name="embeddings_layer_norm",
)
self.embedding_projection = keras.layers.Dense(
hidden_dim,
kernel_initializer=f_net_kernel_initializer(),
bias_initializer=f_net_bias_initializer(),
dtype=dtype,
name="embedding_projection",
)
self.embeddings_dropout = keras.layers.Dropout(
dropout,
dtype=dtype,
name="embeddings_dropout",
)
self.transformer_layers = []
for i in range(num_layers):
layer = FNetEncoder(
intermediate_dim=intermediate_dim,
activation=gelu_approximate,
dropout=dropout,
layer_norm_epsilon=1e-12,
kernel_initializer=f_net_kernel_initializer(),
bias_initializer=f_net_bias_initializer(),
dtype=dtype,
name=f"f_net_layer_{i}",
)
self.transformer_layers.append(layer)
self.pooled_dense = keras.layers.Dense(
hidden_dim,
kernel_initializer=f_net_kernel_initializer(),
bias_initializer=f_net_bias_initializer(),
activation="tanh",
dtype=dtype,
name="pooled_dense",
)
# === Functional Model ===
token_id_input = keras.Input(
shape=(None,), dtype="int32", name="token_ids"
)
segment_id_input = keras.Input(
shape=(None,), dtype="int32", name="segment_ids"
)
# Embed tokens, positions, and segment ids.
tokens = self.token_embedding(token_id_input)
positions = self.position_embedding(tokens)
segments = self.segment_embedding(segment_id_input)
# Sum, normalize and apply dropout to embeddings.
x = self.embeddings_add((tokens, positions, segments))
x = self.embeddings_layer_norm(x)
x = self.embedding_projection(x)
x = self.embeddings_dropout(x)
# Apply successive FNet encoder blocks.
for transformer_layer in self.transformer_layers:
x = transformer_layer(x)
# Index of classification token in the vocabulary
cls_token_index = 0
# Construct the two FNet outputs. The pooled output is a dense layer on
# top of the [CLS] token.
sequence_output = x
pooled_output = self.pooled_dense(x[:, cls_token_index, :])
# Instantiate using Functional API Model constructor
super().__init__(
inputs={
"token_ids": token_id_input,
"segment_ids": segment_id_input,
},
outputs={
"sequence_output": sequence_output,
"pooled_output": pooled_output,
},
**kwargs,
)
# === Config ===
self.vocabulary_size = vocabulary_size
self.num_layers = num_layers
self.hidden_dim = hidden_dim
self.intermediate_dim = intermediate_dim
self.dropout = dropout
self.max_sequence_length = max_sequence_length
self.num_segments = num_segments
self.cls_token_index = cls_token_index
def get_config(self):
config = super().get_config()
config.update(
{
"vocabulary_size": self.vocabulary_size,
"num_layers": self.num_layers,
"hidden_dim": self.hidden_dim,
"intermediate_dim": self.intermediate_dim,
"dropout": self.dropout,
"max_sequence_length": self.max_sequence_length,
"num_segments": self.num_segments,
}
)
return config
@classproperty
def presets(cls):
return copy.deepcopy(backbone_presets)
| keras-nlp/keras_nlp/models/f_net/f_net_backbone.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/f_net/f_net_backbone.py",
"repo_id": "keras-nlp",
"token_count": 4020
} | 148 |
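A minimal sketch (not from the repository; toy shapes assumed) of the `keras_nlp.layers.FNetEncoder` block that `FNetBackbone` stacks `num_layers` times; because token mixing is done with a Fourier transform rather than self-attention, no attention or padding mask is passed.
import keras
import keras_nlp

encoder = keras_nlp.layers.FNetEncoder(intermediate_dim=64)
x = keras.random.normal((2, 12, 32))  # (batch, sequence_length, hidden_dim)
y = encoder(x)  # FFT-based token mixing followed by a feedforward block
print(y.shape)  # (2, 12, 32): the output shape matches the input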
# Copyright 2024 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from keras_nlp.backend import keras
from keras_nlp.backend import ops
from keras_nlp.models.gemma.gemma_backbone import GemmaBackbone
from keras_nlp.tests.test_case import TestCase
@pytest.mark.keras_3_only
class GemmaBackboneTest(TestCase):
def setUp(self):
self.init_kwargs = {
"vocabulary_size": 256128,
"num_layers": 2,
"num_query_heads": 4,
"num_key_value_heads": 4,
"hidden_dim": 128,
"intermediate_dim": 256,
"head_dim": 128,
"layer_norm_epsilon": 1e-6,
}
self.input_data = {
"token_ids": ops.ones((2, 5), dtype="int32"),
"padding_mask": ops.ones((2, 5), dtype="int32"),
}
def test_backbone_basics(self):
self.run_backbone_test(
cls=GemmaBackbone,
init_kwargs=self.init_kwargs,
input_data=self.input_data,
expected_output_shape=(2, 5, 128),
)
@pytest.mark.large
def test_saved_model(self):
self.run_model_saving_test(
cls=GemmaBackbone,
init_kwargs=self.init_kwargs,
input_data=self.input_data,
)
@pytest.mark.large
def test_smallest_preset(self):
self.run_preset_test(
cls=GemmaBackbone,
preset="gemma_2b_en",
input_data={
"token_ids": ops.array([[651, 4320, 8426, 25341, 235265]]),
"padding_mask": ops.ones((1, 5), dtype="int32"),
},
expected_output_shape=(1, 5, 2048),
# The forward pass from a preset should be stable!
expected_partial_output=ops.array(
[1.073359, 0.262374, 0.170238, 0.605402, 2.336161]
),
)
@pytest.mark.extra_large
def test_all_presets(self):
for preset in GemmaBackbone.presets:
self.run_preset_test(
cls=GemmaBackbone,
preset=preset,
input_data=self.input_data,
)
def test_architecture_characteristics(self):
model = GemmaBackbone(**self.init_kwargs)
self.assertEqual(model.count_params(), 33407616)
self.assertEqual(len(model.layers), 6)
def test_distribution(self):
if keras.backend.backend() != "jax":
return
devices = keras.distribution.list_devices("CPU")
if len(devices) == 1:
# Need more than 1 device for distribution testing.
return
device_mesh = keras.distribution.DeviceMesh(
shape=(1, len(devices)),
axis_names=("batch", "model"),
devices=devices,
)
layout_map = GemmaBackbone.get_layout_map(device_mesh)
distribution = keras.distribution.ModelParallel(device_mesh, layout_map)
with distribution.scope():
model = GemmaBackbone(**self.init_kwargs)
for w in model.weights:
if "token_embedding/embeddings" in w.path:
self.assertEqual(tuple(w.value.sharding.spec), (None, "model"))
if "attention/query/kernel" in w.path:
self.assertEqual(
tuple(w.value.sharding.spec), (None, "model", None)
)
if "attention/key/kernel" in w.path:
self.assertEqual(
tuple(w.value.sharding.spec), (None, "model", None)
)
if "attention/value/kernel" in w.path:
self.assertEqual(
tuple(w.value.sharding.spec), (None, "model", None)
)
if "attention/attention_output/kernel" in w.path:
self.assertEqual(
tuple(w.value.sharding.spec), (None, None, "model")
)
if "ffw_gating/kernel" in w.path:
self.assertEqual(tuple(w.value.sharding.spec), ("model", None))
if "ffw_gating_2/kernel" in w.path:
self.assertEqual(tuple(w.value.sharding.spec), ("model", None))
if "ffw_linearl" in w.path:
self.assertEqual(tuple(w.value.sharding.spec), (None, "model"))
| keras-nlp/keras_nlp/models/gemma/gemma_backbone_test.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/gemma/gemma_backbone_test.py",
"repo_id": "keras-nlp",
"token_count": 2360
} | 149 |
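A minimal sketch (assumptions: JAX backend; the flags must be set before JAX, and hence Keras, is imported) showing how the multi-device setup exercised by `test_distribution` can be reproduced on a single host with virtual CPU devices.
import os
os.environ["KERAS_BACKEND"] = "jax"
os.environ["XLA_FLAGS"] = "--xla_force_host_platform_device_count=8"
import keras  # noqa: E402

devices = keras.distribution.list_devices("CPU")
print(len(devices))  # 8 virtual CPU devices
device_mesh = keras.distribution.DeviceMesh(
    shape=(1, len(devices)),
    axis_names=("batch", "model"),
    devices=devices,
)
# `GemmaBackbone.get_layout_map(device_mesh)` then yields the shardings
# asserted in the test above.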
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_nlp.models.gpt_neo_x.gpt_neo_x_causal_lm_preprocessor import (
GPTNeoXCausalLMPreprocessor,
)
from keras_nlp.models.gpt_neo_x.gpt_neo_x_tokenizer import GPTNeoXTokenizer
from keras_nlp.tests.test_case import TestCase
class GPTNeoXCausalLMPreprocessorTest(TestCase):
def setUp(self):
self.vocab = ["!", "air", "Ġair", "plane", "Ġat", "port"]
self.vocab += ["<|endoftext|>"]
self.vocab = dict([(token, i) for i, token in enumerate(self.vocab)])
self.merges = ["Ġ a", "Ġ t", "Ġ i", "Ġ b", "a i", "p l", "n e"]
self.merges += ["Ġa t", "p o", "r t", "Ġt h", "ai r", "pl a", "po rt"]
self.merges += ["Ġai r", "Ġa i", "pla ne"]
self.tokenizer = GPTNeoXTokenizer(
vocabulary=self.vocab,
merges=self.merges,
)
self.init_kwargs = {
"tokenizer": self.tokenizer,
"sequence_length": 8,
}
self.input_data = ["airplane at airport"]
def test_preprocessor_basics(self):
self.run_preprocessor_test(
cls=GPTNeoXCausalLMPreprocessor,
init_kwargs=self.init_kwargs,
input_data=self.input_data,
expected_output=(
{
"token_ids": [[6, 1, 3, 4, 2, 5, 6, 0]],
"padding_mask": [[1, 1, 1, 1, 1, 1, 1, 0]],
},
[[1, 3, 4, 2, 5, 6, 0, 0]], # Pass through labels.
[[1, 1, 1, 1, 1, 1, 0, 0]], # Pass through sample_weights.
),
)
def test_no_start_end_token(self):
input_data = ["airplane at airport"] * 4
preprocessor = GPTNeoXCausalLMPreprocessor(
**self.init_kwargs,
add_start_token=False,
add_end_token=False,
)
x, y, sw = preprocessor(input_data)
self.assertAllEqual(x["token_ids"], [[1, 3, 4, 2, 5, 0, 0, 0]] * 4)
self.assertAllEqual(x["padding_mask"], [[1, 1, 1, 1, 1, 0, 0, 0]] * 4)
self.assertAllEqual(y, [[3, 4, 2, 5, 0, 0, 0, 0]] * 4)
self.assertAllEqual(sw, [[1, 1, 1, 1, 0, 0, 0, 0]] * 4)
def test_generate_preprocess(self):
input_data = "airplane at airport"
preprocessor = GPTNeoXCausalLMPreprocessor(**self.init_kwargs)
x = preprocessor.generate_preprocess(input_data)
self.assertAllEqual(x["token_ids"], [6, 1, 3, 4, 2, 5, 0, 0])
self.assertAllEqual(x["padding_mask"], [1, 1, 1, 1, 1, 1, 0, 0])
def test_generate_postprocess(self):
input_data = {
"token_ids": tf.constant([6, 1, 3, 4, 2, 5, 0, 0]),
"padding_mask": tf.cast([1, 1, 1, 1, 1, 1, 0, 0], dtype="bool"),
}
preprocessor = GPTNeoXCausalLMPreprocessor(**self.init_kwargs)
x = preprocessor.generate_postprocess(input_data)
self.assertAllEqual(x, "airplane at airport")
| keras-nlp/keras_nlp/models/gpt_neo_x/gpt_neo_x_causal_lm_preprocessor_test.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/gpt_neo_x/gpt_neo_x_causal_lm_preprocessor_test.py",
"repo_id": "keras-nlp",
"token_count": 1658
} | 150 |
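A minimal sketch (using the expected values from the tests above) making the causal-LM label construction explicit: labels are the token ids shifted left by one position, so position `i` is trained to predict token `i + 1`.
import numpy as np

token_ids = np.array([6, 1, 3, 4, 2, 5, 6, 0])  # x["token_ids"][0] above
labels = np.array([1, 3, 4, 2, 5, 6, 0, 0])  # y[0] above
assert (token_ids[1:] == labels[:-1]).all()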