| text (stringlengths 5-261k) | id (stringlengths 16-106) | metadata (dict) | __index_level_0__ (int64, 0-266) |
|---|---|---|---|
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_nlp.backend import keras
from keras_nlp.backend import ops
from keras_nlp.layers.modeling.rotary_embedding import RotaryEmbedding
from keras_nlp.utils.keras_utils import clone_initializer
# This is just a self-attention layer in Mistral. But it can be generalized
# to use the `keras_nlp.layers.CachedMultiHeadAttention` API. Since this layer
# implements grouped-query attention and sliding window attention, it might be
# useful outside of Mistral itself.
# TODO(tirthasheshpatel): Generalize the attention layer
# TODO(tirthasheshpatel): Merge `LlamaAttention` with this layer
# TODO(tirthasheshpatel): Use flash attention
class CachedMistralAttention(keras.layers.Layer):
"""A cached grounded query attention layer with sliding window."""
def __init__(
self,
num_query_heads,
num_key_value_heads,
rope_max_wavelength=10000,
rope_scaling_factor=1.0,
kernel_initializer="glorot_uniform",
sliding_window=512,
dropout=0,
**kwargs,
):
super().__init__(**kwargs)
self._num_query_heads = num_query_heads
self._num_key_value_heads = num_key_value_heads
self._sliding_window = sliding_window
self._dropout = dropout
self._num_key_value_groups = num_query_heads // num_key_value_heads
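# With grouped-query attention, each key/value head is shared by
# `num_query_heads // num_key_value_heads` query heads.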
self._rope_max_wavelength = rope_max_wavelength
self._kernel_initializer = keras.initializers.get(
clone_initializer(kernel_initializer)
)
self._rope_scaling_factor = rope_scaling_factor
def build(self, inputs_shape):
# Einsum variables:
# b = batch size
# q = query length
# k = key/value length
# m = model dim
# u = num query heads
# v = num key/value heads
# h = head dim
self._hidden_dim = inputs_shape[-1]
self._head_dim = self._hidden_dim // self._num_query_heads
self._query_dense = keras.layers.EinsumDense(
equation="bqm,muh->bquh",
output_shape=(None, self._num_query_heads, self._head_dim),
kernel_initializer=self._kernel_initializer,
dtype=self.dtype_policy,
name="query",
)
self._query_dense.build(inputs_shape)
self._key_dense = keras.layers.EinsumDense(
equation="bkm,mvh->bkvh",
output_shape=(
None,
self._num_key_value_heads,
self._head_dim,
),
kernel_initializer=self._kernel_initializer,
dtype=self.dtype_policy,
name="key",
)
self._key_dense.build(inputs_shape)
self._value_dense = keras.layers.EinsumDense(
equation="bkm,mvh->bkvh",
output_shape=(
None,
self._num_key_value_heads,
self._head_dim,
),
kernel_initializer=self._kernel_initializer,
dtype=self.dtype_policy,
name="value",
)
self._value_dense.build(inputs_shape)
self._softmax = keras.layers.Softmax(
axis=-1,
dtype="float32",
name="attention_softmax",
)
self._dropout_layer = keras.layers.Dropout(
rate=self._dropout,
dtype=self.dtype_policy,
)
self._output_dense = keras.layers.EinsumDense(
equation="bquh,uhm->bqm",
output_shape=(None, self._hidden_dim),
kernel_initializer=self._kernel_initializer,
dtype=self.dtype_policy,
name="attention_output",
)
self._output_dense.build(
(None, None, self._num_query_heads, self._head_dim)
)
self.rotary_embedding_layer = RotaryEmbedding(
max_wavelength=self._rope_max_wavelength,
scaling_factor=self._rope_scaling_factor,
dtype=self.dtype_policy,
)
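# Score einsum: (b, q, u, h) x (b, k, u, h) -> (b, u, q, k); the combine
# einsum maps the attention probabilities back to (b, q, u, h).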
self._dot_product_equation = "bquh,bkuh->buqk"
self._combine_equation = "buqk,bkuh->bquh"
self.built = True
def call(
self,
hidden_states,
attention_mask=None,
cache=None,
cache_update_index=None,
training=None,
):
start_index = (
cache_update_index if cache_update_index is not None else 0
)
# If `cache_update_index` is a tensor, RotaryEmbedding expects it
# to have dtype `self.compute_dtype`.
start_index = ops.cast(
start_index, self.rotary_embedding_layer.compute_dtype
)
query = self._query_dense(hidden_states)
# Compute RoPE for queries
query = self.rotary_embedding_layer(query, start_index=start_index)
def _compute_key_value(x):
key, value = self._key_dense(x), self._value_dense(x)
# Compute RoPE for keys
key = self.rotary_embedding_layer(key, start_index=start_index)
return key, value
if cache is not None:
key_cache = cache[:, 0, ...]
value_cache = cache[:, 1, ...]
if cache_update_index is None:
key = key_cache
value = value_cache
else:
key_update, value_update = _compute_key_value(hidden_states)
start = [0, cache_update_index, 0, 0]
key = ops.slice_update(key_cache, start, key_update)
value = ops.slice_update(value_cache, start, value_update)
cache = ops.stack((key, value), axis=1)
else:
if cache_update_index is not None:
raise ValueError(
"`cache_update_index` should not be set if `cache` is "
f"`None`. Received: cache={cache}, "
f"cache_update_index={cache_update_index}"
)
key, value = _compute_key_value(hidden_states)
# [batch_shape, seq_len, num_key_value_heads, head_dim]
# -> [batch_shape, seq_len, num_heads, head_dim]
key = ops.repeat(key, repeats=self._num_key_value_groups, axis=2)
value = ops.repeat(value, repeats=self._num_key_value_groups, axis=2)
attention_output = self._compute_attention(
query, key, value, attention_mask
)
attention_output = self._dropout_layer(
attention_output, training=training
)
attention_output = self._output_dense(attention_output)
if cache is not None:
return attention_output, cache
return attention_output
def _masked_softmax(self, attention_scores, attention_mask=None):
if attention_mask is not None:
return self._softmax(
attention_scores, attention_mask[:, None, :, :]
)
return self._softmax(attention_scores)
def _compute_attention(self, query, key, value, attention_mask=None):
attention_scores = ops.einsum(self._dot_product_equation, query, key)
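# Scaled dot-product attention: scores are divided by sqrt(head_dim)
# before the (masked) softmax.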
norm_factor = ops.sqrt(ops.cast(self._head_dim, self.compute_dtype))
attention_scores = attention_scores / norm_factor
attention_scores = self._masked_softmax(
attention_scores, attention_mask
)
attention_scores = ops.cast(attention_scores, self.compute_dtype)
attention_output = ops.einsum(
self._combine_equation, attention_scores, value
)
return attention_output
def get_config(self):
config = super().get_config()
config.update(
{
"num_query_heads": self._num_query_heads,
"num_key_value_heads": self._num_key_value_heads,
"rope_max_wavelength": self._rope_max_wavelength,
"rope_scaling_factor": self._rope_scaling_factor,
"kernel_initializer": keras.initializers.serialize(
self._kernel_initializer
),
"sliding_window": self._sliding_window,
"dropout": self._dropout,
}
)
return config
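# A minimal, hedged usage sketch (not part of the original file); the sizes
# below are illustrative only: 8 query heads share 4 key/value heads over a
# 64-dim hidden state.
def _example_cached_mistral_attention():
    layer = CachedMistralAttention(num_query_heads=8, num_key_value_heads=4)
    hidden_states = ops.ones((2, 10, 64))  # (batch, seq_len, hidden_dim)
    # Without a `cache`, this is plain grouped-query self-attention.
    return layer(hidden_states)  # -> (2, 10, 64)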
| keras-nlp/keras_nlp/models/mistral/mistral_attention.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/mistral/mistral_attention.py",
"repo_id": "keras-nlp",
"token_count": 4089
} | 151 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from keras_nlp.backend import ops
from keras_nlp.models.opt.opt_backbone import OPTBackbone
from keras_nlp.tests.test_case import TestCase
class OPTBackboneTest(TestCase):
def setUp(self):
self.init_kwargs = {
"vocabulary_size": 10,
"num_layers": 2,
"num_heads": 2,
"hidden_dim": 2,
"intermediate_dim": 4,
"max_sequence_length": 5,
}
self.input_data = {
"token_ids": ops.ones((2, 5), dtype="int32"),
"padding_mask": ops.ones((2, 5), dtype="int32"),
}
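# With the tiny config above, the backbone output is
# (batch, sequence_length, hidden_dim) = (2, 5, 2).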
def test_backbone_basics(self):
self.run_backbone_test(
cls=OPTBackbone,
init_kwargs=self.init_kwargs,
input_data=self.input_data,
expected_output_shape=(2, 5, 2),
)
@pytest.mark.large
def test_saved_model(self):
self.run_model_saving_test(
cls=OPTBackbone,
init_kwargs=self.init_kwargs,
input_data=self.input_data,
)
@pytest.mark.large
def test_smallest_preset(self):
self.run_preset_test(
cls=OPTBackbone,
preset="opt_125m_en",
input_data={
"token_ids": ops.array([[133, 2119, 6219, 23602, 4]]),
"padding_mask": ops.ones((1, 5), dtype="int32"),
},
expected_output_shape=(1, 5, 768),
# The forward pass from a preset should be stable!
expected_partial_output=ops.array(
[-0.246, -1.004, -0.072, 0.097, 0.533]
),
)
@pytest.mark.extra_large
def test_all_presets(self):
for preset in OPTBackbone.presets:
self.run_preset_test(
cls=OPTBackbone,
preset=preset,
input_data=self.input_data,
)
| keras-nlp/keras_nlp/models/opt/opt_backbone_test.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/opt/opt_backbone_test.py",
"repo_id": "keras-nlp",
"token_count": 1177
} | 152 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
from keras_nlp.models.t5.t5_tokenizer import T5Tokenizer
from keras_nlp.tests.test_case import TestCase
class T5TokenizerTest(TestCase):
def setUp(self):
self.init_kwargs = {
# Generated using create_t5_test_proto.py
"proto": os.path.join(self.get_test_data_dir(), "t5_test_vocab.spm")
}
self.input_data = ["the quick brown fox", "the earth is round"]
def test_tokenizer_basics(self):
self.run_preprocessing_layer_test(
cls=T5Tokenizer,
init_kwargs=self.init_kwargs,
input_data=self.input_data,
expected_output=[[4, 9, 5, 7], [4, 6, 8, 10]],
)
def test_errors_missing_special_tokens(self):
with self.assertRaises(ValueError):
T5Tokenizer(
# Generated using create_no_special_token_proto.py
proto=os.path.join(
self.get_test_data_dir(), "no_special_token_vocab.spm"
)
)
@pytest.mark.large
def test_smallest_preset(self):
for preset in T5Tokenizer.presets:
self.run_preset_test(
cls=T5Tokenizer,
preset=preset,
input_data=["The quick brown fox."],
expected_output=[[37, 1704, 4216, 3, 20400, 5]],
)
@pytest.mark.extra_large
def test_all_presets(self):
for preset in T5Tokenizer.presets:
self.run_preset_test(
cls=T5Tokenizer,
preset=preset,
input_data=self.input_data,
)
| keras-nlp/keras_nlp/models/t5/t5_tokenizer_test.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/t5/t5_tokenizer_test.py",
"repo_id": "keras-nlp",
"token_count": 1012
} | 153 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from keras_nlp.models.whisper.whisper_tokenizer import WhisperTokenizer
from keras_nlp.tests.test_case import TestCase
class WhisperTokenizerTest(TestCase):
def setUp(self):
self.vocab = ["!", "air", "Ġair", "plane", "Ġat", "port"]
self.vocab += ["<|endoftext|>"]
self.vocab = dict([(token, i) for i, token in enumerate(self.vocab)])
self.merges = ["Ġ a", "Ġ t", "Ġ i", "Ġ b", "a i", "p l", "n e"]
self.merges += ["Ġa t", "p o", "r t", "Ġt h", "ai r", "pl a", "po rt"]
self.merges += ["Ġai r", "Ġa i", "pla ne"]
self.special_tokens = {
"<|startoftranscript|>": 9,
"<|endoftext|>": 10,
"<|notimestamps|>": 11,
"<|transcribe|>": 12,
"<|translate|>": 13,
}
self.language_tokens = {
"<|en|>": 14,
"<|fr|>": 15,
}
self.init_kwargs = {
"vocabulary": self.vocab,
"merges": self.merges,
"special_tokens": self.special_tokens,
"language_tokens": self.language_tokens,
}
self.input_data = [
" airplane at airport<|endoftext|>",
" airplane airport",
]
def test_tokenizer_basics(self):
self.run_preprocessing_layer_test(
cls=WhisperTokenizer,
init_kwargs=self.init_kwargs,
input_data=self.input_data,
expected_output=[[2, 3, 4, 2, 5, 10], [2, 3, 2, 5]],
)
def test_special_tokens(self):
tokenizer = WhisperTokenizer(**self.init_kwargs)
self.assertEqual(tokenizer.bos_token_id, 9)
self.assertEqual(tokenizer.eos_token_id, 10)
self.assertEqual(tokenizer.pad_token_id, 10)
self.assertEqual(tokenizer.no_timestamps_token_id, 11)
self.assertEqual(tokenizer.translate_token_id, 13)
self.assertEqual(tokenizer.transcribe_token_id, 12)
def test_errors_missing_special_tokens(self):
with self.assertRaises(ValueError):
WhisperTokenizer(
vocabulary=["a", "b", "c"], merges=[], special_tokens={}
)
@pytest.mark.large
def test_smallest_preset(self):
self.run_preset_test(
cls=WhisperTokenizer,
preset="whisper_tiny_en",
input_data=["The quick brown fox."],
expected_output=[[464, 2068, 7586, 21831, 13]],
)
@pytest.mark.extra_large
def test_all_presets(self):
for preset in WhisperTokenizer.presets:
self.run_preset_test(
cls=WhisperTokenizer,
preset=preset,
input_data=self.input_data,
)
| keras-nlp/keras_nlp/models/whisper/whisper_tokenizer_test.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/whisper/whisper_tokenizer_test.py",
"repo_id": "keras-nlp",
"token_count": 1556
} | 154 |
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import string
from keras_nlp.backend import keras
from keras_nlp.backend import ops
_CHR_IDX = string.ascii_lowercase
def _build_proj_equation(free_dims, bound_dims, output_dims):
"""
Builds an einsum equation for projections inside multi-head attention.
"""
input_str = ""
kernel_str = ""
output_str = ""
bias_axes = ""
letter_offset = 0
for i in range(free_dims):
char = _CHR_IDX[i + letter_offset]
input_str += char
output_str += char
letter_offset += free_dims
for i in range(bound_dims):
char = _CHR_IDX[i + letter_offset]
input_str += char
kernel_str += char
letter_offset += bound_dims
for i in range(output_dims):
char = _CHR_IDX[i + letter_offset]
kernel_str += char
output_str += char
bias_axes += char
equation = "%s,%s->%s" % (input_str, kernel_str, output_str)
return equation, bias_axes, len(output_str)
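# For example (illustrative values): _build_proj_equation(2, 1, 2) returns
# ("abc,cde->abde", "de", 4), i.e. a projection from [B, T, E] onto
# [B, T, num_heads, head_dim].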
def _get_output_shape(output_rank, known_last_dims):
return [None] * (output_rank - len(known_last_dims)) + list(known_last_dims)
def _rel_shift(x, klen=-1):
"""
Performs relative shift to form the relative attention score.
"""
x = ops.transpose(x, [2, 3, 0, 1])
x_size = ops.shape(x)
x = ops.reshape(x, [x_size[1], x_size[0], x_size[2], x_size[3]])
x = ops.slice(
x, [1, 0, 0, 0], [x_size[1] - 1, x_size[0], x_size[2], x_size[3]]
)
x = ops.reshape(x, [x_size[0], x_size[1] - 1, x_size[2], x_size[3]])
x = ops.slice(x, [0, 0, 0, 0], [x_size[0], klen, x_size[2], x_size[3]])
x = ops.transpose(x, [2, 3, 0, 1])
return x
class TwoStreamRelativeAttention(keras.layers.MultiHeadAttention):
"""Two-stream relative self-attention for XLNet.
In XLNet, each token has two associated vectors at each self-attention layer,
the content stream (h) and the query stream (g). The content stream is the
self-attention stream as in Transformer XL and represents the context and
content (the token itself). The query stream only has access to contextual
information and the position, but not the content.
This layer shares the same build signature as `keras.layers.MultiHeadAttention`
but has different input/output projections.
We use the notations `B`, `T`, `S`, `M`, `L`, `E`, `P`, `dim`, `num_heads`
below, where
`B` is the batch dimension, `T` is the target sequence length,
`S` is the source sequence length, `M` is the length of the state or memory,
`L` is the length of relative positional encoding, `E` is the last dimension
of query input, `P` is the number of predictions, `dim` is the dimensionality
of the encoder layers, and `num_heads` is the number of attention heads.
Args:
content_stream: `Tensor` of shape `[B, T, dim]`.
content_attention_bias: Bias `Tensor` for content based attention of shape
`[num_heads, dim]`.
positional_attention_bias: Bias `Tensor` for position based attention of
shape `[num_heads, dim]`.
query_stream: `Tensor` of shape `[B, P, dim]`.
target_mapping: `Tensor` of shape `[B, P, S]`.
relative_position_encoding: Relative positional encoding `Tensor` of
shape `[B, L, dim]`.
segment_matrix: Optional `Tensor` representing segmentation IDs used in
XLNet of shape `[B, S, S + M]`.
segment_encoding: Optional `Tensor` representing the segmentation
encoding as used in XLNet of shape `[2, num_heads, dim]`.
segment_attention_bias: Optional trainable bias parameter added to the
query head when calculating the segment-based attention score used
in XLNet of shape `[num_heads, dim]`.
state: Optional `Tensor` of shape `[B, M, E]`.
If passed, this is also attended over as in Transformer XL.
content_attention_mask: a boolean mask of shape `[B, T, S]` that
prevents attention to certain positions for content attention
computation.
query_attention_mask: a boolean mask of shape `[B, T, S]` that
prevents attention to certain positions for query attention
computation.
"""
def __init__(self, kernel_initializer="glorot_uniform", **kwargs):
super().__init__(kernel_initializer=kernel_initializer, **kwargs)
def _get_common_kwargs_for_sublayer(self):
common_kwargs = dict(
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activity_regularizer=self._activity_regularizer,
kernel_constraint=self._kernel_constraint,
bias_constraint=self._bias_constraint,
)
return common_kwargs
def build(self, content_stream_shape):
self._use_bias = False
self._query_shape = content_stream_shape
self._key_shape = content_stream_shape
self._value_shape = content_stream_shape
free_dims = len(self._query_shape) - 1
einsum_equation, bias_axes, output_rank = _build_proj_equation(
free_dims, bound_dims=1, output_dims=2
)
self._query_dense = keras.layers.EinsumDense(
einsum_equation,
output_shape=_get_output_shape(
output_rank - 1, [self._num_heads, self._key_dim]
),
bias_axes=bias_axes if self._use_bias else None,
dtype=self.dtype_policy,
name="query",
**self._get_common_kwargs_for_sublayer(),
)
self._query_dense.build(self._query_shape)
einsum_equation, bias_axes, output_rank = _build_proj_equation(
len(self._key_shape) - 1, bound_dims=1, output_dims=2
)
self._key_dense = keras.layers.EinsumDense(
einsum_equation,
output_shape=_get_output_shape(
output_rank - 1, [self._num_heads, self._key_dim]
),
bias_axes=bias_axes if self._use_bias else None,
dtype=self.dtype_policy,
name="key",
**self._get_common_kwargs_for_sublayer(),
)
self._key_dense.build(self._key_shape)
einsum_equation, bias_axes, output_rank = _build_proj_equation(
len(self._value_shape) - 1, bound_dims=1, output_dims=2
)
self._value_dense = keras.layers.EinsumDense(
einsum_equation,
output_shape=_get_output_shape(
output_rank - 1, [self._num_heads, self._value_dim]
),
bias_axes=bias_axes if self._use_bias else None,
dtype=self.dtype_policy,
name="value",
**self._get_common_kwargs_for_sublayer(),
)
self._value_dense.build(self._value_shape)
free_dims = len(self._query_shape) - 1
_, _, output_rank = _build_proj_equation(
free_dims, bound_dims=2, output_dims=1
)
self._output_dense = keras.layers.EinsumDense(
"ibnd,hnd->ibh",
output_shape=_get_output_shape(
output_rank - 1, [self._query_shape[-1]]
),
bias_axes=None,
dtype=self.dtype_policy,
name="attention_output",
**self._get_common_kwargs_for_sublayer(),
)
self._output_dense.build(
self._value_dense.compute_output_shape(self._value_dim)
)
einsum_equation, _, output_rank = _build_proj_equation(
len(self._key_shape) - 1, bound_dims=1, output_dims=2
)
self._encoding_dense = keras.layers.EinsumDense(
einsum_equation,
output_shape=_get_output_shape(
output_rank - 1, [self._num_heads, self._key_dim]
),
bias_axes=None,
dtype=self.dtype_policy,
name="encoding",
**self._get_common_kwargs_for_sublayer(),
)
self._encoding_dense.build(self._key_shape)
self._build_attention(output_rank)
self.built = True
def compute_attention(
self,
query,
key,
value,
position,
content_attention_bias,
positional_attention_bias,
segment_matrix=None,
segment_encoding=None,
segment_attention_bias=None,
attention_mask=None,
):
"""Computes the attention.
This function defines the computation inside `call` with projected
multihead Q, K, V, R inputs.
We use the notations `B`, `T`, `S`, `M`, `L`, `num_heads`, `key_dim`
below, where
`B` is the batch dimension, `T` is the target sequence length,
`S` is the source sequence length, `M` is the length of the state,
`L` is the length of relative positional encoding, `num_heads` is
number of attention heads and `key_dim` is size of each attention head
for query and key.
Args:
query: Projected query `Tensor` of shape
`[B, T, num_heads, key_dim]`.
key: Projected key `Tensor` of shape
`[B, S + M, num_heads, key_dim]`.
value: Projected value `Tensor` of shape
`[B, S + M, num_heads, key_dim]`.
position: Projected position `Tensor` of shape
`[B, L, num_heads, key_dim]`.
content_attention_bias: Trainable bias parameter added to the query
head when calculating the content-based attention score.
positional_attention_bias: Trainable bias parameter added to the
query head when calculating the position-based attention score.
segment_matrix: Optional `Tensor` representing segmentation IDs
used in XLNet.
segment_encoding: Optional trainable `Tensor` representing the
segmentation encoding as used in XLNet.
segment_attention_bias: Optional trainable bias parameter added
to the query head when calculating the segment-based attention
score used in XLNet.
attention_mask: (default None) Optional mask that is added to
attention logits. If state is not None, the mask's source sequence
dimension should also cover the memory length M.
Returns:
attention_output: Multi-headed output of attention computation of
shape `[B, S, num_heads, key_dim]`.
"""
content_attention = ops.einsum(
self._dot_product_equation, key, query + content_attention_bias
)
positional_attention = ops.einsum(
self._dot_product_equation,
position,
query + positional_attention_bias,
)
positional_attention = _rel_shift(
positional_attention, klen=ops.shape(content_attention)[3]
)
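# `segment_encoding` holds two rows of per-head biases; the `ops.where`
# below uses the boolean `segment_matrix` to select, for every query/key
# pair, which of the two segment-attention terms applies.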
if segment_matrix is not None:
segment_attention = ops.einsum(
"bind,snd->bnis",
query + segment_attention_bias,
segment_encoding,
)
target_shape = ops.shape(positional_attention)
segment_attention = ops.where(
ops.broadcast_to(
ops.expand_dims(segment_matrix, 1), target_shape
),
ops.broadcast_to(segment_attention[:, :, :, 1:], target_shape),
ops.broadcast_to(segment_attention[:, :, :, :1], target_shape),
)
attention_sum = (
content_attention + positional_attention + segment_attention
)
else:
attention_sum = content_attention + positional_attention
attention_scores = ops.multiply(
attention_sum, 1.0 / math.sqrt(float(self._key_dim))
)
attention_scores = self._masked_softmax(
attention_scores, attention_mask
)
attention_output = self._dropout_layer(attention_scores)
attention_output = ops.einsum(
self._combine_equation, attention_output, value
)
return attention_output
def call(
self,
content_stream,
content_attention_bias,
positional_attention_bias,
relative_position_encoding,
query_stream=None,
target_mapping=None,
segment_matrix=None,
segment_encoding=None,
segment_attention_bias=None,
state=None,
content_attention_mask=None,
query_attention_mask=None,
):
"""Compute multi-head relative attention over inputs.
We use the notations `B`, `T`, `M`, `E` below, where
`B` is the batch dimension, `T` is the target sequence length,
`M` is the length of the state or memory and `E` is the last
dimension of query input.
Args:
content_stream: The content representation, commonly referred to as h.
This serves a similar role to the standard hidden states in
Transformer-XL.
content_attention_bias: A trainable bias parameter added to the query
head when calculating the content-based attention score.
positional_attention_bias: A trainable bias parameter added to the
query head when calculating the position-based attention score.
query_stream: The query representation, commonly referred to as g.
This only has access to contextual information and position, but
not content. If not provided, then this is
MultiHeadRelativeAttention with self-attention.
relative_position_encoding: relative positional encoding for key
and value.
target_mapping: Optional `Tensor` representing the target mapping
used in partial prediction.
segment_matrix: Optional `Tensor` representing segmentation IDs
used in XLNet.
segment_encoding: Optional `Tensor` representing the segmentation
encoding as used in XLNet.
segment_attention_bias: Optional trainable bias parameter added
to the query head when calculating the segment-based attention
score.
state: (default None) optional state. If passed, this is also
attended over as in TransformerXL and XLNet.
content_attention_mask: (default None) Optional mask that is added
to content attention logits. If state is not None, the mask's
source sequence dimension should also cover the memory length M.
query_attention_mask: (default None) Optional mask that is added to
query attention logits. If state is not None, the mask's source
sequence dimension should also cover the memory length M.
Returns:
content_attention_output, query_attention_output: the results of the
computation, both of shape `[B, T, E]`.
"""
if state is not None and len(state.shape) > 1:
content_and_memory_stream = ops.concatenate(
[state, content_stream], 1
)
else:
content_and_memory_stream = content_stream
# `query` = [B, T, N, H]
query = self._query_dense(content_stream)
# `key` = [B, S + M, N, H]
key = self._key_dense(content_and_memory_stream)
# `value` = [B, S + M, N, H]
value = self._value_dense(content_and_memory_stream)
# `position` = [B, L, N, H]
position = self._encoding_dense(relative_position_encoding)
content_attention_output = self.compute_attention(
query=query,
key=key,
value=value,
position=position,
content_attention_bias=content_attention_bias,
positional_attention_bias=positional_attention_bias,
segment_matrix=segment_matrix,
segment_encoding=segment_encoding,
segment_attention_bias=segment_attention_bias,
attention_mask=content_attention_mask,
)
# `content_attention_output` = [B, S, N, H]
content_attention_output = self._output_dense(content_attention_output)
query_attention_output = None
if query_stream is not None:
query = self._query_dense(query_stream)
if target_mapping is not None:
query = ops.einsum("bmnd,bml->blnd", query, target_mapping)
query_attention_output = self.compute_attention(
query=query,
key=key,
value=value,
position=position,
content_attention_bias=content_attention_bias,
positional_attention_bias=positional_attention_bias,
segment_matrix=segment_matrix,
segment_encoding=segment_encoding,
segment_attention_bias=segment_attention_bias,
attention_mask=query_attention_mask,
)
query_attention_output = ops.einsum(
"blnd,bml->bmnd", query_attention_output, target_mapping
)
else:
query_attention_output = self.compute_attention(
query=query,
key=key,
value=value,
position=position,
content_attention_bias=content_attention_bias,
positional_attention_bias=positional_attention_bias,
segment_matrix=segment_matrix,
segment_encoding=segment_encoding,
segment_attention_bias=segment_attention_bias,
attention_mask=query_attention_mask,
)
query_attention_output = self._output_dense(query_attention_output)
return content_attention_output, query_attention_output
| keras-nlp/keras_nlp/models/xlnet/relative_attention.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/xlnet/relative_attention.py",
"repo_id": "keras-nlp",
"token_count": 8545
} | 155 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_nlp.samplers.serialization import deserialize
from keras_nlp.samplers.serialization import get
from keras_nlp.samplers.serialization import serialize
from keras_nlp.samplers.top_k_sampler import TopKSampler
from keras_nlp.tests.test_case import TestCase
class SerializationTest(TestCase):
def test_serialization(self):
sampler = TopKSampler(k=5)
restored = deserialize(serialize(sampler))
self.assertDictEqual(sampler.get_config(), restored.get_config())
def test_get(self):
# Test get from string.
identifier = "top_k"
sampler = get(identifier)
self.assertIsInstance(sampler, TopKSampler)
# Test dict identifier.
original_sampler = TopKSampler(k=7)
config = serialize(original_sampler)
restored_sampler = get(config)
self.assertDictEqual(
serialize(restored_sampler),
serialize(original_sampler),
)
# Test identifier is already a sampler instance.
original_sampler = TopKSampler(k=7)
restored_sampler = get(original_sampler)
self.assertEqual(original_sampler, restored_sampler)
| keras-nlp/keras_nlp/samplers/serialization_test.py/0 | {
"file_path": "keras-nlp/keras_nlp/samplers/serialization_test.py",
"repo_id": "keras-nlp",
"token_count": 633
} | 156 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_nlp.api_export import keras_nlp_export
from keras_nlp.tokenizers import tokenizer
from keras_nlp.utils.tensor_utils import assert_tf_text_installed
from keras_nlp.utils.tensor_utils import convert_to_ragged_batch
from keras_nlp.utils.tensor_utils import is_int_dtype
try:
import tensorflow_text as tf_text
except ImportError:
tf_text = None
@keras_nlp_export("keras_nlp.tokenizers.UnicodeCodepointTokenizer")
class UnicodeCodepointTokenizer(tokenizer.Tokenizer):
"""A unicode character tokenizer layer.
This tokenizer is a vocabulary free tokenizer which tokenizes text as
unicode character codepoints.
Tokenizer outputs can either be padded and truncated with a
`sequence_length` argument, or left un-truncated. The exact output will
depend on the rank of the input tensors.
If input is a batch of strings (rank > 0):
By default, the layer will output a `tf.RaggedTensor` where the last
dimension of the output is ragged. If `sequence_length` is set, the layer
will output a dense `tf.Tensor` where all inputs have been padded or
truncated to `sequence_length`.
If input is a scalar string (rank == 0):
By default, the layer will output a dense `tf.Tensor` with static shape
`[None]`. If `sequence_length` is set, the output will be
a dense `tf.Tensor` of shape `[sequence_length]`.
The output dtype can be controlled via the `dtype` argument, which should be
an integer type ("int16", "int32", etc.).
Args:
lowercase: If `True`, the input text will be first lowered before
tokenization.
sequence_length: If set, the output will be converted to a dense
tensor and padded/trimmed so all outputs are of sequence_length.
normalization_form: One of the following string values (None, 'NFC',
'NFKC', 'NFD', 'NFKD'). If set will normalize unicode to the given
form before tokenizing.
errors: One of ('replace', 'ignore', 'strict'). Specifies the
`detokenize()` behavior when an invalid codepoint is encountered.
A value of `'strict'` will cause the tokenizer to produce an
`InvalidArgument` error on any invalid input formatting. A value of
`'replace'` will cause the tokenizer to replace any invalid
formatting in the input with the replacement_char codepoint.
A value of `'ignore'` will cause the tokenizer to skip any invalid
formatting in the input and produce no corresponding output
character.
replacement_char: The unicode codepoint to use in place of invalid
codepoints. Defaults to `65533` (U+FFFD).
input_encoding: One of ("UTF-8", "UTF-16-BE", or "UTF-32-BE").
The encoding of the input text. Defaults to `"UTF-8"`.
output_encoding: One of ("UTF-8", "UTF-16-BE", or "UTF-32-BE").
The encoding of the output text. Defaults to `"UTF-8"`.
vocabulary_size: Set the vocabulary size, by clamping all codepoints
to the range [0, vocabulary_size). Effectively this will make the
`vocabulary_size - 1` id the OOV value.
Examples:
Basic Usage.
>>> inputs = "Unicode Tokenizer"
>>> tokenizer = keras_nlp.tokenizers.UnicodeCodepointTokenizer()
>>> outputs = tokenizer(inputs)
>>> np.array(outputs)
array([117, 110, 105, 99, 111, 100, 101, 32, 116, 111, 107, 101, 110,
105, 122, 101, 114], dtype=int32)
Ragged outputs.
>>> inputs = ["पुस्तक", "کتاب"]
>>> tokenizer = keras_nlp.tokenizers.UnicodeCodepointTokenizer()
>>> seq1, seq2 = tokenizer(inputs)
>>> np.array(seq1)
array([2346, 2369, 2360, 2381, 2340, 2325], dtype=int32)
>>> np.array(seq2)
array([1705, 1578, 1575, 1576], dtype=int32)
Dense outputs.
>>> inputs = ["पुस्तक", "کتاب"]
>>> tokenizer = keras_nlp.tokenizers.UnicodeCodepointTokenizer(
... sequence_length=8)
>>> seq1, seq2 = tokenizer(inputs)
>>> np.array(seq1)
array([2346, 2369, 2360, 2381, 2340, 2325, 0, 0], dtype=int32)
>>> np.array(seq2)
array([1705, 1578, 1575, 1576, 0, 0, 0, 0], dtype=int32)
Tokenize, then batch for ragged outputs.
>>> inputs = ["Book", "पुस्तक", "کتاب"]
>>> tokenizer = keras_nlp.tokenizers.UnicodeCodepointTokenizer()
>>> ds = tf.data.Dataset.from_tensor_slices(inputs)
>>> ds = ds.map(tokenizer)
>>> ds = ds.apply(tf.data.experimental.dense_to_ragged_batch(3))
>>> ds.take(1).get_single_element()
<tf.RaggedTensor [[98, 111, 111, 107],
[2346, 2369, 2360, 2381, 2340, 2325],
[1705, 1578, 1575, 1576]]>
Batch, then tokenize for ragged outputs.
>>> inputs = ["Book", "पुस्तक", "کتاب"]
>>> tokenizer = keras_nlp.tokenizers.UnicodeCodepointTokenizer()
>>> ds = tf.data.Dataset.from_tensor_slices(inputs)
>>> ds = ds.batch(3).map(tokenizer)
>>> ds.take(1).get_single_element()
<tf.RaggedTensor [[98, 111, 111, 107],
[2346, 2369, 2360, 2381, 2340, 2325],
[1705, 1578, 1575, 1576]]>
Tokenize, then batch for dense outputs (`sequence_length` provided).
>>> inputs = ["Book", "पुस्तक", "کتاب"]
>>> tokenizer = keras_nlp.tokenizers.UnicodeCodepointTokenizer(
... sequence_length=5)
>>> ds = tf.data.Dataset.from_tensor_slices(inputs)
>>> ds = ds.map(tokenizer)
>>> ds = ds.apply(tf.data.experimental.dense_to_ragged_batch(3))
>>> ds.take(1).get_single_element()
<tf.Tensor: shape=(3, 5), dtype=int32, numpy=
array([[ 98, 111, 111, 107, 0],
[2346, 2369, 2360, 2381, 2340],
[1705, 1578, 1575, 1576, 0]], dtype=int32)>
Batch, then tokenize for dense outputs (`sequence_length` provided).
>>> inputs = ["Book", "पुस्तक", "کتاب"]
>>> tokenizer = keras_nlp.tokenizers.UnicodeCodepointTokenizer(
... sequence_length=5)
>>> ds = tf.data.Dataset.from_tensor_slices(inputs)
>>> ds = ds.batch(3).map(tokenizer)
>>> ds.take(1).get_single_element()
<tf.Tensor: shape=(3, 5), dtype=int32, numpy=
array([[ 98, 111, 111, 107, 0],
[2346, 2369, 2360, 2381, 2340],
[1705, 1578, 1575, 1576, 0]], dtype=int32)>
Tokenization with truncation.
>>> inputs = ["I Like to Travel a Lot", "मैं किताबें पढ़ना पसंद करता हूं"]
>>> tokenizer = keras_nlp.tokenizers.UnicodeCodepointTokenizer(
... sequence_length=5)
>>> outputs = tokenizer(inputs)
>>> np.array(outputs)
array([[ 105, 32, 108, 105, 107],
[2350, 2376, 2306, 32, 2325]], dtype=int32)
Tokenization with vocabulary_size.
>>> latin_ext_cutoff = 592
>>> tokenizer = keras_nlp.tokenizers.UnicodeCodepointTokenizer(
... vocabulary_size=latin_ext_cutoff)
>>> outputs = tokenizer("¿Cómo estás?")
>>> np.array(outputs)
array([191, 99, 243, 109, 111, 32, 101, 115, 116, 225, 115, 63],
dtype=int32)
>>> outputs = tokenizer("आप कैसे हैं")
>>> np.array(outputs)
array([591, 591, 32, 591, 591, 591, 591, 32, 591, 591, 591],
dtype=int32)
Detokenization.
>>> inputs = tf.constant([110, 105, 110, 106, 97], dtype="int32")
>>> tokenizer = keras_nlp.tokenizers.UnicodeCodepointTokenizer()
>>> outputs = tokenizer.detokenize(inputs)
>>> np.array(outputs).astype("U")
array('ninja', dtype='<U5')
Detokenization with padding.
>>> tokenizer = keras_nlp.tokenizers.UnicodeCodepointTokenizer(
... sequence_length=7)
>>> dataset = tf.data.Dataset.from_tensor_slices(["a b c", "b c", "a"])
>>> dataset = dataset.map(tokenizer)
>>> dataset.take(1).get_single_element()
<tf.Tensor: shape=(7,), dtype=int32,
numpy=array([97, 32, 98, 32, 99, 0, 0], dtype=int32)>
>>> detokunbatched = dataset.map(tokenizer.detokenize)
>>> detokunbatched.take(1).get_single_element()
<tf.Tensor: shape=(), dtype=string, numpy=b'a b c'>
Detokenization with invalid bytes.
>>> inputs = tf.constant([110, 105, 10000000, 110, 106, 97])
>>> tokenizer = keras_nlp.tokenizers.UnicodeCodepointTokenizer(
... errors="replace", replacement_char=88)
>>> outputs = tokenizer.detokenize(inputs)
>>> np.array(outputs).astype("U")
array('niXnja', dtype='<U6')
"""
def __init__(
self,
sequence_length: int = None,
lowercase: bool = True,
normalization_form: str = None,
errors: str = "replace",
replacement_char: int = 65533,
input_encoding: str = "UTF-8",
output_encoding: str = "UTF-8",
vocabulary_size: int = None,
dtype="int32",
**kwargs,
) -> None:
assert_tf_text_installed(self.__class__.__name__)
if not is_int_dtype(dtype):
raise ValueError(
"Output dtype must be an integer type. "
f"Received: dtype={dtype}"
)
# Check normalization_form.
if normalization_form not in [None, "NFC", "NFKC", "NFD", "NFKD"]:
raise ValueError(
'`normalization_form` must be one of None, "NFC", "NFKC", '
'"NFD", "NFKD". Received: normalization_form='
f"{normalization_form}"
)
# Check errors.
if errors not in ["strict", "replace", "ignore"]:
raise ValueError(
'`errors` must be one of "strict", "replace", "ignore" '
f"Received: errors={errors}"
)
# Check normalization_form matches input_encoding.
if normalization_form:
if input_encoding != "UTF-8":
raise ValueError(
"""Normalization Forms are Only Supported for Input Encoding
UTF-8"""
)
super().__init__(dtype=dtype, **kwargs)
self.sequence_length = sequence_length
self.lowercase = lowercase
self.normalization_form = normalization_form
self.errors = errors
self.replacement_char = replacement_char
self.input_encoding = input_encoding
self.output_encoding = output_encoding
self._vocabulary_size = vocabulary_size
def get_config(self):
config = super().get_config()
config.update(
{
"sequence_length": self.sequence_length,
"lowercase": self.lowercase,
"normalization_form": self.normalization_form,
"errors": self.errors,
"replacement_char": self.replacement_char,
"input_encoding": self.input_encoding,
"output_encoding": self.output_encoding,
"vocabulary_size": self._vocabulary_size,
}
)
return config
def vocabulary_size(self) -> int:
"""Get the size of the tokenizer vocabulary. None implies no vocabulary
size was provided"""
return self._vocabulary_size
def tokenize(self, inputs):
if not isinstance(inputs, (tf.Tensor, tf.RaggedTensor)):
inputs = tf.convert_to_tensor(inputs)
scalar_input = inputs.shape.rank == 0
if scalar_input:
inputs = tf.expand_dims(inputs, 0)
# Optionally lowercase the text
if self.lowercase:
inputs = tf_text.case_fold_utf8(inputs)
# Optionally normalize the text to a given form
if self.normalization_form:
inputs = tf_text.normalize_utf8(inputs, self.normalization_form)
tokens = tf.strings.unicode_decode(
inputs,
errors=self.errors,
replacement_char=self.replacement_char,
input_encoding=self.input_encoding,
)
tokens = tf.cast(tokens, self.compute_dtype)
if self.sequence_length:
output_shape = tokens.shape.as_list()
output_shape[-1] = self.sequence_length
tokens = tokens.to_tensor(shape=output_shape)
if scalar_input:
tokens = tf.squeeze(tokens, 0)
# Optionally clamps the output code point values to be in the
# range [0, vocabulary_size)
if self._vocabulary_size:
tokens = tf.clip_by_value(tokens, 0, self._vocabulary_size - 1)
return tokens
def detokenize(self, inputs):
inputs, unbatched, _ = convert_to_ragged_batch(inputs)
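# Strip padding (codepoint 0) before decoding back to strings.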
inputs = tf.ragged.boolean_mask(inputs, tf.not_equal(inputs, 0))
outputs = tf.strings.unicode_encode(
inputs,
errors=self.errors,
replacement_char=self.replacement_char,
output_encoding=self.output_encoding,
)
if unbatched:
outputs = tf.squeeze(outputs, 0)
return outputs
| keras-nlp/keras_nlp/tokenizers/unicode_codepoint_tokenizer.py/0 | {
"file_path": "keras-nlp/keras_nlp/tokenizers/unicode_codepoint_tokenizer.py",
"repo_id": "keras-nlp",
"token_count": 5919
} | 157 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_nlp.backend import ops
from keras_nlp.tests.test_case import TestCase
from keras_nlp.utils.tensor_utils import convert_to_ragged_batch
from keras_nlp.utils.tensor_utils import tensor_to_list
class TensorToListTest(TestCase):
def test_ragged_input(self):
input_data = tf.ragged.constant([[1, 2], [4, 5, 6]])
list_output = tensor_to_list(input_data)
self.assertAllEqual(list_output, [[1, 2], [4, 5, 6]])
def test_dense_input(self):
input_data = tf.constant([[1, 2], [3, 4]])
list_output = tensor_to_list(input_data)
self.assertAllEqual(list_output, [[1, 2], [3, 4]])
def test_scalar_input(self):
input_data = tf.constant(1)
list_output = tensor_to_list(input_data)
self.assertEqual(list_output, 1)
def test_ragged_strings(self):
input_data = tf.ragged.constant([["▀▁▂▃", "samurai"]])
detokenize_output = tensor_to_list(input_data)
self.assertAllEqual(detokenize_output, [["▀▁▂▃", "samurai"]])
def test_dense_strings(self):
input_data = tf.constant([["▀▁▂▃", "samurai"]])
detokenize_output = tensor_to_list(input_data)
self.assertAllEqual(detokenize_output, [["▀▁▂▃", "samurai"]])
def test_scalar_string(self):
input_data = tf.constant("▀▁▂▃")
detokenize_output = tensor_to_list(input_data)
self.assertEqual(detokenize_output, "▀▁▂▃")
def test_string_with_utf8_error(self):
input_data = tf.constant([b"hello\xf2\xf1\x91\xe5"])
detokenize_output = tensor_to_list(input_data)
self.assertEqual(detokenize_output, ["hello"])
class ConvertToRaggedBatch(TestCase):
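# `convert_to_ragged_batch` returns `(ragged_inputs, unbatched, rectangular)`:
# `unbatched` is True when a single sequence was wrapped into a batch of one,
# and `rectangular` is True when all rows have the same length.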
def test_convert_1d_python(self):
inputs = [1, 2]
outputs, unbatched, rectangular = convert_to_ragged_batch(inputs)
self.assertIsInstance(outputs, tf.RaggedTensor)
self.assertAllEqual(outputs, [[1, 2]])
self.assertTrue(unbatched)
self.assertTrue(rectangular)
def test_convert_2d_python(self):
inputs = [[1, 2], [2]]
outputs, unbatched, rectangular = convert_to_ragged_batch(inputs)
self.assertIsInstance(outputs, tf.RaggedTensor)
self.assertAllEqual(outputs, [[1, 2], [2]])
self.assertFalse(unbatched)
self.assertFalse(rectangular)
def test_convert_1d_tensor(self):
inputs = ops.array([1, 2, 3])
outputs, unbatched, rectangular = convert_to_ragged_batch(inputs)
self.assertIsInstance(outputs, tf.RaggedTensor)
self.assertAllEqual(outputs, [[1, 2, 3]])
self.assertTrue(unbatched)
self.assertTrue(rectangular)
def test_convert_2d_tensor(self):
inputs = ops.array([[1, 2, 3], [1, 2, 3]])
outputs, unbatched, rectangular = convert_to_ragged_batch(inputs)
self.assertIsInstance(outputs, tf.RaggedTensor)
self.assertAllEqual(outputs, [[1, 2, 3], [1, 2, 3]])
self.assertFalse(unbatched)
self.assertTrue(rectangular)
def test_convert_ragged(self):
inputs = tf.ragged.constant([[1, 2], [1]])
outputs, unbatched, rectangular = convert_to_ragged_batch(inputs)
self.assertIsInstance(outputs, tf.RaggedTensor)
self.assertAllEqual(outputs, [[1, 2], [1]])
self.assertFalse(unbatched)
self.assertFalse(rectangular)
| keras-nlp/keras_nlp/utils/tensor_utils_test.py/0 | {
"file_path": "keras-nlp/keras_nlp/utils/tensor_utils_test.py",
"repo_id": "keras-nlp",
"token_count": 1699
} | 158 |
<jupyter_start><jupyter_text>Install deps<jupyter_code>!pip install git+https://github.com/jbischof/keras-nlp.git@bert_ckpt tensorflow tf-models-official --upgrade --quiet
import json
import keras_nlp
import tensorflow as tf
import tensorflow_models as tfm
from tensorflow import keras
TOKEN_TYPE = "cased"
MODEL_TYPE = "bert_base"
MODEL_NAME = MODEL_TYPE + "_" + TOKEN_TYPE
VOCAB_SIZE = 28996<jupyter_output><empty_output><jupyter_text>Load the model garden checkpoints and weights<jupyter_code># Model garden BERT paths.
zip_path = f"""https://storage.googleapis.com/tf_model_garden/nlp/bert/v3/{TOKEN_TYPE}_L-12_H-768_A-12.tar.gz"""
zip_file = keras.utils.get_file(
f"""/content/{MODEL_NAME}""",
zip_path,
extract=True,
archive_format="tar",
)
!tar -xvf """{MODEL_NAME}"""
# Model garden BERT paths.
extract_dir = "/content/tmp/temp_dir/raw/"
vocab_path = extract_dir + "vocab.txt"
checkpoint_path = extract_dir + "bert_model.ckpt"
config_path = extract_dir + "bert_config.json"
vars = tf.train.list_variables(checkpoint_path)
weights = {}
for name, shape in vars:
print(name, shape)
weight = tf.train.load_variable(checkpoint_path, name)
weights[name] = weight<jupyter_output>_CHECKPOINTABLE_OBJECT_GRAPH []
encoder/layer_with_weights-0/embeddings/.ATTRIBUTES/VARIABLE_VALUE [28996, 768]
encoder/layer_with_weights-1/embeddings/.ATTRIBUTES/VARIABLE_VALUE [512, 768]
encoder/layer_with_weights-10/_attention_layer/_key_dense/bias/.ATTRIBUTES/VARIABLE_VALUE [12, 64]
encoder/layer_with_weights-10/_attention_layer/_key_dense/kernel/.ATTRIBUTES/VARIABLE_VALUE [768, 12, 64]
encoder/layer_with_weights-10/_attention_layer/_output_dense/bias/.ATTRIBUTES/VARIABLE_VALUE [768]
encoder/layer_with_weights-10/_attention_layer/_output_dense/kernel/.ATTRIBUTES/VARIABLE_VALUE [12, 64, 768]
encoder/layer_with_weights-10/_attention_layer/_query_dense/bias/.ATTRIBUTES/VARIABLE_VALUE [12, 64]
encoder/layer_with_weights-10/_attention_layer/_query_dense/kernel/.ATTRIBUTES/VARIABLE_VALUE [768, 12, 64]
encoder/layer_with_weights-10/_attention_layer/_value_dense/bias/.ATTRIBUTES/VARIABLE_VALUE [12, 64]
encoder/layer_with_weights-10/_attention_layer/_value_dense/kernel/.ATTRIBUTES/VARIABLE[...]<jupyter_text>Load BertBase model with KerasNLP.<jupyter_code>model = keras_nlp.models.BertBase(vocabulary_size=VOCAB_SIZE)
model.summary()<jupyter_output>Model: "bert"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
token_ids (InputLayer) [(None, None)] 0 []
token_embedding (Embedding) (None, None, 768) 22268928 ['token_ids[0][0]']
segment_ids (InputLayer) [(None, None)] 0 []
position_embedding (PositionEm (None, None, 768) 393216 ['token_embedding[0][0]'] [...]<jupyter_text>Convert Weights<jupyter_code>model.get_layer("token_embedding").embeddings.assign(
weights[
"encoder/layer_with_weights-0/embeddings/.ATTRIBUTES/VARIABLE_VALUE"
]
)
model.get_layer("position_embedding").position_embeddings.assign(
weights[
"encoder/layer_with_weights-1/embeddings/.ATTRIBUTES/VARIABLE_VALUE"
]
)
model.get_layer("segment_embedding").embeddings.assign(
weights[
"encoder/layer_with_weights-2/embeddings/.ATTRIBUTES/VARIABLE_VALUE"
]
)
model.get_layer("embeddings_layer_norm").gamma.assign(
weights["encoder/layer_with_weights-3/gamma/.ATTRIBUTES/VARIABLE_VALUE"]
)
model.get_layer("embeddings_layer_norm").beta.assign(
weights["encoder/layer_with_weights-3/beta/.ATTRIBUTES/VARIABLE_VALUE"]
)
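# Checkpoint indices 0-3 hold the token/position/segment embeddings and the
# embedding layer norm, so transformer layer i maps to layer_with_weights-{i + 4}.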
for i in range(model.num_layers):
model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer._key_dense.kernel.assign(
weights[
f"encoder/layer_with_weights-{i + 4}/_attention_layer/_key_dense/kernel/.ATTRIBUTES/VARIABLE_VALUE"
]
)
model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer._key_dense.bias.assign(
weights[
f"encoder/layer_with_weights-{i + 4}/_attention_layer/_key_dense/bias/.ATTRIBUTES/VARIABLE_VALUE"
]
)
model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer._query_dense.kernel.assign(
weights[
f"encoder/layer_with_weights-{i + 4}/_attention_layer/_query_dense/kernel/.ATTRIBUTES/VARIABLE_VALUE"
]
)
model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer._query_dense.bias.assign(
weights[
f"encoder/layer_with_weights-{i + 4}/_attention_layer/_query_dense/bias/.ATTRIBUTES/VARIABLE_VALUE"
]
)
model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer._value_dense.kernel.assign(
weights[
f"encoder/layer_with_weights-{i + 4}/_attention_layer/_value_dense/kernel/.ATTRIBUTES/VARIABLE_VALUE"
]
)
model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer._value_dense.bias.assign(
weights[
f"encoder/layer_with_weights-{i + 4}/_attention_layer/_value_dense/bias/.ATTRIBUTES/VARIABLE_VALUE"
]
)
model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer._output_dense.kernel.assign(
weights[
f"encoder/layer_with_weights-{i + 4}/_attention_layer/_output_dense/kernel/.ATTRIBUTES/VARIABLE_VALUE"
]
)
model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer._output_dense.bias.assign(
weights[
f"encoder/layer_with_weights-{i + 4}/_attention_layer/_output_dense/bias/.ATTRIBUTES/VARIABLE_VALUE"
]
)
model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer_norm.gamma.assign(
weights[
f"encoder/layer_with_weights-{i + 4}/_attention_layer_norm/gamma/.ATTRIBUTES/VARIABLE_VALUE"
]
)
model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer_norm.beta.assign(
weights[
f"encoder/layer_with_weights-{i + 4}/_attention_layer_norm/beta/.ATTRIBUTES/VARIABLE_VALUE"
]
)
model.get_layer(
f"transformer_layer_{i}"
)._feedforward_intermediate_dense.kernel.assign(
weights[
f"encoder/layer_with_weights-{i + 4}/_intermediate_dense/kernel/.ATTRIBUTES/VARIABLE_VALUE"
]
)
model.get_layer(
f"transformer_layer_{i}"
)._feedforward_intermediate_dense.bias.assign(
weights[
f"encoder/layer_with_weights-{i + 4}/_intermediate_dense/bias/.ATTRIBUTES/VARIABLE_VALUE"
]
)
model.get_layer(
f"transformer_layer_{i}"
)._feedforward_output_dense.kernel.assign(
weights[
f"encoder/layer_with_weights-{i + 4}/_output_dense/kernel/.ATTRIBUTES/VARIABLE_VALUE"
]
)
model.get_layer(
f"transformer_layer_{i}"
)._feedforward_output_dense.bias.assign(
weights[
f"encoder/layer_with_weights-{i + 4}/_output_dense/bias/.ATTRIBUTES/VARIABLE_VALUE"
]
)
model.get_layer(
f"transformer_layer_{i}"
)._feedforward_layer_norm.gamma.assign(
weights[
f"encoder/layer_with_weights-{i + 4}/_output_layer_norm/gamma/.ATTRIBUTES/VARIABLE_VALUE"
]
)
model.get_layer(
f"transformer_layer_{i}"
)._feedforward_layer_norm.beta.assign(
weights[
f"encoder/layer_with_weights-{i + 4}/_output_layer_norm/beta/.ATTRIBUTES/VARIABLE_VALUE"
]
)
model.get_layer("pooled_dense").kernel.assign(
weights["encoder/layer_with_weights-16/kernel/.ATTRIBUTES/VARIABLE_VALUE"]
)
model.get_layer("pooled_dense").bias.assign(
weights["encoder/layer_with_weights-16/bias/.ATTRIBUTES/VARIABLE_VALUE"]
)
pass<jupyter_output><empty_output><jupyter_text>Compare Output<jupyter_code>def preprocess(x):
tokenizer = keras_nlp.tokenizers.WordPieceTokenizer(
vocabulary=vocab_path,
)
packer = keras_nlp.layers.MultiSegmentPacker(
sequence_length=model.max_sequence_length,
start_value=tokenizer.token_to_id("[CLS]"),
end_value=tokenizer.token_to_id("[SEP]"),
)
return packer(tokenizer(x))
token_ids, segment_ids = preprocess(["the quick brown fox."])
encoder_config = tfm.nlp.encoders.EncoderConfig(
type="bert",
bert=json.load(tf.io.gfile.GFile(config_path)),
)
mg_model = tfm.nlp.encoders.build_encoder(encoder_config)
checkpoint = tf.train.Checkpoint(encoder=mg_model)
checkpoint.read(checkpoint_path).assert_consumed()
keras_nlp_output = model(
{
"token_ids": token_ids,
"segment_ids": segment_ids,
"padding_mask": token_ids != 0,
}
)["pooled_output"]
mg_output = mg_model(
{
"input_word_ids": token_ids,
"input_type_ids": segment_ids,
"padding_mask": token_ids != 0,
}
)["pooled_output"]
keras_nlp_output[0, 0:10]
mg_output[0, 0:10]
# Very close! Though not 100% exact.
tf.reduce_mean(keras_nlp_output - mg_output)
# Save BertBase checkpoint
model.save_weights(f"""{MODEL_NAME}.h5""")
model2 = keras_nlp.models.BertBase(vocabulary_size=VOCAB_SIZE)
model2.load_weights(f"""{MODEL_NAME}.h5""")
# Same output from loaded checkpoint
keras_nlp_output2 = model2(
{
"token_ids": token_ids,
"segment_ids": segment_ids,
"padding_mask": token_ids != 0,
}
)["pooled_output"]
tf.reduce_mean(keras_nlp_output - keras_nlp_output2)
# Save vocab file as well
vocab_info = tf.io.gfile.GFile(vocab_path).read()
f = open("vocab.txt", "w")
f.write(vocab_info)
# Get MD5 of model
!md5sum """{MODEL_NAME}.h5"""
# Upload model to drive
# from google.colab import drive
# drive.mount('/content/drive')
# Check uploaded model once added to repo
model_cloud = keras_nlp.models.BertBase(weights=MODEL_NAME)
# Same output from cloud model
keras_nlp_output_cloud = model_cloud(
{
"token_ids": token_ids,
"segment_ids": segment_ids,
"padding_mask": token_ids != 0,
}
)["pooled_output"]
tf.reduce_mean(keras_nlp_output - keras_nlp_output_cloud)
keras_nlp_output_cloud[0, 0:10]<jupyter_output><empty_output> | keras-nlp/tools/checkpoint_conversion/bert_base_cased.ipynb/0 | {
"file_path": "keras-nlp/tools/checkpoint_conversion/bert_base_cased.ipynb",
"repo_id": "keras-nlp",
"token_count": 5041
} | 159 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import numpy as np
import requests
import tensorflow as tf
import transformers
from absl import app
from absl import flags
from checkpoint_conversion_utils import get_md5_checksum
# Temporarily directly import gpt2 until we expose it.
from keras_nlp.models.gpt2.gpt2_backbone import GPT2Backbone
from keras_nlp.models.gpt2.gpt2_tokenizer import GPT2Tokenizer
PRESET_MAP = {
"gpt2_base_en": ("124M", "gpt2"),
"gpt2_medium_en": ("355M", "gpt2-medium"),
"gpt2_large_en": ("774M", "gpt2-large"),
"gpt2_extra_large_en": ("1558M", "gpt2-xl"),
}
DOWNLOAD_SCRIPT_URL = (
"https://raw.githubusercontent.com/openai/gpt-2/master/download_model.py"
)
EXTRACT_DIR = "./models/{}"
FLAGS = flags.FLAGS
flags.DEFINE_string(
"preset", None, f'Must be one of {",".join(PRESET_MAP.keys())}'
)
def download_model(num_params):
print("-> Download original weights.")
response = requests.get(DOWNLOAD_SCRIPT_URL)
open("download_model.py", "wb").write(response.content)
os.system(f"python download_model.py {num_params}")
def convert_checkpoints(num_params):
print("\n-> Convert original weights to KerasNLP format.")
# GPT-2 paths.
extract_dir = EXTRACT_DIR.format(num_params)
checkpoint_path = os.path.join(extract_dir, "model.ckpt")
config_path = os.path.join(extract_dir, "hparams.json")
with open(config_path, "r") as f:
cfg = json.load(f)
print("Config:", cfg)
print("Original weights:")
vars = tf.train.list_variables(checkpoint_path)
weights = {}
for name, shape in vars:
print(name, shape)
weight = tf.train.load_variable(checkpoint_path, name)
weights[name] = weight
# Temporary direct import, as we aren't exposing this quite yet.
keras_nlp_model = GPT2Backbone.from_preset(
FLAGS.preset,
load_weights=False,
)
keras_nlp_model.get_layer("token_embedding").embeddings.assign(
weights["model/wte"]
)
keras_nlp_model.get_layer("position_embedding").position_embeddings.assign(
weights["model/wpe"]
)
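    # The original checkpoint fuses the query, key and value projections into
    # a single `c_attn` tensor; the ranges below slice it into three equal
    # `n_embd`-wide chunks (query, key, value) along the last axis.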
range_1 = (0, cfg["n_embd"])
range_2 = (cfg["n_embd"], 2 * cfg["n_embd"])
range_3 = (2 * cfg["n_embd"], 3 * cfg["n_embd"])
for i in range(keras_nlp_model.num_layers):
keras_nlp_model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer._query_dense.kernel.assign(
weights[f"model/h{i}/attn/c_attn/w"][
0, :, range_1[0] : range_1[1]
].reshape((cfg["n_embd"], cfg["n_head"], -1))
)
keras_nlp_model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer._query_dense.bias.assign(
weights[f"model/h{i}/attn/c_attn/b"][
range_1[0] : range_1[1]
].reshape((cfg["n_head"], -1))
)
keras_nlp_model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer._key_dense.kernel.assign(
weights[f"model/h{i}/attn/c_attn/w"][
0, :, range_2[0] : range_2[1]
].reshape((cfg["n_embd"], cfg["n_head"], -1))
)
keras_nlp_model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer._key_dense.bias.assign(
weights[f"model/h{i}/attn/c_attn/b"][
range_2[0] : range_2[1]
].reshape((cfg["n_head"], -1))
)
keras_nlp_model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer._value_dense.kernel.assign(
weights[f"model/h{i}/attn/c_attn/w"][
0, :, range_3[0] : range_3[1]
].reshape((cfg["n_embd"], cfg["n_head"], -1))
)
keras_nlp_model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer._value_dense.bias.assign(
weights[f"model/h{i}/attn/c_attn/b"][
range_3[0] : range_3[1]
].reshape((cfg["n_head"], -1))
)
keras_nlp_model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer._output_dense.kernel.assign(
weights[f"model/h{i}/attn/c_proj/w"][0].reshape(
(cfg["n_head"], -1, cfg["n_embd"])
)
)
keras_nlp_model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer._output_dense.bias.assign(
weights[f"model/h{i}/attn/c_proj/b"]
)
keras_nlp_model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer_norm.gamma.assign(weights[f"model/h{i}/ln_1/g"])
keras_nlp_model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer_norm.beta.assign(weights[f"model/h{i}/ln_1/b"])
keras_nlp_model.get_layer(
f"transformer_layer_{i}"
)._feedforward_intermediate_dense.kernel.assign(
weights[f"model/h{i}/mlp/c_fc/w"][0]
)
keras_nlp_model.get_layer(
f"transformer_layer_{i}"
)._feedforward_intermediate_dense.bias.assign(
weights[f"model/h{i}/mlp/c_fc/b"]
)
keras_nlp_model.get_layer(
f"transformer_layer_{i}"
)._feedforward_output_dense.kernel.assign(
weights[f"model/h{i}/mlp/c_proj/w"][0]
)
keras_nlp_model.get_layer(
f"transformer_layer_{i}"
)._feedforward_output_dense.bias.assign(
weights[f"model/h{i}/mlp/c_proj/b"]
)
keras_nlp_model.get_layer(
f"transformer_layer_{i}"
)._feedforward_layer_norm.gamma.assign(weights[f"model/h{i}/ln_2/g"])
keras_nlp_model.get_layer(
f"transformer_layer_{i}"
)._feedforward_layer_norm.beta.assign(weights[f"model/h{i}/ln_2/b"])
keras_nlp_model.get_layer("layer_norm").gamma.assign(
weights["model/ln_f/g"]
)
keras_nlp_model.get_layer("layer_norm").beta.assign(weights["model/ln_f/b"])
# Save the model.
print(f"\n-> Save KerasNLP model weights to `{FLAGS.preset}.h5`.")
keras_nlp_model.save_weights(f"{FLAGS.preset}.h5")
return keras_nlp_model
def define_tokenizer(num_params, hf_model_name):
print("\n-> Define the tokenizers.")
    extract_dir = EXTRACT_DIR.format(num_params)
merges_path = os.path.join(extract_dir, "vocab.bpe")
vocab_path = os.path.join(extract_dir, "encoder.json")
keras_nlp_tokenizer = GPT2Tokenizer(
vocabulary=vocab_path,
merges=merges_path,
)
hf_tokenizer = transformers.AutoTokenizer.from_pretrained(hf_model_name)
print("\n-> Print MD5 checksum of the vocab files.")
print(f"`{vocab_path}` md5sum: ", get_md5_checksum(vocab_path))
print(f"`{merges_path}` md5sum: ", get_md5_checksum(merges_path))
return keras_nlp_tokenizer, hf_tokenizer
def check_output(
keras_nlp_model,
keras_nlp_tokenizer,
hf_model,
hf_tokenizer,
):
print("\n-> Check the outputs.")
input_str = ["the quick brown fox ran, galloped and jumped."]
# KerasNLP
token_ids = keras_nlp_tokenizer(input_str)
padding_mask = token_ids != 0
keras_nlp_inputs = {
"token_ids": token_ids.to_tensor(),
"padding_mask": padding_mask.to_tensor(),
}
keras_nlp_output = keras_nlp_model.predict(keras_nlp_inputs)
# HF
hf_inputs = hf_tokenizer(input_str, return_tensors="pt")
hf_output = hf_model(**hf_inputs).last_hidden_state
print("KerasNLP output:", keras_nlp_output[0, 0, :10])
print("HF output:", hf_output[0, 0, :10])
print("Difference:", np.mean(keras_nlp_output - hf_output.detach().numpy()))
# Show the MD5 checksum of the model weights.
print("Model md5sum: ", get_md5_checksum(f"./{FLAGS.preset}.h5"))
return keras_nlp_output
def main(_):
assert (
FLAGS.preset in PRESET_MAP.keys()
), f'Invalid preset {FLAGS.preset}. Must be one of {",".join(PRESET_MAP.keys())}'
num_params = PRESET_MAP[FLAGS.preset][0]
hf_model_name = PRESET_MAP[FLAGS.preset][1]
download_model(num_params)
keras_nlp_model = convert_checkpoints(num_params)
print("\n-> Load HF model.")
hf_model = transformers.AutoModel.from_pretrained(hf_model_name)
hf_model.eval()
keras_nlp_tokenizer, hf_tokenizer = define_tokenizer(
num_params, hf_model_name
)
check_output(
keras_nlp_model,
keras_nlp_tokenizer,
hf_model,
hf_tokenizer,
)
if __name__ == "__main__":
flags.mark_flag_as_required("preset")
app.run(main)
| keras-nlp/tools/checkpoint_conversion/convert_gpt2_checkpoints.py/0 | {
"file_path": "keras-nlp/tools/checkpoint_conversion/convert_gpt2_checkpoints.py",
"repo_id": "keras-nlp",
"token_count": 4426
} | 160 |
# Training WordPiece Vocabularies on Wikipedia
This is unmaintained helper code for training the vocabularies on Wikipedia.
It is advised to run these scripts on GCS.
Note: use either `screen` or `tmux` when running these commands remotely to
avoid killing long-running scripts.
## Instructions
The steps are listed below. You will need to run steps 1 and 2 for each
Wikipedia data dump that you want to train on, in order to download and extract the data.
Afterwards, change the list in the cleaning script so that it matches the
downloaded data folder names.
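Example: if the extractor produced folders named `arwiki` and `ptwiki`, the list would be `["arwiki", "ptwiki"]` (illustrative names only; use the folder names actually created on your machine).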
Finally, change the list in the vocabulary training script and run it to train
the vocabularies for each directory in the list.
### 1. Download Wikipedia Dataset from Wikipedia Dumps
Example: `curl -O https://dumps.wikimedia.org/ptwiki/20220801/ptwiki-20220801-pages-articles-multistream.xml.bz2`
### 2. Run Wikipedia Dataset Extractor
Example: `python3 -m wikiextractor.WikiExtractor arwiki-20220802-pages-articles-multistream.xml.bz2`
### 3. Additional Removals
`python3 word_piece_cleaning_script.py`
### 4. Run train vocabulary
`python3 word_piece_training_script.py` | keras-nlp/tools/pretrained_tokenizers/README.md/0 | {
"file_path": "keras-nlp/tools/pretrained_tokenizers/README.md",
"repo_id": "keras-nlp",
"token_count": 319
} | 161 |
include LICENSE
include README.md
include CONTRIBUTING.md
graft tests | keras-preprocessing/MANIFEST.in/0 | {
"file_path": "keras-preprocessing/MANIFEST.in",
"repo_id": "keras-preprocessing",
"token_count": 21
} | 162 |
import numpy as np
import pytest
from keras_preprocessing.image import affine_transformations
def test_random_transforms():
x = np.random.random((2, 28, 28))
assert affine_transformations.random_rotation(x, 45).shape == (2, 28, 28)
assert affine_transformations.random_shift(x, 1, 1).shape == (2, 28, 28)
assert affine_transformations.random_shear(x, 20).shape == (2, 28, 28)
assert affine_transformations.random_channel_shift(x, 20).shape == (2, 28, 28)
def test_deterministic_transform():
x = np.ones((3, 3, 3))
x_rotated = np.array([[[0., 0., 0.],
[1., 1., 1.],
[0., 0., 0.]],
[[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]],
[[0., 0., 0.],
[1., 1., 1.],
[0., 0., 0.]]])
assert np.allclose(
affine_transformations.apply_affine_transform(x,
theta=45,
row_axis=0,
col_axis=1,
channel_axis=2,
fill_mode='constant'),
x_rotated)
def test_matrix_center():
x = np.expand_dims(np.array([
[0, 1],
[0, 0],
]), -1)
x_rotated90 = np.expand_dims(np.array([
[1, 0],
[0, 0],
]), -1)
assert np.allclose(
affine_transformations.apply_affine_transform(x,
theta=90,
row_axis=0,
col_axis=1,
channel_axis=2),
x_rotated90)
def test_translation():
x = np.array([
[0, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 0],
])
x_up = np.array([
[0, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
])
x_dn = np.array([
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 1, 0, 0],
])
x_left = np.array([
[0, 0, 0, 0],
[1, 0, 0, 0],
[0, 0, 0, 0],
])
x_right = np.array([
[0, 0, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 0],
])
# Channels first
x_test = np.expand_dims(x, 0)
# Horizontal translation
assert np.alltrue(x_left == np.squeeze(
affine_transformations.apply_affine_transform(x_test, tx=1)))
assert np.alltrue(x_right == np.squeeze(
affine_transformations.apply_affine_transform(x_test, tx=-1)))
# change axes: x<->y
assert np.alltrue(x_left == np.squeeze(
affine_transformations.apply_affine_transform(
x_test, ty=1, row_axis=2, col_axis=1)))
assert np.alltrue(x_right == np.squeeze(
affine_transformations.apply_affine_transform(
x_test, ty=-1, row_axis=2, col_axis=1)))
# Vertical translation
assert np.alltrue(x_up == np.squeeze(
affine_transformations.apply_affine_transform(x_test, ty=1)))
assert np.alltrue(x_dn == np.squeeze(
affine_transformations.apply_affine_transform(x_test, ty=-1)))
# change axes: x<->y
assert np.alltrue(x_up == np.squeeze(
affine_transformations.apply_affine_transform(
x_test, tx=1, row_axis=2, col_axis=1)))
assert np.alltrue(x_dn == np.squeeze(
affine_transformations.apply_affine_transform(
x_test, tx=-1, row_axis=2, col_axis=1)))
# Channels last
x_test = np.expand_dims(x, -1)
# Horizontal translation
assert np.alltrue(x_left == np.squeeze(
affine_transformations.apply_affine_transform(
x_test, tx=1, row_axis=0, col_axis=1, channel_axis=2)))
assert np.alltrue(x_right == np.squeeze(
affine_transformations.apply_affine_transform(
x_test, tx=-1, row_axis=0, col_axis=1, channel_axis=2)))
# change axes: x<->y
assert np.alltrue(x_left == np.squeeze(
affine_transformations.apply_affine_transform(
x_test, ty=1, row_axis=1, col_axis=0, channel_axis=2)))
assert np.alltrue(x_right == np.squeeze(
affine_transformations.apply_affine_transform(
x_test, ty=-1, row_axis=1, col_axis=0, channel_axis=2)))
# Vertical translation
assert np.alltrue(x_up == np.squeeze(
affine_transformations.apply_affine_transform(
x_test, ty=1, row_axis=0, col_axis=1, channel_axis=2)))
assert np.alltrue(x_dn == np.squeeze(
affine_transformations.apply_affine_transform(
x_test, ty=-1, row_axis=0, col_axis=1, channel_axis=2)))
# change axes: x<->y
assert np.alltrue(x_up == np.squeeze(
affine_transformations.apply_affine_transform(
x_test, tx=1, row_axis=1, col_axis=0, channel_axis=2)))
assert np.alltrue(x_dn == np.squeeze(
affine_transformations.apply_affine_transform(
x_test, tx=-1, row_axis=1, col_axis=0, channel_axis=2)))
def test_random_zoom():
x = np.random.random((2, 28, 28))
assert affine_transformations.random_zoom(x, (5, 5)).shape == (2, 28, 28)
assert np.allclose(x, affine_transformations.random_zoom(x, (1, 1)))
def test_random_zoom_error():
with pytest.raises(ValueError):
affine_transformations.random_zoom(0, zoom_range=[0])
def test_apply_brightness_shift_error(monkeypatch):
monkeypatch.setattr(affine_transformations, 'ImageEnhance', None)
with pytest.raises(ImportError):
affine_transformations.apply_brightness_shift(0, [0])
def test_random_brightness(monkeypatch):
monkeypatch.setattr(affine_transformations,
'apply_brightness_shift', lambda x, y, z: (x, y))
assert (0, 3.) == affine_transformations.random_brightness(0, (3, 3))
def test_random_brightness_error():
with pytest.raises(ValueError):
affine_transformations.random_brightness(0, [0])
def test_random_brightness_scale():
img = np.ones((1, 1, 3)) * 128
zeros = np.zeros((1, 1, 3))
must_be_128 = affine_transformations.random_brightness(img, [1, 1], False)
assert np.array_equal(img, must_be_128)
must_be_0 = affine_transformations.random_brightness(img, [1, 1], True)
assert np.array_equal(zeros, must_be_0)
def test_random_brightness_scale_outside_range_positive():
img = np.ones((1, 1, 3)) * 1024
zeros = np.zeros((1, 1, 3))
must_be_1024 = affine_transformations.random_brightness(img, [1, 1], False)
assert np.array_equal(img, must_be_1024)
must_be_0 = affine_transformations.random_brightness(img, [1, 1], True)
assert np.array_equal(zeros, must_be_0)
def test_random_brightness_scale_outside_range_negative():
img = np.ones((1, 1, 3)) * -1024
zeros = np.zeros((1, 1, 3))
must_be_neg_1024 = affine_transformations.random_brightness(img, [1, 1], False)
assert np.array_equal(img, must_be_neg_1024)
must_be_0 = affine_transformations.random_brightness(img, [1, 1], True)
assert np.array_equal(zeros, must_be_0)
def test_apply_affine_transform_error(monkeypatch):
monkeypatch.setattr(affine_transformations, 'scipy', None)
with pytest.raises(ImportError):
affine_transformations.apply_affine_transform(0)
| keras-preprocessing/tests/image/affine_transformations_test.py/0 | {
"file_path": "keras-preprocessing/tests/image/affine_transformations_test.py",
"repo_id": "keras-preprocessing",
"token_count": 3761
} | 163 |
# Copyright 2019 The KerasTuner Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"HyperParameters logic."
import collections
import contextlib
import copy
import six
from keras_tuner import protos
from keras_tuner.api_export import keras_tuner_export
from keras_tuner.engine import conditions as conditions_mod
from keras_tuner.engine.hyperparameters import hp_types
from keras_tuner.engine.hyperparameters import hyperparameter as hp_module
@keras_tuner_export(
[
"keras_tuner.HyperParameters",
"keras_tuner.engine.hyperparameters.HyperParameters",
]
)
class HyperParameters:
"""Container for both a hyperparameter space, and current values.
    A `HyperParameters` instance can be passed to `HyperModel.build(hp)` as an
argument to build a model.
To prevent the users from depending on inactive hyperparameter values, only
active hyperparameters should have values in `HyperParameters.values`.
Attributes:
space: A list of `HyperParameter` objects.
values: A dict mapping hyperparameter names to current values.
"""
def __init__(self):
# Current name scopes.
self._name_scopes = []
# Current `Condition`s, managed by `conditional_scope`.
self._conditions = []
# Dict of list of hyperparameters with same
# name but different conditions, e.g. `{name: [hp1, hp2]}`.
# Hyperparameters are uniquely identified by their name and
# conditions.
self._hps = collections.defaultdict(list)
# List of hyperparameters, maintained in insertion order.
# This guarantees that conditional params are always later in
# the list than their parents.
self._space = []
# Active values for this `Trial`.
self.values = {}
# A list of active `conditional_scope`s in a build,
        # each of which is a list of conditions.
# Used by BaseTuner to activate all conditions.
# No need to empty these after builds since when building the model, hp
        # is copied from the Oracle's hp, which always has these 2 fields
# empty.
self.active_scopes = []
# Similar for inactive `conditional_scope`s.
self.inactive_scopes = []
@contextlib.contextmanager
def name_scope(self, name):
self._name_scopes.append(name)
try:
yield
finally:
self._name_scopes.pop()
@contextlib.contextmanager
def conditional_scope(self, parent_name, parent_values):
"""Opens a scope to create conditional HyperParameters.
All `HyperParameter`s created under this scope will only be active when
the parent `HyperParameter` specified by `parent_name` is equal to one
of the values passed in `parent_values`.
When the condition is not met, creating a `HyperParameter` under this
scope will register the `HyperParameter`, but will return `None` rather
than a concrete value.
Note that any Python code under this scope will execute regardless of
whether the condition is met.
This feature is for the `Tuner` to collect more information of the
search space and the current trial. It is especially useful for model
selection. If the parent `HyperParameter` is for model selection, the
`HyperParameter`s in a model should only be active when the model
        is selected, which can be implemented using `conditional_scope`.
Examples:
```python
def MyHyperModel(HyperModel):
def build(self, hp):
model = Sequential()
model.add(Input(shape=(32, 32, 3)))
model_type = hp.Choice("model_type", ["mlp", "cnn"])
with hp.conditional_scope("model_type", ["mlp"]):
if model_type == "mlp":
model.add(Flatten())
model.add(Dense(32, activation='relu'))
with hp.conditional_scope("model_type", ["cnn"]):
if model_type == "cnn":
model.add(Conv2D(64, 3, activation='relu'))
model.add(GlobalAveragePooling2D())
model.add(Dense(10, activation='softmax'))
return model
```
Args:
parent_name: A string, specifying the name of the parent
`HyperParameter` to use as the condition to activate the
current `HyperParameter`.
parent_values: A list of the values of the parent `HyperParameter`
to use as the condition to activate the current
`HyperParameter`.
"""
parent_name = self._get_name(parent_name) # Add name_scopes.
if not self._exists(parent_name):
raise ValueError(
f"`HyperParameter` named: {parent_name} not defined."
)
condition = conditions_mod.Parent(parent_name, parent_values)
self._conditions.append(condition)
if condition.is_active(self.values):
self.active_scopes.append(copy.deepcopy(self._conditions))
else:
self.inactive_scopes.append(copy.deepcopy(self._conditions))
try:
yield
finally:
self._conditions.pop()
def is_active(self, hyperparameter):
"""Checks if a hyperparameter is currently active for a `Trial`.
A hyperparameter is considered active if and only if all its parent
        conditions are active; it is not affected by whether the hyperparameter
is used while building the model. The function is usually called by the
`Oracle` for populating new hyperparameter values and updating the trial
after receiving the evaluation results.
Args:
            hyperparameter: A string or `HyperParameter` instance. If a
                string, checks whether any hyperparameter with that name is
                active. If a `HyperParameter` instance, checks whether the
                object is active.
Returns:
A boolean, whether the hyperparameter is active.
"""
hp = hyperparameter
if isinstance(hp, hp_module.HyperParameter):
return self._conditions_are_active(hp.conditions)
hp_name = str(hp)
return any(
self._conditions_are_active(temp_hp.conditions)
for temp_hp in self._hps[hp_name]
)
def _conditions_are_active(self, conditions):
return all(condition.is_active(self.values) for condition in conditions)
def _exists(self, name, conditions=None):
"""Checks for a hyperparameter with the same name and conditions."""
if conditions is None:
conditions = self._conditions
if name in self._hps:
hps = self._hps[name]
for hp in hps:
if hp.conditions == conditions:
return True
return False
def _retrieve(self, hp):
"""Gets or creates a hyperparameter.
Args:
hp: A `HyperParameter` instance.
Returns:
The value of the hyperparameter, or None if the hyperparameter is
not active.
"""
if self._exists(hp.name, hp.conditions):
if self.is_active(hp):
return self.values[hp.name]
return None # Ensures inactive values are not relied on by user.
return self._register(hp)
def _register(self, hyperparameter, overwrite=False):
"""Registers a hyperparameter in this container.
Args:
            hyperparameter: A `HyperParameter` instance.
overwrite: Boolean, whether to overwrite the existing value with
the default hyperparameter value.
Returns:
The value of the hyperparameter, or None if the hyperparameter is
not active.
"""
hp = hyperparameter
self._validate_name(hp.name)
# Copy to ensure this param can be serialized.
hp = hp.__class__.from_config(hp.get_config())
self._hps[hp.name].append(hp)
self._space.append(hp)
# Only add active values to `self.values`.
if self.is_active(hp):
# Use the default value only if not populated.
if overwrite or hp.name not in self.values:
self.values[hp.name] = hp.default
return self.values[hp.name]
return None # Ensures inactive values are not relied on by user.
def get(self, name):
"""Return the current value of this hyperparameter set."""
name = self._get_name(name) # Add name_scopes.
if name in self.values:
return self.values[name] # Only active values are added here.
elif name in self._hps:
raise ValueError(f"{name} is currently inactive.")
else:
raise KeyError(f"{name} does not exist.")
def __getitem__(self, name):
return self.get(name)
def __contains__(self, name):
try:
self.get(name)
return True
except (KeyError, ValueError):
return False
def Choice(
self,
name,
values,
ordered=None,
default=None,
parent_name=None,
parent_values=None,
):
"""Choice of one value among a predefined set of possible values.
Args:
            name: A string, the name of the parameter. Must be unique for each
`HyperParameter` instance in the search space.
values: A list of possible values. Values must be int, float,
str, or bool. All values must be of the same type.
ordered: Optional boolean, whether the values passed should be
considered to have an ordering. Defaults to `True` for float/int
values. Must be `False` for any other values.
default: Optional default value to return for the parameter.
If unspecified, the default value will be:
- None if None is one of the choices in `values`
- The first entry in `values` otherwise.
parent_name: Optional string, specifying the name of the parent
`HyperParameter` to use as the condition to activate the
current `HyperParameter`.
parent_values: Optional list of the values of the parent
`HyperParameter` to use as the condition to activate the
current `HyperParameter`.
Returns:
The value of the hyperparameter, or None if the hyperparameter is
not active.
"""
with self._maybe_conditional_scope(parent_name, parent_values):
hp = hp_types.Choice(
name=self._get_name(name), # Add name_scopes.
values=values,
ordered=ordered,
default=default,
conditions=self._conditions,
)
return self._retrieve(hp)
def Int(
self,
name,
min_value,
max_value,
step=None,
sampling="linear",
default=None,
parent_name=None,
parent_values=None,
):
"""Integer hyperparameter.
Note that unlike Python's `range` function, `max_value` is *included* in
the possible values this parameter can take on.
Example #1:
```py
hp.Int(
"n_layers",
min_value=6,
max_value=12)
```
The possible values are [6, 7, 8, 9, 10, 11, 12].
Example #2:
```py
hp.Int(
"n_layers",
min_value=6,
max_value=13,
step=3)
```
`step` is the minimum distance between samples.
The possible values are [6, 9, 12].
Example #3:
```py
hp.Int(
"batch_size",
min_value=2,
max_value=32,
step=2,
sampling="log")
```
When `sampling="log"` the `step` is multiplied between samples.
The possible values are [2, 4, 8, 16, 32].
Args:
            name: A string, the name of the parameter. Must be unique for each
`HyperParameter` instance in the search space.
min_value: Integer, the lower limit of range, inclusive.
max_value: Integer, the upper limit of range, inclusive.
step: Optional integer, the distance between two consecutive samples
in the range. If left unspecified, it is possible to sample any
integers in the interval. If `sampling="linear"`, it will be the
                minimum additive step between two samples. If
                `sampling="log"`, it will be the minimum multiplier between
                two samples.
sampling: String. One of "linear", "log", "reverse_log". Defaults to
"linear". When sampling value, it always start from a value in
range [0.0, 1.0). The `sampling` argument decides how the value
is projected into the range of [min_value, max_value].
"linear": min_value + value * (max_value - min_value)
"log": min_value * (max_value / min_value) ^ value
"reverse_log":
(max_value -
min_value * ((max_value / min_value) ^ (1 - value) - 1))
default: Integer, default value to return for the parameter. If
unspecified, the default value will be `min_value`.
parent_name: Optional string, specifying the name of the parent
`HyperParameter` to use as the condition to activate the
current `HyperParameter`.
parent_values: Optional list of the values of the parent
`HyperParameter` to use as the condition to activate the
current `HyperParameter`.
Returns:
The value of the hyperparameter, or None if the hyperparameter is
not active.
"""
with self._maybe_conditional_scope(parent_name, parent_values):
hp = hp_types.Int(
name=self._get_name(name), # Add name_scopes.
min_value=min_value,
max_value=max_value,
step=step,
sampling=sampling,
default=default,
conditions=self._conditions,
)
return self._retrieve(hp)
def Float(
self,
name,
min_value,
max_value,
step=None,
sampling="linear",
default=None,
parent_name=None,
parent_values=None,
):
"""Floating point value hyperparameter.
Example #1:
```py
hp.Float(
"image_rotation_factor",
min_value=0,
max_value=1)
```
        All values in the interval [0, 1] have an equal probability of being
        sampled.
Example #2:
```py
hp.Float(
"image_rotation_factor",
min_value=0,
max_value=1,
step=0.2)
```
`step` is the minimum distance between samples.
The possible values are [0, 0.2, 0.4, 0.6, 0.8, 1.0].
Example #3:
```py
hp.Float(
"learning_rate",
min_value=0.001,
max_value=10,
step=10,
sampling="log")
```
When `sampling="log"`, the `step` is multiplied between samples.
The possible values are [0.001, 0.01, 0.1, 1, 10].
Args:
            name: A string, the name of the parameter. Must be unique for each
`HyperParameter` instance in the search space.
min_value: Float, the lower bound of the range.
max_value: Float, the upper bound of the range.
step: Optional float, the distance between two consecutive samples
in the range. If left unspecified, it is possible to sample any
value in the interval. If `sampling="linear"`, it will be the
                minimum additive step between two samples. If
                `sampling="log"`, it will be the minimum multiplier between
                two samples.
sampling: String. One of "linear", "log", "reverse_log". Defaults to
"linear". When sampling value, it always start from a value in
range [0.0, 1.0). The `sampling` argument decides how the value
is projected into the range of [min_value, max_value].
"linear": min_value + value * (max_value - min_value)
"log": min_value * (max_value / min_value) ^ value
"reverse_log":
(max_value -
min_value * ((max_value / min_value) ^ (1 - value) - 1))
default: Float, the default value to return for the parameter. If
unspecified, the default value will be `min_value`.
parent_name: Optional string, specifying the name of the parent
`HyperParameter` to use as the condition to activate the
current `HyperParameter`.
parent_values: Optional list of the values of the parent
`HyperParameter` to use as the condition to activate the
current `HyperParameter`.
Returns:
The value of the hyperparameter, or None if the hyperparameter is
not active.
"""
with self._maybe_conditional_scope(parent_name, parent_values):
hp = hp_types.Float(
name=self._get_name(name), # Add name_scopes.
min_value=min_value,
max_value=max_value,
step=step,
sampling=sampling,
default=default,
conditions=self._conditions,
)
return self._retrieve(hp)
def Boolean(
self, name, default=False, parent_name=None, parent_values=None
):
"""Choice between True and False.
Args:
            name: A string, the name of the parameter. Must be unique for each
`HyperParameter` instance in the search space.
default: Boolean, the default value to return for the parameter.
If unspecified, the default value will be False.
parent_name: Optional string, specifying the name of the parent
`HyperParameter` to use as the condition to activate the
current `HyperParameter`.
parent_values: Optional list of the values of the parent
`HyperParameter` to use as the condition to activate the
current `HyperParameter`.
Returns:
The value of the hyperparameter, or None if the hyperparameter is
not active.
"""
with self._maybe_conditional_scope(parent_name, parent_values):
hp = hp_types.Boolean(
name=self._get_name(name), # Add name_scopes.
default=default,
conditions=self._conditions,
)
return self._retrieve(hp)
def Fixed(self, name, value, parent_name=None, parent_values=None):
"""Fixed, untunable value.
Args:
            name: A string, the name of the parameter. Must be unique for each
`HyperParameter` instance in the search space.
value: The value to use (can be any JSON-serializable Python type).
parent_name: Optional string, specifying the name of the parent
`HyperParameter` to use as the condition to activate the
current `HyperParameter`.
parent_values: Optional list of the values of the parent
`HyperParameter` to use as the condition to activate the
current `HyperParameter`.
Returns:
The value of the hyperparameter, or None if the hyperparameter is
not active.
"""
with self._maybe_conditional_scope(parent_name, parent_values):
hp = hp_types.Fixed(
name=self._get_name(name), # Add name_scopes.
value=value,
conditions=self._conditions,
)
return self._retrieve(hp)
@property
def space(self):
return self._space
def get_config(self):
return {
"space": [
{"class_name": p.__class__.__name__, "config": p.get_config()}
for p in self.space
],
"values": dict(self.values.items()),
}
@classmethod
def from_config(cls, config):
hps = cls()
for p in config["space"]:
p = hp_types.deserialize(p)
hps._hps[p.name].append(p)
hps._space.append(p)
hps.values = dict(config["values"].items())
return hps
def copy(self):
return HyperParameters.from_config(self.get_config())
def merge(self, hps, overwrite=True):
"""Merges hyperparameters into this object.
Args:
hps: A `HyperParameters` object or list of `HyperParameter`
objects.
overwrite: Boolean, whether existing `HyperParameter`s should be
overridden by those in `hps` with the same name and conditions.
"""
if isinstance(hps, HyperParameters):
hps = hps.space
if not overwrite:
hps = [hp for hp in hps if not self._exists(hp.name, hp.conditions)]
for hp in hps:
self._register(hp, overwrite)
def ensure_active_values(self):
"""Add and remove values if necessary.
KerasTuner requires only active values to be populated. This function
        removes the inactive values and adds the missing active values in
        place on this `HyperParameters` instance.
"""
for hp in self.space:
if self.is_active(hp):
if hp.name not in self.values:
self.values[hp.name] = hp.random_sample()
else:
self.values.pop(hp.name, None)
@classmethod
def from_proto(cls, proto):
hps = cls()
space = []
if isinstance(proto, protos.get_proto().HyperParameters.Values):
# Allows passing in only values, space becomes `Fixed`.
space.extend(
hp_types.Fixed(name, getattr(value, value.WhichOneof("kind")))
for name, value in proto.values.items()
)
else:
space.extend(
hp_types.Fixed.from_proto(fixed_proto)
for fixed_proto in proto.space.fixed_space
)
space.extend(
hp_types.Float.from_proto(float_proto)
for float_proto in proto.space.float_space
)
space.extend(
hp_types.Int.from_proto(int_proto)
for int_proto in proto.space.int_space
)
space.extend(
hp_types.Choice.from_proto(choice_proto)
for choice_proto in proto.space.choice_space
)
space.extend(
hp_types.Boolean.from_proto(boolean_proto)
for boolean_proto in proto.space.boolean_space
)
hps.merge(space)
if isinstance(proto, protos.get_proto().HyperParameters.Values):
values = proto.values
else:
values = proto.values.values
for name, val in values.items():
hps.values[name] = getattr(val, val.WhichOneof("kind"))
return hps
def to_proto(self):
fixed_space = []
float_space = []
int_space = []
choice_space = []
boolean_space = []
for hp in self.space:
if isinstance(hp, hp_types.Fixed):
fixed_space.append(hp.to_proto())
elif isinstance(hp, hp_types.Float):
float_space.append(hp.to_proto())
elif isinstance(hp, hp_types.Int):
int_space.append(hp.to_proto())
elif isinstance(hp, hp_types.Choice):
choice_space.append(hp.to_proto())
elif isinstance(hp, hp_types.Boolean):
boolean_space.append(hp.to_proto())
else:
raise ValueError(f"Unrecognized HP type: {hp}")
values = {}
for name, value in self.values.items():
if isinstance(value, bool):
val = protos.get_proto().Value(boolean_value=value)
elif isinstance(value, float):
val = protos.get_proto().Value(float_value=value)
elif isinstance(value, six.integer_types):
val = protos.get_proto().Value(int_value=value)
elif isinstance(value, six.string_types):
val = protos.get_proto().Value(string_value=value)
else:
raise ValueError(f"Unrecognized value type: {value}")
values[name] = val
return protos.get_proto().HyperParameters(
space=protos.get_proto().HyperParameters.Space(
fixed_space=fixed_space,
float_space=float_space,
int_space=int_space,
choice_space=choice_space,
boolean_space=boolean_space,
),
values=protos.get_proto().HyperParameters.Values(values=values),
)
@contextlib.contextmanager
def _maybe_conditional_scope(self, parent_name, parent_values):
if parent_name:
with self.conditional_scope(parent_name, parent_values):
yield
else:
yield
def _get_name(self, name, name_scopes=None):
"""Returns a name qualified by `name_scopes`."""
if name_scopes is None:
name_scopes = self._name_scopes
return (
"/".join(name_scopes) + "/" + str(name)
if name_scopes
else str(name)
)
def _validate_name(self, name):
for condition in self._conditions:
if condition.name == name:
raise ValueError(
"A conditional `HyperParameter` cannot have the same "
"name as its parent. Found: " + str(name) + " and "
"parent_name: " + str(condition.name)
)
| keras-tuner/keras_tuner/engine/hyperparameters/hyperparameters.py/0 | {
"file_path": "keras-tuner/keras_tuner/engine/hyperparameters/hyperparameters.py",
"repo_id": "keras-tuner",
"token_count": 12241
} | 164 |
# Copyright 2019 The KerasTuner Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""KerasTuner utilities."""
import json
from keras_tuner import backend
from keras_tuner.backend import keras
# Check if we are in an IPython/Colab environment
try:
import IPython
class_name = IPython.get_ipython().__class__.__name__
IS_NOTEBOOK = "Terminal" not in class_name
except (NameError, ImportError): # pragma: no cover
IS_NOTEBOOK = False # pragma: no cover
if IS_NOTEBOOK:
from IPython import display
def try_clear():
if IS_NOTEBOOK:
display.clear_output()
else:
print()
def create_directory(path, remove_existing=False):
# Create the directory if it doesn't exist.
if not backend.io.exists(path):
backend.io.makedirs(path)
# If it does exist, and remove_existing is specified,
# the directory will be removed and recreated.
elif remove_existing:
backend.io.rmtree(path)
backend.io.makedirs(path)
def serialize_keras_object(obj):
if hasattr(keras.utils, "legacy"):
return keras.utils.legacy.serialize_keras_object( # pragma: no cover
obj
)
else:
return keras.utils.serialize_keras_object(obj) # pragma: no cover
def deserialize_keras_object(config, module_objects=None, custom_objects=None):
if hasattr(keras.utils, "legacy"):
return keras.utils.legacy.deserialize_keras_object( # pragma: no cover
config, custom_objects, module_objects
)
else:
return keras.utils.deserialize_keras_object( # pragma: no cover
config, custom_objects, module_objects
)
def to_list(values):
if isinstance(values, list):
return values
if isinstance(values, tuple):
return list(values)
return [values]
def save_json(path, obj):
"""Save Python object to a json file.
Args:
path: String. The path to the json file.
obj: Object. The Python object to be saved.
Returns:
The json string format of the object.
"""
obj_str = json.dumps(obj)
with backend.io.File(path, "w") as f:
f.write(obj_str)
return obj_str
def load_json(path):
"""Load json from file.
Args:
path: String. The path to the json file.
Returns:
A Python object.
"""
with backend.io.File(path, "r") as f:
obj_str = f.read()
return json.loads(obj_str)
| keras-tuner/keras_tuner/utils.py/0 | {
"file_path": "keras-tuner/keras_tuner/utils.py",
"repo_id": "keras-tuner",
"token_count": 1121
} | 165 |
{
"image": "mcr.microsoft.com/vscode/devcontainers/python:3.10",
"postCreateCommand": "sh ./.devcontainer/setup.sh",
"customizations": {
"vscode": {
"settings": {
"python.testing.pytestEnabled": true,
"editor.formatOnSave": true,
"editor.codeActionsOnSave": {
"source.organizeImports": true
},
"[python]": {
"editor.defaultFormatter": "ms-python.black-formatter"
},
"editor.rulers": [
80
]
},
"extensions": [
"ms-python.python",
"ms-python.isort",
"ms-python.flake8",
"ms-python.black-formatter"
]
}
},
"features": {
"ghcr.io/devcontainers/features/github-cli:1": {}
}
}
| keras/.devcontainer/devcontainer.json/0 | {
"file_path": "keras/.devcontainer/devcontainer.json",
"repo_id": "keras",
"token_count": 547
} | 166 |
Keras 3 is a high-velocity open-source project. We welcome contributions!
Contributions can be made in a variety of ways, including coding, enriching documentation, refining docstrings, and providing code examples.
## Current items open for contributions
At [this link](https://github.com/keras-team/keras/issues/18442), you'll find a list of items where your help is needed!
## How to contribute code
Follow these steps to submit your code contribution.
### Step 1. Open an issue
Before making any changes, we recommend opening an issue (if one doesn't already
exist) and discussing your proposed changes. This way, we can give you feedback
and validate the proposed changes.
If the changes are minor (simple bug fix or documentation fix), then feel free
to open a Pull Request (PR) without discussion.
### Step 2. Make code changes
To make code changes, you need to fork the repository. You will need to set up a
development environment and run the unit tests. This is covered in the section
"Setup environment".
### Step 3. Create a pull request
Once the change is ready, open a pull request from your branch in your fork to
the master branch in [keras-team/keras](https://github.com/keras-team/keras).
### Step 4. Sign the Contributor License Agreement
After creating the pull request, the `cla/google` check will be performed and,
if you haven't signed the Contributor License Agreement (CLA), it will fail with
instructions on how to do so. Please follow the instructions to sign the CLA and
the check will pass.
![CLA signed](https://github.com/keras-team/keras/assets/1091026/71c26353-e3b5-4135-8bae-64693c717775)
### Step 5. Code review
If the tests fail, look into the error messages and try to fix them.
![CI tests](https://github.com/keras-team/keras/assets/1091026/6f6c17ef-6bd7-4e95-9fbc-1906cde37380)
A reviewer will review the pull request and provide comments. There may be
several rounds of comments and code changes before the pull request gets
approved by the reviewer.
![Approval from reviewer](https://github.com/keras-team/keras/assets/1091026/8d28f74c-21e9-4146-b0ff-62d649a552a8)
### Step 6. Merging
Once the pull request is approved, a `ready to pull` tag will be added to the
pull request. A team member will take care of the merging.
![Ready to pull and merged](https://github.com/keras-team/keras/assets/1091026/c3908345-d7ae-44ee-a428-01f3b448b46b)
Here is an [example pull request](https://github.com/keras-team/keras/pull/18848)
for your reference.
## Setup environment
We provide two ways of setting up a development environment. One is to use a
dev container, and the other one is to set up a local environment by installing
the dev tools needed.
### Option 1: GitHub Codespace or dev container
We support GitHub Codespaces, Visual Studio Code dev containers and JetBrains dev
containers. Please see the
[Dev container documentation](https://github.com/keras-team/keras/tree/master/.devcontainer).
### Option 2: Set up a local environment
To set up your local dev environment, you will need the following tools.
1. [git](https://github.com/) for code repository management.
2. [python](https://www.python.org/) to build and code in Keras.
The following commands check that the tools above are successfully installed. Note
that Keras requires at least Python 3.9 to run.
```shell
git --version
python --version
```
Clone your forked repo to your local machine. Go to the cloned directory to
install the dependencies.
```shell
git clone https://github.com/YOUR_GITHUB_USERNAME/keras.git
cd keras
pip install -r requirements.txt
```
You then need to configure the backend to use, see the
[Configuring your backend](https://github.com/keras-team/keras/blob/master/README.md#configuring-your-backend)
section of the README.
You can also add GPU support to your environment, see the
[Adding GPU support](https://github.com/keras-team/keras/blob/master/README.md#adding-gpu-support)
section of the README.
## Code style
Keras uses [Black](https://black.readthedocs.io/en/stable/) and
[isort](https://pycqa.github.io/isort/) to format the code. Please refer to
[requirements-common.txt](https://github.com/keras-team/keras/blob/master/requirements-common.txt)
for the required versions. Run the following command **at the root directory of
the repo** to format your code.
```
sh shell/format.sh
```
It will also display the errors that cannot be resolved by autoformatting. You
need to follow the output of the command to resolve them manually.
If you do not want to auto format the code but only show the lint errors, you
can run `sh shell/lint.sh` **at the root directory of the repo**.
### Docstrings
We do not have an automated way to check docstring style, so if you write
or edit any docstring, please make sure to check them manually.
Keras docstrings follow the conventions below:
A **class docstring** may contain the following items:
* A one-line description of the class.
* Paragraph(s) of more detailed information.
* Optional `Examples` section.
* `Args` section for arguments in `__init__()`.
* If it's a layer:
* `Call arguments` section for arguments in `Layer.call()`.
* `Returns` section for the return values of `Layer.call()`.
* Optional `Raises` section for possible errors.
You can check out `MultiHeadAttention` as an example
[(link)](https://github.com/keras-team/keras/blob/v3.0.0/keras/layers/attention/multi_head_attention.py#L20).
A **function docstring** may contain the following items:
* One-line description of the function.
* Paragraph(s) of more detailed information.
* Optional `Examples` section.
* `Args` section for the function arguments.
* `Returns` section for the return values.
* Optional `Raises` section for possible errors.
You can check out `text_dataset_from_directory` as an example
[(link)](https://github.com/keras-team/keras/blob/v3.0.0/keras/utils/text_dataset_utils.py#L27).
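For illustration, a function docstring that follows these conventions could look like the sketch below (the function is invented purely to demonstrate the layout and is not part of the Keras API):

```python
def scale_values(x, factor=1.0):
    """Multiplies every entry of a tensor by a constant factor.

    This is a toy example whose only purpose is to illustrate the docstring
    layout: one-line summary, details, `Args`, `Returns` and `Raises`.

    Args:
        x: Input tensor.
        factor: Float, the constant to multiply by. Defaults to `1.0`.

    Returns:
        A tensor with the same shape as `x`, scaled by `factor`.

    Raises:
        ValueError: If `factor` is not a finite number.
    """
    ...
```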
## Run tests
We use [pytest](https://pytest.org/) to run the tests.
### Run a test file
To run the tests in `keras/losses/losses_test.py`, use the following command
at the root directory of the repo.
```shell
pytest keras/losses/losses_test.py
```
### Run a single test case
You can specify a single test class to run within a file.
```shell
pytest keras/losses/losses_test.py::MeanSquaredErrorTest
```
You can also specify a single test method to run within a class.
```shell
pytest keras/losses/losses_test.py::MeanSquaredErrorTest::test_sample_weighted
```
### Run all tests
You can run all the tests locally by running the following command in the repo
root directory.
```shell
pytest keras
```
Note that you can skip the Keras applications tests using the
`SKIP_APPLICATIONS_TESTS` environment variable. This will cut down the testing
time significantly.
```shell
SKIP_APPLICATIONS_TESTS=True pytest keras
```
To run all tests using a different backend, you can simply specify it on the
command line.
```shell
KERAS_BACKEND=jax SKIP_APPLICATIONS_TESTS=True pytest keras
```
| keras/CONTRIBUTING.md/0 | {
"file_path": "keras/CONTRIBUTING.md",
"repo_id": "keras",
"token_count": 2085
} | 167 |
"""Benchmark reshaping layers.
To run benchmarks, see the following command for an example; please change the
flags to your custom values:
```
python3 -m benchmarks.layer_benchmark.reshaping_benchmark \
--benchmark_name=benchmark_cropping2d \
--num_samples=2048 \
--batch_size=256 \
--jit_compile=True
```
"""
from absl import app
from absl import flags
from benchmarks.layer_benchmark.base_benchmark import LayerBenchmark
FLAGS = flags.FLAGS
def benchmark_cropping1d(
num_samples,
batch_size,
jit_compile=True,
):
layer_name = "Cropping1D"
init_args = {}
benchmark = LayerBenchmark(
layer_name,
init_args,
input_shape=[1024, 256],
jit_compile=jit_compile,
)
benchmark.benchmark_predict(
num_samples=num_samples,
batch_size=batch_size,
)
benchmark.benchmark_train(
num_samples=num_samples,
batch_size=batch_size,
)
def benchmark_cropping2d(
num_samples,
batch_size,
jit_compile=True,
):
layer_name = "Cropping2D"
init_args = {}
benchmark = LayerBenchmark(
layer_name,
init_args,
input_shape=[256, 256, 3],
jit_compile=jit_compile,
)
benchmark.benchmark_predict(
num_samples=num_samples,
batch_size=batch_size,
)
benchmark.benchmark_train(
num_samples=num_samples,
batch_size=batch_size,
)
def benchmark_cropping3d(
num_samples,
batch_size,
jit_compile=True,
):
layer_name = "Cropping3D"
init_args = {}
benchmark = LayerBenchmark(
layer_name,
init_args,
input_shape=[32, 32, 32, 3],
jit_compile=jit_compile,
)
benchmark.benchmark_predict(
num_samples=num_samples,
batch_size=batch_size,
)
benchmark.benchmark_train(
num_samples=num_samples,
batch_size=batch_size,
)
def benchmark_flatten(
num_samples,
batch_size,
jit_compile=True,
):
layer_name = "Flatten"
init_args = {}
benchmark = LayerBenchmark(
layer_name,
init_args,
input_shape=[256, 256, 3],
jit_compile=jit_compile,
)
benchmark.benchmark_predict(
num_samples=num_samples,
batch_size=batch_size,
)
benchmark.benchmark_train(
num_samples=num_samples,
batch_size=batch_size,
)
def benchmark_permute(
num_samples,
batch_size,
jit_compile=True,
):
layer_name = "Permute"
init_args = {
"dims": (2, 1),
}
benchmark = LayerBenchmark(
layer_name,
init_args,
input_shape=[256, 256],
jit_compile=jit_compile,
)
benchmark.benchmark_predict(
num_samples=num_samples,
batch_size=batch_size,
)
benchmark.benchmark_train(
num_samples=num_samples,
batch_size=batch_size,
)
def benchmark_up_sampling1d(
num_samples,
batch_size,
jit_compile=True,
):
layer_name = "UpSampling1D"
init_args = {}
benchmark = LayerBenchmark(
layer_name,
init_args,
input_shape=[256, 3],
jit_compile=jit_compile,
)
benchmark.benchmark_predict(
num_samples=num_samples,
batch_size=batch_size,
)
benchmark.benchmark_train(
num_samples=num_samples,
batch_size=batch_size,
)
def benchmark_up_sampling2d(
num_samples,
batch_size,
jit_compile=True,
):
layer_name = "UpSampling2D"
init_args = {}
benchmark = LayerBenchmark(
layer_name,
init_args,
input_shape=[128, 128, 3],
jit_compile=jit_compile,
)
benchmark.benchmark_predict(
num_samples=num_samples,
batch_size=batch_size,
)
benchmark.benchmark_train(
num_samples=num_samples,
batch_size=batch_size,
)
def benchmark_up_sampling3d(
num_samples,
batch_size,
jit_compile=True,
):
layer_name = "UpSampling3D"
init_args = {}
benchmark = LayerBenchmark(
layer_name,
init_args,
input_shape=[32, 16, 16, 3],
jit_compile=jit_compile,
)
benchmark.benchmark_predict(
num_samples=num_samples,
batch_size=batch_size,
)
benchmark.benchmark_train(
num_samples=num_samples,
batch_size=batch_size,
)
def benchmark_zero_padding1d(
num_samples,
batch_size,
jit_compile=True,
):
layer_name = "ZeroPadding1D"
init_args = {}
benchmark = LayerBenchmark(
layer_name,
init_args,
input_shape=[256, 3],
jit_compile=jit_compile,
)
benchmark.benchmark_predict(
num_samples=num_samples,
batch_size=batch_size,
)
benchmark.benchmark_train(
num_samples=num_samples,
batch_size=batch_size,
)
def benchmark_zero_padding2d(
num_samples,
batch_size,
jit_compile=True,
):
layer_name = "ZeroPadding2D"
init_args = {}
benchmark = LayerBenchmark(
layer_name,
init_args,
input_shape=[256, 256, 3],
jit_compile=jit_compile,
)
benchmark.benchmark_predict(
num_samples=num_samples,
batch_size=batch_size,
)
benchmark.benchmark_train(
num_samples=num_samples,
batch_size=batch_size,
)
def benchmark_zero_padding3d(
num_samples,
batch_size,
jit_compile=True,
):
layer_name = "ZeroPadding3D"
init_args = {}
benchmark = LayerBenchmark(
layer_name,
init_args,
input_shape=[32, 32, 32, 3],
jit_compile=jit_compile,
)
benchmark.benchmark_predict(
num_samples=num_samples,
batch_size=batch_size,
)
benchmark.benchmark_train(
num_samples=num_samples,
batch_size=batch_size,
)
BENCHMARK_NAMES = {
"benchmark_cropping1d": benchmark_cropping1d,
"benchmark_cropping2d": benchmark_cropping2d,
"benchmark_cropping3d": benchmark_cropping3d,
"benchmark_flatten": benchmark_flatten,
"benchmark_permute": benchmark_permute,
"benchmark_up_sampling1d": benchmark_up_sampling1d,
"benchmark_up_sampling2d": benchmark_up_sampling2d,
"benchmark_up_sampling3d": benchmark_up_sampling3d,
"benchmark_zero_padding1d": benchmark_zero_padding1d,
"benchmark_zero_padding2d": benchmark_zero_padding2d,
"benchmark_zero_padding3d": benchmark_zero_padding3d,
}
def main(_):
benchmark_name = FLAGS.benchmark_name
num_samples = FLAGS.num_samples
batch_size = FLAGS.batch_size
jit_compile = FLAGS.jit_compile
if benchmark_name is None:
for name, benchmark_fn in BENCHMARK_NAMES.items():
benchmark_fn(num_samples, batch_size, jit_compile)
return
if benchmark_name not in BENCHMARK_NAMES:
raise ValueError(
f"Invalid benchmark name: {benchmark_name}, `benchmark_name` must "
f"be one of {BENCHMARK_NAMES.keys()}"
)
benchmark_fn = BENCHMARK_NAMES[benchmark_name]
benchmark_fn(num_samples, batch_size, jit_compile)
if __name__ == "__main__":
app.run(main)
| keras/benchmarks/layer_benchmark/reshaping_benchmark.py/0 | {
"file_path": "keras/benchmarks/layer_benchmark/reshaping_benchmark.py",
"repo_id": "keras",
"token_count": 3390
} | 168 |
# flake8: noqa
import os
# Set backend env to torch
os.environ["KERAS_BACKEND"] = "torch"
import torch
import torch.nn as nn
import torch.optim as optim
from keras import layers
import keras
import numpy as np
# Model / data parameters
num_classes = 10
input_shape = (28, 28, 1)
learning_rate = 0.01
batch_size = 64
num_epochs = 1
# Load the data and split it between train and test sets
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
# Scale images to the [0, 1] range
x_train = x_train.astype("float32") / 255
x_test = x_test.astype("float32") / 255
# Make sure images have shape (28, 28, 1)
x_train = np.expand_dims(x_train, -1)
x_test = np.expand_dims(x_test, -1)
print("x_train shape:", x_train.shape)
print(x_train.shape[0], "train samples")
print(x_test.shape[0], "test samples")
# Create the Keras model
model = keras.Sequential(
[
layers.Input(shape=(28, 28, 1)),
layers.Conv2D(32, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Flatten(),
layers.Dropout(0.5),
layers.Dense(num_classes),
]
)
#################################################################
######## Writing a torch training loop for a Keras model ########
#################################################################
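# With the torch backend, a Keras model is itself a `torch.nn.Module`, so
# `model.parameters()` below can be passed directly to a torch optimizer.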
# Instantiate the torch optimizer
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
# Instantiate the torch loss function
loss_fn = nn.CrossEntropyLoss()
def train(model, train_loader, num_epochs, optimizer, loss_fn):
for epoch in range(num_epochs):
running_loss = 0.0
for batch_idx, (inputs, targets) in enumerate(train_loader):
# Forward pass
outputs = model(inputs)
loss = loss_fn(outputs, targets)
# Backward and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
running_loss += loss.item()
# Print loss statistics
if (batch_idx + 1) % 10 == 0:
print(
f"Epoch [{epoch+1}/{num_epochs}], "
f"Batch [{batch_idx+1}/{len(train_loader)}], "
f"Loss: {running_loss / 10}"
)
running_loss = 0.0
# Create a TensorDataset
dataset = torch.utils.data.TensorDataset(
torch.from_numpy(x_train), torch.from_numpy(y_train)
)
# Create a DataLoader
train_loader = torch.utils.data.DataLoader(
dataset, batch_size=batch_size, shuffle=False
)
train(model, train_loader, num_epochs, optimizer, loss_fn)
################################################################
######## Using a Keras model or layer in a torch Module ########
################################################################
class MyModel(nn.Module):
def __init__(self):
super().__init__()
self.model = keras.Sequential(
[
layers.Input(shape=(28, 28, 1)),
layers.Conv2D(32, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Flatten(),
layers.Dropout(0.5),
layers.Dense(num_classes),
]
)
def forward(self, x):
return self.model(x)
torch_module = MyModel()
# Instantiate the torch optimizer
optimizer = optim.Adam(torch_module.parameters(), lr=learning_rate)
# Instantiate the torch loss function
loss_fn = nn.CrossEntropyLoss()
train(torch_module, train_loader, num_epochs, optimizer, loss_fn)
| keras/examples/demo_custom_torch_workflow.py/0 | {
"file_path": "keras/examples/demo_custom_torch_workflow.py",
"repo_id": "keras",
"token_count": 1620
} | 169 |
try:
import namex
except ImportError:
namex = None
# These dicts reference "canonical names" only
# (i.e. the first name an object was registered with).
REGISTERED_NAMES_TO_OBJS = {}
REGISTERED_OBJS_TO_NAMES = {}
def register_internal_serializable(path, symbol):
global REGISTERED_NAMES_TO_OBJS
if isinstance(path, (list, tuple)):
name = path[0]
else:
name = path
REGISTERED_NAMES_TO_OBJS[name] = symbol
REGISTERED_OBJS_TO_NAMES[symbol] = name
def get_symbol_from_name(name):
return REGISTERED_NAMES_TO_OBJS.get(name, None)
def get_name_from_symbol(symbol):
return REGISTERED_OBJS_TO_NAMES.get(symbol, None)
if namex:
class keras_export(namex.export):
def __init__(self, path):
super().__init__(package="keras", path=path)
def __call__(self, symbol):
register_internal_serializable(self.path, symbol)
return super().__call__(symbol)
else:
class keras_export:
def __init__(self, path):
self.path = path
def __call__(self, symbol):
register_internal_serializable(self.path, symbol)
return symbol
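# Typical usage is as a decorator on a public symbol, for example
# `@keras_export("keras.layers.MyLayer")` placed above a class definition
# (`MyLayer` is an illustrative name). The exported symbol can then be looked
# up by name with `get_symbol_from_name` and back with `get_name_from_symbol`.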
| keras/keras/api_export.py/0 | {
"file_path": "keras/keras/api_export.py",
"repo_id": "keras",
"token_count": 507
} | 170 |
import builtins
import math
import jax.experimental.sparse as jax_sparse
import jax.numpy as jnp
from keras.backend import config
from keras.backend.common import dtypes
from keras.backend.common.variables import standardize_dtype
from keras.backend.jax import sparse
from keras.backend.jax.core import cast
from keras.backend.jax.core import convert_to_tensor
@sparse.elementwise_binary_union(linear=True, use_sparsify=True)
def add(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return jnp.add(x1, x2)
def bincount(x, weights=None, minlength=0):
if len(x.shape) == 2:
if weights is None:
def bincount_fn(arr):
return jnp.bincount(arr, minlength=minlength)
bincounts = list(map(bincount_fn, x))
else:
def bincount_fn(arr_w):
return jnp.bincount(
arr_w[0], weights=arr_w[1], minlength=minlength
)
bincounts = list(map(bincount_fn, zip(x, weights)))
return jnp.stack(bincounts)
return jnp.bincount(x, weights=weights, minlength=minlength)
def einsum(subscripts, *operands, **kwargs):
operands = [convert_to_tensor(x) for x in operands]
return jnp.einsum(subscripts, *operands, **kwargs)
@sparse.elementwise_binary_union(linear=True, use_sparsify=True)
def subtract(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return jnp.subtract(x1, x2)
def matmul(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
    # When both x1 and x2 are int8, specify `preferred_element_type` as int32
    # to enable hardware-accelerated matmul.
x1_dtype = standardize_dtype(x1.dtype)
x2_dtype = standardize_dtype(x2.dtype)
if x1_dtype == "int8" and x2_dtype == "int8":
preferred_element_type = "int32"
else:
preferred_element_type = None
if isinstance(x1, jax_sparse.JAXSparse) or isinstance(
x2, jax_sparse.JAXSparse
):
if not hasattr(matmul, "sparse_matmul"):
matmul.sparse_matmul = jax_sparse.sparsify(jnp.matmul)
if isinstance(x1, jax_sparse.BCOO):
x1 = jax_sparse.bcoo_update_layout(
x1, n_batch=len(x1.shape) - 2, on_inefficient="warn"
)
if isinstance(x2, jax_sparse.BCOO):
x2 = jax_sparse.bcoo_update_layout(
x2, n_batch=len(x2.shape) - 2, on_inefficient="warn"
)
return matmul.sparse_matmul(
x1, x2, preferred_element_type=preferred_element_type
)
return jnp.matmul(x1, x2, preferred_element_type=preferred_element_type)
def multiply(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
if isinstance(x1, jax_sparse.BCOO):
if isinstance(x2, jax_sparse.BCOO):
# x1 is sparse, x2 is sparse.
if x1.indices is x2.indices:
# `bcoo_multiply_sparse` will not detect that the indices are
# the same, optimize this case here.
if not x1.unique_indices:
x1 = jax_sparse.bcoo_sum_duplicates(x1)
x2 = jax_sparse.bcoo_sum_duplicates(x2)
return jax_sparse.BCOO(
(jnp.multiply(x1.data, x2.data), x1.indices),
shape=x1.shape,
indices_sorted=True,
unique_indices=True,
)
else:
return jax_sparse.bcoo_multiply_sparse(x1, x2)
else:
# x1 is sparse, x2 is dense.
out_data = jax_sparse.bcoo_multiply_dense(x1, x2)
return jax_sparse.BCOO(
(out_data, x1.indices),
shape=x1.shape,
indices_sorted=x1.indices_sorted,
unique_indices=x1.unique_indices,
)
elif isinstance(x2, jax_sparse.BCOO):
# x1 is dense, x2 is sparse.
out_data = jax_sparse.bcoo_multiply_dense(x2, x1)
return jax_sparse.BCOO(
(out_data, x2.indices),
shape=x2.shape,
indices_sorted=x2.indices_sorted,
unique_indices=x2.unique_indices,
)
return jnp.multiply(x1, x2)
def mean(x, axis=None, keepdims=False):
x = convert_to_tensor(x)
ori_dtype = standardize_dtype(x.dtype)
# `jnp.mean` does not handle low precision (e.g., float16) overflow
# correctly, so we compute with float32 and cast back to the original type.
compute_dtype = dtypes.result_type(x.dtype, "float32")
if "int" in ori_dtype or ori_dtype == "bool":
result_dtype = compute_dtype
else:
result_dtype = ori_dtype
if isinstance(x, jax_sparse.BCOO):
if axis is None:
axis = tuple(range(len(x.shape)))
(
canonical_axis,
keep_dims_shape,
broadcast_dimensions,
) = sparse.axis_shape_dims_for_broadcast_in_dim(
axis, x.shape, insert_dims=False
)
divisor = math.prod(x.shape[i] for i in canonical_axis)
output = jax_sparse.bcoo_reduce_sum(x, axes=canonical_axis)
output = jax_sparse.BCOO(
(output.data.astype(result_dtype) / divisor, output.indices),
shape=output.shape,
)
if keepdims:
            # `bcoo_reduce_sum` does not support keepdims, and neither does
            # sparsify(jnp.sum), so we recreate the empty dimensions.
output = jax_sparse.bcoo_broadcast_in_dim(
output,
shape=keep_dims_shape,
broadcast_dimensions=broadcast_dimensions,
)
return output
else:
output = jnp.mean(x, axis=axis, keepdims=keepdims, dtype=compute_dtype)
return cast(output, result_dtype)
def max(x, axis=None, keepdims=False, initial=None):
x = convert_to_tensor(x)
return jnp.max(x, axis=axis, keepdims=keepdims, initial=initial)
def ones(shape, dtype=None):
dtype = dtype or config.floatx()
return jnp.ones(shape, dtype=dtype)
def zeros(shape, dtype=None):
dtype = dtype or config.floatx()
return jnp.zeros(shape, dtype=dtype)
@sparse.elementwise_unary(linear=False)
def absolute(x):
x = convert_to_tensor(x)
return jnp.absolute(x)
@sparse.elementwise_unary(linear=False)
def abs(x):
x = convert_to_tensor(x)
return jnp.absolute(x)
def all(x, axis=None, keepdims=False):
return jnp.all(x, axis=axis, keepdims=keepdims)
def any(x, axis=None, keepdims=False):
return jnp.any(x, axis=axis, keepdims=keepdims)
def amax(x, axis=None, keepdims=False):
return jnp.amax(x, axis=axis, keepdims=keepdims)
def amin(x, axis=None, keepdims=False):
return jnp.amin(x, axis=axis, keepdims=keepdims)
def append(x1, x2, axis=None):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return jnp.append(x1, x2, axis=axis)
def arange(start, stop=None, step=1, dtype=None):
if dtype is None:
dtypes_to_resolve = [
getattr(start, "dtype", type(start)),
getattr(step, "dtype", type(step)),
]
if stop is not None:
dtypes_to_resolve.append(getattr(stop, "dtype", type(stop)))
dtype = dtypes.result_type(*dtypes_to_resolve)
dtype = standardize_dtype(dtype)
return jnp.arange(start, stop, step=step, dtype=dtype)
@sparse.densifying_unary
def arccos(x):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
dtype = config.floatx()
else:
dtype = dtypes.result_type(x.dtype, float)
x = cast(x, dtype)
return jnp.arccos(x)
@sparse.densifying_unary
def arccosh(x):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
dtype = config.floatx()
else:
dtype = dtypes.result_type(x.dtype, float)
x = cast(x, dtype)
return jnp.arccosh(x)
@sparse.elementwise_unary(linear=False)
def arcsin(x):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
dtype = config.floatx()
else:
dtype = dtypes.result_type(x.dtype, float)
x = cast(x, dtype)
return jnp.arcsin(x)
@sparse.elementwise_unary(linear=False)
def arcsinh(x):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
dtype = config.floatx()
else:
dtype = dtypes.result_type(x.dtype, float)
x = cast(x, dtype)
return jnp.arcsinh(x)
@sparse.elementwise_unary(linear=False)
def arctan(x):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
dtype = config.floatx()
else:
dtype = dtypes.result_type(x.dtype, float)
x = cast(x, dtype)
return jnp.arctan(x)
def arctan2(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
dtype = dtypes.result_type(x1.dtype, x2.dtype, float)
x1 = cast(x1, dtype)
x2 = cast(x2, dtype)
return jnp.arctan2(x1, x2)
@sparse.elementwise_unary(linear=False)
def arctanh(x):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
dtype = config.floatx()
else:
dtype = dtypes.result_type(x.dtype, float)
x = cast(x, dtype)
return jnp.arctanh(x)
def argmax(x, axis=None):
return jnp.argmax(x, axis=axis)
def argmin(x, axis=None):
return jnp.argmin(x, axis=axis)
def argsort(x, axis=-1):
x = convert_to_tensor(x)
if x.ndim == 0:
return jnp.argsort(x, axis=None)
return jnp.argsort(x, axis=axis)
def array(x, dtype=None):
return jnp.array(x, dtype=dtype)
def average(x, axis=None, weights=None):
x = convert_to_tensor(x)
dtypes_to_resolve = [x.dtype, float]
if weights is not None:
weights = convert_to_tensor(weights)
dtypes_to_resolve.append(weights.dtype)
dtype = dtypes.result_type(*dtypes_to_resolve)
x = cast(x, dtype)
if weights is not None:
weights = cast(weights, dtype)
return jnp.average(x, weights=weights, axis=axis)
def broadcast_to(x, shape):
x = convert_to_tensor(x)
return jnp.broadcast_to(x, shape)
@sparse.elementwise_unary(linear=False)
def ceil(x):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
dtype = config.floatx()
else:
dtype = dtypes.result_type(x.dtype, float)
return cast(jnp.ceil(x), dtype)
def clip(x, x_min, x_max):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "bool":
x = cast(x, "int32")
return jnp.clip(x, x_min, x_max)
def concatenate(xs, axis=0):
bcoo_count = builtins.sum(isinstance(x, jax_sparse.BCOO) for x in xs)
if bcoo_count:
if bcoo_count == len(xs):
ndim = len(xs[0].shape)
if not -ndim <= axis < ndim:
raise ValueError(
f"In `axis`, axis {axis} is out of bounds for array "
f"of dimension {ndim}"
)
if axis < 0:
axis = axis + ndim
return jax_sparse.bcoo_concatenate(xs, dimension=axis)
else:
xs = [
x.todense() if isinstance(x, jax_sparse.JAXSparse) else x
for x in xs
]
return jnp.concatenate(xs, axis=axis)
@sparse.elementwise_unary(linear=True)
def conjugate(x):
x = convert_to_tensor(x)
return jnp.conjugate(x)
@sparse.elementwise_unary(linear=True)
def conj(x):
x = convert_to_tensor(x)
return jnp.conjugate(x)
@sparse.elementwise_unary(linear=True)
def copy(x):
x = convert_to_tensor(x)
return jnp.copy(x)
@sparse.densifying_unary
def cos(x):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
dtype = config.floatx()
else:
dtype = dtypes.result_type(x.dtype, float)
x = cast(x, dtype)
return jnp.cos(x)
@sparse.densifying_unary
def cosh(x):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
dtype = config.floatx()
else:
dtype = dtypes.result_type(x.dtype, float)
x = cast(x, dtype)
return jnp.cosh(x)
def count_nonzero(x, axis=None):
return cast(jnp.count_nonzero(x, axis=axis), "int32")
def cross(x1, x2, axisa=-1, axisb=-1, axisc=-1, axis=None):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return jnp.cross(
x1,
x2,
axisa=axisa,
axisb=axisb,
axisc=axisc,
axis=axis,
)
def cumprod(x, axis=None, dtype=None):
x = convert_to_tensor(x)
return jnp.cumprod(x, axis=axis, dtype=dtype)
def cumsum(x, axis=None, dtype=None):
x = convert_to_tensor(x)
return jnp.cumsum(x, axis=axis, dtype=dtype)
def diag(x, k=0):
x = convert_to_tensor(x)
return jnp.diag(x, k=k)
def diagonal(x, offset=0, axis1=0, axis2=1):
x = convert_to_tensor(x)
return jnp.diagonal(
x,
offset=offset,
axis1=axis1,
axis2=axis2,
)
def diff(a, n=1, axis=-1):
a = convert_to_tensor(a)
return jnp.diff(a, n=n, axis=axis)
def digitize(x, bins):
x = convert_to_tensor(x)
bins = convert_to_tensor(bins)
return jnp.digitize(x, bins)
def dot(x, y):
x = convert_to_tensor(x)
y = convert_to_tensor(y)
return jnp.dot(x, y)
def empty(shape, dtype=None):
dtype = dtype or config.floatx()
return jnp.empty(shape, dtype=dtype)
def equal(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return jnp.equal(x1, x2)
@sparse.densifying_unary
def exp(x):
x = convert_to_tensor(x)
ori_dtype = standardize_dtype(x.dtype)
if "int" in ori_dtype or ori_dtype == "bool":
x = cast(x, config.floatx())
return jnp.exp(x)
def expand_dims(x, axis):
x = convert_to_tensor(x)
if isinstance(x, jax_sparse.BCOO):
(
_,
result_shape,
broadcast_dimensions,
) = sparse.axis_shape_dims_for_broadcast_in_dim(
axis, x.shape, insert_dims=True
)
return jax_sparse.bcoo_broadcast_in_dim(
x, shape=result_shape, broadcast_dimensions=broadcast_dimensions
)
return jnp.expand_dims(x, axis)
@sparse.elementwise_unary(linear=False)
def expm1(x):
x = convert_to_tensor(x)
ori_dtype = standardize_dtype(x.dtype)
if "int" in ori_dtype or ori_dtype == "bool":
x = cast(x, config.floatx())
return jnp.expm1(x)
def flip(x, axis=None):
return jnp.flip(x, axis=axis)
@sparse.elementwise_unary(linear=False)
def floor(x):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
x = cast(x, config.floatx())
return jnp.floor(x)
def full(shape, fill_value, dtype=None):
dtype = dtype or config.floatx()
return jnp.full(shape, fill_value, dtype=dtype)
def full_like(x, fill_value, dtype=None):
return jnp.full_like(x, fill_value, dtype=dtype)
def greater(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return jnp.greater(x1, x2)
def greater_equal(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return jnp.greater_equal(x1, x2)
def hstack(xs):
return jnp.hstack(xs)
def identity(n, dtype=None):
dtype = dtype or config.floatx()
return jnp.identity(n, dtype=dtype)
@sparse.elementwise_unary(linear=True)
def imag(x):
x = convert_to_tensor(x)
return jnp.imag(x)
def isclose(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return jnp.isclose(x1, x2)
@sparse.densifying_unary
def isfinite(x):
x = convert_to_tensor(x)
return jnp.isfinite(x)
@sparse.elementwise_unary(linear=False)
def isinf(x):
x = convert_to_tensor(x)
return jnp.isinf(x)
@sparse.elementwise_unary(linear=False)
def isnan(x):
x = convert_to_tensor(x)
return jnp.isnan(x)
def less(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return jnp.less(x1, x2)
def less_equal(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return jnp.less_equal(x1, x2)
def linspace(
start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0
):
return jnp.linspace(
start,
stop,
num=num,
endpoint=endpoint,
retstep=retstep,
dtype=dtype,
axis=axis,
)
@sparse.densifying_unary
def log(x):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
x = cast(x, config.floatx())
return jnp.log(x)
@sparse.densifying_unary
def log10(x):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
x = cast(x, config.floatx())
return jnp.log10(x)
@sparse.elementwise_unary(linear=False)
def log1p(x):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
x = cast(x, config.floatx())
return jnp.log1p(x)
@sparse.densifying_unary
def log2(x):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
x = cast(x, config.floatx())
return jnp.log2(x)
def logaddexp(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
dtype = dtypes.result_type(x1.dtype, x2.dtype, float)
x1 = cast(x1, dtype)
x2 = cast(x2, dtype)
return jnp.logaddexp(x1, x2)
def logical_and(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return jnp.logical_and(x1, x2)
def logical_not(x):
x = convert_to_tensor(x)
return jnp.logical_not(x)
def logical_or(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return jnp.logical_or(x1, x2)
def logspace(start, stop, num=50, endpoint=True, base=10, dtype=None, axis=0):
return jnp.logspace(
start,
stop,
num=num,
endpoint=endpoint,
base=base,
dtype=dtype,
axis=axis,
)
@sparse.elementwise_binary_union(linear=False, use_sparsify=False)
def maximum(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return jnp.maximum(x1, x2)
def median(x, axis=None, keepdims=False):
# axis of jnp.median must be hashable
if isinstance(axis, list):
axis = tuple(axis)
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
x = cast(x, config.floatx())
result = jnp.median(x, axis=axis, keepdims=keepdims)
    # TODO: jnp.median fails to keep dims when axis is None
if keepdims is True and axis is None:
for _ in range(x.ndim - 1):
result = jnp.expand_dims(result, axis=-1)
return result
def meshgrid(*x, indexing="xy"):
return jnp.meshgrid(*x, indexing=indexing)
def min(x, axis=None, keepdims=False, initial=None):
x = convert_to_tensor(x)
return jnp.min(x, axis=axis, keepdims=keepdims, initial=initial)
@sparse.elementwise_binary_union(linear=False, use_sparsify=False)
def minimum(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return jnp.minimum(x1, x2)
def mod(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return jnp.mod(x1, x2)
def moveaxis(x, source, destination):
return jnp.moveaxis(x, source=source, destination=destination)
def nan_to_num(x):
x = convert_to_tensor(x)
return jnp.nan_to_num(x)
def ndim(x):
return jnp.ndim(x)
def nonzero(x):
return jnp.nonzero(x)
def not_equal(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return jnp.not_equal(x1, x2)
def ones_like(x, dtype=None):
return jnp.ones_like(x, dtype=dtype)
def zeros_like(x, dtype=None):
return jnp.zeros_like(x, dtype=dtype)
def outer(x1, x2):
return jnp.outer(x1, x2)
def pad(x, pad_width, mode="constant", constant_values=None):
x = convert_to_tensor(x)
kwargs = {}
if constant_values is not None:
if mode != "constant":
raise ValueError(
"Argument `constant_values` can only be "
"provided when `mode == 'constant'`. "
f"Received: mode={mode}"
)
kwargs["constant_values"] = constant_values
return jnp.pad(x, pad_width, mode=mode, **kwargs)
def prod(x, axis=None, keepdims=False, dtype=None):
x = convert_to_tensor(x)
return jnp.prod(x, axis=axis, keepdims=keepdims, dtype=dtype)
def quantile(x, q, axis=None, method="linear", keepdims=False):
x = convert_to_tensor(x)
q = convert_to_tensor(q)
if standardize_dtype(x.dtype) == "int64":
x = cast(x, config.floatx())
result = jnp.quantile(x, q, axis=axis, method=method, keepdims=keepdims)
    # TODO: jnp.quantile fails to keep dims when axis is None
if keepdims is True and axis is None:
for _ in range(x.ndim - 1):
result = jnp.expand_dims(result, axis=-1)
return result
def ravel(x):
x = convert_to_tensor(x)
return jnp.ravel(x)
@sparse.elementwise_unary(linear=True)
def real(x):
x = convert_to_tensor(x)
return jnp.real(x)
@sparse.densifying_unary
def reciprocal(x):
x = convert_to_tensor(x)
return jnp.reciprocal(x)
def repeat(x, repeats, axis=None):
x = convert_to_tensor(x)
return jnp.repeat(x, repeats, axis=axis)
def reshape(x, newshape):
if isinstance(x, jax_sparse.BCOO):
from keras.ops import operation_utils
# Resolve the -1 in `new_shape` if applicable and possible
output_shape = operation_utils.compute_reshape_output_shape(
x.shape, newshape, "new_shape"
)
if None not in output_shape:
newshape = output_shape
return jax_sparse.bcoo_reshape(x, new_sizes=newshape)
return jnp.reshape(x, newshape)
def roll(x, shift, axis=None):
return jnp.roll(x, shift, axis=axis)
@sparse.elementwise_unary(linear=False)
def sign(x):
x = convert_to_tensor(x)
return jnp.sign(x)
@sparse.elementwise_unary(linear=False)
def sin(x):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
dtype = config.floatx()
else:
dtype = dtypes.result_type(x.dtype, float)
x = cast(x, dtype)
return jnp.sin(x)
@sparse.elementwise_unary(linear=False)
def sinh(x):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
dtype = config.floatx()
else:
dtype = dtypes.result_type(x.dtype, float)
x = cast(x, dtype)
return jnp.sinh(x)
def size(x):
return jnp.size(x)
def sort(x, axis=-1):
x = convert_to_tensor(x)
return jnp.sort(x, axis=axis)
def split(x, indices_or_sections, axis=0):
return jnp.split(x, indices_or_sections, axis=axis)
def stack(x, axis=0):
return jnp.stack(x, axis=axis)
def std(x, axis=None, keepdims=False):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
x = cast(x, config.floatx())
return jnp.std(x, axis=axis, keepdims=keepdims)
def swapaxes(x, axis1, axis2):
x = convert_to_tensor(x)
return jnp.swapaxes(x, axis1=axis1, axis2=axis2)
def take(x, indices, axis=None):
x = convert_to_tensor(x)
indices = convert_to_tensor(indices, sparse=False)
return jnp.take(x, indices, axis=axis)
def take_along_axis(x, indices, axis=None):
return jnp.take_along_axis(x, indices, axis=axis)
@sparse.elementwise_unary(linear=False)
def tan(x):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
dtype = config.floatx()
else:
dtype = dtypes.result_type(x.dtype, float)
x = cast(x, dtype)
return jnp.tan(x)
@sparse.elementwise_unary(linear=False)
def tanh(x):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
dtype = config.floatx()
else:
dtype = dtypes.result_type(x.dtype, float)
x = cast(x, dtype)
return jnp.tanh(x)
def tensordot(x1, x2, axes=2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return jnp.tensordot(x1, x2, axes=axes)
@sparse.elementwise_unary(linear=False)
def round(x, decimals=0):
x = convert_to_tensor(x)
return jnp.round(x, decimals=decimals)
def tile(x, repeats):
return jnp.tile(x, repeats)
def trace(x, offset=0, axis1=0, axis2=1):
x = convert_to_tensor(x)
dtype = None
if standardize_dtype(x.dtype) == "bool":
dtype = "int32"
return jnp.trace(x, offset=offset, axis1=axis1, axis2=axis2, dtype=dtype)
def tri(N, M=None, k=0, dtype=None):
dtype = dtype or config.floatx()
return jnp.tri(N, M=M, k=k, dtype=dtype)
def tril(x, k=0):
x = convert_to_tensor(x)
return jnp.tril(x, k=k)
def triu(x, k=0):
x = convert_to_tensor(x)
return jnp.triu(x, k=k)
def vdot(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return jnp.vdot(x1, x2)
def vstack(xs):
return jnp.vstack(xs)
def where(condition, x1, x2):
return jnp.where(condition, x1, x2)
@sparse.elementwise_division
def divide(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return jnp.divide(x1, x2)
def divide_no_nan(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return jnp.where(x2 == 0, 0, jnp.divide(x1, x2))
def true_divide(x1, x2):
return divide(x1, x2)
def power(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return jnp.power(x1, x2)
@sparse.elementwise_unary(linear=True)
def negative(x):
x = convert_to_tensor(x)
return jnp.negative(x)
@sparse.elementwise_unary(linear=False)
def square(x):
x = convert_to_tensor(x)
return jnp.square(x)
@sparse.elementwise_unary(linear=False)
def sqrt(x):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
x = cast(x, config.floatx())
return jnp.sqrt(x)
def squeeze(x, axis=None):
if isinstance(x, jax_sparse.BCOO):
if axis is None:
axis = tuple(i for i, d in enumerate(x.shape) if d == 1)
elif isinstance(axis, int):
axis = (axis,)
return jax_sparse.bcoo_squeeze(x, dimensions=axis)
return jnp.squeeze(x, axis=axis)
def transpose(x, axes=None):
x = convert_to_tensor(x)
if isinstance(x, jax_sparse.BCOO):
num_dims = len(x.shape)
if axes is None:
permutation = tuple(range(num_dims)[::-1])
else:
permutation = []
for a in axes:
if not -num_dims <= a < num_dims:
raise ValueError(
f"axis {a} out of bounds for tensor of rank {num_dims}"
)
permutation.append(a if a >= 0 else a + num_dims)
return jax_sparse.bcoo_transpose(x, permutation=permutation)
return jnp.transpose(x, axes=axes)
def var(x, axis=None, keepdims=False):
x = convert_to_tensor(x)
# `jnp.var` does not handle low precision (e.g., float16) overflow
# correctly, so we compute with float32 and cast back to the original type.
compute_dtype = dtypes.result_type(x.dtype, "float32")
result_dtype = dtypes.result_type(x.dtype, float)
return cast(
jnp.var(x, axis=axis, keepdims=keepdims, dtype=compute_dtype),
result_dtype,
)
def sum(x, axis=None, keepdims=False):
x = convert_to_tensor(x)
return jnp.sum(x, axis=axis, keepdims=keepdims)
def eye(N, M=None, k=0, dtype=None):
dtype = dtype or config.floatx()
return jnp.eye(N, M=M, k=k, dtype=dtype)
def floor_divide(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return jnp.floor_divide(x1, x2)
def logical_xor(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return jnp.logical_xor(x1, x2)
| keras/keras/backend/jax/numpy.py/0 | {
"file_path": "keras/keras/backend/jax/numpy.py",
"repo_id": "keras",
"token_count": 13624
} | 171 |
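Two of the behaviors implemented above — zero-safe division and the float32 accumulation used by `mean` — sketched directly against `jax.numpy` for illustration (this bypasses the Keras backend wrappers):

```python
import jax.numpy as jnp

# divide_no_nan: where the denominator is zero, the result is 0 instead of inf/nan.
x1 = jnp.array([1.0, 2.0, 3.0])
x2 = jnp.array([2.0, 0.0, 4.0])
print(jnp.where(x2 == 0, 0, jnp.divide(x1, x2)))  # [0.5  0.   0.75]

# mean: accumulate in float32 to avoid float16 overflow, then cast back.
x = jnp.full((4096,), 100.0, dtype="float16")
print(jnp.mean(x, dtype=jnp.float32).astype("float16"))  # 100.0
```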
import numpy as np
import tree
from keras import backend
from keras import callbacks as callbacks_module
from keras.backend.common import standardize_dtype
from keras.backend.common.keras_tensor import KerasTensor
from keras.backend.numpy.core import is_tensor
from keras.trainers import trainer as base_trainer
from keras.trainers.data_adapters import data_adapter_utils
from keras.trainers.epoch_iterator import EpochIterator
from keras.utils import traceback_utils
class NumpyTrainer(base_trainer.Trainer):
def __init__(self):
super().__init__()
self.test_function = None
self.predict_function = None
def test_step(self, data):
(
x,
y,
sample_weight,
) = data_adapter_utils.unpack_x_y_sample_weight(data)
if self._call_has_training_arg:
y_pred = self(x, training=False)
else:
y_pred = self(x)
loss = self.compute_loss(
x=x, y=y, y_pred=y_pred, sample_weight=sample_weight
)
self._loss_tracker.update_state(loss)
return self.compute_metrics(x, y, y_pred, sample_weight=sample_weight)
def predict_step(self, data):
x, _, _ = data_adapter_utils.unpack_x_y_sample_weight(data)
if self._call_has_training_arg:
y_pred = self(x, training=False)
else:
y_pred = self(x)
return y_pred
def make_test_function(self, force=False):
if self.test_function is not None and not force:
return self.test_function
def one_test_step(data):
data = data[0]
return self.test_step(data)
def multi_test_steps(data):
for single_step_data in data:
logs = one_test_step([single_step_data])
return logs
if self.steps_per_execution > 1:
test_step = multi_test_steps
else:
test_step = one_test_step
self.test_function = test_step
def make_predict_function(self, force=False):
if self.predict_function is not None and not force:
return self.predict_function
def one_predict_step(data):
data = data[0]
return self.predict_step(data)
def multi_predict_steps(data):
outputs = one_predict_step(data[:1])
for single_step_data in data[1:]:
step_outputs = one_predict_step([single_step_data])
outputs = tree.map_structure(
lambda t1, t2: np.concatenate([t1, t2]),
outputs,
step_outputs,
)
return outputs
if self.steps_per_execution > 1:
predict_step = multi_predict_steps
else:
predict_step = one_predict_step
self.predict_function = predict_step
def _symbolic_build(self, data_batch):
model_unbuilt = not all(layer.built for layer in self._flatten_layers())
compile_metrics_unbuilt = (
self._compile_metrics is not None
and not self._compile_metrics.built
)
if model_unbuilt or compile_metrics_unbuilt:
# Create symbolic tensors matching an input batch.
def to_symbolic_input(v):
if is_tensor(v):
return KerasTensor(v.shape, standardize_dtype(v.dtype))
return v
data_batch = tree.map_structure(to_symbolic_input, data_batch)
(
x,
y,
sample_weight,
) = data_adapter_utils.unpack_x_y_sample_weight(data_batch)
# Build all model state with `backend.compute_output_spec`.
try:
y_pred = backend.compute_output_spec(self, x)
except:
raise RuntimeError(
"Unable to automatically build the model. "
"Please build it yourself before calling "
"fit/evaluate/predict. "
"A model is 'built' when its variables have "
"been created and its `self.built` attribute "
"is True. Usually, calling the model on a batch "
"of data is the right way to build it."
)
if compile_metrics_unbuilt:
# Build all metric state with `backend.compute_output_spec`.
backend.compute_output_spec(
self.compute_metrics,
x,
y,
y_pred,
sample_weight=sample_weight,
)
self._post_build()
def fit(
self,
x=None,
y=None,
batch_size=None,
epochs=1,
verbose="auto",
callbacks=None,
validation_split=0.0,
validation_data=None,
shuffle=True,
class_weight=None,
sample_weight=None,
initial_epoch=0,
steps_per_epoch=None,
validation_steps=None,
validation_batch_size=None,
validation_freq=1,
):
raise NotImplementedError("fit not implemented for NumPy backend.")
@traceback_utils.filter_traceback
def predict(
self, x, batch_size=None, verbose="auto", steps=None, callbacks=None
):
# Create an iterator that yields batches of input data.
epoch_iterator = EpochIterator(
x=x,
batch_size=batch_size,
steps_per_epoch=steps,
shuffle=False,
steps_per_execution=self.steps_per_execution,
)
# Container that configures and calls callbacks.
if not isinstance(callbacks, callbacks_module.CallbackList):
callbacks = callbacks_module.CallbackList(
callbacks,
add_history=True,
add_progbar=verbose != 0,
verbose=verbose,
epochs=1,
steps=epoch_iterator.num_batches,
model=self,
)
def append_to_outputs(batch_outputs, outputs):
if outputs is None:
outputs = tree.map_structure(
lambda batch_output: [batch_output],
batch_outputs,
)
else:
tree.map_structure_up_to(
batch_outputs,
lambda output, batch_output: output.append(batch_output),
outputs,
batch_outputs,
)
return outputs
self.make_predict_function()
self.stop_predicting = False
callbacks.on_predict_begin()
outputs = None
for step, data in epoch_iterator.enumerate_epoch():
callbacks.on_predict_batch_begin(step)
batch_outputs = self.predict_function(data)
outputs = append_to_outputs(batch_outputs, outputs)
callbacks.on_predict_batch_end(step, {"outputs": batch_outputs})
if self.stop_predicting:
break
callbacks.on_predict_end()
return tree.map_structure_up_to(batch_outputs, np.concatenate, outputs)
@traceback_utils.filter_traceback
def evaluate(
self,
x=None,
y=None,
batch_size=None,
verbose="auto",
sample_weight=None,
steps=None,
callbacks=None,
return_dict=False,
**kwargs,
):
# TODO: respect compiled trainable state
use_cached_eval_dataset = kwargs.pop("_use_cached_eval_dataset", False)
if kwargs:
raise ValueError(f"Arguments not recognized: {kwargs}")
if use_cached_eval_dataset:
epoch_iterator = self._eval_epoch_iterator
else:
# Create an iterator that yields batches of input/target data.
epoch_iterator = EpochIterator(
x=x,
y=y,
sample_weight=sample_weight,
batch_size=batch_size,
steps_per_epoch=steps,
shuffle=False,
steps_per_execution=self.steps_per_execution,
)
if not all(layer.built for layer in self._flatten_layers()):
# Build the model on one batch of data.
for _, data in epoch_iterator.enumerate_epoch():
data_batch = data[0]
self._symbolic_build(data_batch)
break
# Container that configures and calls callbacks.
if not isinstance(callbacks, callbacks_module.CallbackList):
callbacks = callbacks_module.CallbackList(
callbacks,
add_history=True,
add_progbar=verbose != 0,
verbose=verbose,
epochs=1,
steps=epoch_iterator.num_batches,
model=self,
)
self.make_test_function()
self.stop_evaluating = False
callbacks.on_test_begin()
logs = None
self.reset_metrics()
for step, data in epoch_iterator.enumerate_epoch():
callbacks.on_test_batch_begin(step)
logs = self.test_function(data)
callbacks.on_test_batch_end(step, self._pythonify_logs(logs))
if self.stop_evaluating:
break
logs = self.get_metrics_result()
callbacks.on_test_end(logs)
if return_dict:
return logs
return self._flatten_metrics_in_order(logs)
def train_on_batch(
self,
x,
y=None,
sample_weight=None,
class_weight=None,
return_dict=False,
):
raise NotImplementedError(
"train_on_batch not implemented for NumPy backend."
)
def test_on_batch(
self,
x,
y=None,
sample_weight=None,
return_dict=False,
):
self._assert_compile_called("test_on_batch")
data = (x, y, sample_weight)
# Maybe build model
self._symbolic_build(data)
self.make_test_function()
logs = self.test_function([data])
logs = tree.map_structure(lambda x: np.array(x), logs)
if return_dict:
return logs
return self._flatten_metrics_in_order(logs)
def predict_on_batch(self, x):
self.make_predict_function()
batch_outputs = self.predict_function([(x,)])
batch_outputs = tree.map_structure(
backend.convert_to_numpy, batch_outputs
)
return batch_outputs
| keras/keras/backend/numpy/trainer.py/0 | {
"file_path": "keras/keras/backend/numpy/trainer.py",
"repo_id": "keras",
"token_count": 5408
} | 172 |
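Since `fit` and `train_on_batch` raise `NotImplementedError` here, the NumPy backend is only usable for evaluation and inference. A minimal usage sketch, assuming the backend is selected via the `KERAS_BACKEND` environment variable before Keras is imported:

```python
import os
os.environ["KERAS_BACKEND"] = "numpy"

import numpy as np
import keras

model = keras.Sequential([keras.layers.Input(shape=(3,)), keras.layers.Dense(1)])
model.compile(loss="mse", metrics=["mae"])

x = np.random.random((8, 3)).astype("float32")
y = np.random.random((8, 1)).astype("float32")

print(model.evaluate(x, y, verbose=0))    # loss and metrics, no weight updates
print(model.predict(x, verbose=0).shape)  # (8, 1)
# model.fit(x, y) would raise NotImplementedError on this backend.
```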
"""Tests for SavedModel functionality under tf implementation."""
import os
import numpy as np
import pytest
import tensorflow as tf
from keras import backend
from keras import layers
from keras import metrics
from keras import models
from keras import optimizers
from keras import testing
from keras.saving import object_registration
@object_registration.register_keras_serializable(package="my_package")
class CustomModelX(models.Model):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.dense1 = layers.Dense(1)
self.dense2 = layers.Dense(1)
def call(self, inputs):
out = self.dense1(inputs)
return self.dense2(out)
def one(self):
return 1
@object_registration.register_keras_serializable(package="my_package")
class CustomSignatureModel(models.Model):
def __init__(self):
super(CustomSignatureModel, self).__init__()
self.v = tf.Variable(1.0)
@tf.function
def __call__(self, x):
return x * self.v
@tf.function(input_signature=[tf.TensorSpec([], tf.float32)])
def mutate(self, new_v):
self.v.assign(new_v)
@pytest.mark.skipif(
backend.backend() != "tensorflow",
reason="The SavedModel test can only run with TF backend.",
)
class SavedModelTest(testing.TestCase):
def test_sequential(self):
model = models.Sequential([layers.Dense(1)])
model.compile(loss="mse", optimizer="adam")
X_train = np.random.rand(100, 3)
y_train = np.random.rand(100, 1)
model.fit(X_train, y_train)
path = os.path.join(self.get_temp_dir(), "my_keras_model")
tf.saved_model.save(model, path)
restored_model = tf.saved_model.load(path)
self.assertAllClose(
model(X_train),
restored_model.signatures["serving_default"](
tf.convert_to_tensor(X_train, dtype=tf.float32)
)["output_0"],
rtol=1e-4,
atol=1e-4,
)
def test_functional(self):
inputs = layers.Input(shape=(3,))
x = layers.Dense(1, name="first_dense")(inputs)
outputs = layers.Dense(1, name="second_dense")(x)
model = models.Model(inputs, outputs)
model.compile(
optimizer="adam",
loss="mse",
)
X_train = np.random.rand(100, 3)
y_train = np.random.rand(100, 1)
model.fit(X_train, y_train)
path = os.path.join(self.get_temp_dir(), "my_keras_model")
tf.saved_model.save(model, path)
restored_model = tf.saved_model.load(path)
self.assertAllClose(
model(X_train),
restored_model.signatures["serving_default"](
tf.convert_to_tensor(X_train, dtype=tf.float32)
)["output_0"],
rtol=1e-4,
atol=1e-4,
)
def test_subclassed(self):
model = CustomModelX()
model.compile(
optimizer="adam",
loss="mse",
metrics=[metrics.Hinge(), "mse"],
)
X_train = np.random.rand(100, 3)
y_train = np.random.rand(100, 1)
model.fit(X_train, y_train)
path = os.path.join(self.get_temp_dir(), "my_keras_model")
tf.saved_model.save(model, path)
restored_model = tf.saved_model.load(path)
self.assertAllClose(
model(X_train),
restored_model.signatures["serving_default"](
tf.convert_to_tensor(X_train, dtype=tf.float32)
)["output_0"],
rtol=1e-4,
atol=1e-4,
)
def test_custom_model_and_layer(self):
@object_registration.register_keras_serializable(package="my_package")
class CustomLayer(layers.Layer):
def __call__(self, inputs):
return inputs
@object_registration.register_keras_serializable(package="my_package")
class Model(models.Model):
def __init__(self):
super().__init__()
self.layer = CustomLayer()
@tf.function(input_signature=[tf.TensorSpec([None, 1])])
def call(self, inputs):
return self.layer(inputs)
model = Model()
inp = np.array([[1.0]])
result = model(inp)
path = os.path.join(self.get_temp_dir(), "my_keras_model")
tf.saved_model.save(model, path)
restored_model = tf.saved_model.load(path)
self.assertAllClose(
result,
restored_model.call(inp),
rtol=1e-4,
atol=1e-4,
)
def test_multi_input_model(self):
input_1 = layers.Input(shape=(3,))
input_2 = layers.Input(shape=(5,))
model = models.Model([input_1, input_2], [input_1, input_2])
path = os.path.join(self.get_temp_dir(), "my_keras_model")
tf.saved_model.save(model, path)
restored_model = tf.saved_model.load(path)
input_arr_1 = np.random.random((1, 3)).astype("float32")
input_arr_2 = np.random.random((1, 5)).astype("float32")
outputs = restored_model.signatures["serving_default"](
inputs=tf.convert_to_tensor(input_arr_1, dtype=tf.float32),
inputs_1=tf.convert_to_tensor(input_arr_2, dtype=tf.float32),
)
self.assertAllClose(
input_arr_1, outputs["output_0"], rtol=1e-4, atol=1e-4
)
self.assertAllClose(
input_arr_2, outputs["output_1"], rtol=1e-4, atol=1e-4
)
def test_multi_input_custom_model_and_layer(self):
@object_registration.register_keras_serializable(package="my_package")
class CustomLayer(layers.Layer):
def __call__(self, *input_list):
self.add_loss(input_list[-2] * 2)
return sum(input_list)
@object_registration.register_keras_serializable(package="my_package")
class CustomModel(models.Model):
def build(self, input_shape):
super().build(input_shape)
self.layer = CustomLayer()
@tf.function
def call(self, *inputs):
inputs = list(inputs)
return self.layer(*inputs)
model = CustomModel()
inp = [
tf.constant(i, shape=[1, 1], dtype=tf.float32) for i in range(1, 4)
]
expected = model(*inp)
path = os.path.join(self.get_temp_dir(), "my_keras_model")
tf.saved_model.save(model, path)
restored_model = tf.saved_model.load(path)
output = restored_model.call(*inp)
self.assertAllClose(expected, output, rtol=1e-4, atol=1e-4)
def test_list_trackable_children_tracking(self):
@object_registration.register_keras_serializable(package="my_package")
class CustomLayerList(layers.Layer):
def __init__(self):
super().__init__()
self.sublayers = [
layers.Dense(2),
layers.Dense(2),
]
def call(self, inputs):
x = inputs
for sublayer in self.sublayers:
x = sublayer(x)
return x
inputs = layers.Input(shape=(1,))
outputs = CustomLayerList()(inputs)
model = models.Model(inputs, outputs)
inp = np.array([[1.0]])
expected = model(inp)
path = os.path.join(self.get_temp_dir(), "my_keras_model")
tf.saved_model.save(model, path)
restored_model = tf.saved_model.load(path)
self.assertAllClose(
expected,
restored_model.signatures["serving_default"](
tf.convert_to_tensor(inp, dtype=tf.float32)
)["output_0"],
rtol=1e-4,
atol=1e-4,
)
def test_dict_trackable_children_tracking(self):
@object_registration.register_keras_serializable(package="my_package")
class CustomLayerDict(layers.Layer):
def __init__(self):
super().__init__()
self.sublayers = {
"first_layer": layers.Dense(2),
"second_layer": layers.Dense(2),
}
def call(self, inputs):
x = inputs
for key, sublayer in self.sublayers.items():
x = sublayer(x)
return x
inputs = layers.Input(shape=(1,))
outputs = CustomLayerDict()(inputs)
model = models.Model(inputs, outputs)
inp = np.array([[1.0]])
expected = model(inp)
path = os.path.join(self.get_temp_dir(), "my_keras_model")
tf.saved_model.save(model, path)
restored_model = tf.saved_model.load(path)
self.assertAllClose(
expected,
restored_model.signatures["serving_default"](
tf.convert_to_tensor(inp, dtype=tf.float32)
)["output_0"],
rtol=1e-4,
atol=1e-4,
)
def test_fixed_signature_string_dtype(self):
@object_registration.register_keras_serializable(package="my_package")
class Adder(models.Model):
@tf.function(
input_signature=[tf.TensorSpec(shape=[], dtype=tf.string)]
)
def concat(self, x):
return x + x
model = Adder()
path = os.path.join(self.get_temp_dir(), "my_keras_model")
tf.saved_model.save(model, path)
restored_model = tf.saved_model.load(path)
self.assertEqual(model.concat("hello"), restored_model.concat("hello"))
def test_non_fixed_signature_string_dtype(self):
@object_registration.register_keras_serializable(package="my_package")
class Adder(models.Model):
@tf.function
def concat(self, x):
return x + x
model = Adder()
no_fn_path = os.path.join(self.get_temp_dir(), "my_keras_model_no_fn")
tf.saved_model.save(model, no_fn_path)
restored_model = tf.saved_model.load(no_fn_path)
with self.assertRaisesRegex(ValueError, "zero restored functions"):
_ = restored_model.concat("hello")
path = os.path.join(self.get_temp_dir(), "my_keras_model")
tf.saved_model.save(
model,
path,
signatures=model.concat.get_concrete_function(
tf.TensorSpec(shape=[], dtype=tf.string, name="string_input")
),
)
restored_model = tf.saved_model.load(path)
self.assertEqual(model.concat("hello"), restored_model.concat("hello"))
def test_fine_tuning(self):
model = CustomSignatureModel()
model_no_signatures_path = os.path.join(
self.get_temp_dir(), "model_no_signatures"
)
_ = model(tf.constant(0.0))
tf.saved_model.save(model, model_no_signatures_path)
restored_model = tf.saved_model.load(model_no_signatures_path)
self.assertLen(list(restored_model.signatures.keys()), 0)
self.assertEqual(restored_model(tf.constant(3.0)).numpy(), 3)
restored_model.mutate(tf.constant(2.0))
self.assertEqual(restored_model(tf.constant(3.0)).numpy(), 6)
optimizer = optimizers.SGD(0.05)
def train_step():
with tf.GradientTape() as tape:
loss = (10.0 - restored_model(tf.constant(2.0))) ** 2
variables = tape.watched_variables()
grads = tape.gradient(loss, variables)
optimizer.apply_gradients(zip(grads, variables))
return loss
for _ in range(10):
# "v" approaches 5, "loss" approaches 0
loss = train_step()
self.assertAllClose(loss, 0.0, rtol=1e-2, atol=1e-2)
self.assertAllClose(restored_model.v.numpy(), 5.0, rtol=1e-2, atol=1e-2)
def test_signatures_path(self):
model = CustomSignatureModel()
model_with_signature_path = os.path.join(
self.get_temp_dir(), "model_with_signature"
)
call = model.__call__.get_concrete_function(
tf.TensorSpec(None, tf.float32)
)
tf.saved_model.save(model, model_with_signature_path, signatures=call)
restored_model = tf.saved_model.load(model_with_signature_path)
self.assertEqual(
list(restored_model.signatures.keys()), ["serving_default"]
)
def test_multiple_signatures_dict_path(self):
model = CustomSignatureModel()
model_multiple_signatures_path = os.path.join(
self.get_temp_dir(), "model_with_multiple_signatures"
)
call = model.__call__.get_concrete_function(
tf.TensorSpec(None, tf.float32)
)
signatures = {
"serving_default": call,
"array_input": model.__call__.get_concrete_function(
tf.TensorSpec([None], tf.float32)
),
}
tf.saved_model.save(
model, model_multiple_signatures_path, signatures=signatures
)
restored_model = tf.saved_model.load(model_multiple_signatures_path)
self.assertEqual(
list(restored_model.signatures.keys()),
["serving_default", "array_input"],
)
| keras/keras/backend/tensorflow/saved_model_test.py/0 | {
"file_path": "keras/keras/backend/tensorflow/saved_model_test.py",
"repo_id": "keras",
"token_count": 6609
} | 173 |
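The core pattern these tests exercise — exporting a Keras model as a TensorFlow SavedModel and calling the restored serving signature — reduces to the sketch below, which assumes the TensorFlow backend is active (the save path is illustrative):

```python
import numpy as np
import tensorflow as tf
import keras

model = keras.Sequential([keras.layers.Input(shape=(3,)), keras.layers.Dense(1)])
model.compile(loss="mse", optimizer="adam")

x = np.random.rand(16, 3).astype("float32")
y = np.random.rand(16, 1).astype("float32")
model.fit(x, y, verbose=0)

path = "/tmp/my_keras_model"
tf.saved_model.save(model, path)

restored = tf.saved_model.load(path)
serving_fn = restored.signatures["serving_default"]
print(serving_fn(tf.convert_to_tensor(x))["output_0"].shape)  # (16, 1)
```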
import torch
from keras import ops
from keras import optimizers
from keras.backend.torch.optimizers import torch_parallel_optimizer
class Adadelta(
torch_parallel_optimizer.TorchParallelOptimizer, optimizers.Adadelta
):
def _parallel_update_step(
self,
grads,
variables,
learning_rate,
):
keras_variables = variables
variables = [v.value for v in variables]
dtype = variables[0].dtype
lr = ops.cast(learning_rate, dtype)
rho = self.rho
accumulated_grads = [
self._accumulated_grads[self._get_variable_index(variable)].value
for variable in keras_variables
]
accumulated_delta_vars = [
self._accumulated_delta_vars[
self._get_variable_index(variable)
].value
for variable in keras_variables
]
torch._foreach_mul_(accumulated_grads, rho)
torch._foreach_add_(
accumulated_grads, torch._foreach_mul(grads, grads), alpha=1 - rho
)
def rms(x):
return torch._foreach_sqrt(torch._foreach_add(x, self.epsilon))
delta_vars = torch._foreach_mul(
torch._foreach_div(
torch._foreach_mul(rms(accumulated_delta_vars), grads),
rms(accumulated_grads),
),
-1,
)
torch._foreach_mul_(accumulated_delta_vars, rho)
torch._foreach_add_(
accumulated_delta_vars,
torch._foreach_mul(delta_vars, delta_vars),
alpha=1 - rho,
)
torch._foreach_add_(variables, delta_vars, alpha=lr)
| keras/keras/backend/torch/optimizers/torch_adadelta.py/0 | {
"file_path": "keras/keras/backend/torch/optimizers/torch_adadelta.py",
"repo_id": "keras",
"token_count": 838
} | 174 |
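The `torch._foreach_*` calls above apply the standard Adadelta recurrence to every variable in one fused pass. For a single parameter, the same update written with plain tensors is roughly the following sketch of the math (not the Keras implementation itself):

```python
import torch

def adadelta_step(param, grad, acc_grad, acc_delta, lr=1.0, rho=0.95, eps=1e-7):
    # Decaying average of squared gradients.
    acc_grad.mul_(rho).add_(grad * grad, alpha=1 - rho)
    # Update direction: RMS of past updates over RMS of past gradients, times -grad.
    delta = -torch.sqrt(acc_delta + eps) / torch.sqrt(acc_grad + eps) * grad
    # Decaying average of squared updates.
    acc_delta.mul_(rho).add_(delta * delta, alpha=1 - rho)
    # Apply the update, scaled by the learning rate.
    param.add_(delta, alpha=lr)
    return param
```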
import numpy as np
import pytest
from keras import callbacks
from keras import layers
from keras import testing
from keras.models import Sequential
from keras.utils import file_utils
class InterruptingCallback(callbacks.Callback):
"""A callback to intentionally interrupt training."""
def __init__(self, steps_int, epoch_int):
self.batch_count = 0
self.epoch_count = 0
self.steps_int = steps_int
self.epoch_int = epoch_int
def on_epoch_end(self, epoch, log=None):
self.epoch_count += 1
if self.epoch_int is not None and self.epoch_count == self.epoch_int:
raise RuntimeError("EpochInterruption")
def on_batch_end(self, batch, logs=None):
self.batch_count += 1
if self.steps_int is not None and self.batch_count == self.steps_int:
raise RuntimeError("StepsInterruption")
class CanaryLayer(layers.Layer):
def __init__(self):
super().__init__()
self.counter = self.add_weight(
shape=(), initializer="zeros", dtype="float32", trainable=False
)
def call(self, x):
self.counter.assign_add(1)
return x
class BackupAndRestoreCallbackTest(testing.TestCase):
def make_model(self):
model = Sequential(
[
CanaryLayer(),
layers.Dense(1),
]
)
model.compile(
loss="mse",
optimizer="sgd",
metrics=["mse"],
)
return model
    # Check invalid save_freq values, both string and non-integer
def test_save_freq_unknown_error(self):
with self.assertRaisesRegex(ValueError, expected_regex="Invalid value"):
callbacks.BackupAndRestore(
backup_dir="backup_dir", save_freq="batch"
)
with self.assertRaisesRegex(ValueError, expected_regex="Invalid value"):
callbacks.BackupAndRestore(backup_dir="backup_dir", save_freq=0.15)
    # Check that after an interruption, the correct model params and
    # weights are restored from the step-wise backup
@pytest.mark.requires_trainable_backend
def test_best_case_step(self):
temp_dir = self.get_temp_dir()
backup_dir = file_utils.join(temp_dir, "subdir")
self.assertFalse(file_utils.exists(backup_dir))
model = self.make_model()
cbk = callbacks.BackupAndRestore(backup_dir, save_freq=1)
x_train = np.random.random((10, 3))
y_train = np.random.random((10, 1))
try:
model.fit(
x_train,
y_train,
batch_size=4,
callbacks=[
cbk,
InterruptingCallback(steps_int=2, epoch_int=None),
],
epochs=2,
verbose=0,
)
except RuntimeError:
self.assertTrue(file_utils.exists(backup_dir))
self.assertEqual(cbk._current_epoch, 0)
self.assertEqual(cbk._last_batch_seen, 1)
self.assertEqual(int(model.layers[0].counter.value), 2)
hist = model.fit(
x_train, y_train, batch_size=4, callbacks=[cbk], epochs=5
)
self.assertEqual(cbk._current_epoch, 4)
self.assertEqual(hist.epoch[-1], 4)
self.assertEqual(int(model.layers[0].counter.value), 17)
    # Check that after an interruption, the correct model params and
    # weights are restored from the epoch-wise backup
@pytest.mark.requires_trainable_backend
def test_best_case_epoch(self):
temp_dir = self.get_temp_dir()
backup_dir = file_utils.join(temp_dir, "subdir")
self.assertFalse(file_utils.exists(backup_dir))
model = self.make_model()
self.assertEqual(int(model.layers[0].counter.value), 0)
cbk = callbacks.BackupAndRestore(
backup_dir=backup_dir, save_freq="epoch"
)
x_train = np.random.random((10, 3))
y_train = np.random.random((10, 1))
try:
model.fit(
x_train,
y_train,
batch_size=4,
callbacks=[
cbk,
InterruptingCallback(steps_int=None, epoch_int=2),
],
epochs=6,
verbose=0,
)
except RuntimeError:
self.assertEqual(cbk._current_epoch, 1)
self.assertTrue(file_utils.exists(backup_dir))
self.assertEqual(int(model.layers[0].counter.value), 6)
hist = model.fit(
x_train, y_train, batch_size=4, callbacks=[cbk], epochs=5
)
self.assertEqual(cbk._current_epoch, 4)
self.assertEqual(hist.epoch[-1], 4)
self.assertEqual(int(model.layers[0].counter.value), 21)
    # Check that the backup directory is removed once training completes
    # without interruption
@pytest.mark.requires_trainable_backend
def test_model_deleted_case_epoch(self):
temp_dir = self.get_temp_dir()
backup_dir = file_utils.join(temp_dir, "subdir")
self.assertFalse(file_utils.exists(backup_dir))
model = self.make_model()
cbk = callbacks.BackupAndRestore(backup_dir, save_freq="epoch")
x_train = np.random.random((10, 3))
y_train = np.random.random((10, 1))
model.fit(
x_train,
y_train,
batch_size=4,
callbacks=[cbk],
epochs=2,
verbose=0,
)
self.assertFalse(file_utils.exists(backup_dir))
def test_backup_dir_empty_error(self):
with self.assertRaisesRegex(
ValueError, expected_regex="Empty `backup_dir` argument passed"
):
callbacks.BackupAndRestore(backup_dir="", save_freq="epoch")
def test_backup_dir_none_error(self):
with self.assertRaisesRegex(
ValueError, expected_regex="Empty `backup_dir` argument passed"
):
callbacks.BackupAndRestore(backup_dir=None, save_freq="epoch")
| keras/keras/callbacks/backup_and_restore_callback_test.py/0 | {
"file_path": "keras/keras/callbacks/backup_and_restore_callback_test.py",
"repo_id": "keras",
"token_count": 2986
} | 175 |
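From a user's perspective, the resumption workflow these tests cover looks like the sketch below: if the first `fit` call is interrupted, a second call with the same callback restores the weights and epoch counter from the backup directory (the path is illustrative):

```python
import numpy as np
import keras

x_train = np.random.random((64, 3))
y_train = np.random.random((64, 1))

model = keras.Sequential([keras.layers.Input(shape=(3,)), keras.layers.Dense(1)])
model.compile(loss="mse", optimizer="sgd")

backup_cb = keras.callbacks.BackupAndRestore(
    backup_dir="/tmp/train_backup", save_freq="epoch"
)

try:
    model.fit(x_train, y_train, epochs=10, callbacks=[backup_cb], verbose=0)
except KeyboardInterrupt:
    pass

# If the first call was interrupted, this resumes from the last backed-up epoch;
# once training finishes successfully, the backup directory is deleted.
model.fit(x_train, y_train, epochs=10, callbacks=[backup_cb], verbose=0)
```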
import warnings
import numpy as np
from keras import backend
from keras.api_export import keras_export
from keras.callbacks.callback import Callback
from keras.utils import io_utils
@keras_export("keras.callbacks.ReduceLROnPlateau")
class ReduceLROnPlateau(Callback):
"""Reduce learning rate when a metric has stopped improving.
Models often benefit from reducing the learning rate by a factor
of 2-10 once learning stagnates. This callback monitors a
quantity and if no improvement is seen for a 'patience' number
of epochs, the learning rate is reduced.
Example:
```python
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
patience=5, min_lr=0.001)
model.fit(x_train, y_train, callbacks=[reduce_lr])
```
Args:
monitor: String. Quantity to be monitored.
factor: Float. Factor by which the learning rate will be reduced.
`new_lr = lr * factor`.
patience: Integer. Number of epochs with no improvement after which
learning rate will be reduced.
verbose: Integer. 0: quiet, 1: update messages.
mode: String. One of `{'auto', 'min', 'max'}`. In `'min'` mode,
the learning rate will be reduced when the
quantity monitored has stopped decreasing; in `'max'` mode it will
be reduced when the quantity monitored has stopped increasing; in
`'auto'` mode, the direction is automatically inferred from the name
of the monitored quantity.
min_delta: Float. Threshold for measuring the new optimum, to only focus
on significant changes.
cooldown: Integer. Number of epochs to wait before resuming normal
operation after the learning rate has been reduced.
min_lr: Float. Lower bound on the learning rate.
"""
def __init__(
self,
monitor="val_loss",
factor=0.1,
patience=10,
verbose=0,
mode="auto",
min_delta=1e-4,
cooldown=0,
min_lr=0.0,
**kwargs,
):
super().__init__()
self.monitor = monitor
if factor >= 1.0:
raise ValueError(
"ReduceLROnPlateau does not support a factor >= 1.0. "
f"Received factor={factor}"
)
self.factor = factor
self.min_lr = min_lr
self.min_delta = min_delta
self.patience = patience
self.verbose = verbose
self.cooldown = cooldown
self.cooldown_counter = 0 # Cooldown counter.
self.wait = 0
self.best = 0
self.mode = mode
self.monitor_op = None
self._reset()
def _reset(self):
"""Resets wait counter and cooldown counter."""
if self.mode not in {"auto", "min", "max"}:
warnings.warn(
f"Learning rate reduction mode {self.mode} is unknown, "
"fallback to auto mode.",
stacklevel=2,
)
self.mode = "auto"
if self.mode == "min" or (
self.mode == "auto" and "acc" not in self.monitor
):
self.monitor_op = lambda a, b: np.less(a, b - self.min_delta)
self.best = np.Inf
else:
self.monitor_op = lambda a, b: np.greater(a, b + self.min_delta)
self.best = -np.Inf
self.cooldown_counter = 0
self.wait = 0
def on_train_begin(self, logs=None):
self._reset()
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
logs["learning_rate"] = float(
backend.convert_to_numpy(self.model.optimizer.learning_rate)
)
current = logs.get(self.monitor)
if current is None:
warnings.warn(
"Learning rate reduction is conditioned on metric "
f"`{self.monitor}` which is not available. Available metrics "
f"are: {','.join(list(logs.keys()))}.",
stacklevel=2,
)
else:
if self.in_cooldown():
self.cooldown_counter -= 1
self.wait = 0
if self.monitor_op(current, self.best):
self.best = current
self.wait = 0
elif not self.in_cooldown():
self.wait += 1
if self.wait >= self.patience:
old_lr = float(
backend.convert_to_numpy(
self.model.optimizer.learning_rate
)
)
if old_lr > np.float32(self.min_lr):
new_lr = old_lr * self.factor
new_lr = max(new_lr, self.min_lr)
self.model.optimizer.learning_rate = new_lr
if self.verbose > 0:
io_utils.print_msg(
f"\nEpoch {epoch +1}: "
"ReduceLROnPlateau reducing "
f"learning rate to {new_lr}."
)
self.cooldown_counter = self.cooldown
self.wait = 0
def in_cooldown(self):
return self.cooldown_counter > 0
| keras/keras/callbacks/reduce_lr_on_plateau.py/0 | {
"file_path": "keras/keras/callbacks/reduce_lr_on_plateau.py",
"repo_id": "keras",
"token_count": 2630
} | 176 |
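The reduction rule applied in `on_epoch_end` is simply `new_lr = max(old_lr * factor, min_lr)`, triggered once the monitored metric has failed to improve by `min_delta` for `patience` epochs and the cooldown window has elapsed. A small end-to-end sketch (the data is deliberately trivial so the loss plateaus immediately):

```python
import numpy as np
import keras

model = keras.Sequential([keras.layers.Input(shape=(3,)), keras.layers.Dense(1)])
model.compile(loss="mse", optimizer=keras.optimizers.SGD(learning_rate=0.1))

reduce_lr = keras.callbacks.ReduceLROnPlateau(
    monitor="loss", factor=0.5, patience=2, min_lr=1e-4, verbose=1
)
x = np.zeros((8, 3))
y = np.zeros((8, 1))
model.fit(x, y, epochs=10, callbacks=[reduce_lr], verbose=0)

# Once the loss stops improving, the rate is halved every `patience` epochs:
# 0.1 -> 0.05 -> 0.025 -> ... bounded below by min_lr.
print(float(keras.ops.convert_to_numpy(model.optimizer.learning_rate)))
```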
import inspect
from keras.api_export import keras_export
from keras.initializers.constant_initializers import Constant
from keras.initializers.constant_initializers import Identity
from keras.initializers.constant_initializers import Ones
from keras.initializers.constant_initializers import Zeros
from keras.initializers.initializer import Initializer
from keras.initializers.random_initializers import GlorotNormal
from keras.initializers.random_initializers import GlorotUniform
from keras.initializers.random_initializers import HeNormal
from keras.initializers.random_initializers import HeUniform
from keras.initializers.random_initializers import LecunNormal
from keras.initializers.random_initializers import LecunUniform
from keras.initializers.random_initializers import OrthogonalInitializer
from keras.initializers.random_initializers import RandomNormal
from keras.initializers.random_initializers import RandomUniform
from keras.initializers.random_initializers import TruncatedNormal
from keras.initializers.random_initializers import VarianceScaling
from keras.saving import serialization_lib
from keras.utils.naming import to_snake_case
ALL_OBJECTS = {
Initializer,
Constant,
Ones,
Zeros,
GlorotNormal,
GlorotUniform,
HeNormal,
HeUniform,
LecunNormal,
LecunUniform,
RandomNormal,
TruncatedNormal,
RandomUniform,
VarianceScaling,
OrthogonalInitializer,
}
ALL_OBJECTS_DICT = {cls.__name__: cls for cls in ALL_OBJECTS}
ALL_OBJECTS_DICT.update(
{to_snake_case(cls.__name__): cls for cls in ALL_OBJECTS}
)
# Aliases
ALL_OBJECTS_DICT.update(
{
"uniform": RandomUniform,
"normal": RandomNormal,
"orthogonal": OrthogonalInitializer,
"one": Ones,
"zero": Zeros,
}
)
@keras_export("keras.initializers.serialize")
def serialize(initializer):
"""Returns the initializer configuration as a Python dict."""
return serialization_lib.serialize_keras_object(initializer)
@keras_export("keras.initializers.deserialize")
def deserialize(config, custom_objects=None):
"""Returns a Keras initializer object via its configuration."""
return serialization_lib.deserialize_keras_object(
config,
module_objects=ALL_OBJECTS_DICT,
custom_objects=custom_objects,
)
@keras_export("keras.initializers.get")
def get(identifier):
"""Retrieves a Keras initializer object via an identifier.
    The `identifier` may be the string name of an initializer function or class
    (case-sensitive).
>>> identifier = 'Ones'
>>> keras.initializers.deserialize(identifier)
<...keras.initializers.initializers.Ones...>
    You can also specify the `config` of the initializer to this function by
    passing a dict containing `class_name` and `config` as an identifier. Also
    note that the `class_name` must map to an `Initializer` class.
>>> cfg = {'class_name': 'Ones', 'config': {}}
>>> keras.initializers.deserialize(cfg)
<...keras.initializers.initializers.Ones...>
In the case that the `identifier` is a class, this method will return a new
instance of the class by its constructor.
Args:
identifier: String or dict that contains the initializer name or
configurations.
Returns:
        Initializer instance based on the input identifier.
"""
if identifier is None:
return None
if isinstance(identifier, dict):
obj = deserialize(identifier)
elif isinstance(identifier, str):
config = {"class_name": str(identifier), "config": {}}
obj = deserialize(config)
else:
obj = identifier
if callable(obj):
if inspect.isclass(obj):
obj = obj()
return obj
else:
raise ValueError(
f"Could not interpret initializer identifier: {identifier}"
)
| keras/keras/initializers/__init__.py/0 | {
"file_path": "keras/keras/initializers/__init__.py",
"repo_id": "keras",
"token_count": 1374
} | 177 |
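The three identifier forms accepted by `keras.initializers.get` — an existing instance, a string name (class name or snake-case alias), and a config dict — illustrated briefly:

```python
import keras

# String identifiers: the class name or its snake_case alias resolves to a
# class, which is then instantiated with default arguments.
init_a = keras.initializers.get("GlorotUniform")
init_b = keras.initializers.get("glorot_uniform")

# Dict identifier: `class_name` plus a `config` of constructor arguments.
init_c = keras.initializers.get(
    {"class_name": "RandomNormal", "config": {"mean": 0.0, "stddev": 0.05}}
)

# An Initializer instance is returned unchanged.
init_d = keras.initializers.get(keras.initializers.Zeros())

print(type(init_a).__name__, type(init_c).__name__)  # GlorotUniform RandomNormal
```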
"""Keras base class for depthwise convolution layers."""
from keras import activations
from keras import constraints
from keras import initializers
from keras import ops
from keras import regularizers
from keras.backend import standardize_data_format
from keras.layers.input_spec import InputSpec
from keras.layers.layer import Layer
from keras.ops.operation_utils import compute_conv_output_shape
from keras.utils.argument_validation import standardize_padding
from keras.utils.argument_validation import standardize_tuple
class BaseDepthwiseConv(Layer):
"""Abstract N-D depthwise convolution layer.
Depthwise convolution is a type of convolution in which each input channel
is convolved with a different kernel (called a depthwise kernel). You can
understand depthwise convolution as the first step in a depthwise separable
convolution.
It is implemented via the following steps:
- Split the input into individual channels.
- Convolve each channel with an individual depthwise kernel with
`depth_multiplier` output channels.
- Concatenate the convolved outputs along the channels axis.
Unlike a regular convolution, depthwise convolution does not mix information
across different input channels.
    The `depth_multiplier` argument determines how many filters are applied to
    one input channel. As such, it controls the number of output channels that
    are generated per input channel in the depthwise step.
Args:
rank: int, the rank of the convolution, e.g. 2 for 2D convolution.
depth_multiplier: The number of depthwise convolution output channels
for each input channel. The total number of depthwise convolution
output channels will be equal to `input_channel * depth_multiplier`.
kernel_size: int or tuple/list of `rank` integers, specifying the size
of the depthwise convolution window.
strides: int or tuple/list of `rank` integers, specifying the stride
length of the depthwise convolution. If only one int is specified,
the same stride size will be used for all dimensions.
`strides > 1` is incompatible with `dilation_rate > 1`.
padding: string, either `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input. When `padding="same"` and
`strides=1`, the output has the same size as the input.
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape `(batch, steps, features)`
while `"channels_first"` corresponds to inputs with shape
`(batch, features, steps)`. It defaults to the `image_data_format`
value found in your Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be `"channels_last"`.
dilation_rate: int or tuple/list of `rank` integers, specifying the
dilation rate to use for dilated convolution. If only one int is
specified, the same dilation rate will be used for all dimensions.
activation: Activation function. If `None`, no activation is applied.
use_bias: bool, if `True`, bias will be added to the output.
        depthwise_initializer: Initializer for the depthwise convolution
kernel. If `None`, the default initializer (`"glorot_uniform"`)
will be used.
bias_initializer: Initializer for the bias vector. If `None`, the
default initializer (`"zeros"`) will be used.
depthwise_regularizer: Optional regularizer for the convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
depthwise_constraint: Optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The
function must take as input the unprojected variable and must return
the projected variable (which must have the same shape). Constraints
are not safe to use when doing asynchronous distributed training.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
"""
def __init__(
self,
rank,
depth_multiplier,
kernel_size,
strides=1,
padding="valid",
data_format=None,
dilation_rate=1,
activation=None,
use_bias=True,
depthwise_initializer="glorot_uniform",
bias_initializer="zeros",
depthwise_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
depthwise_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
**kwargs,
):
super().__init__(
trainable=trainable,
name=name,
activity_regularizer=regularizers.get(activity_regularizer),
**kwargs,
)
self.rank = rank
self.depth_multiplier = depth_multiplier
self.kernel_size = standardize_tuple(kernel_size, rank, "kernel_size")
self.strides = standardize_tuple(strides, rank, "strides")
self.dilation_rate = standardize_tuple(
dilation_rate, rank, "dilation_rate"
)
self.padding = standardize_padding(padding)
self.data_format = standardize_data_format(data_format)
self.activation = activations.get(activation)
self.use_bias = use_bias
self.depthwise_initializer = initializers.get(depthwise_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.depthwise_regularizer = regularizers.get(depthwise_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.depthwise_constraint = constraints.get(depthwise_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.input_spec = InputSpec(min_ndim=self.rank + 2)
if self.depth_multiplier is not None and self.depth_multiplier <= 0:
raise ValueError(
"Invalid value for argument `depth_multiplier`. Expected a "
"strictly positive value. Received "
f"depth_multiplier={self.depth_multiplier}."
)
if not all(self.kernel_size):
raise ValueError(
"The argument `kernel_size` cannot contain 0. Received "
f"kernel_size={self.kernel_size}."
)
if not all(self.strides):
            raise ValueError(
                "The argument `strides` cannot contain 0. Received "
                f"strides={self.strides}."
)
if max(self.strides) > 1 and max(self.dilation_rate) > 1:
raise ValueError(
"`strides > 1` not supported in conjunction with "
f"`dilation_rate > 1`. Received: strides={self.strides} and "
f"dilation_rate={self.dilation_rate}"
)
def build(self, input_shape):
if self.data_format == "channels_last":
channel_axis = -1
input_channel = input_shape[-1]
else:
channel_axis = 1
input_channel = input_shape[1]
self.input_spec = InputSpec(
min_ndim=self.rank + 2, axes={channel_axis: input_channel}
)
depthwise_shape = self.kernel_size + (
input_channel,
self.depth_multiplier,
)
self.kernel = self.add_weight(
name="kernel",
shape=depthwise_shape,
initializer=self.depthwise_initializer,
regularizer=self.depthwise_regularizer,
constraint=self.depthwise_constraint,
trainable=True,
dtype=self.dtype,
)
if self.use_bias:
self.bias = self.add_weight(
name="bias",
shape=(self.depth_multiplier * input_channel,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype,
)
else:
self.bias = None
self.built = True
def _get_input_channel(self, input_shape):
if self.data_format == "channels_last":
input_channel = input_shape[-1]
else:
input_channel = input_shape[1]
return input_channel
def call(self, inputs):
input_channel = self._get_input_channel(inputs.shape)
outputs = ops.depthwise_conv(
inputs,
self.kernel,
strides=self.strides,
padding=self.padding,
dilation_rate=self.dilation_rate,
data_format=self.data_format,
)
if self.use_bias:
if self.data_format == "channels_last":
bias_shape = (1,) * (self.rank + 1) + (
self.depth_multiplier * input_channel,
)
else:
bias_shape = (1, self.depth_multiplier * input_channel) + (
1,
) * self.rank
bias = ops.reshape(self.bias, bias_shape)
outputs += bias
if self.activation is not None:
return self.activation(outputs)
return outputs
def compute_output_shape(self, input_shape):
input_channel = self._get_input_channel(input_shape)
return compute_conv_output_shape(
input_shape,
self.depth_multiplier * input_channel,
self.kernel_size,
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
dilation_rate=self.dilation_rate,
)
def get_config(self):
config = super().get_config()
config.update(
{
"depth_multiplier": self.depth_multiplier,
"kernel_size": self.kernel_size,
"strides": self.strides,
"padding": self.padding,
"data_format": self.data_format,
"dilation_rate": self.dilation_rate,
"activation": activations.serialize(self.activation),
"use_bias": self.use_bias,
"depthwise_initializer": initializers.serialize(
self.depthwise_initializer
),
"bias_initializer": initializers.serialize(
self.bias_initializer
),
"depthwise_regularizer": regularizers.serialize(
self.depthwise_regularizer
),
"bias_regularizer": regularizers.serialize(
self.bias_regularizer
),
"activity_regularizer": regularizers.serialize(
self.activity_regularizer
),
"depthwise_constraint": constraints.serialize(
self.depthwise_constraint
),
"bias_constraint": constraints.serialize(self.bias_constraint),
}
)
return config
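# --- Hedged usage sketch (not part of the original module) ---
# A minimal example of the channel arithmetic described in the docstring
# above, using the public `keras.layers.DepthwiseConv2D` subclass of this
# base class. Shapes assume the default `"channels_last"` data format.
if __name__ == "__main__":
    import numpy as np

    from keras import layers

    images = np.random.rand(4, 32, 32, 3).astype("float32")
    layer = layers.DepthwiseConv2D(kernel_size=3, depth_multiplier=2)
    outputs = layer(images)
    # With `padding="valid"` and stride 1: 32 - 3 + 1 = 30 spatially, and the
    # channel count becomes input_channels * depth_multiplier = 3 * 2 = 6.
    print(outputs.shape)  # (4, 30, 30, 6)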
| keras/keras/layers/convolutional/base_depthwise_conv.py/0 | {
"file_path": "keras/keras/layers/convolutional/base_depthwise_conv.py",
"repo_id": "keras",
"token_count": 5085
} | 178 |
import pytest
from keras import layers
from keras import ops
from keras import testing
class ExampleWrapper(layers.Wrapper):
"""Simple Wrapper subclass."""
def call(self, inputs, **kwargs):
return ops.cast(self.layer(inputs, **kwargs), self.compute_dtype)
class WrapperTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_wrapper_basics(self):
self.run_layer_test(
ExampleWrapper,
init_kwargs={
"layer": layers.Dense(2),
},
input_shape=(2, 3),
expected_output_shape=(2, 2),
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
)
self.run_layer_test(
ExampleWrapper,
init_kwargs={
"layer": layers.Dense(2, activity_regularizer="l2"),
},
input_shape=(2, 3),
expected_output_shape=(2, 2),
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=1,
supports_masking=False,
)
self.run_layer_test(
ExampleWrapper,
init_kwargs={
"layer": layers.Dense(2),
"activity_regularizer": "l2",
},
input_shape=(2, 3),
expected_output_shape=(2, 2),
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=1,
supports_masking=False,
)
self.run_layer_test(
ExampleWrapper,
init_kwargs={
"layer": layers.BatchNormalization(),
},
input_shape=(2, 3),
expected_output_shape=(2, 3),
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=2,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
)
def test_wrapper_invalid_layer(self):
invalid_layer = "This is not a valid Keras layer."
with self.assertRaisesRegex(
ValueError,
"Layer .* supplied to Wrapper isn't a supported layer type. "
"Please ensure wrapped layer is a valid Keras layer.",
):
layers.Wrapper(invalid_layer)
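# --- Hedged usage sketch (not part of the original test file) ---
# Shows what `ExampleWrapper` does at call time: it delegates to the wrapped
# layer and casts the result to the wrapper's compute dtype. Shapes are
# illustrative only.
if __name__ == "__main__":
    import numpy as np

    wrapper = ExampleWrapper(layers.Dense(2))
    outputs = wrapper(np.random.rand(2, 3).astype("float32"))
    print(outputs.shape)  # (2, 2)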
| keras/keras/layers/core/wrapper_test.py/0 | {
"file_path": "keras/keras/layers/core/wrapper_test.py",
"repo_id": "keras",
"token_count": 1324
} | 179 |
from keras import backend
from keras import constraints
from keras import initializers
from keras import ops
from keras import regularizers
from keras.api_export import keras_export
from keras.backend import standardize_dtype
from keras.layers.input_spec import InputSpec
from keras.layers.layer import Layer
@keras_export("keras.layers.BatchNormalization")
class BatchNormalization(Layer):
"""Layer that normalizes its inputs.
Batch normalization applies a transformation that maintains the mean output
close to 0 and the output standard deviation close to 1.
Importantly, batch normalization works differently during training and
during inference.
**During training** (i.e. when using `fit()` or when calling the layer/model
with the argument `training=True`), the layer normalizes its output using
the mean and standard deviation of the current batch of inputs. That is to
say, for each channel being normalized, the layer returns
`gamma * (batch - mean(batch)) / sqrt(var(batch) + epsilon) + beta`, where:
    - `epsilon` is a small constant (configurable as part of the constructor
arguments)
- `gamma` is a learned scaling factor (initialized as 1), which
can be disabled by passing `scale=False` to the constructor.
- `beta` is a learned offset factor (initialized as 0), which
can be disabled by passing `center=False` to the constructor.
**During inference** (i.e. when using `evaluate()` or `predict()` or when
calling the layer/model with the argument `training=False` (which is the
default), the layer normalizes its output using a moving average of the
mean and standard deviation of the batches it has seen during training. That
is to say, it returns
    `gamma * (batch - self.moving_mean) / sqrt(self.moving_var+epsilon) + beta`,
    where `self.moving_mean` and `self.moving_var` refer to the layer's
    `moving_mean` and `moving_variance` non-trainable variables, which are
    updated each time the layer is called in training mode, as such:
- `moving_mean = moving_mean * momentum + mean(batch) * (1 - momentum)`
- `moving_var = moving_var * momentum + var(batch) * (1 - momentum)`
As such, the layer will only normalize its inputs during inference
*after having been trained on data that has similar statistics as the
inference data*.
Args:
axis: Integer, the axis that should be normalized
(typically the features axis). For instance, after a `Conv2D` layer
with `data_format="channels_first"`, use `axis=1`.
momentum: Momentum for the moving average.
epsilon: Small float added to variance to avoid dividing by zero.
center: If `True`, add offset of `beta` to normalized tensor.
If `False`, `beta` is ignored.
scale: If `True`, multiply by `gamma`. If `False`, `gamma` is not used.
When the next layer is linear this can be disabled
since the scaling will be done by the next layer.
beta_initializer: Initializer for the beta weight.
gamma_initializer: Initializer for the gamma weight.
moving_mean_initializer: Initializer for the moving mean.
moving_variance_initializer: Initializer for the moving variance.
beta_regularizer: Optional regularizer for the beta weight.
gamma_regularizer: Optional regularizer for the gamma weight.
beta_constraint: Optional constraint for the beta weight.
gamma_constraint: Optional constraint for the gamma weight.
synchronized: Only applicable with the TensorFlow backend.
If `True`, synchronizes the global batch statistics (mean and
variance) for the layer across all devices at each training step
in a distributed training strategy.
If `False`, each replica uses its own local batch statistics.
**kwargs: Base layer keyword arguments (e.g. `name` and `dtype`).
Call arguments:
inputs: Input tensor (of any rank).
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode.
- `training=True`: The layer will normalize its inputs using
the mean and variance of the current batch of inputs.
- `training=False`: The layer will normalize its inputs using
the mean and variance of its moving statistics, learned during
training.
mask: Binary tensor of shape broadcastable to `inputs` tensor, with
`True` values indicating the positions for which mean and variance
should be computed. Masked elements of the current inputs are not
taken into account for mean and variance computation during
training. Any prior unmasked element values will be taken into
account until their momentum expires.
Reference:
- [Ioffe and Szegedy, 2015](https://arxiv.org/abs/1502.03167).
**About setting `layer.trainable = False` on a `BatchNormalization` layer:**
The meaning of setting `layer.trainable = False` is to freeze the layer,
i.e. its internal state will not change during training:
its trainable weights will not be updated
during `fit()` or `train_on_batch()`, and its state updates will not be run.
Usually, this does not necessarily mean that the layer is run in inference
mode (which is normally controlled by the `training` argument that can
be passed when calling a layer). "Frozen state" and "inference mode"
are two separate concepts.
However, in the case of the `BatchNormalization` layer, **setting
`trainable = False` on the layer means that the layer will be
subsequently run in inference mode** (meaning that it will use
the moving mean and the moving variance to normalize the current batch,
rather than using the mean and variance of the current batch).
Note that:
    - Setting `trainable` on a model containing other layers will recursively
set the `trainable` value of all inner layers.
- If the value of the `trainable` attribute is changed after calling
`compile()` on a model, the new value doesn't take effect for this model
until `compile()` is called again.
"""
def __init__(
self,
axis=-1,
momentum=0.99,
epsilon=1e-3,
center=True,
scale=True,
beta_initializer="zeros",
gamma_initializer="ones",
moving_mean_initializer="zeros",
moving_variance_initializer="ones",
beta_regularizer=None,
gamma_regularizer=None,
beta_constraint=None,
gamma_constraint=None,
synchronized=False,
**kwargs,
):
super().__init__(**kwargs)
self.axis = int(axis)
if synchronized and backend.backend() != "tensorflow":
raise ValueError(
"Argument synchronized=True is only supported "
"with the TensorFlow backend."
)
self.synchronized = synchronized
self.momentum = float(momentum)
self.epsilon = float(epsilon)
self.center = center
self.scale = scale
self.beta_initializer = initializers.get(beta_initializer)
self.gamma_initializer = initializers.get(gamma_initializer)
self.moving_mean_initializer = initializers.get(moving_mean_initializer)
self.moving_variance_initializer = initializers.get(
moving_variance_initializer
)
self.beta_regularizer = regularizers.get(beta_regularizer)
self.gamma_regularizer = regularizers.get(gamma_regularizer)
self.beta_constraint = constraints.get(beta_constraint)
self.gamma_constraint = constraints.get(gamma_constraint)
self.supports_masking = True
def build(self, input_shape):
shape = (input_shape[self.axis],)
if self.scale:
self.gamma = self.add_weight(
shape=shape,
name="gamma",
initializer=self.gamma_initializer,
regularizer=self.gamma_regularizer,
constraint=self.gamma_constraint,
trainable=True,
)
if self.center:
self.beta = self.add_weight(
shape=shape,
name="beta",
initializer=self.beta_initializer,
regularizer=self.beta_regularizer,
constraint=self.beta_constraint,
trainable=True,
)
self.moving_mean = self.add_weight(
shape=shape,
name="moving_mean",
initializer=self.moving_mean_initializer,
trainable=False,
)
self.moving_variance = self.add_weight(
shape=shape,
name="moving_variance",
initializer=self.moving_variance_initializer,
trainable=False,
)
self.input_spec = InputSpec(
ndim=len(input_shape), axes={self.axis: input_shape[self.axis]}
)
reduction_axes = list(range(len(input_shape)))
del reduction_axes[self.axis]
self._reduction_axes = reduction_axes
self.built = True
def compute_output_shape(self, input_shape):
return input_shape
def call(self, inputs, training=None, mask=None):
input_dtype = standardize_dtype(inputs.dtype)
if input_dtype in ("float16", "bfloat16"):
# BN is prone to overflowing for float16/bfloat16 inputs, so we opt
# out BN for mixed precision.
inputs = ops.cast(inputs, "float32")
if training and self.trainable:
mean, variance = self._moments(
inputs,
mask,
)
moving_mean = ops.cast(self.moving_mean, inputs.dtype)
moving_variance = ops.cast(self.moving_variance, inputs.dtype)
self.moving_mean.assign(
ops.cast(
moving_mean * self.momentum + mean * (1.0 - self.momentum),
inputs.dtype,
)
)
self.moving_variance.assign(
ops.cast(
moving_variance * self.momentum
+ variance * (1.0 - self.momentum),
inputs.dtype,
)
)
else:
moving_mean = ops.cast(self.moving_mean, inputs.dtype)
moving_variance = ops.cast(self.moving_variance, inputs.dtype)
mean = moving_mean
variance = moving_variance
if self.scale:
gamma = ops.cast(self.gamma, inputs.dtype)
else:
gamma = None
if self.center:
beta = ops.cast(self.beta, inputs.dtype)
else:
beta = None
outputs = ops.batch_normalization(
x=inputs,
mean=mean,
variance=variance,
axis=self.axis,
offset=beta,
scale=gamma,
epsilon=self.epsilon,
)
if input_dtype in ("float16", "bfloat16"):
outputs = ops.cast(outputs, input_dtype)
return outputs
def get_config(self):
base_config = super().get_config()
config = {
"axis": self.axis,
"momentum": self.momentum,
"epsilon": self.epsilon,
"center": self.center,
"scale": self.scale,
"beta_initializer": initializers.serialize(self.beta_initializer),
"gamma_initializer": initializers.serialize(self.gamma_initializer),
"moving_mean_initializer": initializers.serialize(
self.moving_mean_initializer
),
"moving_variance_initializer": initializers.serialize(
self.moving_variance_initializer
),
"beta_regularizer": regularizers.serialize(self.beta_regularizer),
"gamma_regularizer": regularizers.serialize(self.gamma_regularizer),
"beta_constraint": constraints.serialize(self.beta_constraint),
"gamma_constraint": constraints.serialize(self.gamma_constraint),
"synchronized": self.synchronized,
}
return {**base_config, **config}
def _moments(self, inputs, mask):
if mask is None:
return ops.moments(
inputs,
axes=self._reduction_axes,
synchronized=self.synchronized,
)
mask_weights = ops.cast(
mask,
inputs.dtype,
)
mask_weights_broadcasted = ops.expand_dims(
mask_weights,
axis=-1,
)
weighted_inputs = mask_weights_broadcasted * inputs
weighted_input_sum = ops.sum(
weighted_inputs,
self._reduction_axes,
keepdims=True,
)
sum_of_weights = ops.sum(
mask_weights_broadcasted,
self._reduction_axes,
keepdims=True,
)
mean = weighted_input_sum / (sum_of_weights + backend.config.epsilon())
difference = weighted_inputs - mean
squared_difference = ops.square(difference)
weighted_distsq = ops.sum(
mask_weights_broadcasted * squared_difference,
self._reduction_axes,
keepdims=True,
)
variance = weighted_distsq / (sum_of_weights + backend.config.epsilon())
return ops.squeeze(mean), ops.squeeze(variance)
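# --- Hedged usage sketch (not part of the original module) ---
# Illustrates the moving-average update described in the docstring: after one
# training call, `moving_mean` moves a fraction `(1 - momentum)` of the way
# from its initial zeros towards the batch mean. Numbers are illustrative.
if __name__ == "__main__":
    import numpy as np

    x = np.random.rand(8, 4).astype("float32") + 5.0
    bn = BatchNormalization(momentum=0.9)
    bn(x, training=True)  # updates the moving statistics
    expected = 0.0 * 0.9 + x.mean(axis=0) * (1.0 - 0.9)
    print(
        np.allclose(ops.convert_to_numpy(bn.moving_mean), expected, atol=1e-5)
    )  # True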
| keras/keras/layers/normalization/batch_normalization.py/0 | {
"file_path": "keras/keras/layers/normalization/batch_normalization.py",
"repo_id": "keras",
"token_count": 5661
} | 180 |
from keras import backend
from keras import ops
from keras.layers.input_spec import InputSpec
from keras.layers.layer import Layer
from keras.ops.operation_utils import compute_pooling_output_shape
from keras.utils import argument_validation
class BasePooling(Layer):
"""Base pooling layer."""
def __init__(
self,
pool_size,
strides,
pool_dimensions,
pool_mode="max",
padding="valid",
data_format=None,
name=None,
**kwargs,
):
super().__init__(name=name, **kwargs)
self.pool_size = argument_validation.standardize_tuple(
pool_size, pool_dimensions, "pool_size"
)
strides = pool_size if strides is None else strides
self.strides = argument_validation.standardize_tuple(
strides, pool_dimensions, "strides", allow_zero=True
)
self.pool_mode = pool_mode
self.padding = padding
self.data_format = backend.standardize_data_format(data_format)
self.input_spec = InputSpec(ndim=pool_dimensions + 2)
def call(self, inputs):
if self.pool_mode == "max":
return ops.max_pool(
inputs,
pool_size=self.pool_size,
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
)
elif self.pool_mode == "average":
return ops.average_pool(
inputs,
pool_size=self.pool_size,
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
)
else:
raise ValueError(
"`pool_mode` must be either 'max' or 'average'. Received: "
f"{self.pool_mode}."
)
def compute_output_shape(self, input_shape):
return compute_pooling_output_shape(
input_shape,
self.pool_size,
self.strides,
self.padding,
self.data_format,
)
def get_config(self):
config = super().get_config()
config.update(
{
"pool_size": self.pool_size,
"padding": self.padding,
"strides": self.strides,
"data_format": self.data_format,
}
)
return config
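# --- Hedged usage sketch (not part of the original module) ---
# The concrete pooling layers built on this base class (e.g.
# `keras.layers.MaxPooling2D`) halve the spatial dimensions when `pool_size=2`
# and `strides` is left as `None` (strides then default to `pool_size`).
# Shapes assume `"channels_last"`.
if __name__ == "__main__":
    import numpy as np

    from keras import layers

    x = np.random.rand(2, 8, 8, 3).astype("float32")
    y = layers.MaxPooling2D(pool_size=2)(x)
    print(y.shape)  # (2, 4, 4, 3)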
| keras/keras/layers/pooling/base_pooling.py/0 | {
"file_path": "keras/keras/layers/pooling/base_pooling.py",
"repo_id": "keras",
"token_count": 1213
} | 181 |
from keras import backend
from keras.api_export import keras_export
from keras.layers.preprocessing.tf_data_layer import TFDataLayer
from keras.utils import image_utils
@keras_export("keras.layers.CenterCrop")
class CenterCrop(TFDataLayer):
"""A preprocessing layer which crops images.
This layers crops the central portion of the images to a target size. If an
image is smaller than the target size, it will be resized and cropped
so as to return the largest possible window in the image that matches
the target aspect ratio.
Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`).
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format,
or `(..., channels, height, width)`, in `"channels_first"` format.
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., target_height, target_width, channels)`,
or `(..., channels, target_height, target_width)`,
in `"channels_first"` format.
If the input height/width is even and the target height/width is odd (or
inversely), the input image is left-padded by 1 pixel.
**Note:** This layer is safe to use inside a `tf.data` pipeline
(independently of which backend you're using).
Args:
height: Integer, the height of the output shape.
width: Integer, the width of the output shape.
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape `(batch, height, width, channels)`
while `"channels_first"` corresponds to inputs with shape
`(batch, channels, height, width)`. It defaults to the
`image_data_format` value found in your Keras config file at
`~/.keras/keras.json`. If you never set it, then it will be
`"channels_last"`.
"""
def __init__(self, height, width, data_format=None, **kwargs):
super().__init__(**kwargs)
self.height = height
self.width = width
self.data_format = backend.standardize_data_format(data_format)
def call(self, inputs):
inputs = self.backend.cast(inputs, self.compute_dtype)
if self.data_format == "channels_first":
init_height = inputs.shape[-2]
init_width = inputs.shape[-1]
else:
init_height = inputs.shape[-3]
init_width = inputs.shape[-2]
if init_height is None or init_width is None:
# Dynamic size case. TODO.
raise ValueError(
"At this time, CenterCrop can only "
"process images with a static spatial "
f"shape. Received: inputs.shape={inputs.shape}"
)
h_diff = init_height - self.height
w_diff = init_width - self.width
h_start = int(h_diff / 2)
w_start = int(w_diff / 2)
if h_diff >= 0 and w_diff >= 0:
if len(inputs.shape) == 4:
if self.data_format == "channels_first":
return inputs[
:,
:,
h_start : h_start + self.height,
w_start : w_start + self.width,
]
return inputs[
:,
h_start : h_start + self.height,
w_start : w_start + self.width,
:,
]
elif len(inputs.shape) == 3:
if self.data_format == "channels_first":
return inputs[
:,
h_start : h_start + self.height,
w_start : w_start + self.width,
]
return inputs[
h_start : h_start + self.height,
w_start : w_start + self.width,
:,
]
return image_utils.smart_resize(
inputs,
[self.height, self.width],
data_format=self.data_format,
backend_module=self.backend,
)
def compute_output_shape(self, input_shape):
input_shape = list(input_shape)
if len(input_shape) == 4:
if self.data_format == "channels_last":
input_shape[1] = self.height
input_shape[2] = self.width
else:
input_shape[2] = self.height
input_shape[3] = self.width
else:
if self.data_format == "channels_last":
input_shape[0] = self.height
input_shape[1] = self.width
else:
input_shape[1] = self.height
input_shape[2] = self.width
return tuple(input_shape)
def get_config(self):
base_config = super().get_config()
config = {
"height": self.height,
"width": self.width,
"data_format": self.data_format,
}
return {**base_config, **config}
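# --- Hedged usage sketch (not part of the original module) ---
# When the input is larger than the target size, the central window is
# returned; when it is smaller, the `smart_resize` fallback above is used to
# fill the target size. Shapes assume `"channels_last"`.
if __name__ == "__main__":
    import numpy as np

    crop = CenterCrop(height=4, width=4)
    big = np.random.rand(2, 8, 6, 3).astype("float32")
    print(crop(big).shape)  # (2, 4, 4, 3): central crop
    small = np.random.rand(2, 3, 3, 3).astype("float32")
    print(crop(small).shape)  # (2, 4, 4, 3): resize-and-crop fallback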
| keras/keras/layers/preprocessing/center_crop.py/0 | {
"file_path": "keras/keras/layers/preprocessing/center_crop.py",
"repo_id": "keras",
"token_count": 2521
} | 182 |
from keras.api_export import keras_export
from keras.layers.preprocessing.tf_data_layer import TFDataLayer
from keras.random.seed_generator import SeedGenerator
@keras_export("keras.layers.RandomBrightness")
class RandomBrightness(TFDataLayer):
"""A preprocessing layer which randomly adjusts brightness during training.
This layer will randomly increase/reduce the brightness for the input RGB
images. At inference time, the output will be identical to the input.
Call the layer with `training=True` to adjust the brightness of the input.
**Note:** This layer is safe to use inside a `tf.data` pipeline
(independently of which backend you're using).
Args:
factor: Float or a list/tuple of 2 floats between -1.0 and 1.0. The
factor is used to determine the lower bound and upper bound of the
brightness adjustment. A float value will be chosen randomly between
the limits. When -1.0 is chosen, the output image will be black, and
when 1.0 is chosen, the image will be fully white.
            When only one float is provided, e.g. 0.2, then -0.2 will be
            used for the lower bound and 0.2 will be used for the upper
            bound.
value_range: Optional list/tuple of 2 floats
for the lower and upper limit
of the values of the input data.
To make no change, use `[0.0, 1.0]`, e.g., if the image input
has been scaled before this layer. Defaults to `[0.0, 255.0]`.
The brightness adjustment will be scaled to this range, and the
output values will be clipped to this range.
seed: optional integer, for fixed RNG behavior.
Inputs: 3D (HWC) or 4D (NHWC) tensor, with float or int dtype. Input pixel
values can be of any range (e.g. `[0., 1.)` or `[0, 255]`)
Output: 3D (HWC) or 4D (NHWC) tensor with brightness adjusted based on the
`factor`. By default, the layer will output floats.
The output value will be clipped to the range `[0, 255]`,
the valid range of RGB colors, and
rescaled based on the `value_range` if needed.
Sample usage:
```python
random_bright = keras.layers.RandomBrightness(factor=0.2)
# An image with shape [2, 2, 3]
image = [[[1, 2, 3], [4 ,5 ,6]], [[7, 8, 9], [10, 11, 12]]]
    # Assume we randomly select the factor to be 0.1, then it will apply
    # 0.1 * 255 to all the channels.
    output = random_bright(image, training=True)
    # output will be a float tensor with 25.5 added to each channel:
    # array([[[26.5, 27.5, 28.5],
    #         [29.5, 30.5, 31.5]],
    #        [[32.5, 33.5, 34.5],
    #         [35.5, 36.5, 37.5]]],
    #       shape=(2, 2, 3), dtype=float32)
```
"""
_FACTOR_VALIDATION_ERROR = (
"The `factor` argument should be a number (or a list of two numbers) "
"in the range [-1.0, 1.0]. "
)
_VALUE_RANGE_VALIDATION_ERROR = (
"The `value_range` argument should be a list of two numbers. "
)
def __init__(self, factor, value_range=(0, 255), seed=None, **kwargs):
super().__init__(**kwargs)
self._set_factor(factor)
self._set_value_range(value_range)
self.seed = seed
self.generator = SeedGenerator(seed)
def _set_value_range(self, value_range):
if not isinstance(value_range, (tuple, list)):
raise ValueError(
                self._VALUE_RANGE_VALIDATION_ERROR
+ f"Received: value_range={value_range}"
)
if len(value_range) != 2:
raise ValueError(
                self._VALUE_RANGE_VALIDATION_ERROR
+ f"Received: value_range={value_range}"
)
self.value_range = sorted(value_range)
def _set_factor(self, factor):
if isinstance(factor, (tuple, list)):
if len(factor) != 2:
raise ValueError(
self._FACTOR_VALIDATION_ERROR + f"Received: factor={factor}"
)
self._check_factor_range(factor[0])
self._check_factor_range(factor[1])
self._factor = sorted(factor)
elif isinstance(factor, (int, float)):
self._check_factor_range(factor)
factor = abs(factor)
self._factor = [-factor, factor]
else:
raise ValueError(
self._FACTOR_VALIDATION_ERROR + f"Received: factor={factor}"
)
def _check_factor_range(self, input_number):
if input_number > 1.0 or input_number < -1.0:
raise ValueError(
self._FACTOR_VALIDATION_ERROR
+ f"Received: input_number={input_number}"
)
def call(self, inputs, training=True):
inputs = self.backend.cast(inputs, self.compute_dtype)
if training:
return self._randomly_adjust_brightness(inputs)
else:
return inputs
def _randomly_adjust_brightness(self, images):
images_shape = self.backend.shape(images)
rank = len(images_shape)
if rank == 3:
rgb_delta_shape = (1, 1, 1)
elif rank == 4:
            # Keep only the batch dim. This ensures the same adjustment
            # within one image, but different adjustments across images.
rgb_delta_shape = [images_shape[0], 1, 1, 1]
else:
raise ValueError(
"Expected the input image to be rank 3 or 4. Received "
f"inputs.shape={images_shape}"
)
seed_generator = self._get_seed_generator(self.backend._backend)
rgb_delta = self.backend.random.uniform(
minval=self._factor[0],
maxval=self._factor[1],
shape=rgb_delta_shape,
seed=seed_generator,
)
rgb_delta = rgb_delta * (self.value_range[1] - self.value_range[0])
rgb_delta = self.backend.cast(rgb_delta, images.dtype)
images += rgb_delta
return self.backend.numpy.clip(
images, self.value_range[0], self.value_range[1]
)
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {
"factor": self._factor,
"value_range": self.value_range,
"seed": self.seed,
}
base_config = super().get_config()
return {**base_config, **config}
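# --- Hedged usage sketch (not part of the original module) ---
# With `factor=0.2` and the default `value_range=(0, 255)`, each image is
# shifted by a random delta in `[-0.2 * 255, 0.2 * 255]` during training and
# left unchanged at inference. Shapes and the seed are illustrative.
if __name__ == "__main__":
    import numpy as np

    from keras import ops

    layer = RandomBrightness(factor=0.2, seed=1337)
    images = np.random.rand(2, 8, 8, 3).astype("float32") * 255.0
    out_train = layer(images, training=True)  # shifted, clipped to [0, 255]
    out_infer = layer(images, training=False)  # identical to the input
    print(np.allclose(ops.convert_to_numpy(out_infer), images))  # True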
| keras/keras/layers/preprocessing/random_brightness.py/0 | {
"file_path": "keras/keras/layers/preprocessing/random_brightness.py",
"repo_id": "keras",
"token_count": 2884
} | 183 |
from keras import backend
from keras.api_export import keras_export
from keras.layers.preprocessing.tf_data_layer import TFDataLayer
from keras.utils import image_utils
@keras_export("keras.layers.Resizing")
class Resizing(TFDataLayer):
"""A preprocessing layer which resizes images.
This layer resizes an image input to a target height and width. The input
should be a 4D (batched) or 3D (unbatched) tensor in `"channels_last"`
format. Input pixel values can be of any range
(e.g. `[0., 1.)` or `[0, 255]`).
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format,
or `(..., channels, height, width)`, in `"channels_first"` format.
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., target_height, target_width, channels)`,
or `(..., channels, target_height, target_width)`,
in `"channels_first"` format.
**Note:** This layer is safe to use inside a `tf.data` pipeline
(independently of which backend you're using).
Args:
height: Integer, the height of the output shape.
width: Integer, the width of the output shape.
interpolation: String, the interpolation method.
Supports `"bilinear"`, `"nearest"`, `"bicubic"`,
`"lanczos3"`, `"lanczos5"`. Defaults to `"bilinear"`.
crop_to_aspect_ratio: If `True`, resize the images without aspect
ratio distortion. When the original aspect ratio differs
from the target aspect ratio, the output image will be
cropped so as to return the
largest possible window in the image (of size `(height, width)`)
that matches the target aspect ratio. By default
(`crop_to_aspect_ratio=False`), aspect ratio may not be preserved.
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape `(batch, height, width, channels)`
while `"channels_first"` corresponds to inputs with shape
`(batch, channels, height, width)`. It defaults to the
`image_data_format` value found in your Keras config file at
`~/.keras/keras.json`. If you never set it, then it will be
`"channels_last"`.
**kwargs: Base layer keyword arguments, such as `name` and `dtype`.
"""
def __init__(
self,
height,
width,
interpolation="bilinear",
crop_to_aspect_ratio=False,
data_format=None,
**kwargs,
):
super().__init__(**kwargs)
self.height = height
self.width = width
self.interpolation = interpolation
self.data_format = backend.standardize_data_format(data_format)
self.crop_to_aspect_ratio = crop_to_aspect_ratio
def call(self, inputs):
size = (self.height, self.width)
if self.crop_to_aspect_ratio:
outputs = image_utils.smart_resize(
inputs,
size=size,
interpolation=self.interpolation,
data_format=self.data_format,
backend_module=self.backend,
)
else:
outputs = self.backend.image.resize(
inputs,
size=size,
interpolation=self.interpolation,
data_format=self.data_format,
)
return outputs
def compute_output_shape(self, input_shape):
input_shape = list(input_shape)
if len(input_shape) == 4:
if self.data_format == "channels_last":
input_shape[1] = self.height
input_shape[2] = self.width
else:
input_shape[2] = self.height
input_shape[3] = self.width
else:
if self.data_format == "channels_last":
input_shape[0] = self.height
input_shape[1] = self.width
else:
input_shape[1] = self.height
input_shape[2] = self.width
return tuple(input_shape)
def get_config(self):
base_config = super().get_config()
config = {
"height": self.height,
"width": self.width,
"interpolation": self.interpolation,
"crop_to_aspect_ratio": self.crop_to_aspect_ratio,
"data_format": self.data_format,
}
return {**base_config, **config}
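# --- Hedged usage sketch (not part of the original module) ---
# By default the aspect ratio is not preserved; with
# `crop_to_aspect_ratio=True` the image is centrally cropped to the target
# aspect ratio before resizing, avoiding distortion. Shapes are illustrative.
if __name__ == "__main__":
    import numpy as np

    x = np.random.rand(2, 10, 20, 3).astype("float32")
    print(Resizing(height=5, width=5)(x).shape)  # (2, 5, 5, 3), squashed
    print(Resizing(height=5, width=5, crop_to_aspect_ratio=True)(x).shape)
    # also (2, 5, 5, 3), but cropped to a square before resizing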
| keras/keras/layers/preprocessing/resizing.py/0 | {
"file_path": "keras/keras/layers/preprocessing/resizing.py",
"repo_id": "keras",
"token_count": 2093
} | 184 |
from keras import backend
from keras import layers
from keras import ops
from keras.api_export import keras_export
@keras_export("keras.layers.GaussianNoise")
class GaussianNoise(layers.Layer):
"""Apply additive zero-centered Gaussian noise.
This is useful to mitigate overfitting
(you could see it as a form of random data augmentation).
    Gaussian Noise (GN) is a natural choice as a corruption process
    for real-valued inputs.
As it is a regularization layer, it is only active at training time.
Args:
stddev: Float, standard deviation of the noise distribution.
seed: Integer, optional random seed to enable deterministic behavior.
Call arguments:
inputs: Input tensor (of any rank).
training: Python boolean indicating whether the layer should behave in
training mode (adding noise) or in inference mode (doing nothing).
"""
def __init__(self, stddev, seed=None, **kwargs):
super().__init__(**kwargs)
if not 0 <= stddev <= 1:
raise ValueError(
f"Invalid value received for argument "
"`stddev`. Expected a float value between 0 and 1. "
f"Received: stddev={stddev}"
)
self.stddev = stddev
self.seed = seed
self.seed_generator = backend.random.SeedGenerator(seed)
self.supports_masking = True
def call(self, inputs, training=False):
if training and self.stddev > 0:
return inputs + backend.random.normal(
shape=ops.shape(inputs),
mean=0.0,
stddev=self.stddev,
seed=self.seed_generator,
)
return inputs
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
base_config = super().get_config()
config = {
"stddev": self.stddev,
"seed": self.seed,
}
return {**base_config, **config}
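# --- Hedged usage sketch (not part of the original module) ---
# The layer is the identity at inference time and only perturbs its input
# when called with `training=True`. The seed and shapes are illustrative.
if __name__ == "__main__":
    import numpy as np

    noise = GaussianNoise(stddev=0.1, seed=42)
    x = np.zeros((2, 3), dtype="float32")
    print(ops.convert_to_numpy(noise(x, training=False)))  # all zeros
    print(ops.convert_to_numpy(noise(x, training=True)))  # zero-mean noise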
| keras/keras/layers/regularization/gaussian_noise.py/0 | {
"file_path": "keras/keras/layers/regularization/gaussian_noise.py",
"repo_id": "keras",
"token_count": 836
} | 185 |
import numpy as np
import pytest
from keras import layers
from keras import ops
from keras import testing
class FlattenTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_repeat_vector(self):
inputs = np.random.random((2, 5)).astype("float32")
expected_output = ops.convert_to_tensor(
np.repeat(np.reshape(inputs, (2, 1, 5)), 3, axis=1)
)
self.run_layer_test(
layers.RepeatVector,
init_kwargs={"n": 3},
input_data=inputs,
expected_output=expected_output,
)
def test_repeat_vector_with_dynamic_batch_size(self):
input_layer = layers.Input(batch_shape=(None, 5))
repeated = layers.RepeatVector(n=3)(input_layer)
self.assertEqual(repeated.shape, (None, 3, 5))
def test_repeat_vector_with_dynamic_dimension(self):
input_layer = layers.Input(batch_shape=(2, None))
repeated = layers.RepeatVector(n=3)(input_layer)
self.assertEqual(repeated.shape, (2, 3, None))
def test_repeat_vector_with_invalid_n(self):
with self.assertRaisesRegex(
TypeError, "Expected an integer value for `n`"
):
layers.RepeatVector(n="3")
with self.assertRaisesRegex(
TypeError, "Expected an integer value for `n`"
):
layers.RepeatVector(n=3.5)
with self.assertRaisesRegex(
TypeError, "Expected an integer value for `n`"
):
layers.RepeatVector(n=[3])
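# --- Hedged usage sketch (not part of the original test file) ---
# `RepeatVector(n)` turns a `(batch, features)` input into
# `(batch, n, features)` by repeating it along a new time axis, which is what
# the first test above checks numerically.
if __name__ == "__main__":
    x = np.array([[1.0, 2.0]], dtype="float32")
    y = layers.RepeatVector(3)(x)
    print(ops.convert_to_numpy(y))  # [[[1. 2.] [1. 2.] [1. 2.]]]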
| keras/keras/layers/reshaping/repeat_vector_test.py/0 | {
"file_path": "keras/keras/layers/reshaping/repeat_vector_test.py",
"repo_id": "keras",
"token_count": 697
} | 186 |
import copy
from keras import ops
from keras import utils
from keras.api_export import keras_export
from keras.layers.core.wrapper import Wrapper
from keras.layers.layer import Layer
from keras.saving import serialization_lib
@keras_export("keras.layers.Bidirectional")
class Bidirectional(Wrapper):
"""Bidirectional wrapper for RNNs.
Args:
layer: `keras.layers.RNN` instance, such as
`keras.layers.LSTM` or `keras.layers.GRU`.
It could also be a `keras.layers.Layer` instance
that meets the following criteria:
1. Be a sequence-processing layer (accepts 3D+ inputs).
2. Have a `go_backwards`, `return_sequences` and `return_state`
attribute (with the same semantics as for the `RNN` class).
3. Have an `input_spec` attribute.
4. Implement serialization via `get_config()` and `from_config()`.
Note that the recommended way to create new RNN layers is to write a
custom RNN cell and use it with `keras.layers.RNN`, instead of
subclassing `keras.layers.Layer` directly.
When `return_sequences` is `True`, the output of the masked
timestep will be zero regardless of the layer's original
`zero_output_for_mask` value.
merge_mode: Mode by which outputs of the forward and backward RNNs
will be combined. One of `{"sum", "mul", "concat", "ave", None}`.
If `None`, the outputs will not be combined,
they will be returned as a list. Defaults to `"concat"`.
backward_layer: Optional `keras.layers.RNN`,
or `keras.layers.Layer` instance to be used to handle
backwards input processing.
If `backward_layer` is not provided, the layer instance passed
as the `layer` argument will be used to generate the backward layer
automatically.
Note that the provided `backward_layer` layer should have properties
matching those of the `layer` argument, in particular
            it should have the same values for `stateful`, `return_state`,
`return_sequences`, etc. In addition, `backward_layer`
and `layer` should have different `go_backwards` argument values.
A `ValueError` will be raised if these requirements are not met.
Call arguments:
The call arguments for this layer are the same as those of the
wrapped RNN layer. Beware that when passing the `initial_state`
argument during the call of this layer, the first half in the
list of elements in the `initial_state` list will be passed to
the forward RNN call and the last half in the list of elements
will be passed to the backward RNN call.
Note: instantiating a `Bidirectional` layer from an existing RNN layer
instance will not reuse the weights state of the RNN layer instance -- the
`Bidirectional` layer will have freshly initialized weights.
Examples:
```python
model = Sequential([
Input(shape=(5, 10)),
        Bidirectional(LSTM(10, return_sequences=True)),
Bidirectional(LSTM(10)),
Dense(5, activation="softmax"),
])
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
# With custom backward layer
forward_layer = LSTM(10, return_sequences=True)
backward_layer = LSTM(10, activation='relu', return_sequences=True,
go_backwards=True)
model = Sequential([
Input(shape=(5, 10)),
Bidirectional(forward_layer, backward_layer=backward_layer),
Dense(5, activation="softmax"),
])
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
```
"""
def __init__(
self,
layer,
merge_mode="concat",
weights=None,
backward_layer=None,
**kwargs,
):
if not isinstance(layer, Layer):
raise ValueError(
"Please initialize `Bidirectional` layer with a "
f"`keras.layers.Layer` instance. Received: {layer}"
)
if backward_layer is not None and not isinstance(backward_layer, Layer):
raise ValueError(
"`backward_layer` need to be a `keras.layers.Layer` "
f"instance. Received: {backward_layer}"
)
if merge_mode not in ["sum", "mul", "ave", "concat", None]:
raise ValueError(
f"Invalid merge mode. Received: {merge_mode}. "
"Merge mode should be one of "
'{"sum", "mul", "ave", "concat", None}'
)
super().__init__(layer, **kwargs)
# Recreate the forward layer from the original layer config, so that it
# will not carry over any state from the layer.
config = serialization_lib.serialize_keras_object(layer)
config["config"]["name"] = "forward_" + utils.removeprefix(
layer.name, "forward_"
)
self.forward_layer = serialization_lib.deserialize_keras_object(config)
if backward_layer is None:
config = serialization_lib.serialize_keras_object(layer)
config["config"]["go_backwards"] = True
config["config"]["name"] = "backward_" + utils.removeprefix(
layer.name, "backward_"
)
self.backward_layer = serialization_lib.deserialize_keras_object(
config
)
else:
self.backward_layer = backward_layer
self._verify_layer_config()
def force_zero_output_for_mask(layer):
# Force the zero_output_for_mask to be True if returning sequences.
if getattr(layer, "zero_output_for_mask", None) is not None:
layer.zero_output_for_mask = layer.return_sequences
force_zero_output_for_mask(self.forward_layer)
force_zero_output_for_mask(self.backward_layer)
self.merge_mode = merge_mode
if weights:
nw = len(weights)
self.forward_layer.initial_weights = weights[: nw // 2]
self.backward_layer.initial_weights = weights[nw // 2 :]
self.stateful = layer.stateful
self.return_sequences = layer.return_sequences
self.return_state = layer.return_state
self.supports_masking = True
self.input_spec = layer.input_spec
def _verify_layer_config(self):
"""Ensure the forward and backward layers have valid common property."""
if self.forward_layer.go_backwards == self.backward_layer.go_backwards:
raise ValueError(
"Forward layer and backward layer should have different "
"`go_backwards` value. Received: "
"forward_layer.go_backwards "
f"{self.forward_layer.go_backwards}, "
"backward_layer.go_backwards="
f"{self.backward_layer.go_backwards}"
)
common_attributes = ("stateful", "return_sequences", "return_state")
for a in common_attributes:
forward_value = getattr(self.forward_layer, a)
backward_value = getattr(self.backward_layer, a)
if forward_value != backward_value:
raise ValueError(
"Forward layer and backward layer are expected to have "
f'the same value for attribute "{a}", got '
f'"{forward_value}" for forward layer and '
f'"{backward_value}" for backward layer'
)
def compute_output_shape(self, sequences_shape, initial_state_shape=None):
output_shape = self.forward_layer.compute_output_shape(sequences_shape)
if self.return_state:
            output_shape, state_shape = (
                output_shape[0],
                list(output_shape[1:]),
            )
if self.merge_mode == "concat":
output_shape = list(output_shape)
output_shape[-1] *= 2
output_shape = tuple(output_shape)
elif self.merge_mode is None:
output_shape = [output_shape, copy.copy(output_shape)]
if self.return_state:
if self.merge_mode is None:
return output_shape + state_shape + copy.copy(state_shape)
return [output_shape] + state_shape + copy.copy(state_shape)
return output_shape
def call(
self,
sequences,
initial_state=None,
mask=None,
training=None,
):
kwargs = {}
if self.forward_layer._call_has_training_arg:
kwargs["training"] = training
if self.forward_layer._call_has_mask_arg:
kwargs["mask"] = mask
if initial_state is not None:
# initial_states are not keras tensors, eg eager tensor from np
# array. They are only passed in from kwarg initial_state, and
# should be passed to forward/backward layer via kwarg
# initial_state as well.
forward_inputs, backward_inputs = sequences, sequences
half = len(initial_state) // 2
forward_state = initial_state[:half]
backward_state = initial_state[half:]
else:
forward_inputs, backward_inputs = sequences, sequences
forward_state, backward_state = None, None
y = self.forward_layer(
forward_inputs, initial_state=forward_state, **kwargs
)
y_rev = self.backward_layer(
backward_inputs, initial_state=backward_state, **kwargs
)
if self.return_state:
states = tuple(y[1:] + y_rev[1:])
y = y[0]
y_rev = y_rev[0]
y = ops.cast(y, self.compute_dtype)
y_rev = ops.cast(y_rev, self.compute_dtype)
if self.return_sequences:
y_rev = ops.flip(y_rev, axis=1)
if self.merge_mode == "concat":
output = ops.concatenate([y, y_rev], axis=-1)
elif self.merge_mode == "sum":
output = y + y_rev
elif self.merge_mode == "ave":
output = (y + y_rev) / 2
elif self.merge_mode == "mul":
output = y * y_rev
elif self.merge_mode is None:
output = (y, y_rev)
else:
            raise ValueError(
                "Unrecognized value for `merge_mode`. "
                f"Received: {self.merge_mode}. "
                'Expected one of {"concat", "sum", "ave", "mul", None}.'
)
if self.return_state:
if self.merge_mode is None:
return output + states
return (output,) + states
return output
def reset_states(self):
# Compatibility alias.
self.reset_state()
def reset_state(self):
if not self.stateful:
raise AttributeError("Layer must be stateful.")
self.forward_layer.reset_state()
self.backward_layer.reset_state()
@property
def states(self):
if self.forward_layer.states and self.backward_layer.states:
return tuple(self.forward_layer.states + self.backward_layer.states)
return None
def build(self, sequences_shape, initial_state_shape=None):
self.forward_layer.build(sequences_shape)
self.backward_layer.build(sequences_shape)
self.built = True
def compute_mask(self, _, mask):
if isinstance(mask, list):
mask = mask[0]
        if self.return_sequences:
            if not self.merge_mode:
                output_mask = [mask, mask]
            else:
                output_mask = mask
        else:
            output_mask = [None, None] if not self.merge_mode else None
        if self.return_state and self.states is not None:
            state_mask = [None for _ in self.states]
            if isinstance(output_mask, list):
                return output_mask + state_mask * 2
            return [output_mask] + state_mask * 2
        return output_mask
def get_config(self):
config = {"merge_mode": self.merge_mode}
config["layer"] = serialization_lib.serialize_keras_object(
self.forward_layer
)
config["backward_layer"] = serialization_lib.serialize_keras_object(
self.backward_layer
)
base_config = super().get_config()
return {**base_config, **config}
@classmethod
def from_config(cls, config, custom_objects=None):
# Instead of updating the input, create a copy and use that.
config = copy.deepcopy(config)
config["layer"] = serialization_lib.deserialize_keras_object(
config["layer"], custom_objects=custom_objects
)
# Handle (optional) backward layer instantiation.
backward_layer_config = config.pop("backward_layer", None)
if backward_layer_config is not None:
backward_layer = serialization_lib.deserialize_keras_object(
backward_layer_config, custom_objects=custom_objects
)
config["backward_layer"] = backward_layer
# Instantiate the wrapper, adjust it and return it.
layer = cls(**config)
return layer
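# --- Hedged usage sketch (not part of the original module) ---
# With the default `merge_mode="concat"`, the forward and backward outputs
# are concatenated, so the last dimension doubles relative to the wrapped
# RNN's units. Shapes are illustrative.
if __name__ == "__main__":
    import numpy as np

    from keras import layers

    x = np.random.rand(2, 5, 10).astype("float32")
    bidir = Bidirectional(layers.LSTM(16, return_sequences=True))
    print(bidir(x).shape)  # (2, 5, 32): 16 forward + 16 backward units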
| keras/keras/layers/rnn/bidirectional.py/0 | {
"file_path": "keras/keras/layers/rnn/bidirectional.py",
"repo_id": "keras",
"token_count": 5874
} | 187 |
import tree
from keras import backend
from keras import ops
from keras.api_export import keras_export
from keras.layers.layer import Layer
from keras.layers.rnn.dropout_rnn_cell import DropoutRNNCell
from keras.layers.rnn.stacked_rnn_cells import StackedRNNCells
from keras.saving import serialization_lib
from keras.utils import tracking
@keras_export("keras.layers.RNN")
class RNN(Layer):
"""Base class for recurrent layers.
Args:
cell: A RNN cell instance or a list of RNN cell instances.
A RNN cell is a class that has:
- A `call(input_at_t, states_at_t)` method, returning
`(output_at_t, states_at_t_plus_1)`. The call method of the
cell can also take the optional argument `constants`, see
section "Note on passing external constants" below.
- A `state_size` attribute. This can be a single integer
(single state) in which case it is the size of the recurrent
state. This can also be a list/tuple of integers
(one size per state).
- A `output_size` attribute, a single integer.
- A `get_initial_state(batch_size=None)`
method that creates a tensor meant to be fed to `call()` as the
initial state, if the user didn't specify any initial state
via other means. The returned initial state should have
shape `(batch_size, cell.state_size)`.
The cell might choose to create a tensor full of zeros,
or other values based on the cell's implementation.
`inputs` is the input tensor to the RNN layer, with shape
`(batch_size, timesteps, features)`.
If this method is not implemented
by the cell, the RNN layer will create a zero filled tensor
with shape `(batch_size, cell.state_size)`.
In the case that `cell` is a list of RNN cell instances, the cells
will be stacked on top of each other in the RNN, resulting in an
efficient stacked RNN.
return_sequences: Boolean (default `False`). Whether to return the last
output in the output sequence, or the full sequence.
return_state: Boolean (default `False`).
Whether to return the last state in addition to the output.
go_backwards: Boolean (default `False`).
If `True`, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default `False`). If True, the last state
for each sample at index `i` in a batch will be used as initial
state for the sample of index `i` in the following batch.
unroll: Boolean (default `False`).
If True, the network will be unrolled, else a symbolic loop will be
            used. Unrolling can speed up an RNN, although it tends to be more
memory-intensive. Unrolling is only suitable for short sequences.
zero_output_for_mask: Boolean (default `False`).
Whether the output should use zeros for the masked timesteps.
Note that this field is only used when `return_sequences`
is `True` and `mask` is provided.
            It can be useful if you want to reuse the raw output sequence of
the RNN without interference from the masked timesteps, e.g.,
merging bidirectional RNNs.
Call arguments:
inputs: Input tensor.
initial_state: List of initial state tensors to be passed to the first
call of the cell.
mask: Binary tensor of shape `[batch_size, timesteps]`
indicating whether a given timestep should be masked.
An individual `True` entry indicates that the corresponding
timestep should be utilized, while a `False` entry indicates
that the corresponding timestep should be ignored.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. This argument is passed
to the cell when calling it.
This is for use with cells that use dropout.
Input shape:
3-D tensor with shape `(batch_size, timesteps, features)`.
Output shape:
- If `return_state`: a list of tensors. The first tensor is
the output. The remaining tensors are the last states,
each with shape `(batch_size, state_size)`, where `state_size` could
be a high dimension tensor shape.
- If `return_sequences`: 3D tensor with shape
`(batch_size, timesteps, output_size)`.
Masking:
This layer supports masking for input data with a variable number
of timesteps. To introduce masks to your data,
use a `keras.layers.Embedding` layer with the `mask_zero` parameter
set to `True`.
Note on using statefulness in RNNs:
You can set RNN layers to be 'stateful', which means that the states
computed for the samples in one batch will be reused as initial states
for the samples in the next batch. This assumes a one-to-one mapping
between samples in different successive batches.
To enable statefulness:
- Specify `stateful=True` in the layer constructor.
- Specify a fixed batch size for your model, by passing
If sequential model:
`batch_input_shape=(...)` to the first layer in your model.
Else for functional model with 1 or more Input layers:
`batch_shape=(...)` to all the first layers in your model.
This is the expected shape of your inputs
*including the batch size*.
It should be a tuple of integers, e.g. `(32, 10, 100)`.
- Specify `shuffle=False` when calling `fit()`.
To reset the states of your model, call `.reset_states()` on either
a specific layer, or on your entire model.
Note on specifying the initial state of RNNs:
You can specify the initial state of RNN layers symbolically by
calling them with the keyword argument `initial_state`. The value of
`initial_state` should be a tensor or list of tensors representing
the initial state of the RNN layer.
You can specify the initial state of RNN layers numerically by
calling `reset_states` with the keyword argument `states`. The value of
`states` should be a numpy array or list of numpy arrays representing
the initial state of the RNN layer.
Examples:
```python
from keras.layers import RNN
from keras import ops
# First, let's define a RNN Cell, as a layer subclass.
class MinimalRNNCell(keras.layers.Layer):
def __init__(self, units, **kwargs):
super().__init__(**kwargs)
self.units = units
self.state_size = units
def build(self, input_shape):
self.kernel = self.add_weight(shape=(input_shape[-1], self.units),
initializer='uniform',
name='kernel')
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
initializer='uniform',
name='recurrent_kernel')
self.built = True
def call(self, inputs, states):
prev_output = states[0]
h = ops.matmul(inputs, self.kernel)
output = h + ops.matmul(prev_output, self.recurrent_kernel)
return output, [output]
# Let's use this cell in a RNN layer:
cell = MinimalRNNCell(32)
x = keras.Input((None, 5))
layer = RNN(cell)
y = layer(x)
# Here's how to use the cell to build a stacked RNN:
cells = [MinimalRNNCell(32), MinimalRNNCell(64)]
x = keras.Input((None, 5))
layer = RNN(cells)
y = layer(x)
```
"""
def __init__(
self,
cell,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
unroll=False,
zero_output_for_mask=False,
**kwargs,
):
if isinstance(cell, (list, tuple)):
cell = StackedRNNCells(cell)
if "call" not in dir(cell):
raise ValueError(
"Argument `cell` should have a `call` method. "
f"Received: cell={cell}"
)
if "state_size" not in dir(cell):
raise ValueError(
"The RNN cell should have a `state_size` attribute "
"(single integer or list of integers, "
"one integer per RNN state). "
f"Received: cell={cell}"
)
super().__init__(**kwargs)
# If True, the output for masked timestep will be zeros, whereas in the
# False case, output from previous timestep is returned for masked
# timestep.
self.zero_output_for_mask = zero_output_for_mask
self.cell = cell
self.return_sequences = return_sequences
self.return_state = return_state
self.go_backwards = go_backwards
self.stateful = stateful
self.unroll = unroll
self.supports_masking = True
self.input_spec = None
self.states = None
state_size = getattr(self.cell, "state_size", None)
if state_size is None:
            raise ValueError(
                "state_size must be specified as a property on the RNN cell."
)
if not isinstance(state_size, (list, tuple, int)):
raise ValueError(
"state_size must be an integer, or a list/tuple of integers "
"(one for each state tensor)."
)
if isinstance(state_size, int):
self.state_size = [state_size]
self.single_state = True
else:
self.state_size = list(state_size)
self.single_state = False
def compute_output_shape(self, sequences_shape, initial_state_shape=None):
state_shape = [(sequences_shape[0], d) for d in self.state_size]
output_size = getattr(self.cell, "output_size", None)
if output_size is None:
output_size = self.state_size[0]
if not isinstance(output_size, int):
raise ValueError("output_size must be an integer.")
if self.return_sequences:
output_shape = (sequences_shape[0], sequences_shape[1], output_size)
else:
output_shape = (sequences_shape[0], output_size)
if self.return_state:
return output_shape, *state_shape
return output_shape
def compute_mask(self, _, mask):
# Time step masks must be the same for each input.
# This is because the mask for an RNN is of size [batch, time_steps, 1],
# and specifies which time steps should be skipped, and a time step
# must be skipped for all inputs.
mask = tree.flatten(mask)[0]
output_mask = mask if self.return_sequences else None
if self.return_state:
state_mask = [None for _ in self.state_size]
return [output_mask] + state_mask
else:
return output_mask
def build(self, sequences_shape, initial_state_shape=None):
# Build cell (if layer).
step_input_shape = (sequences_shape[0],) + tuple(sequences_shape[2:])
if isinstance(self.cell, Layer) and not self.cell.built:
self.cell.build(step_input_shape)
self.cell.built = True
if self.stateful:
if self.states is not None:
self.reset_state()
else:
if sequences_shape[0] is None:
raise ValueError(
"When using `stateful=True` in a RNN, the "
"batch size must be static. Found dynamic "
f"batch size: sequence.shape={sequences_shape}"
)
self._create_state_variables(sequences_shape[0])
self.built = True
@tracking.no_automatic_dependency_tracking
def _create_state_variables(self, batch_size):
with backend.name_scope(self.name, caller=self):
self.states = tree.map_structure(
lambda value: backend.Variable(
value,
trainable=False,
dtype=self.variable_dtype,
name="rnn_state",
),
self.get_initial_state(batch_size),
)
def get_initial_state(self, batch_size):
get_initial_state_fn = getattr(self.cell, "get_initial_state", None)
if get_initial_state_fn:
init_state = get_initial_state_fn(batch_size=batch_size)
else:
return [
ops.zeros((batch_size, d), dtype=self.cell.compute_dtype)
for d in self.state_size
]
        # RNN expects the states in a list, even if there is a single state.
if not tree.is_nested(init_state):
init_state = [init_state]
# Force the state to be a list in case it is a namedtuple eg
# LSTMStateTuple.
return list(init_state)
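    # For illustration: with a single cell whose `state_size` is 32 and a
    # batch size of 8, the fallback branch above returns
    # `[ops.zeros((8, 32))]` -- a list even though there is only one state.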
def reset_states(self):
# Compatibility alias.
self.reset_state()
def reset_state(self):
if self.states is not None:
for v in self.states:
v.assign(ops.zeros_like(v))
def inner_loop(self, sequences, initial_state, mask, training=False):
cell_kwargs = {}
if isinstance(self.cell, Layer) and self.cell._call_has_training_arg:
cell_kwargs["training"] = training
def step(inputs, states):
output, new_states = self.cell(inputs, states, **cell_kwargs)
if not tree.is_nested(new_states):
new_states = [new_states]
return output, new_states
if not tree.is_nested(initial_state):
initial_state = [initial_state]
return backend.rnn(
step,
sequences,
initial_state,
go_backwards=self.go_backwards,
mask=mask,
unroll=self.unroll,
input_length=sequences.shape[1],
zero_output_for_mask=self.zero_output_for_mask,
return_all_outputs=self.return_sequences,
)
def call(
self,
sequences,
initial_state=None,
mask=None,
training=False,
):
timesteps = sequences.shape[1]
if self.unroll and timesteps is None:
raise ValueError(
"Cannot unroll a RNN if the "
"time dimension is undefined. \n"
"- If using a Sequential model, "
"specify the time dimension by passing "
"an `Input()` as your first layer.\n"
"- If using the functional API, specify "
"the time dimension by passing a `shape` "
"or `batch_shape` argument to your `Input()`."
)
if initial_state is None:
if self.stateful:
initial_state = self.states
else:
initial_state = self.get_initial_state(
batch_size=ops.shape(sequences)[0]
)
        # RNN expects the states in a list, even if there is a single state.
if not tree.is_nested(initial_state):
initial_state = [initial_state]
initial_state = list(initial_state)
# Cast states to compute dtype.
# Note that states may be deeply nested
# (e.g. in the stacked cells case).
initial_state = tree.map_structure(
lambda x: backend.convert_to_tensor(
x, dtype=self.cell.compute_dtype
),
initial_state,
)
# Prepopulate the dropout state so that the inner_loop is stateless
# this is particularly important for JAX backend.
self._maybe_config_dropout_masks(self.cell, sequences, initial_state)
last_output, outputs, states = self.inner_loop(
sequences=sequences,
initial_state=initial_state,
mask=mask,
training=training,
)
last_output = ops.cast(last_output, self.compute_dtype)
outputs = ops.cast(outputs, self.compute_dtype)
states = tree.map_structure(
lambda x: ops.cast(x, dtype=self.compute_dtype), states
)
self._maybe_reset_dropout_masks(self.cell)
if self.stateful:
for self_state, state in zip(
tree.flatten(self.states), tree.flatten(states)
):
self_state.assign(state)
if self.return_sequences:
output = outputs
else:
output = last_output
if self.return_state:
if len(states) == 1:
state = states[0]
return output, state
return output, *states
return output
def _maybe_config_dropout_masks(self, cell, input_sequence, input_state):
step_input = input_sequence[:, 0, :]
state = (
input_state[0]
if isinstance(input_state, (list, tuple))
else input_state
)
if isinstance(cell, DropoutRNNCell):
cell.get_dropout_mask(step_input)
cell.get_recurrent_dropout_mask(state)
if isinstance(cell, StackedRNNCells):
for c, s in zip(cell.cells, input_state):
self._maybe_config_dropout_masks(c, input_sequence, s)
def _maybe_reset_dropout_masks(self, cell):
if isinstance(cell, DropoutRNNCell):
cell.reset_dropout_mask()
cell.reset_recurrent_dropout_mask()
if isinstance(cell, StackedRNNCells):
for c in cell.cells:
self._maybe_reset_dropout_masks(c)
def get_config(self):
config = {
"return_sequences": self.return_sequences,
"return_state": self.return_state,
"go_backwards": self.go_backwards,
"stateful": self.stateful,
"unroll": self.unroll,
"zero_output_for_mask": self.zero_output_for_mask,
}
config["cell"] = serialization_lib.serialize_keras_object(self.cell)
base_config = super().get_config()
return {**base_config, **config}
@classmethod
def from_config(cls, config, custom_objects=None):
cell = serialization_lib.deserialize_keras_object(
config.pop("cell"), custom_objects=custom_objects
)
layer = cls(cell, **config)
return layer
| keras/keras/layers/rnn/rnn.py/0 | {
"file_path": "keras/keras/layers/rnn/rnn.py",
"repo_id": "keras",
"token_count": 8191
} | 188 |
import numpy as np
from keras import activations
from keras import backend
from keras import initializers
from keras import ops
from keras.api_export import keras_export
from keras.metrics import metrics_utils
from keras.metrics.metric import Metric
from keras.utils.python_utils import to_list
class _ConfusionMatrixConditionCount(Metric):
"""Calculates the number of the given confusion matrix condition.
Args:
confusion_matrix_cond: One of `metrics_utils.ConfusionMatrix`
conditions.
thresholds: (Optional) Defaults to `0.5`. A float value or a python list
/ tuple of float threshold values in `[0, 1]`. A threshold is
compared with prediction values to determine the truth value of
predictions (i.e., above the threshold is `True`, below is `False`).
One metric value is generated for each threshold value.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
"""
def __init__(
self, confusion_matrix_cond, thresholds=None, name=None, dtype=None
):
super().__init__(name=name, dtype=dtype)
self._confusion_matrix_cond = confusion_matrix_cond
self.init_thresholds = thresholds
self.thresholds = metrics_utils.parse_init_thresholds(
thresholds, default_threshold=0.5
)
self._thresholds_distributed_evenly = (
metrics_utils.is_evenly_distributed_thresholds(self.thresholds)
)
self.accumulator = self.add_variable(
shape=(len(self.thresholds),),
initializer=initializers.Zeros(),
name="accumulator",
)
def update_state(self, y_true, y_pred, sample_weight=None):
"""Accumulates the metric statistics.
Args:
y_true: The ground truth values.
y_pred: The predicted values.
sample_weight: Optional weighting of each example. Defaults to `1`.
Can be a tensor whose rank is either 0, or the same rank as
`y_true`, and must be broadcastable to `y_true`.
"""
return metrics_utils.update_confusion_matrix_variables(
{self._confusion_matrix_cond: self.accumulator},
y_true,
y_pred,
thresholds=self.thresholds,
thresholds_distributed_evenly=self._thresholds_distributed_evenly,
sample_weight=sample_weight,
)
def result(self):
if len(self.thresholds) == 1:
result = self.accumulator[0]
else:
result = self.accumulator
return backend.convert_to_tensor(result)
def get_config(self):
config = {"thresholds": self.init_thresholds}
base_config = super().get_config()
return {**base_config, **config}
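# A minimal usage sketch (illustrative) of the per-threshold accumulator that
# the subclasses below inherit. With a list of thresholds, one count is kept
# per threshold and `result()` returns a tensor of the same length:
#
#   m = FalsePositives(thresholds=[0.3, 0.7])
#   m.update_state([0, 0, 1], [0.4, 0.9, 0.1])
#   # threshold 0.3 -> predictions [1, 1, 0] -> 2 false positives
#   # threshold 0.7 -> predictions [0, 1, 0] -> 1 false positive
#   m.result()  # ~[2.0, 1.0]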
@keras_export("keras.metrics.FalsePositives")
class FalsePositives(_ConfusionMatrixConditionCount):
"""Calculates the number of false positives.
If `sample_weight` is given, calculates the sum of the weights of
false positives. This metric creates one local variable, `accumulator`
that is used to keep track of the number of false positives.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Args:
thresholds: (Optional) Defaults to `0.5`. A float value, or a Python
list/tuple of float threshold values in `[0, 1]`. A threshold is
compared with prediction values to determine the truth value of
predictions (i.e., above the threshold is `True`, below is `False`).
If used with a loss function that sets `from_logits=True` (i.e. no
sigmoid applied to predictions), `thresholds` should be set to 0.
One metric value is generated for each threshold value.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = keras.metrics.FalsePositives()
>>> m.update_state([0, 1, 0, 0], [0, 0, 1, 1])
>>> m.result()
2.0
>>> m.reset_state()
>>> m.update_state([0, 1, 0, 0], [0, 0, 1, 1], sample_weight=[0, 0, 1, 0])
>>> m.result()
1.0
"""
def __init__(self, thresholds=None, name=None, dtype=None):
super().__init__(
confusion_matrix_cond=metrics_utils.ConfusionMatrix.FALSE_POSITIVES,
thresholds=thresholds,
name=name,
dtype=dtype,
)
@keras_export("keras.metrics.FalseNegatives")
class FalseNegatives(_ConfusionMatrixConditionCount):
"""Calculates the number of false negatives.
If `sample_weight` is given, calculates the sum of the weights of
false negatives. This metric creates one local variable, `accumulator`
that is used to keep track of the number of false negatives.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Args:
thresholds: (Optional) Defaults to `0.5`. A float value, or a Python
list/tuple of float threshold values in `[0, 1]`. A threshold is
compared with prediction values to determine the truth value of
predictions (i.e., above the threshold is `True`, below is `False`).
If used with a loss function that sets `from_logits=True` (i.e. no
sigmoid applied to predictions), `thresholds` should be set to 0.
One metric value is generated for each threshold value.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = keras.metrics.FalseNegatives()
>>> m.update_state([0, 1, 1, 1], [0, 1, 0, 0])
>>> m.result()
2.0
>>> m.reset_state()
>>> m.update_state([0, 1, 1, 1], [0, 1, 0, 0], sample_weight=[0, 0, 1, 0])
>>> m.result()
1.0
"""
def __init__(self, thresholds=None, name=None, dtype=None):
super().__init__(
confusion_matrix_cond=metrics_utils.ConfusionMatrix.FALSE_NEGATIVES,
thresholds=thresholds,
name=name,
dtype=dtype,
)
@keras_export("keras.metrics.TrueNegatives")
class TrueNegatives(_ConfusionMatrixConditionCount):
"""Calculates the number of true negatives.
If `sample_weight` is given, calculates the sum of the weights of
true negatives. This metric creates one local variable, `accumulator`
that is used to keep track of the number of true negatives.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Args:
thresholds: (Optional) Defaults to `0.5`. A float value, or a Python
list/tuple of float threshold values in `[0, 1]`. A threshold is
compared with prediction values to determine the truth value of
predictions (i.e., above the threshold is `True`, below is `False`).
If used with a loss function that sets `from_logits=True` (i.e. no
sigmoid applied to predictions), `thresholds` should be set to 0.
One metric value is generated for each threshold value.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = keras.metrics.TrueNegatives()
>>> m.update_state([0, 1, 0, 0], [1, 1, 0, 0])
>>> m.result()
2.0
>>> m.reset_state()
>>> m.update_state([0, 1, 0, 0], [1, 1, 0, 0], sample_weight=[0, 0, 1, 0])
>>> m.result()
1.0
"""
def __init__(self, thresholds=None, name=None, dtype=None):
super().__init__(
confusion_matrix_cond=metrics_utils.ConfusionMatrix.TRUE_NEGATIVES,
thresholds=thresholds,
name=name,
dtype=dtype,
)
@keras_export("keras.metrics.TruePositives")
class TruePositives(_ConfusionMatrixConditionCount):
"""Calculates the number of true positives.
If `sample_weight` is given, calculates the sum of the weights of
true positives. This metric creates one local variable, `true_positives`
that is used to keep track of the number of true positives.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Args:
thresholds: (Optional) Defaults to `0.5`. A float value, or a Python
list/tuple of float threshold values in `[0, 1]`. A threshold is
compared with prediction values to determine the truth value of
predictions (i.e., above the threshold is `True`, below is `False`).
If used with a loss function that sets `from_logits=True` (i.e. no
sigmoid applied to predictions), `thresholds` should be set to 0.
One metric value is generated for each threshold value.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = keras.metrics.TruePositives()
>>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1])
>>> m.result()
2.0
>>> m.reset_state()
>>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1], sample_weight=[0, 0, 1, 0])
>>> m.result()
1.0
"""
def __init__(self, thresholds=None, name=None, dtype=None):
super().__init__(
confusion_matrix_cond=metrics_utils.ConfusionMatrix.TRUE_POSITIVES,
thresholds=thresholds,
name=name,
dtype=dtype,
)
@keras_export("keras.metrics.Precision")
class Precision(Metric):
"""Computes the precision of the predictions with respect to the labels.
The metric creates two local variables, `true_positives` and
`false_positives` that are used to compute the precision. This value is
ultimately returned as `precision`, an idempotent operation that simply
divides `true_positives` by the sum of `true_positives` and
`false_positives`.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
    If `top_k` is set, precision is computed as how often, on average, a
    class among the top-k classes with the highest predicted values of a
    batch entry is correct and can be found in the label for that entry.
If `class_id` is specified, we calculate precision by considering only the
entries in the batch for which `class_id` is above the threshold and/or in
the top-k highest predictions, and computing the fraction of them for which
`class_id` is indeed a correct label.
Args:
thresholds: (Optional) A float value, or a Python list/tuple of float
threshold values in `[0, 1]`. A threshold is compared with
prediction values to determine the truth value of predictions (i.e.,
above the threshold is `True`, below is `False`). If used with a
loss function that sets `from_logits=True` (i.e. no sigmoid applied
to predictions), `thresholds` should be set to 0. One metric value
is generated for each threshold value. If neither `thresholds` nor
`top_k` are set, the default is to calculate precision with
`thresholds=0.5`.
top_k: (Optional) Unset by default. An int value specifying the top-k
predictions to consider when calculating precision.
class_id: (Optional) Integer class ID for which we want binary metrics.
This must be in the half-open interval `[0, num_classes)`, where
`num_classes` is the last dimension of predictions.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = keras.metrics.Precision()
>>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1])
>>> m.result()
0.6666667
>>> m.reset_state()
>>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1], sample_weight=[0, 0, 1, 0])
>>> m.result()
1.0
>>> # With top_k=2, it will calculate precision over y_true[:2]
>>> # and y_pred[:2]
>>> m = keras.metrics.Precision(top_k=2)
>>> m.update_state([0, 0, 1, 1], [1, 1, 1, 1])
>>> m.result()
0.0
>>> # With top_k=4, it will calculate precision over y_true[:4]
>>> # and y_pred[:4]
>>> m = keras.metrics.Precision(top_k=4)
>>> m.update_state([0, 0, 1, 1], [1, 1, 1, 1])
>>> m.result()
0.5
Usage with `compile()` API:
```python
model.compile(optimizer='sgd',
loss='binary_crossentropy',
metrics=[keras.metrics.Precision()])
```
Usage with a loss with `from_logits=True`:
```python
model.compile(optimizer='adam',
loss=keras.losses.BinaryCrossentropy(from_logits=True),
metrics=[keras.metrics.Precision(thresholds=0)])
```
"""
def __init__(
self, thresholds=None, top_k=None, class_id=None, name=None, dtype=None
):
super().__init__(name=name, dtype=dtype)
# Metric should be maximized during optimization.
self._direction = "up"
self.init_thresholds = thresholds
self.top_k = top_k
self.class_id = class_id
default_threshold = 0.5 if top_k is None else metrics_utils.NEG_INF
self.thresholds = metrics_utils.parse_init_thresholds(
thresholds, default_threshold=default_threshold
)
self._thresholds_distributed_evenly = (
metrics_utils.is_evenly_distributed_thresholds(self.thresholds)
)
self.true_positives = self.add_variable(
shape=(len(self.thresholds),),
initializer=initializers.Zeros(),
name="true_positives",
)
self.false_positives = self.add_variable(
shape=(len(self.thresholds),),
initializer=initializers.Zeros(),
name="false_positives",
)
def update_state(self, y_true, y_pred, sample_weight=None):
"""Accumulates true positive and false positive statistics.
Args:
y_true: The ground truth values, with the same dimensions as
`y_pred`. Will be cast to `bool`.
y_pred: The predicted values. Each element must be in the range
`[0, 1]`.
sample_weight: Optional weighting of each example. Defaults to `1`.
Can be a tensor whose rank is either 0, or the same rank as
`y_true`, and must be broadcastable to `y_true`.
"""
metrics_utils.update_confusion_matrix_variables(
{
metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives, # noqa: E501
metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives, # noqa: E501
},
y_true,
y_pred,
thresholds=self.thresholds,
thresholds_distributed_evenly=self._thresholds_distributed_evenly,
top_k=self.top_k,
class_id=self.class_id,
sample_weight=sample_weight,
)
def result(self):
result = ops.divide_no_nan(
self.true_positives,
ops.add(self.true_positives, self.false_positives),
)
return result[0] if len(self.thresholds) == 1 else result
def reset_state(self):
num_thresholds = len(to_list(self.thresholds))
self.true_positives.assign(ops.zeros((num_thresholds,)))
self.false_positives.assign(ops.zeros((num_thresholds,)))
def get_config(self):
config = {
"thresholds": self.init_thresholds,
"top_k": self.top_k,
"class_id": self.class_id,
}
base_config = super().get_config()
return {**base_config, **config}
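# A minimal sketch (illustrative) of `Precision.result()` with a list of
# thresholds -- one precision value is produced per threshold:
#
#   m = Precision(thresholds=[0.25, 0.75])
#   m.update_state([0, 1, 1], [0.3, 0.6, 0.9])
#   # threshold 0.25 -> predictions [1, 1, 1] -> tp=2, fp=1 -> precision 2/3
#   # threshold 0.75 -> predictions [0, 0, 1] -> tp=1, fp=0 -> precision 1.0
#   m.result()  # ~[0.6667, 1.0]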
@keras_export("keras.metrics.Recall")
class Recall(Metric):
"""Computes the recall of the predictions with respect to the labels.
This metric creates two local variables, `true_positives` and
`false_negatives`, that are used to compute the recall. This value is
ultimately returned as `recall`, an idempotent operation that simply divides
`true_positives` by the sum of `true_positives` and `false_negatives`.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
If `top_k` is set, recall will be computed as how often on average a class
among the labels of a batch entry is in the top-k predictions.
If `class_id` is specified, we calculate recall by considering only the
entries in the batch for which `class_id` is in the label, and computing the
fraction of them for which `class_id` is above the threshold and/or in the
top-k predictions.
Args:
thresholds: (Optional) A float value, or a Python list/tuple of float
threshold values in `[0, 1]`. A threshold is compared with
prediction values to determine the truth value of predictions (i.e.,
above the threshold is `True`, below is `False`). If used with a
loss function that sets `from_logits=True` (i.e. no sigmoid
applied to predictions), `thresholds` should be set to 0.
One metric value is generated for each threshold value.
If neither `thresholds` nor `top_k` are set,
the default is to calculate recall with `thresholds=0.5`.
top_k: (Optional) Unset by default. An int value specifying the top-k
predictions to consider when calculating recall.
class_id: (Optional) Integer class ID for which we want binary metrics.
This must be in the half-open interval `[0, num_classes)`, where
`num_classes` is the last dimension of predictions.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = keras.metrics.Recall()
>>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1])
>>> m.result()
0.6666667
>>> m.reset_state()
>>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1], sample_weight=[0, 0, 1, 0])
>>> m.result()
1.0
Usage with `compile()` API:
```python
model.compile(optimizer='sgd',
loss='binary_crossentropy',
metrics=[keras.metrics.Recall()])
```
Usage with a loss with `from_logits=True`:
```python
model.compile(optimizer='adam',
loss=keras.losses.BinaryCrossentropy(from_logits=True),
metrics=[keras.metrics.Recall(thresholds=0)])
```
"""
def __init__(
self, thresholds=None, top_k=None, class_id=None, name=None, dtype=None
):
super().__init__(name=name, dtype=dtype)
# Metric should be maximized during optimization.
self._direction = "up"
self.init_thresholds = thresholds
self.top_k = top_k
self.class_id = class_id
default_threshold = 0.5 if top_k is None else metrics_utils.NEG_INF
self.thresholds = metrics_utils.parse_init_thresholds(
thresholds, default_threshold=default_threshold
)
self._thresholds_distributed_evenly = (
metrics_utils.is_evenly_distributed_thresholds(self.thresholds)
)
self.true_positives = self.add_variable(
shape=(len(self.thresholds),),
initializer=initializers.Zeros(),
name="true_positives",
)
self.false_negatives = self.add_variable(
shape=(len(self.thresholds),),
initializer=initializers.Zeros(),
name="false_negatives",
)
def update_state(self, y_true, y_pred, sample_weight=None):
"""Accumulates true positive and false negative statistics.
Args:
y_true: The ground truth values, with the same dimensions as
`y_pred`. Will be cast to `bool`.
y_pred: The predicted values. Each element must be in the range
`[0, 1]`.
sample_weight: Optional weighting of each example. Defaults to `1`.
Can be a tensor whose rank is either 0, or the same rank as
`y_true`, and must be broadcastable to `y_true`.
"""
metrics_utils.update_confusion_matrix_variables(
{
metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives, # noqa: E501
metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives, # noqa: E501
},
y_true,
y_pred,
thresholds=self.thresholds,
thresholds_distributed_evenly=self._thresholds_distributed_evenly,
top_k=self.top_k,
class_id=self.class_id,
sample_weight=sample_weight,
)
def result(self):
result = ops.divide_no_nan(
self.true_positives,
ops.add(self.true_positives, self.false_negatives),
)
return result[0] if len(self.thresholds) == 1 else result
def reset_state(self):
num_thresholds = len(to_list(self.thresholds))
self.true_positives.assign(ops.zeros((num_thresholds,)))
self.false_negatives.assign(ops.zeros((num_thresholds,)))
def get_config(self):
config = {
"thresholds": self.init_thresholds,
"top_k": self.top_k,
"class_id": self.class_id,
}
base_config = super().get_config()
return {**base_config, **config}
class SensitivitySpecificityBase(Metric):
"""Abstract base class for computing sensitivity and specificity.
For additional information about specificity and sensitivity, see
[the following](https://en.wikipedia.org/wiki/Sensitivity_and_specificity).
"""
def __init__(
self, value, num_thresholds=200, class_id=None, name=None, dtype=None
):
super().__init__(name=name, dtype=dtype)
# Metric should be maximized during optimization.
self._direction = "up"
if num_thresholds <= 0:
raise ValueError(
"Argument `num_thresholds` must be an integer > 0. "
f"Received: num_thresholds={num_thresholds}"
)
self.value = value
self.class_id = class_id
# Compute `num_thresholds` thresholds in [0, 1]
if num_thresholds == 1:
self.thresholds = [0.5]
self._thresholds_distributed_evenly = False
else:
thresholds = [
(i + 1) * 1.0 / (num_thresholds - 1)
for i in range(num_thresholds - 2)
]
self.thresholds = [0.0] + thresholds + [1.0]
self._thresholds_distributed_evenly = True
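        # For illustration: `num_thresholds=5` yields the evenly spaced
        # thresholds [0.0, 0.25, 0.5, 0.75, 1.0].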
self.true_positives = self.add_variable(
shape=(len(self.thresholds),),
initializer=initializers.Zeros(),
name="true_positives",
)
self.false_positives = self.add_variable(
shape=(len(self.thresholds),),
initializer=initializers.Zeros(),
name="false_positives",
)
self.true_negatives = self.add_variable(
shape=(len(self.thresholds),),
initializer=initializers.Zeros(),
name="true_negatives",
)
self.false_negatives = self.add_variable(
shape=(len(self.thresholds),),
initializer=initializers.Zeros(),
name="false_negatives",
)
def update_state(self, y_true, y_pred, sample_weight=None):
"""Accumulates confusion matrix statistics.
Args:
y_true: The ground truth values.
y_pred: The predicted values.
sample_weight: Optional weighting of each example. Defaults to `1`.
Can be a tensor whose rank is either 0, or the same rank as
`y_true`, and must be broadcastable to `y_true`.
"""
metrics_utils.update_confusion_matrix_variables(
{
metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives, # noqa: E501
metrics_utils.ConfusionMatrix.TRUE_NEGATIVES: self.true_negatives, # noqa: E501
metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives, # noqa: E501
metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives, # noqa: E501
},
y_true,
y_pred,
thresholds=self.thresholds,
thresholds_distributed_evenly=self._thresholds_distributed_evenly,
class_id=self.class_id,
sample_weight=sample_weight,
)
def reset_state(self):
num_thresholds = len(self.thresholds)
self.true_positives.assign(ops.zeros((num_thresholds,)))
self.false_positives.assign(ops.zeros((num_thresholds,)))
self.true_negatives.assign(ops.zeros((num_thresholds,)))
self.false_negatives.assign(ops.zeros((num_thresholds,)))
def get_config(self):
config = {"class_id": self.class_id}
base_config = super().get_config()
return {**base_config, **config}
def _find_max_under_constraint(self, constrained, dependent, predicate):
"""Returns the maximum of dependent_statistic that satisfies the
constraint.
Args:
constrained: Over these values the constraint is specified. A rank-1
tensor.
            dependent: From these values the maximum that satisfies the
constraint is selected. Values in this tensor and in
`constrained` are linked by having the same threshold at each
position, hence this tensor must have the same shape.
predicate: A binary boolean functor to be applied to arguments
`constrained` and `self.value`, e.g. `ops.greater`.
Returns:
            The maximal dependent value, or 0.0 if no value satisfies the
            constraint.
"""
feasible = backend.convert_to_numpy(
ops.nonzero(predicate(constrained, self.value))
)
feasible_exists = ops.greater(ops.size(feasible), 0)
max_dependent = ops.max(ops.take(dependent, feasible), initial=0)
return ops.where(feasible_exists, max_dependent, 0.0)
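# A small numeric sketch (illustrative) of `_find_max_under_constraint`: with
# constrained = specificities = [0.2, 0.6, 0.9], dependent = sensitivities =
# [1.0, 0.7, 0.3], value = 0.5 and predicate = ops.greater_equal, the
# feasible indices are {1, 2}, so the result is max(0.7, 0.3) = 0.7. If no
# threshold satisfied the constraint, the result would be 0.0.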
@keras_export("keras.metrics.SensitivityAtSpecificity")
class SensitivityAtSpecificity(SensitivitySpecificityBase):
"""Computes best sensitivity where specificity is >= specified value.
`Sensitivity` measures the proportion of actual positives that are correctly
identified as such `(tp / (tp + fn))`.
`Specificity` measures the proportion of actual negatives that are correctly
identified as such `(tn / (tn + fp))`.
This metric creates four local variables, `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` that are used to
compute the sensitivity at the given specificity. The threshold for the
given specificity value is computed and used to evaluate the corresponding
sensitivity.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
If `class_id` is specified, we calculate precision by considering only the
entries in the batch for which `class_id` is above the threshold
predictions, and computing the fraction of them for which `class_id` is
indeed a correct label.
For additional information about specificity and sensitivity, see
[the following](https://en.wikipedia.org/wiki/Sensitivity_and_specificity).
Args:
specificity: A scalar value in range `[0, 1]`.
num_thresholds: (Optional) Defaults to 200. The number of thresholds to
use for matching the given specificity.
class_id: (Optional) Integer class ID for which we want binary metrics.
This must be in the half-open interval `[0, num_classes)`, where
`num_classes` is the last dimension of predictions.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = keras.metrics.SensitivityAtSpecificity(0.5)
>>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8])
>>> m.result()
0.5
>>> m.reset_state()
>>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8],
... sample_weight=[1, 1, 2, 2, 1])
>>> m.result()
0.333333
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='binary_crossentropy',
metrics=[keras.metrics.SensitivityAtSpecificity()])
```
"""
def __init__(
self,
specificity,
num_thresholds=200,
class_id=None,
name=None,
dtype=None,
):
if specificity < 0 or specificity > 1:
raise ValueError(
"Argument `specificity` must be in the range [0, 1]. "
f"Received: specificity={specificity}"
)
self.specificity = specificity
self.num_thresholds = num_thresholds
super().__init__(
specificity,
num_thresholds=num_thresholds,
class_id=class_id,
name=name,
dtype=dtype,
)
def result(self):
sensitivities = ops.divide_no_nan(
self.true_positives,
ops.add(self.true_positives, self.false_negatives),
)
specificities = ops.divide_no_nan(
self.true_negatives,
ops.add(self.true_negatives, self.false_positives),
)
return self._find_max_under_constraint(
specificities, sensitivities, ops.greater_equal
)
def get_config(self):
config = {
"num_thresholds": self.num_thresholds,
"specificity": self.specificity,
}
base_config = super().get_config()
return {**base_config, **config}
@keras_export("keras.metrics.SpecificityAtSensitivity")
class SpecificityAtSensitivity(SensitivitySpecificityBase):
"""Computes best specificity where sensitivity is >= specified value.
`Sensitivity` measures the proportion of actual positives that are correctly
identified as such `(tp / (tp + fn))`.
`Specificity` measures the proportion of actual negatives that are correctly
identified as such `(tn / (tn + fp))`.
This metric creates four local variables, `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` that are used to
compute the specificity at the given sensitivity. The threshold for the
given sensitivity value is computed and used to evaluate the corresponding
specificity.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
If `class_id` is specified, we calculate precision by considering only the
entries in the batch for which `class_id` is above the threshold
predictions, and computing the fraction of them for which `class_id` is
indeed a correct label.
For additional information about specificity and sensitivity, see
[the following](https://en.wikipedia.org/wiki/Sensitivity_and_specificity).
Args:
sensitivity: A scalar value in range `[0, 1]`.
num_thresholds: (Optional) Defaults to 200. The number of thresholds to
use for matching the given sensitivity.
class_id: (Optional) Integer class ID for which we want binary metrics.
This must be in the half-open interval `[0, num_classes)`, where
`num_classes` is the last dimension of predictions.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = keras.metrics.SpecificityAtSensitivity(0.5)
>>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8])
>>> m.result()
0.66666667
>>> m.reset_state()
>>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8],
... sample_weight=[1, 1, 2, 2, 2])
>>> m.result()
0.5
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='binary_crossentropy',
metrics=[keras.metrics.SpecificityAtSensitivity()])
```
"""
def __init__(
self,
sensitivity,
num_thresholds=200,
class_id=None,
name=None,
dtype=None,
):
if sensitivity < 0 or sensitivity > 1:
raise ValueError(
"Argument `sensitivity` must be in the range [0, 1]. "
f"Received: sensitivity={sensitivity}"
)
self.sensitivity = sensitivity
self.num_thresholds = num_thresholds
super().__init__(
sensitivity,
num_thresholds=num_thresholds,
class_id=class_id,
name=name,
dtype=dtype,
)
def result(self):
sensitivities = ops.divide_no_nan(
self.true_positives,
ops.add(self.true_positives, self.false_negatives),
)
specificities = ops.divide_no_nan(
self.true_negatives,
ops.add(self.true_negatives, self.false_positives),
)
return self._find_max_under_constraint(
sensitivities, specificities, ops.greater_equal
)
def get_config(self):
config = {
"num_thresholds": self.num_thresholds,
"sensitivity": self.sensitivity,
}
base_config = super().get_config()
return {**base_config, **config}
@keras_export("keras.metrics.PrecisionAtRecall")
class PrecisionAtRecall(SensitivitySpecificityBase):
"""Computes best precision where recall is >= specified value.
This metric creates four local variables, `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` that are used to
compute the precision at the given recall. The threshold for the given
recall value is computed and used to evaluate the corresponding precision.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
If `class_id` is specified, we calculate precision by considering only the
entries in the batch for which `class_id` is above the threshold
predictions, and computing the fraction of them for which `class_id` is
indeed a correct label.
Args:
recall: A scalar value in range `[0, 1]`.
num_thresholds: (Optional) Defaults to 200. The number of thresholds to
use for matching the given recall.
class_id: (Optional) Integer class ID for which we want binary metrics.
This must be in the half-open interval `[0, num_classes)`, where
`num_classes` is the last dimension of predictions.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = keras.metrics.PrecisionAtRecall(0.5)
>>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8])
>>> m.result()
0.5
>>> m.reset_state()
>>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8],
... sample_weight=[2, 2, 2, 1, 1])
>>> m.result()
0.33333333
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='binary_crossentropy',
metrics=[keras.metrics.PrecisionAtRecall(recall=0.8)])
```
"""
def __init__(
self, recall, num_thresholds=200, class_id=None, name=None, dtype=None
):
if recall < 0 or recall > 1:
raise ValueError(
"Argument `recall` must be in the range [0, 1]. "
f"Received: recall={recall}"
)
self.recall = recall
self.num_thresholds = num_thresholds
super().__init__(
value=recall,
num_thresholds=num_thresholds,
class_id=class_id,
name=name,
dtype=dtype,
)
def result(self):
recalls = ops.divide_no_nan(
self.true_positives,
ops.add(self.true_positives, self.false_negatives),
)
precisions = ops.divide_no_nan(
self.true_positives,
ops.add(self.true_positives, self.false_positives),
)
return self._find_max_under_constraint(
recalls, precisions, ops.greater_equal
)
def get_config(self):
config = {"num_thresholds": self.num_thresholds, "recall": self.recall}
base_config = super().get_config()
return {**base_config, **config}
@keras_export("keras.metrics.RecallAtPrecision")
class RecallAtPrecision(SensitivitySpecificityBase):
"""Computes best recall where precision is >= specified value.
For a given score-label-distribution the required precision might not
    be achievable; in this case, 0.0 is returned as recall.
This metric creates four local variables, `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` that are used to
compute the recall at the given precision. The threshold for the given
precision value is computed and used to evaluate the corresponding recall.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
If `class_id` is specified, we calculate precision by considering only the
entries in the batch for which `class_id` is above the threshold
predictions, and computing the fraction of them for which `class_id` is
indeed a correct label.
Args:
precision: A scalar value in range `[0, 1]`.
num_thresholds: (Optional) Defaults to 200. The number of thresholds
to use for matching the given precision.
class_id: (Optional) Integer class ID for which we want binary metrics.
This must be in the half-open interval `[0, num_classes)`, where
`num_classes` is the last dimension of predictions.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = keras.metrics.RecallAtPrecision(0.8)
>>> m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9])
>>> m.result()
0.5
>>> m.reset_state()
>>> m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9],
... sample_weight=[1, 0, 0, 1])
>>> m.result()
1.0
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='binary_crossentropy',
metrics=[keras.metrics.RecallAtPrecision(precision=0.8)])
```
"""
def __init__(
self,
precision,
num_thresholds=200,
class_id=None,
name=None,
dtype=None,
):
if precision < 0 or precision > 1:
raise ValueError(
"Argument `precision` must be in the range [0, 1]. "
f"Received: precision={precision}"
)
self.precision = precision
self.num_thresholds = num_thresholds
super().__init__(
value=precision,
num_thresholds=num_thresholds,
class_id=class_id,
name=name,
dtype=dtype,
)
def result(self):
recalls = ops.divide_no_nan(
self.true_positives,
ops.add(self.true_positives, self.false_negatives),
)
precisions = ops.divide_no_nan(
self.true_positives,
ops.add(self.true_positives, self.false_positives),
)
return self._find_max_under_constraint(
precisions, recalls, ops.greater_equal
)
def get_config(self):
config = {
"num_thresholds": self.num_thresholds,
"precision": self.precision,
}
base_config = super().get_config()
return {**base_config, **config}
@keras_export("keras.metrics.AUC")
class AUC(Metric):
"""Approximates the AUC (Area under the curve) of the ROC or PR curves.
The AUC (Area under the curve) of the ROC (Receiver operating
    characteristic; default) or PR (Precision Recall) curves is a quality
    measure of binary classifiers. Unlike accuracy, and like cross-entropy
losses, ROC-AUC and PR-AUC evaluate all the operational points of a model.
This class approximates AUCs using a Riemann sum. During the metric
    accumulation phase, predictions are accumulated within predefined buckets
by value. The AUC is then computed by interpolating per-bucket averages.
These buckets define the evaluated operational points.
This metric creates four local variables, `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` that are used to
compute the AUC. To discretize the AUC curve, a linearly spaced set of
thresholds is used to compute pairs of recall and precision values. The area
under the ROC-curve is therefore computed using the height of the recall
    values by the false positive rate, while the area under the PR-curve is
    computed using the height of the precision values by the recall.
This value is ultimately returned as `auc`, an idempotent operation that
computes the area under a discretized curve of precision versus recall
values (computed using the aforementioned variables). The `num_thresholds`
variable controls the degree of discretization with larger numbers of
thresholds more closely approximating the true AUC. The quality of the
approximation may vary dramatically depending on `num_thresholds`. The
`thresholds` parameter can be used to manually specify thresholds which
split the predictions more evenly.
    For the best approximation of the real AUC, `predictions` should be
distributed approximately uniformly in the range `[0, 1]` (if
`from_logits=False`). The quality of the AUC approximation may be poor if
this is not the case. Setting `summation_method` to 'minoring' or 'majoring'
    can help quantify the error in the approximation by providing a lower
    or upper bound estimate of the AUC.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Args:
num_thresholds: (Optional) The number of thresholds to
use when discretizing the roc curve. Values must be > 1.
Defaults to `200`.
curve: (Optional) Specifies the name of the curve to be computed,
`'ROC'` (default) or `'PR'` for the Precision-Recall-curve.
summation_method: (Optional) Specifies the [Riemann summation method](
https://en.wikipedia.org/wiki/Riemann_sum) used.
'interpolation' (default) applies mid-point summation scheme for
`ROC`. For PR-AUC, interpolates (true/false) positives but not
the ratio that is precision (see Davis & Goadrich 2006 for
details); 'minoring' applies left summation for increasing
intervals and right summation for decreasing intervals; 'majoring'
does the opposite.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
thresholds: (Optional) A list of floating point values to use as the
thresholds for discretizing the curve. If set, the `num_thresholds`
parameter is ignored. Values should be in `[0, 1]`. Endpoint
thresholds equal to {`-epsilon`, `1+epsilon`} for a small positive
epsilon value will be automatically included with these to correctly
handle predictions equal to exactly 0 or 1.
multi_label: boolean indicating whether multilabel data should be
treated as such, wherein AUC is computed separately for each label
and then averaged across labels, or (when `False`) if the data
should be flattened into a single label before AUC computation. In
the latter case, when multilabel data is passed to AUC, each
label-prediction pair is treated as an individual data point. Should
be set to False for multi-class data.
num_labels: (Optional) The number of labels, used when `multi_label` is
True. If `num_labels` is not specified, then state variables get
created on the first call to `update_state`.
label_weights: (Optional) list, array, or tensor of non-negative weights
used to compute AUCs for multilabel data. When `multi_label` is
True, the weights are applied to the individual label AUCs when they
are averaged to produce the multi-label AUC. When it's False, they
are used to weight the individual label predictions in computing the
confusion matrix on the flattened data. Note that this is unlike
`class_weights` in that `class_weights` weights the example
depending on the value of its label, whereas `label_weights` depends
only on the index of that label before flattening; therefore
`label_weights` should not be used for multi-class data.
from_logits: boolean indicating whether the predictions (`y_pred` in
`update_state`) are probabilities or sigmoid logits. As a rule of thumb,
when using a keras loss, the `from_logits` constructor argument of the
loss should match the AUC `from_logits` constructor argument.
Standalone usage:
>>> m = keras.metrics.AUC(num_thresholds=3)
>>> m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9])
>>> # threshold values are [0 - 1e-7, 0.5, 1 + 1e-7]
>>> # tp = [2, 1, 0], fp = [2, 0, 0], fn = [0, 1, 2], tn = [0, 2, 2]
>>> # tp_rate = recall = [1, 0.5, 0], fp_rate = [1, 0, 0]
>>> # auc = ((((1 + 0.5) / 2) * (1 - 0)) + (((0.5 + 0) / 2) * (0 - 0)))
>>> # = 0.75
>>> m.result()
0.75
>>> m.reset_state()
>>> m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9],
... sample_weight=[1, 0, 0, 1])
>>> m.result()
1.0
Usage with `compile()` API:
```python
# Reports the AUC of a model outputting a probability.
model.compile(optimizer='sgd',
loss=keras.losses.BinaryCrossentropy(),
metrics=[keras.metrics.AUC()])
# Reports the AUC of a model outputting a logit.
model.compile(optimizer='sgd',
loss=keras.losses.BinaryCrossentropy(from_logits=True),
metrics=[keras.metrics.AUC(from_logits=True)])
```
"""
def __init__(
self,
num_thresholds=200,
curve="ROC",
summation_method="interpolation",
name=None,
dtype=None,
thresholds=None,
multi_label=False,
num_labels=None,
label_weights=None,
from_logits=False,
):
# Metric should be maximized during optimization.
self._direction = "up"
# Validate configurations.
if isinstance(curve, metrics_utils.AUCCurve) and curve not in list(
metrics_utils.AUCCurve
):
raise ValueError(
f'Invalid `curve` argument value "{curve}". '
f"Expected one of: {list(metrics_utils.AUCCurve)}"
)
if isinstance(
summation_method, metrics_utils.AUCSummationMethod
) and summation_method not in list(metrics_utils.AUCSummationMethod):
raise ValueError(
"Invalid `summation_method` "
f'argument value "{summation_method}". '
f"Expected one of: {list(metrics_utils.AUCSummationMethod)}"
)
# Update properties.
self._init_from_thresholds = thresholds is not None
if thresholds is not None:
# If specified, use the supplied thresholds.
self.num_thresholds = len(thresholds) + 2
thresholds = sorted(thresholds)
self._thresholds_distributed_evenly = (
metrics_utils.is_evenly_distributed_thresholds(
np.array([0.0] + thresholds + [1.0])
)
)
else:
if num_thresholds <= 1:
raise ValueError(
"Argument `num_thresholds` must be an integer > 1. "
f"Received: num_thresholds={num_thresholds}"
)
# Otherwise, linearly interpolate (num_thresholds - 2) thresholds in
# (0, 1).
self.num_thresholds = num_thresholds
thresholds = [
(i + 1) * 1.0 / (num_thresholds - 1)
for i in range(num_thresholds - 2)
]
self._thresholds_distributed_evenly = True
# Add an endpoint "threshold" below zero and above one for either
# threshold method to account for floating point imprecisions.
self._thresholds = np.array(
[0.0 - backend.epsilon()] + thresholds + [1.0 + backend.epsilon()]
)
if isinstance(curve, metrics_utils.AUCCurve):
self.curve = curve
else:
self.curve = metrics_utils.AUCCurve.from_str(curve)
if isinstance(summation_method, metrics_utils.AUCSummationMethod):
self.summation_method = summation_method
else:
self.summation_method = metrics_utils.AUCSummationMethod.from_str(
summation_method
)
super().__init__(name=name, dtype=dtype)
# Handle multilabel arguments.
self.multi_label = multi_label
self.num_labels = num_labels
if label_weights is not None:
label_weights = ops.array(label_weights, dtype=self.dtype)
self.label_weights = label_weights
else:
self.label_weights = None
self._from_logits = from_logits
self._built = False
if self.multi_label:
if num_labels:
shape = [None, num_labels]
self._build(shape)
else:
if num_labels:
raise ValueError(
"`num_labels` is needed only when `multi_label` is True."
)
self._build(None)
@property
def thresholds(self):
"""The thresholds used for evaluating AUC."""
return list(self._thresholds)
def _build(self, shape):
"""Initialize TP, FP, TN, and FN tensors, given the shape of the
data."""
if self.multi_label:
if len(shape) != 2:
raise ValueError(
"`y_pred` must have rank 2 when `multi_label=True`. "
f"Found rank {len(shape)}. "
f"Full shape received for `y_pred`: {shape}"
)
self._num_labels = shape[1]
variable_shape = [self.num_thresholds, self._num_labels]
else:
variable_shape = [self.num_thresholds]
self._build_input_shape = shape
# Create metric variables
self.true_positives = self.add_variable(
shape=variable_shape,
initializer=initializers.Zeros(),
name="true_positives",
)
self.false_positives = self.add_variable(
shape=variable_shape,
initializer=initializers.Zeros(),
name="false_positives",
)
self.true_negatives = self.add_variable(
shape=variable_shape,
initializer=initializers.Zeros(),
name="true_negatives",
)
self.false_negatives = self.add_variable(
shape=variable_shape,
initializer=initializers.Zeros(),
name="false_negatives",
)
self._built = True
def update_state(self, y_true, y_pred, sample_weight=None):
"""Accumulates confusion matrix statistics.
Args:
y_true: The ground truth values.
y_pred: The predicted values.
sample_weight: Optional weighting of each example. Can
be a tensor whose rank is either 0, or the same rank as
`y_true`, and must be broadcastable to `y_true`. Defaults to
`1`.
"""
if not self._built:
self._build(y_pred.shape)
if self.multi_label or (self.label_weights is not None):
# y_true should have shape (number of examples, number of labels).
shapes = [(y_true, ("N", "L"))]
if self.multi_label:
# TP, TN, FP, and FN should all have shape
# (number of thresholds, number of labels).
shapes.extend(
[
(self.true_positives, ("T", "L")),
(self.true_negatives, ("T", "L")),
(self.false_positives, ("T", "L")),
(self.false_negatives, ("T", "L")),
]
)
if self.label_weights is not None:
# label_weights should be of length equal to the number of
# labels.
shapes.append((self.label_weights, ("L",)))
# Only forward label_weights to update_confusion_matrix_variables when
# multi_label is False. Otherwise the averaging of individual label AUCs
# is handled in AUC.result
label_weights = None if self.multi_label else self.label_weights
if self._from_logits:
y_pred = activations.sigmoid(y_pred)
metrics_utils.update_confusion_matrix_variables(
{
metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives, # noqa: E501
metrics_utils.ConfusionMatrix.TRUE_NEGATIVES: self.true_negatives, # noqa: E501
metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives, # noqa: E501
metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives, # noqa: E501
},
y_true,
y_pred,
self._thresholds,
thresholds_distributed_evenly=self._thresholds_distributed_evenly,
sample_weight=sample_weight,
multi_label=self.multi_label,
label_weights=label_weights,
)
def interpolate_pr_auc(self):
"""Interpolation formula inspired by section 4 of Davis & Goadrich 2006.
https://www.biostat.wisc.edu/~page/rocpr.pdf
Note here we derive & use a closed formula not present in the paper
as follows:
Precision = TP / (TP + FP) = TP / P
Modeling all of TP (true positive), FP (false positive) and their sum
P = TP + FP (predicted positive) as varying linearly within each
interval [A, B] between successive thresholds, we get
Precision slope = dTP / dP
= (TP_B - TP_A) / (P_B - P_A)
= (TP - TP_A) / (P - P_A)
Precision = (TP_A + slope * (P - P_A)) / P
The area within the interval is (slope / total_pos_weight) times
int_A^B{Precision.dP} = int_A^B{(TP_A + slope * (P - P_A)) * dP / P}
int_A^B{Precision.dP} = int_A^B{slope * dP + intercept * dP / P}
where intercept = TP_A - slope * P_A = TP_B - slope * P_B, resulting in
int_A^B{Precision.dP} = TP_B - TP_A + intercept * log(P_B / P_A)
Bringing back the factor (slope / total_pos_weight) we'd put aside, we
get
slope * [dTP + intercept * log(P_B / P_A)] / total_pos_weight
where dTP == TP_B - TP_A.
Note that when P_A == 0 the above calculation simplifies into
int_A^B{Precision.dTP} = int_A^B{slope * dTP}
= slope * (TP_B - TP_A)
which is really equivalent to imputing constant precision throughout the
first bucket having >0 true positives.
Returns:
pr_auc: an approximation of the area under the P-R curve.
"""
dtp = ops.subtract(
self.true_positives[: self.num_thresholds - 1],
self.true_positives[1:],
)
p = ops.add(self.true_positives, self.false_positives)
dp = ops.subtract(p[: self.num_thresholds - 1], p[1:])
prec_slope = ops.divide_no_nan(dtp, ops.maximum(dp, 0))
intercept = ops.subtract(
self.true_positives[1:], ops.multiply(prec_slope, p[1:])
)
safe_p_ratio = ops.where(
ops.logical_and(p[: self.num_thresholds - 1] > 0, p[1:] > 0),
ops.divide_no_nan(
p[: self.num_thresholds - 1], ops.maximum(p[1:], 0)
),
ops.ones_like(p[1:]),
)
pr_auc_increment = ops.divide_no_nan(
ops.multiply(
prec_slope,
(ops.add(dtp, ops.multiply(intercept, ops.log(safe_p_ratio)))),
),
ops.maximum(
ops.add(self.true_positives[1:], self.false_negatives[1:]), 0
),
)
if self.multi_label:
by_label_auc = ops.sum(pr_auc_increment, axis=0)
if self.label_weights is None:
# Evenly weighted average of the label AUCs.
return ops.mean(by_label_auc)
else:
# Weighted average of the label AUCs.
return ops.divide_no_nan(
ops.sum(ops.multiply(by_label_auc, self.label_weights)),
ops.sum(self.label_weights),
)
else:
return ops.sum(pr_auc_increment)
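    # Note: this interpolated PR-AUC path is only taken by `result()` below
    # when the metric is configured as, for example (illustrative):
    #
    #   m = AUC(curve="PR", summation_method="interpolation")
    #
    # "interpolation" is also the default `summation_method`.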
def result(self):
if (
self.curve == metrics_utils.AUCCurve.PR
and self.summation_method
== metrics_utils.AUCSummationMethod.INTERPOLATION
):
# This use case is different and is handled separately.
return self.interpolate_pr_auc()
# Set `x` and `y` values for the curves based on `curve` config.
recall = ops.divide_no_nan(
self.true_positives,
ops.add(self.true_positives, self.false_negatives),
)
if self.curve == metrics_utils.AUCCurve.ROC:
fp_rate = ops.divide_no_nan(
self.false_positives,
ops.add(self.false_positives, self.true_negatives),
)
x = fp_rate
y = recall
else: # curve == 'PR'.
precision = ops.divide_no_nan(
self.true_positives,
ops.add(self.true_positives, self.false_positives),
)
x = recall
y = precision
# Find the rectangle heights based on `summation_method`.
if (
self.summation_method
== metrics_utils.AUCSummationMethod.INTERPOLATION
):
# Note: the case ('PR', 'interpolation') has been handled above.
heights = ops.divide(
ops.add(y[: self.num_thresholds - 1], y[1:]), 2.0
)
elif self.summation_method == metrics_utils.AUCSummationMethod.MINORING:
heights = ops.minimum(y[: self.num_thresholds - 1], y[1:])
# self.summation_method = metrics_utils.AUCSummationMethod.MAJORING:
else:
heights = ops.maximum(y[: self.num_thresholds - 1], y[1:])
# Sum up the areas of all the rectangles.
riemann_terms = ops.multiply(
ops.subtract(x[: self.num_thresholds - 1], x[1:]), heights
)
if self.multi_label:
by_label_auc = ops.sum(riemann_terms, axis=0)
if self.label_weights is None:
# Unweighted average of the label AUCs.
return ops.mean(by_label_auc)
else:
# Weighted average of the label AUCs.
return ops.divide_no_nan(
ops.sum(ops.multiply(by_label_auc, self.label_weights)),
ops.sum(self.label_weights),
)
else:
return ops.sum(riemann_terms)
def reset_state(self):
if self._built:
if self.multi_label:
variable_shape = (self.num_thresholds, self._num_labels)
else:
variable_shape = (self.num_thresholds,)
self.true_positives.assign(ops.zeros(variable_shape))
self.false_positives.assign(ops.zeros(variable_shape))
self.true_negatives.assign(ops.zeros(variable_shape))
self.false_negatives.assign(ops.zeros(variable_shape))
def get_config(self):
label_weights = self.label_weights
config = {
"num_thresholds": self.num_thresholds,
"curve": self.curve.value,
"summation_method": self.summation_method.value,
"multi_label": self.multi_label,
"num_labels": self.num_labels,
"label_weights": label_weights,
"from_logits": self._from_logits,
}
# optimization to avoid serializing a large number of generated
# thresholds
if self._init_from_thresholds:
# We remove the endpoint thresholds as an inverse of how the
# thresholds were initialized. This ensures that a metric
# initialized from this config has the same thresholds.
config["thresholds"] = self.thresholds[1:-1]
base_config = super().get_config()
return {**base_config, **config}
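# A minimal multi-label usage sketch (illustrative): with `multi_label=True`,
# `y_pred` must have rank 2 and one AUC is computed per label, then averaged
# (optionally weighted by `label_weights`):
#
#   m = AUC(multi_label=True, num_labels=2)
#   m.update_state([[0, 1], [1, 0]], [[0.2, 0.8], [0.7, 0.3]])
#   m.result()  # mean of the two per-label AUCs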
| keras/keras/metrics/confusion_metrics.py/0 | {
"file_path": "keras/keras/metrics/confusion_metrics.py",
"repo_id": "keras",
"token_count": 26845
} | 189 |
import numpy as np
from absl.testing import parameterized
from keras import testing
from keras.metrics import regression_metrics as metrics
class MeanSquaredErrorTest(testing.TestCase):
def test_config(self):
# TODO
pass
def test_unweighted(self):
mse_obj = metrics.MeanSquaredError()
y_true = np.array(
[[0, 1, 0, 1, 0], [0, 0, 1, 1, 1], [1, 1, 1, 1, 0], [0, 0, 0, 0, 1]]
)
y_pred = np.array(
[[0, 0, 1, 1, 0], [1, 1, 1, 1, 1], [0, 1, 0, 1, 0], [1, 1, 1, 1, 1]]
)
mse_obj.update_state(y_true, y_pred)
result = mse_obj.result()
self.assertAllClose(0.5, result, atol=1e-5)
def test_weighted(self):
mse_obj = metrics.MeanSquaredError()
y_true = np.array(
[[0, 1, 0, 1, 0], [0, 0, 1, 1, 1], [1, 1, 1, 1, 0], [0, 0, 0, 0, 1]]
)
y_pred = np.array(
[[0, 0, 1, 1, 0], [1, 1, 1, 1, 1], [0, 1, 0, 1, 0], [1, 1, 1, 1, 1]]
)
sample_weight = np.array([1.0, 1.5, 2.0, 2.5])
result = mse_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(0.54285, result, atol=1e-5)
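        # Where 0.54285 comes from: the per-sample MSE values are
        # [0.4, 0.4, 0.4, 0.8], so the weighted mean is
        # (0.4*1.0 + 0.4*1.5 + 0.4*2.0 + 0.8*2.5) / (1.0 + 1.5 + 2.0 + 2.5)
        # = 3.8 / 7.0 ~= 0.542857.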
class CosineSimilarityTest(testing.TestCase):
def l2_norm(self, x, axis):
epsilon = 1e-12
square_sum = np.sum(np.square(x), axis=axis, keepdims=True)
x_inv_norm = 1 / np.sqrt(np.maximum(square_sum, epsilon))
return np.multiply(x, x_inv_norm)
def setup(self, axis=1):
self.np_y_true = np.asarray([[1, 9, 2], [-5, -2, 6]], dtype=np.float32)
self.np_y_pred = np.asarray([[4, 8, 12], [8, 1, 3]], dtype=np.float32)
y_true = self.l2_norm(self.np_y_true, axis)
y_pred = self.l2_norm(self.np_y_pred, axis)
self.expected_loss = np.sum(np.multiply(y_true, y_pred), axis=(axis,))
self.y_true = self.np_y_true
self.y_pred = self.np_y_pred
def test_config(self):
cosine_obj = metrics.CosineSimilarity(
axis=2, name="my_cos", dtype="int32"
)
self.assertEqual(cosine_obj.name, "my_cos")
self.assertEqual(cosine_obj.dtype, "int32")
# Check save and restore config
cosine_obj2 = metrics.CosineSimilarity.from_config(
cosine_obj.get_config()
)
self.assertEqual(cosine_obj2.name, "my_cos")
self.assertEqual(cosine_obj2._dtype, "int32")
def test_unweighted(self):
self.setup()
cosine_obj = metrics.CosineSimilarity()
loss = cosine_obj(self.y_true, self.y_pred)
expected_loss = np.mean(self.expected_loss)
self.assertAlmostEqual(loss, expected_loss, 3)
def test_weighted(self):
self.setup()
cosine_obj = metrics.CosineSimilarity()
sample_weight = np.asarray([1.2, 3.4])
loss = cosine_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
expected_loss = np.sum(self.expected_loss * sample_weight) / np.sum(
sample_weight
)
self.assertAlmostEqual(loss, expected_loss, 3)
def test_axis(self):
self.setup(axis=1)
cosine_obj = metrics.CosineSimilarity(axis=1)
loss = cosine_obj(self.y_true, self.y_pred)
expected_loss = np.mean(self.expected_loss)
self.assertAlmostEqual(loss, expected_loss, 3)
class MeanAbsoluteErrorTest(testing.TestCase):
def test_config(self):
mae_obj = metrics.MeanAbsoluteError(name="my_mae", dtype="int32")
self.assertEqual(mae_obj.name, "my_mae")
self.assertEqual(mae_obj._dtype, "int32")
# Check save and restore config
mae_obj2 = metrics.MeanAbsoluteError.from_config(mae_obj.get_config())
self.assertEqual(mae_obj2.name, "my_mae")
self.assertEqual(mae_obj2._dtype, "int32")
def test_unweighted(self):
mae_obj = metrics.MeanAbsoluteError()
y_true = np.array(
[[0, 1, 0, 1, 0], [0, 0, 1, 1, 1], [1, 1, 1, 1, 0], [0, 0, 0, 0, 1]]
)
y_pred = np.array(
[[0, 0, 1, 1, 0], [1, 1, 1, 1, 1], [0, 1, 0, 1, 0], [1, 1, 1, 1, 1]]
)
mae_obj.update_state(y_true, y_pred)
result = mae_obj.result()
self.assertAllClose(0.5, result, atol=1e-5)
def test_weighted(self):
mae_obj = metrics.MeanAbsoluteError()
y_true = np.array(
[[0, 1, 0, 1, 0], [0, 0, 1, 1, 1], [1, 1, 1, 1, 0], [0, 0, 0, 0, 1]]
)
y_pred = np.array(
[[0, 0, 1, 1, 0], [1, 1, 1, 1, 1], [0, 1, 0, 1, 0], [1, 1, 1, 1, 1]]
)
sample_weight = np.array([1.0, 1.5, 2.0, 2.5])
result = mae_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(0.54285, result, atol=1e-5)
class MeanAbsolutePercentageErrorTest(testing.TestCase):
def test_config(self):
mape_obj = metrics.MeanAbsolutePercentageError(
name="my_mape", dtype="int32"
)
self.assertEqual(mape_obj.name, "my_mape")
self.assertEqual(mape_obj._dtype, "int32")
# Check save and restore config
mape_obj2 = metrics.MeanAbsolutePercentageError.from_config(
mape_obj.get_config()
)
self.assertEqual(mape_obj2.name, "my_mape")
self.assertEqual(mape_obj2._dtype, "int32")
def test_unweighted(self):
mape_obj = metrics.MeanAbsolutePercentageError()
y_true = np.array(
[[0, 1, 0, 1, 0], [0, 0, 1, 1, 1], [1, 1, 1, 1, 0], [0, 0, 0, 0, 1]]
)
y_pred = np.array(
[
[0, 0, 1, 1, 0],
[1, 1, 1, 1, 1],
[0, 1, 0, 1, 0],
[1, 1, 1, 1, 1],
],
dtype="float32",
)
result = mape_obj(y_true, y_pred)
self.assertAllClose(35e7, result, atol=1e-5)
def test_weighted(self):
mape_obj = metrics.MeanAbsolutePercentageError()
y_true = np.array(
[[0, 1, 0, 1, 0], [0, 0, 1, 1, 1], [1, 1, 1, 1, 0], [0, 0, 0, 0, 1]]
)
y_pred = np.array(
[
[0, 0, 1, 1, 0],
[1, 1, 1, 1, 1],
[0, 1, 0, 1, 0],
[1, 1, 1, 1, 1],
],
dtype="float32",
)
sample_weight = np.array([1.0, 1.5, 2.0, 2.5])
result = mape_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(40e7, result, atol=1e-5)
class MeanSquaredLogarithmicErrorTest(testing.TestCase):
def test_config(self):
msle_obj = metrics.MeanSquaredLogarithmicError(
name="my_msle", dtype="int32"
)
self.assertEqual(msle_obj.name, "my_msle")
self.assertEqual(msle_obj._dtype, "int32")
# Check save and restore config
msle_obj2 = metrics.MeanSquaredLogarithmicError.from_config(
msle_obj.get_config()
)
self.assertEqual(msle_obj2.name, "my_msle")
self.assertEqual(msle_obj2._dtype, "int32")
def test_unweighted(self):
msle_obj = metrics.MeanSquaredLogarithmicError()
y_true = np.array(
[[0, 1, 0, 1, 0], [0, 0, 1, 1, 1], [1, 1, 1, 1, 0], [0, 0, 0, 0, 1]]
)
y_pred = np.array(
[[0, 0, 1, 1, 0], [1, 1, 1, 1, 1], [0, 1, 0, 1, 0], [1, 1, 1, 1, 1]]
)
msle_obj.update_state(y_true, y_pred)
result = msle_obj.result()
self.assertAllClose(0.24022, result, atol=1e-5)
def test_weighted(self):
msle_obj = metrics.MeanSquaredLogarithmicError()
y_true = np.array(
[[0, 1, 0, 1, 0], [0, 0, 1, 1, 1], [1, 1, 1, 1, 0], [0, 0, 0, 0, 1]]
)
y_pred = np.array(
[[0, 0, 1, 1, 0], [1, 1, 1, 1, 1], [0, 1, 0, 1, 0], [1, 1, 1, 1, 1]]
)
sample_weight = np.array([1.0, 1.5, 2.0, 2.5])
result = msle_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(0.26082, result, atol=1e-5)
class RootMeanSquaredErrorTest(testing.TestCase):
def test_config(self):
rmse_obj = metrics.RootMeanSquaredError(name="rmse", dtype="int32")
self.assertEqual(rmse_obj.name, "rmse")
self.assertEqual(rmse_obj._dtype, "int32")
rmse_obj2 = metrics.RootMeanSquaredError.from_config(
rmse_obj.get_config()
)
self.assertEqual(rmse_obj2.name, "rmse")
self.assertEqual(rmse_obj2._dtype, "int32")
def test_unweighted(self):
rmse_obj = metrics.RootMeanSquaredError()
y_true = np.array([2, 4, 6])
y_pred = np.array([1, 3, 2])
rmse_obj.update_state(y_true, y_pred)
result = rmse_obj.result()
# error = [-1, -1, -4], square(error) = [1, 1, 16], mean = 18/3 = 6
self.assertAllClose(np.sqrt(6), result, atol=1e-3)
def test_weighted(self):
rmse_obj = metrics.RootMeanSquaredError()
        y_true = np.array([2, 4, 6, 8])
        y_pred = np.array([1, 3, 2, 3])
        sample_weight = np.array([0, 1, 0, 1])
        result = rmse_obj(y_true, y_pred, sample_weight=sample_weight)
        # error = [-1, -1, -4, -5], weighted squared errors = [0, 1, 0, 25],
        # weighted mean = 26 / 2 = 13
        self.assertAllClose(np.sqrt(13), result, atol=1e-3)
class LogCoshErrorTest(testing.TestCase):
def setup(self):
y_true = np.asarray([[1, 9, 2], [-5, -2, 6]], dtype=np.float32)
y_pred = np.asarray([[4, 8, 12], [8, 1, 3]], dtype=np.float32)
self.batch_size = 6
error = y_pred - y_true
self.expected_results = np.log((np.exp(error) + np.exp(-error)) / 2)
self.y_pred = y_pred
self.y_true = y_true
def test_config(self):
logcosh_obj = metrics.LogCoshError(name="logcosh", dtype="int32")
self.assertEqual(logcosh_obj.name, "logcosh")
self.assertEqual(logcosh_obj._dtype, "int32")
def test_unweighted(self):
self.setup()
logcosh_obj = metrics.LogCoshError()
logcosh_obj.update_state(self.y_true, self.y_pred)
result = logcosh_obj.result()
expected_result = np.sum(self.expected_results) / self.batch_size
self.assertAllClose(result, expected_result, atol=1e-3)
def test_weighted(self):
self.setup()
logcosh_obj = metrics.LogCoshError(dtype="float32")
sample_weight = np.array([[1.2], [3.4]])
result = logcosh_obj(
self.y_true, self.y_pred, sample_weight=sample_weight
)
sample_weight = np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape(
(2, 3)
)
expected_result = np.multiply(self.expected_results, sample_weight)
expected_result = np.sum(expected_result) / np.sum(sample_weight)
self.assertAllClose(result, expected_result, atol=1e-3)
class R2ScoreTest(parameterized.TestCase, testing.TestCase):
def _run_test(
self,
y_true,
y_pred,
sample_weights,
class_aggregation,
num_regressors,
reference_result,
):
r2 = metrics.R2Score(class_aggregation, num_regressors, dtype="float32")
r2.update_state(y_true, y_pred, sample_weights)
result = r2.result()
self.assertAllClose(result, reference_result, atol=1e-6)
def test_config(self):
r2_obj = metrics.R2Score(
class_aggregation=None, num_regressors=2, dtype="float32"
)
self.assertEqual(r2_obj.class_aggregation, None)
self.assertEqual(r2_obj.num_regressors, 2)
self.assertEqual(r2_obj.dtype, "float32")
# Check save and restore config
r2_obj2 = metrics.R2Score.from_config(r2_obj.get_config())
self.assertEqual(r2_obj2.class_aggregation, None)
self.assertEqual(r2_obj2.num_regressors, 2)
self.assertEqual(r2_obj2.dtype, "float32")
@parameterized.parameters(
# class_aggregation, num_regressors, result
(None, 0, [0.37, -1.295, 0.565]),
("uniform_average", 0, -0.12),
("variance_weighted_average", 0, -0.12),
)
def test_r2_sklearn_comparison(
self, class_aggregation, num_regressors, result
):
y_true = [[0.0, 0.0, 1.0], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0]]
y_pred = [[0.4, 0.5, 0.6], [0.1, 0.2, 0.3], [0.5, 0.8, 0.2]]
self._run_test(
y_true,
y_pred,
None,
class_aggregation=class_aggregation,
num_regressors=num_regressors,
reference_result=result,
)
@parameterized.parameters(
# class_aggregation, num_regressors, result
(None, 0, [0.17305559, -8.836666, -0.521]),
(None, 1, [0.054920673, -10.241904, -0.7382858]),
(None, 2, [-0.10259259, -12.115555, -1.0280001]),
("uniform_average", 0, -3.0615367889404297),
("uniform_average", 1, -3.641756534576416),
("uniform_average", 2, -4.415382385253906),
("variance_weighted_average", 0, -1.3710224628448486),
("variance_weighted_average", 1, -1.7097399234771729),
("variance_weighted_average", 2, -2.161363363265991),
)
def test_r2_tfa_comparison(self, class_aggregation, num_regressors, result):
y_true = [[0.0, 0.0, 1.0], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0]]
y_pred = [[0.4, 0.9, 1.6], [0.1, 1.2, 0.6], [1.5, 0.8, 0.6]]
sample_weights = [0.8, 0.1, 0.4]
self._run_test(
y_true,
y_pred,
sample_weights,
class_aggregation=class_aggregation,
num_regressors=num_regressors,
reference_result=result,
)
def test_errors(self):
# Bad class_aggregation value
with self.assertRaisesRegex(
ValueError, "Invalid value for argument `class_aggregation`"
):
metrics.R2Score(class_aggregation="wrong")
# Bad num_regressors value
with self.assertRaisesRegex(
ValueError, "Invalid value for argument `num_regressors`"
):
metrics.R2Score(num_regressors=-1)
# Bad input shape
with self.assertRaisesRegex(ValueError, "expects 2D inputs with shape"):
r2 = metrics.R2Score()
r2.update_state([0.0, 1.0], [0.0, 1.0])
| keras/keras/metrics/regression_metrics_test.py/0 | {
"file_path": "keras/keras/metrics/regression_metrics_test.py",
"repo_id": "keras",
"token_count": 7428
} | 190 |
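# Editorial note (not part of the test file above): the 0.5 and 0.54285
# expectations in the MSE/MAE tests follow from a per-sample mean over the
# last axis and then a (sample-weighted) mean over samples. Standalone check:
import numpy as np

y_true = np.array([[0, 1, 0, 1, 0], [0, 0, 1, 1, 1],
                   [1, 1, 1, 1, 0], [0, 0, 0, 0, 1]], dtype=float)
y_pred = np.array([[0, 0, 1, 1, 0], [1, 1, 1, 1, 1],
                   [0, 1, 0, 1, 0], [1, 1, 1, 1, 1]], dtype=float)
per_sample = np.mean((y_true - y_pred) ** 2, axis=-1)  # [0.4, 0.4, 0.4, 0.8]
print(per_sample.mean())  # 0.5 -> the unweighted expectation
weights = np.array([1.0, 1.5, 2.0, 2.5])
print(np.sum(per_sample * weights) / np.sum(weights))  # ~0.542857 (weighted)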
import numpy as np
from keras import testing
from keras.backend.common import keras_tensor
from keras.ops import function
from keras.ops import numpy as knp
class FunctionTest(testing.TestCase):
def test_define_and_call(self):
x1 = keras_tensor.KerasTensor((2, 3))
x2 = keras_tensor.KerasTensor((2, 3))
x = knp.add(x1, x2)
y1 = x * 3
y2 = x**2
fn = function.Function(
inputs=[x1, x2], outputs=[y1, y2], name="test_function"
)
self.assertEqual(fn.name, "test_function")
# Eager call
y_val = fn([np.ones((2, 3)), np.ones((2, 3))])
self.assertIsInstance(y_val, list)
self.assertAllClose(y_val[0], np.ones((2, 3)) * 6)
self.assertAllClose(y_val[1], np.ones((2, 3)) * 4)
# Symbolic call
x1_alt = keras_tensor.KerasTensor((2, 3))
x2_alt = keras_tensor.KerasTensor((2, 3))
y_val = fn([x1_alt, x2_alt])
self.assertIsInstance(y_val[0], keras_tensor.KerasTensor)
self.assertEqual(y_val[0].shape, (2, 3))
self.assertIsInstance(y_val[1], keras_tensor.KerasTensor)
self.assertEqual(y_val[1].shape, (2, 3))
# Recursion
fn = function.Function(inputs=[x1_alt, x2_alt], outputs=y_val)
y_val = fn([np.ones((2, 3)), np.ones((2, 3))])
self.assertIsInstance(y_val, list)
self.assertAllClose(y_val[0], np.ones((2, 3)) * 6)
self.assertAllClose(y_val[1], np.ones((2, 3)) * 4)
def test_dynamic_shape_inference(self):
x = keras_tensor.KerasTensor((None, 3))
y = x**2
fn = function.Function(x, y)
# Test with compute_output_spec
out = fn.compute_output_spec(keras_tensor.KerasTensor((4, 3)))
self.assertIsInstance(out, keras_tensor.KerasTensor)
self.assertEqual(out.shape, (4, 3))
# Test with call
out = fn(keras_tensor.KerasTensor((4, 3)))
self.assertIsInstance(out, keras_tensor.KerasTensor)
self.assertEqual(out.shape, (4, 3))
def test_dict_io(self):
x1 = keras_tensor.KerasTensor((2, 3))
x2 = keras_tensor.KerasTensor((2, 3))
x = knp.add(x1, x2)
y1 = x * 3
y2 = x**2
fn = function.Function(
inputs={"x1": x1, "x2": x2}, outputs={"y1": y1, "y2": y2}
)
# Eager call
y_val = fn({"x1": np.ones((2, 3)), "x2": np.ones((2, 3))})
self.assertIsInstance(y_val, dict)
self.assertAllClose(y_val["y1"], np.ones((2, 3)) * 6)
self.assertAllClose(y_val["y2"], np.ones((2, 3)) * 4)
# Symbolic call
x1_alt = keras_tensor.KerasTensor((2, 3))
x2_alt = keras_tensor.KerasTensor((2, 3))
y_val = fn({"x1": x1_alt, "x2": x2_alt})
self.assertIsInstance(y_val["y1"], keras_tensor.KerasTensor)
self.assertEqual(y_val["y1"].shape, (2, 3))
self.assertIsInstance(y_val["y2"], keras_tensor.KerasTensor)
self.assertEqual(y_val["y2"].shape, (2, 3))
def test_invalid_inputs_error(self):
x1 = keras_tensor.KerasTensor((2, 3))
x2 = keras_tensor.KerasTensor((2, 3))
x = knp.add(x1, x2)
y1 = x * 3
y2 = x**2
fn = function.Function(
inputs=[x1, x2], outputs=[y1, y2], name="test_function"
)
self.assertEqual(fn.name, "test_function")
# Bad structure
with self.assertRaisesRegex(ValueError, "invalid input structure"):
_ = fn(np.ones((2, 3)))
# Bad rank
with self.assertRaisesRegex(ValueError, "incompatible inputs"):
_ = fn([np.ones((2, 3, 3)), np.ones((2, 3))])
# Bad shape
with self.assertRaisesRegex(ValueError, "incompatible inputs"):
_ = fn([np.ones((4, 3)), np.ones((2, 3))])
def test_graph_disconnected_error(self):
# TODO
pass
def test_serialization(self):
# TODO
pass
| keras/keras/ops/function_test.py/0 | {
"file_path": "keras/keras/ops/function_test.py",
"repo_id": "keras",
"token_count": 2034
} | 191 |
from keras import backend
from keras import ops
from keras import testing
from keras.layers.core import input_layer
from keras.ops import operation_utils
class OperationUtilsTest(testing.TestCase):
def test_get_source_inputs(self):
x1 = backend.KerasTensor(shape=(2,))
x2 = backend.KerasTensor(shape=(2,))
x = x1 + x2
x += 2
x = ops.square(x)
self.assertEqual(operation_utils.get_source_inputs(x), [x1, x2])
def test_get_source_inputs_return_input_tensor(self):
inputs = input_layer.Input(shape=(10,))
self.assertIs(operation_utils.get_source_inputs(inputs)[0], inputs)
def test_compute_pooling_output_shape(self):
input_shape = (1, 4, 4, 1)
pool_size = (2, 2)
strides = (2, 2)
output_shape = operation_utils.compute_pooling_output_shape(
input_shape, pool_size, strides
)
expected_output_shape = (1, 2, 2, 1)
self.assertEqual(output_shape, expected_output_shape)
def test_compute_pooling_output_shape_with_none(self):
input_shape = (None, 4, 4, 1)
pool_size = (2, 2)
strides = (2, 2)
output_shape = operation_utils.compute_pooling_output_shape(
input_shape, pool_size, strides
)
expected_output_shape = (None, 2, 2, 1)
self.assertEqual(output_shape, expected_output_shape)
def test_compute_pooling_output_shape_valid_padding(self):
input_shape = (1, 4, 4, 1)
pool_size = (2, 2)
strides = (2, 2)
output_shape = operation_utils.compute_pooling_output_shape(
input_shape, pool_size, strides, padding="valid"
)
self.assertEqual(output_shape, (1, 2, 2, 1))
def test_compute_pooling_output_shape_channels_last(self):
input_shape = (1, 4, 4, 3)
pool_size = (2, 2)
strides = (2, 2)
output_shape = operation_utils.compute_pooling_output_shape(
input_shape,
pool_size,
strides,
padding="valid",
data_format="channels_last",
)
self.assertEqual(output_shape, (1, 2, 2, 3))
def test_compute_pooling_output_shape_same_padding_stride1(self):
input_shape = (1, 4, 4, 3)
pool_size = (2, 2)
strides = (1, 1)
output_shape = operation_utils.compute_pooling_output_shape(
input_shape,
pool_size,
strides,
padding="same",
data_format="channels_last",
)
self.assertEqual(output_shape, (1, 4, 4, 3))
def test_compute_conv_output_shape(self):
input_shape = (1, 4, 4, 1)
filters = 1
kernel_size = (3, 3)
strides = (1, 1)
output_shape = operation_utils.compute_conv_output_shape(
input_shape, filters, kernel_size, strides
)
expected_output_shape = (1, 2, 2, 1)
self.assertEqual(output_shape, expected_output_shape)
def test_compute_conv_output_shape_with_none(self):
input_shape = (None, 4, 4, 1)
kernel_size = (3, 3)
filters = 1
strides = (1, 1)
output_shape = operation_utils.compute_conv_output_shape(
input_shape, filters, kernel_size, strides
)
expected_output_shape = (None, 2, 2, 1)
self.assertEqual(output_shape, expected_output_shape)
def test_compute_conv_output_shape_valid_padding(self):
input_shape = (1, 4, 4, 1)
kernel_size = (3, 3)
filters = 1
strides = (2, 2)
output_shape = operation_utils.compute_conv_output_shape(
input_shape, filters, kernel_size, strides, padding="valid"
)
self.assertEqual(output_shape, (1, 1, 1, 1))
def test_compute_conv_output_shape_channels_last(self):
input_shape = (1, 4, 4, 3)
kernel_size = (3, 3)
filters = 3
strides = (2, 2)
output_shape = operation_utils.compute_conv_output_shape(
input_shape,
filters,
kernel_size,
strides,
padding="valid",
data_format="channels_last",
)
self.assertEqual(output_shape, (1, 1, 1, 3))
def test_compute_conv_output_shape_same_padding_stride1(self):
input_shape = (1, 4, 4, 3)
kernel_size = (3, 3)
filters = 3
strides = (1, 1)
output_shape = operation_utils.compute_conv_output_shape(
input_shape,
filters,
kernel_size,
strides,
padding="same",
data_format="channels_last",
)
self.assertEqual(output_shape, (1, 4, 4, 3))
def test_compute_reshape_output_shape(self):
input_shape = (1, 4, 4, 1)
target_shape = (16, 1)
output_shape = operation_utils.compute_reshape_output_shape(
input_shape, newshape=target_shape, newshape_arg_name="New shape"
)
self.assertEqual(output_shape, target_shape)
def test_reduce_shape_no_axes_no_keepdims(self):
input_shape = (1, 4, 4, 1)
output_shape = operation_utils.reduce_shape(input_shape)
expected_output_shape = ()
self.assertEqual(output_shape, expected_output_shape)
def test_reduce_shape_no_axes_with_keepdims(self):
input_shape = (1, 4, 4, 1)
output_shape = operation_utils.reduce_shape(input_shape, keepdims=True)
expected_output_shape = (1, 1, 1, 1)
self.assertEqual(output_shape, expected_output_shape)
def test_reduce_shape_single_axis_no_keepdims(self):
input_shape = (1, 4, 4, 1)
axes = [1]
output_shape = operation_utils.reduce_shape(input_shape, axes)
expected_output_shape = (1, 4, 1)
self.assertEqual(output_shape, expected_output_shape)
def test_reduce_shape_single_axis_with_keepdims(self):
input_shape = (1, 4, 4, 1)
axes = [1]
output_shape = operation_utils.reduce_shape(
input_shape, axes, keepdims=True
)
expected_output_shape = (1, 1, 4, 1)
self.assertEqual(output_shape, expected_output_shape)
def test_reduce_shape_multiple_axes_no_keepdims(self):
input_shape = (1, 4, 4, 1)
axes = [1, 2]
output_shape = operation_utils.reduce_shape(input_shape, axes)
expected_output_shape = (1, 1)
self.assertEqual(output_shape, expected_output_shape)
def test_reduce_shape_out_of_order_axes_no_keepdims(self):
input_shape = (1, 4, 4, 1)
axes = [2, 1]
output_shape = operation_utils.reduce_shape(input_shape, axes)
expected_output_shape = (1, 1)
self.assertEqual(output_shape, expected_output_shape)
| keras/keras/ops/operation_utils_test.py/0 | {
"file_path": "keras/keras/ops/operation_utils_test.py",
"repo_id": "keras",
"token_count": 3213
} | 192 |
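# Editorial note (not part of the test file above): the expected shapes in
# these tests follow the standard output-length rule for convolution and
# pooling. A minimal sketch of that rule, checked against the cases above:
import math


def out_len(n, window, stride, padding="valid"):
    """Output length along one spatial dimension."""
    if padding == "valid":
        return (n - window) // stride + 1
    return math.ceil(n / stride)  # "same" padding


assert out_len(4, 3, 1) == 2          # conv, valid, stride 1 -> (1, 2, 2, C)
assert out_len(4, 3, 2) == 1          # conv, valid, stride 2 -> (1, 1, 1, C)
assert out_len(4, 3, 1, "same") == 4  # conv, same, stride 1  -> (1, 4, 4, C)
assert out_len(4, 2, 2) == 2          # pooling, valid, stride 2
assert out_len(4, 2, 1, "same") == 4  # pooling, same, stride 1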
import re
import warnings
import numpy as np
from keras import backend
from keras import initializers
from keras import ops
from keras.optimizers.schedules import learning_rate_schedule
from keras.saving import serialization_lib
from keras.utils import tracking
from keras.utils.naming import auto_name
class BaseOptimizer:
def __init__(
self,
learning_rate,
weight_decay=None,
clipnorm=None,
clipvalue=None,
global_clipnorm=None,
use_ema=False,
ema_momentum=0.99,
ema_overwrite_frequency=None,
loss_scale_factor=None,
gradient_accumulation_steps=None,
name=None,
**kwargs,
):
self._lock = False
if kwargs.pop("decay", None) is not None:
warnings.warn(
"Argument `decay` is no longer supported and will be ignored."
)
if kwargs:
raise ValueError(f"Argument(s) not recognized: {kwargs}")
if name is None:
name = auto_name(self.__class__.__name__)
self.name = name
self.weight_decay = weight_decay
self.clipnorm = clipnorm
self.global_clipnorm = global_clipnorm
self.clipvalue = clipvalue
self.use_ema = use_ema
self.loss_scale_factor = loss_scale_factor
self.gradient_accumulation_steps = gradient_accumulation_steps
if gradient_accumulation_steps:
            if gradient_accumulation_steps < 2:
raise ValueError(
"`gradient_accumulation_steps` must be an integer >= 2. "
"Received: gradient_accumulation_steps="
f"{gradient_accumulation_steps}"
)
if use_ema:
# Verify the arguments related to EMA.
if ema_momentum > 1 or ema_momentum < 0:
raise ValueError(
"`ema_momentum` must be in the range [0, 1]. "
f"Received: ema_momentum={ema_momentum}"
)
if ema_overwrite_frequency and (
not isinstance(ema_overwrite_frequency, int)
or ema_overwrite_frequency < 1
):
raise ValueError(
"`ema_overwrite_frequency` must be an integer >= 1 or "
"None. Received: ema_overwrite_frequency="
f"{ema_overwrite_frequency}"
)
self.ema_momentum = ema_momentum
self.ema_overwrite_frequency = ema_overwrite_frequency
if self.clipnorm is not None and self.global_clipnorm is not None:
raise ValueError(
"Only one of `clipnorm` and `global_clipnorm` can "
f"be set. Received: clipnorm={self.clipnorm}, "
f"global_clipnorm={self.global_clipnorm}"
)
self.built = False
# Set up variable tracking.
self._variables = []
self._trainable_variables = []
self._tracker = tracking.Tracker(
{
"variables": (
lambda x: isinstance(x, backend.Variable),
self._variables,
),
}
)
self._trainable_variables_indices = {}
# Create iteration variable
# Note: dtype="int" will resolve to int32 in JAX
# (since int64 is disallowed in JAX) and to int64 in TF.
with backend.name_scope(self.name, caller=self):
iterations = backend.Variable(
0, name="iteration", dtype="int", trainable=False
)
self._track_variable(iterations)
self.iterations = iterations
# Create learning rate (schedule or variable)
if isinstance(
learning_rate, learning_rate_schedule.LearningRateSchedule
):
self._learning_rate = learning_rate
elif callable(learning_rate):
self._learning_rate = learning_rate
else:
if not isinstance(learning_rate, float):
raise ValueError(
"Argument `learning_rate` should be float, or an instance "
"of LearningRateSchedule, or a callable "
"(that takes in the current iteration value "
"and returns the corresponding learning rate value). "
f"Received instead: learning_rate={learning_rate}"
)
with backend.name_scope(self.name, caller=self):
learning_rate = backend.Variable(
learning_rate,
name="learning_rate",
dtype=backend.floatx(),
trainable=False,
)
self._track_variable(learning_rate)
self._learning_rate = learning_rate
def _track_variable(self, variable):
self._tracker.add_to_store("variables", variable)
@tracking.no_automatic_dependency_tracking
def build(self, variables):
if self.use_ema:
self._model_variables_moving_average = []
if self.gradient_accumulation_steps:
self._accumulated_gradients = []
for i, variable in enumerate(variables):
self._trainable_variables_indices[self._var_key(variable)] = i
if self.use_ema:
self._model_variables_moving_average.append(
self.add_variable_from_reference(
variable,
name="average",
)
)
if self.gradient_accumulation_steps:
self._accumulated_gradients.append(
self.add_variable_from_reference(
variable,
name="gradient_accumulator",
)
)
self._trainable_variables = variables[:]
self.built = True
def _var_key(self, variable):
# Helper function to get a stable ID and the variable instance mapping.
return id(variable)
@property
def variables(self):
return self._variables[:]
def _get_variable_index(self, variable):
return self._trainable_variables_indices[self._var_key(variable)]
def add_variable(
self,
shape,
initializer="zeros",
dtype=None,
name=None,
):
self._check_super_called()
initializer = initializers.get(initializer)
with backend.name_scope(self.name, caller=self):
variable = backend.Variable(
initializer=initializer,
shape=shape,
dtype=dtype,
trainable=False,
name=name,
)
self._track_variable(variable)
return variable
def add_variable_from_reference(
self, reference_variable, name=None, initializer="zeros"
):
"""Add an all-zeros variable with the shape and dtype of a reference
variable.
"""
name = name or "var"
if hasattr(reference_variable, "path"):
name = reference_variable.path.replace("/", "_") + "_" + name
else:
name = str(reference_variable.name).replace(":", "_") + "_" + name
return self.add_variable(
shape=reference_variable.shape,
initializer=initializer,
dtype=reference_variable.dtype,
name=name,
)
def _check_variables_are_known(self, variables):
for v in variables:
if self._var_key(v) not in self._trainable_variables_indices:
raise ValueError(
f"Unknown variable: {v}. This optimizer can only "
"be called for the variables it was originally built with. "
"When working with a new set of variables, you should "
"recreate a new optimizer instance."
)
def assign(self, variable, value):
"""Assign a value to a variable.
This should be used in optimizers instead of `variable.assign(value)` to
support backend specific optimizations.
Note that the variable can be a model variable or an optimizer variable;
it can be a backend native variable or a Keras variable.
Args:
variable: The variable to update.
            value: The value to assign to the variable.
"""
variable.assign(value)
def assign_add(self, variable, value):
"""Add a value to a variable.
This should be used in optimizers instead of
`variable.assign_add(value)` to support backend specific optimizations.
Note that the variable can be a model variable or an optimizer variable;
it can be a backend native variable or a Keras variable.
Args:
variable: The variable to update.
value: The value to add to the variable.
"""
variable.assign_add(value)
def assign_sub(self, variable, value):
"""Subtract a value from a variable.
This should be used in optimizers instead of
`variable.assign_sub(value)` to support backend specific optimizations.
Note that the variable can be a model variable or an optimizer variable;
it can be a backend native variable or a Keras variable.
Args:
variable: The variable to update.
            value: The value to subtract from the variable.
"""
variable.assign_sub(value)
def update_step(self, gradient, variable, learning_rate):
raise NotImplementedError
def apply_gradients(self, grads_and_vars):
grads, trainable_variables = zip(*grads_and_vars)
self.apply(grads, trainable_variables)
# Return iterations for compat with tf.keras.
return self.iterations
def apply(self, grads, trainable_variables=None):
"""Update traininable variables according to provided gradient values.
`grads` should be a list of gradient tensors
with 1:1 mapping to the list of variables the optimizer was built with.
`trainable_variables` can be provided
on the first call to build the optimizer.
"""
if len(grads) == 0:
# It is possible that the grad is empty. In this case,
# `apply_gradients` is a no-op.
return
if trainable_variables is None:
if not self.built:
raise ValueError(
"When passing `grads` without `variables`, the optimizer "
"must already be built on a list of variables. "
"Call `optimizer.build(trainable_variables)` first. "
)
if len(grads) != len(self._trainable_variables_indices):
raise ValueError(
"When passing `grads` as a list of gradient tensors, the "
f"gradients must match `optimizer.variables` one-to-on. "
f"Received a list of {len(grads)} gradients, but the "
f"optimizer is tracking {len(self._trainable_variables)} "
"trainable variables."
)
trainable_variables = self._trainable_variables
else:
trainable_variables = list(trainable_variables)
# Optionally build optimizer.
if not self.built:
with backend.name_scope(self.name, caller=self):
self.build(trainable_variables)
self.built = True
self._check_variables_are_known(trainable_variables)
with backend.name_scope(self.name, caller=self):
# Filter empty gradients.
grads, trainable_variables = self._filter_empty_gradients(
grads, trainable_variables
)
if len(list(grads)) == 0:
return
# Unscale gradients.
scale = self.loss_scale_factor
if scale is not None:
grads = [g if g is None else g / scale for g in grads]
# Apply clipping and weight decay.
grads = self._clip_gradients(grads)
self._apply_weight_decay(trainable_variables)
# Apply gradient updates.
self._backend_apply_gradients(grads, trainable_variables)
# Apply variable constraints after applying gradients.
for variable in trainable_variables:
if variable.constraint is not None:
variable.assign(variable.constraint(variable))
def _backend_apply_gradients(self, grads, trainable_variables):
"""Apply method that can be overridden by different backends.
JAX overrides it in order to deal with statelessness in gradient
accumulation and EMA handling.
The below implementation is intended to be generally backend-agnostic,
but may not work with all backends.
This method does 4 things:
- Call the optimizer's update_step() to update trainable variables
and optimizer variables.
- Update EMA variables, if EMA is configured.
- Update gradient accumulators, if gradient accumulation is configured.
- Update the iteration counter.
"""
if self.gradient_accumulation_steps:
is_update_step = (
self.iterations + 1
) % self.gradient_accumulation_steps == 0
def _update_step_fn(self, grads, trainable_variables):
# Run update step with accumulated grads + reset accumulators
steps = self.gradient_accumulation_steps
grads = [
(grads[i] + self._accumulated_gradients[i]) / steps
for i in range(len(grads))
]
self._backend_update_step(
grads, trainable_variables, self.learning_rate
)
self._backend_reset_gradient_accumulators()
def _grad_accumulation_fn(self, grads):
# Update gradient accumulators
self._backend_increment_gradient_accumulators(grads)
ops.cond(
is_update_step,
lambda: _update_step_fn(self, grads, trainable_variables),
lambda: _grad_accumulation_fn(self, grads),
)
else:
            # Run update step.
self._backend_update_step(
grads, trainable_variables, self.learning_rate
)
if self.use_ema:
self._update_model_variables_moving_average(
self._trainable_variables
)
if self.ema_overwrite_frequency:
# Only when self.ema_overwrite_frequency is not None, we
# overwrite the model variables.
should_overwrite_model_vars = (
self.iterations + 1
) % self.ema_overwrite_frequency == 0
ops.cond(
should_overwrite_model_vars,
lambda: self._overwrite_model_variables_with_average_value(
self._trainable_variables
),
lambda: None,
)
# Update iteration counter.
self.iterations.assign_add(1)
def _backend_update_step(self, grads, trainable_variables, learning_rate):
"""Collective update_step that can be overridden by the backend.
It is overridden by torch for performance reasons, and
by TF to support tf.distribute.
"""
for grad, var in zip(grads, trainable_variables):
self.update_step(grad, var, learning_rate)
def _backend_reset_gradient_accumulators(self):
for g_acc in self._accumulated_gradients:
g_acc.assign(np.zeros(g_acc.shape, dtype=g_acc.dtype))
def _backend_increment_gradient_accumulators(self, grads):
new_g_accs = [
(grads[i] + self._accumulated_gradients[i])
for i in range(len(grads))
]
for n_g_acc, g_acc in zip(new_g_accs, self._accumulated_gradients):
g_acc.assign(n_g_acc)
def stateless_apply(self, optimizer_variables, grads, trainable_variables):
self._check_super_called()
if not self.built:
raise ValueError(
f"To call `stateless_apply`, {self.__class__.__name__} "
"must be built (i.e. its variables must have been created). "
"You can build it via `optimizer.build(trainable_variables)`."
)
if len(optimizer_variables) != len(self.variables):
raise ValueError(
"Argument `optimizer_variables` must be a list of tensors "
f"corresponding 1:1 to {self.__class__.__name__}().variables. "
f"Received list with length {len(optimizer_variables)}, but "
f"expected {len(self.variables)} variables."
)
if len(trainable_variables) != len(self._trainable_variables):
raise ValueError(
"Argument `optimizer_variables` must be a list of tensors "
"corresponding 1:1 to the trainable variables list that "
"the optimizer was built with. Received "
f"len(trainable_variables) == {len(trainable_variables)} "
"whereas the optimizer was built with "
f"{len(self._trainable_variables)} variables."
)
# Gather variable mapping
mapping = list(
zip(self._trainable_variables, trainable_variables)
) + list(zip(self.variables, optimizer_variables))
# Call in stateless scope
with backend.StatelessScope(state_mapping=mapping) as scope:
self.apply(grads)
# Gather updated variables
trainable_variables = []
for v in self._trainable_variables:
new_v = scope.get_current_value(v)
if new_v is not None:
trainable_variables.append(new_v)
else:
trainable_variables.append(v)
optimizer_variables = []
for v in self.variables:
new_v = scope.get_current_value(v)
if new_v is not None:
optimizer_variables.append(new_v)
else:
optimizer_variables.append(v)
return trainable_variables, optimizer_variables
def scale_loss(self, loss):
"""Scale the loss before computing gradients.
Scales the loss before gradients are computed in a `train_step`. This
is primarily useful during mixed precision training to prevent numeric
underflow.
"""
if self.loss_scale_factor is not None:
return loss * self.loss_scale_factor
return loss
@property
def learning_rate(self):
return self._get_current_learning_rate()
@learning_rate.setter
def learning_rate(self, learning_rate):
if isinstance(
learning_rate, learning_rate_schedule.LearningRateSchedule
):
self._learning_rate = learning_rate
elif callable(learning_rate):
self._learning_rate = learning_rate
else:
if isinstance(
self._learning_rate, learning_rate_schedule.LearningRateSchedule
):
raise TypeError(
"This optimizer was created with a `LearningRateSchedule`"
" object as its `learning_rate` constructor argument, "
"hence its learning rate is not settable. If you need the"
" learning rate to be settable, you should instantiate "
"the optimizer with a float `learning_rate` argument."
)
self._learning_rate.assign(learning_rate)
def set_weights(self, weights):
"""Set the weights of the optimizer."""
if not self.built:
raise ValueError(
"You are calling `set_weights()` on an optimizer that has not "
"yet been built. Please call "
"`optimizer.build(trainable_variables)` to create the "
"optimizer weights before calling `set_weights()`."
)
for variable, weight in zip(self._variables, weights):
if variable.shape != weight.shape:
raise ValueError(
f"Optimizer variable {self._var_key(variable)} has shape "
f"{str(variable.shape)} not compatible with provided "
f"weight shape {str(weight.shape)}."
)
variable.assign(weight)
def save_own_variables(self, store):
"""Get the state of this optimizer object."""
for i, variable in enumerate(self.variables):
store[str(i)] = variable.numpy()
def load_own_variables(self, store):
"""Set the state of this optimizer object."""
if len(store.keys()) != len(self.variables):
msg = (
f"Skipping variable loading for optimizer '{self.name}', "
f"because it has {len(self.variables)} variables whereas "
f"the saved optimizer has {len(store.keys())} variables. "
)
if len(self.variables) == 0:
msg += (
"This is likely because the optimizer has not been "
"called/built yet."
)
warnings.warn(msg, stacklevel=2)
return
for i, variable in enumerate(self.variables):
variable.assign(store[str(i)])
def _get_current_learning_rate(self):
if isinstance(
self._learning_rate, learning_rate_schedule.LearningRateSchedule
):
return self._learning_rate(self.iterations)
elif callable(self._learning_rate):
return self._learning_rate(self.iterations)
return self._learning_rate
def _filter_empty_gradients(self, grads, vars):
for grad in grads:
if grad is None:
# Filtering is required.
filtered = [
(g, v) for g, v in zip(grads, vars) if g is not None
]
if not filtered:
raise ValueError("No gradients provided for any variable.")
if len(filtered) < len(grads):
missing_grad_vars = [
v for g, v in zip(grads, vars) if g is None
]
warnings.warn(
"Gradients do not exist for variables "
f"{[v.name for v in missing_grad_vars]} when "
"minimizing the loss. If using `model.compile()`, "
"did you forget to provide a `loss` argument?"
)
return zip(*filtered)
return grads, vars
def _clip_gradients(self, grads):
if self.clipnorm and self.clipnorm > 0:
clipped_grads = []
for g in grads:
if g is None:
clipped_grads.append(g)
else:
clipped_grads.append(self._clip_by_norm(g))
return clipped_grads
if self.global_clipnorm and self.global_clipnorm > 0:
return clip_by_global_norm(grads, self.global_clipnorm)
if self.clipvalue and self.clipvalue > 0:
clipped_grads = []
for g in grads:
if g is None:
clipped_grads.append(g)
else:
clipped_grads.append(
ops.clip(g, -self.clipvalue, self.clipvalue)
)
return clipped_grads
return grads
def exclude_from_weight_decay(self, var_list=None, var_names=None):
"""Exclude variables from weight decay.
This method must be called before the optimizer's `build` method is
        called. You can either pass specific variables to exclude, or provide
        a list of anchor strings: if any of these strings appears in a
        variable's name, that variable is excluded from weight decay.
Args:
var_list: A list of `tf.Variable`s to exclude from weight decay.
var_names: A list of strings. If any string in `var_names` appear
in the model variable's name, then this model variable is
excluded from weight decay. For example, `var_names=['bias']`
excludes all bias variables from weight decay.
"""
if hasattr(self, "_built") and self._built:
raise ValueError(
"`exclude_from_weight_decay()` can only be configued before "
"the optimizer is built."
)
if var_list:
self._exclude_from_weight_decay = [
self._var_key(variable) for variable in var_list
]
else:
self._exclude_from_weight_decay = []
self._exclude_from_weight_decay_names = var_names or []
def _use_weight_decay(self, variable):
exclude_from_weight_decay = getattr(
self, "_exclude_from_weight_decay", []
)
exclude_from_weight_decay_names = getattr(
self, "_exclude_from_weight_decay_names", []
)
variable_id = self._var_key(variable)
for exclude_id in exclude_from_weight_decay:
if variable_id == exclude_id:
return False
for name in exclude_from_weight_decay_names:
if re.search(name, variable.name) is not None:
return False
return True
def _apply_weight_decay(self, variables):
if self.weight_decay is None:
return
for variable in variables:
if self._use_weight_decay(variable):
lr = ops.cast(self.learning_rate, variable.dtype)
wd = ops.cast(self.weight_decay, variable.dtype)
variable.assign(variable - variable * wd * lr)
def _check_super_called(self):
if not hasattr(self, "_lock"):
raise RuntimeError(
f"In optimizer '{self.__class__.__name__}', you forgot to call "
"`super().__init__()` as the first statement "
"in the `__init__()` method. "
"Go add it!"
)
def _update_model_variables_moving_average(self, trainable_variables):
"""Update the stored moving average using the latest value."""
if self.use_ema:
for var, average in zip(
trainable_variables, self._model_variables_moving_average
):
not_first_step = ops.not_equal(self.iterations, 0)
momentum = (
ops.cast(not_first_step, var.dtype) * self.ema_momentum
)
average.assign(momentum * average + (1 - momentum) * var)
def _overwrite_model_variables_with_average_value(
self, trainable_variables
):
"""Overwrite model variables with its moving average."""
if len(trainable_variables) != len(
self._model_variables_moving_average
):
raise ValueError(
f"The length of model variables ({len(trainable_variables)}) "
"to override does not match the length of model variables "
"stored in the optimizer "
f"({len(self._model_variables_moving_average)}). Please "
"check if the optimizer was called on your model."
)
for var, average_var in zip(
trainable_variables, self._model_variables_moving_average
):
var.assign(average_var)
def finalize_variable_values(self, var_list):
"""Set the final value of model's trainable variables.
Sometimes there are some extra steps before ending the variable updates,
such as overriding the model variables with its average value.
Args:
var_list: list of model variables.
"""
if self.use_ema:
# If the optimizer uses EMA, then when finalizing, we replace the
# model variable value with its moving average stored inside
# optimizer.
self._overwrite_model_variables_with_average_value(var_list)
def get_config(self):
"""Returns the config of the optimizer.
An optimizer config is a Python dictionary (serializable)
containing the configuration of an optimizer.
The same optimizer can be reinstantiated later
(without any saved state) from this configuration.
Subclass optimizer should override this method to include other
hyperparameters.
Returns:
Python dictionary.
"""
if isinstance(
self._learning_rate, learning_rate_schedule.LearningRateSchedule
):
learning_rate = learning_rate_schedule.serialize(
self._learning_rate
)
elif isinstance(self._learning_rate, backend.Variable):
learning_rate = float(self._learning_rate.numpy())
elif ops.is_tensor(self._learning_rate):
learning_rate = float(self._learning_rate)
elif callable(self._learning_rate):
learning_rate = serialization_lib.serialize_keras_object(
self._learning_rate
)
config = {
"name": self.name,
"learning_rate": learning_rate,
"weight_decay": self.weight_decay,
"clipnorm": self.clipnorm,
"global_clipnorm": self.global_clipnorm,
"clipvalue": self.clipvalue,
"use_ema": self.use_ema,
"ema_momentum": self.ema_momentum,
"ema_overwrite_frequency": self.ema_overwrite_frequency,
"loss_scale_factor": self.loss_scale_factor,
"gradient_accumulation_steps": self.gradient_accumulation_steps,
}
return config
@classmethod
def from_config(cls, config, custom_objects=None):
"""Creates an optimizer from its config.
This method is the reverse of `get_config`, capable of instantiating the
same optimizer from the config dictionary.
Args:
config: A Python dictionary, typically the output of get_config.
custom_objects: A Python dictionary mapping names to additional
user-defined Python objects needed to recreate this optimizer.
Returns:
An optimizer instance.
"""
if "learning_rate" in config:
if isinstance(config["learning_rate"], dict):
config["learning_rate"] = (
serialization_lib.deserialize_keras_object(
config["learning_rate"], custom_objects=custom_objects
)
)
return cls(**config)
def __setattr__(self, name, value):
# Prevent users from attaching state to the
        # optimizer before `super()` is called -- since that
# state would silently not be tracked.
if name != "_lock":
self._check_super_called()
# Track Variables.
if hasattr(self, "_tracker"):
value = self._tracker.track(value)
return super().__setattr__(name, value)
def _clip_by_norm(self, values, axes=None):
# Calculate L2-norm, clip elements by ratio of clip_norm to L2-norm
l2sum = ops.sum(ops.square(values), axes, keepdims=True)
pred = l2sum > 0
# Two-tap tf.where trick to bypass NaN gradients
l2sum_safe = ops.where(pred, l2sum, ops.ones_like(l2sum))
l2norm = ops.where(pred, ops.sqrt(l2sum_safe), l2sum)
intermediate = ops.multiply(values, self.clipnorm)
values_clip = ops.convert_to_tensor(intermediate) / ops.maximum(
l2norm, self.clipnorm
)
return values_clip
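# Editorial sketch, not part of the original class: `_apply_weight_decay`
# above implements decoupled weight decay -- each parameter is shrunk by
# `value * weight_decay * learning_rate` before the gradient update, and
# `exclude_from_weight_decay()` opts individual variables out of this step.
# Plain-Python illustration of the arithmetic:
def _weight_decay_sketch():
    variable = [1.0, -2.0]
    lr, wd = 0.1, 0.01
    # Matches `variable.assign(variable - variable * wd * lr)` above.
    return [v - v * wd * lr for v in variable]  # [0.999, -1.998]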
base_optimizer_keyword_args = """name: String. The name to use
for momentum accumulator weights created by
the optimizer.
weight_decay: Float. If set, weight decay is applied.
clipnorm: Float. If set, the gradient of each weight is individually
clipped so that its norm is no higher than this value.
clipvalue: Float. If set, the gradient of each weight is clipped to be
no higher than this value.
global_clipnorm: Float. If set, the gradient of all weights is clipped
so that their global norm is no higher than this value.
use_ema: Boolean, defaults to False. If True, exponential moving average
(EMA) is applied. EMA consists of computing an exponential moving
average of the weights of the model (as the weight values change
after each training batch), and periodically overwriting the
weights with their moving average.
ema_momentum: Float, defaults to 0.99. Only used if `use_ema=True`.
This is the momentum to use when computing
the EMA of the model's weights:
`new_average = ema_momentum * old_average + (1 - ema_momentum) *
current_variable_value`.
ema_overwrite_frequency: Int or None, defaults to None. Only used if
`use_ema=True`. Every `ema_overwrite_frequency` steps of iterations,
we overwrite the model variable by its moving average.
If None, the optimizer
does not overwrite model variables in the middle of training,
and you need to explicitly overwrite the variables
at the end of training by calling
`optimizer.finalize_variable_values()` (which updates the model
variables in-place). When using the built-in `fit()` training loop,
this happens automatically after the last epoch,
and you don't need to do anything.
loss_scale_factor: Float or `None`. If a float, the scale factor will
        be multiplied by the loss before computing gradients, and the inverse
of the scale factor will be multiplied by the gradients before
updating variables. Useful for preventing underflow during
mixed precision training. Alternately,
`keras.optimizers.LossScaleOptimizer` will
automatically set a loss scale factor.
gradient_accumulation_steps: Int or `None`. If an int, model & optimizer
variables will not be updated at every step; instead they will be
updated every `gradient_accumulation_steps` steps, using the average
value of the gradients since the last update. This is known as
"gradient accumulation". This can be useful
when your batch size is very small, in order to reduce gradient
noise at each update step.
"""
def global_norm(value_list):
"""Computes the global norm of multiple tensors."""
squared_norms = []
for v in value_list:
if v is not None:
squared_norms.append(ops.sum(ops.square(v)))
squared_norm = ops.sum(ops.stack(squared_norms))
return ops.sqrt(squared_norm)
def clip_by_global_norm(value_list, clip_norm):
use_norm = global_norm(value_list)
# Calculate L2-norm, clip elements by ratio of clip_norm to L2-norm
scale_for_finite = clip_norm * ops.minimum(1.0 / use_norm, 1.0 / clip_norm)
# If use_norm is any finite number, this is a no-op. For inf/-inf/NaN,
# this will make scale NaN.
scale = scale_for_finite + (use_norm - use_norm)
values_clipped = []
for v in value_list:
if v is None:
values_clipped.append(None)
else:
values_clipped.append(v * scale)
return values_clipped
| keras/keras/optimizers/base_optimizer.py/0 | {
"file_path": "keras/keras/optimizers/base_optimizer.py",
"repo_id": "keras",
"token_count": 16552
} | 193 |
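# Editorial illustration (not part of the Keras source above): the
# global-norm clipping implemented by `clip_by_global_norm` scales every
# gradient by the same factor, clip_norm * min(1 / global_norm, 1 / clip_norm),
# so the joint L2 norm never exceeds `clip_norm`. Plain numpy check:
import numpy as np

grads = [np.array([3.0, 0.0]), np.array([0.0, 4.0])]
global_norm = np.sqrt(sum(np.sum(g**2) for g in grads))  # 5.0
clip_norm = 1.0
scale = clip_norm * min(1.0 / global_norm, 1.0 / clip_norm)  # 0.2
clipped = [g * scale for g in grads]
print(np.sqrt(sum(np.sum(g**2) for g in clipped)))  # 1.0 == clip_norm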
"""Tests for learning rate schedule API."""
import math
import numpy as np
import pytest
from keras import backend
from keras import layers
from keras import optimizers
from keras import testing
from keras.models import Sequential
from keras.optimizers import schedules
class TestFitLRSchedulesFlow(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_fit_lr_correctness(self):
model = Sequential(
[
layers.Dense(
2, kernel_initializer="ones", bias_initializer="ones"
)
]
)
optimizer = optimizers.Adam(
learning_rate=schedules.ExponentialDecay(
initial_learning_rate=0.05, decay_steps=1, decay_rate=0.9
)
)
self.assertEqual(len(optimizer.variables), 1)
self.assertEqual(optimizer.variables[0], 0)
model.compile(optimizer=optimizer, loss="mse")
x = np.arange(32).reshape((16, 2))
y = np.arange(32).reshape((16, 2))
history = model.fit(x, y, epochs=3, batch_size=4, shuffle=False)
self.assertEqual(optimizer.variables[0], 4 * 3)
self.assertAllClose(
history.history["loss"],
[230.79457092285156, 128.30319213867188, 79.33648681640625],
rtol=5e-5,
)
class ExponentialDecayTest(testing.TestCase):
def test_config(self):
self.run_class_serialization_test(
schedules.ExponentialDecay(
initial_learning_rate=0.05,
decay_steps=10,
decay_rate=0.96,
staircase=True,
name="my_ed",
)
)
def test_continuous(self):
step = 5
decayed_lr = schedules.ExponentialDecay(0.05, 10, 0.96)
expected = 0.05 * 0.96 ** (5.0 / 10.0)
self.assertAllClose(decayed_lr(step), expected, 1e-6)
def test_staircase(self):
step = backend.Variable(1.0)
decayed_lr = schedules.ExponentialDecay(0.1, 3, 0.96, staircase=True)
# No change to learning rate due to staircase
expected = 0.1
self.assertAllClose(decayed_lr(step), expected, 1e-6)
expected = 0.1
step.assign(2)
self.assertAllClose(decayed_lr(step), expected, 1e-6)
# Decayed learning rate
expected = 0.1 * 0.96 ** (100 // 3)
step.assign(100)
self.assertAllClose(decayed_lr(step), expected, 1e-6)
def test_variables(self):
step = backend.Variable(1.0)
decayed_lr = schedules.ExponentialDecay(0.1, 3, 0.96, staircase=True)
# No change to learning rate
step.assign(1)
self.assertAllClose(decayed_lr(step), 0.1, 1e-6)
step.assign(2)
self.assertAllClose(decayed_lr(step), 0.1, 1e-6)
# Decayed learning rate
step.assign(100)
expected = 0.1 * 0.96 ** (100 // 3)
self.assertAllClose(decayed_lr(step), expected, 1e-6)
class PiecewiseConstantDecayTest(testing.TestCase):
def test_config(self):
self.run_class_serialization_test(
schedules.PiecewiseConstantDecay(
boundaries=[10, 20], values=[1, 2, 3], name="my_pcd"
)
)
def test_piecewise_values(self):
x = backend.Variable(-999.0)
decayed_lr = schedules.PiecewiseConstantDecay(
[100, 110, 120], [1.0, 0.1, 0.01, 0.001]
)
self.assertAllClose(decayed_lr(x), 1.0, 1e-6)
x.assign(100)
self.assertAllClose(decayed_lr(x), 1.0, 1e-6)
x.assign(105)
self.assertAllClose(decayed_lr(x), 0.1, 1e-6)
x.assign(110)
self.assertAllClose(decayed_lr(x), 0.1, 1e-6)
x.assign(120)
self.assertAllClose(decayed_lr(x), 0.01, 1e-6)
x.assign(999)
self.assertAllClose(decayed_lr(x), 0.001, 1e-6)
def test_boundary_values(self):
# Test casting boundaries from int32 to int64.
x_int64 = backend.Variable(0, dtype="int64", trainable=False)
boundaries, values = [1, 2, 3], [0.4, 0.5, 0.6, 0.7]
decayed_lr = schedules.PiecewiseConstantDecay(boundaries, values)
self.assertAllClose(decayed_lr(x_int64), 0.4, 1e-6)
x_int64.assign(1)
self.assertAllClose(decayed_lr(x_int64), 0.4, 1e-6)
x_int64.assign(2)
self.assertAllClose(decayed_lr(x_int64), 0.5, 1e-6)
x_int64.assign(3)
self.assertAllClose(decayed_lr(x_int64), 0.6, 1e-6)
x_int64.assign(4)
self.assertAllClose(decayed_lr(x_int64), 0.7, 1e-6)
class LinearDecayTest(testing.TestCase):
def test_config(self):
self.run_class_serialization_test(
schedules.PolynomialDecay(
initial_learning_rate=0.1,
decay_steps=100,
end_learning_rate=0.005,
power=1.0,
cycle=False,
name="my_ld",
)
)
def test_halfway(self):
step = 5
lr = 0.05
end_lr = 0.0
decayed_lr = schedules.PolynomialDecay(lr, 10, end_lr)
expected = lr * 0.5
self.assertAllClose(decayed_lr(step), expected, 1e-6)
def test_end(self):
step = 10
lr = 0.05
end_lr = 0.001
decayed_lr = schedules.PolynomialDecay(lr, 10, end_lr)
expected = end_lr
self.assertAllClose(decayed_lr(step), expected, 1e-6)
def test_halfway_with_end(self):
step = 5
lr = 0.05
end_lr = 0.001
decayed_lr = schedules.PolynomialDecay(lr, 10, end_lr)
expected = (lr + end_lr) * 0.5
self.assertAllClose(decayed_lr(step), expected, 1e-6)
def test_beyond_end(self):
step = 15
lr = 0.05
end_lr = 0.001
decayed_lr = schedules.PolynomialDecay(lr, 10, end_lr)
expected = end_lr
self.assertAllClose(decayed_lr(step), expected, 1e-6)
def test_beyond_end_with_cycle(self):
step = 15
lr = 0.05
end_lr = 0.001
decayed_lr = schedules.PolynomialDecay(lr, 10, end_lr, cycle=True)
expected = (lr - end_lr) * 0.25 + end_lr
self.assertAllClose(decayed_lr(step), expected, 1e-6)
class SqrtDecayTest(testing.TestCase):
def test_halfway(self):
step = 5
lr = 0.05
end_lr = 0.0
power = 0.5
decayed_lr = schedules.PolynomialDecay(lr, 10, end_lr, power=power)
expected = lr * 0.5**power
self.assertAllClose(decayed_lr(step), expected, 1e-6)
def test_end(self):
step = 10
lr = 0.05
end_lr = 0.001
power = 0.5
decayed_lr = schedules.PolynomialDecay(lr, 10, end_lr, power=power)
expected = end_lr
self.assertAllClose(decayed_lr(step), expected, 1e-6)
def test_halfway_with_end(self):
step = 5
lr = 0.05
end_lr = 0.001
power = 0.5
decayed_lr = schedules.PolynomialDecay(lr, 10, end_lr, power=power)
expected = (lr - end_lr) * 0.5**power + end_lr
self.assertAllClose(decayed_lr(step), expected, 1e-6)
def test_beyond_end(self):
step = 15
lr = 0.05
end_lr = 0.001
power = 0.5
decayed_lr = schedules.PolynomialDecay(lr, 10, end_lr, power=power)
expected = end_lr
self.assertAllClose(decayed_lr(step), expected, 1e-6)
def test_beyond_end_with_cycle(self):
step = 15
lr = 0.05
end_lr = 0.001
power = 0.5
decayed_lr = schedules.PolynomialDecay(
lr, 10, end_lr, power=power, cycle=True
)
expected = (lr - end_lr) * 0.25**power + end_lr
self.assertAllClose(decayed_lr(step), expected, 1e-6)
def test_begin_with_cycle(self):
lr = 0.001
decay_steps = 10
step = 0
decayed_lr = schedules.PolynomialDecay(lr, decay_steps, cycle=True)
expected = lr
self.assertAllClose(decayed_lr(step), expected, 1e-6)
class InverseTimeDecayTest(testing.TestCase):
def test_config(self):
self.run_class_serialization_test(
schedules.InverseTimeDecay(
initial_learning_rate=0.05,
decay_steps=10,
decay_rate=0.96,
staircase=True,
name="my_itd",
)
)
def test_decay(self):
initial_lr = 0.1
k = 10
decay_rate = 0.96
step = backend.Variable(0.0)
decayed_lr = schedules.InverseTimeDecay(initial_lr, k, decay_rate)
for i in range(k + 1):
expected = initial_lr / (1 + i / k * decay_rate)
self.assertAllClose(decayed_lr(step), expected, 1e-6)
step.assign(step + 1)
def test_staircase(self):
initial_lr = 0.1
k = 10
decay_rate = 0.96
step = backend.Variable(0.0)
decayed_lr = schedules.InverseTimeDecay(
initial_lr, k, decay_rate, staircase=True
)
for i in range(k + 1):
expected = initial_lr / (1 + decay_rate * (i // k))
self.assertAllClose(decayed_lr(step), expected, 1e-6)
step.assign(step + 1)
class CosineDecayTest(testing.TestCase):
def test_config(self):
self.run_class_serialization_test(
schedules.CosineDecay(
initial_learning_rate=0.05,
decay_steps=10,
alpha=0.1,
warmup_target=0.2,
warmup_steps=2,
name="my_cd",
)
)
def np_cosine_decay(self, step, decay_steps, alpha=0.0):
step = min(step, decay_steps)
completed_fraction = step / decay_steps
decay = 0.5 * (1.0 + math.cos(math.pi * completed_fraction))
return (1.0 - alpha) * decay + alpha
def test_decay(self):
num_training_steps = 1000
initial_lr = 1.0
for step in range(0, 1500, 250):
decayed_lr = schedules.CosineDecay(initial_lr, num_training_steps)
expected = self.np_cosine_decay(step, num_training_steps)
self.assertAllClose(decayed_lr(step), expected, 1e-6)
def linear_warmup(self, step, warmup_steps, initial_lr, target_lr):
completed_fraction = step / warmup_steps
total_delta = target_lr - initial_lr
return completed_fraction * total_delta
def test_warmup(self):
warmup_steps = 1500
initial_lr = 0.0
target_lr = 10.0
for step in range(0, 1500, 250):
lr = schedules.CosineDecay(
initial_lr,
10,
warmup_target=target_lr,
warmup_steps=warmup_steps,
)
expected = self.linear_warmup(
step, warmup_steps, initial_lr, target_lr
)
self.assertAllClose(lr(step), expected)
def test_alpha(self):
num_training_steps = 1000
initial_lr = 1.0
alpha = 0.1
for step in range(0, 1500, 250):
decayed_lr = schedules.CosineDecay(
initial_lr, num_training_steps, alpha
)
expected = self.np_cosine_decay(step, num_training_steps, alpha)
self.assertAllClose(decayed_lr(step), expected, 1e-6)
def test_float64(self):
num_training_steps = 1000
initial_lr = np.float64(1.0)
for step in range(0, 1500, 250):
decayed_lr = schedules.CosineDecay(initial_lr, num_training_steps)
expected = self.np_cosine_decay(step, num_training_steps)
self.assertAllClose(decayed_lr(step), expected, 1e-6)
def test_warmup_decay(self):
warmup_steps = 2000
decay_steps = 1000
initial_lr = 0.0
target_lr = 10.0
for step in range(0, 3000, 250):
lr = schedules.CosineDecay(
initial_lr,
decay_steps,
warmup_target=target_lr,
warmup_steps=warmup_steps,
)
if step < warmup_steps + 1:
expected = self.linear_warmup(
step, warmup_steps, initial_lr, target_lr
)
else:
expected = target_lr * self.np_cosine_decay(
step - warmup_steps, decay_steps
)
self.assertAllClose(lr(step), expected)
class CosineDecayRestartsTest(testing.TestCase):
def test_config(self):
self.run_class_serialization_test(
schedules.CosineDecayRestarts(
initial_learning_rate=0.05,
first_decay_steps=10,
alpha=0.1,
t_mul=3.0,
m_mul=4.0,
name="my_cdr",
)
)
def np_cosine_decay_restarts(
self, step, decay_steps, t_mul=2.0, m_mul=1.0, alpha=0.0
):
fac = 1.0
while step >= decay_steps:
step -= decay_steps
decay_steps *= t_mul
fac *= m_mul
completed_fraction = step / decay_steps
decay = fac * 0.5 * (1.0 + math.cos(math.pi * completed_fraction))
return (1.0 - alpha) * decay + alpha
def test_decay(self):
num_training_steps = 1000
initial_lr = 1.0
for step in range(0, 1500, 250):
decayed_lr = schedules.CosineDecayRestarts(
initial_lr, num_training_steps
)
expected = self.np_cosine_decay_restarts(step, num_training_steps)
self.assertAllClose(decayed_lr(step), expected, 1e-6)
def test_float64(self):
num_training_steps = 1000
initial_lr = np.float64(1.0)
for step in range(0, 1500, 250):
decayed_lr = schedules.CosineDecayRestarts(
initial_lr, num_training_steps
)
expected = self.np_cosine_decay_restarts(step, num_training_steps)
self.assertAllClose(decayed_lr(step), expected, 1e-6)
def test_alpha(self):
num_training_steps = 1000
initial_lr = 1.0
alpha = 0.1
for step in range(0, 1500, 250):
decayed_lr = schedules.CosineDecayRestarts(
initial_lr, num_training_steps, alpha=alpha
)
expected = self.np_cosine_decay_restarts(
step, num_training_steps, alpha=alpha
)
self.assertAllClose(decayed_lr(step), expected, 1e-6)
def test_mmul(self):
num_training_steps = 1000
initial_lr = 1.0
m_mul = 0.9
for step in range(0, 1500, 250):
decayed_lr = schedules.CosineDecayRestarts(
initial_lr, num_training_steps, m_mul=m_mul
)
expected = self.np_cosine_decay_restarts(
step, num_training_steps, m_mul=m_mul
)
self.assertAllClose(decayed_lr(step), expected, 1e-6)
def test_tmul(self):
num_training_steps = 1000
initial_lr = 1.0
t_mul = 1.0
for step in range(0, 1500, 250):
decayed_lr = schedules.CosineDecayRestarts(
initial_lr, num_training_steps, t_mul=t_mul
)
expected = self.np_cosine_decay_restarts(
step, num_training_steps, t_mul=t_mul
)
self.assertAllClose(decayed_lr(step), expected, 1e-6)
| keras/keras/optimizers/schedules/learning_rate_schedule_test.py/0 | {
"file_path": "keras/keras/optimizers/schedules/learning_rate_schedule_test.py",
"repo_id": "keras",
"token_count": 7949
} | 194 |
import os
import unittest.mock as mock
import numpy as np
from absl import logging
from absl.testing import parameterized
from keras import layers
from keras.models import Sequential
from keras.saving import saving_api
from keras.testing import test_case
from keras.testing.test_utils import named_product
class SaveModelTests(test_case.TestCase):
def get_model(self):
return Sequential(
[
layers.Dense(5, input_shape=(3,)),
layers.Softmax(),
]
)
def test_basic_saving(self):
"""Test basic model saving and loading."""
model = self.get_model()
filepath = os.path.join(self.get_temp_dir(), "test_model.keras")
saving_api.save_model(model, filepath)
loaded_model = saving_api.load_model(filepath)
x = np.random.uniform(size=(10, 3))
self.assertTrue(np.allclose(model.predict(x), loaded_model.predict(x)))
def test_invalid_save_format(self):
"""Test deprecated save_format argument."""
model = self.get_model()
with self.assertRaisesRegex(
ValueError, "The `save_format` argument is deprecated"
):
saving_api.save_model(model, "model.txt", save_format=True)
def test_unsupported_arguments(self):
"""Test unsupported argument during model save."""
model = self.get_model()
filepath = os.path.join(self.get_temp_dir(), "test_model.keras")
with self.assertRaisesRegex(
ValueError, r"The following argument\(s\) are not supported"
):
saving_api.save_model(model, filepath, random_arg=True)
def test_save_h5_format(self):
"""Test saving model in h5 format."""
model = self.get_model()
filepath_h5 = os.path.join(self.get_temp_dir(), "test_model.h5")
saving_api.save_model(model, filepath_h5)
self.assertTrue(os.path.exists(filepath_h5))
os.remove(filepath_h5)
def test_save_unsupported_extension(self):
"""Test saving model with unsupported extension."""
model = self.get_model()
with self.assertRaisesRegex(
ValueError, "Invalid filepath extension for saving"
):
saving_api.save_model(model, "model.png")
class LoadModelTests(test_case.TestCase, parameterized.TestCase):
def get_model(self, dtype=None):
return Sequential(
[
layers.Dense(5, input_shape=(3,), dtype=dtype),
layers.Softmax(),
]
)
@parameterized.named_parameters(
[
{"testcase_name": "bfloat16", "dtype": "bfloat16"},
{"testcase_name": "float16", "dtype": "float16"},
{"testcase_name": "float32", "dtype": "float32"},
{"testcase_name": "float64", "dtype": "float64"},
]
)
def test_basic_load(self, dtype):
"""Test basic model loading."""
model = self.get_model(dtype)
filepath = os.path.join(self.get_temp_dir(), "test_model.keras")
saving_api.save_model(model, filepath)
loaded_model = saving_api.load_model(filepath)
x = np.random.uniform(size=(10, 3))
self.assertEqual(loaded_model.weights[0].dtype, dtype)
self.assertTrue(np.allclose(model.predict(x), loaded_model.predict(x)))
def test_load_unsupported_format(self):
"""Test loading model with unsupported format."""
with self.assertRaisesRegex(ValueError, "File format not supported"):
saving_api.load_model("model.pkl")
def test_load_keras_not_zip(self):
"""Test loading keras file that's not a zip."""
with self.assertRaisesRegex(ValueError, "File not found"):
saving_api.load_model("not_a_zip.keras")
def test_load_h5_format(self):
"""Test loading model in h5 format."""
model = self.get_model()
filepath_h5 = os.path.join(self.get_temp_dir(), "test_model.h5")
saving_api.save_model(model, filepath_h5)
loaded_model = saving_api.load_model(filepath_h5)
x = np.random.uniform(size=(10, 3))
self.assertTrue(np.allclose(model.predict(x), loaded_model.predict(x)))
os.remove(filepath_h5)
def test_load_model_with_custom_objects(self):
"""Test loading model with custom objects."""
class CustomLayer(layers.Layer):
def call(self, inputs):
return inputs
model = Sequential([CustomLayer(input_shape=(3,))])
filepath = os.path.join(self.get_temp_dir(), "custom_model.keras")
model.save(filepath)
loaded_model = saving_api.load_model(
filepath, custom_objects={"CustomLayer": CustomLayer}
)
self.assertIsInstance(loaded_model.layers[0], CustomLayer)
os.remove(filepath)
class LoadWeightsTests(test_case.TestCase, parameterized.TestCase):
def get_model(self, dtype=None):
return Sequential(
[
layers.Dense(5, input_shape=(3,), dtype=dtype),
layers.Softmax(),
]
)
@parameterized.named_parameters(
named_product(
source_dtype=["float64", "float32", "float16", "bfloat16"],
dest_dtype=["float64", "float32", "float16", "bfloat16"],
)
)
def test_load_keras_weights(self, source_dtype, dest_dtype):
"""Test loading keras weights."""
src_model = self.get_model(dtype=source_dtype)
filepath = os.path.join(self.get_temp_dir(), "test_weights.weights.h5")
src_model.save_weights(filepath)
src_weights = src_model.get_weights()
dest_model = self.get_model(dtype=dest_dtype)
dest_model.load_weights(filepath)
dest_weights = dest_model.get_weights()
for orig, loaded in zip(src_weights, dest_weights):
self.assertAllClose(
orig.astype("float32"),
loaded.astype("float32"),
atol=0.001,
rtol=0.01,
)
def test_load_h5_weights_by_name(self):
"""Test loading h5 weights by name."""
model = self.get_model()
filepath = os.path.join(self.get_temp_dir(), "test_weights.weights.h5")
model.save_weights(filepath)
with self.assertRaisesRegex(ValueError, "Invalid keyword arguments"):
model.load_weights(filepath, by_name=True)
def test_load_weights_invalid_extension(self):
"""Test loading weights with unsupported extension."""
model = self.get_model()
with self.assertRaisesRegex(ValueError, "File format not supported"):
model.load_weights("invalid_extension.pkl")
class SaveModelTestsWarning(test_case.TestCase):
def get_model(self):
return Sequential(
[
layers.Dense(5, input_shape=(3,)),
layers.Softmax(),
]
)
def test_h5_deprecation_warning(self):
"""Test deprecation warning for h5 format."""
model = self.get_model()
filepath = os.path.join(self.get_temp_dir(), "test_model.h5")
with mock.patch.object(logging, "warning") as mock_warn:
saving_api.save_model(model, filepath)
mock_warn.assert_called_once_with(
"You are saving your model as an HDF5 file via "
"`model.save()` or `keras.saving.save_model(model)`. "
"This file format is considered legacy. "
"We recommend using instead the native Keras format, "
"e.g. `model.save('my_model.keras')` or "
"`keras.saving.save_model(model, 'my_model.keras')`. "
)
| keras/keras/saving/saving_api_test.py/0 | {
"file_path": "keras/keras/saving/saving_api_test.py",
"repo_id": "keras",
"token_count": 3496
} | 195 |
import math
import numpy as np
import tree
from keras import backend
from keras.api_export import keras_export
from keras.utils.dataset_utils import is_torch_tensor
try:
import pandas
except ImportError:
pandas = None
# Leave jax, tf, and torch arrays off this list. Instead we will use
# `__array__` to detect these types. Doing so allows us to avoid importing a
# backend framework we are not currently using just to do type-checking.
ARRAY_TYPES = (np.ndarray,)
if backend.backend() == "tensorflow":
from keras.utils.module_utils import tensorflow as tf
ARRAY_TYPES = ARRAY_TYPES + (tf.RaggedTensor,)
if pandas:
ARRAY_TYPES = ARRAY_TYPES + (pandas.Series, pandas.DataFrame)
@keras_export("keras.utils.unpack_x_y_sample_weight")
def unpack_x_y_sample_weight(data):
"""Unpacks user-provided data tuple.
This is a convenience utility to be used when overriding
`Model.train_step`, `Model.test_step`, or `Model.predict_step`.
This utility makes it easy to support data of the form `(x,)`,
`(x, y)`, or `(x, y, sample_weight)`.
Standalone usage:
>>> features_batch = ops.ones((10, 5))
>>> labels_batch = ops.zeros((10, 5))
>>> data = (features_batch, labels_batch)
>>> # `y` and `sample_weight` will default to `None` if not provided.
>>> x, y, sample_weight = unpack_x_y_sample_weight(data)
>>> sample_weight is None
True
Args:
data: A tuple of the form `(x,)`, `(x, y)`, or `(x, y, sample_weight)`.
Returns:
The unpacked tuple, with `None`s for `y` and `sample_weight` if they are
not provided.
"""
if isinstance(data, list):
data = tuple(data)
if not isinstance(data, tuple):
return (data, None, None)
elif len(data) == 1:
return (data[0], None, None)
elif len(data) == 2:
return (data[0], data[1], None)
elif len(data) == 3:
return (data[0], data[1], data[2])
error_msg = (
"Data is expected to be in format `x`, `(x,)`, `(x, y)`, "
f"or `(x, y, sample_weight)`, found: {data}"
)
raise ValueError(error_msg)
@keras_export("keras.utils.pack_x_y_sample_weight")
def pack_x_y_sample_weight(x, y=None, sample_weight=None):
"""Packs user-provided data into a tuple.
This is a convenience utility for packing data into the tuple formats
that `Model.fit()` uses.
Standalone usage:
>>> x = ops.ones((10, 1))
>>> data = pack_x_y_sample_weight(x)
>>> isinstance(data, ops.Tensor)
True
>>> y = ops.ones((10, 1))
>>> data = pack_x_y_sample_weight(x, y)
>>> isinstance(data, tuple)
True
>>> x, y = data
Args:
x: Features to pass to `Model`.
y: Ground-truth targets to pass to `Model`.
sample_weight: Sample weight for each element.
Returns:
Tuple in the format used in `Model.fit()`.
"""
if y is None:
# For single x-input, we do no tuple wrapping since in this case
# there is no ambiguity. This also makes NumPy and Dataset
# consistent in that the user does not have to wrap their Dataset
# data in an unnecessary tuple.
        if not isinstance(x, (tuple, list)):
return x
else:
return (x,)
elif sample_weight is None:
return (x, y)
else:
return (x, y, sample_weight)
def list_to_tuple(maybe_list):
"""Datasets will stack any list of tensors, so we convert them to tuples."""
if isinstance(maybe_list, list):
return tuple(maybe_list)
return maybe_list
def check_data_cardinality(data):
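    """Check that all arrays in `data` share the same number of samples.
    Raises a `ValueError` listing the per-input sizes if the first (batch)
    dimensions disagree.
    """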
num_samples = set(int(i.shape[0]) for i in tree.flatten(data))
if len(num_samples) > 1:
msg = (
"Data cardinality is ambiguous. "
"Make sure all arrays contain the same number of samples."
)
for label, single_data in zip(["x", "y", "sample_weight"], data):
sizes = ", ".join(
str(i.shape[0]) for i in tree.flatten(single_data)
)
msg += f"'{label}' sizes: {sizes}\n"
raise ValueError(msg)
def sync_shuffle(data, num_samples=None):
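    """Shuffle a (possibly nested) structure of arrays in unison.
    The same random permutation is applied to every array so that
    corresponding samples (e.g. features and labels) stay aligned.
    """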
if num_samples is None:
num_samples_set = set(int(i.shape[0]) for i in tree.flatten(data))
assert len(num_samples_set) == 1
num_samples = num_samples_set.pop()
p = np.random.permutation(num_samples)
return tree.map_structure(lambda x: x[p], data)
def train_validation_split(arrays, validation_split):
"""Split arrays into train and validation subsets in deterministic order.
The last part of data will become validation data.
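    Illustrative example (a sketch, not from the original docs):
    >>> x = np.arange(10)
    >>> y = np.arange(10)
    >>> (x_train, y_train), (x_val, y_val) = train_validation_split(
    ...     (x, y), validation_split=0.2
    ... )
    >>> (int(x_train.shape[0]), int(x_val.shape[0]))
    (8, 2)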
Args:
arrays: Tensors to split. Allowed inputs are arbitrarily nested
structures of Tensors and NumPy arrays.
validation_split: Float between 0 and 1. The proportion of the dataset
to include in the validation split. The rest of the dataset will be
included in the training split.
Returns:
`(train_arrays, validation_arrays)`
"""
def _can_split(t):
return backend.is_tensor(t) or isinstance(t, ARRAY_TYPES) or t is None
flat_arrays = tree.flatten(arrays)
unsplitable = [type(t) for t in flat_arrays if not _can_split(t)]
if unsplitable:
raise ValueError(
"Argument `validation_split` is only supported "
"for tensors or NumPy arrays."
f"Found incompatible type in the input: {unsplitable}"
)
if all(t is None for t in flat_arrays):
return arrays, arrays
first_non_none = None
for t in flat_arrays:
if t is not None:
first_non_none = t
break
# Assumes all arrays have the same batch shape or are `None`.
batch_dim = int(first_non_none.shape[0])
split_at = int(math.floor(batch_dim * (1.0 - validation_split)))
if split_at == 0 or split_at == batch_dim:
raise ValueError(
f"Training data contains {batch_dim} samples, which is not "
"sufficient to split it into a validation and training set as "
f"specified by `validation_split={validation_split}`. Either "
"provide more data, or a different value for the "
"`validation_split` argument."
)
def _split(t, start, end):
if t is None:
return t
return t[start:end]
train_arrays = tree.map_structure(
lambda x: _split(x, start=0, end=split_at), arrays
)
val_arrays = tree.map_structure(
lambda x: _split(x, start=split_at, end=batch_dim), arrays
)
return train_arrays, val_arrays
def class_weight_to_sample_weights(y, class_weight):
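    """Convert a `class_weight` dict into a per-sample weight array.
    Illustrative sketch (not from the original docs): for
    `y = np.array([0, 1, 1])` and `class_weight = {0: 1.0, 1: 2.0}` this
    returns `[1.0, 2.0, 2.0]`. Classes absent from `class_weight` default
    to a weight of 1.0.
    """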
sample_weight = np.ones(shape=(y.shape[0],), dtype=backend.floatx())
if len(y.shape) > 1:
if y.shape[-1] != 1:
y = np.argmax(y, axis=-1)
else:
y = np.squeeze(y, axis=-1)
y = np.round(y).astype("int32")
for i in range(y.shape[0]):
sample_weight[i] = class_weight.get(int(y[i]), 1.0)
return sample_weight
def get_jax_iterator(iterable):
from keras.backend.jax.core import convert_to_tensor
for batch in iterable:
yield tree.map_structure(convert_to_tensor, batch)
def get_numpy_iterator(iterable):
def convert_to_numpy(x):
if not isinstance(x, np.ndarray):
# Using `__array__` should handle `tf.Tensor`, `jax.np.ndarray`,
# `torch.Tensor`, as well as any other tensor-like object that
# has added numpy support.
if hasattr(x, "__array__"):
if is_torch_tensor(x):
x = x.cpu()
x = np.asarray(x)
return x
for batch in iterable:
yield tree.map_structure(convert_to_numpy, batch)
def get_torch_dataloader(iterable):
import torch.utils.data as torch_data
from keras.backend.torch.core import convert_to_tensor
class ConverterIterableDataset(torch_data.IterableDataset):
def __init__(self, iterable):
self.iterable = iterable
def __iter__(self):
for batch in self.iterable:
yield tree.map_structure(convert_to_tensor, batch)
dataset = ConverterIterableDataset(iterable)
# `batch_size=None` indicates that we should not re-batch
return torch_data.DataLoader(dataset, batch_size=None)
| keras/keras/trainers/data_adapters/data_adapter_utils.py/0 | {
"file_path": "keras/keras/trainers/data_adapters/data_adapter_utils.py",
"repo_id": "keras",
"token_count": 3537
} | 196 |
import os
import numpy as np
from keras import testing
from keras.utils import audio_dataset_utils
from keras.utils.module_utils import tensorflow as tf
class AudioDatasetFromDirectoryTest(testing.TestCase):
def _get_audio_samples(self, count=16, different_sequence_lengths=False):
sequence_length = 30
num_channels = 1
audio_samples = []
for _ in range(count):
if different_sequence_lengths:
random_sequence_length = np.random.randint(
10, sequence_length + 1
)
audio = np.random.random((random_sequence_length, num_channels))
else:
audio = np.random.random((sequence_length, num_channels))
audio_samples.append(tf.audio.encode_wav(audio, 1000))
return audio_samples
def _prepare_directory(
self,
num_classes=2,
nested_dirs=False,
count=16,
different_sequence_lengths=False,
):
# Get a unique temp directory
temp_dir = self.get_temp_dir()
# Generate paths to class subdirectories
paths = []
for class_index in range(num_classes):
class_directory = f"class_{class_index}"
if nested_dirs:
class_paths = [
class_directory,
os.path.join(class_directory, "subfolder_1"),
os.path.join(class_directory, "subfolder_2"),
os.path.join(
class_directory, "subfolder_1", "sub-subfolder"
),
]
else:
class_paths = [class_directory]
for path in class_paths:
os.mkdir(os.path.join(temp_dir, path))
paths += class_paths
# Save audio samples to the paths
i = 0
for audio in self._get_audio_samples(
count=count, different_sequence_lengths=different_sequence_lengths
):
path = paths[i % len(paths)]
ext = "wav"
filename = os.path.join(path, f"audio_{i}.{ext}")
with open(os.path.join(temp_dir, filename), "wb") as f:
f.write(audio.numpy())
i += 1
return temp_dir
def test_audio_dataset_from_directory_standalone(self):
        # Test retrieving audio samples without labels from a directory and its
# subdirs.
# Save a few extra audio in the parent directory.
directory = self._prepare_directory(count=7, num_classes=2)
for i, audio in enumerate(self._get_audio_samples(3)):
filename = f"audio_{i}.wav"
with open(os.path.join(directory, filename), "wb") as f:
f.write(audio.numpy())
dataset = audio_dataset_utils.audio_dataset_from_directory(
directory, batch_size=5, output_sequence_length=30, labels=None
)
batch = next(iter(dataset))
# We return plain audio
self.assertEqual(batch.shape, (5, 30, 1))
self.assertEqual(batch.dtype.name, "float32")
# Count samples
batch_count = 0
sample_count = 0
for batch in dataset:
batch_count += 1
sample_count += batch.shape[0]
self.assertEqual(batch_count, 2)
self.assertEqual(sample_count, 10)
def test_audio_dataset_from_directory_binary(self):
directory = self._prepare_directory(num_classes=2)
dataset = audio_dataset_utils.audio_dataset_from_directory(
directory, batch_size=8, output_sequence_length=30, label_mode="int"
)
batch = next(iter(dataset))
self.assertLen(batch, 2)
self.assertEqual(batch[0].shape, (8, 30, 1))
self.assertEqual(batch[0].dtype.name, "float32")
self.assertEqual(batch[1].shape, (8,))
self.assertEqual(batch[1].dtype.name, "int32")
dataset = audio_dataset_utils.audio_dataset_from_directory(
directory,
batch_size=8,
output_sequence_length=30,
label_mode="binary",
)
batch = next(iter(dataset))
self.assertLen(batch, 2)
self.assertEqual(batch[0].shape, (8, 30, 1))
self.assertEqual(batch[0].dtype.name, "float32")
self.assertEqual(batch[1].shape, (8, 1))
self.assertEqual(batch[1].dtype.name, "float32")
dataset = audio_dataset_utils.audio_dataset_from_directory(
directory,
batch_size=8,
output_sequence_length=30,
label_mode="categorical",
)
batch = next(iter(dataset))
self.assertLen(batch, 2)
self.assertEqual(batch[0].shape, (8, 30, 1))
self.assertEqual(batch[0].dtype.name, "float32")
self.assertEqual(batch[1].shape, (8, 2))
self.assertEqual(batch[1].dtype.name, "float32")
def test_static_shape_in_graph(self):
directory = self._prepare_directory(num_classes=2)
dataset = audio_dataset_utils.audio_dataset_from_directory(
directory, batch_size=8, output_sequence_length=30, label_mode="int"
)
test_case = self
@tf.function
def symbolic_fn(ds):
for x, _ in ds.take(1):
test_case.assertListEqual(x.shape.as_list(), [None, 30, None])
symbolic_fn(dataset)
def test_sample_count(self):
directory = self._prepare_directory(num_classes=4, count=15)
dataset = audio_dataset_utils.audio_dataset_from_directory(
directory, batch_size=8, output_sequence_length=30, label_mode=None
)
sample_count = 0
for batch in dataset:
sample_count += batch.shape[0]
self.assertEqual(sample_count, 15)
def test_audio_dataset_from_directory_multiclass(self):
directory = self._prepare_directory(num_classes=4, count=15)
dataset = audio_dataset_utils.audio_dataset_from_directory(
directory, batch_size=8, output_sequence_length=30, label_mode=None
)
batch = next(iter(dataset))
self.assertEqual(batch.shape, (8, 30, 1))
dataset = audio_dataset_utils.audio_dataset_from_directory(
directory, batch_size=8, output_sequence_length=30, label_mode=None
)
sample_count = 0
iterator = iter(dataset)
for batch in dataset:
sample_count += next(iterator).shape[0]
self.assertEqual(sample_count, 15)
dataset = audio_dataset_utils.audio_dataset_from_directory(
directory, batch_size=8, output_sequence_length=30, label_mode="int"
)
batch = next(iter(dataset))
self.assertLen(batch, 2)
self.assertEqual(batch[0].shape, (8, 30, 1))
self.assertEqual(batch[0].dtype.name, "float32")
self.assertEqual(batch[1].shape, (8,))
self.assertEqual(batch[1].dtype.name, "int32")
dataset = audio_dataset_utils.audio_dataset_from_directory(
directory,
batch_size=8,
output_sequence_length=30,
label_mode="categorical",
)
batch = next(iter(dataset))
self.assertLen(batch, 2)
self.assertEqual(batch[0].shape, (8, 30, 1))
self.assertEqual(batch[0].dtype.name, "float32")
self.assertEqual(batch[1].shape, (8, 4))
self.assertEqual(batch[1].dtype.name, "float32")
def test_audio_dataset_from_directory_validation_split(self):
directory = self._prepare_directory(num_classes=2, count=10)
dataset = audio_dataset_utils.audio_dataset_from_directory(
directory,
batch_size=10,
output_sequence_length=30,
validation_split=0.2,
subset="training",
seed=1337,
)
batch = next(iter(dataset))
self.assertLen(batch, 2)
self.assertEqual(batch[0].shape, (8, 30, 1))
dataset = audio_dataset_utils.audio_dataset_from_directory(
directory,
batch_size=10,
output_sequence_length=30,
validation_split=0.2,
subset="validation",
seed=1337,
)
batch = next(iter(dataset))
self.assertLen(batch, 2)
self.assertEqual(batch[0].shape, (2, 30, 1))
def test_audio_dataset_from_directory_manual_labels(self):
directory = self._prepare_directory(num_classes=2, count=2)
dataset = audio_dataset_utils.audio_dataset_from_directory(
directory,
batch_size=8,
output_sequence_length=30,
labels=[0, 1],
shuffle=False,
)
batch = next(iter(dataset))
self.assertLen(batch, 2)
self.assertAllClose(batch[1], [0, 1])
def test_audio_dataset_from_directory_follow_links(self):
directory = self._prepare_directory(
num_classes=2, count=25, nested_dirs=True
)
dataset = audio_dataset_utils.audio_dataset_from_directory(
directory,
batch_size=8,
output_sequence_length=30,
label_mode=None,
follow_links=True,
)
sample_count = 0
for batch in dataset:
sample_count += batch.shape[0]
self.assertEqual(sample_count, 25)
def test_audio_dataset_from_directory_no_audio(self):
directory = self._prepare_directory(num_classes=2, count=0)
with self.assertRaisesRegex(
ValueError, "No audio files found in directory"
):
_ = audio_dataset_utils.audio_dataset_from_directory(directory)
def test_audio_dataset_from_directory_ragged(self):
directory = self._prepare_directory(
num_classes=2, count=16, different_sequence_lengths=True
)
dataset = audio_dataset_utils.audio_dataset_from_directory(
directory, ragged=True, batch_size=8
)
batch = next(iter(dataset))
self.assertEqual(batch[0].shape.as_list(), [8, None, None])
def test_audio_dataset_from_directory_no_output_sequence_length_no_ragged(
self,
):
# This test case tests `audio_dataset_from_directory` when `ragged` and
# `output_sequence_length` are not passed while the input sequence
# lengths are different.
directory = self._prepare_directory(
num_classes=2, count=16, different_sequence_lengths=True
)
        # The sequence lengths differ and `output_sequence_length` is None,
        # so the dataset should pad each sequence to the length of the
        # longest sequence in its batch.
min_sequence_length, max_sequence_length = 10, 30
possible_sequence_lengths = [
i for i in range(min_sequence_length, max_sequence_length + 1)
]
dataset = audio_dataset_utils.audio_dataset_from_directory(
directory, batch_size=2
)
sequence_lengths = list(set([b.shape[1] for b, _ in dataset]))
for seq_len in sequence_lengths:
self.assertIn(seq_len, possible_sequence_lengths)
def test_audio_dataset_from_directory_no_output_sequence_length_same_lengths( # noqa: E501
self,
):
# This test case tests `audio_dataset_from_directory` when `ragged` and
# `output_sequence_length` are not passed while the input sequence
# lengths are the same
directory = self._prepare_directory(
num_classes=2, count=16, different_sequence_lengths=False
)
        # All sequence lengths are the same and `output_sequence_length` is
        # None, so every batch should share a single common sequence length
        # (no padding beyond the original length is needed).
dataset = audio_dataset_utils.audio_dataset_from_directory(
directory, batch_size=2
)
sequence_lengths = list(set([batch[0].shape[1] for batch in dataset]))
self.assertEqual(len(sequence_lengths), 1)
def test_audio_dataset_from_directory_errors(self):
directory = self._prepare_directory(num_classes=3, count=5)
with self.assertRaisesRegex(
ValueError, "`sampling_rate` should be higher than 0. Received:"
):
_ = audio_dataset_utils.audio_dataset_from_directory(
directory,
ragged=False,
output_sequence_length=10,
sampling_rate=-1,
)
with self.assertRaisesRegex(
ValueError,
"`sampling_rate` should have an integer value. Received:",
):
_ = audio_dataset_utils.audio_dataset_from_directory(
directory,
ragged=False,
output_sequence_length=10,
sampling_rate=1.2,
)
# Only run this test case when we don't have tensorflow_io.
try:
import tensorflow_io # noqa: F401
except ImportError:
with self.assertRaisesRegex(
ImportError,
"To use the argument `sampling_rate`.*tensorflow_io.*",
):
_ = audio_dataset_utils.audio_dataset_from_directory(
directory,
ragged=False,
output_sequence_length=10,
sampling_rate=44100,
)
with self.assertRaisesRegex(
ValueError, "Cannot set both `ragged` and `output_sequence_length`"
):
_ = audio_dataset_utils.audio_dataset_from_directory(
directory, ragged=True, output_sequence_length=30
)
with self.assertRaisesRegex(ValueError, "`labels` argument should be"):
_ = audio_dataset_utils.audio_dataset_from_directory(
directory, labels="other"
)
with self.assertRaisesRegex(
ValueError, "`label_mode` argument must be"
):
_ = audio_dataset_utils.audio_dataset_from_directory(
directory, label_mode="other"
)
with self.assertRaisesRegex(
ValueError, 'only pass `class_names` if `labels="inferred"`'
):
_ = audio_dataset_utils.audio_dataset_from_directory(
directory,
labels=[0, 0, 1, 1, 1],
class_names=["class_0", "class_1", "class_2"],
)
with self.assertRaisesRegex(
ValueError,
"Expected the lengths of `labels` to match the number of files",
):
_ = audio_dataset_utils.audio_dataset_from_directory(
directory, labels=[0, 0, 1, 1]
)
with self.assertRaisesRegex(
ValueError, "`class_names` passed did not match"
):
_ = audio_dataset_utils.audio_dataset_from_directory(
directory, class_names=["class_0", "wrong_class"]
)
with self.assertRaisesRegex(ValueError, "there must be exactly 2"):
_ = audio_dataset_utils.audio_dataset_from_directory(
directory, label_mode="binary"
)
with self.assertRaisesRegex(
ValueError, "`validation_split` must be between 0 and 1"
):
_ = audio_dataset_utils.audio_dataset_from_directory(
directory, validation_split=2
)
with self.assertRaisesRegex(
ValueError, '`subset` must be either "training",'
):
_ = audio_dataset_utils.audio_dataset_from_directory(
directory, validation_split=0.2, subset="other"
)
with self.assertRaisesRegex(
ValueError, "`validation_split` must be set"
):
_ = audio_dataset_utils.audio_dataset_from_directory(
directory, validation_split=0.0, subset="training"
)
with self.assertRaisesRegex(ValueError, "must provide a `seed`"):
_ = audio_dataset_utils.audio_dataset_from_directory(
directory, validation_split=0.2, subset="training"
)
def test_audio_dataset_from_directory_not_batched(self):
directory = self._prepare_directory(num_classes=2, count=2)
dataset = audio_dataset_utils.audio_dataset_from_directory(
directory,
batch_size=None,
output_sequence_length=30,
label_mode=None,
shuffle=False,
)
sample = next(iter(dataset))
self.assertEqual(len(sample.shape), 2)
| keras/keras/utils/audio_dataset_utils_test.py/0 | {
"file_path": "keras/keras/utils/audio_dataset_utils_test.py",
"repo_id": "keras",
"token_count": 7993
} | 197 |
"""Utilities related to model visualization."""
import os
import sys
from keras.api_export import keras_export
from keras.utils import io_utils
try:
# pydot-ng is a fork of pydot that is better maintained.
import pydot_ng as pydot
except ImportError:
# pydotplus is an improved version of pydot
try:
import pydotplus as pydot
except ImportError:
# Fall back on pydot if necessary.
try:
import pydot
except ImportError:
pydot = None
def check_pydot():
"""Returns True if PyDot is available."""
return pydot is not None
def check_graphviz():
"""Returns True if both PyDot and Graphviz are available."""
if not check_pydot():
return False
try:
# Attempt to create an image of a blank graph
# to check the pydot/graphviz installation.
pydot.Dot.create(pydot.Dot())
return True
except (OSError, pydot.InvocationException):
return False
def add_edge(dot, src, dst):
if not dot.get_edge(src, dst):
edge = pydot.Edge(src, dst)
edge.set("penwidth", "2")
dot.add_edge(edge)
def get_layer_activation_name(layer):
if hasattr(layer.activation, "name"):
activation_name = layer.activation.name
elif hasattr(layer.activation, "__name__"):
activation_name = layer.activation.__name__
else:
activation_name = str(layer.activation)
return activation_name
def make_layer_label(layer, **kwargs):
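    """Build the HTML-like pydot table label used to render a layer node."""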
class_name = layer.__class__.__name__
show_layer_names = kwargs.pop("show_layer_names")
show_layer_activations = kwargs.pop("show_layer_activations")
show_dtype = kwargs.pop("show_dtype")
show_shapes = kwargs.pop("show_shapes")
show_trainable = kwargs.pop("show_trainable")
if kwargs:
raise ValueError(f"Invalid kwargs: {kwargs}")
table = (
'<<table border="0" cellborder="1" bgcolor="black" cellpadding="10">'
)
colspan = max(
1, sum(int(x) for x in (show_dtype, show_shapes, show_trainable))
)
if show_layer_names:
table += (
f'<tr><td colspan="{colspan}" bgcolor="black">'
'<font point-size="16" color="white">'
f"<b>{layer.name}</b> ({class_name})"
"</font></td></tr>"
)
else:
table += (
f'<tr><td colspan="{colspan}" bgcolor="black">'
'<font point-size="16" color="white">'
f"<b>{class_name}</b>"
"</font></td></tr>"
)
if (
show_layer_activations
and hasattr(layer, "activation")
and layer.activation is not None
):
table += (
f'<tr><td bgcolor="white" colspan="{colspan}">'
'<font point-size="14">'
f"Activation: <b>{get_layer_activation_name(layer)}</b>"
"</font></td></tr>"
)
cols = []
if show_shapes:
shape = None
try:
shape = layer.output.shape
except ValueError:
pass
cols.append(
(
'<td bgcolor="white"><font point-size="14">'
f'Output shape: <b>{shape or "?"}</b>'
"</font></td>"
)
)
if show_dtype:
dtype = None
try:
dtype = layer.output.dtype
except ValueError:
pass
cols.append(
(
'<td bgcolor="white"><font point-size="14">'
f'Output dtype: <b>{dtype or "?"}</b>'
"</font></td>"
)
)
if show_trainable and hasattr(layer, "trainable") and layer.weights:
if layer.trainable:
cols.append(
(
'<td bgcolor="forestgreen">'
'<font point-size="14" color="white">'
"<b>Trainable</b></font></td>"
)
)
else:
cols.append(
(
'<td bgcolor="firebrick">'
'<font point-size="14" color="white">'
"<b>Non-trainable</b></font></td>"
)
)
if cols:
colspan = len(cols)
else:
colspan = 1
if cols:
table += "<tr>" + "".join(cols) + "</tr>"
table += "</table>>"
return table
def make_node(layer, **kwargs):
node = pydot.Node(str(id(layer)), label=make_layer_label(layer, **kwargs))
node.set("fontname", "Helvetica")
node.set("border", "0")
node.set("margin", "0")
return node
@keras_export("keras.utils.model_to_dot")
def model_to_dot(
model,
show_shapes=False,
show_dtype=False,
show_layer_names=True,
rankdir="TB",
expand_nested=False,
dpi=200,
subgraph=False,
show_layer_activations=False,
show_trainable=False,
**kwargs,
):
"""Convert a Keras model to dot format.
Args:
model: A Keras model instance.
show_shapes: whether to display shape information.
show_dtype: whether to display layer dtypes.
show_layer_names: whether to display layer names.
rankdir: `rankdir` argument passed to PyDot,
a string specifying the format of the plot: `"TB"`
creates a vertical plot; `"LR"` creates a horizontal plot.
expand_nested: whether to expand nested Functional models
into clusters.
dpi: Image resolution in dots per inch.
subgraph: whether to return a `pydot.Cluster` instance.
show_layer_activations: Display layer activations (only for layers that
have an `activation` property).
show_trainable: whether to display if a layer is trainable.
Returns:
A `pydot.Dot` instance representing the Keras model or
a `pydot.Cluster` instance representing nested model if
`subgraph=True`.
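    Example (an illustrative sketch; requires `pydot` to be installed):
    ```python
    inputs = keras.Input((3,))
    outputs = keras.layers.Dense(2)(inputs)
    model = keras.Model(inputs, outputs)
    dot_graph = keras.utils.model_to_dot(model, show_shapes=True)
    dot_graph.write("model.dot")
    ```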
"""
from keras.ops.function import make_node_key
if not model.built:
raise ValueError(
"This model has not yet been built. "
"Build the model first by calling `build()` or by calling "
"the model on a batch of data."
)
from keras.models import functional
from keras.models import sequential
# from keras.layers import Wrapper
if not check_pydot():
raise ImportError(
"You must install pydot (`pip install pydot`) for "
"model_to_dot to work."
)
if subgraph:
dot = pydot.Cluster(style="dashed", graph_name=model.name)
dot.set("label", model.name)
dot.set("labeljust", "l")
else:
dot = pydot.Dot()
dot.set("rankdir", rankdir)
dot.set("concentrate", True)
dot.set("dpi", dpi)
dot.set("splines", "ortho")
dot.set_node_defaults(shape="record")
if kwargs.pop("layer_range", None) is not None:
raise ValueError("Argument `layer_range` is no longer supported.")
if kwargs:
raise ValueError(f"Unrecognized keyword arguments: {kwargs}")
kwargs = {
"show_layer_names": show_layer_names,
"show_layer_activations": show_layer_activations,
"show_dtype": show_dtype,
"show_shapes": show_shapes,
"show_trainable": show_trainable,
}
if isinstance(model, sequential.Sequential):
# TODO
layers = model.layers
elif not isinstance(model, functional.Functional):
# We treat subclassed models as a single node.
node = make_node(model, **kwargs)
dot.add_node(node)
return dot
else:
layers = model._operations
# Create graph nodes.
sub_n_first_node = {}
sub_n_last_node = {}
for i, layer in enumerate(layers):
# Process nested functional models.
if expand_nested and isinstance(layer, functional.Functional):
submodel = model_to_dot(
layer,
show_shapes,
show_dtype,
show_layer_names,
rankdir,
expand_nested,
subgraph=True,
show_layer_activations=show_layer_activations,
show_trainable=show_trainable,
)
# sub_n : submodel
sub_n_nodes = submodel.get_nodes()
sub_n_first_node[layer.name] = sub_n_nodes[0]
sub_n_last_node[layer.name] = sub_n_nodes[-1]
dot.add_subgraph(submodel)
else:
node = make_node(layer, **kwargs)
dot.add_node(node)
# Connect nodes with edges.
# Sequential case.
if isinstance(model, sequential.Sequential):
for i in range(len(layers) - 1):
inbound_layer_id = str(id(layers[i]))
layer_id = str(id(layers[i + 1]))
add_edge(dot, inbound_layer_id, layer_id)
return dot
# Functional case.
for i, layer in enumerate(layers):
layer_id = str(id(layer))
for i, node in enumerate(layer._inbound_nodes):
node_key = make_node_key(layer, i)
if node_key in model._nodes:
for parent_node in node.parent_nodes:
inbound_layer = parent_node.operation
inbound_layer_id = str(id(inbound_layer))
if not expand_nested:
assert dot.get_node(inbound_layer_id)
assert dot.get_node(layer_id)
add_edge(dot, inbound_layer_id, layer_id)
else:
# if inbound_layer is not Functional
if not isinstance(inbound_layer, functional.Functional):
# if current layer is not Functional
if not isinstance(layer, functional.Functional):
assert dot.get_node(inbound_layer_id)
assert dot.get_node(layer_id)
add_edge(dot, inbound_layer_id, layer_id)
# if current layer is Functional
elif isinstance(layer, functional.Functional):
add_edge(
dot,
inbound_layer_id,
sub_n_first_node[layer.name].get_name(),
)
# if inbound_layer is Functional
elif isinstance(inbound_layer, functional.Functional):
name = sub_n_last_node[
inbound_layer.name
].get_name()
if isinstance(layer, functional.Functional):
output_name = sub_n_first_node[
layer.name
].get_name()
add_edge(dot, name, output_name)
else:
add_edge(dot, name, layer_id)
return dot
@keras_export("keras.utils.plot_model")
def plot_model(
model,
to_file="model.png",
show_shapes=False,
show_dtype=False,
show_layer_names=False,
rankdir="TB",
expand_nested=False,
dpi=200,
show_layer_activations=False,
show_trainable=False,
**kwargs,
):
"""Converts a Keras model to dot format and save to a file.
Example:
```python
inputs = ...
outputs = ...
model = keras.Model(inputs=inputs, outputs=outputs)
dot_img_file = '/tmp/model_1.png'
keras.utils.plot_model(model, to_file=dot_img_file, show_shapes=True)
```
Args:
model: A Keras model instance
to_file: File name of the plot image.
show_shapes: whether to display shape information.
show_dtype: whether to display layer dtypes.
show_layer_names: whether to display layer names.
rankdir: `rankdir` argument passed to PyDot,
a string specifying the format of the plot: `"TB"`
creates a vertical plot; `"LR"` creates a horizontal plot.
expand_nested: whether to expand nested Functional models
into clusters.
dpi: Image resolution in dots per inch.
show_layer_activations: Display layer activations (only for layers that
have an `activation` property).
show_trainable: whether to display if a layer is trainable.
Returns:
A Jupyter notebook Image object if Jupyter is installed.
This enables in-line display of the model plots in notebooks.
"""
if not model.built:
raise ValueError(
"This model has not yet been built. "
"Build the model first by calling `build()` or by calling "
"the model on a batch of data."
)
if not check_pydot():
message = (
"You must install pydot (`pip install pydot`) "
"for `plot_model` to work."
)
if "IPython.core.magics.namespace" in sys.modules:
# We don't raise an exception here in order to avoid crashing
# notebook tests where graphviz is not available.
io_utils.print_msg(message)
return
else:
raise ImportError(message)
if not check_graphviz():
message = (
"You must install graphviz "
"(see instructions at https://graphviz.gitlab.io/download/) "
"for `plot_model` to work."
)
if "IPython.core.magics.namespace" in sys.modules:
# We don't raise an exception here in order to avoid crashing
# notebook tests where graphviz is not available.
io_utils.print_msg(message)
return
else:
raise ImportError(message)
if kwargs.pop("layer_range", None) is not None:
raise ValueError("Argument `layer_range` is no longer supported.")
if kwargs:
raise ValueError(f"Unrecognized keyword arguments: {kwargs}")
dot = model_to_dot(
model,
show_shapes=show_shapes,
show_dtype=show_dtype,
show_layer_names=show_layer_names,
rankdir=rankdir,
expand_nested=expand_nested,
dpi=dpi,
show_layer_activations=show_layer_activations,
show_trainable=show_trainable,
)
to_file = str(to_file)
if dot is None:
return
_, extension = os.path.splitext(to_file)
if not extension:
extension = "png"
else:
extension = extension[1:]
# Save image to disk.
dot.write(to_file, format=extension)
# Return the image as a Jupyter Image object, to be displayed in-line.
# Note that we cannot easily detect whether the code is running in a
# notebook, and thus we always return the Image if Jupyter is available.
if extension != "pdf":
try:
from IPython import display
return display.Image(filename=to_file)
except ImportError:
pass
| keras/keras/utils/model_visualization.py/0 | {
"file_path": "keras/keras/utils/model_visualization.py",
"repo_id": "keras",
"token_count": 7355
} | 198 |
import numpy as np
import pytest
from absl.testing import parameterized
from keras import layers
from keras import models
from keras import testing
from keras.utils import summary_utils
class SummaryUtilsTest(testing.TestCase, parameterized.TestCase):
@parameterized.parameters([("adam",), (None,)])
@pytest.mark.requires_trainable_backend
def test_print_model_summary(self, optimizer):
inputs = layers.Input((2,))
outputs = layers.Dense(3)(inputs)
model = models.Model(inputs, outputs)
model.compile(optimizer=optimizer, loss="mse", metrics=["mse"])
if optimizer:
# Trigger the optimizer weights creation
model.fit(x=np.zeros([4, 2]), y=np.zeros([4, 3]))
summary_content = []
def print_to_variable(text, line_break=False):
summary_content.append(text)
try:
summary_utils.print_summary(model, print_fn=print_to_variable)
summary_content = "\n".join(summary_content)
if optimizer:
self.assertIn("Total params: 29", summary_content)
self.assertIn("Trainable params: 9", summary_content)
self.assertIn("Non-trainable params: 0", summary_content)
self.assertIn("Optimizer params: 20", summary_content)
else:
self.assertIn("Total params: 9", summary_content)
self.assertIn("Trainable params: 9", summary_content)
self.assertIn("Non-trainable params: 0", summary_content)
self.assertNotIn("Optimizer params", summary_content)
except ImportError:
pass
| keras/keras/utils/summary_utils_test.py/0 | {
"file_path": "keras/keras/utils/summary_utils_test.py",
"repo_id": "keras",
"token_count": 709
} | 199 |
#!/bin/bash
isort --check --sl -c tf_keras
if ! [ $? -eq 0 ]
then
echo "Please run \"sh shell/format.sh\" to format the code."
exit 1
fi
echo "no issues with isort"
flake8 tf_keras
if ! [ $? -eq 0 ]
then
echo "Please fix the code style issue."
exit 1
fi
echo "no issues with flake8"
black --check --line-length 80 tf_keras
if ! [ $? -eq 0 ]
then
echo "Please run \"sh shell/format.sh\" to format the code."
exit 1
fi
echo "no issues with black"
echo "linting success!"
| tf-keras/shell/lint.sh/0 | {
"file_path": "tf-keras/shell/lint.sh",
"repo_id": "tf-keras",
"token_count": 179
} | 200 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Integration tests for TF-Keras applications."""
import os
import tensorflow.compat.v2 as tf
from absl import flags
from absl.testing import parameterized
from tf_keras import backend
from tf_keras import utils
from tf_keras.applications import convnext
from tf_keras.applications import densenet
from tf_keras.applications import efficientnet
from tf_keras.applications import efficientnet_v2
from tf_keras.applications import inception_resnet_v2
from tf_keras.applications import inception_v3
from tf_keras.applications import mobilenet
from tf_keras.applications import mobilenet_v2
from tf_keras.applications import mobilenet_v3
from tf_keras.applications import nasnet
from tf_keras.applications import regnet
from tf_keras.applications import resnet
from tf_keras.applications import resnet_rs
from tf_keras.applications import resnet_v2
from tf_keras.applications import vgg16
from tf_keras.applications import vgg19
from tf_keras.applications import xception
from tf_keras.testing_infra import test_utils
_IMAGE_DATA_FORMAT = flags.DEFINE_string(
"image_data_format",
"channels_first",
"The image data format to use for the test.",
)
MODEL_LIST_NO_NASNET = [
(resnet.ResNet50, 2048),
(resnet.ResNet101, 2048),
(resnet.ResNet152, 2048),
(resnet_v2.ResNet50V2, 2048),
(resnet_v2.ResNet101V2, 2048),
(resnet_v2.ResNet152V2, 2048),
(vgg16.VGG16, 512),
(vgg19.VGG19, 512),
(xception.Xception, 2048),
(inception_v3.InceptionV3, 2048),
(inception_resnet_v2.InceptionResNetV2, 1536),
(mobilenet.MobileNet, 1024),
(mobilenet_v2.MobileNetV2, 1280),
(mobilenet_v3.MobileNetV3Small, 576),
(mobilenet_v3.MobileNetV3Large, 960),
(convnext.ConvNeXtTiny, 768),
(convnext.ConvNeXtSmall, 768),
(convnext.ConvNeXtBase, 1024),
(convnext.ConvNeXtLarge, 1536),
(convnext.ConvNeXtXLarge, 2048),
(densenet.DenseNet121, 1024),
(densenet.DenseNet169, 1664),
(densenet.DenseNet201, 1920),
(efficientnet.EfficientNetB0, 1280),
(efficientnet.EfficientNetB1, 1280),
(efficientnet.EfficientNetB2, 1408),
(efficientnet.EfficientNetB3, 1536),
(efficientnet.EfficientNetB4, 1792),
(efficientnet.EfficientNetB5, 2048),
(efficientnet.EfficientNetB6, 2304),
(efficientnet.EfficientNetB7, 2560),
(efficientnet_v2.EfficientNetV2B0, 1280),
(efficientnet_v2.EfficientNetV2B1, 1280),
(efficientnet_v2.EfficientNetV2B2, 1408),
(efficientnet_v2.EfficientNetV2B3, 1536),
(efficientnet_v2.EfficientNetV2S, 1280),
(efficientnet_v2.EfficientNetV2M, 1280),
(efficientnet_v2.EfficientNetV2L, 1280),
(regnet.RegNetX002, 368),
(regnet.RegNetX004, 384),
(regnet.RegNetX006, 528),
(regnet.RegNetX008, 672),
(regnet.RegNetX016, 912),
(regnet.RegNetX032, 1008),
(regnet.RegNetX040, 1360),
(regnet.RegNetX064, 1624),
(regnet.RegNetX080, 1920),
(regnet.RegNetX120, 2240),
(regnet.RegNetX160, 2048),
(regnet.RegNetX320, 2520),
(regnet.RegNetY002, 368),
(regnet.RegNetY004, 440),
(regnet.RegNetY006, 608),
(regnet.RegNetY008, 768),
(regnet.RegNetY016, 888),
(regnet.RegNetY032, 1512),
(regnet.RegNetY040, 1088),
(regnet.RegNetY064, 1296),
(regnet.RegNetY080, 2016),
(regnet.RegNetY120, 2240),
(regnet.RegNetY160, 3024),
(regnet.RegNetY320, 3712),
(resnet_rs.ResNetRS50, 2048),
(resnet_rs.ResNetRS101, 2048),
(resnet_rs.ResNetRS152, 2048),
(resnet_rs.ResNetRS200, 2048),
(resnet_rs.ResNetRS270, 2048),
(resnet_rs.ResNetRS350, 2048),
(resnet_rs.ResNetRS420, 2048),
]
NASNET_LIST = [
(nasnet.NASNetMobile, 1056),
(nasnet.NASNetLarge, 4032),
]
MODEL_LIST = MODEL_LIST_NO_NASNET + NASNET_LIST
MODELS_UNSUPPORTED_CHANNELS_FIRST = ["ConvNeXt", "NASNet", "RegNetX", "RegNetY"]
# Parameters for loading weights for MobileNetV3.
# (class, alpha, minimalistic, include_top)
MOBILENET_V3_FOR_WEIGHTS = [
(mobilenet_v3.MobileNetV3Large, 0.75, False, False),
(mobilenet_v3.MobileNetV3Large, 1.0, False, False),
(mobilenet_v3.MobileNetV3Large, 1.0, True, False),
(mobilenet_v3.MobileNetV3Large, 0.75, False, True),
(mobilenet_v3.MobileNetV3Large, 1.0, False, True),
(mobilenet_v3.MobileNetV3Large, 1.0, True, True),
(mobilenet_v3.MobileNetV3Small, 0.75, False, False),
(mobilenet_v3.MobileNetV3Small, 1.0, False, False),
(mobilenet_v3.MobileNetV3Small, 1.0, True, False),
(mobilenet_v3.MobileNetV3Small, 0.75, False, True),
(mobilenet_v3.MobileNetV3Small, 1.0, False, True),
(mobilenet_v3.MobileNetV3Small, 1.0, True, True),
]
class ApplicationsTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
self.original_image_data_format = backend.image_data_format()
def tearDown(self):
backend.set_image_data_format(self.original_image_data_format)
@classmethod
def assertShapeEqual(cls, shape1, shape2):
if len(shape1) != len(shape2):
raise AssertionError(
f"Shapes are different rank: {shape1} vs {shape2}"
)
for v1, v2 in zip(shape1, shape2):
if v1 != v2:
raise AssertionError(f"Shapes differ: {shape1} vs {shape2}")
def skip_if_invalid_image_data_format_for_model(
self, app, image_data_format
):
does_not_support_channels_first = any(
[
unsupported_name.lower() in app.__name__.lower()
for unsupported_name in MODELS_UNSUPPORTED_CHANNELS_FIRST
]
)
if (
image_data_format == "channels_first"
and does_not_support_channels_first
):
self.skipTest(
"{} does not support channels first".format(app.__name__)
)
@parameterized.parameters(*MODEL_LIST)
def test_application_base(self, app, _):
image_data_format = _IMAGE_DATA_FORMAT.value
self.skip_if_invalid_image_data_format_for_model(app, image_data_format)
backend.set_image_data_format(image_data_format)
# Can be instantiated with default arguments
model = app(weights=None)
# Can be serialized and deserialized
config = model.get_config()
if "ConvNeXt" in app.__name__:
custom_objects = {"LayerScale": convnext.LayerScale}
with utils.custom_object_scope(custom_objects):
reconstructed_model = model.__class__.from_config(config)
else:
reconstructed_model = model.__class__.from_config(config)
self.assertEqual(len(model.weights), len(reconstructed_model.weights))
backend.clear_session()
@parameterized.parameters(*MODEL_LIST)
def test_application_notop(self, app, last_dim):
image_data_format = _IMAGE_DATA_FORMAT.value
self.skip_if_invalid_image_data_format_for_model(app, image_data_format)
backend.set_image_data_format(image_data_format)
if image_data_format == "channels_first":
input_shape = (3, None, None)
correct_output_shape = (None, last_dim, None, None)
channels_axis = 1
else:
input_shape = (None, None, 3)
correct_output_shape = (None, None, None, last_dim)
channels_axis = -1
if "NASNet" in app.__name__:
only_check_last_dim = True
else:
only_check_last_dim = False
output_shape = app(
weights=None, include_top=False, input_shape=input_shape
).output_shape
if only_check_last_dim:
self.assertEqual(output_shape[channels_axis], last_dim)
else:
self.assertShapeEqual(output_shape, correct_output_shape)
backend.clear_session()
@parameterized.parameters(*MODEL_LIST)
def test_application_notop_custom_input_shape(self, app, last_dim):
image_data_format = _IMAGE_DATA_FORMAT.value
self.skip_if_invalid_image_data_format_for_model(app, image_data_format)
backend.set_image_data_format(image_data_format)
if image_data_format == "channels_first":
input_shape = (3, 224, 224)
channels_axis = 1
else:
input_shape = (224, 224, 3)
channels_axis = -1
output_shape = app(
weights="imagenet", include_top=False, input_shape=input_shape
).output_shape
self.assertEqual(output_shape[channels_axis], last_dim)
@parameterized.parameters(MODEL_LIST)
def test_application_pooling(self, app, last_dim):
output_shape = app(
weights=None, include_top=False, pooling="avg"
).output_shape
self.assertShapeEqual(output_shape, (None, last_dim))
@parameterized.parameters(MODEL_LIST)
def test_application_classifier_activation(self, app, _):
if "RegNet" in app.__name__:
self.skipTest("RegNet models do not support classifier activation")
model = app(
weights=None, include_top=True, classifier_activation="softmax"
)
last_layer_act = model.layers[-1].activation.__name__
self.assertEqual(last_layer_act, "softmax")
@parameterized.parameters(*MODEL_LIST)
def test_application_variable_input_channels(self, app, last_dim):
image_data_format = _IMAGE_DATA_FORMAT.value
self.skip_if_invalid_image_data_format_for_model(app, image_data_format)
backend.set_image_data_format(image_data_format)
if backend.image_data_format() == "channels_first":
input_shape = (1, None, None)
correct_output_shape = (None, last_dim, None, None)
else:
input_shape = (None, None, 1)
correct_output_shape = (None, None, None, last_dim)
output_shape = app(
weights=None, include_top=False, input_shape=input_shape
).output_shape
self.assertShapeEqual(output_shape, correct_output_shape)
backend.clear_session()
if backend.image_data_format() == "channels_first":
input_shape = (4, None, None)
else:
input_shape = (None, None, 4)
output_shape = app(
weights=None, include_top=False, input_shape=input_shape
).output_shape
self.assertShapeEqual(output_shape, correct_output_shape)
backend.clear_session()
@parameterized.parameters(*MOBILENET_V3_FOR_WEIGHTS)
def test_mobilenet_v3_load_weights(
self, mobilenet_class, alpha, minimalistic, include_top
):
mobilenet_class(
input_shape=(224, 224, 3),
weights="imagenet",
alpha=alpha,
minimalistic=minimalistic,
include_top=include_top,
)
@parameterized.parameters(*MODEL_LIST)
@test_utils.run_v2_only
def test_model_checkpoint(self, app, _):
image_data_format = _IMAGE_DATA_FORMAT.value
self.skip_if_invalid_image_data_format_for_model(app, image_data_format)
backend.set_image_data_format(image_data_format)
model = app(weights=None)
checkpoint = tf.train.Checkpoint(model=model)
checkpoint_manager = tf.train.CheckpointManager(
checkpoint,
directory=os.path.join(self.get_temp_dir(), model.name),
max_to_keep=1,
)
checkpoint_manager.save(checkpoint_number=1)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/applications/applications_test.py/0 | {
"file_path": "tf-keras/tf_keras/applications/applications_test.py",
"repo_id": "tf-keras",
"token_count": 5321
} | 201 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ResNet v2 models for TF-Keras.
Reference:
- [Identity Mappings in Deep Residual Networks](
https://arxiv.org/abs/1603.05027) (CVPR 2016)
"""
from tf_keras.applications import imagenet_utils
from tf_keras.applications import resnet
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export(
"keras.applications.resnet_v2.ResNet50V2", "keras.applications.ResNet50V2"
)
def ResNet50V2(
include_top=True,
weights="imagenet",
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation="softmax",
):
"""Instantiates the ResNet50V2 architecture."""
def stack_fn(x):
x = resnet.stack2(x, 64, 3, name="conv2")
x = resnet.stack2(x, 128, 4, name="conv3")
x = resnet.stack2(x, 256, 6, name="conv4")
return resnet.stack2(x, 512, 3, stride1=1, name="conv5")
return resnet.ResNet(
stack_fn,
True,
True,
"resnet50v2",
include_top,
weights,
input_tensor,
input_shape,
pooling,
classes,
classifier_activation=classifier_activation,
)
@keras_export(
"keras.applications.resnet_v2.ResNet101V2", "keras.applications.ResNet101V2"
)
def ResNet101V2(
include_top=True,
weights="imagenet",
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation="softmax",
):
"""Instantiates the ResNet101V2 architecture."""
def stack_fn(x):
x = resnet.stack2(x, 64, 3, name="conv2")
x = resnet.stack2(x, 128, 4, name="conv3")
x = resnet.stack2(x, 256, 23, name="conv4")
return resnet.stack2(x, 512, 3, stride1=1, name="conv5")
return resnet.ResNet(
stack_fn,
True,
True,
"resnet101v2",
include_top,
weights,
input_tensor,
input_shape,
pooling,
classes,
classifier_activation=classifier_activation,
)
@keras_export(
"keras.applications.resnet_v2.ResNet152V2", "keras.applications.ResNet152V2"
)
def ResNet152V2(
include_top=True,
weights="imagenet",
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation="softmax",
):
"""Instantiates the ResNet152V2 architecture."""
def stack_fn(x):
x = resnet.stack2(x, 64, 3, name="conv2")
x = resnet.stack2(x, 128, 8, name="conv3")
x = resnet.stack2(x, 256, 36, name="conv4")
return resnet.stack2(x, 512, 3, stride1=1, name="conv5")
return resnet.ResNet(
stack_fn,
True,
True,
"resnet152v2",
include_top,
weights,
input_tensor,
input_shape,
pooling,
classes,
classifier_activation=classifier_activation,
)
@keras_export("keras.applications.resnet_v2.preprocess_input")
def preprocess_input(x, data_format=None):
return imagenet_utils.preprocess_input(
x, data_format=data_format, mode="tf"
)
@keras_export("keras.applications.resnet_v2.decode_predictions")
def decode_predictions(preds, top=5):
return imagenet_utils.decode_predictions(preds, top=top)
preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
mode="",
ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TF,
error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC,
)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
DOC = """
Reference:
- [Identity Mappings in Deep Residual Networks](
https://arxiv.org/abs/1603.05027) (CVPR 2016)
For image classification use cases, see
[this page for detailed examples](
https://keras.io/api/applications/#usage-examples-for-image-classification-models).
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](
https://keras.io/guides/transfer_learning/).
Note: each TF-Keras Application expects a specific kind of input
preprocessing. For ResNetV2, call
`tf.keras.applications.resnet_v2.preprocess_input` on your inputs before
passing them to the model. `resnet_v2.preprocess_input` will scale input
pixels between -1 and 1.
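  Usage sketch (illustrative; shown here for ResNet50V2, the other variants
  work the same way):
  ```python
  model = tf.keras.applications.ResNet50V2(weights=None)
  images = tf.random.uniform((1, 224, 224, 3), maxval=255.0)
  preds = model(tf.keras.applications.resnet_v2.preprocess_input(images))
  ```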
Args:
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional TF-Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `'channels_last'` data format)
or `(3, 224, 224)` (with `'channels_first'` data format).
It should have exactly 3 inputs channels,
and width and height should be no smaller than 32.
E.g. `(200, 200, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
When loading pretrained weights, `classifier_activation` can only
be `None` or `"softmax"`.
Returns:
A `keras.Model` instance.
"""
setattr(ResNet50V2, "__doc__", ResNet50V2.__doc__ + DOC)
setattr(ResNet101V2, "__doc__", ResNet101V2.__doc__ + DOC)
setattr(ResNet152V2, "__doc__", ResNet152V2.__doc__ + DOC)
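# --- Hedged usage sketch (not part of the original module) -------------------
# DOC above states that inputs must go through `resnet_v2.preprocess_input`
# (scaling pixels to [-1, 1]) before being passed to a ResNetV2 model. The
# helper below illustrates the intended round trip under the assumption of a
# TF 2.x install where `tf.keras.utils.load_img` is available; the image path
# is a placeholder and the function is never called here.
def _example_resnet_v2_inference(image_path="elephant.jpg"):
    import numpy as np
    import tensorflow as tf

    img = tf.keras.utils.load_img(image_path, target_size=(224, 224))
    x = np.expand_dims(tf.keras.utils.img_to_array(img), axis=0)
    x = tf.keras.applications.resnet_v2.preprocess_input(x)  # -> [-1, 1]
    model = tf.keras.applications.ResNet50V2(weights="imagenet")
    preds = model.predict(x)
    return tf.keras.applications.resnet_v2.decode_predictions(preds, top=5)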
| tf-keras/tf_keras/applications/resnet_v2.py/0 | {
"file_path": "tf-keras/tf_keras/applications/resnet_v2.py",
"repo_id": "tf-keras",
"token_count": 2759
} | 202 |
# Description:
# Implementation of TF-Keras benchmarks.
load("@org_keras//tf_keras:tf_keras.bzl", "cuda_py_test")
package(
# copybara:uncomment default_applicable_licenses = ["//tf_keras:license"],
default_visibility = ["//visibility:public"],
licenses = ["notice"],
)
# To run CPU benchmarks:
# bazel run -c opt benchmarks_test -- --benchmarks=.
# To run GPU benchmarks:
# bazel run --config=cuda -c opt --copt="-mavx" benchmarks_test -- \
# --benchmarks=.
# To run a subset of benchmarks, use the --benchmarks flag.
# --benchmarks: the list of benchmarks to run. The specified value is interpreted
# as a regular expression and any benchmark whose name contains a partial match
# to the regular expression is executed.
# e.g. --benchmarks=".*lstm*." will run all LSTM-layer-related benchmarks.
COMMON_TAGS = [
"no_pip", # b/161253163
"no_windows", # b/160628318
]
cuda_py_test(
name = "bidirectional_lstm_benchmark_test",
srcs = ["bidirectional_lstm_benchmark_test.py"],
python_version = "PY3",
tags = COMMON_TAGS,
deps = [
"//:expect_tensorflow_installed",
"//tf_keras/api:tf_keras_api",
"//tf_keras/benchmarks:benchmark_util",
"//tf_keras/benchmarks:profiler_lib",
],
)
cuda_py_test(
name = "text_classification_transformer_benchmark_test",
srcs = ["text_classification_transformer_benchmark_test.py"],
python_version = "PY3",
tags = COMMON_TAGS,
deps = [
"//:expect_tensorflow_installed",
"//tf_keras/api:tf_keras_api",
"//tf_keras/benchmarks:benchmark_util",
"//tf_keras/benchmarks:profiler_lib",
],
)
cuda_py_test(
name = "antirectifier_benchmark_test",
srcs = ["antirectifier_benchmark_test.py"],
python_version = "PY3",
tags = COMMON_TAGS,
deps = [
"//:expect_tensorflow_installed",
"//tf_keras/api:tf_keras_api",
"//tf_keras/benchmarks:benchmark_util",
"//tf_keras/benchmarks:profiler_lib",
],
)
cuda_py_test(
name = "mnist_conv_benchmark_test",
srcs = ["mnist_conv_benchmark_test.py"],
python_version = "PY3",
tags = COMMON_TAGS,
deps = [
"//:expect_numpy_installed",
"//:expect_tensorflow_installed",
"//tf_keras/api:tf_keras_api",
"//tf_keras/benchmarks:benchmark_util",
"//tf_keras/benchmarks:profiler_lib",
],
)
cuda_py_test(
name = "mnist_hierarchical_rnn_benchmark_test",
srcs = ["mnist_hierarchical_rnn_benchmark_test.py"],
python_version = "PY3",
tags = COMMON_TAGS,
deps = [
"//:expect_tensorflow_installed",
"//tf_keras/api:tf_keras_api",
"//tf_keras/benchmarks:benchmark_util",
"//tf_keras/benchmarks:profiler_lib",
],
)
cuda_py_test(
name = "mnist_irnn_benchmark_test",
srcs = ["mnist_irnn_benchmark_test.py"],
python_version = "PY3",
tags = COMMON_TAGS,
deps = [
"//:expect_tensorflow_installed",
"//tf_keras/api:tf_keras_api",
"//tf_keras/benchmarks:benchmark_util",
"//tf_keras/benchmarks:profiler_lib",
],
)
cuda_py_test(
name = "reuters_mlp_benchmark_test",
srcs = ["reuters_mlp_benchmark_test.py"],
python_version = "PY3",
tags = COMMON_TAGS,
deps = [
"//:expect_numpy_installed",
"//:expect_tensorflow_installed",
"//tf_keras/api:tf_keras_api",
"//tf_keras/benchmarks:benchmark_util",
"//tf_keras/benchmarks:profiler_lib",
],
)
cuda_py_test(
name = "cifar10_cnn_benchmark_test",
srcs = ["cifar10_cnn_benchmark_test.py"],
python_version = "PY3",
tags = COMMON_TAGS,
deps = [
"//:expect_tensorflow_installed",
"//tf_keras/api:tf_keras_api",
"//tf_keras/benchmarks:benchmark_util",
"//tf_keras/benchmarks:profiler_lib",
],
)
cuda_py_test(
name = "mnist_conv_custom_training_benchmark_test",
srcs = ["mnist_conv_custom_training_benchmark_test.py"],
python_version = "PY3",
tags = COMMON_TAGS,
deps = [
"//:expect_tensorflow_installed",
"//tf_keras/api:tf_keras_api",
"//tf_keras/benchmarks:benchmark_util",
"//tf_keras/benchmarks:distribution_util",
"//tf_keras/benchmarks:profiler_lib",
],
)
| tf-keras/tf_keras/benchmarks/keras_examples_benchmarks/BUILD/0 | {
"file_path": "tf-keras/tf_keras/benchmarks/keras_examples_benchmarks/BUILD",
"repo_id": "tf-keras",
"token_count": 1997
} | 203 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Benchmarks on TF-Keras components with different TF-Keras model types."""
import time
import numpy as np
import tensorflow.compat.v2 as tf
import tf_keras as keras
# isort: off
from tensorflow.python.eager import context
from tensorflow.python.eager.context import get_executor
class SubclassedKerasModel(keras.Model):
def __init__(self, initializer="ones"):
super().__init__()
self.layer_a = keras.layers.Dense(
64, kernel_initializer=initializer, bias_initializer="zeros"
)
self.layer_b = keras.layers.Dense(
128, kernel_initializer=initializer, bias_initializer="zeros"
)
self.layer_c = keras.layers.Dense(
256, kernel_initializer=initializer, bias_initializer="zeros"
)
self.layer_d = keras.layers.Dense(
256, kernel_initializer=initializer, bias_initializer="zeros"
)
self.layer_e = keras.layers.Dense(
10, kernel_initializer=initializer, bias_initializer="zeros"
)
def call(self, x):
x = self.layer_a(x)
x = self.layer_b(x)
x = self.layer_c(x)
x = self.layer_d(x)
return self.layer_e(x)
def make_keras_model(initializer="ones"):
model_input = keras.Input(shape=(10,))
x = keras.layers.Dense(
64, kernel_initializer=initializer, bias_initializer="zeros"
)(model_input)
x = keras.layers.Dense(
128, kernel_initializer=initializer, bias_initializer="zeros"
)(x)
x = keras.layers.Dense(
256, kernel_initializer=initializer, bias_initializer="zeros"
)(x)
x = keras.layers.Dense(
256, kernel_initializer=initializer, bias_initializer="zeros"
)(x)
x = keras.layers.Dense(
10, kernel_initializer=initializer, bias_initializer="zeros"
)(x)
return keras.Model(inputs=model_input, outputs=x)
def make_sequential_keras_model(initializer="ones"):
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
64,
kernel_initializer=initializer,
bias_initializer="zeros",
input_shape=(10,),
)
)
model.add(
keras.layers.Dense(
128, kernel_initializer=initializer, bias_initializer="zeros"
)
)
model.add(
keras.layers.Dense(
256, kernel_initializer=initializer, bias_initializer="zeros"
)
)
model.add(
keras.layers.Dense(
256, kernel_initializer=initializer, bias_initializer="zeros"
)
)
model.add(
keras.layers.Dense(
10, kernel_initializer=initializer, bias_initializer="zeros"
)
)
return model
def run_benchmark(func, num_iters, execution_mode=None):
with context.execution_mode(execution_mode):
# call func to warm up
func()
if execution_mode == context.ASYNC:
get_executor().wait()
start = time.time()
for _ in range(num_iters):
func()
if execution_mode == context.ASYNC:
get_executor().wait()
end = time.time()
return end - start
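# Hedged usage sketch (not part of the original benchmarks): `run_benchmark`
# warms `func` up once, then times `num_iters` calls, waiting on the async
# executor when an async execution mode is requested. The helper below is
# illustrative only and is never invoked; the tensor size and iteration count
# are arbitrary.
def _example_run_benchmark_usage():
    data = tf.random.uniform((64, 64))
    total_seconds = run_benchmark(lambda: tf.matmul(data, data), num_iters=100)
    return total_seconds * 1e6 / 100  # mean microseconds per call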
class KerasComponentsBenchmarks(tf.test.Benchmark):
def _run(self, func, num_iters, execution_mode=None):
total_time = run_benchmark(func, num_iters, execution_mode)
mean_us = total_time * 1e6 / num_iters
self.report_benchmark(
iters=num_iters,
wall_time=mean_us,
metrics=[
{
"name": "exp_per_sec",
"value": float(f"{num_iters / total_time:.3f}"),
},
{
"name": "us_per_exp",
"value": float(f"{total_time * 1000000.0 / num_iters:.3f}"),
},
],
)
def benchmark_keras_model_subclassed(self):
model = SubclassedKerasModel()
data = tf.random.uniform((10, 10))
func = lambda: model(data)
# First call is more expensive (creates variables etc.), discount that.
func()
# The whole point of this test is to contrast subclassing with
# the functional style of keras model building, so validate that
# the models are equivalent.
assert np.equal(func(), make_keras_model()(data)).all()
self._run(func, 30000)
def benchmark_keras_model_functional(self):
model = make_keras_model()
data = tf.random.uniform((10, 10))
func = lambda: model(data)
# Symmetry with benchmark_keras_model_subclassed
func()
assert np.equal(func(), SubclassedKerasModel()(data)).all()
self._run(func, 30000)
def benchmark_keras_model_sequential(self):
model = make_sequential_keras_model()
data = tf.random.uniform((10, 10))
func = lambda: model(data)
# Symmetry with benchmark_keras_model_functional
func()
assert np.equal(func(), make_keras_model()(data)).all()
self._run(func, 30000)
def _benchmark_keras_model_fit(self, model, run_eagerly=False):
data = tf.random.uniform((10, 10), minval=-1, maxval=1)
labels = tf.random.uniform((10, 10), minval=-1, maxval=1)
dataset = tf.data.Dataset.from_tensors((data, labels)).repeat()
model.compile("sgd", loss="mse", run_eagerly=run_eagerly)
func = lambda: model.fit(
dataset, epochs=1, steps_per_epoch=1000, verbose=0
)
# First call is more expensive (creates variables etc.), discount that.
model.fit(dataset, epochs=1, steps_per_epoch=1, verbose=0)
self._run(func, 1)
def _benchmark_keras_model_evaluate(self, model, run_eagerly=False):
data = tf.random.uniform((10, 10), minval=-1, maxval=1)
labels = tf.random.uniform((10, 10), minval=-1, maxval=1)
dataset = tf.data.Dataset.from_tensors((data, labels)).repeat()
model.compile("sgd", loss="mse", run_eagerly=run_eagerly)
func = lambda: model.evaluate(dataset, steps=1000, verbose=0)
# First call is more expensive (creates variables etc.), discount that.
model.evaluate(dataset, steps=1, verbose=0)
self._run(func, 1)
def _benchmark_keras_model_predict(self, model, run_eagerly=False):
data = tf.random.uniform((10, 10), minval=-1, maxval=1)
dataset = tf.data.Dataset.from_tensors(data).repeat()
model.compile("sgd", loss="mse", run_eagerly=run_eagerly)
func = lambda: model.predict(dataset, steps=1000, verbose=0)
# First call is more expensive (creates variables etc.), discount that.
model.predict(dataset, steps=1, verbose=0)
self._run(func, 1)
def benchmark_keras_model_subclassed_fit(self):
model = SubclassedKerasModel(initializer="glorot_uniform")
self._benchmark_keras_model_fit(model)
def benchmark_keras_model_subclassed_fit_graph_mode(self):
with context.graph_mode():
model = SubclassedKerasModel(initializer="glorot_uniform")
self._benchmark_keras_model_fit(model)
def benchmark_keras_model_subclassed_fit_run_model_eagerly(self):
model = SubclassedKerasModel(initializer="glorot_uniform")
self._benchmark_keras_model_fit(model, run_eagerly=True)
def benchmark_keras_model_functional_fit(self):
model = make_keras_model(initializer="glorot_uniform")
self._benchmark_keras_model_fit(model)
def benchmark_keras_model_functional_fit_graph_mode(self):
with context.graph_mode():
model = make_keras_model(initializer="glorot_uniform")
self._benchmark_keras_model_fit(model)
def benchmark_keras_model_functional_fit_graph_mode_with_profiler(self):
tf.profiler.experimental.start("")
with context.graph_mode():
model = make_keras_model(initializer="glorot_uniform")
self._benchmark_keras_model_fit(model)
tf.profiler.experimental.stop(save=False)
def benchmark_keras_model_functional_fit_run_model_eagerly(self):
model = make_keras_model(initializer="glorot_uniform")
self._benchmark_keras_model_fit(model, run_eagerly=True)
def benchmark_keras_model_functional_fit_run_model_eagerly_with_profiler(
self,
):
tf.profiler.experimental.start("")
model = make_keras_model(initializer="glorot_uniform")
self._benchmark_keras_model_fit(model, run_eagerly=True)
tf.profiler.experimental.stop(save=False)
def benchmark_keras_model_sequential_fit(self):
model = make_sequential_keras_model(initializer="glorot_uniform")
self._benchmark_keras_model_fit(model)
def benchmark_keras_model_sequential_fit_graph_mode(self):
with context.graph_mode():
model = make_sequential_keras_model(initializer="glorot_uniform")
self._benchmark_keras_model_fit(model)
def benchmark_keras_model_sequential_fit_run_model_eagerly(self):
model = make_sequential_keras_model(initializer="glorot_uniform")
self._benchmark_keras_model_fit(model, run_eagerly=True)
def benchmark_keras_model_subclassed_evaluate(self):
model = SubclassedKerasModel(initializer="glorot_uniform")
self._benchmark_keras_model_evaluate(model)
def benchmark_keras_model_subclassed_evaluate_run_model_eagerly(self):
model = SubclassedKerasModel(initializer="glorot_uniform")
self._benchmark_keras_model_evaluate(model, run_eagerly=True)
def benchmark_keras_model_functional_evaluate(self):
model = make_keras_model(initializer="glorot_uniform")
self._benchmark_keras_model_evaluate(model)
def benchmark_keras_model_functional_evaluate_run_model_eagerly(self):
model = make_keras_model(initializer="glorot_uniform")
self._benchmark_keras_model_evaluate(model, run_eagerly=True)
def benchmark_keras_model_sequential_evaluate(self):
model = make_sequential_keras_model(initializer="glorot_uniform")
self._benchmark_keras_model_evaluate(model)
def benchmark_keras_model_sequential_evaluate_run_model_eagerly(self):
model = make_sequential_keras_model(initializer="glorot_uniform")
self._benchmark_keras_model_evaluate(model, run_eagerly=True)
def benchmark_keras_model_subclassed_predict(self):
model = SubclassedKerasModel(initializer="glorot_uniform")
self._benchmark_keras_model_predict(model)
def benchmark_keras_model_subclassed_predict_run_model_eagerly(self):
model = SubclassedKerasModel(initializer="glorot_uniform")
self._benchmark_keras_model_predict(model, run_eagerly=True)
def benchmark_keras_model_functional_predict(self):
model = make_keras_model(initializer="glorot_uniform")
self._benchmark_keras_model_predict(model)
def benchmark_keras_model_functional_predict_run_model_eagerly(self):
model = make_keras_model(initializer="glorot_uniform")
self._benchmark_keras_model_predict(model, run_eagerly=True)
def benchmark_keras_model_sequential_predict(self):
model = make_sequential_keras_model(initializer="glorot_uniform")
self._benchmark_keras_model_predict(model)
def benchmark_keras_model_sequential_predict_run_model_eagerly(self):
model = make_sequential_keras_model(initializer="glorot_uniform")
self._benchmark_keras_model_predict(model, run_eagerly=True)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/benchmarks/model_components_benchmarks_test.py/0 | {
"file_path": "tf-keras/tf_keras/benchmarks/model_components_benchmarks_test.py",
"repo_id": "tf-keras",
"token_count": 5294
} | 204 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TF-Keras callbacks."""
import os
import shutil
import tempfile
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
from tf_keras import callbacks
from tf_keras import callbacks_v1
from tf_keras import layers
from tf_keras.engine import input_layer
from tf_keras.engine import sequential
from tf_keras.engine import training
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
from tf_keras.utils import np_utils
TRAIN_SAMPLES = 10
TEST_SAMPLES = 10
NUM_CLASSES = 2
INPUT_DIM = 3
NUM_HIDDEN = 5
BATCH_SIZE = 5
class TestTensorBoardV1(tf.test.TestCase, parameterized.TestCase):
def test_TensorBoard(self):
np.random.seed(1337)
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
(x_train, y_train), (x_test, y_test) = test_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES,
)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
def data_generator(train):
if train:
max_batch_index = len(x_train) // BATCH_SIZE
else:
max_batch_index = len(x_test) // BATCH_SIZE
i = 0
while 1:
if train:
yield (
x_train[i * BATCH_SIZE : (i + 1) * BATCH_SIZE],
y_train[i * BATCH_SIZE : (i + 1) * BATCH_SIZE],
)
else:
yield (
x_test[i * BATCH_SIZE : (i + 1) * BATCH_SIZE],
y_test[i * BATCH_SIZE : (i + 1) * BATCH_SIZE],
)
i += 1
i %= max_batch_index
# case: Sequential
with tf.Graph().as_default(), self.cached_session():
model = sequential.Sequential()
model.add(
layers.Dense(NUM_HIDDEN, input_dim=INPUT_DIM, activation="relu")
)
# non_trainable_weights: moving_variance, moving_mean
model.add(layers.BatchNormalization())
model.add(layers.Dense(NUM_CLASSES, activation="softmax"))
model.compile(
loss="categorical_crossentropy",
optimizer="sgd",
metrics=["accuracy"],
)
tsb = callbacks_v1.TensorBoard(
log_dir=temp_dir,
histogram_freq=1,
write_images=True,
write_grads=True,
batch_size=5,
)
cbks = [tsb]
# fit with validation data
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=3,
verbose=0,
)
# fit with validation data and accuracy
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0,
)
# fit generator with validation data
model.fit_generator(
data_generator(True),
len(x_train),
epochs=2,
validation_data=(x_test, y_test),
callbacks=cbks,
verbose=0,
)
# fit generator without validation data
# histogram_freq must be zero
tsb.histogram_freq = 0
model.fit_generator(
data_generator(True),
len(x_train),
epochs=2,
callbacks=cbks,
verbose=0,
)
# fit generator with validation data and accuracy
tsb.histogram_freq = 1
model.fit_generator(
data_generator(True),
len(x_train),
epochs=2,
validation_data=(x_test, y_test),
callbacks=cbks,
verbose=0,
)
# fit generator without validation data and accuracy
tsb.histogram_freq = 0
model.fit_generator(
data_generator(True), len(x_train), epochs=2, callbacks=cbks
)
assert os.path.exists(temp_dir)
def test_TensorBoard_multi_input_output(self):
np.random.seed(1337)
tmpdir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)
with tf.Graph().as_default(), self.cached_session():
filepath = os.path.join(tmpdir, "logs")
(x_train, y_train), (x_test, y_test) = test_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES,
)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
def data_generator(train):
if train:
max_batch_index = len(x_train) // BATCH_SIZE
else:
max_batch_index = len(x_test) // BATCH_SIZE
i = 0
while 1:
if train:
# simulate multi-input/output models
yield (
[x_train[i * BATCH_SIZE : (i + 1) * BATCH_SIZE]]
* 2,
[y_train[i * BATCH_SIZE : (i + 1) * BATCH_SIZE]]
* 2,
)
else:
yield (
[x_test[i * BATCH_SIZE : (i + 1) * BATCH_SIZE]] * 2,
[y_test[i * BATCH_SIZE : (i + 1) * BATCH_SIZE]] * 2,
)
i += 1
i %= max_batch_index
inp1 = input_layer.Input((INPUT_DIM,))
inp2 = input_layer.Input((INPUT_DIM,))
inp = layers.add([inp1, inp2])
hidden = layers.Dense(2, activation="relu")(inp)
hidden = layers.Dropout(0.1)(hidden)
output1 = layers.Dense(NUM_CLASSES, activation="softmax")(hidden)
output2 = layers.Dense(NUM_CLASSES, activation="softmax")(hidden)
model = training.Model([inp1, inp2], [output1, output2])
model.compile(
loss="categorical_crossentropy",
optimizer="sgd",
metrics=["accuracy"],
)
# we must generate new callbacks for each test, as they aren't
# stateless
def callbacks_factory(histogram_freq):
return [
callbacks_v1.TensorBoard(
log_dir=filepath,
histogram_freq=histogram_freq,
write_images=True,
write_grads=True,
batch_size=5,
)
]
# fit without validation data
model.fit(
[x_train] * 2,
[y_train] * 2,
batch_size=BATCH_SIZE,
callbacks=callbacks_factory(histogram_freq=0),
epochs=3,
)
# fit with validation data and accuracy
model.fit(
[x_train] * 2,
[y_train] * 2,
batch_size=BATCH_SIZE,
validation_data=([x_test] * 2, [y_test] * 2),
callbacks=callbacks_factory(histogram_freq=1),
epochs=2,
)
# fit generator without validation data
model.fit_generator(
data_generator(True),
len(x_train),
epochs=2,
callbacks=callbacks_factory(histogram_freq=0),
)
# fit generator with validation data and accuracy
model.fit_generator(
data_generator(True),
len(x_train),
epochs=2,
validation_data=([x_test] * 2, [y_test] * 2),
callbacks=callbacks_factory(histogram_freq=1),
)
assert os.path.isdir(filepath)
def test_Tensorboard_histogram_summaries_in_test_function(self):
class FileWriterStub:
def __init__(self, logdir, graph=None):
self.logdir = logdir
self.graph = graph
self.steps_seen = []
def add_summary(self, summary, global_step):
summary_obj = tf.compat.v1.Summary()
# ensure a valid Summary proto is being sent
if isinstance(summary, bytes):
summary_obj.ParseFromString(summary)
else:
assert isinstance(summary, tf.compat.v1.Summary)
summary_obj = summary
# keep track of steps seen for the merged_summary op,
# which contains the histogram summaries
if len(summary_obj.value) > 1:
self.steps_seen.append(global_step)
def flush(self):
pass
def close(self):
pass
def _init_writer(obj, _):
obj.writer = FileWriterStub(obj.log_dir)
np.random.seed(1337)
tmpdir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)
(x_train, y_train), (x_test, y_test) = test_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES,
)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
with tf.Graph().as_default(), self.cached_session():
model = sequential.Sequential()
model.add(
layers.Dense(NUM_HIDDEN, input_dim=INPUT_DIM, activation="relu")
)
# non_trainable_weights: moving_variance, moving_mean
model.add(layers.BatchNormalization())
model.add(layers.Dense(NUM_CLASSES, activation="softmax"))
model.compile(
loss="categorical_crossentropy",
optimizer="sgd",
metrics=["accuracy"],
)
callbacks_v1.TensorBoard._init_writer = _init_writer
tsb = callbacks_v1.TensorBoard(
log_dir=tmpdir,
histogram_freq=1,
write_images=True,
write_grads=True,
batch_size=5,
)
cbks = [tsb]
# fit with validation data
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=3,
verbose=0,
)
self.assertAllEqual(tsb.writer.steps_seen, [0, 1, 2, 3, 4, 5])
def test_Tensorboard_histogram_summaries_with_generator(self):
np.random.seed(1337)
tmpdir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)
def generator():
x = np.random.randn(10, 100).astype(np.float32)
y = np.random.randn(10, 10).astype(np.float32)
while True:
yield x, y
with tf.Graph().as_default(), self.cached_session():
model = test_utils.get_small_sequential_mlp(
num_hidden=10, num_classes=10, input_dim=100
)
model.compile(
loss="categorical_crossentropy",
optimizer="sgd",
metrics=["accuracy"],
)
tsb = callbacks_v1.TensorBoard(
log_dir=tmpdir,
histogram_freq=1,
write_images=True,
write_grads=True,
batch_size=5,
)
cbks = [tsb]
# fit with validation generator
model.fit_generator(
generator(),
steps_per_epoch=2,
epochs=2,
validation_data=generator(),
validation_steps=2,
callbacks=cbks,
verbose=0,
)
with self.assertRaises(ValueError):
# fit with validation generator but no
# validation_steps
model.fit_generator(
generator(),
steps_per_epoch=2,
epochs=2,
validation_data=generator(),
callbacks=cbks,
verbose=0,
)
self.assertTrue(os.path.exists(tmpdir))
def test_TensorBoard_with_ReduceLROnPlateau(self):
with self.cached_session():
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
(x_train, y_train), (x_test, y_test) = test_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES,
)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = test_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN,
num_classes=NUM_CLASSES,
input_dim=INPUT_DIM,
)
model.compile(
loss="binary_crossentropy",
optimizer="sgd",
metrics=["accuracy"],
)
cbks = [
callbacks.ReduceLROnPlateau(
monitor="val_loss", factor=0.5, patience=4, verbose=1
),
callbacks_v1.TensorBoard(log_dir=temp_dir),
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0,
)
assert os.path.exists(temp_dir)
def test_Tensorboard_batch_logging(self):
class FileWriterStub:
def __init__(self, logdir, graph=None):
self.logdir = logdir
self.graph = graph
self.batches_logged = []
self.summary_values = []
self.summary_tags = []
def add_summary(self, summary, step):
self.summary_values.append(summary.value[0].simple_value)
self.summary_tags.append(summary.value[0].tag)
self.batches_logged.append(step)
def flush(self):
pass
def close(self):
pass
with tf.Graph().as_default():
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
tb_cbk = callbacks_v1.TensorBoard(temp_dir, update_freq="batch")
tb_cbk.writer = FileWriterStub(temp_dir)
for batch in range(5):
tb_cbk.on_batch_end(batch, {"acc": batch})
self.assertEqual(tb_cbk.writer.batches_logged, [0, 1, 2, 3, 4])
self.assertEqual(
tb_cbk.writer.summary_values, [0.0, 1.0, 2.0, 3.0, 4.0]
)
self.assertEqual(tb_cbk.writer.summary_tags, ["batch_acc"] * 5)
def test_Tensorboard_epoch_and_batch_logging(self):
class FileWriterStub:
def __init__(self, logdir, graph=None):
self.logdir = logdir
self.graph = graph
def add_summary(self, summary, step):
if "batch_" in summary.value[0].tag:
self.batch_summary = (step, summary)
elif "epoch_" in summary.value[0].tag:
self.epoch_summary = (step, summary)
def flush(self):
pass
def close(self):
pass
with tf.Graph().as_default():
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
tb_cbk = callbacks_v1.TensorBoard(temp_dir, update_freq="batch")
tb_cbk.writer = FileWriterStub(temp_dir)
tb_cbk.on_batch_end(0, {"acc": 5.0})
tb_cbk.on_train_end()
batch_step, batch_summary = tb_cbk.writer.batch_summary
self.assertEqual(batch_step, 0)
self.assertEqual(batch_summary.value[0].simple_value, 5.0)
tb_cbk = callbacks_v1.TensorBoard(temp_dir, update_freq="epoch")
tb_cbk.writer = FileWriterStub(temp_dir)
tb_cbk.on_epoch_end(0, {"acc": 10.0})
tb_cbk.on_train_end()
epoch_step, epoch_summary = tb_cbk.writer.epoch_summary
self.assertEqual(epoch_step, 0)
self.assertEqual(epoch_summary.value[0].simple_value, 10.0)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_Tensorboard_eager(self):
temp_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
(x_train, y_train), (x_test, y_test) = test_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES,
)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = test_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM
)
model.compile(
loss="binary_crossentropy",
optimizer=tf.compat.v1.train.AdamOptimizer(0.01),
metrics=["accuracy"],
)
cbks = [callbacks_v1.TensorBoard(log_dir=temp_dir)]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0,
)
self.assertTrue(os.path.exists(temp_dir))
def test_TensorBoard_update_freq(self):
class FileWriterStub:
def __init__(self, logdir, graph=None):
self.logdir = logdir
self.graph = graph
self.batch_summaries = []
self.epoch_summaries = []
def add_summary(self, summary, step):
if "batch_" in summary.value[0].tag:
self.batch_summaries.append((step, summary))
elif "epoch_" in summary.value[0].tag:
self.epoch_summaries.append((step, summary))
def flush(self):
pass
def close(self):
pass
with tf.Graph().as_default():
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
# Epoch mode
tb_cbk = callbacks_v1.TensorBoard(temp_dir, update_freq="epoch")
tb_cbk.writer = FileWriterStub(temp_dir)
tb_cbk.on_batch_end(0, {"acc": 5.0, "size": 1})
self.assertEqual(tb_cbk.writer.batch_summaries, [])
tb_cbk.on_epoch_end(0, {"acc": 10.0, "size": 1})
self.assertLen(tb_cbk.writer.epoch_summaries, 1)
tb_cbk.on_train_end()
# Batch mode
tb_cbk = callbacks_v1.TensorBoard(temp_dir, update_freq="batch")
tb_cbk.writer = FileWriterStub(temp_dir)
tb_cbk.on_batch_end(0, {"acc": 5.0, "size": 1})
self.assertLen(tb_cbk.writer.batch_summaries, 1)
tb_cbk.on_batch_end(0, {"acc": 5.0, "size": 1})
self.assertLen(tb_cbk.writer.batch_summaries, 2)
self.assertFalse(tb_cbk.writer.epoch_summaries)
tb_cbk.on_train_end()
# Integer mode
tb_cbk = callbacks_v1.TensorBoard(temp_dir, update_freq=20)
tb_cbk.writer = FileWriterStub(temp_dir)
tb_cbk.on_batch_end(0, {"acc": 5.0, "size": 10})
self.assertFalse(tb_cbk.writer.batch_summaries)
tb_cbk.on_batch_end(0, {"acc": 5.0, "size": 10})
self.assertLen(tb_cbk.writer.batch_summaries, 1)
tb_cbk.on_batch_end(0, {"acc": 5.0, "size": 10})
self.assertLen(tb_cbk.writer.batch_summaries, 1)
tb_cbk.on_batch_end(0, {"acc": 5.0, "size": 10})
self.assertLen(tb_cbk.writer.batch_summaries, 2)
tb_cbk.on_batch_end(0, {"acc": 10.0, "size": 10})
self.assertLen(tb_cbk.writer.batch_summaries, 2)
self.assertFalse(tb_cbk.writer.epoch_summaries)
tb_cbk.on_train_end()
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/callbacks_v1_test.py/0 | {
"file_path": "tf-keras/tf_keras/callbacks_v1_test.py",
"repo_id": "tf-keras",
"token_count": 12472
} | 205 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
from tf_keras.optimizers.legacy import adam
class TrainingCheckpointTests(tf.test.TestCase, parameterized.TestCase):
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=[
tf.__internal__.distribute.combinations.mirrored_strategy_with_one_cpu, # noqa: E501
tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu, # noqa: E501
tf.__internal__.distribute.combinations.tpu_strategy, # noqa: E501
tf.__internal__.distribute.combinations.tpu_strategy_packed_var, # noqa: E501
tf.__internal__.distribute.combinations.central_storage_strategy_with_two_gpus, # noqa: E501
],
mode=["eager"],
)
)
def testCheckpointRestoreOptimizerSlots(self, distribution):
def state():
with distribution.scope():
v = tf.Variable(tf.random.normal([]))
opt = adam.Adam(0.001)
@tf.function
def step():
def f():
with tf.GradientTape() as tape:
loss = v + v
gradients = tape.gradient(loss, [v])
opt.apply_gradients(zip(gradients, [v]))
distribution.run(f)
return v, opt, step
def checkpoint():
v, opt, step = state()
step()
# Save random weights into checkpoint.
checkpoint = tf.train.Checkpoint(v=v, opt=opt)
prefix = os.path.join(self.get_temp_dir(), "ckpt")
with self.test_session():
save_path = checkpoint.save(prefix)
return save_path
save_path = checkpoint()
v, opt, step = state()
checkpoint = tf.train.Checkpoint(v=v, opt=opt)
# Restore from the checkpoint inside a distribution.scope().
with self.test_session():
with distribution.scope():
checkpoint.restore(save_path)
step()
slot = opt.get_slot(v, "m")
self.assertEqual(v._distribute_strategy, slot._distribute_strategy)
v, opt, step = state()
checkpoint = tf.train.Checkpoint(v=v, opt=opt)
# Restore from the checkpoint outside a distribution.scope().
with self.test_session():
with self.assertRaisesRegex(
ValueError, "optimizer slot variable under the scope"
):
checkpoint.restore(save_path)
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=[
tf.__internal__.distribute.combinations.mirrored_strategy_with_one_cpu, # noqa: E501
tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu, # noqa: E501
tf.__internal__.distribute.combinations.cloud_tpu_strategy, # noqa: E501
tf.__internal__.distribute.combinations.tpu_strategy, # noqa: E501
tf.__internal__.distribute.combinations.tpu_strategy_packed_var, # noqa: E501
tf.__internal__.distribute.combinations.central_storage_strategy_with_two_gpus, # noqa: E501
],
mode=["eager"],
)
)
def testCheckpointSaveRestoreIoDevice(self, distribution):
def state():
with distribution.scope():
v = tf.Variable(tf.random.normal([]))
return v
ckpt_options = tf.train.CheckpointOptions(
experimental_io_device="/job:localhost"
)
def checkpoint():
v = state()
# Save random weights into checkpoint.
checkpoint = tf.train.Checkpoint(v=v)
prefix = os.path.join(self.get_temp_dir(), "ckpt")
with self.test_session():
save_path = checkpoint.save(prefix, options=ckpt_options)
return save_path
save_path = checkpoint()
v = state()
checkpoint = tf.train.Checkpoint(v=v)
# Restore from the checkpoint inside a distribution.scope().
# Check that restore works without error.
with self.test_session():
with distribution.scope():
checkpoint.restore(save_path, options=ckpt_options)
if __name__ == "__main__":
tf.compat.v1.enable_eager_execution()
tf.test.main()
| tf-keras/tf_keras/distribute/checkpointing_test.py/0 | {
"file_path": "tf-keras/tf_keras/distribute/checkpointing_test.py",
"repo_id": "tf-keras",
"token_count": 2297
} | 206 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Correctness tests for tf.keras using DistributionStrategy."""
import functools
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import tf_keras as keras
from tf_keras.distribute import distributed_training_utils
from tf_keras.distribute.strategy_combinations import all_strategies
from tf_keras.distribute.strategy_combinations import (
multi_worker_mirrored_strategies,
)
from tf_keras.distribute.strategy_combinations import strategies_minus_tpu
from tf_keras.mixed_precision import policy
from tf_keras.utils import data_utils
_RANDOM_SEED = 1337
_EVAL_STEPS = 20
_GLOBAL_BATCH_SIZE = 64
# Note: Please make sure the tests in this file are also covered in
# keras_backward_compat_test for features that are supported with both APIs.
def eager_mode_test_configuration():
return tf.__internal__.test.combinations.combine(
mode="eager", use_numpy=[True, False], use_validation_data=[True, False]
)
def graph_mode_test_configuration():
return tf.__internal__.test.combinations.combine(
mode="graph", use_numpy=[True, False], use_validation_data=[True, False]
)
def all_strategy_and_input_config_combinations():
return tf.__internal__.test.combinations.times(
tf.__internal__.test.combinations.combine(distribution=all_strategies),
eager_mode_test_configuration() + graph_mode_test_configuration(),
)
def all_strategy_and_input_config_combinations_eager():
return tf.__internal__.test.combinations.times(
tf.__internal__.test.combinations.combine(distribution=all_strategies),
eager_mode_test_configuration(),
)
def strategy_minus_tpu_and_input_config_combinations_eager():
return tf.__internal__.test.combinations.times(
tf.__internal__.test.combinations.combine(
distribution=strategies_minus_tpu
),
eager_mode_test_configuration(),
)
def strategies_for_embedding_models():
"""Returns distribution strategies to test for embedding models.
Since embedding models take longer to train, we disregard DefaultStrategy
in order to prevent testing timeouts.
"""
return [
s
for s in all_strategies
if s.required_tpu
or s.required_gpus
or s is tf.__internal__.distribute.combinations.one_device_strategy
]
def test_combinations_for_embedding_model():
# TODO(sourabhbajaj): Enable tests for eager mode
eager_mode_strategies = [
s for s in strategies_for_embedding_models() if not s.required_tpu
]
return tf.__internal__.test.combinations.times(
tf.__internal__.test.combinations.combine(
distribution=strategies_for_embedding_models()
),
(graph_mode_test_configuration()),
) + tf.__internal__.test.combinations.times(
tf.__internal__.test.combinations.combine(
distribution=eager_mode_strategies
),
(eager_mode_test_configuration()),
)
def test_combinations_with_tpu_strategies_graph():
tpu_strategies = [
tf.__internal__.distribute.combinations.tpu_strategy,
]
return tf.__internal__.test.combinations.times(
tf.__internal__.test.combinations.combine(distribution=tpu_strategies),
graph_mode_test_configuration(),
)
def multi_worker_mirrored_eager():
return tf.__internal__.test.combinations.times(
tf.__internal__.test.combinations.combine(
distribution=multi_worker_mirrored_strategies
),
eager_mode_test_configuration(),
)
def multi_worker_mirrored_eager_and_graph():
return tf.__internal__.test.combinations.times(
tf.__internal__.test.combinations.combine(
distribution=multi_worker_mirrored_strategies
),
eager_mode_test_configuration() + graph_mode_test_configuration(),
)
class MaybeDistributionScope:
"""Provides a context allowing no distribution strategy."""
def __init__(self, distribution):
self._distribution = distribution
self._scope = None
def __enter__(self):
if self._distribution:
self._scope = self._distribution.scope()
self._scope.__enter__()
def __exit__(self, exc_type, value, traceback):
if self._distribution:
self._scope.__exit__(exc_type, value, traceback)
self._scope = None
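# Hedged usage sketch (not part of the original test infra):
# MaybeDistributionScope lets the same model-building code run with or without
# a strategy. The helper below is illustrative only and is never called here.
def _example_maybe_distribution_scope(distribution=None):
    with MaybeDistributionScope(distribution):
        model = keras.Sequential([keras.layers.Dense(1, input_shape=(1,))])
        model.compile("sgd", loss="mse")
    return model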
def batch_wrapper(dataset, batch_size, repeat=None):
if repeat:
dataset = dataset.repeat(repeat)
return dataset.batch(batch_size)
def get_batch_size(global_batch_size, distribution):
batch_size = global_batch_size
# TODO(b/118776054): Use global batch size for Keras/DS support.
use_per_core_batch_size = (
distribution
and not distributed_training_utils.global_batch_size_supported(
distribution
)
)
if use_per_core_batch_size:
batch_size //= distribution.num_replicas_in_sync
return batch_size
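# Hedged illustration (not part of the original test infra): with the default
# global batch size of 64 and a hypothetical strategy reporting 8 replicas
# that does not support global batch sizes, `get_batch_size` returns
# 64 // 8 == 8; with `distribution=None` it returns the global batch size
# unchanged.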
def get_data_size(data):
"""Gets the size of data in list, tuple, dict, or a numpy array."""
assert isinstance(data, (np.ndarray, list, dict, tuple))
if isinstance(data, np.ndarray):
return len(data)
if isinstance(data, (list, tuple)):
return len(data[0])
return len(data.values())
def get_shapes(data):
shapes = None
if all(hasattr(x, "shape") for x in tf.nest.flatten(data)):
shapes = tf.nest.map_structure(lambda x: x.shape, data)
return shapes
def get_correctness_test_inputs(
use_numpy,
use_validation_data,
with_distribution,
x_train,
y_train,
x_eval,
y_eval,
x_predict,
training_epochs,
):
"""Generates inputs for correctness check when enable TF-Keras with DS."""
global_batch_size = _GLOBAL_BATCH_SIZE
batch_size = get_batch_size(global_batch_size, with_distribution)
if use_numpy:
training_inputs = {
"batch_size": batch_size,
"x": x_train,
"y": y_train,
"epochs": training_epochs,
"shuffle": False,
}
if use_validation_data:
eval_inputs = None
training_inputs["validation_data"] = (x_eval, y_eval)
else:
eval_inputs = {
"batch_size": batch_size,
"x": x_eval,
"y": y_eval,
}
predict_inputs = {"x": x_predict}
else:
training_data_size = get_data_size(x_train)
# For dataset inputs, we do not pass batch_size to
# keras.fit/evaluate/predict. The batch size is part of the dataset.
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
x = batch_wrapper(train_dataset, batch_size, repeat=training_epochs)
steps_per_epoch = int(
np.ceil(1.0 * training_data_size / global_batch_size)
)
training_inputs = {
"batch_size": None,
"x": x,
"y": None,
"epochs": training_epochs,
"shuffle": False,
"steps_per_epoch": steps_per_epoch,
}
if use_validation_data:
eval_inputs = None # Remove the eval_inputs
eval_dataset = tf.data.Dataset.from_tensor_slices((x_eval, y_eval))
x = batch_wrapper(eval_dataset, batch_size)
training_inputs["validation_data"] = x
training_inputs["validation_steps"] = 5
else:
eval_dataset = tf.data.Dataset.from_tensor_slices((x_eval, y_eval))
x = batch_wrapper(eval_dataset, batch_size)
eval_steps = int(
np.ceil(1.0 * get_data_size(x_eval) / global_batch_size)
)
eval_inputs = {
"batch_size": None,
"x": x,
"y": None,
"steps": eval_steps,
}
predict_batch_size = get_batch_size(
get_data_size(x_predict), with_distribution
)
predict_dataset = tf.data.Dataset.from_tensor_slices(x_predict)
predict_dataset = batch_wrapper(predict_dataset, predict_batch_size)
predict_inputs = {
"steps": 1,
"x": predict_dataset,
}
return training_inputs, eval_inputs, predict_inputs
def fit_eval_and_predict(
initial_weights,
input_fn,
model_fn,
distribution=None,
is_stateful_model=False,
):
"""Generates results for fit/predict/evaluate for given model."""
training_inputs, eval_inputs, predict_inputs = input_fn()
model = model_fn(
initial_weights=initial_weights,
distribution=distribution,
input_shapes=get_shapes(training_inputs["x"]),
)
result = {}
result["training_history_1"] = model.fit(**training_inputs).history
if eval_inputs is not None:
result["eval_result_1"] = model.evaluate(**eval_inputs)
result["weights_1"] = model.get_weights()
if predict_inputs is not None:
# Check correctness of the result of predict() invoked
# multiple times -- as for stateful models, result of
# predict may differ for each batch.
predict_length = 1
if is_stateful_model:
predict_length = 3
for i in range(predict_length):
result_key = f"predict_result_{i}"
result[result_key] = model.predict(**predict_inputs)
# Train and eval again to mimic user's flow.
result["training_history_2"] = model.fit(**training_inputs).history
if eval_inputs is not None:
result["eval_result_2"] = model.evaluate(**eval_inputs)
result["weights_2"] = model.get_weights()
return result
def compare_results(
results_with_ds,
results_without_ds,
distribution,
testcase,
partial_last_batch=None,
):
"""Compares results of model compiled with/without distribution strategy."""
if policy.global_policy().compute_dtype in ("float16", "bfloat16"):
default_tolerance = 1e-2
relaxed_tolerance = 1e-2
elif partial_last_batch == "train_and_eval":
# We relax the tolerance a lot in the partial last batch case as
# 1. the examples in uneven batches may have different weights when
# applying the gradients in the distributed case.
        # 2. TF-Keras and TF-Keras DS handle training with epochs > 1 on
        # numpy inputs differently. In TF-Keras, every epoch may have a
        # partial batch, while TF-Keras DS converts numpy inputs into a
        # dataset, does a repeat() first and calculates steps_per_epoch,
        # so it has at most one partial batch. This makes even the 1-CPU
        # result differ.
default_tolerance = 1e-3
relaxed_tolerance = 1e-3
else:
default_tolerance = 4e-5
relaxed_tolerance = 1e-4
def _get_compare_result_tolerance(key):
"""Returns tolerance to compare results."""
        # See b/119257215 for more details. A DS test run on GPU could have
        # larger variance than a test on CPU.
if tf.test.is_gpu_available() and key.startswith(
("weights_1", "weights_2", "predict_result")
):
return relaxed_tolerance
return default_tolerance
for key in sorted(results_with_ds.keys()):
if (
key.startswith("training_history")
and isinstance(
distribution,
(
tf.distribute.experimental.TPUStrategy,
tf.compat.v1.distribute.experimental.TPUStrategy,
),
)
and distribution.extended.steps_per_run > 1
):
# TODO(b/119894254): Enable this test for all cases once the
# underlying bug is fixed.
continue
tolerance = _get_compare_result_tolerance(key)
        # We don't compare the loss, as loss is currently not computed as a
        # metric in Keras; the loss value is inaccurate for the last partial
        # batch because its samples carry more weight.
if partial_last_batch is not None:
if key.startswith("eval_result"):
results_with_ds[key] = results_with_ds[key][1:]
results_without_ds[key] = results_without_ds[key][1:]
if key.startswith("training_history"):
results_with_ds[key]["val_loss"] = 0
results_without_ds[key]["val_loss"] = 0
testcase.assertAllClose(
results_with_ds[key],
results_without_ds[key],
atol=tolerance,
rtol=tolerance,
msg=f"Fail to assert {key}.",
)
def should_skip_tpu_with_eager(distribution):
return tf.executing_eagerly() and isinstance(
distribution,
(
tf.distribute.experimental.TPUStrategy,
tf.compat.v1.distribute.experimental.TPUStrategy,
),
)
class LearningRateBatchScheduler(keras.callbacks.Callback):
"""Scheduler that dynamically sets the learning rate of model."""
def __init__(self, update_freq=None):
self._update_freq = update_freq
def on_batch_begin(self, batch, logs=None):
if self._update_freq and batch % self._update_freq != 0:
return
# To avoid divergence, limit the value range.
lr = 0.001 * (batch % 10)
keras.backend.set_value(self.model.optimizer.lr, lr)
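# Hedged usage note (not part of the original test infra): the scheduler above
# is passed through `callbacks=[LearningRateBatchScheduler(update_freq)]` in
# `model.fit(...)`, so the learning rate cycles through 0.000-0.009 as the
# batch index advances, optionally only every `update_freq` batches.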
class TestDistributionStrategyCorrectnessBase(
tf.test.TestCase, parameterized.TestCase
):
"""Model agnostic testing infra to test correctness of TF-Keras models."""
def set_up_test_config(
self, use_numpy=False, use_validation_data=False, with_batch_norm=None
):
self.use_numpy = use_numpy
self.use_validation_data = use_validation_data
self.with_batch_norm = with_batch_norm
keras.backend.set_image_data_format("channels_last")
np.random.seed(_RANDOM_SEED)
tf.compat.v1.set_random_seed(_RANDOM_SEED)
def get_data(self):
num_samples = 10000
x_train = np.random.randint(0, 2, num_samples)
x_train = np.reshape(x_train, (num_samples, 1))
y_train = x_train
return (x_train.astype("float32"), y_train.astype("float32"), None)
def get_data_with_partial_last_batch(self):
raise NotImplementedError
def get_data_with_partial_last_batch_eval(self):
raise NotImplementedError
def get_input_for_correctness_test(self, **kwargs):
"""Generates inputs that are dictionaries.
        We only provide a default implementation of this method here. If you
        need a more customized way of providing input to your model, override
        this method.
        Args:
          **kwargs: keyword arguments describing how to create the input
            dictionaries
Returns:
Three dictionaries representing the input for fit(), evaluate() and
predict()
"""
return get_correctness_test_inputs(**kwargs)
def get_model(self, distribution=None, input_shapes=None):
raise NotImplementedError
def run_correctness_test(
self,
distribution,
use_numpy,
use_validation_data,
with_batch_norm=None,
is_stateful_model=False,
partial_last_batch=None,
training_epochs=2,
):
with self.cached_session():
self.set_up_test_config(
use_numpy, use_validation_data, with_batch_norm
)
if partial_last_batch == "eval":
(
x_train,
y_train,
x_eval,
y_eval,
x_predict,
) = self.get_data_with_partial_last_batch_eval()
elif partial_last_batch == "train_and_eval":
(
x_train,
y_train,
x_eval,
y_eval,
x_predict,
) = self.get_data_with_partial_last_batch()
else:
x_train, y_train, x_predict = self.get_data()
x_eval = x_train
y_eval = y_train
# The model is built once and the initial weights are saved.
# This is used to initialize the model for both the distribution and
# non-distribution run.
model = self.get_model(input_shapes=get_shapes(x_train))
initial_weights = model.get_weights()
ds_input_fn = functools.partial(
self.get_input_for_correctness_test,
use_numpy=use_numpy,
use_validation_data=use_validation_data,
with_distribution=distribution,
x_train=x_train,
y_train=y_train,
x_eval=x_eval,
y_eval=y_eval,
x_predict=x_predict,
training_epochs=training_epochs,
)
nods_input_fn = functools.partial(
self.get_input_for_correctness_test,
use_numpy=use_numpy,
use_validation_data=use_validation_data,
with_distribution=None,
x_train=x_train,
y_train=y_train,
x_eval=x_eval,
y_eval=y_eval,
x_predict=x_predict,
training_epochs=training_epochs,
)
results_with_ds = fit_eval_and_predict(
initial_weights,
input_fn=ds_input_fn,
model_fn=self.get_model,
distribution=distribution,
is_stateful_model=is_stateful_model,
)
results_without_ds = fit_eval_and_predict(
initial_weights,
input_fn=nods_input_fn,
model_fn=self.get_model,
distribution=None,
is_stateful_model=is_stateful_model,
)
            # First, handle a special case: for multi-replica distributed
            # training, batch norm statistics are not aggregated globally, so
            # the weights are expected to differ.
if (
self.with_batch_norm == "regular"
and distribution.num_replicas_in_sync > 1
):
with self.assertRaises(AssertionError):
compare_results(
results_with_ds,
results_without_ds,
distribution,
testcase=self,
partial_last_batch=partial_last_batch,
)
else:
compare_results(
results_with_ds,
results_without_ds,
distribution,
testcase=self,
partial_last_batch=partial_last_batch,
)
def get_input_for_dynamic_lr_test(self, **kwargs):
"""Generates inputs that are dictionaries.
        We only provide a default implementation of this method here. If you
        need a more customized way of providing input to your model, override
        this method.
        Args:
          **kwargs: keyword arguments describing how to create the input
            dictionaries
Returns:
Three dictionaries representing the input for fit(), evaluate() and
predict()
"""
training_input = kwargs
return training_input, None, None
def run_dynamic_lr_test(self, distribution):
with self.cached_session():
self.set_up_test_config()
x_train, y_train, _ = self.get_data()
model = self.get_model(input_shapes=get_shapes(x_train))
initial_weights = model.get_weights()
update_freq = None
if (
isinstance(
distribution,
tf.compat.v1.distribute.experimental.TPUStrategy,
)
and distribution.extended.steps_per_run > 1
):
                # For TPUStrategy with steps_per_run > 1, the callback is not
                # invoked every step. So, to compare CPU vs. TPU, we let the
                # CPU behave the same way as the TPU.
update_freq = distribution.extended.steps_per_run
training_epochs = 2
global_batch_size = 64
ds_batch_size = get_batch_size(global_batch_size, distribution)
nods_batch_size = get_batch_size(global_batch_size, None)
ds_input_fn = functools.partial(
self.get_input_for_dynamic_lr_test,
x=x_train,
y=y_train,
batch_size=ds_batch_size,
shuffle=False,
epochs=training_epochs,
callbacks=[LearningRateBatchScheduler(update_freq)],
validation_data=(x_train, y_train),
)
nods_input_fn = functools.partial(
self.get_input_for_dynamic_lr_test,
x=x_train,
y=y_train,
batch_size=nods_batch_size,
shuffle=False,
epochs=training_epochs,
callbacks=[LearningRateBatchScheduler(update_freq)],
validation_data=(x_train, y_train),
)
results_with_ds = fit_eval_and_predict(
initial_weights,
input_fn=ds_input_fn,
model_fn=self.get_model,
distribution=distribution,
)
results_without_ds = fit_eval_and_predict(
initial_weights,
input_fn=nods_input_fn,
model_fn=self.get_model,
distribution=None,
)
compare_results(
results_with_ds, results_without_ds, distribution, testcase=self
)
class TestDistributionStrategyEmbeddingModelCorrectnessBase(
TestDistributionStrategyCorrectnessBase
):
"""Base class to test correctness of TF-Keras models with embedding
layers.
"""
def get_data(
self,
count=(_GLOBAL_BATCH_SIZE * _EVAL_STEPS),
min_words=5,
max_words=10,
max_word_id=19,
num_classes=2,
):
distribution = []
for _ in range(num_classes):
dist = np.abs(np.random.randn(max_word_id))
dist /= np.sum(dist)
distribution.append(dist)
features = []
labels = []
for _ in range(count):
label = np.random.randint(0, num_classes, size=1)[0]
num_words = np.random.randint(min_words, max_words, size=1)[0]
word_ids = np.random.choice(
max_word_id, size=num_words, replace=True, p=distribution[label]
)
word_ids = word_ids
labels.append(label)
features.append(word_ids)
features = data_utils.pad_sequences(features, maxlen=max_words)
x_train = np.asarray(features, dtype=np.float32)
y_train = np.asarray(labels, dtype=np.int32).reshape((count, 1))
x_predict = x_train[:_GLOBAL_BATCH_SIZE]
return x_train, y_train, x_predict
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/distribute/keras_correctness_test_base.py/0 | {
"file_path": "tf-keras/tf_keras/distribute/keras_correctness_test_base.py",
"repo_id": "tf-keras",
"token_count": 11255
} | 207 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A base class to provide a model and corresponding input data for testing."""
class ModelAndInput:
"""Base class to provide model and its corresponding inputs."""
def get_model(self):
"""Returns a compiled keras model object, together with output name.
Returns:
model: a keras model object
output_name: a string for the name of the output layer
"""
raise NotImplementedError("must be implemented in descendants")
def get_data(self):
"""Returns data for training and predicting.
Returns:
x_train: data used for training
y_train: label used for training
x_predict: data used for predicting
"""
raise NotImplementedError("must be implemented in descendants")
def get_batch_size(self):
"""Returns the batch_size used by the model."""
raise NotImplementedError("must be implemented in descendants")
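# Hedged example (not part of the original module): a minimal, illustrative
# subclass showing how the three methods are expected to fit together. The
# layer sizes, data shapes, and batch size below are arbitrary.
class _ExampleModelAndInput(ModelAndInput):
    def get_model(self):
        import tf_keras as keras

        model = keras.Sequential(
            [keras.layers.Dense(1, input_shape=(3,), name="dense_out")]
        )
        model.compile(optimizer="sgd", loss="mse")
        return model, "dense_out"

    def get_data(self):
        import numpy as np

        x_train = np.random.rand(8, 3).astype("float32")
        y_train = np.random.rand(8, 1).astype("float32")
        return x_train, y_train, x_train[:2]

    def get_batch_size(self):
        return 4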
| tf-keras/tf_keras/distribute/model_collection_base.py/0 | {
"file_path": "tf-keras/tf_keras/distribute/model_collection_base.py",
"repo_id": "tf-keras",
"token_count": 490
} | 208 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training state management."""
import os
import tensorflow.compat.v2 as tf
from tf_keras import backend
from tf_keras.distribute import distributed_file_utils
from tf_keras.utils import mode_keys
# isort: off
from tf_keras.distribute.distributed_file_utils import (
support_on_demand_checkpoint_callback,
) # noqa: E501
MAX_CHECKPOINT_TO_KEEP = 1
class WorkerTrainingState:
"""Training state management class.
    This class provides APIs for backing up and restoring the training state.
    This allows the model, epoch, and batch information to be saved
    periodically and restored for fault tolerance (also known as preemption
    recovery).
"""
# Constant for `tf.keras.Model` attribute to store the epoch and batch
# at which the most recently saved checkpoint was saved.
CKPT_SAVED_EPOCH_UNUSED_VALUE = -1
CKPT_SAVED_BATCH_UNUSED_VALUE = -1
def __init__(
self,
model,
checkpoint_dir,
save_freq="epoch",
save_before_preemption_arg=None,
):
self._enable_save_before_preemption = save_before_preemption_arg and (
support_on_demand_checkpoint_callback(model.distribute_strategy)
)
self._model = model
self._save_freq = save_freq
        # The batch and epoch at which the checkpoint is saved. Used for
        # fault tolerance. GPU devices only have an int64-dtype VarHandleOp
        # registered, hence int64 variables are used here.
self._ckpt_saved_epoch = tf.Variable(
initial_value=tf.constant(
self.CKPT_SAVED_EPOCH_UNUSED_VALUE, dtype=tf.int64
),
name="ckpt_saved_epoch",
)
self._ckpt_saved_batch = tf.Variable(
initial_value=tf.constant(
self.CKPT_SAVED_BATCH_UNUSED_VALUE, dtype=tf.int64
),
name="ckpt_saved_batch",
)
# Variable initialization.
backend.set_value(
self._ckpt_saved_epoch, self.CKPT_SAVED_EPOCH_UNUSED_VALUE
)
backend.set_value(
self._ckpt_saved_batch, self.CKPT_SAVED_BATCH_UNUSED_VALUE
)
        # _ckpt_saved_epoch and _ckpt_saved_batch get tracked and are
        # included in the checkpoint file when backing up.
checkpoint = tf.train.Checkpoint(
model=self._model,
ckpt_saved_epoch=self._ckpt_saved_epoch,
ckpt_saved_batch=self._ckpt_saved_batch,
train_counter=self._model._train_counter,
)
        # If this is single-worker training, checkpoint_dir is the same for
# write_checkpoint_manager and read_checkpoint_manager.
#
# If this is multi-worker training, and this worker should not save
# checkpoint, we replace the write_checkpoint_manager's checkpoint_dir
# with a temp filepath, so it writes to a file that will be removed at
# the end of back_up() call. This is necessary because the
# SyncOnReadVariable needs to be synced across all the workers in order
# to be read, and all workers need to perform `save()`. But all workers
# should restore from the same checkpoint_dir as passed in
# read_checkpoint_manager.
self.read_checkpoint_manager = tf.train.CheckpointManager(
checkpoint,
directory=os.path.join(checkpoint_dir, "chief"),
max_to_keep=MAX_CHECKPOINT_TO_KEEP,
)
write_checkpoint_dir = distributed_file_utils.write_dirpath(
checkpoint_dir, self._model.distribute_strategy
)
if self._model.distribute_strategy.extended.should_checkpoint:
self.write_checkpoint_manager = self.read_checkpoint_manager
else:
self.write_checkpoint_manager = tf.train.CheckpointManager(
checkpoint,
directory=write_checkpoint_dir,
max_to_keep=MAX_CHECKPOINT_TO_KEEP,
)
if self._enable_save_before_preemption:
self.preemption_handler = (
tf.distribute.experimental.PreemptionCheckpointHandler(
self._model.distribute_strategy.cluster_resolver,
self.write_checkpoint_manager,
)
)
self.preemption_handler._read_checkpoint_manager = (
self.read_checkpoint_manager
)
self._model._preemption_handler = self.preemption_handler
def back_up(self, epoch, batch=0):
"""Back up the current state of training into a checkpoint file.
Args:
epoch: The current epoch information to be saved.
batch: The current batch(step) information to be saved.
"""
# Save the model plus CKPT_SAVED_EPOCH and CKPT_SAVED_BATCH variable.
if self.write_checkpoint_manager.save():
distributed_file_utils.remove_temp_dirpath(
self.write_checkpoint_manager.directory,
self._model.distribute_strategy,
)
def backup_if_preempted(self):
if self._enable_save_before_preemption:
self.preemption_handler._run_counter += 1
self.preemption_handler._check_preemption_and_maybe_checkpoint()
def restore(self):
"""Restore the training state from the backed up checkpoint file.
Returns:
          True if the training state is successfully restored. False if the
          training state doesn't need to be restored, or an error occurred so
          it can't be.
"""
# When creating the PreemptionCheckpointHandler object, we have already
# restored the checkpoint.
if not self._enable_save_before_preemption:
self.read_checkpoint_manager.restore_or_initialize()
def delete_backup(self):
"""Delete the backup directories.
Delete the backup directories which should not exist after `fit()`
successfully finishes.
"""
if self.write_checkpoint_manager is self.read_checkpoint_manager:
try:
tf.io.gfile.rmtree(self.write_checkpoint_manager.directory)
except tf.errors.NotFoundError:
pass
def maybe_load_initial_counters_from_ckpt(
self, steps_per_epoch, initial_epoch, mode
):
"""Maybe load 1st epoch from checkpoint, considering worker recovery.
        When the `_ckpt_saved_epoch` attribute exists and is not
        `CKPT_SAVED_EPOCH_UNUSED_VALUE`, this is a multi-worker training
        setting and indicates that the worker is recovering from a previous
        failure. In this case, infer `initial_epoch` from
        `self._ckpt_saved_epoch` to continue the previously unfinished
        training from that epoch.
Args:
steps_per_epoch: The number of steps per epoch value.
          initial_epoch: The original initial_epoch the user passes in `fit()`.
mode: The mode for running `model.fit()`.
Returns:
          If the training is recovering from a previous failure under a
          multi-worker training setting, return the (epoch, step) at which the
          training is supposed to continue. Otherwise, return the
          `initial_epoch, initial_step` the user passes in.
"""
initial_step = 0
epoch = backend.eval(self._ckpt_saved_epoch)
batch = backend.eval(self._ckpt_saved_batch)
if mode == mode_keys.ModeKeys.TRAIN:
# For batch-level saving
if self._enable_save_before_preemption or isinstance(
self._save_freq, int
):
if batch >= 0:
                    # If the checkpoint was last saved at the last batch of
                    # the epoch, return the next epoch number and batch=0
if batch == steps_per_epoch - 1:
initial_epoch = epoch + 1
initial_step = 0
else:
                        # If the checkpoint was not last saved at the last
                        # batch of the epoch, return the same epoch and the
                        # next batch number
initial_epoch = epoch
initial_step = batch + 1
else:
if epoch >= 0:
# The most recently saved epoch is one epoch prior to the
# epoch it failed at, so return the value of
# 'self._ckpt_saved_epoch' plus one.
initial_epoch = epoch + 1
return (initial_epoch, initial_step)
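# Worked example (illustrative, based on the logic above; not part of the
# original module): assume steps_per_epoch=4 and batch-level saving. If the
# last checkpoint recorded epoch=1, batch=3 (the final batch of the epoch),
# training resumes at (initial_epoch=2, initial_step=0). If it recorded
# epoch=1, batch=1, training resumes at (initial_epoch=1, initial_step=2).
# With epoch-level saving, a recorded epoch=1 resumes at (2, 0).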
| tf-keras/tf_keras/distribute/worker_training_state.py/0 | {
"file_path": "tf-keras/tf_keras/distribute/worker_training_state.py",
"repo_id": "tf-keras",
"token_count": 3897
} | 209 |
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras Utilities for DTensor related API."""
import inspect
import tensorflow.compat.v2 as tf
from tf_keras.dtensor import dtensor_api as dtensor
# All the variable names in the default keras layers. We will use those to map
# against the args in the __init__ method to find corresponding layout args.
# See allow_initializer_layout() for more details.
KERAS_VARIABLE_NAMES = [
"alpha",
"beta",
"bias",
"depthwise",
"embeddings",
"gamma",
"kernel",
"moving_mean",
"moving_variance",
"pointwise",
"recurrent",
]
def allow_initializer_layout(init_method):
"""A decorator for injecting layout information to layer.__init__.
    Layout will be a new param for any of the weights of all the keras layers.
    Adding the param to every __init__ method would be a big piece of
    duplicated work. This decorator is designed to reduce code duplication and
    make it easy to add/remove the DTensor feature if needed.
Sample usage:
```python
class Dense(tf.keras.layer.Layer):
@allow_initializer_layout
def __init__(self, units,
kernel_initializer='zeros',
bias_initializer='zeros',
**kwargs):
super().__init__(**kwargs)
d = Dense(units=8, kernel_layout=layout1, bias_layout=layout2)
d.kernel_layout == layout1
d.bias_layout == layout2
```
    By adding this annotation, it will:
    1. Filter out the kwargs based on certain keywords. E.g. if
       'kernel_initializer' appears in the method signature, it will try to
       pop 'kernel_layout' if it is present. Same for "bias",
       "recurrent_kernel", etc. This makes sure the layout-related params are
       not passed to `BaseLayer.__init__`, which would raise an error about
       unexpected keyword args.
    2. Set the self.kernel_layout/bias_layout attributes after the `__init__`
       method is called. The TF-Keras framework will use those fields to
       create weights down the stream.
Args:
init_method: the `__init__` method of the TF-Keras layer to annotate.
Returns:
the annotated __init__ method.
"""
def _wrap_function(layer_instance, *args, **kwargs):
signature = inspect.signature(init_method)
layout_args = {}
        # Check args like 'kernel_initializer' and pop the corresponding
        # 'kernel_layout' if it is present.
for variable_name in KERAS_VARIABLE_NAMES:
if variable_name + "_initializer" in signature.parameters:
layout = kwargs.pop(variable_name + "_layout", None)
if layout:
layout_args[variable_name + "_layout"] = layout
init_method(layer_instance, *args, **kwargs)
# Inject the layout parameter after the invocation of __init__()
for layout_param_name, layout in layout_args.items():
setattr(layer_instance, layout_param_name, layout)
# return decorated
return tf.__internal__.decorator.make_decorator(
target=init_method, decorator_func=_wrap_function
)
def inject_mesh(init_method):
"""Inject DTensor mesh information to an object.
    This is useful for keras objects like `Metric` and `Optimizer` which need
    a DTensor mesh to create the weights, but don't want to change the current
    public API interface.
This is for temporary usage and eventually the mesh/layout information will
be public arguments in the `__init__` method.
Sample usage:
```python
class Accuracy(tf.keras.metrics.Metric):
@inject_mesh
def __init__(self, name='accuracy', dtype=None):
super().__init__(**kwargs)
acc = Accuracy(mesh=mesh)
assert acc._mesh == mesh
```
Args:
init_method: the `__init__` method of the TF-Keras class to annotate.
Returns:
the annotated __init__ method.
"""
def _wrap_function(instance, *args, **kwargs):
mesh = kwargs.pop("mesh", None)
        # Note that the injection of _mesh needs to happen before the
        # invocation of __init__, since the class might need the mesh to
        # create weights in __init__.
if mesh is not None:
instance._mesh = mesh
init_method(instance, *args, **kwargs)
return tf.__internal__.decorator.make_decorator(
target=init_method, decorator_func=_wrap_function
)
def call_with_layout(fn, layout, *args, **kwargs):
"""Invoke the function with inputs and relayout the result.
Args:
fn: the function to invoke.
layout: if not None, the output of the fn will be relayout with this.
*args: positional arguments to be called with fn.
**kwargs: keyword arguments to be called with fn.
Returns:
The output of fn, with potential relayout with the layout specified.
"""
if layout:
with dtensor.default_mesh(layout.mesh):
result = fn(*args, **kwargs)
return dtensor.relayout(result, layout)
return fn(*args, **kwargs)
def running_with_dtensor_strategy():
"""Check whether running with a `Strategy` that is backed by DTensor.
    In DTensor-based training, all the tensors are in a global context, which
    is different from the local context. Some keras components, e.g.
    BatchNormalization and SyncBatchNormalization as well as optimizers, need
    to behave differently.
    This check helps those layers branch the logic and keep the correct
    behavior across the different contexts.
if not tf.distribute.has_strategy():
return False
strategy = tf.distribute.get_strategy()
# TODO(scottzhu): Finalize the strategy API to check if a strategy is backed
# by DTensor.
return getattr(strategy, "_mesh", None) is not None
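# Illustrative usage sketch for `call_with_layout` (an assumed example, not
# part of the original module). Mesh and layout construction are omitted
# because they depend on the device setup; `some_layout` below is a
# hypothetical `dtensor.Layout`.
#
#   eye = call_with_layout(tf.eye, None, 3)         # just calls tf.eye(3)
#   eye = call_with_layout(tf.eye, some_layout, 3)  # result relayouted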
| tf-keras/tf_keras/dtensor/utils.py/0 | {
"file_path": "tf-keras/tf_keras/dtensor/utils.py",
"repo_id": "tf-keras",
"token_count": 2291
} | 210 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""DataAdapter tests."""
import math
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import tf_keras as keras
from tf_keras.engine import data_adapter
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
from tf_keras.utils import data_utils
# isort: off
from tensorflow.python.eager import context
class DummyArrayLike:
"""Dummy array-like object."""
def __init__(self, data):
self.data = data
def __len__(self):
return len(self.data)
def __getitem__(self, key):
return self.data[key]
@property
def shape(self):
return self.data.shape
@property
def dtype(self):
return self.data.dtype
def fail_on_convert(x, **kwargs):
_ = x
_ = kwargs
raise TypeError("Cannot convert DummyArrayLike to a tensor")
tf.register_tensor_conversion_function(DummyArrayLike, fail_on_convert)
class DataAdapterTestBase(test_combinations.TestCase):
def setUp(self):
super().setUp()
self.batch_size = 5
self.numpy_input = np.zeros((50, 10))
self.numpy_target = np.ones(50)
self.tensor_input = tf.constant(2.0, shape=(50, 10))
self.tensor_target = tf.ones((50,))
self.arraylike_input = DummyArrayLike(self.numpy_input)
self.arraylike_target = DummyArrayLike(self.numpy_target)
self.dataset_input = (
tf.data.Dataset.from_tensor_slices(
(self.numpy_input, self.numpy_target)
)
.shuffle(50)
.batch(self.batch_size)
)
def generator():
while True:
yield (
np.zeros((self.batch_size, 10)),
np.ones(self.batch_size),
)
self.generator_input = generator()
self.iterator_input = data_utils.threadsafe_generator(generator)()
self.sequence_input = TestSequence(
batch_size=self.batch_size, feature_shape=10
)
self.text_input = [["abc"]]
self.bytes_input = [[b"abc"]]
self.model = keras.models.Sequential(
[keras.layers.Dense(8, input_shape=(10,), activation="softmax")]
)
class TestSequence(data_utils.Sequence):
def __init__(self, batch_size, feature_shape):
self.batch_size = batch_size
self.feature_shape = feature_shape
def __getitem__(self, item):
return (
np.zeros((self.batch_size, self.feature_shape)),
np.ones((self.batch_size,)),
)
def __len__(self):
return 10
class TestSparseSequence(TestSequence):
def __getitem__(self, item):
indices = [
[row, self.feature_shape - 1] for row in range(self.batch_size)
]
values = [1 for row in range(self.batch_size)]
st = tf.SparseTensor(
indices, values, (self.batch_size, self.feature_shape)
)
return (st, np.ones((self.batch_size,)))
class TestRaggedSequence(TestSequence):
def __getitem__(self, item):
values = np.random.randint(
0, self.feature_shape, (self.batch_size, 2)
).reshape(-1)
row_lengths = np.full(self.batch_size, 2)
rt = tf.RaggedTensor.from_row_lengths(values, row_lengths)
return (rt, np.ones((self.batch_size,)))
class TestBatchSequence(data_utils.Sequence):
def __init__(self, batch_size, feature_shape, epochs=2):
"""Creates a keras.utils.Sequence with increasing batch_size.
Args:
batch_size (Union[int, List[int]]): Can be a list containing two
values: start and end batch_size
feature_shape (int): Number of features in a sample
epochs (int, optional): Number of epochs
"""
self.batch_size = batch_size
self.feature_shape = feature_shape
self._epochs = epochs
        # We use the `on_epoch_end` method to prepare data for the next
        # epoch. Set the current epoch to `-1`, so that `on_epoch_end` will
        # increase it to `0`.
self._current_epoch = -1
# actual batch size will be set inside `on_epoch_end`
self._current_batch_size = 0
self.on_epoch_end()
def __len__(self):
"""Number of batches in the Sequence.
Returns: int
The number of batches in the Sequence.
"""
        # Data was rebalanced, so we need to recalculate the number of
        # examples.
num_examples = 20
batch_size = self._current_batch_size
return num_examples // batch_size + int(
num_examples % batch_size > 0
) # = math.ceil(num_examples / batch_size )
def __getitem__(self, index):
"""Gets batch at position `index`.
Arguments:
index (int): position of the batch in the Sequence.
Returns: Tuple[Any, Any] A batch (tuple of input data and target data).
"""
        # Return a batch of (input data, target data) with the current batch
        # size.
return (
np.zeros((self._current_batch_size, self.feature_shape)),
np.ones((self._current_batch_size,)),
)
def on_epoch_end(self):
"""Updates the data after every epoch."""
self._current_epoch += 1
if self._current_epoch < self._epochs:
self._current_batch_size = self._linearly_increasing_batch_size()
def _linearly_increasing_batch_size(self):
"""Linearly increase batch size with every epoch.
The idea comes from https://arxiv.org/abs/1711.00489.
Returns: int
The batch size to use in this epoch.
"""
if not isinstance(self.batch_size, list):
return int(self.batch_size)
if self._epochs > 1:
return int(
self.batch_size[0]
+ self._current_epoch
* (self.batch_size[1] - self.batch_size[0])
/ (self._epochs - 1)
)
else:
return int(self.batch_size[0])
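# Worked example (illustrative, not part of the original test file): with
# batch_size=[5, 10] and epochs=2, `_linearly_increasing_batch_size` yields
# int(5 + 0 * (10 - 5) / 1) = 5 for epoch 0 and int(5 + 1 * (10 - 5) / 1) = 10
# for epoch 1. With epochs=3, the middle epoch would use
# int(5 + 1 * (10 - 5) / 2) = 7.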
class TensorLikeDataAdapterTest(DataAdapterTestBase):
def setUp(self):
super().setUp()
self.adapter_cls = data_adapter.TensorLikeDataAdapter
def test_can_handle_numpy(self):
self.assertTrue(self.adapter_cls.can_handle(self.numpy_input))
self.assertTrue(
self.adapter_cls.can_handle(self.numpy_input, self.numpy_target)
)
self.assertFalse(self.adapter_cls.can_handle(self.dataset_input))
self.assertFalse(self.adapter_cls.can_handle(self.generator_input))
self.assertFalse(self.adapter_cls.can_handle(self.sequence_input))
self.assertFalse(self.adapter_cls.can_handle(self.text_input))
self.assertFalse(self.adapter_cls.can_handle(self.bytes_input))
def test_size_numpy(self):
adapter = self.adapter_cls(
self.numpy_input, self.numpy_target, batch_size=5
)
self.assertEqual(adapter.get_size(), 10)
self.assertFalse(adapter.has_partial_batch())
def test_batch_size_numpy(self):
adapter = self.adapter_cls(
self.numpy_input, self.numpy_target, batch_size=5
)
self.assertEqual(adapter.batch_size(), 5)
def test_partial_batch_numpy(self):
adapter = self.adapter_cls(
self.numpy_input, self.numpy_target, batch_size=4
)
        self.assertEqual(adapter.get_size(), 13)  # ceil(50/4)
self.assertTrue(adapter.has_partial_batch())
self.assertEqual(adapter.partial_batch_size(), 2)
def test_epochs(self):
num_epochs = 3
adapter = self.adapter_cls(
self.numpy_input, self.numpy_target, batch_size=5, epochs=num_epochs
)
ds_iter = iter(adapter.get_dataset())
num_batches_per_epoch = self.numpy_input.shape[0] // 5
for _ in range(num_batches_per_epoch * num_epochs):
next(ds_iter)
with self.assertRaises(StopIteration):
next(ds_iter)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_training_numpy(self):
self.model.compile(
loss="sparse_categorical_crossentropy",
optimizer="sgd",
run_eagerly=test_utils.should_run_eagerly(),
)
self.model.fit(self.numpy_input, self.numpy_target, batch_size=5)
def test_can_handle_pandas(self):
try:
import pandas as pd
except ImportError:
self.skipTest("Skipping test because pandas is not installed.")
self.assertTrue(
self.adapter_cls.can_handle(pd.DataFrame(self.numpy_input))
)
self.assertTrue(
self.adapter_cls.can_handle(pd.DataFrame(self.numpy_input)[0])
)
self.assertTrue(
self.adapter_cls.can_handle(
pd.DataFrame(self.numpy_input),
pd.DataFrame(self.numpy_input)[0],
)
)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_training_pandas(self):
try:
import pandas as pd
except ImportError:
self.skipTest("Skipping test because pandas is not installed.")
input_a = keras.Input(shape=(3,), name="input_a")
input_b = keras.Input(shape=(3,), name="input_b")
        input_c = keras.Input(shape=(1,), name="input_c")
x = keras.layers.Dense(4, name="dense_1")(input_a)
y = keras.layers.Dense(3, name="dense_2")(input_b)
z = keras.layers.Dense(1, name="dense_3")(input_c)
model_1 = keras.Model(inputs=input_a, outputs=x)
model_2 = keras.Model(inputs=[input_a, input_b], outputs=[x, y])
model_3 = keras.Model(inputs=input_c, outputs=z)
model_1.compile(optimizer="rmsprop", loss="mse")
model_2.compile(optimizer="rmsprop", loss="mse")
model_3.compile(optimizer="rmsprop", loss="mse")
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
input_a_df = pd.DataFrame(input_a_np)
input_b_df = pd.DataFrame(input_b_np)
output_a_df = pd.DataFrame(np.random.random((10, 4)))
output_b_df = pd.DataFrame(np.random.random((10, 3)))
output_c_series = pd.DataFrame(np.random.random((10, 4)))[0]
model_1.fit(input_a_df, output_a_df)
model_2.fit([input_a_df, input_b_df], [output_a_df, output_b_df])
model_3.fit(input_a_df[[0]], output_c_series)
model_1.fit([input_a_df], [output_a_df])
model_1.fit({"input_a": input_a_df}, output_a_df)
model_2.fit(
{"input_a": input_a_df, "input_b": input_b_df},
[output_a_df, output_b_df],
)
model_1.evaluate(input_a_df, output_a_df)
model_2.evaluate([input_a_df, input_b_df], [output_a_df, output_b_df])
model_3.evaluate(input_a_df[[0]], output_c_series)
model_1.evaluate([input_a_df], [output_a_df])
model_1.evaluate({"input_a": input_a_df}, output_a_df)
model_2.evaluate(
{"input_a": input_a_df, "input_b": input_b_df},
[output_a_df, output_b_df],
)
# Verify predicting on pandas vs numpy returns the same result
predict_1_pandas = model_1.predict(input_a_df)
predict_2_pandas = model_2.predict([input_a_df, input_b_df])
predict_3_pandas = model_3.predict(input_a_df[[0]])
predict_3_pandas_batch = model_3.predict_on_batch(input_a_df[0])
predict_1_numpy = model_1.predict(input_a_np)
predict_2_numpy = model_2.predict([input_a_np, input_b_np])
predict_3_numpy = model_3.predict(np.asarray(input_a_df[0]))
self.assertAllClose(predict_1_numpy, predict_1_pandas)
self.assertAllClose(predict_2_numpy, predict_2_pandas)
self.assertAllClose(predict_3_numpy, predict_3_pandas_batch)
self.assertAllClose(predict_3_numpy, predict_3_pandas)
# Extra ways to pass in dataframes
model_1.predict([input_a_df])
model_1.predict({"input_a": input_a_df})
model_2.predict({"input_a": input_a_df, "input_b": input_b_df})
def test_can_handle(self):
self.assertTrue(self.adapter_cls.can_handle(self.tensor_input))
self.assertTrue(
self.adapter_cls.can_handle(self.tensor_input, self.tensor_target)
)
self.assertFalse(self.adapter_cls.can_handle(self.arraylike_input))
self.assertFalse(
self.adapter_cls.can_handle(
self.arraylike_input, self.arraylike_target
)
)
self.assertFalse(self.adapter_cls.can_handle(self.dataset_input))
self.assertFalse(self.adapter_cls.can_handle(self.generator_input))
self.assertFalse(self.adapter_cls.can_handle(self.sequence_input))
self.assertFalse(self.adapter_cls.can_handle(self.text_input))
self.assertFalse(self.adapter_cls.can_handle(self.bytes_input))
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_training(self):
self.model.compile(
loss="sparse_categorical_crossentropy",
optimizer="sgd",
run_eagerly=test_utils.should_run_eagerly(),
)
self.model.fit(self.tensor_input, self.tensor_target, batch_size=5)
def test_size(self):
adapter = self.adapter_cls(
self.tensor_input, self.tensor_target, batch_size=5
)
self.assertEqual(adapter.get_size(), 10)
self.assertFalse(adapter.has_partial_batch())
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_shuffle_correctness(self):
num_samples = 100
batch_size = 32
x = np.arange(num_samples)
np.random.seed(99)
adapter = self.adapter_cls(
x, y=None, batch_size=batch_size, shuffle=True, epochs=2
)
def _get_epoch(ds_iter):
ds_data = []
for _ in range(int(math.ceil(num_samples / batch_size))):
ds_data.append(next(ds_iter).numpy())
return np.concatenate(ds_data)
ds_iter = iter(adapter.get_dataset())
# First epoch.
epoch_data = _get_epoch(ds_iter)
# Check that shuffling occurred.
self.assertNotAllClose(x, epoch_data)
        # Check that each element appears, and only once.
self.assertAllClose(x, np.sort(epoch_data))
# Second epoch.
second_epoch_data = _get_epoch(ds_iter)
# Check that shuffling occurred.
self.assertNotAllClose(x, second_epoch_data)
# Check that shuffling is different across epochs.
self.assertNotAllClose(epoch_data, second_epoch_data)
        # Check that each element appears, and only once.
self.assertAllClose(x, np.sort(second_epoch_data))
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_batch_shuffle_correctness(self):
num_samples = 100
batch_size = 6
x = np.arange(num_samples)
np.random.seed(99)
adapter = self.adapter_cls(
x, y=None, batch_size=batch_size, shuffle="batch", epochs=2
)
def _get_epoch_batches(ds_iter):
ds_data = []
for _ in range(int(math.ceil(num_samples / batch_size))):
ds_data.append(next(ds_iter)[0].numpy())
return ds_data
ds_iter = iter(adapter.get_dataset())
# First epoch.
epoch_batch_data = _get_epoch_batches(ds_iter)
epoch_data = np.concatenate(epoch_batch_data)
def _verify_batch(batch):
# Verify that a batch contains only contiguous data, and that it has
# been shuffled.
shuffled_batch = np.sort(batch)
self.assertNotAllClose(batch, shuffled_batch)
for i in range(1, len(batch)):
self.assertEqual(shuffled_batch[i - 1] + 1, shuffled_batch[i])
# Assert that the data within each batch remains contiguous
for batch in epoch_batch_data:
_verify_batch(batch)
        # Check that shuffling occurred across the epoch.
self.assertNotAllClose(x, epoch_data)
        # Check that each element appears, and only once.
self.assertAllClose(x, np.sort(epoch_data))
# Second epoch.
second_epoch_batch_data = _get_epoch_batches(ds_iter)
second_epoch_data = np.concatenate(second_epoch_batch_data)
# Assert that the data within each batch remains contiguous
for batch in second_epoch_batch_data:
_verify_batch(batch)
# Check that shuffling occurred.
self.assertNotAllClose(x, second_epoch_data)
# Check that shuffling is different across epochs.
self.assertNotAllClose(epoch_data, second_epoch_data)
        # Check that each element appears, and only once.
self.assertAllClose(x, np.sort(second_epoch_data))
@parameterized.named_parameters(
("batch_size_5", 5, None, 5),
(
"batch_size_50",
50,
4,
50,
), # Sanity check: batch_size takes precedence
("steps_1", None, 1, 50),
("steps_4", None, 4, 13),
)
def test_batch_size(self, batch_size_in, steps, batch_size_out):
adapter = self.adapter_cls(
self.tensor_input,
self.tensor_target,
batch_size=batch_size_in,
steps=steps,
)
self.assertEqual(adapter.batch_size(), batch_size_out)
@parameterized.named_parameters(
("batch_size_5", 5, None, 10, 0),
("batch_size_4", 4, None, 13, 2),
("steps_1", None, 1, 1, 0),
("steps_5", None, 5, 5, 0),
("steps_4", None, 4, 4, 11),
)
def test_partial_batch(
self, batch_size_in, steps, size, partial_batch_size
):
adapter = self.adapter_cls(
self.tensor_input,
self.tensor_target,
batch_size=batch_size_in,
steps=steps,
)
self.assertEqual(adapter.get_size(), size) # 50/steps
self.assertEqual(adapter.has_partial_batch(), bool(partial_batch_size))
self.assertEqual(
adapter.partial_batch_size(), partial_batch_size or None
)
class IncreasingBatchSizeAdapterTest(test_combinations.TestCase):
def setUp(self):
super(IncreasingBatchSizeAdapterTest, self).setUp()
self.adapter_cls = data_adapter.KerasSequenceAdapter
self.epochs = 2
self.increasing_batch_size = [5, 10]
self.sequence_input = TestBatchSequence(
batch_size=self.increasing_batch_size,
feature_shape=10,
epochs=self.epochs,
)
self.model = keras.models.Sequential(
[keras.layers.Dense(8, input_shape=(10,), activation="softmax")]
)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_training_with_test_batch_sequence(self):
"""Ensures TestBatchSequence works as expected."""
self.model.compile(
loss="sparse_categorical_crossentropy",
optimizer="sgd",
run_eagerly=test_utils.should_run_eagerly(),
)
# Check state before fit()
self.assertEqual(self.sequence_input._current_epoch, 0)
self.assertEqual(self.sequence_input._current_batch_size, 5)
# Execute fit()
self.model.fit(self.sequence_input, epochs=self.epochs)
# Check state after fit()
self.assertEqual(self.sequence_input._current_epoch, 2)
self.assertEqual(self.sequence_input._current_batch_size, 10)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_training_with_increasing_batch_size(self):
"""Ensures data_adapters DataHandler & DataAdapter work as expected."""
self.model.compile(
loss="sparse_categorical_crossentropy",
optimizer="sgd",
run_eagerly=test_utils.should_run_eagerly(),
)
self.model.stop_training = False
self.model.train_function = self.model.make_train_function()
# Check state before fit()
self.assertEqual(self.sequence_input._current_epoch, 0)
self.assertEqual(self.sequence_input._current_batch_size, 5)
data_handler = data_adapter.get_data_handler(
self.sequence_input,
epochs=self.epochs,
model=self.model,
)
self.assertEqual(
data_handler.inferred_steps, 4
) # 20 samples / 5 bs = 4
# Execute fit()-loop
for epoch, iterator in data_handler.enumerate_epochs():
self.model.reset_metrics()
with data_handler.catch_stop_iteration():
for step in data_handler.steps():
with tf.profiler.experimental.Trace(
"train",
epoch_num=epoch,
step_num=step,
batch_size=self.sequence_input._current_batch_size,
_r=1,
):
if data_handler.should_sync:
context.async_wait()
if self.model.stop_training:
break
# Check state after fit()
self.assertEqual(
data_handler.inferred_steps, 2
) # 20 samples / 10 bs = 2
class GenericArrayLikeDataAdapterTest(DataAdapterTestBase):
def setUp(self):
super().setUp()
self.adapter_cls = data_adapter.GenericArrayLikeDataAdapter
def test_can_handle_some_numpy(self):
self.assertTrue(self.adapter_cls.can_handle(self.arraylike_input))
self.assertTrue(
self.adapter_cls.can_handle(
self.arraylike_input, self.arraylike_target
)
)
        # Because adapters are mutually exclusive, this adapter doesn't
        # handle cases where all the data is numpy or an EagerTensor.
self.assertFalse(self.adapter_cls.can_handle(self.numpy_input))
self.assertFalse(
self.adapter_cls.can_handle(self.numpy_input, self.numpy_target)
)
self.assertFalse(self.adapter_cls.can_handle(self.tensor_input))
self.assertFalse(
self.adapter_cls.can_handle(self.tensor_input, self.tensor_target)
)
# But do handle mixes that include generic arraylike data
self.assertTrue(
self.adapter_cls.can_handle(self.numpy_input, self.arraylike_target)
)
self.assertTrue(
self.adapter_cls.can_handle(self.arraylike_input, self.numpy_target)
)
self.assertTrue(
self.adapter_cls.can_handle(
self.arraylike_input, self.tensor_target
)
)
self.assertTrue(
self.adapter_cls.can_handle(
self.tensor_input, self.arraylike_target
)
)
self.assertFalse(self.adapter_cls.can_handle(self.dataset_input))
self.assertFalse(self.adapter_cls.can_handle(self.generator_input))
self.assertFalse(self.adapter_cls.can_handle(self.sequence_input))
self.assertFalse(self.adapter_cls.can_handle(self.text_input))
self.assertFalse(self.adapter_cls.can_handle(self.bytes_input))
def test_size(self):
adapter = self.adapter_cls(
self.arraylike_input, self.arraylike_target, batch_size=5
)
self.assertEqual(adapter.get_size(), 10)
self.assertFalse(adapter.has_partial_batch())
def test_epochs(self):
num_epochs = 3
adapter = self.adapter_cls(
self.arraylike_input,
self.numpy_target,
batch_size=5,
epochs=num_epochs,
)
ds_iter = iter(adapter.get_dataset())
num_batches_per_epoch = self.numpy_input.shape[0] // 5
for _ in range(num_batches_per_epoch * num_epochs):
next(ds_iter)
with self.assertRaises(StopIteration):
next(ds_iter)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_training(self):
# First verify that DummyArrayLike can't be converted to a Tensor
with self.assertRaises(TypeError):
tf.convert_to_tensor(self.arraylike_input)
# Then train on the array like.
        # It should not be converted to a tensor directly (which would force
        # it into memory); only the sliced data should be converted.
self.model.compile(
loss="sparse_categorical_crossentropy",
optimizer="sgd",
run_eagerly=test_utils.should_run_eagerly(),
)
self.model.fit(
self.arraylike_input, self.arraylike_target, batch_size=5
)
self.model.fit(
self.arraylike_input,
self.arraylike_target,
shuffle=True,
batch_size=5,
)
self.model.fit(
self.arraylike_input,
self.arraylike_target,
shuffle="batch",
batch_size=5,
)
self.model.evaluate(
self.arraylike_input, self.arraylike_target, batch_size=5
)
self.model.predict(self.arraylike_input, batch_size=5)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_training_numpy_target(self):
self.model.compile(
loss="sparse_categorical_crossentropy",
optimizer="sgd",
run_eagerly=test_utils.should_run_eagerly(),
)
self.model.fit(self.arraylike_input, self.numpy_target, batch_size=5)
self.model.fit(
self.arraylike_input, self.numpy_target, shuffle=True, batch_size=5
)
self.model.fit(
self.arraylike_input,
self.numpy_target,
shuffle="batch",
batch_size=5,
)
self.model.evaluate(
self.arraylike_input, self.numpy_target, batch_size=5
)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_training_tensor_target(self):
self.model.compile(
loss="sparse_categorical_crossentropy",
optimizer="sgd",
run_eagerly=test_utils.should_run_eagerly(),
)
self.model.fit(self.arraylike_input, self.tensor_target, batch_size=5)
self.model.fit(
self.arraylike_input, self.tensor_target, shuffle=True, batch_size=5
)
self.model.fit(
self.arraylike_input,
self.tensor_target,
shuffle="batch",
batch_size=5,
)
self.model.evaluate(
self.arraylike_input, self.tensor_target, batch_size=5
)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_shuffle_correctness(self):
num_samples = 100
batch_size = 32
x = DummyArrayLike(np.arange(num_samples))
np.random.seed(99)
adapter = self.adapter_cls(
x, y=None, batch_size=batch_size, shuffle=True, epochs=2
)
def _get_epoch(ds_iter):
ds_data = []
for _ in range(int(math.ceil(num_samples / batch_size))):
ds_data.append(next(ds_iter).numpy())
return np.concatenate(ds_data)
ds_iter = iter(adapter.get_dataset())
# First epoch.
epoch_data = _get_epoch(ds_iter)
# Check that shuffling occurred.
self.assertNotAllClose(x, epoch_data)
        # Check that each element appears, and only once.
self.assertAllClose(x, np.sort(epoch_data))
# Second epoch.
second_epoch_data = _get_epoch(ds_iter)
# Check that shuffling occurred.
self.assertNotAllClose(x, second_epoch_data)
# Check that shuffling is different across epochs.
self.assertNotAllClose(epoch_data, second_epoch_data)
        # Check that each element appears, and only once.
self.assertAllClose(x, np.sort(second_epoch_data))
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_batch_shuffle_correctness(self):
num_samples = 100
batch_size = 6
x = DummyArrayLike(np.arange(num_samples))
np.random.seed(99)
adapter = self.adapter_cls(
x, y=None, batch_size=batch_size, shuffle="batch", epochs=2
)
def _get_epoch_batches(ds_iter):
ds_data = []
for _ in range(int(math.ceil(num_samples / batch_size))):
ds_data.append(next(ds_iter)[0].numpy())
return ds_data
ds_iter = iter(adapter.get_dataset())
# First epoch.
epoch_batch_data = _get_epoch_batches(ds_iter)
epoch_data = np.concatenate(epoch_batch_data)
def _verify_batch(batch):
# Verify that a batch contains only contiguous data, but that it has
# been shuffled.
shuffled_batch = np.sort(batch)
self.assertNotAllClose(batch, shuffled_batch)
for i in range(1, len(batch)):
self.assertEqual(shuffled_batch[i - 1] + 1, shuffled_batch[i])
        # Assert that the data within each batch is contiguous but shuffled
for batch in epoch_batch_data:
_verify_batch(batch)
        # Check that shuffling occurred across the epoch.
self.assertNotAllClose(x, epoch_data)
        # Check that each element appears, and only once.
self.assertAllClose(x, np.sort(epoch_data))
# Second epoch.
second_epoch_batch_data = _get_epoch_batches(ds_iter)
second_epoch_data = np.concatenate(second_epoch_batch_data)
# Assert that the data within each batch remains contiguous
for batch in second_epoch_batch_data:
_verify_batch(batch)
# Check that shuffling occurred.
self.assertNotAllClose(x, second_epoch_data)
# Check that shuffling is different across epochs.
self.assertNotAllClose(epoch_data, second_epoch_data)
        # Check that each element appears, and only once.
self.assertAllClose(x, np.sort(second_epoch_data))
@parameterized.named_parameters(
("batch_size_5", 5, None, 5),
(
"batch_size_50",
50,
4,
50,
), # Sanity check: batch_size takes precedence
("steps_1", None, 1, 50),
("steps_4", None, 4, 13),
)
def test_batch_size(self, batch_size_in, steps, batch_size_out):
adapter = self.adapter_cls(
self.arraylike_input,
self.arraylike_target,
batch_size=batch_size_in,
steps=steps,
)
self.assertEqual(adapter.batch_size(), batch_size_out)
@parameterized.named_parameters(
("batch_size_5", 5, None, 10, 0),
("batch_size_4", 4, None, 13, 2),
("steps_1", None, 1, 1, 0),
("steps_5", None, 5, 5, 0),
("steps_4", None, 4, 4, 11),
)
def test_partial_batch(
self, batch_size_in, steps, size, partial_batch_size
):
adapter = self.adapter_cls(
self.arraylike_input,
self.arraylike_target,
batch_size=batch_size_in,
steps=steps,
)
self.assertEqual(adapter.get_size(), size) # 50/steps
self.assertEqual(adapter.has_partial_batch(), bool(partial_batch_size))
self.assertEqual(
adapter.partial_batch_size(), partial_batch_size or None
)
class DatasetAdapterTest(DataAdapterTestBase):
def setUp(self):
super().setUp()
self.adapter_cls = data_adapter.DatasetAdapter
def test_can_handle(self):
self.assertFalse(self.adapter_cls.can_handle(self.numpy_input))
self.assertFalse(self.adapter_cls.can_handle(self.tensor_input))
self.assertTrue(self.adapter_cls.can_handle(self.dataset_input))
self.assertFalse(self.adapter_cls.can_handle(self.generator_input))
self.assertFalse(self.adapter_cls.can_handle(self.sequence_input))
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_training(self):
dataset = self.adapter_cls(self.dataset_input).get_dataset()
self.model.compile(
loss="sparse_categorical_crossentropy",
optimizer="sgd",
run_eagerly=test_utils.should_run_eagerly(),
)
self.model.fit(dataset)
def test_size(self):
adapter = self.adapter_cls(self.dataset_input)
self.assertIsNone(adapter.get_size())
def test_batch_size(self):
adapter = self.adapter_cls(self.dataset_input)
self.assertIsNone(adapter.batch_size())
def test_partial_batch(self):
adapter = self.adapter_cls(self.dataset_input)
self.assertFalse(adapter.has_partial_batch())
self.assertIsNone(adapter.partial_batch_size())
def test_invalid_targets_argument(self):
with self.assertRaisesRegex(
ValueError, r"`y` argument is not supported"
):
self.adapter_cls(self.dataset_input, y=self.dataset_input)
def test_invalid_sample_weights_argument(self):
with self.assertRaisesRegex(
ValueError, r"`sample_weight` argument is not supported"
):
self.adapter_cls(
self.dataset_input, sample_weights=self.dataset_input
)
class GeneratorDataAdapterTest(DataAdapterTestBase):
def setUp(self):
super().setUp()
self.adapter_cls = data_adapter.GeneratorDataAdapter
def test_can_handle(self):
self.assertFalse(self.adapter_cls.can_handle(self.numpy_input))
self.assertFalse(self.adapter_cls.can_handle(self.tensor_input))
self.assertFalse(self.adapter_cls.can_handle(self.dataset_input))
self.assertTrue(self.adapter_cls.can_handle(self.generator_input))
self.assertFalse(self.adapter_cls.can_handle(self.sequence_input))
self.assertFalse(self.adapter_cls.can_handle(self.text_input))
self.assertFalse(self.adapter_cls.can_handle(self.bytes_input))
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_training(self):
self.model.compile(
loss="sparse_categorical_crossentropy",
optimizer="sgd",
run_eagerly=test_utils.should_run_eagerly(),
)
self.model.fit(self.generator_input, steps_per_epoch=10)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
@test_utils.run_v2_only
@data_utils.dont_use_multiprocessing_pool
def test_with_multiprocessing_training(self):
self.model.compile(
loss="sparse_categorical_crossentropy",
optimizer="sgd",
run_eagerly=test_utils.should_run_eagerly(),
)
self.model.fit(
self.iterator_input,
workers=1,
use_multiprocessing=True,
max_queue_size=10,
steps_per_epoch=10,
)
        # Fit twice to ensure there isn't any duplication that prevents the
        # worker from starting.
self.model.fit(
self.iterator_input,
workers=1,
use_multiprocessing=True,
max_queue_size=10,
steps_per_epoch=10,
)
def test_size(self):
adapter = self.adapter_cls(self.generator_input)
self.assertIsNone(adapter.get_size())
def test_batch_size(self):
adapter = self.adapter_cls(self.generator_input)
self.assertEqual(adapter.batch_size(), None)
self.assertEqual(adapter.representative_batch_size(), 5)
def test_partial_batch(self):
adapter = self.adapter_cls(self.generator_input)
self.assertFalse(adapter.has_partial_batch())
self.assertIsNone(adapter.partial_batch_size())
def test_invalid_targets_argument(self):
with self.assertRaisesRegex(
ValueError, r"`y` argument is not supported"
):
self.adapter_cls(self.generator_input, y=self.generator_input)
def test_invalid_sample_weights_argument(self):
with self.assertRaisesRegex(
ValueError, r"`sample_weight` argument is not supported"
):
self.adapter_cls(
self.generator_input, sample_weights=self.generator_input
)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_not_shuffled(self):
def generator():
for i in range(10):
yield np.ones((1, 1)) * i
adapter = self.adapter_cls(generator(), shuffle=True)
for i, data in enumerate(adapter.get_dataset()):
self.assertEqual(i, data[0].numpy().flatten())
def test_model_without_forward_pass(self):
class MyModel(keras.Model):
def train_step(self, data):
return {"loss": 0.0}
def test_step(self, data):
return {"loss": 0.0}
model = MyModel()
model.compile("rmsprop")
model.fit(self.generator_input, steps_per_epoch=5)
out = model.evaluate(self.generator_input, steps=5)
self.assertEqual(out, 0)
class KerasSequenceAdapterTest(DataAdapterTestBase):
def setUp(self):
super().setUp()
self.adapter_cls = data_adapter.KerasSequenceAdapter
def test_can_handle(self):
self.assertFalse(self.adapter_cls.can_handle(self.numpy_input))
self.assertFalse(self.adapter_cls.can_handle(self.tensor_input))
self.assertFalse(self.adapter_cls.can_handle(self.dataset_input))
self.assertFalse(self.adapter_cls.can_handle(self.generator_input))
self.assertTrue(self.adapter_cls.can_handle(self.sequence_input))
self.assertFalse(self.adapter_cls.can_handle(self.text_input))
self.assertFalse(self.adapter_cls.can_handle(self.bytes_input))
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_training(self):
self.model.compile(
loss="sparse_categorical_crossentropy",
optimizer="sgd",
run_eagerly=test_utils.should_run_eagerly(),
)
self.model.fit(self.sequence_input)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
@test_utils.run_v2_only
@data_utils.dont_use_multiprocessing_pool
def test_with_multiprocessing_training(self):
self.model.compile(
loss="sparse_categorical_crossentropy",
optimizer="sgd",
run_eagerly=test_utils.should_run_eagerly(),
)
self.model.fit(
self.sequence_input,
workers=1,
use_multiprocessing=True,
max_queue_size=10,
steps_per_epoch=10,
)
        # Fit twice to ensure there isn't any duplication that prevents the
        # worker from starting.
self.model.fit(
self.sequence_input,
workers=1,
use_multiprocessing=True,
max_queue_size=10,
steps_per_epoch=10,
)
def test_size(self):
adapter = self.adapter_cls(self.sequence_input)
self.assertEqual(adapter.get_size(), 10)
def test_batch_size(self):
adapter = self.adapter_cls(self.sequence_input)
self.assertEqual(adapter.batch_size(), None)
self.assertEqual(adapter.representative_batch_size(), 5)
def test_partial_batch(self):
adapter = self.adapter_cls(self.sequence_input)
self.assertFalse(adapter.has_partial_batch())
self.assertIsNone(adapter.partial_batch_size())
def test_invalid_targets_argument(self):
with self.assertRaisesRegex(
ValueError, r"`y` argument is not supported"
):
self.adapter_cls(self.sequence_input, y=self.sequence_input)
def test_invalid_sample_weights_argument(self):
with self.assertRaisesRegex(
ValueError, r"`sample_weight` argument is not supported"
):
self.adapter_cls(
self.sequence_input, sample_weights=self.sequence_input
)
class KerasSequenceAdapterSparseTest(KerasSequenceAdapterTest):
def setUp(self):
super().setUp()
self.sequence_input = TestSparseSequence(self.batch_size, 10)
class KerasSequenceAdapterRaggedTest(KerasSequenceAdapterTest):
def setUp(self):
super().setUp()
self.sequence_input = TestRaggedSequence(self.batch_size, 10)
self.model = keras.models.Sequential(
[
keras.layers.Input(shape=(None,), ragged=True),
keras.layers.Embedding(10, 10),
keras.layers.Lambda(tf.reduce_mean, arguments=dict(axis=1)),
keras.layers.Dense(8, input_shape=(10,), activation="relu"),
]
)
class DataHandlerTest(test_combinations.TestCase):
def test_finite_dataset_with_steps_per_epoch(self):
data = tf.data.Dataset.from_tensor_slices([0, 1, 2, 3]).batch(1)
# User can choose to only partially consume `Dataset`.
data_handler = data_adapter.DataHandler(
data, initial_epoch=0, epochs=2, steps_per_epoch=2
)
self.assertEqual(data_handler.inferred_steps, 2)
self.assertFalse(data_handler._adapter.should_recreate_iterator())
returned_data = []
for _, iterator in data_handler.enumerate_epochs():
epoch_data = []
for _ in data_handler.steps():
epoch_data.append(next(iterator).numpy())
returned_data.append(epoch_data)
self.assertEqual(returned_data, [[0, 1], [2, 3]])
def test_finite_dataset_without_steps_per_epoch(self):
data = tf.data.Dataset.from_tensor_slices([0, 1, 2]).batch(1)
data_handler = data_adapter.DataHandler(data, initial_epoch=0, epochs=2)
self.assertEqual(data_handler.inferred_steps, 3)
returned_data = []
for _, iterator in data_handler.enumerate_epochs():
epoch_data = []
for _ in data_handler.steps():
epoch_data.append(next(iterator).numpy())
returned_data.append(epoch_data)
self.assertEqual(returned_data, [[0, 1, 2], [0, 1, 2]])
def test_finite_dataset_with_steps_per_epoch_exact_size(self):
data = tf.data.Dataset.from_tensor_slices([0, 1, 2, 3]).batch(1)
        # If the user specifies the exact size of the `Dataset` as
        # `steps_per_epoch`, create a new iterator each epoch.
data_handler = data_adapter.DataHandler(
data, initial_epoch=0, epochs=2, steps_per_epoch=4
)
self.assertTrue(data_handler._adapter.should_recreate_iterator())
returned_data = []
for _, iterator in data_handler.enumerate_epochs():
epoch_data = []
for _ in data_handler.steps():
epoch_data.append(next(iterator).numpy())
returned_data.append(epoch_data)
self.assertEqual(returned_data, [[0, 1, 2, 3], [0, 1, 2, 3]])
def test_infinite_dataset_with_steps_per_epoch(self):
data = tf.data.Dataset.from_tensor_slices([0, 1, 2]).batch(1).repeat()
data_handler = data_adapter.DataHandler(
data, initial_epoch=0, epochs=2, steps_per_epoch=3
)
returned_data = []
for _, iterator in data_handler.enumerate_epochs():
epoch_data = []
for _ in data_handler.steps():
epoch_data.append(next(iterator).numpy())
returned_data.append(epoch_data)
self.assertEqual(returned_data, [[0, 1, 2], [0, 1, 2]])
def test_unknown_cardinality_dataset_with_steps_per_epoch(self):
ds = tf.data.Dataset.from_tensor_slices([0, 1, 2, 3, 4, 5, 6])
filtered_ds = ds.filter(lambda x: x < 4)
self.assertEqual(
tf.data.experimental.cardinality(filtered_ds).numpy(),
tf.data.experimental.UNKNOWN_CARDINALITY,
)
# User can choose to only partially consume `Dataset`.
data_handler = data_adapter.DataHandler(
filtered_ds, initial_epoch=0, epochs=2, steps_per_epoch=2
)
self.assertFalse(data_handler._adapter.should_recreate_iterator())
returned_data = []
for _, iterator in data_handler.enumerate_epochs():
epoch_data = []
for _ in data_handler.steps():
epoch_data.append(next(iterator))
returned_data.append(epoch_data)
returned_data = self.evaluate(returned_data)
self.assertEqual(returned_data, [[0, 1], [2, 3]])
self.assertEqual(data_handler.inferred_steps, 2)
def test_unknown_cardinality_dataset_without_steps_per_epoch(self):
ds = tf.data.Dataset.from_tensor_slices([0, 1, 2, 3, 4, 5, 6])
filtered_ds = ds.filter(lambda x: x < 4)
self.assertEqual(
tf.data.experimental.cardinality(filtered_ds).numpy(),
tf.data.experimental.UNKNOWN_CARDINALITY,
)
data_handler = data_adapter.DataHandler(
filtered_ds, initial_epoch=0, epochs=2
)
self.assertEqual(data_handler.inferred_steps, None)
self.assertTrue(data_handler._adapter.should_recreate_iterator())
returned_data = []
for _, iterator in data_handler.enumerate_epochs():
epoch_data = []
with data_handler.catch_stop_iteration():
for _ in data_handler.steps():
epoch_data.append(next(iterator))
returned_data.append(epoch_data)
returned_data = self.evaluate(returned_data)
self.assertEqual(returned_data, [[0, 1, 2, 3], [0, 1, 2, 3]])
self.assertEqual(data_handler.inferred_steps, 4)
def test_insufficient_data(self):
ds = tf.data.Dataset.from_tensor_slices([0, 1])
ds = ds.filter(lambda *args, **kwargs: True)
data_handler = data_adapter.DataHandler(
ds, initial_epoch=0, epochs=2, steps_per_epoch=3
)
returned_data = []
for _, iterator in data_handler.enumerate_epochs():
epoch_data = []
for _ in data_handler.steps():
with data_handler.catch_stop_iteration():
epoch_data.append(next(iterator))
returned_data.append(epoch_data)
returned_data = self.evaluate(returned_data)
self.assertTrue(data_handler._insufficient_data)
self.assertEqual(returned_data, [[0, 1]])
def test_numpy(self):
x = np.array([0, 1, 2])
y = np.array([0, 2, 4])
sw = np.array([0, 4, 8])
data_handler = data_adapter.DataHandler(
x=x, y=y, sample_weight=sw, batch_size=1, epochs=2
)
returned_data = []
for _, iterator in data_handler.enumerate_epochs():
epoch_data = []
for _ in data_handler.steps():
epoch_data.append(next(iterator))
returned_data.append(epoch_data)
returned_data = self.evaluate(returned_data)
self.assertEqual(
returned_data,
[
[(0, 0, 0), (1, 2, 4), (2, 4, 8)],
[(0, 0, 0), (1, 2, 4), (2, 4, 8)],
],
)
def test_generator(self):
def generator():
for _ in range(2):
for step in range(3):
yield (tf.convert_to_tensor([step]),)
data_handler = data_adapter.DataHandler(
generator(), epochs=2, steps_per_epoch=3
)
returned_data = []
for _, iterator in data_handler.enumerate_epochs():
epoch_data = []
for _ in data_handler.steps():
epoch_data.append(next(iterator))
returned_data.append(epoch_data)
returned_data = self.evaluate(returned_data)
self.assertEqual(
returned_data, [[([0],), ([1],), ([2],)], [([0],), ([1],), ([2],)]]
)
def test_composite_tensor(self):
st = tf.SparseTensor(
indices=[[0, 0], [1, 0], [2, 0]],
values=[0, 1, 2],
dense_shape=[3, 1],
)
data_handler = data_adapter.DataHandler(st, epochs=2, steps_per_epoch=3)
returned_data = []
for _, iterator in data_handler.enumerate_epochs():
epoch_data = []
for _ in data_handler.steps():
epoch_data.append(next(iterator))
returned_data.append(epoch_data)
returned_data = self.evaluate(
tf.nest.map_structure(tf.sparse.to_dense, returned_data)
)
self.assertEqual(
returned_data, [[([0],), ([1],), ([2],)], [([0],), ([1],), ([2],)]]
)
def test_iterator(self):
def generator():
for _ in range(2):
for step in range(3):
yield (tf.convert_to_tensor([step]),)
it = iter(
tf.data.Dataset.from_generator(generator, output_types=("float32",))
)
data_handler = data_adapter.DataHandler(it, epochs=2, steps_per_epoch=3)
returned_data = []
for _, iterator in data_handler.enumerate_epochs():
epoch_data = []
for _ in data_handler.steps():
epoch_data.append(next(iterator))
returned_data.append(epoch_data)
returned_data = self.evaluate(returned_data)
self.assertEqual(
returned_data, [[([0],), ([1],), ([2],)], [([0],), ([1],), ([2],)]]
)
def test_list_of_scalars(self):
data_handler = data_adapter.DataHandler(
[[0], [1], [2]], epochs=2, steps_per_epoch=3
)
returned_data = []
for _, iterator in data_handler.enumerate_epochs():
epoch_data = []
for _ in data_handler.steps():
epoch_data.append(next(iterator))
returned_data.append(epoch_data)
returned_data = self.evaluate(returned_data)
self.assertEqual(
returned_data, [[([0],), ([1],), ([2],)], [([0],), ([1],), ([2],)]]
)
def test_class_weight_user_errors(self):
with self.assertRaisesRegex(ValueError, "to be a dict with keys"):
data_adapter.DataHandler(
x=[[0], [1], [2]],
y=[[2], [1], [0]],
batch_size=1,
sample_weight=[[1.0], [2.0], [4.0]],
class_weight={0: 0.5, 1: 1.0, 3: 1.5}, # Skips class `2`.
)
with self.assertRaisesRegex(ValueError, "with a single output"):
data_adapter.DataHandler(
x=np.ones((10, 1)),
y=[np.ones((10, 1)), np.zeros((10, 1))],
batch_size=2,
class_weight={0: 0.5, 1: 1.0, 2: 1.5},
)
@parameterized.named_parameters(("one_hot", True), ("sparse", False))
def test_class_weights_applied(self, one_hot):
num_channels = 3
num_classes = 5
batch_size = 2
image_width = 8
input_shape = (batch_size, image_width, image_width, num_channels)
output_shape = (batch_size, image_width, image_width)
x = tf.random.uniform(input_shape)
sparse_y = tf.random.uniform(
output_shape, maxval=num_classes, dtype=tf.int32
)
if one_hot:
y = tf.one_hot(sparse_y, num_classes)
else:
y = tf.expand_dims(sparse_y, axis=-1)
# Class weight is equal to class number + 1
class_weight = dict([(x, x + 1) for x in range(num_classes)])
sample_weight = np.array([1, 2])
data_handler = data_adapter.DataHandler(
x=x,
y=y,
class_weight=class_weight,
sample_weight=sample_weight,
batch_size=batch_size,
epochs=1,
)
returned_data = []
for _, iterator in data_handler.enumerate_epochs():
epoch_data = []
for _ in data_handler.steps():
epoch_data.append(next(iterator))
returned_data.append(epoch_data)
returned_data = self.evaluate(returned_data)
# We had only 1 batch and 1 epoch, so we extract x, y, sample_weight
result_x, result_y, result_sample_weight = returned_data[0][0]
self.assertAllEqual(x, result_x)
self.assertAllEqual(y, result_y)
# Because class weight = class + 1, resulting class weight = y + 1
# Sample weight is 1 for the first sample, 2 for the second,
# so we double the expected sample weight for the second sample.
self.assertAllEqual(sparse_y[0] + 1, result_sample_weight[0])
self.assertAllEqual(2 * (sparse_y[1] + 1), result_sample_weight[1])
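    # Worked example (illustrative, not part of the original test): with
    # class_weight = {c: c + 1}, a pixel of class 3 in the first sample
    # (sample_weight 1) gets weight 1 * (3 + 1) = 4, while a pixel of class 3
    # in the second sample (sample_weight 2) gets 2 * (3 + 1) = 8.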
@parameterized.named_parameters(("numpy", True), ("dataset", False))
def test_single_x_input_no_tuple_wrapping(self, use_numpy):
x = np.ones((10, 1))
if use_numpy:
batch_size = 2
else:
x = tf.data.Dataset.from_tensor_slices(x).batch(2)
batch_size = None
data_handler = data_adapter.DataHandler(x, batch_size=batch_size)
for _, iterator in data_handler.enumerate_epochs():
for _ in data_handler.steps():
# Check that single x input is not wrapped in a tuple.
self.assertIsInstance(next(iterator), tf.Tensor)
def test_error_if_zero_steps_per_epoch(self):
data = tf.data.Dataset.from_tensor_slices([0, 1, 2, 3]).batch(1)
with self.assertRaisesRegex(
ValueError,
"steps_per_epoch must be positive, None or -1. Received 0.",
):
data_adapter.DataHandler(
data, initial_epoch=0, epochs=2, steps_per_epoch=0
)
def test_error_if_empty_array_input_data(self):
x = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([0, 1, 1, 0])
idx = []
with self.assertRaisesWithLiteralMatch(
ValueError,
"Expected input data to be non-empty.",
):
data_adapter.DataHandler(x[idx], y[idx])
def test_error_if_empty_dataset_input_data(self):
data = tf.data.Dataset.from_tensor_slices([]).batch(1)
with self.assertRaisesWithLiteralMatch(
ValueError,
"Expected input data to be non-empty.",
):
data_adapter.DataHandler(data)
class TestValidationSplit(test_combinations.TestCase):
@parameterized.named_parameters(("numpy_arrays", True), ("tensors", False))
def test_validation_split_unshuffled(self, use_numpy):
if use_numpy:
x = np.array([0, 1, 2, 3, 4])
y = np.array([0, 2, 4, 6, 8])
sw = np.array([0, 4, 8, 12, 16])
else:
x = tf.convert_to_tensor([0, 1, 2, 3, 4])
y = tf.convert_to_tensor([0, 2, 4, 6, 8])
sw = tf.convert_to_tensor([0, 4, 8, 12, 16])
(train_x, train_y, train_sw), (
val_x,
val_y,
val_sw,
) = data_adapter.train_validation_split(
(x, y, sw), validation_split=0.2
)
if use_numpy:
train_x = tf.convert_to_tensor(train_x)
train_y = tf.convert_to_tensor(train_y)
train_sw = tf.convert_to_tensor(train_sw)
val_x = tf.convert_to_tensor(val_x)
val_y = tf.convert_to_tensor(val_y)
val_sw = tf.convert_to_tensor(val_sw)
self.assertEqual(train_x.numpy().tolist(), [0, 1, 2, 3])
self.assertEqual(train_y.numpy().tolist(), [0, 2, 4, 6])
self.assertEqual(train_sw.numpy().tolist(), [0, 4, 8, 12])
self.assertEqual(val_x.numpy().tolist(), [4])
self.assertEqual(val_y.numpy().tolist(), [8])
self.assertEqual(val_sw.numpy().tolist(), [16])
def test_validation_split_user_error(self):
with self.assertRaisesRegex(
ValueError, "is only supported for Tensors"
):
data_adapter.train_validation_split(
lambda: np.ones((10, 1)), validation_split=0.2
)
def test_validation_split_examples_too_few(self):
with self.assertRaisesRegex(ValueError, "not sufficient to split it"):
data_adapter.train_validation_split(
np.ones((1, 10)), validation_split=0.2
)
def test_validation_split_none(self):
train_sw, val_sw = data_adapter.train_validation_split(
None, validation_split=0.2
)
self.assertIsNone(train_sw)
self.assertIsNone(val_sw)
(_, train_sw), (_, val_sw) = data_adapter.train_validation_split(
(np.ones((10, 1)), None), validation_split=0.2
)
self.assertIsNone(train_sw)
self.assertIsNone(val_sw)
class ListsOfScalarsDataAdapterTest(DataAdapterTestBase):
def setUp(self):
super().setUp()
self.adapter_cls = data_adapter.ListsOfScalarsDataAdapter
def test_can_list_inputs(self):
self.assertTrue(self.adapter_cls.can_handle(self.text_input))
self.assertTrue(self.adapter_cls.can_handle(self.bytes_input))
self.assertFalse(self.adapter_cls.can_handle(self.numpy_input))
self.assertFalse(self.adapter_cls.can_handle(self.tensor_input))
self.assertFalse(self.adapter_cls.can_handle(self.dataset_input))
self.assertFalse(self.adapter_cls.can_handle(self.generator_input))
self.assertFalse(self.adapter_cls.can_handle(self.sequence_input))
self.assertFalse(self.adapter_cls.can_handle([]))
class TestDataAdapterUtils(DataAdapterTestBase):
def test_unpack_x_y_sample_weight_with_tuple_and_list(self):
tuple_version = data_adapter.unpack_x_y_sample_weight(
(self.tensor_input, self.tensor_target)
)
list_version = data_adapter.unpack_x_y_sample_weight(
[self.tensor_input, self.tensor_target]
)
self.assertEqual(tuple_version, list_version)
def test_unpack_pack_dict(self):
# A dictionary can be unambiguously represented without a tuple.
x = {"key": self.tensor_input}
packed_x = data_adapter.pack_x_y_sample_weight(x)
self.assertEqual(packed_x, x)
unpacked_x, _, _ = data_adapter.unpack_x_y_sample_weight(x)
self.assertEqual(unpacked_x, x)
if __name__ == "__main__":
tf.compat.v1.enable_eager_execution()
tf.test.main()
| tf-keras/tf_keras/engine/data_adapter_test.py/0 | {
"file_path": "tf-keras/tf_keras/engine/data_adapter_test.py",
"repo_id": "tf-keras",
"token_count": 28539
} | 211 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""RaggedKerasTensor tests."""
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
from tf_keras import layers
from tf_keras.engine import training
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
@test_utils.run_v2_only
class RaggedKerasTensorTest(test_combinations.TestCase):
@parameterized.parameters(
{"batch_size": None, "shape": (None, 5), "ragged_rank": 1},
{"batch_size": None, "shape": (None, 3, 5), "ragged_rank": 1},
{"batch_size": None, "shape": (5, None), "ragged_rank": 2},
{"batch_size": None, "shape": (3, 5, None), "ragged_rank": 3},
{"batch_size": None, "shape": (None, 3, 5, None), "ragged_rank": 4},
{
"batch_size": None,
"shape": (2, 3, None, 4, 5, None),
"ragged_rank": 6,
},
{"batch_size": 8, "shape": (None, 5), "ragged_rank": 1},
{"batch_size": 9, "shape": (None, 3, 5), "ragged_rank": 1},
{"batch_size": 1, "shape": (5, None), "ragged_rank": 2},
{"batch_size": 4, "shape": (3, 5, None), "ragged_rank": 3},
{"batch_size": 7, "shape": (None, 3, 5, None), "ragged_rank": 4},
{"batch_size": 12, "shape": (2, 3, None, 4, 5, None), "ragged_rank": 6},
)
def test_to_placeholder(self, shape, batch_size, ragged_rank):
inp = layers.Input(shape=shape, batch_size=batch_size, ragged=True)
self.assertEqual(inp.ragged_rank, ragged_rank)
self.assertAllEqual(inp.shape, [batch_size] + list(shape))
with tf.__internal__.FuncGraph("test").as_default():
placeholder = inp._to_placeholder()
self.assertEqual(placeholder.ragged_rank, ragged_rank)
self.assertAllEqual(placeholder.shape, [batch_size] + list(shape))
def test_add(self):
inp = layers.Input(shape=[None], ragged=True)
out = inp + inp
model = training.Model(inp, out)
x = tf.ragged.constant([[3, 4], [1, 2], [3, 5]])
self.assertAllEqual(model(x), x + x)
def test_mul(self):
inp = layers.Input(shape=[None], ragged=True)
out = inp * inp
model = training.Model(inp, out)
x = tf.ragged.constant([[3, 4], [1, 2], [3, 5]])
self.assertAllEqual(model(x), x * x)
def test_sub(self):
inp = layers.Input(shape=[None], ragged=True)
out = inp - inp
model = training.Model(inp, out)
x = tf.ragged.constant([[3, 4], [1, 2], [3, 5]])
self.assertAllEqual(model(x), x - x)
def test_div(self):
inp = layers.Input(shape=[None], ragged=True)
out = inp / inp
model = training.Model(inp, out)
x = tf.ragged.constant([[3, 4], [1, 2], [3, 5]])
self.assertAllEqual(model(x), x / x)
def test_getitem(self):
# Test slicing / getitem
inp = layers.Input(shape=(None, 2), ragged=True)
out = inp[:, :2]
model = training.Model(inp, out)
x = tf.RaggedTensor.from_row_lengths(
tf.cast(np.random.randn(6, 2), dtype=tf.float32), [3, 1, 2]
)
expected = x[:, :2]
self.assertAllEqual(model(x), expected)
# Test that models w/ slicing are correctly serialized/deserialized
config = model.get_config()
model = training.Model.from_config(config)
self.assertAllEqual(model(x), expected)
@parameterized.parameters(
{"property_name": "values"},
{"property_name": "flat_values"},
{"property_name": "row_splits"},
{"property_name": "nested_row_splits"},
)
def test_instance_property(self, property_name):
inp = layers.Input(shape=[None], ragged=True)
out = getattr(inp, property_name)
model = training.Model(inp, out)
x = tf.ragged.constant([[3, 4], [1, 2], [3, 5]])
expected_property = getattr(x, property_name)
self.assertAllEqual(model(x), expected_property)
# Test that it works with serialization and deserialization as well
model_config = model.get_config()
model2 = training.Model.from_config(model_config)
self.assertAllEqual(model2(x), expected_property)
@parameterized.parameters(
{"name": "value_rowids"},
{"name": "nested_value_rowids"},
{"name": "nrows"},
{"name": "row_starts"},
{"name": "row_limits"},
{"name": "row_lengths"},
{"name": "nested_row_lengths"},
{"name": "bounding_shape"},
{"name": "with_values", "args": [[1, 2, 3, 4, 5, 6]]},
{
"name": "with_flat_values",
"kwargs": {"new_values": [1, 2, 3, 4, 5, 6]},
},
{"name": "with_row_splits_dtype", "kwargs": {"dtype": tf.int32}},
{"name": "merge_dims", "args": [0], "kwargs": {"inner_axis": 1}},
{"name": "to_tensor"},
{"name": "to_sparse"},
)
def test_instance_method(self, name, args=None, kwargs=None):
if not args:
args = []
if not kwargs:
kwargs = {}
inp = layers.Input(shape=[None], ragged=True)
out = getattr(inp, name)(*args, **kwargs)
model = training.Model(inp, out)
x = tf.ragged.constant([[3, 4], [1, 2], [3, 5]])
expected_property = getattr(x, name)(*args, **kwargs)
# We expand composites before checking equality because
# assertAllEqual otherwise wouldn't work for SparseTensor outputs
for a, b in zip(
tf.nest.flatten(model(x), expand_composites=True),
tf.nest.flatten(expected_property, expand_composites=True),
):
self.assertAllEqual(a, b)
# Test that the model can serialize and deserialize as well
model_config = model.get_config()
model2 = training.Model.from_config(model_config)
for a, b in zip(
tf.nest.flatten(model2(x), expand_composites=True),
tf.nest.flatten(expected_property, expand_composites=True),
):
self.assertAllEqual(a, b)
@test_utils.run_v2_only
class RaggedTensorClassMethodAsLayerTest(test_combinations.TestCase):
def test_from_value_rowids(self):
inp = layers.Input(shape=[None])
out = tf.RaggedTensor.from_value_rowids(
inp, value_rowids=[0, 0, 0, 0, 2, 2, 2, 3], nrows=5
)
model = training.Model(inp, out)
x = tf.constant([3, 1, 4, 1, 5, 9, 2, 6])
expected = tf.RaggedTensor.from_value_rowids(
x, value_rowids=[0, 0, 0, 0, 2, 2, 2, 3], nrows=5
)
self.assertAllEqual(model(x), expected)
# Test that the model can serialize and deserialize as well
model_config = model.get_config()
model2 = training.Model.from_config(model_config)
self.assertAllEqual(model2(x), expected)
def test_from_row_splits(self):
inp = layers.Input(shape=[None])
out = tf.RaggedTensor.from_row_splits(
inp, row_splits=[0, 4, 4, 7, 8, 8]
)
model = training.Model(inp, out)
x = tf.constant([3, 1, 4, 1, 5, 9, 2, 6])
expected = tf.RaggedTensor.from_row_splits(
x, row_splits=[0, 4, 4, 7, 8, 8]
)
self.assertAllEqual(model(x), expected)
# Test that the model can serialize and deserialize as well
model_config = model.get_config()
model2 = training.Model.from_config(model_config)
self.assertAllEqual(model2(x), expected)
def test_from_row_lengths(self):
inp = layers.Input(shape=[None])
out = tf.RaggedTensor.from_row_lengths(inp, row_lengths=[4, 0, 3, 1, 0])
model = training.Model(inp, out)
x = tf.constant([3, 1, 4, 1, 5, 9, 2, 6])
expected = tf.RaggedTensor.from_row_lengths(
x, row_lengths=[4, 0, 3, 1, 0]
)
self.assertAllEqual(model(x), expected)
# Test that the model can serialize and deserialize as well
model_config = model.get_config()
model2 = training.Model.from_config(model_config)
self.assertAllEqual(model2(x), expected)
def test_from_row_starts(self):
inp = layers.Input(shape=[None])
out = tf.RaggedTensor.from_row_starts(inp, row_starts=[0, 4, 4, 7, 8])
model = training.Model(inp, out)
x = tf.constant([3, 1, 4, 1, 5, 9, 2, 6])
expected = tf.RaggedTensor.from_row_starts(
x, row_starts=[0, 4, 4, 7, 8]
)
self.assertAllEqual(model(x), expected)
# Test that the model can serialize and deserialize as well
model_config = model.get_config()
model2 = training.Model.from_config(model_config)
self.assertAllEqual(model2(x), expected)
def test_from_row_limits(self):
row_limits = tf.constant([2, 2, 5, 6, 7], tf.int64)
inp = layers.Input(shape=[None], dtype=tf.string)
out = tf.RaggedTensor.from_row_limits(inp, row_limits, validate=False)
model = training.Model(inp, out)
x = tf.constant(["a", "b", "c", "d", "e", "f", "g"])
expected = tf.RaggedTensor.from_row_limits(
x, row_limits, validate=False
)
self.assertAllEqual(model(x), expected)
# Test that the model can serialize and deserialize as well
model_config = model.get_config()
model2 = training.Model.from_config(model_config)
self.assertAllEqual(model2(x), expected)
def test_from_uniform_row_length(self):
inp = layers.Input(shape=[None])
out = tf.RaggedTensor.from_uniform_row_length(inp, 2, 8)
model = training.Model(inp, out)
x = tf.constant([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16])
expected = tf.RaggedTensor.from_uniform_row_length(x, 2, 8)
self.assertAllEqual(model(x), expected)
# Test that the model can serialize and deserialize as well
model_config = model.get_config()
model2 = training.Model.from_config(model_config)
self.assertAllEqual(model2(x), expected)
def test_from_nested_value_row_ids(self):
nested_value_rowids = [
tf.constant([0, 0, 1, 3, 3], tf.int64),
tf.constant([0, 0, 2, 2, 2, 3, 4], tf.int64),
]
inp = layers.Input(shape=[None], dtype=tf.string)
out = tf.RaggedTensor.from_nested_value_rowids(inp, nested_value_rowids)
model = training.Model(inp, out)
x = tf.constant(["a", "b", "c", "d", "e", "f", "g"])
expected = tf.RaggedTensor.from_nested_value_rowids(
x, nested_value_rowids
)
self.assertAllEqual(model(x), expected)
# Test that the model can serialize and deserialize as well
model_config = model.get_config()
model2 = training.Model.from_config(model_config)
self.assertAllEqual(model2(x), expected)
def test_from_nested_row_splits(self):
nested_row_splits = [
tf.constant([0, 2, 3, 3, 5], tf.int64),
tf.constant([0, 2, 2, 5, 6, 7], tf.int64),
]
inp = layers.Input(shape=[None], dtype=tf.string)
out = tf.RaggedTensor.from_nested_row_splits(inp, nested_row_splits)
model = training.Model(inp, out)
x = tf.constant(["a", "b", "c", "d", "e", "f", "g"])
expected = tf.RaggedTensor.from_nested_row_splits(x, nested_row_splits)
self.assertAllEqual(model(x), expected)
# Test that the model can serialize and deserialize as well
model_config = model.get_config()
model2 = training.Model.from_config(model_config)
self.assertAllEqual(model2(x), expected)
def test_from_nested_row_lengths(self):
nested_row_lengths = [
tf.constant([2, 1, 0, 2], tf.int64),
tf.constant([2, 0, 3, 1, 1], tf.int64),
]
inp = layers.Input(shape=[None], dtype=tf.string)
out = tf.RaggedTensor.from_nested_row_lengths(inp, nested_row_lengths)
model = training.Model(inp, out)
x = tf.constant(["a", "b", "c", "d", "e", "f", "g"])
expected = tf.RaggedTensor.from_nested_row_lengths(
x, nested_row_lengths
)
self.assertAllEqual(model(x), expected)
# Test that the model can serialize and deserialize as well
model_config = model.get_config()
model2 = training.Model.from_config(model_config)
self.assertAllEqual(model2(x), expected)
def test_from_tensor(self):
inp = layers.Input(shape=[None], ragged=False)
out = tf.RaggedTensor.from_tensor(inp)
model = training.Model(inp, out)
x = tf.constant([[3.0, 4.0], [1.0, 2.0], [3.0, 5.0]])
expected = tf.RaggedTensor.from_tensor(x)
self.assertAllEqual(model(x), expected)
# Test that the model can serialize and deserialize as well
model_config = model.get_config()
model2 = training.Model.from_config(model_config)
self.assertAllEqual(model2(x), expected)
def test_from_sparse(self):
inp = layers.Input(shape=[None], sparse=True, dtype=tf.string)
out = tf.RaggedTensor.from_sparse(inp)
model = training.Model(inp, out)
indices = [[0, 0], [1, 0], [1, 1], [2, 0]]
values = [b"a", b"b", b"c", b"d"]
shape = [4, 5]
sp_value = tf.SparseTensor(indices, values, shape)
expected = tf.RaggedTensor.from_sparse(sp_value)
self.assertAllEqual(model(sp_value), expected)
# Test that the model can serialize and deserialize as well
model_config = model.get_config()
model2 = training.Model.from_config(model_config)
self.assertAllEqual(model2(sp_value), expected)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/engine/ragged_keras_tensor_test.py/0 | {
"file_path": "tf-keras/tf_keras/engine/ragged_keras_tensor_test.py",
"repo_id": "tf-keras",
"token_count": 6632
} | 212 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training-related utilities."""
import numpy as np
import tensorflow.compat.v2 as tf
from tf_keras.utils import generic_utils
def slice_arrays(arrays, indices, contiguous=True):
"""Slices batches out of provided arrays (workaround for eager tensors).
Unfortunately eager tensors don't have the same slicing behavior as
Numpy arrays (they follow the same slicing behavior as symbolic TF tensors),
hence we cannot use `generic_utils.slice_arrays` directly
and we have to implement this workaround based on `concat`. This has a
performance cost.
Args:
arrays: Single array or list of arrays.
indices: List of indices in the array that should be included in the
output batch.
contiguous: Boolean flag indicating whether the indices are contiguous.
Returns:
Slice of data (either single array or list of arrays).
"""
converted_to_list = False
if not isinstance(arrays, list):
converted_to_list = True
arrays = [arrays]
if any(tf.is_tensor(x) for x in arrays):
if not contiguous:
entries = [[x[i : i + 1] for i in indices] for x in arrays]
slices = [tf.concat(x, axis=0) for x in entries]
else:
slices = [x[indices[0] : indices[-1] + 1] for x in arrays]
else:
slices = generic_utils.slice_arrays(arrays, indices)
if converted_to_list:
slices = slices[0]
return slices
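# Illustrative sketch, not part of the original module: how `slice_arrays`
# behaves for eager tensors. The arrays and indices below are assumptions made
# up for this example.
def _example_slice_arrays():  # pragma: no cover - documentation example
    x = tf.constant([[1.0], [2.0], [3.0], [4.0]])
    y = np.array([10, 20, 30, 40])
    # Contiguous indices take a single slice of each array (rows 1..2).
    x_batch, y_batch = slice_arrays([x, y], [1, 2])
    # Non-contiguous indices slice per index and rejoin with `tf.concat`.
    x_gather, y_gather = slice_arrays([x, y], [0, 3], contiguous=False)
    return x_batch, y_batch, x_gather, y_gather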
def handle_partial_sample_weights(
outputs, sample_weights, sample_weight_modes, check_all_flat=False
):
"""Adds 1.0 as sample weights for the outputs for which there is no weight.
Args:
outputs: List of model outputs.
sample_weights: List of sample weight inputs.
sample_weight_modes: List of sample weight modes or None.
check_all_flat: Ensure that inputs are not nested structures. This is not
a free check, so we may not want to run it eagerly every iteration.
Returns:
Tuple of sample weights, one sample weight for every output, and booleans
describing the raw sample weights.
"""
if not isinstance(sample_weights, (list, tuple)):
any_sample_weight = sample_weights is not None
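        # A single (non-list) sample weight is never "partial", so this flag
        # is always False on this branch.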
partial_sample_weight = any_sample_weight and sample_weights is None
else:
any_sample_weight = sample_weights is not None and any(
w is not None for w in sample_weights
)
partial_sample_weight = any_sample_weight and any(
w is None for w in sample_weights
)
if not any_sample_weight:
return None, any_sample_weight, partial_sample_weight
if not partial_sample_weight:
return sample_weights, any_sample_weight, partial_sample_weight
if check_all_flat:
tf.nest.assert_same_structure(
list_to_tuple(sample_weights),
list_to_tuple(tf.nest.flatten(sample_weights)),
)
tf.nest.assert_same_structure(
list_to_tuple(outputs), list_to_tuple(tf.nest.flatten(outputs))
)
if sample_weight_modes is not None:
tf.nest.assert_same_structure(
sample_weight_modes, tf.nest.flatten(sample_weight_modes)
)
new_sample_weights = []
for i, sw in enumerate(sample_weights):
if sw is None:
as_numpy = isinstance(outputs[i], np.ndarray)
output = outputs[i]
output_shape = output.shape if as_numpy else tf.shape(output)
is_temporal = (
sample_weight_modes is not None
and sample_weight_modes[i] == "temporal"
)
sw_shape = (
(output_shape[0], output_shape[1])
if is_temporal
else (output_shape[0],)
)
new_sample_weights.append(
np.ones(sw_shape) if as_numpy else tf.ones(sw_shape)
)
else:
new_sample_weights.append(sw)
return (
list_to_tuple(new_sample_weights),
any_sample_weight,
partial_sample_weight,
)
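# Illustrative sketch, not part of the original module: filling in weights for
# outputs that have none. The output shapes and weights are example values.
def _example_handle_partial_sample_weights():  # pragma: no cover
    outputs = [np.zeros((4, 1)), np.zeros((4, 3))]
    # Only the first output has an explicit weight; the second receives ones.
    sample_weights = [np.array([1.0, 2.0, 3.0, 4.0]), None]
    weights, any_sw, partial_sw = handle_partial_sample_weights(
        outputs, sample_weights, sample_weight_modes=None
    )
    # weights == (original weights, np.ones((4,))); both flags are True.
    return weights, any_sw, partial_sw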
class RespectCompiledTrainableState:
"""Set and restore trainable state if it has changed since compile.
The keras API guarantees that the value of each Layer's `trainable` property
at `Model.compile` time will be used when training that model. In order to
respect this requirement, it may be necessary to set the trainable value of
layers to their compile time values before beginning a training endpoint and
restore the values before returning from said endpoint. This scope checks if
any layer's trainable state has changed since Model compile, and performs
this set and un-set bookkeeping.
However, the trainable state of a layer changes quite infrequently, if ever,
for many kinds of workflows. Moreover, updating every layer in a model is an
expensive operation. As a result, we will only explicitly set and unset the
trainable state of a model if a trainable value has changed since compile.
"""
def __init__(self, model):
self._model = model
self._current_trainable_state = None
self._compiled_trainable_state = None
self._should_set_trainable = False
def __enter__(self):
self._current_trainable_state = self._model._get_trainable_state()
self._compiled_trainable_state = self._model._compiled_trainable_state
# Check to see if any layer's trainable state has changed since
# `compile`.
for layer, trainable in self._compiled_trainable_state.items():
if (
layer in self._current_trainable_state
and trainable != self._current_trainable_state[layer]
):
self._should_set_trainable = True
break
# If so, restore the model to its compiled state.
if self._should_set_trainable:
self._model._set_trainable_state(self._compiled_trainable_state)
def __exit__(self, type_arg, value_arg, traceback_arg):
# If we set the values to their compiled state in __enter__, we need to
# restore the original values before leaving the scope.
if self._should_set_trainable:
self._model._set_trainable_state(self._current_trainable_state)
return False # False values do not suppress exceptions
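# Illustrative usage sketch, not part of the original module. `model`, `x` and
# `y` are assumed arguments: a compiled `Model` and one batch of data.
def _example_respect_compiled_trainable_state(model, x, y):  # pragma: no cover
    # Inside the scope, layer `trainable` flags are reset to the values they
    # had at `compile` time and restored on exit.
    with RespectCompiledTrainableState(model):
        return model.train_on_batch(x, y)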
# Allow use of methods not exposed to the user.
def get_input_shape_and_dtype(layer):
"""Retrieves input shape and input dtype of layer if applicable.
Args:
layer: Layer (or model) instance.
Returns:
Tuple (input_shape, input_dtype). Both could be None if the layer
does not have a defined input shape.
Raises:
ValueError: in case an empty Sequential or Functional model is passed.
"""
def _is_graph_model(layer):
return (
hasattr(layer, "_is_graph_network") and layer._is_graph_network
) or layer.__class__.__name__ == "Sequential"
# In case of nested models: recover the first layer
# of the deepest model to infer input shape and dtype.
# Subclassed Models may not have been built so can't be checked.
while _is_graph_model(layer):
if not layer.layers:
raise ValueError("An empty Model cannot be used as a Layer.")
layer = layer.layers[0]
if getattr(layer, "_batch_input_shape", None):
return layer._batch_input_shape, layer.dtype
return None, None
def get_static_batch_size(layer):
"""Gets the static batch size of a Layer.
Args:
layer: a `Layer` instance.
Returns:
The static batch size of a Layer.
"""
batch_input_shape, _ = get_input_shape_and_dtype(layer)
if batch_input_shape is not None:
return tf.compat.v1.Dimension(batch_input_shape[0]).value
return None
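# Illustrative sketch, not part of the original module: reading the static
# batch size from a layer built with an explicit batch dimension.
def _example_get_static_batch_size():  # pragma: no cover
    from tf_keras import layers  # local import, only needed for this example
    layer = layers.Dense(1, batch_input_shape=(32, 8))
    return get_static_batch_size(layer)  # -> 32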
def list_to_tuple(maybe_list):
"""Datasets will stack the list of tensor, so switch them to tuples."""
if isinstance(maybe_list, list):
return tuple(maybe_list)
return maybe_list
| tf-keras/tf_keras/engine/training_utils.py/0 | {
"file_path": "tf-keras/tf_keras/engine/training_utils.py",
"repo_id": "tf-keras",
"token_count": 3277
} | 213 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for dense_features_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v2 as tf
from tf_keras.feature_column import dense_features_v2 as df
from tf_keras.testing_infra import test_combinations
# isort: off
from tensorflow.python.eager import backprop
def _initialized_session(config=None):
sess = tf.compat.v1.Session(config=config)
sess.run(tf.compat.v1.global_variables_initializer())
sess.run(tf.compat.v1.tables_initializer())
return sess
class DenseFeaturesTest(test_combinations.TestCase):
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_retrieving_input(self):
features = {"a": [0.0]}
dense_features = df.DenseFeatures(tf.feature_column.numeric_column("a"))
inputs = self.evaluate(dense_features(features))
self.assertAllClose([[0.0]], inputs)
@test_combinations.generate(test_combinations.combine(mode=["eager"]))
def test_reuses_variables(self):
sparse_input = tf.SparseTensor(
indices=((0, 0), (1, 0), (2, 0)),
values=(0, 1, 2),
dense_shape=(3, 3),
)
# Create feature columns (categorical and embedding).
categorical_column = tf.feature_column.categorical_column_with_identity(
key="a", num_buckets=3
)
embedding_dimension = 2
def _embedding_column_initializer(shape, dtype, partition_info=None):
del shape # unused
del dtype # unused
del partition_info # unused
            embedding_values = ((1, 0), (0, 1), (1, 1))  # ids 0, 1, 2
return embedding_values
embedding_column = tf.feature_column.embedding_column(
categorical_column,
dimension=embedding_dimension,
initializer=_embedding_column_initializer,
)
dense_features = df.DenseFeatures([embedding_column])
features = {"a": sparse_input}
inputs = dense_features(features)
variables = dense_features.variables
# Sanity check: test that the inputs are correct.
self.assertAllEqual([[1, 0], [0, 1], [1, 1]], inputs)
# Check that only one variable was created.
self.assertEqual(1, len(variables))
# Check that invoking dense_features on the same features does not
# create additional variables
_ = dense_features(features)
self.assertEqual(1, len(variables))
self.assertIs(variables[0], dense_features.variables[0])
@test_combinations.generate(test_combinations.combine(mode=["eager"]))
def test_feature_column_dense_features_gradient(self):
sparse_input = tf.SparseTensor(
indices=((0, 0), (1, 0), (2, 0)),
values=(0, 1, 2),
dense_shape=(3, 3),
)
# Create feature columns (categorical and embedding).
categorical_column = tf.feature_column.categorical_column_with_identity(
key="a", num_buckets=3
)
embedding_dimension = 2
def _embedding_column_initializer(shape, dtype, partition_info=None):
del shape # unused
del dtype # unused
del partition_info # unused
            embedding_values = ((1, 0), (0, 1), (1, 1))  # ids 0, 1, 2
return embedding_values
embedding_column = tf.feature_column.embedding_column(
categorical_column,
dimension=embedding_dimension,
initializer=_embedding_column_initializer,
)
dense_features = df.DenseFeatures([embedding_column])
features = {"a": sparse_input}
def scale_matrix():
matrix = dense_features(features)
return 2 * matrix
# Sanity check: Verify that scale_matrix returns the correct output.
self.assertAllEqual([[2, 0], [0, 2], [2, 2]], scale_matrix())
# Check that the returned gradient is correct.
grad_function = backprop.implicit_grad(scale_matrix)
grads_and_vars = grad_function()
indexed_slice = grads_and_vars[0][0]
gradient = grads_and_vars[0][0].values
self.assertAllEqual([0, 1, 2], indexed_slice.indices)
self.assertAllEqual([[2, 2], [2, 2], [2, 2]], gradient)
def test_dense_feature_with_training_arg(self):
price1 = tf.feature_column.numeric_column("price1", shape=2)
price2 = tf.feature_column.numeric_column("price2")
# Monkey patch the second numeric column to simulate a column that has
# different behavior by mode.
def training_aware_get_dense_tensor(
transformation_cache, state_manager, training=None
):
return transformation_cache.get(
price2, state_manager, training=training
)
def training_aware_transform_feature(
transformation_cache, state_manager, training=None
):
input_tensor = transformation_cache.get(
price2.key, state_manager, training=training
)
if training:
return input_tensor * 10.0
else:
return input_tensor * 20.0
price2.get_dense_tensor = training_aware_get_dense_tensor
price2.transform_feature = training_aware_transform_feature
with tf.Graph().as_default():
features = {
"price1": [[1.0, 2.0], [5.0, 6.0]],
"price2": [[3.0], [4.0]],
}
train_mode = df.DenseFeatures([price1, price2])(
features, training=True
)
predict_mode = df.DenseFeatures([price1, price2])(
features, training=False
)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(tf.compat.v1.tables_initializer())
self.assertAllClose(
[[1.0, 2.0, 30.0], [5.0, 6.0, 40.0]], self.evaluate(train_mode)
)
self.assertAllClose(
[[1.0, 2.0, 60.0], [5.0, 6.0, 80.0]],
self.evaluate(predict_mode),
)
def test_raises_if_empty_feature_columns(self):
with self.assertRaisesRegex(
ValueError, "feature_columns must not be empty"
):
df.DenseFeatures(feature_columns=[])(features={})
def test_should_be_dense_column(self):
with self.assertRaisesRegex(ValueError, "must be a .*DenseColumn"):
df.DenseFeatures(
feature_columns=[
tf.feature_column.categorical_column_with_hash_bucket(
"wire_cast", 4
)
]
)(features={"a": [[0]]})
def test_does_not_support_dict_columns(self):
with self.assertRaisesRegex(
ValueError, "Expected feature_columns to be iterable, found dict."
):
df.DenseFeatures(
feature_columns={"a": tf.feature_column.numeric_column("a")}
)(features={"a": [[0]]})
def test_bare_column(self):
with tf.Graph().as_default():
            features = {"a": [0.0]}
net = df.DenseFeatures(tf.feature_column.numeric_column("a"))(
features
)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(tf.compat.v1.tables_initializer())
self.assertAllClose([[0.0]], self.evaluate(net))
def test_column_generator(self):
with tf.Graph().as_default():
            features = {"a": [0.0], "b": [1.0]}
columns = (
tf.feature_column.numeric_column(key) for key in features
)
net = df.DenseFeatures(columns)(features)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(tf.compat.v1.tables_initializer())
self.assertAllClose([[0.0, 1.0]], self.evaluate(net))
def test_raises_if_duplicate_name(self):
with self.assertRaisesRegex(
ValueError, "Duplicate feature column name found for columns"
):
df.DenseFeatures(
feature_columns=[
tf.feature_column.numeric_column("a"),
tf.feature_column.numeric_column("a"),
]
)(features={"a": [[0]]})
def test_one_column(self):
price = tf.feature_column.numeric_column("price")
with tf.Graph().as_default():
features = {"price": [[1.0], [5.0]]}
net = df.DenseFeatures([price])(features)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(tf.compat.v1.tables_initializer())
self.assertAllClose([[1.0], [5.0]], self.evaluate(net))
def test_multi_dimension(self):
price = tf.feature_column.numeric_column("price", shape=2)
with tf.Graph().as_default():
features = {"price": [[1.0, 2.0], [5.0, 6.0]]}
net = df.DenseFeatures([price])(features)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(tf.compat.v1.tables_initializer())
self.assertAllClose([[1.0, 2.0], [5.0, 6.0]], self.evaluate(net))
def test_compute_output_shape(self):
price1 = tf.feature_column.numeric_column("price1", shape=2)
price2 = tf.feature_column.numeric_column("price2", shape=4)
with tf.Graph().as_default():
features = {
"price1": [[1.0, 2.0], [5.0, 6.0]],
"price2": [[3.0, 4.0, 5.0, 6.0], [7.0, 8.0, 9.0, 10.0]],
}
dense_features = df.DenseFeatures([price1, price2])
self.assertEqual(
(None, 6), dense_features.compute_output_shape((None,))
)
net = dense_features(features)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(tf.compat.v1.tables_initializer())
self.assertAllClose(
[
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
[5.0, 6.0, 7.0, 8.0, 9.0, 10.0],
],
self.evaluate(net),
)
def test_raises_if_shape_mismatch(self):
price = tf.feature_column.numeric_column("price", shape=2)
with tf.Graph().as_default():
features = {"price": [[1.0], [5.0]]}
with self.assertRaisesRegex(
Exception,
r"Cannot reshape a tensor with 2 elements to shape \[2,2\]",
):
df.DenseFeatures([price])(features)
def test_reshaping(self):
price = tf.feature_column.numeric_column("price", shape=[1, 2])
with tf.Graph().as_default():
features = {"price": [[[1.0, 2.0]], [[5.0, 6.0]]]}
net = df.DenseFeatures([price])(features)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(tf.compat.v1.tables_initializer())
self.assertAllClose([[1.0, 2.0], [5.0, 6.0]], self.evaluate(net))
def test_multi_column(self):
price1 = tf.feature_column.numeric_column("price1", shape=2)
price2 = tf.feature_column.numeric_column("price2")
with tf.Graph().as_default():
features = {
"price1": [[1.0, 2.0], [5.0, 6.0]],
"price2": [[3.0], [4.0]],
}
net = df.DenseFeatures([price1, price2])(features)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(tf.compat.v1.tables_initializer())
self.assertAllClose(
[[1.0, 2.0, 3.0], [5.0, 6.0, 4.0]], self.evaluate(net)
)
def test_cols_to_output_tensors(self):
price1 = tf.feature_column.numeric_column("price1", shape=2)
price2 = tf.feature_column.numeric_column("price2")
with tf.Graph().as_default():
cols_dict = {}
features = {
"price1": [[1.0, 2.0], [5.0, 6.0]],
"price2": [[3.0], [4.0]],
}
dense_features = df.DenseFeatures([price1, price2])
net = dense_features(features, cols_dict)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(tf.compat.v1.tables_initializer())
self.assertAllClose(
[[1.0, 2.0], [5.0, 6.0]], self.evaluate(cols_dict[price1])
)
self.assertAllClose(
[[3.0], [4.0]], self.evaluate(cols_dict[price2])
)
self.assertAllClose(
[[1.0, 2.0, 3.0], [5.0, 6.0, 4.0]], self.evaluate(net)
)
def test_column_order(self):
price_a = tf.feature_column.numeric_column("price_a")
price_b = tf.feature_column.numeric_column("price_b")
with tf.Graph().as_default():
features = {
"price_a": [[1.0]],
"price_b": [[3.0]],
}
net1 = df.DenseFeatures([price_a, price_b])(features)
net2 = df.DenseFeatures([price_b, price_a])(features)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(tf.compat.v1.tables_initializer())
self.assertAllClose([[1.0, 3.0]], self.evaluate(net1))
self.assertAllClose([[1.0, 3.0]], self.evaluate(net2))
def test_fails_for_categorical_column(self):
animal = tf.feature_column.categorical_column_with_identity(
"animal", num_buckets=4
)
with tf.Graph().as_default():
features = {
"animal": tf.SparseTensor(
indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2]
)
}
with self.assertRaisesRegex(Exception, "must be a .*DenseColumn"):
df.DenseFeatures([animal])(features)
def test_static_batch_size_mismatch(self):
price1 = tf.feature_column.numeric_column("price1")
price2 = tf.feature_column.numeric_column("price2")
with tf.Graph().as_default():
features = {
"price1": [[1.0], [5.0], [7.0]], # batchsize = 3
"price2": [[3.0], [4.0]], # batchsize = 2
}
with self.assertRaisesRegex(
ValueError,
r"Batch size \(first dimension\) of each feature must be same.",
):
df.DenseFeatures([price1, price2])(features)
def test_subset_of_static_batch_size_mismatch(self):
price1 = tf.feature_column.numeric_column("price1")
price2 = tf.feature_column.numeric_column("price2")
price3 = tf.feature_column.numeric_column("price3")
with tf.Graph().as_default():
features = {
"price1": tf.compat.v1.placeholder(
dtype=tf.int64
), # batchsize = 3
"price2": [[3.0], [4.0]], # batchsize = 2
"price3": [[3.0], [4.0], [5.0]], # batchsize = 3
}
with self.assertRaisesRegex(
ValueError,
r"Batch size \(first dimension\) of each feature must be same.",
):
df.DenseFeatures([price1, price2, price3])(features)
def test_runtime_batch_size_mismatch(self):
price1 = tf.feature_column.numeric_column("price1")
price2 = tf.feature_column.numeric_column("price2")
with tf.Graph().as_default():
features = {
"price1": tf.compat.v1.placeholder(
dtype=tf.int64
), # batchsize = 3
"price2": [[3.0], [4.0]], # batchsize = 2
}
net = df.DenseFeatures([price1, price2])(features)
with _initialized_session() as sess:
with self.assertRaisesRegex(
tf.errors.OpError,
"Dimension 0 in both shapes must be equal|"
"Dimensions of inputs should match",
):
sess.run(
net,
feed_dict={features["price1"]: [[1.0], [5.0], [7.0]]},
)
def test_runtime_batch_size_matches(self):
price1 = tf.feature_column.numeric_column("price1")
price2 = tf.feature_column.numeric_column("price2")
with tf.Graph().as_default():
features = {
"price1": tf.compat.v1.placeholder(
dtype=tf.int64
), # batchsize = 2
"price2": tf.compat.v1.placeholder(
dtype=tf.int64
), # batchsize = 2
}
net = df.DenseFeatures([price1, price2])(features)
with _initialized_session() as sess:
sess.run(
net,
feed_dict={
features["price1"]: [[1.0], [5.0]],
features["price2"]: [[1.0], [5.0]],
},
)
def test_multiple_layers_with_same_embedding_column(self):
some_sparse_column = (
tf.feature_column.categorical_column_with_hash_bucket(
"sparse_feature", hash_bucket_size=5
)
)
some_embedding_column = tf.feature_column.embedding_column(
some_sparse_column, dimension=10
)
with tf.Graph().as_default():
features = {
"sparse_feature": [["a"], ["x"]],
}
all_cols = [some_embedding_column]
df.DenseFeatures(all_cols)(features)
df.DenseFeatures(all_cols)(features)
# Make sure that 2 variables get created in this case.
self.assertEqual(
2,
len(
tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.GLOBAL_VARIABLES
)
),
)
expected_var_names = [
"dense_features/sparse_feature_embedding/embedding_weights:0",
"dense_features_1/sparse_feature_embedding/embedding_weights:0",
]
self.assertItemsEqual(
expected_var_names,
[
v.name
for v in tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.GLOBAL_VARIABLES
)
],
)
def test_multiple_layers_with_same_shared_embedding_column(self):
categorical_column_a = (
tf.feature_column.categorical_column_with_identity(
key="aaa", num_buckets=3
)
)
categorical_column_b = (
tf.feature_column.categorical_column_with_identity(
key="bbb", num_buckets=3
)
)
embedding_dimension = 2
# feature_column.shared_embeddings is not supported in eager.
with tf.Graph().as_default():
(
embedding_column_b,
embedding_column_a,
) = tf.feature_column.shared_embeddings(
[categorical_column_b, categorical_column_a],
dimension=embedding_dimension,
)
features = {
"aaa": tf.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 1, 0),
dense_shape=(2, 2),
),
"bbb": tf.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=(1, 2, 1),
dense_shape=(2, 2),
),
}
all_cols = [embedding_column_a, embedding_column_b]
df.DenseFeatures(all_cols)(features)
df.DenseFeatures(all_cols)(features)
# Make sure that only 1 variable gets created in this case.
self.assertEqual(
1,
len(
tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.GLOBAL_VARIABLES
)
),
)
self.assertItemsEqual(
["aaa_bbb_shared_embedding:0"],
[
v.name
for v in tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.GLOBAL_VARIABLES
)
],
)
def test_multiple_layers_with_same_shared_embedding_column_diff_graphs(
self,
):
categorical_column_a = (
tf.feature_column.categorical_column_with_identity(
key="aaa", num_buckets=3
)
)
categorical_column_b = (
tf.feature_column.categorical_column_with_identity(
key="bbb", num_buckets=3
)
)
embedding_dimension = 2
# feature_column.shared_embeddings is not supported in eager.
with tf.Graph().as_default():
(
embedding_column_b,
embedding_column_a,
) = tf.feature_column.shared_embeddings(
[categorical_column_b, categorical_column_a],
dimension=embedding_dimension,
)
all_cols = [embedding_column_a, embedding_column_b]
features = {
"aaa": tf.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 1, 0),
dense_shape=(2, 2),
),
"bbb": tf.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=(1, 2, 1),
dense_shape=(2, 2),
),
}
df.DenseFeatures(all_cols)(features)
# Make sure that only 1 variable gets created in this case.
self.assertEqual(
1,
len(
tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.GLOBAL_VARIABLES
)
),
)
with tf.Graph().as_default():
features1 = {
"aaa": tf.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 1, 0),
dense_shape=(2, 2),
),
"bbb": tf.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=(1, 2, 1),
dense_shape=(2, 2),
),
}
df.DenseFeatures(all_cols)(features1)
# Make sure that only 1 variable gets created in this case.
self.assertEqual(
1,
len(
tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.GLOBAL_VARIABLES
)
),
)
self.assertItemsEqual(
["aaa_bbb_shared_embedding:0"],
[
v.name
for v in tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.GLOBAL_VARIABLES
)
],
)
def test_with_1d_sparse_tensor(self):
embedding_values = (
(1.0, 2.0, 3.0, 4.0, 5.0), # id 0
(6.0, 7.0, 8.0, 9.0, 10.0), # id 1
(11.0, 12.0, 13.0, 14.0, 15.0), # id 2
)
def _initializer(shape, dtype, partition_info=None):
del shape, dtype, partition_info
return embedding_values
# price has 1 dimension in dense_features
price = tf.feature_column.numeric_column("price")
# one_hot_body_style has 3 dims in dense_features.
body_style = tf.feature_column.categorical_column_with_vocabulary_list(
"body-style", vocabulary_list=["hardtop", "wagon", "sedan"]
)
one_hot_body_style = tf.feature_column.indicator_column(body_style)
        # embedded_country has 5 dims in dense_features.
country = tf.feature_column.categorical_column_with_vocabulary_list(
"country", vocabulary_list=["US", "JP", "CA"]
)
embedded_country = tf.feature_column.embedding_column(
country, dimension=5, initializer=_initializer
)
with tf.Graph().as_default():
# Provides 1-dim tensor and dense tensor.
features = {
"price": tf.constant(
[
11.0,
12.0,
]
),
"body-style": tf.SparseTensor(
indices=((0,), (1,)),
values=("sedan", "hardtop"),
dense_shape=(2,),
),
# This is dense tensor for the categorical_column.
"country": tf.constant(["CA", "US"]),
}
self.assertEqual(1, features["price"].shape.ndims)
self.assertEqual(
1, features["body-style"].dense_shape.get_shape()[0]
)
self.assertEqual(1, features["country"].shape.ndims)
net = df.DenseFeatures(
[price, one_hot_body_style, embedded_country]
)(features)
self.assertEqual(1 + 3 + 5, net.shape[1])
with _initialized_session() as sess:
                # Each row is formed by concatenating `one_hot_body_style`,
                # `embedded_country`, and `price` in order.
self.assertAllEqual(
[
[0.0, 0.0, 1.0, 11.0, 12.0, 13.0, 14.0, 15.0, 11.0],
[1.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 12.0],
],
sess.run(net),
)
def test_with_1d_unknown_shape_sparse_tensor(self):
embedding_values = (
(1.0, 2.0), # id 0
(6.0, 7.0), # id 1
(11.0, 12.0), # id 2
)
def _initializer(shape, dtype, partition_info=None):
del shape, dtype, partition_info
return embedding_values
# price has 1 dimension in dense_features
price = tf.feature_column.numeric_column("price")
# one_hot_body_style has 3 dims in dense_features.
body_style = tf.feature_column.categorical_column_with_vocabulary_list(
"body-style", vocabulary_list=["hardtop", "wagon", "sedan"]
)
one_hot_body_style = tf.feature_column.indicator_column(body_style)
        # embedded_country has 2 dims in dense_features.
country = tf.feature_column.categorical_column_with_vocabulary_list(
"country", vocabulary_list=["US", "JP", "CA"]
)
embedded_country = tf.feature_column.embedding_column(
country, dimension=2, initializer=_initializer
)
# Provides 1-dim tensor and dense tensor.
with tf.Graph().as_default():
features = {
"price": tf.compat.v1.placeholder(tf.float32),
"body-style": tf.compat.v1.sparse_placeholder(tf.string),
# This is dense tensor for the categorical_column.
"country": tf.compat.v1.placeholder(tf.string),
}
self.assertIsNone(features["price"].shape.ndims)
self.assertIsNone(features["body-style"].get_shape().ndims)
self.assertIsNone(features["country"].shape.ndims)
price_data = np.array([11.0, 12.0])
body_style_data = tf.compat.v1.SparseTensorValue(
indices=((0,), (1,)),
values=("sedan", "hardtop"),
dense_shape=(2,),
)
country_data = np.array([["US"], ["CA"]])
net = df.DenseFeatures(
[price, one_hot_body_style, embedded_country]
)(features)
self.assertEqual(1 + 3 + 2, net.shape[1])
with _initialized_session() as sess:
                # Each row is formed by concatenating `one_hot_body_style`,
                # `embedded_country`, and `price` in order.
self.assertAllEqual(
[
[0.0, 0.0, 1.0, 1.0, 2.0, 11.0],
[1.0, 0.0, 0.0, 11.0, 12.0, 12.0],
],
sess.run(
net,
feed_dict={
features["price"]: price_data,
features["body-style"]: body_style_data,
features["country"]: country_data,
},
),
)
def test_with_rank_0_feature(self):
# price has 1 dimension in dense_features
price = tf.feature_column.numeric_column("price")
features = {
"price": tf.constant(0),
}
self.assertEqual(0, features["price"].shape.ndims)
# Static rank 0 should fail
with self.assertRaisesRegex(
ValueError, "Feature .* cannot have rank 0"
):
df.DenseFeatures([price])(features)
with tf.Graph().as_default():
# Dynamic rank 0 should fail
features = {
"price": tf.compat.v1.placeholder(tf.float32),
}
net = df.DenseFeatures([price])(features)
self.assertEqual(1, net.shape[1])
with _initialized_session() as sess:
with self.assertRaisesOpError("Feature .* cannot have rank 0"):
sess.run(net, feed_dict={features["price"]: np.array(1)})
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/feature_column/dense_features_v2_test.py/0 | {
"file_path": "tf-keras/tf_keras/feature_column/dense_features_v2_test.py",
"repo_id": "tf-keras",
"token_count": 16269
} | 214 |
"""Test Model inference and save/load with an ExtensionType."""
import os
import typing
import tensorflow.compat.v2 as tf
import tf_keras as keras
from tf_keras.engine.input_layer import Input
from tf_keras.engine.training import Model
from tf_keras.saving.saving_api import load_model
from tf_keras.testing_infra import test_utils
class MaskedTensor(tf.experimental.BatchableExtensionType):
"""Example subclass of ExtensionType, used for testing.
This version adds TF-Keras required properties to MaskedTensor and its Spec
class, to test TF-Keras integration.
"""
__name__ = "tf.test.MaskedTensor.Spec"
values: typing.Union[tf.Tensor, tf.RaggedTensor]
mask: typing.Union[tf.Tensor, tf.RaggedTensor]
def __init__(self, values, mask):
if isinstance(values, tf.RaggedTensor):
assert isinstance(mask, tf.RaggedTensor)
assert mask.dtype == tf.dtypes.bool
else:
values = tf.convert_to_tensor(values)
mask = tf.convert_to_tensor(mask, tf.dtypes.bool)
self.values = values
self.mask = mask
# Required by assert_input_compatibility in keras/engine/input_spec.py
@property
def shape(self):
return self.values.shape
@property
def dtype(self):
return self.values.dtype
class Spec:
# Required by KerasTensor.shape in keras/engine/keras_tensor.py
@property
def shape(self):
return self.values._shape
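# Illustrative sketch, not part of the original test: constructing a
# MaskedTensor directly. The values and mask below are example data.
def _example_masked_tensor():  # pragma: no cover
    mt = MaskedTensor(values=[[1], [2], [3]], mask=[[True], [False], [True]])
    return mt.shape, mt.dtype  # TensorShape([3, 1]), tf.int32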
class ExtensionTypeTest(tf.test.TestCase):
@test_utils.run_v2_only
def testKerasModel(self):
mt_spec = MaskedTensor.Spec(
tf.TensorSpec(shape=[None, 1], dtype=tf.dtypes.int32),
tf.TensorSpec(shape=[None, 1], dtype=tf.dtypes.bool),
)
model_input = Input(type_spec=mt_spec)
model_output = keras.layers.Lambda(
lambda x: tf.identity(x, name="output")
)(model_input)
model = Model(inputs=model_input, outputs=model_output)
mt = MaskedTensor([[1], [2], [3]], [[True], [False], [True]])
self.assertEqual(model(mt), mt)
ds = tf.data.Dataset.from_tensors(mt)
self.assertEqual(model.predict(ds), mt)
with self.subTest("keras save"):
path = self.create_tempdir().full_path
model.save(path)
loaded_model = load_model(path)
self.assertEqual(loaded_model.input.type_spec, mt_spec)
self.assertEqual(loaded_model(mt), mt)
loaded_fn = tf.saved_model.load(path)
self.assertEqual(loaded_fn(mt), mt)
with self.assertRaisesRegex(
ValueError,
"Could not find matching concrete function to call "
"loaded from the SavedModel",
):
loaded_fn(MaskedTensor([1, 2, 3], [True, False, True]))
            # The serving_fn uses a flattened signature.
serving_fn = loaded_fn.signatures["serving_default"]
self.assertEqual(
serving_fn(args_0=mt.values, args_0_1=mt.mask)["lambda"], mt
)
with self.subTest("keras v3"):
path = os.path.join(self.create_tempdir().full_path, "model.keras")
model.save(path)
loaded_model = load_model(path, safe_mode=False)
self.assertEqual(loaded_model.input.type_spec, mt_spec)
self.assertEqual(loaded_model(mt), mt)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/integration_test/extension_type_test.py/0 | {
"file_path": "tf-keras/tf_keras/integration_test/extension_type_test.py",
"repo_id": "tf-keras",
"token_count": 1579
} | 215 |
"""Segmentation model.
Adapted from https://keras.io/examples/vision/oxford_pets_image_segmentation/
"""
from tensorflow import keras
from tf_keras.integration_test.models.input_spec import InputSpec
IMG_SIZE = (224, 224)
NUM_CLASSES = 5
def get_data_spec(batch_size):
return (
InputSpec((batch_size,) + IMG_SIZE + (3,)),
InputSpec((batch_size,) + IMG_SIZE + (NUM_CLASSES,)),
)
def get_input_preprocessor():
return None
def get_model(
build=False, compile=False, jit_compile=False, include_preprocessing=True
):
inputs = keras.Input(shape=IMG_SIZE + (3,))
x = keras.layers.Conv2D(32, 3, strides=2, padding="same")(inputs)
x = keras.layers.BatchNormalization()(x)
x = keras.layers.Activation("relu")(x)
previous_block_activation = x
for filters in [64, 128, 256]:
x = keras.layers.Activation("relu")(x)
x = keras.layers.SeparableConv2D(filters, 3, padding="same")(x)
x = keras.layers.BatchNormalization()(x)
x = keras.layers.Activation("relu")(x)
x = keras.layers.SeparableConv2D(filters, 3, padding="same")(x)
x = keras.layers.BatchNormalization()(x)
x = keras.layers.MaxPooling2D(3, strides=2, padding="same")(x)
residual = keras.layers.Conv2D(filters, 1, strides=2, padding="same")(
previous_block_activation
)
x = keras.layers.add([x, residual])
previous_block_activation = x
for filters in [256, 128, 64, 32]:
x = keras.layers.Activation("relu")(x)
x = keras.layers.Conv2DTranspose(filters, 3, padding="same")(x)
x = keras.layers.BatchNormalization()(x)
x = keras.layers.Activation("relu")(x)
x = keras.layers.Conv2DTranspose(filters, 3, padding="same")(x)
x = keras.layers.BatchNormalization()(x)
x = keras.layers.UpSampling2D(2)(x)
residual = keras.layers.UpSampling2D(2)(previous_block_activation)
residual = keras.layers.Conv2D(filters, 1, padding="same")(residual)
x = keras.layers.add([x, residual])
previous_block_activation = x
outputs = keras.layers.Conv2D(
NUM_CLASSES, 3, activation="softmax", padding="same"
)(x)
model = keras.Model(inputs, outputs)
if compile:
model.compile(
optimizer="rmsprop",
loss="categorical_crossentropy",
jit_compile=jit_compile,
)
return model
def get_custom_objects():
return {}
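# Illustrative usage sketch, not part of the original module: build the model
# and run a forward pass on random data. The batch size of 2 is an assumption.
def _example_forward_pass():  # pragma: no cover
    import numpy as np  # local import; only needed for this example
    model = get_model(compile=True)
    x = np.random.rand(2, *IMG_SIZE, 3).astype("float32")
    preds = model.predict(x)
    return preds.shape  # (2, 224, 224, NUM_CLASSES)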
| tf-keras/tf_keras/integration_test/models/mini_unet.py/0 | {
"file_path": "tf-keras/tf_keras/integration_test/models/mini_unet.py",
"repo_id": "tf-keras",
"token_count": 1129
} | 216 |
# Description:
# Contains the TF-Keras activation layers.
# Placeholder: load unaliased py_library
load("@org_keras//tf_keras:tf_keras.bzl", "tf_py_test")
package(
# copybara:uncomment default_applicable_licenses = ["//tf_keras:license"],
default_visibility = [
"//tf_keras:friends",
],
licenses = ["notice"],
)
py_library(
name = "activation",
srcs = [
"__init__.py",
],
srcs_version = "PY3",
deps = [
":elu",
":leaky_relu",
":prelu",
":relu",
":softmax",
":thresholded_relu",
],
)
py_library(
name = "relu",
srcs = ["relu.py"],
srcs_version = "PY3",
deps = [
"//tf_keras:backend",
"//tf_keras/engine:base_layer",
"//tf_keras/utils:tf_utils",
],
)
py_library(
name = "softmax",
srcs = ["softmax.py"],
srcs_version = "PY3",
deps = [
"//:expect_tensorflow_installed",
"//tf_keras:backend",
"//tf_keras/engine:base_layer",
"//tf_keras/utils:tf_utils",
],
)
py_library(
name = "leaky_relu",
srcs = ["leaky_relu.py"],
srcs_version = "PY3",
deps = [
"//tf_keras:backend",
"//tf_keras/engine:base_layer",
"//tf_keras/utils:tf_utils",
],
)
py_library(
name = "prelu",
srcs = ["prelu.py"],
srcs_version = "PY3",
deps = [
"//tf_keras:backend",
"//tf_keras:constraints",
"//tf_keras:regularizers",
"//tf_keras/engine:base_layer",
"//tf_keras/engine:input_spec",
"//tf_keras/initializers",
"//tf_keras/utils:tf_utils",
],
)
py_library(
name = "elu",
srcs = ["elu.py"],
srcs_version = "PY3",
deps = [
"//tf_keras:backend",
"//tf_keras/engine:base_layer",
"//tf_keras/utils:tf_utils",
],
)
py_library(
name = "thresholded_relu",
srcs = ["thresholded_relu.py"],
srcs_version = "PY3",
deps = [
"//:expect_tensorflow_installed",
"//tf_keras:backend",
"//tf_keras/engine:base_layer",
"//tf_keras/utils:tf_utils",
],
)
tf_py_test(
name = "relu_test",
size = "medium",
srcs = ["relu_test.py"],
python_version = "PY3",
deps = [
"//:expect_numpy_installed",
"//:expect_tensorflow_installed",
"//tf_keras",
"//tf_keras/testing_infra:test_combinations",
"//tf_keras/testing_infra:test_utils",
],
)
tf_py_test(
name = "softmax_test",
size = "medium",
srcs = ["softmax_test.py"],
python_version = "PY3",
deps = [
"//:expect_tensorflow_installed",
"//tf_keras",
"//tf_keras/testing_infra:test_combinations",
"//tf_keras/testing_infra:test_utils",
],
)
tf_py_test(
name = "leaky_relu_test",
size = "medium",
srcs = ["leaky_relu_test.py"],
python_version = "PY3",
deps = [
"//:expect_tensorflow_installed",
"//tf_keras",
"//tf_keras/testing_infra:test_combinations",
"//tf_keras/testing_infra:test_utils",
],
)
tf_py_test(
name = "prelu_test",
size = "medium",
srcs = ["prelu_test.py"],
python_version = "PY3",
deps = [
"//:expect_tensorflow_installed",
"//tf_keras",
"//tf_keras/testing_infra:test_combinations",
"//tf_keras/testing_infra:test_utils",
],
)
tf_py_test(
name = "elu_test",
size = "medium",
srcs = ["elu_test.py"],
python_version = "PY3",
deps = [
"//:expect_tensorflow_installed",
"//tf_keras",
"//tf_keras/testing_infra:test_combinations",
"//tf_keras/testing_infra:test_utils",
],
)
tf_py_test(
name = "thresholded_relu_test",
size = "medium",
srcs = ["thresholded_relu_test.py"],
python_version = "PY3",
deps = [
"//:expect_tensorflow_installed",
"//tf_keras",
"//tf_keras/testing_infra:test_combinations",
"//tf_keras/testing_infra:test_utils",
],
)
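# Illustrative note, not part of the original BUILD file: each tf_py_test
# target above can be run on its own with Bazel, e.g.
#   bazel test //tf_keras/layers/activation:relu_test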
| tf-keras/tf_keras/layers/activation/BUILD/0 | {
"file_path": "tf-keras/tf_keras/layers/activation/BUILD",
"repo_id": "tf-keras",
"token_count": 2098
} | 217 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Additive attention layer that can be used in sequence DNN/CNN models.
This file follows the terminology of https://arxiv.org/abs/1706.03762 Figure 2.
Attention is formed by three tensors: Query, Key and Value.
"""
import tensorflow.compat.v2 as tf
from tf_keras.layers.attention.base_dense_attention import BaseDenseAttention
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.layers.AdditiveAttention")
class AdditiveAttention(BaseDenseAttention):
"""Additive attention layer, a.k.a. Bahdanau-style attention.
Inputs are `query` tensor of shape `[batch_size, Tq, dim]`, `value` tensor
of shape `[batch_size, Tv, dim]` and `key` tensor of shape
`[batch_size, Tv, dim]`. The calculation follows the steps:
1. Reshape `query` and `key` into shapes `[batch_size, Tq, 1, dim]`
and `[batch_size, 1, Tv, dim]` respectively.
2. Calculate scores with shape `[batch_size, Tq, Tv]` as a non-linear
sum: `scores = tf.reduce_sum(tf.tanh(query + key), axis=-1)`
3. Use scores to calculate a distribution with shape
`[batch_size, Tq, Tv]`: `distribution = tf.nn.softmax(scores)`.
4. Use `distribution` to create a linear combination of `value` with
shape `[batch_size, Tq, dim]`:
`return tf.matmul(distribution, value)`.
Args:
use_scale: If `True`, will create a variable to scale the attention
scores.
dropout: Float between 0 and 1. Fraction of the units to drop for the
attention scores. Defaults to `0.0`.
Call arguments:
inputs: List of the following tensors:
* query: Query `Tensor` of shape `[batch_size, Tq, dim]`.
* value: Value `Tensor` of shape `[batch_size, Tv, dim]`.
* key: Optional key `Tensor` of shape `[batch_size, Tv, dim]`.
If not given, will use `value` for both `key` and `value`,
which is the most common case.
mask: List of the following tensors:
* query_mask: A boolean mask `Tensor` of shape `[batch_size, Tq]`.
If given, the output will be zero at the positions where
`mask==False`.
* value_mask: A boolean mask `Tensor` of shape `[batch_size, Tv]`.
If given, will apply the mask such that values at positions
where `mask==False` do not contribute to the result.
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (no dropout).
        return_attention_scores: bool, if `True`, returns the attention scores
(after masking and softmax) as an additional output argument.
use_causal_mask: Boolean. Set to `True` for decoder self-attention. Adds
a mask such that position `i` cannot attend to positions `j > i`.
This prevents the flow of information from the future towards the
past. Defaults to `False`.
Output:
Attention outputs of shape `[batch_size, Tq, dim]`.
[Optional] Attention scores after masking and softmax with shape
`[batch_size, Tq, Tv]`.
    The meaning of `query`, `value` and `key` depends on the application. In the
case of text similarity, for example, `query` is the sequence embeddings of
the first piece of text and `value` is the sequence embeddings of the second
piece of text. `key` is usually the same tensor as `value`.
Here is a code example for using `AdditiveAttention` in a CNN+Attention
network:
```python
# Variable-length int sequences.
query_input = tf.keras.Input(shape=(None,), dtype='int32')
value_input = tf.keras.Input(shape=(None,), dtype='int32')
# Embedding lookup.
token_embedding = tf.keras.layers.Embedding(max_tokens, dimension)
# Query embeddings of shape [batch_size, Tq, dimension].
query_embeddings = token_embedding(query_input)
# Value embeddings of shape [batch_size, Tv, dimension].
value_embeddings = token_embedding(value_input)
# CNN layer.
cnn_layer = tf.keras.layers.Conv1D(
filters=100,
kernel_size=4,
# Use 'same' padding so outputs have the same shape as inputs.
padding='same')
# Query encoding of shape [batch_size, Tq, filters].
query_seq_encoding = cnn_layer(query_embeddings)
# Value encoding of shape [batch_size, Tv, filters].
value_seq_encoding = cnn_layer(value_embeddings)
# Query-value attention of shape [batch_size, Tq, filters].
query_value_attention_seq = tf.keras.layers.AdditiveAttention()(
[query_seq_encoding, value_seq_encoding])
# Reduce over the sequence axis to produce encodings of shape
# [batch_size, filters].
query_encoding = tf.keras.layers.GlobalAveragePooling1D()(
query_seq_encoding)
query_value_attention = tf.keras.layers.GlobalAveragePooling1D()(
query_value_attention_seq)
# Concatenate query and document encodings to produce a DNN input layer.
input_layer = tf.keras.layers.Concatenate()(
[query_encoding, query_value_attention])
# Add DNN layers, and create Model.
# ...
```
"""
def __init__(self, use_scale=True, **kwargs):
super().__init__(**kwargs)
self.use_scale = use_scale
def build(self, input_shape):
v_shape = tf.TensorShape(input_shape[1])
dim = v_shape[-1]
dim = tf.compat.dimension_value(dim)
if self.use_scale:
self.scale = self.add_weight(
name="scale",
shape=[dim],
initializer="glorot_uniform",
dtype=self.dtype,
trainable=True,
)
else:
self.scale = None
super().build(input_shape)
def _calculate_scores(self, query, key):
"""Calculates attention scores as a nonlinear sum of query and key.
Args:
query: Query tensor of shape `[batch_size, Tq, dim]`.
key: Key tensor of shape `[batch_size, Tv, dim]`.
Returns:
Tensor of shape `[batch_size, Tq, Tv]`.
"""
# Reshape tensors to enable broadcasting.
# Reshape into [batch_size, Tq, 1, dim].
q_reshaped = tf.expand_dims(query, axis=-2)
# Reshape into [batch_size, 1, Tv, dim].
k_reshaped = tf.expand_dims(key, axis=-3)
if self.use_scale:
scale = self.scale
else:
scale = 1.0
return tf.reduce_sum(scale * tf.tanh(q_reshaped + k_reshaped), axis=-1)
def get_config(self):
config = {"use_scale": self.use_scale}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
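# A minimal, illustrative sketch of the computation documented above. The
# shapes used here (batch=2, Tq=3, Tv=4, dim=5) are arbitrary assumptions
# chosen for the example, not values used elsewhere in this file.
def _additive_attention_sketch():
    """Recomputes the layer output by hand and returns both results."""
    query = tf.random.normal([2, 3, 5])
    value = tf.random.normal([2, 4, 5])
    layer = AdditiveAttention(use_scale=False)
    # `key` defaults to `value` when only two inputs are passed.
    output = layer([query, value])
    # Manual computation following the docstring steps 1-4.
    scores = tf.reduce_sum(
        tf.tanh(tf.expand_dims(query, -2) + tf.expand_dims(value, -3)),
        axis=-1,
    )
    expected = tf.matmul(tf.nn.softmax(scores), value)
    # `output` and `expected` should agree up to floating-point error.
    return output, expected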
| tf-keras/tf_keras/layers/attention/additive_attention.py/0 | {
"file_path": "tf-keras/tf_keras/layers/attention/additive_attention.py",
"repo_id": "tf-keras",
"token_count": 2966
} | 218 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras 2D transposed convolution layer (sometimes called deconvolution)."""
import tensorflow.compat.v2 as tf
from tf_keras import activations
from tf_keras import backend
from tf_keras import constraints
from tf_keras import initializers
from tf_keras import regularizers
from tf_keras.dtensor import utils
from tf_keras.engine.input_spec import InputSpec
from tf_keras.layers.convolutional.conv2d import Conv2D
from tf_keras.utils import conv_utils
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export(
"keras.layers.Conv2DTranspose", "keras.layers.Convolution2DTranspose"
)
class Conv2DTranspose(Conv2D):
"""Transposed convolution layer (sometimes called Deconvolution).
The need for transposed convolutions generally arises
from the desire to use a transformation going in the opposite direction
of a normal convolution, i.e., from something that has the shape of the
output of some convolution to something that has the shape of its input
while maintaining a connectivity pattern that is compatible with
said convolution.
When using this layer as the first layer in a model,
provide the keyword argument `input_shape`
(tuple of integers or `None`, does not include the sample axis),
e.g. `input_shape=(128, 128, 3)` for 128x128 RGB pictures
in `data_format="channels_last"`.
Args:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the
height and width of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the height and width.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: one of `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding with zeros
evenly to the left/right or up/down of the input such that output has
the same height/width dimension as the input.
output_padding: An integer or tuple/list of 2 integers,
specifying the amount of padding along the height and width
of the output tensor.
Can be a single integer to specify the same value for all
spatial dimensions.
The amount of output padding along a given dimension must be
lower than the stride along that same dimension.
If set to `None` (default), the output shape is inferred.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch_size, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch_size, channels, height, width)`.
When unspecified, uses `image_data_format` value found in your Keras
config file at `~/.keras/keras.json` (if exists) else 'channels_last'.
Defaults to "channels_last".
dilation_rate: an integer, specifying the dilation rate for all spatial
dimensions for dilated convolution. Specifying different dilation rates
for different dimensions is not supported.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
activation: Activation function to use.
If you don't specify anything, no activation is applied
(see `keras.activations`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix
(see `keras.initializers`). Defaults to 'glorot_uniform'.
bias_initializer: Initializer for the bias vector
(see `keras.initializers`). Defaults to 'zeros'.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix (see `keras.regularizers`).
bias_regularizer: Regularizer function applied to the bias vector
(see `keras.regularizers`).
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation") (see `keras.regularizers`).
kernel_constraint: Constraint function applied to the kernel matrix
(see `keras.constraints`).
bias_constraint: Constraint function applied to the bias vector
(see `keras.constraints`).
Input shape:
4D tensor with shape:
`(batch_size, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch_size, rows, cols, channels)` if data_format='channels_last'.
Output shape:
4D tensor with shape:
`(batch_size, filters, new_rows, new_cols)` if
data_format='channels_first'
or 4D tensor with shape:
`(batch_size, new_rows, new_cols, filters)` if
data_format='channels_last'. `rows` and `cols` values might have changed
due to padding.
If `output_padding` is specified:
```
new_rows = ((rows - 1) * strides[0] + kernel_size[0] - 2 * padding[0] +
output_padding[0])
new_cols = ((cols - 1) * strides[1] + kernel_size[1] - 2 * padding[1] +
output_padding[1])
```
Returns:
A tensor of rank 4 representing
`activation(conv2dtranspose(inputs, kernel) + bias)`.
Raises:
ValueError: if `padding` is "causal".
ValueError: when both `strides` > 1 and `dilation_rate` > 1.
References:
- [A guide to convolution arithmetic for deep
learning](https://arxiv.org/abs/1603.07285v1)
- [Deconvolutional
Networks](https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf)
"""
@utils.allow_initializer_layout
def __init__(
self,
filters,
kernel_size,
strides=(1, 1),
padding="valid",
output_padding=None,
data_format=None,
dilation_rate=(1, 1),
activation=None,
use_bias=True,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs,
):
super().__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activations.get(activation),
use_bias=use_bias,
kernel_initializer=initializers.get(kernel_initializer),
bias_initializer=initializers.get(bias_initializer),
kernel_regularizer=regularizers.get(kernel_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
kernel_constraint=constraints.get(kernel_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs,
)
self.output_padding = output_padding
if self.output_padding is not None:
self.output_padding = conv_utils.normalize_tuple(
self.output_padding, 2, "output_padding", allow_zero=True
)
for stride, out_pad in zip(self.strides, self.output_padding):
if out_pad >= stride:
raise ValueError(
"Strides must be greater than output padding. "
f"Received strides={self.strides}, "
f"output_padding={self.output_padding}."
)
def build(self, input_shape):
input_shape = tf.TensorShape(input_shape)
if len(input_shape) != 4:
raise ValueError(
"Inputs should have rank 4. "
f"Received input_shape={input_shape}."
)
channel_axis = self._get_channel_axis()
if input_shape.dims[channel_axis].value is None:
raise ValueError(
"The channel dimension of the inputs "
"to `Conv2DTranspose` should be defined. "
f"The input_shape received is {input_shape}, "
f"where axis {channel_axis} (0-based) "
                "is the channel dimension, which was found to be `None`."
)
input_dim = int(input_shape[channel_axis])
self.input_spec = InputSpec(ndim=4, axes={channel_axis: input_dim})
kernel_shape = self.kernel_size + (self.filters, input_dim)
self.kernel = self.add_weight(
name="kernel",
shape=kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
dtype=self.dtype,
)
if self.use_bias:
self.bias = self.add_weight(
name="bias",
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype,
)
else:
self.bias = None
self.built = True
def call(self, inputs):
inputs_shape = tf.shape(inputs)
batch_size = inputs_shape[0]
if self.data_format == "channels_first":
h_axis, w_axis = 2, 3
else:
h_axis, w_axis = 1, 2
        # Use the constant height and width when possible.
        # TODO(scottzhu): Extract this into a utility function that can be
        # applied to all convolutional layers, which currently lose the
        # static shape information due to tf.shape().
height, width = None, None
if inputs.shape.rank is not None:
dims = inputs.shape.as_list()
height = dims[h_axis]
width = dims[w_axis]
height = height if height is not None else inputs_shape[h_axis]
width = width if width is not None else inputs_shape[w_axis]
kernel_h, kernel_w = self.kernel_size
stride_h, stride_w = self.strides
if self.output_padding is None:
out_pad_h = out_pad_w = None
else:
out_pad_h, out_pad_w = self.output_padding
# Infer the dynamic output shape:
out_height = conv_utils.deconv_output_length(
height,
kernel_h,
padding=self.padding,
output_padding=out_pad_h,
stride=stride_h,
dilation=self.dilation_rate[0],
)
out_width = conv_utils.deconv_output_length(
width,
kernel_w,
padding=self.padding,
output_padding=out_pad_w,
stride=stride_w,
dilation=self.dilation_rate[1],
)
if self.data_format == "channels_first":
output_shape = (batch_size, self.filters, out_height, out_width)
else:
output_shape = (batch_size, out_height, out_width, self.filters)
output_shape_tensor = tf.stack(output_shape)
outputs = backend.conv2d_transpose(
inputs,
self.kernel,
output_shape_tensor,
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
dilation_rate=self.dilation_rate,
)
if not tf.executing_eagerly() and inputs.shape.rank:
# Infer the static output shape:
out_shape = self.compute_output_shape(inputs.shape)
outputs.set_shape(out_shape)
if self.use_bias:
outputs = tf.nn.bias_add(
outputs,
self.bias,
data_format=conv_utils.convert_data_format(
self.data_format, ndim=4
),
)
if self.activation is not None:
return self.activation(outputs)
return outputs
def compute_output_shape(self, input_shape):
input_shape = tf.TensorShape(input_shape).as_list()
output_shape = list(input_shape)
if self.data_format == "channels_first":
c_axis, h_axis, w_axis = 1, 2, 3
else:
c_axis, h_axis, w_axis = 3, 1, 2
kernel_h, kernel_w = self.kernel_size
stride_h, stride_w = self.strides
if self.output_padding is None:
out_pad_h = out_pad_w = None
else:
out_pad_h, out_pad_w = self.output_padding
output_shape[c_axis] = self.filters
output_shape[h_axis] = conv_utils.deconv_output_length(
output_shape[h_axis],
kernel_h,
padding=self.padding,
output_padding=out_pad_h,
stride=stride_h,
dilation=self.dilation_rate[0],
)
output_shape[w_axis] = conv_utils.deconv_output_length(
output_shape[w_axis],
kernel_w,
padding=self.padding,
output_padding=out_pad_w,
stride=stride_w,
dilation=self.dilation_rate[1],
)
return tf.TensorShape(output_shape)
def get_config(self):
config = super().get_config()
config["output_padding"] = self.output_padding
return config
# Alias
Convolution2DTranspose = Conv2DTranspose
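# Illustrative shape check (a sketch; the sizes below are arbitrary
# assumptions). With `padding="same"`, stride 2 and no `output_padding`,
# each spatial dimension is upsampled by the stride factor.
def _conv2d_transpose_shape_sketch():
    """Returns the output shape for an 8x8 input upsampled by stride 2."""
    x = tf.random.normal([1, 8, 8, 3])
    layer = Conv2DTranspose(
        filters=4, kernel_size=3, strides=2, padding="same"
    )
    y = layer(x)
    # Expected: (1, 16, 16, 4). Under "same" padding with no
    # output_padding, the output spatial size is input_size * stride.
    return y.shape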
| tf-keras/tf_keras/layers/convolutional/conv2d_transpose.py/0 | {
"file_path": "tf-keras/tf_keras/layers/convolutional/conv2d_transpose.py",
"repo_id": "tf-keras",
"token_count": 6218
} | 219 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras-based einsum dense layer."""
import re
import tensorflow.compat.v2 as tf
from tf_keras import activations
from tf_keras import constraints
from tf_keras import initializers
from tf_keras import regularizers
from tf_keras.engine.base_layer import Layer
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export(
"keras.layers.EinsumDense", "keras.layers.experimental.EinsumDense"
)
class EinsumDense(Layer):
"""A layer that uses `tf.einsum` as the backing computation.
This layer can perform einsum calculations of arbitrary dimensionality.
Args:
equation: An equation describing the einsum to perform. This equation must
be a valid einsum string of the form `ab,bc->ac`, `...ab,bc->...ac`, or
`ab...,bc->ac...` where 'ab', 'bc', and 'ac' can be any valid einsum
axis expression sequence.
output_shape: The expected shape of the output tensor (excluding the batch
dimension and any dimensions represented by ellipses). You can specify
None for any dimension that is unknown or can be inferred from the input
shape.
activation: Activation function to use. If you don't specify anything, no
activation is applied (that is, a "linear" activation: `a(x) = x`).
bias_axes: A string containing the output dimension(s) to apply a bias to.
Each character in the `bias_axes` string should correspond to a
character in the output portion of the `equation` string.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to the `kernel` weights
matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to the output of the
layer (its "activation").
kernel_constraint: Constraint function applied to the `kernel` weights
matrix.
bias_constraint: Constraint function applied to the bias vector.
Examples:
**Biased dense layer with einsums**
This example shows how to instantiate a standard TF-Keras dense layer using
einsum operations. This example is equivalent to
`tf.keras.layers.Dense(64, use_bias=True)`.
>>> layer = tf.keras.layers.EinsumDense("ab,bc->ac",
... output_shape=64,
... bias_axes="c")
>>> input_tensor = tf.keras.Input(shape=[32])
>>> output_tensor = layer(input_tensor)
>>> output_tensor
<... shape=(None, 64) dtype=...>
**Applying a dense layer to a sequence**
This example shows how to instantiate a layer that applies the same dense
operation to every element in a sequence. Here, the `output_shape` has two
values (since there are two non-batch dimensions in the output); the first
dimension in the `output_shape` is `None`, because the sequence dimension
`b` has an unknown shape.
>>> layer = tf.keras.layers.EinsumDense("abc,cd->abd",
... output_shape=(None, 64),
... bias_axes="d")
>>> input_tensor = tf.keras.Input(shape=[32, 128])
>>> output_tensor = layer(input_tensor)
>>> output_tensor
<... shape=(None, 32, 64) dtype=...>
**Applying a dense layer to a sequence using ellipses**
This example shows how to instantiate a layer that applies the same dense
operation to every element in a sequence, but uses the ellipsis notation
instead of specifying the batch and sequence dimensions.
Because we are using ellipsis notation and have specified only one axis, the
`output_shape` arg is a single value. When instantiated in this way, the
layer can handle any number of sequence dimensions - including the case
where no sequence dimension exists.
>>> layer = tf.keras.layers.EinsumDense("...x,xy->...y",
... output_shape=64,
... bias_axes="y")
>>> input_tensor = tf.keras.Input(shape=[32, 128])
>>> output_tensor = layer(input_tensor)
>>> output_tensor
<... shape=(None, 32, 64) dtype=...>
"""
def __init__(
self,
equation,
output_shape,
activation=None,
bias_axes=None,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs,
):
super().__init__(**kwargs)
self.equation = equation
if isinstance(output_shape, int):
self.partial_output_shape = [output_shape]
else:
self.partial_output_shape = list(output_shape)
self.bias_axes = bias_axes
self.activation = activations.get(activation)
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
def build(self, input_shape):
input_shape = tf.TensorShape(input_shape)
shape_data = _analyze_einsum_string(
self.equation,
self.bias_axes,
input_shape,
self.partial_output_shape,
)
kernel_shape, bias_shape, self.full_output_shape = shape_data
self.kernel = self.add_weight(
"kernel",
shape=kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
dtype=self.dtype,
trainable=True,
)
if bias_shape is not None:
self.bias = self.add_weight(
"bias",
shape=bias_shape,
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
dtype=self.dtype,
trainable=True,
)
else:
self.bias = None
super().build(input_shape)
def compute_output_shape(self, _):
return tf.TensorShape(self.full_output_shape)
def get_config(self):
config = {
"output_shape": self.partial_output_shape,
"equation": self.equation,
"activation": activations.serialize(self.activation),
"bias_axes": self.bias_axes,
"kernel_initializer": initializers.serialize(
self.kernel_initializer
),
"bias_initializer": initializers.serialize(self.bias_initializer),
"kernel_regularizer": regularizers.serialize(
self.kernel_regularizer
),
"bias_regularizer": regularizers.serialize(self.bias_regularizer),
"activity_regularizer": regularizers.serialize(
self.activity_regularizer
),
"kernel_constraint": constraints.serialize(self.kernel_constraint),
"bias_constraint": constraints.serialize(self.bias_constraint),
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs):
ret = tf.einsum(self.equation, inputs, self.kernel)
if self.bias is not None:
ret += self.bias
if self.activation is not None:
ret = self.activation(ret)
return ret
def _analyze_einsum_string(equation, bias_axes, input_shape, output_shape):
"""Analyzes an einsum string to determine the required weight shape."""
dot_replaced_string = re.sub(r"\.\.\.", "0", equation)
# This is the case where no ellipses are present in the string.
split_string = re.match(
"([a-zA-Z]+),([a-zA-Z]+)->([a-zA-Z]+)", dot_replaced_string
)
if split_string:
return _analyze_split_string(
split_string, bias_axes, input_shape, output_shape
)
# This is the case where ellipses are present on the left.
split_string = re.match(
"0([a-zA-Z]+),([a-zA-Z]+)->0([a-zA-Z]+)", dot_replaced_string
)
if split_string:
return _analyze_split_string(
split_string, bias_axes, input_shape, output_shape, left_elided=True
)
# This is the case where ellipses are present on the right.
split_string = re.match(
"([a-zA-Z]{2,})0,([a-zA-Z]+)->([a-zA-Z]+)0", dot_replaced_string
)
if split_string:
return _analyze_split_string(
split_string, bias_axes, input_shape, output_shape
)
raise ValueError(
f"Invalid einsum equation '{equation}'. Equations must be in the form "
"[X],[Y]->[Z], ...[X],[Y]->...[Z], or [X]...,[Y]->[Z]...."
)
def _analyze_split_string(
split_string, bias_axes, input_shape, output_shape, left_elided=False
):
"""Analyze an pre-split einsum string to find the weight shape."""
input_spec = split_string.group(1)
weight_spec = split_string.group(2)
output_spec = split_string.group(3)
elided = len(input_shape) - len(input_spec)
if isinstance(output_shape, int):
output_shape = [output_shape]
else:
output_shape = list(output_shape)
output_shape.insert(0, input_shape[0])
if elided > 0 and left_elided:
for i in range(1, elided):
# We already inserted the 0th input dimension at dim 0, so we need
# to start at location 1 here.
output_shape.insert(1, input_shape[i])
elif elided > 0 and not left_elided:
for i in range(len(input_shape) - elided, len(input_shape)):
output_shape.append(input_shape[i])
if left_elided:
# If we have beginning dimensions elided, we need to use negative
# indexing to determine where in the input dimension our values are.
input_dim_map = {
dim: (i + elided) - len(input_shape)
for i, dim in enumerate(input_spec)
}
# Because we've constructed the full output shape already, we don't need
# to do negative indexing.
output_dim_map = {
dim: (i + elided) for i, dim in enumerate(output_spec)
}
else:
input_dim_map = {dim: i for i, dim in enumerate(input_spec)}
output_dim_map = {dim: i for i, dim in enumerate(output_spec)}
for dim in input_spec:
input_shape_at_dim = input_shape[input_dim_map[dim]]
if dim in output_dim_map:
output_shape_at_dim = output_shape[output_dim_map[dim]]
if (
output_shape_at_dim is not None
and output_shape_at_dim != input_shape_at_dim
):
raise ValueError(
"Input shape and output shape do not match at shared "
f"dimension '{dim}'. Input shape is {input_shape_at_dim}, "
"and output shape "
f"is {output_shape[output_dim_map[dim]]}."
)
for dim in output_spec:
if dim not in input_spec and dim not in weight_spec:
raise ValueError(
f"Dimension '{dim}' was specified in the output "
f"'{output_spec}' but has no corresponding dim in the input "
f"spec '{input_spec}' or weight spec '{output_spec}'"
)
weight_shape = []
for dim in weight_spec:
if dim in input_dim_map:
weight_shape.append(input_shape[input_dim_map[dim]])
elif dim in output_dim_map:
weight_shape.append(output_shape[output_dim_map[dim]])
else:
raise ValueError(
f"Weight dimension '{dim}' did not have a match in either "
f"the input spec '{input_spec}' or the output "
f"spec '{output_spec}'. For this layer, the weight must "
"be fully specified."
)
if bias_axes is not None:
num_left_elided = elided if left_elided else 0
idx_map = {
char: output_shape[i + num_left_elided]
for i, char in enumerate(output_spec)
}
for char in bias_axes:
if char not in output_spec:
raise ValueError(
f"Bias dimension '{char}' was requested, but is not part "
f"of the output spec '{output_spec}'"
)
first_bias_location = min(
[output_spec.find(char) for char in bias_axes]
)
bias_output_spec = output_spec[first_bias_location:]
bias_shape = [
idx_map[char] if char in bias_axes else 1
for char in bias_output_spec
]
if not left_elided:
for _ in range(elided):
bias_shape.append(1)
else:
bias_shape = None
return weight_shape, bias_shape, output_shape
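# Illustrative trace of the shape analysis above (a sketch; the equation
# and shapes mirror the "sequence" example in the class docstring and are
# assumptions made for this example only).
def _einsum_shape_analysis_sketch():
    """Shows the weight/bias shapes derived for "abc,cd->abd"."""
    kernel_shape, bias_shape, full_output_shape = _analyze_einsum_string(
        "abc,cd->abd",
        bias_axes="d",
        input_shape=tf.TensorShape([None, 32, 128]),
        output_shape=(None, 64),
    )
    # kernel_shape -> [128, 64]: "c" is read from the input shape and "d"
    # from the requested output shape.
    # bias_shape -> [64]: the bias covers only the "d" axis.
    # full_output_shape -> [None, None, 64]: the batch dim and the unknown
    # sequence dim remain unresolved at this stage.
    return kernel_shape, bias_shape, full_output_shape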
| tf-keras/tf_keras/layers/core/einsum_dense.py/0 | {
"file_path": "tf-keras/tf_keras/layers/core/einsum_dense.py",
"repo_id": "tf-keras",
"token_count": 6044
} | 220 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras layers that implement explicit (approximate) kernel feature maps."""
import numpy as np
import tensorflow.compat.v2 as tf
from tf_keras import initializers
from tf_keras.engine import base_layer
from tf_keras.engine import input_spec
# isort: off
from tensorflow.python.util.tf_export import keras_export
_SUPPORTED_RBF_KERNEL_TYPES = ["gaussian", "laplacian"]
@keras_export("keras.layers.experimental.RandomFourierFeatures")
class RandomFourierFeatures(base_layer.Layer):
r"""Layer that projects its inputs into a random feature space.
This layer implements a mapping from input space to a space with
`output_dim` dimensions, which approximates shift-invariant kernels. A
kernel function `K(x, y)` is shift-invariant if `K(x, y) == k(x - y)` for
some function `k`. Many popular Radial Basis Functions (RBF), including
Gaussian and Laplacian kernels, are shift-invariant.
The implementation of this layer is based on the following paper:
["Random Features for Large-Scale Kernel Machines"](
https://people.eecs.berkeley.edu/~brecht/papers/07.rah.rec.nips.pdf)
by Ali Rahimi and Ben Recht.
The distribution from which the parameters of the random features map
(layer) are sampled determines which shift-invariant kernel the layer
approximates (see paper for more details). You can use the distribution of
your choice. The layer supports out-of-the-box approximations of the
following two RBF kernels:
- Gaussian: `K(x, y) == exp(- square(x - y) / (2 * square(scale)))`
    - Laplacian: `K(x, y) == exp(-abs(x - y) / scale)`
**Note:** Unlike what is described in the paper and unlike what is used in
the Scikit-Learn implementation, the output of this layer does not apply
the `sqrt(2 / D)` normalization factor.
**Usage:** Typically, this layer is used to "kernelize" linear models by
applying a non-linear transformation (this layer) to the input features and
then training a linear model on top of the transformed features. Depending
on the loss function of the linear model, the composition of this layer and
    the linear model results in models that are equivalent (up to approximation)
to kernel SVMs (for hinge loss), kernel logistic regression (for logistic
loss), kernel linear regression (for squared loss), etc.
Examples:
A kernel multinomial logistic regression model with Gaussian kernel for
MNIST:
```python
model = keras.Sequential([
keras.Input(shape=(784,)),
RandomFourierFeatures(
output_dim=4096,
scale=10.,
kernel_initializer='gaussian'),
layers.Dense(units=10, activation='softmax'),
])
model.compile(
optimizer='adam',
loss='categorical_crossentropy',
metrics=['categorical_accuracy']
)
```
A quasi-SVM classifier for MNIST:
```python
model = keras.Sequential([
keras.Input(shape=(784,)),
RandomFourierFeatures(
output_dim=4096,
scale=10.,
kernel_initializer='gaussian'),
layers.Dense(units=10),
])
model.compile(
optimizer='adam',
loss='hinge',
metrics=['categorical_accuracy']
)
```
To use another kernel, just replace the layer creation line with:
```python
random_features_layer = RandomFourierFeatures(
output_dim=500,
kernel_initializer=<my_initializer>,
scale=...,
...)
```
Args:
output_dim: Positive integer, the dimension of the layer's output, i.e.,
the number of random features used to approximate the kernel.
kernel_initializer: Determines the distribution of the parameters of the
random features map (and therefore the kernel approximated by the
layer). It can be either a string identifier or a TF-Keras
`Initializer` instance. Currently only 'gaussian' and 'laplacian' are
supported string identifiers (case insensitive). Note that the kernel
matrix is not trainable.
scale: For Gaussian and Laplacian kernels, this corresponds to a scaling
factor of the corresponding kernel approximated by the layer (see
concrete definitions above). When provided, it should be a positive
float. If None, a default value is used: if the kernel initializer is
set to "gaussian", `scale` becomes `sqrt(input_dim / 2)`, otherwise,
it becomes 1.0. Both the approximation error of the kernel and the
classification quality are sensitive to this parameter. If `trainable`
is set to `True`, this parameter is learned end-to-end during training
and the provided value serves as the initial value.
**Note:** When features from this layer are fed to a linear model,
by making `scale` trainable, the resulting optimization problem is
no longer convex (even if the loss function used by the linear model
is convex).
Defaults to `None`.
trainable: Whether the scaling parameter of the layer should be trainable.
Defaults to `False`.
name: String, name to use for this layer.
"""
def __init__(
self,
output_dim,
kernel_initializer="gaussian",
scale=None,
trainable=False,
name=None,
**kwargs,
):
if output_dim <= 0:
raise ValueError(
"`output_dim` should be a positive integer. "
f"Received: {output_dim}"
)
if isinstance(kernel_initializer, str):
if kernel_initializer.lower() not in _SUPPORTED_RBF_KERNEL_TYPES:
raise ValueError(
f"Unsupported `kernel_initializer`: {kernel_initializer} "
f"Expected one of: {_SUPPORTED_RBF_KERNEL_TYPES}"
)
if scale is not None and scale <= 0.0:
raise ValueError(
"When provided, `scale` should be a positive float. "
f"Received: {scale}"
)
super().__init__(trainable=trainable, name=name, **kwargs)
self.output_dim = output_dim
self.kernel_initializer = kernel_initializer
self.scale = scale
def build(self, input_shape):
input_shape = tf.TensorShape(input_shape)
# TODO(pmol): Allow higher dimension inputs. Currently the input is
# expected to have shape [batch_size, dimension].
if input_shape.rank != 2:
raise ValueError(
"The rank of the input tensor should be 2. "
f"Received input with rank {input_shape.ndims} instead. "
f"Full input shape received: {input_shape}"
)
if input_shape.dims[1].value is None:
raise ValueError(
"The last dimension of the input tensor should be defined. "
f"Found `None`. Full input shape received: {input_shape}"
)
self.input_spec = input_spec.InputSpec(
ndim=2, axes={1: input_shape.dims[1].value}
)
input_dim = input_shape.dims[1].value
kernel_initializer = _get_random_features_initializer(
self.kernel_initializer, shape=(input_dim, self.output_dim)
)
self.unscaled_kernel = self.add_weight(
name="unscaled_kernel",
shape=(input_dim, self.output_dim),
dtype=tf.float32,
initializer=kernel_initializer,
trainable=False,
)
self.bias = self.add_weight(
name="bias",
shape=(self.output_dim,),
dtype=tf.float32,
initializer=initializers.RandomUniform(
minval=0.0, maxval=2 * np.pi
),
trainable=False,
)
if self.scale is None:
self.scale = _get_default_scale(self.kernel_initializer, input_dim)
self.kernel_scale = self.add_weight(
name="kernel_scale",
shape=(1,),
dtype=tf.float32,
initializer=tf.compat.v1.constant_initializer(self.scale),
trainable=True,
constraint="NonNeg",
)
super().build(input_shape)
def call(self, inputs):
inputs = tf.convert_to_tensor(inputs, dtype=self.dtype)
inputs = tf.cast(inputs, tf.float32)
kernel = (1.0 / self.kernel_scale) * self.unscaled_kernel
outputs = tf.matmul(a=inputs, b=kernel)
outputs = tf.nn.bias_add(outputs, self.bias)
return tf.cos(outputs)
def compute_output_shape(self, input_shape):
input_shape = tf.TensorShape(input_shape)
input_shape = input_shape.with_rank(2)
if input_shape.dims[-1].value is None:
raise ValueError(
"The last dimension of the input tensor should be defined. "
f"Found `None`. Full input shape received: {input_shape}"
)
return input_shape[:-1].concatenate(self.output_dim)
def get_config(self):
kernel_initializer = self.kernel_initializer
if not isinstance(kernel_initializer, str):
kernel_initializer = initializers.serialize(kernel_initializer)
config = {
"output_dim": self.output_dim,
"kernel_initializer": kernel_initializer,
"scale": self.scale,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def _get_random_features_initializer(initializer, shape):
"""Returns Initializer object for random features."""
def _get_cauchy_samples(loc, scale, shape):
probs = np.random.uniform(low=0.0, high=1.0, size=shape)
return loc + scale * np.tan(np.pi * (probs - 0.5))
random_features_initializer = initializer
if isinstance(initializer, str):
if initializer.lower() == "gaussian":
random_features_initializer = initializers.RandomNormal(stddev=1.0)
elif initializer.lower() == "laplacian":
random_features_initializer = initializers.Constant(
_get_cauchy_samples(loc=0.0, scale=1.0, shape=shape)
)
else:
raise ValueError(
f'Unsupported `kernel_initializer`: "{initializer}" '
f"Expected one of: {_SUPPORTED_RBF_KERNEL_TYPES}"
)
return random_features_initializer
def _get_default_scale(initializer, input_dim):
if isinstance(initializer, str) and initializer.lower() == "gaussian":
return np.sqrt(input_dim / 2.0)
return 1.0
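# Illustrative kernel-approximation check (a sketch; the input sizes, the
# number of random features and the scale value are arbitrary assumptions).
# Because this layer omits the `sqrt(2 / D)` factor noted in the class
# docstring, the feature dot product is rescaled by `2 / output_dim` here.
def _gaussian_kernel_approximation_sketch(output_dim=2048, scale=2.0):
    """Compares the random-feature inner product with the exact kernel."""
    x = tf.random.normal([4, 8])
    y = tf.random.normal([4, 8])
    layer = RandomFourierFeatures(
        output_dim=output_dim, scale=scale, kernel_initializer="gaussian"
    )
    zx = layer(x)
    zy = layer(y)
    approx = tf.matmul(zx, zy, transpose_b=True) * (2.0 / output_dim)
    sq_dist = tf.reduce_sum(
        tf.square(x[:, None, :] - y[None, :, :]), axis=-1
    )
    exact = tf.exp(-sq_dist / (2.0 * scale**2))
    # `approx` converges to `exact` as `output_dim` grows.
    return approx, exact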
| tf-keras/tf_keras/layers/kernelized.py/0 | {
"file_path": "tf-keras/tf_keras/layers/kernelized.py",
"repo_id": "tf-keras",
"token_count": 4548
} | 221 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Layer that computes the maximum (element-wise) of several inputs."""
import tensorflow.compat.v2 as tf
from tf_keras.layers.merging.base_merge import _Merge
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.layers.Maximum")
class Maximum(_Merge):
"""Layer that computes the maximum (element-wise) a list of inputs.
It takes as input a list of tensors, all of the same shape, and returns
a single tensor (also of the same shape).
>>> tf.keras.layers.Maximum()([np.arange(5).reshape(5, 1),
... np.arange(5, 10).reshape(5, 1)])
<tf.Tensor: shape=(5, 1), dtype=int64, numpy=
array([[5],
[6],
[7],
[8],
[9]])>
>>> x1 = tf.keras.layers.Dense(8)(np.arange(10).reshape(5, 2))
>>> x2 = tf.keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2))
>>> maxed = tf.keras.layers.Maximum()([x1, x2])
>>> maxed.shape
TensorShape([5, 8])
"""
def _merge_function(self, inputs):
output = inputs[0]
for i in range(1, len(inputs)):
output = tf.maximum(output, inputs[i])
return output
@keras_export("keras.layers.maximum")
def maximum(inputs, **kwargs):
"""Functional interface to compute maximum (element-wise) list of `inputs`.
This is equivalent to the `tf.keras.layers.Maximum` layer.
For example:
```python
input1 = tf.keras.layers.Input(shape=(16,))
x1 = tf.keras.layers.Dense(8, activation='relu')(input1) #shape=(None, 8)
input2 = tf.keras.layers.Input(shape=(32,))
x2 = tf.keras.layers.Dense(8, activation='relu')(input2) #shape=(None, 8)
max_inp=tf.keras.layers.maximum([x1,x2]) #shape=(None, 8)
out = tf.keras.layers.Dense(4)(max_inp)
model = tf.keras.models.Model(inputs=[input1, input2], outputs=out)
```
Args:
inputs: A list of input tensors of same shape.
**kwargs: Standard layer keyword arguments.
Returns:
A tensor (of same shape as input tensor) with the element-wise
maximum of the inputs.
Raises:
ValueError: If input tensors are of different shape.
"""
return Maximum(**kwargs)(inputs)
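# Illustrative check (a sketch; the constants are arbitrary assumptions):
# the functional interface reduces to an element-wise `tf.maximum` over
# the list of inputs.
def _maximum_sketch():
    a = tf.constant([[1.0, 5.0], [3.0, 2.0]])
    b = tf.constant([[4.0, 0.0], [3.0, 6.0]])
    merged = maximum([a, b])
    # Equals tf.maximum(a, b): [[4., 5.], [3., 6.]]
    return merged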
| tf-keras/tf_keras/layers/merging/maximum.py/0 | {
"file_path": "tf-keras/tf_keras/layers/merging/maximum.py",
"repo_id": "tf-keras",
"token_count": 1116
} | 222 |
# Copyright 2023 The TF-Keras Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow.compat.v2 as tf
from tf_keras.initializers import TruncatedNormal
from tf_keras.layers.rnn import Wrapper
# isort: off
from tensorflow.python.util.tf_export import keras_export
# Adapted from TF-Addons implementation
@keras_export("keras.layers.SpectralNormalization", v1=[])
class SpectralNormalization(Wrapper):
"""Performs spectral normalization on the weights of a target layer.
This wrapper controls the Lipschitz constant of the weights of a layer by
constraining their spectral norm, which can stabilize the training of GANs.
Args:
layer: A `keras.layers.Layer` instance that
has either a `kernel` (e.g. `Conv2D`, `Dense`...)
or an `embeddings` attribute (`Embedding` layer).
power_iterations: int, the number of iterations during normalization.
Examples:
Wrap `keras.layers.Conv2D`:
>>> x = np.random.rand(1, 10, 10, 1)
>>> conv2d = SpectralNormalization(tf.keras.layers.Conv2D(2, 2))
>>> y = conv2d(x)
>>> y.shape
TensorShape([1, 9, 9, 2])
Wrap `keras.layers.Dense`:
>>> x = np.random.rand(1, 10, 10, 1)
>>> dense = SpectralNormalization(tf.keras.layers.Dense(10))
>>> y = dense(x)
>>> y.shape
TensorShape([1, 10, 10, 10])
Reference:
- [Spectral Normalization for GAN](https://arxiv.org/abs/1802.05957).
"""
def __init__(self, layer, power_iterations=1, **kwargs):
super().__init__(layer, **kwargs)
if power_iterations <= 0:
raise ValueError(
"`power_iterations` should be greater than zero. Received: "
f"`power_iterations={power_iterations}`"
)
self.power_iterations = power_iterations
def build(self, input_shape):
super().build(input_shape)
input_shape = tf.TensorShape(input_shape)
self.input_spec = tf.keras.layers.InputSpec(
shape=[None] + input_shape[1:]
)
if hasattr(self.layer, "kernel"):
self.kernel = self.layer.kernel
elif hasattr(self.layer, "embeddings"):
self.kernel = self.layer.embeddings
else:
raise ValueError(
f"{type(self.layer).__name__} object has no attribute 'kernel' "
"nor 'embeddings'"
)
self.kernel_shape = self.kernel.shape.as_list()
self.vector_u = self.add_weight(
shape=(1, self.kernel_shape[-1]),
initializer=TruncatedNormal(stddev=0.02),
trainable=False,
name="vector_u",
dtype=self.kernel.dtype,
)
def call(self, inputs, training=False):
if training:
self.normalize_weights()
output = self.layer(inputs)
return output
def compute_output_shape(self, input_shape):
return tf.TensorShape(
self.layer.compute_output_shape(input_shape).as_list()
)
def normalize_weights(self):
"""Generate spectral normalized weights.
This method will update the value of `self.kernel` with the
spectral normalized value, so that the layer is ready for `call()`.
"""
weights = tf.reshape(self.kernel, [-1, self.kernel_shape[-1]])
vector_u = self.vector_u
        # Skip the update if the weights are all zeros.
if not tf.reduce_all(tf.equal(weights, 0.0)):
for _ in range(self.power_iterations):
vector_v = tf.math.l2_normalize(
tf.matmul(vector_u, weights, transpose_b=True)
)
vector_u = tf.math.l2_normalize(tf.matmul(vector_v, weights))
vector_u = tf.stop_gradient(vector_u)
vector_v = tf.stop_gradient(vector_v)
sigma = tf.matmul(
tf.matmul(vector_v, weights), vector_u, transpose_b=True
)
self.vector_u.assign(tf.cast(vector_u, self.vector_u.dtype))
self.kernel.assign(
tf.cast(
tf.reshape(self.kernel / sigma, self.kernel_shape),
self.kernel.dtype,
)
)
def get_config(self):
config = {"power_iterations": self.power_iterations}
base_config = super().get_config()
return {**base_config, **config}
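# Illustrative power-iteration sketch (the matrix size and iteration count
# are arbitrary assumptions). It mirrors the update in `normalize_weights`
# on a plain matrix and compares the estimate against the exact largest
# singular value.
def _power_iteration_sketch(num_iterations=20):
    """Estimates the spectral norm of a random matrix by power iteration."""
    weights = tf.random.normal([16, 8])
    vector_u = tf.random.normal([1, 8])
    for _ in range(num_iterations):
        vector_v = tf.math.l2_normalize(
            tf.matmul(vector_u, weights, transpose_b=True)
        )
        vector_u = tf.math.l2_normalize(tf.matmul(vector_v, weights))
    sigma = tf.matmul(
        tf.matmul(vector_v, weights), vector_u, transpose_b=True
    )
    exact = tf.reduce_max(tf.linalg.svd(weights, compute_uv=False))
    # sigma[0, 0] approaches `exact` as the iteration count grows.
    return sigma, exact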
| tf-keras/tf_keras/layers/normalization/spectral_normalization.py/0 | {
"file_path": "tf-keras/tf_keras/layers/normalization/spectral_normalization.py",
"repo_id": "tf-keras",
"token_count": 2150
} | 223 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Global average pooling 1D layer."""
import tensorflow.compat.v2 as tf
from tf_keras import backend
from tf_keras.layers.pooling.base_global_pooling1d import GlobalPooling1D
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export(
"keras.layers.GlobalAveragePooling1D", "keras.layers.GlobalAvgPool1D"
)
class GlobalAveragePooling1D(GlobalPooling1D):
"""Global average pooling operation for temporal data.
Examples:
>>> input_shape = (2, 3, 4)
>>> x = tf.random.normal(input_shape)
>>> y = tf.keras.layers.GlobalAveragePooling1D()(x)
>>> print(y.shape)
(2, 4)
Args:
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, steps, features)` while `channels_first`
corresponds to inputs with shape
`(batch, features, steps)`.
keepdims: A boolean, whether to keep the temporal dimension or not.
If `keepdims` is `False` (default), the rank of the tensor is reduced
for spatial dimensions.
        If `keepdims` is `True`, the temporal dimension is retained with
length 1.
The behavior is the same as for `tf.reduce_mean` or `np.mean`.
Call arguments:
inputs: A 3D tensor.
mask: Binary tensor of shape `(batch_size, steps)` indicating whether
a given step should be masked (excluded from the average).
Input shape:
- If `data_format='channels_last'`:
3D tensor with shape:
`(batch_size, steps, features)`
- If `data_format='channels_first'`:
3D tensor with shape:
`(batch_size, features, steps)`
Output shape:
- If `keepdims`=False:
2D tensor with shape `(batch_size, features)`.
- If `keepdims`=True:
- If `data_format='channels_last'`:
3D tensor with shape `(batch_size, 1, features)`
- If `data_format='channels_first'`:
3D tensor with shape `(batch_size, features, 1)`
"""
def __init__(self, data_format="channels_last", **kwargs):
super().__init__(data_format=data_format, **kwargs)
self.supports_masking = True
def call(self, inputs, mask=None):
steps_axis = 1 if self.data_format == "channels_last" else 2
if mask is not None:
mask = tf.cast(mask, inputs[0].dtype)
mask = tf.expand_dims(
mask, 2 if self.data_format == "channels_last" else 1
)
inputs *= mask
return backend.sum(
inputs, axis=steps_axis, keepdims=self.keepdims
) / tf.reduce_sum(mask, axis=steps_axis, keepdims=self.keepdims)
else:
return backend.mean(inputs, axis=steps_axis, keepdims=self.keepdims)
def compute_mask(self, inputs, mask=None):
return None
# Alias
GlobalAvgPool1D = GlobalAveragePooling1D
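# Illustrative masked-pooling sketch (the values are arbitrary
# assumptions). Masked steps are excluded from both the sum and the
# denominator, so only the unmasked steps contribute to the average.
def _masked_pooling_sketch():
    x = tf.constant([[[1.0], [2.0], [3.0], [100.0]]])  # (batch=1, steps=4)
    mask = tf.constant([[True, True, True, False]])
    pooled = GlobalAveragePooling1D()(x, mask=mask)
    # Returns [[2.0]], the mean of the three unmasked steps.
    return pooled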
| tf-keras/tf_keras/layers/pooling/global_average_pooling1d.py/0 | {
"file_path": "tf-keras/tf_keras/layers/pooling/global_average_pooling1d.py",
"repo_id": "tf-keras",
"token_count": 1427
} | 224 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for KPL implementation of bucketized columns with dense inputs."""
import numpy as np
import tensorflow.compat.v2 as tf
import tf_keras as keras
from tf_keras.layers.preprocessing import discretization
from tf_keras.layers.preprocessing.benchmarks import (
feature_column_benchmark as fc_bm,
)
# isort: off
from tensorflow.python.eager.def_function import (
function as tf_function,
)
NUM_REPEATS = 10 # The number of times to run each benchmark.
BATCH_SIZES = [32, 256]
### KPL AND FC IMPLEMENTATION BENCHMARKS ###
def embedding_varlen(batch_size, max_length):
"""Benchmark a variable-length embedding."""
# Data and constants.
max_value = 25.0
bins = np.arange(1.0, max_value)
data = fc_bm.create_data(
max_length, batch_size * NUM_REPEATS, 100000, dtype=float
)
# TF-Keras implementation
model = keras.Sequential()
model.add(keras.Input(shape=(max_length,), name="data", dtype=tf.float32))
model.add(discretization.Discretization(bins))
# FC implementation
fc = tf.feature_column.bucketized_column(
tf.feature_column.numeric_column("data"), boundaries=list(bins)
)
# Wrap the FC implementation in a tf.function for a fair comparison
@tf_function()
def fc_fn(tensors):
fc.transform_feature(
tf.__internal__.feature_column.FeatureTransformationCache(tensors),
None,
)
# Benchmark runs
keras_data = {"data": data.to_tensor(default_value=0.0)}
k_avg_time = fc_bm.run_keras(keras_data, model, batch_size, NUM_REPEATS)
fc_data = {"data": data.to_tensor(default_value=0.0)}
fc_avg_time = fc_bm.run_fc(fc_data, fc_fn, batch_size, NUM_REPEATS)
return k_avg_time, fc_avg_time
class BenchmarkLayer(fc_bm.LayerBenchmark):
"""Benchmark the layer forward pass."""
def benchmark_layer(self):
for batch in BATCH_SIZES:
name = f"bucketized|dense|batch_{batch}"
k_time, f_time = embedding_varlen(batch_size=batch, max_length=256)
self.report(name, k_time, f_time, NUM_REPEATS)
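# Illustrative sketch of what the benchmarked layer computes (the sample
# values and boundaries are arbitrary assumptions, much smaller than the
# benchmark's `bins`).
def _discretization_sketch():
    layer = discretization.Discretization([1.0, 2.0, 3.0])
    # Each value is mapped to the index of its bucket: [[0, 1, 2, 3]].
    return layer(tf.constant([[0.5, 1.5, 2.5, 3.5]]))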
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/layers/preprocessing/benchmarks/bucketized_column_dense_benchmark.py/0 | {
"file_path": "tf-keras/tf_keras/layers/preprocessing/benchmarks/bucketized_column_dense_benchmark.py",
"repo_id": "tf-keras",
"token_count": 1043
} | 225 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for TF-Keras image preprocessing layer."""
import functools
import time
import numpy as np
import tensorflow.compat.v2 as tf
import tf_keras as keras
from tf_keras.layers.preprocessing import image_preprocessing
LOWER = 0.2
UPPER = 0.4
BATCH_SIZE = 32
def rotate(inputs):
"""rotate image."""
inputs_shape = tf.shape(inputs)
batch_size = inputs_shape[0]
img_hd = tf.cast(inputs_shape[1], tf.float32)
img_wd = tf.cast(inputs_shape[2], tf.float32)
min_angle = LOWER * 2.0 * np.pi
max_angle = UPPER * 2.0 * np.pi
angles = tf.random.uniform(
shape=[batch_size], minval=min_angle, maxval=max_angle
)
return image_preprocessing.transform(
inputs, image_preprocessing.get_rotation_matrix(angles, img_hd, img_wd)
)
def zoom(inputs):
"""zoom image."""
inputs_shape = tf.shape(inputs)
batch_size = inputs_shape[0]
img_hd = tf.cast(inputs_shape[1], tf.float32)
img_wd = tf.cast(inputs_shape[2], tf.float32)
height_zoom = tf.random.uniform(
shape=[batch_size, 1], minval=1.0 + LOWER, maxval=1.0 + UPPER
)
width_zoom = tf.random.uniform(
shape=[batch_size, 1], minval=1.0 + LOWER, maxval=1.0 + UPPER
)
zooms = tf.cast(
tf.concat([width_zoom, height_zoom], axis=1), dtype=tf.float32
)
return image_preprocessing.transform(
inputs, image_preprocessing.get_zoom_matrix(zooms, img_hd, img_wd)
)
def image_augmentation(inputs, batch_size):
"""image augmentation."""
img = inputs
img = tf.image.resize(img, size=[224, 224])
img = tf.image.random_crop(img, size=[batch_size, 224, 224, 3])
img = rotate(img)
img = zoom(img)
return img
class BenchmarkLayer(tf.test.Benchmark):
"""Benchmark the layer forward pass."""
def run_dataset_implementation(self, batch_size):
num_repeats = 5
starts = []
ends = []
for _ in range(num_repeats):
ds = tf.data.Dataset.from_tensor_slices(
np.random.random((batch_size, 256, 256, 3))
)
ds = ds.shuffle(batch_size * 100)
ds = ds.batch(batch_size)
ds = ds.prefetch(batch_size)
img_augmentation = functools.partial(
image_augmentation, batch_size=batch_size
)
ds = ds.map(img_augmentation, num_parallel_calls=8)
starts.append(time.time())
count = 0
# Benchmarked code begins here.
for i in ds:
_ = i
count += 1
# Benchmarked code ends here.
ends.append(time.time())
avg_time = np.mean(np.array(ends) - np.array(starts)) / count
return avg_time
def bm_layer_implementation(self, batch_size):
with tf.device("/gpu:0"):
img = keras.Input(shape=(256, 256, 3), dtype=tf.float32)
preprocessor = keras.Sequential(
[
image_preprocessing.Resizing(224, 224),
image_preprocessing.RandomCrop(height=224, width=224),
image_preprocessing.RandomRotation(factor=(0.2, 0.4)),
image_preprocessing.RandomFlip(mode="horizontal"),
image_preprocessing.RandomZoom(0.2, 0.2),
]
)
_ = preprocessor(img)
num_repeats = 5
starts = []
ends = []
for _ in range(num_repeats):
ds = tf.data.Dataset.from_tensor_slices(
np.random.random((batch_size, 256, 256, 3))
)
ds = ds.shuffle(batch_size * 100)
ds = ds.batch(batch_size)
ds = ds.prefetch(batch_size)
starts.append(time.time())
count = 0
# Benchmarked code begins here.
for i in ds:
_ = preprocessor(i)
count += 1
# Benchmarked code ends here.
ends.append(time.time())
avg_time = np.mean(np.array(ends) - np.array(starts)) / count
name = f"image_preprocessing|batch_{batch_size}"
baseline = self.run_dataset_implementation(batch_size)
extras = {
"dataset implementation baseline": baseline,
"delta seconds": (baseline - avg_time),
"delta percent": ((baseline - avg_time) / baseline) * 100,
}
self.report_benchmark(
iters=num_repeats, wall_time=avg_time, extras=extras, name=name
)
def benchmark_vocab_size_by_batch(self):
for batch in [32, 64, 256]:
self.bm_layer_implementation(batch_size=batch)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/layers/preprocessing/benchmarks/image_preproc_benchmark.py/0 | {
"file_path": "tf-keras/tf_keras/layers/preprocessing/benchmarks/image_preproc_benchmark.py",
"repo_id": "tf-keras",
"token_count": 2545
} | 226 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras image preprocessing layers."""
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow.python.util.tf_export import keras_export
from tf_keras import backend
from tf_keras.engine import base_layer
from tf_keras.layers.preprocessing import preprocessing_utils as utils
from tf_keras.utils import image_utils
from tf_keras.utils import tf_utils
H_AXIS = -3
W_AXIS = -2
def check_fill_mode_and_interpolation(fill_mode, interpolation):
if fill_mode not in {"reflect", "wrap", "constant", "nearest"}:
raise NotImplementedError(
f"Unknown `fill_mode` {fill_mode}. Only `reflect`, `wrap`, "
"`constant` and `nearest` are supported."
)
if interpolation not in {"nearest", "bilinear"}:
raise NotImplementedError(
f"Unknown `interpolation` {interpolation}. Only `nearest` and "
"`bilinear` are supported."
)
@keras_export(
"keras.layers.Resizing", "keras.layers.experimental.preprocessing.Resizing"
)
class Resizing(base_layer.Layer):
"""A preprocessing layer which resizes images.
This layer resizes an image input to a target height and width. The input
should be a 4D (batched) or 3D (unbatched) tensor in `"channels_last"`
format. Input pixel values can be of any range
(e.g. `[0., 1.)` or `[0, 255]`) and of integer or floating point dtype.
By default, the layer will output floats.
This layer can be called on tf.RaggedTensor batches of input images of
distinct sizes, and will resize the outputs to dense tensors of uniform
size.
For an overview and full list of preprocessing layers, see the preprocessing
[guide](https://www.tensorflow.org/guide/keras/preprocessing_layers).
Args:
height: Integer, the height of the output shape.
width: Integer, the width of the output shape.
interpolation: String, the interpolation method.
Supports `"bilinear"`, `"nearest"`, `"bicubic"`, `"area"`,
`"lanczos3"`, `"lanczos5"`, `"gaussian"`, `"mitchellcubic"`.
Defaults to `"bilinear"`.
crop_to_aspect_ratio: If True, resize the images without aspect
ratio distortion. When the original aspect ratio differs
from the target aspect ratio, the output image will be
cropped so as to return the
largest possible window in the image (of size `(height, width)`)
that matches the target aspect ratio. By default
(`crop_to_aspect_ratio=False`), aspect ratio may not be preserved.
"""
def __init__(
self,
height,
width,
interpolation="bilinear",
crop_to_aspect_ratio=False,
**kwargs,
):
self.height = height
self.width = width
self.interpolation = interpolation
self.crop_to_aspect_ratio = crop_to_aspect_ratio
self._interpolation_method = image_utils.get_interpolation(
interpolation
)
super().__init__(**kwargs)
def call(self, inputs):
# tf.image.resize will always output float32
# and operate more efficiently on float32
        # unless interpolation is nearest, in which case output type matches
# input type.
if self.interpolation == "nearest":
input_dtype = self.compute_dtype
else:
input_dtype = tf.float32
inputs = convert_inputs(inputs, dtype=input_dtype)
size = [self.height, self.width]
if self.crop_to_aspect_ratio:
def resize_to_aspect(x):
if tf_utils.is_ragged(inputs):
x = x.to_tensor()
return image_utils.smart_resize(
x, size=size, interpolation=self._interpolation_method
)
if tf_utils.is_ragged(inputs):
size_as_shape = tf.TensorShape(size)
shape = size_as_shape + inputs.shape[-1:]
spec = tf.TensorSpec(shape, input_dtype)
outputs = tf.map_fn(
resize_to_aspect, inputs, fn_output_signature=spec
)
else:
outputs = resize_to_aspect(inputs)
else:
outputs = tf.image.resize(
inputs, size=size, method=self._interpolation_method
)
return tf.cast(outputs, self.compute_dtype)
def compute_output_shape(self, input_shape):
input_shape = tf.TensorShape(input_shape).as_list()
input_shape[H_AXIS] = self.height
input_shape[W_AXIS] = self.width
return tf.TensorShape(input_shape)
def get_config(self):
config = {
"height": self.height,
"width": self.width,
"interpolation": self.interpolation,
"crop_to_aspect_ratio": self.crop_to_aspect_ratio,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
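# Minimal usage sketch for `Resizing` (assumes a TF 2.x runtime where this layer
# is exported as `tf.keras.layers.Resizing`; shapes below are illustrative):
#
#   import tensorflow as tf
#   images = tf.random.uniform((4, 300, 400, 3), maxval=255.0)
#   resized = tf.keras.layers.Resizing(height=224, width=224)(images)
#   resized.shape  # TensorShape([4, 224, 224, 3])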
@keras_export(
"keras.layers.CenterCrop",
"keras.layers.experimental.preprocessing.CenterCrop",
)
class CenterCrop(base_layer.Layer):
"""A preprocessing layer which crops images.
    This layer crops the central portion of the images to a target size. If an
image is smaller than the target size, it will be resized and cropped
so as to return the largest possible window in the image that matches
the target aspect ratio.
Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and
of integer or floating point dtype.
By default, the layer will output floats.
For an overview and full list of preprocessing layers, see the preprocessing
[guide](https://www.tensorflow.org/guide/keras/preprocessing_layers).
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format.
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., target_height, target_width, channels)`.
If the input height/width is even and the target height/width is odd (or
inversely), the input image is left-padded by 1 pixel.
Args:
height: Integer, the height of the output shape.
width: Integer, the width of the output shape.
"""
def __init__(self, height, width, **kwargs):
self.height = height
self.width = width
super().__init__(**kwargs, autocast=False)
def call(self, inputs):
inputs = convert_inputs(inputs, self.compute_dtype)
input_shape = tf.shape(inputs)
h_diff = input_shape[H_AXIS] - self.height
w_diff = input_shape[W_AXIS] - self.width
def center_crop():
h_start = tf.cast(h_diff / 2, tf.int32)
w_start = tf.cast(w_diff / 2, tf.int32)
return tf.image.crop_to_bounding_box(
inputs, h_start, w_start, self.height, self.width
)
def upsize():
outputs = image_utils.smart_resize(
inputs, [self.height, self.width]
)
# smart_resize will always output float32, so we need to re-cast.
return tf.cast(outputs, self.compute_dtype)
return tf.cond(
tf.reduce_all((h_diff >= 0, w_diff >= 0)), center_crop, upsize
)
def compute_output_shape(self, input_shape):
input_shape = tf.TensorShape(input_shape).as_list()
input_shape[H_AXIS] = self.height
input_shape[W_AXIS] = self.width
return tf.TensorShape(input_shape)
def get_config(self):
config = {
"height": self.height,
"width": self.width,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
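# Minimal usage sketch for `CenterCrop` (assumes the `tf.keras.layers.CenterCrop`
# export; shapes are illustrative):
#
#   import tensorflow as tf
#   images = tf.random.uniform((2, 300, 400, 3), maxval=255.0)
#   cropped = tf.keras.layers.CenterCrop(height=180, width=180)(images)
#   cropped.shape  # TensorShape([2, 180, 180, 3])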
@keras_export(
"keras.layers.RandomCrop",
"keras.layers.experimental.preprocessing.RandomCrop",
v1=[],
)
class RandomCrop(base_layer.BaseRandomLayer):
"""A preprocessing layer which randomly crops images during training.
During training, this layer will randomly choose a location to crop images
down to a target size. The layer will crop all the images in the same batch
to the same cropping location.
At inference time, and during training if an input image is smaller than the
target size, the input will be resized and cropped so as to return the
largest possible window in the image that matches the target aspect ratio.
If you need to apply random cropping at inference time, set `training` to
True when calling the layer.
Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and
of integer or floating point dtype. By default, the layer will output
floats.
For an overview and full list of preprocessing layers, see the preprocessing
[guide](https://www.tensorflow.org/guide/keras/preprocessing_layers).
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format.
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., target_height, target_width, channels)`.
Args:
height: Integer, the height of the output shape.
width: Integer, the width of the output shape.
seed: Integer. Used to create a random seed.
"""
def __init__(self, height, width, seed=None, **kwargs):
super().__init__(
**kwargs, autocast=False, seed=seed, force_generator=True
)
self.height = height
self.width = width
self.seed = seed
def call(self, inputs, training=True):
inputs = convert_inputs(inputs, dtype=self.compute_dtype)
input_shape = tf.shape(inputs)
h_diff = input_shape[H_AXIS] - self.height
w_diff = input_shape[W_AXIS] - self.width
def random_crop():
dtype = input_shape.dtype
rands = self._random_generator.random_uniform(
[2], 0, dtype.max, dtype
)
h_start = rands[0] % (h_diff + 1)
w_start = rands[1] % (w_diff + 1)
return tf.image.crop_to_bounding_box(
inputs, h_start, w_start, self.height, self.width
)
def resize():
outputs = image_utils.smart_resize(
inputs, [self.height, self.width]
)
# smart_resize will always output float32, so we need to re-cast.
return tf.cast(outputs, self.compute_dtype)
return tf.cond(
tf.reduce_all((training, h_diff >= 0, w_diff >= 0)),
random_crop,
resize,
)
def compute_output_shape(self, input_shape):
input_shape = tf.TensorShape(input_shape).as_list()
input_shape[H_AXIS] = self.height
input_shape[W_AXIS] = self.width
return tf.TensorShape(input_shape)
def get_config(self):
config = {
"height": self.height,
"width": self.width,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
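# Minimal usage sketch for `RandomCrop` (assumes the `tf.keras.layers.RandomCrop`
# export; shapes are illustrative). Note the different training/inference paths:
#
#   import tensorflow as tf
#   images = tf.random.uniform((2, 300, 400, 3), maxval=255.0)
#   layer = tf.keras.layers.RandomCrop(height=256, width=256, seed=42)
#   train_out = layer(images, training=True)   # random 256x256 window, shared by the batch
#   infer_out = layer(images, training=False)  # deterministic resize-and-crop path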
@keras_export(
"keras.layers.Rescaling",
"keras.layers.experimental.preprocessing.Rescaling",
)
class Rescaling(base_layer.Layer):
"""A preprocessing layer which rescales input values to a new range.
This layer rescales every value of an input (often an image) by multiplying
by `scale` and adding `offset`.
For instance:
1. To rescale an input in the `[0, 255]` range
to be in the `[0, 1]` range, you would pass `scale=1./255`.
2. To rescale an input in the `[0, 255]` range to be in the `[-1, 1]` range,
you would pass `scale=1./127.5, offset=-1`.
The rescaling is applied both during training and inference. Inputs can be
of integer or floating point dtype, and by default the layer will output
floats.
For an overview and full list of preprocessing layers, see the preprocessing
[guide](https://www.tensorflow.org/guide/keras/preprocessing_layers).
Input shape:
Arbitrary.
Output shape:
Same as input.
Args:
scale: Float, the scale to apply to the inputs.
offset: Float, the offset to apply to the inputs.
"""
def __init__(self, scale, offset=0.0, **kwargs):
self.scale = scale
self.offset = offset
super().__init__(**kwargs)
def call(self, inputs):
dtype = self.compute_dtype
inputs = convert_inputs(inputs, dtype=dtype)
scale = tf.cast(self.scale, dtype)
offset = tf.cast(self.offset, dtype)
return tf.cast(inputs, dtype) * scale + offset
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {
"scale": self.scale,
"offset": self.offset,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
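# Minimal usage sketch for `Rescaling` (assumes the `tf.keras.layers.Rescaling`
# export), mapping `[0, 255]` inputs into `[-1, 1]`:
#
#   import tensorflow as tf
#   layer = tf.keras.layers.Rescaling(scale=1.0 / 127.5, offset=-1.0)
#   layer(tf.constant([[0.0, 127.5, 255.0]]))  # [[-1., 0., 1.]]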
HORIZONTAL = "horizontal"
VERTICAL = "vertical"
HORIZONTAL_AND_VERTICAL = "horizontal_and_vertical"
@keras_export(
"keras.layers.RandomFlip",
"keras.layers.experimental.preprocessing.RandomFlip",
v1=[],
)
class RandomFlip(base_layer.BaseRandomLayer):
"""A preprocessing layer which randomly flips images during training.
    This layer will flip the images horizontally and/or vertically based on the
    `mode` attribute. At inference time, the output will be identical to the
    input. Call the layer with `training=True` to flip the input.
Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and
of integer or floating point dtype.
By default, the layer will output floats.
For an overview and full list of preprocessing layers, see the preprocessing
[guide](https://www.tensorflow.org/guide/keras/preprocessing_layers).
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format.
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format.
Args:
mode: String indicating which flip mode to use. Can be `"horizontal"`,
`"vertical"`, or `"horizontal_and_vertical"`. `"horizontal"` is a
left-right flip and `"vertical"` is a top-bottom flip. Defaults to
            `"horizontal_and_vertical"`.
seed: Integer. Used to create a random seed.
"""
def __init__(self, mode=HORIZONTAL_AND_VERTICAL, seed=None, **kwargs):
super().__init__(seed=seed, force_generator=True, **kwargs)
self.mode = mode
if mode == HORIZONTAL:
self.horizontal = True
self.vertical = False
elif mode == VERTICAL:
self.horizontal = False
self.vertical = True
elif mode == HORIZONTAL_AND_VERTICAL:
self.horizontal = True
self.vertical = True
else:
raise ValueError(
f"RandomFlip layer {self.name} received an unknown mode "
f"argument {mode}"
)
self.seed = seed
def call(self, inputs, training=True):
inputs = convert_inputs(inputs, self.compute_dtype)
def random_flipped_inputs(inputs):
flipped_outputs = inputs
if self.horizontal:
seed = self._random_generator.make_seed_for_stateless_op()
if seed is not None:
flipped_outputs = tf.image.stateless_random_flip_left_right(
flipped_outputs, seed=seed
)
else:
flipped_outputs = tf.image.random_flip_left_right(
flipped_outputs,
self._random_generator.make_legacy_seed(),
)
if self.vertical:
seed = self._random_generator.make_seed_for_stateless_op()
if seed is not None:
flipped_outputs = tf.image.stateless_random_flip_up_down(
flipped_outputs, seed=seed
)
else:
flipped_outputs = tf.image.random_flip_up_down(
flipped_outputs,
self._random_generator.make_legacy_seed(),
)
flipped_outputs.set_shape(inputs.shape)
return flipped_outputs
if training:
return random_flipped_inputs(inputs)
else:
return inputs
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {
"mode": self.mode,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
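# Minimal usage sketch for `RandomFlip` (assumes the `tf.keras.layers.RandomFlip`
# export); the layer is a no-op at inference time:
#
#   import tensorflow as tf
#   images = tf.random.uniform((2, 64, 64, 3))
#   layer = tf.keras.layers.RandomFlip(mode="horizontal", seed=1)
#   flipped = layer(images, training=True)     # each image may be flipped left-right
#   unchanged = layer(images, training=False)  # identical to `images`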
# TODO(tanzheny): Add examples, here and everywhere.
@keras_export(
"keras.layers.RandomTranslation",
"keras.layers.experimental.preprocessing.RandomTranslation",
v1=[],
)
class RandomTranslation(base_layer.BaseRandomLayer):
"""A preprocessing layer which randomly translates images during training.
This layer will apply random translations to each image during training,
filling empty space according to `fill_mode`.
Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and
of integer or floating point dtype. By default, the layer will output
floats.
For an overview and full list of preprocessing layers, see the preprocessing
[guide](https://www.tensorflow.org/guide/keras/preprocessing_layers).
Args:
height_factor: a float represented as fraction of value, or a tuple of
size 2 representing lower and upper bound for shifting vertically. A
negative value means shifting image up, while a positive value means
shifting image down. When represented as a single positive float, this
value is used for both the upper and lower bound. For instance,
`height_factor=(-0.2, 0.3)` results in an output shifted by a random
amount in the range `[-20%, +30%]`. `height_factor=0.2` results in an
output height shifted by a random amount in the range `[-20%, +20%]`.
width_factor: a float represented as fraction of value, or a tuple of size
2 representing lower and upper bound for shifting horizontally. A
negative value means shifting image left, while a positive value means
shifting image right. When represented as a single positive float,
this value is used for both the upper and lower bound. For instance,
            `width_factor=(-0.2, 0.3)` results in an output shifted
            horizontally by a random amount in the range `[-20%, +30%]`.
            `width_factor=0.2` results in an output shifted left or right
            by a random amount within 20%.
fill_mode: Points outside the boundaries of the input are filled according
to the given mode
(one of `{"constant", "reflect", "wrap", "nearest"}`).
- *reflect*: `(d c b a | a b c d | d c b a)` The input is extended by
reflecting about the edge of the last pixel.
- *constant*: `(k k k k | a b c d | k k k k)` The input is extended by
filling all values beyond the edge with the same constant value
k = 0.
- *wrap*: `(a b c d | a b c d | a b c d)` The input is extended by
wrapping around to the opposite edge.
- *nearest*: `(a a a a | a b c d | d d d d)` The input is extended by
the nearest pixel.
interpolation: Interpolation mode. Supported values: `"nearest"`,
`"bilinear"`.
seed: Integer. Used to create a random seed.
fill_value: a float represents the value to be filled outside the
boundaries when `fill_mode="constant"`.
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format.
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format.
"""
def __init__(
self,
height_factor,
width_factor,
fill_mode="reflect",
interpolation="bilinear",
seed=None,
fill_value=0.0,
**kwargs,
):
super().__init__(seed=seed, force_generator=True, **kwargs)
self.height_factor = height_factor
if isinstance(height_factor, (tuple, list)):
self.height_lower = height_factor[0]
self.height_upper = height_factor[1]
else:
self.height_lower = -height_factor
self.height_upper = height_factor
if self.height_upper < self.height_lower:
raise ValueError(
"`height_factor` cannot have upper bound less than "
f"lower bound, got {height_factor}"
)
if abs(self.height_lower) > 1.0 or abs(self.height_upper) > 1.0:
raise ValueError(
"`height_factor` argument must have values between [-1, 1]. "
f"Received: height_factor={height_factor}"
)
self.width_factor = width_factor
if isinstance(width_factor, (tuple, list)):
self.width_lower = width_factor[0]
self.width_upper = width_factor[1]
else:
self.width_lower = -width_factor
self.width_upper = width_factor
if self.width_upper < self.width_lower:
raise ValueError(
"`width_factor` cannot have upper bound less than "
f"lower bound, got {width_factor}"
)
if abs(self.width_lower) > 1.0 or abs(self.width_upper) > 1.0:
raise ValueError(
"`width_factor` must have values between [-1, 1], "
f"got {width_factor}"
)
check_fill_mode_and_interpolation(fill_mode, interpolation)
self.fill_mode = fill_mode
self.fill_value = fill_value
self.interpolation = interpolation
self.seed = seed
def call(self, inputs, training=True):
inputs = convert_inputs(inputs, self.compute_dtype)
def random_translated_inputs(inputs):
"""Translated inputs with random ops."""
# The transform op only accepts rank 4 inputs,
# so if we have an unbatched image,
# we need to temporarily expand dims to a batch.
original_shape = inputs.shape
unbatched = inputs.shape.rank == 3
if unbatched:
inputs = tf.expand_dims(inputs, 0)
inputs_shape = tf.shape(inputs)
batch_size = inputs_shape[0]
img_hd = tf.cast(inputs_shape[H_AXIS], tf.float32)
img_wd = tf.cast(inputs_shape[W_AXIS], tf.float32)
height_translate = self._random_generator.random_uniform(
shape=[batch_size, 1],
minval=self.height_lower,
maxval=self.height_upper,
dtype=tf.float32,
)
height_translate = height_translate * img_hd
width_translate = self._random_generator.random_uniform(
shape=[batch_size, 1],
minval=self.width_lower,
maxval=self.width_upper,
dtype=tf.float32,
)
width_translate = width_translate * img_wd
translations = tf.cast(
tf.concat([width_translate, height_translate], axis=1),
dtype=tf.float32,
)
output = transform(
inputs,
get_translation_matrix(translations),
interpolation=self.interpolation,
fill_mode=self.fill_mode,
fill_value=self.fill_value,
)
if unbatched:
output = tf.squeeze(output, 0)
output.set_shape(original_shape)
return output
if training:
return random_translated_inputs(inputs)
else:
return inputs
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {
"height_factor": self.height_factor,
"width_factor": self.width_factor,
"fill_mode": self.fill_mode,
"fill_value": self.fill_value,
"interpolation": self.interpolation,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
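# Minimal usage sketch for `RandomTranslation` (assumes the
# `tf.keras.layers.RandomTranslation` export; shapes are illustrative):
#
#   import tensorflow as tf
#   images = tf.random.uniform((2, 64, 64, 3))
#   layer = tf.keras.layers.RandomTranslation(
#       height_factor=0.1, width_factor=(-0.2, 0.2), fill_mode="constant"
#   )
#   shifted = layer(images, training=True)  # up to 10% vertical, 20% horizontal shift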
def get_translation_matrix(translations, name=None):
"""Returns projective transform(s) for the given translation(s).
Args:
translations: A matrix of 2-element lists representing `[dx, dy]`
to translate for each image (for a batch of images).
name: The name of the op.
Returns:
A tensor of shape `(num_images, 8)` projective transforms
which can be given to `transform`.
"""
with backend.name_scope(name or "translation_matrix"):
num_translations = tf.shape(translations)[0]
# The translation matrix looks like:
# [[1 0 -dx]
# [0 1 -dy]
# [0 0 1]]
# where the last entry is implicit.
# Translation matrices are always float32.
return tf.concat(
values=[
tf.ones((num_translations, 1), tf.float32),
tf.zeros((num_translations, 1), tf.float32),
-translations[:, 0, None],
tf.zeros((num_translations, 1), tf.float32),
tf.ones((num_translations, 1), tf.float32),
-translations[:, 1, None],
tf.zeros((num_translations, 2), tf.float32),
],
axis=1,
)
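# Worked example for `get_translation_matrix` (values are illustrative): for a
# single translation `[dx, dy] = [5.0, 10.0]`, the returned row is
# `[1, 0, -5, 0, 1, -10, 0, 0]`. `transform` then maps output pixel (x, y) to
# input pixel (x - 5, y - 10), so the image content moves right and down.
#
#   matrix = get_translation_matrix(tf.constant([[5.0, 10.0]]))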
def transform(
images,
transforms,
fill_mode="reflect",
fill_value=0.0,
interpolation="bilinear",
output_shape=None,
name=None,
):
"""Applies the given transform(s) to the image(s).
Args:
images: A tensor of shape
`(num_images, num_rows, num_columns, num_channels)` (NHWC).
The rank must be statically known
(the shape is not `TensorShape(None)`).
transforms: Projective transform matrix/matrices.
A vector of length 8 or tensor of size N x 8.
If one row of transforms is [a0, a1, a2, b0, b1, b2,
c0, c1], then it maps the *output* point `(x, y)`
to a transformed *input* point
`(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`, where
`k = c0 x + c1 y + 1`. The transforms are *inverted* compared to the
transform mapping input points to output points.
Note that gradients are not backpropagated
into transformation parameters.
fill_mode: Points outside the boundaries of the input are filled
according to the given mode
(one of `{"constant", "reflect", "wrap", "nearest"}`).
fill_value: a float represents the value to be filled outside
the boundaries when `fill_mode="constant"`.
interpolation: Interpolation mode. Supported values: `"nearest"`,
`"bilinear"`.
output_shape: Output dimension after the transform, `[height, width]`.
If `None`, output is the same size as input image.
name: The name of the op.
Fill mode behavior for each valid value is as follows:
- `"reflect"`: `(d c b a | a b c d | d c b a)`
The input is extended by reflecting about the edge of the last pixel.
- `"constant"`: `(k k k k | a b c d | k k k k)`
The input is extended by filling all
values beyond the edge with the same constant value k = 0.
- `"wrap"`: `(a b c d | a b c d | a b c d)`
The input is extended by wrapping around to the opposite edge.
- `"nearest"`: `(a a a a | a b c d | d d d d)`
The input is extended by the nearest pixel.
Input shape:
4D tensor with shape: `(samples, height, width, channels)`,
in `"channels_last"` format.
Output shape:
4D tensor with shape: `(samples, height, width, channels)`,
in `"channels_last"` format.
Returns:
Image(s) with the same type and shape as `images`, with the given
transform(s) applied. Transformed coordinates outside of the input image
will be filled with zeros.
"""
with backend.name_scope(name or "transform"):
if output_shape is None:
output_shape = tf.shape(images)[1:3]
if not tf.executing_eagerly():
output_shape_value = tf.get_static_value(output_shape)
if output_shape_value is not None:
output_shape = output_shape_value
output_shape = tf.convert_to_tensor(
output_shape, tf.int32, name="output_shape"
)
if not output_shape.get_shape().is_compatible_with([2]):
raise ValueError(
"output_shape must be a 1-D Tensor of 2 elements: "
"new_height, new_width, instead got "
f"output_shape={output_shape}"
)
fill_value = tf.convert_to_tensor(
fill_value, tf.float32, name="fill_value"
)
return tf.raw_ops.ImageProjectiveTransformV3(
images=images,
output_shape=output_shape,
fill_value=fill_value,
transforms=transforms,
fill_mode=fill_mode.upper(),
interpolation=interpolation.upper(),
)
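# Minimal usage sketch for `transform` (values are illustrative): shift a batch
# of images 5 px right and 10 px down with a single projective transform row
# `[a0, a1, a2, b0, b1, b2, c0, c1]` (note the negated offsets, since the row
# encodes the output-to-input mapping):
#
#   import tensorflow as tf
#   images = tf.random.uniform((1, 32, 32, 3))
#   transforms = tf.constant([[1.0, 0.0, -5.0, 0.0, 1.0, -10.0, 0.0, 0.0]])
#   shifted = transform(images, transforms, fill_mode="constant")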
def get_rotation_matrix(angles, image_height, image_width, name=None):
"""Returns projective transform(s) for the given angle(s).
Args:
angles: A scalar angle to rotate all images by,
or (for batches of images) a vector with an angle to
rotate each image in the batch. The rank must be
statically known (the shape is not `TensorShape(None)`).
image_height: Height of the image(s) to be transformed.
image_width: Width of the image(s) to be transformed.
name: The name of the op.
Returns:
A tensor of shape (num_images, 8).
Projective transforms which can be given
to operation `image_projective_transform_v2`.
If one row of transforms is
[a0, a1, a2, b0, b1, b2, c0, c1], then it maps the *output* point
`(x, y)` to a transformed *input* point
`(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`,
where `k = c0 x + c1 y + 1`.
"""
with backend.name_scope(name or "rotation_matrix"):
x_offset = (
(image_width - 1)
- (
tf.cos(angles) * (image_width - 1)
- tf.sin(angles) * (image_height - 1)
)
) / 2.0
y_offset = (
(image_height - 1)
- (
tf.sin(angles) * (image_width - 1)
+ tf.cos(angles) * (image_height - 1)
)
) / 2.0
num_angles = tf.shape(angles)[0]
return tf.concat(
values=[
tf.cos(angles)[:, None],
-tf.sin(angles)[:, None],
x_offset[:, None],
tf.sin(angles)[:, None],
tf.cos(angles)[:, None],
y_offset[:, None],
tf.zeros((num_angles, 2), tf.float32),
],
axis=1,
)
@keras_export(
"keras.layers.RandomRotation",
"keras.layers.experimental.preprocessing.RandomRotation",
v1=[],
)
class RandomRotation(base_layer.BaseRandomLayer):
"""A preprocessing layer which randomly rotates images during training.
This layer will apply random rotations to each image, filling empty space
according to `fill_mode`.
By default, random rotations are only applied during training.
At inference time, the layer does nothing. If you need to apply random
rotations at inference time, set `training` to True when calling the layer.
Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and
of integer or floating point dtype.
By default, the layer will output floats.
For an overview and full list of preprocessing layers, see the preprocessing
[guide](https://www.tensorflow.org/guide/keras/preprocessing_layers).
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format
Args:
factor: a float represented as fraction of 2 Pi, or a tuple of size 2
representing lower and upper bound for rotating clockwise and
            counter-clockwise. A positive value means rotating
            counter-clockwise,
            while a negative value means rotating clockwise.
When represented as a single
float, this value is used for both the upper and lower bound.
For instance, `factor=(-0.2, 0.3)`
results in an output rotation by a random
amount in the range `[-20% * 2pi, 30% * 2pi]`.
`factor=0.2` results in an
output rotating by a random amount
in the range `[-20% * 2pi, 20% * 2pi]`.
fill_mode: Points outside the boundaries of the input are filled
according to the given mode
(one of `{"constant", "reflect", "wrap", "nearest"}`).
- *reflect*: `(d c b a | a b c d | d c b a)`
The input is extended by reflecting about
the edge of the last pixel.
- *constant*: `(k k k k | a b c d | k k k k)`
The input is extended by
filling all values beyond the edge with
the same constant value k = 0.
- *wrap*: `(a b c d | a b c d | a b c d)` The input is extended by
wrapping around to the opposite edge.
- *nearest*: `(a a a a | a b c d | d d d d)`
The input is extended by the nearest pixel.
interpolation: Interpolation mode. Supported values: `"nearest"`,
`"bilinear"`.
seed: Integer. Used to create a random seed.
fill_value: a float represents the value to be filled outside
the boundaries when `fill_mode="constant"`.
"""
def __init__(
self,
factor,
fill_mode="reflect",
interpolation="bilinear",
seed=None,
fill_value=0.0,
**kwargs,
):
super().__init__(seed=seed, force_generator=True, **kwargs)
self.factor = factor
if isinstance(factor, (tuple, list)):
self.lower = factor[0]
self.upper = factor[1]
else:
self.lower = -factor
self.upper = factor
if self.upper < self.lower:
            raise ValueError(
                "`factor` argument cannot have an upper bound less than the "
                f"lower bound. Received: factor={factor}"
)
check_fill_mode_and_interpolation(fill_mode, interpolation)
self.fill_mode = fill_mode
self.fill_value = fill_value
self.interpolation = interpolation
self.seed = seed
def call(self, inputs, training=True):
inputs = convert_inputs(inputs, self.compute_dtype)
def random_rotated_inputs(inputs):
"""Rotated inputs with random ops."""
original_shape = inputs.shape
unbatched = inputs.shape.rank == 3
# The transform op only accepts rank 4 inputs,
# so if we have an unbatched image,
# we need to temporarily expand dims to a batch.
if unbatched:
inputs = tf.expand_dims(inputs, 0)
inputs_shape = tf.shape(inputs)
batch_size = inputs_shape[0]
img_hd = tf.cast(inputs_shape[H_AXIS], tf.float32)
img_wd = tf.cast(inputs_shape[W_AXIS], tf.float32)
min_angle = self.lower * 2.0 * np.pi
max_angle = self.upper * 2.0 * np.pi
angles = self._random_generator.random_uniform(
shape=[batch_size], minval=min_angle, maxval=max_angle
)
output = transform(
inputs,
get_rotation_matrix(angles, img_hd, img_wd),
fill_mode=self.fill_mode,
fill_value=self.fill_value,
interpolation=self.interpolation,
)
if unbatched:
output = tf.squeeze(output, 0)
output.set_shape(original_shape)
return output
if training:
return random_rotated_inputs(inputs)
else:
return inputs
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {
"factor": self.factor,
"fill_mode": self.fill_mode,
"fill_value": self.fill_value,
"interpolation": self.interpolation,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
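# Minimal usage sketch for `RandomRotation` (assumes the
# `tf.keras.layers.RandomRotation` export):
#
#   import tensorflow as tf
#   images = tf.random.uniform((2, 64, 64, 3))
#   layer = tf.keras.layers.RandomRotation(factor=0.1, fill_mode="reflect")
#   rotated = layer(images, training=True)  # angle drawn from [-0.1 * 2pi, 0.1 * 2pi]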
@keras_export(
"keras.layers.RandomZoom",
"keras.layers.experimental.preprocessing.RandomZoom",
v1=[],
)
class RandomZoom(base_layer.BaseRandomLayer):
"""A preprocessing layer which randomly zooms images during training.
This layer will randomly zoom in or out on each axis of an image
independently, filling empty space according to `fill_mode`.
Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and
of integer or floating point dtype.
By default, the layer will output floats.
For an overview and full list of preprocessing layers, see the preprocessing
[guide](https://www.tensorflow.org/guide/keras/preprocessing_layers).
Args:
height_factor: a float represented as fraction of value,
or a tuple of size 2 representing lower and upper bound
for zooming vertically. When represented as a single float,
this value is used for both the upper and
lower bound. A positive value means zooming out,
while a negative value
means zooming in. For instance, `height_factor=(0.2, 0.3)`
            results in an output zoomed out by a random amount
            in the range `[+20%, +30%]`.
            `height_factor=(-0.3, -0.2)` results in an output zoomed
            in by a random amount in the range `[+20%, +30%]`.
width_factor: a float represented as fraction of value,
or a tuple of size 2 representing lower and upper bound
for zooming horizontally. When
represented as a single float, this value is used
for both the upper and
lower bound. For instance, `width_factor=(0.2, 0.3)`
            results in an output
            zooming out by between 20% and 30%.
            `width_factor=(-0.3, -0.2)` results in an
            output zooming in by between 20% and 30%. `None` means
            to zoom in the vertical and horizontal directions by the
            same amount, preserving the aspect ratio. Defaults to `None`.
fill_mode: Points outside the boundaries of the input are
filled according to the given mode
(one of `{"constant", "reflect", "wrap", "nearest"}`).
- *reflect*: `(d c b a | a b c d | d c b a)`
The input is extended by reflecting about
the edge of the last pixel.
- *constant*: `(k k k k | a b c d | k k k k)`
The input is extended by filling all values beyond
the edge with the same constant value k = 0.
- *wrap*: `(a b c d | a b c d | a b c d)` The input is extended by
wrapping around to the opposite edge.
- *nearest*: `(a a a a | a b c d | d d d d)`
The input is extended by the nearest pixel.
interpolation: Interpolation mode. Supported values: `"nearest"`,
`"bilinear"`.
seed: Integer. Used to create a random seed.
fill_value: a float represents the value to be filled outside
the boundaries when `fill_mode="constant"`.
Example:
>>> input_img = np.random.random((32, 224, 224, 3))
>>> layer = tf.keras.layers.RandomZoom(.5, .2)
>>> out_img = layer(input_img)
>>> out_img.shape
TensorShape([32, 224, 224, 3])
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format.
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format.
"""
def __init__(
self,
height_factor,
width_factor=None,
fill_mode="reflect",
interpolation="bilinear",
seed=None,
fill_value=0.0,
**kwargs,
):
super().__init__(seed=seed, force_generator=True, **kwargs)
self.height_factor = height_factor
if isinstance(height_factor, (tuple, list)):
self.height_lower = height_factor[0]
self.height_upper = height_factor[1]
else:
self.height_lower = -height_factor
self.height_upper = height_factor
if abs(self.height_lower) > 1.0 or abs(self.height_upper) > 1.0:
raise ValueError(
"`height_factor` argument must have values between [-1, 1]. "
f"Received: height_factor={height_factor}"
)
self.width_factor = width_factor
if width_factor is not None:
if isinstance(width_factor, (tuple, list)):
self.width_lower = width_factor[0]
self.width_upper = width_factor[1]
else:
self.width_lower = -width_factor
self.width_upper = width_factor
if self.width_lower < -1.0 or self.width_upper < -1.0:
raise ValueError(
"`width_factor` argument must have values larger than -1. "
f"Received: width_factor={width_factor}"
)
check_fill_mode_and_interpolation(fill_mode, interpolation)
self.fill_mode = fill_mode
self.fill_value = fill_value
self.interpolation = interpolation
self.seed = seed
def call(self, inputs, training=True):
inputs = convert_inputs(inputs, self.compute_dtype)
def random_zoomed_inputs(inputs):
"""Zoomed inputs with random ops."""
original_shape = inputs.shape
unbatched = inputs.shape.rank == 3
# The transform op only accepts rank 4 inputs,
# so if we have an unbatched image,
# we need to temporarily expand dims to a batch.
if unbatched:
inputs = tf.expand_dims(inputs, 0)
inputs_shape = tf.shape(inputs)
batch_size = inputs_shape[0]
img_hd = tf.cast(inputs_shape[H_AXIS], tf.float32)
img_wd = tf.cast(inputs_shape[W_AXIS], tf.float32)
height_zoom = self._random_generator.random_uniform(
shape=[batch_size, 1],
minval=1.0 + self.height_lower,
maxval=1.0 + self.height_upper,
)
if self.width_factor is not None:
width_zoom = self._random_generator.random_uniform(
shape=[batch_size, 1],
minval=1.0 + self.width_lower,
maxval=1.0 + self.width_upper,
)
else:
width_zoom = height_zoom
zooms = tf.cast(
tf.concat([width_zoom, height_zoom], axis=1), dtype=tf.float32
)
output = transform(
inputs,
get_zoom_matrix(zooms, img_hd, img_wd),
fill_mode=self.fill_mode,
fill_value=self.fill_value,
interpolation=self.interpolation,
)
if unbatched:
output = tf.squeeze(output, 0)
output.set_shape(original_shape)
return output
if training:
return random_zoomed_inputs(inputs)
else:
return inputs
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {
"height_factor": self.height_factor,
"width_factor": self.width_factor,
"fill_mode": self.fill_mode,
"fill_value": self.fill_value,
"interpolation": self.interpolation,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def get_zoom_matrix(zooms, image_height, image_width, name=None):
"""Returns projective transform(s) for the given zoom(s).
Args:
zooms: A matrix of 2-element lists representing `[zx, zy]`
to zoom for each image (for a batch of images).
image_height: Height of the image(s) to be transformed.
image_width: Width of the image(s) to be transformed.
name: The name of the op.
Returns:
A tensor of shape `(num_images, 8)`. Projective transforms which can be
given to operation `image_projective_transform_v2`.
If one row of transforms is
`[a0, a1, a2, b0, b1, b2, c0, c1]`, then it maps the *output* point
`(x, y)` to a transformed *input* point
`(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`,
where `k = c0 x + c1 y + 1`.
"""
with backend.name_scope(name or "zoom_matrix"):
num_zooms = tf.shape(zooms)[0]
# The zoom matrix looks like:
# [[zx 0 0]
# [0 zy 0]
# [0 0 1]]
# where the last entry is implicit.
# Zoom matrices are always float32.
x_offset = ((image_width - 1.0) / 2.0) * (1.0 - zooms[:, 0, None])
y_offset = ((image_height - 1.0) / 2.0) * (1.0 - zooms[:, 1, None])
return tf.concat(
values=[
zooms[:, 0, None],
tf.zeros((num_zooms, 1), tf.float32),
x_offset,
tf.zeros((num_zooms, 1), tf.float32),
zooms[:, 1, None],
y_offset,
tf.zeros((num_zooms, 2), tf.float32),
],
axis=1,
)
@keras_export(
"keras.layers.RandomContrast",
"keras.layers.experimental.preprocessing.RandomContrast",
v1=[],
)
class RandomContrast(base_layer.BaseRandomLayer):
"""A preprocessing layer which randomly adjusts contrast during training.
This layer will randomly adjust the contrast of an image or images
by a random factor. Contrast is adjusted independently
for each channel of each image during training.
For each channel, this layer computes the mean of the image pixels in the
channel and then adjusts each component `x` of each pixel to
`(x - mean) * contrast_factor + mean`.
Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and
in integer or floating point dtype.
By default, the layer will output floats.
The output value will be clipped to the range `[0, 255]`, the valid
range of RGB colors.
For an overview and full list of preprocessing layers, see the preprocessing
[guide](https://www.tensorflow.org/guide/keras/preprocessing_layers).
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format.
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format.
Args:
factor: a positive float represented as fraction of value, or a tuple of
size 2 representing lower and upper bound.
When represented as a single float, lower = upper.
The contrast factor will be randomly picked between
`[1.0 - lower, 1.0 + upper]`. For any pixel x in the channel,
the output will be `(x - mean) * factor + mean`
where `mean` is the mean value of the channel.
seed: Integer. Used to create a random seed.
"""
def __init__(self, factor, seed=None, **kwargs):
super().__init__(seed=seed, force_generator=True, **kwargs)
self.factor = factor
if isinstance(factor, (tuple, list)):
self.lower = factor[0]
self.upper = factor[1]
else:
self.lower = self.upper = factor
if self.lower < 0.0 or self.upper < 0.0 or self.lower > 1.0:
            raise ValueError(
                "`factor` argument cannot have negative values or values "
                "greater than 1. "
f"Received: factor={factor}"
)
self.seed = seed
def call(self, inputs, training=True):
inputs = convert_inputs(inputs, self.compute_dtype)
def random_contrasted_inputs(inputs):
seed = self._random_generator.make_seed_for_stateless_op()
if seed is not None:
output = tf.image.stateless_random_contrast(
inputs, 1.0 - self.lower, 1.0 + self.upper, seed=seed
)
else:
output = tf.image.random_contrast(
inputs,
1.0 - self.lower,
1.0 + self.upper,
seed=self._random_generator.make_legacy_seed(),
)
output = tf.clip_by_value(output, 0, 255)
output.set_shape(inputs.shape)
return output
if training:
return random_contrasted_inputs(inputs)
else:
return inputs
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {
"factor": self.factor,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
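# Minimal usage sketch for `RandomContrast` (assumes the
# `tf.keras.layers.RandomContrast` export):
#
#   import tensorflow as tf
#   images = tf.random.uniform((2, 64, 64, 3), maxval=255.0)
#   layer = tf.keras.layers.RandomContrast(factor=0.2, seed=7)
#   adjusted = layer(images, training=True)  # contrast factor drawn from [0.8, 1.2]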
@keras_export("keras.layers.RandomBrightness", v1=[])
class RandomBrightness(base_layer.BaseRandomLayer):
"""A preprocessing layer which randomly adjusts brightness during training.
This layer will randomly increase/reduce the brightness for the input RGB
images. At inference time, the output will be identical to the input.
Call the layer with `training=True` to adjust the brightness of the input.
Note that different brightness adjustment factors
    will be applied to each of the images in the batch.
For an overview and full list of preprocessing layers, see the preprocessing
[guide](https://www.tensorflow.org/guide/keras/preprocessing_layers).
Args:
factor: Float or a list/tuple of 2 floats between -1.0 and 1.0. The
factor is used to determine the lower bound and upper bound of the
brightness adjustment. A float value will be chosen randomly between
the limits. When -1.0 is chosen, the output image will be black, and
when 1.0 is chosen, the image will be fully white.
When only one float is provided, eg, 0.2,
then -0.2 will be used for lower bound and 0.2
will be used for upper bound.
value_range: Optional list/tuple of 2 floats
for the lower and upper limit
of the values of the input data.
To make no change, use [0.0, 1.0], e.g., if the image input
has been scaled before this layer. Defaults to [0.0, 255.0].
The brightness adjustment will be scaled to this range, and the
output values will be clipped to this range.
seed: optional integer, for fixed RNG behavior.
Inputs: 3D (HWC) or 4D (NHWC) tensor, with float or int dtype. Input pixel
values can be of any range (e.g. `[0., 1.)` or `[0, 255]`)
Output: 3D (HWC) or 4D (NHWC) tensor with brightness adjusted based on the
`factor`. By default, the layer will output floats.
The output value will be clipped to the range `[0, 255]`,
the valid range of RGB colors, and
rescaled based on the `value_range` if needed.
Sample usage:
```python
random_bright = tf.keras.layers.RandomBrightness(factor=0.2)
# An image with shape [2, 2, 3]
    image = [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]
    # Assume we randomly select the factor to be 0.1, then it will apply
    # 0.1 * 255 to all the channels.
    output = random_bright(image, training=True)
    # output will be a float tensor with 25.5 added to each channel,
    # clipped to the `value_range`:
    tf.Tensor([[[26.5, 27.5, 28.5]
                [29.5, 30.5, 31.5]]
               [[32.5, 33.5, 34.5]
                [35.5, 36.5, 37.5]]],
              shape=(2, 2, 3), dtype=float32)
```
"""
_FACTOR_VALIDATION_ERROR = (
"The `factor` argument should be a number (or a list of two numbers) "
"in the range [-1.0, 1.0]. "
)
_VALUE_RANGE_VALIDATION_ERROR = (
"The `value_range` argument should be a list of two numbers. "
)
def __init__(self, factor, value_range=(0, 255), seed=None, **kwargs):
super().__init__(seed=seed, force_generator=True, **kwargs)
self._set_factor(factor)
self._set_value_range(value_range)
self._seed = seed
def _set_value_range(self, value_range):
if not isinstance(value_range, (tuple, list)):
raise ValueError(
self._VALUE_RANGE_VALIDATION_ERROR + f"Got {value_range}"
)
if len(value_range) != 2:
raise ValueError(
self._VALUE_RANGE_VALIDATION_ERROR + f"Got {value_range}"
)
self._value_range = sorted(value_range)
def _set_factor(self, factor):
if isinstance(factor, (tuple, list)):
if len(factor) != 2:
raise ValueError(
self._FACTOR_VALIDATION_ERROR + f"Got {factor}"
)
self._check_factor_range(factor[0])
self._check_factor_range(factor[1])
self._factor = sorted(factor)
elif isinstance(factor, (int, float)):
self._check_factor_range(factor)
factor = abs(factor)
self._factor = [-factor, factor]
else:
raise ValueError(self._FACTOR_VALIDATION_ERROR + f"Got {factor}")
def _check_factor_range(self, input_number):
if input_number > 1.0 or input_number < -1.0:
raise ValueError(
self._FACTOR_VALIDATION_ERROR + f"Got {input_number}"
)
def call(self, inputs, training=True):
inputs = convert_inputs(inputs, dtype=self.compute_dtype)
if training:
return self._brightness_adjust(inputs)
else:
return inputs
def _brightness_adjust(self, images):
rank = images.shape.rank
if rank == 3:
rgb_delta_shape = (1, 1, 1)
elif rank == 4:
            # Keep only the batch dim. This will ensure the same adjustment
            # is used within one image, but differs across the images.
rgb_delta_shape = [tf.shape(images)[0], 1, 1, 1]
else:
raise ValueError(
"Expected the input image to be rank 3 or 4. Got "
f"inputs.shape = {images.shape}"
)
rgb_delta = self._random_generator.random_uniform(
shape=rgb_delta_shape,
minval=self._factor[0],
maxval=self._factor[1],
)
rgb_delta = rgb_delta * (self._value_range[1] - self._value_range[0])
rgb_delta = tf.cast(rgb_delta, images.dtype)
images += rgb_delta
return tf.clip_by_value(
images, self._value_range[0], self._value_range[1]
)
def get_config(self):
config = {
"factor": self._factor,
"value_range": self._value_range,
"seed": self._seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export(
"keras.layers.RandomHeight",
"keras.layers.experimental.preprocessing.RandomHeight",
v1=[],
)
class RandomHeight(base_layer.BaseRandomLayer):
"""A preprocessing layer which randomly varies image height during training.
This layer adjusts the height of a batch of images by a random factor.
The input should be a 3D (unbatched) or 4D (batched) tensor in the
`"channels_last"` image data format. Input pixel values can be of any range
(e.g. `[0., 1.)` or `[0, 255]`) and of integer or floating point dtype. By
default, the layer will output floats.
By default, this layer is inactive during inference.
For an overview and full list of preprocessing layers, see the preprocessing
[guide](https://www.tensorflow.org/guide/keras/preprocessing_layers).
Args:
factor: A positive float (fraction of original height),
or a tuple of size 2 representing lower and upper bound
for resizing vertically. When represented as a single float,
this value is used for both the upper and
lower bound. For instance, `factor=(0.2, 0.3)` results
in an output with
height changed by a random amount in the range `[20%, 30%]`.
`factor=(-0.2, 0.3)` results in an output with height
changed by a random amount in the range `[-20%, +30%]`.
`factor=0.2` results in an output with
height changed by a random amount in the range `[-20%, +20%]`.
interpolation: String, the interpolation method.
Supports `"bilinear"`, `"nearest"`, `"bicubic"`, `"area"`,
`"lanczos3"`, `"lanczos5"`, `"gaussian"`, `"mitchellcubic"`.
Defaults to `"bilinear"`.
seed: Integer. Used to create a random seed.
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format.
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., random_height, width, channels)`.
"""
def __init__(self, factor, interpolation="bilinear", seed=None, **kwargs):
super().__init__(seed=seed, force_generator=True, **kwargs)
self.factor = factor
if isinstance(factor, (tuple, list)):
self.height_lower = factor[0]
self.height_upper = factor[1]
else:
self.height_lower = -factor
self.height_upper = factor
if self.height_upper < self.height_lower:
            raise ValueError(
                "`factor` argument cannot have an upper bound less than the "
f"lower bound. Received: factor={factor}"
)
if self.height_lower < -1.0 or self.height_upper < -1.0:
raise ValueError(
"`factor` argument must have values larger than -1. "
f"Received: factor={factor}"
)
self.interpolation = interpolation
self._interpolation_method = image_utils.get_interpolation(
interpolation
)
self.seed = seed
def call(self, inputs, training=True):
inputs = convert_inputs(inputs)
def random_height_inputs(inputs):
"""Inputs height-adjusted with random ops."""
inputs_shape = tf.shape(inputs)
img_hd = tf.cast(inputs_shape[H_AXIS], tf.float32)
img_wd = inputs_shape[W_AXIS]
height_factor = self._random_generator.random_uniform(
shape=[],
minval=(1.0 + self.height_lower),
maxval=(1.0 + self.height_upper),
)
adjusted_height = tf.cast(height_factor * img_hd, tf.int32)
adjusted_size = tf.stack([adjusted_height, img_wd])
output = tf.image.resize(
images=inputs,
size=adjusted_size,
method=self._interpolation_method,
)
            # tf.image.resize will output float32 regardless of input type.
output = tf.cast(output, self.compute_dtype)
output_shape = inputs.shape.as_list()
output_shape[H_AXIS] = None
output.set_shape(output_shape)
return output
if training:
return random_height_inputs(inputs)
else:
return inputs
def compute_output_shape(self, input_shape):
input_shape = tf.TensorShape(input_shape).as_list()
input_shape[H_AXIS] = None
return tf.TensorShape(input_shape)
def get_config(self):
config = {
"factor": self.factor,
"interpolation": self.interpolation,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
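# Minimal usage sketch for `RandomHeight` (assumes the
# `tf.keras.layers.RandomHeight` export); only the height axis changes:
#
#   import tensorflow as tf
#   images = tf.random.uniform((2, 100, 100, 3))
#   out = tf.keras.layers.RandomHeight(factor=0.2)(images, training=True)
#   # out.shape == (2, h, 100, 3) with h drawn from roughly [80, 120]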
@keras_export(
"keras.layers.RandomWidth",
"keras.layers.experimental.preprocessing.RandomWidth",
v1=[],
)
class RandomWidth(base_layer.BaseRandomLayer):
"""A preprocessing layer which randomly varies image width during training.
    This layer will randomly adjust the width of a batch of images by a
    random factor. The input should be a 3D (unbatched) or
4D (batched) tensor in the `"channels_last"` image data format. Input pixel
values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and of integer or
floating point dtype. By default, the layer will output floats.
By default, this layer is inactive during inference.
For an overview and full list of preprocessing layers, see the preprocessing
[guide](https://www.tensorflow.org/guide/keras/preprocessing_layers).
Args:
factor: A positive float (fraction of original width),
or a tuple of size 2 representing lower and upper bound
for resizing horizontally. When represented as a single float,
this value is used for both the upper and
lower bound. For instance, `factor=(0.2, 0.3)`
results in an output with
width changed by a random amount in the range `[20%, 30%]`.
`factor=(-0.2, 0.3)` results in an output with width changed
by a random amount in the range `[-20%, +30%]`.
`factor=0.2` results in an output with width changed
by a random amount in the range `[-20%, +20%]`.
interpolation: String, the interpolation method.
Supports `"bilinear"`, `"nearest"`, `"bicubic"`, `"area"`,
`"lanczos3"`, `"lanczos5"`, `"gaussian"`, `"mitchellcubic"`.
            Defaults to `"bilinear"`.
seed: Integer. Used to create a random seed.
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format.
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, random_width, channels)`.
"""
def __init__(self, factor, interpolation="bilinear", seed=None, **kwargs):
super().__init__(seed=seed, force_generator=True, **kwargs)
self.factor = factor
if isinstance(factor, (tuple, list)):
self.width_lower = factor[0]
self.width_upper = factor[1]
else:
self.width_lower = -factor
self.width_upper = factor
if self.width_upper < self.width_lower:
raise ValueError(
"`factor` argument cannot have an upper bound less than the "
f"lower bound. Received: factor={factor}"
)
if self.width_lower < -1.0 or self.width_upper < -1.0:
raise ValueError(
"`factor` argument must have values larger than -1. "
f"Received: factor={factor}"
)
self.interpolation = interpolation
self._interpolation_method = image_utils.get_interpolation(
interpolation
)
self.seed = seed
def call(self, inputs, training=True):
inputs = convert_inputs(inputs)
def random_width_inputs(inputs):
"""Inputs width-adjusted with random ops."""
inputs_shape = tf.shape(inputs)
img_hd = inputs_shape[H_AXIS]
img_wd = tf.cast(inputs_shape[W_AXIS], tf.float32)
width_factor = self._random_generator.random_uniform(
shape=[],
minval=(1.0 + self.width_lower),
maxval=(1.0 + self.width_upper),
)
adjusted_width = tf.cast(width_factor * img_wd, tf.int32)
adjusted_size = tf.stack([img_hd, adjusted_width])
output = tf.image.resize(
images=inputs,
size=adjusted_size,
method=self._interpolation_method,
)
            # tf.image.resize will output float32 regardless of input type.
output = tf.cast(output, self.compute_dtype)
output_shape = inputs.shape.as_list()
output_shape[W_AXIS] = None
output.set_shape(output_shape)
return output
if training:
return random_width_inputs(inputs)
else:
return inputs
def compute_output_shape(self, input_shape):
input_shape = tf.TensorShape(input_shape).as_list()
input_shape[W_AXIS] = None
return tf.TensorShape(input_shape)
def get_config(self):
config = {
"factor": self.factor,
"interpolation": self.interpolation,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
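# Minimal usage sketch for `RandomWidth` (assumes the
# `tf.keras.layers.RandomWidth` export); only the width axis changes:
#
#   import tensorflow as tf
#   images = tf.random.uniform((2, 100, 100, 3))
#   out = tf.keras.layers.RandomWidth(factor=(0.1, 0.3))(images, training=True)
#   # out.shape == (2, 100, w, 3) with w drawn from roughly [110, 130]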
def convert_inputs(inputs, dtype=None):
if isinstance(inputs, dict):
raise ValueError(
"This layer can only process a tensor representing an image or "
f"a batch of images. Received: type(inputs)={type(inputs)}."
"If you need to pass a dict containing "
"images, labels, and bounding boxes, you should "
"instead use the preprocessing and augmentation layers "
"from `keras_cv.layers`. See docs at "
"https://keras.io/api/keras_cv/layers/"
)
inputs = utils.ensure_tensor(inputs, dtype=dtype)
return inputs
| tf-keras/tf_keras/layers/preprocessing/image_preprocessing.py/0 | {
"file_path": "tf-keras/tf_keras/layers/preprocessing/image_preprocessing.py",
"repo_id": "tf-keras",
"token_count": 30240
} | 227 |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for preprocessing utils."""
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
from tf_keras.layers.preprocessing import preprocessing_utils
from tf_keras.testing_infra import test_combinations
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class ListifyTensorsTest(test_combinations.TestCase):
def test_tensor_input(self):
inputs = tf.constant([0, 1, 2, 3, 4])
outputs = preprocessing_utils.listify_tensors(inputs)
self.assertAllEqual([0, 1, 2, 3, 4], outputs)
self.assertIsInstance(outputs, list)
def test_numpy_input(self):
inputs = np.array([0, 1, 2, 3, 4])
outputs = preprocessing_utils.listify_tensors(inputs)
self.assertAllEqual([0, 1, 2, 3, 4], outputs)
self.assertIsInstance(outputs, list)
@test_combinations.run_all_keras_modes
class EncodeCategoricalInputsTest(test_combinations.TestCase):
def test_int_encoding(self):
inputs = tf.constant([0, 1, 2])
outputs = preprocessing_utils.encode_categorical_inputs(
inputs, output_mode="int", depth=4
)
self.assertAllEqual([0, 1, 2], outputs)
@parameterized.named_parameters(
("sparse", True),
("dense", False),
)
def test_one_hot_encoding(self, sparse):
inputs = tf.constant([0, 1, 2])
outputs = preprocessing_utils.encode_categorical_inputs(
inputs, output_mode="one_hot", depth=4, sparse=sparse
)
if sparse:
outputs = tf.sparse.to_dense(outputs)
self.assertAllEqual([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]], outputs)
@parameterized.named_parameters(
("sparse", True),
("dense", False),
)
def test_multi_hot_encoding(self, sparse):
inputs = tf.constant([0, 1, 2])
outputs = preprocessing_utils.encode_categorical_inputs(
inputs, output_mode="multi_hot", depth=4, sparse=sparse
)
if sparse:
outputs = tf.sparse.to_dense(outputs)
self.assertAllEqual([1, 1, 1, 0], outputs)
@parameterized.named_parameters(
("sparse", True),
("dense", False),
)
def test_count_encoding(self, sparse):
inputs = tf.constant([0, 1, 1, 2, 2, 2])
outputs = preprocessing_utils.encode_categorical_inputs(
inputs, output_mode="count", depth=4, sparse=sparse
)
if sparse:
outputs = tf.sparse.to_dense(outputs)
self.assertAllEqual([1, 2, 3, 0], outputs)
@parameterized.named_parameters(
("sparse", True),
("dense", False),
)
def test_tf_idf_encoding(self, sparse):
inputs = tf.constant([0, 1, 1, 2, 2, 2])
outputs = preprocessing_utils.encode_categorical_inputs(
inputs,
output_mode="tf_idf",
depth=4,
sparse=sparse,
idf_weights=[0.1, 1.0, 10.0, 0],
)
if sparse:
outputs = tf.sparse.to_dense(outputs)
self.assertAllClose([0.1, 2, 30, 0], outputs)
def test_output_dtype(self):
inputs = tf.constant([0, 1, 2], dtype=tf.dtypes.int32)
outputs = preprocessing_utils.encode_categorical_inputs(
inputs, output_mode="int", depth=4, dtype=tf.dtypes.int64
)
self.assertAllEqual(outputs.dtype, tf.dtypes.int64)
outputs = preprocessing_utils.encode_categorical_inputs(
inputs, output_mode="one_hot", depth=4, dtype=tf.dtypes.float64
)
self.assertAllEqual(outputs.dtype, tf.dtypes.float64)
def test_rank_3_output_fails(self):
inputs = tf.constant([[[0]], [[1]], [[2]]])
with self.assertRaisesRegex(
ValueError, "maximum supported output rank is 2"
):
preprocessing_utils.encode_categorical_inputs(
inputs, "multi_hot", 4, "float32"
)
def test_tf_idf_output_with_no_weights_fails(self):
inputs = tf.constant([0, 1, 2])
with self.assertRaisesRegex(ValueError, "idf_weights must be provided"):
preprocessing_utils.encode_categorical_inputs(
inputs, "tf_idf", 4, "float32"
)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/layers/preprocessing/preprocessing_utils_test.py/0 | {
"file_path": "tf-keras/tf_keras/layers/preprocessing/preprocessing_utils_test.py",
"repo_id": "tf-keras",
"token_count": 2173
} | 228 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the GaussianNoise layer."""
import tensorflow.compat.v2 as tf
from tf_keras import backend
from tf_keras.engine import base_layer
from tf_keras.utils import tf_utils
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.layers.GaussianNoise")
class GaussianNoise(base_layer.BaseRandomLayer):
"""Apply additive zero-centered Gaussian noise.
This is useful to mitigate overfitting
(you could see it as a form of random data augmentation).
    Gaussian Noise (GN) is a natural choice as a corruption process
    for real valued inputs.
As it is a regularization layer, it is only active at training time.
Args:
stddev: Float, standard deviation of the noise distribution.
seed: Integer, optional random seed to enable deterministic behavior.
Call arguments:
inputs: Input tensor (of any rank).
training: Python boolean indicating whether the layer should behave in
training mode (adding noise) or in inference mode (doing nothing).
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as input.
"""
def __init__(self, stddev, seed=None, **kwargs):
super().__init__(seed=seed, **kwargs)
self.supports_masking = True
self.stddev = stddev
self.seed = seed
def call(self, inputs, training=None):
def noised():
return inputs + self._random_generator.random_normal(
shape=tf.shape(inputs),
mean=0.0,
stddev=self.stddev,
dtype=inputs.dtype,
)
return backend.in_train_phase(noised, inputs, training=training)
def get_config(self):
config = {"stddev": self.stddev, "seed": self.seed}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
return input_shape
| tf-keras/tf_keras/layers/regularization/gaussian_noise.py/0 | {
"file_path": "tf-keras/tf_keras/layers/regularization/gaussian_noise.py",
"repo_id": "tf-keras",
"token_count": 970
} | 229 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the RepeatVector layer."""
import tensorflow.compat.v2 as tf
from tf_keras import backend
from tf_keras.engine.base_layer import Layer
from tf_keras.engine.input_spec import InputSpec
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.layers.RepeatVector")
class RepeatVector(Layer):
"""Repeats the input n times.
Example:
```python
model = Sequential()
model.add(Dense(32, input_dim=32))
# now: model.output_shape == (None, 32)
# note: `None` is the batch dimension
model.add(RepeatVector(3))
# now: model.output_shape == (None, 3, 32)
```
Args:
n: Integer, repetition factor.
Input shape: 2D tensor of shape `(num_samples, features)`.
Output shape: 3D tensor of shape `(num_samples, n, features)`.
"""
def __init__(self, n, **kwargs):
super().__init__(**kwargs)
self.n = n
if not isinstance(n, int):
raise TypeError(
f"Expected an integer value for `n`, got {type(n)}."
)
self.input_spec = InputSpec(ndim=2)
def compute_output_shape(self, input_shape):
input_shape = tf.TensorShape(input_shape).as_list()
return tf.TensorShape([input_shape[0], self.n, input_shape[1]])
def call(self, inputs):
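        # (num_samples, features) -> (num_samples, n, features)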
return backend.repeat(inputs, self.n)
def get_config(self):
config = {"n": self.n}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
| tf-keras/tf_keras/layers/reshaping/repeat_vector.py/0 | {
"file_path": "tf-keras/tf_keras/layers/reshaping/repeat_vector.py",
"repo_id": "tf-keras",
"token_count": 798
} | 230 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for convolutional-recurrent layers."""
import numpy as np
import tensorflow.compat.v2 as tf
from tf_keras import backend
from tf_keras.engine import base_layer
from tf_keras.engine.input_spec import InputSpec
from tf_keras.layers.rnn.base_rnn import RNN
from tf_keras.utils import conv_utils
from tf_keras.utils import generic_utils
from tf_keras.utils import tf_utils
class ConvRNN(RNN):
"""N-Dimensional Base class for convolutional-recurrent layers.
Args:
rank: Integer, rank of the convolution, e.g. "2" for 2D convolutions.
      cell: An RNN cell instance. An RNN cell is a class that has:
        - a `call(input_at_t, states_at_t)` method, returning `(output_at_t,
          states_at_t_plus_1)`. The call method of the cell can also take the
          optional argument `constants`, see section "Note on passing external
          constants" below.
        - a `state_size` attribute. This can be a single integer (single
          state) in which case it is the number of channels of the recurrent
          state (which should be the same as the number of channels of the
          cell output). This can also be a list/tuple of integers (one size
          per state). In this case, the first entry (`state_size[0]`) should
          be the same as the size of the cell output.
    return_sequences: Boolean. Whether to return the last output in the
output sequence, or the full sequence.
return_state: Boolean. Whether to return the last state in addition to the
output.
go_backwards: Boolean (default False). If True, process the input sequence
backwards and return the reversed sequence.
stateful: Boolean (default False). If True, the last state for each sample
at index i in a batch will be used as initial state for the sample of
index i in the following batch.
input_shape: Use this argument to specify the shape of the input when this
layer is the first one in a model.
Call arguments:
inputs: A (2 + `rank`)D tensor.
mask: Binary tensor of shape `(samples, timesteps)` indicating whether a
given timestep should be masked.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. This argument is passed to the cell
when calling it. This is for use with cells that use dropout.
initial_state: List of initial state tensors to be passed to the first
call of the cell.
constants: List of constant tensors to be passed to the cell at each
timestep.
Input shape:
(3 + `rank`)D tensor with shape: `(samples, timesteps, channels,
img_dimensions...)`
if data_format='channels_first' or shape: `(samples, timesteps,
img_dimensions..., channels)` if data_format='channels_last'.
Output shape:
- If `return_state`: a list of tensors. The first tensor is the output.
The remaining tensors are the last states,
each (2 + `rank`)D tensor with shape: `(samples, filters,
new_img_dimensions...)` if data_format='channels_first'
or shape: `(samples, new_img_dimensions..., filters)` if
data_format='channels_last'. img_dimension values might have changed
due to padding.
- If `return_sequences`: (3 + `rank`)D tensor with shape: `(samples,
timesteps, filters, new_img_dimensions...)` if
data_format='channels_first'
or shape: `(samples, timesteps, new_img_dimensions..., filters)` if
data_format='channels_last'.
- Else, (2 + `rank`)D tensor with shape: `(samples, filters,
new_img_dimensions...)` if data_format='channels_first'
or shape: `(samples, new_img_dimensions..., filters)` if
data_format='channels_last'.
Masking: This layer supports masking for input data with a variable number
of timesteps.
Note on using statefulness in RNNs: You can set RNN layers to be 'stateful',
which means that the states computed for the samples in one batch will be
reused as initial states for the samples in the next batch. This assumes a
one-to-one mapping between samples in different successive batches.
    To enable statefulness:
      - Specify `stateful=True` in the layer constructor.
      - Specify a fixed batch size for your model, by passing
        - If sequential model: `batch_input_shape=(...)` to the first layer
          in your model.
        - If functional model with 1 or more Input layers: `batch_shape=(...)`
          to all the first layers in your model. This is the expected shape of
          your inputs *including the batch size*. It should be a tuple of
          integers, e.g. `(32, 10, 100, 100, 32)` for a rank-2 convolution.
          Note that the image dimensions should be specified too.
      - Specify `shuffle=False` when calling `fit()`.
    To reset the states of your model, call `.reset_states()` on either a
    specific layer, or on your entire model.
Note on specifying the initial state of RNNs: You can specify the initial
state of RNN layers symbolically by calling them with the keyword argument
`initial_state`. The value of `initial_state` should be a tensor or list
of tensors representing the initial state of the RNN layer. You can
specify the initial state of RNN layers numerically by calling
`reset_states` with the keyword argument `states`. The value of `states`
should be a numpy array or list of numpy arrays representing the initial
state of the RNN layer.
Note on passing external constants to RNNs: You can pass "external"
constants to the cell using the `constants` keyword argument of
`RNN.__call__` (as well as `RNN.call`) method. This requires that the
`cell.call` method accepts the same keyword argument `constants`. Such
constants can be used to condition the cell transformation on additional
static inputs (not changing over time), a.k.a. an attention mechanism.
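    Example (an illustrative sketch using `tf.keras.layers.ConvLSTM2D`, a
    concrete subclass of this base class; the shapes below are arbitrary):
    ```python
    import numpy as np
    import tensorflow as tf
    # 4 samples, 10 timesteps, 32x32 frames with 3 channels (channels_last).
    x = np.random.random((4, 10, 32, 32, 3)).astype("float32")
    layer = tf.keras.layers.ConvLSTM2D(filters=8, kernel_size=3)
    y = layer(x)  # With "valid" padding, `y` has shape (4, 30, 30, 8).
    ```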
"""
def __init__(
self,
rank,
cell,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
unroll=False,
**kwargs,
):
if unroll:
raise TypeError(
"Unrolling is not possible with convolutional RNNs. "
f"Received: unroll={unroll}"
)
if isinstance(cell, (list, tuple)):
# The StackedConvRNN3DCells isn't implemented yet.
raise TypeError(
"It is not possible at the moment to"
"stack convolutional cells. Only pass a single cell "
"instance as the `cell` argument. Received: "
f"cell={cell}"
)
super().__init__(
cell,
return_sequences,
return_state,
go_backwards,
stateful,
unroll,
**kwargs,
)
self.rank = rank
self.input_spec = [InputSpec(ndim=rank + 3)]
self.states = None
self._num_constants = None
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
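        # Each spatial output dimension follows standard convolution
        # output-size arithmetic via `conv_utils.conv_output_length`.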
if isinstance(input_shape, list):
input_shape = input_shape[0]
cell = self.cell
if cell.data_format == "channels_first":
img_dims = input_shape[3:]
elif cell.data_format == "channels_last":
img_dims = input_shape[2:-1]
norm_img_dims = tuple(
[
conv_utils.conv_output_length(
img_dims[idx],
cell.kernel_size[idx],
padding=cell.padding,
stride=cell.strides[idx],
dilation=cell.dilation_rate[idx],
)
for idx in range(len(img_dims))
]
)
if cell.data_format == "channels_first":
output_shape = input_shape[:2] + (cell.filters,) + norm_img_dims
elif cell.data_format == "channels_last":
output_shape = input_shape[:2] + norm_img_dims + (cell.filters,)
if not self.return_sequences:
output_shape = output_shape[:1] + output_shape[2:]
if self.return_state:
output_shape = [output_shape]
if cell.data_format == "channels_first":
output_shape += [
(input_shape[0], cell.filters) + norm_img_dims
for _ in range(2)
]
elif cell.data_format == "channels_last":
output_shape += [
(input_shape[0],) + norm_img_dims + (cell.filters,)
for _ in range(2)
]
return output_shape
@tf_utils.shape_type_conversion
def build(self, input_shape):
# Note input_shape will be list of shapes of initial states and
# constants if these are passed in __call__.
if self._num_constants is not None:
constants_shape = input_shape[-self._num_constants :]
else:
constants_shape = None
if isinstance(input_shape, list):
input_shape = input_shape[0]
batch_size = input_shape[0] if self.stateful else None
self.input_spec[0] = InputSpec(
shape=(batch_size, None) + input_shape[2 : self.rank + 3]
)
# allow cell (if layer) to build before we set or validate state_spec
if isinstance(self.cell, base_layer.Layer):
step_input_shape = (input_shape[0],) + input_shape[2:]
if constants_shape is not None:
self.cell.build([step_input_shape] + constants_shape)
else:
self.cell.build(step_input_shape)
# set or validate state_spec
if hasattr(self.cell.state_size, "__len__"):
state_size = list(self.cell.state_size)
else:
state_size = [self.cell.state_size]
if self.state_spec is not None:
# initial_state was passed in call, check compatibility
if self.cell.data_format == "channels_first":
ch_dim = 1
elif self.cell.data_format == "channels_last":
ch_dim = self.rank + 1
if [spec.shape[ch_dim] for spec in self.state_spec] != state_size:
raise ValueError(
"An `initial_state` was passed that is not compatible with "
"`cell.state_size`. Received state shapes "
f"{[spec.shape for spec in self.state_spec]}. "
f"However `cell.state_size` is {self.cell.state_size}"
)
else:
img_dims = tuple((None for _ in range(self.rank)))
if self.cell.data_format == "channels_first":
self.state_spec = [
InputSpec(shape=(None, dim) + img_dims)
for dim in state_size
]
elif self.cell.data_format == "channels_last":
self.state_spec = [
InputSpec(shape=(None,) + img_dims + (dim,))
for dim in state_size
]
if self.stateful:
self.reset_states()
self.built = True
def get_initial_state(self, inputs):
# (samples, timesteps, img_dims..., filters)
initial_state = backend.zeros_like(inputs)
# (samples, img_dims..., filters)
initial_state = backend.sum(initial_state, axis=1)
shape = list(self.cell.kernel_shape)
shape[-1] = self.cell.filters
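        # Convolving the all-zero input with an all-zero kernel yields a zero
        # tensor whose spatial dimensions and channel count match what the
        # cell expects for its recurrent state.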
initial_state = self.cell.input_conv(
initial_state,
tf.zeros(tuple(shape), initial_state.dtype),
padding=self.cell.padding,
)
if hasattr(self.cell.state_size, "__len__"):
return [initial_state for _ in self.cell.state_size]
else:
return [initial_state]
def call(
self,
inputs,
mask=None,
training=None,
initial_state=None,
constants=None,
):
# note that the .build() method of subclasses MUST define
# self.input_spec and self.state_spec with complete input shapes.
inputs, initial_state, constants = self._process_inputs(
inputs, initial_state, constants
)
if isinstance(mask, list):
mask = mask[0]
timesteps = backend.int_shape(inputs)[1]
kwargs = {}
if generic_utils.has_arg(self.cell.call, "training"):
kwargs["training"] = training
if constants:
if not generic_utils.has_arg(self.cell.call, "constants"):
raise ValueError(
f"RNN cell {self.cell} does not support constants. "
f"Received: constants={constants}"
)
def step(inputs, states):
constants = states[-self._num_constants :]
states = states[: -self._num_constants]
return self.cell.call(
inputs, states, constants=constants, **kwargs
)
else:
def step(inputs, states):
return self.cell.call(inputs, states, **kwargs)
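        # `backend.rnn` iterates `step` over the time axis, threading the
        # recurrent state through timesteps and honoring `mask` and
        # `go_backwards`.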
last_output, outputs, states = backend.rnn(
step,
inputs,
initial_state,
constants=constants,
go_backwards=self.go_backwards,
mask=mask,
input_length=timesteps,
return_all_outputs=self.return_sequences,
)
if self.stateful:
updates = [
backend.update(self_state, state)
for self_state, state in zip(self.states, states)
]
self.add_update(updates)
if self.return_sequences:
output = outputs
else:
output = last_output
if self.return_state:
if not isinstance(states, (list, tuple)):
states = [states]
else:
states = list(states)
return [output] + states
return output
def reset_states(self, states=None):
if not self.stateful:
raise AttributeError("Layer must be stateful.")
input_shape = self.input_spec[0].shape
state_shape = self.compute_output_shape(input_shape)
if self.return_state:
state_shape = state_shape[0]
if self.return_sequences:
state_shape = state_shape[:1].concatenate(state_shape[2:])
if None in state_shape:
raise ValueError(
"If a RNN is stateful, it needs to know "
"its batch size. Specify the batch size "
"of your input tensors: \n"
"- If using a Sequential model, "
"specify the batch size by passing "
"a `batch_input_shape` "
"argument to your first layer.\n"
"- If using the functional API, specify "
"the time dimension by passing a "
"`batch_shape` argument to your Input layer.\n"
"The same thing goes for the number of rows and "
"columns."
)
        # Helper: build the full state shape for `nb_channels` channels,
        # respecting the cell's data format.
def get_tuple_shape(nb_channels):
result = list(state_shape)
if self.cell.data_format == "channels_first":
result[1] = nb_channels
elif self.cell.data_format == "channels_last":
result[self.rank + 1] = nb_channels
else:
raise KeyError(
"Cell data format must be one of "
'{"channels_first", "channels_last"}. Received: '
f"cell.data_format={self.cell.data_format}"
)
return tuple(result)
# initialize state if None
if self.states[0] is None:
if hasattr(self.cell.state_size, "__len__"):
self.states = [
backend.zeros(get_tuple_shape(dim))
for dim in self.cell.state_size
]
else:
self.states = [
backend.zeros(get_tuple_shape(self.cell.state_size))
]
elif states is None:
if hasattr(self.cell.state_size, "__len__"):
for state, dim in zip(self.states, self.cell.state_size):
backend.set_value(state, np.zeros(get_tuple_shape(dim)))
else:
backend.set_value(
self.states[0],
np.zeros(get_tuple_shape(self.cell.state_size)),
)
else:
if not isinstance(states, (list, tuple)):
states = [states]
if len(states) != len(self.states):
raise ValueError(
f"Layer {self.name} expects {len(self.states)} states, "
f"but it received {len(states)} state values. "
f"States received: {states}"
)
for index, (value, state) in enumerate(zip(states, self.states)):
if hasattr(self.cell.state_size, "__len__"):
dim = self.cell.state_size[index]
else:
dim = self.cell.state_size
if value.shape != get_tuple_shape(dim):
raise ValueError(
"State {index} is incompatible with layer "
f"{self.name}: expected shape={get_tuple_shape(dim)}, "
f"found shape={value.shape}"
)
backend.set_value(state, value)
| tf-keras/tf_keras/layers/rnn/base_conv_rnn.py/0 | {
"file_path": "tf-keras/tf_keras/layers/rnn/base_conv_rnn.py",
"repo_id": "tf-keras",
"token_count": 8462
} | 231 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for cudnn recurrent layers."""
import os
import tempfile
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import tf_keras as keras
from tf_keras.optimizers.legacy.rmsprop import RMSprop
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
# isort: off
from tensorflow.python.framework import (
test_util as tf_test_utils,
)
@test_combinations.run_all_keras_modes
class CuDNNTest(test_combinations.TestCase):
@parameterized.named_parameters(
*test_utils.generate_combinations_with_testcase_name(
layer_class=[keras.layers.CuDNNGRU, keras.layers.CuDNNLSTM],
return_sequences=[True, False],
)
)
@tf_test_utils.run_gpu_only
def test_cudnn_rnn_return_sequence(self, layer_class, return_sequences):
input_size = 10
timesteps = 6
units = 2
num_samples = 32
test_utils.layer_test(
layer_class,
kwargs={"units": units, "return_sequences": return_sequences},
input_shape=(num_samples, timesteps, input_size),
)
@parameterized.named_parameters(
*test_utils.generate_combinations_with_testcase_name(
layer_class=[keras.layers.CuDNNGRU, keras.layers.CuDNNLSTM],
go_backwards=[True, False],
)
)
@tf_test_utils.run_gpu_only
def test_cudnn_rnn_go_backward(self, layer_class, go_backwards):
input_size = 10
timesteps = 6
units = 2
num_samples = 32
test_utils.layer_test(
layer_class,
kwargs={"units": units, "go_backwards": go_backwards},
input_shape=(num_samples, timesteps, input_size),
)
@parameterized.named_parameters(
("cudnngru", keras.layers.CuDNNGRU),
("cudnnlstm", keras.layers.CuDNNLSTM),
)
@tf_test_utils.run_gpu_only
def test_return_state(self, layer_class):
input_size = 10
timesteps = 6
units = 2
num_samples = 32
num_states = 2 if layer_class is keras.layers.CuDNNLSTM else 1
inputs = keras.Input(batch_shape=(num_samples, timesteps, input_size))
layer = layer_class(units, return_state=True, stateful=True)
outputs = layer(inputs)
_, state = outputs[0], outputs[1:]
self.assertEqual(len(state), num_states)
model = keras.models.Model(inputs, state[0])
model.run_eagerly = test_utils.should_run_eagerly()
inputs = np.random.random((num_samples, timesteps, input_size))
state = model.predict(inputs)
np.testing.assert_allclose(
keras.backend.eval(layer.states[0]), state, atol=1e-4
)
@parameterized.named_parameters(
("cudnngru", keras.layers.CuDNNGRU),
("cudnnlstm", keras.layers.CuDNNLSTM),
)
@tf_test_utils.run_gpu_only
def test_time_major_input(self, layer_class):
input_size = 10
timesteps = 6
units = 2
num_samples = 32
model = keras.models.Sequential()
model.add(keras.layers.Lambda(lambda t: tf.transpose(t, [1, 0, 2])))
layer = layer_class(units, time_major=True, return_sequences=True)
model.add(layer)
model.add(keras.layers.Lambda(lambda t: tf.transpose(t, [1, 0, 2])))
model.compile(
loss="categorical_crossentropy",
optimizer=RMSprop(learning_rate=0.001),
)
model.fit(
np.ones((num_samples, timesteps, input_size)),
np.ones((num_samples, timesteps, units)),
)
out = model.predict(np.ones((num_samples, timesteps, input_size)))
self.assertEqual(out.shape, (num_samples, timesteps, units))
@parameterized.named_parameters(
("cudnngru", keras.layers.CuDNNGRU),
("cudnnlstm", keras.layers.CuDNNLSTM),
)
@tf_test_utils.run_gpu_only
def test_specify_initial_state_keras_tensor(self, layer_class):
input_size = 10
timesteps = 6
units = 2
num_samples = 32
num_states = 2 if layer_class is keras.layers.CuDNNLSTM else 1
inputs = keras.Input((timesteps, input_size))
initial_state = [keras.Input((units,)) for _ in range(num_states)]
layer = layer_class(units)
if len(initial_state) == 1:
output = layer(inputs, initial_state=initial_state[0])
else:
output = layer(inputs, initial_state=initial_state)
self.assertTrue(
any(
initial_state[0] is t
for t in layer._inbound_nodes[0].input_tensors
)
)
model = keras.models.Model([inputs] + initial_state, output)
model.compile(
loss="categorical_crossentropy",
optimizer=RMSprop(learning_rate=0.001),
run_eagerly=test_utils.should_run_eagerly(),
)
inputs = np.random.random((num_samples, timesteps, input_size))
initial_state = [
np.random.random((num_samples, units)) for _ in range(num_states)
]
targets = np.random.random((num_samples, units))
model.fit([inputs] + initial_state, targets)
class CuDNNGraphOnlyTest(test_combinations.TestCase):
@parameterized.named_parameters(
("cudnngru", keras.layers.CuDNNGRU),
("cudnnlstm", keras.layers.CuDNNLSTM),
)
@tf_test_utils.run_gpu_only
def test_regularizer(self, layer_class):
input_size = 10
timesteps = 6
units = 2
num_samples = 32
with tf.Graph().as_default():
layer = layer_class(
units,
return_sequences=False,
input_shape=(timesteps, input_size),
kernel_regularizer=keras.regularizers.l1(0.01),
recurrent_regularizer=keras.regularizers.l1(0.01),
bias_regularizer="l2",
)
layer.build((None, None, input_size))
self.assertEqual(len(layer.losses), 3)
layer = layer_class(
units,
return_sequences=False,
input_shape=(timesteps, input_size),
activity_regularizer="l2",
)
self.assertTrue(layer.activity_regularizer)
x = keras.backend.variable(
np.ones((num_samples, timesteps, input_size))
)
layer(x)
self.assertEqual(len(layer.get_losses_for(x)), 1)
@parameterized.named_parameters(
("cudnngru", keras.layers.CuDNNGRU),
("cudnnlstm", keras.layers.CuDNNLSTM),
)
@tf_test_utils.run_gpu_only
@tf_test_utils.run_v1_only("b/120941292")
def test_statefulness(self, layer_class):
input_size = 10
timesteps = 6
units = 2
num_samples = 32
with self.cached_session():
model = keras.models.Sequential()
model.add(
keras.layers.Embedding(
10,
input_size,
input_length=timesteps,
batch_input_shape=(num_samples, timesteps),
)
)
layer = layer_class(
units, return_sequences=False, stateful=True, weights=None
)
model.add(layer)
model.compile(
optimizer=tf.compat.v1.train.GradientDescentOptimizer(0.01),
loss="mse",
)
out1 = model.predict(np.ones((num_samples, timesteps)))
self.assertEqual(out1.shape, (num_samples, units))
# train once so that the states change
model.train_on_batch(
np.ones((num_samples, timesteps)), np.ones((num_samples, units))
)
out2 = model.predict(np.ones((num_samples, timesteps)))
# if the state is not reset, output should be different
self.assertNotEqual(out1.max(), out2.max())
# check that output changes after states are reset
# (even though the model itself didn't change)
layer.reset_states()
out3 = model.predict(np.ones((num_samples, timesteps)))
self.assertNotEqual(out2.max(), out3.max())
# check that container-level reset_states() works
model.reset_states()
out4 = model.predict(np.ones((num_samples, timesteps)))
self.assertAllClose(out3, out4, atol=1e-5)
# check that the call to `predict` updated the states
out5 = model.predict(np.ones((num_samples, timesteps)))
self.assertNotEqual(out4.max(), out5.max())
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class CuDNNV1OnlyTest(test_combinations.TestCase):
@tf_test_utils.run_gpu_only
def test_trainability(self):
input_size = 10
units = 2
for layer_class in [keras.layers.CuDNNGRU, keras.layers.CuDNNLSTM]:
layer = layer_class(units)
layer.build((None, None, input_size))
self.assertEqual(len(layer.weights), 3)
self.assertEqual(len(layer.trainable_weights), 3)
self.assertEqual(len(layer.non_trainable_weights), 0)
layer.trainable = False
self.assertEqual(len(layer.weights), 3)
self.assertEqual(len(layer.non_trainable_weights), 3)
self.assertEqual(len(layer.trainable_weights), 0)
layer.trainable = True
self.assertEqual(len(layer.weights), 3)
self.assertEqual(len(layer.trainable_weights), 3)
self.assertEqual(len(layer.non_trainable_weights), 0)
@parameterized.named_parameters(
*test_utils.generate_combinations_with_testcase_name(
rnn_type=["LSTM", "GRU"],
to_cudnn=[True, False],
bidirectional=[True, False],
implementation=[1, 2],
model_nest_level=[1, 2],
model_type=["seq", "func"],
)
)
@tf_test_utils.run_v1_only("b/120911602, b/112083752")
@tf_test_utils.run_gpu_only
def test_load_weights_between_noncudnn_rnn(
self,
rnn_type,
to_cudnn,
bidirectional,
implementation,
model_nest_level,
model_type,
):
input_size = 10
timesteps = 6
input_shape = (timesteps, input_size)
units = 2
num_samples = 32
inputs = np.random.random((num_samples, timesteps, input_size))
rnn_layer_kwargs = {
"recurrent_activation": "sigmoid",
# ensure biases are non-zero and properly converted
"bias_initializer": "random_uniform",
"implementation": implementation,
}
if rnn_type == "LSTM":
rnn_layer_class = keras.layers.LSTM
cudnn_rnn_layer_class = keras.layers.CuDNNLSTM
else:
rnn_layer_class = keras.layers.GRU
cudnn_rnn_layer_class = keras.layers.CuDNNGRU
rnn_layer_kwargs["reset_after"] = True
layer = rnn_layer_class(units, **rnn_layer_kwargs)
if bidirectional:
layer = keras.layers.Bidirectional(layer)
cudnn_layer = cudnn_rnn_layer_class(units)
if bidirectional:
cudnn_layer = keras.layers.Bidirectional(cudnn_layer)
model = self._make_nested_model(
input_shape, layer, model_nest_level, model_type
)
cudnn_model = self._make_nested_model(
input_shape, cudnn_layer, model_nest_level, model_type
)
if to_cudnn:
self._convert_model_weights(model, cudnn_model)
else:
self._convert_model_weights(cudnn_model, model)
self.assertAllClose(
model.predict(inputs), cudnn_model.predict(inputs), atol=1e-4
)
def _make_nested_model(
self, input_shape, layer, level=1, model_type="func"
):
# example: make_nested_seq_model((1,), Dense(10), level=2).summary()
def make_nested_seq_model(input_shape, layer, level=1):
model = layer
for i in range(1, level + 1):
layers = (
[keras.layers.InputLayer(input_shape), model]
if (i == 1)
else [model]
)
model = keras.models.Sequential(layers)
if i > 1:
model.build((None,) + input_shape)
return model
# example: make_nested_func_model((1,), Dense(10), level=2).summary()
def make_nested_func_model(input_shape, layer, level=1):
model_input = keras.layers.Input(input_shape)
model = layer
for _ in range(level):
model = keras.models.Model(model_input, model(model_input))
return model
if model_type == "func":
return make_nested_func_model(input_shape, layer, level)
elif model_type == "seq":
return make_nested_seq_model(input_shape, layer, level)
def _convert_model_weights(self, source_model, target_model):
_, fname = tempfile.mkstemp(".h5")
source_model.save_weights(fname)
target_model.load_weights(fname)
os.remove(fname)
@parameterized.named_parameters(
*test_utils.generate_combinations_with_testcase_name(
rnn_type=["LSTM", "GRU"], to_cudnn=[True, False]
)
)
@tf_test_utils.run_v1_only("b/120911602")
@tf_test_utils.run_gpu_only
def test_load_weights_between_noncudnn_rnn_time_distributed(
self, rnn_type, to_cudnn
):
# Similar test as test_load_weights_between_noncudnn_rnn() but has
# different rank of input due to usage of TimeDistributed. Issue:
# #10356.
input_size = 10
steps = 6
timesteps = 6
input_shape = (timesteps, steps, input_size)
units = 2
num_samples = 32
inputs = np.random.random((num_samples, timesteps, steps, input_size))
rnn_layer_kwargs = {
"recurrent_activation": "sigmoid",
# ensure biases are non-zero and properly converted
"bias_initializer": "random_uniform",
}
if rnn_type == "LSTM":
rnn_layer_class = keras.layers.LSTM
cudnn_rnn_layer_class = keras.layers.CuDNNLSTM
else:
rnn_layer_class = keras.layers.GRU
cudnn_rnn_layer_class = keras.layers.CuDNNGRU
rnn_layer_kwargs["reset_after"] = True
layer = rnn_layer_class(units, **rnn_layer_kwargs)
layer = keras.layers.TimeDistributed(layer)
cudnn_layer = cudnn_rnn_layer_class(units)
cudnn_layer = keras.layers.TimeDistributed(cudnn_layer)
model = self._make_nested_model(input_shape, layer)
cudnn_model = self._make_nested_model(input_shape, cudnn_layer)
if to_cudnn:
self._convert_model_weights(model, cudnn_model)
else:
self._convert_model_weights(cudnn_model, model)
self.assertAllClose(
model.predict(inputs), cudnn_model.predict(inputs), atol=1e-4
)
@tf_test_utils.run_gpu_only
def test_cudnnrnn_bidirectional(self):
rnn = keras.layers.CuDNNGRU
samples = 2
dim = 2
timesteps = 2
output_dim = 2
mode = "concat"
x = np.random.random((samples, timesteps, dim))
target_dim = 2 * output_dim if mode == "concat" else output_dim
y = np.random.random((samples, target_dim))
# test with Sequential model
model = keras.Sequential()
model.add(
keras.layers.Bidirectional(
rnn(output_dim), merge_mode=mode, input_shape=(None, dim)
)
)
model.compile(loss="mse", optimizer="rmsprop")
model.fit(x, y, epochs=1, batch_size=1)
# test config
model.get_config()
model = keras.models.model_from_json(model.to_json())
model.summary()
# test stacked bidirectional layers
model = keras.Sequential()
model.add(
keras.layers.Bidirectional(
rnn(output_dim, return_sequences=True),
merge_mode=mode,
input_shape=(None, dim),
)
)
model.add(keras.layers.Bidirectional(rnn(output_dim), merge_mode=mode))
model.compile(loss="mse", optimizer=R"rmsprop")
model.fit(x, y, epochs=1, batch_size=1)
# test with functional API
inputs = keras.Input((timesteps, dim))
outputs = keras.layers.Bidirectional(rnn(output_dim), merge_mode=mode)(
inputs
)
model = keras.Model(inputs, outputs)
model.compile(loss="mse", optimizer=R"rmsprop")
model.fit(x, y, epochs=1, batch_size=1)
# Bidirectional and stateful
inputs = keras.Input(batch_shape=(1, timesteps, dim))
outputs = keras.layers.Bidirectional(
rnn(output_dim, stateful=True), merge_mode=mode
)(inputs)
model = keras.Model(inputs, outputs)
model.compile(loss="mse", optimizer="rmsprop")
model.fit(x, y, epochs=1, batch_size=1)
@tf_test_utils.run_gpu_only
def test_preprocess_weights_for_loading_gru_incompatible(self):
"""Test loading weights between incompatible layers.
Should fail fast with an exception.
"""
input_shape = (3, 5)
def gru(cudnn=False, **kwargs):
layer_class = keras.layers.CuDNNGRU if cudnn else keras.layers.GRUV1
return layer_class(2, input_shape=input_shape, **kwargs)
def get_layer_weights(layer):
layer.build(input_shape=input_shape)
return layer.get_weights()
def assert_not_compatible(src, dest, message):
with self.assertRaises(ValueError) as ex:
keras.saving.legacy.hdf5_format.preprocess_weights_for_loading(
dest, get_layer_weights(src)
)
self.assertIn(message, str(ex.exception))
assert_not_compatible(
gru(),
gru(cudnn=True),
"GRU(reset_after=False) is not compatible with CuDNNGRU",
)
assert_not_compatible(
gru(cudnn=True),
gru(),
"CuDNNGRU is not compatible with GRU(reset_after=False)",
)
assert_not_compatible(
gru(),
gru(reset_after=True),
"GRU(reset_after=False) is not compatible with "
"GRU(reset_after=True)",
)
assert_not_compatible(
gru(reset_after=True),
gru(),
"GRU(reset_after=True) is not compatible with "
"GRU(reset_after=False)",
)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/layers/rnn/cudnn_test.py/0 | {
"file_path": "tf-keras/tf_keras/layers/rnn/cudnn_test.py",
"repo_id": "tf-keras",
"token_count": 9647
} | 232 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Fully connected RNN layer."""
import tensorflow.compat.v2 as tf
from tf_keras import activations
from tf_keras import backend
from tf_keras import constraints
from tf_keras import initializers
from tf_keras import regularizers
from tf_keras.engine import base_layer
from tf_keras.engine.input_spec import InputSpec
from tf_keras.layers.rnn import rnn_utils
from tf_keras.layers.rnn.base_rnn import RNN
from tf_keras.layers.rnn.dropout_rnn_cell_mixin import DropoutRNNCellMixin
from tf_keras.utils import tf_utils
# isort: off
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.layers.SimpleRNNCell")
class SimpleRNNCell(DropoutRNNCellMixin, base_layer.BaseRandomLayer):
"""Cell class for SimpleRNN.
See
[the TF-Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)
for details about the usage of RNN API.
This class processes one step within the whole time sequence input, whereas
`tf.keras.layer.SimpleRNN` processes the whole sequence.
Args:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
Default: hyperbolic tangent (`tanh`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, (default `True`), whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs. Default:
`glorot_uniform`.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix, used for the linear transformation of the recurrent
state. Default: `orthogonal`.
bias_initializer: Initializer for the bias vector. Default: `zeros`.
kernel_regularizer: Regularizer function applied to the `kernel` weights
matrix. Default: `None`.
recurrent_regularizer: Regularizer function applied to the
`recurrent_kernel` weights matrix. Default: `None`.
bias_regularizer: Regularizer function applied to the bias vector.
Default: `None`.
kernel_constraint: Constraint function applied to the `kernel` weights
matrix. Default: `None`.
recurrent_constraint: Constraint function applied to the
`recurrent_kernel` weights matrix. Default: `None`.
bias_constraint: Constraint function applied to the bias vector. Default:
`None`.
dropout: Float between 0 and 1. Fraction of the units to drop for the
linear transformation of the inputs. Default: 0.
recurrent_dropout: Float between 0 and 1. Fraction of the units to drop
for the linear transformation of the recurrent state. Default: 0.
Call arguments:
inputs: A 2D tensor, with shape of `[batch, feature]`.
states: A 2D tensor with shape of `[batch, units]`, which is the state
from the previous time step. For timestep 0, the initial state provided
      by the user will be fed to the cell.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. Only relevant when `dropout` or
`recurrent_dropout` is used.
Examples:
```python
inputs = np.random.random([32, 10, 8]).astype(np.float32)
rnn = tf.keras.layers.RNN(tf.keras.layers.SimpleRNNCell(4))
output = rnn(inputs) # The output has shape `[32, 4]`.
rnn = tf.keras.layers.RNN(
tf.keras.layers.SimpleRNNCell(4),
return_sequences=True,
return_state=True)
# whole_sequence_output has shape `[32, 10, 4]`.
# final_state has shape `[32, 4]`.
whole_sequence_output, final_state = rnn(inputs)
```
"""
def __init__(
self,
units,
activation="tanh",
use_bias=True,
kernel_initializer="glorot_uniform",
recurrent_initializer="orthogonal",
bias_initializer="zeros",
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.0,
recurrent_dropout=0.0,
**kwargs,
):
if units <= 0:
raise ValueError(
"Received an invalid value for argument `units`, "
f"expected a positive integer, got {units}."
)
# By default use cached variable under v2 mode, see b/143699808.
if tf.compat.v1.executing_eagerly_outside_functions():
self._enable_caching_device = kwargs.pop(
"enable_caching_device", True
)
else:
self._enable_caching_device = kwargs.pop(
"enable_caching_device", False
)
super().__init__(**kwargs)
self.units = units
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.dropout = min(1.0, max(0.0, dropout))
self.recurrent_dropout = min(1.0, max(0.0, recurrent_dropout))
self.state_size = self.units
self.output_size = self.units
@tf_utils.shape_type_conversion
def build(self, input_shape):
super().build(input_shape)
default_caching_device = rnn_utils.caching_device(self)
self.kernel = self.add_weight(
shape=(input_shape[-1], self.units),
name="kernel",
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
caching_device=default_caching_device,
)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
name="recurrent_kernel",
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint,
caching_device=default_caching_device,
)
if self.use_bias:
self.bias = self.add_weight(
shape=(self.units,),
name="bias",
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
caching_device=default_caching_device,
)
else:
self.bias = None
self.built = True
def call(self, inputs, states, training=None):
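        # Computes output = activation(dot(inputs, kernel) + bias
        #                              + dot(prev_output, recurrent_kernel)),
        # with the optional dropout masks applied to `inputs` and
        # `prev_output` respectively.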
prev_output = states[0] if tf.nest.is_nested(states) else states
dp_mask = self.get_dropout_mask_for_cell(inputs, training)
rec_dp_mask = self.get_recurrent_dropout_mask_for_cell(
prev_output, training
)
if dp_mask is not None:
h = backend.dot(inputs * dp_mask, self.kernel)
else:
h = backend.dot(inputs, self.kernel)
if self.bias is not None:
h = backend.bias_add(h, self.bias)
if rec_dp_mask is not None:
prev_output = prev_output * rec_dp_mask
output = h + backend.dot(prev_output, self.recurrent_kernel)
if self.activation is not None:
output = self.activation(output)
new_state = [output] if tf.nest.is_nested(states) else output
return output, new_state
def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
return rnn_utils.generate_zero_filled_state_for_cell(
self, inputs, batch_size, dtype
)
def get_config(self):
config = {
"units": self.units,
"activation": activations.serialize(self.activation),
"use_bias": self.use_bias,
"kernel_initializer": initializers.serialize(
self.kernel_initializer
),
"recurrent_initializer": initializers.serialize(
self.recurrent_initializer
),
"bias_initializer": initializers.serialize(self.bias_initializer),
"kernel_regularizer": regularizers.serialize(
self.kernel_regularizer
),
"recurrent_regularizer": regularizers.serialize(
self.recurrent_regularizer
),
"bias_regularizer": regularizers.serialize(self.bias_regularizer),
"kernel_constraint": constraints.serialize(self.kernel_constraint),
"recurrent_constraint": constraints.serialize(
self.recurrent_constraint
),
"bias_constraint": constraints.serialize(self.bias_constraint),
"dropout": self.dropout,
"recurrent_dropout": self.recurrent_dropout,
}
config.update(rnn_utils.config_for_enable_caching_device(self))
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export("keras.layers.SimpleRNN")
class SimpleRNN(RNN):
"""Fully-connected RNN where the output is to be fed back to input.
See
[the TF-Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)
for details about the usage of RNN API.
Args:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
Default: hyperbolic tangent (`tanh`).
If you pass None, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, (default `True`), whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs. Default:
`glorot_uniform`.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix, used for the linear transformation of the recurrent
state. Default: `orthogonal`.
bias_initializer: Initializer for the bias vector. Default: `zeros`.
kernel_regularizer: Regularizer function applied to the `kernel` weights
matrix. Default: `None`.
recurrent_regularizer: Regularizer function applied to the
`recurrent_kernel` weights matrix. Default: `None`.
bias_regularizer: Regularizer function applied to the bias vector.
Default: `None`.
activity_regularizer: Regularizer function applied to the output of the
layer (its "activation"). Default: `None`.
kernel_constraint: Constraint function applied to the `kernel` weights
matrix. Default: `None`.
recurrent_constraint: Constraint function applied to the
`recurrent_kernel` weights matrix. Default: `None`.
bias_constraint: Constraint function applied to the bias vector. Default:
`None`.
dropout: Float between 0 and 1.
Fraction of the units to drop for the linear transformation of the
inputs. Default: 0.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for the linear transformation of the
recurrent state. Default: 0.
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence. Default: `False`.
return_state: Boolean. Whether to return the last state
in addition to the output. Default: `False`
go_backwards: Boolean (default False).
If True, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
unroll: Boolean (default False).
If True, the network will be unrolled,
else a symbolic loop will be used.
      Unrolling can speed up an RNN,
although it tends to be more memory-intensive.
Unrolling is only suitable for short sequences.
Call arguments:
inputs: A 3D tensor, with shape `[batch, timesteps, feature]`.
mask: Binary tensor of shape `[batch, timesteps]` indicating whether
a given timestep should be masked. An individual `True` entry indicates
that the corresponding timestep should be utilized, while a `False`
entry indicates that the corresponding timestep should be ignored.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. This argument is passed to the cell
when calling it. This is only relevant if `dropout` or
`recurrent_dropout` is used.
initial_state: List of initial state tensors to be passed to the first
call of the cell.
Examples:
```python
inputs = np.random.random([32, 10, 8]).astype(np.float32)
simple_rnn = tf.keras.layers.SimpleRNN(4)
output = simple_rnn(inputs) # The output has shape `[32, 4]`.
simple_rnn = tf.keras.layers.SimpleRNN(
4, return_sequences=True, return_state=True)
# whole_sequence_output has shape `[32, 10, 4]`.
# final_state has shape `[32, 4]`.
whole_sequence_output, final_state = simple_rnn(inputs)
```
"""
def __init__(
self,
units,
activation="tanh",
use_bias=True,
kernel_initializer="glorot_uniform",
recurrent_initializer="orthogonal",
bias_initializer="zeros",
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.0,
recurrent_dropout=0.0,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
unroll=False,
**kwargs,
):
if "implementation" in kwargs:
kwargs.pop("implementation")
logging.warning(
"The `implementation` argument "
"in `SimpleRNN` has been deprecated. "
"Please remove it from your layer call."
)
if "enable_caching_device" in kwargs:
cell_kwargs = {
"enable_caching_device": kwargs.pop("enable_caching_device")
}
else:
cell_kwargs = {}
cell = SimpleRNNCell(
units,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout,
dtype=kwargs.get("dtype"),
trainable=kwargs.get("trainable", True),
name="simple_rnn_cell",
**cell_kwargs,
)
super().__init__(
cell,
return_sequences=return_sequences,
return_state=return_state,
go_backwards=go_backwards,
stateful=stateful,
unroll=unroll,
**kwargs,
)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.input_spec = [InputSpec(ndim=3)]
def call(self, inputs, mask=None, training=None, initial_state=None):
return super().call(
inputs, mask=mask, training=training, initial_state=initial_state
)
@property
def units(self):
return self.cell.units
@property
def activation(self):
return self.cell.activation
@property
def use_bias(self):
return self.cell.use_bias
@property
def kernel_initializer(self):
return self.cell.kernel_initializer
@property
def recurrent_initializer(self):
return self.cell.recurrent_initializer
@property
def bias_initializer(self):
return self.cell.bias_initializer
@property
def kernel_regularizer(self):
return self.cell.kernel_regularizer
@property
def recurrent_regularizer(self):
return self.cell.recurrent_regularizer
@property
def bias_regularizer(self):
return self.cell.bias_regularizer
@property
def kernel_constraint(self):
return self.cell.kernel_constraint
@property
def recurrent_constraint(self):
return self.cell.recurrent_constraint
@property
def bias_constraint(self):
return self.cell.bias_constraint
@property
def dropout(self):
return self.cell.dropout
@property
def recurrent_dropout(self):
return self.cell.recurrent_dropout
def get_config(self):
config = {
"units": self.units,
"activation": activations.serialize(self.activation),
"use_bias": self.use_bias,
"kernel_initializer": initializers.serialize(
self.kernel_initializer
),
"recurrent_initializer": initializers.serialize(
self.recurrent_initializer
),
"bias_initializer": initializers.serialize(self.bias_initializer),
"kernel_regularizer": regularizers.serialize(
self.kernel_regularizer
),
"recurrent_regularizer": regularizers.serialize(
self.recurrent_regularizer
),
"bias_regularizer": regularizers.serialize(self.bias_regularizer),
"activity_regularizer": regularizers.serialize(
self.activity_regularizer
),
"kernel_constraint": constraints.serialize(self.kernel_constraint),
"recurrent_constraint": constraints.serialize(
self.recurrent_constraint
),
"bias_constraint": constraints.serialize(self.bias_constraint),
"dropout": self.dropout,
"recurrent_dropout": self.recurrent_dropout,
}
base_config = super().get_config()
config.update(rnn_utils.config_for_enable_caching_device(self.cell))
del base_config["cell"]
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
if "implementation" in config:
config.pop("implementation")
return cls(**config)
| tf-keras/tf_keras/layers/rnn/simple_rnn.py/0 | {
"file_path": "tf-keras/tf_keras/layers/rnn/simple_rnn.py",
"repo_id": "tf-keras",
"token_count": 8279
} | 233 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.layers.core."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import platform
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
from tf_keras.legacy_tf_layers import core as core_layers
from tf_keras.testing_infra import test_combinations
# isort: off
from tensorflow.python.framework import (
test_util as tf_test_utils,
)
from tensorflow.python.ops import variable_scope
class DenseTest(tf.test.TestCase, parameterized.TestCase):
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testDenseProperties(self):
dense = core_layers.Dense(2, activation=tf.nn.relu, name="my_dense")
self.assertEqual(dense.units, 2)
self.assertEqual(dense.activation, tf.nn.relu)
self.assertEqual(dense.kernel_regularizer, None)
self.assertEqual(dense.bias_regularizer, None)
self.assertEqual(dense.activity_regularizer, None)
self.assertEqual(dense.use_bias, True)
# Test auto-naming
dense = core_layers.Dense(2, activation=tf.nn.relu)
dense(tf.random.uniform((5, 2)))
self.assertEqual(dense.name, "dense_1")
dense = core_layers.Dense(2, activation=tf.nn.relu)
dense(tf.random.uniform((5, 2)))
self.assertEqual(dense.name, "dense_2")
@tf_test_utils.run_deprecated_v1
def testVariableInput(self):
with self.cached_session():
v = tf.compat.v1.get_variable(
"X", initializer=tf.compat.v1.zeros_initializer(), shape=(1, 1)
)
x = core_layers.Dense(1)(v)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAllEqual(x, [[0.0]])
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testCall(self):
dense = core_layers.Dense(2, activation=tf.nn.relu, name="my_dense")
inputs = tf.random.uniform((5, 4), seed=1)
outputs = dense(inputs)
self.assertListEqual([5, 2], outputs.get_shape().as_list())
self.assertListEqual(dense.variables, [dense.kernel, dense.bias])
self.assertListEqual(
dense.trainable_variables, [dense.kernel, dense.bias]
)
self.assertListEqual(dense.non_trainable_variables, [])
if not tf.executing_eagerly():
self.assertEqual(
len(
tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES
)
),
2,
)
self.assertEqual(dense.kernel.name, "my_dense/kernel:0")
self.assertEqual(dense.bias.name, "my_dense/bias:0")
@tf_test_utils.assert_no_new_pyobjects_executing_eagerly()
def testNoEagerLeak(self):
# Tests that repeatedly constructing and building a Layer does not leak
# Python objects.
inputs = tf.random.uniform((5, 4), seed=1)
core_layers.Dense(5)(inputs)
core_layers.Dense(2, activation=tf.nn.relu, name="my_dense")(inputs)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testCallTensorDot(self):
dense = core_layers.Dense(2, activation=tf.nn.relu, name="my_dense")
inputs = tf.random.uniform((5, 4, 3), seed=1)
outputs = dense(inputs)
self.assertListEqual([5, 4, 2], outputs.get_shape().as_list())
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testNoBias(self):
dense = core_layers.Dense(2, use_bias=False, name="my_dense")
inputs = tf.random.uniform((5, 2), seed=1)
_ = dense(inputs)
self.assertListEqual(dense.variables, [dense.kernel])
self.assertListEqual(dense.trainable_variables, [dense.kernel])
self.assertListEqual(dense.non_trainable_variables, [])
if not tf.executing_eagerly():
self.assertEqual(
len(
tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES
)
),
1,
)
self.assertEqual(dense.kernel.name, "my_dense/kernel:0")
self.assertEqual(dense.bias, None)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testNonTrainable(self):
dense = core_layers.Dense(2, trainable=False, name="my_dense")
inputs = tf.random.uniform((5, 2), seed=1)
_ = dense(inputs)
self.assertListEqual(dense.variables, [dense.kernel, dense.bias])
self.assertListEqual(
dense.non_trainable_variables, [dense.kernel, dense.bias]
)
self.assertListEqual(dense.trainable_variables, [])
if not tf.executing_eagerly():
self.assertEqual(
len(
tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES
)
),
0,
)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testOutputShape(self):
dense = core_layers.Dense(7, activation=tf.nn.relu, name="my_dense")
inputs = tf.random.uniform((5, 3), seed=1)
outputs = dense(inputs)
self.assertEqual(outputs.get_shape().as_list(), [5, 7])
inputs = tf.random.uniform((5, 2, 3), seed=1)
outputs = dense(inputs)
self.assertEqual(outputs.get_shape().as_list(), [5, 2, 7])
inputs = tf.random.uniform((1, 2, 4, 3), seed=1)
outputs = dense(inputs)
self.assertEqual(outputs.get_shape().as_list(), [1, 2, 4, 7])
@tf_test_utils.run_deprecated_v1
def testCallOnPlaceHolder(self):
inputs = tf.compat.v1.placeholder(dtype=tf.float32)
dense = core_layers.Dense(4, name="my_dense")
with self.assertRaises(ValueError):
dense(inputs)
inputs = tf.compat.v1.placeholder(dtype=tf.float32, shape=[None, None])
dense = core_layers.Dense(4, name="my_dense")
with self.assertRaises(ValueError):
dense(inputs)
inputs = tf.compat.v1.placeholder(
dtype=tf.float32, shape=[None, None, None]
)
dense = core_layers.Dense(4, name="my_dense")
with self.assertRaises(ValueError):
dense(inputs)
inputs = tf.compat.v1.placeholder(dtype=tf.float32, shape=[None, 3])
dense = core_layers.Dense(4, name="my_dense")
dense(inputs)
inputs = tf.compat.v1.placeholder(
dtype=tf.float32, shape=[None, None, 3]
)
dense = core_layers.Dense(4, name="my_dense")
dense(inputs)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testActivation(self):
dense = core_layers.Dense(2, activation=tf.nn.relu, name="dense1")
inputs = tf.random.uniform((5, 3), seed=1)
outputs = dense(inputs)
if not tf.executing_eagerly():
self.assertEqual(outputs.op.name, "dense1/Relu")
dense = core_layers.Dense(2, name="dense2")
inputs = tf.random.uniform((5, 3), seed=1)
outputs = dense(inputs)
if not tf.executing_eagerly():
self.assertEqual(outputs.op.name, "dense2/BiasAdd")
@tf_test_utils.run_deprecated_v1
def testActivityRegularizer(self):
regularizer = lambda x: tf.reduce_sum(x) * 1e-3
dense = core_layers.Dense(
2, name="my_dense", activity_regularizer=regularizer
)
inputs = tf.random.uniform((5, 3), seed=1)
_ = dense(inputs)
loss_keys = tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES
)
self.assertEqual(len(loss_keys), 1)
self.assertListEqual(dense.losses, loss_keys)
@tf_test_utils.run_deprecated_v1
def testKernelRegularizer(self):
regularizer = lambda x: tf.reduce_sum(x) * 1e-3
dense = core_layers.Dense(
2, name="my_dense", kernel_regularizer=regularizer
)
inputs = tf.random.uniform((5, 3), seed=1)
_ = dense(inputs)
loss_keys = tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES
)
self.assertEqual(len(loss_keys), 1)
self.evaluate([v.initializer for v in dense.variables])
self.assertAllEqual(
self.evaluate(dense.losses), self.evaluate(loss_keys)
)
@tf_test_utils.run_deprecated_v1
def testKernelRegularizerWithReuse(self):
regularizer = lambda x: tf.reduce_sum(x) * 1e-3
inputs = tf.random.uniform((5, 3), seed=1)
_ = core_layers.dense(
inputs, 2, name="my_dense", kernel_regularizer=regularizer
)
self.assertEqual(
len(
tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES
)
),
1,
)
_ = core_layers.dense(
inputs,
2,
name="my_dense",
kernel_regularizer=regularizer,
reuse=True,
)
self.assertEqual(
len(
tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES
)
),
1,
)
@tf_test_utils.run_deprecated_v1
def testBiasRegularizer(self):
regularizer = lambda x: tf.reduce_sum(x) * 1e-3
dense = core_layers.Dense(
2, name="my_dense", bias_regularizer=regularizer
)
inputs = tf.random.uniform((5, 3), seed=1)
_ = dense(inputs)
loss_keys = tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES
)
self.assertEqual(len(loss_keys), 1)
self.evaluate([v.initializer for v in dense.variables])
self.assertAllEqual(
self.evaluate(dense.losses), self.evaluate(loss_keys)
)
@tf_test_utils.run_deprecated_v1
def testFunctionalDense(self):
with self.cached_session():
inputs = tf.random.uniform((5, 3), seed=1)
outputs = core_layers.dense(
inputs, 2, activation=tf.nn.relu, name="my_dense"
)
self.assertEqual(
len(
tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES
)
),
2,
)
self.assertEqual(outputs.op.name, "my_dense/Relu")
@tf_test_utils.run_deprecated_v1
def testFunctionalDenseTwice(self):
inputs = tf.random.uniform((5, 3), seed=1)
core_layers.dense(inputs, 2)
vars1 = _get_variable_dict_from_varstore().values()
core_layers.dense(inputs, 2)
vars2 = _get_variable_dict_from_varstore().values()
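        # Without reuse, the second `dense` call creates a fresh kernel/bias
        # pair, so the variable store grows from two to four variables.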
self.assertEqual(len(vars1), 2)
self.assertEqual(len(vars2), 4)
# TODO(alive): get this to work in eager mode.
def testFunctionalDenseTwiceReuse(self):
with self.cached_session():
inputs = tf.random.uniform((5, 3), seed=1)
core_layers.dense(inputs, 2, name="my_dense")
vars1 = tf.compat.v1.trainable_variables()
core_layers.dense(inputs, 2, name="my_dense", reuse=True)
vars2 = tf.compat.v1.trainable_variables()
self.assertEqual(vars1, vars2)
# TODO(alive): get this to work in eager mode.
def testFunctionalDenseTwiceReuseFromScope(self):
with self.cached_session():
with tf.compat.v1.variable_scope("scope"):
inputs = tf.random.uniform((5, 3), seed=1)
core_layers.dense(inputs, 2, name="my_dense")
vars1 = tf.compat.v1.trainable_variables()
with tf.compat.v1.variable_scope("scope", reuse=True):
core_layers.dense(inputs, 2, name="my_dense")
vars2 = tf.compat.v1.trainable_variables()
self.assertEqual(vars1, vars2)
@tf_test_utils.run_deprecated_v1
def testFunctionalDenseInitializerFromScope(self):
with tf.compat.v1.variable_scope(
"scope", initializer=tf.compat.v1.ones_initializer()
), self.cached_session():
inputs = tf.random.uniform((5, 3), seed=1)
core_layers.dense(inputs, 2)
self.evaluate(tf.compat.v1.global_variables_initializer())
weights = _get_variable_dict_from_varstore()
self.assertEqual(len(weights), 2)
# Check that the matrix weights got initialized to ones (from
# scope).
self.assertAllClose(
weights["scope/dense/kernel"].read_value(), np.ones((3, 2))
)
# Check that the bias still got initialized to zeros.
self.assertAllClose(
weights["scope/dense/bias"].read_value(), np.zeros((2))
)
def testFunctionalDenseWithCustomGetter(self):
called = [0]
def custom_getter(getter, *args, **kwargs):
called[0] += 1
return getter(*args, **kwargs)
with tf.compat.v1.variable_scope("test", custom_getter=custom_getter):
inputs = tf.random.uniform((5, 3), seed=1)
core_layers.dense(inputs, 2)
self.assertEqual(called[0], 2)
@tf_test_utils.run_deprecated_v1
def testFunctionalDenseInScope(self):
with self.cached_session():
with tf.compat.v1.variable_scope("test"):
inputs = tf.random.uniform((5, 3), seed=1)
core_layers.dense(inputs, 2, name="my_dense")
var_dict = _get_variable_dict_from_varstore()
var_key = "test/my_dense/kernel"
self.assertEqual(var_dict[var_key].name, f"{var_key}:0")
with tf.compat.v1.variable_scope("test1") as scope:
inputs = tf.random.uniform((5, 3), seed=1)
core_layers.dense(inputs, 2, name=scope)
var_dict = _get_variable_dict_from_varstore()
var_key = "test1/kernel"
self.assertEqual(var_dict[var_key].name, f"{var_key}:0")
with tf.compat.v1.variable_scope("test2"):
inputs = tf.random.uniform((5, 3), seed=1)
core_layers.dense(inputs, 2)
var_dict = _get_variable_dict_from_varstore()
var_key = "test2/dense/kernel"
self.assertEqual(var_dict[var_key].name, f"{var_key}:0")
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testComputeOutputShape(self):
dense = core_layers.Dense(2, activation=tf.nn.relu, name="dense1")
ts = tf.TensorShape
with self.assertRaises(ValueError):
dense.compute_output_shape(ts(None))
with self.assertRaises(ValueError):
dense.compute_output_shape(ts([]))
with self.assertRaises(ValueError):
dense.compute_output_shape(ts([1]))
self.assertEqual(
[None, 2], dense.compute_output_shape((None, 3)).as_list()
)
self.assertEqual(
[None, 2], dense.compute_output_shape(ts([None, 3])).as_list()
)
self.assertEqual(
[None, 4, 2], dense.compute_output_shape(ts([None, 4, 3])).as_list()
)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testConstraints(self):
k_constraint = lambda x: x / tf.reduce_sum(x)
b_constraint = lambda x: x / tf.reduce_max(x)
dense = core_layers.Dense(
2, kernel_constraint=k_constraint, bias_constraint=b_constraint
)
inputs = tf.random.uniform((5, 3), seed=1)
dense(inputs)
self.assertEqual(dense.kernel_constraint, k_constraint)
self.assertEqual(dense.bias_constraint, b_constraint)
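# Helper used by the functional Dense tests above: returns the variables
# registered in the default variable store, sorted by name.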
def _get_variable_dict_from_varstore():
var_dict = variable_scope._get_default_variable_store()._vars
sorted_var_dict = collections.OrderedDict(
sorted(var_dict.items(), key=lambda t: t[0])
)
return sorted_var_dict
class DropoutTest(tf.test.TestCase, parameterized.TestCase):
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testDropoutProperties(self):
dp = core_layers.Dropout(0.5, name="dropout")
self.assertEqual(dp.rate, 0.5)
self.assertEqual(dp.noise_shape, None)
dp(tf.ones(()))
self.assertEqual(dp.name, "dropout")
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testBooleanLearningPhase(self):
dp = core_layers.Dropout(0.5)
inputs = tf.ones((5, 3))
dropped = dp(inputs, training=True)
if not tf.executing_eagerly():
self.evaluate(tf.compat.v1.global_variables_initializer())
np_output = self.evaluate(dropped)
self.assertAlmostEqual(0.0, np_output.min())
dropped = dp(inputs, training=False)
np_output = self.evaluate(dropped)
self.assertAllClose(np.ones((5, 3)), np_output)
@tf_test_utils.run_deprecated_v1
def testDynamicLearningPhase(self):
with self.cached_session() as sess:
dp = core_layers.Dropout(0.5, seed=1)
inputs = tf.ones((5, 5))
training = tf.compat.v1.placeholder(dtype="bool")
dropped = dp(inputs, training=training)
self.evaluate(tf.compat.v1.global_variables_initializer())
np_output = sess.run(dropped, feed_dict={training: True})
self.assertAlmostEqual(0.0, np_output.min())
np_output = sess.run(dropped, feed_dict={training: False})
self.assertAllClose(np.ones((5, 5)), np_output)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testDynamicNoiseShape(self):
inputs = tf.ones((5, 3, 2))
noise_shape = [None, 1, None]
dp = core_layers.Dropout(0.5, noise_shape=noise_shape, seed=1)
dropped = dp(inputs, training=True)
self.evaluate(tf.compat.v1.global_variables_initializer())
np_output = self.evaluate(dropped)
self.assertAlmostEqual(0.0, np_output.min())
self.assertAllClose(np_output[:, 0, :], np_output[:, 1, :])
def testCustomNoiseShape(self):
inputs = tf.ones((5, 3, 2))
noise_shape = [5, 1, 2]
dp = core_layers.Dropout(0.5, noise_shape=noise_shape, seed=1)
dropped = dp(inputs, training=True)
self.evaluate(tf.compat.v1.global_variables_initializer())
np_output = self.evaluate(dropped)
self.assertAlmostEqual(0.0, np_output.min())
self.assertAllClose(np_output[:, 0, :], np_output[:, 1, :])
@tf_test_utils.run_deprecated_v1
def testFunctionalDropout(self):
with self.cached_session():
inputs = tf.ones((5, 5))
dropped = core_layers.dropout(inputs, 0.5, training=True, seed=1)
self.evaluate(tf.compat.v1.global_variables_initializer())
np_output = self.evaluate(dropped)
self.assertAlmostEqual(0.0, np_output.min())
dropped = core_layers.dropout(inputs, 0.5, training=False, seed=1)
np_output = self.evaluate(dropped)
self.assertAllClose(np.ones((5, 5)), np_output)
@tf_test_utils.run_deprecated_v1
def testDynamicRate(self):
with self.cached_session() as sess:
rate = tf.compat.v1.placeholder(dtype="float32", name="rate")
dp = core_layers.Dropout(rate, name="dropout")
inputs = tf.ones((5, 5))
dropped = dp(inputs, training=True)
self.evaluate(tf.compat.v1.global_variables_initializer())
np_output = sess.run(dropped, feed_dict={rate: 0.5})
self.assertAlmostEqual(0.0, np_output.min())
np_output = sess.run(dropped, feed_dict={rate: 0.0})
self.assertAllClose(np.ones((5, 5)), np_output)
class FlattenTest(tf.test.TestCase):
@tf_test_utils.run_deprecated_v1
def testCreateFlatten(self):
with self.cached_session() as sess:
x = tf.compat.v1.placeholder(shape=(None, 2, 3), dtype="float32")
y = core_layers.Flatten()(x)
np_output = sess.run(y, feed_dict={x: np.zeros((3, 2, 3))})
self.assertEqual(list(np_output.shape), [3, 6])
self.assertEqual(y.get_shape().as_list(), [None, 6])
x = tf.compat.v1.placeholder(shape=(1, 2, 3, 2), dtype="float32")
y = core_layers.Flatten()(x)
np_output = sess.run(y, feed_dict={x: np.zeros((1, 2, 3, 2))})
self.assertEqual(list(np_output.shape), [1, 12])
self.assertEqual(y.get_shape().as_list(), [1, 12])
def testComputeShape(self):
shape = core_layers.Flatten().compute_output_shape((1, 2, 3, 2))
self.assertEqual(shape.as_list(), [1, 12])
shape = core_layers.Flatten().compute_output_shape((None, 3, 2))
self.assertEqual(shape.as_list(), [None, 6])
shape = core_layers.Flatten().compute_output_shape((None, 3, None))
self.assertEqual(shape.as_list(), [None, None])
@tf_test_utils.run_deprecated_v1
def testDataFormat5d(self):
np_input_channels_last = np.arange(120, dtype="float32").reshape(
[1, 5, 4, 3, 2]
)
with self.test_session() as sess:
x = tf.compat.v1.placeholder(shape=(1, 5, 4, 3, 2), dtype="float32")
y = core_layers.Flatten(data_format="channels_last")(x)
np_output_cl = sess.run(y, feed_dict={x: np_input_channels_last})
x = tf.compat.v1.placeholder(shape=(1, 2, 5, 4, 3), dtype="float32")
y = core_layers.Flatten(data_format="channels_first")(x)
np_input_channels_first = np.transpose(
np_input_channels_last, [0, 4, 1, 2, 3]
)
np_output_cf = sess.run(y, feed_dict={x: np_input_channels_first})
self.assertAllEqual(np_output_cl, np_output_cf)
@tf_test_utils.run_deprecated_v1
def testDataFormat4d(self):
np_input_channels_last = np.arange(24, dtype="float32").reshape(
[1, 4, 3, 2]
)
with self.test_session() as sess:
x = tf.compat.v1.placeholder(shape=(1, 4, 3, 2), dtype="float32")
y = core_layers.Flatten(data_format="channels_last")(x)
np_output_cl = sess.run(y, feed_dict={x: np_input_channels_last})
x = tf.compat.v1.placeholder(shape=(1, 2, 4, 3), dtype="float32")
y = core_layers.Flatten(data_format="channels_first")(x)
np_input_channels_first = np.transpose(
np_input_channels_last, [0, 3, 1, 2]
)
np_output_cf = sess.run(y, feed_dict={x: np_input_channels_first})
self.assertAllEqual(np_output_cl, np_output_cf)
@tf_test_utils.run_deprecated_v1
def testFunctionalFlatten(self):
x = tf.compat.v1.placeholder(shape=(None, 2, 3), dtype="float32")
y = core_layers.flatten(x, name="flatten")
self.assertEqual(y.get_shape().as_list(), [None, 6])
@tf_test_utils.run_deprecated_v1
def testFlatten0D(self):
x = tf.compat.v1.placeholder(shape=(None,), dtype="float32")
y = core_layers.Flatten()(x)
with self.cached_session() as sess:
np_output = sess.run(y, feed_dict={x: np.zeros((5,))})
self.assertEqual(list(np_output.shape), [5, 1])
self.assertEqual(y.shape.as_list(), [None, 1])
@tf_test_utils.run_deprecated_v1
def testFlattenUnknownAxes(self):
with self.cached_session() as sess:
x = tf.compat.v1.placeholder(shape=(5, None, None), dtype="float32")
y = core_layers.Flatten()(x)
np_output = sess.run(y, feed_dict={x: np.zeros((5, 2, 3))})
self.assertEqual(list(np_output.shape), [5, 6])
self.assertEqual(y.get_shape().as_list(), [5, None])
x = tf.compat.v1.placeholder(shape=(5, None, 2), dtype="float32")
y = core_layers.Flatten()(x)
np_output = sess.run(y, feed_dict={x: np.zeros((5, 3, 2))})
self.assertEqual(list(np_output.shape), [5, 6])
self.assertEqual(y.get_shape().as_list(), [5, None])
@tf_test_utils.run_deprecated_v1
def testFlattenLargeDim(self):
if any(platform.win32_ver()):
self.skipTest(
"values are truncated on windows causing test failures"
)
x = tf.compat.v1.placeholder(
shape=(None, 21316, 21316, 80), dtype="float32"
)
y = core_layers.Flatten()(x)
self.assertEqual(y.shape.as_list(), [None, 21316 * 21316 * 80])
@tf_test_utils.run_deprecated_v1
def testFlattenLargeBatchDim(self):
batch_size = np.iinfo(np.int32).max + 10
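        # A batch dimension above the int32 maximum exercises int64 shape
        # handling in Flatten.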
x = tf.compat.v1.placeholder(
shape=(batch_size, None, None, 1), dtype="float32"
)
y = core_layers.Flatten()(x)
self.assertEqual(y.shape.as_list(), [batch_size, None])
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/legacy_tf_layers/core_test.py/0 | {
"file_path": "tf-keras/tf_keras/legacy_tf_layers/core_test.py",
"repo_id": "tf-keras",
"token_count": 12932
} | 234 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TF-Keras base Metric classes."""
import copy
import os
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
from tf_keras import Model
from tf_keras import layers
from tf_keras import metrics
from tf_keras.engine import base_layer
from tf_keras.engine import training as training_module
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class KerasSumTest(tf.test.TestCase, parameterized.TestCase):
def test_sum(self):
with self.test_session():
m = metrics.Sum(name="my_sum")
# check config
self.assertEqual(m.name, "my_sum")
self.assertTrue(m.stateful)
self.assertEqual(m.dtype, tf.float32)
self.assertLen(m.variables, 1)
self.evaluate(tf.compat.v1.variables_initializer(m.variables))
# check initial state
self.assertEqual(self.evaluate(m.total), 0)
# check __call__()
self.assertEqual(self.evaluate(m(100)), 100)
self.assertEqual(self.evaluate(m.total), 100)
# check update_state() and result() + state accumulation + tensor
# input
update_op = m.update_state(tf.convert_to_tensor([1, 5]))
self.evaluate(update_op)
self.assertAlmostEqual(self.evaluate(m.result()), 106)
self.assertEqual(self.evaluate(m.total), 106) # 100 + 1 + 5
# check reset_state()
m.reset_state()
self.assertEqual(self.evaluate(m.total), 0)
def test_sum_with_sample_weight(self):
m = metrics.Sum(dtype=tf.float64)
self.assertEqual(m.dtype, tf.float64)
self.evaluate(tf.compat.v1.variables_initializer(m.variables))
# check scalar weight
result_t = m(100, sample_weight=0.5)
self.assertEqual(self.evaluate(result_t), 50)
self.assertEqual(self.evaluate(m.total), 50)
# check weights not scalar and weights rank matches values rank
result_t = m([1, 5], sample_weight=[1, 0.2])
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 52.0, 4) # 50 + 1 + 5 * 0.2
self.assertAlmostEqual(self.evaluate(m.total), 52.0, 4)
# check weights broadcast
result_t = m([1, 2], sample_weight=0.5)
self.assertAlmostEqual(self.evaluate(result_t), 53.5, 1) # 52 + 0.5 + 1
self.assertAlmostEqual(self.evaluate(m.total), 53.5, 1)
# check weights squeeze
result_t = m([1, 5], sample_weight=[[1], [0.2]])
self.assertAlmostEqual(self.evaluate(result_t), 55.5, 1) # 53.5 + 1 + 1
self.assertAlmostEqual(self.evaluate(m.total), 55.5, 1)
# check weights expand
result_t = m([[1], [5]], sample_weight=[1, 0.2])
self.assertAlmostEqual(self.evaluate(result_t), 57.5, 2) # 55.5 + 1 + 1
self.assertAlmostEqual(self.evaluate(m.total), 57.5, 1)
# check values reduced to the dimensions of weight
result_t = m(
[[[1.0, 2.0], [3.0, 2.0], [0.5, 4.0]]], sample_weight=[0.5]
)
result = np.round(self.evaluate(result_t), decimals=2)
# result = (prev: 57.5) + 0.5 + 1 + 1.5 + 1 + 0.25 + 2
self.assertAlmostEqual(result, 63.75, 2)
self.assertAlmostEqual(self.evaluate(m.total), 63.75, 2)
def test_sum_graph_with_placeholder(self):
with tf.compat.v1.get_default_graph().as_default(), self.cached_session() as sess: # noqa: E501
m = metrics.Sum()
v = tf.compat.v1.placeholder(tf.float32)
w = tf.compat.v1.placeholder(tf.float32)
self.evaluate(tf.compat.v1.variables_initializer(m.variables))
# check __call__()
result_t = m(v, sample_weight=w)
result = sess.run(result_t, feed_dict=({v: 100, w: 0.5}))
self.assertEqual(result, 50)
self.assertEqual(self.evaluate(m.total), 50)
# check update_state() and result()
result = sess.run(result_t, feed_dict=({v: [1, 5], w: [1, 0.2]}))
self.assertAlmostEqual(result, 52.0, 2) # 50 + 1 + 5 * 0.2
self.assertAlmostEqual(self.evaluate(m.total), 52.0, 2)
def test_save_restore(self):
with self.test_session():
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
m = metrics.Sum()
checkpoint = tf.train.Checkpoint(sum=m)
self.evaluate(tf.compat.v1.variables_initializer(m.variables))
# update state
self.evaluate(m(100.0))
self.evaluate(m(200.0))
# save checkpoint and then add an update
save_path = checkpoint.save(checkpoint_prefix)
self.evaluate(m(1000.0))
# restore to the same checkpoint sum object (= 300)
checkpoint.restore(save_path).assert_consumed().run_restore_ops()
self.evaluate(m(300.0))
self.assertEqual(600.0, self.evaluate(m.result()))
# restore to a different checkpoint sum object
restore_sum = metrics.Sum()
restore_checkpoint = tf.train.Checkpoint(sum=restore_sum)
status = restore_checkpoint.restore(save_path)
restore_update = restore_sum(300.0)
status.assert_consumed().run_restore_ops()
self.evaluate(restore_update)
self.assertEqual(600.0, self.evaluate(restore_sum.result()))
def test_init_scope_during_add_weight(self):
seen_variables = 0
def capture_variable_creation(next_creator_fn, **kwargs) -> tf.Variable:
nonlocal seen_variables
seen_variables += 1
return tf.constant(seen_variables)
@tf.function
def create_variables():
# When this method is called in a graph context, any usage of
# `tf.init_scope` will bypass this variable creator scope, resulting
# in different behavior.
with tf.variable_creator_scope(capture_variable_creation):
return metrics.Sum().variables
metric_variables = self.evaluate(create_variables())
# The Sum metric contains a single `total` variable, which the creation
# scope has changed to a `1` tensor.
self.assertAllEqual([1], metric_variables)
class MeanTest(test_combinations.TestCase):
# TODO(b/120949004): Re-enable garbage collection check
# @tf_test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
@test_combinations.run_all_keras_modes
def test_mean(self):
m = metrics.Mean(name="my_mean")
# check config
self.assertEqual(m.name, "my_mean")
self.assertTrue(m.stateful)
self.assertEqual(m.dtype, tf.float32)
self.assertEqual(len(m.variables), 2)
self.evaluate(tf.compat.v1.variables_initializer(m.variables))
# check initial state
self.assertEqual(self.evaluate(m.total), 0)
self.assertEqual(self.evaluate(m.count), 0)
# check __call__()
self.assertEqual(self.evaluate(m(100)), 100)
self.assertEqual(self.evaluate(m.total), 100)
self.assertEqual(self.evaluate(m.count), 1)
# check update_state() and result() + state accumulation + tensor input
update_op = m.update_state(
[tf.convert_to_tensor(1), tf.convert_to_tensor(5)]
)
self.evaluate(update_op)
self.assertAlmostEqual(self.evaluate(m.result()), 106 / 3, 2)
self.assertEqual(self.evaluate(m.total), 106) # 100 + 1 + 5
self.assertEqual(self.evaluate(m.count), 3)
# check reset_state()
m.reset_state()
self.assertEqual(self.evaluate(m.total), 0)
self.assertEqual(self.evaluate(m.count), 0)
# Check save and restore config
m2 = metrics.Mean.from_config(m.get_config())
self.assertEqual(m2.name, "my_mean")
self.assertTrue(m2.stateful)
self.assertEqual(m2.dtype, tf.float32)
self.assertEqual(len(m2.variables), 2)
@test_utils.run_v2_only
def test_function_wrapped_reset_state(self):
m = metrics.Mean(name="my_mean")
# check reset_state in function.
@tf.function
def reset_in_fn():
m.reset_state()
m.update_state(100)
for _ in range(5):
self.evaluate(reset_in_fn())
self.assertEqual(self.evaluate(m.count), 1)
@test_combinations.run_all_keras_modes
def test_mean_with_sample_weight(self):
m = metrics.Mean(dtype=tf.float64)
self.assertEqual(m.dtype, tf.float64)
self.evaluate(tf.compat.v1.variables_initializer(m.variables))
# check scalar weight
result_t = m(100, sample_weight=0.5)
self.assertEqual(self.evaluate(result_t), 50 / 0.5)
self.assertEqual(self.evaluate(m.total), 50)
self.assertEqual(self.evaluate(m.count), 0.5)
# check weights not scalar and weights rank matches values rank
result_t = m([1, 5], sample_weight=[1, 0.2])
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 52 / 1.7, 2)
self.assertAlmostEqual(
self.evaluate(m.total), 52, 2
) # 50 + 1 + 5 * 0.2
self.assertAlmostEqual(self.evaluate(m.count), 1.7, 2) # 0.5 + 1.2
# check weights broadcast
result_t = m([1, 2], sample_weight=0.5)
self.assertAlmostEqual(self.evaluate(result_t), 53.5 / 2.7, 2)
self.assertAlmostEqual(self.evaluate(m.total), 53.5, 2) # 52 + 0.5 + 1
self.assertAlmostEqual(
self.evaluate(m.count), 2.7, 2
) # 1.7 + 0.5 + 0.5
# check weights squeeze
result_t = m([1, 5], sample_weight=[[1], [0.2]])
self.assertAlmostEqual(self.evaluate(result_t), 55.5 / 3.9, 2)
self.assertAlmostEqual(self.evaluate(m.total), 55.5, 2) # 53.5 + 1 + 1
self.assertAlmostEqual(self.evaluate(m.count), 3.9, 2) # 2.7 + 1.2
# check weights expand
result_t = m([[1], [5]], sample_weight=[1, 0.2])
self.assertAlmostEqual(self.evaluate(result_t), 57.5 / 5.1, 2)
self.assertAlmostEqual(self.evaluate(m.total), 57.5, 2) # 55.5 + 1 + 1
self.assertAlmostEqual(self.evaluate(m.count), 5.1, 2) # 3.9 + 1.2
# check values reduced to the dimensions of weight
result_t = m(
[[[1.0, 2.0], [3.0, 2.0], [0.5, 4.0]]], sample_weight=[0.5]
)
result = np.round(self.evaluate(result_t), decimals=2) # 58.5 / 5.6
self.assertEqual(result, 10.45)
self.assertEqual(np.round(self.evaluate(m.total), decimals=2), 58.54)
self.assertEqual(np.round(self.evaluate(m.count), decimals=2), 5.6)
@test_combinations.run_all_keras_modes
def test_mean_graph_with_placeholder(self):
with tf.compat.v1.get_default_graph().as_default(), self.cached_session() as sess: # noqa: E501
m = metrics.Mean()
v = tf.compat.v1.placeholder(tf.float32)
w = tf.compat.v1.placeholder(tf.float32)
self.evaluate(tf.compat.v1.variables_initializer(m.variables))
# check __call__()
result_t = m(v, sample_weight=w)
result = sess.run(result_t, feed_dict=({v: 100, w: 0.5}))
self.assertEqual(self.evaluate(m.total), 50)
self.assertEqual(self.evaluate(m.count), 0.5)
self.assertEqual(result, 50 / 0.5)
# check update_state() and result()
result = sess.run(result_t, feed_dict=({v: [1, 5], w: [1, 0.2]}))
self.assertAlmostEqual(
self.evaluate(m.total), 52, 2
) # 50 + 1 + 5 * 0.2
self.assertAlmostEqual(self.evaluate(m.count), 1.7, 2) # 0.5 + 1.2
self.assertAlmostEqual(result, 52 / 1.7, 2)
@test_combinations.run_all_keras_modes
def test_save_restore(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
m = metrics.Mean()
checkpoint = tf.train.Checkpoint(mean=m)
self.evaluate(tf.compat.v1.variables_initializer(m.variables))
# update state
self.evaluate(m(100.0))
self.evaluate(m(200.0))
# save checkpoint and then add an update
save_path = checkpoint.save(checkpoint_prefix)
self.evaluate(m(1000.0))
# restore to the same checkpoint mean object
checkpoint.restore(save_path).assert_consumed().run_restore_ops()
self.evaluate(m(300.0))
self.assertEqual(200.0, self.evaluate(m.result()))
# restore to a different checkpoint mean object
restore_mean = metrics.Mean()
restore_checkpoint = tf.train.Checkpoint(mean=restore_mean)
status = restore_checkpoint.restore(save_path)
restore_update = restore_mean(300.0)
status.assert_consumed().run_restore_ops()
self.evaluate(restore_update)
self.assertEqual(200.0, self.evaluate(restore_mean.result()))
self.assertEqual(3, self.evaluate(restore_mean.count))
@test_combinations.run_all_keras_modes
def test_multiple_instances(self):
m = metrics.Mean()
m2 = metrics.Mean()
self.assertEqual(m.name, "mean")
self.assertEqual(m2.name, "mean")
self.assertEqual(
[v.name for v in m.variables],
test_utils.get_expected_metric_variable_names(["total", "count"]),
)
self.assertEqual(
[v.name for v in m2.variables],
test_utils.get_expected_metric_variable_names(
["total", "count"], name_suffix="_1"
),
)
self.evaluate(tf.compat.v1.variables_initializer(m.variables))
self.evaluate(tf.compat.v1.variables_initializer(m2.variables))
# check initial state
self.assertEqual(self.evaluate(m.total), 0)
self.assertEqual(self.evaluate(m.count), 0)
self.assertEqual(self.evaluate(m2.total), 0)
self.assertEqual(self.evaluate(m2.count), 0)
# check __call__()
self.assertEqual(self.evaluate(m(100)), 100)
self.assertEqual(self.evaluate(m.total), 100)
self.assertEqual(self.evaluate(m.count), 1)
self.assertEqual(self.evaluate(m2.total), 0)
self.assertEqual(self.evaluate(m2.count), 0)
self.assertEqual(self.evaluate(m2([63, 10])), 36.5)
self.assertEqual(self.evaluate(m2.total), 73)
self.assertEqual(self.evaluate(m2.count), 2)
self.assertEqual(self.evaluate(m.result()), 100)
self.assertEqual(self.evaluate(m.total), 100)
self.assertEqual(self.evaluate(m.count), 1)
@test_utils.run_v2_only
def test_deepcopy_of_metrics(self):
m = metrics.Mean(name="my_mean")
m.reset_state()
m.update_state(100)
m_copied = copy.deepcopy(m)
m_copied.update_state(200)
self.assertEqual(self.evaluate(m.result()), 100)
self.assertEqual(self.evaluate(m_copied.result()), 150)
m.reset_state()
self.assertEqual(self.evaluate(m.result()), 0)
self.assertEqual(self.evaluate(m_copied.result()), 150)
class MeanTensorTest(tf.test.TestCase, parameterized.TestCase):
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_config(self):
with self.test_session():
m = metrics.MeanTensor(name="mean_by_element")
# check config
self.assertEqual(m.name, "mean_by_element")
self.assertTrue(m.stateful)
self.assertEqual(m.dtype, tf.float32)
self.assertEmpty(m.variables)
with self.assertRaisesRegex(
ValueError, "does not have any value yet"
):
m.result()
self.evaluate(m([[3], [5], [3]]))
self.assertAllEqual(m._shape, [3, 1])
m2 = metrics.MeanTensor.from_config(m.get_config())
self.assertEqual(m2.name, "mean_by_element")
self.assertTrue(m2.stateful)
self.assertEqual(m2.dtype, tf.float32)
self.assertEmpty(m2.variables)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_unweighted(self):
with self.test_session():
m = metrics.MeanTensor(dtype=tf.float64)
# check __call__()
self.assertAllClose(self.evaluate(m([100, 40])), [100, 40])
self.assertAllClose(self.evaluate(m.total), [100, 40])
self.assertAllClose(self.evaluate(m.count), [1, 1])
# check update_state() and result() + state accumulation + tensor
# input
update_op = m.update_state(
[tf.convert_to_tensor(1), tf.convert_to_tensor(5)]
)
self.evaluate(update_op)
self.assertAllClose(self.evaluate(m.result()), [50.5, 22.5])
self.assertAllClose(self.evaluate(m.total), [101, 45])
self.assertAllClose(self.evaluate(m.count), [2, 2])
# check reset_state()
m.reset_state()
self.assertAllClose(self.evaluate(m.total), [0, 0])
self.assertAllClose(self.evaluate(m.count), [0, 0])
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_weighted(self):
with self.test_session():
m = metrics.MeanTensor(dtype=tf.float64)
self.assertEqual(m.dtype, tf.float64)
# check scalar weight
result_t = m([100, 30], sample_weight=0.5)
self.assertAllClose(self.evaluate(result_t), [100, 30])
self.assertAllClose(self.evaluate(m.total), [50, 15])
self.assertAllClose(self.evaluate(m.count), [0.5, 0.5])
# check weights not scalar and weights rank matches values rank
result_t = m([1, 5], sample_weight=[1, 0.2])
result = self.evaluate(result_t)
self.assertAllClose(result, [51 / 1.5, 16 / 0.7], 2)
self.assertAllClose(self.evaluate(m.total), [51, 16])
self.assertAllClose(self.evaluate(m.count), [1.5, 0.7])
# check weights broadcast
result_t = m([1, 2], sample_weight=0.5)
self.assertAllClose(self.evaluate(result_t), [51.5 / 2, 17 / 1.2])
self.assertAllClose(self.evaluate(m.total), [51.5, 17])
self.assertAllClose(self.evaluate(m.count), [2, 1.2])
# check weights squeeze
result_t = m([1, 5], sample_weight=[[1], [0.2]])
self.assertAllClose(self.evaluate(result_t), [52.5 / 3, 18 / 1.4])
self.assertAllClose(self.evaluate(m.total), [52.5, 18])
self.assertAllClose(self.evaluate(m.count), [3, 1.4])
# check weights expand
m = metrics.MeanTensor(dtype=tf.float64)
self.evaluate(tf.compat.v1.variables_initializer(m.variables))
result_t = m([[1], [5]], sample_weight=[1, 0.2])
self.assertAllClose(self.evaluate(result_t), [[1], [5]])
self.assertAllClose(self.evaluate(m.total), [[1], [1]])
self.assertAllClose(self.evaluate(m.count), [[1], [0.2]])
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_invalid_value_shape(self):
m = metrics.MeanTensor(dtype=tf.float64)
m([1])
with self.assertRaisesRegex(
ValueError,
"MeanTensor input values must always have the same shape",
):
m([1, 5])
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_build_in_tf_function(self):
"""Ensure that variables are created correctly in a tf function."""
m = metrics.MeanTensor(dtype=tf.float64)
@tf.function
def call_metric(x):
return m(x)
with self.test_session():
self.assertAllClose(
self.evaluate(call_metric([100, 40])), [100, 40]
)
self.assertAllClose(self.evaluate(m.total), [100, 40])
self.assertAllClose(self.evaluate(m.count), [1, 1])
self.assertAllClose(self.evaluate(call_metric([20, 2])), [60, 21])
@test_combinations.generate(test_combinations.combine(mode=["eager"]))
def test_in_keras_model(self):
class ModelWithMetric(Model):
def __init__(self):
super().__init__()
self.dense1 = layers.Dense(
3, activation="relu", kernel_initializer="ones"
)
self.dense2 = layers.Dense(
1, activation="sigmoid", kernel_initializer="ones"
)
self.mean_tensor = metrics.MeanTensor()
def call(self, x):
x = self.dense1(x)
x = self.dense2(x)
self.mean_tensor(self.dense1.kernel)
return x
model = ModelWithMetric()
model.compile(loss="mae", optimizer="rmsprop", run_eagerly=True)
x = np.ones((100, 4))
y = np.zeros((100, 1))
model.evaluate(x, y, batch_size=50)
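        # batch_size=50 over 100 samples runs two batches, so the all-ones
        # kernel is observed twice: total == count == 2 and the mean stays 1.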
self.assertAllClose(
self.evaluate(model.mean_tensor.result()), np.ones((4, 3))
)
self.assertAllClose(
self.evaluate(model.mean_tensor.total), np.full((4, 3), 2)
)
self.assertAllClose(
self.evaluate(model.mean_tensor.count), np.full((4, 3), 2)
)
model.evaluate(x, y, batch_size=25)
self.assertAllClose(
self.evaluate(model.mean_tensor.result()), np.ones((4, 3))
)
self.assertAllClose(
self.evaluate(model.mean_tensor.total), np.full((4, 3), 4)
)
self.assertAllClose(
self.evaluate(model.mean_tensor.count), np.full((4, 3), 4)
)
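# A minimal custom metric used by the tests below: it accumulates the number
# of positions where both y_true and y_pred are True, optionally weighted.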
class BinaryTruePositives(metrics.Metric):
def __init__(self, name="binary_true_positives", **kwargs):
super().__init__(name=name, **kwargs)
self.true_positives = self.add_weight(name="tp", initializer="zeros")
def update_state(self, y_true, y_pred, sample_weight=None):
y_true = tf.cast(y_true, tf.bool)
y_pred = tf.cast(y_pred, tf.bool)
values = tf.logical_and(tf.equal(y_true, True), tf.equal(y_pred, True))
values = tf.cast(values, self.dtype)
if sample_weight is not None:
sample_weight = tf.cast(sample_weight, dtype=self.dtype)
sample_weight = tf.__internal__.ops.broadcast_weights(
sample_weight, values
)
values = tf.multiply(values, sample_weight)
self.true_positives.assign_add(tf.reduce_sum(values))
def result(self):
return self.true_positives
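# The same metric written with Python loops and conditionals, used to verify
# that AutoGraph converts the control flow correctly inside `tf.function`.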
class BinaryTruePositivesViaControlFlow(metrics.Metric):
def __init__(self, name="binary_true_positives", **kwargs):
super().__init__(name=name, **kwargs)
self.true_positives = self.add_weight(name="tp", initializer="zeros")
def update_state(self, y_true, y_pred, sample_weight=None):
y_true = tf.cast(y_true, tf.bool)
y_pred = tf.cast(y_pred, tf.bool)
for i in range(len(y_true)):
for j in range(len(y_true[i])):
if y_true[i][j] and y_pred[i][j]:
if sample_weight is None:
self.true_positives.assign_add(1)
else:
self.true_positives.assign_add(sample_weight[i][0])
def result(self):
if tf.constant(True):
return self.true_positives
return 0.0
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class CustomMetricsTest(tf.test.TestCase):
def test_config(self):
btp_obj = BinaryTruePositives(name="btp", dtype=tf.int32)
self.assertEqual(btp_obj.name, "btp")
self.assertEqual(btp_obj.dtype, tf.int32)
# Check save and restore config
btp_obj2 = BinaryTruePositives.from_config(btp_obj.get_config())
self.assertEqual(btp_obj2.name, "btp")
self.assertEqual(btp_obj2.dtype, tf.int32)
def test_unweighted(self):
btp_obj = BinaryTruePositives()
self.evaluate(tf.compat.v1.variables_initializer(btp_obj.variables))
y_true = tf.constant(
[
[0, 0.9, 0, 1, 0],
[0, 0, 1, 1, 1],
[1, 1, 1, 1, 0],
[0, 0, 0, 0, 1.5],
]
)
y_pred = tf.constant(
[
[0, 0, 1, 5, 0],
[1, 1, 1, 1, 1],
[0, 1, 0, 1, 0],
[1, 10, 1, 1, 1],
]
)
update_op = btp_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = btp_obj.result()
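        # True positives per row: 1 + 3 + 2 + 1 = 7.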
self.assertEqual(7, self.evaluate(result))
def test_weighted(self):
btp_obj = BinaryTruePositives()
self.evaluate(tf.compat.v1.variables_initializer(btp_obj.variables))
y_true = tf.constant(
[
[0, 0.9, 0, 1, 0],
[0, 0, 1, 1, 1],
[1, 1, 1, 1, 0],
[0, 0, 0, 0, 1.5],
]
)
y_pred = tf.constant(
[
[0, 0, 1, 5, 0],
[1, 1, 1, 1, 1],
[0, 1, 0, 1, 0],
[1, 10, 1, 1, 1],
]
)
sample_weight = tf.constant([[1.0], [1.5], [2.0], [2.5]])
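        # Weighted true positives: 1*1.0 + 3*1.5 + 2*2.0 + 1*2.5 = 12.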
result = btp_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertEqual(12, self.evaluate(result))
def test_autograph(self):
metric = BinaryTruePositivesViaControlFlow()
self.evaluate(tf.compat.v1.variables_initializer(metric.variables))
y_true = tf.constant(
[
[0, 0.9, 0, 1, 0],
[0, 0, 1, 1, 1],
[1, 1, 1, 1, 0],
[0, 0, 0, 0, 1.5],
]
)
y_pred = tf.constant(
[
[0, 0, 1, 5, 0],
[1, 1, 1, 1, 1],
[0, 1, 0, 1, 0],
[1, 10, 1, 1, 1],
]
)
sample_weight = tf.constant([[1.0], [1.5], [2.0], [2.5]])
@tf.function
def compute_metric(y_true, y_pred, sample_weight):
metric(y_true, y_pred, sample_weight)
return metric.result()
result = compute_metric(y_true, y_pred, sample_weight)
self.assertEqual(12, self.evaluate(result))
def test_metric_wrappers_autograph(self):
def metric_fn(y_true, y_pred):
x = tf.constant(0.0)
for i in range(len(y_true)):
for j in range(len(y_true[i])):
if (
tf.equal(y_true[i][j], y_pred[i][j])
and y_true[i][j] > 0
):
x += 1.0
return x
mean_metric = metrics.MeanMetricWrapper(metric_fn)
sum_metric = metrics.SumOverBatchSizeMetricWrapper(metric_fn)
self.evaluate(tf.compat.v1.variables_initializer(mean_metric.variables))
self.evaluate(tf.compat.v1.variables_initializer(sum_metric.variables))
y_true = tf.constant(
[[0, 0, 0, 1, 0], [0, 0, 1, 1, 1], [1, 1, 1, 1, 0], [1, 1, 1, 0, 1]]
)
y_pred = tf.constant(
[[0, 0, 1, 1, 0], [1, 1, 1, 1, 1], [0, 1, 0, 1, 0], [1, 1, 1, 1, 1]]
)
@tf.function
def tf_functioned_metric_fn(metric, y_true, y_pred):
return metric(y_true, y_pred)
metric_result = tf_functioned_metric_fn(mean_metric, y_true, y_pred)
self.assertAllClose(self.evaluate(metric_result), 10, 1e-2)
metric_result = tf_functioned_metric_fn(sum_metric, y_true, y_pred)
self.assertAllClose(self.evaluate(metric_result), 10, 1e-2)
def test_metric_not_tracked_as_sublayer_in_layer(self):
class MyLayer(base_layer.Layer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.mean_obj = metrics.Mean(name="my_mean_obj")
def call(self, x):
self.add_metric(
tf.reduce_sum(x), aggregation="mean", name="my_mean_tensor"
)
self.add_metric(self.mean_obj(x))
return x
layer = MyLayer()
x = np.ones((1, 1))
layer(x)
self.assertLen(list(layer._flatten_layers(include_self=False)), 0)
self.assertLen(layer.metrics, 2)
def test_metric_not_tracked_as_sublayer_in_model(self):
class MyModel(training_module.Model):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.mean_obj = metrics.Mean(name="my_mean_obj")
def call(self, x):
self.add_metric(
tf.reduce_sum(x), aggregation="mean", name="my_mean_tensor"
)
self.add_metric(self.mean_obj(x))
return x
model = MyModel()
x = np.ones((1, 1))
model(x)
self.assertLen(list(model._flatten_layers(include_self=False)), 0)
self.assertLen(model.layers, 0)
self.assertLen(model.metrics, 2)
def test_invalid_custom_metric_class_error_msg(self):
x = layers.Input(shape=(2,))
y = layers.Dense(3)(x)
model = training_module.Model(x, y)
class BadMetric(metrics.Metric):
def update_state(self, y_true, y_pred, sample_weight=None):
return
def result(self):
return
with self.assertRaisesRegex(RuntimeError, "can only be a single"):
model.compile("sgd", "mse", metrics=[BadMetric()])
model.fit(np.ones((10, 2)), np.ones((10, 3)))
def test_invalid_custom_metric_fn_error_msg(self):
x = layers.Input(shape=(2,))
y = layers.Dense(3)(x)
model = training_module.Model(x, y)
def bad_metric(y_true, y_pred, sample_weight=None):
return None
def dict_metric(y_true, y_pred, sample_weight=None):
return {"value": 0.0}
with self.assertRaisesRegex(
RuntimeError, "The output of a metric function can only be"
):
model.compile("sgd", "mse", metrics=[bad_metric])
model.fit(np.ones((10, 2)), np.ones((10, 3)))
with self.assertRaisesRegex(
RuntimeError, "To return a dict of values, implement"
):
model.compile("sgd", "mse", metrics=[dict_metric])
model.fit(np.ones((10, 2)), np.ones((10, 3)))
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/metrics/base_metric_test.py/0 | {
"file_path": "tf-keras/tf_keras/metrics/base_metric_test.py",
"repo_id": "tf-keras",
"token_count": 15601
} | 235 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TF-Keras metrics."""
import math
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
from tf_keras import Input
from tf_keras import metrics
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class CosineSimilarityTest(tf.test.TestCase):
def l2_norm(self, x, axis):
epsilon = 1e-12
square_sum = np.sum(np.square(x), axis=axis, keepdims=True)
x_inv_norm = 1 / np.sqrt(np.maximum(square_sum, epsilon))
return np.multiply(x, x_inv_norm)
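    # The expected similarity is the sum over `axis` of the elementwise
    # product of the L2-normalized inputs.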
def setup(self, axis=1):
self.np_y_true = np.asarray([[1, 9, 2], [-5, -2, 6]], dtype=np.float32)
self.np_y_pred = np.asarray([[4, 8, 12], [8, 1, 3]], dtype=np.float32)
y_true = self.l2_norm(self.np_y_true, axis)
y_pred = self.l2_norm(self.np_y_pred, axis)
self.expected_loss = np.sum(np.multiply(y_true, y_pred), axis=(axis,))
self.y_true = tf.constant(self.np_y_true)
self.y_pred = tf.constant(self.np_y_pred)
def test_config(self):
cosine_obj = metrics.CosineSimilarity(
axis=2, name="my_cos", dtype=tf.int32
)
self.assertEqual(cosine_obj.name, "my_cos")
self.assertEqual(cosine_obj._dtype, tf.int32)
# Check save and restore config
cosine_obj2 = metrics.CosineSimilarity.from_config(
cosine_obj.get_config()
)
self.assertEqual(cosine_obj2.name, "my_cos")
self.assertEqual(cosine_obj2._dtype, tf.int32)
def test_unweighted(self):
self.setup()
cosine_obj = metrics.CosineSimilarity()
self.evaluate(tf.compat.v1.variables_initializer(cosine_obj.variables))
loss = cosine_obj(self.y_true, self.y_pred)
expected_loss = np.mean(self.expected_loss)
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
def test_weighted(self):
self.setup()
cosine_obj = metrics.CosineSimilarity()
self.evaluate(tf.compat.v1.variables_initializer(cosine_obj.variables))
sample_weight = np.asarray([1.2, 3.4])
loss = cosine_obj(
self.y_true, self.y_pred, sample_weight=tf.constant(sample_weight)
)
expected_loss = np.sum(self.expected_loss * sample_weight) / np.sum(
sample_weight
)
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
def test_axis(self):
self.setup(axis=1)
cosine_obj = metrics.CosineSimilarity(axis=1)
self.evaluate(tf.compat.v1.variables_initializer(cosine_obj.variables))
loss = cosine_obj(self.y_true, self.y_pred)
expected_loss = np.mean(self.expected_loss)
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class MeanAbsoluteErrorTest(tf.test.TestCase):
def test_config(self):
mae_obj = metrics.MeanAbsoluteError(name="my_mae", dtype=tf.int32)
self.assertEqual(mae_obj.name, "my_mae")
self.assertEqual(mae_obj._dtype, tf.int32)
# Check save and restore config
mae_obj2 = metrics.MeanAbsoluteError.from_config(mae_obj.get_config())
self.assertEqual(mae_obj2.name, "my_mae")
self.assertEqual(mae_obj2._dtype, tf.int32)
def test_unweighted(self):
mae_obj = metrics.MeanAbsoluteError()
self.evaluate(tf.compat.v1.variables_initializer(mae_obj.variables))
y_true = tf.constant(
((0, 1, 0, 1, 0), (0, 0, 1, 1, 1), (1, 1, 1, 1, 0), (0, 0, 0, 0, 1))
)
y_pred = tf.constant(
((0, 0, 1, 1, 0), (1, 1, 1, 1, 1), (0, 1, 0, 1, 0), (1, 1, 1, 1, 1))
)
update_op = mae_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = mae_obj.result()
self.assertAllClose(0.5, result, atol=1e-5)
def test_weighted(self):
mae_obj = metrics.MeanAbsoluteError()
self.evaluate(tf.compat.v1.variables_initializer(mae_obj.variables))
y_true = tf.constant(
((0, 1, 0, 1, 0), (0, 0, 1, 1, 1), (1, 1, 1, 1, 0), (0, 0, 0, 0, 1))
)
y_pred = tf.constant(
((0, 0, 1, 1, 0), (1, 1, 1, 1, 1), (0, 1, 0, 1, 0), (1, 1, 1, 1, 1))
)
sample_weight = tf.constant((1.0, 1.5, 2.0, 2.5))
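        # Per-row MAE is [0.4, 0.4, 0.4, 0.8]; weighted mean is
        # (0.4*1 + 0.4*1.5 + 0.4*2 + 0.8*2.5) / 7 = 3.8 / 7 ~= 0.54286.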
result = mae_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(0.54285, self.evaluate(result), atol=1e-5)
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class MeanAbsolutePercentageErrorTest(tf.test.TestCase):
def test_config(self):
mape_obj = metrics.MeanAbsolutePercentageError(
name="my_mape", dtype=tf.int32
)
self.assertEqual(mape_obj.name, "my_mape")
self.assertEqual(mape_obj._dtype, tf.int32)
# Check save and restore config
mape_obj2 = metrics.MeanAbsolutePercentageError.from_config(
mape_obj.get_config()
)
self.assertEqual(mape_obj2.name, "my_mape")
self.assertEqual(mape_obj2._dtype, tf.int32)
def test_unweighted(self):
mape_obj = metrics.MeanAbsolutePercentageError()
self.evaluate(tf.compat.v1.variables_initializer(mape_obj.variables))
y_true = tf.constant(
((0, 1, 0, 1, 0), (0, 0, 1, 1, 1), (1, 1, 1, 1, 0), (0, 0, 0, 0, 1))
)
y_pred = tf.constant(
((0, 0, 1, 1, 0), (1, 1, 1, 1, 1), (0, 1, 0, 1, 0), (1, 1, 1, 1, 1))
)
update_op = mape_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = mape_obj.result()
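        # y_true contains zeros, which MAPE clips to the backend epsilon, so
        # mismatches against zero targets dominate and the mean comes out to
        # roughly 35e7.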
self.assertAllClose(35e7, result, atol=1e-5)
def test_weighted(self):
mape_obj = metrics.MeanAbsolutePercentageError()
self.evaluate(tf.compat.v1.variables_initializer(mape_obj.variables))
y_true = tf.constant(
((0, 1, 0, 1, 0), (0, 0, 1, 1, 1), (1, 1, 1, 1, 0), (0, 0, 0, 0, 1))
)
y_pred = tf.constant(
((0, 0, 1, 1, 0), (1, 1, 1, 1, 1), (0, 1, 0, 1, 0), (1, 1, 1, 1, 1))
)
sample_weight = tf.constant((1.0, 1.5, 2.0, 2.5))
result = mape_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(40e7, self.evaluate(result), atol=1e-5)
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class MeanSquaredErrorTest(tf.test.TestCase):
def test_config(self):
mse_obj = metrics.MeanSquaredError(name="my_mse", dtype=tf.int32)
self.assertEqual(mse_obj.name, "my_mse")
self.assertEqual(mse_obj._dtype, tf.int32)
# Check save and restore config
mse_obj2 = metrics.MeanSquaredError.from_config(mse_obj.get_config())
self.assertEqual(mse_obj2.name, "my_mse")
self.assertEqual(mse_obj2._dtype, tf.int32)
def test_unweighted(self):
mse_obj = metrics.MeanSquaredError()
self.evaluate(tf.compat.v1.variables_initializer(mse_obj.variables))
y_true = tf.constant(
((0, 1, 0, 1, 0), (0, 0, 1, 1, 1), (1, 1, 1, 1, 0), (0, 0, 0, 0, 1))
)
y_pred = tf.constant(
((0, 0, 1, 1, 0), (1, 1, 1, 1, 1), (0, 1, 0, 1, 0), (1, 1, 1, 1, 1))
)
update_op = mse_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = mse_obj.result()
self.assertAllClose(0.5, result, atol=1e-5)
def test_weighted(self):
mse_obj = metrics.MeanSquaredError()
self.evaluate(tf.compat.v1.variables_initializer(mse_obj.variables))
y_true = tf.constant(
((0, 1, 0, 1, 0), (0, 0, 1, 1, 1), (1, 1, 1, 1, 0), (0, 0, 0, 0, 1))
)
y_pred = tf.constant(
((0, 0, 1, 1, 0), (1, 1, 1, 1, 1), (0, 1, 0, 1, 0), (1, 1, 1, 1, 1))
)
sample_weight = tf.constant((1.0, 1.5, 2.0, 2.5))
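        # For 0/1 values the squared error equals the absolute error, so this
        # matches the weighted MAE: 3.8 / 7 ~= 0.54286.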
result = mse_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(0.54285, self.evaluate(result), atol=1e-5)
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class MeanSquaredLogarithmicErrorTest(tf.test.TestCase):
def test_config(self):
msle_obj = metrics.MeanSquaredLogarithmicError(
name="my_msle", dtype=tf.int32
)
self.assertEqual(msle_obj.name, "my_msle")
self.assertEqual(msle_obj._dtype, tf.int32)
# Check save and restore config
msle_obj2 = metrics.MeanSquaredLogarithmicError.from_config(
msle_obj.get_config()
)
self.assertEqual(msle_obj2.name, "my_msle")
self.assertEqual(msle_obj2._dtype, tf.int32)
def test_unweighted(self):
msle_obj = metrics.MeanSquaredLogarithmicError()
self.evaluate(tf.compat.v1.variables_initializer(msle_obj.variables))
y_true = tf.constant(
((0, 1, 0, 1, 0), (0, 0, 1, 1, 1), (1, 1, 1, 1, 0), (0, 0, 0, 0, 1))
)
y_pred = tf.constant(
((0, 0, 1, 1, 0), (1, 1, 1, 1, 1), (0, 1, 0, 1, 0), (1, 1, 1, 1, 1))
)
update_op = msle_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = msle_obj.result()
self.assertAllClose(0.24022, result, atol=1e-5)
def test_weighted(self):
msle_obj = metrics.MeanSquaredLogarithmicError()
self.evaluate(tf.compat.v1.variables_initializer(msle_obj.variables))
y_true = tf.constant(
((0, 1, 0, 1, 0), (0, 0, 1, 1, 1), (1, 1, 1, 1, 0), (0, 0, 0, 0, 1))
)
y_pred = tf.constant(
((0, 0, 1, 1, 0), (1, 1, 1, 1, 1), (0, 1, 0, 1, 0), (1, 1, 1, 1, 1))
)
sample_weight = tf.constant((1.0, 1.5, 2.0, 2.5))
result = msle_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(0.26082, self.evaluate(result), atol=1e-5)
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class RootMeanSquaredErrorTest(tf.test.TestCase):
def test_config(self):
rmse_obj = metrics.RootMeanSquaredError(name="rmse", dtype=tf.int32)
self.assertEqual(rmse_obj.name, "rmse")
self.assertEqual(rmse_obj._dtype, tf.int32)
rmse_obj2 = metrics.RootMeanSquaredError.from_config(
rmse_obj.get_config()
)
self.assertEqual(rmse_obj2.name, "rmse")
self.assertEqual(rmse_obj2._dtype, tf.int32)
def test_unweighted(self):
rmse_obj = metrics.RootMeanSquaredError()
self.evaluate(tf.compat.v1.variables_initializer(rmse_obj.variables))
y_true = tf.constant((2, 4, 6))
y_pred = tf.constant((1, 3, 2))
update_op = rmse_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = rmse_obj.result()
# error = [-1, -1, -4], square(error) = [1, 1, 16], mean = 18/3 = 6
self.assertAllClose(math.sqrt(6), result, atol=1e-3)
def test_weighted(self):
rmse_obj = metrics.RootMeanSquaredError()
self.evaluate(tf.compat.v1.variables_initializer(rmse_obj.variables))
y_true = tf.constant((2, 4, 6, 8))
y_pred = tf.constant((1, 3, 2, 3))
sample_weight = tf.constant((0, 1, 0, 1))
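        # error = [-1, -1, -4, -5], square(error) = [1, 1, 16, 25],
        # weighted mean = (1 + 25) / 2 = 13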
result = rmse_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(math.sqrt(13), self.evaluate(result), atol=1e-3)
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class LogCoshErrorTest(tf.test.TestCase):
def setup(self):
y_pred = np.asarray([1, 9, 2, -5, -2, 6]).reshape((2, 3))
y_true = np.asarray([4, 8, 12, 8, 1, 3]).reshape((2, 3))
self.batch_size = 6
error = y_pred - y_true
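        # logcosh(x) = log((exp(x) + exp(-x)) / 2), i.e. log(cosh(x)).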
self.expected_results = np.log((np.exp(error) + np.exp(-error)) / 2)
self.y_pred = tf.constant(y_pred, dtype=tf.float32)
self.y_true = tf.constant(y_true)
def test_config(self):
logcosh_obj = metrics.LogCoshError(name="logcosh", dtype=tf.int32)
self.assertEqual(logcosh_obj.name, "logcosh")
self.assertEqual(logcosh_obj._dtype, tf.int32)
def test_unweighted(self):
self.setup()
logcosh_obj = metrics.LogCoshError()
self.evaluate(tf.compat.v1.variables_initializer(logcosh_obj.variables))
update_op = logcosh_obj.update_state(self.y_true, self.y_pred)
self.evaluate(update_op)
result = logcosh_obj.result()
expected_result = np.sum(self.expected_results) / self.batch_size
self.assertAllClose(result, expected_result, atol=1e-3)
def test_weighted(self):
self.setup()
logcosh_obj = metrics.LogCoshError()
self.evaluate(tf.compat.v1.variables_initializer(logcosh_obj.variables))
sample_weight = tf.constant([1.2, 3.4], shape=(2, 1))
result = logcosh_obj(
self.y_true, self.y_pred, sample_weight=sample_weight
)
sample_weight = np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape(
(2, 3)
)
expected_result = np.multiply(self.expected_results, sample_weight)
expected_result = np.sum(expected_result) / np.sum(sample_weight)
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class MeanRelativeErrorTest(tf.test.TestCase):
def test_config(self):
normalizer = tf.constant([1, 3], dtype=tf.float32)
mre_obj = metrics.MeanRelativeError(normalizer=normalizer, name="mre")
self.assertEqual(mre_obj.name, "mre")
self.assertArrayNear(self.evaluate(mre_obj.normalizer), [1, 3], 1e-1)
mre_obj2 = metrics.MeanRelativeError.from_config(mre_obj.get_config())
self.assertEqual(mre_obj2.name, "mre")
self.assertArrayNear(self.evaluate(mre_obj2.normalizer), [1, 3], 1e-1)
def test_unweighted(self):
np_y_pred = np.asarray([2, 4, 6, 8], dtype=np.float32)
np_y_true = np.asarray([1, 3, 2, 3], dtype=np.float32)
expected_error = np.mean(
np.divide(np.absolute(np_y_pred - np_y_true), np_y_true)
)
y_pred = tf.constant(np_y_pred, shape=(1, 4), dtype=tf.float32)
y_true = tf.constant(np_y_true, shape=(1, 4))
mre_obj = metrics.MeanRelativeError(normalizer=y_true)
self.evaluate(tf.compat.v1.variables_initializer(mre_obj.variables))
result = mre_obj(y_true, y_pred)
self.assertAllClose(self.evaluate(result), expected_error, atol=1e-3)
def test_weighted(self):
np_y_pred = np.asarray([2, 4, 6, 8], dtype=np.float32)
np_y_true = np.asarray([1, 3, 2, 3], dtype=np.float32)
sample_weight = np.asarray([0.2, 0.3, 0.5, 0], dtype=np.float32)
rel_errors = np.divide(np.absolute(np_y_pred - np_y_true), np_y_true)
expected_error = np.sum(rel_errors * sample_weight)
y_pred = tf.constant(np_y_pred, dtype=tf.float32)
y_true = tf.constant(np_y_true)
mre_obj = metrics.MeanRelativeError(normalizer=y_true)
self.evaluate(tf.compat.v1.variables_initializer(mre_obj.variables))
result = mre_obj(
y_true, y_pred, sample_weight=tf.constant(sample_weight)
)
self.assertAllClose(self.evaluate(result), expected_error, atol=1e-3)
def test_zero_normalizer(self):
y_pred = tf.constant([2, 4], dtype=tf.float32)
y_true = tf.constant([1, 3])
mre_obj = metrics.MeanRelativeError(normalizer=tf.zeros_like(y_true))
self.evaluate(tf.compat.v1.variables_initializer(mre_obj.variables))
result = mre_obj(y_true, y_pred)
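        # An all-zero normalizer should produce 0 rather than inf/nan.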
self.assertEqual(self.evaluate(result), 0)
@test_utils.run_v2_only
class R2ScoreTest(parameterized.TestCase, tf.test.TestCase):
def _run_test(
self,
y_true,
y_pred,
sample_weights,
class_aggregation,
num_regressors,
reference_result,
):
y_true = tf.constant(y_true, dtype="float32")
y_pred = tf.constant(y_pred, dtype="float32")
r2 = metrics.R2Score(class_aggregation, num_regressors)
r2.update_state(y_true, y_pred, sample_weights)
result = r2.result().numpy()
self.assertAllClose(result, reference_result, atol=1e-6)
def test_config(self):
r2_obj = metrics.R2Score(
class_aggregation=None,
num_regressors=2,
)
self.assertEqual(r2_obj.class_aggregation, None)
self.assertEqual(r2_obj.num_regressors, 2)
self.assertEqual(r2_obj.dtype, tf.float32)
# Check save and restore config
r2_obj2 = metrics.R2Score.from_config(r2_obj.get_config())
self.assertEqual(r2_obj2.class_aggregation, None)
self.assertEqual(r2_obj2.num_regressors, 2)
self.assertEqual(r2_obj2.dtype, tf.float32)
@parameterized.parameters(
# class_aggregation, num_regressors, result
(None, 0, [0.37, -1.295, 0.565]),
("uniform_average", 0, -0.12),
("variance_weighted_average", 0, -0.12),
)
def test_r2_sklearn_comparison(
self, class_aggregation, num_regressors, result
):
y_true = [[0.0, 0.0, 1.0], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0]]
y_pred = [[0.4, 0.5, 0.6], [0.1, 0.2, 0.3], [0.5, 0.8, 0.2]]
self._run_test(
y_true,
y_pred,
None,
class_aggregation=class_aggregation,
num_regressors=num_regressors,
reference_result=result,
)
@parameterized.parameters(
# class_aggregation, num_regressors, result
(None, 0, [0.17305559, -8.836666, -0.521]),
(None, 1, [0.054920673, -10.241904, -0.7382858]),
(None, 2, [-0.10259259, -12.115555, -1.0280001]),
("uniform_average", 0, -3.0615367889404297),
("uniform_average", 1, -3.641756534576416),
("uniform_average", 2, -4.415382385253906),
("variance_weighted_average", 0, -1.3710224628448486),
("variance_weighted_average", 1, -1.7097399234771729),
("variance_weighted_average", 2, -2.161363363265991),
)
def test_r2_tfa_comparison(self, class_aggregation, num_regressors, result):
y_true = [[0.0, 0.0, 1.0], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0]]
y_pred = [[0.4, 0.9, 1.6], [0.1, 1.2, 0.6], [1.5, 0.8, 0.6]]
sample_weights = [0.8, 0.1, 0.4]
self._run_test(
y_true,
y_pred,
sample_weights,
class_aggregation=class_aggregation,
num_regressors=num_regressors,
reference_result=result,
)
def test_errors(self):
# Bad class_aggregation value
with self.assertRaisesRegex(
ValueError, "Invalid value for argument `class_aggregation`"
):
metrics.R2Score(class_aggregation="wrong")
# Bad num_regressors value
with self.assertRaisesRegex(
ValueError, "Invalid value for argument `num_regressors`"
):
metrics.R2Score(num_regressors=-1)
# Bad input shape
with self.assertRaisesRegex(ValueError, "expects 2D inputs with shape"):
r2 = metrics.R2Score()
r2.update_state(tf.constant([0.0, 1.0]), tf.constant([0.0, 1.0]))
with self.assertRaisesRegex(
ValueError, "with output_dim fully defined"
):
r2 = metrics.R2Score()
r2.update_state(Input(shape=(None,)), tf.constant([[0.0], [1.0]]))
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/metrics/regression_metrics_test.py/0 | {
"file_path": "tf-keras/tf_keras/metrics/regression_metrics_test.py",
"repo_id": "tf-keras",
"token_count": 9965
} | 236 |
# Description:
# Contains checkpoints and SavedModels for testing purposes.
package(
# copybara:uncomment default_applicable_licenses = ["//tf_keras:license"],
default_visibility = ["//tf_keras:friends"],
licenses = ["notice"],
)
# These files were generated by running the following program with TensorFlow
# 2.2rc2 (the final release of TF 2.2 was not out when this change was created):
# import os
# import numpy as np
# import tensorflow as tf
#
# tf.random.set_seed(1)
# opt = tf.keras.optimizers.SGD(0.1, momentum=0.1)
# opt = tf.keras.mixed_precision.experimental.LossScaleOptimizer(opt, 'dynamic')
# model = tf.keras.Sequential([tf.keras.layers.Dense(2)])
# model.compile(opt, 'mse')
#
# x = np.ones((10, 2))
# y = x * 100
# model.fit(x, y)
# weight_dir = os.environ['TF_LSO_WEIGHT_DIR']
# model_dir = os.environ['TF_LSO_MODEL_DIR']
# model.save_weights(weight_dir)
# model.save(model_dir)
# print(model.get_weights()[0])
# print(opt._optimizer.get_slot(model.weights[0], 'momentum'))
# print(opt.loss_scale)
filegroup(
name = "lso_ckpt_tf2.2",
srcs = glob(["lso_ckpt_tf2.2/**"]),
tags = ["no_pip"],
)
filegroup(
name = "lso_savedmodel_tf2.2",
srcs = glob(["lso_savedmodel_tf2.2/**"]),
tags = ["no_pip"],
)
| tf-keras/tf_keras/mixed_precision/testdata/BUILD/0 | {
"file_path": "tf-keras/tf_keras/mixed_precision/testdata/BUILD",
"repo_id": "tf-keras",
"token_count": 515
} | 237 |
# Description:
# Contains the TF-Keras Optimizer API.
# Placeholder: load unaliased py_library
load("@org_keras//tf_keras:tf_keras.bzl", "cuda_py_test")
# buildifier: disable=same-origin-load
load("@org_keras//tf_keras:tf_keras.bzl", "tf_py_test")
load("@org_keras//tf_keras:tf_keras.bzl", "distribute_py_test")
package(
# copybara:uncomment default_applicable_licenses = ["//tf_keras:license"],
default_visibility = [
"//tf_keras:friends",
"//third_party/tensorflow/python:__pkg__",
"//third_party/tensorflow/python/distribute:__pkg__",
"//third_party/tensorflow/python/saved_model:__pkg__", # For unit tests.
"//third_party/tensorflow/python/tpu/tests:__pkg__", # For unit tests.
"//third_party/tensorflow/python/trackable:__pkg__",
],
licenses = ["notice"],
)
py_library(
name = "optimizers",
srcs = [
"__init__.py",
"adadelta.py",
"adafactor.py",
"adagrad.py",
"adam.py",
"adamax.py",
"adamw.py",
"ftrl.py",
"lion.py",
"nadam.py",
"optimizer.py",
"optimizer_v1.py",
"rmsprop.py",
"sgd.py",
],
srcs_version = "PY3",
deps = [
":utils",
"//:expect_tensorflow_installed",
"//tf_keras:backend",
"//tf_keras/dtensor:utils",
"//tf_keras/optimizers/legacy:optimizers",
"//tf_keras/optimizers/schedules:learning_rate_schedule",
"//tf_keras/utils:engine_utils",
],
)
py_library(
name = "utils",
srcs = ["utils.py"],
srcs_version = "PY3",
deps = [
"//:expect_tensorflow_installed",
],
)
py_library(
name = "legacy_learning_rate_decay",
srcs = ["legacy_learning_rate_decay.py"],
srcs_version = "PY3",
deps = [
"//:expect_tensorflow_installed",
"//tf_keras/optimizers/schedules:learning_rate_schedule",
],
)
tf_py_test(
name = "optimizer_v1_test",
size = "medium",
srcs = ["optimizer_v1_test.py"],
python_version = "PY3",
shard_count = 8,
tags = ["notsan"],
deps = [
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_numpy_installed",
"//:expect_tensorflow_installed",
"//tf_keras",
"//tf_keras/testing_infra:test_combinations",
],
)
cuda_py_test(
name = "legacy_learning_rate_decay_test",
size = "medium",
srcs = ["legacy_learning_rate_decay_test.py"],
deps = [
":legacy_learning_rate_decay",
"//:expect_tensorflow_installed",
"//tf_keras",
"//tf_keras/testing_infra:test_combinations",
],
)
# TODO(b/228209527): Combine this test with optimizer_test after
# fixing the NCCL issue.
distribute_py_test(
name = "optimizer_pss_test",
size = "medium",
srcs = ["optimizer_pss_test.py"],
shard_count = 50,
tags = [
"multi_gpu",
"no_oss",
"no_windows",
],
deps = [
":optimizers",
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_tensorflow_installed",
"//tf_keras",
"//tf_keras/testing_infra:test_combinations",
],
)
distribute_py_test(
name = "optimizer_test",
size = "medium",
srcs = ["optimizer_test.py"],
shard_count = 16,
tags = [
"multi_gpu",
"no_windows",
"nomultivm", # TODO(b/203558991): Re-enable.
],
deps = [
":optimizers",
"//:expect_absl_installed", # absl/testing:parameterized
"//:expect_tensorflow_installed",
"//tf_keras",
"//tf_keras/testing_infra:test_combinations",
],
)
cuda_py_test(
name = "lion_test",
size = "medium",
srcs = ["lion_test.py"],
shard_count = 4,
deps = [
"//:expect_numpy_installed",
"//:expect_tensorflow_installed",
"//tf_keras",
],
)
| tf-keras/tf_keras/optimizers/BUILD/0 | {
"file_path": "tf-keras/tf_keras/optimizers/BUILD",
"repo_id": "tf-keras",
"token_count": 1951
} | 238 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Adam."""
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
from tf_keras.optimizers import optimizer_v1
from tf_keras.optimizers.legacy import adam
from tf_keras.optimizers.schedules import learning_rate_schedule
from tf_keras.testing_infra import test_combinations
def adam_update_numpy(
param, g_t, t, m, v, lr=0.001, beta1=0.9, beta2=0.999, epsilon=1e-7
):
lr_t = lr * np.sqrt(1 - beta2 ** (t + 1)) / (1 - beta1 ** (t + 1))
m_t = beta1 * m + (1 - beta1) * g_t
v_t = beta2 * v + (1 - beta2) * g_t * g_t
param_t = param - lr_t * m_t / (np.sqrt(v_t) + epsilon)
return param_t, m_t, v_t
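# Note: the reference helper above folds Adam's bias-correction factors,
# sqrt(1 - beta2**(t + 1)) and (1 - beta1**(t + 1)), into the effective step
# size `lr_t` instead of correcting `m_t`/`v_t` separately; the tests below
# compare the optimizer's variables against exactly this formulation.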
def adam_update_numpy_amsgrad(
param, g_t, t, m, v, vhat, lr=0.001, beta1=0.9, beta2=0.999, epsilon=1e-7
):
lr_t = lr * np.sqrt(1 - beta2 ** (t + 1)) / (1 - beta1 ** (t + 1))
m_t = beta1 * m + (1 - beta1) * g_t
v_t = beta2 * v + (1 - beta2) * g_t * g_t
vhat_t = np.maximum(vhat, v_t)
param_t = param - lr_t * m_t / (np.sqrt(vhat_t) + epsilon)
return param_t, m_t, v_t, vhat_t
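# Note: the AMSGrad reference differs from the plain update above only in
# that the denominator uses the running elementwise maximum `vhat_t` of the
# second-moment estimate, so the denominator never shrinks between steps.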
def adam_sparse_update_numpy_amsgrad(
param,
indices,
g_t,
t,
m,
v,
vhat,
lr=0.001,
beta1=0.9,
beta2=0.999,
epsilon=1e-7,
):
m_t, v_t, vhat_t, param_t = (
np.copy(m),
np.copy(v),
np.copy(vhat),
np.copy(param),
)
lr_t = lr * np.sqrt(1 - beta2 ** (t + 1)) / (1 - beta1 ** (t + 1))
m_t_slice = beta1 * m[indices] + (1 - beta1) * g_t
v_t_slice = beta2 * v[indices] + (1 - beta2) * g_t * g_t
m_t[indices] = m_t_slice
v_t[indices] = v_t_slice
    # Track the elementwise running maximum of the second moment (AMSGrad).
    vhat_t = np.maximum(vhat_t, v_t)
    vhat_t_slice = vhat_t[indices]
    param_t_slice = param[indices] - (
        lr_t * (m_t_slice / (np.sqrt(vhat_t_slice) + epsilon))
    )
    param_t[indices] = param_t_slice
    return param_t, m_t, v_t, vhat_t
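# Note: the sparse reference applies the same AMSGrad arithmetic as
# `adam_update_numpy_amsgrad`, but only the rows selected by `indices` are
# updated in `param`, `m` and `v`, mirroring how the optimizer consumes
# `tf.IndexedSlices` gradients in the tests below.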
def get_beta_accumulators(opt, dtype):
local_step = tf.cast(opt.iterations + 1, dtype)
beta_1_t = tf.cast(opt._get_hyper("beta_1"), dtype)
beta_1_power = tf.pow(beta_1_t, local_step)
beta_2_t = tf.cast(opt._get_hyper("beta_2"), dtype)
beta_2_power = tf.pow(beta_2_t, local_step)
return (beta_1_power, beta_2_power)
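# Note: `get_beta_accumulators` recomputes beta_1**step and beta_2**step from
# the optimizer's hyperparameters and iteration count; the tests use it to
# check that the bias-correction powers advance by one factor of beta per
# applied update.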
class AdamOptimizerTest(tf.test.TestCase, parameterized.TestCase):
def testSparse(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for dtype in [tf.half, tf.float32, tf.float64]:
with tf.Graph().as_default(), self.cached_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array(
[0.1, 0.0, 0.1], dtype=dtype.as_numpy_dtype
)
var1_np = np.array([3.0, 3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array(
[0.01, 0.0, 0.01], dtype=dtype.as_numpy_dtype
)
var0 = tf.Variable(var0_np)
var1 = tf.Variable(var1_np)
grads0_np_indices = np.array([0, 2], dtype=np.int32)
grads0 = tf.IndexedSlices(
tf.constant(grads0_np[grads0_np_indices]),
tf.constant(grads0_np_indices),
tf.constant([3]),
)
grads1_np_indices = np.array([0, 2], dtype=np.int32)
grads1 = tf.IndexedSlices(
tf.constant(grads1_np[grads1_np_indices]),
tf.constant(grads1_np_indices),
tf.constant([3]),
)
opt = adam.Adam()
update = opt.apply_gradients(
zip([grads0, grads1], [var0, var1])
)
self.evaluate(tf.compat.v1.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 3.0, 4.0], self.evaluate(var1))
beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype)
# Run 3 steps of Adam
for t in range(3):
self.assertAllCloseAccordingToType(
0.9 ** (t + 1), self.evaluate(beta_1_power)
)
self.assertAllCloseAccordingToType(
0.999 ** (t + 1), self.evaluate(beta_2_power)
)
update.run()
var0_np, m0, v0 = adam_update_numpy(
var0_np, grads0_np, t, m0, v0
)
var1_np, m1, v1 = adam_update_numpy(
var1_np, grads1_np, t, m1, v1
)
# Validate updated params
self.assertAllCloseAccordingToType(
var0_np, self.evaluate(var0)
)
self.assertAllCloseAccordingToType(
var1_np, self.evaluate(var1)
)
def testSparseDevicePlacement(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for index_dtype in [tf.int32, tf.int64]:
with tf.Graph().as_default(), self.cached_session(
force_gpu=tf.test.is_gpu_available()
):
# If a GPU is available, tests that all optimizer ops can be
# placed on it (i.e. they have GPU kernels).
var = tf.Variable([[1.0], [2.0]])
indices = tf.constant([0, 1], dtype=index_dtype)
g_sum = lambda: tf.reduce_sum(tf.gather(var, indices))
optimizer = adam.Adam(3.0)
minimize_op = optimizer.minimize(g_sum, var_list=[var])
self.evaluate(tf.compat.v1.global_variables_initializer())
minimize_op.run()
def testSparseRepeatedIndices(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for dtype in [tf.half, tf.float32, tf.float64]:
with tf.Graph().as_default(), self.cached_session():
repeated_index_update_var = tf.Variable(
[[1.0], [2.0]], dtype=dtype
)
aggregated_update_var = tf.Variable([[1.0], [2.0]], dtype=dtype)
grad_repeated_index = tf.IndexedSlices(
tf.constant([0.1, 0.1], shape=[2, 1], dtype=dtype),
tf.constant([1, 1]),
tf.constant([2, 1]),
)
grad_aggregated = tf.IndexedSlices(
tf.constant([0.2], shape=[1, 1], dtype=dtype),
tf.constant([1]),
tf.constant([2, 1]),
)
repeated_update = adam.Adam().apply_gradients(
[(grad_repeated_index, repeated_index_update_var)]
)
aggregated_update = adam.Adam().apply_gradients(
[(grad_aggregated, aggregated_update_var)]
)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAllClose(
aggregated_update_var,
self.evaluate(repeated_index_update_var),
)
for _ in range(3):
repeated_update.run()
aggregated_update.run()
self.assertAllClose(
aggregated_update_var,
self.evaluate(repeated_index_update_var),
)
def doTestBasic(self, use_callable_params=False):
for i, dtype in enumerate([tf.half, tf.float32, tf.float64]):
with self.cached_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = tf.Variable(var0_np, name="var0_%d" % i)
var1 = tf.Variable(var1_np, name="var1_%d" % i)
grads0 = tf.constant(grads0_np)
grads1 = tf.constant(grads1_np)
learning_rate = lambda: 0.001
beta1 = lambda: 0.9
beta2 = lambda: 0.999
epsilon = lambda: 1e-8
if not use_callable_params:
learning_rate = learning_rate()
beta1 = beta1()
beta2 = beta2()
epsilon = epsilon()
opt = adam.Adam(learning_rate=learning_rate)
if not tf.executing_eagerly():
update = opt.apply_gradients(
zip([grads0, grads1], [var0, var1])
)
self.evaluate(tf.compat.v1.global_variables_initializer())
# Run 3 steps of Adam
for t in range(3):
beta_1_power, beta_2_power = get_beta_accumulators(
opt, dtype
)
self.assertAllCloseAccordingToType(
0.9 ** (t + 1), self.evaluate(beta_1_power)
)
self.assertAllCloseAccordingToType(
0.999 ** (t + 1), self.evaluate(beta_2_power)
)
if not tf.executing_eagerly():
self.evaluate(update)
else:
opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
var0_np, m0, v0 = adam_update_numpy(
var0_np, grads0_np, t, m0, v0
)
var1_np, m1, v1 = adam_update_numpy(
var1_np, grads1_np, t, m1, v1
)
# Validate updated params
self.assertAllCloseAccordingToType(
var0_np, self.evaluate(var0)
)
self.assertAllCloseAccordingToType(
var1_np, self.evaluate(var1)
)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testResourceBasic(self):
self.doTestBasic()
@test_combinations.generate(test_combinations.combine(mode=["eager"]))
def testBasicCallableParams(self):
self.doTestBasic(use_callable_params=True)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testBasicWithAmsgrad(self):
for i, dtype in enumerate([tf.half, tf.float32, tf.float64]):
with self.cached_session():
# Initialize variables for numpy implementation.
m0, v0, v0hat, m1, v1, v1hat = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = tf.Variable(var0_np, name="var0_%d" % i)
var1 = tf.Variable(var1_np, name="var1_%d" % i)
grads0 = tf.constant(grads0_np)
grads1 = tf.constant(grads1_np)
opt = adam.Adam(amsgrad=True)
if not tf.executing_eagerly():
update = opt.apply_gradients(
zip([grads0, grads1], [var0, var1])
)
self.evaluate(tf.compat.v1.global_variables_initializer())
# Run 3 steps of Adam
for t in range(3):
beta_1_power, beta_2_power = get_beta_accumulators(
opt, dtype
)
self.assertAllCloseAccordingToType(
0.9 ** (t + 1), self.evaluate(beta_1_power)
)
self.assertAllCloseAccordingToType(
0.999 ** (t + 1), self.evaluate(beta_2_power)
)
if not tf.executing_eagerly():
self.evaluate(update)
else:
opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
var0_np, m0, v0, v0hat = adam_update_numpy_amsgrad(
var0_np, grads0_np, t, m0, v0, v0hat
)
var1_np, m1, v1, v1hat = adam_update_numpy_amsgrad(
var1_np, grads1_np, t, m1, v1, v1hat
)
# Validate updated params
self.assertAllCloseAccordingToType(
var0_np, self.evaluate(var0)
)
self.assertAllCloseAccordingToType(
var1_np, self.evaluate(var1)
)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testSparseWithAmsgrad(self):
# dtypes.half does not work on gpu + eager.
for dtype in [tf.float32, tf.float64]:
with self.cached_session():
m0 = np.array([[0.0], [0.0]])
v0 = np.array([[0.0], [0.0]])
v0hat = np.array([[0.0], [0.0]])
indices_np = np.array([1])
indices = tf.constant(indices_np, dtype=tf.int32)
var0_np = np.array([[1.0], [2.0]], dtype=dtype.as_numpy_dtype)
repeated_index_update_var = tf.Variable(var0_np, dtype=dtype)
aggregated_update_var = tf.Variable(var0_np, dtype=dtype)
grads0_np = np.array([[0.2]], dtype=dtype.as_numpy_dtype)
grad_repeated_index = tf.IndexedSlices(
tf.constant([0.1, 0.1], shape=[2, 1], dtype=dtype),
tf.constant([1, 1]),
tf.constant([2, 1]),
)
grad_aggregated = tf.IndexedSlices(
grads0_np, indices, tf.constant([2, 1])
)
opt_repeated = adam.Adam(amsgrad=True)
opt_aggregated = adam.Adam(amsgrad=True)
if not tf.executing_eagerly():
repeated_update = opt_repeated.apply_gradients(
[(grad_repeated_index, repeated_index_update_var)]
)
aggregated_update = opt_aggregated.apply_gradients(
[(grad_aggregated, aggregated_update_var)]
)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAllClose(
self.evaluate(aggregated_update_var),
self.evaluate(repeated_index_update_var),
)
for t in range(3):
if not tf.executing_eagerly():
self.evaluate(repeated_update)
self.evaluate(aggregated_update)
else:
opt_repeated.apply_gradients(
[(grad_repeated_index, repeated_index_update_var)]
)
opt_aggregated.apply_gradients(
[(grad_aggregated, aggregated_update_var)]
)
var0_np, m0, v0, v0hat = adam_sparse_update_numpy_amsgrad(
var0_np, indices_np, grads0_np, t, m0, v0, v0hat
)
# Validate updated params
self.assertAllCloseAccordingToType(
var0_np, self.evaluate(aggregated_update_var)
)
self.assertAllCloseAccordingToType(
self.evaluate(aggregated_update_var),
self.evaluate(repeated_index_update_var),
)
def testBasicWithLearningRateDecay(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for i, dtype in enumerate([tf.half, tf.float32, tf.float64]):
with tf.Graph().as_default(), self.cached_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = tf.Variable(var0_np, name="var0_%d" % i)
var1 = tf.Variable(var1_np, name="var1_%d" % i)
grads0 = tf.constant(grads0_np)
grads1 = tf.constant(grads1_np)
learning_rate = 0.001
beta_1 = 0.9
beta_2 = 0.999
epsilon = 1e-7
decay = 0.5
opt = adam.Adam(
learning_rate=learning_rate,
beta_1=beta_1,
beta_2=beta_2,
epsilon=epsilon,
decay=decay,
)
update = opt.apply_gradients(
zip([grads0, grads1], [var0, var1])
)
self.evaluate(tf.compat.v1.global_variables_initializer())
# Run 3 steps of Adam
for t in range(3):
self.evaluate(update)
lr_np = learning_rate / (1 + decay * t)
var0_np, m0, v0 = adam_update_numpy(
var0_np, grads0_np, t, m0, v0, lr=lr_np
)
var1_np, m1, v1 = adam_update_numpy(
var1_np, grads1_np, t, m1, v1, lr=lr_np
)
# Validate updated params
self.assertAllCloseAccordingToType(
var0_np, self.evaluate(var0)
)
self.assertAllCloseAccordingToType(
var1_np, self.evaluate(var1)
)
def testBasicWithLearningRateInverseTimeDecay(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for i, dtype in enumerate([tf.half, tf.float32, tf.float64]):
with tf.Graph().as_default(), self.cached_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = tf.Variable(var0_np, name="var0_%d" % i)
var1 = tf.Variable(var1_np, name="var1_%d" % i)
grads0 = tf.constant(grads0_np)
grads1 = tf.constant(grads1_np)
learning_rate = 0.001
decay = 0.5
lr_schedule = learning_rate_schedule.InverseTimeDecay(
learning_rate, decay_steps=1.0, decay_rate=decay
)
beta_1 = 0.9
beta_2 = 0.999
epsilon = 1e-7
opt = adam.Adam(
learning_rate=lr_schedule,
beta_1=beta_1,
beta_2=beta_2,
epsilon=epsilon,
)
update = opt.apply_gradients(
zip([grads0, grads1], [var0, var1])
)
self.evaluate(tf.compat.v1.global_variables_initializer())
# Run 3 steps of Adam
for t in range(3):
self.evaluate(update)
lr_np = learning_rate / (1 + decay * t)
var0_np, m0, v0 = adam_update_numpy(
var0_np, grads0_np, t, m0, v0, lr=lr_np
)
var1_np, m1, v1 = adam_update_numpy(
var1_np, grads1_np, t, m1, v1, lr=lr_np
)
# Validate updated params
self.assertAllCloseAccordingToType(
var0_np, self.evaluate(var0)
)
self.assertAllCloseAccordingToType(
var1_np, self.evaluate(var1)
)
def testTensorLearningRate(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for dtype in [tf.half, tf.float32, tf.float64]:
with tf.Graph().as_default(), self.cached_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = tf.Variable(var0_np)
var1 = tf.Variable(var1_np)
grads0 = tf.constant(grads0_np)
grads1 = tf.constant(grads1_np)
opt = adam.Adam(tf.constant(0.001))
update = opt.apply_gradients(
zip([grads0, grads1], [var0, var1])
)
self.evaluate(tf.compat.v1.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype)
# Run 3 steps of Adam
for t in range(3):
self.assertAllCloseAccordingToType(
0.9 ** (t + 1), self.evaluate(beta_1_power)
)
self.assertAllCloseAccordingToType(
0.999 ** (t + 1), self.evaluate(beta_2_power)
)
update.run()
var0_np, m0, v0 = adam_update_numpy(
var0_np, grads0_np, t, m0, v0
)
var1_np, m1, v1 = adam_update_numpy(
var1_np, grads1_np, t, m1, v1
)
# Validate updated params
self.assertAllCloseAccordingToType(
var0_np, self.evaluate(var0)
)
self.assertAllCloseAccordingToType(
var1_np, self.evaluate(var1)
)
def testSharing(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for dtype in [tf.half, tf.float32, tf.float64]:
with tf.Graph().as_default(), self.cached_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = tf.Variable(var0_np)
var1 = tf.Variable(var1_np)
grads0 = tf.constant(grads0_np)
grads1 = tf.constant(grads1_np)
opt = adam.Adam()
update1 = opt.apply_gradients(
zip([grads0, grads1], [var0, var1])
)
update2 = opt.apply_gradients(
zip([grads0, grads1], [var0, var1])
)
self.evaluate(tf.compat.v1.global_variables_initializer())
beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype)
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 3 steps of intertwined Adam1 and Adam2.
for t in range(3):
self.assertAllCloseAccordingToType(
0.9 ** (t + 1), self.evaluate(beta_1_power)
)
self.assertAllCloseAccordingToType(
0.999 ** (t + 1), self.evaluate(beta_2_power)
)
if t % 2 == 0:
update1.run()
else:
update2.run()
var0_np, m0, v0 = adam_update_numpy(
var0_np, grads0_np, t, m0, v0
)
var1_np, m1, v1 = adam_update_numpy(
var1_np, grads1_np, t, m1, v1
)
# Validate updated params
self.assertAllCloseAccordingToType(
var0_np, self.evaluate(var0)
)
self.assertAllCloseAccordingToType(
var1_np, self.evaluate(var1)
)
@test_combinations.generate(test_combinations.combine(mode=["eager"]))
def testSlotsUniqueEager(self):
v1 = tf.Variable(1.0)
v2 = tf.Variable(1.0)
opt = adam.Adam(1.0)
opt.minimize(lambda: v1 + v2, var_list=[v1, v2])
        # There should be the iterations variable, plus two unique slot
        # variables (m and v) for each of v1 and v2, i.e. 5 variables total.
self.assertLen(set(v.ref() for v in opt.variables()), 5)
self.assertEqual(
self.evaluate(opt.variables()[0]), self.evaluate(opt.iterations)
)
def testSetWeightsFromV1AdamWithoutMinimize(self):
keras_v1_adam = optimizer_v1.Adam()
keras_v2_adam = adam.Adam()
keras_v2_adam.set_weights(keras_v1_adam.get_weights())
keras_v1_iteration = keras_v1_adam.iterations
keras_v2_iteration = keras_v2_adam.iterations
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertEqual(
self.evaluate(keras_v1_iteration), self.evaluate(keras_v2_iteration)
)
def testConstructAdamWithLR(self):
opt = adam.Adam(lr=1.0)
opt_2 = adam.Adam(learning_rate=0.1, lr=1.0)
opt_3 = adam.Adam(learning_rate=0.1)
self.assertIsInstance(opt.lr, tf.Variable)
self.assertIsInstance(opt_2.lr, tf.Variable)
self.assertIsInstance(opt_3.lr, tf.Variable)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAllClose(self.evaluate(opt.lr), (1.0))
self.assertAllClose(self.evaluate(opt_2.lr), (1.0))
self.assertAllClose(self.evaluate(opt_3.lr), (0.1))
class NonFusedAdamOptimizerTest(tf.test.TestCase, parameterized.TestCase):
def testSparse(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for dtype in [tf.half, tf.float32, tf.float64]:
with tf.Graph().as_default(), self.cached_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array(
[0.1, 0.0, 0.1], dtype=dtype.as_numpy_dtype
)
var1_np = np.array([3.0, 3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array(
[0.01, 0.0, 0.01], dtype=dtype.as_numpy_dtype
)
var0 = tf.Variable(var0_np)
var1 = tf.Variable(var1_np)
grads0_np_indices = np.array([0, 2], dtype=np.int32)
grads0 = tf.IndexedSlices(
tf.constant(grads0_np[grads0_np_indices]),
tf.constant(grads0_np_indices),
tf.constant([3]),
)
grads1_np_indices = np.array([0, 2], dtype=np.int32)
grads1 = tf.IndexedSlices(
tf.constant(grads1_np[grads1_np_indices]),
tf.constant(grads1_np_indices),
tf.constant([3]),
)
opt = adam.NonFusedAdam()
update = opt.apply_gradients(
zip([grads0, grads1], [var0, var1])
)
self.evaluate(tf.compat.v1.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 3.0, 4.0], self.evaluate(var1))
beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype)
# Run 3 steps of NonFusedAdam
for t in range(3):
self.assertAllCloseAccordingToType(
0.9 ** (t + 1), self.evaluate(beta_1_power)
)
self.assertAllCloseAccordingToType(
0.999 ** (t + 1), self.evaluate(beta_2_power)
)
update.run()
var0_np, m0, v0 = adam_update_numpy(
var0_np, grads0_np, t, m0, v0
)
var1_np, m1, v1 = adam_update_numpy(
var1_np, grads1_np, t, m1, v1
)
# Validate updated params
self.assertAllCloseAccordingToType(
var0_np, self.evaluate(var0)
)
self.assertAllCloseAccordingToType(
var1_np, self.evaluate(var1)
)
def testSparseDevicePlacement(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for index_dtype in [tf.int32, tf.int64]:
with tf.Graph().as_default(), self.cached_session(
force_gpu=tf.test.is_gpu_available()
):
# If a GPU is available, tests that all optimizer ops can be
# placed on it (i.e. they have GPU kernels).
var = tf.Variable([[1.0], [2.0]])
indices = tf.constant([0, 1], dtype=index_dtype)
g_sum = lambda: tf.reduce_sum(tf.gather(var, indices))
optimizer = adam.NonFusedAdam(3.0)
minimize_op = optimizer.minimize(g_sum, var_list=[var])
self.evaluate(tf.compat.v1.global_variables_initializer())
minimize_op.run()
def testSparseRepeatedIndices(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for dtype in [tf.half, tf.float32, tf.float64]:
with tf.Graph().as_default(), self.cached_session():
repeated_index_update_var = tf.Variable(
[[1.0], [2.0]], dtype=dtype
)
aggregated_update_var = tf.Variable([[1.0], [2.0]], dtype=dtype)
grad_repeated_index = tf.IndexedSlices(
tf.constant([0.1, 0.1], shape=[2, 1], dtype=dtype),
tf.constant([1, 1]),
tf.constant([2, 1]),
)
grad_aggregated = tf.IndexedSlices(
tf.constant([0.2], shape=[1, 1], dtype=dtype),
tf.constant([1]),
tf.constant([2, 1]),
)
repeated_update = adam.NonFusedAdam().apply_gradients(
[(grad_repeated_index, repeated_index_update_var)]
)
aggregated_update = adam.NonFusedAdam().apply_gradients(
[(grad_aggregated, aggregated_update_var)]
)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAllClose(
aggregated_update_var,
self.evaluate(repeated_index_update_var),
)
for _ in range(3):
repeated_update.run()
aggregated_update.run()
self.assertAllClose(
aggregated_update_var,
self.evaluate(repeated_index_update_var),
)
def doTestBasic(self, use_callable_params=False):
for i, dtype in enumerate([tf.half, tf.float32, tf.float64]):
with self.cached_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = tf.Variable(var0_np, name="var0_%d" % i)
var1 = tf.Variable(var1_np, name="var1_%d" % i)
grads0 = tf.constant(grads0_np)
grads1 = tf.constant(grads1_np)
learning_rate = lambda: 0.001
beta1 = lambda: 0.9
beta2 = lambda: 0.999
epsilon = lambda: 1e-8
if not use_callable_params:
learning_rate = learning_rate()
beta1 = beta1()
beta2 = beta2()
epsilon = epsilon()
opt = adam.NonFusedAdam(learning_rate=learning_rate)
if not tf.executing_eagerly():
update = opt.apply_gradients(
zip([grads0, grads1], [var0, var1])
)
self.evaluate(tf.compat.v1.global_variables_initializer())
# Run 3 steps of NonFusedAdam
for t in range(3):
beta_1_power, beta_2_power = get_beta_accumulators(
opt, dtype
)
self.assertAllCloseAccordingToType(
0.9 ** (t + 1), self.evaluate(beta_1_power)
)
self.assertAllCloseAccordingToType(
0.999 ** (t + 1), self.evaluate(beta_2_power)
)
if not tf.executing_eagerly():
self.evaluate(update)
else:
opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
var0_np, m0, v0 = adam_update_numpy(
var0_np, grads0_np, t, m0, v0
)
var1_np, m1, v1 = adam_update_numpy(
var1_np, grads1_np, t, m1, v1
)
# Validate updated params
self.assertAllCloseAccordingToType(
var0_np, self.evaluate(var0), rtol=1e-4, atol=1e-4
)
self.assertAllCloseAccordingToType(
var1_np, self.evaluate(var1), rtol=1e-4, atol=1e-4
)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testResourceBasic(self):
self.doTestBasic()
@test_combinations.generate(test_combinations.combine(mode=["eager"]))
def testBasicCallableParams(self):
self.doTestBasic(use_callable_params=True)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testBasicWithAmsgrad(self):
for i, dtype in enumerate([tf.half, tf.float32, tf.float64]):
with self.cached_session():
# Initialize variables for numpy implementation.
m0, v0, v0hat, m1, v1, v1hat = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = tf.Variable(var0_np, name="var0_%d" % i)
var1 = tf.Variable(var1_np, name="var1_%d" % i)
grads0 = tf.constant(grads0_np)
grads1 = tf.constant(grads1_np)
opt = adam.NonFusedAdam(amsgrad=True)
if not tf.executing_eagerly():
update = opt.apply_gradients(
zip([grads0, grads1], [var0, var1])
)
self.evaluate(tf.compat.v1.global_variables_initializer())
# Run 3 steps of NonFusedAdam
for t in range(3):
beta_1_power, beta_2_power = get_beta_accumulators(
opt, dtype
)
self.assertAllCloseAccordingToType(
0.9 ** (t + 1), self.evaluate(beta_1_power)
)
self.assertAllCloseAccordingToType(
0.999 ** (t + 1), self.evaluate(beta_2_power)
)
if not tf.executing_eagerly():
self.evaluate(update)
else:
opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
var0_np, m0, v0, v0hat = adam_update_numpy_amsgrad(
var0_np, grads0_np, t, m0, v0, v0hat
)
var1_np, m1, v1, v1hat = adam_update_numpy_amsgrad(
var1_np, grads1_np, t, m1, v1, v1hat
)
# Validate updated params
self.assertAllCloseAccordingToType(
var0_np, self.evaluate(var0), rtol=1e-4, atol=1e-4
)
self.assertAllCloseAccordingToType(
var1_np, self.evaluate(var1), rtol=1e-4, atol=1e-4
)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def testSparseWithAmsgrad(self):
# dtypes.half does not work on gpu + eager.
for dtype in [tf.float32, tf.float64]:
with self.cached_session():
m0 = np.array([[0.0], [0.0]])
v0 = np.array([[0.0], [0.0]])
v0hat = np.array([[0.0], [0.0]])
indices_np = np.array([1])
indices = tf.constant(indices_np, dtype=tf.int32)
var0_np = np.array([[1.0], [2.0]], dtype=dtype.as_numpy_dtype)
repeated_index_update_var = tf.Variable(var0_np, dtype=dtype)
aggregated_update_var = tf.Variable(var0_np, dtype=dtype)
grads0_np = np.array([[0.2]], dtype=dtype.as_numpy_dtype)
grad_repeated_index = tf.IndexedSlices(
tf.constant([0.1, 0.1], shape=[2, 1], dtype=dtype),
tf.constant([1, 1]),
tf.constant([2, 1]),
)
grad_aggregated = tf.IndexedSlices(
grads0_np, indices, tf.constant([2, 1])
)
opt_repeated = adam.NonFusedAdam(amsgrad=True)
opt_aggregated = adam.NonFusedAdam(amsgrad=True)
if not tf.executing_eagerly():
repeated_update = opt_repeated.apply_gradients(
[(grad_repeated_index, repeated_index_update_var)]
)
aggregated_update = opt_aggregated.apply_gradients(
[(grad_aggregated, aggregated_update_var)]
)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAllClose(
self.evaluate(aggregated_update_var),
self.evaluate(repeated_index_update_var),
)
for t in range(3):
if not tf.executing_eagerly():
self.evaluate(repeated_update)
self.evaluate(aggregated_update)
else:
opt_repeated.apply_gradients(
[(grad_repeated_index, repeated_index_update_var)]
)
opt_aggregated.apply_gradients(
[(grad_aggregated, aggregated_update_var)]
)
var0_np, m0, v0, v0hat = adam_sparse_update_numpy_amsgrad(
var0_np, indices_np, grads0_np, t, m0, v0, v0hat
)
# Validate updated params
self.assertAllCloseAccordingToType(
var0_np, self.evaluate(aggregated_update_var)
)
self.assertAllCloseAccordingToType(
self.evaluate(aggregated_update_var),
self.evaluate(repeated_index_update_var),
)
def testBasicWithLearningRateDecay(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for i, dtype in enumerate([tf.half, tf.float32, tf.float64]):
with tf.Graph().as_default(), self.cached_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = tf.Variable(var0_np, name="var0_%d" % i)
var1 = tf.Variable(var1_np, name="var1_%d" % i)
grads0 = tf.constant(grads0_np)
grads1 = tf.constant(grads1_np)
learning_rate = 0.001
beta_1 = 0.9
beta_2 = 0.999
epsilon = 1e-7
decay = 0.5
opt = adam.NonFusedAdam(
learning_rate=learning_rate,
beta_1=beta_1,
beta_2=beta_2,
epsilon=epsilon,
decay=decay,
)
update = opt.apply_gradients(
zip([grads0, grads1], [var0, var1])
)
self.evaluate(tf.compat.v1.global_variables_initializer())
# Run 3 steps of NonFusedAdam
for t in range(3):
self.evaluate(update)
lr_np = learning_rate / (1 + decay * t)
var0_np, m0, v0 = adam_update_numpy(
var0_np, grads0_np, t, m0, v0, lr=lr_np
)
var1_np, m1, v1 = adam_update_numpy(
var1_np, grads1_np, t, m1, v1, lr=lr_np
)
# Validate updated params
self.assertAllCloseAccordingToType(
var0_np, self.evaluate(var0)
)
self.assertAllCloseAccordingToType(
var1_np, self.evaluate(var1)
)
def testBasicWithLearningRateInverseTimeDecay(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for i, dtype in enumerate([tf.half, tf.float32, tf.float64]):
with tf.Graph().as_default(), self.cached_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = tf.Variable(var0_np, name="var0_%d" % i)
var1 = tf.Variable(var1_np, name="var1_%d" % i)
grads0 = tf.constant(grads0_np)
grads1 = tf.constant(grads1_np)
learning_rate = 0.001
decay = 0.5
lr_schedule = learning_rate_schedule.InverseTimeDecay(
learning_rate, decay_steps=1.0, decay_rate=decay
)
beta_1 = 0.9
beta_2 = 0.999
epsilon = 1e-7
opt = adam.NonFusedAdam(
learning_rate=lr_schedule,
beta_1=beta_1,
beta_2=beta_2,
epsilon=epsilon,
)
update = opt.apply_gradients(
zip([grads0, grads1], [var0, var1])
)
self.evaluate(tf.compat.v1.global_variables_initializer())
# Run 3 steps of NonFusedAdam
for t in range(3):
self.evaluate(update)
lr_np = learning_rate / (1 + decay * t)
var0_np, m0, v0 = adam_update_numpy(
var0_np, grads0_np, t, m0, v0, lr=lr_np
)
var1_np, m1, v1 = adam_update_numpy(
var1_np, grads1_np, t, m1, v1, lr=lr_np
)
# Validate updated params
self.assertAllCloseAccordingToType(
var0_np, self.evaluate(var0)
)
self.assertAllCloseAccordingToType(
var1_np, self.evaluate(var1)
)
def testTensorLearningRate(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for dtype in [tf.half, tf.float32, tf.float64]:
with tf.Graph().as_default(), self.cached_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = tf.Variable(var0_np)
var1 = tf.Variable(var1_np)
grads0 = tf.constant(grads0_np)
grads1 = tf.constant(grads1_np)
opt = adam.NonFusedAdam(tf.constant(0.001))
update = opt.apply_gradients(
zip([grads0, grads1], [var0, var1])
)
self.evaluate(tf.compat.v1.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype)
# Run 3 steps of NonFusedAdam
for t in range(3):
self.assertAllCloseAccordingToType(
0.9 ** (t + 1), self.evaluate(beta_1_power)
)
self.assertAllCloseAccordingToType(
0.999 ** (t + 1), self.evaluate(beta_2_power)
)
update.run()
var0_np, m0, v0 = adam_update_numpy(
var0_np, grads0_np, t, m0, v0
)
var1_np, m1, v1 = adam_update_numpy(
var1_np, grads1_np, t, m1, v1
)
# Validate updated params
self.assertAllCloseAccordingToType(
var0_np, self.evaluate(var0)
)
self.assertAllCloseAccordingToType(
var1_np, self.evaluate(var1)
)
def testSharing(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for dtype in [tf.half, tf.float32, tf.float64]:
with tf.Graph().as_default(), self.cached_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = tf.Variable(var0_np)
var1 = tf.Variable(var1_np)
grads0 = tf.constant(grads0_np)
grads1 = tf.constant(grads1_np)
opt = adam.NonFusedAdam()
update1 = opt.apply_gradients(
zip([grads0, grads1], [var0, var1])
)
update2 = opt.apply_gradients(
zip([grads0, grads1], [var0, var1])
)
self.evaluate(tf.compat.v1.global_variables_initializer())
beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype)
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 3 steps of intertwined NonFusedAdam1 and NonFusedAdam2.
for t in range(3):
self.assertAllCloseAccordingToType(
0.9 ** (t + 1), self.evaluate(beta_1_power)
)
self.assertAllCloseAccordingToType(
0.999 ** (t + 1), self.evaluate(beta_2_power)
)
if t % 2 == 0:
update1.run()
else:
update2.run()
var0_np, m0, v0 = adam_update_numpy(
var0_np, grads0_np, t, m0, v0
)
var1_np, m1, v1 = adam_update_numpy(
var1_np, grads1_np, t, m1, v1
)
# Validate updated params
self.assertAllCloseAccordingToType(
var0_np, self.evaluate(var0)
)
self.assertAllCloseAccordingToType(
var1_np, self.evaluate(var1)
)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/optimizers/legacy/adam_test.py/0 | {
"file_path": "tf-keras/tf_keras/optimizers/legacy/adam_test.py",
"repo_id": "tf-keras",
"token_count": 31093
} | 239 |
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Lion."""
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow.python.framework import dtypes
from tf_keras.optimizers.lion import Lion
def lion_update_numpy(
params,
grads,
momentums,
learning_rate=0.0001,
beta_1=0.9,
beta_2=0.99,
):
params = params - learning_rate * np.sign(
beta_1 * momentums + (1 - beta_1) * grads
)
momentums = beta_2 * momentums + (1 - beta_2) * grads
return params, momentums
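# Note: the reference above follows the Lion update rule exercised by the
# tests: the parameter step is the sign of an interpolation between the
# momentum and the current gradient (weighted by beta_1), while the momentum
# itself decays with the separate coefficient beta_2.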
class LionOptimizerTest(tf.test.TestCase):
def testDense(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
learning_rate = 0.0001
beta_1 = 0.9
beta_2 = 0.99
with self.cached_session():
m0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
m1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.9, 0.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.1, 0.0], dtype=dtype.as_numpy_dtype)
var0 = tf.Variable(var0_np)
var1 = tf.Variable(var1_np)
grads0 = tf.constant(grads0_np)
grads1 = tf.constant(grads1_np)
optimizer = Lion(
learning_rate=learning_rate,
beta_1=beta_1,
beta_2=beta_2,
)
# Run 3 steps of Lion
for _ in range(3):
optimizer.apply_gradients(
zip([grads0, grads1], [var0, var1])
)
var0_np, m0_np = lion_update_numpy(
var0_np,
grads0_np,
m0_np,
learning_rate=learning_rate,
beta_1=beta_1,
beta_2=beta_2,
)
var1_np, m1_np = lion_update_numpy(
var1_np,
grads1_np,
m1_np,
learning_rate=learning_rate,
beta_1=beta_1,
beta_2=beta_2,
)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, var0)
self.assertAllCloseAccordingToType(var1_np, var1)
def testSparse(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
learning_rate = 0.0001
beta_1 = 0.9
beta_2 = 0.99
with self.cached_session():
m0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
m1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.9, 0.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.1, 0.0], dtype=dtype.as_numpy_dtype)
var0 = tf.Variable(var0_np)
var1 = tf.Variable(var1_np)
grads0_np_indices = np.array([0], dtype=np.int32)
grads0 = tf.IndexedSlices(
tf.constant(grads0_np[grads0_np_indices]),
tf.constant(grads0_np_indices),
tf.constant([2]),
)
grads1_np_indices = np.array([0], dtype=np.int32)
grads1 = tf.IndexedSlices(
tf.constant(grads1_np[grads1_np_indices]),
tf.constant(grads1_np_indices),
tf.constant([2]),
)
optimizer = Lion(
learning_rate=learning_rate,
beta_1=beta_1,
beta_2=beta_2,
)
# Run 3 steps of Lion
for _ in range(3):
optimizer.apply_gradients(
zip([grads0, grads1], [var0, var1])
)
var0_np, m0_np = lion_update_numpy(
var0_np,
grads0_np,
m0_np,
learning_rate=learning_rate,
beta_1=beta_1,
beta_2=beta_2,
)
var1_np, m1_np = lion_update_numpy(
var1_np,
grads1_np,
m1_np,
learning_rate=learning_rate,
beta_1=beta_1,
beta_2=beta_2,
)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, var0)
self.assertAllCloseAccordingToType(var1_np, var1)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/optimizers/lion_test.py/0 | {
"file_path": "tf-keras/tf_keras/optimizers/lion_test.py",
"repo_id": "tf-keras",
"token_count": 3430
} | 240 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Built-in linear model classes."""
import tensorflow.compat.v2 as tf
from tf_keras import activations
from tf_keras import initializers
from tf_keras import regularizers
from tf_keras.engine import base_layer
from tf_keras.engine import input_spec
from tf_keras.engine import training
from tf_keras.layers import core
# isort: off
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import keras_export
@keras_export(
"keras.experimental.LinearModel",
v1=["keras.experimental.LinearModel", "keras.models.LinearModel"],
)
@deprecation.deprecated_endpoints("keras.experimental.LinearModel")
class LinearModel(training.Model):
r"""Linear Model for regression and classification problems.
This model approximates the following function:
$$y = \beta + \sum_{i=1}^{N} w_{i} * x_{i}$$
where $$\beta$$ is the bias and $$w_{i}$$ is the weight for each feature.
Example:
```python
model = LinearModel()
model.compile(optimizer='sgd', loss='mse')
model.fit(x, y, epochs=epochs)
```
This model accepts sparse float inputs as well:
Example:
```python
model = LinearModel()
opt = tf.keras.optimizers.Adam()
loss_fn = tf.keras.losses.MeanSquaredError()
with tf.GradientTape() as tape:
output = model(sparse_input)
loss = tf.reduce_mean(loss_fn(target, output))
grads = tape.gradient(loss, model.weights)
opt.apply_gradients(zip(grads, model.weights))
```
"""
def __init__(
self,
units=1,
activation=None,
use_bias=True,
kernel_initializer="zeros",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
**kwargs,
):
"""Create a Linear Model.
Args:
units: Positive integer, output dimension without the batch size.
activation: Activation function to use.
If you don't specify anything, no activation is applied.
use_bias: whether to calculate the bias/intercept for this model. If
set to False, no bias/intercept will be used in calculations, e.g.,
the data is already centered.
kernel_initializer: Initializer for the `kernel` weights matrices.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: regularizer for kernel vectors.
bias_regularizer: regularizer for bias vector.
**kwargs: The keyword arguments that are passed on to
BaseLayer.__init__.
"""
self.units = units
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
super().__init__(**kwargs)
def build(self, input_shape):
if isinstance(input_shape, dict):
names = sorted(list(input_shape.keys()))
self.input_specs = []
self.dense_layers = []
for name in names:
shape = input_shape[name]
layer = core.Dense(
units=self.units,
use_bias=False,
kernel_initializer=self.kernel_initializer,
kernel_regularizer=self.kernel_regularizer,
name=name,
)
layer.build(shape)
self.input_specs.append(
input_spec.InputSpec(shape=shape, name=name)
)
self.dense_layers.append(layer)
elif isinstance(input_shape, (tuple, list)) and all(
isinstance(shape, tf.TensorShape) for shape in input_shape
):
self.dense_layers = []
for shape in input_shape:
layer = core.Dense(
units=self.units,
use_bias=False,
kernel_initializer=self.kernel_initializer,
kernel_regularizer=self.kernel_regularizer,
)
layer.build(shape)
self.dense_layers.append(layer)
else:
# input_shape can be a single TensorShape or a tuple of ints.
layer = core.Dense(
units=self.units,
use_bias=False,
kernel_initializer=self.kernel_initializer,
kernel_regularizer=self.kernel_regularizer,
)
layer.build(input_shape)
self.dense_layers = [layer]
if self.use_bias:
self.bias = self.add_weight(
"bias",
shape=self.units,
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
dtype=self.dtype,
trainable=True,
)
else:
self.bias = None
self.built = True
def call(self, inputs):
result = None
if isinstance(inputs, dict):
names = [layer.name for layer in self.dense_layers]
different_keys = set(names) - set(inputs.keys())
if different_keys:
raise ValueError(
"The `inputs` dictionary does not match "
"the structure expected by the model."
f"\n\tExpected keys: {set(names)}"
f"\n\tReceived keys: {set(inputs.keys())}"
f"\n\tMissing keys: {different_keys}"
)
inputs = [inputs[name] for name in names]
for inp, layer in zip(inputs, self.dense_layers):
output = layer(inp)
if result is None:
result = output
else:
result += output
elif isinstance(inputs, (tuple, list)):
for inp, layer in zip(inputs, self.dense_layers):
output = layer(inp)
if result is None:
result = output
else:
result += output
else:
result = self.dense_layers[0](inputs)
if self.use_bias:
result = tf.nn.bias_add(result, self.bias)
if self.activation is not None:
return self.activation(result)
return result
def get_config(self):
config = {
"units": self.units,
"activation": activations.serialize(self.activation),
"use_bias": self.use_bias,
"kernel_initializer": initializers.serialize(
self.kernel_initializer
),
"bias_initializer": initializers.serialize(self.bias_initializer),
"kernel_regularizer": regularizers.serialize(
self.kernel_regularizer
),
"bias_regularizer": regularizers.serialize(self.bias_regularizer),
}
base_config = base_layer.Layer.get_config(self)
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
del custom_objects
return cls(**config)
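# Example (illustrative sketch, not part of the original module): because
# `build` accepts a dict of input shapes and creates one Dense sub-layer per
# sorted key, and `call` checks that the input keys match those layers, the
# model can also be fed dictionary inputs. The feature names below are
# hypothetical:
#
#   model = LinearModel()
#   model.compile(optimizer="sgd", loss="mse")
#   features = {"age": tf.ones((8, 1)), "income": tf.ones((8, 3))}
#   model.fit(features, tf.zeros((8, 1)), epochs=1)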
| tf-keras/tf_keras/premade_models/linear.py/0 | {
"file_path": "tf-keras/tf_keras/premade_models/linear.py",
"repo_id": "tf-keras",
"token_count": 3688
} | 241 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Built-in regularizers."""
import math
import warnings
import tensorflow.compat.v2 as tf
from tf_keras import backend
from tf_keras.saving.legacy import serialization as legacy_serialization
from tf_keras.saving.serialization_lib import deserialize_keras_object
from tf_keras.saving.serialization_lib import serialize_keras_object
# isort: off
from tensorflow.python.util.tf_export import keras_export
def _check_penalty_number(x):
"""check penalty number availability, raise ValueError if failed."""
if not isinstance(x, (float, int)):
raise ValueError(
f"Value {x} is not a valid regularization penalty number, "
"expected an int or float value."
)
if math.isinf(x) or math.isnan(x):
raise ValueError(
f"Value {x} is not a valid regularization penalty number, "
"an infinite number or NaN are not valid values."
)
def _none_to_default(inputs, default):
    return default if inputs is None else inputs
@keras_export("keras.regularizers.Regularizer")
class Regularizer:
"""Regularizer base class.
Regularizers allow you to apply penalties on layer parameters or layer
activity during optimization. These penalties are summed into the loss
function that the network optimizes.
Regularization penalties are applied on a per-layer basis. The exact API
will depend on the layer, but many layers (e.g. `Dense`, `Conv1D`, `Conv2D`
and `Conv3D`) have a unified API.
These layers expose 3 keyword arguments:
- `kernel_regularizer`: Regularizer to apply a penalty on the layer's kernel
- `bias_regularizer`: Regularizer to apply a penalty on the layer's bias
- `activity_regularizer`: Regularizer to apply a penalty on the layer's
output
All layers (including custom layers) expose `activity_regularizer` as a
settable property, whether or not it is in the constructor arguments.
The value returned by the `activity_regularizer` is divided by the input
batch size so that the relative weighting between the weight regularizers
and the activity regularizers does not change with the batch size.
You can access a layer's regularization penalties by calling `layer.losses`
after calling the layer on inputs.
## Example
>>> layer = tf.keras.layers.Dense(
... 5, input_dim=5,
... kernel_initializer='ones',
... kernel_regularizer=tf.keras.regularizers.L1(0.01),
... activity_regularizer=tf.keras.regularizers.L2(0.01))
>>> tensor = tf.ones(shape=(5, 5)) * 2.0
>>> out = layer(tensor)
>>> # The kernel regularization term is 0.25
>>> # The activity regularization term (after dividing by the batch size)
>>> # is 5
>>> tf.math.reduce_sum(layer.losses)
<tf.Tensor: shape=(), dtype=float32, numpy=5.25>
## Available penalties
```python
tf.keras.regularizers.L1(0.3) # L1 Regularization Penalty
tf.keras.regularizers.L2(0.1) # L2 Regularization Penalty
tf.keras.regularizers.L1L2(l1=0.01, l2=0.01) # L1 + L2 penalties
```
## Directly calling a regularizer
Compute a regularization loss on a tensor by directly calling a regularizer
as if it is a one-argument function.
E.g.
>>> regularizer = tf.keras.regularizers.L2(2.)
>>> tensor = tf.ones(shape=(5, 5))
>>> regularizer(tensor)
<tf.Tensor: shape=(), dtype=float32, numpy=50.0>
## Developing new regularizers
Any function that takes in a weight matrix and returns a scalar
tensor can be used as a regularizer, e.g.:
>>> @tf.keras.utils.register_keras_serializable(package='Custom', name='l1')
... def l1_reg(weight_matrix):
... return 0.01 * tf.math.reduce_sum(tf.math.abs(weight_matrix))
...
>>> layer = tf.keras.layers.Dense(5, input_dim=5,
... kernel_initializer='ones', kernel_regularizer=l1_reg)
>>> tensor = tf.ones(shape=(5, 5))
>>> out = layer(tensor)
>>> layer.losses
[<tf.Tensor: shape=(), dtype=float32, numpy=0.25>]
Alternatively, you can write your custom regularizers in an
object-oriented way by extending this regularizer base class, e.g.:
>>> @tf.keras.utils.register_keras_serializable(package='Custom', name='l2')
... class L2Regularizer(tf.keras.regularizers.Regularizer):
... def __init__(self, l2=0.):
... self.l2 = l2
...
... def __call__(self, x):
... return self.l2 * tf.math.reduce_sum(tf.math.square(x))
...
... def get_config(self):
... return {'l2': float(self.l2)}
...
>>> layer = tf.keras.layers.Dense(
... 5, input_dim=5, kernel_initializer='ones',
... kernel_regularizer=L2Regularizer(l2=0.5))
>>> tensor = tf.ones(shape=(5, 5))
>>> out = layer(tensor)
>>> layer.losses
[<tf.Tensor: shape=(), dtype=float32, numpy=12.5>]
### A note on serialization and deserialization:
Registering the regularizers as serializable is optional if you are just
training and executing models, exporting to and from SavedModels, or saving
and loading weight checkpoints.
Registration is required for saving and
loading models to HDF5 format, TF-Keras model cloning, some visualization
utilities, and exporting models to and from JSON. If using this
functionality, you must make sure any python process running your model has
also defined and registered your custom regularizer.
"""
def __call__(self, x):
"""Compute a regularization penalty from an input tensor."""
return 0.0
@classmethod
def from_config(cls, config):
"""Creates a regularizer from its config.
This method is the reverse of `get_config`,
capable of instantiating the same regularizer from the config
dictionary.
This method is used by TF-Keras `model_to_estimator`, saving and
loading models to HDF5 formats, TF-Keras model cloning, some
visualization utilities, and exporting models to and from JSON.
Args:
config: A Python dictionary, typically the output of get_config.
Returns:
A regularizer instance.
"""
return cls(**config)
def get_config(self):
"""Returns the config of the regularizer.
        A regularizer config is a Python dictionary (serializable)
containing all configuration parameters of the regularizer.
The same regularizer can be reinstantiated later
(without any saved state) from this configuration.
This method is optional if you are just training and executing models,
exporting to and from SavedModels, or using weight checkpoints.
This method is required for TF-Keras `model_to_estimator`, saving and
loading models to HDF5 formats, TF-Keras model cloning, some
visualization utilities, and exporting models to and from JSON.
Returns:
Python dictionary.
"""
raise NotImplementedError(f"{self} does not implement get_config()")
@keras_export("keras.regularizers.L1L2")
class L1L2(Regularizer):
"""A regularizer that applies both L1 and L2 regularization penalties.
The L1 regularization penalty is computed as:
`loss = l1 * reduce_sum(abs(x))`
The L2 regularization penalty is computed as
`loss = l2 * reduce_sum(square(x))`
L1L2 may be passed to a layer as a string identifier:
>>> dense = tf.keras.layers.Dense(3, kernel_regularizer='l1_l2')
In this case, the default values used are `l1=0.01` and `l2=0.01`.
Arguments:
l1: Float; L1 regularization factor.
l2: Float; L2 regularization factor.
"""
def __init__(self, l1=0.0, l2=0.0):
        # The default values for l1 and l2 differ from those of the l1_l2
        # shorthand for backward-compatibility reasons. E.g., L1L2(l2=0.1)
        # applies only an l2 penalty and no l1 penalty.
l1 = 0.0 if l1 is None else l1
l2 = 0.0 if l2 is None else l2
_check_penalty_number(l1)
_check_penalty_number(l2)
self.l1 = backend.cast_to_floatx(l1)
self.l2 = backend.cast_to_floatx(l2)
def __call__(self, x):
regularization = backend.constant(0.0, dtype=x.dtype)
if self.l1:
regularization += self.l1 * tf.reduce_sum(tf.abs(x))
if self.l2:
# equivalent to "self.l2 * tf.reduce_sum(tf.square(x))"
regularization += 2.0 * self.l2 * tf.nn.l2_loss(x)
return regularization
def get_config(self):
return {"l1": float(self.l1), "l2": float(self.l2)}
@keras_export("keras.regularizers.L1", "keras.regularizers.l1")
class L1(Regularizer):
"""A regularizer that applies a L1 regularization penalty.
The L1 regularization penalty is computed as:
`loss = l1 * reduce_sum(abs(x))`
L1 may be passed to a layer as a string identifier:
>>> dense = tf.keras.layers.Dense(3, kernel_regularizer='l1')
In this case, the default value used is `l1=0.01`.
Arguments:
l1: Float; L1 regularization factor.
"""
def __init__(self, l1=0.01, **kwargs):
l1 = kwargs.pop("l", l1) # Backwards compatibility
if kwargs:
raise TypeError(f"Argument(s) not recognized: {kwargs}")
l1 = 0.01 if l1 is None else l1
_check_penalty_number(l1)
self.l1 = backend.cast_to_floatx(l1)
def __call__(self, x):
return self.l1 * tf.reduce_sum(tf.abs(x))
def get_config(self):
return {"l1": float(self.l1)}
@keras_export("keras.regularizers.L2", "keras.regularizers.l2")
class L2(Regularizer):
"""A regularizer that applies a L2 regularization penalty.
The L2 regularization penalty is computed as:
`loss = l2 * reduce_sum(square(x))`
L2 may be passed to a layer as a string identifier:
>>> dense = tf.keras.layers.Dense(3, kernel_regularizer='l2')
In this case, the default value used is `l2=0.01`.
Arguments:
l2: Float; L2 regularization factor.
"""
def __init__(self, l2=0.01, **kwargs):
l2 = kwargs.pop("l", l2) # Backwards compatibility
if kwargs:
raise TypeError(f"Argument(s) not recognized: {kwargs}")
l2 = 0.01 if l2 is None else l2
_check_penalty_number(l2)
self.l2 = backend.cast_to_floatx(l2)
def __call__(self, x):
# equivalent to "self.l2 * tf.reduce_sum(tf.square(x))"
return 2.0 * self.l2 * tf.nn.l2_loss(x)
def get_config(self):
return {"l2": float(self.l2)}
@keras_export(
"keras.regularizers.OrthogonalRegularizer",
"keras.regularizers.orthogonal_regularizer",
v1=[],
)
class OrthogonalRegularizer(Regularizer):
"""Regularizer that encourages input vectors to be orthogonal to each other.
It can be applied to either the rows of a matrix (`mode="rows"`) or its
columns (`mode="columns"`). When applied to a `Dense` kernel of shape
`(input_dim, units)`, rows mode will seek to make the feature vectors
(i.e. the basis of the output space) orthogonal to each other.
Arguments:
factor: Float. The regularization factor. The regularization penalty will
be proportional to `factor` times the mean of the dot products between
the L2-normalized rows (if `mode="rows"`, or columns if
`mode="columns"`) of the inputs, excluding the product of each
row/column with itself. Defaults to 0.01.
mode: String, one of `{"rows", "columns"}`. Defaults to `"rows"`. In rows
mode, the regularization effect seeks to make the rows of the input
orthogonal to each other. In columns mode, it seeks to make the columns
of the input orthogonal to each other.
Example:
>>> regularizer = tf.keras.regularizers.OrthogonalRegularizer(factor=0.01)
>>> layer = tf.keras.layers.Dense(units=4, kernel_regularizer=regularizer)
"""
def __init__(self, factor=0.01, mode="rows"):
_check_penalty_number(factor)
self.factor = backend.cast_to_floatx(factor)
if mode not in {"rows", "columns"}:
raise ValueError(
"Invalid value for argument `mode`. Expected one of "
f'{{"rows", "columns"}}. Received: mode={mode}'
)
self.mode = mode
def __call__(self, inputs):
if inputs.shape.rank != 2:
raise ValueError(
"Inputs to OrthogonalRegularizer must have rank 2. Received: "
f"inputs.shape == {inputs.shape}"
)
if self.mode == "rows":
inputs = tf.math.l2_normalize(inputs, axis=1)
product = tf.matmul(inputs, tf.transpose(inputs))
size = inputs.shape[0]
else:
inputs = tf.math.l2_normalize(inputs, axis=0)
product = tf.matmul(tf.transpose(inputs), inputs)
size = inputs.shape[1]
product_no_diagonal = product * (1.0 - tf.eye(size, dtype=inputs.dtype))
num_pairs = size * (size - 1.0) / 2.0
return (
self.factor
* 0.5
* tf.reduce_sum(tf.abs(product_no_diagonal))
/ num_pairs
)
def get_config(self):
return {"factor": float(self.factor), "mode": self.mode}
@keras_export("keras.regularizers.l1_l2")
def l1_l2(l1=0.01, l2=0.01):
r"""Create a regularizer that applies both L1 and L2 penalties.
The L1 regularization penalty is computed as:
`loss = l1 * reduce_sum(abs(x))`
The L2 regularization penalty is computed as:
`loss = l2 * reduce_sum(square(x))`
Args:
l1: Float; L1 regularization factor.
l2: Float; L2 regularization factor.
Returns:
An L1L2 Regularizer with the given regularization factors.
"""
return L1L2(l1=l1, l2=l2)
# Deserialization aliases.
l1 = L1
l2 = L2
orthogonal_regularizer = OrthogonalRegularizer
@keras_export("keras.regularizers.serialize")
def serialize(regularizer, use_legacy_format=False):
if regularizer is None:
return None
if not isinstance(regularizer, Regularizer):
warnings.warn(
"The `keras.regularizers.serialize()` API should only be used for "
"objects of type `keras.regularizers.Regularizer`. Found an "
f"instance of type {type(regularizer)}, which may lead to improper "
"serialization."
)
if use_legacy_format:
return legacy_serialization.serialize_keras_object(regularizer)
return serialize_keras_object(regularizer)
@keras_export("keras.regularizers.deserialize")
def deserialize(config, custom_objects=None, use_legacy_format=False):
if config == "l1_l2":
# Special case necessary since the defaults used for "l1_l2" (string)
# differ from those of the L1L2 class.
return L1L2(l1=0.01, l2=0.01)
if use_legacy_format:
return legacy_serialization.deserialize_keras_object(
config,
module_objects=globals(),
custom_objects=custom_objects,
printable_module_name="regularizer",
)
return deserialize_keras_object(
config,
module_objects=globals(),
custom_objects=custom_objects,
printable_module_name="regularizer",
)
@keras_export("keras.regularizers.get")
def get(identifier):
"""Retrieve a regularizer instance from a config or identifier."""
if identifier is None:
return None
if isinstance(identifier, dict):
use_legacy_format = "module" not in identifier
return deserialize(identifier, use_legacy_format=use_legacy_format)
elif isinstance(identifier, str):
return deserialize(str(identifier))
elif callable(identifier):
return identifier
else:
raise ValueError(
f"Could not interpret regularizer identifier: {identifier}"
)
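# Illustrative usage sketch (not part of the upstream module): exercises the
# penalties and the string-identifier lookup defined above on tiny inputs.
# It only assumes the `tf` import at the top of this file and eager execution.
if __name__ == "__main__":
    weights = tf.ones((2, 2))

    # L1L2 combines both penalties: 0.01 * 4 + 0.01 * 4 = 0.08.
    combined = L1L2(l1=0.01, l2=0.01)
    print(combined(weights).numpy())  # ~0.08

    # The "l1_l2" string identifier resolves through `get` -> `deserialize`
    # and uses the l1=0.01, l2=0.01 defaults special-cased above.
    from_string = get("l1_l2")
    print(from_string.get_config())  # {'l1': 0.01, 'l2': 0.01}, float32 rounded

    # OrthogonalRegularizer returns 0 for an orthonormal kernel, since all
    # off-diagonal dot products of the normalized rows vanish.
    ortho = OrthogonalRegularizer(factor=0.01)
    print(ortho(tf.eye(2)).numpy())  # 0.0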
| tf-keras/tf_keras/regularizers.py/0 | {
"file_path": "tf-keras/tf_keras/regularizers.py",
"repo_id": "tf-keras",
"token_count": 6463
} | 242 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Constants for TF-Keras SavedModel serialization."""
# Namespace used to store all attributes added during serialization.
# e.g. the list of layers can be accessed using `loaded.keras_api.layers`, in an
# object loaded from `tf.saved_model.load()`.
KERAS_ATTR = "keras_api"
# Keys for the serialization cache.
# Maps to the keras serialization dict {Layer --> SerializedAttributes object}
KERAS_CACHE_KEY = "keras_serialized_attributes"
# Name of TF-Keras metadata file stored in the SavedModel.
SAVED_METADATA_PATH = "keras_metadata.pb"
# Names of SavedObject TF-Keras identifiers.
INPUT_LAYER_IDENTIFIER = "_tf_keras_input_layer"
LAYER_IDENTIFIER = "_tf_keras_layer"
METRIC_IDENTIFIER = "_tf_keras_metric"
MODEL_IDENTIFIER = "_tf_keras_model"
NETWORK_IDENTIFIER = "_tf_keras_network"
RNN_LAYER_IDENTIFIER = "_tf_keras_rnn_layer"
SEQUENTIAL_IDENTIFIER = "_tf_keras_sequential"
KERAS_OBJECT_IDENTIFIERS = (
INPUT_LAYER_IDENTIFIER,
LAYER_IDENTIFIER,
METRIC_IDENTIFIER,
MODEL_IDENTIFIER,
NETWORK_IDENTIFIER,
RNN_LAYER_IDENTIFIER,
SEQUENTIAL_IDENTIFIER,
)
| tf-keras/tf_keras/saving/legacy/saved_model/constants.py/0 | {
"file_path": "tf-keras/tf_keras/saving/legacy/saved_model/constants.py",
"repo_id": "tf-keras",
"token_count": 583
} | 243 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions shared between SavedModel saving/loading
implementations."""
import copy
import itertools
import threading
import types
import tensorflow.compat.v2 as tf
from tf_keras import backend
from tf_keras.engine import base_layer_utils
from tf_keras.utils import control_flow_util
from tf_keras.utils import tf_contextlib
from tf_keras.utils.generic_utils import LazyLoader
from tf_keras.utils.layer_utils import CallFunctionSpec
training_lib = LazyLoader("training_lib", globals(), "tf_keras.engine.training")
def use_wrapped_call(
layer, call_fn, call_spec, default_training_value=None, return_method=False
):
"""Creates fn that adds losses returned by call_fn & returns the outputs.
Args:
layer: A TF-Keras layer object
call_fn: tf.function that takes layer inputs (and possibly a training
arg), and returns a tuple of (outputs, list of losses).
call_spec: The `CallFunctionSpec` for the layer's call function.
default_training_value: Default value of the training kwarg. If `None`,
the default is `tf.keras.backend.learning_phase()`.
return_method: Whether to return a method bound to the layer.
Returns:
function that calls call_fn and returns the outputs. Losses returned by
call_fn are added to the layer losses.
"""
expects_training_arg = layer_uses_training_bool(layer)
fn, arg_spec = maybe_add_training_arg(
call_spec, call_fn, expects_training_arg, default_training_value
)
def return_outputs_and_add_losses(*args, **kwargs):
"""Returns the outputs from the layer call function, and adds the
losses."""
if return_method:
args = args[1:]
outputs, losses = fn(*args, **kwargs)
layer.add_loss(losses)
# TODO(kathywu): This is a temporary hack. When a network of layers is
# revived from SavedModel, only the top-level layer will have losses.
# This causes issues in eager mode because the child layers may have
# graph losses (thus model.losses returns a mix of Eager and graph
# tensors). To fix this, whenever eager losses are added to one layer,
# add eager losses to all child layers. This causes `.losses` to only
# return eager losses.
if tf.executing_eagerly():
for i in layer._flatten_layers():
if i is not layer:
i._eager_losses = [
base_layer_utils.REVIVED_LOSS_PLACEHOLDER
]
return outputs
decorated = tf.__internal__.decorator.make_decorator(
target=call_fn,
decorator_func=return_outputs_and_add_losses,
decorator_argspec=arg_spec,
)
if return_method:
return types.MethodType(decorated, layer)
else:
return decorated
def layer_uses_training_bool(layer):
"""Returns whether this layer or any of its children uses the training
arg."""
if layer._expects_training_arg:
return True
visited = {layer}
to_visit = list_all_layers(layer)
while to_visit:
layer = to_visit.pop()
if layer in visited:
continue
if getattr(layer, "_expects_training_arg", True):
return True
visited.add(layer)
to_visit.extend(list_all_layers(layer))
return False
def list_all_layers(obj):
if isinstance(obj, training_lib.Model):
# Handle special case of Sequential, which doesn't return
# the `Input` layer.
return obj.layers
else:
return list(obj._flatten_layers(include_self=False, recursive=False))
def list_all_layers_and_sublayers(obj):
s = set([obj])
s.update(
itertools.chain.from_iterable(
list_all_layers_and_sublayers(layer)
for layer in list_all_layers(obj)
)
)
return s
def maybe_add_training_arg(
call_spec, wrapped_call, expects_training_arg, default_training_value
):
"""Decorate call and optionally adds training argument.
If a layer expects a training argument, this function ensures that
'training' is present in the layer args or kwonly args, with the default
training value.
Args:
call_spec: CallFunctionSpec of the layer.
wrapped_call: Wrapped call function.
expects_training_arg: Whether to include 'training' argument.
default_training_value: Default value of the training kwarg to include in
the arg spec. If `None`, the default is
`tf.keras.backend.learning_phase()`.
Returns:
Tuple of (
function that calls `wrapped_call` and sets the training arg,
Argspec of returned function or `None` if the argspec is unchanged)
"""
if not expects_training_arg:
return wrapped_call, None
arg_spec = set_training_arg_spec(
call_spec.full_argspec, default_training_value
)
call_spec = CallFunctionSpec(arg_spec)
def wrap_with_training_arg(*args, **kwargs):
"""Wrap the `wrapped_call` function, and set training argument."""
try:
training = call_spec.get_arg_value(
"training", args, kwargs, inputs_in_args=True
)
except KeyError:
training = None
if training is None:
training = (
default_training_value
or base_layer_utils.call_context().training
or backend.learning_phase()
)
args = list(args)
kwargs = kwargs.copy()
def replace_training_and_call(training):
new_args, new_kwargs = call_spec.set_arg_value(
"training", training, args, kwargs, inputs_in_args=True
)
return wrapped_call(*new_args, **new_kwargs)
return control_flow_util.smart_cond(
training,
lambda: replace_training_and_call(True),
lambda: replace_training_and_call(False),
)
return wrap_with_training_arg, arg_spec
def set_training_arg_spec(arg_spec, default_training_value):
"""Set `training=DEFAULT` argument in an ArgSpec."""
if "training" in arg_spec.args:
# If `training` is already in the args list, try to set the default
# value.
index = arg_spec.args.index("training")
training_default_index = len(arg_spec.args) - index
defaults = (
list(arg_spec.defaults) if arg_spec.defaults is not None else []
)
if (
arg_spec.defaults
and len(arg_spec.defaults) >= training_default_index
and defaults[-training_default_index] is None
):
defaults[-training_default_index] = default_training_value
return arg_spec._replace(defaults=defaults)
elif "training" not in arg_spec.kwonlyargs:
kwonlyargs = arg_spec.kwonlyargs + ["training"]
kwonlydefaults = copy.copy(arg_spec.kwonlydefaults) or {}
kwonlydefaults["training"] = default_training_value
return arg_spec._replace(
kwonlyargs=kwonlyargs, kwonlydefaults=kwonlydefaults
)
return arg_spec
class SaveOptionsContext(threading.local):
def __init__(self):
super().__init__()
self.save_traces = True
self.in_tf_saved_model_scope = False
_save_options_context = SaveOptionsContext()
@tf_contextlib.contextmanager
def keras_option_scope(save_traces, in_tf_saved_model_scope=True):
save_traces_previous_value = _save_options_context.save_traces
in_scope_previous_value = _save_options_context.in_tf_saved_model_scope
try:
_save_options_context.save_traces = save_traces
_save_options_context.in_tf_saved_model_scope = in_tf_saved_model_scope
yield
finally:
_save_options_context.save_traces = save_traces_previous_value
_save_options_context.in_tf_saved_model_scope = in_scope_previous_value
def should_save_traces():
"""Whether to trace layer functions-can be disabled in the save_traces
arg."""
return _save_options_context.save_traces
def in_tf_saved_model_scope():
return _save_options_context.in_tf_saved_model_scope
@tf_contextlib.contextmanager
def no_automatic_dependency_tracking_scope(obj):
"""Context that disables automatic dependency tracking when assigning attrs.
    Objects that inherit from Autotrackable automatically create dependencies
    on trackable objects through attribute assignments, and wrap data
    structures (lists or dicts) with trackable classes. This scope may be used
    to temporarily disable this behavior. It works similarly to the decorator
    `no_automatic_dependency_tracking`.
Example usage:
```
model = tf.keras.Model()
model.arr1 = [] # Creates a ListWrapper object
with no_automatic_dependency_tracking_scope(model):
model.arr2 = [] # Creates a regular, untracked python list
```
Args:
obj: A trackable object.
Yields:
a scope in which the object doesn't track dependencies.
"""
previous_value = getattr(obj, "_setattr_tracking", True)
obj._setattr_tracking = False
try:
yield
finally:
obj._setattr_tracking = previous_value
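# Hypothetical sketch (not part of the upstream module) of what
# `set_training_arg_spec` does to a call signature: an existing
# `training=None` default is replaced by the requested default value.
# `inspect.getfullargspec` returns the same namedtuple shape the helper
# expects.
if __name__ == "__main__":
    import inspect

    def call(self, inputs, training=None):
        return inputs

    spec = inspect.getfullargspec(call)
    new_spec = set_training_arg_spec(spec, default_training_value=False)
    print(new_spec.defaults)  # [False]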
| tf-keras/tf_keras/saving/legacy/saved_model/utils.py/0 | {
"file_path": "tf-keras/tf_keras/saving/legacy/saved_model/utils.py",
"repo_id": "tf-keras",
"token_count": 3835
} | 244 |
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf_doctest."""
import doctest
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
from tf_keras.testing_infra import keras_doctest_lib
class KerasDoctestOutputCheckerTest(parameterized.TestCase):
@parameterized.parameters(
# Don't match ints.
["result = 1", []],
# Match floats.
["0.0", [0.0]],
["text 1.0 text", [1.0]],
["text 1. text", [1.0]],
["text .1 text", [0.1]],
["text 1e3 text", [1000.0]],
["text 1.e3 text", [1000.0]],
["text +1. text", [1.0]],
["text -1. text", [-1.0]],
["text 1e+3 text", [1000.0]],
["text 1e-3 text", [0.001]],
["text +1E3 text", [1000.0]],
["text -1E3 text", [-1000.0]],
["text +1e-3 text", [0.001]],
["text -1e+3 text", [-1000.0]],
# Match at the start and end of a string.
[".1", [0.1]],
[".1 text", [0.1]],
["text .1", [0.1]],
["0.1 text", [0.1]],
["text 0.1", [0.1]],
["0. text", [0.0]],
["text 0.", [0.0]],
["1e-1 text", [0.1]],
["text 1e-1", [0.1]],
# Don't match floats mixed into text
["text1.0 text", []],
["text 1.0text", []],
["text1.0text", []],
["0x12e4", []], # not 12000
["TensorBoard: http://128.0.0.1:8888", []],
# With a newline
["1.0 text\n 2.0 3.0 text", [1.0, 2.0, 3.0]],
# With ints and a float.
["shape (1,2,3) value -1e9", [-1e9]],
# "." after a float.
["No floats at end of sentence: 1.0.", []],
["No floats with ellipsis: 1.0...", []],
# A numpy array
[
"""array([[1., 2., 3.],
[4., 5., 6.]], dtype=float32)""",
[1, 2, 3, 4, 5, 6],
],
# Match both parts of a complex number
# python style
["(0.0002+30000j)", [0.0002, 30000]],
["(2.3e-10-3.34e+9j)", [2.3e-10, -3.34e9]],
# numpy style
["array([1.27+5.j])", [1.27, 5]],
["(2.3e-10+3.34e+9j)", [2.3e-10, 3.34e9]],
[
"""array([1.27e-09+5.e+00j,
2.30e+01-1.e-03j])""",
[1.27e-09, 5.0e00, 2.30e01, -1.0e-03],
],
        # Check examples within tolerance.
["1e-6", [0]],
["0.0", [1e-6]],
["1.000001e9", [1e9]],
["1e9", [1.000001e9]],
)
def test_extract_floats(self, text, expected_floats):
extract_floats = keras_doctest_lib._FloatExtractor()
output_checker = keras_doctest_lib.KerasDoctestOutputChecker()
(text_parts, extracted_floats) = extract_floats(text)
text_with_wildcards = "...".join(text_parts)
# Check that the lengths match before doing anything else.
try:
self.assertLen(extracted_floats, len(expected_floats))
except AssertionError as e:
msg = "\n\n expected: {}\n found: {}".format(
expected_floats, extracted_floats
)
e.args = (e.args[0] + msg,)
raise e
# The floats should match according to allclose
try:
self.assertTrue(
output_checker._allclose(expected_floats, extracted_floats)
)
except AssertionError as e:
msg = "\n\nexpected: {}\nfound: {}".format(
expected_floats, extracted_floats
)
e.args = (e.args[0] + msg,)
raise e
# The wildcard text should match the input text, according to the
# OutputChecker base class.
try:
self.assertTrue(
doctest.OutputChecker().check_output(
want=text_with_wildcards,
got=text,
optionflags=doctest.ELLIPSIS,
)
)
except AssertionError as e:
msg = f"\n\n expected: {text_with_wildcards}\n found: {text}"
e.args = (e.args[0] + msg,)
raise e
@parameterized.parameters(
        # Check examples out of tolerance.
["1.001e-2", [0]],
["0.0", [1.001e-3]],
)
def test_fail_tolerences(self, text, expected_floats):
extract_floats = keras_doctest_lib._FloatExtractor()
output_checker = keras_doctest_lib.KerasDoctestOutputChecker()
(_, extracted_floats) = extract_floats(text)
# These floats should not match according to allclose
try:
self.assertFalse(
output_checker._allclose(expected_floats, extracted_floats)
)
except AssertionError as e:
msg = (
"\n\nThese matched! They should not have.\n"
"\n\n Expected: {}\n found: {}".format(
expected_floats, extracted_floats
)
)
e.args = (e.args[0] + msg,)
raise e
def test_no_floats(self):
want = "text ... text"
got = "text 1.0 1.2 1.9 text"
output_checker = keras_doctest_lib.KerasDoctestOutputChecker()
self.assertTrue(
output_checker.check_output(
want=want, got=got, optionflags=doctest.ELLIPSIS
)
)
@parameterized.parameters(
["1.0, ..., 1.0", "1.0, 1.0, 1.0"],
["1.0, 1.0..., 1.0", "1.0, 1.002, 1.0"],
)
def test_warning_messages(self, want, got):
output_checker = keras_doctest_lib.KerasDoctestOutputChecker()
output_checker.check_output(
want=want, got=got, optionflags=doctest.ELLIPSIS
)
example = doctest.Example("None", want=want)
result = output_checker.output_difference(
example=example, got=got, optionflags=doctest.ELLIPSIS
)
self.assertIn("doesn't work if *some* of the", result)
@parameterized.parameters(
["<...>", ("<...>", False)],
["TensorFlow", ("TensorFlow", False)],
[
"tf.Variable([[1, 2], [3, 4]])",
("tf.Variable([[1, 2], [3, 4]])", False),
],
["<tf.Tensor: shape=(), dtype=float32, numpy=inf>", ("inf", True)],
[
"<tf.RaggedTensor:... shape=(2, 2), numpy=1>",
("<tf.RaggedTensor:... shape=(2, 2), numpy=1>", False),
],
[
"""<tf.Tensor: shape=(2, 2), dtype=int32, numpy=
array([[2, 2],
[3, 5]], dtype=int32)>""",
(
"\n array([[2, 2],\n [3, 5]], "
+ "dtype=int32)",
True,
),
],
[
"[<tf.Tensor: shape=(2,), dtype=int32, numpy=array([1, 2], "
+ "dtype=int32)>, "
+ "<tf.Tensor: shape=(2,), dtype=int32, numpy=array([3, 4], "
+ "dtype=int32)>]",
("[array([1, 2], dtype=int32), array([3, 4], dtype=int32)]", True),
],
)
def test_tf_tensor_numpy_output(self, string, expected_output):
output_checker = keras_doctest_lib.KerasDoctestOutputChecker()
output = output_checker._tf_tensor_numpy_output(string)
self.assertEqual(expected_output, output)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/testing_infra/keras_doctest_lib_test.py/0 | {
"file_path": "tf-keras/tf_keras/testing_infra/keras_doctest_lib_test.py",
"repo_id": "tf-keras",
"token_count": 4163
} | 245 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for memory leaks in eager execution.
It is possible that this test suite will eventually become flaky due to taking
too long to run (since the tests iterate many times), but for now these tests
are helpful for finding memory leaks, since not all PyObject leaks are found by
introspection (test_util decorators). Please be careful adding new tests here.
"""
import tensorflow.compat.v2 as tf
import tf_keras as keras
# isort: off
from tensorflow.python.eager.memory_tests import (
memory_test_util,
)
class SingleLayerNet(keras.Model):
"""Simple keras model used to ensure that there are no leaks."""
def __init__(self):
super().__init__()
self.fc1 = keras.layers.Dense(5)
def call(self, x):
return self.fc1(x)
class MemoryTest(tf.test.TestCase):
def testMemoryLeakInSimpleModelForwardOnly(self):
if not memory_test_util.memory_profiler_is_available():
self.skipTest("memory_profiler required to run this test")
inputs = tf.zeros([1000, 1000], tf.float32)
net = SingleLayerNet()
def f():
with tf.GradientTape():
net(inputs)
memory_test_util.assert_no_leak(f, num_iters=1000)
def testMemoryLeakInSimpleModelForwardAndBackward(self):
if not memory_test_util.memory_profiler_is_available():
self.skipTest("memory_profiler required to run this test")
inputs = tf.zeros([1000, 1000], tf.float32)
net = SingleLayerNet()
def f():
with tf.GradientTape() as tape:
result = net(inputs)
tape.gradient(result, net.variables)
del tape
memory_test_util.assert_no_leak(f, num_iters=1000)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/tests/memory_test.py/0 | {
"file_path": "tf-keras/tf_keras/tests/memory_test.py",
"repo_id": "tf-keras",
"token_count": 868
} | 246 |
package(
licenses = ["notice"], # Apache 2.0
)
sh_binary(
name = "parallel_gpu_execute",
srcs = ["parallel_gpu_execute.sh"],
)
| tf-keras/tf_keras/tools/gpu_build/BUILD/0 | {
"file_path": "tf-keras/tf_keras/tools/gpu_build/BUILD",
"repo_id": "tf-keras",
"token_count": 59
} | 247 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for data_utils."""
import os
import tarfile
import urllib
import zipfile
from itertools import cycle
import numpy as np
import tensorflow.compat.v2 as tf
import tf_keras as keras
from tf_keras.utils import data_utils
class TestGetFile(tf.test.TestCase):
def test_get_file_and_validate_it(self):
"""Tests get_file from a url, plus extraction and validation."""
dest_dir = self.get_temp_dir()
orig_dir = self.get_temp_dir()
text_file_path = os.path.join(orig_dir, "test.txt")
zip_file_path = os.path.join(orig_dir, "test.zip")
tar_file_path = os.path.join(orig_dir, "test.tar.gz")
with open(text_file_path, "w") as text_file:
text_file.write("Float like a butterfly, sting like a bee.")
with tarfile.open(tar_file_path, "w:gz") as tar_file:
tar_file.add(text_file_path)
with zipfile.ZipFile(zip_file_path, "w") as zip_file:
zip_file.write(text_file_path)
origin = urllib.parse.urljoin(
"file://",
urllib.request.pathname2url(os.path.abspath(tar_file_path)),
)
path = keras.utils.data_utils.get_file(
"test.txt", origin, untar=True, cache_subdir=dest_dir
)
filepath = path + ".tar.gz"
hashval_sha256 = keras.utils.data_utils._hash_file(filepath)
hashval_md5 = keras.utils.data_utils._hash_file(
filepath, algorithm="md5"
)
path = keras.utils.data_utils.get_file(
"test.txt",
origin,
md5_hash=hashval_md5,
untar=True,
cache_subdir=dest_dir,
)
path = keras.utils.data_utils.get_file(
filepath,
origin,
file_hash=hashval_sha256,
extract=True,
cache_subdir=dest_dir,
)
self.assertTrue(os.path.exists(filepath))
self.assertTrue(
keras.utils.data_utils.validate_file(filepath, hashval_sha256)
)
self.assertTrue(
keras.utils.data_utils.validate_file(filepath, hashval_md5)
)
os.remove(filepath)
origin = urllib.parse.urljoin(
"file://",
urllib.request.pathname2url(os.path.abspath(zip_file_path)),
)
hashval_sha256 = keras.utils.data_utils._hash_file(zip_file_path)
hashval_md5 = keras.utils.data_utils._hash_file(
zip_file_path, algorithm="md5"
)
path = keras.utils.data_utils.get_file(
"test",
origin,
md5_hash=hashval_md5,
extract=True,
cache_subdir=dest_dir,
)
path = keras.utils.data_utils.get_file(
"test",
origin,
file_hash=hashval_sha256,
extract=True,
cache_subdir=dest_dir,
)
self.assertTrue(os.path.exists(path))
self.assertTrue(
keras.utils.data_utils.validate_file(path, hashval_sha256)
)
self.assertTrue(keras.utils.data_utils.validate_file(path, hashval_md5))
os.remove(path)
for file_path, extract in [
(text_file_path, False),
(tar_file_path, True),
(zip_file_path, True),
]:
origin = urllib.parse.urljoin(
"file://",
urllib.request.pathname2url(os.path.abspath(file_path)),
)
hashval_sha256 = keras.utils.data_utils._hash_file(file_path)
path = keras.utils.data_utils.get_file(
origin=origin,
file_hash=hashval_sha256,
extract=extract,
cache_subdir=dest_dir,
)
self.assertTrue(os.path.exists(path))
self.assertTrue(
keras.utils.data_utils.validate_file(path, hashval_sha256)
)
os.remove(path)
with self.assertRaisesRegexp(
ValueError, 'Please specify the "origin".*'
):
_ = keras.utils.data_utils.get_file()
def test_get_file_with_tgz_extension(self):
"""Tests get_file from a url, plus extraction and validation."""
dest_dir = self.get_temp_dir()
orig_dir = self.get_temp_dir()
text_file_path = os.path.join(orig_dir, "test.txt")
tar_file_path = os.path.join(orig_dir, "test.tar.gz")
with open(text_file_path, "w") as text_file:
text_file.write("Float like a butterfly, sting like a bee.")
with tarfile.open(tar_file_path, "w:gz") as tar_file:
tar_file.add(text_file_path)
origin = urllib.parse.urljoin(
"file://",
urllib.request.pathname2url(os.path.abspath(tar_file_path)),
)
path = keras.utils.data_utils.get_file(
"test.txt.tar.gz", origin, untar=True, cache_subdir=dest_dir
)
self.assertEndsWith(path, ".txt")
self.assertTrue(os.path.exists(path))
def test_get_file_with_integrity_check(self):
"""Tests get_file with validation before download."""
orig_dir = self.get_temp_dir()
file_path = os.path.join(orig_dir, "test.txt")
with open(file_path, "w") as text_file:
text_file.write("Float like a butterfly, sting like a bee.")
hashval = keras.utils.data_utils._hash_file(file_path)
origin = urllib.parse.urljoin(
"file://", urllib.request.pathname2url(os.path.abspath(file_path))
)
path = keras.utils.data_utils.get_file(
"test.txt", origin, file_hash=hashval
)
self.assertTrue(os.path.exists(path))
def test_get_file_with_failed_integrity_check(self):
"""Tests get_file with validation before download."""
orig_dir = self.get_temp_dir()
file_path = os.path.join(orig_dir, "test.txt")
with open(file_path, "w") as text_file:
text_file.write("Float like a butterfly, sting like a bee.")
hashval = "0" * 64
origin = urllib.parse.urljoin(
"file://", urllib.request.pathname2url(os.path.abspath(file_path))
)
with self.assertRaisesRegex(
ValueError, "Incomplete or corrupted file.*"
):
_ = keras.utils.data_utils.get_file(
"test.txt", origin, file_hash=hashval
)
class TestSequence(keras.utils.data_utils.Sequence):
def __init__(self, shape, value=1.0):
self.shape = shape
self.inner = value
def __getitem__(self, item):
return np.ones(self.shape, dtype=np.uint32) * item * self.inner
def __len__(self):
return 100
def on_epoch_end(self):
self.inner *= 5.0
class FaultSequence(keras.utils.data_utils.Sequence):
def __getitem__(self, item):
raise IndexError(item, "item is not present")
def __len__(self):
return 100
@data_utils.threadsafe_generator
def create_generator_from_sequence_threads(ds):
for i in cycle(range(len(ds))):
yield ds[i]
def create_generator_from_sequence_pcs(ds):
for i in cycle(range(len(ds))):
yield ds[i]
class TestEnqueuers(tf.test.TestCase):
def test_generator_enqueuer_threads(self):
enqueuer = keras.utils.data_utils.GeneratorEnqueuer(
create_generator_from_sequence_threads(
TestSequence([3, 200, 200, 3])
),
use_multiprocessing=False,
)
enqueuer.start(3, 10)
gen_output = enqueuer.get()
acc = []
for _ in range(100):
acc.append(int(next(gen_output)[0, 0, 0, 0]))
self.assertEqual(len(set(acc) - set(range(100))), 0)
enqueuer.stop()
@data_utils.dont_use_multiprocessing_pool
def test_generator_enqueuer_processes(self):
enqueuer = keras.utils.data_utils.GeneratorEnqueuer(
create_generator_from_sequence_threads(
TestSequence([3, 200, 200, 3])
),
use_multiprocessing=True,
)
enqueuer.start(4, 10)
gen_output = enqueuer.get()
acc = []
for _ in range(300):
acc.append(int(next(gen_output)[0, 0, 0, 0]))
self.assertNotEqual(acc, list(range(100)))
enqueuer.stop()
def test_generator_enqueuer_fail_threads(self):
enqueuer = keras.utils.data_utils.GeneratorEnqueuer(
create_generator_from_sequence_threads(FaultSequence()),
use_multiprocessing=False,
)
enqueuer.start(3, 10)
gen_output = enqueuer.get()
with self.assertRaises(IndexError):
next(gen_output)
@data_utils.dont_use_multiprocessing_pool
def test_generator_enqueuer_fail_processes(self):
enqueuer = keras.utils.data_utils.GeneratorEnqueuer(
create_generator_from_sequence_threads(FaultSequence()),
use_multiprocessing=True,
)
enqueuer.start(3, 10)
gen_output = enqueuer.get()
with self.assertRaises(IndexError):
next(gen_output)
def test_ordered_enqueuer_threads(self):
enqueuer = keras.utils.data_utils.OrderedEnqueuer(
TestSequence([3, 200, 200, 3]), use_multiprocessing=False
)
enqueuer.start(3, 10)
gen_output = enqueuer.get()
acc = []
for _ in range(100):
acc.append(next(gen_output)[0, 0, 0, 0])
self.assertEqual(acc, list(range(100)))
enqueuer.stop()
@data_utils.dont_use_multiprocessing_pool
def test_ordered_enqueuer_processes(self):
enqueuer = keras.utils.data_utils.OrderedEnqueuer(
TestSequence([3, 200, 200, 3]), use_multiprocessing=True
)
enqueuer.start(3, 10)
gen_output = enqueuer.get()
acc = []
for _ in range(100):
acc.append(next(gen_output)[0, 0, 0, 0])
self.assertEqual(acc, list(range(100)))
enqueuer.stop()
def test_ordered_enqueuer_fail_threads(self):
enqueuer = keras.utils.data_utils.OrderedEnqueuer(
FaultSequence(), use_multiprocessing=False
)
enqueuer.start(3, 10)
gen_output = enqueuer.get()
with self.assertRaises(IndexError):
next(gen_output)
@data_utils.dont_use_multiprocessing_pool
def test_ordered_enqueuer_fail_processes(self):
enqueuer = keras.utils.data_utils.OrderedEnqueuer(
FaultSequence(), use_multiprocessing=True
)
enqueuer.start(3, 10)
gen_output = enqueuer.get()
with self.assertRaises(IndexError):
next(gen_output)
@data_utils.dont_use_multiprocessing_pool
def test_on_epoch_end_processes(self):
enqueuer = keras.utils.data_utils.OrderedEnqueuer(
TestSequence([3, 200, 200, 3]), use_multiprocessing=True
)
enqueuer.start(3, 10)
gen_output = enqueuer.get()
acc = []
for _ in range(200):
acc.append(next(gen_output)[0, 0, 0, 0])
        # Check that order was kept in OrderedEnqueuer with processes
self.assertEqual(acc[100:], list([k * 5 for k in range(100)]))
enqueuer.stop()
@data_utils.dont_use_multiprocessing_pool
def test_context_switch(self):
enqueuer = keras.utils.data_utils.OrderedEnqueuer(
TestSequence([3, 200, 200, 3]), use_multiprocessing=True
)
enqueuer2 = keras.utils.data_utils.OrderedEnqueuer(
TestSequence([3, 200, 200, 3], value=15), use_multiprocessing=True
)
enqueuer.start(3, 10)
enqueuer2.start(3, 10)
gen_output = enqueuer.get()
gen_output2 = enqueuer2.get()
acc = []
for _ in range(100):
acc.append(next(gen_output)[0, 0, 0, 0])
self.assertEqual(acc[-1], 99)
# One epoch is completed so enqueuer will switch the Sequence
acc = []
self.skipTest("b/145555807 flakily timing out.")
for _ in range(100):
acc.append(next(gen_output2)[0, 0, 0, 0])
self.assertEqual(acc[-1], 99 * 15)
# One epoch has been completed so enqueuer2 will switch
        # Be sure that both Sequences were updated
self.assertEqual(next(gen_output)[0, 0, 0, 0], 0)
self.assertEqual(next(gen_output)[0, 0, 0, 0], 5)
self.assertEqual(next(gen_output2)[0, 0, 0, 0], 0)
self.assertEqual(next(gen_output2)[0, 0, 0, 0], 15 * 5)
# Tear down everything
enqueuer.stop()
enqueuer2.stop()
def test_on_epoch_end_threads(self):
enqueuer = keras.utils.data_utils.OrderedEnqueuer(
TestSequence([3, 200, 200, 3]), use_multiprocessing=False
)
enqueuer.start(3, 10)
gen_output = enqueuer.get()
acc = []
for _ in range(100):
acc.append(next(gen_output)[0, 0, 0, 0])
acc = []
for _ in range(100):
acc.append(next(gen_output)[0, 0, 0, 0])
        # Check that order was kept in OrderedEnqueuer with threads
self.assertEqual(acc, list([k * 5 for k in range(100)]))
enqueuer.stop()
class PadSequencesTest(tf.test.TestCase):
def test_pad_sequences(self):
a = [[1], [1, 2], [1, 2, 3]]
# test padding
b = data_utils.pad_sequences(a, maxlen=3, padding="pre")
self.assertAllClose(b, [[0, 0, 1], [0, 1, 2], [1, 2, 3]])
b = data_utils.pad_sequences(a, maxlen=3, padding="post")
self.assertAllClose(b, [[1, 0, 0], [1, 2, 0], [1, 2, 3]])
# test truncating
b = data_utils.pad_sequences(a, maxlen=2, truncating="pre")
self.assertAllClose(b, [[0, 1], [1, 2], [2, 3]])
b = data_utils.pad_sequences(a, maxlen=2, truncating="post")
self.assertAllClose(b, [[0, 1], [1, 2], [1, 2]])
# test value
b = data_utils.pad_sequences(a, maxlen=3, value=1)
self.assertAllClose(b, [[1, 1, 1], [1, 1, 2], [1, 2, 3]])
def test_pad_sequences_str(self):
a = [["1"], ["1", "2"], ["1", "2", "3"]]
# test padding
b = data_utils.pad_sequences(
a, maxlen=3, padding="pre", value="pad", dtype=object
)
self.assertAllEqual(
b, [["pad", "pad", "1"], ["pad", "1", "2"], ["1", "2", "3"]]
)
b = data_utils.pad_sequences(
a, maxlen=3, padding="post", value="pad", dtype="<U3"
)
self.assertAllEqual(
b, [["1", "pad", "pad"], ["1", "2", "pad"], ["1", "2", "3"]]
)
# test truncating
b = data_utils.pad_sequences(
a, maxlen=2, truncating="pre", value="pad", dtype=object
)
self.assertAllEqual(b, [["pad", "1"], ["1", "2"], ["2", "3"]])
b = data_utils.pad_sequences(
a, maxlen=2, truncating="post", value="pad", dtype="<U3"
)
self.assertAllEqual(b, [["pad", "1"], ["1", "2"], ["1", "2"]])
with self.assertRaisesRegex(
ValueError, "`dtype` int32 is not compatible with "
):
data_utils.pad_sequences(
a, maxlen=2, truncating="post", value="pad"
)
def test_pad_sequences_vector(self):
a = [[[1, 1]], [[2, 1], [2, 2]], [[3, 1], [3, 2], [3, 3]]]
# test padding
b = data_utils.pad_sequences(a, maxlen=3, padding="pre")
self.assertAllClose(
b,
[
[[0, 0], [0, 0], [1, 1]],
[[0, 0], [2, 1], [2, 2]],
[[3, 1], [3, 2], [3, 3]],
],
)
b = data_utils.pad_sequences(a, maxlen=3, padding="post")
self.assertAllClose(
b,
[
[[1, 1], [0, 0], [0, 0]],
[[2, 1], [2, 2], [0, 0]],
[[3, 1], [3, 2], [3, 3]],
],
)
# test truncating
b = data_utils.pad_sequences(a, maxlen=2, truncating="pre")
self.assertAllClose(
b, [[[0, 0], [1, 1]], [[2, 1], [2, 2]], [[3, 2], [3, 3]]]
)
b = data_utils.pad_sequences(a, maxlen=2, truncating="post")
self.assertAllClose(
b, [[[0, 0], [1, 1]], [[2, 1], [2, 2]], [[3, 1], [3, 2]]]
)
# test value
b = data_utils.pad_sequences(a, maxlen=3, value=1)
self.assertAllClose(
b,
[
[[1, 1], [1, 1], [1, 1]],
[[1, 1], [2, 1], [2, 2]],
[[3, 1], [3, 2], [3, 3]],
],
)
if __name__ == "__main__":
# Bazel sets these environment variables to very long paths.
# Tempfile uses them to create long paths, and in turn multiprocessing
# library tries to create sockets named after paths. Delete whatever bazel
# writes to these to avoid tests failing due to socket addresses being too
# long.
for var in ("TMPDIR", "TMP", "TEMP"):
if var in os.environ:
del os.environ[var]
tf.test.main()
| tf-keras/tf_keras/utils/data_utils_test.py/0 | {
"file_path": "tf-keras/tf_keras/utils/data_utils_test.py",
"repo_id": "tf-keras",
"token_count": 8806
} | 248 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility methods related to kernelized layers."""
import tensorflow.compat.v2 as tf
def _to_matrix(u):
"""If input tensor is a vector (i.e., has rank 1), converts it to matrix."""
u_rank = len(u.shape)
if u_rank not in [1, 2]:
raise ValueError(
f"The input tensor should have rank 1 or 2. Received rank: {u_rank}"
)
if u_rank == 1:
return tf.expand_dims(u, 0)
return u
def _align_matrices(x, y):
"""Aligns x and y tensors to allow computations over pairs of their rows."""
x_matrix = _to_matrix(x)
y_matrix = _to_matrix(y)
x_shape = x_matrix.shape
y_shape = y_matrix.shape
if y_shape[1] != x_shape[1]: # dimensions do not match.
raise ValueError(
"The outermost dimensions of the input tensors should match. "
f"Received y = {y_shape[1]} vs x = {x_shape[1]}."
)
x_tile = tf.tile(tf.expand_dims(x_matrix, 1), [1, y_shape[0], 1])
y_tile = tf.tile(tf.expand_dims(y_matrix, 0), [x_shape[0], 1, 1])
return x_tile, y_tile
def inner_product(u, v):
u = _to_matrix(u)
v = _to_matrix(v)
return tf.matmul(u, v, transpose_b=True)
def exact_gaussian_kernel(x, y, stddev):
r"""Computes exact Gaussian kernel value(s) for tensors x and y and stddev.
The Gaussian kernel for vectors u, v is defined as follows:
K(u, v) = exp(-||u-v||^2 / (2* stddev^2))
where the norm is the l2-norm. x, y can be either vectors or matrices. If
they are vectors, they must have the same dimension. If they are matrices,
they must have the same number of columns. In the latter case, the method
returns (as a matrix) K(u, v) values for all pairs (u, v) where u is a row
from x and v is a row from y.
Args:
      x: a tensor of rank 1 or 2. Its shape should be either [dim] or [m, dim].
      y: a tensor of rank 1 or 2. Its shape should be either [dim] or [n, dim].
stddev: The width of the Gaussian kernel.
Returns:
A single value (scalar) with shape (1, 1) (if x, y are vectors) or a
matrix of shape (m, n) with entries K(u, v) (where K is the Gaussian
kernel) for all (u,v) pairs where u, v are rows from x and y respectively.
Raises:
ValueError: if the shapes of x, y are not compatible.
"""
x_aligned, y_aligned = _align_matrices(x, y)
diff_squared_l2_norm = tf.reduce_sum(
tf.math.squared_difference(x_aligned, y_aligned), 2
)
return tf.exp(-diff_squared_l2_norm / (2 * stddev * stddev))
def exact_laplacian_kernel(x, y, stddev):
r"""Computes exact Laplacian kernel value(s) for tensors x & y using stddev.
The Laplacian kernel for vectors u, v is defined as follows:
K(u, v) = exp(-||u-v|| / stddev)
where the norm is the l1-norm. x, y can be either vectors or matrices. If
they are vectors, they must have the same dimension. If they are matrices,
they must have the same number of columns. In the latter case, the method
returns (as a matrix) K(u, v) values for all pairs (u, v) where u is a row
from x and v is a row from y.
Args:
      x: a tensor of rank 1 or 2. Its shape should be either [dim] or [m, dim].
      y: a tensor of rank 1 or 2. Its shape should be either [dim] or [n, dim].
      stddev: The width of the Laplacian kernel.
Returns:
A single value (scalar) with shape (1, 1) if x, y are vectors or a matrix
of shape (m, n) with entries K(u, v) (where K is the Laplacian kernel) for
all (u,v) pairs where u, v are rows from x and y respectively.
Raises:
ValueError: if the shapes of x, y are not compatible.
"""
x_aligned, y_aligned = _align_matrices(x, y)
diff_l1_norm = tf.reduce_sum(tf.abs(tf.subtract(x_aligned, y_aligned)), 2)
return tf.exp(-diff_l1_norm / stddev)
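# Illustrative usage sketch (not part of the upstream module): exact kernel
# values for two 2-D points against a single reference point, assuming eager
# execution and the `tf` import at the top of this file.
if __name__ == "__main__":
    x = tf.constant([[0.0, 0.0], [1.0, 0.0]])
    y = tf.constant([[0.0, 0.0]])

    # Gaussian: exp(-||u - v||_2^2 / (2 * stddev^2)) -> [[1.0], [~0.6065]]
    print(exact_gaussian_kernel(x, y, stddev=1.0).numpy())

    # Laplacian: exp(-||u - v||_1 / stddev) -> [[1.0], [~0.3679]]
    print(exact_laplacian_kernel(x, y, stddev=1.0).numpy())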
| tf-keras/tf_keras/utils/kernelized_utils.py/0 | {
"file_path": "tf-keras/tf_keras/utils/kernelized_utils.py",
"repo_id": "tf-keras",
"token_count": 1735
} | 249 |
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Steps per execution autotuning for TF-Keras engine."""
import logging
import threading
import time
import numpy as np
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.utils.StepsPerExecutionTuner")
class StepsPerExecutionTuner:
"""Steps per execution tuner class.
Args:
optimizer: The optimizer used for training/evaluation/prediction. Used
to measure iterations and global throughput
(`optimizer.iterations`/second).
spe_variable: A `tf.Variable` representing the `steps_per_execution`
variable used during training/evaluation/prediction. Must be
updatable with `spe_variable.assign`.
        interval: Optional int, the number of seconds to wait between calls to
measure throughput and tune `spe_variable`. Defaults to 5.
change_spe_interval: Optional int, the number of throughput measurements
before tuning. Defaults to 10.
        change_threshold: Optional float, the fractional change in throughput
            required to trigger a `steps_per_execution` change. For example,
            `0.1` triggers a change if throughput changes by more than 10%.
Examples:
If you're using `model.compile` and `model.fit`, this functionality is
available at compile time with `steps_per_execution='auto'`
```python
model.compile(..., steps_per_execution='auto')
```
Custom training loop usage:
```python
# Get model
inputs = keras.Input(shape=(784,), name="digits")
x = layers.Dense(64, activation="relu", name="dense_1")(inputs)
x = layers.Dense(64, activation="relu", name="dense_2")(x)
outputs = layers.Dense(10, name="predictions")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
# Instantiate an optimizer to train the model.
optimizer = keras.optimizers.SGD(learning_rate=1e-3)
# Instantiate a loss function.
loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
# Prepare the training dataset.
batch_size = 64
(x_train, y_train), (_, _) = keras.datasets.mnist.load_data()
x_train = np.reshape(x_train, (-1, 784))
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
# Create our steps per execution variable
steps_per_execution = tf.Variable(
1,
dtype="int64",
aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA
)
# Create the tuner
tuner = StepsPerExecutionTuner(
optimizer, steps_per_execution
)
# Create a step function that runs a single training step
@tf.function
def step_fn(iterator):
batch_data, labels = next(iterator)
with tf.GradientTape() as tape:
logits = model(batch_data, training=True)
loss_value = loss_fn(labels, logits)
grads = tape.gradient(loss_value, model.trainable_weights)
optimizer.apply_gradients(zip(grads, model.trainable_weights))
# We can now pack multiple execution steps into one call
@tf.function
def multi_step_train_fn(iterator, steps_per_execution):
for _ in tf.range(steps_per_execution):
outputs = step_fn(iterator)
return
initial_steps_per_execution = 1
steps_per_epoch = 100
epochs = 2
# Start the tuner before training
tuner.start()
# We can now call our multi step training with our data
for epoch in range(epochs):
for _ in range(steps_per_epoch):
multi_step_train_fn(iterator, steps_per_execution)
# End the tuner after training
tuner.stop()
```
"""
def __init__(
self,
optimizer,
spe_variable,
interval=5,
change_spe_interval=10,
change_threshold=0.1,
):
self.optimizer = optimizer
self._steps_per_execution = spe_variable
self.interval = interval
self.change_spe_interval = change_spe_interval
self.spe_change_threshold = change_threshold
self.steps_per_execution_stop_event = threading.Event()
self.thread = None
def start(self):
"""Starts steps per execution tuning thread.
Returns a `threading.Thread` which will run every `self.interval`
seconds to measure throughput and tune steps per execution.
"""
if self.thread and self.thread.is_alive():
return self.thread
self._begin_tuning()
self.thread = threading.Thread(
target=self._steps_per_execution_interval_call, daemon=True
) # needed to shut down successfully
self.thread.start()
return self.thread
@property
def steps_per_execution(self):
"""Settable attribute representing`steps_per_execution` variable."""
return self._steps_per_execution
@steps_per_execution.setter
def steps_per_execution(self, value):
self._steps_per_execution.assign(value)
self.init_spe = value
def _steps_per_execution_interval_call(self):
while not self.steps_per_execution_stop_event.is_set():
self._measure_and_tune()
self.steps_per_execution_stop_event.wait(self.interval)
def _begin_tuning(self):
self.start_time = time.time()
self.init_iterations = self.optimizer.iterations.numpy()
self.init_spe = self._steps_per_execution.numpy().item()
self.spe_last_logged = {
"iteration": self.init_iterations,
"time_secs": self.start_time,
}
self.rgsps = [] # rgsps = recent global steps per second
self.avg_rgsps = 0
self.prev_avg_rgsps = 0
self.spe_tune_last_action_add = True
self.spe_measurement_count = 0
def stop(self):
"""Stops steps per execution tuning thread."""
if not self.steps_per_execution_stop_event.is_set():
self.steps_per_execution_stop_event.set()
def _should_tune(self):
epoch_boundary = False
if self.rgsps[-1] == 0:
epoch_boundary = True
return (
self.spe_measurement_count % self.change_spe_interval == 0
and self.rgsps
and not epoch_boundary
)
def _tune(self):
"""Changes the steps per execution using the following algorithm.
If there is more than a 10% increase in the throughput, then the last
recorded action is repeated (i.e. if increasing the SPE caused an
increase in throughput, it is increased again). If there is more than a
10% decrease in the throughput, then the opposite of the last action is
performed (i.e. if increasing the SPE decreased the throughput, then the
SPE is decreased).
"""
self.avg_rgsps = sum(self.rgsps) / len(self.rgsps)
fast_threshold = (1 + self.spe_change_threshold) * self.prev_avg_rgsps
slow_threshold = (1 - self.spe_change_threshold) * self.prev_avg_rgsps
if self.spe_tune_last_action_add:
repeat_action_mult = 1.5
opposite_action_mult = 0.5
else:
repeat_action_mult = 0.5
opposite_action_mult = 1.5
spe_variable = self._steps_per_execution
spe_limit = spe_variable.dtype.max / 1.5
current_spe = spe_variable.numpy().item()
if self.avg_rgsps > fast_threshold:
# Note that our first iteration will always trigger this as our
# threshold should be 0
new_spe = current_spe * repeat_action_mult
elif self.avg_rgsps < slow_threshold:
new_spe = current_spe * opposite_action_mult
self.spe_tune_last_action_add = not self.spe_tune_last_action_add
else:
new_spe = current_spe
if current_spe >= spe_limit:
new_spe = current_spe
elif current_spe == 0:
new_spe = self.init_spe
self._steps_per_execution.assign(np.round(new_spe))
self.prev_avg_rgsps = self.avg_rgsps
def _measure_and_tune(self):
self.spe_measurement_count += 1
cur_iteration = self.optimizer.iterations.numpy()
cur_time_secs = time.time()
recent_gsps = (cur_iteration - self.spe_last_logged["iteration"]) / (
cur_time_secs - self.spe_last_logged["time_secs"]
)
self.rgsps.append(recent_gsps)
if len(self.rgsps) > self.change_spe_interval:
self.rgsps.pop(0)
if cur_iteration == 0: # No need to tune, we have no measurements
self.start_time = cur_time_secs
return
self.spe_last_logged["iteration"] = cur_iteration
self.spe_last_logged["time_secs"] = cur_time_secs
try:
if self._should_tune():
self._tune()
except RuntimeError:
logging.exception("Steps per execution autotuner failed to run.")
return
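# Hypothetical, self-contained sketch (not part of the upstream module) of the
# decision rule implemented in `_tune` above: repeat the last action on a >10%
# speedup, reverse it on a >10% slowdown, otherwise leave the value unchanged.
if __name__ == "__main__":

    def next_spe(
        current_spe, avg_rgsps, prev_avg_rgsps, last_action_add, threshold=0.1
    ):
        repeat, opposite = (1.5, 0.5) if last_action_add else (0.5, 1.5)
        if avg_rgsps > (1 + threshold) * prev_avg_rgsps:
            return round(current_spe * repeat), last_action_add
        if avg_rgsps < (1 - threshold) * prev_avg_rgsps:
            return round(current_spe * opposite), not last_action_add
        return current_spe, last_action_add

    # Throughput improved by 20% after increasing SPE, so increase it again.
    print(next_spe(4, 120.0, 100.0, last_action_add=True))  # (6, True)
    # Throughput then dropped by a third, so reverse course and halve the SPE.
    print(next_spe(6, 80.0, 120.0, last_action_add=True))  # (3, False)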
| tf-keras/tf_keras/utils/steps_per_execution_tuning.py/0 | {
"file_path": "tf-keras/tf_keras/utils/steps_per_execution_tuning.py",
"repo_id": "tf-keras",
"token_count": 3966
} | 250 |