sudo pip install --upgrade pip
sudo pip install -e ".[tests]"
echo "sh shell/lint.sh" > .git/hooks/pre-commit
chmod a+x .git/hooks/pre-commit
 | autokeras/.devcontainer/setup.sh/0 | {
"file_path": "autokeras/.devcontainer/setup.sh",
"repo_id": "autokeras",
"token_count": 53
} | 0 |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from tensorflow import nest
from autokeras.blocks import basic
from autokeras.blocks import preprocessing
from autokeras.blocks import reduction
from autokeras.engine import block as block_module
BLOCK_TYPE = "block_type"
RESNET = "resnet"
XCEPTION = "xception"
VANILLA = "vanilla"
EFFICIENT = "efficient"
NORMALIZE = "normalize"
AUGMENT = "augment"
TRANSFORMER = "transformer"
MAX_TOKENS = "max_tokens"
NGRAM = "ngram"
BERT = "bert"
class ImageBlock(block_module.Block):
"""Block for image data.
The image block is a block choosing from ResNetBlock, XceptionBlock,
ConvBlock, and EfficientNetBlock, which is controlled by a hyperparameter,
'block_type'.
# Arguments
block_type: String. 'resnet', 'xception', 'vanilla', or 'efficient'. The
type of Block
to use. If unspecified, it will be tuned automatically.
normalize: Boolean. Whether to channel-wise normalize the images.
If unspecified, it will be tuned automatically.
augment: Boolean. Whether to do image augmentation. If unspecified,
it will be tuned automatically.
"""
def __init__(
self,
block_type: Optional[str] = None,
normalize: Optional[bool] = None,
augment: Optional[bool] = None,
**kwargs
):
super().__init__(**kwargs)
self.block_type = block_type
self.normalize = normalize
self.augment = augment
def get_config(self):
config = super().get_config()
config.update(
{
BLOCK_TYPE: self.block_type,
NORMALIZE: self.normalize,
AUGMENT: self.augment,
}
)
return config
def _build_block(self, hp, output_node, block_type):
if block_type == RESNET:
return basic.ResNetBlock().build(hp, output_node)
elif block_type == XCEPTION:
return basic.XceptionBlock().build(hp, output_node)
elif block_type == VANILLA:
return basic.ConvBlock().build(hp, output_node)
elif block_type == EFFICIENT:
return basic.EfficientNetBlock().build(hp, output_node)
def build(self, hp, inputs=None):
input_node = nest.flatten(inputs)[0]
output_node = input_node
if self.normalize is None and hp.Boolean(NORMALIZE):
with hp.conditional_scope(NORMALIZE, [True]):
output_node = preprocessing.Normalization().build(
hp, output_node
)
elif self.normalize:
output_node = preprocessing.Normalization().build(hp, output_node)
if self.augment is None and hp.Boolean(AUGMENT):
with hp.conditional_scope(AUGMENT, [True]):
output_node = preprocessing.ImageAugmentation().build(
hp, output_node
)
elif self.augment:
output_node = preprocessing.ImageAugmentation().build(
hp, output_node
)
if self.block_type is None:
block_type = hp.Choice(
BLOCK_TYPE, [RESNET, XCEPTION, VANILLA, EFFICIENT]
)
with hp.conditional_scope(BLOCK_TYPE, [block_type]):
output_node = self._build_block(hp, output_node, block_type)
else:
output_node = self._build_block(hp, output_node, self.block_type)
return output_node
class TextBlock(block_module.Block):
"""Block for text data.
# Arguments
block_type: String. 'vanilla', 'transformer', 'ngram', or 'bert'. The type of
Block to use. 'vanilla' and 'transformer' use a TextToIntSequence
vectorizer, 'ngram' uses TextToNgramVector, and 'bert' uses BertBlock. If
unspecified, it will be tuned automatically.
max_tokens: Int. The maximum size of the vocabulary.
If left unspecified, it will be tuned automatically.
pretraining: String. 'random' (use random weights instead of any pretrained
model), 'glove', 'fasttext' or 'word2vec'. Use pretrained word
embeddings. If left unspecified, it will be tuned automatically.
"""
def __init__(
self,
block_type: Optional[str] = None,
max_tokens: Optional[int] = None,
pretraining: Optional[str] = None,
**kwargs
):
super().__init__(**kwargs)
self.block_type = block_type
self.max_tokens = max_tokens
self.pretraining = pretraining
def get_config(self):
config = super().get_config()
config.update(
{
BLOCK_TYPE: self.block_type,
MAX_TOKENS: self.max_tokens,
"pretraining": self.pretraining,
}
)
return config
def build(self, hp, inputs=None):
input_node = nest.flatten(inputs)[0]
output_node = input_node
if self.block_type is None:
block_type = hp.Choice(
BLOCK_TYPE, [VANILLA, TRANSFORMER, NGRAM, BERT]
)
with hp.conditional_scope(BLOCK_TYPE, [block_type]):
output_node = self._build_block(hp, output_node, block_type)
else:
output_node = self._build_block(hp, output_node, self.block_type)
return output_node
def _build_block(self, hp, output_node, block_type):
max_tokens = self.max_tokens or hp.Choice(
MAX_TOKENS, [500, 5000, 20000], default=5000
)
if block_type == NGRAM:
output_node = preprocessing.TextToNgramVector(
max_tokens=max_tokens
).build(hp, output_node)
return basic.DenseBlock().build(hp, output_node)
if block_type == BERT:
output_node = basic.BertBlock().build(hp, output_node)
else:
output_node = preprocessing.TextToIntSequence(
max_tokens=max_tokens
).build(hp, output_node)
if block_type == TRANSFORMER:
output_node = basic.Transformer(
max_features=max_tokens + 1,
pretraining=self.pretraining,
).build(hp, output_node)
else:
output_node = basic.Embedding(
max_features=max_tokens + 1,
pretraining=self.pretraining,
).build(hp, output_node)
output_node = basic.ConvBlock().build(hp, output_node)
output_node = reduction.SpatialReduction().build(hp, output_node)
output_node = basic.DenseBlock().build(hp, output_node)
return output_node
class StructuredDataBlock(block_module.Block):
"""Block for structured data.
# Arguments
categorical_encoding: Boolean. Whether to use the CategoricalToNumerical
to encode the categorical features to numerical features. Defaults
to True.
normalize: Boolean. Whether to normalize the features.
If unspecified, it will be tuned automatically.
seed: Int. Random seed.
"""
def __init__(
self,
categorical_encoding: bool = True,
normalize: Optional[bool] = None,
seed: Optional[int] = None,
**kwargs
):
super().__init__(**kwargs)
self.categorical_encoding = categorical_encoding
self.normalize = normalize
self.seed = seed
self.column_types = None
self.column_names = None
@classmethod
def from_config(cls, config):
column_types = config.pop("column_types")
column_names = config.pop("column_names")
instance = cls(**config)
instance.column_types = column_types
instance.column_names = column_names
return instance
def get_config(self):
config = super().get_config()
config.update(
{
"categorical_encoding": self.categorical_encoding,
"normalize": self.normalize,
"seed": self.seed,
"column_types": self.column_types,
"column_names": self.column_names,
}
)
return config
def build(self, hp, inputs=None):
input_node = nest.flatten(inputs)[0]
output_node = input_node
if self.categorical_encoding:
block = preprocessing.CategoricalToNumerical()
block.column_types = self.column_types
block.column_names = self.column_names
output_node = block.build(hp, output_node)
if self.normalize is None and hp.Boolean(NORMALIZE):
with hp.conditional_scope(NORMALIZE, [True]):
output_node = preprocessing.Normalization().build(
hp, output_node
)
elif self.normalize:
output_node = preprocessing.Normalization().build(hp, output_node)
output_node = basic.DenseBlock().build(hp, output_node)
return output_node
class TimeseriesBlock(block_module.Block):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def get_config(self):
return super().get_config()
def build(self, hp, inputs=None):
input_node = nest.flatten(inputs)[0]
output_node = input_node
output_node = basic.RNNBlock().build(hp, output_node)
return output_node
class GeneralBlock(block_module.Block):
"""A general neural network block when the input type is unknown.
When the input type is unknown, the GeneralBlock searches over a large
space for a good model.
# Arguments
name: String.
"""
def build(self, hp, inputs=None):
raise NotImplementedError
| autokeras/autokeras/blocks/wrapper.py/0 | {
"file_path": "autokeras/autokeras/blocks/wrapper.py",
"repo_id": "autokeras",
"token_count": 4560
} | 1 |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from autokeras.engine import serializable
class Preprocessor(serializable.Serializable):
"""A preprocessor for tf.data.Dataset.
A preprocessor transforms the dataset using `tf.data` operations.
"""
def fit(self, dataset):
"""Fit the preprocessor with the dataset.
# Arguments
dataset: an instance of `tf.data.Dataset`.
"""
# TODO: may need to change to a streaming way of fit to reduce the
# number of iterations through the dataset for speed. Need to be
# decided when we have more use cases for this fit.
pass
def transform(self, dataset):
"""Transform the dataset wth the preprocessor.
# Arguments
dataset: an instance of `tf.data.Dataset`.
# Returns
The transformed dataset.
"""
raise NotImplementedError
def get_config(self):
return {}
class TargetPreprocessor(Preprocessor):
"""Preprocessor for target data."""
def postprocess(self, dataset):
"""Postprocess the output of the Keras model.
# Arguments
dataset: numpy.ndarray. The corresponding output of the model.
# Returns
numpy.ndarray. The postprocessed data.
"""
raise NotImplementedError
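# --- Illustrative sketch (an assumption, not part of the original module) ---
# A minimal Preprocessor subclass showing the fit/transform contract described
# above. The class name `CastToFloat32` is hypothetical.
import tensorflow as tf


class CastToFloat32(Preprocessor):
    """Stateless preprocessor that casts every dataset element to float32."""

    def fit(self, dataset):
        # Nothing to learn for a stateless transformation.
        pass

    def transform(self, dataset):
        # Apply a tf.data operation element-wise.
        return dataset.map(lambda x: tf.cast(x, tf.float32))

    def get_config(self):
        return {}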
| autokeras/autokeras/engine/preprocessor.py/0 | {
"file_path": "autokeras/autokeras/engine/preprocessor.py",
"repo_id": "autokeras",
"token_count": 649
} | 2 |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow import keras
from tensorflow import nest
from autokeras import preprocessors as preprocessors_module
from autokeras.engine import hyper_preprocessor as hpps_module
from autokeras.engine import preprocessor as pps_module
from autokeras.utils import data_utils
from autokeras.utils import io_utils
class HyperPipeline(hpps_module.HyperPreprocessor):
"""A search space consists of HyperPreprocessors.
# Arguments
inputs: a list of lists of HyperPreprocessors.
outputs: a list of lists of HyperPreprocessors.
"""
def __init__(self, inputs, outputs, **kwargs):
super().__init__(**kwargs)
self.inputs = inputs
self.outputs = outputs
@staticmethod
def _build_preprocessors(hp, hpps_lists, dataset):
sources = data_utils.unzip_dataset(dataset)
preprocessors_list = []
for source, hpps_list in zip(sources, hpps_lists):
data = source
preprocessors = []
for hyper_preprocessor in hpps_list:
preprocessor = hyper_preprocessor.build(hp, data)
preprocessor.fit(data)
data = preprocessor.transform(data)
preprocessors.append(preprocessor)
preprocessors_list.append(preprocessors)
return preprocessors_list
def build(self, hp, dataset):
"""Build a Pipeline by Hyperparameters.
# Arguments
hp: Hyperparameters.
dataset: tf.data.Dataset.
# Returns
An instance of Pipeline.
"""
x = dataset.map(lambda x, y: x)
y = dataset.map(lambda x, y: y)
return Pipeline(
inputs=self._build_preprocessors(hp, self.inputs, x),
outputs=self._build_preprocessors(hp, self.outputs, y),
)
def load_pipeline(filepath, custom_objects=None):
"""Load a Pipeline instance from disk."""
if custom_objects is None:
custom_objects = {}
with keras.utils.custom_object_scope(custom_objects):
return Pipeline.from_config(io_utils.load_json(filepath))
class Pipeline(pps_module.Preprocessor):
"""A data pipeline for transform the entire dataset.
# Arguments
inputs: A list of lists of Preprocessors. For the input datasets for
the model.
outputs: A list of lists of Preprocessors. For the target datasets for
the model.
"""
def __init__(self, inputs, outputs, **kwargs):
super().__init__(**kwargs)
self.inputs = inputs
self.outputs = outputs
def fit(self, dataset):
"""Fit the Preprocessors."""
x = dataset.map(lambda x, y: x)
sources_x = data_utils.unzip_dataset(x)
for pps_list, data in zip(self.inputs, sources_x):
for preprocessor in pps_list:
preprocessor.fit(data)
data = preprocessor.transform(data)
y = dataset.map(lambda x, y: y)
sources_y = data_utils.unzip_dataset(y)
for pps_list, data in zip(self.outputs, sources_y):
for preprocessor in pps_list:
preprocessor.fit(data)
data = preprocessor.transform(data)
return
def transform(self, dataset):
"""Transform the dataset to be ready for the model.
# Arguments
dataset: tf.data.Dataset.
# Returns
An instance of tf.data.Dataset. The transformed dataset.
"""
x = dataset.map(lambda x, y: x)
y = dataset.map(lambda x, y: y)
x = self.transform_x(x)
y = self.transform_y(y)
return tf.data.Dataset.zip((x, y))
def transform_x(self, dataset):
"""Transform the input dataset for the model.
# Arguments
dataset: tf.data.Dataset. The input dataset for the model.
# Returns
An instance of tf.data.Dataset. The transformed dataset.
"""
return self._transform_data(dataset, self.inputs)
def transform_y(self, dataset):
"""Transform the target dataset for the model.
# Arguments
dataset: tf.data.Dataset. The target dataset for the model.
# Returns
An instance of tf.data.Dataset. The transformed dataset.
"""
return self._transform_data(dataset, self.outputs)
def _transform_data(self, dataset, pps_lists):
sources = data_utils.unzip_dataset(dataset)
transformed = []
for pps_list, data in zip(pps_lists, sources):
for preprocessor in pps_list:
data = preprocessor.transform(data)
transformed.append(data)
if len(transformed) == 1:
return transformed[0]
return tf.data.Dataset.zip(tuple(transformed))
def save(self, filepath):
io_utils.save_json(filepath, self.get_config())
def get_config(self):
return {
"inputs": [
[
preprocessors_module.serialize(preprocessor)
for preprocessor in preprocessors
]
for preprocessors in self.inputs
],
"outputs": [
[
preprocessors_module.serialize(preprocessor)
for preprocessor in preprocessors
]
for preprocessors in self.outputs
],
}
@classmethod
def from_config(cls, config):
return cls(
inputs=[
[
preprocessors_module.deserialize(preprocessor)
for preprocessor in preprocessors
]
for preprocessors in config["inputs"]
],
outputs=[
[
preprocessors_module.deserialize(preprocessor)
for preprocessor in preprocessors
]
for preprocessors in config["outputs"]
],
)
def postprocess(self, y):
"""Postprocess the outputs of the model.
# Arguments
y: numpy.ndarray or a list of numpy.ndarrays. The output of the
Keras model.
# Returns
A list or an instance of numpy.ndarray. The postprocessed data for
the heads.
"""
outputs = []
for data, preprocessors in zip(nest.flatten(y), self.outputs):
for preprocessor in preprocessors[::-1]:
if isinstance(preprocessor, pps_module.TargetPreprocessor):
data = preprocessor.postprocess(data)
outputs.append(data)
if len(outputs) == 1:
return outputs[0]
return outputs
| autokeras/autokeras/pipeline.py/0 | {
"file_path": "autokeras/autokeras/pipeline.py",
"repo_id": "autokeras",
"token_count": 3266
} | 3 |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
import keras_tuner
import numpy as np
from autokeras.engine import tuner as tuner_module
class TrieNode(object):
def __init__(self):
super().__init__()
self.num_leaves = 0
self.children = {}
self.hp_name = None
def is_leaf(self):
return len(self.children) == 0
class Trie(object):
def __init__(self):
super().__init__()
self.root = TrieNode()
def insert(self, hp_name):
names = hp_name.split("/")
new_word = False
current_node = self.root
nodes_on_path = [current_node]
for name in names:
if name not in current_node.children:
current_node.children[name] = TrieNode()
new_word = True
current_node = current_node.children[name]
nodes_on_path.append(current_node)
current_node.hp_name = hp_name
if new_word:
for node in nodes_on_path:
node.num_leaves += 1
@property
def nodes(self):
return self._get_all_nodes(self.root)
def _get_all_nodes(self, node):
ret = [node]
for key, value in node.children.items():
ret += self._get_all_nodes(value)
return ret
def get_hp_names(self, node):
if node.is_leaf():
return [node.hp_name]
ret = []
for key, value in node.children.items():
ret += self.get_hp_names(value)
return ret
class GreedyOracle(keras_tuner.Oracle):
"""An oracle combining random search and greedy algorithm.
It groups the HyperParameters into several categories, namely, HyperGraph,
Preprocessor, Architecture, and Optimization. The oracle tunes each group
separately using random search. In each trial, it uses a greedy strategy to
generate new values for one of the categories of HyperParameters and uses the
best trial so far for the rest of the HyperParameter values.
# Arguments
initial_hps: A list of dictionaries in the form of
{HyperParameter name (String): HyperParameter value}.
Each dictionary is one set of HyperParameters, which are used as the
initial trials for the search. Defaults to None.
seed: Int. Random seed.
"""
def __init__(self, initial_hps=None, seed=None, **kwargs):
super().__init__(seed=seed, **kwargs)
self.initial_hps = copy.deepcopy(initial_hps) or []
self._tried_initial_hps = [False] * len(self.initial_hps)
def get_state(self):
state = super().get_state()
state.update(
{
"initial_hps": self.initial_hps,
"tried_initial_hps": self._tried_initial_hps,
}
)
return state
def set_state(self, state):
super().set_state(state)
self.initial_hps = state["initial_hps"]
self._tried_initial_hps = state["tried_initial_hps"]
def _select_hps(self):
trie = Trie()
best_hps = self._get_best_hps()
for hp in best_hps.space:
# Not picking the fixed hps for generating new values.
if best_hps.is_active(hp) and not isinstance(
hp, keras_tuner.engine.hyperparameters.Fixed
):
trie.insert(hp.name)
all_nodes = trie.nodes
if len(all_nodes) <= 1:
return []
probabilities = np.array([1 / node.num_leaves for node in all_nodes])
sum_p = np.sum(probabilities)
probabilities = probabilities / sum_p
node = np.random.choice(all_nodes, p=probabilities)
return trie.get_hp_names(node)
def _next_initial_hps(self):
for index, hps in enumerate(self.initial_hps):
if not self._tried_initial_hps[index]:
self._tried_initial_hps[index] = True
return hps
def populate_space(self, trial_id):
if not all(self._tried_initial_hps):
values = self._next_initial_hps()
return {
"status": keras_tuner.engine.trial.TrialStatus.RUNNING,
"values": values,
}
for _ in range(self._max_collisions):
hp_names = self._select_hps()
values = self._generate_hp_values(hp_names)
# Reached max collisions.
if values is None:
continue
# Values found.
return {
"status": keras_tuner.engine.trial.TrialStatus.RUNNING,
"values": values,
}
# All stages reached max collisions.
return {
"status": keras_tuner.engine.trial.TrialStatus.STOPPED,
"values": None,
}
def _get_best_hps(self):
best_trials = self.get_best_trials()
if best_trials:
return best_trials[0].hyperparameters.copy()
else:
return self.hyperparameters.copy()
def _generate_hp_values(self, hp_names):
best_hps = self._get_best_hps()
collisions = 0
while True:
hps = keras_tuner.HyperParameters()
# Generate a set of random values.
for hp in self.hyperparameters.space:
hps.merge([hp])
# if not active, do nothing.
# if active, check if selected to be changed.
if hps.is_active(hp):
# if was active and not selected, do nothing.
if best_hps.is_active(hp.name) and hp.name not in hp_names:
hps.values[hp.name] = best_hps.values[hp.name]
continue
# if was not active or selected, sample.
hps.values[hp.name] = hp.random_sample(self._seed_state)
self._seed_state += 1
values = hps.values
# Keep trying until the set of values is unique,
# or until we exit due to too many collisions.
values_hash = self._compute_values_hash(values)
if values_hash in self._tried_so_far:
collisions += 1
if collisions <= self._max_collisions:
continue
return None
self._tried_so_far.add(values_hash)
break
return values
class Greedy(tuner_module.AutoTuner):
def __init__(
self,
hypermodel: keras_tuner.HyperModel,
objective: str = "val_loss",
max_trials: int = 10,
initial_hps: Optional[List[Dict[str, Any]]] = None,
seed: Optional[int] = None,
hyperparameters: Optional[keras_tuner.HyperParameters] = None,
tune_new_entries: bool = True,
allow_new_entries: bool = True,
**kwargs
):
self.seed = seed
oracle = GreedyOracle(
objective=objective,
max_trials=max_trials,
initial_hps=initial_hps,
seed=seed,
hyperparameters=hyperparameters,
tune_new_entries=tune_new_entries,
allow_new_entries=allow_new_entries,
)
super().__init__(oracle=oracle, hypermodel=hypermodel, **kwargs)
| autokeras/autokeras/tuners/greedy.py/0 | {
"file_path": "autokeras/autokeras/tuners/greedy.py",
"repo_id": "autokeras",
"token_count": 3643
} | 4 |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import warnings
import keras_tuner
import tensorflow as tf
from packaging.version import parse
from tensorflow import nest
def validate_num_inputs(inputs, num):
inputs = nest.flatten(inputs)
if not len(inputs) == num:
raise ValueError(
"Expected {num} elements in the inputs list "
"but received {len} inputs.".format(num=num, len=len(inputs))
)
def to_snake_case(name):
intermediate = re.sub("(.)([A-Z][a-z0-9]+)", r"\1_\2", name)
insecure = re.sub("([a-z])([A-Z])", r"\1_\2", intermediate).lower()
return insecure
def check_tf_version() -> None:
if parse(tf.__version__) < parse("2.7.0"):
warnings.warn(
"The Tensorflow package version needs to be at least 2.7.0 \n"
"for AutoKeras to run. Currently, your TensorFlow version is \n"
f"{tf.__version__}. Please upgrade with \n"
"`$ pip install --upgrade tensorflow`. \n"
"You can use `pip freeze` to check afterwards "
"that everything is ok.",
ImportWarning,
)
def check_kt_version() -> None:
if parse(keras_tuner.__version__) < parse("1.1.0"):
warnings.warn(
"The Keras Tuner package version needs to be at least 1.1.0 \n"
"for AutoKeras to run. Currently, your Keras Tuner version is \n"
f"{keras_tuner.__version__}. Please upgrade with \n"
"`$ pip install --upgrade keras-tuner`. \n"
"You can use `pip freeze` to check afterwards "
"that everything is ok.",
ImportWarning,
)
def contain_instance(instance_list, instance_type):
return any(
[isinstance(instance, instance_type) for instance in instance_list]
)
def evaluate_with_adaptive_batch_size(
model, batch_size, verbose=1, **fit_kwargs
):
return run_with_adaptive_batch_size(
batch_size,
lambda x, validation_data, **kwargs: model.evaluate(
x, verbose=verbose, **kwargs
),
**fit_kwargs,
)
def predict_with_adaptive_batch_size(
model, batch_size, verbose=1, **fit_kwargs
):
return run_with_adaptive_batch_size(
batch_size,
lambda x, validation_data, **kwargs: model.predict(
x, verbose=verbose, **kwargs
),
**fit_kwargs,
)
def fit_with_adaptive_batch_size(model, batch_size, **fit_kwargs):
history = run_with_adaptive_batch_size(
batch_size, lambda **kwargs: model.fit(**kwargs), **fit_kwargs
)
return model, history
def run_with_adaptive_batch_size(batch_size, func, **fit_kwargs):
x = fit_kwargs.pop("x")
validation_data = None
if "validation_data" in fit_kwargs:
validation_data = fit_kwargs.pop("validation_data")
while batch_size > 0:
try:
history = func(x=x, validation_data=validation_data, **fit_kwargs)
break
except tf.errors.ResourceExhaustedError as e:
if batch_size == 1:
raise e
batch_size //= 2
print(
"Not enough memory, reduce batch size to {batch_size}.".format(
batch_size=batch_size
)
)
x = x.unbatch().batch(batch_size)
if validation_data is not None:
validation_data = validation_data.unbatch().batch(batch_size)
return history
def get_hyperparameter(value, hp, dtype):
if value is None:
return hp
return value
def add_to_hp(hp, hps, name=None):
"""Add the HyperParameter (self) to the HyperParameters.
# Arguments
hp: keras_tuner.HyperParameters.
name: String. If left unspecified, the hp name is used.
"""
if not isinstance(hp, keras_tuner.engine.hyperparameters.HyperParameter):
return hp
kwargs = hp.get_config()
if name is None:
name = hp.name
kwargs.pop("conditions")
kwargs.pop("name")
class_name = hp.__class__.__name__
func = getattr(hps, class_name)
return func(name=name, **kwargs)
def serialize_keras_object(obj):
if hasattr(tf.keras.utils, "legacy"):
return tf.keras.utils.legacy.serialize_keras_object(
obj
) # pragma: no cover
else:
return tf.keras.utils.serialize_keras_object(obj) # pragma: no cover
def deserialize_keras_object(
config, module_objects=None, custom_objects=None, printable_module_name=None
):
if hasattr(tf.keras.utils, "legacy"):
return (
tf.keras.utils.legacy.deserialize_keras_object( # pragma: no cover
config, custom_objects, module_objects, printable_module_name
)
)
else:
return tf.keras.utils.deserialize_keras_object( # pragma: no cover
config, custom_objects, module_objects, printable_module_name
)
| autokeras/autokeras/utils/utils.py/0 | {
"file_path": "autokeras/autokeras/utils/utils.py",
"repo_id": "autokeras",
"token_count": 2354
} | 5 |
FROM python:3.7
RUN pip install flake8 black isort
WORKDIR /autokeras
CMD ["python", "docker/pre_commit.py"]
| autokeras/docker/pre-commit.Dockerfile/0 | {
"file_path": "autokeras/docker/pre-commit.Dockerfile",
"repo_id": "autokeras",
"token_count": 43
} | 6 |
import pathlib
import shutil
from inspect import getdoc
from inspect import isclass
from typing import Dict
from typing import List
from typing import Union
from typing import get_type_hints
from . import utils
from .docstring import process_docstring
from .examples import copy_examples
from .get_signatures import get_signature
class DocumentationGenerator:
"""Generates the documentation.
# Arguments
pages: A dictionary. The keys are the files' paths, the values
are lists of strings: function / class / method names
with dotted access to the object. For example,
`pages = {'my_file.md': ['keras.layers.Dense']}` is valid.
project_url: The url pointing to the module directory of your project on
GitHub. This will be used to make a `[Sources]` link.
template_dir: Where to put the markdown files which will be copied and
filled in the destination directory. You should put files like
`index.md` inside. If you want a markdown file to be filled with
the docstring of a function, use the `{{autogenerated}}` tag inside,
and then add the markdown file to the `pages` dictionary.
example_dir: Where you store examples in your project. Usually
standalone files with a markdown docstring at the top. Will be
inserted in the docs.
extra_aliases: When displaying type hints, it's possible that the full
dotted path is displayed instead of alias. The aliases present in
`pages` are used, but it may happen if you're using a third-party
library. For example `tensorflow.python.ops.variables.Variable` is
displayed instead of `tensorflow.Variable`. Here you have two
solutions: either you provide the import keras-autodoc should
follow,
`extra_aliases=["tensorflow.Variable"]`, or you provide a
mapping to use,
`extra_aliases={"tensorflow.python.ops.variables.Variable":
"tf.Variable"}`. The second option should be used if you want more
control and don't want to respect the alias corresponding
to the import (you can't do `import tf.Variable`). When giving a
list, keras-autodoc will try to import the object from the string to
understand which object you want to replace.
max_signature_line_length: When displaying class and function
signatures, keras-autodoc formats them using Black. This parameter
controls the maximum line length of these signatures, and is passed
directly through to Black.
titles_size: `"#"` signs to put before a title in the generated
markdown.
"""
def __init__(
self,
pages: Dict[str, list] = {},
project_url: Union[str, Dict[str, str]] = None,
template_dir=None,
examples_dir=None,
extra_aliases: Union[List[str], Dict[str, str]] = None,
max_signature_line_length: int = 110,
titles_size="###",
):
self.pages = pages
self.project_url = project_url
self.template_dir = template_dir
self.examples_dir = examples_dir
self.class_aliases = {}
self._fill_aliases(extra_aliases)
self.max_signature_line_length = max_signature_line_length
self.titles_size = titles_size
def generate(self, dest_dir):
"""Generate the docs.
# Arguments
dest_dir: Where to put the resulting markdown files.
"""
dest_dir = pathlib.Path(dest_dir)
print("Cleaning up existing sources directory.")
if dest_dir.exists():
shutil.rmtree(dest_dir)
print("Populating sources directory with templates.")
if self.template_dir:
shutil.copytree(self.template_dir, dest_dir)
for file_path, elements in self.pages.items():
markdown_text = ""
for element in elements:
markdown_text += self._render(element)
utils.insert_in_file(markdown_text, dest_dir / file_path)
if self.examples_dir is not None:
copy_examples(self.examples_dir, dest_dir / "examples")
def process_docstring(self, docstring, types: dict = None):
"""Can be overridden."""
processed = process_docstring(docstring, types, self.class_aliases)
return processed
def process_signature(self, signature):
"""Can be overridden."""
return signature
def _render(self, element):
if isinstance(element, str):
object_ = utils.import_object(element)
if utils.ismethod(object_):
# we remove the modules when displaying the methods
signature_override = ".".join(element.split(".")[-2:])
else:
signature_override = element
else:
signature_override = None
object_ = element
return self._render_from_object(object_, signature_override)
def _render_from_object(self, object_, signature_override: str):
subblocks = []
if self.project_url is not None:
subblocks.append(utils.make_source_link(object_, self.project_url))
signature = get_signature(
object_, signature_override, self.max_signature_line_length
)
signature = self.process_signature(signature)
subblocks.append(f"{self.titles_size} {object_.__name__}\n")
subblocks.append(utils.code_snippet(signature))
docstring = getdoc(object_)
if docstring:
if isclass(object_):
type_hints = get_type_hints(object_.__init__)
else:
type_hints = get_type_hints(object_)
docstring = self.process_docstring(docstring, type_hints)
subblocks.append(docstring)
return "\n\n".join(subblocks) + "\n\n----\n\n"
def _fill_aliases(self, extra_aliases):
for list_elements in self.pages.values():
for element_as_str in list_elements:
element = utils.import_object(element_as_str)
if not isclass(element):
continue
true_dotted_path = utils.get_dotted_path(element)
self.class_aliases[true_dotted_path] = element_as_str
if isinstance(extra_aliases, dict):
self.class_aliases.update(extra_aliases)
elif isinstance(extra_aliases, list):
for alias in extra_aliases:
full_dotted_path = utils.get_dotted_path(
utils.import_object(alias)
)
self.class_aliases[full_dotted_path] = alias
| autokeras/docs/keras_autodoc/autogen.py/0 | {
"file_path": "autokeras/docs/keras_autodoc/autogen.py",
"repo_id": "autokeras",
"token_count": 2855
} | 7 |
"""shell
pip install autokeras
"""
import os
import numpy as np
import tensorflow as tf
from sklearn.datasets import load_files
import autokeras as ak
"""
To make this tutorial easy to follow, we just treat the IMDB dataset as a
regression dataset. This means we will treat the prediction targets of the
IMDB dataset, which are 0s and 1s, as numerical values, so that they can be
directly used as the regression targets.
## A Simple Example
The first step is to prepare your data. Here we use the [IMDB
dataset](https://keras.io/datasets/#imdb-movie-reviews-sentiment-classification)
as an example.
"""
dataset = tf.keras.utils.get_file(
fname="aclImdb.tar.gz",
origin="http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz",
extract=True,
)
# set path to dataset
IMDB_DATADIR = os.path.join(os.path.dirname(dataset), "aclImdb")
classes = ["pos", "neg"]
train_data = load_files(
os.path.join(IMDB_DATADIR, "train"), shuffle=True, categories=classes
)
test_data = load_files(
os.path.join(IMDB_DATADIR, "test"), shuffle=False, categories=classes
)
x_train = np.array(train_data.data)
y_train = np.array(train_data.target)
x_test = np.array(test_data.data)
y_test = np.array(test_data.target)
print(x_train.shape) # (25000,)
print(y_train.shape)  # (25000,)
print(x_train[0][:50]) # <START> this film was just brilliant casting <UNK>
"""
The second step is to run the [TextRegressor](/text_regressor). As a quick
demo, we set epochs to 2. You can also leave the epochs unspecified for an
adaptive number of epochs.
"""
# Initialize the text regressor.
reg = ak.TextRegressor(
overwrite=True, max_trials=10 # It tries 10 different models.
)
# Feed the text regressor with training data.
reg.fit(x_train, y_train, epochs=2)
# Predict with the best model.
predicted_y = reg.predict(x_test)
# Evaluate the best model with testing data.
print(reg.evaluate(x_test, y_test))
"""
## Validation Data
By default, AutoKeras uses the last 20% of training data as validation data. As
shown in the example below, you can use `validation_split` to specify the
percentage.
"""
reg.fit(
x_train,
y_train,
# Split the training data and use the last 15% as validation data.
validation_split=0.15,
)
"""
You can also use your own validation set instead of splitting it from the
training data with `validation_data`.
"""
split = 5000
x_val = x_train[split:]
y_val = y_train[split:]
x_train = x_train[:split]
y_train = y_train[:split]
reg.fit(
x_train,
y_train,
epochs=2,
# Use your own validation set.
validation_data=(x_val, y_val),
)
"""
## Customized Search Space
For advanced users, you may customize your search space by using
[AutoModel](/auto_model/#automodel-class) instead of
[TextRegressor](/text_regressor). You can configure the
[TextBlock](/block/#textblock-class) for some high-level configurations, e.g.,
`block_type` for the type of text vectorization method to use. You can use
'vanilla' or 'transformer', which use
[TextToIntSequence](/block/#texttointsequence-class) to convert the words to
integers and [Embedding](/block/#embedding-class) for embedding the integer
sequences, or you can use 'ngram', which uses
[TextToNgramVector](/block/#texttongramvector-class) to vectorize the
sentences. You can also leave these arguments unspecified, so that the
different choices are tuned automatically. See the following example for
details.
"""
input_node = ak.TextInput()
output_node = ak.TextBlock(block_type="ngram")(input_node)
output_node = ak.RegressionHead()(output_node)
reg = ak.AutoModel(
inputs=input_node, outputs=output_node, overwrite=True, max_trials=1
)
reg.fit(x_train, y_train, epochs=2)
"""
The usage of [AutoModel](/auto_model/#automodel-class) is similar to the
[functional API](https://www.tensorflow.org/guide/keras/functional) of Keras.
Basically, you are building a graph, whose edges are blocks and the nodes are
intermediate outputs of blocks. To add an edge from `input_node` to
`output_node`, use `output_node = ak.[some_block]([block_args])(input_node)`.
You can also use more fine-grained blocks to customize the search space
even further. See the following example.
"""
input_node = ak.TextInput()
output_node = ak.TextToIntSequence()(input_node)
output_node = ak.Embedding()(output_node)
# Use separable Conv layers in Keras.
output_node = ak.ConvBlock(separable=True)(output_node)
output_node = ak.RegressionHead()(output_node)
reg = ak.AutoModel(
inputs=input_node, outputs=output_node, overwrite=True, max_trials=1
)
reg.fit(x_train, y_train, epochs=2)
"""
## Data Format
The AutoKeras TextRegressor is quite flexible for the data format.
For the text, the input data should be one-dimensional. For the regression
targets, it should be a vector of numerical values. AutoKeras accepts
numpy.ndarray.
We also support using [tf.data.Dataset](
https://www.tensorflow.org/api_docs/python/tf/data/Dataset?version=stable)
format for the training data.
"""
train_set = tf.data.Dataset.from_tensor_slices(((x_train,), (y_train,))).batch(
32
)
test_set = tf.data.Dataset.from_tensor_slices(((x_test,), (y_test,))).batch(32)
reg = ak.TextRegressor(overwrite=True, max_trials=2)
# Feed the tensorflow Dataset to the regressor.
reg.fit(train_set, epochs=2)
# Predict with the best model.
predicted_y = reg.predict(test_set)
# Evaluate the best model with testing data.
print(reg.evaluate(test_set))
"""
## Reference
[TextRegressor](/text_regressor),
[AutoModel](/auto_model/#automodel-class),
[TextBlock](/block/#textblock-class),
[TextToIntSequence](/block/#texttointsequence-class),
[Embedding](/block/#embedding-class),
[TextToNgramVector](/block/#texttongramvector-class),
[ConvBlock](/block/#convblock-class),
[TextInput](/node/#textinput-class),
[RegressionHead](/block/#regressionhead-class).
"""
| autokeras/docs/py/text_regression.py/0 | {
"file_path": "autokeras/docs/py/text_regression.py",
"repo_id": "autokeras",
"token_count": 1992
} | 8 |
:root>* {
--md-primary-fg-color: #d00000;
--md-accent-fg-color: #d00000;
} | autokeras/docs/templates/stylesheets/extra.css/0 | {
"file_path": "autokeras/docs/templates/stylesheets/extra.css",
"repo_id": "autokeras",
"token_count": 38
} | 9 |
from distutils.core import setup
from pathlib import Path
from setuptools import find_packages
this_file = Path(__file__).resolve()
readme = this_file.parent / "README.md"
setup(
name="autokeras",
description="AutoML for deep learning",
package_data={"": ["README.md"]},
long_description=readme.read_text(encoding="utf-8"),
long_description_content_type="text/markdown",
author="DATA Lab, Keras Team",
author_email="[email protected]",
url="http://autokeras.com",
keywords=["AutoML", "Keras"],
install_requires=[
"packaging",
"tensorflow>=2.8.0",
"keras-tuner>=1.1.0",
"keras-nlp>=0.4.0",
"pandas",
],
extras_require={
"tests": [
"pytest>=4.4.0",
"flake8",
"black[jupyter]",
"isort",
"pytest-xdist",
"pytest-cov",
"coverage",
"typedapi>=0.2,<0.3",
"scikit-learn",
],
},
python_requires=">=3.8",
classifiers=[
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Topic :: Scientific/Engineering :: Mathematics",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Software Development :: Libraries",
],
license="Apache License 2.0",
packages=find_packages(exclude=("*test*",)),
)
| autokeras/setup.py/0 | {
"file_path": "autokeras/setup.py",
"repo_id": "autokeras",
"token_count": 771
} | 10 |
# keras-cv Single Stage Two-Dimensional Object Detection API
| Status | Proposed |
:-------------- |:---------------------------------------------------- |
| **Author(s)** | Zhenyu Tan ([email protected]), Francois Chollet ([email protected])|
| **Contributor(s)** | Pengchong Jin ([email protected])|
| **Updated** | 2020-09-28 |
## Objective
We aim at providing the core primitive components for training and serving single-stage two-dimensional object
detection models, such as Single-Shot MultiBox Detector (SSD), RetinaNet, and You-Only-Look-Once (YOLO).
Pretrained models will also be provided, similar to keras-applications.
## Key Benefits
Single-stage object detection models are a state-of-the-art technique that powers many computer vision tasks. They provide
faster detection than two-stage models (such as FasterRCNN), while maintaining comparable performance.
With this proposal, Keras users will be able to build end-to-end models with a simple API.
## Design overview
This proposal includes the specific core components for building single-stage object detection models. It does not, however, include:
1. Data augmentation, such as image and groundtruth box preprocessing
2. Model backbone, such as DarkNet, or functions to generate feature maps
3. Detection heads, such as Feature Pyramid
4. Metrics utilities, such as COCO Evaluator, or visualization utils.
Data augmentation will be included as a separate RFC that handles a
broader context than object detection.
Model backbones and detection heads are model-specific; we anticipate them to be analyzed and proposed in
`keras.applications` for heavily used patterns. However, the user can build them easily using Keras.
#### Training
Case where a user want to train from scratch:
```python
import tensorflow as tf
import tensorflow_datasets as tfds
import keras_cv
# Considering a COCO dataset
coco_dataset = tfds.load('coco/2017')
train_ds, eval_ds = coco_dataset['train'], coco_dataset['validation']
def preprocess(features):
image, gt_boxes, gt_labels = features['image'], features['objects']['bbox'], features['objects']['label']
# preprocess image, gt_boxes, gt_labels, such as flip, resize, and padding, and reserve 0 for background label.
return image, gt_boxes, gt_labels
anchor_generator = keras_cv.ops.AnchorGenerator(anchor_sizes, scales, aspect_ratios, strides)
similarity_calculator = keras_cv.layers.IOUSimilarity()
box_matcher = keras_cv.ops.BoxMatcher(positive_threshold, negative_threshold)
target_gather = keras_cv.ops.TargetGather()
box_coder = keras_cv.ops.BoxCoder(offset='sigmoid')
def encode_label(image, gt_boxes, gt_labels):
anchor_boxes = anchor_generator(image_size)
iou = similarity_calculator(gt_boxes, anchor_boxes)
match_indices, match_indicators = box_matcher(iou)
mask = tf.less_equal(match_indicators, 0)
class_mask = tf.expand_dims(mask, -1)
box_mask = tf.tile(class_mask, [1, 4])
class_targets = target_gather(gt_labels, match_indices, class_mask, -1)
box_targets = target_gather(gt_boxes, match_indices, box_mask, 0.0)
box_targets = box_coder.encode(box_targets, anchor_boxes)
weights = tf.squeeze(tf.ones_like(gt_labels), axis=-1)
ignore_mask = tf.equal(match_indicators, -2)
class_weights = target_gather(weights, match_indices, ignore_mask, 0.0)
box_weights = target_gather(weights, match_indices, mask, 0.0)
return (image, {'classification': class_targets, 'regression': box_targets},
{'classification': class_weights, 'regression': box_weights})
class RetinaNet(tf.keras.Model):
# includes backbone and feature pyramid head.
def __init__(self):
# self.backbone = Model Backbone that returns dict of feature map
# self.fpn = Feature Pyramid head that fuses the backbone feature maps
# self.head = classification and regression heads
def call(self, image, training=None):
feature_map = self.backbone(image, training)
feature_map = self.fpn(feature_map, training)
class_scores, boxes = self.head(feature_map, training)
return {'classification': class_scores, 'regression': boxes}
transformed_train_ds = train_ds.map(preprocess).map(encode_label).batch(128).shuffle(1024)
transformed_eval_ds = eval_ds.map(preprocess).map(encode_label).batch(128)
strategy = tf.distribute.TPUStrategy(...)
with strategy.scope():
optimizer = tf.keras.optimizers.SGD(lr_scheduler)
model = RetinaNet()
model.compile(optimizer=optimizer,
loss={'classification': keras_cv.losses.Focal(), 'regression': tf.keras.losses.Huber()},
metrics=[])
model.fit(transformed_train_ds, epochs=120, validation_data=transformed_eval_ds)
model.save(file_path)
```
#### Serving
Case where a user want to serve the trained model for a single image.
```python
loaded_model = tf.keras.models.load(file_path)
box_coder = keras_cv.ops.BoxCoder(offset='sigmoid')
anchor_generator = keras_cv.ops.AnchorGenerator()
anchor_boxes = anchor_generator(image_size)
detection_generator = keras_cv.layers.NMSDetectionDecoder()
@tf.function
def serving_fn(image):
batched_image = tf.expand_dims(image)
raw_boxes, scores = loaded_model(batched_image, training=False)
decoded_boxes = box_coder.decode(raw_boxes, anchor_boxes)
classes, scores, boxes, _ = detection_generator(scores, decoded_boxes)
return {'classes': classes, 'scores': scores, 'boxes': boxes}
```
## Detailed Design
For the rest of the design, we denote `B` as batch size, `N` as the number of ground truth boxes, and `M` as the number
of anchor boxes.
We propose 2 layers, 1 loss and 4 ops in this RFC.
#### Layers -- IouSimilarity
We propose the IouSimilarity layer to support ragged tensors directly; however, users can also pad ground truth
boxes or anchor boxes and pass a mask
```python
class IouSimilarity(tf.keras.layers.Layer):
"""Class to compute similarity based on Intersection over Union (IOU) metric."""
def __init__(self, mask_value):
"""Initializes IouSimilarity layer.
Args:
mask_value: A float mask value to fill where `mask` is True.
"""
def call(self, groundtruth_boxes, anchors, mask=None):
"""Compute pairwise IOU similarity between ground truth boxes and anchors.
Args:
groundtruth_boxes: A float Tensor of shape [N, 4], or [B, N, 4], representing box coordinates.
anchors: A float Tensor of shape [M, 4], or [B, M, 4], representing box coordinates.
mask: A boolean tensor with shape [N, M] or [B, N, M].
Returns:
A float tensor with shape [M, N] or [B, M, N] representing pairwise
iou scores, anchor per row and groundtruth_box per column.
Input shape:
groundtruth_boxes: [N, 4], or [B, N, 4]
anchors: [M, 4], or [B, M, 4]
Output shape:
[M, N], or [B, M, N]
"""
```
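As a hedged illustration (not part of the proposed API), the pairwise IoU underlying this layer can be written with plain TensorFlow broadcasting; the helper name `pairwise_iou` below is hypothetical and only meant to clarify the `[M, N]` output convention (anchor per row, ground truth box per column):
```python
import tensorflow as tf


def pairwise_iou(anchors, groundtruth_boxes):
    # anchors: [M, 4], groundtruth_boxes: [N, 4], boxes as [y_min, x_min, y_max, x_max].
    a = tf.expand_dims(anchors, 1)             # [M, 1, 4]
    b = tf.expand_dims(groundtruth_boxes, 0)   # [1, N, 4]
    # Intersection rectangle.
    y_min = tf.maximum(a[..., 0], b[..., 0])
    x_min = tf.maximum(a[..., 1], b[..., 1])
    y_max = tf.minimum(a[..., 2], b[..., 2])
    x_max = tf.minimum(a[..., 3], b[..., 3])
    intersection = tf.maximum(y_max - y_min, 0.0) * tf.maximum(x_max - x_min, 0.0)
    area_a = (a[..., 2] - a[..., 0]) * (a[..., 3] - a[..., 1])
    area_b = (b[..., 2] - b[..., 0]) * (b[..., 3] - b[..., 1])
    union = area_a + area_b - intersection
    return intersection / tf.maximum(union, 1e-8)  # [M, N]
```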
#### Layers -- NMSDetectionDecoder
```python
class NMSDetectionDecoder(tf.keras.layers.Layer):
"""Generates detected boxes with scores and classes for one-stage detector."""
def __init__(self,
pre_nms_top_k=5000,
pre_nms_score_threshold=0.05,
nms_iou_threshold=0.5,
max_num_detections=100,
use_batched_nms=False,
**kwargs):
"""Initializes a detection generator.
Args:
pre_nms_top_k: int, the number of top scores proposals to be kept before
applying NMS.
pre_nms_score_threshold: float, the score threshold to apply before
applying NMS. Proposals whose scores are below this threshold are
thrown away.
nms_iou_threshold: float in [0, 1], the NMS IoU threshold.
max_num_detections: int, the final number of total detections to generate.
use_batched_nms: bool, whether or not use
`tf.image.combined_non_max_suppression`.
**kwargs: other key word arguments passed to Layer.
"""
def call(self, raw_boxes, raw_scores, anchor_boxes, image_shape):
"""Generate final detections.
Args:
raw_boxes: a single Tensor or dict with keys representing FPN levels and values
representing box tensors of shape
[batch, feature_h, feature_w, num_anchors * 4].
raw_scores: a single Tensor or dict with keys representing FPN levels and values
representing logit tensors of shape
[batch, feature_h, feature_w, num_anchors].
anchor_boxes: a tensor of shape of [batch_size, K, 4] representing the
corresponding anchor boxes w.r.t `box_outputs`.
image_shape: a tensor of shape of [batch_size, 2] storing the image height
and width w.r.t. the scaled image, i.e. the same image space as
`box_outputs` and `anchor_boxes`.
Returns:
`detection_boxes`: float Tensor of shape [B, max_num_detections, 4]
representing top detected boxes in [y1, x1, y2, x2].
`detection_scores`: float Tensor of shape [B, max_num_detections]
representing sorted confidence scores for detected boxes. The values
are between [0, 1].
`detection_classes`: int Tensor of shape [B, max_num_detections]
representing classes for detected boxes.
`num_detections`: int Tensor of shape [B] only the first
`num_detections` boxes are valid detections
"""
```
#### Losses -- Focal
```python
class FocalLoss(tf.keras.losses.Loss):
"""Implements a Focal loss for classification problems.
Reference:
[Focal Loss for Dense Object Detection](https://arxiv.org/abs/1708.02002).
"""
def __init__(self,
alpha=0.25,
gamma=2.0,
reduction=tf.keras.losses.Reduction.AUTO,
name=None):
"""Initializes `FocalLoss`.
Arguments:
alpha: The `alpha` weight factor for binary class imbalance.
gamma: The `gamma` focusing parameter to re-weight loss.
reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
loss. Default value is `AUTO`. `AUTO` indicates that the reduction
option will be determined by the usage context. For almost all cases
this defaults to `SUM_OVER_BATCH_SIZE`. When used with
`tf.distribute.Strategy`, outside of built-in training loops such as
`tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
will raise an error. Please see this custom training [tutorial](
https://www.tensorflow.org/tutorials/distribute/custom_training) for
more details.
name: Optional name for the op. Defaults to 'retinanet_class_loss'.
"""
def call(self, y_true, y_pred):
"""Invokes the `FocalLoss`.
Arguments:
y_true: A tensor of size [batch, num_anchors, num_classes]
y_pred: A tensor of size [batch, num_anchors, num_classes]
Returns:
Summed loss float `Tensor`.
"""
```
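For reference, here is a minimal sketch of the focal weighting described in the referenced paper; it illustrates the math only and is not the exact implementation proposed here:
```python
import tensorflow as tf


def focal_loss_sketch(y_true, logits, alpha=0.25, gamma=2.0):
    # y_true, logits: [batch, num_anchors, num_classes] with one-hot targets.
    probs = tf.sigmoid(logits)
    ce = tf.nn.sigmoid_cross_entropy_with_logits(labels=y_true, logits=logits)
    # p_t is the predicted probability of the true class.
    p_t = y_true * probs + (1.0 - y_true) * (1.0 - probs)
    alpha_t = y_true * alpha + (1.0 - y_true) * (1.0 - alpha)
    # Down-weight easy, well-classified examples by (1 - p_t) ** gamma.
    return tf.reduce_sum(alpha_t * tf.pow(1.0 - p_t, gamma) * ce)
```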
#### Ops -- AnchorGenerator
```python
class AnchorGenerator:
"""Utility to generate anchors for a multiple feature maps."""
def __init__(self,
anchor_sizes,
scales,
aspect_ratios,
strides,
clip_boxes=False):
"""Constructs multiscale anchors.
Args:
anchor_sizes: A list/dict of int represents the anchor size for each scale. The
anchor height will be `anchor_size / sqrt(aspect_ratio)`, anchor width
will be `anchor_size * sqrt(aspect_ratio)` for each scale.
scales: A list/tuple/dict, or a list/tuple/dict of a list/tuple of positive
floats representing the actual anchor size to the base `anchor_size`.
aspect_ratios: A list/tuple/dict, or a list/tuple/dict of a list/tuple of positive
floats representing the ratio of anchor width to anchor height.
strides: A list/tuple of ints representing the anchor stride size between
centers of anchors at each scale.
clip_boxes: Boolean representing whether the anchor coordinates should be
clipped to the image size. Defaults to `False`.
Input shape: the size of the image, `[H, W, C]`
Output shape: the size of anchors concat on each level, `[(H /
strides) * (W / strides), K * 4]`
"""
def __call__(self, image_size):
"""
Args:
image_size: a tuple of 2 for image_height and image_width.
Returns:
anchors: a dict or single Tensor.
"""
```
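As a hedged illustration of the size convention described above (height `anchor_size / sqrt(aspect_ratio)`, width `anchor_size * sqrt(aspect_ratio)`, scaled by each entry of `scales`):
```python
import math


def anchor_height_width(anchor_size, scale, aspect_ratio):
    # One (height, width) pair for a single scale / aspect ratio combination.
    height = anchor_size * scale / math.sqrt(aspect_ratio)
    width = anchor_size * scale * math.sqrt(aspect_ratio)
    return height, width


# Each feature map location receives len(scales) * len(aspect_ratios) anchors,
# e.g. scales=[1.0, 2 ** (1 / 3)] and aspect_ratios=[0.5, 1.0, 2.0] give 6.
```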
#### Ops -- BoxMatcher
```python
class BoxMatcher:
"""Matcher based on highest value.
This class computes matches from a similarity matrix. Each column is matched
to a single row.
To support object detection target assignment this class enables setting both
positive_threshold (upper threshold) and negative_threshold (lower thresholds)
defining three categories of similarity which define whether examples are
positive, negative, or ignored:
(1) similarity >= positive_threshold: Highest similarity. Matched/Positive!
(2) positive_threshold > similarity >= negative_threshold: Medium similarity.
This is Ignored.
(3) negative_threshold > similarity: Lowest similarity for Negative Match.
For ignored matches this class sets the values in the Match object to -2.
"""
def __init__(
self,
positive_threshold,
negative_threshold=None,
force_match_for_each_col=False,
positive_value=1,
negative_value=-1,
ignore_value=-2):
"""Construct BoxMatcher.
Args:
positive_threshold: Threshold for positive matches. Positive if
sim >= positive_threshold, where sim is the maximum value of the
similarity matrix for a given column. Set to None for no threshold.
negative_threshold: Threshold for negative matches. Negative if
sim < negative_threshold. Defaults to positive_threshold when set to None.
force_match_for_each_col: If True, ensures that each column is matched to
at least one row (which is not guaranteed otherwise if the
positive_threshold is high). Defaults to False.
positive_value: An integer to fill for positive match indicators.
negative_value: An integer to fill for negative match indicators.
ignore_value: An integer to fill for ignored match indicators.
Raises:
ValueError: If negative_threshold > positive_threshold.
"""
def __call__(self, similarity_matrix):
"""Tries to match each column of the similarity matrix to a row.
Args:
similarity_matrix: A float tensor of shape [N, M], or [Batch_size, N, M]
representing any similarity metric.
Returns:
matched_indices: An integer tensor of shape [N] or [B, N] with corresponding match indices;
each value represents the column index of the argmax match in the matrix.
matched_indicators: An integer tensor of shape [N] or [B, N]. For a positive match the value
will be `positive_value`, for a negative match it will be
`negative_value`, and for an ignored match it will be
`ignore_value`.
"""
```
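A small usage sketch of the thresholding behaviour, reading each row of the similarity matrix as an anchor matched against its best column (the numbers are illustrative only):
```python
import numpy as np

# Pairwise similarity for 3 anchors (rows) and 2 ground truth boxes (columns).
similarity = np.array([[0.80, 0.10],
                       [0.45, 0.30],
                       [0.05, 0.20]])
matcher = BoxMatcher(positive_threshold=0.5, negative_threshold=0.4)
matched_indices, matched_indicators = matcher(similarity)
# matched_indices    -> [0, 0, 1]   (argmax column per row)
# matched_indicators -> [1, -2, -1] (positive, ignored, negative)
```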
#### Ops -- TargetGather
```python
class TargetGather:
"""Labeler for dense object detector."""
def __init__(self):
"""Constructs Anchor Labeler."""
def __call__(self, labels, match_indices, mask, mask_val=0.0):
"""Labels anchors with ground truth inputs.
Args:
labels: An integer tensor with shape [N, dim], or [B, N, dim] representing
groundtruth classes.
match_indices: An integer tensor with shape [N] or [B, N] representing match
ground truth box index.
mask: An integer tensor with shape [N] representing match
labels, e.g., 1 for positive, -1 for negative, -2 for ignore.
mask_val: A Python primitive to fill in places where mask is True.
Returns:
targets: A tensor with [M, dim] or [B, M, dim] selected from the `match_indices`.
"""
```
#### Ops -- BoxCoder
```python
class BoxCoder:
"""box coder for RetinaNet, FasterRcnn, SSD, and YOLO."""
def __init__(self, scale_factors=None, offset='sigmoid'):
"""Constructor for BoxCoder.
Args:
scale_factors: List of 4 positive scalars to scale ty, tx, th and tw. If
set to None, does not perform scaling. For Faster RCNN, the open-source
implementation recommends using [10.0, 10.0, 5.0, 5.0].
offset: The offset used to code the box coordinates, it can be 'sigmoid',
i.e., coded_coord = coord + sigmoid(tx) which
is used for RetinaNet, FasterRcnn, and SSD, or it can be 'linear',
i.e., encoded_coord = coord + width * tx which is used for YOLO.
"""
def encode(self, boxes, anchors):
"""Compute coded_coord from coord."""
def decode(self, boxes, anchors):
"""Compute coord from coded_coord."""
```
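For context, a hedged sketch of the classic Faster RCNN box parameterization that RCNN-family `encode` implementations typically use (the exact coding proposed here, including the `offset` modes, may differ):
```python
import tensorflow as tf


def encode_boxes_sketch(boxes, anchors):
    # boxes, anchors: [..., 4] as [y_min, x_min, y_max, x_max].
    def to_center(b):
        height = b[..., 2] - b[..., 0]
        width = b[..., 3] - b[..., 1]
        center_y = b[..., 0] + 0.5 * height
        center_x = b[..., 1] + 0.5 * width
        return center_y, center_x, height, width

    cy, cx, h, w = to_center(boxes)
    cya, cxa, ha, wa = to_center(anchors)
    ty = (cy - cya) / ha
    tx = (cx - cxa) / wa
    th = tf.math.log(h / ha)
    tw = tf.math.log(w / wa)
    # Optionally multiplied element-wise by `scale_factors`,
    # e.g. [10.0, 10.0, 5.0, 5.0] for Faster RCNN.
    return tf.stack([ty, tx, th, tw], axis=-1)
```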
## Questions and Discussion Topics
* Whether `BoxMatcher` should take a list of thresholds (e.g., size 2) and a list of values (e.g., size 3).
* Gathering feedback on arguments & naming conventions.
* How to better generalize box coding, to differentiate RCNN-family encoding and YOLO-family encoding.
* Whether to have BoxCoder(inverse=False) and a single call method, or BoxCoder with `encode` and `decode` methods. | governance/rfcs/20200928-keras-cv-single-stage-2d-object-detection.md/0 | {
"file_path": "governance/rfcs/20200928-keras-cv-single-stage-2d-object-detection.md",
"repo_id": "governance",
"token_count": 6025
} | 11 |
sudo: required
dist: trusty
language: python
matrix:
include:
- python: 2.7
env: KERAS_BACKEND=tensorflow TEST_MODE=PEP8
- python: 2.7
env: KERAS_BACKEND=tensorflow
- python: 2.7
env: KERAS_BACKEND=tensorflow KERAS_HEAD=true
- python: 3.6
env: KERAS_BACKEND=tensorflow
- python: 2.7
env: KERAS_BACKEND=theano KERAS_HEAD=true THEANO_FLAGS=optimizer=fast_compile
- python: 3.6
env: KERAS_BACKEND=theano THEANO_FLAGS=optimizer=fast_compile
- python: 2.7
env: KERAS_BACKEND=cntk KERAS_HEAD=true PYTHONWARNINGS=ignore
- python: 3.6
env: KERAS_BACKEND=cntk PYTHONWARNINGS=ignore
install:
# code below is taken from http://conda.pydata.org/docs/travis.html
# We do this conditionally because it saves us some downloading if the
# version is the same.
- if [[ "$TRAVIS_PYTHON_VERSION" == "2.7" ]]; then
wget https://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh -O miniconda.sh;
else
wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh;
fi
- bash miniconda.sh -b -p $HOME/miniconda
- export PATH="$HOME/miniconda/bin:$PATH"
- hash -r
- conda config --set always_yes yes --set changeps1 no
- conda update -q conda
# Useful for debugging any issues with conda
- conda info -a
- conda create -q -n test-environment python=$TRAVIS_PYTHON_VERSION pytest pandas
- source activate test-environment
- pip install --only-binary=numpy,scipy numpy nose scipy matplotlib h5py theano keras==2.2.4
- conda install mkl mkl-service
# set library path
- export LD_LIBRARY_PATH=$HOME/miniconda/envs/test-environment/lib/:$LD_LIBRARY_PATH
# install PIL
- if [[ "$TRAVIS_PYTHON_VERSION" == "2.7" ]]; then
conda install pil;
elif [[ "$TRAVIS_PYTHON_VERSION" == "3.6" ]]; then
conda install Pillow;
fi
#- if [[ $KERAS_HEAD == "true" ]]; then
# pip install --no-deps git+https://github.com/keras-team/keras.git;
# fi
- pip install -e .[tests]
# install TensorFlow (CPU version).
- pip install tensorflow==1.9
# install cntk
- if [[ "$TRAVIS_PYTHON_VERSION" == "2.7" ]]; then
pip install https://cntk.ai/PythonWheel/CPU-Only/cntk-2.3.1-cp27-cp27mu-linux_x86_64.whl;
elif [[ "$TRAVIS_PYTHON_VERSION" == "3.6" ]]; then
pip install https://cntk.ai/PythonWheel/CPU-Only/cntk-2.3.1-cp36-cp36m-linux_x86_64.whl;
fi
# install pydot for visualization tests
- conda install pydot graphviz
  # detect whether the only changed file is a markdown file
- export DOC_ONLY_CHANGED=False;
- if [ $(git diff --name-only HEAD~1 | wc -l) == "1" ] && [[ "$(git diff --name-only HEAD~1)" == *"md" ]]; then
export DOC_ONLY_CHANGED=True;
fi
  # install Open MPI
- rm -rf ~/mpi
- mkdir ~/mpi
- pushd ~/mpi
- wget http://cntk.ai/PythonWheel/ForKeras/depends/openmpi_1.10-3.zip
- unzip ./openmpi_1.10-3.zip
- sudo dpkg -i openmpi_1.10-3.deb
- popd
# command to run tests
script:
- export MKL_THREADING_LAYER="GNU"
# run keras backend init to initialize backend config
- python -c "import keras.backend"
# create models directory to avoid concurrent directory creation at runtime
- mkdir ~/.keras/models
# set up keras backend
- sed -i -e 's/"backend":[[:space:]]*"[^"]*/"backend":\ "'$KERAS_BACKEND'/g' ~/.keras/keras.json;
- echo -e "Running tests with the following config:\n$(cat ~/.keras/keras.json)"
- if [[ "$DOC_ONLY_CHANGED" == "False" ]]; then
if [[ "$TEST_MODE" == "PEP8" ]]; then
PYTHONPATH=$PWD:$PYTHONPATH py.test --pep8 -m pep8 -n0;
else
PYTHONPATH=$PWD:$PYTHONPATH py.test tests/ --cov-config .coveragerc --cov=keras_applications tests/;
fi;
fi
| keras-applications/.travis.yml/0 | {
"file_path": "keras-applications/.travis.yml",
"repo_id": "keras-applications",
"token_count": 1665
} | 12 |
"""ResNet models for Keras.
# Reference paper
- [Deep Residual Learning for Image Recognition]
(https://arxiv.org/abs/1512.03385) (CVPR 2016 Best Paper Award)
# Reference implementations
- [TensorNets]
(https://github.com/taehoonlee/tensornets/blob/master/tensornets/resnets.py)
- [Caffe ResNet]
(https://github.com/KaimingHe/deep-residual-networks/tree/master/prototxt)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from . import imagenet_utils
from .imagenet_utils import decode_predictions
from .resnet_common import ResNet50
from .resnet_common import ResNet101
from .resnet_common import ResNet152
def preprocess_input(x, **kwargs):
"""Preprocesses a numpy array encoding a batch of images.
# Arguments
        x: a 4D numpy array consisting of RGB values within [0, 255].
data_format: data format of the image tensor.
# Returns
Preprocessed array.
"""
return imagenet_utils.preprocess_input(x, mode='caffe', **kwargs)
| keras-applications/keras_applications/resnet.py/0 | {
"file_path": "keras-applications/keras_applications/resnet.py",
"repo_id": "keras-applications",
"token_count": 363
} | 13 |
# keras-contrib : Keras community contributions
Keras-contrib is deprecated. Use [TensorFlow Addons](https://github.com/tensorflow/addons).
## The future of Keras-contrib:
We're migrating to [tensorflow/addons](https://github.com/tensorflow/addons). See the announcement [here](https://github.com/keras-team/keras-contrib/issues/519).
[![Build Status](https://travis-ci.org/keras-team/keras-contrib.svg?branch=master)](https://travis-ci.org/keras-team/keras-contrib)
This library is the official extension repository for the Python deep learning library [Keras](http://www.keras.io). It contains additional layers, activations, loss functions, optimizers, etc. which are not yet available within Keras itself. All of these additional modules can be used in conjunction with core Keras models and modules.
As the community contributions in Keras-Contrib are tested, used, validated, and their utility proven, they may be integrated into the Keras core repository. In the interest of keeping Keras succinct, clean, and powerfully simple, only the most useful contributions make it into Keras. This contribution repository is both the proving ground for new functionality, and the archive for functionality that (while useful) may not fit well into the Keras paradigm.
---
## Installation
#### Install keras_contrib for keras-team/keras
For instructions on how to install Keras,
see [the Keras installation page](https://keras.io/#installation).
```shell
git clone https://www.github.com/keras-team/keras-contrib.git
cd keras-contrib
python setup.py install
```
Alternatively, using pip:
```shell
sudo pip install git+https://www.github.com/keras-team/keras-contrib.git
```
to uninstall:
```shell
pip uninstall keras_contrib
```
#### Install keras_contrib for tensorflow.keras
```shell
git clone https://www.github.com/keras-team/keras-contrib.git
cd keras-contrib
python convert_to_tf_keras.py
USE_TF_KERAS=1 python setup.py install
```
to uninstall:
```shell
pip uninstall tf_keras_contrib
```
For contributor guidelines see [CONTRIBUTING.md](https://github.com/keras-team/keras-contrib/blob/master/CONTRIBUTING.md)
---
## Example Usage
Modules from the Keras-Contrib library are used in the same way as modules within Keras itself.
```python
from keras.models import Sequential
from keras.layers import Dense
import numpy as np
# I wish Keras had the Parametric Exponential Linear activation..
# Oh, wait..!
from keras_contrib.layers.advanced_activations import PELU
# Create the Keras model, including the PELU advanced activation
model = Sequential()
model.add(Dense(100, input_shape=(10,)))
model.add(PELU())
# Compile and fit on random data
model.compile(loss='mse', optimizer='adam')
model.fit(x=np.random.random((100, 10)), y=np.random.random((100, 100)), epochs=5, verbose=0)
# Save our model
model.save('example.h5')
```
### A Common "Gotcha"
As Keras-Contrib is external to the Keras core, loading a model requires a bit more work. While a pure Keras model is loadable with nothing more than an import of `keras.models.load_model`, a model which contains a contributed module requires an additional import of `keras_contrib`:
```python
# Required, as usual
from keras.models import load_model
# Recommended method; requires knowledge of the underlying architecture of the model
from keras_contrib.layers import PELU
from keras_contrib.layers import GroupNormalization
# Load our model
custom_objects = {'PELU': PELU, 'GroupNormalization': GroupNormalization}
model = load_model('example.h5', custom_objects)
```
| keras-contrib/README.md/0 | {
"file_path": "keras-contrib/README.md",
"repo_id": "keras-contrib",
"token_count": 1072
} | 14 |
{%- for toc_item in toc_item.children %}
<li class="toctree-l{{ navlevel}}"><a class="reference internal" href="{% if not nav_item == page %}{{ nav_item.url|url }}{% endif %}{{ toc_item.url }}">{{ toc_item.title }}</a>
{%- set navlevel = navlevel + 1 %}
{%- if navlevel <= config.theme.navigation_depth and toc_item.children %}
<ul>
{%- include 'toc.html' %}
</ul>
{%- endif %}
{%- set navlevel = navlevel - 1 %}
</li>
{%- endfor %}
| keras-contrib/contrib_docs/theme/toc.html/0 | {
"file_path": "keras-contrib/contrib_docs/theme/toc.html",
"repo_id": "keras-contrib",
"token_count": 215
} | 15 |
from .densenet import DenseNet
from .resnet import ResNet, ResNet18, ResNet34, ResNet50, ResNet101, ResNet152
from .wide_resnet import WideResidualNetwork
from .nasnet import NASNet, NASNetLarge, NASNetMobile
| keras-contrib/keras_contrib/applications/__init__.py/0 | {
"file_path": "keras-contrib/keras_contrib/applications/__init__.py",
"repo_id": "keras-contrib",
"token_count": 67
} | 16 |
from __future__ import absolute_import
from keras import backend as K
from keras.constraints import Constraint
class Clip(Constraint):
"""Clips weights to [-c, c].
# Arguments
c: Clipping parameter.
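    # Example
    A minimal illustrative usage, e.g. constraining a `Dense` kernel
    (as in WGAN-style weight clipping):
    ```python
    from keras.layers import Dense
    from keras_contrib.constraints import Clip
    dense = Dense(10, kernel_constraint=Clip(0.01))
    ```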
"""
def __init__(self, c=0.01):
self.c = c
def __call__(self, p):
return K.clip(p, -self.c, self.c)
def get_config(self):
return {'name': self.__class__.__name__,
'c': self.c}
| keras-contrib/keras_contrib/constraints/clip.py/0 | {
"file_path": "keras-contrib/keras_contrib/constraints/clip.py",
"repo_id": "keras-contrib",
"token_count": 202
} | 17 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from keras.layers import Layer
from keras_contrib import backend as KC
from keras_contrib.utils.conv_utils import normalize_data_format
class SubPixelUpscaling(Layer):
""" Sub-pixel convolutional upscaling layer.
This layer requires a Convolution2D prior to it,
having output filters computed according to
    the formula:
filters = k * (scale_factor * scale_factor)
where k = a user defined number of filters (generally larger than 32)
scale_factor = the upscaling factor (generally 2)
This layer performs the depth to space operation on
the convolution filters, and returns a
tensor with the size as defined below.
    # Example
```python
# A standard subpixel upscaling block
x = Convolution2D(256, 3, 3, padding='same', activation='relu')(...)
u = SubPixelUpscaling(scale_factor=2)(x)
# Optional
x = Convolution2D(256, 3, 3, padding='same', activation='relu')(u)
```
In practice, it is useful to have a second convolution layer after the
SubPixelUpscaling layer to speed up the learning process.
However, if you are stacking multiple
SubPixelUpscaling blocks, it may increase
the number of parameters greatly, so the
Convolution layer after SubPixelUpscaling
layer can be removed.
# Arguments
scale_factor: Upscaling factor.
data_format: Can be None, 'channels_first' or 'channels_last'.
# Input shape
4D tensor with shape:
`(samples, k * (scale_factor * scale_factor) channels, rows, cols)`
if data_format='channels_first'
or 4D tensor with shape:
`(samples, rows, cols, k * (scale_factor * scale_factor) channels)`
if data_format='channels_last'.
# Output shape
4D tensor with shape:
`(samples, k channels, rows * scale_factor, cols * scale_factor))`
if data_format='channels_first'
or 4D tensor with shape:
`(samples, rows * scale_factor, cols * scale_factor, k channels)`
if data_format='channels_last'.
# References
- [Real-Time Single Image and Video Super-Resolution Using an
Efficient Sub-Pixel Convolutional Neural Network](
https://arxiv.org/abs/1609.05158)
"""
def __init__(self, scale_factor=2, data_format=None, **kwargs):
super(SubPixelUpscaling, self).__init__(**kwargs)
self.scale_factor = scale_factor
self.data_format = normalize_data_format(data_format)
def build(self, input_shape):
pass
def call(self, x, mask=None):
y = KC.depth_to_space(x, self.scale_factor, self.data_format)
return y
def compute_output_shape(self, input_shape):
if self.data_format == 'channels_first':
b, k, r, c = input_shape
new_k = k // (self.scale_factor ** 2)
new_r = r * self.scale_factor
new_c = c * self.scale_factor
return b, new_k, new_r, new_c
else:
b, r, c, k = input_shape
new_r = r * self.scale_factor
new_c = c * self.scale_factor
new_k = k // (self.scale_factor ** 2)
return b, new_r, new_c, new_k
def get_config(self):
config = {'scale_factor': self.scale_factor,
'data_format': self.data_format}
base_config = super(SubPixelUpscaling, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
| keras-contrib/keras_contrib/layers/convolutional/subpixelupscaling.py/0 | {
"file_path": "keras-contrib/keras_contrib/layers/convolutional/subpixelupscaling.py",
"repo_id": "keras-contrib",
"token_count": 1456
} | 18 |
from keras import backend as K
from keras.optimizers import Optimizer
class Yogi(Optimizer):
"""Yogi optimizer.
Yogi is a variation of Adam that controls the increase in effective
learning rate, which (according to the paper) leads to even better
performance than Adam with similar theoretical guarantees on convergence.
    Default parameters follow those provided in the original paper (Tab. 1).
# Arguments
lr: float >= 0. Learning rate.
beta_1: float, 0 < beta < 1. Generally close to 1.
beta_2: float, 0 < beta < 1. Generally close to 1.
epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
decay: float >= 0. Learning rate decay over each update.
# References
- [Adaptive Methods for Nonconvex Optimization](
https://papers.nips.cc/paper/8186-adaptive-methods-for-nonconvex-optimization)
If you open an issue or a pull request about the Yogi optimizer,
please add 'cc @MarcoAndreaBuchmann' to notify him.
"""
def __init__(self, lr=0.01, beta_1=0.9, beta_2=0.999,
epsilon=1e-3, decay=0., **kwargs):
super(Yogi, self).__init__(**kwargs)
if beta_1 <= 0 or beta_1 >= 1:
raise ValueError("beta_1 has to be in ]0, 1[")
if beta_2 <= 0 or beta_2 >= 1:
raise ValueError("beta_2 has to be in ]0, 1[")
with K.name_scope(self.__class__.__name__):
self.iterations = K.variable(0, dtype='int64', name='iterations')
self.lr = K.variable(lr, name='lr')
self.beta_1 = K.variable(beta_1, name='beta_1')
self.beta_2 = K.variable(beta_2, name='beta_2')
self.decay = K.variable(decay, name='decay')
if epsilon is None:
epsilon = K.epsilon()
if epsilon <= 0:
raise ValueError("epsilon has to be larger than 0")
self.epsilon = epsilon
self.initial_decay = decay
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
self.updates = [K.update_add(self.iterations, 1)]
lr = self.lr
if self.initial_decay > 0:
lr = lr * (1. / (1. + self.decay * K.cast(self.iterations,
K.dtype(self.decay))))
t = K.cast(self.iterations, K.floatx()) + 1
lr_t = lr * (K.sqrt(1. - K.pow(self.beta_2, t)) /
(1. - K.pow(self.beta_1, t)))
ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
vhats = [K.zeros(1) for _ in params]
self.weights = [self.iterations] + ms + vs + vhats
for p, g, m, v, vhat in zip(params, grads, ms, vs, vhats):
g2 = K.square(g)
m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
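            # Yogi's key difference from Adam: the second moment is updated
            # additively via sign(v - g^2), which limits how quickly the
            # effective learning rate can grow.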
v_t = v - (1. - self.beta_2) * K.sign(v - g2) * g2
p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)
self.updates.append(K.update(m, m_t))
self.updates.append(K.update(v, v_t))
new_p = p_t
# Apply constraints.
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
self.updates.append(K.update(p, new_p))
return self.updates
def get_config(self):
config = {'lr': float(K.get_value(self.lr)),
'beta_1': float(K.get_value(self.beta_1)),
'beta_2': float(K.get_value(self.beta_2)),
'decay': float(K.get_value(self.decay)),
'epsilon': self.epsilon}
base_config = super(Yogi, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
| keras-contrib/keras_contrib/optimizers/yogi.py/0 | {
"file_path": "keras-contrib/keras_contrib/optimizers/yogi.py",
"repo_id": "keras-contrib",
"token_count": 1858
} | 19 |
import numpy as np
import pytest
from keras import backend as K
from keras.layers import Input
from keras.models import Sequential, Model
from numpy.testing import assert_allclose
from keras_contrib.layers import InstanceNormalization
from keras_contrib.utils.test_utils import layer_test
input_1 = np.arange(10)
input_2 = np.zeros(10)
input_3 = np.ones(10)
input_shapes = [np.ones((10, 10)), np.ones((10, 10, 10))]
def basic_instancenorm_test():
from keras import regularizers
layer_test(InstanceNormalization,
kwargs={'epsilon': 0.1,
'gamma_regularizer': regularizers.l2(0.01),
'beta_regularizer': regularizers.l2(0.01)},
input_shape=(3, 4, 2))
layer_test(InstanceNormalization,
kwargs={'gamma_initializer': 'ones',
'beta_initializer': 'ones'},
input_shape=(3, 4, 2))
layer_test(InstanceNormalization,
kwargs={'scale': False, 'center': False},
input_shape=(3, 3))
@pytest.mark.parametrize('input_shape,axis', [((10, 1), -1),
((10,), None)])
def test_instancenorm_correctness_rank2(input_shape, axis):
model = Sequential()
norm = InstanceNormalization(input_shape=input_shape, axis=axis)
model.add(norm)
model.compile(loss='mse', optimizer='sgd')
# centered on 5.0, variance 10.0
x = np.random.normal(loc=5.0, scale=10.0, size=(1000,) + input_shape)
model.fit(x, x, epochs=4, verbose=0)
out = model.predict(x)
out -= K.eval(norm.beta)
out /= K.eval(norm.gamma)
assert_allclose(out.mean(), 0.0, atol=1e-1)
assert_allclose(out.std(), 1.0, atol=1e-1)
def test_instancenorm_training_argument():
bn1 = InstanceNormalization(input_shape=(10,))
x1 = Input(shape=(10,))
y1 = bn1(x1, training=True)
model1 = Model(x1, y1)
np.random.seed(123)
x = np.random.normal(loc=5.0, scale=10.0, size=(20, 10))
output_a = model1.predict(x)
model1.compile(loss='mse', optimizer='rmsprop')
model1.fit(x, x, epochs=1, verbose=0)
output_b = model1.predict(x)
assert np.abs(np.sum(output_a - output_b)) > 0.1
assert_allclose(output_b.mean(), 0.0, atol=1e-1)
assert_allclose(output_b.std(), 1.0, atol=1e-1)
bn2 = InstanceNormalization(input_shape=(10,))
x2 = Input(shape=(10,))
bn2(x2, training=False)
def test_instancenorm_convnet():
model = Sequential()
norm = InstanceNormalization(axis=1, input_shape=(3, 4, 4))
model.add(norm)
model.compile(loss='mse', optimizer='sgd')
# centered on 5.0, variance 10.0
x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 3, 4, 4))
model.fit(x, x, epochs=4, verbose=0)
out = model.predict(x)
out -= np.reshape(K.eval(norm.beta), (1, 3, 1, 1))
out /= np.reshape(K.eval(norm.gamma), (1, 3, 1, 1))
assert_allclose(np.mean(out, axis=(0, 2, 3)), 0.0, atol=1e-1)
assert_allclose(np.std(out, axis=(0, 2, 3)), 1.0, atol=1e-1)
def test_shared_instancenorm():
    '''Test that an IN layer can be shared
across different data streams.
'''
# Test single layer reuse
bn = InstanceNormalization(input_shape=(10,))
x1 = Input(shape=(10,))
bn(x1)
x2 = Input(shape=(10,))
y2 = bn(x2)
x = np.random.normal(loc=5.0, scale=10.0, size=(2, 10))
model = Model(x2, y2)
model.compile('sgd', 'mse')
model.train_on_batch(x, x)
# Test model-level reuse
x3 = Input(shape=(10,))
y3 = model(x3)
new_model = Model(x3, y3)
new_model.compile('sgd', 'mse')
new_model.train_on_batch(x, x)
def test_instancenorm_perinstancecorrectness():
model = Sequential()
norm = InstanceNormalization(input_shape=(10,))
model.add(norm)
model.compile(loss='mse', optimizer='sgd')
# bimodal distribution
z = np.random.normal(loc=5.0, scale=10.0, size=(2, 10))
y = np.random.normal(loc=-5.0, scale=17.0, size=(2, 10))
x = np.append(z, y)
x = np.reshape(x, (4, 10))
model.fit(x, x, epochs=4, batch_size=4, verbose=1)
out = model.predict(x)
out -= K.eval(norm.beta)
out /= K.eval(norm.gamma)
# verify that each instance in the batch is individually normalized
for i in range(4):
instance = out[i]
assert_allclose(instance.mean(), 0.0, atol=1e-1)
assert_allclose(instance.std(), 1.0, atol=1e-1)
# if each instance is normalized, so should the batch
assert_allclose(out.mean(), 0.0, atol=1e-1)
assert_allclose(out.std(), 1.0, atol=1e-1)
def test_instancenorm_perchannel_correctness():
# have each channel with a different average and std
x = np.random.normal(loc=5.0, scale=2.0, size=(10, 1, 4, 4))
y = np.random.normal(loc=10.0, scale=3.0, size=(10, 1, 4, 4))
z = np.random.normal(loc=-5.0, scale=5.0, size=(10, 1, 4, 4))
batch = np.append(x, y, axis=1)
batch = np.append(batch, z, axis=1)
# this model does not provide a normalization axis
model = Sequential()
norm = InstanceNormalization(axis=None,
input_shape=(3, 4, 4),
center=False,
scale=False)
model.add(norm)
model.compile(loss='mse', optimizer='sgd')
model.fit(batch, batch, epochs=4, verbose=0)
out = model.predict(batch)
# values will not be normalized per-channel
for instance in range(10):
for channel in range(3):
activations = out[instance, channel]
assert abs(activations.mean()) > 1e-2
assert abs(activations.std() - 1.0) > 1e-6
# but values are still normalized per-instance
activations = out[instance]
assert_allclose(activations.mean(), 0.0, atol=1e-1)
assert_allclose(activations.std(), 1.0, atol=1e-1)
# this model sets the channel as a normalization axis
model = Sequential()
norm = InstanceNormalization(axis=1,
input_shape=(3, 4, 4),
center=False,
scale=False)
model.add(norm)
model.compile(loss='mse', optimizer='sgd')
model.fit(batch, batch, epochs=4, verbose=0)
out = model.predict(batch)
# values are now normalized per-channel
for instance in range(10):
for channel in range(3):
activations = out[instance, channel]
assert_allclose(activations.mean(), 0.0, atol=1e-1)
assert_allclose(activations.std(), 1.0, atol=1e-1)
if __name__ == '__main__':
pytest.main([__file__])
| keras-contrib/tests/keras_contrib/layers/normalization/test_instancenormalization.py/0 | {
"file_path": "keras-contrib/tests/keras_contrib/layers/normalization/test_instancenormalization.py",
"repo_id": "keras-contrib",
"token_count": 3078
} | 20 |
from markdown import markdown
from docs import autogen
import pytest
test_doc1 = {
'doc': """Base class for recurrent layers.
# Arguments
cell: A RNN cell instance. A RNN cell is a class that has:
- a `call(input_at_t, states_at_t)` method, returning
`(output_at_t, states_at_t_plus_1)`. The call method of the
cell can also take the optional argument `constants`, see
section "Note on passing external constants" below.
- a `state_size` attribute. This can be a single integer
(single state) in which case it is
the size of the recurrent state
(which should be the same as the size of the cell output).
This can also be a list/tuple of integers
(one size per state). In this case, the first entry
(`state_size[0]`) should be the same as
the size of the cell output.
It is also possible for `cell` to be a list of RNN cell instances,
in which cases the cells get stacked on after the other in the RNN,
implementing an efficient stacked RNN.
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
return_state: Boolean. Whether to return the last state
in addition to the output.
go_backwards: Boolean (default False).
If True, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
unroll: Boolean (default False).
If True, the network will be unrolled,
else a symbolic loop will be used.
Unrolling can speed-up a RNN,
although it tends to be more memory-intensive.
Unrolling is only suitable for short sequences.
input_dim: dimensionality of the input (integer).
This argument (or alternatively,
the keyword argument `input_shape`)
is required when using this layer as the first layer in a model.
input_length: Length of input sequences, to be specified
when it is constant.
This argument is required if you are going to connect
`Flatten` then `Dense` layers upstream
(without it, the shape of the dense outputs cannot be computed).
Note that if the recurrent layer is not the first layer
in your model, you would need to specify the input length
at the level of the first layer
(e.g. via the `input_shape` argument)
# Input shape
3D tensor with shape `(batch_size, timesteps, input_dim)`.
# Output shape
- if `return_state`: a list of tensors. The first tensor is
the output. The remaining tensors are the last states,
each with shape `(batch_size, units)`.
- if `return_sequences`: 3D tensor with shape
`(batch_size, timesteps, units)`.
- else, 2D tensor with shape `(batch_size, units)`.
# Masking
This layer supports masking for input data with a variable number
of timesteps. To introduce masks to your data,
use an [Embedding](embeddings.md) layer with the `mask_zero` parameter
set to `True`.
# Note on using statefulness in RNNs
You can set RNN layers to be 'stateful', which means that the states
computed for the samples in one batch will be reused as initial states
for the samples in the next batch. This assumes a one-to-one mapping
between samples in different successive batches.
To enable statefulness:
- specify `stateful=True` in the layer constructor.
- specify a fixed batch size for your model, by passing
if sequential model:
`batch_input_shape=(...)` to the first layer in your model.
else for functional model with 1 or more Input layers:
`batch_shape=(...)` to all the first layers in your model.
This is the expected shape of your inputs
*including the batch size*.
It should be a tuple of integers, e.g. `(32, 10, 100)`.
- specify `shuffle=False` when calling fit().
To reset the states of your model, call `.reset_states()` on either
a specific layer, or on your entire model.
# Note on specifying the initial state of RNNs
Note: that
One: You can specify the initial state of RNN layers symbolically by
calling them with the keyword argument `initial_state`.
Two: The value of `initial_state` should be a tensor or list of
tensors representing
the initial state of the RNN layer.
You can specify the initial state of RNN layers numerically by:
One: calling `reset_states`
- With the keyword argument `states`.
- The value of
`states` should be a numpy array or
list of numpy arrays representing
the initial state of the RNN layer.
# Note on passing external constants to RNNs
You can pass "external" constants to the cell using the `constants`
keyword: argument of `RNN.__call__` (as well as `RNN.call`) method.
This: requires that the `cell.call` method accepts the same keyword argument
`constants`. Such constants can be used to condition the cell
transformation on additional static inputs (not changing over time),
a.k.a. an attention mechanism.
# Examples
```python
# First, let's define a RNN Cell, as a layer subclass.
class MinimalRNNCell(keras.layers.Layer):
def __init__(self, units, **kwargs):
self.units = units
self.state_size = units
super(MinimalRNNCell, self).__init__(**kwargs)
def build(self, input_shape):
self.kernel = self.add_weight(shape=(input_shape[-1], self.units),
initializer='uniform',
name='kernel')
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
initializer='uniform',
name='recurrent_kernel')
self.built = True
def call(self, inputs, states):
prev_output = states[0]
h = K.dot(inputs, self.kernel)
output = h + K.dot(prev_output, self.recurrent_kernel)
return output, [output]
# Let's use this cell in a RNN layer:
cell = MinimalRNNCell(32)
x = keras.Input((None, 5))
layer = RNN(cell)
y = layer(x)
# Here's how to use the cell to build a stacked RNN:
cells = [MinimalRNNCell(32), MinimalRNNCell(64)]
x = keras.Input((None, 5))
layer = RNN(cells)
y = layer(x)
```
""",
'result': '''Base class for recurrent layers.
__Arguments__
- __cell__: A RNN cell instance. A RNN cell is a class that has:
- a `call(input_at_t, states_at_t)` method, returning
`(output_at_t, states_at_t_plus_1)`. The call method of the
cell can also take the optional argument `constants`, see
section "Note on passing external constants" below.
- a `state_size` attribute. This can be a single integer
(single state) in which case it is
the size of the recurrent state
(which should be the same as the size of the cell output).
This can also be a list/tuple of integers
(one size per state). In this case, the first entry
(`state_size[0]`) should be the same as
the size of the cell output.
It is also possible for `cell` to be a list of RNN cell instances,
in which cases the cells get stacked on after the other in the RNN,
implementing an efficient stacked RNN.
- __return_sequences__: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
- __return_state__: Boolean. Whether to return the last state
in addition to the output.
- __go_backwards__: Boolean (default False).
If True, process the input sequence backwards and return the
reversed sequence.
- __stateful__: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
- __unroll__: Boolean (default False).
If True, the network will be unrolled,
else a symbolic loop will be used.
Unrolling can speed-up a RNN,
although it tends to be more memory-intensive.
Unrolling is only suitable for short sequences.
- __input_dim__: dimensionality of the input (integer).
This argument (or alternatively,
the keyword argument `input_shape`)
is required when using this layer as the first layer in a model.
- __input_length__: Length of input sequences, to be specified
when it is constant.
This argument is required if you are going to connect
`Flatten` then `Dense` layers upstream
(without it, the shape of the dense outputs cannot be computed).
Note that if the recurrent layer is not the first layer
in your model, you would need to specify the input length
at the level of the first layer
(e.g. via the `input_shape` argument)
__Input shape__
3D tensor with shape `(batch_size, timesteps, input_dim)`.
__Output shape__
- if `return_state`: a list of tensors. The first tensor is
the output. The remaining tensors are the last states,
each with shape `(batch_size, units)`.
- if `return_sequences`: 3D tensor with shape
`(batch_size, timesteps, units)`.
- else, 2D tensor with shape `(batch_size, units)`.
__Masking__
This layer supports masking for input data with a variable number
of timesteps. To introduce masks to your data,
use an [Embedding](embeddings.md) layer with the `mask_zero` parameter
set to `True`.
__Note on using statefulness in RNNs__
You can set RNN layers to be 'stateful', which means that the states
computed for the samples in one batch will be reused as initial states
for the samples in the next batch. This assumes a one-to-one mapping
between samples in different successive batches.
To enable statefulness:
- specify `stateful=True` in the layer constructor.
- specify a fixed batch size for your model, by passing
if sequential model:
`batch_input_shape=(...)` to the first layer in your model.
else for functional model with 1 or more Input layers:
`batch_shape=(...)` to all the first layers in your model.
This is the expected shape of your inputs
*including the batch size*.
It should be a tuple of integers, e.g. `(32, 10, 100)`.
- specify `shuffle=False` when calling fit().
To reset the states of your model, call `.reset_states()` on either
a specific layer, or on your entire model.
__Note on specifying the initial state of RNNs__
Note: that
- __One__: You can specify the initial state of RNN layers symbolically by
calling them with the keyword argument `initial_state`.
- __Two__: The value of `initial_state` should be a tensor or list of
tensors representing
the initial state of the RNN layer.
You can specify the initial state of RNN layers numerically by:
- __One__: calling `reset_states`
- With the keyword argument `states`.
- The value of
`states` should be a numpy array or
list of numpy arrays representing
the initial state of the RNN layer.
__Note on passing external constants to RNNs__
You can pass "external" constants to the cell using the `constants`
- __keyword__: argument of `RNN.__call__` (as well as `RNN.call`) method.
- __This__: requires that the `cell.call` method accepts the same keyword argument
`constants`. Such constants can be used to condition the cell
transformation on additional static inputs (not changing over time),
a.k.a. an attention mechanism.
__Examples__
```python
# First, let's define a RNN Cell, as a layer subclass.
class MinimalRNNCell(keras.layers.Layer):
def __init__(self, units, **kwargs):
self.units = units
self.state_size = units
super(MinimalRNNCell, self).__init__(**kwargs)
def build(self, input_shape):
self.kernel = self.add_weight(shape=(input_shape[-1], self.units),
initializer='uniform',
name='kernel')
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
initializer='uniform',
name='recurrent_kernel')
self.built = True
def call(self, inputs, states):
prev_output = states[0]
h = K.dot(inputs, self.kernel)
output = h + K.dot(prev_output, self.recurrent_kernel)
return output, [output]
# Let's use this cell in a RNN layer:
cell = MinimalRNNCell(32)
x = keras.Input((None, 5))
layer = RNN(cell)
y = layer(x)
# Here's how to use the cell to build a stacked RNN:
cells = [MinimalRNNCell(32), MinimalRNNCell(64)]
x = keras.Input((None, 5))
layer = RNN(cells)
y = layer(x)
```
'''}
def test_doc_lists():
docstring = autogen.process_docstring(test_doc1['doc'])
assert markdown(docstring) == markdown(test_doc1['result'])
dummy_docstring = """Multiplies 2 tensors (and/or variables) and returns a *tensor*.
When attempting to multiply a nD tensor
with a nD tensor, it reproduces the Theano behavior.
(e.g. `(2, 3) * (4, 3, 5) -> (2, 4, 5)`)
# Examples
```python
# Theano-like behavior example
>>> x = K.random_uniform_variable(shape=(2, 3), low=0, high=1)
>>> y = K.ones((4, 3, 5))
>>> xy = K.dot(x, y)
>>> K.int_shape(xy)
(2, 4, 5)
```
# Numpy implementation
```python
def dot(x, y):
return dot(x, y)
```
"""
def test_doc_multiple_sections_code():
""" Checks that we can have code blocks in multiple sections."""
generated = autogen.process_docstring(dummy_docstring)
assert '# Theano-like behavior example' in generated
assert 'def dot(x, y):' in generated
if __name__ == '__main__':
pytest.main([__file__])
| keras-contrib/tests/tooling/test_doc_auto_generation.py/0 | {
"file_path": "keras-contrib/tests/tooling/test_doc_auto_generation.py",
"repo_id": "keras-contrib",
"token_count": 5533
} | 21 |
# Keras Core is becoming Keras 3 and has moved to keras-team/keras
Multi-backend Keras has a new repo: [keras-team/keras](https://github.com/keras-team/keras).
Open any issues / PRs there. `keras-team/keras-core` is no longer in use.
**Keras Core** was the codename of the multi-backend Keras project throughout its initial development
(April 2023 - July 2023) and its public beta test (July 2023 - September 2023). Now, Keras Core
is gearing up to become Keras 3, to be released under the `keras` name. As such, we've moved development
back to [keras-team/keras](https://github.com/keras-team/keras).
Meanwhile, the legacy `tf.keras` codebase is now available at
[keras-team/tf-keras](https://github.com/keras-team/tf-keras).
If you were a contributor to Keras Core -- thank you! Your commits have been reapplied in
`keras-team/keras` under your own authorship, so that your contribution record is fully preserved.
Likewise, if you were a contributor to `keras-team/keras` before the swap, your contribution
history has been fully preserved: the new commits have been reapplied on top of the old git tree.
| keras-core/README.md/0 | {
"file_path": "keras-core/README.md",
"repo_id": "keras-core",
"token_count": 331
} | 22 |
import time
import keras_core
class BenchmarkMetricsCallback(keras_core.callbacks.Callback):
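    """Callback that records training throughput between two batches.
    Illustrative usage (assuming a compiled `model` and a `dataset`):
        callback = BenchmarkMetricsCallback(start_batch=1, stop_batch=100)
        model.fit(dataset, epochs=2, callbacks=[callback])
        print(callback.state["throughput"])  # steps/sec, one entry per epoch
    """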
def __init__(self, start_batch=1, stop_batch=None):
self.start_batch = start_batch
self.stop_batch = stop_batch
# Store the throughput of each epoch.
self.state = {"throughput": []}
def on_train_batch_begin(self, batch, logs=None):
if batch == self.start_batch:
self.state["epoch_begin_time"] = time.time()
def on_train_batch_end(self, batch, logs=None):
if batch == self.stop_batch:
epoch_end_time = time.time()
throughput = (self.stop_batch - self.start_batch + 1) / (
epoch_end_time - self.state["epoch_begin_time"]
)
self.state["throughput"].append(throughput)
| keras-core/benchmarks/model_benchmark/benchmark_utils.py/0 | {
"file_path": "keras-core/benchmarks/model_benchmark/benchmark_utils.py",
"repo_id": "keras-core",
"token_count": 348
} | 23 |
import numpy as np
import keras_core
from keras_core import layers
from keras_core.utils import to_categorical
# Model / data parameters
num_classes = 10
input_shape = (28, 28, 1)
# Load the data and split it between train and test sets
(x_train, y_train), (x_test, y_test) = keras_core.datasets.mnist.load_data()
# Scale images to the [0, 1] range
x_train = x_train.astype("float32") / 255
x_test = x_test.astype("float32") / 255
# Make sure images have shape (28, 28, 1)
x_train = np.expand_dims(x_train, -1)
x_test = np.expand_dims(x_test, -1)
print("x_train shape:", x_train.shape)
print(x_train.shape[0], "train samples")
print(x_test.shape[0], "test samples")
# convert class vectors to binary class matrices
y_train = to_categorical(y_train, num_classes)
y_test = to_categorical(y_test, num_classes)
batch_size = 128
epochs = 3
model = keras_core.Sequential(
[
layers.Input(shape=input_shape),
layers.Conv2D(32, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Flatten(),
layers.Dropout(0.5),
layers.Dense(num_classes, activation="softmax"),
]
)
model.summary()
model.compile(
loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"]
)
model.fit(
x_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.1
)
score = model.evaluate(x_test, y_test, verbose=0)
print("Test loss:", score[0])
print("Test accuracy:", score[1])
| keras-core/examples/demo_mnist_convnet.py/0 | {
"file_path": "keras-core/examples/demo_mnist_convnet.py",
"repo_id": "keras-core",
"token_count": 645
} | 24 |
"""
Title: English speaker accent recognition using Transfer Learning
Author: [Fadi Badine](https://twitter.com/fadibadine)
Converted to Keras Core by: [Fadi Badine](https://twitter.com/fadibadine)
Date created: 2022/04/16
Last modified: 2023/07/19
Description: Training a model to classify UK & Ireland accents using feature extraction from Yamnet.
Accelerator: GPU
"""
"""
## Introduction
The following example shows how to use feature extraction in order to
train a model to classify the English accent spoken in an audio wave.
Instead of training a model from scratch, transfer learning enables us to
take advantage of existing state-of-the-art deep learning models and use them as feature extractors.
Our process:
* Use a TF Hub pre-trained model (Yamnet) and apply it as part of the tf.data pipeline which transforms
the audio files into feature vectors.
* Train a dense model on the feature vectors.
* Use the trained model for inference on a new audio file.
Note:
* We need to install TensorFlow IO in order to resample audio files to 16 kHz as required by the Yamnet model.
* In the test section, ffmpeg is used to convert the mp3 file to wav.
You can install TensorFlow IO with the following command:
"""
"""shell
pip install -U -q tensorflow_io
"""
"""
## Configuration
"""
SEED = 1337
EPOCHS = 1
BATCH_SIZE = 64
VALIDATION_RATIO = 0.1
MODEL_NAME = "uk_irish_accent_recognition"
# Location where the dataset will be downloaded.
# By default (None), keras.utils.get_file will use ~/.keras/ as the CACHE_DIR
CACHE_DIR = None
# The location of the dataset
URL_PATH = "https://www.openslr.org/resources/83/"
# List of datasets compressed files that contain the audio files
zip_files = {
0: "irish_english_male.zip",
1: "midlands_english_female.zip",
2: "midlands_english_male.zip",
3: "northern_english_female.zip",
4: "northern_english_male.zip",
5: "scottish_english_female.zip",
6: "scottish_english_male.zip",
7: "southern_english_female.zip",
8: "southern_english_male.zip",
9: "welsh_english_female.zip",
10: "welsh_english_male.zip",
}
# We see that there are 2 compressed files for each accent (except Irish):
# - One for male speakers
# - One for female speakers
# However, we will be using a gender agnostic dataset.
# List of gender agnostic categories
gender_agnostic_categories = [
"ir", # Irish
"mi", # Midlands
"no", # Northern
"sc", # Scottish
"so", # Southern
"we", # Welsh
]
class_names = [
"Irish",
"Midlands",
"Northern",
"Scottish",
"Southern",
"Welsh",
"Not a speech",
]
"""
## Imports
"""
import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import io
import csv
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_io as tfio
import keras_core as keras
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
from IPython.display import Audio
# Set all random seeds in order to get reproducible results
keras.utils.set_random_seed(SEED)
# Where to download the dataset
DATASET_DESTINATION = os.path.join(
CACHE_DIR if CACHE_DIR else "~/.keras/", "datasets"
)
"""
## Yamnet Model
Yamnet is an audio event classifier trained on the AudioSet dataset to predict audio
events from the AudioSet ontology. It is available on TensorFlow Hub.
Yamnet accepts a 1-D tensor of audio samples with a sample rate of 16 kHz.
As output, the model returns a 3-tuple:
* Scores of shape `(N, 521)` representing the scores of the 521 classes.
* Embeddings of shape `(N, 1024)`.
* The log-mel spectrogram of the entire audio frame.
We will use the embeddings, which are the features extracted from the audio samples, as the input to our dense model.
For more detailed information about Yamnet, please refer to its [TensorFlow Hub](https://tfhub.dev/google/yamnet/1) page.
"""
yamnet_model = hub.load("https://tfhub.dev/google/yamnet/1")
"""
## Dataset
The dataset used is the
[Crowdsourced high-quality UK and Ireland English Dialect speech data set](https://openslr.org/83/)
which consists of a total of 17,877 high-quality audio wav files.
This dataset includes over 31 hours of recording from 120 volunteers who self-identify as
native speakers of Southern England, Midlands, Northern England, Wales, Scotland and Ireland.
For more info, please refer to the above link or to the following paper:
[Open-source Multi-speaker Corpora of the English Accents in the British Isles](https://aclanthology.org/2020.lrec-1.804.pdf)
"""
"""
## Download the data
"""
# CSV file that contains information about the dataset. For each entry, we have:
# - ID
# - wav file name
# - transcript
line_index_file = keras.utils.get_file(
fname="line_index_file", origin=URL_PATH + "line_index_all.csv"
)
# Download the list of compressed files that contain the audio wav files
for i in zip_files:
fname = zip_files[i].split(".")[0]
url = URL_PATH + zip_files[i]
zip_file = keras.utils.get_file(fname=fname, origin=url, extract=True)
os.remove(zip_file)
"""
## Load the data in a Dataframe
Of the 3 columns (ID, filename and transcript), we are only interested in the filename column in order to read the audio file.
We will ignore the other two.
"""
dataframe = pd.read_csv(
line_index_file,
names=["id", "filename", "transcript"],
usecols=["filename"],
)
dataframe.head()
"""
Let's now preprocess the dataset by:
* Adjusting the filename (removing a leading space & adding ".wav" extension to the
filename).
* Creating a label using the first 2 characters of the filename which indicate the
accent.
* Shuffling the samples.
"""
# The purpose of this function is to preprocess the dataframe by applying the following:
# - Cleaning the filename from a leading space
# - Generating a label column that is gender agnostic i.e.
# welsh english male and welsh english female for example are both labeled as
# welsh english
# - Add extension .wav to the filename
# - Shuffle samples
def preprocess_dataframe(dataframe):
# Remove leading space in filename column
dataframe["filename"] = dataframe.apply(
lambda row: row["filename"].strip(), axis=1
)
# Create gender agnostic labels based on the filename first 2 letters
dataframe["label"] = dataframe.apply(
lambda row: gender_agnostic_categories.index(row["filename"][:2]),
axis=1,
)
# Add the file path to the name
dataframe["filename"] = dataframe.apply(
lambda row: os.path.join(DATASET_DESTINATION, row["filename"] + ".wav"),
axis=1,
)
# Shuffle the samples
dataframe = dataframe.sample(frac=1, random_state=SEED).reset_index(
drop=True
)
return dataframe
dataframe = preprocess_dataframe(dataframe)
dataframe.head()
"""
## Prepare training & validation sets
Let's split the samples creating training and validation sets.
"""
split = int(len(dataframe) * (1 - VALIDATION_RATIO))
train_df = dataframe[:split]
valid_df = dataframe[split:]
print(
f"We have {train_df.shape[0]} training samples & {valid_df.shape[0]} validation ones"
)
"""
## Prepare a TensorFlow Dataset
Next, we need to create a `tf.data.Dataset`.
This is done by creating a `dataframe_to_dataset` function that does the following:
* Create a dataset using filenames and labels.
* Get the Yamnet embeddings by calling another function `filepath_to_embeddings`.
* Apply caching, reshuffling and setting batch size.
The `filepath_to_embeddings` does the following:
* Load audio file.
* Resample audio to 16 kHz.
* Generate scores and embeddings from Yamnet model.
* Since Yamnet generates multiple samples for each audio file,
this function also duplicates the label for all the generated samples
that have `score=0` (speech), while setting the label for the others to
'other', indicating that the audio segment is not speech and we won't label it as one of the accents.
The below `load_16k_audio_file` is copied from the following tutorial
[Transfer learning with YAMNet for environmental sound classification](https://www.tensorflow.org/tutorials/audio/transfer_learning_audio)
"""
@tf.function
def load_16k_audio_wav(filename):
# Read file content
file_content = tf.io.read_file(filename)
# Decode audio wave
audio_wav, sample_rate = tf.audio.decode_wav(
file_content, desired_channels=1
)
audio_wav = tf.squeeze(audio_wav, axis=-1)
sample_rate = tf.cast(sample_rate, dtype=tf.int64)
# Resample to 16k
audio_wav = tfio.audio.resample(
audio_wav, rate_in=sample_rate, rate_out=16000
)
return audio_wav
def filepath_to_embeddings(filename, label):
# Load 16k audio wave
audio_wav = load_16k_audio_wav(filename)
# Get audio embeddings & scores.
# The embeddings are the audio features extracted using transfer learning
# while scores will be used to identify time slots that are not speech
# which will then be gathered into a specific new category 'other'
scores, embeddings, _ = yamnet_model(audio_wav)
# Number of embeddings in order to know how many times to repeat the label
embeddings_num = tf.shape(embeddings)[0]
labels = tf.repeat(label, embeddings_num)
# Change labels for time-slots that are not speech into a new category 'other'
labels = tf.where(
tf.argmax(scores, axis=1) == 0, label, len(class_names) - 1
)
# Using one-hot in order to use AUC
return (embeddings, tf.one_hot(labels, len(class_names)))
def dataframe_to_dataset(dataframe, batch_size=64):
dataset = tf.data.Dataset.from_tensor_slices(
(dataframe["filename"], dataframe["label"])
)
dataset = dataset.map(
lambda x, y: filepath_to_embeddings(x, y),
num_parallel_calls=tf.data.experimental.AUTOTUNE,
).unbatch()
return dataset.cache().batch(batch_size).prefetch(tf.data.AUTOTUNE)
train_ds = dataframe_to_dataset(train_df)
valid_ds = dataframe_to_dataset(valid_df)
"""
## Build the model
The model that we use consists of:
* An input layer which is the embedding output of the Yamnet classifier.
* 4 dense hidden layers and 4 dropout layers.
* An output dense layer.
The model's hyperparameters were selected using
[KerasTuner](https://keras.io/keras_tuner/).
"""
keras.backend.clear_session()
def build_and_compile_model():
inputs = keras.layers.Input(shape=(1024,), name="embedding")
x = keras.layers.Dense(256, activation="relu", name="dense_1")(inputs)
x = keras.layers.Dropout(0.15, name="dropout_1")(x)
x = keras.layers.Dense(384, activation="relu", name="dense_2")(x)
x = keras.layers.Dropout(0.2, name="dropout_2")(x)
x = keras.layers.Dense(192, activation="relu", name="dense_3")(x)
x = keras.layers.Dropout(0.25, name="dropout_3")(x)
x = keras.layers.Dense(384, activation="relu", name="dense_4")(x)
x = keras.layers.Dropout(0.2, name="dropout_4")(x)
outputs = keras.layers.Dense(
len(class_names), activation="softmax", name="ouput"
)(x)
model = keras.Model(
inputs=inputs, outputs=outputs, name="accent_recognition"
)
model.compile(
optimizer=keras.optimizers.Adam(learning_rate=1.9644e-5),
loss=keras.losses.CategoricalCrossentropy(),
metrics=["accuracy", keras.metrics.AUC(name="auc")],
)
return model
model = build_and_compile_model()
model.summary()
"""
## Class weights calculation
Since the dataset is quite unbalanced, we will use the `class_weight` argument during training.
Getting the class weights is a little tricky because even though we know the number of
audio files for each class, it does not represent the number of samples for that class
since Yamnet transforms each audio file into multiple audio samples of 0.96 seconds each.
So every audio file will be split into a number of samples that is proportional to its length.
Therefore, to get those weights, we have to calculate the number of samples for each class
after preprocessing through Yamnet.
"""
class_counts = tf.zeros(shape=(len(class_names),), dtype=tf.int32)
for x, y in iter(train_ds):
class_counts = class_counts + tf.math.bincount(
tf.cast(tf.math.argmax(y, axis=1), tf.int32), minlength=len(class_names)
)
class_weight = {
i: tf.math.reduce_sum(class_counts).numpy() / class_counts[i].numpy()
for i in range(len(class_counts))
}
print(class_weight)
"""
## Callbacks
We use Keras callbacks in order to:
* Stop whenever the validation AUC stops improving.
* Save the best model.
* Call TensorBoard in order to later view the training and validation logs.
"""
early_stopping_cb = keras.callbacks.EarlyStopping(
monitor="val_auc", patience=10, restore_best_weights=True
)
model_checkpoint_cb = keras.callbacks.ModelCheckpoint(
MODEL_NAME + ".keras", monitor="val_auc", save_best_only=True
)
tensorboard_cb = keras.callbacks.TensorBoard(
os.path.join(os.curdir, "logs", model.name)
)
callbacks = [early_stopping_cb, model_checkpoint_cb, tensorboard_cb]
"""
## Training
"""
history = model.fit(
train_ds,
epochs=EPOCHS,
validation_data=valid_ds,
class_weight=class_weight,
callbacks=callbacks,
verbose=2,
)
"""
## Results
Let's plot the training and validation AUC and accuracy.
"""
fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(14, 5))
axs[0].plot(range(EPOCHS), history.history["accuracy"], label="Training")
axs[0].plot(range(EPOCHS), history.history["val_accuracy"], label="Validation")
axs[0].set_xlabel("Epochs")
axs[0].set_title("Training & Validation Accuracy")
axs[0].legend()
axs[0].grid(True)
axs[1].plot(range(EPOCHS), history.history["auc"], label="Training")
axs[1].plot(range(EPOCHS), history.history["val_auc"], label="Validation")
axs[1].set_xlabel("Epochs")
axs[1].set_title("Training & Validation AUC")
axs[1].legend()
axs[1].grid(True)
plt.show()
"""
## Evaluation
"""
train_loss, train_acc, train_auc = model.evaluate(train_ds)
valid_loss, valid_acc, valid_auc = model.evaluate(valid_ds)
"""
Let's try to compare our model's performance to Yamnet's using one of Yamnet's metrics (d-prime).
Yamnet achieved a d-prime value of 2.318.
Let's check our model's performance.
"""
# The following function calculates the d-prime score from the AUC
def d_prime(auc):
standard_normal = stats.norm()
d_prime = standard_normal.ppf(auc) * np.sqrt(2.0)
return d_prime
print(
"train d-prime: {0:.3f}, validation d-prime: {1:.3f}".format(
d_prime(train_auc), d_prime(valid_auc)
)
)
"""
We can see that the model achieves the following results:
Results | Training | Validation
-----------|-----------|------------
Accuracy | 54% | 51%
AUC | 0.91 | 0.89
d-prime | 1.882 | 1.740
"""
"""
## Confusion Matrix
Let's now plot the confusion matrix for the validation dataset.
The confusion matrix lets us see, for every class, not only how many samples were correctly classified,
but also which other classes were the samples confused with.
It allows us to calculate the precision and recall for every class.
"""
# Create x and y tensors
x_valid = None
y_valid = None
for x, y in iter(valid_ds):
if x_valid is None:
x_valid = x.numpy()
y_valid = y.numpy()
else:
x_valid = np.concatenate((x_valid, x.numpy()), axis=0)
y_valid = np.concatenate((y_valid, y.numpy()), axis=0)
# Generate predictions
y_pred = model.predict(x_valid)
# Calculate confusion matrix
confusion_mtx = tf.math.confusion_matrix(
np.argmax(y_valid, axis=1), np.argmax(y_pred, axis=1)
)
# Plot the confusion matrix
plt.figure(figsize=(10, 8))
sns.heatmap(
confusion_mtx,
xticklabels=class_names,
yticklabels=class_names,
annot=True,
fmt="g",
)
plt.xlabel("Prediction")
plt.ylabel("Label")
plt.title("Validation Confusion Matrix")
plt.show()
"""
## Precision & recall
For every class:
* Recall is the ratio of correctly classified samples, i.e. it shows how many samples
of this specific class the model is able to detect.
It is the ratio of diagonal elements to the sum of all elements in the row.
* Precision shows the accuracy of the classifier. It is the ratio of correctly predicted
samples among the ones classified as belonging to this class.
It is the ratio of diagonal elements to the sum of all elements in the column.
"""
for i, label in enumerate(class_names):
precision = confusion_mtx[i, i] / np.sum(confusion_mtx[:, i])
recall = confusion_mtx[i, i] / np.sum(confusion_mtx[i, :])
print(
"{0:15} Precision:{1:.2f}%; Recall:{2:.2f}%".format(
label, precision * 100, recall * 100
)
)
"""
## Run inference on test data
Let's now run a test on a single audio file.
Let's check this example from [The Scottish Voice](https://www.thescottishvoice.org.uk/home/)
We will:
* Download the mp3 file.
* Convert it to a 16k wav file.
* Run the model on the wav file.
* Plot the results.
"""
filename = "audio-sample-Stuart"
url = "https://www.thescottishvoice.org.uk/files/cm/files/"
if not os.path.exists(filename + ".wav"):
print(f"Downloading {filename}.mp3 from {url}")
command = f"wget {url}{filename}.mp3"
os.system(command)
print(f"Converting mp3 to wav and resampling to 16 kHZ")
command = (
f"ffmpeg -hide_banner -loglevel panic -y -i {filename}.mp3 -acodec "
f"pcm_s16le -ac 1 -ar 16000 {filename}.wav"
)
os.system(command)
filename = filename + ".wav"
"""
The below function `yamnet_class_names_from_csv` was copied and very slightly changed
from this [Yamnet Notebook](https://colab.research.google.com/github/tensorflow/hub/blob/master/examples/colab/yamnet.ipynb).
"""
def yamnet_class_names_from_csv(yamnet_class_map_csv_text):
"""Returns list of class names corresponding to score vector."""
yamnet_class_map_csv = io.StringIO(yamnet_class_map_csv_text)
yamnet_class_names = [
name for (class_index, mid, name) in csv.reader(yamnet_class_map_csv)
]
yamnet_class_names = yamnet_class_names[1:] # Skip CSV header
return yamnet_class_names
yamnet_class_map_path = yamnet_model.class_map_path().numpy()
yamnet_class_names = yamnet_class_names_from_csv(
tf.io.read_file(yamnet_class_map_path).numpy().decode("utf-8")
)
def calculate_number_of_non_speech(scores):
number_of_non_speech = tf.math.reduce_sum(
tf.where(
tf.math.argmax(scores, axis=1, output_type=tf.int32) != 0, 1, 0
)
)
return number_of_non_speech
def filename_to_predictions(filename):
# Load 16k audio wave
audio_wav = load_16k_audio_wav(filename)
# Get audio embeddings & scores.
scores, embeddings, mel_spectrogram = yamnet_model(audio_wav)
print(
"Out of {} samples, {} are not speech".format(
scores.shape[0], calculate_number_of_non_speech(scores)
)
)
# Predict the output of the accent recognition model with embeddings as input
predictions = model.predict(embeddings)
return audio_wav, predictions, mel_spectrogram
"""
Let's run the model on the audio file:
"""
audio_wav, predictions, mel_spectrogram = filename_to_predictions(filename)
infered_class = class_names[predictions.mean(axis=0).argmax()]
print(f"The main accent is: {infered_class} English")
"""
Listen to the audio
"""
Audio(audio_wav, rate=16000)
"""
The function below was copied from this [Yamnet notebook](tinyurl.com/4a8xn7at) and adjusted to our needs.
This function plots the following:
* Audio waveform
* Mel spectrogram
* Predictions for every time step
"""
plt.figure(figsize=(10, 6))
# Plot the waveform.
plt.subplot(3, 1, 1)
plt.plot(audio_wav)
plt.xlim([0, len(audio_wav)])
# Plot the log-mel spectrogram (returned by the model).
plt.subplot(3, 1, 2)
plt.imshow(
mel_spectrogram.numpy().T,
aspect="auto",
interpolation="nearest",
origin="lower",
)
# Plot and label the model output scores for the top-scoring classes.
mean_predictions = np.mean(predictions, axis=0)
top_class_indices = np.argsort(mean_predictions)[::-1]
plt.subplot(3, 1, 3)
plt.imshow(
predictions[:, top_class_indices].T,
aspect="auto",
interpolation="nearest",
cmap="gray_r",
)
# patch_padding = (PATCH_WINDOW_SECONDS / 2) / PATCH_HOP_SECONDS
# values from the model documentation
patch_padding = (0.025 / 2) / 0.01
plt.xlim([-patch_padding - 0.5, predictions.shape[0] + patch_padding - 0.5])
# Label the top_N classes.
yticks = range(0, len(class_names), 1)
plt.yticks(yticks, [class_names[top_class_indices[x]] for x in yticks])
_ = plt.ylim(-0.5 + np.array([len(class_names), 0]))
| keras-core/examples/keras_io/tensorflow/audio/uk_ireland_accent_recognition.py/0 | {
"file_path": "keras-core/examples/keras_io/tensorflow/audio/uk_ireland_accent_recognition.py",
"repo_id": "keras-core",
"token_count": 7353
} | 25 |
"""
Title: End-to-end Masked Language Modeling with BERT
Author: [Ankur Singh](https://twitter.com/ankur310794)
Converted to Keras-Core: [Mrutyunjay Biswal](https://twitter.com/LearnStochastic)
Date created: 2020/09/18
Last modified: 2023/09/06
Description: Implement a Masked Language Model (MLM) with BERT and fine-tune it on the IMDB Reviews dataset.
Accelerator: GPU
"""
"""
## Introduction
Masked Language Modeling is a fill-in-the-blank task,
where a model uses the context words surrounding a mask token to try to predict what the
masked word should be.
For an input that contains one or more mask tokens,
the model will generate the most likely substitution for each.
Example:
- Input: "I have watched this [MASK] and it was awesome."
- Output: "I have watched this movie and it was awesome."
Masked language modeling is a great way to train a language
model in a self-supervised setting (without human-annotated labels).
Such a model can then be fine-tuned to accomplish various supervised
NLP tasks.
This example teaches you how to build a BERT model from scratch,
train it with the masked language modeling task,
and then fine-tune this model on a sentiment classification task.
We will use the Keras-Core `TextVectorization` and `MultiHeadAttention` layers
to create a BERT Transformer-Encoder network architecture.
Note: This example is compatible only with the TensorFlow backend.
"""
"""
## Setup
"""
import os
import re
import glob
import numpy as np
import pandas as pd
from pathlib import Path
from dataclasses import dataclass
import tensorflow as tf
import keras_core as keras
from keras_core import layers
"""
## Configuration
"""
@dataclass
class Config:
MAX_LEN = 256
BATCH_SIZE = 32
LR = 0.001
VOCAB_SIZE = 30000
EMBED_DIM = 128
NUM_HEAD = 8 # used in bert model
FF_DIM = 128 # used in bert model
NUM_LAYERS = 1
NUM_EPOCHS = 1
STEPS_PER_EPOCH = 2
config = Config()
"""
## Download the Data: IMDB Movie Review Sentiment Classification
Download the IMDB data and load it into a Pandas DataFrame.
"""
fpath = keras.utils.get_file(
origin="https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz"
)
dirpath = Path(fpath).parent.absolute()
os.system(f"tar -xf {fpath} -C {dirpath}")
"""
The `aclImdb` folder contains a `train` and `test` subfolder:
"""
os.system(f"ls {dirpath}/aclImdb")
os.system(f"ls {dirpath}/aclImdb/train")
os.system(f"ls {dirpath}/aclImdb/test")
"""
We are only interested in the `pos` and `neg` subfolders, so let's delete the rest:
"""
os.system(f"rm -r {dirpath}/aclImdb/train/unsup")
os.system(f"rm -r {dirpath}/aclImdb/train/*.feat")
os.system(f"rm -r {dirpath}/aclImdb/train/*.txt")
os.system(f"rm -r {dirpath}/aclImdb/test/*.feat")
os.system(f"rm -r {dirpath}/aclImdb/test/*.txt")
"""
Let's read the dataset from the text files to a DataFrame.
"""
def get_text_list_from_files(files):
text_list = []
for name in files:
with open(name) as f:
for line in f:
text_list.append(line)
return text_list
def get_data_from_text_files(folder_name):
pos_files = glob.glob(f"{dirpath}/aclImdb/" + folder_name + "/pos/*.txt")
pos_texts = get_text_list_from_files(pos_files)
neg_files = glob.glob(f"{dirpath}/aclImdb/" + folder_name + "/neg/*.txt")
neg_texts = get_text_list_from_files(neg_files)
df = pd.DataFrame(
{
"review": pos_texts + neg_texts,
"sentiment": [0] * len(pos_texts) + [1] * len(neg_texts),
}
)
df = df.sample(len(df)).reset_index(drop=True)
return df
train_df = get_data_from_text_files("train")
test_df = get_data_from_text_files("test")
all_data = pd.concat([train_df, test_df], axis=0).reset_index(drop=True)
assert len(all_data) != 0, "all_data is empty"
"""
## Dataset preparation
We will use the `TextVectorization` layer to vectorize the text into integer token ids.
It transforms a batch of strings into either
a sequence of token indices (one sample = 1D array of integer token indices, in order)
or a dense representation (one sample = 1D array of float values encoding an unordered set of tokens).
Below, we define 3 preprocessing functions.
1. The `get_vectorize_layer` function builds the `TextVectorization` layer.
2. The `encode` function encodes raw text into integer token ids.
3. The `get_masked_input_and_labels` function will mask input token ids. It masks 15% of all input tokens in each sequence at random.
"""
def custom_standardization(input_data):
lowercase = tf.strings.lower(input_data)
stripped_html = tf.strings.regex_replace(lowercase, "<br />", " ")
return tf.strings.regex_replace(
stripped_html, "[%s]" % re.escape("!#$%&'()*+,-./:;<=>?@\^_`{|}~"), ""
)
def get_vectorize_layer(texts, vocab_size, max_seq, special_tokens=["[MASK]"]):
"""Build Text vectorization layer
Args:
      texts (list): List of strings, i.e. the input texts.
      vocab_size (int): Vocabulary size.
      max_seq (int): Maximum sequence length.
special_tokens (list, optional): List of special tokens. Defaults to `['[MASK]']`.
Returns:
layers.Layer: Return TextVectorization Keras Layer
"""
vectorize_layer = layers.TextVectorization(
max_tokens=vocab_size,
output_mode="int",
standardize=custom_standardization,
output_sequence_length=max_seq,
)
vectorize_layer.adapt(texts)
# Insert mask token in vocabulary
vocab = vectorize_layer.get_vocabulary()
vocab = vocab[2 : vocab_size - len(special_tokens)] + ["[mask]"]
vectorize_layer.set_vocabulary(vocab)
return vectorize_layer
vectorize_layer = get_vectorize_layer(
all_data.review.values.tolist(),
config.VOCAB_SIZE,
config.MAX_LEN,
special_tokens=["[mask]"],
)
# Get mask token id for masked language model
mask_token_id = vectorize_layer(["[mask]"]).numpy()[0][0]
def encode(texts):
encoded_texts = vectorize_layer(texts)
return encoded_texts.numpy()
def get_masked_input_and_labels(encoded_texts):
# 15% BERT masking
inp_mask = np.random.rand(*encoded_texts.shape) < 0.15
# Do not mask special tokens
inp_mask[encoded_texts <= 2] = False
# Set targets to -1 by default, it means ignore
labels = -1 * np.ones(encoded_texts.shape, dtype=int)
# Set labels for masked tokens
labels[inp_mask] = encoded_texts[inp_mask]
# Prepare input
encoded_texts_masked = np.copy(encoded_texts)
    # Set the input to [MASK] (the last token in the vocabulary) for 90% of the
    # masked positions; the remaining 10% are left unchanged. Of that 90%, one
    # ninth is overwritten with a random token below, giving the usual
    # 80% [MASK] / 10% random / 10% unchanged split.
inp_mask_2mask = inp_mask & (np.random.rand(*encoded_texts.shape) < 0.90)
encoded_texts_masked[
inp_mask_2mask
] = mask_token_id # mask token is the last in the dict
# Set 10% to a random token
inp_mask_2random = inp_mask_2mask & (np.random.rand(*encoded_texts.shape) < 1 / 9)
encoded_texts_masked[inp_mask_2random] = np.random.randint(
3, mask_token_id, inp_mask_2random.sum()
)
# Prepare sample_weights to pass to .fit() method
sample_weights = np.ones(labels.shape)
sample_weights[labels == -1] = 0
# y_labels would be same as encoded_texts i.e input tokens
y_labels = np.copy(encoded_texts)
return encoded_texts_masked, y_labels, sample_weights
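"""
As a quick sanity check (an illustrative addition, not part of the original example), we
can mask a single encoded review and inspect the shapes that come back:
"""
demo_masked, demo_labels, demo_weights = get_masked_input_and_labels(
    encode(["this movie was absolutely wonderful"])
)
print(demo_masked.shape, demo_labels.shape, demo_weights.shape)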
# We have 25000 examples for training
x_train = encode(train_df.review.values) # encode reviews with vectorizer
y_train = train_df.sentiment.values
train_classifier_ds = (
tf.data.Dataset.from_tensor_slices((x_train, y_train))
.shuffle(1000)
.batch(config.BATCH_SIZE)
)
# We have 25000 examples for testing
x_test = encode(test_df.review.values)
y_test = test_df.sentiment.values
test_classifier_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(
config.BATCH_SIZE
)
# Build dataset for end to end model input (will be used at the end)
test_raw_classifier_ds = tf.data.Dataset.from_tensor_slices(
(test_df.review.values, y_test)
).batch(config.BATCH_SIZE)
# Prepare data for masked language model
x_all_review = encode(all_data.review.values)
x_masked_train, y_masked_labels, sample_weights = get_masked_input_and_labels(
x_all_review
)
mlm_ds = tf.data.Dataset.from_tensor_slices(
(x_masked_train, y_masked_labels, sample_weights)
)
mlm_ds = mlm_ds.shuffle(1000).batch(config.BATCH_SIZE)
id2token = dict(enumerate(vectorize_layer.get_vocabulary()))
token2id = {y: x for x, y in id2token.items()}
class MaskedTextGenerator(keras.callbacks.Callback):
def __init__(self, sample_tokens, top_k=5):
self.sample_tokens = sample_tokens
self.k = top_k
def decode(self, tokens):
return " ".join([id2token[t] for t in tokens if t != 0])
def convert_ids_to_tokens(self, id):
return id2token[id]
def on_epoch_end(self, epoch, logs=None):
prediction = self.model.predict(self.sample_tokens)
masked_index = np.where(self.sample_tokens == mask_token_id)
masked_index = masked_index[1]
mask_prediction = prediction[0][masked_index]
top_indices = mask_prediction[0].argsort()[-self.k :][::-1]
values = mask_prediction[0][top_indices]
for i in range(len(top_indices)):
p = top_indices[i]
v = values[i]
tokens = np.copy(self.sample_tokens[0])
tokens[masked_index[0]] = p
result = {
"input_text": self.decode(self.sample_tokens[0]),
"prediction": self.decode(tokens),
"probability": v,
"predicted mask token": self.convert_ids_to_tokens(p),
            }
            print(result)
sample_tokens = vectorize_layer(["I have watched this [mask] and it was awesome"])
generator_callback = MaskedTextGenerator(sample_tokens.numpy())
"""
## Create BERT model (Pretraining Model) for masked language modeling
We will create a BERT-like pretraining model architecture
using the `MultiHeadAttention` layer.
It will take token ids as inputs (including masked tokens)
and it will predict the correct ids for the masked input tokens.
"""
def bert_module(query, key, value, layer_num):
# Multi headed self-attention
attention_output = layers.MultiHeadAttention(
num_heads=config.NUM_HEAD,
key_dim=config.EMBED_DIM // config.NUM_HEAD,
name=f"encoder_{layer_num}_multiheadattention",
)(query, key, value)
attention_output = layers.Dropout(0.1, name=f"encoder_{layer_num}_att_dropout")(
attention_output
)
attention_output = layers.LayerNormalization(
epsilon=1e-6, name=f"encoder_{layer_num}_att_layernormalization"
)(query + attention_output)
# Feed-forward layer
ffn = keras.Sequential(
[
layers.Dense(config.FF_DIM, activation="relu"),
layers.Dense(config.EMBED_DIM),
],
name=f"encoder_{layer_num}_ffn",
)
ffn_output = ffn(attention_output)
ffn_output = layers.Dropout(0.1, name=f"encoder_{layer_num}_ffn_dropout")(
ffn_output
)
sequence_output = layers.LayerNormalization(
epsilon=1e-6, name=f"encoder_{layer_num}_ffn_layernormalization"
)(attention_output + ffn_output)
return sequence_output
def get_pos_encoding_matrix(max_len, d_emb):
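    # Standard sinusoidal positional encoding: even dimensions use
    # sin(pos / 10000^(2i / d_emb)) and odd dimensions use the matching cosine
    # (the row for position 0 is left as all zeros here).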
pos_enc = np.array(
[
[pos / np.power(10000, 2 * (j // 2) / d_emb) for j in range(d_emb)]
if pos != 0
else np.zeros(d_emb)
for pos in range(max_len)
]
)
pos_enc[1:, 0::2] = np.sin(pos_enc[1:, 0::2]) # dim 2i
pos_enc[1:, 1::2] = np.cos(pos_enc[1:, 1::2]) # dim 2i+1
return pos_enc
loss_fn = keras.losses.SparseCategoricalCrossentropy(
reduction=None
)
loss_tracker = keras.metrics.Mean(name="loss")
class MaskedLanguageModel(keras.Model):
def train_step(self, inputs):
if len(inputs) == 3:
features, labels, sample_weight = inputs
else:
features, labels = inputs
sample_weight = None
with tf.GradientTape() as tape:
predictions = self(features, training=True)
loss = loss_fn(labels, predictions, sample_weight=sample_weight)
# Compute gradients
trainable_vars = self.trainable_variables
gradients = tape.gradient(loss, trainable_vars)
# Update weights
self.optimizer.apply_gradients(zip(gradients, trainable_vars))
# Compute our own metrics
loss_tracker.update_state(loss, sample_weight=sample_weight)
# Return a dict mapping metric names to current value
return {"loss": loss_tracker.result()}
@property
def metrics(self):
# We list our `Metric` objects here so that `reset_states()` can be
# called automatically at the start of each epoch
# or at the start of `evaluate()`.
# If you don't implement this property, you have to call
# `reset_states()` yourself at the time of your choosing.
return [loss_tracker]
def create_masked_language_bert_model():
inputs = layers.Input((config.MAX_LEN,), dtype=tf.int64)
word_embeddings = layers.Embedding(
config.VOCAB_SIZE, config.EMBED_DIM, name="word_embedding"
)(inputs)
position_embeddings = layers.Embedding(
input_dim=config.MAX_LEN,
output_dim=config.EMBED_DIM,
        embeddings_initializer=keras.initializers.Constant(
            get_pos_encoding_matrix(config.MAX_LEN, config.EMBED_DIM)
        ),
name="position_embedding",
)(tf.range(start=0, limit=config.MAX_LEN, delta=1))
embeddings = word_embeddings + position_embeddings
encoder_output = embeddings
for i in range(config.NUM_LAYERS):
encoder_output = bert_module(encoder_output, encoder_output, encoder_output, i)
mlm_output = layers.Dense(config.VOCAB_SIZE, name="mlm_cls", activation="softmax")(
encoder_output
)
mlm_model = MaskedLanguageModel(inputs, mlm_output, name="masked_bert_model")
optimizer = keras.optimizers.Adam(learning_rate=config.LR)
mlm_model.compile(optimizer=optimizer)
return mlm_model
bert_masked_model = create_masked_language_bert_model()
bert_masked_model.summary()
"""
## Train and Save
"""
bert_masked_model.fit(
    mlm_ds,
    epochs=Config.NUM_EPOCHS,
    steps_per_epoch=Config.STEPS_PER_EPOCH,
    callbacks=[generator_callback],
)
bert_masked_model.save("bert_mlm_imdb.keras")
"""
## Fine-tune a sentiment classification model
We will fine-tune our self-supervised model on a downstream task of sentiment classification.
To do this, let's create a classifier by adding a pooling layer and a `Dense` layer on top of the
pretrained BERT features.
"""
# Load pretrained bert model
mlm_model = keras.models.load_model(
"bert_mlm_imdb.keras", custom_objects={"MaskedLanguageModel": MaskedLanguageModel}
)
pretrained_bert_model = keras.Model(
mlm_model.input, mlm_model.get_layer("encoder_0_ffn_layernormalization").output
)
# Freeze it
pretrained_bert_model.trainable = False
def create_classifier_bert_model():
inputs = layers.Input((config.MAX_LEN,), dtype=tf.int64)
sequence_output = pretrained_bert_model(inputs)
pooled_output = layers.GlobalMaxPooling1D()(sequence_output)
hidden_layer = layers.Dense(64, activation="relu")(pooled_output)
outputs = layers.Dense(1, activation="sigmoid")(hidden_layer)
classifer_model = keras.Model(inputs, outputs, name="classification")
optimizer = keras.optimizers.Adam()
classifer_model.compile(
optimizer=optimizer, loss="binary_crossentropy", metrics=["accuracy"]
)
return classifer_model
classifer_model = create_classifier_bert_model()
classifer_model.summary()
# Train the classifier with frozen BERT stage
classifer_model.fit(
train_classifier_ds,
epochs=Config.NUM_EPOCHS,
steps_per_epoch=Config.STEPS_PER_EPOCH,
validation_data=test_classifier_ds,
)
# Unfreeze the BERT model for fine-tuning
pretrained_bert_model.trainable = True
optimizer = keras.optimizers.Adam()
classifer_model.compile(
optimizer=optimizer, loss="binary_crossentropy", metrics=["accuracy"]
)
classifer_model.fit(
train_classifier_ds,
epochs=Config.NUM_EPOCHS,
steps_per_epoch=Config.STEPS_PER_EPOCH,
validation_data=test_classifier_ds,
)
"""
## Create an end-to-end model and evaluate it
When you want to deploy a model, it's best if it already includes its preprocessing
pipeline, so that you don't have to reimplement the preprocessing logic in your
production environment. Let's create an end-to-end model that incorporates
the `TextVectorization` layer, and let's evaluate. Our model will accept raw strings
as input.
"""
def get_end_to_end(model):
inputs_string = keras.Input(shape=(1,), dtype="string")
indices = vectorize_layer(inputs_string)
outputs = model(indices)
end_to_end_model = keras.Model(inputs_string, outputs, name="end_to_end_model")
optimizer = keras.optimizers.Adam(learning_rate=config.LR)
end_to_end_model.compile(
optimizer=optimizer, loss="binary_crossentropy", metrics=["accuracy"]
)
return end_to_end_model
end_to_end_classification_model = get_end_to_end(classifer_model)
end_to_end_classification_model.evaluate(test_raw_classifier_ds) | keras-core/examples/keras_io/tensorflow/nlp/end_to_end_mlm_with_bert.py/0 | {
"file_path": "keras-core/examples/keras_io/tensorflow/nlp/end_to_end_mlm_with_bert.py",
"repo_id": "keras-core",
"token_count": 6779
} | 26 |
"""
Title: Zero-DCE for low-light image enhancement
Author: [Soumik Rakshit](http://github.com/soumik12345)
Converted to Keras Core by: [Soumik Rakshit](http://github.com/soumik12345)
Date created: 2021/09/18
Last modified: 2023/07/15
Description: Implementing Zero-Reference Deep Curve Estimation for low-light image enhancement.
Accelerator: GPU
"""
"""
## Introduction
**Zero-Reference Deep Curve Estimation** or **Zero-DCE** formulates low-light image
enhancement as the task of estimating an image-specific
[*tonal curve*](https://en.wikipedia.org/wiki/Curve_(tonality)) with a deep neural network.
In this example, we train a lightweight deep network, **DCE-Net**, to estimate
pixel-wise and high-order tonal curves for dynamic range adjustment of a given image.
Zero-DCE takes a low-light image as input and produces high-order tonal curves as its output.
These curves are then used for pixel-wise adjustment on the dynamic range of the input to
obtain an enhanced image. The curve estimation process is done in such a way that it maintains
the range of the enhanced image and preserves the contrast of neighboring pixels. This
curve estimation is inspired by curves adjustment used in photo editing software such as
Adobe Photoshop where users can adjust points throughout an image’s tonal range.
Zero-DCE is appealing because of its relaxed assumptions with regard to reference images:
it does not require any input/output image pairs during training.
This is achieved through a set of carefully formulated non-reference loss functions,
which implicitly measure the enhancement quality and guide the training of the network.
### References
- [Zero-Reference Deep Curve Estimation for Low-Light Image Enhancement](https://arxiv.org/pdf/2001.06826.pdf)
- [Curves adjustment in Adobe Photoshop](https://helpx.adobe.com/photoshop/using/curves-adjustment.html)
"""
"""
## Downloading LOLDataset
The **LoL Dataset** has been created for low-light image enhancement. It provides 485
images for training and 15 for testing. Each image pair in the dataset consists of a
low-light input image and its corresponding well-exposed reference image.
"""
import os
import random
import numpy as np
from glob import glob
from PIL import Image, ImageOps
import matplotlib.pyplot as plt
import keras_core as keras
from keras_core import layers
import tensorflow as tf
"""shell
wget https://huggingface.co/datasets/geekyrakshit/LoL-Dataset/resolve/main/lol_dataset.zip
unzip -q lol_dataset.zip && rm lol_dataset.zip
"""
"""
## Creating a TensorFlow Dataset
We use 400 low-light images from the LoL Dataset training set for training, and we use
the remaining 85 low-light images for validation. We resize the images to size `256 x
256` to be used for both training and validation. Note that in order to train the DCE-Net,
we will not require the corresponding enhanced images.
"""
IMAGE_SIZE = 256
BATCH_SIZE = 16
MAX_TRAIN_IMAGES = 400
def load_data(image_path):
image = tf.io.read_file(image_path)
image = tf.image.decode_png(image, channels=3)
image = tf.image.resize(images=image, size=[IMAGE_SIZE, IMAGE_SIZE])
image = image / 255.0
return image
def data_generator(low_light_images):
dataset = tf.data.Dataset.from_tensor_slices((low_light_images))
dataset = dataset.map(load_data, num_parallel_calls=tf.data.AUTOTUNE)
dataset = dataset.batch(BATCH_SIZE, drop_remainder=True)
return dataset
train_low_light_images = sorted(glob("./lol_dataset/our485/low/*"))[
:MAX_TRAIN_IMAGES
]
val_low_light_images = sorted(glob("./lol_dataset/our485/low/*"))[
MAX_TRAIN_IMAGES:
]
test_low_light_images = sorted(glob("./lol_dataset/eval15/low/*"))
train_dataset = data_generator(train_low_light_images)
val_dataset = data_generator(val_low_light_images)
print("Train Dataset:", train_dataset)
print("Validation Dataset:", val_dataset)
"""
## The Zero-DCE Framework
The goal of DCE-Net is to estimate a set of best-fitting light-enhancement curves
(LE-curves) given an input image. The framework then maps all pixels of the input’s RGB
channels by applying the curves iteratively to obtain the final enhanced image.
### Understanding light-enhancement curves
A light-enhancement curve is a kind of curve that can map a low-light image
to its enhanced version automatically,
where the self-adaptive curve parameters are solely dependent on the input image.
When designing such a curve, three objectives should be taken into account:
- Each pixel value of the enhanced image should be in the normalized range `[0,1]`, in order to
avoid information loss induced by overflow truncation.
- It should be monotonous, to preserve the contrast between neighboring pixels.
- The shape of this curve should be as simple as possible,
and the curve should be differentiable to allow backpropagation.
The light-enhancement curve is separately applied to three RGB channels instead of solely on the
illumination channel. The three-channel adjustment can better preserve the inherent color and reduce
the risk of over-saturation.
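Concretely, each enhancement iteration applies a simple quadratic curve. As an
illustrative sketch (matching the `get_enhanced_image` method defined further below):
```python
# One LE-curve step; `r` is a per-pixel curve parameter map in [-1, 1]
# (the tanh output of DCE-Net). Zero-DCE applies 8 such steps in sequence.
def le_curve_step(x, r):
    return x + r * (x**2 - x)
```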
![](https://li-chongyi.github.io/Zero-DCE_files/framework.png)
### DCE-Net
The DCE-Net is a lightweight deep neural network that learns the mapping between an input
image and its best-fitting curve parameter maps. The input to the DCE-Net is a low-light
image while the outputs are a set of pixel-wise curve parameter maps for corresponding
higher-order curves. It is a plain CNN of seven convolutional layers with symmetrical
concatenation. Each layer consists of 32 convolutional kernels of size 3×3 and stride 1
followed by the ReLU activation function. The last convolutional layer is followed by the
Tanh activation function, which produces 24 parameter maps for 8 iterations, where each
iteration requires three curve parameter maps for the three channels.
![](https://i.imgur.com/HtIg34W.png)
"""
def build_dce_net():
input_img = keras.Input(shape=[None, None, 3])
conv1 = layers.Conv2D(
32, (3, 3), strides=(1, 1), activation="relu", padding="same"
)(input_img)
conv2 = layers.Conv2D(
32, (3, 3), strides=(1, 1), activation="relu", padding="same"
)(conv1)
conv3 = layers.Conv2D(
32, (3, 3), strides=(1, 1), activation="relu", padding="same"
)(conv2)
conv4 = layers.Conv2D(
32, (3, 3), strides=(1, 1), activation="relu", padding="same"
)(conv3)
int_con1 = layers.Concatenate(axis=-1)([conv4, conv3])
conv5 = layers.Conv2D(
32, (3, 3), strides=(1, 1), activation="relu", padding="same"
)(int_con1)
int_con2 = layers.Concatenate(axis=-1)([conv5, conv2])
conv6 = layers.Conv2D(
32, (3, 3), strides=(1, 1), activation="relu", padding="same"
)(int_con2)
int_con3 = layers.Concatenate(axis=-1)([conv6, conv1])
x_r = layers.Conv2D(
24, (3, 3), strides=(1, 1), activation="tanh", padding="same"
)(int_con3)
return keras.Model(inputs=input_img, outputs=x_r)
"""
## Loss functions
To enable zero-reference learning in DCE-Net, we use a set of differentiable
zero-reference losses that allow us to evaluate the quality of enhanced images.
"""
"""
### Color constancy loss
The *color constancy loss* is used to correct the potential color deviations in the
enhanced image.
"""
def color_constancy_loss(x):
mean_rgb = tf.reduce_mean(x, axis=(1, 2), keepdims=True)
mr, mg, mb = (
mean_rgb[:, :, :, 0],
mean_rgb[:, :, :, 1],
mean_rgb[:, :, :, 2],
)
d_rg = tf.square(mr - mg)
d_rb = tf.square(mr - mb)
d_gb = tf.square(mb - mg)
return tf.sqrt(tf.square(d_rg) + tf.square(d_rb) + tf.square(d_gb))
"""
### Exposure loss
To restrain under-/over-exposed regions, we use the *exposure control loss*.
It measures the distance between the average intensity value of a local region
and a preset well-exposedness level (set to `0.6`).
"""
def exposure_loss(x, mean_val=0.6):
x = tf.reduce_mean(x, axis=3, keepdims=True)
mean = tf.nn.avg_pool2d(x, ksize=16, strides=16, padding="VALID")
return tf.reduce_mean(tf.square(mean - mean_val))
"""
### Illumination smoothness loss
To preserve the monotonicity relations between neighboring pixels, the
*illumination smoothness loss* is added to each curve parameter map.
"""
def illumination_smoothness_loss(x):
batch_size = tf.shape(x)[0]
h_x = tf.shape(x)[1]
w_x = tf.shape(x)[2]
count_h = (tf.shape(x)[2] - 1) * tf.shape(x)[3]
count_w = tf.shape(x)[2] * (tf.shape(x)[3] - 1)
h_tv = tf.reduce_sum(tf.square((x[:, 1:, :, :] - x[:, : h_x - 1, :, :])))
w_tv = tf.reduce_sum(tf.square((x[:, :, 1:, :] - x[:, :, : w_x - 1, :])))
batch_size = tf.cast(batch_size, dtype=tf.float32)
count_h = tf.cast(count_h, dtype=tf.float32)
count_w = tf.cast(count_w, dtype=tf.float32)
return 2 * (h_tv / count_h + w_tv / count_w) / batch_size
"""
### Spatial consistency loss
The *spatial consistency loss* encourages spatial coherence of the enhanced image by
preserving the contrast between neighboring regions across the input image and its enhanced version.
"""
class SpatialConsistencyLoss(keras.losses.Loss):
def __init__(self, **kwargs):
super().__init__(reduction="none")
self.left_kernel = tf.constant(
[[[[0, 0, 0]], [[-1, 1, 0]], [[0, 0, 0]]]], dtype=tf.float32
)
self.right_kernel = tf.constant(
[[[[0, 0, 0]], [[0, 1, -1]], [[0, 0, 0]]]], dtype=tf.float32
)
self.up_kernel = tf.constant(
[[[[0, -1, 0]], [[0, 1, 0]], [[0, 0, 0]]]], dtype=tf.float32
)
self.down_kernel = tf.constant(
[[[[0, 0, 0]], [[0, 1, 0]], [[0, -1, 0]]]], dtype=tf.float32
)
def call(self, y_true, y_pred):
original_mean = tf.reduce_mean(y_true, 3, keepdims=True)
enhanced_mean = tf.reduce_mean(y_pred, 3, keepdims=True)
original_pool = tf.nn.avg_pool2d(
original_mean, ksize=4, strides=4, padding="VALID"
)
enhanced_pool = tf.nn.avg_pool2d(
enhanced_mean, ksize=4, strides=4, padding="VALID"
)
d_original_left = tf.nn.conv2d(
original_pool,
self.left_kernel,
strides=[1, 1, 1, 1],
padding="SAME",
)
d_original_right = tf.nn.conv2d(
original_pool,
self.right_kernel,
strides=[1, 1, 1, 1],
padding="SAME",
)
d_original_up = tf.nn.conv2d(
original_pool, self.up_kernel, strides=[1, 1, 1, 1], padding="SAME"
)
d_original_down = tf.nn.conv2d(
original_pool,
self.down_kernel,
strides=[1, 1, 1, 1],
padding="SAME",
)
d_enhanced_left = tf.nn.conv2d(
enhanced_pool,
self.left_kernel,
strides=[1, 1, 1, 1],
padding="SAME",
)
d_enhanced_right = tf.nn.conv2d(
enhanced_pool,
self.right_kernel,
strides=[1, 1, 1, 1],
padding="SAME",
)
d_enhanced_up = tf.nn.conv2d(
enhanced_pool, self.up_kernel, strides=[1, 1, 1, 1], padding="SAME"
)
d_enhanced_down = tf.nn.conv2d(
enhanced_pool,
self.down_kernel,
strides=[1, 1, 1, 1],
padding="SAME",
)
d_left = tf.square(d_original_left - d_enhanced_left)
d_right = tf.square(d_original_right - d_enhanced_right)
d_up = tf.square(d_original_up - d_enhanced_up)
d_down = tf.square(d_original_down - d_enhanced_down)
return d_left + d_right + d_up + d_down
"""
### Deep curve estimation model
We implement the Zero-DCE framework as a Keras subclassed model.
"""
class ZeroDCE(keras.Model):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.dce_model = build_dce_net()
def compile(self, learning_rate, **kwargs):
super().compile(**kwargs)
self.optimizer = keras.optimizers.Adam(learning_rate=learning_rate)
self.spatial_constancy_loss = SpatialConsistencyLoss(reduction="none")
self.total_loss_tracker = keras.metrics.Mean(name="total_loss")
self.illumination_smoothness_loss_tracker = keras.metrics.Mean(
name="illumination_smoothness_loss"
)
self.spatial_constancy_loss_tracker = keras.metrics.Mean(
name="spatial_constancy_loss"
)
self.color_constancy_loss_tracker = keras.metrics.Mean(
name="color_constancy_loss"
)
self.exposure_loss_tracker = keras.metrics.Mean(name="exposure_loss")
@property
def metrics(self):
return [
self.total_loss_tracker,
self.illumination_smoothness_loss_tracker,
self.spatial_constancy_loss_tracker,
self.color_constancy_loss_tracker,
self.exposure_loss_tracker,
]
def get_enhanced_image(self, data, output):
r1 = output[:, :, :, :3]
r2 = output[:, :, :, 3:6]
r3 = output[:, :, :, 6:9]
r4 = output[:, :, :, 9:12]
r5 = output[:, :, :, 12:15]
r6 = output[:, :, :, 15:18]
r7 = output[:, :, :, 18:21]
r8 = output[:, :, :, 21:24]
x = data + r1 * (tf.square(data) - data)
x = x + r2 * (tf.square(x) - x)
x = x + r3 * (tf.square(x) - x)
enhanced_image = x + r4 * (tf.square(x) - x)
x = enhanced_image + r5 * (tf.square(enhanced_image) - enhanced_image)
x = x + r6 * (tf.square(x) - x)
x = x + r7 * (tf.square(x) - x)
enhanced_image = x + r8 * (tf.square(x) - x)
return enhanced_image
def call(self, data):
dce_net_output = self.dce_model(data)
return self.get_enhanced_image(data, dce_net_output)
def compute_losses(self, data, output):
enhanced_image = self.get_enhanced_image(data, output)
loss_illumination = 200 * illumination_smoothness_loss(output)
loss_spatial_constancy = tf.reduce_mean(
self.spatial_constancy_loss(enhanced_image, data)
)
loss_color_constancy = 5 * tf.reduce_mean(
color_constancy_loss(enhanced_image)
)
loss_exposure = 10 * tf.reduce_mean(exposure_loss(enhanced_image))
total_loss = (
loss_illumination
+ loss_spatial_constancy
+ loss_color_constancy
+ loss_exposure
)
return {
"total_loss": total_loss,
"illumination_smoothness_loss": loss_illumination,
"spatial_constancy_loss": loss_spatial_constancy,
"color_constancy_loss": loss_color_constancy,
"exposure_loss": loss_exposure,
}
def train_step(self, data):
with tf.GradientTape() as tape:
output = self.dce_model(data)
losses = self.compute_losses(data, output)
gradients = tape.gradient(
losses["total_loss"], self.dce_model.trainable_weights
)
self.optimizer.apply_gradients(
zip(gradients, self.dce_model.trainable_weights)
)
self.total_loss_tracker.update_state(losses["total_loss"])
self.illumination_smoothness_loss_tracker.update_state(
losses["illumination_smoothness_loss"]
)
self.spatial_constancy_loss_tracker.update_state(
losses["spatial_constancy_loss"]
)
self.color_constancy_loss_tracker.update_state(
losses["color_constancy_loss"]
)
self.exposure_loss_tracker.update_state(losses["exposure_loss"])
return {metric.name: metric.result() for metric in self.metrics}
def test_step(self, data):
output = self.dce_model(data)
losses = self.compute_losses(data, output)
self.total_loss_tracker.update_state(losses["total_loss"])
self.illumination_smoothness_loss_tracker.update_state(
losses["illumination_smoothness_loss"]
)
self.spatial_constancy_loss_tracker.update_state(
losses["spatial_constancy_loss"]
)
self.color_constancy_loss_tracker.update_state(
losses["color_constancy_loss"]
)
self.exposure_loss_tracker.update_state(losses["exposure_loss"])
return {metric.name: metric.result() for metric in self.metrics}
def save_weights(
self, filepath, overwrite=True, save_format=None, options=None
):
"""While saving the weights, we simply save the weights of the DCE-Net"""
self.dce_model.save_weights(
filepath,
overwrite=overwrite,
save_format=save_format,
options=options,
)
def load_weights(
self, filepath, by_name=False, skip_mismatch=False, options=None
):
"""While loading the weights, we simply load the weights of the DCE-Net"""
self.dce_model.load_weights(
filepath=filepath,
by_name=by_name,
skip_mismatch=skip_mismatch,
options=options,
)
"""
## Training
"""
zero_dce_model = ZeroDCE()
zero_dce_model.compile(learning_rate=1e-4)
history = zero_dce_model.fit(
train_dataset, validation_data=val_dataset, epochs=100
)
def plot_result(item):
plt.plot(history.history[item], label=item)
plt.plot(history.history["val_" + item], label="val_" + item)
plt.xlabel("Epochs")
plt.ylabel(item)
plt.title("Train and Validation {} Over Epochs".format(item), fontsize=14)
plt.legend()
plt.grid()
plt.show()
plot_result("total_loss")
plot_result("illumination_smoothness_loss")
plot_result("spatial_constancy_loss")
plot_result("color_constancy_loss")
plot_result("exposure_loss")
"""
## Inference
"""
def plot_results(images, titles, figure_size=(12, 12)):
fig = plt.figure(figsize=figure_size)
for i in range(len(images)):
fig.add_subplot(1, len(images), i + 1).set_title(titles[i])
_ = plt.imshow(images[i])
plt.axis("off")
plt.show()
def infer(original_image):
image = keras.utils.img_to_array(original_image)
image = image.astype("float32") / 255.0
image = np.expand_dims(image, axis=0)
output_image = zero_dce_model(image)
output_image = tf.cast((output_image[0, :, :, :] * 255), dtype=np.uint8)
output_image = Image.fromarray(output_image.numpy())
return output_image
"""
### Inference on test images
We compare the test images from the LoL Dataset enhanced by Zero-DCE with images enhanced via
the `PIL.ImageOps.autocontrast()` function.
You can use the trained model hosted on [Hugging Face Hub](https://huggingface.co/keras-io/low-light-image-enhancement)
and try the demo on [Hugging Face Spaces](https://huggingface.co/spaces/keras-io/low-light-image-enhancement).
"""
for val_image_file in test_low_light_images:
original_image = Image.open(val_image_file)
enhanced_image = infer(original_image)
plot_results(
[original_image, ImageOps.autocontrast(original_image), enhanced_image],
["Original", "PIL Autocontrast", "Enhanced"],
(20, 12),
)
| keras-core/examples/keras_io/tensorflow/vision/zero_dce.py/0 | {
"file_path": "keras-core/examples/keras_io/tensorflow/vision/zero_dce.py",
"repo_id": "keras-core",
"token_count": 7947
} | 27 |
"""
Title: Training & evaluation with the built-in methods
Author: [fchollet](https://twitter.com/fchollet)
Date created: 2019/03/01
Last modified: 2023/03/25
Description: Complete guide to training & evaluation with `fit()` and `evaluate()`.
Accelerator: GPU
"""
"""
## Setup
"""
# We import torch & TF so as to use torch Dataloaders & tf.data.Datasets.
import torch
import tensorflow as tf
import os
import numpy as np
import keras_core as keras
from keras_core import layers
from keras_core import ops
"""
## Introduction
This guide covers training, evaluation, and prediction (inference) models
when using built-in APIs for training & validation (such as `Model.fit()`,
`Model.evaluate()` and `Model.predict()`).
If you are interested in leveraging `fit()` while specifying your
own training step function, see the
[Customizing what happens in `fit()` guide](/guides/customizing_what_happens_in_fit/).
If you are interested in writing your own training & evaluation loops from
scratch, see the guide
["writing a training loop from scratch"](/guides/writing_a_training_loop_from_scratch/).
In general, whether you are using built-in loops or writing your own, model training &
evaluation works strictly in the same way across every kind of Keras model --
Sequential models, models built with the Functional API, and models written from
scratch via model subclassing.
This guide doesn't cover distributed training, which is covered in our
[guide to multi-GPU & distributed training](https://keras.io/guides/distributed_training/).
"""
"""
## API overview: a first end-to-end example
When passing data to the built-in training loops of a model, you should either use:
- NumPy arrays (if your data is small and fits in memory)
- Subclasses of `keras_core.utils.PyDataset`
- `tf.data.Dataset` objects
- PyTorch `DataLoader` instances
In the next few paragraphs, we'll use the MNIST dataset as NumPy arrays, in
order to demonstrate how to use optimizers, losses, and metrics. Afterwards, we'll
take a close look at each of the other options.
Let's consider the following model (here, we build it with the Functional API, but it
could be a Sequential model or a subclassed model as well):
"""
inputs = keras.Input(shape=(784,), name="digits")
x = layers.Dense(64, activation="relu", name="dense_1")(inputs)
x = layers.Dense(64, activation="relu", name="dense_2")(x)
outputs = layers.Dense(10, activation="softmax", name="predictions")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
"""
Here's what the typical end-to-end workflow looks like, consisting of:
- Training
- Validation on a holdout set generated from the original training data
- Evaluation on the test data
We'll use MNIST data for this example.
"""
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
# Preprocess the data (these are NumPy arrays)
x_train = x_train.reshape(60000, 784).astype("float32") / 255
x_test = x_test.reshape(10000, 784).astype("float32") / 255
y_train = y_train.astype("float32")
y_test = y_test.astype("float32")
# Reserve 10,000 samples for validation
x_val = x_train[-10000:]
y_val = y_train[-10000:]
x_train = x_train[:-10000]
y_train = y_train[:-10000]
"""
We specify the training configuration (optimizer, loss, metrics):
"""
model.compile(
optimizer=keras.optimizers.RMSprop(), # Optimizer
# Loss function to minimize
loss=keras.losses.SparseCategoricalCrossentropy(),
# List of metrics to monitor
metrics=[keras.metrics.SparseCategoricalAccuracy()],
)
"""
We call `fit()`, which will train the model by slicing the data into "batches" of size
`batch_size`, and repeatedly iterating over the entire dataset for a given number of
`epochs`.
"""
print("Fit model on training data")
history = model.fit(
x_train,
y_train,
batch_size=64,
epochs=2,
# We pass some validation for
# monitoring validation loss and metrics
# at the end of each epoch
validation_data=(x_val, y_val),
)
"""
The returned `history` object holds a record of the loss values and metric values
during training:
"""
history.history
"""
We evaluate the model on the test data via `evaluate()`:
"""
# Evaluate the model on the test data using `evaluate`
print("Evaluate on test data")
results = model.evaluate(x_test, y_test, batch_size=128)
print("test loss, test acc:", results)
# Generate predictions (probabilities -- the output of the last layer)
# on new data using `predict`
print("Generate predictions for 3 samples")
predictions = model.predict(x_test[:3])
print("predictions shape:", predictions.shape)
"""
Now, let's review each piece of this workflow in detail.
"""
"""
## The `compile()` method: specifying a loss, metrics, and an optimizer
To train a model with `fit()`, you need to specify a loss function, an optimizer, and
optionally, some metrics to monitor.
You pass these to the model as arguments to the `compile()` method:
"""
model.compile(
optimizer=keras.optimizers.RMSprop(learning_rate=1e-3),
loss=keras.losses.SparseCategoricalCrossentropy(),
metrics=[keras.metrics.SparseCategoricalAccuracy()],
)
"""
The `metrics` argument should be a list -- your model can have any number of metrics.
If your model has multiple outputs, you can specify different losses and metrics for
each output, and you can modulate the contribution of each output to the total loss of
the model. You will find more details about this in the **Passing data to multi-input,
multi-output models** section.
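For illustration, here is a sketch of what this can look like (the output names
`score_output` and `class_output` are placeholders for a hypothetical two-output model;
the full worked example comes later in this guide):
```python
model.compile(
    optimizer="rmsprop",
    loss={"score_output": "mse", "class_output": "categorical_crossentropy"},
    loss_weights={"score_output": 2.0, "class_output": 1.0},
)
```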
Note that if you're satisfied with the default settings, in many cases the optimizer,
loss, and metrics can be specified via string identifiers as a shortcut:
"""
model.compile(
optimizer="rmsprop",
loss="sparse_categorical_crossentropy",
metrics=["sparse_categorical_accuracy"],
)
"""
For later reuse, let's put our model definition and compile step in functions; we will
call them several times across different examples in this guide.
"""
def get_uncompiled_model():
inputs = keras.Input(shape=(784,), name="digits")
x = layers.Dense(64, activation="relu", name="dense_1")(inputs)
x = layers.Dense(64, activation="relu", name="dense_2")(x)
outputs = layers.Dense(10, activation="softmax", name="predictions")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
return model
def get_compiled_model():
model = get_uncompiled_model()
model.compile(
optimizer="rmsprop",
loss="sparse_categorical_crossentropy",
metrics=["sparse_categorical_accuracy"],
)
return model
"""
### Many built-in optimizers, losses, and metrics are available
In general, you won't have to create your own losses, metrics, or optimizers
from scratch, because what you need is likely to be already part of the Keras API:
Optimizers:
- `SGD()` (with or without momentum)
- `RMSprop()`
- `Adam()`
- etc.
Losses:
- `MeanSquaredError()`
- `KLDivergence()`
- `CosineSimilarity()`
- etc.
Metrics:
- `AUC()`
- `Precision()`
- `Recall()`
- etc.
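For instance, here is a minimal sketch (not part of the original workflow) that combines
a few of these built-ins by instance rather than by string identifier:
```python
model.compile(
    optimizer=keras.optimizers.SGD(learning_rate=1e-3, momentum=0.9),
    loss=keras.losses.MeanSquaredError(),
    metrics=[keras.metrics.CosineSimilarity()],
)
```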
"""
"""
### Custom losses
If you need to create a custom loss, Keras provides three ways to do so.
The first method involves creating a function that accepts inputs `y_true` and
`y_pred`. The following example shows a loss function that computes the mean squared
error between the real data and the predictions:
"""
def custom_mean_squared_error(y_true, y_pred):
return ops.mean(ops.square(y_true - y_pred), axis=-1)
model = get_uncompiled_model()
model.compile(optimizer=keras.optimizers.Adam(), loss=custom_mean_squared_error)
# We need to one-hot encode the labels to use MSE
y_train_one_hot = ops.one_hot(y_train, num_classes=10)
model.fit(x_train, y_train_one_hot, batch_size=64, epochs=1)
"""
If you need a loss function that takes in parameters beside `y_true` and `y_pred`, you
can subclass the `keras.losses.Loss` class and implement the following two methods:
- `__init__(self)`: accept parameters to pass during the call of your loss function
- `call(self, y_true, y_pred)`: use the targets (y_true) and the model predictions
(y_pred) to compute the model's loss
Let's say you want to use mean squared error, but with an added term that
will de-incentivize prediction values far from 0.5 (we assume that the categorical
targets are one-hot encoded and take values between 0 and 1). This
creates an incentive for the model not to be too confident, which may help
reduce overfitting (we won't know if it works until we try!).
Here's how you would do it:
"""
class CustomMSE(keras.losses.Loss):
def __init__(self, regularization_factor=0.1, name="custom_mse"):
super().__init__(name=name)
self.regularization_factor = regularization_factor
def call(self, y_true, y_pred):
mse = ops.mean(ops.square(y_true - y_pred), axis=-1)
reg = ops.mean(ops.square(0.5 - y_pred), axis=-1)
return mse + reg * self.regularization_factor
model = get_uncompiled_model()
model.compile(optimizer=keras.optimizers.Adam(), loss=CustomMSE())
y_train_one_hot = ops.one_hot(y_train, num_classes=10)
model.fit(x_train, y_train_one_hot, batch_size=64, epochs=1)
"""
### Custom metrics
If you need a metric that isn't part of the API, you can easily create custom metrics
by subclassing the `keras.metrics.Metric` class. You will need to implement 4
methods:
- `__init__(self)`, in which you will create state variables for your metric.
- `update_state(self, y_true, y_pred, sample_weight=None)`, which uses the targets
y_true and the model predictions y_pred to update the state variables.
- `result(self)`, which uses the state variables to compute the final results.
- `reset_state(self)`, which reinitializes the state of the metric.
State update and results computation are kept separate (in `update_state()` and
`result()`, respectively) because in some cases, the results computation might be very
expensive and would only be done periodically.
Here's a simple example showing how to implement a `CategoricalTruePositives` metric
that counts how many samples were correctly classified as belonging to a given class:
"""
class CategoricalTruePositives(keras.metrics.Metric):
def __init__(self, name="categorical_true_positives", **kwargs):
super().__init__(name=name, **kwargs)
self.true_positives = self.add_variable(
shape=(), name="ctp", initializer="zeros"
)
def update_state(self, y_true, y_pred, sample_weight=None):
y_pred = ops.reshape(ops.argmax(y_pred, axis=1), (-1, 1))
values = ops.cast(y_true, "int32") == ops.cast(y_pred, "int32")
values = ops.cast(values, "float32")
if sample_weight is not None:
sample_weight = ops.cast(sample_weight, "float32")
values = ops.multiply(values, sample_weight)
self.true_positives.assign_add(ops.sum(values))
def result(self):
return self.true_positives
def reset_state(self):
# The state of the metric will be reset at the start of each epoch.
self.true_positives.assign(0.0)
model = get_uncompiled_model()
model.compile(
optimizer=keras.optimizers.RMSprop(learning_rate=1e-3),
loss=keras.losses.SparseCategoricalCrossentropy(),
metrics=[CategoricalTruePositives()],
)
model.fit(x_train, y_train, batch_size=64, epochs=3)
"""
### Handling losses and metrics that don't fit the standard signature
The overwhelming majority of losses and metrics can be computed from `y_true` and
`y_pred`, where `y_pred` is an output of your model -- but not all of them. For
instance, a regularization loss may only require the activation of a layer (there are
no targets in this case), and this activation may not be a model output.
In such cases, you can call `self.add_loss(loss_value)` from inside the call method of
a custom layer. Losses added in this way get added to the "main" loss during training
(the one passed to `compile()`). Here's a simple example that adds activity
regularization (note that activity regularization is built-in in all Keras layers --
this layer is just for the sake of providing a concrete example):
"""
class ActivityRegularizationLayer(layers.Layer):
def call(self, inputs):
self.add_loss(ops.sum(inputs) * 0.1)
return inputs # Pass-through layer.
inputs = keras.Input(shape=(784,), name="digits")
x = layers.Dense(64, activation="relu", name="dense_1")(inputs)
# Insert activity regularization as a layer
x = ActivityRegularizationLayer()(x)
x = layers.Dense(64, activation="relu", name="dense_2")(x)
outputs = layers.Dense(10, name="predictions")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
model.compile(
optimizer=keras.optimizers.RMSprop(learning_rate=1e-3),
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
)
# The displayed loss will be much higher than before
# due to the regularization component.
model.fit(x_train, y_train, batch_size=64, epochs=1)
"""
Note that when you pass losses via `add_loss()`, it becomes possible to call
`compile()` without a loss function, since the model already has a loss to minimize.
Consider the following `LogisticEndpoint` layer: it takes as inputs
targets & logits, and it tracks a crossentropy loss via `add_loss()`.
"""
class LogisticEndpoint(keras.layers.Layer):
def __init__(self, name=None):
super().__init__(name=name)
self.loss_fn = keras.losses.BinaryCrossentropy(from_logits=True)
def call(self, targets, logits, sample_weights=None):
# Compute the training-time loss value and add it
# to the layer using `self.add_loss()`.
loss = self.loss_fn(targets, logits, sample_weights)
self.add_loss(loss)
# Return the inference-time prediction tensor (for `.predict()`).
return ops.softmax(logits)
"""
You can use it in a model with two inputs (input data & targets), compiled without a
`loss` argument, like this:
"""
inputs = keras.Input(shape=(3,), name="inputs")
targets = keras.Input(shape=(10,), name="targets")
logits = keras.layers.Dense(10)(inputs)
predictions = LogisticEndpoint(name="predictions")(targets, logits)
model = keras.Model(inputs=[inputs, targets], outputs=predictions)
model.compile(optimizer="adam") # No loss argument!
data = {
"inputs": np.random.random((3, 3)),
"targets": np.random.random((3, 10)),
}
model.fit(data)
"""
For more information about training multi-input models, see the section **Passing data
to multi-input, multi-output models**.
"""
"""
### Automatically setting apart a validation holdout set
In the first end-to-end example you saw, we used the `validation_data` argument to pass
a tuple of NumPy arrays `(x_val, y_val)` to the model for evaluating a validation loss
and validation metrics at the end of each epoch.
Here's another option: the argument `validation_split` allows you to automatically
reserve part of your training data for validation. The argument value represents the
fraction of the data to be reserved for validation, so it should be set to a number
higher than 0 and lower than 1. For instance, `validation_split=0.2` means "use 20% of
the data for validation", and `validation_split=0.6` means "use 60% of the data for
validation".
The way the validation is computed is by taking the last x% samples of the arrays
received by the `fit()` call, before any shuffling.
Note that you can only use `validation_split` when training with NumPy data.
"""
model = get_compiled_model()
model.fit(x_train, y_train, batch_size=64, validation_split=0.2, epochs=1)
"""
## Training & evaluation using `tf.data` Datasets
In the past few paragraphs, you've seen how to handle losses, metrics, and optimizers,
and you've seen how to use the `validation_data` and `validation_split` arguments in
`fit()`, when your data is passed as NumPy arrays.
Another option is to use an iterator-like object, such as a `tf.data.Dataset`, a
PyTorch `DataLoader`, or a Keras `PyDataset`. Let's take a look at the first of these.
The `tf.data` API is a set of utilities in TensorFlow 2.0 for loading and preprocessing
data in a way that's fast and scalable. For a complete guide about creating `Datasets`,
see the [tf.data documentation](https://www.tensorflow.org/guide/data).
**You can use `tf.data` to train your Keras
models regardless of the backend you're using --
whether it's JAX, PyTorch, or TensorFlow.**
You can pass a `Dataset` instance directly to the methods `fit()`, `evaluate()`, and
`predict()`:
"""
model = get_compiled_model()
# First, let's create a training Dataset instance.
# For the sake of our example, we'll use the same MNIST data as before.
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
# Shuffle and slice the dataset.
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)
# Now we get a test dataset.
test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test))
test_dataset = test_dataset.batch(64)
# Since the dataset already takes care of batching,
# we don't pass a `batch_size` argument.
model.fit(train_dataset, epochs=3)
# You can also evaluate or predict on a dataset.
print("Evaluate")
result = model.evaluate(test_dataset)
dict(zip(model.metrics_names, result))
"""
Note that the Dataset is reset at the end of each epoch, so it can be reused for the
next epoch.
If you want to run training only on a specific number of batches from this Dataset, you
can pass the `steps_per_epoch` argument, which specifies how many training steps the
model should run using this Dataset before moving on to the next epoch.
"""
model = get_compiled_model()
# Prepare the training dataset
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)
# Only use 100 batches per epoch (that's 64 * 100 samples)
model.fit(train_dataset, epochs=3, steps_per_epoch=100)
"""
You can also pass a `Dataset` instance as the `validation_data` argument in `fit()`:
"""
model = get_compiled_model()
# Prepare the training dataset
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)
# Prepare the validation dataset
val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
val_dataset = val_dataset.batch(64)
model.fit(train_dataset, epochs=1, validation_data=val_dataset)
"""
At the end of each epoch, the model will iterate over the validation dataset and
compute the validation loss and validation metrics.
If you want to run validation only on a specific number of batches from this dataset,
you can pass the `validation_steps` argument, which specifies how many validation
steps the model should run with the validation dataset before interrupting validation
and moving on to the next epoch:
"""
model = get_compiled_model()
# Prepare the training dataset
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)
# Prepare the validation dataset
val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
val_dataset = val_dataset.batch(64)
model.fit(
train_dataset,
epochs=1,
# Only run validation using the first 10 batches of the dataset
# using the `validation_steps` argument
validation_data=val_dataset,
validation_steps=10,
)
"""
Note that the validation dataset will be reset after each use (so that you will always
be evaluating on the same samples from epoch to epoch).
The argument `validation_split` (generating a holdout set from the training data) is
not supported when training from `Dataset` objects, since this feature requires the
ability to index the samples of the datasets, which is not possible in general with
the `Dataset` API.
"""
"""
## Training & evaluation using `PyDataset` instances
`keras.utils.PyDataset` is a utility that you can subclass to obtain
a Python generator with two important properties:
- It works well with multiprocessing.
- It can be shuffled (e.g. when passing `shuffle=True` in `fit()`).
A `PyDataset` must implement two methods:
- `__getitem__`
- `__len__`
The method `__getitem__` should return a complete batch.
If you want to modify your dataset between epochs, you may implement `on_epoch_end`.
Here's a quick example:
"""
class ExamplePyDataset(keras.utils.PyDataset):
def __init__(self, x, y, batch_size, **kwargs):
super().__init__(**kwargs)
self.x = x
self.y = y
self.batch_size = batch_size
def __len__(self):
return int(np.ceil(len(self.x) / float(self.batch_size)))
def __getitem__(self, idx):
batch_x = self.x[idx * self.batch_size : (idx + 1) * self.batch_size]
batch_y = self.y[idx * self.batch_size : (idx + 1) * self.batch_size]
return batch_x, batch_y
train_py_dataset = ExamplePyDataset(x_train, y_train, batch_size=32)
val_py_dataset = ExamplePyDataset(x_val, y_val, batch_size=32)
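"""
The example above does not implement `on_epoch_end`. As an illustrative sketch (not part
of the original guide), a subclass could reshuffle its samples between epochs like this:
"""
class ShuffledPyDataset(ExamplePyDataset):
    def __init__(self, x, y, batch_size, **kwargs):
        super().__init__(x, y, batch_size, **kwargs)
        self.indices = np.arange(len(self.x))
    def __getitem__(self, idx):
        batch_ids = self.indices[
            idx * self.batch_size : (idx + 1) * self.batch_size
        ]
        return self.x[batch_ids], self.y[batch_ids]
    def on_epoch_end(self):
        # Reshuffle the sample order at the end of every epoch.
        np.random.shuffle(self.indices)
shuffled_train_py_dataset = ShuffledPyDataset(x_train, y_train, batch_size=32)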
"""
To fit the model, pass the dataset instead as the `x` argument (no need for a `y`
argument since the dataset includes the targets), and pass the validation dataset
as the `validation_data` argument. And no need for the `batch_size` argument, since
the dataset is already batched!
"""
model = get_compiled_model()
model.fit(
train_py_dataset, batch_size=64, validation_data=val_py_dataset, epochs=1
)
"""
Evaluating the model is just as easy:
"""
model.evaluate(val_py_dataset)
"""
Importantly, `PyDataset` objects support three common constructor arguments
that handle the parallel processing configuration:
- `workers`: Number of workers to use in multithreading or
multiprocessing. Typically, you'd set it to the number of
cores on your CPU.
- `use_multiprocessing`: Whether to use Python multiprocessing for
parallelism. Setting this to `True` means that your
dataset will be replicated in multiple forked processes.
This is necessary to gain compute-level (rather than I/O level)
benefits from parallelism. However it can only be set to
`True` if your dataset can be safely pickled.
- `max_queue_size`: Maximum number of batches to keep in the queue
when iterating over the dataset in a multithreaded or
multiprocessed setting.
You can reduce this value to reduce the CPU memory consumption of
your dataset. It defaults to 10.
By default, multiprocessing is disabled (`use_multiprocessing=False`) and only
one thread is used. You should make sure to only turn on `use_multiprocessing` if
your code is running inside a Python `if __name__ == "__main__":` block in order
to avoid issues.
Here's a 4-thread, non-multiprocessed example:
"""
train_py_dataset = ExamplePyDataset(x_train, y_train, batch_size=32, workers=4)
val_py_dataset = ExamplePyDataset(x_val, y_val, batch_size=32, workers=4)
model = get_compiled_model()
model.fit(
train_py_dataset, batch_size=64, validation_data=val_py_dataset, epochs=1
)
"""
## Training & evaluation using PyTorch `DataLoader` objects
All built-in training and evaluation APIs are also compatible with `torch.utils.data.Dataset` and
`torch.utils.data.DataLoader` objects -- regardless of whether you're using the PyTorch backend,
or the JAX or TensorFlow backends. Let's take a look at a simple example.
Unlike `PyDataset` which are batch-centric, PyTorch `Dataset` objects are sample-centric:
the `__len__` method returns the number of samples,
and the `__getitem__` method returns a specific sample.
"""
class ExampleTorchDataset(torch.utils.data.Dataset):
def __init__(self, x, y):
self.x = x
self.y = y
def __len__(self):
return len(self.x)
def __getitem__(self, idx):
return self.x[idx], self.y[idx]
train_torch_dataset = ExampleTorchDataset(x_train, y_train)
val_torch_dataset = ExampleTorchDataset(x_val, y_val)
"""
To use a PyTorch Dataset, you need to wrap it into a `Dataloader` which takes care
of batching and shuffling:
"""
train_dataloader = torch.utils.data.DataLoader(
train_torch_dataset, batch_size=32, shuffle=True
)
val_dataloader = torch.utils.data.DataLoader(
val_torch_dataset, batch_size=32, shuffle=True
)
"""
Now you can use them in the Keras API just like any other iterator:
"""
model = get_compiled_model()
model.fit(
    train_dataloader, validation_data=val_dataloader, epochs=1
)
model.evaluate(val_dataloader)
"""
## Using sample weighting and class weighting
With the default settings, the weight of a sample is decided by its frequency
in the dataset. There are two methods to weight the data, independent of
sample frequency:
* Class weights
* Sample weights
"""
"""
### Class weights
This is set by passing a dictionary to the `class_weight` argument to
`Model.fit()`. This dictionary maps class indices to the weight that should
be used for samples belonging to this class.
This can be used to balance classes without resampling, or to train a
model that gives more importance to a particular class.
For instance, if class "0" is half as represented as class "1" in your data,
you could use `Model.fit(..., class_weight={0: 1., 1: 0.5})`.
"""
"""
Here's a NumPy example where we use class weights or sample weights to
give more importance to the correct classification of class #5 (which
is the digit "5" in the MNIST dataset).
"""
class_weight = {
0: 1.0,
1: 1.0,
2: 1.0,
3: 1.0,
4: 1.0,
# Set weight "2" for class "5",
# making this class 2x more important
5: 2.0,
6: 1.0,
7: 1.0,
8: 1.0,
9: 1.0,
}
print("Fit with class weight")
model = get_compiled_model()
model.fit(x_train, y_train, class_weight=class_weight, batch_size=64, epochs=1)
"""
### Sample weights
For fine-grained control, or if you are not building a classifier,
you can use "sample weights".
- When training from NumPy data: Pass the `sample_weight`
argument to `Model.fit()`.
- When training from `tf.data` or any other sort of iterator:
Yield `(input_batch, label_batch, sample_weight_batch)` tuples.
A "sample weights" array is an array of numbers that specify how much weight
each sample in a batch should have in computing the total loss. It is commonly
used in imbalanced classification problems (the idea being to give more weight
to rarely-seen classes).
When the weights used are ones and zeros, the array can be used as a *mask* for
the loss function (entirely discarding the contribution of certain samples to
the total loss).
"""
sample_weight = np.ones(shape=(len(y_train),))
sample_weight[y_train == 5] = 2.0
print("Fit with sample weight")
model = get_compiled_model()
model.fit(
x_train, y_train, sample_weight=sample_weight, batch_size=64, epochs=1
)
"""
Here's a matching `Dataset` example:
"""
sample_weight = np.ones(shape=(len(y_train),))
sample_weight[y_train == 5] = 2.0
# Create a Dataset that includes sample weights
# (3rd element in the return tuple).
train_dataset = tf.data.Dataset.from_tensor_slices(
(x_train, y_train, sample_weight)
)
# Shuffle and slice the dataset.
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)
model = get_compiled_model()
model.fit(train_dataset, epochs=1)
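"""
As a side note on the masking use case mentioned earlier: a `sample_weight` made of
ones and zeros simply drops the masked samples from the loss. Here's a minimal sketch
that ignores all samples of class "5":
"""
mask_weight = np.ones(shape=(len(y_train),))
mask_weight[y_train == 5] = 0.0  # These samples no longer contribute to the loss.
model = get_compiled_model()
model.fit(x_train, y_train, sample_weight=mask_weight, batch_size=64, epochs=1)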
"""
## Passing data to multi-input, multi-output models
In the previous examples, we were considering a model with a single input (a tensor of
shape `(784,)`) and a single output (a prediction tensor of shape `(10,)`). But what
about models that have multiple inputs or outputs?
Consider the following model, which has an image input of shape `(32, 32, 3)` (that's
`(height, width, channels)`) and a time series input of shape `(None, 10)` (that's
`(timesteps, features)`). Our model will have two outputs computed from the
combination of these inputs: a "score" (of shape `(1,)`) and a probability
distribution over five classes (of shape `(5,)`).
"""
image_input = keras.Input(shape=(32, 32, 3), name="img_input")
timeseries_input = keras.Input(shape=(None, 10), name="ts_input")
x1 = layers.Conv2D(3, 3)(image_input)
x1 = layers.GlobalMaxPooling2D()(x1)
x2 = layers.Conv1D(3, 3)(timeseries_input)
x2 = layers.GlobalMaxPooling1D()(x2)
x = layers.concatenate([x1, x2])
score_output = layers.Dense(1, name="score_output")(x)
class_output = layers.Dense(5, name="class_output")(x)
model = keras.Model(
inputs=[image_input, timeseries_input], outputs=[score_output, class_output]
)
"""
Let's plot this model, so you can clearly see what we're doing here (note that the
shapes shown in the plot are batch shapes, rather than per-sample shapes).
"""
keras.utils.plot_model(
model, "multi_input_and_output_model.png", show_shapes=True
)
"""
At compilation time, we can specify different losses to different outputs, by passing
the loss functions as a list:
"""
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss=[
keras.losses.MeanSquaredError(),
keras.losses.CategoricalCrossentropy(),
],
)
"""
If we only passed a single loss function to the model, the same loss function would be
applied to every output (which is not appropriate here).
Likewise for metrics:
"""
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss=[
keras.losses.MeanSquaredError(),
keras.losses.CategoricalCrossentropy(),
],
metrics=[
[
keras.metrics.MeanAbsolutePercentageError(),
keras.metrics.MeanAbsoluteError(),
],
[keras.metrics.CategoricalAccuracy()],
],
)
"""
Since we gave names to our output layers, we could also specify per-output losses and
metrics via a dict:
"""
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss={
"score_output": keras.losses.MeanSquaredError(),
"class_output": keras.losses.CategoricalCrossentropy(),
},
metrics={
"score_output": [
keras.metrics.MeanAbsolutePercentageError(),
keras.metrics.MeanAbsoluteError(),
],
"class_output": [keras.metrics.CategoricalAccuracy()],
},
)
"""
We recommend the use of explicit names and dicts if you have more than 2 outputs.
It's possible to give different weights to different output-specific losses (for
instance, one might wish to privilege the "score" loss in our example, by giving it 2x
the importance of the class loss), using the `loss_weights` argument:
"""
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss={
"score_output": keras.losses.MeanSquaredError(),
"class_output": keras.losses.CategoricalCrossentropy(),
},
metrics={
"score_output": [
keras.metrics.MeanAbsolutePercentageError(),
keras.metrics.MeanAbsoluteError(),
],
"class_output": [keras.metrics.CategoricalAccuracy()],
},
loss_weights={"score_output": 2.0, "class_output": 1.0},
)
"""
You could also choose not to compute a loss for certain outputs, if these outputs are
meant for prediction but not for training:
"""
# List loss version
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss=[None, keras.losses.CategoricalCrossentropy()],
)
# Or dict loss version
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss={"class_output": keras.losses.CategoricalCrossentropy()},
)
"""
Passing data to a multi-input or multi-output model in `fit()` works in a similar way as
specifying a loss function in compile: you can pass **lists of NumPy arrays** (with
1:1 mapping to the outputs that received a loss function) or **dicts mapping output
names to NumPy arrays**.
"""
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss=[
keras.losses.MeanSquaredError(),
keras.losses.CategoricalCrossentropy(),
],
)
# Generate dummy NumPy data
img_data = np.random.random_sample(size=(100, 32, 32, 3))
ts_data = np.random.random_sample(size=(100, 20, 10))
score_targets = np.random.random_sample(size=(100, 1))
class_targets = np.random.random_sample(size=(100, 5))
# Fit on lists
model.fit(
[img_data, ts_data], [score_targets, class_targets], batch_size=32, epochs=1
)
# Alternatively, fit on dicts
model.fit(
{"img_input": img_data, "ts_input": ts_data},
{"score_output": score_targets, "class_output": class_targets},
batch_size=32,
epochs=1,
)
"""
Here's the `Dataset` use case: similarly to what we did for NumPy arrays, the `Dataset`
should return a tuple of dicts.
"""
train_dataset = tf.data.Dataset.from_tensor_slices(
(
{"img_input": img_data, "ts_input": ts_data},
{"score_output": score_targets, "class_output": class_targets},
)
)
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)
model.fit(train_dataset, epochs=1)
"""
## Using callbacks
Callbacks in Keras are objects that are called at different points during training (at
the start of an epoch, at the end of a batch, at the end of an epoch, etc.). They
can be used to implement certain behaviors, such as:
- Doing validation at different points during training (beyond the built-in per-epoch
validation)
- Checkpointing the model at regular intervals or when it exceeds a certain accuracy
threshold
- Changing the learning rate of the model when training seems to be plateauing
- Doing fine-tuning of the top layers when training seems to be plateauing
- Sending email or instant message notifications when training ends or when a certain
performance threshold is exceeded
- Etc.
Callbacks can be passed as a list to your call to `fit()`:
"""
model = get_compiled_model()
callbacks = [
keras.callbacks.EarlyStopping(
# Stop training when `val_loss` is no longer improving
monitor="val_loss",
# "no longer improving" being defined as "no better than 1e-2 less"
min_delta=1e-2,
# "no longer improving" being further defined as "for at least 2 epochs"
patience=2,
verbose=1,
)
]
model.fit(
x_train,
y_train,
epochs=20,
batch_size=64,
callbacks=callbacks,
validation_split=0.2,
)
"""
### Many built-in callbacks are available
There are many built-in callbacks already available in Keras, such as:
- `ModelCheckpoint`: Periodically save the model.
- `EarlyStopping`: Stop training when training is no longer improving the validation
metrics.
- `TensorBoard`: Periodically write model logs that can be visualized in
[TensorBoard](https://www.tensorflow.org/tensorboard) (more details in the section
"Visualization").
- `CSVLogger`: Streams loss and metrics data to a CSV file.
- etc.
See the [callbacks documentation](/api/callbacks/) for the complete list.
### Writing your own callback
You can create a custom callback by extending the base class
`keras.callbacks.Callback`. A callback has access to its associated model through the
class property `self.model`.
Make sure to read the
[complete guide to writing custom callbacks](/guides/writing_your_own_callbacks/).
Here's a simple example saving a list of per-batch loss values during training:
"""
class LossHistory(keras.callbacks.Callback):
def on_train_begin(self, logs):
self.per_batch_losses = []
def on_batch_end(self, batch, logs):
self.per_batch_losses.append(logs.get("loss"))
"""
## Checkpointing models
When you're training a model on relatively large datasets, it's crucial to save
checkpoints of your model at frequent intervals.
The easiest way to achieve this is with the `ModelCheckpoint` callback:
"""
model = get_compiled_model()
callbacks = [
keras.callbacks.ModelCheckpoint(
# Path where to save the model
# The two parameters below mean that we will overwrite
# the current checkpoint if and only if
# the `val_loss` score has improved.
# The saved model name will include the current epoch.
filepath="mymodel_{epoch}.keras",
save_best_only=True, # Only save a model if `val_loss` has improved.
monitor="val_loss",
verbose=1,
)
]
model.fit(
x_train,
y_train,
epochs=2,
batch_size=64,
callbacks=callbacks,
validation_split=0.2,
)
"""
The `ModelCheckpoint` callback can be used to implement fault-tolerance:
the ability to restart training from the last saved state of the model in case training
gets randomly interrupted. Here's a basic example:
"""
# Prepare a directory to store all the checkpoints.
checkpoint_dir = "./ckpt"
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
def make_or_restore_model():
# Either restore the latest model, or create a fresh one
# if there is no checkpoint available.
checkpoints = [
checkpoint_dir + "/" + name for name in os.listdir(checkpoint_dir)
]
if checkpoints:
latest_checkpoint = max(checkpoints, key=os.path.getctime)
print("Restoring from", latest_checkpoint)
return keras.models.load_model(latest_checkpoint)
print("Creating a new model")
return get_compiled_model()
model = make_or_restore_model()
callbacks = [
# This callback saves the model every 100 batches.
# We include the training loss in the saved model name.
keras.callbacks.ModelCheckpoint(
filepath=checkpoint_dir + "/model-loss={loss:.2f}.keras", save_freq=100
)
]
model.fit(x_train, y_train, epochs=1, callbacks=callbacks)
"""
You can also write your own callback for saving and restoring models.
For a complete guide on serialization and saving, see the
[guide to saving and serializing Models](/guides/serialization_and_saving/).
"""
"""
## Using learning rate schedules
A common pattern when training deep learning models is to gradually reduce the learning
rate as training progresses. This is generally known as "learning rate decay".
The learning rate decay schedule could be static (fixed in advance, as a function of the
current epoch or the current batch index), or dynamic (responding to the current
behavior of the model, in particular the validation loss).
### Passing a schedule to an optimizer
You can easily use a static learning rate decay schedule by passing a schedule object
as the `learning_rate` argument in your optimizer:
"""
initial_learning_rate = 0.1
lr_schedule = keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate, decay_steps=100000, decay_rate=0.96, staircase=True
)
optimizer = keras.optimizers.RMSprop(learning_rate=lr_schedule)
"""
Several built-in schedules are available: `ExponentialDecay`, `PiecewiseConstantDecay`,
`PolynomialDecay`, and `InverseTimeDecay`.
### Using callbacks to implement a dynamic learning rate schedule
A dynamic learning rate schedule (for instance, decreasing the learning rate when the
validation loss is no longer improving) cannot be achieved with these schedule objects,
since the optimizer does not have access to validation metrics.
However, callbacks do have access to all metrics, including validation metrics! You can
thus achieve this pattern by using a callback that modifies the current learning rate
on the optimizer. In fact, this is even built-in as the `ReduceLROnPlateau` callback.
"""
"""
## Visualizing loss and metrics during training with TensorBoard
The best way to keep an eye on your model during training is to use
[TensorBoard](https://www.tensorflow.org/tensorboard) -- a browser-based application
that you can run locally that provides you with:
- Live plots of the loss and metrics for training and evaluation
- (optionally) Visualizations of the histograms of your layer activations
- (optionally) 3D visualizations of the embedding spaces learned by your `Embedding`
layers
If you have installed TensorFlow with pip, you should be able to launch TensorBoard
from the command line:
```
tensorboard --logdir=/full_path_to_your_logs
```
"""
"""
### Using the TensorBoard callback
The easiest way to use TensorBoard with a Keras model and the `fit()` method is the
`TensorBoard` callback.
In the simplest case, just specify where you want the callback to write logs, and
you're good to go:
"""
keras.callbacks.TensorBoard(
log_dir="/full_path_to_your_logs",
histogram_freq=0, # How often to log histogram visualizations
embeddings_freq=0, # How often to log embedding visualizations
update_freq="epoch",
) # How often to write logs (default: once per epoch)
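"""
To actually collect logs during training, pass the callback instance to `fit()`.
A minimal sketch:
"""
model = get_compiled_model()
model.fit(
    x_train,
    y_train,
    epochs=1,
    batch_size=64,
    validation_split=0.2,
    callbacks=[keras.callbacks.TensorBoard(log_dir="/full_path_to_your_logs")],
)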
"""
For more information, see the
[documentation for the `TensorBoard` callback](https://keras.io/api/callbacks/tensorboard/).
"""
| keras-core/guides/training_with_built_in_methods.py/0 | {
"file_path": "keras-core/guides/training_with_built_in_methods.py",
"repo_id": "keras-core",
"token_count": 13375
} | 28 |
try:
import namex
except ImportError:
namex = None
# These dicts reference "canonical names" only
# (i.e. the first name an object was registered with).
REGISTERED_NAMES_TO_OBJS = {}
REGISTERED_OBJS_TO_NAMES = {}
def register_internal_serializable(path, symbol):
global REGISTERED_NAMES_TO_OBJS
if isinstance(path, (list, tuple)):
name = path[0]
else:
name = path
REGISTERED_NAMES_TO_OBJS[name] = symbol
REGISTERED_OBJS_TO_NAMES[symbol] = name
def get_symbol_from_name(name):
    return REGISTERED_NAMES_TO_OBJS.get(name, None)
def get_name_from_symbol(symbol):
    return REGISTERED_OBJS_TO_NAMES.get(symbol, None)
if namex:
class keras_core_export(namex.export):
def __init__(self, path):
super().__init__(package="keras_core", path=path)
def __call__(self, symbol):
register_internal_serializable(self.path, symbol)
return super().__call__(symbol)
else:
class keras_core_export:
def __init__(self, path):
self.path = path
def __call__(self, symbol):
register_internal_serializable(self.path, symbol)
return symbol
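# Example usage (illustrative sketch): decorating an object with its export
# path registers it as an internal serializable under that canonical name:
#
#   @keras_core_export("keras_core.some_module.SomeClass")
#   class SomeClass:
#       ...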
| keras-core/keras_core/api_export.py/0 | {
"file_path": "keras-core/keras_core/api_export.py",
"repo_id": "keras-core",
"token_count": 511
} | 29 |
from keras_core.api_export import keras_core_export
from keras_core.applications import imagenet_utils
from keras_core.applications import resnet
@keras_core_export(
[
"keras_core.applications.ResNet50V2",
"keras_core.applications.resnet_v2.ResNet50V2",
]
)
def ResNet50V2(
include_top=True,
weights="imagenet",
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation="softmax",
):
"""Instantiates the ResNet50V2 architecture."""
def stack_fn(x):
x = resnet.stack_residual_blocks_v2(x, 64, 3, name="conv2")
x = resnet.stack_residual_blocks_v2(x, 128, 4, name="conv3")
x = resnet.stack_residual_blocks_v2(x, 256, 6, name="conv4")
return resnet.stack_residual_blocks_v2(
x, 512, 3, stride1=1, name="conv5"
)
return resnet.ResNet(
stack_fn,
True,
True,
"resnet50v2",
include_top,
weights,
input_tensor,
input_shape,
pooling,
classes,
classifier_activation=classifier_activation,
)
@keras_core_export(
[
"keras_core.applications.ResNet101V2",
"keras_core.applications.resnet_v2.ResNet101V2",
]
)
def ResNet101V2(
include_top=True,
weights="imagenet",
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation="softmax",
):
"""Instantiates the ResNet101V2 architecture."""
def stack_fn(x):
x = resnet.stack_residual_blocks_v2(x, 64, 3, name="conv2")
x = resnet.stack_residual_blocks_v2(x, 128, 4, name="conv3")
x = resnet.stack_residual_blocks_v2(x, 256, 23, name="conv4")
return resnet.stack_residual_blocks_v2(
x, 512, 3, stride1=1, name="conv5"
)
return resnet.ResNet(
stack_fn,
True,
True,
"resnet101v2",
include_top,
weights,
input_tensor,
input_shape,
pooling,
classes,
classifier_activation=classifier_activation,
)
@keras_core_export(
[
"keras_core.applications.ResNet152V2",
"keras_core.applications.resnet_v2.ResNet152V2",
]
)
def ResNet152V2(
include_top=True,
weights="imagenet",
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation="softmax",
):
"""Instantiates the ResNet152V2 architecture."""
def stack_fn(x):
x = resnet.stack_residual_blocks_v2(x, 64, 3, name="conv2")
x = resnet.stack_residual_blocks_v2(x, 128, 8, name="conv3")
x = resnet.stack_residual_blocks_v2(x, 256, 36, name="conv4")
return resnet.stack_residual_blocks_v2(
x, 512, 3, stride1=1, name="conv5"
)
return resnet.ResNet(
stack_fn,
True,
True,
"resnet152v2",
include_top,
weights,
input_tensor,
input_shape,
pooling,
classes,
classifier_activation=classifier_activation,
)
@keras_core_export("keras_core.applications.resnet_v2.preprocess_input")
def preprocess_input(x, data_format=None):
return imagenet_utils.preprocess_input(
x, data_format=data_format, mode="tf"
)
@keras_core_export("keras_core.applications.resnet_v2.decode_predictions")
def decode_predictions(preds, top=5):
return imagenet_utils.decode_predictions(preds, top=top)
preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
mode="",
ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TF,
error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC,
)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
DOC = """
Reference:
- [Identity Mappings in Deep Residual Networks](
https://arxiv.org/abs/1603.05027) (CVPR 2016)
For image classification use cases, see [this page for detailed examples](
https://keras.io/api/applications/#usage-examples-for-image-classification-models).
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](
https://keras.io/guides/transfer_learning/).
Note: each Keras Application expects a specific kind of input preprocessing.
For ResNet, call `keras_core.applications.resnet_v2.preprocess_input` on your
inputs before passing them to the model. `resnet_v2.preprocess_input` will
scale input pixels between -1 and 1.
Args:
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
`"imagenet"` (pre-training on ImageNet), or the path to the weights
file to be loaded.
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified if `include_top`
is `False` (otherwise the input shape has to be `(224, 224, 3)`
(with `"channels_last"` data format) or `(3, 224, 224)`
(with `"channels_first"` data format). It should have exactly 3
inputs channels, and width and height should be no smaller than 32.
E.g. `(200, 200, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction when `include_top`
is `False`.
- `None` means that the output of the model will be the 4D tensor
output of the last convolutional block.
- `avg` means that global average pooling will be applied to the output
of the last convolutional block, and thus the output of the
model will be a 2D tensor.
- `max` means that global max pooling will be applied.
classes: optional number of classes to classify images into, only to be
specified if `include_top` is `True`, and if no `weights` argument is
specified.
classifier_activation: A `str` or callable. The activation function to
use on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
When loading pretrained weights, `classifier_activation` can only
be `None` or `"softmax"`.
Returns:
A Model instance.
"""
setattr(ResNet50V2, "__doc__", ResNet50V2.__doc__ + DOC)
setattr(ResNet101V2, "__doc__", ResNet101V2.__doc__ + DOC)
setattr(ResNet152V2, "__doc__", ResNet152V2.__doc__ + DOC)
| keras-core/keras_core/applications/resnet_v2.py/0 | {
"file_path": "keras-core/keras_core/applications/resnet_v2.py",
"repo_id": "keras-core",
"token_count": 2778
} | 30 |
import numpy as np
from keras_core import backend
from keras_core import ops
from keras_core import testing
from keras_core.backend.common.stateless_scope import StatelessScope
class TestStatelessScope(testing.TestCase):
def test_basic_flow(self):
var1 = backend.Variable(np.zeros((2,)))
var2 = backend.Variable(np.zeros((2,)))
var_out = backend.Variable(np.zeros((2,)))
value1 = ops.ones(shape=(2,))
value2 = ops.ones(shape=(2,))
with StatelessScope(
state_mapping=[(var1, value1), (var2, value2)]
) as scope:
out = var1 + var2
var_out.assign(out)
var_out_value = var_out + 0.0
# Inside scope: new value is used.
self.assertAllClose(var_out_value, 2 * np.ones((2,)))
# Out of scope: old value is used.
var_out_value = var_out + 0.0
self.assertAllClose(var_out_value, np.zeros((2,)))
# Updates are tracked.
var_out_value = scope.get_current_value(var_out)
self.assertAllClose(var_out_value, 2 * np.ones((2,)))
# Updates can be reapplied.
var_out.assign(scope.get_current_value(var_out))
self.assertAllClose(var_out_value, 2 * np.ones((2,)))
def test_invalid_key_in_state_mapping(self):
# var1 = backend.Variable(np.zeros((2,)))
invalid_key = "not_a_keras_variable"
value1 = ops.ones(shape=(2,))
with self.assertRaisesRegex(
ValueError, "all keys in argument `mapping` must be KerasVariable"
):
StatelessScope(state_mapping=[(invalid_key, value1)])
def test_invalid_value_shape_in_state_mapping(self):
var1 = backend.Variable(np.zeros((2,)))
invalid_value = ops.ones(shape=(3,)) # Incorrect shape
with self.assertRaisesRegex(
ValueError, "all values in argument `mapping` must be tensors with"
):
StatelessScope(state_mapping=[(var1, invalid_value)])
| keras-core/keras_core/backend/common/stateless_scope_test.py/0 | {
"file_path": "keras-core/keras_core/backend/common/stateless_scope_test.py",
"repo_id": "keras-core",
"token_count": 900
} | 31 |
from keras_core.backend.numpy import core
from keras_core.backend.numpy import image
from keras_core.backend.numpy import math
from keras_core.backend.numpy import nn
from keras_core.backend.numpy import numpy
from keras_core.backend.numpy import random
from keras_core.backend.numpy.core import SUPPORTS_SPARSE_TENSORS
from keras_core.backend.numpy.core import Variable
from keras_core.backend.numpy.core import cast
from keras_core.backend.numpy.core import compute_output_spec
from keras_core.backend.numpy.core import cond
from keras_core.backend.numpy.core import convert_to_numpy
from keras_core.backend.numpy.core import convert_to_tensor
from keras_core.backend.numpy.core import is_tensor
from keras_core.backend.numpy.core import shape
from keras_core.backend.numpy.core import vectorized_map
from keras_core.backend.numpy.rnn import cudnn_ok
from keras_core.backend.numpy.rnn import gru
from keras_core.backend.numpy.rnn import lstm
from keras_core.backend.numpy.rnn import rnn
| keras-core/keras_core/backend/numpy/__init__.py/0 | {
"file_path": "keras-core/keras_core/backend/numpy/__init__.py",
"repo_id": "keras-core",
"token_count": 351
} | 32 |
import tensorflow as tf
from keras_core.backend.tensorflow.core import name_scope
from keras_core.testing import TestCase
class TFNameScopeTest(TestCase):
def test_stacking(self):
self.assertEqual(tf.Variable(0, name="x").name, "x:0")
with name_scope("outer") as outer:
self.assertEqual(outer.name, "outer")
self.assertEqual(tf.Variable(0, name="x").name, "outer/x:0")
with name_scope("middle") as middle:
self.assertEqual(middle.name, "middle")
self.assertEqual(
tf.Variable(0, name="x").name, "outer/middle/x:0"
)
with name_scope("inner") as inner:
self.assertEqual(inner.name, "inner")
self.assertEqual(
tf.Variable(0, name="x").name, "outer/middle/inner/x:0"
)
self.assertEqual(
tf.Variable(0, name="x").name, "outer/middle/x:0"
)
self.assertEqual(tf.Variable(0, name="x").name, "outer/x:0")
self.assertEqual(tf.Variable(0, name="x").name, "x:0")
def test_deduplicate(self):
self.assertEqual(tf.Variable(0, name="x").name, "x:0")
with name_scope("name", caller=1):
with name_scope("name", caller=1):
self.assertEqual(tf.Variable(0, name="x").name, "name/x:0")
self.assertEqual(tf.Variable(0, name="x").name, "x:0")
with name_scope("name"):
with name_scope("name"):
self.assertEqual(tf.Variable(0, name="x").name, "name/name/x:0")
| keras-core/keras_core/backend/tensorflow/name_scope_test.py/0 | {
"file_path": "keras-core/keras_core/backend/tensorflow/name_scope_test.py",
"repo_id": "keras-core",
"token_count": 842
} | 33 |
import numpy as np
import torch
import torch.nn.functional as tnn
from keras_core.backend import standardize_data_format
from keras_core.backend import standardize_dtype
from keras_core.backend.common.backend_utils import (
compute_conv_transpose_padding_args_for_torch,
)
from keras_core.backend.config import epsilon
from keras_core.backend.torch.core import cast
from keras_core.backend.torch.core import convert_to_tensor
from keras_core.backend.torch.core import get_device
from keras_core.backend.torch.numpy import expand_dims
from keras_core.backend.torch.numpy import maximum
from keras_core.backend.torch.numpy import where
from keras_core.utils.argument_validation import standardize_tuple
def relu(x):
x = convert_to_tensor(x)
return tnn.relu(x)
def relu6(x):
x = convert_to_tensor(x)
return tnn.relu6(x)
def sigmoid(x):
x = convert_to_tensor(x)
return tnn.sigmoid(x)
def tanh(x):
x = convert_to_tensor(x)
return tnn.tanh(x)
def softplus(x):
x = convert_to_tensor(x)
return tnn.softplus(x)
def softsign(x):
x = convert_to_tensor(x)
return tnn.softsign(x)
def silu(x, beta=1.0):
x = convert_to_tensor(x)
return x * sigmoid(beta * x)
def log_sigmoid(x):
x = convert_to_tensor(x)
return tnn.logsigmoid(x)
def leaky_relu(x, negative_slope=0.2):
x = convert_to_tensor(x)
return tnn.leaky_relu(x, negative_slope=negative_slope)
def hard_sigmoid(x):
x = convert_to_tensor(x)
return tnn.hardsigmoid(x)
def elu(x, alpha=1.0):
x = convert_to_tensor(x)
return tnn.elu(x, alpha)
def selu(x):
x = convert_to_tensor(x)
return tnn.selu(x)
def gelu(x, approximate=True):
# TODO: torch.nn.gelu expects string approximate of `"none"` or `"tanh"`
x = convert_to_tensor(x)
if approximate:
return tnn.gelu(x, approximate="tanh")
return tnn.gelu(x)
def softmax(x, axis=-1):
x = convert_to_tensor(x)
if axis is None:
# Unlike numpy, PyTorch will handle axis=None as axis=-1.
# We need this workaround for the reduction on every dim.
output = torch.reshape(x, [-1])
output = tnn.softmax(output, dim=-1)
return torch.reshape(output, x.shape)
return tnn.softmax(x, dim=axis)
def log_softmax(x, axis=-1):
x = convert_to_tensor(x)
if axis is None:
# Unlike numpy, PyTorch will handle axis=None as axis=-1.
# We need this workaround for the reduction on every dim.
output = torch.reshape(x, [-1])
output = tnn.log_softmax(output, dim=-1)
return torch.reshape(output, x.shape)
return tnn.log_softmax(x, dim=axis)
def _compute_padding_length(
input_length, kernel_length, stride, dilation_rate=1
):
"""Compute padding length along one dimension."""
if (input_length - 1) % stride == 0:
total_padding_length = dilation_rate * (kernel_length - 1)
else:
total_padding_length = (
dilation_rate * (kernel_length - 1) - (input_length - 1) % stride
)
left_padding = int(np.floor(total_padding_length / 2))
right_padding = int(np.ceil(total_padding_length / 2))
return (left_padding, right_padding)
def _apply_same_padding(
inputs, kernel_size, strides, operation_type, dilation_rate=1
):
spatial_shape = inputs.shape[2:]
num_spatial_dims = len(spatial_shape)
padding = ()
for i in range(num_spatial_dims):
if operation_type == "pooling":
padding_size = _compute_padding_length(
spatial_shape[i], kernel_size[i], strides[i]
)
mode = "replicate"
else:
dilation_rate = standardize_tuple(
dilation_rate, num_spatial_dims, "dilation_rate"
)
padding_size = _compute_padding_length(
spatial_shape[i], kernel_size[i], strides[i], dilation_rate[i]
)
mode = "constant"
padding = padding_size + padding
return tnn.pad(inputs, padding, mode=mode)
def _transpose_spatial_inputs(inputs):
num_spatial_dims = inputs.ndim - 2
# Torch pooling does not support `channels_last` format, so
# we need to transpose to `channels_first` format.
if num_spatial_dims == 1:
inputs = torch.permute(inputs, (0, 2, 1))
elif num_spatial_dims == 2:
inputs = torch.permute(inputs, (0, 3, 1, 2))
elif num_spatial_dims == 3:
inputs = torch.permute(inputs, (0, 4, 1, 2, 3))
else:
raise ValueError(
"Inputs must have ndim=3, 4 or 5, "
"corresponding to 1D, 2D and 3D inputs. "
f"Received input shape: {inputs.shape}."
)
return inputs
def _transpose_spatial_outputs(outputs):
    # Undo the transpose in `_transpose_spatial_inputs`.
num_spatial_dims = len(outputs.shape) - 2
if num_spatial_dims == 1:
outputs = torch.permute(outputs, (0, 2, 1))
elif num_spatial_dims == 2:
outputs = torch.permute(outputs, (0, 2, 3, 1))
elif num_spatial_dims == 3:
outputs = torch.permute(outputs, (0, 2, 3, 4, 1))
return outputs
def _transpose_conv_kernel(kernel):
# Torch requires conv kernel of format
# `(out_channels, in_channels, spatial_dims)`, we need to transpose.
num_spatial_dims = len(kernel.shape) - 2
if num_spatial_dims == 1:
kernel = torch.permute(kernel, (2, 1, 0))
elif num_spatial_dims == 2:
kernel = torch.permute(kernel, (3, 2, 0, 1))
elif num_spatial_dims == 3:
kernel = torch.permute(kernel, (4, 3, 0, 1, 2))
return kernel
def max_pool(
inputs,
pool_size,
strides=None,
padding="valid",
data_format=None,
):
inputs = convert_to_tensor(inputs)
num_spatial_dims = inputs.ndim - 2
pool_size = standardize_tuple(pool_size, num_spatial_dims, "pool_size")
if strides is None:
strides = pool_size
else:
strides = standardize_tuple(strides, num_spatial_dims, "strides")
data_format = standardize_data_format(data_format)
if data_format == "channels_last":
inputs = _transpose_spatial_inputs(inputs)
if padding == "same":
# Torch does not natively support `"same"` padding, we need to manually
# apply the right amount of padding to `inputs`.
inputs = _apply_same_padding(
inputs, pool_size, strides, operation_type="pooling"
)
device = get_device()
# Torch max pooling ops do not support symbolic tensors.
# Create a real tensor to execute the ops.
if device == "meta":
inputs = torch.empty(
size=inputs.shape, dtype=inputs.dtype, device="cpu"
)
if num_spatial_dims == 1:
outputs = tnn.max_pool1d(inputs, kernel_size=pool_size, stride=strides)
elif num_spatial_dims == 2:
outputs = tnn.max_pool2d(inputs, kernel_size=pool_size, stride=strides)
elif num_spatial_dims == 3:
outputs = tnn.max_pool3d(inputs, kernel_size=pool_size, stride=strides)
else:
raise ValueError(
"Inputs to pooling op must have ndim=3, 4 or 5, "
"corresponding to 1D, 2D and 3D inputs. "
f"Received input shape: {inputs.shape}."
)
outputs = outputs.to(device)
if data_format == "channels_last":
outputs = _transpose_spatial_outputs(outputs)
return outputs
def average_pool(
inputs,
pool_size,
strides=None,
padding="valid",
data_format=None,
):
inputs = convert_to_tensor(inputs)
num_spatial_dims = inputs.ndim - 2
pool_size = standardize_tuple(pool_size, num_spatial_dims, "pool_size")
if strides is None:
strides = pool_size
else:
strides = standardize_tuple(strides, num_spatial_dims, "strides")
data_format = standardize_data_format(data_format)
if data_format == "channels_last":
inputs = _transpose_spatial_inputs(inputs)
padding_value = 0
if padding == "same":
spatial_shape = inputs.shape[2:]
num_spatial_dims = len(spatial_shape)
padding_value = []
uneven_padding = []
for i in range(num_spatial_dims):
padding_size = _compute_padding_length(
spatial_shape[i], pool_size[i], strides[i]
)
# Torch only supports even padding on each dim, to replicate the
# behavior of "same" padding of `tf.keras` as much as possible,
# we need to pad evenly using the shorter padding.
padding_value.append(padding_size[0])
if padding_size[0] != padding_size[1]:
# Handle unequal padding.
# `torch.nn.pad` sets padding value in the reverse order.
uneven_padding = [0, 1] + uneven_padding
inputs = tnn.pad(inputs, uneven_padding)
if num_spatial_dims == 1:
outputs = tnn.avg_pool1d(
inputs,
kernel_size=pool_size,
stride=strides,
padding=padding_value,
count_include_pad=False,
)
elif num_spatial_dims == 2:
outputs = tnn.avg_pool2d(
inputs,
kernel_size=pool_size,
stride=strides,
padding=padding_value,
count_include_pad=False,
)
elif num_spatial_dims == 3:
outputs = tnn.avg_pool3d(
inputs,
kernel_size=pool_size,
stride=strides,
padding=padding_value,
count_include_pad=False,
)
else:
raise ValueError(
"Inputs to pooling op must have ndim=3, 4 or 5, "
"corresponding to 1D, 2D and 3D inputs. "
f"Received input shape: {inputs.shape}."
)
if data_format == "channels_last":
outputs = _transpose_spatial_outputs(outputs)
return outputs
def conv(
inputs,
kernel,
strides=1,
padding="valid",
data_format=None,
dilation_rate=1,
):
inputs = convert_to_tensor(inputs)
kernel = convert_to_tensor(kernel)
num_spatial_dims = inputs.ndim - 2
strides = standardize_tuple(strides, num_spatial_dims, "strides")
data_format = standardize_data_format(data_format)
if data_format == "channels_last":
inputs = _transpose_spatial_inputs(inputs)
# Transpose kernel from keras format to torch format.
kernel = _transpose_conv_kernel(kernel)
if padding == "same":
inputs = _apply_same_padding(
inputs,
kernel.shape[2:],
strides,
operation_type="conv",
dilation_rate=dilation_rate,
)
channels = inputs.shape[1]
kernel_in_channels = kernel.shape[1]
if channels % kernel_in_channels > 0:
raise ValueError(
"The number of input channels must be evenly divisible by "
f"kernel.shape[1]. Received: inputs.shape={inputs.shape}, "
f"kernel.shape={kernel.shape}"
)
groups = channels // kernel_in_channels
if num_spatial_dims == 1:
outputs = tnn.conv1d(
inputs,
kernel,
stride=strides,
dilation=dilation_rate,
groups=groups,
)
elif num_spatial_dims == 2:
outputs = tnn.conv2d(
inputs,
kernel,
stride=strides,
dilation=dilation_rate,
groups=groups,
)
elif num_spatial_dims == 3:
outputs = tnn.conv3d(
inputs,
kernel,
stride=strides,
dilation=dilation_rate,
groups=groups,
)
else:
raise ValueError(
"Inputs to conv operation should have ndim=3, 4, or 5,"
"corresponding to 1D, 2D and 3D inputs. Received input "
f"shape: {inputs.shape}."
)
if data_format == "channels_last":
outputs = _transpose_spatial_outputs(outputs)
return outputs
def depthwise_conv(
inputs,
kernel,
strides=1,
padding="valid",
data_format=None,
dilation_rate=1,
):
kernel = convert_to_tensor(kernel)
kernel = torch.reshape(
kernel, kernel.shape[:-2] + (1, kernel.shape[-2] * kernel.shape[-1])
)
return conv(inputs, kernel, strides, padding, data_format, dilation_rate)
def separable_conv(
inputs,
depthwise_kernel,
pointwise_kernel,
strides=1,
padding="valid",
data_format=None,
dilation_rate=1,
):
depthwise_conv_output = depthwise_conv(
inputs,
depthwise_kernel,
strides,
padding,
data_format,
dilation_rate,
)
return conv(
depthwise_conv_output,
pointwise_kernel,
strides=1,
padding="valid",
data_format=data_format,
dilation_rate=dilation_rate,
)
def conv_transpose(
inputs,
kernel,
strides=1,
padding="valid",
output_padding=None,
data_format=None,
dilation_rate=1,
):
inputs = convert_to_tensor(inputs)
kernel = convert_to_tensor(kernel)
num_spatial_dims = inputs.ndim - 2
strides = standardize_tuple(strides, num_spatial_dims, "strides")
data_format = standardize_data_format(data_format)
(
torch_padding,
torch_output_padding,
) = compute_conv_transpose_padding_args_for_torch(
input_shape=inputs.shape,
kernel_shape=kernel.shape,
strides=strides,
padding=padding,
output_padding=output_padding,
dilation_rate=dilation_rate,
)
if data_format == "channels_last":
inputs = _transpose_spatial_inputs(inputs)
# Transpose kernel from keras format to torch format.
kernel = _transpose_conv_kernel(kernel)
kernel_spatial_shape = kernel.shape[2:]
if isinstance(dilation_rate, int):
dilation_rate = [dilation_rate] * len(kernel_spatial_shape)
if num_spatial_dims == 1:
outputs = tnn.conv_transpose1d(
inputs,
kernel,
stride=strides,
padding=torch_padding,
output_padding=torch_output_padding,
dilation=dilation_rate,
)
elif num_spatial_dims == 2:
outputs = tnn.conv_transpose2d(
inputs,
kernel,
stride=strides,
padding=torch_padding,
output_padding=torch_output_padding,
dilation=dilation_rate,
)
elif num_spatial_dims == 3:
outputs = tnn.conv_transpose3d(
inputs,
kernel,
stride=strides,
padding=torch_padding,
output_padding=torch_output_padding,
dilation=dilation_rate,
)
else:
raise ValueError(
"Inputs to conv transpose operation should have ndim=3, 4, or 5,"
"corresponding to 1D, 2D and 3D inputs. Received input "
f"shape: {inputs.shape}."
)
if data_format == "channels_last":
outputs = _transpose_spatial_outputs(outputs)
return outputs
def one_hot(x, num_classes, axis=-1, dtype="float32"):
    # Axis is the output axis. By default, PyTorch outputs to the last axis.
# If axis is not last, change output to axis and shift remaining elements.
x = convert_to_tensor(x, dtype=torch.long)
# Torch one_hot does not natively handle negative values, so we add some
# manual handling for negatives in the input to one_hot by using max(x, 0).
# The output will have some invalid results, so we set them back to 0 using
# `where` afterwards.
output = tnn.one_hot(maximum(x, 0), num_classes)
output = where(expand_dims(x, axis=-1) >= 0, output, 0)
output = convert_to_tensor(output, dtype=dtype)
dims = output.dim()
if axis != -1 and axis != dims:
new_axes_order = list(range(dims))
        new_axes_order[axis] = -1  # Shifts output to axis position
# Shift remaining axes with offset by 1 since output moved to `axis`.
for ax in range(axis + 1, dims):
new_axes_order[ax] -= 1
output = output.permute(new_axes_order)
return output
def multi_hot(x, num_classes, axis=-1, dtype="float32"):
reduction_axis = 1 if len(x.shape) > 1 else 0
outputs = torch.amax(
one_hot(cast(x, "int32"), num_classes, axis=axis, dtype=dtype),
dim=reduction_axis,
)
return outputs
def categorical_crossentropy(target, output, from_logits=False, axis=-1):
target = convert_to_tensor(target)
output = convert_to_tensor(output)
if target.shape != output.shape:
raise ValueError(
"Arguments `target` and `output` must have the same shape. "
"Received: "
f"target.shape={target.shape}, output.shape={output.shape}"
)
if len(target.shape) < 1:
raise ValueError(
"Arguments `target` and `output` must be at least rank 1. "
"Received: "
f"target.shape={target.shape}, output.shape={output.shape}"
)
if from_logits:
log_prob = tnn.log_softmax(output, dim=axis)
else:
output = output / torch.sum(output, dim=axis, keepdim=True)
output = torch.clip(output, epsilon(), 1.0 - epsilon())
log_prob = torch.log(output)
return -torch.sum(target * log_prob, dim=axis)
def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1):
target = convert_to_tensor(target, dtype=torch.long)
output = convert_to_tensor(output)
if len(target.shape) == len(output.shape) and target.shape[-1] == 1:
target = torch.squeeze(target, dim=-1)
if len(output.shape) < 1:
raise ValueError(
"Argument `output` must be at least rank 1. "
"Received: "
f"output.shape={output.shape}"
)
if target.shape != output.shape[:-1]:
raise ValueError(
"Arguments `target` and `output` must have the same shape "
"up until the last dimension: "
f"target.shape={target.shape}, output.shape={output.shape}"
)
if from_logits:
log_prob = tnn.log_softmax(output, dim=axis)
else:
output = output / torch.sum(output, dim=axis, keepdim=True)
output = torch.clip(output, epsilon(), 1.0 - epsilon())
log_prob = torch.log(output)
target = one_hot(target, output.shape[axis], axis=axis)
return -torch.sum(target * log_prob, dim=axis)
def binary_crossentropy(target, output, from_logits=False):
target = convert_to_tensor(target)
output = convert_to_tensor(output)
if target.shape != output.shape:
raise ValueError(
"Arguments `target` and `output` must have the same shape. "
"Received: "
f"target.shape={target.shape}, output.shape={output.shape}"
)
    # By default, PyTorch reduces the loss with `mean` over all elements;
    # set reduction to `none` to keep the per-sample values.
if from_logits:
return tnn.binary_cross_entropy_with_logits(
output, target, reduction="none"
)
else:
output = torch.clip(output, epsilon(), 1.0 - epsilon())
return tnn.binary_cross_entropy(output, target, reduction="none")
def moments(x, axes, keepdims=False):
x = convert_to_tensor(x)
# The dynamic range of float16 is too limited for statistics. As a
# workaround, we simply perform the operations on float32 and convert back
# to float16
need_cast = False
ori_dtype = standardize_dtype(x.dtype)
if ori_dtype == "float16":
need_cast = True
x = cast(x, "float32")
mean = torch.mean(x, dim=axes, keepdim=True)
    # The variance is computed using $Var = E[|x|^2] - |E[x]|^2$. It is faster
    # but less numerically stable.
# Note: stop_gradient does not change the gradient to the mean, because that
# gradient is zero.
variance = torch.mean(
torch.square(x), dim=axes, keepdim=True
) - torch.square(mean.detach())
if not keepdims:
mean = torch.squeeze(mean, axes)
variance = torch.squeeze(variance, axes)
if need_cast:
# avoid overflow and underflow when casting from float16 to float32
mean = torch.clip(
mean,
torch.finfo(torch.float16).min,
torch.finfo(torch.float16).max,
)
variance = torch.clip(
variance,
torch.finfo(torch.float16).min,
torch.finfo(torch.float16).max,
)
mean = cast(mean, ori_dtype)
variance = cast(variance, ori_dtype)
return mean, variance
| keras-core/keras_core/backend/torch/nn.py/0 | {
"file_path": "keras-core/keras_core/backend/torch/nn.py",
"repo_id": "keras-core",
"token_count": 9280
} | 34 |
"""Boston housing price regression dataset."""
import numpy as np
from keras_core.api_export import keras_core_export
from keras_core.utils.file_utils import get_file
@keras_core_export("keras_core.datasets.boston_housing.load_data")
def load_data(path="california_housing.npz", test_split=0.2, seed=113):
"""Loads the California Housing dataset.
This dataset was obtained from the [StatLib repository](
https://www.dcc.fc.up.pt/~ltorgo/Regression/cal_housing.html).
It's a continuous regression dataset with 20,640 samples with
8 features each.
The target variable is a scalar: the median house value
for California districts, in dollars.
The 8 input features are the following:
- MedInc: median income in block group
- HouseAge: median house age in block group
- AveRooms: average number of rooms per household
- AveBedrms: average number of bedrooms per household
- Population: block group population
- AveOccup: average number of household members
- Latitude: block group latitude
- Longitude: block group longitude
This dataset was derived from the 1990 U.S. census, using one row
per census block group. A block group is the smallest geographical
unit for which the U.S. Census Bureau publishes sample data
(a block group typically has a population of 600 to 3,000 people).
A household is a group of people residing within a home.
Since the average number of rooms and bedrooms in this dataset are
provided per household, these columns may take surprisingly large
values for block groups with few households and many empty houses,
such as vacation resorts.
Args:
path: path where to cache the dataset locally
(relative to `~/.keras/datasets`).
test_split: fraction of the data to reserve as test set.
seed: Random seed for shuffling the data
before computing the test split.
Returns:
Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
**`x_train`, `x_test`**: numpy arrays with shape `(num_samples, 8)`
containing either the training samples (for `x_train`),
    or test samples (for `x_test`).
**`y_train`, `y_test`**: numpy arrays of shape `(num_samples,)`
containing the target scalars. The targets are float scalars
typically between 25,000 and 500,000 that represent
the home prices in dollars.
"""
assert 0 <= test_split < 1
origin_folder = (
"https://storage.googleapis.com/tensorflow/tf-keras-datasets/"
)
path = get_file(
path,
origin=origin_folder + "california_housing.npz",
file_hash=( # noqa: E501
"1a2e3a52e0398de6463aebe6f4a8da34fb21fbb6b934cf88c3425e766f2a1a6f"
),
)
with np.load(path, allow_pickle=True) as f:
x = f["x"]
y = f["y"]
rng = np.random.RandomState(seed)
indices = np.arange(len(x))
rng.shuffle(indices)
x = x[indices]
y = y[indices]
x_train = np.array(x[: int(len(x) * (1 - test_split))])
y_train = np.array(y[: int(len(x) * (1 - test_split))])
x_test = np.array(x[int(len(x) * (1 - test_split)) :])
y_test = np.array(y[int(len(x) * (1 - test_split)) :])
return (x_train, y_train), (x_test, y_test)
| keras-core/keras_core/datasets/california_housing.py/0 | {
"file_path": "keras-core/keras_core/datasets/california_housing.py",
"repo_id": "keras-core",
"token_count": 1232
} | 35 |
import numpy as np
from keras_core import backend
from keras_core import initializers
from keras_core import testing
class ConstantInitializersTest(testing.TestCase):
def test_zeros_initializer(self):
shape = (3, 3)
initializer = initializers.Zeros()
values = initializer(shape=shape)
self.assertEqual(values.shape, shape)
np_values = backend.convert_to_numpy(values)
self.assertAllClose(np_values, np.zeros(shape=shape))
self.run_class_serialization_test(initializer)
def test_ones_initializer(self):
shape = (3, 3)
initializer = initializers.Ones()
values = initializer(shape=shape)
self.assertEqual(values.shape, shape)
np_values = backend.convert_to_numpy(values)
self.assertAllClose(np_values, np.ones(shape=shape))
self.run_class_serialization_test(initializer)
def test_constant_initializer(self):
shape = (3, 3)
constant_value = 6.0
initializer = initializers.Constant(value=constant_value)
values = initializer(shape=shape)
self.assertEqual(values.shape, shape)
np_values = backend.convert_to_numpy(values)
self.assertAllClose(
np_values, np.full(shape=shape, fill_value=constant_value)
)
self.run_class_serialization_test(initializer)
def test_identity_initializer(self):
shape = (3, 3)
gain = 2
initializer = initializers.Identity(gain=gain)
values = initializer(shape=shape)
self.assertEqual(values.shape, shape)
np_values = backend.convert_to_numpy(values)
self.assertAllClose(np_values, np.eye(*shape) * gain)
self.run_class_serialization_test(initializer)
| keras-core/keras_core/initializers/constant_initializers_test.py/0 | {
"file_path": "keras-core/keras_core/initializers/constant_initializers_test.py",
"repo_id": "keras-core",
"token_count": 737
} | 36 |
from keras_core import activations
from keras_core import backend
from keras_core.api_export import keras_core_export
from keras_core.layers.layer import Layer
def _large_negative_number(dtype):
"""Return a Large negative number based on dtype."""
if backend.standardize_dtype(dtype) == "float16":
return -3e4
return -1e9
@keras_core_export("keras_core.layers.Softmax")
class Softmax(Layer):
"""Softmax activation layer.
Formula:
``` python
exp_x = exp(x - max(x))
f(x) = exp_x / sum(exp_x)
```
Example:
    >>> softmax_layer = keras_core.layers.Softmax()
    >>> x = np.array([1.0, 2.0, 1.0])
    >>> softmax_layer(x)
    [0.21194157, 0.5761169, 0.21194157]
Args:
axis: Integer, or list of Integers, axis along which the softmax
normalization is applied.
**kwargs: Base layer keyword arguments, such as `name` and `dtype`.
Call arguments:
inputs: The inputs (logits) to the softmax layer.
mask: A boolean mask of the same shape as `inputs`. The mask
specifies 1 to keep and 0 to mask. Defaults to `None`.
Returns:
Softmaxed output with the same shape as `inputs`.
"""
def __init__(self, axis=-1, **kwargs):
super().__init__(**kwargs)
self.supports_masking = True
self.axis = axis
self.built = True
def call(self, inputs, mask=None):
if mask is not None:
adder = (
1.0 - backend.cast(mask, inputs.dtype)
) * _large_negative_number(inputs.dtype)
inputs += adder
if isinstance(self.axis, (tuple, list)):
if len(self.axis) > 1:
return backend.numpy.exp(
inputs
- backend.math.logsumexp(
inputs, axis=self.axis, keepdims=True
)
)
else:
return activations.softmax(inputs, axis=self.axis[0])
return activations.softmax(inputs, axis=self.axis)
def get_config(self):
config = super().get_config()
config.update({"axis": self.axis})
return config
def compute_output_shape(self, input_shape):
return input_shape
| keras-core/keras_core/layers/activations/softmax.py/0 | {
"file_path": "keras-core/keras_core/layers/activations/softmax.py",
"repo_id": "keras-core",
"token_count": 1039
} | 37 |
import pytest
from absl.testing import parameterized
from keras_core import layers
from keras_core import testing
class EinsumDenseTest(testing.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
{
"testcase_name": "_1d_end_weight",
"equation": "ab,b->a",
"bias_axes": None,
"input_shape": (2, 32),
"output_shape": (),
"expected_kernel_shape": (32,),
"expected_bias_shape": None,
"expected_output_shape": (2,),
},
{
"testcase_name": "_2d_middle_weight",
"equation": "ab,bc->ac",
"bias_axes": None,
"input_shape": (2, 32),
"output_shape": (64),
"expected_kernel_shape": (32, 64),
"expected_bias_shape": None,
"expected_output_shape": (2, 64),
},
{
"testcase_name": "_3d_bert",
"equation": "abc,cde->abde",
"bias_axes": None,
"input_shape": (2, 1, 2),
"output_shape": (1, 3, 4),
"expected_kernel_shape": (2, 3, 4),
"expected_bias_shape": None,
"expected_output_shape": (2, 1, 3, 4),
},
{
"testcase_name": "_3d_3_bias",
"equation": "abc,cde->abde",
"bias_axes": "e",
"input_shape": (2, 1, 2),
"output_shape": (1, 3, 4),
"expected_kernel_shape": (2, 3, 4),
"expected_bias_shape": (4,),
"expected_output_shape": (2, 1, 3, 4),
},
{
"testcase_name": "_3d_2_bias",
"equation": "abc,cde->abde",
"bias_axes": "d",
"input_shape": (2, 1, 2),
"output_shape": (1, 3, 4),
"expected_kernel_shape": (2, 3, 4),
"expected_bias_shape": (3, 1),
"expected_output_shape": (2, 1, 3, 4),
},
{
"testcase_name": "_3d_1_3_bias",
"equation": "abc,cde->abde",
"bias_axes": "be",
"input_shape": (2, 7, 2),
"output_shape": (7, 3, 4),
"expected_kernel_shape": (2, 3, 4),
"expected_bias_shape": (7, 1, 4),
"expected_output_shape": (2, 7, 3, 4),
},
{
"testcase_name": "_3d_bert_projection",
"equation": "BFNH,NHD->BFD",
"bias_axes": None,
"input_shape": (2, 1, 2, 3),
"output_shape": (1, 4),
"expected_kernel_shape": (2, 3, 4),
"expected_bias_shape": None,
"expected_output_shape": (2, 1, 4),
},
{
"testcase_name": "_2d_bert",
"equation": "abc,cd->abd",
"bias_axes": None,
"input_shape": (2, 1, 2),
"output_shape": (1, 4),
"expected_kernel_shape": (2, 4),
"expected_bias_shape": None,
"expected_output_shape": (2, 1, 4),
},
{
"testcase_name": "_embedding_1d",
"equation": "i,d->id",
"bias_axes": None,
"input_shape": (2,),
"output_shape": (2,),
"expected_kernel_shape": (2,),
"expected_bias_shape": None,
"expected_output_shape": (2, 2),
},
{
"testcase_name": "_xlnet_lm",
"equation": "ibd,nd->ibn",
"bias_axes": None,
"input_shape": (2, 2, 1),
"output_shape": (2, 2),
"expected_kernel_shape": (2, 1),
"expected_bias_shape": None,
"expected_output_shape": (2, 2, 2),
},
{
"testcase_name": "_2d_precast",
"equation": "...b,bc->...c",
"bias_axes": None,
"input_shape": (2, 32),
"output_shape": (64,),
"expected_kernel_shape": (32, 64),
"expected_bias_shape": None,
"expected_output_shape": (2, 64),
},
{
"testcase_name": "_2d_precast_elided_input_used_in_output",
"equation": "...bc,bc->...b",
"bias_axes": None,
"input_shape": (2, 32, 64),
"output_shape": (32,),
"expected_kernel_shape": (32, 64),
"expected_bias_shape": None,
"expected_output_shape": (2, 32),
},
{
"testcase_name": "_2d_precast_multiple_elided_dims",
"equation": "...b,bc->...c",
"bias_axes": None,
"input_shape": (2, 3, 32),
"output_shape": (64,),
"expected_kernel_shape": (32, 64),
"expected_bias_shape": None,
"expected_output_shape": (2, 3, 64),
},
{
"testcase_name": "_3d_precast",
"equation": "...c,cde->...de",
"bias_axes": None,
"input_shape": (2, 1, 2),
"output_shape": (3, 4),
"expected_kernel_shape": (2, 3, 4),
"expected_bias_shape": None,
"expected_output_shape": (2, 1, 3, 4),
},
{
"testcase_name": "_3d_precast_3_bias",
"equation": "...c,cde->...de",
"bias_axes": "e",
"input_shape": (2, 1, 2),
"output_shape": (3, 4),
"expected_kernel_shape": (2, 3, 4),
"expected_bias_shape": (4,),
"expected_output_shape": (2, 1, 3, 4),
},
{
"testcase_name": "_3d_precast_2_bias",
"equation": "...c,cde->...de",
"bias_axes": "d",
"input_shape": (2, 1, 2),
"output_shape": (3, 4),
"expected_kernel_shape": (2, 3, 4),
"expected_bias_shape": (3, 1),
"expected_output_shape": (2, 1, 3, 4),
},
{
"testcase_name": "_3d_precast_2_3_bias",
"equation": "...c,cde->...de",
"bias_axes": "de",
"input_shape": (2, 1, 2),
"output_shape": (3, 4),
"expected_kernel_shape": (2, 3, 4),
"expected_bias_shape": (3, 4),
"expected_output_shape": (2, 1, 3, 4),
},
{
"testcase_name": "_2d_postcast",
"equation": "bc...,cd->bd...",
"bias_axes": None,
"input_shape": (2, 1, 2, 3),
"output_shape": (4,),
"expected_kernel_shape": (1, 4),
"expected_bias_shape": None,
"expected_output_shape": (2, 4, 2, 3),
},
{
"testcase_name": "_3d_postcast",
"equation": "bc...,cde->bde...",
"bias_axes": None,
"input_shape": (2, 1, 2),
"output_shape": (3, 4),
"expected_kernel_shape": (1, 3, 4),
"expected_bias_shape": None,
"expected_output_shape": (2, 3, 4, 2),
},
{
"testcase_name": "_3d_postcast_1_bias",
"equation": "bc...,cde->bde...",
"bias_axes": "d",
"input_shape": (2, 1, 2),
"output_shape": (3, 4),
"expected_kernel_shape": (1, 3, 4),
"expected_bias_shape": (3, 1, 1),
"expected_output_shape": (2, 3, 4, 2),
},
{
"testcase_name": "_3d_postcast_2_bias",
"equation": "bc...,cde->bde...",
"bias_axes": "e",
"input_shape": (2, 1, 2),
"output_shape": (3, 4),
"expected_kernel_shape": (1, 3, 4),
"expected_bias_shape": (4, 1),
"expected_output_shape": (2, 3, 4, 2),
},
{
"testcase_name": "_3d_postcast_1_2_bias",
"equation": "bc...,cde->bde...",
"bias_axes": "de",
"input_shape": (2, 1, 2),
"output_shape": (3, 4),
"expected_kernel_shape": (1, 3, 4),
"expected_bias_shape": (3, 4, 1),
"expected_output_shape": (2, 3, 4, 2),
},
)
@pytest.mark.requires_trainable_backend
def test_einsum_dense_basics(
self,
equation,
bias_axes,
input_shape,
output_shape,
expected_kernel_shape,
expected_bias_shape,
expected_output_shape,
):
self.run_layer_test(
layers.EinsumDense,
init_kwargs={
"equation": equation,
"output_shape": output_shape,
"bias_axes": bias_axes,
},
input_shape=input_shape,
expected_output_shape=expected_output_shape,
expected_num_trainable_weights=2
if expected_bias_shape is not None
else 1,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
)
layer = layers.EinsumDense(
equation, output_shape=output_shape, bias_axes=bias_axes
)
layer.build(input_shape)
self.assertEqual(layer.kernel.shape, expected_kernel_shape)
if expected_bias_shape is not None:
self.assertEqual(layer.bias.shape, expected_bias_shape)
| keras-core/keras_core/layers/core/einsum_dense_test.py/0 | {
"file_path": "keras-core/keras_core/layers/core/einsum_dense_test.py",
"repo_id": "keras-core",
"token_count": 5390
} | 38 |
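The parameterized cases above pair each einsum equation with the kernel and output shapes the layer is expected to build. As a rough cross-check (a NumPy-only sketch, not part of the test file), the same shape arithmetic can be reproduced with `np.einsum`:

```python
import numpy as np

# "_2d_precast": contract the last input axis (b=32) against a (32, 64)
# kernel while preserving the leading (elided) axes.
inputs = np.random.rand(2, 32).astype("float32")
kernel = np.random.rand(32, 64).astype("float32")
outputs = np.einsum("...b,bc->...c", inputs, kernel)
assert outputs.shape == (2, 64)  # matches expected_output_shape

# The same equation covers "_2d_precast_multiple_elided_dims": extra leading
# dimensions pass through untouched.
inputs = np.random.rand(2, 3, 32).astype("float32")
outputs = np.einsum("...b,bc->...c", inputs, kernel)
assert outputs.shape == (2, 3, 64)
```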
from keras_core import constraints
from keras_core import initializers
from keras_core import ops
from keras_core import regularizers
from keras_core.api_export import keras_core_export
from keras_core.layers.layer import Layer
@keras_core_export("keras_core.layers.LayerNormalization")
class LayerNormalization(Layer):
"""Layer normalization layer (Ba et al., 2016).
Normalize the activations of the previous layer for each given example in a
batch independently, rather than across a batch like Batch Normalization.
i.e. applies a transformation that maintains the mean activation within each
example close to 0 and the activation standard deviation close to 1.
If `scale` or `center` are enabled, the layer will scale the normalized
outputs by broadcasting them with a trainable variable `gamma`, and center
the outputs by broadcasting with a trainable variable `beta`. `gamma` will
default to a ones tensor and `beta` will default to a zeros tensor, so that
centering and scaling are no-ops before training has begun.
So, with scaling and centering enabled the normalization equations
are as follows:
Let the intermediate activations for a mini-batch to be the `inputs`.
For each sample `x_i` in `inputs` with `k` features, we compute the mean and
variance of the sample:
```python
mean_i = sum(x_i[j] for j in range(k)) / k
var_i = sum((x_i[j] - mean_i) ** 2 for j in range(k)) / k
```
and then compute a normalized `x_i_normalized`, including a small factor
`epsilon` for numerical stability.
```python
x_i_normalized = (x_i - mean_i) / sqrt(var_i + epsilon)
```
And finally `x_i_normalized ` is linearly transformed by `gamma` and `beta`,
which are learned parameters:
```python
output_i = x_i_normalized * gamma + beta
```
`gamma` and `beta` will span the axes of `inputs` specified in `axis`, and
this part of the inputs' shape must be fully defined.
For example:
>>> layer = keras_core.layers.LayerNormalization(axis=[1, 2, 3])
>>> layer.build([5, 20, 30, 40])
>>> print(layer.beta.shape)
(20, 30, 40)
>>> print(layer.gamma.shape)
(20, 30, 40)
Note that other implementations of layer normalization may choose to define
`gamma` and `beta` over a separate set of axes from the axes being
normalized across. For example, Group Normalization
([Wu et al. 2018](https://arxiv.org/abs/1803.08494)) with group size of 1
corresponds to a Layer Normalization that normalizes across height, width,
and channel and has `gamma` and `beta` span only the channel dimension.
So, this Layer Normalization implementation will not match a Group
Normalization layer with group size set to 1.
Args:
axis: Integer or List/Tuple. The axis or axes to normalize across.
Typically, this is the features axis/axes. The left-out axes are
typically the batch axis/axes. `-1` is the last dimension in the
input. Defaults to `-1`.
epsilon: Small float added to variance to avoid dividing by zero.
Defaults to 1e-3.
center: If True, add offset of `beta` to normalized tensor. If False,
`beta` is ignored. Defaults to `True`.
scale: If True, multiply by `gamma`. If False, `gamma` is not used.
When the next layer is linear (also e.g. `nn.relu`), this can be
disabled since the scaling will be done by the next layer.
Defaults to `True`.
rms_scaling: If True, `center` and `scale` are ignored, and the
inputs are scaled by `gamma` and the inverse square root
of the square of all inputs. This is an approximate and faster
approach that avoids ever computing the mean of the input.
beta_initializer: Initializer for the beta weight. Defaults to zeros.
gamma_initializer: Initializer for the gamma weight. Defaults to ones.
beta_regularizer: Optional regularizer for the beta weight.
None by default.
gamma_regularizer: Optional regularizer for the gamma weight.
None by default.
beta_constraint: Optional constraint for the beta weight.
None by default.
gamma_constraint: Optional constraint for the gamma weight.
None by default.
**kwargs: Base layer keyword arguments (e.g. `name` and `dtype`).
Reference:
- [Lei Ba et al., 2016](https://arxiv.org/abs/1607.06450).
"""
def __init__(
self,
axis=-1,
epsilon=1e-3,
center=True,
scale=True,
rms_scaling=False,
beta_initializer="zeros",
gamma_initializer="ones",
beta_regularizer=None,
gamma_regularizer=None,
beta_constraint=None,
gamma_constraint=None,
**kwargs
):
super().__init__(**kwargs)
if isinstance(axis, (list, tuple)):
self.axis = list(axis)
elif isinstance(axis, int):
self.axis = axis
else:
raise TypeError(
"Expected an int or a list/tuple of ints for the "
"argument 'axis', but received: %r" % axis
)
self.epsilon = epsilon
self.center = center
self.scale = scale
self.rms_scaling = rms_scaling
self.beta_initializer = initializers.get(beta_initializer)
self.gamma_initializer = initializers.get(gamma_initializer)
self.beta_regularizer = regularizers.get(beta_regularizer)
self.gamma_regularizer = regularizers.get(gamma_regularizer)
self.beta_constraint = constraints.get(beta_constraint)
self.gamma_constraint = constraints.get(gamma_constraint)
self.supports_masking = True
self.autocast = False
def build(self, input_shape):
if isinstance(self.axis, list):
shape = tuple([input_shape[dim] for dim in self.axis])
else:
shape = (input_shape[self.axis],)
self.axis = [self.axis]
if self.scale or self.rms_scaling:
self.gamma = self.add_weight(
name="gamma",
shape=shape,
initializer=self.gamma_initializer,
regularizer=self.gamma_regularizer,
constraint=self.gamma_constraint,
trainable=True,
)
else:
self.gamma = None
if self.center and not self.rms_scaling:
self.beta = self.add_weight(
name="beta",
shape=shape,
initializer=self.beta_initializer,
regularizer=self.beta_regularizer,
constraint=self.beta_constraint,
trainable=True,
)
else:
self.beta = None
self.built = True
def call(self, inputs):
inputs = ops.cast(inputs, self.compute_dtype)
# Compute the axes along which to reduce the mean / variance
input_shape = inputs.shape
ndims = len(input_shape)
# Broadcasting only necessary for norm when the axis is not just
# the last dimension
broadcast_shape = [1] * ndims
for dim in self.axis:
broadcast_shape[dim] = input_shape[dim]
def _broadcast(v):
if (
v is not None
and len(v.shape) != ndims
and self.axis != [ndims - 1]
):
return ops.reshape(v, broadcast_shape)
return v
input_dtype = inputs.dtype
if input_dtype in ("float16", "bfloat16") and self.dtype == "float32":
# If mixed precision is used, cast inputs to float32 so that
# this is at least as numerically stable as the fused version.
inputs = ops.cast(inputs, "float32")
if self.rms_scaling:
# Calculate outputs with only variance and gamma if rms scaling
# is enabled
# Calculate the variance along self.axis (layer activations).
variance = ops.var(inputs, axis=self.axis, keepdims=True)
inv = ops.rsqrt(variance + self.epsilon)
outputs = inputs * inv * ops.cast(self.gamma, inputs.dtype)
else:
# Calculate the mean & variance along self.axis (layer activations).
mean, variance = ops.moments(inputs, axes=self.axis, keepdims=True)
gamma, beta = _broadcast(self.gamma), _broadcast(self.beta)
inv = ops.rsqrt(variance + self.epsilon)
if gamma is not None:
gamma = ops.cast(gamma, inputs.dtype)
inv = inv * gamma
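            # Fold the normalization, scale and shift into one multiply-add:
            # (x - mean) * inv + beta == x * inv + (beta - mean * inv).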
res = -mean * inv
if beta is not None:
beta = ops.cast(beta, inputs.dtype)
res = res + beta
outputs = inputs * inv + res
return ops.cast(outputs, input_dtype)
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {
"axis": self.axis,
"epsilon": self.epsilon,
"center": self.center,
"scale": self.scale,
"beta_initializer": initializers.serialize(self.beta_initializer),
"gamma_initializer": initializers.serialize(self.gamma_initializer),
"beta_regularizer": regularizers.serialize(self.beta_regularizer),
"gamma_regularizer": regularizers.serialize(self.gamma_regularizer),
"beta_constraint": constraints.serialize(self.beta_constraint),
"gamma_constraint": constraints.serialize(self.gamma_constraint),
}
base_config = super().get_config()
return {**base_config, **config}
| keras-core/keras_core/layers/normalization/layer_normalization.py/0 | {
"file_path": "keras-core/keras_core/layers/normalization/layer_normalization.py",
"repo_id": "keras-core",
"token_count": 4134
} | 39 |
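A small NumPy-only sketch (illustrative, not part of the module) of the docstring equations above: per-sample mean and variance over the normalized axis, the gamma/beta transform, and the `rms_scaling` variant that skips centering:

```python
import numpy as np

epsilon = 1e-3
x = np.random.rand(4, 8).astype("float32")  # 4 samples, 8 features each
gamma = np.ones((8,), dtype="float32")
beta = np.zeros((8,), dtype="float32")

# Standard layer norm over the last axis, following the docstring equations.
mean = x.mean(axis=-1, keepdims=True)
var = x.var(axis=-1, keepdims=True)
x_normalized = (x - mean) / np.sqrt(var + epsilon)
out = x_normalized * gamma + beta  # per-sample mean ~0, std ~1 before training

# rms_scaling=True path: no centering, only the variance-based rescale by gamma.
rms_out = x / np.sqrt(x.var(axis=-1, keepdims=True) + epsilon) * gamma
```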
import numpy as np
import pytest
from absl.testing import parameterized
from keras_core import layers
from keras_core import testing
@pytest.mark.requires_trainable_backend
class GlobalAveragePoolingBasicTest(testing.TestCase, parameterized.TestCase):
@parameterized.parameters(
("channels_last", False, (3, 5, 4), (3, 4)),
("channels_last", True, (3, 5, 4), (3, 1, 4)),
("channels_first", False, (3, 5, 4), (3, 5)),
)
def test_global_average_pooling1d(
self,
data_format,
keepdims,
input_shape,
output_shape,
):
self.run_layer_test(
layers.GlobalAveragePooling1D,
init_kwargs={
"data_format": data_format,
"keepdims": keepdims,
},
input_shape=input_shape,
expected_output_shape=output_shape,
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_losses=0,
supports_masking=True,
)
@parameterized.parameters(
("channels_last", False, (3, 5, 6, 4), (3, 4)),
("channels_last", True, (3, 5, 6, 4), (3, 1, 1, 4)),
("channels_first", False, (3, 5, 6, 4), (3, 5)),
)
def test_global_average_pooling2d(
self,
data_format,
keepdims,
input_shape,
output_shape,
):
self.run_layer_test(
layers.GlobalAveragePooling2D,
init_kwargs={
"data_format": data_format,
"keepdims": keepdims,
},
input_shape=input_shape,
expected_output_shape=output_shape,
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_losses=0,
supports_masking=False,
)
@parameterized.parameters(
("channels_last", False, (3, 5, 6, 5, 4), (3, 4)),
("channels_last", True, (3, 5, 6, 5, 4), (3, 1, 1, 1, 4)),
("channels_first", False, (3, 5, 6, 5, 4), (3, 5)),
)
def test_global_average_pooling3d(
self,
data_format,
keepdims,
input_shape,
output_shape,
):
self.run_layer_test(
layers.GlobalAveragePooling3D,
init_kwargs={
"data_format": data_format,
"keepdims": keepdims,
},
input_shape=input_shape,
expected_output_shape=output_shape,
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_losses=0,
supports_masking=False,
)
class GlobalAveragePoolingCorrectnessTest(
testing.TestCase, parameterized.TestCase
):
@parameterized.parameters(
("channels_last", False),
("channels_last", True),
("channels_first", False),
("channels_first", True),
)
def test_global_average_pooling1d(self, data_format, keepdims):
def np_gap1d(x, data_format, keepdims, mask=None):
steps_axis = 1 if data_format == "channels_last" else 2
if mask is not None:
mask = np.expand_dims(
mask, 2 if data_format == "channels_last" else 1
)
x *= mask
res = np.sum(x, axis=steps_axis) / np.sum(mask, axis=steps_axis)
else:
res = np.mean(x, axis=steps_axis)
if keepdims:
res = np.expand_dims(res, axis=steps_axis)
return res
inputs = np.arange(24, dtype="float32").reshape((2, 3, 4))
layer = layers.GlobalAveragePooling1D(
data_format=data_format,
keepdims=keepdims,
)
outputs = layer(inputs)
expected = np_gap1d(inputs, data_format, keepdims)
self.assertAllClose(outputs, expected)
if data_format == "channels_last":
mask = np.array([[1, 1, 0], [0, 1, 0]], dtype="int32")
else:
mask = np.array([[1, 1, 0, 0], [0, 1, 0, 1]], dtype="int32")
outputs = layer(inputs, mask)
expected = np_gap1d(inputs, data_format, keepdims, mask)
self.assertAllClose(outputs, expected)
@parameterized.parameters(
("channels_last", False),
("channels_last", True),
("channels_first", False),
("channels_first", True),
)
def test_global_average_pooling2d(self, data_format, keepdims):
def np_gap2d(x, data_format, keepdims):
steps_axis = [1, 2] if data_format == "channels_last" else [2, 3]
res = np.apply_over_axes(np.mean, x, steps_axis)
if not keepdims:
res = res.squeeze()
return res
inputs = np.arange(96, dtype="float32").reshape((2, 3, 4, 4))
layer = layers.GlobalAveragePooling2D(
data_format=data_format,
keepdims=keepdims,
)
outputs = layer(inputs)
expected = np_gap2d(inputs, data_format, keepdims)
self.assertAllClose(outputs, expected)
@parameterized.parameters(
("channels_last", False),
("channels_last", True),
("channels_first", False),
("channels_first", True),
)
def test_global_average_pooling3d(self, data_format, keepdims):
def np_gap3d(x, data_format, keepdims):
steps_axis = (
[1, 2, 3] if data_format == "channels_last" else [2, 3, 4]
)
res = np.apply_over_axes(np.mean, x, steps_axis)
if not keepdims:
res = res.squeeze()
return res
inputs = np.arange(360, dtype="float32").reshape((2, 3, 3, 5, 4))
layer = layers.GlobalAveragePooling3D(
data_format=data_format,
keepdims=keepdims,
)
outputs = layer(inputs)
expected = np_gap3d(inputs, data_format, keepdims)
self.assertAllClose(outputs, expected)
| keras-core/keras_core/layers/pooling/global_average_pooling_test.py/0 | {
"file_path": "keras-core/keras_core/layers/pooling/global_average_pooling_test.py",
"repo_id": "keras-core",
"token_count": 3088
} | 40 |
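For the masked 1D case exercised above, the average is taken only over unmasked timesteps. A hedged NumPy sketch mirroring the `np_gap1d` reference with `channels_last` data:

```python
import numpy as np

x = np.arange(24, dtype="float32").reshape((2, 3, 4))  # (batch, steps, channels)
mask = np.array([[1, 1, 0], [0, 1, 0]], dtype="float32")  # 1 = keep the step

masked = x * mask[:, :, None]
pooled = masked.sum(axis=1) / mask.sum(axis=1, keepdims=True)
assert pooled.shape == (2, 4)  # one averaged vector per sample
```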
import tree
from keras_core import backend
from keras_core import layers
from keras_core.api_export import keras_core_export
from keras_core.layers.layer import Layer
from keras_core.saving import saving_lib
from keras_core.saving import serialization_lib
from keras_core.utils import backend_utils
from keras_core.utils.module_utils import tensorflow as tf
from keras_core.utils.naming import auto_name
class Cross:
def __init__(self, feature_names, crossing_dim, output_mode="one_hot"):
if output_mode not in {"int", "one_hot"}:
raise ValueError(
"Invalid value for argument `output_mode`. "
"Expected one of {'int', 'one_hot'}. "
f"Received: output_mode={output_mode}"
)
self.feature_names = tuple(feature_names)
self.crossing_dim = crossing_dim
self.output_mode = output_mode
@property
def name(self):
return "_X_".join(self.feature_names)
def get_config(self):
return {
"feature_names": self.feature_names,
"crossing_dim": self.crossing_dim,
"output_mode": self.output_mode,
}
@classmethod
def from_config(cls, config):
return cls(**config)
class Feature:
def __init__(self, dtype, preprocessor, output_mode):
if output_mode not in {"int", "one_hot", "float"}:
raise ValueError(
"Invalid value for argument `output_mode`. "
"Expected one of {'int', 'one_hot', 'float'}. "
f"Received: output_mode={output_mode}"
)
self.dtype = dtype
if isinstance(preprocessor, dict):
preprocessor = serialization_lib.deserialize_keras_object(
preprocessor
)
self.preprocessor = preprocessor
self.output_mode = output_mode
def get_config(self):
return {
"dtype": self.dtype,
"preprocessor": serialization_lib.serialize_keras_object(
self.preprocessor
),
"output_mode": self.output_mode,
}
@classmethod
def from_config(cls, config):
return cls(**config)
@keras_core_export("keras_core.utils.FeatureSpace")
class FeatureSpace(Layer):
"""One-stop utility for preprocessing and encoding structured data.
Arguments:
        features: Dict mapping the names of your features to their
type specification, e.g. `{"my_feature": "integer_categorical"}`
or `{"my_feature": FeatureSpace.integer_categorical()}`.
For a complete list of all supported types, see
"Available feature types" paragraph below.
output_mode: One of `"concat"` or `"dict"`. In concat mode, all
features get concatenated together into a single vector.
In dict mode, the FeatureSpace returns a dict of individually
encoded features (with the same keys as the input dict keys).
crosses: List of features to be crossed together, e.g.
`crosses=[("feature_1", "feature_2")]`. The features will be
"crossed" by hashing their combined value into
a fixed-length vector.
crossing_dim: Default vector size for hashing crossed features.
Defaults to `32`.
hashing_dim: Default vector size for hashing features of type
`"integer_hashed"` and `"string_hashed"`. Defaults to `32`.
num_discretization_bins: Default number of bins to be used for
discretizing features of type `"float_discretized"`.
Defaults to `32`.
**Available feature types:**
Note that all features can be referred to by their string name,
e.g. `"integer_categorical"`. When using the string name, the default
argument values are used.
```python
# Plain float values.
FeatureSpace.float(name=None)
# Float values to be preprocessed via featurewise standardization
# (i.e. via a `keras_core.layers.Normalization` layer).
FeatureSpace.float_normalized(name=None)
# Float values to be preprocessed via linear rescaling
# (i.e. via a `keras_core.layers.Rescaling` layer).
FeatureSpace.float_rescaled(scale=1., offset=0., name=None)
# Float values to be discretized. By default, the discrete
# representation will then be one-hot encoded.
FeatureSpace.float_discretized(
num_bins, bin_boundaries=None, output_mode="one_hot", name=None)
# Integer values to be indexed. By default, the discrete
# representation will then be one-hot encoded.
FeatureSpace.integer_categorical(
max_tokens=None, num_oov_indices=1, output_mode="one_hot", name=None)
# String values to be indexed. By default, the discrete
# representation will then be one-hot encoded.
FeatureSpace.string_categorical(
max_tokens=None, num_oov_indices=1, output_mode="one_hot", name=None)
# Integer values to be hashed into a fixed number of bins.
# By default, the discrete representation will then be one-hot encoded.
FeatureSpace.integer_hashed(num_bins, output_mode="one_hot", name=None)
# String values to be hashed into a fixed number of bins.
# By default, the discrete representation will then be one-hot encoded.
FeatureSpace.string_hashed(num_bins, output_mode="one_hot", name=None)
```
Examples:
**Basic usage with a dict of input data:**
```python
raw_data = {
"float_values": [0.0, 0.1, 0.2, 0.3],
"string_values": ["zero", "one", "two", "three"],
"int_values": [0, 1, 2, 3],
}
dataset = tf.data.Dataset.from_tensor_slices(raw_data)
feature_space = FeatureSpace(
features={
"float_values": "float_normalized",
"string_values": "string_categorical",
"int_values": "integer_categorical",
},
crosses=[("string_values", "int_values")],
output_mode="concat",
)
# Before you start using the FeatureSpace,
# you must `adapt()` it on some data.
feature_space.adapt(dataset)
# You can call the FeatureSpace on a dict of data (batched or unbatched).
output_vector = feature_space(raw_data)
```
**Basic usage with `tf.data`:**
```python
# Unlabeled data
preprocessed_ds = unlabeled_dataset.map(feature_space)
# Labeled data
preprocessed_ds = labeled_dataset.map(lambda x, y: (feature_space(x), y))
```
**Basic usage with the Keras Functional API:**
```python
# Retrieve a dict Keras Input objects
inputs = feature_space.get_inputs()
# Retrieve the corresponding encoded Keras tensors
encoded_features = feature_space.get_encoded_features()
# Build a Functional model
outputs = keras_core.layers.Dense(1, activation="sigmoid")(encoded_features)
model = keras_core.Model(inputs, outputs)
```
**Customizing each feature or feature cross:**
```python
feature_space = FeatureSpace(
features={
"float_values": FeatureSpace.float_normalized(),
"string_values": FeatureSpace.string_categorical(max_tokens=10),
"int_values": FeatureSpace.integer_categorical(max_tokens=10),
},
crosses=[
FeatureSpace.cross(("string_values", "int_values"), crossing_dim=32)
],
output_mode="concat",
)
```
**Returning a dict of integer-encoded features:**
```python
feature_space = FeatureSpace(
features={
"string_values": FeatureSpace.string_categorical(output_mode="int"),
"int_values": FeatureSpace.integer_categorical(output_mode="int"),
},
crosses=[
FeatureSpace.cross(
feature_names=("string_values", "int_values"),
crossing_dim=32,
output_mode="int",
)
],
output_mode="dict",
)
```
**Specifying your own Keras preprocessing layer:**
```python
# Let's say that one of the features is a short text paragraph that
# we want to encode as a vector (one vector per paragraph) via TF-IDF.
data = {
"text": ["1st string", "2nd string", "3rd string"],
}
# There's a Keras layer for this: TextVectorization.
custom_layer = layers.TextVectorization(output_mode="tf_idf")
# We can use FeatureSpace.feature to create a custom feature
# that will use our preprocessing layer.
feature_space = FeatureSpace(
features={
"text": FeatureSpace.feature(
preprocessor=custom_layer, dtype="string", output_mode="float"
),
},
output_mode="concat",
)
feature_space.adapt(tf.data.Dataset.from_tensor_slices(data))
output_vector = feature_space(data)
```
**Retrieving the underlying Keras preprocessing layers:**
```python
# The preprocessing layer of each feature is available in `.preprocessors`.
preprocessing_layer = feature_space.preprocessors["feature1"]
# The crossing layer of each feature cross is available in `.crossers`.
# It's an instance of keras_core.layers.HashedCrossing.
crossing_layer = feature_space.crossers["feature1_X_feature2"]
```
**Saving and reloading a FeatureSpace:**
```python
feature_space.save("featurespace.keras")
reloaded_feature_space = keras_core.models.load_model("featurespace.keras")
```
"""
@classmethod
def cross(cls, feature_names, crossing_dim, output_mode="one_hot"):
return Cross(feature_names, crossing_dim, output_mode=output_mode)
@classmethod
def feature(cls, dtype, preprocessor, output_mode):
return Feature(dtype, preprocessor, output_mode)
@classmethod
def float(cls, name=None):
from keras_core.layers.core import identity
name = name or auto_name("float")
preprocessor = identity.Identity(
dtype="float32", name=f"{name}_preprocessor"
)
return Feature(
dtype="float32", preprocessor=preprocessor, output_mode="float"
)
@classmethod
def float_rescaled(cls, scale=1.0, offset=0.0, name=None):
name = name or auto_name("float_rescaled")
preprocessor = layers.Rescaling(
scale=scale, offset=offset, name=f"{name}_preprocessor"
)
return Feature(
dtype="float32", preprocessor=preprocessor, output_mode="float"
)
@classmethod
def float_normalized(cls, name=None):
name = name or auto_name("float_normalized")
preprocessor = layers.Normalization(
axis=-1, name=f"{name}_preprocessor"
)
return Feature(
dtype="float32", preprocessor=preprocessor, output_mode="float"
)
@classmethod
def float_discretized(
cls, num_bins, bin_boundaries=None, output_mode="one_hot", name=None
):
name = name or auto_name("float_discretized")
preprocessor = layers.Discretization(
num_bins=num_bins,
bin_boundaries=bin_boundaries,
name=f"{name}_preprocessor",
)
return Feature(
dtype="float32", preprocessor=preprocessor, output_mode=output_mode
)
@classmethod
def integer_categorical(
cls,
max_tokens=None,
num_oov_indices=1,
output_mode="one_hot",
name=None,
):
name = name or auto_name("integer_categorical")
preprocessor = layers.IntegerLookup(
name=f"{name}_preprocessor",
max_tokens=max_tokens,
num_oov_indices=num_oov_indices,
)
return Feature(
dtype="int32", preprocessor=preprocessor, output_mode=output_mode
)
@classmethod
def string_categorical(
cls,
max_tokens=None,
num_oov_indices=1,
output_mode="one_hot",
name=None,
):
name = name or auto_name("string_categorical")
preprocessor = layers.StringLookup(
name=f"{name}_preprocessor",
max_tokens=max_tokens,
num_oov_indices=num_oov_indices,
)
return Feature(
dtype="string", preprocessor=preprocessor, output_mode=output_mode
)
@classmethod
def string_hashed(cls, num_bins, output_mode="one_hot", name=None):
name = name or auto_name("string_hashed")
preprocessor = layers.Hashing(
name=f"{name}_preprocessor", num_bins=num_bins
)
return Feature(
dtype="string", preprocessor=preprocessor, output_mode=output_mode
)
@classmethod
def integer_hashed(cls, num_bins, output_mode="one_hot", name=None):
name = name or auto_name("integer_hashed")
preprocessor = layers.Hashing(
name=f"{name}_preprocessor", num_bins=num_bins
)
return Feature(
dtype="int32", preprocessor=preprocessor, output_mode=output_mode
)
def __init__(
self,
features,
output_mode="concat",
crosses=None,
crossing_dim=32,
hashing_dim=32,
num_discretization_bins=32,
name=None,
):
super().__init__(name=name)
if not features:
raise ValueError("The `features` argument cannot be None or empty.")
self.crossing_dim = crossing_dim
self.hashing_dim = hashing_dim
self.num_discretization_bins = num_discretization_bins
self.features = {
name: self._standardize_feature(name, value)
for name, value in features.items()
}
self.crosses = []
if crosses:
feature_set = set(features.keys())
for cross in crosses:
if isinstance(cross, dict):
cross = serialization_lib.deserialize_keras_object(cross)
if isinstance(cross, Cross):
self.crosses.append(cross)
else:
if not crossing_dim:
raise ValueError(
"When specifying `crosses`, the argument "
"`crossing_dim` "
"(dimensionality of the crossing space) "
"should be specified as well."
)
for key in cross:
if key not in feature_set:
raise ValueError(
"All features referenced "
"in the `crosses` argument "
"should be present in the `features` dict. "
f"Received unknown features: {cross}"
)
self.crosses.append(Cross(cross, crossing_dim=crossing_dim))
self.crosses_by_name = {cross.name: cross for cross in self.crosses}
if output_mode not in {"dict", "concat"}:
raise ValueError(
"Invalid value for argument `output_mode`. "
"Expected one of {'dict', 'concat'}. "
f"Received: output_mode={output_mode}"
)
self.output_mode = output_mode
self.inputs = {
name: self._feature_to_input(name, value)
for name, value in self.features.items()
}
self.preprocessors = {
name: value.preprocessor for name, value in self.features.items()
}
self.encoded_features = None
self.crossers = {
cross.name: self._cross_to_crosser(cross) for cross in self.crosses
}
self.one_hot_encoders = {}
self._is_adapted = False
self.concat = None
self._preprocessed_features_names = None
self._crossed_features_names = None
self._sublayers_built = False
def _feature_to_input(self, name, feature):
return layers.Input(shape=(1,), dtype=feature.dtype, name=name)
def _standardize_feature(self, name, feature):
if isinstance(feature, Feature):
return feature
if isinstance(feature, dict):
return serialization_lib.deserialize_keras_object(feature)
if feature == "float":
return self.float(name=name)
elif feature == "float_normalized":
return self.float_normalized(name=name)
elif feature == "float_rescaled":
return self.float_rescaled(name=name)
elif feature == "float_discretized":
return self.float_discretized(
name=name, num_bins=self.num_discretization_bins
)
elif feature == "integer_categorical":
return self.integer_categorical(name=name)
elif feature == "string_categorical":
return self.string_categorical(name=name)
elif feature == "integer_hashed":
return self.integer_hashed(self.hashing_dim, name=name)
elif feature == "string_hashed":
return self.string_hashed(self.hashing_dim, name=name)
else:
raise ValueError(f"Invalid feature type: {feature}")
def _cross_to_crosser(self, cross):
return layers.HashedCrossing(cross.crossing_dim, name=cross.name)
def _list_adaptable_preprocessors(self):
adaptable_preprocessors = []
for name in self.features.keys():
preprocessor = self.preprocessors[name]
# Special case: a Normalization layer with preset mean/variance.
# Not adaptable.
if isinstance(preprocessor, layers.Normalization):
if preprocessor.input_mean is not None:
continue
if hasattr(preprocessor, "adapt"):
adaptable_preprocessors.append(name)
return adaptable_preprocessors
def adapt(self, dataset):
if not isinstance(dataset, tf.data.Dataset):
raise ValueError(
"`adapt()` can only be called on a tf.data.Dataset. "
f"Received instead: {dataset} (of type {type(dataset)})"
)
for name in self._list_adaptable_preprocessors():
# Call adapt() on each individual adaptable layer.
# TODO: consider rewriting this to instead iterate on the
# dataset once, split each batch into individual features,
# and call the layer's `_adapt_function` on each batch
# to simulate the behavior of adapt() in a more performant fashion.
feature_dataset = dataset.map(lambda x: x[name])
preprocessor = self.preprocessors[name]
# TODO: consider adding an adapt progress bar.
# Sample 1 element to check the rank
for x in feature_dataset.take(1):
pass
if len(x.shape) == 0:
# The dataset yields unbatched scalars; batch it.
feature_dataset = feature_dataset.batch(32)
if len(x.shape) in {0, 1}:
# If the rank is 1, add a dimension
# so we can reduce on axis=-1.
# Note: if rank was previously 0, it is now 1.
feature_dataset = feature_dataset.map(
lambda x: tf.expand_dims(x, -1)
)
preprocessor.adapt(feature_dataset)
self._is_adapted = True
self.get_encoded_features() # Finish building the layer
self.built = True
self._sublayers_built = True
def get_inputs(self):
self._check_if_built()
return self.inputs
def get_encoded_features(self):
self._check_if_adapted()
if self.encoded_features is None:
preprocessed_features = self._preprocess_features(self.inputs)
crossed_features = self._cross_features(preprocessed_features)
merged_features = self._merge_features(
preprocessed_features, crossed_features
)
self.encoded_features = merged_features
return self.encoded_features
def _preprocess_features(self, features):
return {
name: self.preprocessors[name](features[name])
for name in features.keys()
}
def _cross_features(self, features):
all_outputs = {}
for cross in self.crosses:
inputs = [features[name] for name in cross.feature_names]
outputs = self.crossers[cross.name](inputs)
all_outputs[cross.name] = outputs
return all_outputs
def _merge_features(self, preprocessed_features, crossed_features):
if not self._preprocessed_features_names:
self._preprocessed_features_names = sorted(
preprocessed_features.keys()
)
self._crossed_features_names = sorted(crossed_features.keys())
all_names = (
self._preprocessed_features_names + self._crossed_features_names
)
all_features = [
preprocessed_features[name]
for name in self._preprocessed_features_names
] + [crossed_features[name] for name in self._crossed_features_names]
if self.output_mode == "dict":
output_dict = {}
else:
features_to_concat = []
if self._sublayers_built:
# Fast mode.
for name, feature in zip(all_names, all_features):
encoder = self.one_hot_encoders.get(name, None)
if encoder:
feature = encoder(feature)
if self.output_mode == "dict":
output_dict[name] = feature
else:
features_to_concat.append(feature)
if self.output_mode == "dict":
return output_dict
else:
return self.concat(features_to_concat)
# If the object isn't built,
# we create the encoder and concat layers below
all_specs = [
self.features[name] for name in self._preprocessed_features_names
] + [
self.crosses_by_name[name] for name in self._crossed_features_names
]
for name, feature, spec in zip(all_names, all_features, all_specs):
if tree.is_nested(feature):
dtype = tree.flatten(feature)[0].dtype
else:
dtype = feature.dtype
dtype = backend.standardize_dtype(dtype)
if spec.output_mode == "one_hot":
preprocessor = self.preprocessors.get(
name
) or self.crossers.get(name)
cardinality = None
if not dtype.startswith("int"):
raise ValueError(
f"Feature '{name}' has `output_mode='one_hot'`. "
"Thus its preprocessor should return an integer dtype. "
f"Instead it returns a {dtype} dtype."
)
if isinstance(
preprocessor, (layers.IntegerLookup, layers.StringLookup)
):
cardinality = preprocessor.vocabulary_size()
elif isinstance(preprocessor, layers.CategoryEncoding):
cardinality = preprocessor.num_tokens
elif isinstance(preprocessor, layers.Discretization):
cardinality = preprocessor.num_bins
elif isinstance(
preprocessor, (layers.HashedCrossing, layers.Hashing)
):
cardinality = preprocessor.num_bins
else:
raise ValueError(
f"Feature '{name}' has `output_mode='one_hot'`. "
"However it isn't a standard feature and the "
"dimensionality of its output space is not known, "
"thus it cannot be one-hot encoded. "
"Try using `output_mode='int'`."
)
if cardinality is not None:
encoder = layers.CategoryEncoding(
num_tokens=cardinality, output_mode="multi_hot"
)
self.one_hot_encoders[name] = encoder
feature = encoder(feature)
if self.output_mode == "concat":
dtype = feature.dtype
if dtype.startswith("int") or dtype == "string":
raise ValueError(
f"Cannot concatenate features because feature '{name}' "
f"has not been encoded (it has dtype {dtype}). "
"Consider using `output_mode='dict'`."
)
features_to_concat.append(feature)
else:
output_dict[name] = feature
if self.output_mode == "concat":
self.concat = layers.Concatenate(axis=-1)
return self.concat(features_to_concat)
else:
return output_dict
def _check_if_adapted(self):
if not self._is_adapted:
if not self._list_adaptable_preprocessors():
self._is_adapted = True
else:
raise ValueError(
"You need to call `.adapt(dataset)` on the FeatureSpace "
"before you can start using it."
)
def _check_if_built(self):
if not self._sublayers_built:
self._check_if_adapted()
# Finishes building
self.get_encoded_features()
self._sublayers_built = True
def _convert_input(self, x):
if not isinstance(x, (tf.Tensor, tf.SparseTensor, tf.RaggedTensor)):
if not isinstance(x, (list, tuple, int, float)):
x = backend.convert_to_numpy(x)
x = tf.convert_to_tensor(x)
return x
def __call__(self, data):
self._check_if_built()
if not isinstance(data, dict):
raise ValueError(
"A FeatureSpace can only be called with a dict. "
f"Received: data={data} (of type {type(data)}"
)
# Many preprocessing layers support all backends but many do not.
# Switch to TF to make FeatureSpace work universally.
data = {key: self._convert_input(value) for key, value in data.items()}
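        # Scalar inputs are reshaped to (1, 1) (a batch of one) and rank-1
        # inputs get a trailing feature axis, so the preprocessors always see
        # 2D data; the temporary batch dimension is squeezed out again below.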
rebatched = False
for name, x in data.items():
if len(x.shape) == 0:
data[name] = tf.reshape(x, (1, 1))
rebatched = True
elif len(x.shape) == 1:
data[name] = tf.expand_dims(x, -1)
with backend_utils.TFGraphScope():
# This scope is to make sure that inner TFDataLayers
# will not convert outputs back to backend-native --
# they should be TF tensors throughout
preprocessed_data = self._preprocess_features(data)
preprocessed_data = tree.map_structure(
lambda x: self._convert_input(x), preprocessed_data
)
crossed_data = self._cross_features(preprocessed_data)
crossed_data = tree.map_structure(
lambda x: self._convert_input(x), crossed_data
)
merged_data = self._merge_features(preprocessed_data, crossed_data)
if rebatched:
if self.output_mode == "concat":
assert merged_data.shape[0] == 1
merged_data = tf.squeeze(merged_data, axis=0)
else:
for name, x in merged_data.items():
if len(x.shape) == 2 and x.shape[0] == 1:
merged_data[name] = tf.squeeze(x, axis=0)
if (
backend.backend() != "tensorflow"
and not backend_utils.in_tf_graph()
):
merged_data = tree.map_structure(
lambda x: backend.convert_to_tensor(x, dtype=x.dtype),
merged_data,
)
return merged_data
def get_config(self):
return {
"features": serialization_lib.serialize_keras_object(self.features),
"output_mode": self.output_mode,
"crosses": serialization_lib.serialize_keras_object(self.crosses),
"crossing_dim": self.crossing_dim,
"hashing_dim": self.hashing_dim,
"num_discretization_bins": self.num_discretization_bins,
}
@classmethod
def from_config(cls, config):
return cls(**config)
def get_build_config(self):
return {
name: feature.preprocessor.get_build_config()
for name, feature in self.features.items()
}
def build_from_config(self, config):
for name in config.keys():
preprocessor = self.features[name].preprocessor
if not preprocessor.built:
preprocessor.build_from_config(config[name])
self._is_adapted = True
def save(self, filepath):
"""Save the `FeatureSpace` instance to a `.keras` file.
You can reload it via `keras_core.models.load_model()`:
```python
feature_space.save("featurespace.keras")
reloaded_fs = keras_core.models.load_model("featurespace.keras")
```
"""
saving_lib.save_model(self, filepath)
def save_own_variables(self, store):
return
def load_own_variables(self, store):
return
| keras-core/keras_core/layers/preprocessing/feature_space.py/0 | {
"file_path": "keras-core/keras_core/layers/preprocessing/feature_space.py",
"repo_id": "keras-core",
"token_count": 13723
} | 41 |
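A compact usage sketch adapted from the docstring examples above (feature names and data are illustrative, and it assumes the `keras_core.utils.FeatureSpace` export path declared by the decorator), contrasting `output_mode="concat"` with `output_mode="dict"`:

```python
import tensorflow as tf

from keras_core.utils import FeatureSpace

raw_data = {
    "float_values": [0.0, 0.1, 0.2, 0.3],
    "string_values": ["zero", "one", "two", "three"],
    "int_values": [0, 1, 2, 3],
}
dataset = tf.data.Dataset.from_tensor_slices(raw_data)

# "concat" mode: every encoded feature (and cross) is concatenated into a
# single vector per sample.
concat_space = FeatureSpace(
    features={
        "float_values": "float_normalized",
        "string_values": "string_categorical",
        "int_values": "integer_categorical",
    },
    crosses=[("string_values", "int_values")],
    output_mode="concat",
)
concat_space.adapt(dataset)
concat_vector = concat_space(raw_data)  # one 2D tensor for the whole batch

# "dict" mode: one individually encoded tensor per feature (plus crosses),
# keyed by the input dict keys.
dict_space = FeatureSpace(
    features={
        "string_values": FeatureSpace.string_categorical(output_mode="int"),
        "int_values": FeatureSpace.integer_categorical(output_mode="int"),
    },
    output_mode="dict",
)
dict_space.adapt(dataset)
encoded = dict_space(raw_data)  # dict of tensors keyed by feature name
```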
from keras_core import backend
from keras_core.api_export import keras_core_export
from keras_core.layers.preprocessing.tf_data_layer import TFDataLayer
from keras_core.random.seed_generator import SeedGenerator
from keras_core.utils import image_utils
@keras_core_export("keras_core.layers.RandomCrop")
class RandomCrop(TFDataLayer):
"""A preprocessing layer which randomly crops images during training.
During training, this layer will randomly choose a location to crop images
down to a target size. The layer will crop all the images in the same batch
to the same cropping location.
At inference time, and during training if an input image is smaller than the
target size, the input will be resized and cropped so as to return the
largest possible window in the image that matches the target aspect ratio.
If you need to apply random cropping at inference time, set `training` to
True when calling the layer.
Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and
of integer or floating point dtype. By default, the layer will output
floats.
**Note:** This layer is safe to use inside a `tf.data` pipeline
(independently of which backend you're using).
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format.
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., target_height, target_width, channels)`.
Args:
height: Integer, the height of the output shape.
width: Integer, the width of the output shape.
seed: Integer. Used to create a random seed.
**kwargs: Base layer keyword arguments, such as
`name` and `dtype`.
"""
def __init__(
self, height, width, seed=None, data_format=None, name=None, **kwargs
):
super().__init__(name=name, **kwargs)
self.height = height
self.width = width
self.seed = seed or backend.random.make_default_seed()
self.generator = SeedGenerator(seed)
self.data_format = backend.standardize_data_format(data_format)
if self.data_format == "channels_first":
            self.height_axis = -2
self.width_axis = -1
elif self.data_format == "channels_last":
self.height_axis = -3
self.width_axis = -2
self.supports_masking = False
self.supports_jit = False
self._convert_input_args = False
self._allow_non_tensor_positional_args = True
def call(self, inputs, training=True):
inputs = self.backend.cast(inputs, self.compute_dtype)
input_shape = self.backend.shape(inputs)
is_batched = len(input_shape) > 3
if not is_batched:
inputs = self.backend.numpy.expand_dims(inputs, axis=0)
h_diff = input_shape[self.height_axis] - self.height
w_diff = input_shape[self.width_axis] - self.width
def random_crop():
input_height, input_width = (
input_shape[self.height_axis],
input_shape[self.width_axis],
)
seed_generator = self._get_seed_generator(self.backend._backend)
h_start = self.backend.cast(
self.backend.random.uniform(
(),
0,
maxval=float(input_height - self.height + 1),
seed=seed_generator,
),
"int32",
)
w_start = self.backend.cast(
self.backend.random.uniform(
(),
0,
maxval=float(input_width - self.width + 1),
seed=seed_generator,
),
"int32",
)
if self.data_format == "channels_last":
return self.backend.core.slice(
inputs,
self.backend.numpy.stack([0, h_start, w_start, 0]),
[
self.backend.shape(inputs)[0],
self.height,
self.width,
self.backend.shape(inputs)[3],
],
)
else:
return self.backend.core.slice(
inputs,
self.backend.numpy.stack([0, 0, h_start, w_start]),
[
self.backend.shape(inputs)[0],
self.backend.shape(inputs)[1],
self.height,
self.width,
],
)
def resize():
outputs = image_utils.smart_resize(
inputs,
[self.height, self.width],
data_format=self.data_format,
backend_module=self.backend,
)
# smart_resize will always output float32, so we need to re-cast.
return self.backend.cast(outputs, self.compute_dtype)
if isinstance(h_diff, int) and isinstance(w_diff, int):
if training and h_diff >= 0 and w_diff >= 0:
outputs = random_crop()
else:
outputs = resize()
else:
predicate = self.backend.numpy.logical_and(
training,
self.backend.numpy.logical_and(h_diff >= 0, w_diff >= 0),
)
outputs = self.backend.cond(
predicate,
random_crop,
resize,
)
if not is_batched:
outputs = self.backend.numpy.squeeze(outputs, axis=0)
return outputs
def compute_output_shape(self, input_shape, *args, **kwargs):
input_shape = list(input_shape)
input_shape[self.height_axis] = self.height
input_shape[self.width_axis] = self.width
return tuple(input_shape)
def get_config(self):
config = super().get_config()
config.update(
{
"height": self.height,
"width": self.width,
"seed": self.seed,
"data_format": self.data_format,
}
)
return config
| keras-core/keras_core/layers/preprocessing/random_crop.py/0 | {
"file_path": "keras-core/keras_core/layers/preprocessing/random_crop.py",
"repo_id": "keras-core",
"token_count": 3120
} | 42 |
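A brief usage sketch (shapes chosen arbitrarily) of the two paths in `call`: a random window is cut at training time when the input is larger than the target, and a resize to the target size is used otherwise or at inference:

```python
import numpy as np

from keras_core import layers

images = np.random.rand(8, 32, 32, 3).astype("float32")  # larger than target
layer = layers.RandomCrop(height=24, width=24, seed=1337)

# Training with inputs larger than the target: a random 24x24 window is cut
# out (the same offset is used for every image in the batch).
cropped = layer(images, training=True)
assert tuple(cropped.shape) == (8, 24, 24, 3)

# Inference (or inputs smaller than the target): smart_resize returns the
# largest window matching the target aspect ratio, so the output shape is
# still fixed.
resized = layer(images, training=False)
assert tuple(resized.shape) == (8, 24, 24, 3)
```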
import numpy as np
from keras_core import backend
from keras_core.api_export import keras_core_export
from keras_core.layers.layer import Layer
from keras_core.layers.preprocessing.index_lookup import listify_tensors
from keras_core.layers.preprocessing.string_lookup import StringLookup
from keras_core.saving import serialization_lib
from keras_core.utils import argument_validation
from keras_core.utils import backend_utils
from keras_core.utils import tf_utils
from keras_core.utils.module_utils import tensorflow as tf
@keras_core_export("keras_core.layers.TextVectorization")
class TextVectorization(Layer):
"""A preprocessing layer which maps text features to integer sequences.
This layer has basic options for managing text in a Keras model. It
transforms a batch of strings (one example = one string) into either a list
of token indices (one example = 1D tensor of integer token indices) or a
dense representation (one example = 1D tensor of float values representing
data about the example's tokens). This layer is meant to handle natural
language inputs. To handle simple string inputs (categorical strings or
    pre-tokenized strings) see `keras_core.layers.StringLookup`.
The vocabulary for the layer must be either supplied on construction or
learned via `adapt()`. When this layer is adapted, it will analyze the
dataset, determine the frequency of individual string values, and create a
vocabulary from them. This vocabulary can have unlimited size or be capped,
depending on the configuration options for this layer; if there are more
unique values in the input than the maximum vocabulary size, the most
frequent terms will be used to create the vocabulary.
The processing of each example contains the following steps:
1. Standardize each example (usually lowercasing + punctuation stripping)
2. Split each example into substrings (usually words)
3. Recombine substrings into tokens (usually ngrams)
4. Index tokens (associate a unique int value with each token)
5. Transform each example using this index, either into a vector of ints or
a dense float vector.
Some notes on passing callables to customize splitting and normalization for
this layer:
1. Any callable can be passed to this Layer, but if you want to serialize
this object you should only pass functions that are registered Keras
serializables (see `keras_core.saving.register_keras_serializable`
for more details).
2. When using a custom callable for `standardize`, the data received
by the callable will be exactly as passed to this layer. The callable
should return a tensor of the same shape as the input.
3. When using a custom callable for `split`, the data received by the
callable will have the 1st dimension squeezed out - instead of
`[["string to split"], ["another string to split"]]`, the Callable will
see `["string to split", "another string to split"]`.
The callable should return a `tf.Tensor` of dtype `string`
with the first dimension containing the split tokens -
in this example, we should see something like `[["string", "to",
"split"], ["another", "string", "to", "split"]]`.
**Note:** This layer uses TensorFlow internally. It cannot
be used as part of the compiled computation graph of a model with
any backend other than TensorFlow.
It can however be used with any backend when running eagerly.
It can also always be used as part of an input preprocessing pipeline
with any backend (outside the model itself), which is how we recommend
to use this layer.
**Note:** This layer is safe to use inside a `tf.data` pipeline
(independently of which backend you're using).
Args:
max_tokens: Maximum size of the vocabulary for this layer. This should
only be specified when adapting a vocabulary or when setting
`pad_to_max_tokens=True`. Note that this vocabulary
contains 1 OOV token, so the effective number of tokens is
`(max_tokens - 1 - (1 if output_mode == "int" else 0))`.
standardize: Optional specification for standardization to apply to the
input text. Values can be:
- `None`: No standardization.
- `"lower_and_strip_punctuation"`: Text will be lowercased and all
punctuation removed.
- `"lower"`: Text will be lowercased.
- `"strip_punctuation"`: All punctuation will be removed.
            - Callable: Inputs will be passed to the callable function,
which should be standardized and returned.
split: Optional specification for splitting the input text.
Values can be:
- `None`: No splitting.
- `"whitespace"`: Split on whitespace.
- `"character"`: Split on each unicode character.
            - Callable: Standardized inputs will be passed to the callable
function, which should be split and returned.
ngrams: Optional specification for ngrams to create from the
possibly-split input text. Values can be `None`, an integer
or tuple of integers; passing an integer will create ngrams
up to that integer, and passing a tuple of integers will
create ngrams for the specified values in the tuple.
Passing `None` means that no ngrams will be created.
output_mode: Optional specification for the output of the layer.
Values can be `"int"`, `"multi_hot"`, `"count"` or `"tf_idf"`,
configuring the layer as follows:
- `"int"`: Outputs integer indices, one integer index per split
string token. When `output_mode == "int"`,
0 is reserved for masked locations;
this reduces the vocab size to `max_tokens - 2`
instead of `max_tokens - 1`.
- `"multi_hot"`: Outputs a single int array per batch, of either
vocab_size or max_tokens size, containing 1s in all elements
where the token mapped to that index exists at least
once in the batch item.
- `"count"`: Like `"multi_hot"`, but the int array contains
a count of the number of times the token at that index
appeared in the batch item.
- `"tf_idf"`: Like `"multi_hot"`, but the TF-IDF algorithm
is applied to find the value in each token slot.
For `"int"` output, any shape of input and output is supported.
For all other output modes, currently only rank 1 inputs
(and rank 2 outputs after splitting) are supported.
output_sequence_length: Only valid in INT mode. If set, the output will
have its time dimension padded or truncated to exactly
`output_sequence_length` values, resulting in a tensor of shape
`(batch_size, output_sequence_length)` regardless of how many tokens
resulted from the splitting step. Defaults to `None`.
pad_to_max_tokens: Only valid in `"multi_hot"`, `"count"`,
and `"tf_idf"` modes. If `True`, the output will have
its feature axis padded to `max_tokens` even if the number
of unique tokens in the vocabulary is less than `max_tokens`,
resulting in a tensor of shape `(batch_size, max_tokens)`
regardless of vocabulary size. Defaults to `False`.
vocabulary: Optional. Either an array of strings or a string path to a
text file. If passing an array, can pass a tuple, list,
1D NumPy array, or 1D tensor containing the string vocabulary terms.
If passing a file path, the file should contain one line per term
in the vocabulary. If this argument is set,
there is no need to `adapt()` the layer.
idf_weights: Only valid when `output_mode` is `"tf_idf"`. A tuple, list,
1D NumPy array, or 1D tensor of the same length as the vocabulary,
containing the floating point inverse document frequency weights,
which will be multiplied by per sample term counts for
the final `tf_idf` weight. If the `vocabulary` argument is set,
and `output_mode` is `"tf_idf"`, this argument must be supplied.
ragged: Boolean. Only applicable to `"int"` output mode.
Only supported with TensorFlow backend.
If `True`, returns a `RaggedTensor` instead of a dense `Tensor`,
where each sequence may have a different length
after string splitting. Defaults to `False`.
sparse: Boolean. Only applicable to `"multi_hot"`, `"count"`, and
`"tf_idf"` output modes. Only supported with TensorFlow
backend. If `True`, returns a `SparseTensor`
instead of a dense `Tensor`. Defaults to `False`.
encoding: Optional. The text encoding to use to interpret the input
strings. Defaults to `"utf-8"`.
Examples:
This example instantiates a `TextVectorization` layer that lowercases text,
splits on whitespace, strips punctuation, and outputs integer vocab indices.
>>> max_tokens = 5000 # Maximum vocab size.
>>> max_len = 4 # Sequence length to pad the outputs to.
>>> # Create the layer.
>>> vectorize_layer = TextVectorization(
... max_tokens=max_tokens,
... output_mode='int',
... output_sequence_length=max_len)
>>> # Now that the vocab layer has been created, call `adapt` on the
>>> # list of strings to create the vocabulary.
>>> vectorize_layer.adapt(["foo bar", "bar baz", "baz bada boom"])
>>> # Now, the layer can map strings to integers -- you can use an
>>> # embedding layer to map these integers to learned embeddings.
>>> input_data = [["foo qux bar"], ["qux baz"]]
>>> vectorize_layer(input_data)
array([[4, 1, 3, 0],
[1, 2, 0, 0]])
This example instantiates a `TextVectorization` layer by passing a list
of vocabulary terms to the layer's `__init__()` method.
>>> vocab_data = ["earth", "wind", "and", "fire"]
>>> max_len = 4 # Sequence length to pad the outputs to.
>>> # Create the layer, passing the vocab directly. You can also pass the
>>> # vocabulary arg a path to a file containing one vocabulary word per
>>> # line.
>>> vectorize_layer = keras_core.layers.TextVectorization(
... max_tokens=max_tokens,
... output_mode='int',
... output_sequence_length=max_len,
... vocabulary=vocab_data)
>>> # Because we've passed the vocabulary directly, we don't need to adapt
>>> # the layer - the vocabulary is already set. The vocabulary contains the
>>> # padding token ('') and OOV token ('[UNK]')
>>> # as well as the passed tokens.
>>> vectorize_layer.get_vocabulary()
['', '[UNK]', 'earth', 'wind', 'and', 'fire']
"""
def __init__(
self,
max_tokens=None,
standardize="lower_and_strip_punctuation",
split="whitespace",
ngrams=None,
output_mode="int",
output_sequence_length=None,
pad_to_max_tokens=False,
vocabulary=None,
idf_weights=None,
sparse=False,
ragged=False,
encoding="utf-8",
name=None,
**kwargs,
):
if not tf.available:
raise ImportError(
"Layer TextVectorization requires TensorFlow. "
"Install it via `pip install tensorflow`."
)
if sparse and backend.backend() != "tensorflow":
raise ValueError(
"`sparse` can only be set to True with the "
"TensorFlow backend."
)
if ragged and backend.backend() != "tensorflow":
raise ValueError(
"`ragged` can only be set to True with the "
"TensorFlow backend."
)
# 'standardize' must be one of
# (None, "lower_and_strip_punctuation", "lower", "strip_punctuation",
# callable)
argument_validation.validate_string_arg(
standardize,
allowable_strings=(
"lower_and_strip_punctuation",
"lower",
"strip_punctuation",
),
caller_name=self.__class__.__name__,
arg_name="standardize",
allow_none=True,
allow_callables=True,
)
# 'split' must be one of (None, "whitespace", "character", callable)
argument_validation.validate_string_arg(
split,
allowable_strings=("whitespace", "character"),
caller_name=self.__class__.__name__,
arg_name="split",
allow_none=True,
allow_callables=True,
)
# Support deprecated names for output_modes.
if output_mode == "binary":
output_mode = "multi_hot"
if output_mode == "tf-idf":
output_mode = "tf_idf"
argument_validation.validate_string_arg(
output_mode,
allowable_strings=(
"int",
"one_hot",
"multi_hot",
"count",
"tf_idf",
),
caller_name=self.__class__.__name__,
arg_name="output_mode",
)
# 'ngrams' must be one of (None, int, tuple(int))
if not (
ngrams is None
or isinstance(ngrams, int)
or isinstance(ngrams, tuple)
and all(isinstance(item, int) for item in ngrams)
):
raise ValueError(
"`ngrams` must be None, an integer, or a tuple of "
f"integers. Received: ngrams={ngrams}"
)
# 'output_sequence_length' must be one of (None, int) and is only
# set if output_mode is "int"".
if output_mode == "int" and not (
isinstance(output_sequence_length, int)
or (output_sequence_length is None)
):
raise ValueError(
"`output_sequence_length` must be either None or an "
"integer when `output_mode` is 'int'. Received: "
f"output_sequence_length={output_sequence_length}"
)
if output_mode != "int" and output_sequence_length is not None:
raise ValueError(
"`output_sequence_length` must not be set if `output_mode` is "
"not 'int'. "
f"Received output_sequence_length={output_sequence_length}."
)
if ragged and output_mode != "int":
raise ValueError(
"`ragged` must not be true if `output_mode` is "
f"`'int'`. Received: ragged={ragged} and "
f"output_mode={output_mode}"
)
if ragged and output_sequence_length is not None:
raise ValueError(
"`output_sequence_length` must not be set if ragged "
f"is True. Received: ragged={ragged} and "
f"output_sequence_length={output_sequence_length}"
)
self._max_tokens = max_tokens
self._standardize = standardize
self._split = split
self._ngrams_arg = ngrams
if isinstance(ngrams, int):
self._ngrams = tuple(range(1, ngrams + 1))
else:
self._ngrams = ngrams
self._ragged = ragged
self._output_mode = output_mode
self._output_sequence_length = output_sequence_length
self._encoding = encoding
# We save this hidden option to persist the fact
        # that we have a non-adaptable layer with a
# manually set vocab.
self._has_input_vocabulary = kwargs.pop(
"has_input_vocabulary", (vocabulary is not None)
)
vocabulary_size = kwargs.pop("vocabulary_size", None)
super().__init__(name=name, **kwargs)
self._lookup_layer = StringLookup(
max_tokens=max_tokens,
vocabulary=vocabulary,
idf_weights=idf_weights,
pad_to_max_tokens=pad_to_max_tokens,
mask_token="",
output_mode=output_mode,
sparse=sparse,
has_input_vocabulary=self._has_input_vocabulary,
encoding=encoding,
vocabulary_size=vocabulary_size,
)
self._convert_input_args = False
self._allow_non_tensor_positional_args = True
self.supports_jit = False
@property
def compute_dtype(self):
return "string"
@property
def variable_dtype(self):
return "string"
def compute_output_shape(self, input_shape):
if self._output_mode == "int":
return (input_shape[0], self._output_sequence_length)
if self._split is None:
if len(input_shape) <= 1:
input_shape = tuple(input_shape) + (1,)
else:
input_shape = tuple(input_shape) + (None,)
return self._lookup_layer.compute_output_shape(input_shape)
def compute_output_spec(self, inputs):
output_shape = self.compute_output_shape(inputs.shape)
if self._output_mode == "int":
output_dtype = "int64"
else:
output_dtype = backend.floatx()
return backend.KerasTensor(output_shape, dtype=output_dtype)
def adapt(self, data, batch_size=None, steps=None):
"""Computes a vocabulary of string terms from tokens in a dataset.
Calling `adapt()` on a `TextVectorization` layer is an alternative to
passing in a precomputed vocabulary on construction via the `vocabulary`
argument. A `TextVectorization` layer should always be either adapted
over a dataset or supplied with a vocabulary.
During `adapt()`, the layer will build a vocabulary of all string tokens
seen in the dataset, sorted by occurrence count, with ties broken by
sort order of the tokens (high to low). At the end of `adapt()`, if
        `max_tokens` is set, the vocabulary will be truncated to `max_tokens`
size. For example, adapting a layer with `max_tokens=1000` will compute
the 1000 most frequent tokens occurring in the input dataset. If
`output_mode='tf-idf'`, `adapt()` will also learn the document
frequencies of each token in the input dataset.
Arguments:
data: The data to train on. It can be passed either as a
batched `tf.data.Dataset`, as a list of strings,
or as a NumPy array.
steps: Integer or `None`.
Total number of steps (batches of samples) to process.
If `data` is a `tf.data.Dataset`, and `steps` is `None`,
`adapt()` will run until the input dataset is exhausted.
When passing an infinitely
repeating dataset, you must specify the `steps` argument. This
argument is not supported with array inputs or list inputs.
"""
self.reset_state()
if isinstance(data, tf.data.Dataset):
if steps is not None:
data = data.take(steps)
for batch in data:
self.update_state(batch)
else:
data = tf_utils.ensure_tensor(data, dtype="string")
if data.shape.rank == 1:
                # A plain list of strings is treated as
                # one document per string.
data = tf.expand_dims(data, -1)
self.update_state(data)
self.finalize_state()
def update_state(self, data):
self._lookup_layer.update_state(self._preprocess(data))
def finalize_state(self):
self._lookup_layer.finalize_state()
def reset_state(self):
self._lookup_layer.reset_state()
def get_vocabulary(self, include_special_tokens=True):
"""Returns the current vocabulary of the layer.
Args:
include_special_tokens: If `True`, the returned vocabulary
will include the padding and OOV tokens,
and a term's index in the vocabulary will equal
the term's index when calling the layer. If `False`, the
returned vocabulary will not include any padding
or OOV tokens.
"""
return self._lookup_layer.get_vocabulary(include_special_tokens)
def vocabulary_size(self):
"""Gets the current size of the layer's vocabulary.
Returns:
The integer size of the vocabulary, including optional
mask and OOV indices.
"""
return self._lookup_layer.vocabulary_size()
def get_config(self):
config = {
"max_tokens": self._lookup_layer.max_tokens,
"standardize": self._standardize,
"split": self._split,
"ngrams": self._ngrams_arg,
"output_mode": self._output_mode,
"output_sequence_length": self._output_sequence_length,
"pad_to_max_tokens": self._lookup_layer.pad_to_max_tokens,
"sparse": self._lookup_layer.sparse,
"ragged": self._ragged,
"vocabulary": listify_tensors(self._lookup_layer.input_vocabulary),
"idf_weights": listify_tensors(
self._lookup_layer.input_idf_weights
),
"encoding": self._encoding,
"vocabulary_size": self.vocabulary_size(),
}
base_config = super().get_config()
return {**base_config, **config}
@classmethod
def from_config(cls, config):
if not isinstance(config["standardize"], str):
config["standardize"] = serialization_lib.deserialize_keras_object(
config["standardize"]
)
if not isinstance(config["split"], str):
config["split"] = serialization_lib.deserialize_keras_object(
config["split"]
)
return cls(**config)
def set_vocabulary(self, vocabulary, idf_weights=None):
"""Sets vocabulary (and optionally document frequency) for this layer.
This method sets the vocabulary and IDF weights for this layer directly,
instead of analyzing a dataset through `adapt()`. It should be used
whenever the vocab (and optionally document frequency) information is
already known. If vocabulary data is already present in the layer, this
method will replace it.
Args:
vocabulary: Either an array or a string path to a text file.
If passing an array, can pass a tuple, list, 1D NumPy array,
            or 1D tensor containing the vocabulary terms.
If passing a file path, the file should contain one line
per term in the vocabulary.
idf_weights: A tuple, list, 1D NumPy array, or 1D tensor of inverse
document frequency weights with equal length to vocabulary.
Must be set if `output_mode` is `"tf_idf"`.
Should not be set otherwise.
"""
self._lookup_layer.set_vocabulary(vocabulary, idf_weights=idf_weights)
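    # Illustrative sketch (toy vocabulary, assumed only for demonstration):
    #   layer.set_vocabulary(["foo", "bar", "baz"])
    # is an alternative to calling `adapt()` when the vocabulary is already
    # known ahead of time.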
def _preprocess(self, inputs):
inputs = tf_utils.ensure_tensor(inputs, dtype=tf.string)
if self._standardize in ("lower", "lower_and_strip_punctuation"):
inputs = tf.strings.lower(inputs)
if self._standardize in (
"strip_punctuation",
"lower_and_strip_punctuation",
):
inputs = tf.strings.regex_replace(
inputs, r'[!"#$%&()\*\+,-\./:;<=>?@\[\\\]^_`{|}~\']', ""
)
if callable(self._standardize):
inputs = self._standardize(inputs)
if self._split is not None:
            # If we are splitting, we validate that the last axis is of
            # dimension 1 and so can be squeezed out. We do this here instead
            # of after splitting for performance reasons - it's more expensive
            # to squeeze a ragged tensor.
if inputs.shape.rank > 1:
if inputs.shape[-1] != 1:
raise ValueError(
"When using `TextVectorization` to tokenize strings, "
"the input rank must be 1 or the last shape dimension "
f"must be 1. Received: inputs.shape={inputs.shape} "
f"with rank={inputs.shape.rank}"
)
else:
inputs = tf.squeeze(inputs, axis=-1)
if self._split == "whitespace":
# This treats multiple whitespaces as one whitespace, and strips
# leading and trailing whitespace.
inputs = tf.strings.split(inputs)
elif self._split == "character":
inputs = tf.strings.unicode_split(inputs, "UTF-8")
elif callable(self._split):
inputs = self._split(inputs)
# Note that 'inputs' here can be either ragged or dense depending on the
# configuration choices for this Layer. The strings.ngrams op, however,
# does support both ragged and dense inputs.
if self._ngrams is not None:
inputs = tf.strings.ngrams(
inputs, ngram_width=self._ngrams, separator=" "
)
return inputs
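    # Rough illustration of the preprocessing above, assuming the settings
    # standardize="lower_and_strip_punctuation", split="whitespace", ngrams=2
    # (values chosen only for demonstration):
    #   "Hello, WORLD!" -> "hello world" -> ["hello", "world"]
    #                   -> ["hello", "world", "hello world"]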
def call(self, inputs):
if not isinstance(
inputs, (tf.Tensor, tf.RaggedTensor, np.ndarray, list, tuple)
):
inputs = tf.convert_to_tensor(backend.convert_to_numpy(inputs))
inputs = self._preprocess(inputs)
# If we're not doing any output processing, return right away.
if self._output_mode is None:
outputs = inputs
lookup_data = self._lookup_layer.call(inputs)
# For any non-int output, we can return directly from the underlying
# layer.
if self._output_mode != "int":
outputs = lookup_data
if self._ragged:
outputs = lookup_data
# If we have a ragged tensor, we can pad during the conversion to dense.
if isinstance(lookup_data, tf.RaggedTensor):
shape = lookup_data.shape.as_list()
# If output sequence length is None, to_tensor will pad the last
# dimension to the bounding shape of the ragged dimension.
shape[-1] = self._output_sequence_length
outputs = lookup_data.to_tensor(default_value=0, shape=shape)
print("outputs (after conversion from ragged):", outputs)
# If we have a dense tensor, we need to pad/trim directly.
if self._output_sequence_length is not None:
# Maybe trim the output.
outputs = outputs[..., : self._output_sequence_length]
# Maybe pad the output. We need to be careful to use dynamic shape
# here as required_space_to_batch_paddings requires a fully known
# shape.
shape = tf.shape(outputs)
padded_shape = tf.concat(
(shape[:-1], [self._output_sequence_length]), 0
)
padding, _ = tf.required_space_to_batch_paddings(
shape, padded_shape
)
outputs = tf.pad(outputs, padding)
if (
backend.backend() != "tensorflow"
and not backend_utils.in_tf_graph()
):
outputs = backend.convert_to_tensor(outputs)
return outputs
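    # Behavioral note on the pad/trim logic above (toy numbers, assumed): with
    # `output_sequence_length=4`, a sample that tokenizes to 2 ids is padded
    # with 0 up to length 4, and a sample with 6 ids is truncated to the
    # first 4.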
def save_own_variables(self, store):
self._lookup_layer.save_own_variables(store)
def load_own_variables(self, store):
self._lookup_layer.load_own_variables(store)
def save_assets(self, dir_path):
self._lookup_layer.save_assets(dir_path)
def load_assets(self, dir_path):
self._lookup_layer.load_assets(dir_path)
| keras-core/keras_core/layers/preprocessing/text_vectorization.py/0 | {
"file_path": "keras-core/keras_core/layers/preprocessing/text_vectorization.py",
"repo_id": "keras-core",
"token_count": 11797
} | 43 |
import numpy as np
import pytest
from keras_core import layers
from keras_core import ops
from keras_core import testing
class Cropping1DTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_cropping_1d(self):
inputs = np.random.rand(3, 5, 7)
# Cropping with different values on the left and the right.
self.run_layer_test(
layers.Cropping1D,
init_kwargs={"cropping": (1, 2)},
input_data=inputs,
expected_output=ops.convert_to_tensor(inputs[:, 1:3, :]),
)
# Same cropping on the left and the right.
self.run_layer_test(
layers.Cropping1D,
init_kwargs={"cropping": (1, 1)},
input_data=inputs,
expected_output=ops.convert_to_tensor(inputs[:, 1:4, :]),
)
# Same cropping on the left and the right provided as an int.
self.run_layer_test(
layers.Cropping1D,
init_kwargs={"cropping": 1},
input_data=inputs,
expected_output=ops.convert_to_tensor(inputs[:, 1:4, :]),
)
# Cropping on the right only.
self.run_layer_test(
layers.Cropping1D,
init_kwargs={"cropping": (0, 1)},
input_data=inputs,
expected_output=ops.convert_to_tensor(inputs[:, 0:4, :]),
)
# Cropping on the left only.
self.run_layer_test(
layers.Cropping1D,
init_kwargs={"cropping": (1, 0)},
input_data=inputs,
expected_output=ops.convert_to_tensor(inputs[:, 1:5, :]),
)
@pytest.mark.requires_trainable_backend
def test_cropping_1d_with_dynamic_spatial_dim(self):
input_layer = layers.Input(batch_shape=(1, None, 7))
cropped = layers.Cropping1D((1, 2))(input_layer)
self.assertEqual(cropped.shape, (1, None, 7))
def test_cropping_1d_errors_if_cropping_argument_invalid(self):
with self.assertRaises(ValueError):
layers.Cropping1D(cropping=(1,))
with self.assertRaises(ValueError):
layers.Cropping1D(cropping=(1, 2, 3))
with self.assertRaises(ValueError):
layers.Cropping1D(cropping="1")
def test_cropping_1d_errors_if_cropping_more_than_available(self):
with self.assertRaises(ValueError):
input_layer = layers.Input(batch_shape=(3, 5, 7))
layers.Cropping1D(cropping=(2, 3))(input_layer)
| keras-core/keras_core/layers/reshaping/cropping1d_test.py/0 | {
"file_path": "keras-core/keras_core/layers/reshaping/cropping1d_test.py",
"repo_id": "keras-core",
"token_count": 1209
} | 44 |
# flake8: noqa
import numpy as np
import pytest
from absl.testing import parameterized
from keras_core import backend
from keras_core import layers
from keras_core import testing
class UpSampling2dTest(testing.TestCase, parameterized.TestCase):
@parameterized.product(
data_format=["channels_first", "channels_last"],
length_row=[2],
length_col=[2, 3],
)
@pytest.mark.requires_trainable_backend
def test_upsampling_2d(self, data_format, length_row, length_col):
num_samples = 2
stack_size = 2
input_num_row = 11
input_num_col = 12
if data_format == "channels_first":
inputs = np.random.rand(
num_samples, stack_size, input_num_row, input_num_col
)
else:
inputs = np.random.rand(
num_samples, input_num_row, input_num_col, stack_size
)
# basic test
self.run_layer_test(
layers.UpSampling2D,
init_kwargs={"size": (2, 2), "data_format": data_format},
input_shape=inputs.shape,
)
layer = layers.UpSampling2D(
size=(length_row, length_col),
data_format=data_format,
)
layer.build(inputs.shape)
np_output = layer(inputs=backend.Variable(inputs))
if data_format == "channels_first":
assert np_output.shape[2] == length_row * input_num_row
assert np_output.shape[3] == length_col * input_num_col
else:
assert np_output.shape[1] == length_row * input_num_row
assert np_output.shape[2] == length_col * input_num_col
# compare with numpy
if data_format == "channels_first":
expected_out = np.repeat(inputs, length_row, axis=2)
expected_out = np.repeat(expected_out, length_col, axis=3)
else:
expected_out = np.repeat(inputs, length_row, axis=1)
expected_out = np.repeat(expected_out, length_col, axis=2)
self.assertAllClose(np_output, expected_out)
@parameterized.product(
data_format=["channels_first", "channels_last"],
length_row=[2],
length_col=[2, 3],
)
@pytest.mark.requires_trainable_backend
def test_upsampling_2d_bilinear(self, data_format, length_row, length_col):
num_samples = 2
stack_size = 2
input_num_row = 11
input_num_col = 12
if data_format == "channels_first":
inputs = np.random.rand(
num_samples, stack_size, input_num_row, input_num_col
)
else:
inputs = np.random.rand(
num_samples, input_num_row, input_num_col, stack_size
)
self.run_layer_test(
layers.UpSampling2D,
init_kwargs={
"size": (2, 2),
"data_format": data_format,
"interpolation": "bilinear",
},
input_shape=inputs.shape,
)
layer = layers.UpSampling2D(
size=(length_row, length_col),
data_format=data_format,
)
layer.build(inputs.shape)
np_output = layer(inputs=backend.Variable(inputs))
if data_format == "channels_first":
self.assertEqual(np_output.shape[2], length_row * input_num_row)
self.assertEqual(np_output.shape[3], length_col * input_num_col)
else:
self.assertEqual(np_output.shape[1], length_row * input_num_row)
self.assertEqual(np_output.shape[2], length_col * input_num_col)
def test_upsampling_2d_correctness(self):
input_shape = (2, 2, 1, 3)
x = np.arange(np.prod(input_shape)).reshape(input_shape)
self.assertAllClose(
layers.UpSampling2D(size=(1, 2))(x),
# fmt: off
np.array(
[[[[ 0., 1., 2.],
[ 0., 1., 2.]],
[[ 3., 4., 5.],
[ 3., 4., 5.]]],
[[[ 6., 7., 8.],
[ 6., 7., 8.]],
[[ 9., 10., 11.],
[ 9., 10., 11.]]]]
),
# fmt: on
)
def test_upsampling_2d_various_interpolation_methods(self):
input_shape = (2, 2, 1, 3)
x = np.arange(np.prod(input_shape)).reshape(input_shape)
for interpolation in ["nearest", "bilinear", "bicubic"]:
layers.UpSampling2D(size=(1, 2), interpolation=interpolation)(x)
@pytest.mark.skipif(
backend.backend() == "torch", reason="Torch does not support lanczos."
)
def test_upsampling_2d_lanczos_interpolation_methods(self):
input_shape = (2, 2, 1, 3)
x = np.arange(np.prod(input_shape)).reshape(input_shape)
for interpolation in ["lanczos3", "lanczos5"]:
layers.UpSampling2D(size=(1, 2), interpolation=interpolation)(x)
| keras-core/keras_core/layers/reshaping/up_sampling2d_test.py/0 | {
"file_path": "keras-core/keras_core/layers/reshaping/up_sampling2d_test.py",
"repo_id": "keras-core",
"token_count": 2511
} | 45 |
import numpy as np
import pytest
from keras_core import initializers
from keras_core import layers
from keras_core import testing
class ConvLSTM2DTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_basics(self):
self.run_layer_test(
layers.ConvLSTM2D,
init_kwargs={"filters": 5, "kernel_size": 3, "padding": "same"},
input_shape=(3, 2, 4, 4, 3),
expected_output_shape=(3, 4, 4, 5),
expected_num_trainable_weights=3,
expected_num_non_trainable_weights=0,
supports_masking=True,
)
self.run_layer_test(
layers.ConvLSTM2D,
init_kwargs={
"filters": 5,
"kernel_size": 3,
"padding": "valid",
"recurrent_dropout": 0.5,
},
input_shape=(3, 2, 8, 8, 3),
call_kwargs={"training": True},
expected_output_shape=(3, 6, 6, 5),
expected_num_trainable_weights=3,
expected_num_non_trainable_weights=0,
supports_masking=True,
)
self.run_layer_test(
layers.ConvLSTM2D,
init_kwargs={
"filters": 5,
"kernel_size": 3,
"padding": "valid",
"return_sequences": True,
},
input_shape=(3, 2, 8, 8, 3),
expected_output_shape=(3, 2, 6, 6, 5),
expected_num_trainable_weights=3,
expected_num_non_trainable_weights=0,
supports_masking=True,
)
def test_correctness(self):
sequence = (
np.arange(480).reshape((2, 3, 4, 4, 5)).astype("float32") / 100
)
layer = layers.ConvLSTM2D(
filters=2,
kernel_size=3,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
)
output = layer(sequence)
self.assertAllClose(
np.array(
[
[
[[0.48694518, 0.48694518], [0.50237733, 0.50237733]],
[[0.5461202, 0.5461202], [0.5598283, 0.5598283]],
],
[
[[0.8661607, 0.8661607], [0.86909103, 0.86909103]],
[[0.8774414, 0.8774414], [0.8800861, 0.8800861]],
],
]
),
output,
)
| keras-core/keras_core/layers/rnn/conv_lstm2d_test.py/0 | {
"file_path": "keras-core/keras_core/layers/rnn/conv_lstm2d_test.py",
"repo_id": "keras-core",
"token_count": 1510
} | 46 |
"""Wrapper layer to apply every temporal slice of an input."""
from keras_core import backend
from keras_core import ops
from keras_core.api_export import keras_core_export
from keras_core.layers.core.wrapper import Wrapper
from keras_core.layers.layer import Layer
@keras_core_export("keras_core.layers.TimeDistributed")
class TimeDistributed(Wrapper):
"""This wrapper allows to apply a layer to every temporal slice of an input.
Every input should be at least 3D, and the dimension of index one of the
first input will be considered to be the temporal dimension.
Consider a batch of 32 video samples, where each sample is a 128x128 RGB
image with `channels_last` data format, across 10 timesteps.
The batch input shape is `(32, 10, 128, 128, 3)`.
You can then use `TimeDistributed` to apply the same `Conv2D` layer to each
of the 10 timesteps, independently:
>>> inputs = layers.Input(shape=(10, 128, 128, 3), batch_size=32)
>>> conv_2d_layer = layers.Conv2D(64, (3, 3))
>>> outputs = layers.TimeDistributed(conv_2d_layer)(inputs)
>>> outputs.shape
(32, 10, 126, 126, 64)
    Because `TimeDistributed` applies the same instance of `Conv2D` to each of
    the timesteps, the same set of weights is used at each timestep.
Args:
layer: a `keras_core.layers.Layer` instance.
Call arguments:
inputs: Input tensor of shape (batch, time, ...) or nested tensors,
and each of which has shape (batch, time, ...).
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. This argument is passed to the
wrapped layer (only if the layer supports this argument).
mask: Binary tensor of shape `(samples, timesteps)` indicating whether
a given timestep should be masked. This argument is passed to the
wrapped layer (only if the layer supports this argument).
"""
def __init__(self, layer, **kwargs):
if not isinstance(layer, Layer):
raise ValueError(
"Please initialize `TimeDistributed` layer with a "
f"`keras_core.layers.Layer` instance. Received: {layer}"
)
super().__init__(layer, **kwargs)
self.supports_masking = True
def _get_child_input_shape(self, input_shape):
if not isinstance(input_shape, (tuple, list)) or len(input_shape) < 3:
raise ValueError(
"`TimeDistributed` Layer should be passed an `input_shape` "
f"with at least 3 dimensions, received: {input_shape}"
)
return (input_shape[0], *input_shape[2:])
def compute_output_shape(self, input_shape):
child_input_shape = self._get_child_input_shape(input_shape)
child_output_shape = self.layer.compute_output_shape(child_input_shape)
return (child_output_shape[0], input_shape[1], *child_output_shape[1:])
def build(self, input_shape):
child_input_shape = self._get_child_input_shape(input_shape)
super().build(child_input_shape)
self.built = True
def call(self, inputs, training=None, mask=None):
input_shape = inputs.shape
mask_shape = None if mask is None else tuple(mask.shape)
batch_size = input_shape[0]
timesteps = input_shape[1]
if mask_shape is not None and mask_shape[:2] != (batch_size, timesteps):
raise ValueError(
"`TimeDistributed` Layer should be passed a `mask` of shape "
f"({batch_size}, {timesteps}, ...), "
f"received: mask.shape={mask_shape}"
)
def time_distributed_transpose(data):
"""Swaps the timestep and batch dimensions of a tensor."""
axes = [1, 0, *range(2, len(data.shape))]
return ops.transpose(data, axes=axes)
inputs = time_distributed_transpose(inputs)
if mask is not None:
mask = time_distributed_transpose(mask)
def step_function(i):
kwargs = {}
if self.layer._call_has_mask_arg and mask is not None:
kwargs["mask"] = mask[i]
if self.layer._call_has_training_arg:
kwargs["training"] = training
return self.layer.call(inputs[i], **kwargs)
        # Implementation #1: if the time axis is static, use a Python for loop.
if inputs.shape[0] is not None:
outputs = ops.stack(
[step_function(i) for i in range(inputs.shape[0])]
)
return time_distributed_transpose(outputs)
# Implementation #2: use backend.vectorized_map.
outputs = backend.vectorized_map(step_function, ops.arange(timesteps))
return time_distributed_transpose(outputs)
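    # Minimal usage sketch (illustrative; layer choice and shapes are
    # assumptions): wrapping a `Dense(8)` layer applies the same kernel to
    # every timestep, so an input of shape (32, 10, 16) produces an output of
    # shape (32, 10, 8).
    #
    #   distributed = TimeDistributed(Dense(8))
    #   outputs = distributed(ops.ones((32, 10, 16)))  # -> (32, 10, 8)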
| keras-core/keras_core/layers/rnn/time_distributed.py/0 | {
"file_path": "keras-core/keras_core/layers/rnn/time_distributed.py",
"repo_id": "keras-core",
"token_count": 1949
} | 47 |
import json
import threading
import tree
from absl import logging
from keras_core import backend
from keras_core import layers
from keras_core import losses
from keras_core import metrics as metrics_module
from keras_core import models
from keras_core import optimizers
from keras_core.legacy.saving import serialization
from keras_core.saving import object_registration
MODULE_OBJECTS = threading.local()
# Legacy lambda arguments not found in Keras Core
LAMBDA_DEP_ARGS = (
"module",
"function_type",
"output_shape_type",
"output_shape_module",
)
def model_from_config(config, custom_objects=None):
"""Instantiates a Keras model from its config.
Args:
config: Configuration dictionary.
custom_objects: Optional dictionary mapping names
(strings) to custom classes or functions to be
considered during deserialization.
Returns:
A Keras model instance (uncompiled).
Raises:
TypeError: if `config` is not a dictionary.
"""
if isinstance(config, list):
raise TypeError(
"`model_from_config` expects a dictionary, not a list. "
f"Received: config={config}. Did you meant to use "
"`Sequential.from_config(config)`?"
)
global MODULE_OBJECTS
if not hasattr(MODULE_OBJECTS, "ALL_OBJECTS"):
MODULE_OBJECTS.ALL_OBJECTS = layers.__dict__
MODULE_OBJECTS.ALL_OBJECTS["InputLayer"] = layers.InputLayer
MODULE_OBJECTS.ALL_OBJECTS["Functional"] = models.Functional
MODULE_OBJECTS.ALL_OBJECTS["Model"] = models.Model
MODULE_OBJECTS.ALL_OBJECTS["Sequential"] = models.Sequential
batch_input_shape = config["config"].pop("batch_input_shape", None)
if batch_input_shape is not None:
if config["class_name"] == "InputLayer":
config["config"]["batch_shape"] = batch_input_shape
else:
config["config"]["input_shape"] = batch_input_shape
    axis = config["config"].pop("axis", None)
    if axis is not None and isinstance(axis, list) and len(axis) == 1:
        config["config"]["axis"] = int(axis[0])
    elif axis is not None:
        config["config"]["axis"] = axis
# Handle backwards compatibility for Keras lambdas
if config["class_name"] == "Lambda":
for dep_arg in LAMBDA_DEP_ARGS:
_ = config["config"].pop(dep_arg, None)
function_config = config["config"]["function"]
if isinstance(function_config, list):
function_dict = {"class_name": "__lambda__", "config": {}}
function_dict["config"]["code"] = function_config[0]
function_dict["config"]["defaults"] = function_config[1]
function_dict["config"]["closure"] = function_config[2]
config["config"]["function"] = function_dict
# TODO(nkovela): Swap find and replace args during Keras 3.0 release
# Replace keras refs with keras_core
config = _find_replace_nested_dict(config, "keras.", "keras_core.")
return serialization.deserialize_keras_object(
config,
module_objects=MODULE_OBJECTS.ALL_OBJECTS,
custom_objects=custom_objects,
printable_module_name="layer",
)
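# Illustrative use of `model_from_config` (the config dict below is an
# assumption; in practice it is the dict produced when a model is serialized,
# with "class_name" and "config" keys):
#
#   config = {"class_name": "Sequential", "config": {...}}
#   model = model_from_config(config)  # returns an uncompiled model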
def model_metadata(model, include_optimizer=True, require_config=True):
"""Returns a dictionary containing the model metadata."""
from keras_core import __version__ as keras_version
model_config = {"class_name": model.__class__.__name__}
try:
model_config["config"] = model.get_config()
except NotImplementedError as e:
if require_config:
raise e
metadata = dict(
keras_version=str(keras_version),
backend=backend.backend(),
model_config=model_config,
)
if getattr(model, "optimizer", False) and include_optimizer:
if model.compiled:
training_config = model._compile_config.config
training_config.pop("optimizer", None) # Handled separately.
metadata["training_config"] = _serialize_nested_config(
training_config
)
optimizer_config = {
"class_name": object_registration.get_registered_name(
model.optimizer.__class__
),
"config": model.optimizer.get_config(),
}
metadata["training_config"]["optimizer_config"] = optimizer_config
return metadata
def compile_args_from_training_config(training_config, custom_objects=None):
"""Return model.compile arguments from training config."""
if custom_objects is None:
custom_objects = {}
with object_registration.CustomObjectScope(custom_objects):
optimizer_config = training_config["optimizer_config"]
optimizer = optimizers.deserialize(optimizer_config)
# Ensure backwards compatibility for optimizers in legacy H5 files
optimizer = _resolve_compile_arguments_compat(
optimizer, optimizer_config, optimizers
)
# Recover losses.
loss = None
loss_config = training_config.get("loss", None)
if loss_config is not None:
loss = _deserialize_nested_config(losses.deserialize, loss_config)
# Ensure backwards compatibility for losses in legacy H5 files
loss = _resolve_compile_arguments_compat(loss, loss_config, losses)
# Recover metrics.
metrics = None
metrics_config = training_config.get("metrics", None)
if metrics_config is not None:
metrics = _deserialize_nested_config(
_deserialize_metric, metrics_config
)
# Ensure backwards compatibility for metrics in legacy H5 files
metrics = _resolve_compile_arguments_compat(
metrics, metrics_config, metrics_module
)
# Recover weighted metrics.
weighted_metrics = None
weighted_metrics_config = training_config.get("weighted_metrics", None)
if weighted_metrics_config is not None:
weighted_metrics = _deserialize_nested_config(
_deserialize_metric, weighted_metrics_config
)
loss_weights = training_config["loss_weights"]
return dict(
optimizer=optimizer,
loss=loss,
metrics=metrics,
weighted_metrics=weighted_metrics,
loss_weights=loss_weights,
)
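# Illustrative usage (assumes `metadata` was produced by `model_metadata()`
# above with `include_optimizer=True`):
#
#   compile_args = compile_args_from_training_config(
#       metadata["training_config"]
#   )
#   model.compile(**compile_args)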
def _serialize_nested_config(config):
"""Serialized a nested structure of Keras objects."""
def _serialize_fn(obj):
if callable(obj):
return serialization.serialize_keras_object(obj)
return obj
return tree.map_structure(_serialize_fn, config)
def _deserialize_nested_config(deserialize_fn, config):
"""Deserializes arbitrary Keras `config` using `deserialize_fn`."""
def _is_single_object(obj):
if isinstance(obj, dict) and "class_name" in obj:
return True # Serialized Keras object.
if isinstance(obj, str):
return True # Serialized function or string.
return False
if config is None:
return None
if _is_single_object(config):
return deserialize_fn(config)
elif isinstance(config, dict):
return {
k: _deserialize_nested_config(deserialize_fn, v)
for k, v in config.items()
}
elif isinstance(config, (tuple, list)):
return [
_deserialize_nested_config(deserialize_fn, obj) for obj in config
]
raise ValueError(
"Saved configuration not understood. Configuration should be a "
f"dictionary, string, tuple or list. Received: config={config}."
)
def _deserialize_metric(metric_config):
"""Deserialize metrics, leaving special strings untouched."""
if metric_config in ["accuracy", "acc", "crossentropy", "ce"]:
# Do not deserialize accuracy and cross-entropy strings as we have
# special case handling for these in compile, based on model output
# shape.
return metric_config
return metrics_module.deserialize(metric_config)
def _find_replace_nested_dict(config, find, replace):
dict_str = json.dumps(config)
dict_str = dict_str.replace(find, replace)
config = json.loads(dict_str)
return config
def _resolve_compile_arguments_compat(obj, obj_config, module):
"""Resolves backwards compatiblity issues with training config arguments.
This helper function accepts built-in Keras modules such as optimizers,
losses, and metrics to ensure an object being deserialized is compatible
with Keras Core built-ins. For legacy H5 files saved within Keras Core,
this does nothing.
"""
if isinstance(obj, str) and obj not in module.ALL_OBJECTS_DICT:
obj = module.get(obj_config["config"]["name"])
return obj
def try_build_compiled_arguments(model):
try:
if not model.compiled_loss.built:
model.compiled_loss.build(model.outputs)
if not model.compiled_metrics.built:
model.compiled_metrics.build(model.outputs, model.outputs)
except:
logging.warning(
"Compiled the loaded model, but the compiled metrics have "
"yet to be built. `model.compile_metrics` will be empty "
"until you train or evaluate the model."
)
| keras-core/keras_core/legacy/saving/saving_utils.py/0 | {
"file_path": "keras-core/keras_core/legacy/saving/saving_utils.py",
"repo_id": "keras-core",
"token_count": 3741
} | 48 |
from keras_core import backend
from keras_core import initializers
from keras_core import ops
from keras_core.api_export import keras_core_export
from keras_core.metrics.metric import Metric
from keras_core.metrics.metrics_utils import confusion_matrix
class _IoUBase(Metric):
"""Computes the confusion matrix for Intersection-Over-Union metrics.
Formula:
```python
iou = true_positives / (true_positives + false_positives + false_negatives)
```
Intersection-Over-Union is a common evaluation metric for semantic image
segmentation.
From IoUs of individual classes, the MeanIoU can be computed as the mean of
the individual IoUs.
To compute IoUs, the predictions are accumulated in a confusion matrix,
weighted by `sample_weight` and the metric is then calculated from it.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Args:
num_classes: The possible number of labels the prediction task can have.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
ignore_class: Optional integer. The ID of a class to be ignored during
metric computation. This is useful, for example, in segmentation
problems featuring a "void" class (commonly -1 or 255) in
segmentation maps. By default (`ignore_class=None`), all classes are
considered.
sparse_y_true: Whether labels are encoded using integers or
dense floating point vectors. If `False`, the `argmax` function
is used to determine each sample's most likely associated label.
sparse_y_pred: Whether predictions are encoded using integers or
dense floating point vectors. If `False`, the `argmax` function
is used to determine each sample's most likely associated label.
        axis: (Optional) The dimension containing the logits.
            Defaults to `-1`.
"""
def __init__(
self,
num_classes,
name=None,
dtype=None,
ignore_class=None,
sparse_y_true=True,
sparse_y_pred=True,
axis=-1,
):
# defaulting to float32 to avoid issues with confusion matrix
super().__init__(name=name, dtype=dtype or "float32")
self.num_classes = num_classes
self.ignore_class = ignore_class
self.sparse_y_true = sparse_y_true
self.sparse_y_pred = sparse_y_pred
self.axis = axis
self.total_cm = self.add_variable(
name="total_confusion_matrix",
shape=(num_classes, num_classes),
initializer=initializers.Zeros(),
)
def update_state(self, y_true, y_pred, sample_weight=None):
"""Accumulates the confusion matrix statistics.
Args:
y_true: The ground truth values.
y_pred: The predicted values.
sample_weight: Optional weighting of each example. Can
be a `Tensor` whose rank is either 0, or the same as `y_true`,
and must be broadcastable to `y_true`. Defaults to `1`.
Returns:
Update op.
"""
if not self.sparse_y_true:
y_true = ops.argmax(y_true, axis=self.axis)
if not self.sparse_y_pred:
y_pred = ops.argmax(y_pred, axis=self.axis)
y_true = ops.convert_to_tensor(y_true, dtype=self.dtype)
y_pred = ops.convert_to_tensor(y_pred, dtype=self.dtype)
# Flatten the input if its rank > 1.
if len(y_pred.shape) > 1:
y_pred = ops.reshape(y_pred, [-1])
if len(y_true.shape) > 1:
y_true = ops.reshape(y_true, [-1])
if sample_weight is None:
sample_weight = 1
sample_weight = ops.convert_to_tensor(sample_weight, dtype=self.dtype)
if len(sample_weight.shape) > 1:
sample_weight = ops.reshape(sample_weight, [-1])
sample_weight = ops.broadcast_to(sample_weight, ops.shape(y_true))
if self.ignore_class is not None:
ignore_class = ops.convert_to_tensor(
self.ignore_class, y_true.dtype
)
valid_mask = ops.not_equal(y_true, ignore_class)
y_true = y_true[valid_mask]
y_pred = y_pred[valid_mask]
if sample_weight is not None:
sample_weight = sample_weight[valid_mask]
y_pred = ops.cast(y_pred, dtype=self.dtype)
y_true = ops.cast(y_true, dtype=self.dtype)
sample_weight = ops.cast(sample_weight, dtype=self.dtype)
current_cm = confusion_matrix(
y_true,
y_pred,
self.num_classes,
weights=sample_weight,
dtype="float32",
)
return self.total_cm.assign(self.total_cm + current_cm)
def reset_state(self):
self.total_cm.assign(
ops.zeros(self.total_cm.shape, dtype=self.total_cm.dtype)
)
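    # Sketch of how per-class IoU is later derived from `total_cm` (NumPy
    # notation used only for illustration; the subclasses below use `ops`):
    #   true_positives = np.diag(cm)
    #   denominator = cm.sum(axis=0) + cm.sum(axis=1) - true_positives
    #   iou_per_class = true_positives / denominator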
@keras_core_export("keras_core.metrics.IoU")
class IoU(_IoUBase):
"""Computes the Intersection-Over-Union metric for specific target classes.
Formula:
```python
iou = true_positives / (true_positives + false_positives + false_negatives)
```
Intersection-Over-Union is a common evaluation metric for semantic image
segmentation.
To compute IoUs, the predictions are accumulated in a confusion matrix,
weighted by `sample_weight` and the metric is then calculated from it.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Note, this class first computes IoUs for all individual classes, then
returns the mean of IoUs for the classes that are specified by
`target_class_ids`. If `target_class_ids` has only one id value, the IoU of
that specific class is returned.
Args:
num_classes: The possible number of labels the prediction task can have.
target_class_ids: A tuple or list of target class ids for which the
metric is returned. To compute IoU for a specific class, a list
(or tuple) of a single id value should be provided.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
ignore_class: Optional integer. The ID of a class to be ignored during
metric computation. This is useful, for example, in segmentation
problems featuring a "void" class (commonly -1 or 255) in
segmentation maps. By default (`ignore_class=None`), all classes are
considered.
sparse_y_true: Whether labels are encoded using integers or
dense floating point vectors. If `False`, the `argmax` function
is used to determine each sample's most likely associated label.
sparse_y_pred: Whether predictions are encoded using integers or
dense floating point vectors. If `False`, the `argmax` function
is used to determine each sample's most likely associated label.
        axis: (Optional) The dimension containing the logits.
            Defaults to `-1`.
Examples:
Standalone usage:
>>> # cm = [[1, 1],
>>> # [1, 1]]
>>> # sum_row = [2, 2], sum_col = [2, 2], true_positives = [1, 1]
    >>> # iou = true_positives / (sum_row + sum_col - true_positives)
>>> # iou = [0.33, 0.33]
>>> m = keras_core.metrics.IoU(num_classes=2, target_class_ids=[0])
>>> m.update_state([0, 0, 1, 1], [0, 1, 0, 1])
>>> m.result()
0.33333334
>>> m.reset_state()
>>> m.update_state([0, 0, 1, 1], [0, 1, 0, 1],
... sample_weight=[0.3, 0.3, 0.3, 0.1])
>>> # cm = [[0.3, 0.3],
>>> # [0.3, 0.1]]
>>> # sum_row = [0.6, 0.4], sum_col = [0.6, 0.4],
>>> # true_positives = [0.3, 0.1]
>>> # iou = [0.33, 0.14]
>>> m.result()
0.33333334
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[keras_core.metrics.IoU(num_classes=2, target_class_ids=[0])])
```
"""
def __init__(
self,
num_classes,
target_class_ids,
name=None,
dtype=None,
ignore_class=None,
sparse_y_true=True,
sparse_y_pred=True,
axis=-1,
):
super().__init__(
name=name,
num_classes=num_classes,
ignore_class=ignore_class,
sparse_y_true=sparse_y_true,
sparse_y_pred=sparse_y_pred,
axis=axis,
dtype=dtype,
)
if max(target_class_ids) >= num_classes:
raise ValueError(
f"Target class id {max(target_class_ids)} "
"is out of range, which is "
f"[{0}, {num_classes})."
)
self.target_class_ids = list(target_class_ids)
def result(self):
"""Compute the intersection-over-union via the confusion matrix."""
sum_over_row = ops.cast(
ops.sum(self.total_cm, axis=0), dtype=self.dtype
)
sum_over_col = ops.cast(
ops.sum(self.total_cm, axis=1), dtype=self.dtype
)
true_positives = ops.cast(ops.diag(self.total_cm), dtype=self.dtype)
# sum_over_row + sum_over_col =
# 2 * true_positives + false_positives + false_negatives.
denominator = sum_over_row + sum_over_col - true_positives
target_class_ids = ops.convert_to_tensor(
self.target_class_ids, dtype="int32"
)
# Only keep the target classes
true_positives = ops.take_along_axis(
true_positives, target_class_ids, axis=-1
)
denominator = ops.take_along_axis(
denominator, target_class_ids, axis=-1
)
# If the denominator is 0, we need to ignore the class.
num_valid_entries = ops.sum(
ops.cast(ops.greater(denominator, 1e-9), dtype=self.dtype)
)
iou = ops.divide(true_positives, denominator + backend.epsilon())
return ops.divide(
ops.sum(iou, axis=self.axis), num_valid_entries + backend.epsilon()
)
def get_config(self):
config = {
"num_classes": self.num_classes,
"target_class_ids": self.target_class_ids,
"ignore_class": self.ignore_class,
"sparse_y_true": self.sparse_y_true,
"sparse_y_pred": self.sparse_y_pred,
"axis": self.axis,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_core_export("keras_core.metrics.BinaryIoU")
class BinaryIoU(IoU):
"""Computes the Intersection-Over-Union metric for class 0 and/or 1.
Formula:
```python
iou = true_positives / (true_positives + false_positives + false_negatives)
```
Intersection-Over-Union is a common evaluation metric for semantic image
segmentation.
To compute IoUs, the predictions are accumulated in a confusion matrix,
weighted by `sample_weight` and the metric is then calculated from it.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
This class can be used to compute IoUs for a binary classification task
where the predictions are provided as logits. First a `threshold` is applied
to the predicted values such that those that are below the `threshold` are
converted to class 0 and those that are above the `threshold` are converted
to class 1.
IoUs for classes 0 and 1 are then computed, the mean of IoUs for the classes
that are specified by `target_class_ids` is returned.
Note: with `threshold=0`, this metric has the same behavior as `IoU`.
Args:
target_class_ids: A tuple or list of target class ids for which the
metric is returned. Options are `[0]`, `[1]`, or `[0, 1]`. With
`[0]` (or `[1]`), the IoU metric for class 0 (or class 1,
respectively) is returned. With `[0, 1]`, the mean of IoUs for the
two classes is returned.
threshold: A threshold that applies to the prediction logits to convert
them to either predicted class 0 if the logit is below `threshold`
or predicted class 1 if the logit is above `threshold`.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Examples:
Standalone usage:
>>> m = keras_core.metrics.BinaryIoU(target_class_ids=[0, 1], threshold=0.3)
>>> m.update_state([0, 1, 0, 1], [0.1, 0.2, 0.4, 0.7])
>>> m.result()
0.33333334
>>> m.reset_state()
>>> m.update_state([0, 1, 0, 1], [0.1, 0.2, 0.4, 0.7],
... sample_weight=[0.2, 0.3, 0.4, 0.1])
>>> # cm = [[0.2, 0.4],
>>> # [0.3, 0.1]]
>>> # sum_row = [0.6, 0.4], sum_col = [0.5, 0.5],
>>> # true_positives = [0.2, 0.1]
>>> # iou = [0.222, 0.125]
>>> m.result()
0.17361112
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[keras_core.metrics.BinaryIoU(
target_class_ids=[0],
threshold=0.5
)]
)
```
"""
def __init__(
self,
target_class_ids=(0, 1),
threshold=0.5,
name=None,
dtype=None,
):
super().__init__(
num_classes=2,
target_class_ids=target_class_ids,
name=name,
dtype=dtype,
)
self.threshold = threshold
def update_state(self, y_true, y_pred, sample_weight=None):
"""Accumulates the confusion matrix statistics.
Before the confusion matrix is updated, the predicted values are
thresholded to be:
0 for values that are smaller than the `threshold`
1 for values that are larger or equal to the `threshold`
Args:
y_true: The ground truth values.
y_pred: The predicted values.
sample_weight: Optional weighting of each example. Can
be a `Tensor` whose rank is either 0, or the same as `y_true`,
and must be broadcastable to `y_true`. Defaults to `1`.
Returns:
Update op.
"""
y_true = ops.convert_to_tensor(y_true, dtype=self.dtype)
y_pred = ops.convert_to_tensor(y_pred, dtype=self.dtype)
y_pred = ops.cast(y_pred >= self.threshold, self.dtype)
return super().update_state(y_true, y_pred, sample_weight)
def get_config(self):
return {
"target_class_ids": self.target_class_ids,
"threshold": self.threshold,
"name": self.name,
"dtype": self._dtype,
}
@keras_core_export("keras_core.metrics.MeanIoU")
class MeanIoU(IoU):
"""Computes the mean Intersection-Over-Union metric.
Formula:
```python
iou = true_positives / (true_positives + false_positives + false_negatives)
```
Intersection-Over-Union is a common evaluation metric for semantic image
segmentation.
To compute IoUs, the predictions are accumulated in a confusion matrix,
weighted by `sample_weight` and the metric is then calculated from it.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Note that this class first computes IoUs for all individual classes, then
returns the mean of these values.
Args:
num_classes: The possible number of labels the prediction task can have.
This value must be provided, since a confusion matrix of dimension =
[num_classes, num_classes] will be allocated.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
ignore_class: Optional integer. The ID of a class to be ignored during
metric computation. This is useful, for example, in segmentation
problems featuring a "void" class (commonly -1 or 255) in
segmentation maps. By default (`ignore_class=None`), all classes are
considered.
sparse_y_true: Whether labels are encoded using integers or
dense floating point vectors. If `False`, the `argmax` function
is used to determine each sample's most likely associated label.
sparse_y_pred: Whether predictions are encoded using integers or
dense floating point vectors. If `False`, the `argmax` function
is used to determine each sample's most likely associated label.
axis: (Optional) The dimension containing the logits. Defaults to `-1`.
Examples:
Standalone usage:
>>> # cm = [[1, 1],
>>> # [1, 1]]
>>> # sum_row = [2, 2], sum_col = [2, 2], true_positives = [1, 1]
    >>> # iou = true_positives / (sum_row + sum_col - true_positives)
>>> # result = (1 / (2 + 2 - 1) + 1 / (2 + 2 - 1)) / 2 = 0.33
>>> m = keras_core.metrics.MeanIoU(num_classes=2)
>>> m.update_state([0, 0, 1, 1], [0, 1, 0, 1])
>>> m.result()
0.33333334
>>> m.reset_state()
>>> m.update_state([0, 0, 1, 1], [0, 1, 0, 1],
... sample_weight=[0.3, 0.3, 0.3, 0.1])
>>> m.result().numpy()
0.23809525
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[keras_core.metrics.MeanIoU(num_classes=2)])
```
"""
def __init__(
self,
num_classes,
name=None,
dtype=None,
ignore_class=None,
sparse_y_true=True,
sparse_y_pred=True,
axis=-1,
):
target_class_ids = list(range(num_classes))
super().__init__(
name=name,
num_classes=num_classes,
target_class_ids=target_class_ids,
axis=axis,
dtype=dtype,
ignore_class=ignore_class,
sparse_y_true=sparse_y_true,
sparse_y_pred=sparse_y_pred,
)
def get_config(self):
return {
"num_classes": self.num_classes,
"name": self.name,
"dtype": self._dtype,
"ignore_class": self.ignore_class,
"sparse_y_true": self.sparse_y_true,
"sparse_y_pred": self.sparse_y_pred,
"axis": self.axis,
}
@keras_core_export("keras_core.metrics.OneHotIoU")
class OneHotIoU(IoU):
"""Computes the Intersection-Over-Union metric for one-hot encoded labels.
Formula:
```python
iou = true_positives / (true_positives + false_positives + false_negatives)
```
Intersection-Over-Union is a common evaluation metric for semantic image
segmentation.
To compute IoUs, the predictions are accumulated in a confusion matrix,
weighted by `sample_weight` and the metric is then calculated from it.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
This class can be used to compute IoU for multi-class classification tasks
where the labels are one-hot encoded (the last axis should have one
dimension per class). Note that the predictions should also have the same
shape. To compute the IoU, first the labels and predictions are converted
back into integer format by taking the argmax over the class axis. Then the
same computation steps as for the base `IoU` class apply.
Note, if there is only one channel in the labels and predictions, this class
is the same as class `IoU`. In this case, use `IoU` instead.
Also, make sure that `num_classes` is equal to the number of classes in the
data, to avoid a "labels out of bound" error when the confusion matrix is
computed.
Args:
num_classes: The possible number of labels the prediction task can have.
target_class_ids: A tuple or list of target class ids for which the
metric is returned. To compute IoU for a specific class, a list
(or tuple) of a single id value should be provided.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
ignore_class: Optional integer. The ID of a class to be ignored during
metric computation. This is useful, for example, in segmentation
problems featuring a "void" class (commonly -1 or 255) in
segmentation maps. By default (`ignore_class=None`), all classes are
considered.
sparse_y_pred: Whether predictions are encoded using integers or
dense floating point vectors. If `False`, the `argmax` function
is used to determine each sample's most likely associated label.
axis: (Optional) The dimension containing the logits. Defaults to `-1`.
Examples:
Standalone usage:
>>> y_true = np.array([[0, 0, 1], [1, 0, 0], [0, 1, 0], [1, 0, 0]])
>>> y_pred = np.array([[0.2, 0.3, 0.5], [0.1, 0.2, 0.7], [0.5, 0.3, 0.1],
... [0.1, 0.4, 0.5]])
>>> sample_weight = [0.1, 0.2, 0.3, 0.4]
>>> m = keras_core.metrics.OneHotIoU(num_classes=3, target_class_ids=[0, 2])
>>> m.update_state(
... y_true=y_true, y_pred=y_pred, sample_weight=sample_weight)
>>> # cm = [[0, 0, 0.2+0.4],
>>> # [0.3, 0, 0],
>>> # [0, 0, 0.1]]
>>> # sum_row = [0.3, 0, 0.7], sum_col = [0.6, 0.3, 0.1]
>>> # true_positives = [0, 0, 0.1]
    >>> # single_iou = true_positives / (sum_row + sum_col - true_positives)
>>> # mean_iou = (0 / (0.3 + 0.6 - 0) + 0.1 / (0.7 + 0.1 - 0.1)) / 2
>>> m.result()
0.071
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[keras_core.metrics.OneHotIoU(
num_classes=3,
            target_class_ids=[1]
)]
)
```
"""
def __init__(
self,
num_classes,
target_class_ids,
name=None,
dtype=None,
ignore_class=None,
sparse_y_pred=False,
axis=-1,
):
super().__init__(
num_classes=num_classes,
target_class_ids=target_class_ids,
name=name,
dtype=dtype,
ignore_class=ignore_class,
sparse_y_true=False,
sparse_y_pred=sparse_y_pred,
axis=axis,
)
def get_config(self):
return {
"num_classes": self.num_classes,
"target_class_ids": self.target_class_ids,
"name": self.name,
"dtype": self._dtype,
"ignore_class": self.ignore_class,
"sparse_y_pred": self.sparse_y_pred,
"axis": self.axis,
}
@keras_core_export("keras_core.metrics.OneHotMeanIoU")
class OneHotMeanIoU(MeanIoU):
"""Computes mean Intersection-Over-Union metric for one-hot encoded labels.
Formula:
```python
iou = true_positives / (true_positives + false_positives + false_negatives)
```
Intersection-Over-Union is a common evaluation metric for semantic image
segmentation.
To compute IoUs, the predictions are accumulated in a confusion matrix,
weighted by `sample_weight` and the metric is then calculated from it.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
This class can be used to compute the mean IoU for multi-class
classification tasks where the labels are one-hot encoded (the last axis
should have one dimension per class). Note that the predictions should also
have the same shape. To compute the mean IoU, first the labels and
predictions are converted back into integer format by taking the argmax over
the class axis. Then the same computation steps as for the base `MeanIoU`
class apply.
Note, if there is only one channel in the labels and predictions, this class
is the same as class `MeanIoU`. In this case, use `MeanIoU` instead.
Also, make sure that `num_classes` is equal to the number of classes in the
data, to avoid a "labels out of bound" error when the confusion matrix is
computed.
Args:
num_classes: The possible number of labels the prediction task can have.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
ignore_class: Optional integer. The ID of a class to be ignored during
metric computation. This is useful, for example, in segmentation
problems featuring a "void" class (commonly -1 or 255) in
segmentation maps. By default (`ignore_class=None`), all classes are
considered.
sparse_y_pred: Whether predictions are encoded using natural numbers or
probability distribution vectors. If `False`, the `argmax`
function will be used to determine each sample's most likely
associated label.
axis: (Optional) The dimension containing the logits. Defaults to `-1`.
Examples:
Standalone usage:
>>> y_true = np.array([[0, 0, 1], [1, 0, 0], [0, 1, 0], [1, 0, 0]])
>>> y_pred = np.array([[0.2, 0.3, 0.5], [0.1, 0.2, 0.7], [0.5, 0.3, 0.1],
... [0.1, 0.4, 0.5]])
>>> sample_weight = [0.1, 0.2, 0.3, 0.4]
>>> m = keras_core.metrics.OneHotMeanIoU(num_classes=3)
>>> m.update_state(
... y_true=y_true, y_pred=y_pred, sample_weight=sample_weight)
>>> # cm = [[0, 0, 0.2+0.4],
>>> # [0.3, 0, 0],
>>> # [0, 0, 0.1]]
>>> # sum_row = [0.3, 0, 0.7], sum_col = [0.6, 0.3, 0.1]
>>> # true_positives = [0, 0, 0.1]
    >>> # single_iou = true_positives / (sum_row + sum_col - true_positives)
>>> # mean_iou = (0 + 0 + 0.1 / (0.7 + 0.1 - 0.1)) / 3
>>> m.result()
0.048
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[keras_core.metrics.OneHotMeanIoU(num_classes=3)])
```
"""
def __init__(
self,
num_classes,
name=None,
dtype=None,
ignore_class=None,
sparse_y_pred=False,
axis=-1,
):
super().__init__(
num_classes=num_classes,
axis=axis,
name=name,
dtype=dtype,
ignore_class=ignore_class,
sparse_y_true=False,
sparse_y_pred=sparse_y_pred,
)
def get_config(self):
return {
"num_classes": self.num_classes,
"name": self.name,
"dtype": self._dtype,
"ignore_class": self.ignore_class,
"sparse_y_pred": self.sparse_y_pred,
"axis": self.axis,
}
| keras-core/keras_core/metrics/iou_metrics.py/0 | {
"file_path": "keras-core/keras_core/metrics/iou_metrics.py",
"repo_id": "keras-core",
"token_count": 11729
} | 49 |
import copy
import inspect
import warnings
import tree
from keras_core import backend
from keras_core import ops
from keras_core.backend.common import global_state
from keras_core.layers.input_spec import InputSpec
from keras_core.layers.layer import Layer
from keras_core.legacy.saving import saving_utils
from keras_core.legacy.saving import serialization as legacy_serialization
from keras_core.models.model import Model
from keras_core.ops.function import Function
from keras_core.ops.function import make_node_key
from keras_core.saving import serialization_lib
from keras_core.utils import tracking
class Functional(Function, Model):
"""A `Functional` model is a `Model` defined as a directed graph of layers.
Three types of `Model` exist: subclassed `Model`, `Functional` model,
and `Sequential` (a special case of `Functional`).
A `Functional` model can be instantiated by passing two arguments to
`__init__()`. The first argument is the `keras_core.Input` objects
that represent the inputs to the model.
The second argument specifies the output tensors that represent
the outputs of this model. Both arguments can be a nested structure
of tensors.
Example:
    ```python
inputs = {'x1': keras_core.Input(shape=(10,), name='x1'),
'x2': keras_core.Input(shape=(1,), name='x2')}
t = keras_core.layers.Dense(1, activation='relu')(inputs['x1'])
outputs = keras_core.layers.Add()([t, inputs['x2']])
model = keras_core.Model(inputs, outputs)
```
A `Functional` model constructed using the Functional API can also
include raw Keras Core ops.
Example:
```python
inputs = keras_core.Input(shape=(10,))
x = keras_core.layers.Dense(1)(inputs)
outputs = ops.nn.relu(x)
model = keras_core.Model(inputs, outputs)
```
A new `Functional` model can also be created by using the
intermediate tensors. This enables you to quickly extract sub-components
of the model.
Example:
```python
inputs = keras_core.Input(shape=(None, None, 3))
processed = keras_core.layers.RandomCrop(width=32, height=32)(inputs)
conv = keras_core.layers.Conv2D(filters=2, kernel_size=3)(processed)
pooling = keras_core.layers.GlobalAveragePooling2D()(conv)
feature = keras_core.layers.Dense(10)(pooling)
full_model = keras_core.Model(inputs, feature)
backbone = keras_core.Model(processed, conv)
activations = keras_core.Model(conv, feature)
```
Note that the `backbone` and `activations` models are not
created with `keras_core.Input` objects, but with the tensors
    that originate from `keras_core.Input` objects.
Under the hood, the layers and weights will
    be shared across these models, so that the user can train the `full_model`,
    and use `backbone` or `activations` to do feature extraction.
The inputs and outputs of the model can be nested structures of tensors as
    well, and the created models are standard `Functional` models that support
    all the existing APIs.
Args:
inputs: List of input tensors (must be created via `keras_core.Input()`
or originated from `keras_core.Input()`).
outputs: List of output tensors.
name: String, optional. Name of the model.
trainable: Boolean, optional. If the model's variables should be
trainable.
"""
@tracking.no_automatic_dependency_tracking
def __init__(self, inputs, outputs, name=None, **kwargs):
if isinstance(inputs, dict):
for k, v in inputs.items():
if not isinstance(v, backend.KerasTensor):
raise ValueError(
"When providing `inputs` as a dict, all values in the "
f"dict must be KerasTensors. Received: inputs={inputs} "
f"including invalid value {v} of type {type(v)}"
)
if k != v.name:
warnings.warn(
"When providing `inputs` as a dict, all keys in the "
"dict must match the names of the corresponding "
f"tensors. Received key '{k}' mapping to value {v} "
f"which has name '{v.name}'. Change the tensor name to "
f"'{k}' (via `Input(..., name='{k}')`)"
)
elif isinstance(inputs, (list, tuple)):
for x in inputs:
if not isinstance(x, backend.KerasTensor):
raise ValueError(
"When providing `inputs` as a list/tuple, all values "
f"in the list/tuple must be KerasTensors. Received: "
f"inputs={inputs} including invalid value {x} of type "
f"{type(x)}"
)
elif not isinstance(inputs, backend.KerasTensor):
raise ValueError(
f"Unrecognized type for `inputs`: {inputs} "
f"(of type {type(inputs)})"
)
if isinstance(outputs, dict):
for k, v in outputs.items():
if not isinstance(v, backend.KerasTensor):
raise ValueError(
"When providing `outputs` as a dict, all values in the "
f"dict must be KerasTensors. Received: "
f"outputs={outputs} including invalid value {v} of "
f"type {type(v)}"
)
elif isinstance(outputs, (list, tuple)):
for x in outputs:
if not isinstance(x, backend.KerasTensor):
raise ValueError(
"When providing `outputs` as a list/tuple, all values "
f"in the list/tuple must be KerasTensors. Received: "
f"outputs={outputs} including invalid value {x} of "
f"type {type(x)}"
)
elif not isinstance(outputs, backend.KerasTensor):
raise ValueError(
f"Unrecognized type for `outputs`: {outputs} "
f"(of type {type(outputs)})"
)
trainable = kwargs.pop("trainable", None)
Function.__init__(self, inputs, outputs, name=name, **kwargs)
if trainable is not None:
self.trainable = trainable
self._layers = self.layers
self.build(None)
# We will convert directly (to the correct dtype per input).
self._convert_input_args = False
self._allow_non_tensor_positional_args = True
output_layers = [x._keras_history[0] for x in self.outputs]
self.output_names = [x.name for x in output_layers]
def _lock_state(self):
# Unlike other layers, we allow Functional state to be mutable after
# build. E.g. to attach a layer to a model that is not part of the
# functional DAG.
pass
@property
def layers(self):
layers = []
for operation in self._operations:
if isinstance(operation, Layer):
layers.append(operation)
return layers
def call(self, inputs, training=None, mask=None):
        # Add support for training, masking
inputs = self._standardize_inputs(inputs)
if mask is None:
masks = [None] * len(inputs)
else:
masks = self._flatten_to_reference_inputs(mask)
for x, mask in zip(inputs, masks):
if mask is not None:
x._keras_mask = mask
outputs = self._run_through_graph(
inputs, operation_fn=lambda op: operation_fn(op, training=training)
)
return unpack_singleton(outputs)
def compute_output_spec(self, inputs, training=None, mask=None):
# From Function
return super().compute_output_spec(inputs)
def build(self, input_shape):
self.built = True
@property
def input_shape(self):
input_shapes = tree.map_structure(lambda x: x.shape, self.inputs)
if isinstance(input_shapes, list) and len(input_shapes) == 1:
return input_shapes[0]
return input_shapes
@property
def output_shape(self):
output_shapes = tree.map_structure(lambda x: x.shape, self.outputs)
if isinstance(output_shapes, list) and len(output_shapes) == 1:
return output_shapes[0]
return output_shapes
def _assert_input_compatibility(self, *args):
return super(Model, self)._assert_input_compatibility(*args)
def _flatten_to_reference_inputs(self, inputs, allow_extra_keys=True):
if isinstance(inputs, dict):
ref_inputs = self._inputs_struct
if not tree.is_nested(ref_inputs):
ref_inputs = [self._inputs_struct]
if isinstance(ref_inputs, dict):
# In the case that the graph is constructed with dict input
# tensors, We will use the original dict key to map with the
# keys in the input data. Note that the model.inputs is using
# tree.flatten to process the input tensors, which means the
# dict input tensors are ordered by their keys.
ref_input_names = sorted(ref_inputs.keys())
else:
ref_input_names = [
inp._keras_history.operation.name for inp in ref_inputs
]
            # Raise a warning if there is more input data than there are
            # input tensors.
if not allow_extra_keys and len(inputs) > len(ref_input_names):
warnings.warn(
"Input dict contained keys {} which did not match any "
"model input. They will be ignored by the model.".format(
[n for n in inputs.keys() if n not in ref_input_names]
),
stacklevel=2,
)
# Flatten in the order `Input`s were passed during Model
# construction.
return [inputs[n] for n in ref_input_names]
# Otherwise both ref inputs and inputs will already be in same order.
return tree.flatten(inputs)
def _convert_inputs_to_tensors(self, flat_inputs):
flat_dtypes = [x.dtype for x in self._inputs]
converted = []
for x, dtype in zip(flat_inputs, flat_dtypes):
if backend.is_tensor(x):
converted.append(backend.cast(x, dtype=dtype))
else:
converted.append(backend.convert_to_tensor(x, dtype=dtype))
return converted
def _adjust_input_rank(self, flat_inputs):
flat_ref_shapes = [x.shape for x in self._inputs]
adjusted = []
for x, ref_shape in zip(flat_inputs, flat_ref_shapes):
x_rank = len(x.shape)
ref_rank = len(ref_shape)
if x_rank == ref_rank:
adjusted.append(x)
continue
if x_rank == ref_rank + 1:
if x.shape[-1] == 1:
adjusted.append(ops.squeeze(x, axis=-1))
continue
if x_rank == ref_rank - 1:
if ref_shape[-1] == 1:
adjusted.append(ops.expand_dims(x, axis=-1))
continue
raise ValueError(
f"Invalid input shape for input {x}. Expected shape "
f"{ref_shape}, but input has incompatible shape {x.shape}"
)
# Add back metadata.
for i in range(len(flat_inputs)):
if hasattr(flat_inputs[i], "_keras_history"):
adjusted[i]._keras_history = flat_inputs[i]._keras_history
if hasattr(flat_inputs[i], "_keras_mask"):
adjusted[i]._keras_mask = flat_inputs[i]._keras_mask
return adjusted
def _standardize_inputs(self, inputs):
flat_inputs = self._flatten_to_reference_inputs(inputs)
flat_inputs = self._convert_inputs_to_tensors(flat_inputs)
return self._adjust_input_rank(flat_inputs)
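        # Editor's sketch (added for illustration, not in the original file):
        # `_adjust_input_rank` only tolerates a trailing size-1 axis
        # mismatch. For a model built on an Input of shape (None, 4),
        # data of shape (2, 4, 1) is squeezed to (2, 4); for an Input of
        # shape (None, 4, 1), data of shape (2, 4) is expanded to
        # (2, 4, 1). Any other rank mismatch raises the ValueError above.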
@property
def input(self):
        # For backwards compatibility,
        # override `input` to retrieve the user-provided
        # constructor inputs.
return self._inputs_struct
@property
def output(self):
return self._outputs_struct
def add_loss(self, loss):
# Symbolic only. TODO
raise NotImplementedError
@property
def input_spec(self):
if hasattr(self, "_manual_input_spec"):
return self._manual_input_spec
def shape_with_no_batch_size(x):
x = list(x)
if x:
x[0] = None
return tuple(x)
if isinstance(self._inputs_struct, dict):
            # Case where `_inputs_struct` is a plain dict of Inputs.
names = sorted(self._inputs_struct.keys())
return [
InputSpec(
shape=shape_with_no_batch_size(
self._inputs_struct[name].shape
),
allow_last_axis_squeeze=True,
name=name,
)
for name in names
]
else:
# Single input, or list/tuple of inputs.
# The data may be passed as a dict keyed by input name.
return [
InputSpec(
shape=shape_with_no_batch_size(x.shape),
allow_last_axis_squeeze=True,
name=x._keras_history[0].name,
)
for x in self._inputs
]
@input_spec.setter
def input_spec(self, value):
self._manual_input_spec = value
def get_config(self):
if not functional_like_constructor(self.__class__):
# Subclassed networks are not serializable
# (unless serialization is implemented by
# the author of the subclassed network).
return Model.get_config(self)
config = {
"name": self.name,
"trainable": self.trainable,
}
# Build a map from a layer unique name (make_node_key)
# to the index of the nodes that are saved in the config.
# Only nodes in network_nodes are saved.
node_reindexing_map = {}
for operation in self.operations:
if issubclass(operation.__class__, Functional):
# Functional models start with a pre-existing node
# linking their input to output.
kept_nodes = 1
else:
kept_nodes = 0
for original_node_index, node in enumerate(
operation._inbound_nodes
):
node_key = make_node_key(operation, original_node_index)
if node_key in self._nodes:
# i.e. we mark it to be saved
node_reindexing_map[node_key] = kept_nodes
kept_nodes += 1
# serialize and save the layers in layer_configs
layer_configs = []
for operation in self.operations: # From the earliest layers on.
filtered_inbound_nodes = []
for original_node_index, node in enumerate(
operation._inbound_nodes
):
node_key = make_node_key(operation, original_node_index)
if node_key in self._nodes:
# The node is relevant to the model:
# add to filtered_inbound_nodes.
node_data = serialize_node(node, node_reindexing_map)
if node_data is not None:
filtered_inbound_nodes.append(node_data)
serialize_obj_fn = serialization_lib.serialize_keras_object
if global_state.get_global_attribute("use_legacy_config", False):
# Legacy format serialization used for H5 and SavedModel
serialize_obj_fn = legacy_serialization.serialize_keras_object
layer_config = serialize_obj_fn(operation)
layer_config["name"] = operation.name
layer_config["inbound_nodes"] = filtered_inbound_nodes
layer_configs.append(layer_config)
config["layers"] = layer_configs
# Gather info about inputs and outputs.
def get_tensor_config(tensor):
operation = tensor._keras_history[0]
node_index = tensor._keras_history[1]
tensor_index = tensor._keras_history[2]
node_key = make_node_key(operation, node_index)
assert node_key in self._nodes
new_node_index = node_reindexing_map[node_key]
return [operation.name, new_node_index, tensor_index]
def map_tensors(tensors):
if isinstance(tensors, dict):
return {k: get_tensor_config(v) for k, v in tensors.items()}
if isinstance(tensors, (list, tuple)):
return [get_tensor_config(v) for v in tensors]
else:
return [get_tensor_config(tensors)]
config["input_layers"] = map_tensors(self._inputs_struct)
config["output_layers"] = map_tensors(self._outputs_struct)
return copy.deepcopy(config)
def functional_from_config(cls, config, custom_objects=None):
"""Instantiates a Functional model from its config (from `get_config()`).
Args:
cls: Class of the model, e.g. a custom subclass of `Model`.
config: Output of `get_config()` for the original model instance.
custom_objects: Optional dict of custom objects.
Returns:
An instance of `cls`.
"""
# Layer instances created during
# the graph reconstruction process
created_layers = {}
# Dictionary mapping layer instances to
# node data that specifies a layer call.
# It acts as a queue that maintains any unprocessed
# layer call until it becomes possible to process it
# (i.e. until the input tensors to the call all exist).
unprocessed_nodes = {}
def add_unprocessed_node(layer, node_data):
"""Add node to layer list
Arg:
layer: layer object
node_data: Node data specifying layer call
"""
if layer not in unprocessed_nodes:
unprocessed_nodes[layer] = [node_data]
else:
unprocessed_nodes[layer].append(node_data)
def process_node(layer, node_data):
"""Reconstruct node by linking to inbound layers
Args:
layer: Layer to process
            node_data: Node data specifying the layer call
"""
args, kwargs = deserialize_node(node_data, created_layers)
# Call layer on its inputs, thus creating the node
# and building the layer if needed.
layer(*args, **kwargs)
def process_layer(layer_data):
"""Deserializes a layer, then call it on appropriate inputs.
Args:
layer_data: layer config dict.
"""
layer_name = layer_data["name"]
# Instantiate layer.
if "module" not in layer_data:
# Legacy format deserialization (no "module" key)
# used for H5 and SavedModel formats
layer = saving_utils.model_from_config(
layer_data, custom_objects=custom_objects
)
else:
layer = serialization_lib.deserialize_keras_object(
layer_data, custom_objects=custom_objects
)
created_layers[layer_name] = layer
# Gather layer inputs.
inbound_nodes_data = layer_data["inbound_nodes"]
for node_data in inbound_nodes_data:
# We don't process nodes (i.e. make layer calls)
# on the fly because the inbound node may not yet exist,
# in case of layer shared at different topological depths
# (e.g. a model such as A(B(A(B(x)))))
add_unprocessed_node(layer, node_data)
# First, we create all layers and enqueue nodes to be processed
for layer_data in config["layers"]:
process_layer(layer_data)
# Then we process nodes in order of layer depth.
# Nodes that cannot yet be processed (if the inbound node
# does not yet exist) are re-enqueued, and the process
# is repeated until all nodes are processed.
while unprocessed_nodes:
for layer_data in config["layers"]:
layer = created_layers[layer_data["name"]]
# Process all nodes in layer, if not yet processed
if layer in unprocessed_nodes:
node_data_list = unprocessed_nodes[layer]
# Process nodes in order
node_index = 0
while node_index < len(node_data_list):
node_data = node_data_list[node_index]
try:
process_node(layer, node_data)
# If the node does not have all inbound layers
# available, stop processing and continue later
except IndexError:
break
node_index += 1
# If not all nodes processed then store unprocessed nodes
if node_index < len(node_data_list):
unprocessed_nodes[layer] = node_data_list[node_index:]
# If all nodes processed remove the layer
else:
del unprocessed_nodes[layer]
    # Create lists of input and output tensors and return the new model
name = config.get("name")
trainable = config.get("trainable")
def get_tensor(layer_name, node_index, tensor_index):
assert layer_name in created_layers
layer = created_layers[layer_name]
layer_output_tensors = layer._inbound_nodes[node_index].output_tensors
return layer_output_tensors[tensor_index]
def map_tensors(tensors):
if isinstance(tensors, dict):
return {k: get_tensor(*v) for k, v in tensors.items()}
else:
return [get_tensor(*v) for v in tensors]
input_tensors = map_tensors(config["input_layers"])
output_tensors = map_tensors(config["output_layers"])
return cls(
inputs=input_tensors,
outputs=output_tensors,
name=name,
trainable=trainable,
)
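# Editor's sketch (added for illustration, not in the original file): a
# typical round trip through the helper above, assuming a class that uses
# the functional constructor signature (`MyFunctional` is a hypothetical
# name used only for this example):
#
#     config = model.get_config()
#     clone = functional_from_config(MyFunctional, config)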
def operation_fn(operation, training):
def call(*args, **kwargs):
if (
hasattr(operation, "_call_has_training_arg")
and operation._call_has_training_arg
and training is not None
):
kwargs["training"] = training
return operation(*args, **kwargs)
return call
def functional_like_constructor(cls):
init_args = inspect.getfullargspec(cls.__init__).args[1:]
functional_init_args = inspect.getfullargspec(Functional.__init__).args[1:]
if init_args == functional_init_args:
return True
return False
def unpack_singleton(x):
if isinstance(x, (list, tuple)) and len(x) == 1:
return x[0]
return x
def serialize_node(node, node_reindexing_map):
if not node.input_tensors:
# Does not need to be serialized.
return
args = node.arguments.args
kwargs = node.arguments.kwargs
return {
"args": serialization_lib.serialize_keras_object(args),
"kwargs": serialization_lib.serialize_keras_object(kwargs),
}
def deserialize_node(node_data, created_layers):
"""Return (args, kwargs) for calling the node layer."""
if not node_data:
return [], {}
if isinstance(node_data, list):
# Legacy case.
input_tensors = []
for input_data in node_data:
inbound_layer_name = input_data[0]
inbound_node_index = input_data[1]
inbound_tensor_index = input_data[2]
if len(input_data) == 3:
kwargs = {}
elif len(input_data) == 4:
kwargs = input_data[3]
else:
raise ValueError(
"Cannot deserialize the model (invalid config data?)"
)
inbound_layer = created_layers[inbound_layer_name]
# Raise an error if the corresponding layer node
# has not yet been created
if len(inbound_layer._inbound_nodes) <= inbound_node_index:
raise IndexError(
"Layer node index out of bounds.\n"
f"inbound_layer = {inbound_layer}\n"
"inbound_layer._inbound_nodes = "
f"{inbound_layer._inbound_nodes}\n"
f"inbound_node_index = {inbound_node_index}"
)
inbound_node = inbound_layer._inbound_nodes[inbound_node_index]
input_tensors.append(
inbound_node.output_tensors[inbound_tensor_index]
)
return [unpack_singleton(input_tensors)], kwargs
args = serialization_lib.deserialize_keras_object(node_data["args"])
kwargs = serialization_lib.deserialize_keras_object(node_data["kwargs"])
def convert_revived_tensor(x):
if isinstance(x, backend.KerasTensor):
history = x._pre_serialization_keras_history
if history is None:
return x
layer = created_layers.get(history[0], None)
if layer is None:
raise ValueError(f"Unknown layer: {history[0]}")
inbound_node_index = history[1]
inbound_tensor_index = history[2]
if len(layer._inbound_nodes) <= inbound_node_index:
raise ValueError(
"Layer node index out of bounds.\n"
f"inbound_layer = {layer}\n"
f"inbound_layer._inbound_nodes = {layer._inbound_nodes}\n"
f"inbound_node_index = {inbound_node_index}"
)
inbound_node = layer._inbound_nodes[inbound_node_index]
return inbound_node.output_tensors[inbound_tensor_index]
return x
args = tree.map_structure(convert_revived_tensor, args)
kwargs = tree.map_structure(convert_revived_tensor, kwargs)
return args, kwargs
| keras-core/keras_core/models/functional.py/0 | {
"file_path": "keras-core/keras_core/models/functional.py",
"repo_id": "keras-core",
"token_count": 12279
} | 50 |
import math
import numpy as np
import pytest
import scipy.signal
from absl.testing import parameterized
from keras_core import backend
from keras_core import testing
from keras_core.backend.common.keras_tensor import KerasTensor
from keras_core.ops import math as kmath
def _stft(
x, sequence_length, sequence_stride, fft_length, window="hann", center=True
):
# pure numpy version of stft that matches librosa's implementation
x = np.array(x)
ori_dtype = x.dtype
if center:
pad_width = [(0, 0) for _ in range(len(x.shape))]
pad_width[-1] = (fft_length // 2, fft_length // 2)
x = np.pad(x, pad_width, mode="reflect")
l_pad = (fft_length - sequence_length) // 2
r_pad = fft_length - sequence_length - l_pad
if window is not None:
if isinstance(window, str):
window = scipy.signal.get_window(window, sequence_length)
win = np.array(window, dtype=x.dtype)
win = np.pad(win, [[l_pad, r_pad]])
else:
win = np.ones((sequence_length + l_pad + r_pad), dtype=x.dtype)
x = scipy.signal.stft(
x,
fs=1.0,
window=win,
nperseg=(sequence_length + l_pad + r_pad),
noverlap=(sequence_length + l_pad + r_pad - sequence_stride),
nfft=fft_length,
boundary=None,
padded=False,
)[-1]
# scale and swap to (..., num_sequences, fft_bins)
x = x / np.sqrt(1.0 / win.sum() ** 2)
x = np.swapaxes(x, -2, -1)
return np.real(x).astype(ori_dtype), np.imag(x).astype(ori_dtype)
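# Editor's sketch (added for illustration, not in the original file): for a
# mono signal of length 32 with sequence_length=10, sequence_stride=3 and
# fft_length=15, `_stft` returns two float arrays (real and imaginary parts)
# of shape (..., num_sequences, fft_length // 2 + 1), which the tests below
# compare against the corresponding `kmath.stft` outputs.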
def _istft(
x,
sequence_length,
sequence_stride,
fft_length,
length=None,
window="hann",
center=True,
):
# pure numpy version of istft that matches librosa's implementation
complex_input = x[0] + 1j * x[1]
x = np.fft.irfft(
complex_input, n=fft_length, axis=-1, norm="backward"
).astype(x[0].dtype)
expected_output_len = fft_length + sequence_stride * (x.shape[-2] - 1)
if window is not None:
if isinstance(window, str):
win = np.array(
scipy.signal.get_window(window, sequence_length), dtype=x.dtype
)
else:
win = np.array(window, dtype=x.dtype)
l_pad = (fft_length - sequence_length) // 2
r_pad = fft_length - sequence_length - l_pad
win = np.pad(win, [[l_pad, r_pad]])
# square and sum
_sequence_length = sequence_length + l_pad + r_pad
denom = np.square(win)
overlaps = -(-_sequence_length // sequence_stride)
denom = np.pad(
denom, [(0, overlaps * sequence_stride - _sequence_length)]
)
denom = np.reshape(denom, [overlaps, sequence_stride])
denom = np.sum(denom, 0, keepdims=True)
denom = np.tile(denom, [overlaps, 1])
denom = np.reshape(denom, [overlaps * sequence_stride])
win = np.divide(win, denom[:_sequence_length])
x = np.multiply(x, win)
# overlap_sequences
def _overlap_sequences(x, sequence_stride):
*batch_shape, num_sequences, sequence_length = x.shape
flat_batchsize = math.prod(batch_shape)
x = np.reshape(x, (flat_batchsize, num_sequences, sequence_length))
output_size = sequence_stride * (num_sequences - 1) + sequence_length
nstep_per_segment = 1 + (sequence_length - 1) // sequence_stride
padded_segment_len = nstep_per_segment * sequence_stride
x = np.pad(
x, ((0, 0), (0, 0), (0, padded_segment_len - sequence_length))
)
x = np.reshape(
x,
(flat_batchsize, num_sequences, nstep_per_segment, sequence_stride),
)
x = x.transpose((0, 2, 1, 3))
x = np.pad(x, ((0, 0), (0, 0), (0, num_sequences), (0, 0)))
shrinked = x.shape[2] - 1
x = np.reshape(x, (flat_batchsize, -1))
x = x[:, : (nstep_per_segment * shrinked * sequence_stride)]
x = np.reshape(
x, (flat_batchsize, nstep_per_segment, shrinked * sequence_stride)
)
x = np.sum(x, axis=1)[:, :output_size]
return np.reshape(x, tuple(batch_shape) + (-1,))
x = _overlap_sequences(x, sequence_stride)
if backend.backend() in {"numpy", "jax"}:
x = np.nan_to_num(x)
start = 0 if center is False else fft_length // 2
if length is not None:
end = start + length
elif center:
end = -(fft_length // 2)
else:
end = expected_output_len
return x[..., start:end]
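# Editor's sketch (added for illustration, not in the original file): the
# `_overlap_sequences` helper above implements overlap-add; frames spaced
# `sequence_stride` samples apart are summed back into a signal of length
# sequence_stride * (num_sequences - 1) + fft_length before the final
# center/length trimming.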
class MathOpsDynamicShapeTest(testing.TestCase, parameterized.TestCase):
def test_segment_sum(self):
data = KerasTensor((None, 4), dtype="float32")
segment_ids = KerasTensor((10,), dtype="int32")
outputs = kmath.segment_sum(data, segment_ids)
self.assertEqual(outputs.shape, (None, 4))
data = KerasTensor((None, 4), dtype="float32")
segment_ids = KerasTensor((10,), dtype="int32")
outputs = kmath.segment_sum(data, segment_ids, num_segments=5)
self.assertEqual(outputs.shape, (5, 4))
def test_segment_max(self):
data = KerasTensor((None, 4), dtype="float32")
segment_ids = KerasTensor((10,), dtype="int32")
outputs = kmath.segment_max(data, segment_ids)
self.assertEqual(outputs.shape, (None, 4))
data = KerasTensor((None, 4), dtype="float32")
segment_ids = KerasTensor((10,), dtype="int32")
outputs = kmath.segment_max(data, segment_ids, num_segments=5)
self.assertEqual(outputs.shape, (5, 4))
def test_top_k(self):
x = KerasTensor((None, 2, 3))
values, indices = kmath.top_k(x, k=1)
self.assertEqual(values.shape, (None, 2, 1))
self.assertEqual(indices.shape, (None, 2, 1))
def test_in_top_k(self):
targets = KerasTensor((None,))
predictions = KerasTensor((None, 10))
self.assertEqual(
kmath.in_top_k(targets, predictions, k=1).shape, (None,)
)
def test_logsumexp(self):
x = KerasTensor((None, 2, 3), dtype="float32")
result = kmath.logsumexp(x)
self.assertEqual(result.shape, ())
def test_qr(self):
x = KerasTensor((None, 4, 3), dtype="float32")
q, r = kmath.qr(x, mode="reduced")
qref, rref = np.linalg.qr(np.ones((2, 4, 3)), mode="reduced")
qref_shape = (None,) + qref.shape[1:]
rref_shape = (None,) + rref.shape[1:]
self.assertEqual(q.shape, qref_shape)
self.assertEqual(r.shape, rref_shape)
q, r = kmath.qr(x, mode="complete")
qref, rref = np.linalg.qr(np.ones((2, 4, 3)), mode="complete")
qref_shape = (None,) + qref.shape[1:]
rref_shape = (None,) + rref.shape[1:]
self.assertEqual(q.shape, qref_shape)
self.assertEqual(r.shape, rref_shape)
def test_extract_sequences(self):
# Defined dimension
x = KerasTensor((None, 32), dtype="float32")
sequence_length = 3
sequence_stride = 2
outputs = kmath.extract_sequences(x, sequence_length, sequence_stride)
num_sequences = 1 + (x.shape[-1] - sequence_length) // sequence_stride
self.assertEqual(outputs.shape, (None, num_sequences, sequence_length))
# Undefined dimension
x = KerasTensor((None, None), dtype="float32")
sequence_length = 3
sequence_stride = 2
outputs = kmath.extract_sequences(x, sequence_length, sequence_stride)
self.assertEqual(outputs.shape, (None, None, sequence_length))
def test_fft(self):
real = KerasTensor((None, 4, 3), dtype="float32")
imag = KerasTensor((None, 4, 3), dtype="float32")
real_output, imag_output = kmath.fft((real, imag))
ref = np.fft.fft(np.ones((2, 4, 3)))
ref_shape = (None,) + ref.shape[1:]
self.assertEqual(real_output.shape, ref_shape)
self.assertEqual(imag_output.shape, ref_shape)
def test_fft2(self):
real = KerasTensor((None, 4, 3), dtype="float32")
imag = KerasTensor((None, 4, 3), dtype="float32")
real_output, imag_output = kmath.fft2((real, imag))
ref = np.fft.fft2(np.ones((2, 4, 3)))
ref_shape = (None,) + ref.shape[1:]
self.assertEqual(real_output.shape, ref_shape)
self.assertEqual(imag_output.shape, ref_shape)
@parameterized.parameters([(None,), (1,), (5,)])
def test_rfft(self, fft_length):
x = KerasTensor((None, 4, 3), dtype="float32")
real_output, imag_output = kmath.rfft(x, fft_length=fft_length)
ref = np.fft.rfft(np.ones((2, 4, 3)), n=fft_length)
ref_shape = (None,) + ref.shape[1:]
self.assertEqual(real_output.shape, ref_shape)
self.assertEqual(imag_output.shape, ref_shape)
@parameterized.parameters([(None,), (1,), (5,)])
def test_irfft(self, fft_length):
real = KerasTensor((None, 4, 3), dtype="float32")
imag = KerasTensor((None, 4, 3), dtype="float32")
output = kmath.irfft((real, imag), fft_length=fft_length)
ref = np.fft.irfft(np.ones((2, 4, 3)), n=fft_length)
ref_shape = (None,) + ref.shape[1:]
self.assertEqual(output.shape, ref_shape)
def test_stft(self):
x = KerasTensor((None, 32), dtype="float32")
sequence_length = 10
sequence_stride = 3
fft_length = 15
real_output, imag_output = kmath.stft(
x, sequence_length, sequence_stride, fft_length
)
real_ref, imag_ref = _stft(
np.ones((2, 32)), sequence_length, sequence_stride, fft_length
)
real_ref_shape = (None,) + real_ref.shape[1:]
imag_ref_shape = (None,) + imag_ref.shape[1:]
self.assertEqual(real_output.shape, real_ref_shape)
self.assertEqual(imag_output.shape, imag_ref_shape)
def test_istft(self):
sequence_length = 10
sequence_stride = 3
fft_length = 15
real = KerasTensor((None, 32), dtype="float32")
imag = KerasTensor((None, 32), dtype="float32")
output = kmath.istft(
(real, imag), sequence_length, sequence_stride, fft_length
)
ref = _istft(
(np.ones((5, 32)), np.ones((5, 32))),
sequence_length,
sequence_stride,
fft_length,
)
ref_shape = (None,) + ref.shape[1:]
self.assertEqual(output.shape, ref_shape)
def test_rsqrt(self):
x = KerasTensor([None, 3])
self.assertEqual(kmath.rsqrt(x).shape, (None, 3))
class MathOpsStaticShapeTest(testing.TestCase):
@pytest.mark.skipif(
backend.backend() == "jax",
reason="JAX does not support `num_segments=None`.",
)
def test_segment_sum(self):
data = KerasTensor((10, 4), dtype="float32")
segment_ids = KerasTensor((10,), dtype="int32")
outputs = kmath.segment_sum(data, segment_ids)
self.assertEqual(outputs.shape, (None, 4))
def test_segment_sum_explicit_num_segments(self):
data = KerasTensor((10, 4), dtype="float32")
segment_ids = KerasTensor((10,), dtype="int32")
outputs = kmath.segment_sum(data, segment_ids, num_segments=5)
self.assertEqual(outputs.shape, (5, 4))
@pytest.mark.skipif(
backend.backend() == "jax",
reason="JAX does not support `num_segments=None`.",
)
def test_segment_max(self):
data = KerasTensor((10, 4), dtype="float32")
segment_ids = KerasTensor((10,), dtype="int32")
outputs = kmath.segment_max(data, segment_ids)
self.assertEqual(outputs.shape, (None, 4))
def test_segment_max_explicit_num_segments(self):
data = KerasTensor((10, 4), dtype="float32")
segment_ids = KerasTensor((10,), dtype="int32")
outputs = kmath.segment_max(data, segment_ids, num_segments=5)
self.assertEqual(outputs.shape, (5, 4))
def test_topk(self):
x = KerasTensor((1, 2, 3))
values, indices = kmath.top_k(x, k=1)
self.assertEqual(values.shape, (1, 2, 1))
self.assertEqual(indices.shape, (1, 2, 1))
def test_in_top_k(self):
targets = KerasTensor((5,))
predictions = KerasTensor((5, 10))
self.assertEqual(kmath.in_top_k(targets, predictions, k=1).shape, (5,))
def test_logsumexp(self):
x = KerasTensor((1, 2, 3), dtype="float32")
result = kmath.logsumexp(x)
self.assertEqual(result.shape, ())
def test_qr(self):
x = KerasTensor((4, 3), dtype="float32")
q, r = kmath.qr(x, mode="reduced")
qref, rref = np.linalg.qr(np.ones((4, 3)), mode="reduced")
self.assertEqual(q.shape, qref.shape)
self.assertEqual(r.shape, rref.shape)
q, r = kmath.qr(x, mode="complete")
qref, rref = np.linalg.qr(np.ones((4, 3)), mode="complete")
self.assertEqual(q.shape, qref.shape)
self.assertEqual(r.shape, rref.shape)
def test_extract_sequences(self):
x = KerasTensor((10, 16), dtype="float32")
sequence_length = 3
sequence_stride = 2
outputs = kmath.extract_sequences(x, sequence_length, sequence_stride)
num_sequences = 1 + (x.shape[-1] - sequence_length) // sequence_stride
self.assertEqual(outputs.shape, (10, num_sequences, sequence_length))
def test_fft(self):
real = KerasTensor((2, 4, 3), dtype="float32")
imag = KerasTensor((2, 4, 3), dtype="float32")
real_output, imag_output = kmath.fft((real, imag))
ref = np.fft.fft(np.ones((2, 4, 3)))
self.assertEqual(real_output.shape, ref.shape)
self.assertEqual(imag_output.shape, ref.shape)
def test_fft2(self):
real = KerasTensor((2, 4, 3), dtype="float32")
imag = KerasTensor((2, 4, 3), dtype="float32")
real_output, imag_output = kmath.fft2((real, imag))
ref = np.fft.fft2(np.ones((2, 4, 3)))
self.assertEqual(real_output.shape, ref.shape)
self.assertEqual(imag_output.shape, ref.shape)
def test_rfft(self):
x = KerasTensor((2, 4, 3), dtype="float32")
real_output, imag_output = kmath.rfft(x)
ref = np.fft.rfft(np.ones((2, 4, 3)))
self.assertEqual(real_output.shape, ref.shape)
self.assertEqual(imag_output.shape, ref.shape)
def test_irfft(self):
real = KerasTensor((2, 4, 3), dtype="float32")
imag = KerasTensor((2, 4, 3), dtype="float32")
output = kmath.irfft((real, imag))
ref = np.fft.irfft(np.ones((2, 4, 3)))
self.assertEqual(output.shape, ref.shape)
def test_rsqrt(self):
x = KerasTensor([4, 3], dtype="float32")
self.assertEqual(kmath.rsqrt(x).shape, (4, 3))
def test_stft(self):
x = KerasTensor((2, 32), dtype="float32")
sequence_length = 10
sequence_stride = 3
fft_length = 15
real_output, imag_output = kmath.stft(
x, sequence_length, sequence_stride, fft_length
)
real_ref, imag_ref = _stft(
np.ones((2, 32)), sequence_length, sequence_stride, fft_length
)
self.assertEqual(real_output.shape, real_ref.shape)
self.assertEqual(imag_output.shape, imag_ref.shape)
def test_istft(self):
        # sequence_stride must be <= x[0].shape[-1]
        # sequence_stride must be >= fft_length / num_sequences
sequence_length = 10
sequence_stride = 3
fft_length = 15
num_sequences = fft_length // sequence_stride + 1
real = KerasTensor((num_sequences, 32), dtype="float32")
imag = KerasTensor((num_sequences, 32), dtype="float32")
output = kmath.istft(
(real, imag), sequence_length, sequence_stride, fft_length
)
ref = _istft(
(np.ones((num_sequences, 32)), np.ones((num_sequences, 32))),
sequence_length,
sequence_stride,
fft_length,
)
self.assertEqual(output.shape, ref.shape)
class MathOpsCorrectnessTest(testing.TestCase, parameterized.TestCase):
@pytest.mark.skipif(
backend.backend() == "jax",
reason="JAX does not support `num_segments=None`.",
)
def test_segment_sum(self):
# Test 1D case.
data = np.array([1, 2, 3, 4, 5, 6, 7, 8], dtype=np.float32)
segment_ids = np.array([0, 0, 1, 1, 1, 2, 2, 2], dtype=np.int32)
outputs = kmath.segment_sum(data, segment_ids)
# Segment 0: 1 + 2 = 3
# Segment 1: 3 + 4 + 5 = 12
# Segment 2: 6 + 7 + 8 = 21
expected = np.array([3, 12, 21], dtype=np.float32)
self.assertAllClose(outputs, expected)
# Test N-D case.
data = np.random.rand(9, 3, 3)
segment_ids = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2], dtype=np.int32)
outputs = kmath.segment_sum(data, segment_ids)
expected = np.zeros((3, 3, 3))
for i in range(data.shape[0]):
segment_id = segment_ids[i]
expected[segment_id] += data[i]
self.assertAllClose(outputs, expected)
def test_segment_sum_explicit_num_segments(self):
# Test 1D case.
data = np.array([1, 2, 3, 4, 5, 6, 7, 8], dtype=np.float32)
segment_ids = np.array([0, 0, 1, 1, 1, 2, 2, 2], dtype=np.int32)
outputs = kmath.segment_sum(data, segment_ids, num_segments=4)
expected = np.array([3, 12, 21, 0], dtype=np.float32)
self.assertAllClose(outputs, expected)
# Test 1D with -1 case.
data = np.array([1, 2, 3, 4, 5, 6, 7, 8], dtype=np.float32)
segment_ids = np.array([0, 0, 1, 1, -1, 2, 2, -1], dtype=np.int32)
outputs = kmath.segment_sum(data, segment_ids, num_segments=4)
# Segment ID 0: First two elements (1 + 2) = 3
# Segment ID 1: Next two elements (3 + 4) = 7
        # Segment ID -1: Elements with segment ID -1 (values 5 and 8) are
        #   ignored.
        # Segment ID 2: Elements 6 + 7 = 13
# Segment ID 3: No elements, so output is 0.
expected = np.array([3, 7, 13, 0], dtype=np.float32)
self.assertAllClose(outputs, expected)
# Test N-D case.
data = np.random.rand(9, 3, 3)
segment_ids = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2], dtype=np.int32)
outputs = kmath.segment_sum(data, segment_ids, num_segments=4)
expected = np.zeros((4, 3, 3))
for i in range(data.shape[0]):
segment_id = segment_ids[i]
if segment_id != -1:
expected[segment_id] += data[i]
self.assertAllClose(outputs, expected)
@pytest.mark.skipif(
backend.backend() == "jax",
reason="JAX does not support `num_segments=None`.",
)
def test_segment_max(self):
# Test 1D case.
data = np.array([1, 2, 3, 4, 5, 6, 7, 8], dtype=np.float32)
segment_ids = np.array([0, 0, 1, 1, 1, 2, 2, 2], dtype=np.int32)
outputs = kmath.segment_max(data, segment_ids)
# Segment ID 0: Max of the first two elements = 2
# Segment ID 1: Max of the next three elements = 5
# Segment ID 2: Max of the next three elements = 8
expected = np.array([2, 5, 8], dtype=np.float32)
self.assertAllClose(outputs, expected)
# Test N-D case.
data = np.random.rand(9, 3, 3)
segment_ids = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2], dtype=np.int32)
outputs = kmath.segment_max(data, segment_ids)
expected = np.zeros((3, 3, 3))
for i in range(data.shape[0]):
segment_id = segment_ids[i]
expected[segment_id] = np.maximum(expected[segment_id], data[i])
self.assertAllClose(outputs, expected)
def test_segment_max_explicit_num_segments(self):
# Test 1D case.
data = np.array([1, 2, 3, 4, 5, 6, 7, 8], dtype=np.float32)
segment_ids = np.array([0, 0, 1, 1, 1, 2, 2, 2], dtype=np.int32)
outputs = kmath.segment_max(data, segment_ids, num_segments=3)
# Segment ID 0: Max of the first two elements = 2
# Segment ID 1: Max of the next three elements = 5
# Segment ID 2: Max of the next three elements = 8
expected = np.array([2, 5, 8], dtype=np.float32)
self.assertAllClose(outputs, expected)
# Test 1D with -1 case.
data = np.array([1, 2, 3, 4, 5, 6, 7, 8], dtype=np.float32)
segment_ids = np.array([0, 0, 1, 1, -1, 2, 2, -1], dtype=np.int32)
outputs = kmath.segment_max(data, segment_ids, num_segments=3)
expected = np.array([2, 4, 7], dtype=np.float32)
self.assertAllClose(outputs, expected)
# Test N-D case.
data = np.random.rand(9, 3, 3)
segment_ids = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2], dtype=np.int32)
outputs = kmath.segment_max(data, segment_ids, num_segments=3)
expected = np.full((3, 3, 3), -np.inf)
for i in range(data.shape[0]):
segment_id = segment_ids[i]
expected[segment_id] = np.maximum(expected[segment_id], data[i])
self.assertAllClose(outputs, expected)
def test_top_k(self):
x = np.array([0, 4, 2, 1, 3, -1], dtype=np.float32)
values, indices = kmath.top_k(x, k=2)
self.assertAllClose(values, [4, 3])
self.assertAllClose(indices, [1, 4])
x = np.array([0, 4, 2, 1, 3, -1], dtype=np.float32)
values, indices = kmath.top_k(x, k=2, sorted=False)
# Any order ok when `sorted=False`.
self.assertEqual(set(backend.convert_to_numpy(values)), set([4, 3]))
self.assertEqual(set(backend.convert_to_numpy(indices)), set([1, 4]))
x = np.random.rand(5, 5)
outputs = kmath.top_k(x, k=2)
expected_values = np.zeros((5, 2))
expected_indices = np.zeros((5, 2), dtype=np.int32)
for i in range(x.shape[0]):
top_k_indices = np.argsort(x[i])[-2:][::-1]
expected_values[i] = x[i, top_k_indices]
expected_indices[i] = top_k_indices
self.assertAllClose(outputs[0], expected_values)
self.assertAllClose(outputs[1], expected_indices)
def test_in_top_k(self):
targets = np.array([1, 0, 2])
predictions = np.array(
[
[0.1, 0.9, 0.8, 0.8],
[0.05, 0.95, 0, 1],
[0.1, 0.8, 0.3, 1],
]
)
self.assertAllEqual(
kmath.in_top_k(targets, predictions, k=1), [True, False, False]
)
self.assertAllEqual(
kmath.in_top_k(targets, predictions, k=2), [True, False, False]
)
self.assertAllEqual(
kmath.in_top_k(targets, predictions, k=3), [True, True, True]
)
# Test tie cases.
targets = np.array([1, 0, 2])
predictions = np.array(
[
[0.1, 0.9, 0.8, 0.8],
[0.95, 0.95, 0, 0.95],
[0.1, 0.8, 0.8, 0.95],
]
)
self.assertAllEqual(
kmath.in_top_k(targets, predictions, k=1), [True, True, False]
)
self.assertAllEqual(
kmath.in_top_k(targets, predictions, k=2), [True, True, True]
)
self.assertAllEqual(
kmath.in_top_k(targets, predictions, k=3), [True, True, True]
)
def test_logsumexp(self):
x = np.random.rand(5, 5)
outputs = kmath.logsumexp(x)
expected = np.log(np.sum(np.exp(x)))
self.assertAllClose(outputs, expected)
outputs = kmath.logsumexp(x, axis=1)
expected = np.log(np.sum(np.exp(x), axis=1))
self.assertAllClose(outputs, expected)
def test_qr(self):
x = np.random.random((4, 5))
q, r = kmath.qr(x, mode="reduced")
qref, rref = np.linalg.qr(x, mode="reduced")
self.assertAllClose(qref, q)
self.assertAllClose(rref, r)
q, r = kmath.qr(x, mode="complete")
qref, rref = np.linalg.qr(x, mode="complete")
self.assertAllClose(qref, q)
self.assertAllClose(rref, r)
def test_extract_sequences(self):
# Test 1D case.
x = np.random.random((10,))
sequence_length = 3
sequence_stride = 2
output = kmath.extract_sequences(x, sequence_length, sequence_stride)
num_sequences = 1 + (x.shape[-1] - sequence_length) // sequence_stride
expected = np.zeros(shape=(num_sequences, sequence_length))
pos = 0
for i in range(num_sequences):
expected[i] = x[pos : pos + sequence_length]
pos += sequence_stride
self.assertAllClose(output, expected)
# Test N-D case.
x = np.random.random((4, 8))
sequence_length = 3
sequence_stride = 2
output = kmath.extract_sequences(x, sequence_length, sequence_stride)
num_sequences = 1 + (x.shape[-1] - sequence_length) // sequence_stride
expected = np.zeros(shape=(4, num_sequences, sequence_length))
pos = 0
for i in range(num_sequences):
expected[:, i] = x[:, pos : pos + sequence_length]
pos += sequence_stride
self.assertAllClose(output, expected)
def test_fft(self):
real = np.random.random((2, 4, 3))
imag = np.random.random((2, 4, 3))
complex_arr = real + 1j * imag
real_output, imag_output = kmath.fft((real, imag))
ref = np.fft.fft(complex_arr)
real_ref = np.real(ref)
imag_ref = np.imag(ref)
self.assertAllClose(real_ref, real_output)
self.assertAllClose(imag_ref, imag_output)
def test_fft2(self):
real = np.random.random((2, 4, 3))
imag = np.random.random((2, 4, 3))
complex_arr = real + 1j * imag
real_output, imag_output = kmath.fft2((real, imag))
ref = np.fft.fft2(complex_arr)
real_ref = np.real(ref)
imag_ref = np.imag(ref)
self.assertAllClose(real_ref, real_output)
self.assertAllClose(imag_ref, imag_output)
@parameterized.parameters([(None,), (3,), (15,)])
def test_rfft(self, n):
# Test 1D.
x = np.random.random((10,))
real_output, imag_output = kmath.rfft(x, fft_length=n)
ref = np.fft.rfft(x, n=n)
real_ref = np.real(ref)
imag_ref = np.imag(ref)
self.assertAllClose(real_ref, real_output, atol=1e-5, rtol=1e-5)
self.assertAllClose(imag_ref, imag_output, atol=1e-5, rtol=1e-5)
# Test N-D case.
x = np.random.random((2, 3, 10))
real_output, imag_output = kmath.rfft(x, fft_length=n)
ref = np.fft.rfft(x, n=n)
real_ref = np.real(ref)
imag_ref = np.imag(ref)
self.assertAllClose(real_ref, real_output, atol=1e-5, rtol=1e-5)
self.assertAllClose(imag_ref, imag_output, atol=1e-5, rtol=1e-5)
@parameterized.parameters([(None,), (3,), (15,)])
def test_irfft(self, n):
# Test 1D.
real = np.random.random((10,))
imag = np.random.random((10,))
complex_arr = real + 1j * imag
output = kmath.irfft((real, imag), fft_length=n)
ref = np.fft.irfft(complex_arr, n=n)
self.assertAllClose(output, ref, atol=1e-5, rtol=1e-5)
# Test N-D case.
real = np.random.random((2, 3, 10))
imag = np.random.random((2, 3, 10))
complex_arr = real + 1j * imag
output = kmath.irfft((real, imag), fft_length=n)
ref = np.fft.irfft(complex_arr, n=n)
self.assertAllClose(output, ref, atol=1e-5, rtol=1e-5)
@parameterized.parameters(
[
(32, 8, 32, "hann", True),
(8, 8, 16, "hann", True),
(4, 4, 7, "hann", True),
(32, 8, 32, "hamming", True),
(32, 8, 32, "hann", False),
(32, 8, 32, np.ones((32,)), True),
(32, 8, 32, None, True),
]
)
def test_stft(
self, sequence_length, sequence_stride, fft_length, window, center
):
# Test 1D case.
x = np.random.random((32,))
real_output, imag_output = kmath.stft(
x, sequence_length, sequence_stride, fft_length, window, center
)
real_ref, imag_ref = _stft(
x, sequence_length, sequence_stride, fft_length, window, center
)
self.assertAllClose(real_ref, real_output, atol=1e-5, rtol=1e-5)
self.assertAllClose(imag_ref, imag_output, atol=1e-5, rtol=1e-5)
# Test N-D case.
x = np.random.random((2, 3, 32))
real_output, imag_output = kmath.stft(
x, sequence_length, sequence_stride, fft_length, window, center
)
real_ref, imag_ref = _stft(
x, sequence_length, sequence_stride, fft_length, window, center
)
self.assertAllClose(real_ref, real_output, atol=1e-5, rtol=1e-5)
self.assertAllClose(imag_ref, imag_output, atol=1e-5, rtol=1e-5)
@parameterized.parameters(
[
(32, 8, 32, "hann", True),
(8, 8, 16, "hann", True),
(4, 4, 7, "hann", True),
(32, 8, 32, "hamming", True),
(8, 4, 8, "hann", False),
(32, 8, 32, np.ones((32,)), True),
(32, 8, 32, None, True),
]
)
def test_istft(
self, sequence_length, sequence_stride, fft_length, window, center
):
        # sequence_stride must be <= x[0].shape[-1]
        # sequence_stride must be >= fft_length / num_sequences
# Test 1D case.
x = np.random.random((256,))
real_x, imag_x = _stft(
x, sequence_length, sequence_stride, fft_length, window, center
)
output = kmath.istft(
(real_x, imag_x),
sequence_length,
sequence_stride,
fft_length,
window=window,
center=center,
)
ref = _istft(
(real_x, imag_x),
sequence_length,
sequence_stride,
fft_length,
window=window,
center=center,
)
if backend.backend() in ("numpy", "jax", "torch"):
            # these backends have different implementations for the boundary
            # of the output, so we truncate 5% before assertAllClose
truncated_len = int(output.shape[-1] * 0.05)
output = output[..., truncated_len:-truncated_len]
ref = ref[..., truncated_len:-truncated_len]
self.assertAllClose(output, ref, atol=1e-5, rtol=1e-5)
# Test N-D case.
x = np.random.random((2, 3, 256))
real_x, imag_x = _stft(
x, sequence_length, sequence_stride, fft_length, window, center
)
output = kmath.istft(
(real_x, imag_x),
sequence_length,
sequence_stride,
fft_length,
window=window,
center=center,
)
ref = _istft(
(real_x, imag_x),
sequence_length,
sequence_stride,
fft_length,
window=window,
center=center,
)
if backend.backend() in ("numpy", "jax", "torch"):
            # these backends have different implementations for the boundary
            # of the output, so we truncate 5% before assertAllClose
truncated_len = int(output.shape[-1] * 0.05)
output = output[..., truncated_len:-truncated_len]
ref = ref[..., truncated_len:-truncated_len]
self.assertAllClose(output, ref, atol=1e-5, rtol=1e-5)
def test_rsqrt(self):
x = np.array([[1, 4, 9], [16, 25, 36]], dtype="float32")
self.assertAllClose(kmath.rsqrt(x), 1 / np.sqrt(x))
self.assertAllClose(kmath.Rsqrt()(x), 1 / np.sqrt(x))
| keras-core/keras_core/ops/math_test.py/0 | {
"file_path": "keras-core/keras_core/ops/math_test.py",
"repo_id": "keras-core",
"token_count": 15659
} | 51 |
from keras_core import backend
from keras_core import ops
from keras_core.api_export import keras_core_export
from keras_core.optimizers import optimizer
@keras_core_export(["keras_core.optimizers.Adafactor"])
class Adafactor(optimizer.Optimizer):
"""Optimizer that implements the Adafactor algorithm.
Adafactor is commonly used in NLP tasks, and has the advantage
of taking less memory because it only saves partial information of previous
gradients.
The default argument setup is based on the original paper (see reference).
    When gradients have rank > 2, the Adafactor optimizer reduces over the
    last 2 dimensions separately in its accumulator variables.
Args:
learning_rate: A float, a
`keras_core.optimizers.schedules.LearningRateSchedule` instance, or
a callable that takes no arguments and returns the actual value to
use. The learning rate. Defaults to `0.001`.
beta_2_decay: float, defaults to -0.8. The decay rate of `beta_2`.
        epsilon_1: float, defaults to 1e-30. A small offset to keep the
            denominator away from 0.
epsilon_2: float, defaults to 1e-3. A small offset to avoid learning
rate becoming too small by time.
clip_threshold: float, defaults to 1.0. Clipping threshold. This is a
part of Adafactor algorithm, independent from `clipnorm`,
`clipvalue`, and `global_clipnorm`.
        relative_step: bool, defaults to True. If `learning_rate` is a
            constant and `relative_step=True`, the learning rate will be
            adjusted based on the current iteration. This is the default
            learning rate decay in Adafactor.
{{base_optimizer_keyword_args}}
Reference:
- [Shazeer, Noam et al., 2018](https://arxiv.org/abs/1804.04235).
"""
def __init__(
self,
learning_rate=0.001,
beta_2_decay=-0.8,
epsilon_1=1e-30,
epsilon_2=1e-3,
clip_threshold=1.0,
relative_step=True,
weight_decay=None,
clipnorm=None,
clipvalue=None,
global_clipnorm=None,
use_ema=False,
ema_momentum=0.99,
ema_overwrite_frequency=None,
name="adafactor",
**kwargs,
):
super().__init__(
learning_rate=learning_rate,
name=name,
weight_decay=weight_decay,
clipnorm=clipnorm,
clipvalue=clipvalue,
global_clipnorm=global_clipnorm,
use_ema=use_ema,
ema_momentum=ema_momentum,
ema_overwrite_frequency=ema_overwrite_frequency,
**kwargs,
)
self.beta_2_decay = beta_2_decay
self.epsilon_1 = epsilon_1
self.epsilon_2 = epsilon_2
self.clip_threshold = clip_threshold
self.relative_step = relative_step
def build(self, var_list):
"""Initialize optimizer variables.
        Adafactor optimizer has 3 types of variables: factored row
        accumulators (`r`), factored column accumulators (`c`), and
        velocities (`v`).
        Args:
            var_list: list of model variables to build Adafactor variables
                on.
"""
if self.built:
return
super().build(var_list)
self._r = []
self._c = []
self._v = []
for var in var_list:
if len(var.shape) < 2:
                # Don't factor if variable is of dimension < 2, but we still
                # need to create dummy variables as placeholders.
with backend.name_scope(self.name, caller=self):
self._r.append(
backend.Variable(0, name=var.name, trainable=False)
)
self._c.append(
backend.Variable(0, name=var.name, trainable=False)
)
else:
                # Always factor the last 2 dimensions.
r_shape = var.shape[:-1]
c_shape = var.shape[:-2] + var.shape[-1]
self._r.append(
self.add_variable(
shape=r_shape,
dtype=var.dtype,
name=var.name,
)
)
self._c.append(
self.add_variable(
shape=c_shape,
dtype=var.dtype,
name=var.name,
)
)
self._v.append(
self.add_variable_from_reference(
reference_variable=var, name="velocity"
)
)
def _rms(self, x):
return ops.sqrt(ops.mean(ops.square(x)))
def update_step(self, gradient, variable, learning_rate):
"""Update step given gradient and the associated model variable."""
lr = ops.cast(learning_rate, variable.dtype)
gradient = ops.cast(gradient, variable.dtype)
epsilon_2 = ops.cast(self.epsilon_2, variable.dtype)
one = ops.cast(1.0, variable.dtype)
local_step = ops.cast(self.iterations + 1, variable.dtype)
if not callable(self._learning_rate) and self.relative_step:
lr = ops.minimum(lr, 1 / ops.sqrt(local_step))
r = self._r[self._get_variable_index(variable)]
c = self._c[self._get_variable_index(variable)]
v = self._v[self._get_variable_index(variable)]
rho_t = ops.minimum(lr, 1 / ops.sqrt(local_step))
alpha_t = ops.maximum(epsilon_2, self._rms(variable)) * rho_t
regulated_grad_square = ops.square(gradient) + self.epsilon_1
beta_2_t = 1 - ops.power(local_step, self.beta_2_decay)
if len(variable.shape) >= 2:
# `r` deletes the last dimension of gradient, so it is of shape
# `gradient.shape[:-1]`.
r.assign(
beta_2_t * r
+ (1 - beta_2_t) * ops.mean(regulated_grad_square, axis=-1)
)
# `c` deletes the second last dimension of gradient, so it is of
# shape `gradient.shape[:-2] + gradient.shape[-1]`.
c.assign(
beta_2_t * c
+ (1 - beta_2_t) * ops.mean(regulated_grad_square, axis=-2)
)
v.assign(
ops.expand_dims(
r / ops.mean(r, axis=-1, keepdims=True), axis=-1
)
* ops.expand_dims(c, -2)
)
else:
v.assign(beta_2_t * v + (1 - beta_2_t) * regulated_grad_square)
# `convert_to_tensor` unifies the handling of sparse and dense grads.
u_t = gradient / ops.sqrt(v)
u_t_hat = u_t / ops.maximum(one, (self._rms(u_t) / self.clip_threshold))
variable.assign(variable - alpha_t * u_t_hat)
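        # Editor's sketch (added for illustration, not in the original
        # file): for a variable of shape (256, 128), `r` has shape (256,)
        # and `c` has shape (128,), so the factored second moment stores
        # 256 + 128 values instead of the 256 * 128 entries a full
        # accumulator would need; `v` is then rebuilt from `r` and `c` as
        # an outer-product-style estimate before the update above.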
def get_config(self):
config = super().get_config()
config.update(
{
"beta_2_decay": self.beta_2_decay,
"epsilon_1": self.epsilon_1,
"epsilon_2": self.epsilon_2,
"clip_threshold": self.clip_threshold,
"relative_step": self.relative_step,
}
)
return config
Adafactor.__doc__ = Adafactor.__doc__.replace(
"{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)
| keras-core/keras_core/optimizers/adafactor.py/0 | {
"file_path": "keras-core/keras_core/optimizers/adafactor.py",
"repo_id": "keras-core",
"token_count": 3683
} | 52 |
import numpy as np
from absl.testing import parameterized
from keras_core import backend
from keras_core import ops
from keras_core import testing
from keras_core.optimizers.loss_scale_optimizer import LossScaleOptimizer
from keras_core.optimizers.sgd import SGD
class LossScaleOptimizerTest(testing.TestCase, parameterized.TestCase):
def test_config(self):
inner_optimizer = SGD(
learning_rate=0.5,
momentum=0.06,
nesterov=True,
weight_decay=0.004,
)
optimizer = LossScaleOptimizer(inner_optimizer)
self.run_class_serialization_test(optimizer)
@parameterized.named_parameters(("stateless", True), ("stateful", False))
def test_finite_step(self, stateless):
if not stateless and backend.backend() == "jax":
self.skipTest("LossScaleOptimizer must use stateless_apply on jax.")
inner_optimizer = SGD(learning_rate=0.5)
optimizer = LossScaleOptimizer(inner_optimizer)
grads = [ops.array([1.0, 6.0, 7.0, 2.0]) * optimizer.initial_scale]
vars = [backend.Variable([1.0, 2.0, 3.0, 4.0])]
if stateless:
optimizer.build(vars)
vars, _ = optimizer.stateless_apply(
optimizer.variables, grads, vars
)
else:
optimizer.apply(grads, vars)
self.assertAllClose(
vars, [[0.5, -1.0, -0.5, 3.0]], rtol=1e-4, atol=1e-4
)
@parameterized.named_parameters(("stateless", True), ("stateful", False))
def test_infinite_step(self, stateless):
if not stateless and backend.backend() == "jax":
self.skipTest("LossScaleOptimizer must use stateless_apply on jax.")
inner_optimizer = SGD(learning_rate=0.5)
optimizer = LossScaleOptimizer(inner_optimizer)
grads = [ops.array([np.inf, np.inf, np.inf, np.inf])]
vars = [backend.Variable([1.0, 2.0, 3.0, 4.0])]
if stateless:
optimizer.build(vars)
vars, _ = optimizer.stateless_apply(
optimizer.variables, grads, vars
)
else:
optimizer.apply(grads, vars)
self.assertAllClose(vars, [[1.0, 2.0, 3.0, 4.0]], rtol=1e-4, atol=1e-4)
@parameterized.named_parameters(("stateless", True), ("stateful", False))
def test_downscaling(self, stateless):
if not stateless and backend.backend() == "jax":
self.skipTest("LossScaleOptimizer must use stateless_apply on jax.")
inner_optimizer = SGD(learning_rate=0.5)
optimizer = LossScaleOptimizer(inner_optimizer, initial_scale=400.0)
vars = [backend.Variable([1.0, 2.0, 3.0, 4.0])]
optimizer.build(vars)
opt_vars = optimizer.variables
grads = [ops.array([np.inf, np.inf, np.inf, np.inf])]
for _ in range(4):
if stateless:
_, opt_vars = optimizer.stateless_apply(opt_vars, grads, vars)
for ref_v, v in zip(optimizer.variables, opt_vars):
ref_v.assign(v)
else:
optimizer.apply(grads, vars)
self.assertAllClose(optimizer.scale_loss(1.0), 25.0)
@parameterized.named_parameters(("stateless", True), ("stateful", False))
def test_upscaling(self, stateless):
if not stateless and backend.backend() == "jax":
self.skipTest("LossScaleOptimizer must use stateless_apply on jax.")
inner_optimizer = SGD(learning_rate=0.5)
optimizer = LossScaleOptimizer(
inner_optimizer,
initial_scale=2.0,
dynamic_growth_steps=2,
)
vars = [backend.Variable([1.0, 2.0, 3.0, 4.0])]
optimizer.build(vars)
opt_vars = optimizer.variables
grads = [ops.array([1.0, 6.0, 7.0, 2.0])]
for _ in range(8):
if stateless:
_, opt_vars = optimizer.stateless_apply(opt_vars, grads, vars)
for ref_v, v in zip(optimizer.variables, opt_vars):
ref_v.assign(v)
else:
optimizer.apply(grads, vars)
self.assertAllClose(optimizer.scale_loss(1.0), 32.0)
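        # Editor's note (added for illustration, not in the original file):
        # the expected values above follow from the dynamic scaling rule:
        # every non-finite step halves the scale (400 -> 200 -> 100 -> 50
        # -> 25 in test_downscaling), while every `dynamic_growth_steps`
        # consecutive finite steps double it (2 -> 4 -> 8 -> 16 -> 32 over
        # 8 steps with dynamic_growth_steps=2 in this test).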
| keras-core/keras_core/optimizers/loss_scale_optimizer_test.py/0 | {
"file_path": "keras-core/keras_core/optimizers/loss_scale_optimizer_test.py",
"repo_id": "keras-core",
"token_count": 2047
} | 53 |
import inspect
from keras_core.api_export import keras_core_export
from keras_core.regularizers.regularizers import L1
from keras_core.regularizers.regularizers import L1L2
from keras_core.regularizers.regularizers import L2
from keras_core.regularizers.regularizers import OrthogonalRegularizer
from keras_core.regularizers.regularizers import Regularizer
from keras_core.saving import serialization_lib
from keras_core.utils.naming import to_snake_case
ALL_OBJECTS = {
Regularizer,
L1,
L2,
L1L2,
OrthogonalRegularizer,
}
ALL_OBJECTS_DICT = {cls.__name__: cls for cls in ALL_OBJECTS}
ALL_OBJECTS_DICT.update(
{to_snake_case(cls.__name__): cls for cls in ALL_OBJECTS}
)
@keras_core_export("keras_core.regularizers.serialize")
def serialize(initializer):
return serialization_lib.serialize_keras_object(initializer)
@keras_core_export("keras_core.regularizers.deserialize")
def deserialize(config, custom_objects=None):
"""Return a Keras regularizer object via its config."""
return serialization_lib.deserialize_keras_object(
config,
module_objects=ALL_OBJECTS_DICT,
custom_objects=custom_objects,
)
@keras_core_export("keras_core.regularizers.get")
def get(identifier):
"""Retrieve a Keras regularizer object via an identifier."""
if identifier is None:
return None
if isinstance(identifier, dict):
obj = deserialize(identifier)
elif isinstance(identifier, str):
config = {"class_name": str(identifier), "config": {}}
obj = deserialize(config)
else:
obj = identifier
if callable(obj):
if inspect.isclass(obj):
obj = obj()
return obj
else:
raise ValueError(
f"Could not interpret regularizer identifier: {identifier}"
)
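# Editor's sketch (added for illustration, not in the original file): all of
# the following identifiers are expected to resolve through `get`:
#
#     get("l2")                          # snake_case string lookup
#     get({"class_name": "L1", "config": {"l1": 0.01}})
#     get(OrthogonalRegularizer())       # instances pass through unchanged
#
# Anything else (e.g. an integer) raises the ValueError above.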
| keras-core/keras_core/regularizers/__init__.py/0 | {
"file_path": "keras-core/keras_core/regularizers/__init__.py",
"repo_id": "keras-core",
"token_count": 709
} | 54 |
import tree
from keras_core import backend
from keras_core import losses as losses_module
from keras_core import metrics as metrics_module
from keras_core import ops
from keras_core.utils.naming import get_object_name
class MetricsList(metrics_module.Metric):
def __init__(self, metrics, name="metrics_list", output_name=None):
super().__init__(name=name)
self.metrics = metrics
self.output_name = output_name
def update_state(self, y_true, y_pred, sample_weight=None):
for m in self.metrics:
m.update_state(y_true, y_pred, sample_weight=sample_weight)
def reset_state(self):
for m in self.metrics:
m.reset_state()
def get_result(self):
return {m.name: m.result() for m in self.metrics}
def get_config(self):
raise NotImplementedError
@classmethod
def from_config(cls, config):
raise NotImplementedError
def is_function_like(value):
if value is None:
return True
if isinstance(value, str):
return True
if callable(value):
return True
return False
def is_binary_or_sparse_categorical(y_true, y_pred):
y_t_rank = len(y_true.shape)
y_p_rank = len(y_pred.shape)
y_t_last_dim = y_true.shape[-1]
y_p_last_dim = y_pred.shape[-1]
is_binary = y_p_last_dim == 1
is_sparse_categorical = (
y_t_rank < y_p_rank or y_t_last_dim == 1 and y_p_last_dim > 1
)
return is_binary, is_sparse_categorical
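# Editor's sketch (added for illustration, not in the original file): with
# y_pred of shape (batch, 1) the problem is treated as binary; y_true of
# shape (batch,) or (batch, 1) against y_pred of shape (batch, 10) is
# treated as sparse categorical; one-hot y_true of shape (batch, 10) falls
# through to the plain categorical metric/loss in the helpers below.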
def get_metric(identifier, y_true, y_pred):
if identifier is None:
return None # Ok to have no metric for an output.
    # Convenience feature for selecting between binary, categorical,
    # and sparse categorical.
if str(identifier).lower() not in ["accuracy", "acc"]:
metric_obj = metrics_module.get(identifier)
else:
is_binary, is_sparse_categorical = is_binary_or_sparse_categorical(
y_true, y_pred
)
if is_binary:
metric_obj = metrics_module.BinaryAccuracy(name=str(identifier))
elif is_sparse_categorical:
metric_obj = metrics_module.SparseCategoricalAccuracy(
name=str(identifier)
)
else:
metric_obj = metrics_module.CategoricalAccuracy(
name=str(identifier)
)
if not isinstance(metric_obj, metrics_module.Metric):
metric_obj = metrics_module.MeanMetricWrapper(metric_obj)
if isinstance(identifier, str):
metric_name = identifier
else:
metric_name = get_object_name(metric_obj)
metric_obj.name = metric_name
return metric_obj
def get_loss(identifier, y_true, y_pred):
if identifier is None:
return None # Ok to have no loss for an output.
    # Convenience feature for selecting between binary, categorical,
    # and sparse categorical.
if str(identifier).lower() not in ["crossentropy", "ce"]:
loss_obj = losses_module.get(identifier)
else:
is_binary, is_sparse_categorical = is_binary_or_sparse_categorical(
y_true, y_pred
)
if is_binary:
loss_obj = losses_module.binary_crossentropy
elif is_sparse_categorical:
loss_obj = losses_module.sparse_categorical_crossentropy
else:
loss_obj = losses_module.categorical_crossentropy
if not isinstance(loss_obj, losses_module.Loss):
if isinstance(identifier, str):
loss_name = identifier
else:
loss_name = get_object_name(loss_obj)
loss_obj = losses_module.LossFunctionWrapper(loss_obj, name=loss_name)
return loss_obj
class CompileMetrics(metrics_module.Metric):
def __init__(
self,
metrics,
weighted_metrics,
name="compile_metric",
output_names=None,
):
super().__init__(name=name)
if metrics and not isinstance(metrics, (list, tuple, dict)):
raise ValueError(
"Expected `metrics` argument to be a list, tuple, or dict. "
f"Received instead: metrics={metrics} of type {type(metrics)}"
)
if weighted_metrics and not isinstance(
weighted_metrics, (list, tuple, dict)
):
raise ValueError(
"Expected `weighted_metrics` argument to be a list, tuple, or "
f"dict. Received instead: weighted_metrics={weighted_metrics} "
f"of type {type(weighted_metrics)}"
)
self._user_metrics = metrics
self._user_weighted_metrics = weighted_metrics
self.built = False
self.name = "compile_metrics"
self.output_names = output_names
@property
def variables(self):
        # Avoid relying on implicit tracking since
        # CompileMetrics may be instantiated or built in a no-tracking scope.
if not self.built:
return []
vars = []
for m in self._flat_metrics + self._flat_weighted_metrics:
if m is not None:
vars.extend(m.variables)
return vars
def build(self, y_true, y_pred):
if self.output_names:
output_names = self.output_names
elif isinstance(y_pred, dict):
output_names = sorted(list(y_pred.keys()))
elif isinstance(y_pred, (list, tuple)):
num_outputs = len(y_pred)
if all(hasattr(x, "_keras_history") for x in y_pred):
output_names = [x._keras_history.operation.name for x in y_pred]
else:
output_names = None
else:
output_names = None
num_outputs = 1
if output_names:
num_outputs = len(output_names)
y_pred = self._flatten_y(y_pred)
y_true = self._flatten_y(y_true)
metrics = self._user_metrics
weighted_metrics = self._user_weighted_metrics
self._flat_metrics = self._build_metrics_set(
metrics,
num_outputs,
output_names,
y_true,
y_pred,
argument_name="metrics",
)
self._flat_weighted_metrics = self._build_metrics_set(
weighted_metrics,
num_outputs,
output_names,
y_true,
y_pred,
argument_name="weighted_metrics",
)
self.built = True
def _build_metrics_set(
self, metrics, num_outputs, output_names, y_true, y_pred, argument_name
):
flat_metrics = []
if isinstance(metrics, dict):
for name in metrics.keys():
if name not in output_names:
raise ValueError(
f"In the dict argument `{argument_name}`, key "
f"'{name}' does not correspond to any model "
f"output. Received:\n{argument_name}={metrics}"
)
if num_outputs == 1:
if not metrics:
flat_metrics.append(None)
else:
if isinstance(metrics, dict):
metrics = tree.flatten(metrics)
if not isinstance(metrics, list):
metrics = [metrics]
if not all(is_function_like(m) for m in metrics):
raise ValueError(
f"Expected all entries in the `{argument_name}` list "
f"to be metric objects. Received instead:\n"
f"{argument_name}={metrics}"
)
flat_metrics.append(
MetricsList(
[
get_metric(m, y_true[0], y_pred[0])
for m in metrics
if m is not None
]
)
)
else:
if isinstance(metrics, (list, tuple)):
if len(metrics) != len(y_pred):
raise ValueError(
"For a model with multiple outputs, "
f"when providing the `{argument_name}` argument as a "
"list, it should have as many entries as the model has "
f"outputs. Received:\n{argument_name}={metrics}\nof "
f"length {len(metrics)} whereas the model has "
f"{len(y_pred)} outputs."
)
for idx, (mls, yt, yp) in enumerate(
zip(metrics, y_true, y_pred)
):
if not isinstance(mls, list):
mls = [mls]
name = output_names[idx] if output_names else None
if not all(is_function_like(e) for e in mls):
raise ValueError(
f"All entries in the sublists of the "
f"`{argument_name}` list should be metric objects. "
f"Found the following sublist with unknown "
f"types: {mls}"
)
flat_metrics.append(
MetricsList(
[
get_metric(m, yt, yp)
for m in mls
if m is not None
],
output_name=name,
)
)
elif isinstance(metrics, dict):
if output_names is None:
raise ValueError(
f"Argument `{argument_name}` can only be provided as a "
"dict when the model also returns a dict of outputs. "
f"Received {argument_name}={metrics}"
)
for name in metrics.keys():
if not isinstance(metrics[name], list):
metrics[name] = [metrics[name]]
if not all(is_function_like(e) for e in metrics[name]):
raise ValueError(
f"All entries in the sublists of the "
f"`{argument_name}` dict should be metric objects. "
f"At key '{name}', found the following sublist "
f"with unknown types: {metrics[name]}"
)
for name, yt, yp in zip(output_names, y_true, y_pred):
if name in metrics:
flat_metrics.append(
MetricsList(
[
get_metric(m, yt, yp)
for m in metrics[name]
if m is not None
],
output_name=name,
)
)
else:
flat_metrics.append(None)
return flat_metrics
def _flatten_y(self, y):
if isinstance(y, dict) and self.output_names:
result = []
for name in self.output_names:
if name in y:
result.append(y[name])
return result
return tree.flatten(y)
def update_state(self, y_true, y_pred, sample_weight=None):
if not self.built:
self.build(y_true, y_pred)
y_true = self._flatten_y(y_true)
y_pred = self._flatten_y(y_pred)
for m, y_t, y_p in zip(self._flat_metrics, y_true, y_pred):
if m:
m.update_state(y_t, y_p)
if sample_weight is not None:
sample_weight = self._flatten_y(sample_weight)
# For multi-outputs, repeat sample weights for n outputs.
if len(sample_weight) < len(y_true):
sample_weight = [sample_weight[0] for _ in range(len(y_true))]
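                # e.g. (hypothetical two-output case) a single weight array
                # `w` is reused as `[w, w]`, one entry per output.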
else:
sample_weight = [None for _ in range(len(y_true))]
for m, y_t, y_p, s_w in zip(
self._flat_weighted_metrics, y_true, y_pred, sample_weight
):
if m:
m.update_state(y_t, y_p, s_w)
def reset_state(self):
if not self.built:
return
for m in self._flat_metrics:
if m:
m.reset_state()
for m in self._flat_weighted_metrics:
if m:
m.reset_state()
def result(self):
if not self.built:
raise ValueError(
"Cannot get result() since the metric has not yet been built."
)
results = {}
unique_name_counters = {}
for mls in self._flat_metrics:
if not mls:
continue
for m in mls.metrics:
name = m.name
if mls.output_name:
name = f"{mls.output_name}_{name}"
if name not in unique_name_counters:
results[name] = m.result()
unique_name_counters[name] = 1
else:
index = unique_name_counters[name]
unique_name_counters[name] += 1
name = f"{name}_{index}"
results[name] = m.result()
for mls in self._flat_weighted_metrics:
if not mls:
continue
for m in mls.metrics:
name = m.name
if mls.output_name:
name = f"{mls.output_name}_{name}"
if name not in unique_name_counters:
results[name] = m.result()
unique_name_counters[name] = 1
else:
name = f"weighted_{m.name}"
if mls.output_name:
name = f"{mls.output_name}_{name}"
if name not in unique_name_counters:
unique_name_counters[name] = 1
else:
index = unique_name_counters[name]
unique_name_counters[name] += 1
name = f"{name}_{index}"
results[name] = m.result()
return results
def get_config(self):
raise NotImplementedError
@classmethod
def from_config(cls, config):
raise NotImplementedError
class CompileLoss(losses_module.Loss):
def __init__(
self,
loss,
loss_weights=None,
reduction="sum_over_batch_size",
output_names=None,
):
if loss_weights and not isinstance(loss_weights, (list, tuple, dict)):
raise ValueError(
"Expected `loss_weights` argument to be a list, tuple, or "
f"dict. Received instead: loss_weights={loss_weights} "
f"of type {type(loss_weights)}"
)
self._user_loss = loss
self._user_loss_weights = loss_weights
self.built = False
self.output_names = output_names
super().__init__(name="compile_loss", reduction=reduction)
def build(self, y_true, y_pred):
if self.output_names:
output_names = self.output_names
elif isinstance(y_pred, dict):
output_names = sorted(list(y_pred.keys()))
elif isinstance(y_pred, (list, tuple)):
num_outputs = len(y_pred)
if all(hasattr(x, "_keras_history") for x in y_pred):
output_names = [x._keras_history.operation.name for x in y_pred]
else:
output_names = None
else:
output_names = None
num_outputs = 1
if output_names:
num_outputs = len(output_names)
y_pred = self._flatten_y(y_pred)
loss = self._user_loss
loss_weights = self._user_loss_weights
flat_losses = []
flat_loss_weights = []
if isinstance(loss, dict):
for name in loss.keys():
if name not in output_names:
raise ValueError(
"In the dict argument `loss`, key "
f"'{name}' does not correspond to any model output. "
f"Received:\nloss={loss}"
)
if num_outputs == 1:
if isinstance(loss, dict):
loss = tree.flatten(loss)
if isinstance(loss, list) and len(loss) == 1:
loss = loss[0]
if not is_function_like(loss):
raise ValueError(
"When there is only a single output, the `loss` argument "
"must be a callable. "
f"Received instead:\nloss={loss} of type {type(loss)}"
)
if is_function_like(loss) and tree.is_nested(y_pred):
# The model has multiple outputs but only one loss fn
# was provided. Broadcast loss to all outputs.
loss = tree.map_structure(lambda x: loss, y_pred)
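            # For example (hypothetical two-output case): passing
            # loss=keras_core.losses.MeanSquaredError() here becomes
            # [mse, mse], i.e. the same loss applied to each output.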
# Iterate over all possible loss formats:
# plain function, list/tuple, dict
if is_function_like(loss):
flat_losses.append(get_loss(loss, y_true, y_pred))
if loss_weights:
if not isinstance(loss_weights, float):
raise ValueError(
"When there is only a single output, the "
"`loss_weights` argument "
"must be a Python float. "
f"Received instead: loss_weights={loss_weights} of "
f"type {type(loss_weights)}"
)
flat_loss_weights.append(loss_weights)
else:
flat_loss_weights.append(1.0)
elif isinstance(loss, (list, tuple)):
loss = tree.flatten(loss)
if len(loss) != len(y_pred):
raise ValueError(
"For a model with multiple outputs, "
"when providing the `loss` argument as a list, "
"it should have as many entries as the model has outputs. "
f"Received:\nloss={loss}\nof length {len(loss)} "
f"whereas the model has {len(y_pred)} outputs."
)
if not all(is_function_like(e) for e in loss):
raise ValueError(
"For a model with multiple outputs, "
"when providing the `loss` argument as a list, "
"each list entry should be a callable (the loss function "
"corresponding to that output). "
f"Received: loss={loss}"
)
flat_losses = [
get_loss(fn, y_true, y_pred) for fn in loss if fn is not None
]
if loss_weights:
if not isinstance(loss_weights, (list, tuple)):
raise ValueError(
"If the `loss` argument is provided as a list/tuple, "
"the `loss_weight` argument should also be provided as "
"a list/tuple, of equal length. "
f"Received: loss_weights={loss_weights}"
)
if len(loss_weights) != len(y_pred):
raise ValueError(
"For a model with multiple outputs, "
"when providing the `loss_weights` argument as a list, "
"it should have as many entries as the model has "
f"outputs. Received: loss_weights={loss_weights} of "
f"length {len(loss_weights)} whereas the model has "
f"{len(y_pred)} outputs."
)
if not all(isinstance(e, (int, float)) for e in loss_weights):
raise ValueError(
"For a model with multiple outputs, when providing "
"the `loss_weights` argument as a list, "
"each list entry should be a Python int or float (the "
"weighting coefficient corresponding to the loss for "
f"that output). Received: loss_weights={loss_weights}"
)
flat_loss_weights = list(loss_weights)
else:
flat_loss_weights = [1.0 for _ in loss]
elif isinstance(loss, dict):
if output_names is None:
raise ValueError(
"Argument `loss` can only be provided as a dict "
"when the model also returns a dict of outputs. "
f"Received loss={loss}"
)
for name in loss.keys():
if isinstance(loss[name], list) and len(loss[name]) == 1:
loss[name] = loss[name][0]
if not is_function_like(loss[name]):
raise ValueError(
"For a model with multiple outputs, "
"when providing the `loss` argument as a dict, "
"each dict entry should be a callable (the loss "
"function corresponding to that output). "
f"At key '{name}', received invalid type:\n{loss[name]}"
)
for name, yt, yp in zip(output_names, y_true, y_pred):
if name in loss:
if loss[name]:
flat_losses.append(get_loss(loss[name], yt, yp))
else:
flat_losses.append(None)
else:
flat_losses.append(None)
if loss_weights:
if not isinstance(loss_weights, dict):
raise ValueError(
"If the `loss` argument is provided as a dict, "
"the `loss_weight` argument should also be provided as "
f"a dict. Received: loss_weights={loss_weights}"
)
for name in loss_weights.keys():
if name not in output_names:
raise ValueError(
"In the dict argument `loss_weights`, key "
f"'{name}' does not correspond to any model "
f"output. Received: loss_weights={loss_weights}"
)
if not isinstance(loss_weights[name], float):
raise ValueError(
"For a model with multiple outputs, "
"when providing the `loss_weights` argument as a "
"dict, each dict entry should be a Python float "
"(the weighting coefficient corresponding to the "
f"loss for that output). At key '{name}', "
f"received invalid type:\n{loss_weights[name]}"
)
for name in output_names:
if name in loss_weights:
flat_loss_weights.append(loss_weights[name])
else:
flat_loss_weights.append(1.0)
else:
flat_loss_weights = [1.0 for _ in flat_losses]
self.flat_losses = flat_losses
self.flat_loss_weights = flat_loss_weights
self.built = True
def __call__(self, y_true, y_pred, sample_weight=None):
with ops.name_scope(self.name):
return self.call(y_true, y_pred, sample_weight)
def _flatten_y(self, y):
if isinstance(y, dict) and self.output_names:
result = []
for name in self.output_names:
if name in y:
result.append(y[name])
return result
return tree.flatten(y)
def call(self, y_true, y_pred, sample_weight=None):
if not self.built:
self.build(y_true, y_pred)
y_true = self._flatten_y(y_true)
y_pred = self._flatten_y(y_pred)
if sample_weight is not None:
sample_weight = self._flatten_y(sample_weight)
# For multi-outputs, repeat sample weights for n outputs.
if len(sample_weight) < len(y_true):
sample_weight = [sample_weight[0] for _ in range(len(y_true))]
else:
sample_weight = [None for _ in y_true]
loss_values = []
for loss, y_t, y_p, loss_weight, sample_weight in zip(
self.flat_losses,
y_true,
y_pred,
self.flat_loss_weights,
sample_weight,
):
if loss:
value = loss_weight * ops.cast(
loss(y_t, y_p, sample_weight), dtype=backend.floatx()
)
loss_values.append(value)
if loss_values:
total_loss = sum(loss_values)
return total_loss
return None
def get_config(self):
raise NotImplementedError
@classmethod
def from_config(cls, config):
raise NotImplementedError
| keras-core/keras_core/trainers/compile_utils.py/0 | {
"file_path": "keras-core/keras_core/trainers/compile_utils.py",
"repo_id": "keras-core",
"token_count": 13763
} | 55 |
import numpy as np
import pytest
import tensorflow as tf
from keras_core import backend
from keras_core import testing
from keras_core.trainers import epoch_iterator
class TestEpochIterator(testing.TestCase):
def _test_basic_flow(self, return_type):
x = np.random.random((100, 16))
y = np.random.random((100, 4))
sample_weight = np.random.random((100,))
batch_size = 16
shuffle = True
iterator = epoch_iterator.EpochIterator(
x=x,
y=y,
sample_weight=sample_weight,
batch_size=batch_size,
shuffle=shuffle,
)
steps_seen = []
for step, batch in iterator.enumerate_epoch(return_type=return_type):
batch = batch[0]
steps_seen.append(step)
self.assertEqual(len(batch), 3)
if return_type == "np":
self.assertIsInstance(batch[0], np.ndarray)
else:
self.assertIsInstance(batch[0], tf.Tensor)
self.assertEqual(steps_seen, [0, 1, 2, 3, 4, 5, 6])
def test_basic_flow_np(self):
self._test_basic_flow("np")
def test_basic_flow_tf(self):
self._test_basic_flow("tf")
def test_insufficient_data(self):
batch_size = 8
steps_per_epoch = 6
dataset_size = batch_size * (steps_per_epoch - 2)
x = np.arange(dataset_size).reshape((dataset_size, 1))
y = x * 2
iterator = epoch_iterator.EpochIterator(
x=x,
y=y,
batch_size=batch_size,
steps_per_epoch=steps_per_epoch,
)
steps_seen = []
for step, _ in iterator.enumerate_epoch():
steps_seen.append(step)
self.assertLen(steps_seen, steps_per_epoch - 2)
self.assertIsInstance(iterator, epoch_iterator.EpochIterator)
self.assertTrue(iterator._insufficient_data)
def test_unsupported_y_arg_tfdata(self):
with self.assertRaisesRegex(ValueError, "`y` should not be passed"):
x = tf.data.Dataset.from_tensor_slices(np.random.random((100, 16)))
y = np.random.random((100, 4))
_ = epoch_iterator.EpochIterator(x=x, y=y)
def test_unsupported_sample_weights_arg_tfdata(self):
with self.assertRaisesRegex(
ValueError, "`sample_weights` should not be passed"
):
x = tf.data.Dataset.from_tensor_slices(np.random.random((100, 16)))
sample_weights = np.random.random((100,))
_ = epoch_iterator.EpochIterator(x=x, sample_weight=sample_weights)
@pytest.mark.skipif(
backend.backend() != "torch", reason="Need to import torch"
)
def test_torch_dataloader(self):
import torch
class ExampleTorchDataset(torch.utils.data.Dataset):
def __init__(self, x, y):
self.x = x
self.y = y
def __len__(self):
return len(self.x)
def __getitem__(self, idx):
return self.x[idx], self.y[idx]
torch_dataset = ExampleTorchDataset(
np.random.random((64, 2)), np.random.random((64, 1))
)
torch_dataloader = torch.utils.data.DataLoader(
torch_dataset, batch_size=8, shuffle=True
)
iterator = epoch_iterator.EpochIterator(torch_dataloader)
for _, batch in iterator.enumerate_epoch(return_type="np"):
batch = batch[0]
self.assertEqual(batch[0].shape, (8, 2))
self.assertEqual(batch[1].shape, (8, 1))
@pytest.mark.skipif(
backend.backend() != "torch", reason="Need to import torch"
)
def test_unsupported_y_arg_torch_dataloader(self):
import torch
class ExampleTorchDataset(torch.utils.data.Dataset):
def __init__(self, x, y):
self.x = x
self.y = y
def __len__(self):
return len(self.x)
def __getitem__(self, idx):
return self.x[idx], self.y[idx]
torch_dataset = ExampleTorchDataset(
np.random.random((100, 16)), np.random.random((100, 4))
)
x = torch.utils.data.DataLoader(
torch_dataset, batch_size=8, shuffle=True
)
y = np.random.random((100, 4))
with self.assertRaisesRegex(
ValueError,
"When providing `x` as a torch DataLoader, `y` should not",
):
_ = epoch_iterator.EpochIterator(x=x, y=y)
@pytest.mark.skipif(
backend.backend() != "torch", reason="Need to import torch"
)
def test_unsupported_sample_weights_arg_torch_dataloader(self):
import torch
class ExampleTorchDataset(torch.utils.data.Dataset):
def __init__(self, x, y):
self.x = x
self.y = y
def __len__(self):
return len(self.x)
def __getitem__(self, idx):
return self.x[idx], self.y[idx]
torch_dataset = ExampleTorchDataset(
np.random.random((100, 16)), np.random.random((100, 4))
)
x = torch.utils.data.DataLoader(
torch_dataset, batch_size=8, shuffle=True
)
sample_weights = np.random.random((100,))
with self.assertRaisesRegex(
ValueError,
"When providing `x` as a torch DataLoader, `sample_weights`",
):
_ = epoch_iterator.EpochIterator(x=x, sample_weight=sample_weights)
def test_python_generator_input(self):
def generator_example():
for i in range(100):
yield (np.array([i]), np.array([i * 2]))
x = generator_example()
epoch_iter = epoch_iterator.EpochIterator(x=x)
self.assertIsInstance(
epoch_iter.data_adapter,
epoch_iterator.generator_data_adapter.GeneratorDataAdapter,
)
def test_unrecognized_data_type(self):
x = "unsupported_data"
with self.assertRaisesRegex(ValueError, "Unrecognized data type"):
_ = epoch_iterator.EpochIterator(x=x)
def test_invalid_return_type_in_get_iterator(self):
x = np.random.random((100, 16))
y = np.random.random((100, 4))
epoch_iter = epoch_iterator.EpochIterator(x=x, y=y)
with self.assertRaisesRegex(
ValueError,
"Argument `return_type` must be one of `{'np', 'tf'}`",
):
_ = epoch_iter._get_iterator("unsupported")
| keras-core/keras_core/trainers/epoch_iterator_test.py/0 | {
"file_path": "keras-core/keras_core/trainers/epoch_iterator_test.py",
"repo_id": "keras-core",
"token_count": 3234
} | 56 |
import numpy as np
from keras_core.api_export import keras_core_export
from keras_core.backend.config import standardize_data_format
from keras_core.utils import dataset_utils
from keras_core.utils import image_utils
from keras_core.utils.module_utils import tensorflow as tf
ALLOWLIST_FORMATS = (".bmp", ".gif", ".jpeg", ".jpg", ".png")
@keras_core_export(
[
"keras_core.utils.image_dataset_from_directory",
"keras_core.preprocessing.image_dataset_from_directory",
]
)
def image_dataset_from_directory(
directory,
labels="inferred",
label_mode="int",
class_names=None,
color_mode="rgb",
batch_size=32,
image_size=(256, 256),
shuffle=True,
seed=None,
validation_split=None,
subset=None,
interpolation="bilinear",
follow_links=False,
crop_to_aspect_ratio=False,
data_format=None,
):
"""Generates a `tf.data.Dataset` from image files in a directory.
If your directory structure is:
```
main_directory/
...class_a/
......a_image_1.jpg
......a_image_2.jpg
...class_b/
......b_image_1.jpg
......b_image_2.jpg
```
Then calling `image_dataset_from_directory(main_directory,
labels='inferred')` will return a `tf.data.Dataset` that yields batches of
images from the subdirectories `class_a` and `class_b`, together with labels
0 and 1 (0 corresponding to `class_a` and 1 corresponding to `class_b`).
Supported image formats: `.jpeg`, `.jpg`, `.png`, `.bmp`, `.gif`.
Animated gifs are truncated to the first frame.
Args:
directory: Directory where the data is located.
If `labels` is `"inferred"`, it should contain
subdirectories, each containing images for a class.
Otherwise, the directory structure is ignored.
labels: Either `"inferred"`
(labels are generated from the directory structure),
`None` (no labels),
or a list/tuple of integer labels of the same size as the number of
image files found in the directory. Labels should be sorted
according to the alphanumeric order of the image file paths
(obtained via `os.walk(directory)` in Python).
label_mode: String describing the encoding of `labels`. Options are:
- `"int"`: means that the labels are encoded as integers
(e.g. for `sparse_categorical_crossentropy` loss).
- `"categorical"` means that the labels are
encoded as a categorical vector
(e.g. for `categorical_crossentropy` loss).
- `"binary"` means that the labels (there can be only 2)
are encoded as `float32` scalars with values 0 or 1
(e.g. for `binary_crossentropy`).
- `None` (no labels).
class_names: Only valid if `labels` is `"inferred"`.
This is the explicit list of class names
(must match names of subdirectories). Used to control the order
of the classes (otherwise alphanumerical order is used).
color_mode: One of `"grayscale"`, `"rgb"`, `"rgba"`.
Defaults to `"rgb"`. Whether the images will be converted to
have 1, 3, or 4 channels.
batch_size: Size of the batches of data. Defaults to 32.
If `None`, the data will not be batched
(the dataset will yield individual samples).
image_size: Size to resize images to after they are read from disk,
specified as `(height, width)`. Defaults to `(256, 256)`.
Since the pipeline processes batches of images that must all have
the same size, this must be provided.
shuffle: Whether to shuffle the data. Defaults to `True`.
If set to `False`, sorts the data in alphanumeric order.
seed: Optional random seed for shuffling and transformations.
validation_split: Optional float between 0 and 1,
fraction of data to reserve for validation.
subset: Subset of the data to return.
One of `"training"`, `"validation"`, or `"both"`.
Only used if `validation_split` is set.
When `subset="both"`, the utility returns a tuple of two datasets
(the training and validation datasets respectively).
interpolation: String, the interpolation method used when
resizing images. Defaults to `"bilinear"`.
Supports `"bilinear"`, `"nearest"`, `"bicubic"`, `"area"`,
`"lanczos3"`, `"lanczos5"`, `"gaussian"`, `"mitchellcubic"`.
follow_links: Whether to visit subdirectories pointed to by symlinks.
Defaults to `False`.
crop_to_aspect_ratio: If `True`, resize the images without aspect
ratio distortion. When the original aspect ratio differs from the
target aspect ratio, the output image will be cropped so as to
return the largest possible window in the image
(of size `image_size`) that matches the target aspect ratio. By
default (`crop_to_aspect_ratio=False`), aspect ratio may not be
preserved.
        data_format: If `None`, uses `keras_core.config.image_data_format()`;
            otherwise either `"channels_last"` or `"channels_first"`.
Returns:
A `tf.data.Dataset` object.
- If `label_mode` is `None`, it yields `float32` tensors of shape
`(batch_size, image_size[0], image_size[1], num_channels)`,
encoding images (see below for rules regarding `num_channels`).
- Otherwise, it yields a tuple `(images, labels)`, where `images` has
shape `(batch_size, image_size[0], image_size[1], num_channels)`,
and `labels` follows the format described below.
Rules regarding labels format:
- if `label_mode` is `"int"`, the labels are an `int32` tensor of shape
`(batch_size,)`.
- if `label_mode` is `"binary"`, the labels are a `float32` tensor of
1s and 0s of shape `(batch_size, 1)`.
- if `label_mode` is `"categorical"`, the labels are a `float32` tensor
of shape `(batch_size, num_classes)`, representing a one-hot
encoding of the class index.
Rules regarding number of channels in the yielded images:
- if `color_mode` is `"grayscale"`,
there's 1 channel in the image tensors.
- if `color_mode` is `"rgb"`,
there are 3 channels in the image tensors.
- if `color_mode` is `"rgba"`,
there are 4 channels in the image tensors.
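    Example (an illustrative sketch; the directory path below is hypothetical
    and follows the structure described above):
    ```python
    train_ds, val_ds = keras_core.utils.image_dataset_from_directory(
        "path/to/main_directory",
        validation_split=0.2,
        subset="both",
        seed=1337,
        image_size=(256, 256),
        batch_size=32,
    )
    class_names = train_ds.class_names
    ```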
"""
if labels not in ("inferred", None):
if not isinstance(labels, (list, tuple)):
raise ValueError(
"`labels` argument should be a list/tuple of integer labels, "
"of the same size as the number of image files in the target "
"directory. If you wish to infer the labels from the "
"subdirectory "
'names in the target directory, pass `labels="inferred"`. '
"If you wish to get a dataset that only contains images "
f"(no labels), pass `labels=None`. Received: labels={labels}"
)
if class_names:
raise ValueError(
"You can only pass `class_names` if "
f'`labels="inferred"`. Received: labels={labels}, and '
f"class_names={class_names}"
)
if label_mode not in {"int", "categorical", "binary", None}:
raise ValueError(
'`label_mode` argument must be one of "int", '
'"categorical", "binary", '
f"or None. Received: label_mode={label_mode}"
)
if labels is None or label_mode is None:
labels = None
label_mode = None
if color_mode == "rgb":
num_channels = 3
elif color_mode == "rgba":
num_channels = 4
elif color_mode == "grayscale":
num_channels = 1
else:
raise ValueError(
'`color_mode` must be one of {"rgb", "rgba", "grayscale"}. '
f"Received: color_mode={color_mode}"
)
interpolation = interpolation.lower()
supported_interpolations = (
"bilinear",
"nearest",
"bicubic",
"area",
"lanczos3",
"lanczos5",
"gaussian",
"mitchellcubic",
)
if interpolation not in supported_interpolations:
raise ValueError(
"Argument `interpolation` should be one of "
f"{supported_interpolations}. "
f"Received: interpolation={interpolation}"
)
dataset_utils.check_validation_split_arg(
validation_split, subset, shuffle, seed
)
if seed is None:
seed = np.random.randint(1e6)
image_paths, labels, class_names = dataset_utils.index_directory(
directory,
labels,
formats=ALLOWLIST_FORMATS,
class_names=class_names,
shuffle=shuffle,
seed=seed,
follow_links=follow_links,
)
if label_mode == "binary" and len(class_names) != 2:
raise ValueError(
'When passing `label_mode="binary"`, there must be exactly 2 '
f"class_names. Received: class_names={class_names}"
)
data_format = standardize_data_format(data_format=data_format)
if subset == "both":
(
image_paths_train,
labels_train,
) = dataset_utils.get_training_or_validation_split(
image_paths, labels, validation_split, "training"
)
(
image_paths_val,
labels_val,
) = dataset_utils.get_training_or_validation_split(
image_paths, labels, validation_split, "validation"
)
if not image_paths_train:
raise ValueError(
f"No training images found in directory {directory}. "
f"Allowed formats: {ALLOWLIST_FORMATS}"
)
if not image_paths_val:
raise ValueError(
f"No validation images found in directory {directory}. "
f"Allowed formats: {ALLOWLIST_FORMATS}"
)
train_dataset = paths_and_labels_to_dataset(
image_paths=image_paths_train,
image_size=image_size,
num_channels=num_channels,
labels=labels_train,
label_mode=label_mode,
num_classes=len(class_names),
interpolation=interpolation,
crop_to_aspect_ratio=crop_to_aspect_ratio,
data_format=data_format,
)
val_dataset = paths_and_labels_to_dataset(
image_paths=image_paths_val,
image_size=image_size,
num_channels=num_channels,
labels=labels_val,
label_mode=label_mode,
num_classes=len(class_names),
interpolation=interpolation,
crop_to_aspect_ratio=crop_to_aspect_ratio,
data_format=data_format,
)
if batch_size is not None:
if shuffle:
# Shuffle locally at each iteration
train_dataset = train_dataset.shuffle(
buffer_size=batch_size * 8, seed=seed
)
train_dataset = train_dataset.batch(batch_size)
val_dataset = val_dataset.batch(batch_size)
else:
if shuffle:
train_dataset = train_dataset.shuffle(
buffer_size=1024, seed=seed
)
train_dataset = train_dataset.prefetch(tf.data.AUTOTUNE)
val_dataset = val_dataset.prefetch(tf.data.AUTOTUNE)
# Users may need to reference `class_names`.
train_dataset.class_names = class_names
val_dataset.class_names = class_names
# Include file paths for images as attribute.
train_dataset.file_paths = image_paths_train
val_dataset.file_paths = image_paths_val
dataset = [train_dataset, val_dataset]
else:
image_paths, labels = dataset_utils.get_training_or_validation_split(
image_paths, labels, validation_split, subset
)
if not image_paths:
raise ValueError(
f"No images found in directory {directory}. "
f"Allowed formats: {ALLOWLIST_FORMATS}"
)
dataset = paths_and_labels_to_dataset(
image_paths=image_paths,
image_size=image_size,
num_channels=num_channels,
labels=labels,
label_mode=label_mode,
num_classes=len(class_names),
interpolation=interpolation,
crop_to_aspect_ratio=crop_to_aspect_ratio,
data_format=data_format,
)
if batch_size is not None:
if shuffle:
# Shuffle locally at each iteration
dataset = dataset.shuffle(buffer_size=batch_size * 8, seed=seed)
dataset = dataset.batch(batch_size)
else:
if shuffle:
dataset = dataset.shuffle(buffer_size=1024, seed=seed)
dataset = dataset.prefetch(tf.data.AUTOTUNE)
# Users may need to reference `class_names`.
dataset.class_names = class_names
# Include file paths for images as attribute.
dataset.file_paths = image_paths
return dataset
def paths_and_labels_to_dataset(
image_paths,
image_size,
num_channels,
labels,
label_mode,
num_classes,
interpolation,
data_format,
crop_to_aspect_ratio=False,
):
"""Constructs a dataset of images and labels."""
# TODO(fchollet): consider making num_parallel_calls settable
path_ds = tf.data.Dataset.from_tensor_slices(image_paths)
args = (
image_size,
num_channels,
interpolation,
data_format,
crop_to_aspect_ratio,
)
img_ds = path_ds.map(
lambda x: load_image(x, *args), num_parallel_calls=tf.data.AUTOTUNE
)
if label_mode:
label_ds = dataset_utils.labels_to_dataset(
labels, label_mode, num_classes
)
img_ds = tf.data.Dataset.zip((img_ds, label_ds))
return img_ds
def load_image(
path,
image_size,
num_channels,
interpolation,
data_format,
crop_to_aspect_ratio=False,
):
"""Load an image from a path and resize it."""
img = tf.io.read_file(path)
img = tf.image.decode_image(
img, channels=num_channels, expand_animations=False
)
if crop_to_aspect_ratio:
from keras_core.backend import tensorflow as tf_backend
img = image_utils.smart_resize(
img,
image_size,
interpolation=interpolation,
data_format=data_format,
backend_module=tf_backend,
)
else:
img = tf.image.resize(img, image_size, method=interpolation)
img.set_shape((image_size[0], image_size[1], num_channels))
return img
| keras-core/keras_core/utils/image_dataset_utils.py/0 | {
"file_path": "keras-core/keras_core/utils/image_dataset_utils.py",
"repo_id": "keras-core",
"token_count": 6722
} | 57 |
import random
import numpy as np
from keras_core import backend
from keras_core.api_export import keras_core_export
from keras_core.utils.module_utils import tensorflow as tf
@keras_core_export("keras_core.utils.set_random_seed")
def set_random_seed(seed):
"""Sets all random seeds (Python, NumPy, and backend framework, e.g. TF).
You can use this utility to make almost any Keras program fully
deterministic. Some limitations apply in cases where network communications
are involved (e.g. parameter server distribution), which creates additional
sources of randomness, or when certain non-deterministic cuDNN ops are
involved.
Calling this utility is equivalent to the following:
```python
import random
import numpy as np
from keras_core.utils.module_utils import tensorflow as tf
random.seed(seed)
np.random.seed(seed)
tf.random.set_seed(seed)
```
Note that the TensorFlow seed is set even if you're not using TensorFlow
as your backend framework, since many workflows leverage `tf.data`
pipelines (which feature random shuffling). Likewise many workflows
might leverage NumPy APIs.
Arguments:
seed: Integer, the random seed to use.
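    Example (illustrative):
    ```python
    import keras_core
    keras_core.utils.set_random_seed(812)
    # Subsequent random ops in Python, NumPy and TF (and torch, when it is
    # the active backend) now draw from reproducible sequences.
    ```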
"""
if not isinstance(seed, int):
raise ValueError(
"Expected `seed` argument to be an integer. "
f"Received: seed={seed} (of type {type(seed)})"
)
random.seed(seed)
np.random.seed(seed)
if tf.available:
tf.random.set_seed(seed)
if backend.backend() == "torch":
import torch
torch.manual_seed(seed)
| keras-core/keras_core/utils/rng_utils.py/0 | {
"file_path": "keras-core/keras_core/utils/rng_utils.py",
"repo_id": "keras-core",
"token_count": 566
} | 58 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
# Setup/utils
"""
import time
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow.keras.layers as layers
from tensorflow import keras
from tensorflow.keras import backend
from keras_cv.utils import bounding_box
from keras_cv.utils import fill_utils
def single_rectangle_mask(corners, mask_shape):
"""Computes masks of rectangles
Args:
corners: tensor of rectangle coordinates with shape (batch_size, 4) in
corners format (x0, y0, x1, y1).
mask_shape: a shape tuple as (width, height) indicating the output
width and height of masks.
Returns:
boolean masks with shape (batch_size, width, height) where True values
indicate positions within rectangle coordinates.
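    For instance (illustrative values), corners `[1., 1., 3., 3.]` with
    `mask_shape=(4, 4)` yield a 4x4 mask that is True only on the 2x2 block
    covering rows 1-2 and columns 1-2.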
"""
# add broadcasting axes
corners = corners[..., tf.newaxis, tf.newaxis]
# split coordinates
x0 = corners[0]
y0 = corners[1]
x1 = corners[2]
y1 = corners[3]
# repeat height and width
width, height = mask_shape
x0_rep = tf.repeat(x0, height, axis=0)
y0_rep = tf.repeat(y0, width, axis=1)
x1_rep = tf.repeat(x1, height, axis=0)
y1_rep = tf.repeat(y1, width, axis=1)
# range grid
range_row = tf.range(0, height, dtype=corners.dtype)
range_col = tf.range(0, width, dtype=corners.dtype)
range_row = range_row[:, tf.newaxis]
range_col = range_col[tf.newaxis, :]
# boolean masks
mask_x0 = tf.less_equal(x0_rep, range_col)
mask_y0 = tf.less_equal(y0_rep, range_row)
mask_x1 = tf.less(range_col, x1_rep)
mask_y1 = tf.less(range_row, y1_rep)
masks = mask_x0 & mask_y0 & mask_x1 & mask_y1
return masks
def fill_single_rectangle(
image, centers_x, centers_y, widths, heights, fill_values
):
"""Fill rectangles with fill value into images.
Args:
image: Tensor of images to fill rectangles into.
centers_x: Tensor of positions of the rectangle centers on the x-axis.
centers_y: Tensor of positions of the rectangle centers on the y-axis.
widths: Tensor of widths of the rectangles
heights: Tensor of heights of the rectangles
fill_values: Tensor with same shape as images to get rectangle fill
from.
Returns:
images with filled rectangles.
"""
images_shape = tf.shape(image)
images_height = images_shape[0]
images_width = images_shape[1]
xywh = tf.stack([centers_x, centers_y, widths, heights], axis=0)
xywh = tf.cast(xywh, tf.float32)
corners = bounding_box.convert_to_corners(xywh, format="coco")
mask_shape = (images_width, images_height)
is_rectangle = single_rectangle_mask(corners, mask_shape)
is_rectangle = tf.expand_dims(is_rectangle, -1)
images = tf.where(is_rectangle, fill_values, image)
return images
"""
# Layer Implementations
## Fully Vectorized
"""
class VectorizedRandomCutout(layers.Layer):
def __init__(
self,
height_factor,
width_factor,
fill_mode="constant",
fill_value=0.0,
seed=None,
**kwargs,
):
super().__init__(**kwargs)
self.height_lower, self.height_upper = self._parse_bounds(height_factor)
self.width_lower, self.width_upper = self._parse_bounds(width_factor)
if fill_mode not in ["gaussian_noise", "constant"]:
raise ValueError(
'`fill_mode` should be "gaussian_noise" '
f'or "constant". Got `fill_mode`={fill_mode}'
)
if not isinstance(self.height_lower, type(self.height_upper)):
raise ValueError(
"`height_factor` must have lower bound and upper bound "
"with same type, got {} and {}".format(
type(self.height_lower), type(self.height_upper)
)
)
if not isinstance(self.width_lower, type(self.width_upper)):
raise ValueError(
"`width_factor` must have lower bound and upper bound "
"with same type, got {} and {}".format(
type(self.width_lower), type(self.width_upper)
)
)
if self.height_upper < self.height_lower:
raise ValueError(
"`height_factor` cannot have upper bound less than "
"lower bound, got {}".format(height_factor)
)
self._height_is_float = isinstance(self.height_lower, float)
if self._height_is_float:
if not self.height_lower >= 0.0 or not self.height_upper <= 1.0:
raise ValueError(
"`height_factor` must have values between [0, 1] "
"when is float, got {}".format(height_factor)
)
if self.width_upper < self.width_lower:
raise ValueError(
"`width_factor` cannot have upper bound less than "
"lower bound, got {}".format(width_factor)
)
self._width_is_float = isinstance(self.width_lower, float)
if self._width_is_float:
if not self.width_lower >= 0.0 or not self.width_upper <= 1.0:
raise ValueError(
"`width_factor` must have values between [0, 1] "
"when is float, got {}".format(width_factor)
)
self.fill_mode = fill_mode
self.fill_value = fill_value
self.seed = seed
def _parse_bounds(self, factor):
if isinstance(factor, (tuple, list)):
return factor[0], factor[1]
else:
return type(factor)(0), factor
@tf.function(jit_compile=True)
def call(self, inputs, training=True):
if training is None:
training = backend.learning_phase()
augment = lambda: self._random_cutout(inputs)
no_augment = lambda: inputs
return tf.cond(tf.cast(training, tf.bool), augment, no_augment)
def _random_cutout(self, inputs):
"""Apply random cutout."""
center_x, center_y = self._compute_rectangle_position(inputs)
rectangle_height, rectangle_width = self._compute_rectangle_size(inputs)
rectangle_fill = self._compute_rectangle_fill(inputs)
inputs = fill_utils.fill_rectangle(
inputs,
center_x,
center_y,
rectangle_width,
rectangle_height,
rectangle_fill,
)
return inputs
def _compute_rectangle_position(self, inputs):
input_shape = tf.shape(inputs)
batch_size, image_height, image_width = (
input_shape[0],
input_shape[1],
input_shape[2],
)
center_x = tf.random.uniform(
shape=[batch_size],
minval=0,
maxval=image_width,
dtype=tf.int32,
seed=self.seed,
)
center_y = tf.random.uniform(
shape=[batch_size],
minval=0,
maxval=image_height,
dtype=tf.int32,
seed=self.seed,
)
return center_x, center_y
def _compute_rectangle_size(self, inputs):
input_shape = tf.shape(inputs)
batch_size, image_height, image_width = (
input_shape[0],
input_shape[1],
input_shape[2],
)
height = tf.random.uniform(
[batch_size],
minval=self.height_lower,
maxval=self.height_upper,
dtype=tf.float32,
)
width = tf.random.uniform(
[batch_size],
minval=self.width_lower,
maxval=self.width_upper,
dtype=tf.float32,
)
if self._height_is_float:
height = height * tf.cast(image_height, tf.float32)
if self._width_is_float:
width = width * tf.cast(image_width, tf.float32)
height = tf.cast(tf.math.ceil(height), tf.int32)
width = tf.cast(tf.math.ceil(width), tf.int32)
height = tf.minimum(height, image_height)
width = tf.minimum(width, image_width)
return height, width
def _compute_rectangle_fill(self, inputs):
input_shape = tf.shape(inputs)
if self.fill_mode == "constant":
fill_value = tf.fill(input_shape, self.fill_value)
else:
# gaussian noise
fill_value = tf.random.normal(input_shape)
return fill_value
def get_config(self):
config = {
"height_factor": self.height_factor,
"width_factor": self.width_factor,
"fill_mode": self.fill_mode,
"fill_value": self.fill_value,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
"""
## tf.map_fn
"""
class MapFnRandomCutout(layers.Layer):
def __init__(
self,
height_factor,
width_factor,
fill_mode="constant",
fill_value=0.0,
seed=None,
**kwargs,
):
super().__init__(**kwargs)
self.height_lower, self.height_upper = self._parse_bounds(height_factor)
self.width_lower, self.width_upper = self._parse_bounds(width_factor)
if fill_mode not in ["gaussian_noise", "constant"]:
raise ValueError(
'`fill_mode` should be "gaussian_noise" '
f'or "constant". Got `fill_mode`={fill_mode}'
)
if not isinstance(self.height_lower, type(self.height_upper)):
raise ValueError(
"`height_factor` must have lower bound and upper bound "
"with same type, got {} and {}".format(
type(self.height_lower), type(self.height_upper)
)
)
if not isinstance(self.width_lower, type(self.width_upper)):
raise ValueError(
"`width_factor` must have lower bound and upper bound "
"with same type, got {} and {}".format(
type(self.width_lower), type(self.width_upper)
)
)
if self.height_upper < self.height_lower:
raise ValueError(
"`height_factor` cannot have upper bound less than "
"lower bound, got {}".format(height_factor)
)
self._height_is_float = isinstance(self.height_lower, float)
if self._height_is_float:
if not self.height_lower >= 0.0 or not self.height_upper <= 1.0:
raise ValueError(
"`height_factor` must have values between [0, 1] "
"when is float, got {}".format(height_factor)
)
if self.width_upper < self.width_lower:
raise ValueError(
"`width_factor` cannot have upper bound less than "
"lower bound, got {}".format(width_factor)
)
self._width_is_float = isinstance(self.width_lower, float)
if self._width_is_float:
if not self.width_lower >= 0.0 or not self.width_upper <= 1.0:
raise ValueError(
"`width_factor` must have values between [0, 1] "
"when is float, got {}".format(width_factor)
)
self.fill_mode = fill_mode
self.fill_value = fill_value
self.seed = seed
def _parse_bounds(self, factor):
if isinstance(factor, (tuple, list)):
return factor[0], factor[1]
else:
return type(factor)(0), factor
@tf.function(jit_compile=True)
def call(self, inputs, training=True):
augment = lambda: tf.map_fn(self._random_cutout, inputs)
no_augment = lambda: inputs
return tf.cond(tf.cast(training, tf.bool), augment, no_augment)
def _random_cutout(self, input):
center_x, center_y = self._compute_rectangle_position(input)
rectangle_height, rectangle_width = self._compute_rectangle_size(input)
rectangle_fill = self._compute_rectangle_fill(input)
input = fill_single_rectangle(
input,
center_x,
center_y,
rectangle_width,
rectangle_height,
rectangle_fill,
)
return input
def _compute_rectangle_position(self, inputs):
input_shape = tf.shape(inputs)
image_height, image_width = (
input_shape[0],
input_shape[1],
)
center_x = tf.random.uniform(
shape=[],
minval=0,
maxval=image_width,
dtype=tf.int32,
seed=self.seed,
)
center_y = tf.random.uniform(
shape=[],
minval=0,
maxval=image_height,
dtype=tf.int32,
seed=self.seed,
)
return center_x, center_y
def _compute_rectangle_size(self, inputs):
input_shape = tf.shape(inputs)
image_height, image_width = (
input_shape[0],
input_shape[1],
)
height = tf.random.uniform(
[],
minval=self.height_lower,
maxval=self.height_upper,
dtype=tf.float32,
)
width = tf.random.uniform(
[],
minval=self.width_lower,
maxval=self.width_upper,
dtype=tf.float32,
)
if self._height_is_float:
height = height * tf.cast(image_height, tf.float32)
if self._width_is_float:
width = width * tf.cast(image_width, tf.float32)
height = tf.cast(tf.math.ceil(height), tf.int32)
width = tf.cast(tf.math.ceil(width), tf.int32)
height = tf.minimum(height, image_height)
width = tf.minimum(width, image_width)
return height, width
def _compute_rectangle_fill(self, inputs):
input_shape = tf.shape(inputs)
if self.fill_mode == "constant":
fill_value = tf.fill(input_shape, self.fill_value)
else:
# gaussian noise
fill_value = tf.random.normal(input_shape)
return fill_value
def get_config(self):
config = {
"height_factor": self.height_factor,
"width_factor": self.width_factor,
"fill_mode": self.fill_mode,
"fill_value": self.fill_value,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
"""
## tf.vectorized_map
"""
class VMapRandomCutout(layers.Layer):
def __init__(
self,
height_factor,
width_factor,
fill_mode="constant",
fill_value=0.0,
seed=None,
**kwargs,
):
super().__init__(**kwargs)
self.height_lower, self.height_upper = self._parse_bounds(height_factor)
self.width_lower, self.width_upper = self._parse_bounds(width_factor)
if fill_mode not in ["gaussian_noise", "constant"]:
raise ValueError(
'`fill_mode` should be "gaussian_noise" '
f'or "constant". Got `fill_mode`={fill_mode}'
)
if not isinstance(self.height_lower, type(self.height_upper)):
raise ValueError(
"`height_factor` must have lower bound and upper bound "
"with same type, got {} and {}".format(
type(self.height_lower), type(self.height_upper)
)
)
if not isinstance(self.width_lower, type(self.width_upper)):
raise ValueError(
"`width_factor` must have lower bound and upper bound "
"with same type, got {} and {}".format(
type(self.width_lower), type(self.width_upper)
)
)
if self.height_upper < self.height_lower:
raise ValueError(
"`height_factor` cannot have upper bound less than "
"lower bound, got {}".format(height_factor)
)
self._height_is_float = isinstance(self.height_lower, float)
if self._height_is_float:
if not self.height_lower >= 0.0 or not self.height_upper <= 1.0:
raise ValueError(
"`height_factor` must have values between [0, 1] "
"when is float, got {}".format(height_factor)
)
if self.width_upper < self.width_lower:
raise ValueError(
"`width_factor` cannot have upper bound less than "
"lower bound, got {}".format(width_factor)
)
self._width_is_float = isinstance(self.width_lower, float)
if self._width_is_float:
if not self.width_lower >= 0.0 or not self.width_upper <= 1.0:
raise ValueError(
"`width_factor` must have values between [0, 1] "
"when is float, got {}".format(width_factor)
)
self.fill_mode = fill_mode
self.fill_value = fill_value
self.seed = seed
def _parse_bounds(self, factor):
if isinstance(factor, (tuple, list)):
return factor[0], factor[1]
else:
return type(factor)(0), factor
@tf.function(jit_compile=True)
def call(self, inputs, training=True):
augment = lambda: tf.vectorized_map(self._random_cutout, inputs)
no_augment = lambda: inputs
return tf.cond(tf.cast(training, tf.bool), augment, no_augment)
def _random_cutout(self, input):
center_x, center_y = self._compute_rectangle_position(input)
rectangle_height, rectangle_width = self._compute_rectangle_size(input)
rectangle_fill = self._compute_rectangle_fill(input)
input = fill_single_rectangle(
input,
center_x,
center_y,
rectangle_width,
rectangle_height,
rectangle_fill,
)
return input
def _compute_rectangle_position(self, inputs):
input_shape = tf.shape(inputs)
image_height, image_width = (
input_shape[0],
input_shape[1],
)
center_x = tf.random.uniform(
shape=[],
minval=0,
maxval=image_width,
dtype=tf.int32,
seed=self.seed,
)
center_y = tf.random.uniform(
shape=[],
minval=0,
maxval=image_height,
dtype=tf.int32,
seed=self.seed,
)
return center_x, center_y
def _compute_rectangle_size(self, inputs):
input_shape = tf.shape(inputs)
image_height, image_width = (
input_shape[0],
input_shape[1],
)
height = tf.random.uniform(
[],
minval=self.height_lower,
maxval=self.height_upper,
dtype=tf.float32,
)
width = tf.random.uniform(
[],
minval=self.width_lower,
maxval=self.width_upper,
dtype=tf.float32,
)
if self._height_is_float:
height = height * tf.cast(image_height, tf.float32)
if self._width_is_float:
width = width * tf.cast(image_width, tf.float32)
height = tf.cast(tf.math.ceil(height), tf.int32)
width = tf.cast(tf.math.ceil(width), tf.int32)
height = tf.minimum(height, image_height)
width = tf.minimum(width, image_width)
return height, width
def _compute_rectangle_fill(self, inputs):
input_shape = tf.shape(inputs)
if self.fill_mode == "constant":
fill_value = tf.fill(input_shape, self.fill_value)
else:
# gaussian noise
fill_value = tf.random.normal(input_shape)
return fill_value
def get_config(self):
config = {
"height_factor": self.height_factor,
"width_factor": self.width_factor,
"fill_mode": self.fill_mode,
"fill_value": self.fill_value,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
"""
JIT COMPILED
# Layer Implementations
## Fully Vectorized
"""
class JITVectorizedRandomCutout(layers.Layer):
def __init__(
self,
height_factor,
width_factor,
fill_mode="constant",
fill_value=0.0,
seed=None,
**kwargs,
):
super().__init__(**kwargs)
self.height_lower, self.height_upper = self._parse_bounds(height_factor)
self.width_lower, self.width_upper = self._parse_bounds(width_factor)
if fill_mode not in ["gaussian_noise", "constant"]:
raise ValueError(
'`fill_mode` should be "gaussian_noise" '
f'or "constant". Got `fill_mode`={fill_mode}'
)
if not isinstance(self.height_lower, type(self.height_upper)):
raise ValueError(
"`height_factor` must have lower bound and upper bound "
"with same type, got {} and {}".format(
type(self.height_lower), type(self.height_upper)
)
)
if not isinstance(self.width_lower, type(self.width_upper)):
raise ValueError(
"`width_factor` must have lower bound and upper bound "
"with same type, got {} and {}".format(
type(self.width_lower), type(self.width_upper)
)
)
if self.height_upper < self.height_lower:
raise ValueError(
"`height_factor` cannot have upper bound less than "
"lower bound, got {}".format(height_factor)
)
self._height_is_float = isinstance(self.height_lower, float)
if self._height_is_float:
if not self.height_lower >= 0.0 or not self.height_upper <= 1.0:
raise ValueError(
"`height_factor` must have values between [0, 1] "
"when is float, got {}".format(height_factor)
)
if self.width_upper < self.width_lower:
raise ValueError(
"`width_factor` cannot have upper bound less than "
"lower bound, got {}".format(width_factor)
)
self._width_is_float = isinstance(self.width_lower, float)
if self._width_is_float:
if not self.width_lower >= 0.0 or not self.width_upper <= 1.0:
raise ValueError(
"`width_factor` must have values between [0, 1] "
"when is float, got {}".format(width_factor)
)
self.fill_mode = fill_mode
self.fill_value = fill_value
self.seed = seed
def _parse_bounds(self, factor):
if isinstance(factor, (tuple, list)):
return factor[0], factor[1]
else:
return type(factor)(0), factor
@tf.function(jit_compile=True)
def call(self, inputs, training=True):
if training is None:
training = backend.learning_phase()
augment = lambda: self._random_cutout(inputs)
no_augment = lambda: inputs
return tf.cond(tf.cast(training, tf.bool), augment, no_augment)
def _random_cutout(self, inputs):
"""Apply random cutout."""
center_x, center_y = self._compute_rectangle_position(inputs)
rectangle_height, rectangle_width = self._compute_rectangle_size(inputs)
rectangle_fill = self._compute_rectangle_fill(inputs)
inputs = fill_utils.fill_rectangle(
inputs,
center_x,
center_y,
rectangle_width,
rectangle_height,
rectangle_fill,
)
return inputs
def _compute_rectangle_position(self, inputs):
input_shape = tf.shape(inputs)
batch_size, image_height, image_width = (
input_shape[0],
input_shape[1],
input_shape[2],
)
center_x = tf.random.uniform(
shape=[batch_size],
minval=0,
maxval=image_width,
dtype=tf.int32,
seed=self.seed,
)
center_y = tf.random.uniform(
shape=[batch_size],
minval=0,
maxval=image_height,
dtype=tf.int32,
seed=self.seed,
)
return center_x, center_y
def _compute_rectangle_size(self, inputs):
input_shape = tf.shape(inputs)
batch_size, image_height, image_width = (
input_shape[0],
input_shape[1],
input_shape[2],
)
height = tf.random.uniform(
[batch_size],
minval=self.height_lower,
maxval=self.height_upper,
dtype=tf.float32,
)
width = tf.random.uniform(
[batch_size],
minval=self.width_lower,
maxval=self.width_upper,
dtype=tf.float32,
)
if self._height_is_float:
height = height * tf.cast(image_height, tf.float32)
if self._width_is_float:
width = width * tf.cast(image_width, tf.float32)
height = tf.cast(tf.math.ceil(height), tf.int32)
width = tf.cast(tf.math.ceil(width), tf.int32)
height = tf.minimum(height, image_height)
width = tf.minimum(width, image_width)
return height, width
def _compute_rectangle_fill(self, inputs):
input_shape = tf.shape(inputs)
if self.fill_mode == "constant":
fill_value = tf.fill(input_shape, self.fill_value)
else:
# gaussian noise
fill_value = tf.random.normal(input_shape)
return fill_value
def get_config(self):
config = {
"height_factor": self.height_factor,
"width_factor": self.width_factor,
"fill_mode": self.fill_mode,
"fill_value": self.fill_value,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
"""
## tf.map_fn
"""
class JITMapFnRandomCutout(layers.Layer):
def __init__(
self,
height_factor,
width_factor,
fill_mode="constant",
fill_value=0.0,
seed=None,
**kwargs,
):
super().__init__(**kwargs)
self.height_lower, self.height_upper = self._parse_bounds(height_factor)
self.width_lower, self.width_upper = self._parse_bounds(width_factor)
if fill_mode not in ["gaussian_noise", "constant"]:
raise ValueError(
'`fill_mode` should be "gaussian_noise" '
f'or "constant". Got `fill_mode`={fill_mode}'
)
if not isinstance(self.height_lower, type(self.height_upper)):
raise ValueError(
"`height_factor` must have lower bound and upper bound "
"with same type, got {} and {}".format(
type(self.height_lower), type(self.height_upper)
)
)
if not isinstance(self.width_lower, type(self.width_upper)):
raise ValueError(
"`width_factor` must have lower bound and upper bound "
"with same type, got {} and {}".format(
type(self.width_lower), type(self.width_upper)
)
)
if self.height_upper < self.height_lower:
raise ValueError(
"`height_factor` cannot have upper bound less than "
"lower bound, got {}".format(height_factor)
)
self._height_is_float = isinstance(self.height_lower, float)
if self._height_is_float:
if not self.height_lower >= 0.0 or not self.height_upper <= 1.0:
raise ValueError(
"`height_factor` must have values between [0, 1] "
"when is float, got {}".format(height_factor)
)
if self.width_upper < self.width_lower:
raise ValueError(
"`width_factor` cannot have upper bound less than "
"lower bound, got {}".format(width_factor)
)
self._width_is_float = isinstance(self.width_lower, float)
if self._width_is_float:
if not self.width_lower >= 0.0 or not self.width_upper <= 1.0:
raise ValueError(
"`width_factor` must have values between [0, 1] "
"when is float, got {}".format(width_factor)
)
self.fill_mode = fill_mode
self.fill_value = fill_value
self.seed = seed
def _parse_bounds(self, factor):
if isinstance(factor, (tuple, list)):
return factor[0], factor[1]
else:
return type(factor)(0), factor
@tf.function(jit_compile=True)
def call(self, inputs, training=True):
augment = lambda: tf.map_fn(self._random_cutout, inputs)
no_augment = lambda: inputs
return tf.cond(tf.cast(training, tf.bool), augment, no_augment)
def _random_cutout(self, input):
center_x, center_y = self._compute_rectangle_position(input)
rectangle_height, rectangle_width = self._compute_rectangle_size(input)
rectangle_fill = self._compute_rectangle_fill(input)
input = fill_single_rectangle(
input,
center_x,
center_y,
rectangle_width,
rectangle_height,
rectangle_fill,
)
return input
def _compute_rectangle_position(self, inputs):
input_shape = tf.shape(inputs)
image_height, image_width = (
input_shape[0],
input_shape[1],
)
center_x = tf.random.uniform(
shape=[],
minval=0,
maxval=image_width,
dtype=tf.int32,
seed=self.seed,
)
center_y = tf.random.uniform(
shape=[],
minval=0,
maxval=image_height,
dtype=tf.int32,
seed=self.seed,
)
return center_x, center_y
def _compute_rectangle_size(self, inputs):
input_shape = tf.shape(inputs)
image_height, image_width = (
input_shape[0],
input_shape[1],
)
height = tf.random.uniform(
[],
minval=self.height_lower,
maxval=self.height_upper,
dtype=tf.float32,
)
width = tf.random.uniform(
[],
minval=self.width_lower,
maxval=self.width_upper,
dtype=tf.float32,
)
if self._height_is_float:
height = height * tf.cast(image_height, tf.float32)
if self._width_is_float:
width = width * tf.cast(image_width, tf.float32)
height = tf.cast(tf.math.ceil(height), tf.int32)
width = tf.cast(tf.math.ceil(width), tf.int32)
height = tf.minimum(height, image_height)
width = tf.minimum(width, image_width)
return height, width
def _compute_rectangle_fill(self, inputs):
input_shape = tf.shape(inputs)
if self.fill_mode == "constant":
fill_value = tf.fill(input_shape, self.fill_value)
else:
# gaussian noise
fill_value = tf.random.normal(input_shape)
return fill_value
def get_config(self):
config = {
"height_factor": self.height_factor,
"width_factor": self.width_factor,
"fill_mode": self.fill_mode,
"fill_value": self.fill_value,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
"""
## tf.vectorized_map
"""
class JITVMapRandomCutout(layers.Layer):
def __init__(
self,
height_factor,
width_factor,
fill_mode="constant",
fill_value=0.0,
seed=None,
**kwargs,
):
super().__init__(**kwargs)
self.height_lower, self.height_upper = self._parse_bounds(height_factor)
self.width_lower, self.width_upper = self._parse_bounds(width_factor)
if fill_mode not in ["gaussian_noise", "constant"]:
raise ValueError(
'`fill_mode` should be "gaussian_noise" '
f'or "constant". Got `fill_mode`={fill_mode}'
)
if not isinstance(self.height_lower, type(self.height_upper)):
raise ValueError(
"`height_factor` must have lower bound and upper bound "
"with same type, got {} and {}".format(
type(self.height_lower), type(self.height_upper)
)
)
if not isinstance(self.width_lower, type(self.width_upper)):
raise ValueError(
"`width_factor` must have lower bound and upper bound "
"with same type, got {} and {}".format(
type(self.width_lower), type(self.width_upper)
)
)
if self.height_upper < self.height_lower:
raise ValueError(
"`height_factor` cannot have upper bound less than "
"lower bound, got {}".format(height_factor)
)
self._height_is_float = isinstance(self.height_lower, float)
if self._height_is_float:
if not self.height_lower >= 0.0 or not self.height_upper <= 1.0:
raise ValueError(
"`height_factor` must have values between [0, 1] "
"when is float, got {}".format(height_factor)
)
if self.width_upper < self.width_lower:
raise ValueError(
"`width_factor` cannot have upper bound less than "
"lower bound, got {}".format(width_factor)
)
self._width_is_float = isinstance(self.width_lower, float)
if self._width_is_float:
if not self.width_lower >= 0.0 or not self.width_upper <= 1.0:
raise ValueError(
"`width_factor` must have values between [0, 1] "
"when is float, got {}".format(width_factor)
)
self.fill_mode = fill_mode
self.fill_value = fill_value
self.seed = seed
def _parse_bounds(self, factor):
if isinstance(factor, (tuple, list)):
return factor[0], factor[1]
else:
return type(factor)(0), factor
@tf.function(jit_compile=True)
def call(self, inputs, training=True):
augment = lambda: tf.vectorized_map(self._random_cutout, inputs)
no_augment = lambda: inputs
return tf.cond(tf.cast(training, tf.bool), augment, no_augment)
def _random_cutout(self, input):
center_x, center_y = self._compute_rectangle_position(input)
rectangle_height, rectangle_width = self._compute_rectangle_size(input)
rectangle_fill = self._compute_rectangle_fill(input)
input = fill_single_rectangle(
input,
center_x,
center_y,
rectangle_width,
rectangle_height,
rectangle_fill,
)
return input
def _compute_rectangle_position(self, inputs):
input_shape = tf.shape(inputs)
image_height, image_width = (
input_shape[0],
input_shape[1],
)
center_x = tf.random.uniform(
shape=[],
minval=0,
maxval=image_width,
dtype=tf.int32,
seed=self.seed,
)
center_y = tf.random.uniform(
shape=[],
minval=0,
maxval=image_height,
dtype=tf.int32,
seed=self.seed,
)
return center_x, center_y
def _compute_rectangle_size(self, inputs):
input_shape = tf.shape(inputs)
image_height, image_width = (
input_shape[0],
input_shape[1],
)
height = tf.random.uniform(
[],
minval=self.height_lower,
maxval=self.height_upper,
dtype=tf.float32,
)
width = tf.random.uniform(
[],
minval=self.width_lower,
maxval=self.width_upper,
dtype=tf.float32,
)
if self._height_is_float:
height = height * tf.cast(image_height, tf.float32)
if self._width_is_float:
width = width * tf.cast(image_width, tf.float32)
height = tf.cast(tf.math.ceil(height), tf.int32)
width = tf.cast(tf.math.ceil(width), tf.int32)
height = tf.minimum(height, image_height)
width = tf.minimum(width, image_width)
return height, width
def _compute_rectangle_fill(self, inputs):
input_shape = tf.shape(inputs)
if self.fill_mode == "constant":
fill_value = tf.fill(input_shape, self.fill_value)
else:
# gaussian noise
fill_value = tf.random.normal(input_shape)
return fill_value
def get_config(self):
config = {
"height_factor": self.height_factor,
"width_factor": self.width_factor,
"fill_mode": self.fill_mode,
"fill_value": self.fill_value,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
"""
# Benchmarking
"""
(x_train, _), _ = keras.datasets.cifar10.load_data()
x_train = x_train.astype(float)
x_train.shape
images = []
num_images = [1000, 2000, 5000, 10000, 25000, 37500, 50000]
results = {}
for aug in [
VectorizedRandomCutout,
VMapRandomCutout,
MapFnRandomCutout,
JITVectorizedRandomCutout,
JITVMapRandomCutout,
JITMapFnRandomCutout,
]:
c = aug.__name__
layer = aug(0.2, 0.2)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
layer(x_train[:n_images])
t0 = time.time()
r1 = layer(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.show()
"""
# Sanity check
All of these implementations should produce comparable outputs.
"""
images = []
for aug in [VectorizedRandomCutout, VMapRandomCutout, MapFnRandomCutout]:
layer = aug(0.5, 0.5)
images.append(layer(x_train[:3]))
images = [y for x in images for y in x]
plt.figure(figsize=(8, 8))
for i in range(9):
plt.subplot(3, 3, i + 1)
plt.imshow(images[i].numpy().astype("uint8"))
plt.axis("off")
plt.show()
"""
# Extra notes
## Warnings
It would be confusing for a user of an official keras_cv component to be
warned that "RandomUniform" or "RandomUniformInt" inside pfor may not give
the same output.
"""
| keras-cv/benchmarks/vectorization_strategy_benchmark.py/0 | {
"file_path": "keras-cv/benchmarks/vectorization_strategy_benchmark.py",
"repo_id": "keras-cv",
"token_count": 19626
} | 59 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from keras import backend
from tensorflow import keras
from keras_cv.layers import RandomTranslation
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing as preprocessing_utils
H_AXIS = -3
W_AXIS = -2
def check_fill_mode_and_interpolation(fill_mode, interpolation):
if fill_mode not in {"reflect", "wrap", "constant", "nearest"}:
raise NotImplementedError(
f"Unknown `fill_mode` {fill_mode}. Only `reflect`, `wrap`, "
"`constant` and `nearest` are supported."
)
if interpolation not in {"nearest", "bilinear"}:
raise NotImplementedError(
f"Unknown `interpolation` {interpolation}. Only `nearest` and "
"`bilinear` are supported."
)
def get_translation_matrix(translations, name=None):
"""Returns projective transform(s) for the given translation(s).
Args:
translations: A matrix of 2-element lists representing `[dx, dy]`
to translate for each image (for a batch of images).
name: The name of the op.
Returns:
A tensor of shape `(num_images, 8)` projective transforms which can be
given to `transform`.
"""
with backend.name_scope(name or "translation_matrix"):
num_translations = tf.shape(translations)[0]
# The translation matrix looks like:
# [[1 0 -dx]
# [0 1 -dy]
# [0 0 1]]
# where the last entry is implicit.
# Translation matrices are always float32.
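# An explanatory aside: the concat below emits the flattened projective
# transform [a0, a1, a2, b0, b1, b2, c0, c1] per image (here
# [1, 0, -dx, 0, 1, -dy, 0, 0]); the ninth entry of the 3x3 matrix is
# implicitly 1, which is the 8-value form the transform op expects.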
return tf.concat(
values=[
tf.ones((num_translations, 1), tf.float32),
tf.zeros((num_translations, 1), tf.float32),
-translations[:, 0, None],
tf.zeros((num_translations, 1), tf.float32),
tf.ones((num_translations, 1), tf.float32),
-translations[:, 1, None],
tf.zeros((num_translations, 2), tf.float32),
],
axis=1,
)
class OldRandomTranslation(BaseImageAugmentationLayer):
"""A preprocessing layer which randomly translates images during training.
This layer will apply random translations to each image during training,
filling empty space according to `fill_mode`.
Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and
of integer or floating point dtype. By default, the layer will output
floats.
Args:
height_factor: a float represented as fraction of value, or a tuple of
size 2 representing lower and upper bound for shifting vertically. A
negative value means shifting image up, while a positive value means
shifting image down. When represented as a single positive float, this
value is used for both the upper and lower bound. For instance,
`height_factor=(-0.2, 0.3)` results in an output shifted by a random
amount in the range `[-20%, +30%]`. `height_factor=0.2` results in an
output height shifted by a random amount in the range `[-20%, +20%]`.
width_factor: a float represented as fraction of value, or a tuple of size
2 representing lower and upper bound for shifting horizontally. A
negative value means shifting image left, while a positive value means
shifting image right. When represented as a single positive float,
this value is used for both the upper and lower bound. For instance,
`width_factor=(-0.2, 0.3)` results in an output shifted horizontally by a
random amount in the range `[-20%, +30%]`. `width_factor=0.2` results
in an output shifted left or right by up to 20%.
fill_mode: Points outside the boundaries of the input are filled according
to the given mode
(one of `{"constant", "reflect", "wrap", "nearest"}`).
- *reflect*: `(d c b a | a b c d | d c b a)` The input is extended by
reflecting about the edge of the last pixel.
- *constant*: `(k k k k | a b c d | k k k k)` The input is extended by
filling all values beyond the edge with the same constant value
k = 0.
- *wrap*: `(a b c d | a b c d | a b c d)` The input is extended by
wrapping around to the opposite edge.
- *nearest*: `(a a a a | a b c d | d d d d)` The input is extended by
the nearest pixel.
interpolation: Interpolation mode. Supported values: `"nearest"`,
`"bilinear"`.
seed: Integer. Used to create a random seed.
fill_value: a float represents the value to be filled outside the
boundaries when `fill_mode="constant"`.
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format.
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format.
"""
def __init__(
self,
height_factor,
width_factor,
fill_mode="reflect",
interpolation="bilinear",
seed=None,
fill_value=0.0,
**kwargs,
):
super().__init__(seed=seed, **kwargs)
self.height_factor = height_factor
if isinstance(height_factor, (tuple, list)):
self.height_lower = height_factor[0]
self.height_upper = height_factor[1]
else:
self.height_lower = -height_factor
self.height_upper = height_factor
if self.height_upper < self.height_lower:
raise ValueError(
"`height_factor` cannot have upper bound less than "
f"lower bound, got {height_factor}"
)
if abs(self.height_lower) > 1.0 or abs(self.height_upper) > 1.0:
raise ValueError(
"`height_factor` must have values between [-1, 1], "
f"got {height_factor}"
)
self.width_factor = width_factor
if isinstance(width_factor, (tuple, list)):
self.width_lower = width_factor[0]
self.width_upper = width_factor[1]
else:
self.width_lower = -width_factor
self.width_upper = width_factor
if self.width_upper < self.width_lower:
raise ValueError(
"`width_factor` cannot have upper bound less than "
f"lower bound, got {width_factor}"
)
if abs(self.width_lower) > 1.0 or abs(self.width_upper) > 1.0:
raise ValueError(
"`width_factor` must have values between [-1, 1], "
f"got {width_factor}"
)
check_fill_mode_and_interpolation(fill_mode, interpolation)
self.fill_mode = fill_mode
self.fill_value = fill_value
self.interpolation = interpolation
self.seed = seed
def augment_image(self, image, transformation, **kwargs):
"""Translated inputs with random ops."""
# The transform op only accepts rank 4 inputs, so if we have an
# unbatched image, we need to temporarily expand dims to a batch.
original_shape = image.shape
inputs = tf.expand_dims(image, 0)
inputs_shape = tf.shape(inputs)
img_hd = tf.cast(inputs_shape[H_AXIS], tf.float32)
img_wd = tf.cast(inputs_shape[W_AXIS], tf.float32)
height_translation = transformation["height_translation"]
width_translation = transformation["width_translation"]
height_translation = height_translation * img_hd
width_translation = width_translation * img_wd
translations = tf.cast(
tf.concat([width_translation, height_translation], axis=1),
dtype=tf.float32,
)
output = preprocessing_utils.transform(
inputs,
get_translation_matrix(translations),
interpolation=self.interpolation,
fill_mode=self.fill_mode,
fill_value=self.fill_value,
)
output = tf.squeeze(output, 0)
output.set_shape(original_shape)
return output
def get_random_transformation(self, image=None, **kwargs):
batch_size = 1
height_translation = self._random_generator.uniform(
shape=[batch_size, 1],
minval=self.height_lower,
maxval=self.height_upper,
dtype=tf.float32,
)
width_translation = self._random_generator.uniform(
shape=[batch_size, 1],
minval=self.width_lower,
maxval=self.width_upper,
dtype=tf.float32,
)
return {
"height_translation": height_translation,
"width_translation": width_translation,
}
def _batch_augment(self, inputs):
# Use vectorized_map for better performance, and to work around an issue
# with differing TensorSpecs between inputs and outputs.
return tf.vectorized_map(self._augment, inputs)
def augment_label(self, label, transformation, **kwargs):
return label
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {
"height_factor": self.height_factor,
"width_factor": self.width_factor,
"fill_mode": self.fill_mode,
"fill_value": self.fill_value,
"interpolation": self.interpolation,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
class RandomTranslationTest(tf.test.TestCase):
def test_consistency_with_old_impl(self):
image_shape = (16, 32, 32, 3)
fixed_height_factor = (0.5, 0.5)
fixed_width_factor = (0.5, 0.5)
image = tf.random.uniform(shape=image_shape) * 255.0
layer = RandomTranslation(fixed_height_factor, fixed_width_factor)
old_layer = OldRandomTranslation(
fixed_height_factor, fixed_width_factor
)
output = layer(image)
old_output = old_layer(image)
self.assertAllClose(old_output, output)
if __name__ == "__main__":
# Run benchmark
(x_train, _), _ = keras.datasets.cifar10.load_data()
x_train = x_train.astype(np.float32)
num_images = [100, 200, 500, 1000]
results = {}
aug_candidates = [RandomTranslation, OldRandomTranslation]
aug_args = {"height_factor": 0.5, "width_factor": 0.5}
for aug in aug_candidates:
# Eager Mode
c = aug.__name__
layer = aug(**aug_args)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
layer(x_train[:n_images])
t0 = time.time()
r1 = layer(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
# Graph Mode
c = aug.__name__ + " Graph Mode"
layer = aug(**aug_args)
@tf.function()
def apply_aug(inputs):
return layer(inputs)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
apply_aug(x_train[:n_images])
t0 = time.time()
r1 = apply_aug(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
# XLA Mode
# cannot run tf.raw_ops.ImageProjectiveTransformV3 on XLA
# for more information please refer:
# https://github.com/tensorflow/tensorflow/issues/55194
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison.png")
# So we can actually see more relevant margins
del results[aug_candidates[1].__name__]
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison_no_old_eager.png")
# Run unit tests
tf.test.main()
| keras-cv/benchmarks/vectorized_random_translation.py/0 | {
"file_path": "keras-cv/benchmarks/vectorized_random_translation.py",
"repo_id": "keras-cv",
"token_count": 5646
} | 60 |
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""random_zoom_demo.py.py shows how to use the RandomZoom
preprocessing layer. Operates on an image of elephant. In this script the image
is loaded, then are passed through the preprocessing layers.
Finally, they are shown using matplotlib.
"""
import demo_utils
from keras_cv.layers.preprocessing import RandomZoom
def main():
many_elephants = demo_utils.load_elephant_tensor(output_size=(300, 300))
layer = RandomZoom(0.5)
augmented = layer(many_elephants)
demo_utils.gallery_show(augmented.numpy())
if __name__ == "__main__":
main()
| keras-cv/examples/layers/preprocessing/classification/random_zoom_demo.py/0 | {
"file_path": "keras-cv/examples/layers/preprocessing/classification/random_zoom_demo.py",
"repo_id": "keras-cv",
"token_count": 312
} | 61 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Title: Training a KerasCV model for Imagenet Classification
Author: [Ian Stenbit](https://github.com/ianstenbit)
Date created: 2022/07/25
Last modified: 2022/07/25
Description: Use KerasCV to train an image classifier using modern best
practices
"""
import math
import sys
import tensorflow as tf
from absl import flags
import keras_cv
from keras_cv import models
from keras_cv.backend import keras
from keras_cv.datasets import imagenet
"""
## Overview
KerasCV makes training state-of-the-art classification models easy by providing
implementations of modern models, preprocessing techniques, and layers.
In this tutorial, we walk through training a model against the Imagenet dataset
using Keras and KerasCV.
This tutorial requires you to have KerasCV installed:
```shell
pip install keras-cv
```
Note that this depends on TF>=2.11
"""
"""
## Setup, constants and flags
"""
flags.DEFINE_string(
"model_name", None, "The name of the model in KerasCV.models to use."
)
flags.DEFINE_string(
"imagenet_path", None, "Directory from which to load Imagenet."
)
flags.DEFINE_string(
"weights_path",
None,
"Directory which will be used to store weight checkpoints.",
)
flags.DEFINE_string(
"tensorboard_path",
None,
"Directory which will be used to store tensorboard logs.",
)
flags.DEFINE_integer(
"batch_size",
128,
"Batch size for training and evaluation. This will be multiplied by the "
"number of accelerators in use.",
)
flags.DEFINE_boolean(
"use_xla", True, "whether to use XLA (jit_compile) for training."
)
flags.DEFINE_boolean(
"use_mixed_precision",
False,
"whether to use FP16 mixed precision for training.",
)
flags.DEFINE_boolean(
"use_ema",
True,
"whether to use exponential moving average weight updating",
)
flags.DEFINE_float(
"initial_learning_rate",
0.05,
"Initial learning rate which will reduce on plateau. This will be "
"multiplied by the number of accelerators in use",
)
flags.DEFINE_string(
"model_kwargs",
"{}",
"Keyword argument dictionary to pass to the constructor of the model being "
"trained",
)
flags.DEFINE_string(
"learning_rate_schedule",
"ReduceOnPlateau",
"String denoting the type of learning rate schedule to be used",
)
flags.DEFINE_float(
"warmup_steps_percentage",
0.1,
"For how many steps expressed in percentage (0..1 float) of total steps "
"should the schedule warm up if we're using the warmup schedule",
)
flags.DEFINE_float(
"warmup_hold_steps_percentage",
0.45,
"For how many steps expressed in percentage (0..1 float) of total steps "
"should the schedule hold the initial learning rate after warmup is "
"finished, and before applying cosine decay.",
)
flags.DEFINE_float(
"weight_decay",
5e-4,
"Weight decay parameter for the optimizer",
)
# An upper bound for number of epochs (this script uses EarlyStopping).
flags.DEFINE_integer("epochs", 1000, "Epochs to train for")
FLAGS = flags.FLAGS
FLAGS(sys.argv)
NUM_CLASSES = 1000
IMAGE_SIZE = (224, 224)
REDUCE_ON_PLATEAU = "ReduceOnPlateau"
COSINE_DECAY_WITH_WARMUP = "CosineDecayWithWarmup"
if FLAGS.model_name not in models.__dict__:
raise ValueError(f"Invalid model name: {FLAGS.model_name}")
if FLAGS.use_mixed_precision:
keras.mixed_precision.set_global_policy("mixed_float16")
"""
We start by detecting the type of accelerators we have available and picking an
appropriate distribution strategy accordingly. We scale our learning rate and
batch size based on the number of accelerators being used.
"""
# Try to detect an available TPU. If none is present, defaults to
# MirroredStrategy
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver.connect()
strategy = tf.distribute.TPUStrategy(tpu)
if FLAGS.use_mixed_precision:
keras.mixed_precision.set_global_policy("mixed_bfloat16")
except ValueError:
# MirroredStrategy is best for a single machine with one or multiple GPUs
strategy = tf.distribute.MirroredStrategy()
print("Number of accelerators: ", strategy.num_replicas_in_sync)
BATCH_SIZE = FLAGS.batch_size * strategy.num_replicas_in_sync
INITIAL_LEARNING_RATE = (
FLAGS.initial_learning_rate * strategy.num_replicas_in_sync
)
"""TFRecord-based tf.data.Dataset loads lazily so we can't get the length of
the dataset. Temporary."""
NUM_IMAGES = 1281167
"""
## Data loading
This guide uses the
[Imagenet dataset](https://www.tensorflow.org/datasets/catalog/imagenet2012).
Note that this requires manual download and preprocessing. You can find more
information about preparing this dataset at keras_cv/datasets/imagenet/README.md
"""
train_ds = imagenet.load(
split="train",
tfrecord_path=FLAGS.imagenet_path,
shuffle_buffer=BATCH_SIZE * 8,
reshuffle_each_iteration=True,
)
test_ds = imagenet.load(
split="validation",
tfrecord_path=FLAGS.imagenet_path,
batch_size=BATCH_SIZE,
img_size=IMAGE_SIZE,
)
"""
Next, we augment our dataset.
We define a set of augmentation layers and then apply them to our input dataset.
"""
random_crop_and_resize = keras_cv.layers.RandomCropAndResize(
target_size=IMAGE_SIZE,
crop_area_factor=(0.8, 1),
aspect_ratio_factor=(3 / 4, 4 / 3),
)
@tf.function
def crop_and_resize(img, label):
inputs = {"images": img, "labels": label}
inputs = random_crop_and_resize(inputs)
return inputs["images"], inputs["labels"]
AUGMENT_LAYERS = [
keras_cv.layers.RandomFlip(mode="horizontal"),
keras_cv.layers.RandAugment(value_range=(0, 255), magnitude=0.3),
]
@tf.function
def augment(img, label):
inputs = {"images": img, "labels": label}
for layer in AUGMENT_LAYERS:
inputs = layer(inputs)
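# Note: one of CutMix or MixUp is then applied to each batch, chosen with
# equal probability.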
if tf.random.uniform(()) > 0.5:
inputs = keras_cv.layers.CutMix()(inputs)
else:
inputs = keras_cv.layers.MixUp()(inputs)
return inputs["images"], inputs["labels"]
train_ds = (
train_ds.map(crop_and_resize, num_parallel_calls=tf.data.AUTOTUNE)
.batch(BATCH_SIZE)
.map(augment, num_parallel_calls=tf.data.AUTOTUNE)
.prefetch(tf.data.AUTOTUNE)
)
test_ds = test_ds.prefetch(tf.data.AUTOTUNE)
"""
Now we can begin training our model. We begin by loading a model from KerasCV.
"""
with strategy.scope():
backbone = models.__dict__[FLAGS.model_name]
model = models.ImageClassifier(
backbone=backbone(input_shape=IMAGE_SIZE + (3,)),
num_classes=NUM_CLASSES,
activation="softmax",
**eval(FLAGS.model_kwargs),
)
"""
Optional LR schedule with cosine decay instead of ReduceLROnPlateau
TODO: Replace with Core Keras LRWarmup when it's released. This is a temporary
solution.
Convenience method for calculating LR at given timestep, for the
WarmUpCosineDecay class.
"""
def lr_warmup_cosine_decay(
global_step,
warmup_steps,
hold=0,
total_steps=0,
start_lr=0.0,
target_lr=1e-2,
):
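# A summary of the schedule implemented below: a linear warmup from 0 to
# `target_lr` over `warmup_steps`, an optional hold at `target_lr` for
# `hold` steps, then a cosine decay
#     0.5 * target_lr * (1 + cos(pi * t / T))
# with t = step - warmup_steps - hold and T = total_steps - warmup_steps - hold.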
# Cosine decay
learning_rate = (
0.5
* target_lr
* (
1
+ tf.cos(
tf.constant(math.pi)
* tf.cast(global_step - warmup_steps - hold, tf.float32)
/ float(total_steps - warmup_steps - hold)
)
)
)
warmup_lr = tf.cast(target_lr * (global_step / warmup_steps), tf.float32)
target_lr = tf.cast(target_lr, tf.float32)
if hold > 0:
learning_rate = tf.where(
global_step > warmup_steps + hold, learning_rate, target_lr
)
learning_rate = tf.where(
global_step < warmup_steps, warmup_lr, learning_rate
)
return learning_rate
"""
LearningRateSchedule implementing the learning rate warmup with cosine decay
strategy. Learning rate warmup should help with initial training instability,
while the decay strategy may be variable, cosine being a popular choice.
The schedule will start from 0.0 (or supplied start_lr) and gradually "warm up"
linearly to the target_lr. From there, it will apply a cosine decay to the
learning rate, after an optional holding period.
args:
- [float] start_lr: default 0.0, the starting learning rate at the beginning
of training from which the warmup starts
- [float] target_lr: default 1e-2, the target (initial) learning rate from
which you'd usually start without a LR warmup schedule
- [int] warmup_steps: number of training steps to warm up for expressed in
batches
- [int] total_steps: the total steps (epochs * number of batches per epoch)
in the dataset
- [int] hold: optional argument to hold the target_lr before applying cosine
decay on it
"""
class WarmUpCosineDecay(keras.optimizers.schedules.LearningRateSchedule):
def __init__(
self, warmup_steps, total_steps, hold, start_lr=0.0, target_lr=1e-2
):
super().__init__()
self.start_lr = start_lr
self.target_lr = target_lr
self.warmup_steps = warmup_steps
self.total_steps = total_steps
self.hold = hold
def __call__(self, step):
lr = lr_warmup_cosine_decay(
global_step=step,
total_steps=self.total_steps,
warmup_steps=self.warmup_steps,
start_lr=self.start_lr,
target_lr=self.target_lr,
hold=self.hold,
)
return tf.where(step > self.total_steps, 0.0, lr, name="learning_rate")
total_steps = (NUM_IMAGES // BATCH_SIZE) * FLAGS.epochs
warmup_steps = int(FLAGS.warmup_steps_percentage * total_steps)
hold_steps = int(FLAGS.warmup_hold_steps_percentage * total_steps)
schedule = WarmUpCosineDecay(
start_lr=0.0,
target_lr=INITIAL_LEARNING_RATE,
warmup_steps=warmup_steps,
total_steps=total_steps,
hold=hold_steps,
)
"""
Next, we pick an optimizer. Here we use SGD.
Note that the learning rate will decrease over time, either via the
ReduceLROnPlateau callback or via the warmup-with-cosine-decay schedule.
"""
with strategy.scope():
if FLAGS.learning_rate_schedule == COSINE_DECAY_WITH_WARMUP:
optimizer = keras.optimizers.SGD(
weight_decay=FLAGS.weight_decay,
learning_rate=schedule,
momentum=0.9,
use_ema=FLAGS.use_ema,
)
else:
optimizer = keras.optimizers.SGD(
weight_decay=FLAGS.weight_decay,
learning_rate=INITIAL_LEARNING_RATE,
momentum=0.9,
global_clipnorm=10,
use_ema=FLAGS.use_ema,
)
"""
Next, we pick a loss function. We use CategoricalCrossentropy with label
smoothing.
"""
loss_fn = keras.losses.CategoricalCrossentropy(label_smoothing=0.1)
"""
Next, we specify the metrics that we want to track. For this example, we track
accuracy.
"""
with strategy.scope():
training_metrics = [
keras.metrics.CategoricalAccuracy(),
keras.metrics.TopKCategoricalAccuracy(k=5),
]
"""
As a last piece of configuration, we configure callbacks for the method.
We use EarlyStopping, BackupAndRestore, and a model checkpointing callback.
"""
model_callbacks = [
keras.callbacks.EarlyStopping(patience=20),
keras.callbacks.ModelCheckpoint(
FLAGS.weights_path, save_weights_only=True, save_best_only=True
),
keras.callbacks.TensorBoard(
log_dir=FLAGS.tensorboard_path, write_steps_per_second=True
),
]
if FLAGS.learning_rate_schedule == REDUCE_ON_PLATEAU:
model_callbacks.append(
keras.callbacks.ReduceLROnPlateau(
monitor="val_loss",
factor=0.1,
patience=10,
min_delta=0.001,
min_lr=0.0001,
)
)
"""
We can now compile the model and fit it to the training dataset.
"""
model.compile(
optimizer=optimizer,
loss=loss_fn,
metrics=training_metrics,
jit_compile=FLAGS.use_xla,
)
model.fit(
train_ds,
batch_size=BATCH_SIZE,
epochs=FLAGS.epochs,
callbacks=model_callbacks,
validation_data=test_ds,
)
| keras-cv/examples/training/classification/imagenet/basic_training.py/0 | {
"file_path": "keras-cv/examples/training/classification/imagenet/basic_training.py",
"repo_id": "keras-cv",
"token_count": 4844
} | 62 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import types
from keras_cv.backend import config
_KERAS_CORE_ALIASES = {
"utils->saving": [
"register_keras_serializable",
"deserialize_keras_object",
"serialize_keras_object",
"get_registered_object",
],
"models->saving": ["load_model"],
}
if config.keras_3():
import keras # noqa: F403, F401
from keras import * # noqa: F403, F401
keras.backend.name_scope = keras.name_scope
else:
from tensorflow import keras # noqa: F403, F401
from tensorflow.keras import * # noqa: F403, F401
if not hasattr(keras, "saving"):
keras.saving = types.SimpleNamespace()
# add aliases
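# e.g. the "utils->saving" entry copies keras.utils.register_keras_serializable
# (and the other listed attributes) onto keras.saving, so that TF-Keras
# exposes the same `keras.saving` namespace as Keras 3.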
for key, value in _KERAS_CORE_ALIASES.items():
src, _, dst = key.partition("->")
src = src.split(".")
dst = dst.split(".")
src_mod, dst_mod = keras, keras
# navigate to where we want to alias the attributes
for mod in src:
src_mod = getattr(src_mod, mod)
for mod in dst:
dst_mod = getattr(dst_mod, mod)
# add an alias for each attribute
for attr in value:
if isinstance(attr, tuple):
src_attr, dst_attr = attr
else:
src_attr, dst_attr = attr, attr
attr_val = getattr(src_mod, src_attr)
setattr(dst_mod, dst_attr, attr_val)
# TF Keras doesn't have this rename.
keras.activations.silu = keras.activations.swish
| keras-cv/keras_cv/backend/keras.py/0 | {
"file_path": "keras-cv/keras_cv/backend/keras.py",
"repo_id": "keras-cv",
"token_count": 834
} | 63 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import keras_cv.bounding_box.validate_format as validate_format
from keras_cv.api_export import keras_cv_export
from keras_cv.backend.scope import tf_data
def _box_shape(batched, boxes_shape, max_boxes):
# ensure we don't drop the final axis in RaggedTensor mode
if max_boxes is None:
shape = list(boxes_shape)
shape[-1] = 4
return shape
if batched:
return [None, max_boxes, 4]
return [max_boxes, 4]
def _classes_shape(batched, classes_shape, max_boxes):
if max_boxes is None:
return None
if batched:
return [None, max_boxes] + classes_shape[2:]
return [max_boxes] + classes_shape[2:]
@keras_cv_export("keras_cv.bounding_box.to_dense")
@tf_data
def to_dense(bounding_boxes, max_boxes=None, default_value=-1):
"""to_dense converts bounding boxes to Dense tensors
Args:
bounding_boxes: bounding boxes in KerasCV dictionary format.
max_boxes: the maximum number of boxes, used to pad tensors to a given
shape. This can be used to make object detection pipelines TPU
compatible.
default_value: the default value to pad bounding boxes with. defaults
to -1.
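Example: an illustrative sketch (assuming `import tensorflow as tf` and
`import keras_cv`) that pads two ragged samples to `max_boxes=4`:
```python
bounding_boxes = {
    "boxes": tf.ragged.constant(
        [[[0, 0, 10, 10]], [[0, 0, 5, 5], [5, 5, 10, 10]]],
        ragged_rank=1,
        dtype=tf.float32,
    ),
    "classes": tf.ragged.constant([[0], [0, 1]], dtype=tf.float32),
}
dense = keras_cv.bounding_box.to_dense(bounding_boxes, max_boxes=4)
# dense["boxes"].shape == (2, 4, 4); padded entries are filled with -1.
```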
"""
info = validate_format.validate_format(bounding_boxes)
# copying guards against errors in metrics caused by modification of the
# inputs, and against unexpected behavior when the result is modified
# downstream
bounding_boxes = bounding_boxes.copy()
# Already running in dense (masked) mode
if not info["ragged"]:
# even if already dense, still return the copied dictionary for API
# consistency
return bounding_boxes
if isinstance(bounding_boxes["classes"], tf.RaggedTensor):
bounding_boxes["classes"] = bounding_boxes["classes"].to_tensor(
default_value=default_value,
shape=_classes_shape(
info["is_batched"], bounding_boxes["classes"].shape, max_boxes
),
)
if isinstance(bounding_boxes["boxes"], tf.RaggedTensor):
bounding_boxes["boxes"] = bounding_boxes["boxes"].to_tensor(
default_value=default_value,
shape=_box_shape(
info["is_batched"], bounding_boxes["boxes"].shape, max_boxes
),
)
if "confidence" in bounding_boxes:
if isinstance(bounding_boxes["confidence"], tf.RaggedTensor):
bounding_boxes["confidence"] = bounding_boxes[
"confidence"
].to_tensor(
default_value=default_value,
shape=_classes_shape(
info["is_batched"],
bounding_boxes["confidence"].shape,
max_boxes,
),
)
return bounding_boxes
| keras-cv/keras_cv/bounding_box/to_dense.py/0 | {
"file_path": "keras-cv/keras_cv/bounding_box/to_dense.py",
"repo_id": "keras-cv",
"token_count": 1323
} | 64 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common data structures for Waymo Open Dataset inputs."""
import dataclasses
from typing import Optional
import tensorflow as tf
@dataclasses.dataclass
class PointTensors:
"""Wraps point related tensors."""
# [N, 3] point x, y, z global cartesian coordinates.
point_xyz: tf.Tensor
# [N, 4] point feature: intensity, elongation, has_second, is_second.
point_feature: tf.Tensor
# [N, 3] range image row, column indices and sensor id.
point_range_image_row_col_sensor_id: tf.Tensor
# [N] NLZ (no label zone) mask. Set to true if the point is in NLZ.
label_point_nlz: tf.Tensor
@dataclasses.dataclass
class LabelTensors:
"""Wraps label related tensors."""
# [M, 7] 3d boxes in [center_{x,y,z}, length, width, height, heading].
label_box: Optional[tf.Tensor] = None
# [M] box id.
label_box_id: Optional[tf.Tensor] = None
# [M, 4] box speed_{x,y} and accel_{x,y}.
label_box_meta: Optional[tf.Tensor] = None
# [M] box class.
label_box_class: Optional[tf.Tensor] = None
# [M] number of points in each box.
label_box_density: Optional[tf.Tensor] = None
# [M] detection difficulty level.
label_box_detection_difficulty: Optional[tf.Tensor] = None
# [M] valid box mask.
label_box_mask: Optional[tf.Tensor] = None
# [M] object class of each point.
label_point_class: Optional[tf.Tensor] = None
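# An illustrative sketch (toy shapes only, not real Waymo data; the dtypes
# below are assumptions for demonstration):
#
# labels = LabelTensors(
#     label_box=tf.zeros([1, 7]),
#     label_box_class=tf.zeros([1], tf.int32),
#     label_box_mask=tf.ones([1], tf.bool),
# )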
| keras-cv/keras_cv/datasets/waymo/struct.py/0 | {
"file_path": "keras-cv/keras_cv/datasets/waymo/struct.py",
"repo_id": "keras-cv",
"token_count": 688
} | 65 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow import keras
from keras_cv import bounding_box
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import assert_tf_keras
@keras_cv_export("keras_cv.layers.ROIPooler")
class ROIPooler(keras.layers.Layer):
"""
Pooling feature map of dynamic shape into region of interest (ROI) of fixed
shape.
Mainly used in Region CNN (RCNN) networks. This works for a single-level
input feature map.
This layer splits the feature map into [target_size[0], target_size[1]]
areas, and performs max pooling for each area. The area coordinates will be
quantized.
Args:
bounding_box_format: a case-insensitive string.
For detailed information on the supported format, see the
[KerasCV bounding box documentation](https://keras.io/api/keras_cv/bounding_box/formats/).
target_size: List or Tuple of 2 integers of the pooled shape
image_shape: List of Tuple of 3 integers, or `TensorShape` of the input
image shape.
Usage:
```python
feature_map = tf.random.normal([2, 16, 16, 512])
roi_pooler = ROIPooler(bounding_box_format="yxyx", target_size=[7, 7],
image_shape=[224, 224, 3])
rois = tf.constant([[[15., 30., 25., 45.]], [[22., 1., 30., 32.]]])
pooled_feature_map = roi_pooler(feature_map, rois)
```
""" # noqa: E501
def __init__(
self,
bounding_box_format,
# TODO(consolidate size vs shape for KPL and here)
target_size,
image_shape,
**kwargs,
):
assert_tf_keras("keras_cv.layers.ROIPooler")
if not isinstance(target_size, (tuple, list)):
raise ValueError(
"Expected `target_size` to be tuple or list, got "
f"{type(target_size)}"
)
if len(target_size) != 2:
raise ValueError(
f"Expected `target_size` to be size 2, got {len(target_size)}"
)
if (
image_shape[0] is None
or image_shape[1] is None
or image_shape[2] is None
):
raise ValueError(
f"`image_shape` cannot have dynamic shape, got {image_shape}"
)
super().__init__(**kwargs)
self.bounding_box_format = bounding_box_format
self.target_height = target_size[0]
self.target_width = target_size[1]
self.image_shape = image_shape
self.built = True
def call(self, feature_map, rois):
"""
Args:
feature_map: [batch_size, H, W, C] float Tensor, the feature map
extracted from image.
rois: [batch_size, N, 4] float Tensor, the region of interests to be
pooled.
Returns:
pooled_feature_map: [batch_size, N, target_size, C] float Tensor
"""
# convert to relative format given feature map shape != image shape
rois = bounding_box.convert_format(
rois,
source=self.bounding_box_format,
target="rel_yxyx",
image_shape=self.image_shape,
)
pooled_feature_map = tf.vectorized_map(
self._pool_single_sample, (feature_map, rois)
)
return pooled_feature_map
def _pool_single_sample(self, args):
"""
Args: tuple of
feature_map: [H, W, C] float Tensor
rois: [N, 4] float Tensor
Returns:
pooled_feature_map: [target_size, C] float Tensor
"""
feature_map, rois = args
num_rois = rois.get_shape().as_list()[0]
height, width, channel = feature_map.get_shape().as_list()
# TODO (consider vectorize it for better performance)
for n in range(num_rois):
# [4]
roi = rois[n, :]
y_start = height * roi[0]
x_start = width * roi[1]
region_height = height * (roi[2] - roi[0])
region_width = width * (roi[3] - roi[1])
h_step = region_height / self.target_height
w_step = region_width / self.target_width
regions = []
for i in range(self.target_height):
for j in range(self.target_width):
height_start = y_start + i * h_step
height_end = height_start + h_step
height_start = tf.cast(height_start, tf.int32)
height_end = tf.cast(height_end, tf.int32)
# if feature_map shape smaller than roi, h_step would be 0
# in this case the result will be feature_map[0, 0, ...]
height_end = height_start + tf.maximum(
1, height_end - height_start
)
width_start = x_start + j * w_step
width_end = width_start + w_step
width_start = tf.cast(width_start, tf.int32)
width_end = tf.cast(width_end, tf.int32)
width_end = width_start + tf.maximum(
1, width_end - width_start
)
# [h_step, w_step, C]
region = feature_map[
height_start:height_end, width_start:width_end, :
]
# target_height * target_width * [C]
regions.append(tf.reduce_max(region, axis=[0, 1]))
regions = tf.reshape(
tf.stack(regions),
[self.target_height, self.target_width, channel],
)
return regions
def get_config(self):
config = {
"bounding_box_format": self.bounding_box_format,
"target_size": [self.target_height, self.target_width],
"image_shape": self.image_shape,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
| keras-cv/keras_cv/layers/object_detection/roi_pool.py/0 | {
"file_path": "keras-cv/keras_cv/layers/object_detection/roi_pool.py",
"repo_id": "keras-cv",
"token_count": 3115
} | 66 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
@keras_cv_export("keras_cv.layers.OverlappingPatchingAndEmbedding")
class OverlappingPatchingAndEmbedding(keras.layers.Layer):
def __init__(self, project_dim=32, patch_size=7, stride=4, **kwargs):
"""
Overlapping Patching and Embedding layer. Differs from `PatchingAndEmbedding`
in that the patch size does not affect the sequence length. It's fully derived
from the `stride` parameter. Additionally, no positional embedding is done
as part of the layer - only a projection using a `Conv2D` layer.
References:
- [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) (CVPR 2021) # noqa: E501
- [Official PyTorch implementation](https://github.com/NVlabs/SegFormer/blob/master/mmseg/models/backbones/mix_transformer.py) # noqa: E501
- [Ported from the TensorFlow implementation from DeepVision](https://github.com/DavidLandup0/deepvision/blob/main/deepvision/layers/hierarchical_transformer_encoder.py) # noqa: E501
Args:
project_dim: integer, the dimensionality of the projection.
Defaults to `32`.
patch_size: integer, the size of the patches to encode.
Defaults to `7`.
stride: integer, the stride to use for the patching before
projection. Defaults to `4`.
Basic usage:
```
project_dim = 1024
patch_size = 16
encoded_patches = keras_cv.layers.OverlappingPatchingAndEmbedding(
project_dim=project_dim, patch_size=patch_size)(img_batch)
print(encoded_patches.shape) # (1, 3136, 1024)
```
"""
super().__init__(**kwargs)
self.project_dim = project_dim
self.patch_size = patch_size
self.stride = stride
self.proj = keras.layers.Conv2D(
filters=project_dim,
kernel_size=patch_size,
strides=stride,
padding="same",
)
self.norm = keras.layers.LayerNormalization()
def call(self, x):
x = self.proj(x)
# B, H, W, C
shape = x.shape
x = ops.reshape(x, (-1, shape[1] * shape[2], shape[3]))
x = self.norm(x)
return x
def get_config(self):
config = super().get_config()
config.update(
{
"project_dim": self.project_dim,
"patch_size": self.patch_size,
"stride": self.stride,
}
)
return config
| keras-cv/keras_cv/layers/overlapping_patching_embedding.py/0 | {
"file_path": "keras-cv/keras_cv/layers/overlapping_patching_embedding.py",
"repo_id": "keras-cv",
"token_count": 1363
} | 67 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import tensorflow as tf
from keras_cv.layers.preprocessing.fourier_mix import FourierMix
from keras_cv.tests.test_case import TestCase
num_classes = 10
class FourierMixTest(TestCase):
def test_return_shapes(self):
xs = tf.ones((2, 512, 512, 3))
# randomly sample labels
ys = tf.random.categorical(tf.math.log([[0.5, 0.5]]), 2)
ys = tf.squeeze(ys)
ys = tf.one_hot(ys, num_classes)
# randomly sample segmentation mask
ys_segmentation_masks = tf.cast(
tf.stack(
[2 * tf.ones((512, 512)), tf.ones((512, 512))],
axis=0,
),
tf.uint8,
)
ys_segmentation_masks = tf.one_hot(ys_segmentation_masks, 3)
layer = FourierMix()
outputs = layer(
{
"images": xs,
"labels": ys,
"segmentation_masks": ys_segmentation_masks,
}
)
xs, ys, ys_segmentation_masks = (
outputs["images"],
outputs["labels"],
outputs["segmentation_masks"],
)
self.assertEqual(xs.shape, (2, 512, 512, 3))
self.assertEqual(ys.shape, (2, 10))
self.assertEqual(ys_segmentation_masks.shape, (2, 512, 512, 3))
def test_fourier_mix_call_results_with_labels(self):
xs = tf.cast(
tf.stack(
[2 * tf.ones((4, 4, 3)), tf.ones((4, 4, 3))],
axis=0,
),
tf.float32,
)
ys = tf.one_hot(tf.constant([0, 1]), 2)
layer = FourierMix()
outputs = layer({"images": xs, "labels": ys})
xs, ys = outputs["images"], outputs["labels"]
# None of the individual values should still be close to 1.0 or 2.0
self.assertNotAllClose(xs, 1.0)
self.assertNotAllClose(xs, 2.0)
# No labels should still be close to their originals
self.assertNotAllClose(ys, 1.0)
self.assertNotAllClose(ys, 0.0)
def test_mix_up_call_results_with_masks(self):
xs = tf.cast(
tf.stack(
[2 * tf.ones((4, 4, 3)), tf.ones((4, 4, 3))],
axis=0,
),
tf.float32,
)
ys_segmentation_masks = tf.cast(
tf.stack(
[2 * tf.ones((4, 4)), tf.ones((4, 4))],
axis=0,
),
tf.uint8,
)
ys_segmentation_masks = tf.one_hot(ys_segmentation_masks, 3)
layer = FourierMix()
outputs = layer(
{"images": xs, "segmentation_masks": ys_segmentation_masks}
)
xs, ys_segmentation_masks = (
outputs["images"],
outputs["segmentation_masks"],
)
# None of the individual values should still be close to 1.0 or 2.0
self.assertNotAllClose(xs, 1.0)
self.assertNotAllClose(xs, 2.0)
# No masks should still be close to their originals
self.assertNotAllClose(ys_segmentation_masks, 1.0)
self.assertNotAllClose(ys_segmentation_masks, 0.0)
@pytest.mark.tf_only
def test_in_tf_function(self):
xs = tf.cast(
tf.stack(
[2 * tf.ones((4, 4, 3)), tf.ones((4, 4, 3))],
axis=0,
),
tf.float32,
)
ys = tf.one_hot(tf.constant([0, 1]), 2)
layer = FourierMix()
@tf.function
def augment(x, y):
return layer({"images": x, "labels": y})
outputs = augment(xs, ys)
xs, ys = outputs["images"], outputs["labels"]
# None of the individual values should still be close to 1.0 or 2.0
self.assertNotAllClose(xs, 1.0)
self.assertNotAllClose(xs, 2.0)
# No labels should still be close to their originals
self.assertNotAllClose(ys, 1.0)
self.assertNotAllClose(ys, 0.0)
def test_image_input_only(self):
xs = tf.cast(
tf.stack(
[2 * tf.ones((100, 100, 1)), tf.ones((100, 100, 1))], axis=0
),
tf.float32,
)
layer = FourierMix()
with self.assertRaisesRegexp(
ValueError, "expects inputs in a dictionary"
):
_ = layer(xs)
def test_single_image_input(self):
xs = tf.ones((512, 512, 3))
ys = tf.one_hot(tf.constant([1]), 2)
inputs = {"images": xs, "labels": ys}
layer = FourierMix()
with self.assertRaisesRegexp(
ValueError, "FourierMix received a single image to `call`"
):
_ = layer(inputs)
| keras-cv/keras_cv/layers/preprocessing/fourier_mix_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/fourier_mix_test.py",
"repo_id": "keras-cv",
"token_count": 2606
} | 68 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.api_export import keras_cv_export
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
@keras_cv_export("keras_cv.layers.RandomApply")
class RandomApply(BaseImageAugmentationLayer):
"""Apply provided layer to random elements in a batch.
Args:
layer: a keras `Layer` or `BaseImageAugmentationLayer`. This layer will
be applied to randomly chosen samples in a batch. Layer should not
modify the size of provided inputs.
rate: controls the frequency of applying the layer. 1.0 means all
elements in a batch will be modified. 0.0 means no elements will be
modified. Defaults to 0.5.
batchwise: (Optional) bool, whether to pass entire batches to the
underlying layer. When set to true, only a single random sample is
drawn to determine if the batch should be passed to the underlying
layer. This is useful when using `MixUp()`, `CutMix()`, `Mosaic()`,
etc.
auto_vectorize: bool, whether to use tf.vectorized_map or tf.map_fn for
batched input. Setting this to True might give better performance
but currently doesn't work with XLA. Defaults to False.
seed: integer, controls random behaviour.
Example usage:
```
# Let's declare an example layer that will set all image pixels to zero.
zero_out = keras.layers.Lambda(lambda x: {"images": 0 * x["images"]})
# Create a small batch of random, single-channel, 2x2 images:
images = tf.random.stateless_uniform(shape=(5, 2, 2, 1), seed=[0, 1])
print(images[..., 0])
# <tf.Tensor: shape=(5, 2, 2), dtype=float32, numpy=
# array([[[0.08216608, 0.40928006],
# [0.39318466, 0.3162533 ]],
#
# [[0.34717774, 0.73199546],
# [0.56369007, 0.9769211 ]],
#
# [[0.55243933, 0.13101244],
# [0.2941643 , 0.5130266 ]],
#
# [[0.38977218, 0.80855536],
# [0.6040567 , 0.10502195]],
#
# [[0.51828027, 0.12730157],
# [0.288486 , 0.252975 ]]], dtype=float32)>
# Apply the layer with 50% probability:
random_apply = RandomApply(layer=zero_out, rate=0.5, seed=1234)
outputs = random_apply(images)
print(outputs[..., 0])
# <tf.Tensor: shape=(5, 2, 2), dtype=float32, numpy=
# array([[[0. , 0. ],
# [0. , 0. ]],
#
# [[0.34717774, 0.73199546],
# [0.56369007, 0.9769211 ]],
#
# [[0.55243933, 0.13101244],
# [0.2941643 , 0.5130266 ]],
#
# [[0.38977218, 0.80855536],
# [0.6040567 , 0.10502195]],
#
# [[0. , 0. ],
# [0. , 0. ]]], dtype=float32)>
# We can observe that the layer has been randomly applied to 2 out of 5
# samples.
```
"""
def __init__(
self,
layer,
rate=0.5,
batchwise=False,
auto_vectorize=False,
seed=None,
**kwargs,
):
super().__init__(seed=seed, **kwargs)
if not (0 <= rate <= 1.0):
raise ValueError(
f"rate must be in range [0, 1]. Received rate: {rate}"
)
self._layer = layer
self._rate = rate
self.auto_vectorize = auto_vectorize
self.batchwise = batchwise
self.seed = seed
self.built = True
def _should_augment(self):
return self._random_generator.uniform(shape=()) > 1.0 - self._rate
def _batch_augment(self, inputs):
if self.batchwise:
# batchwise augmentations
if self._should_augment():
return self._layer(inputs)
else:
return inputs
# non-batchwise augmentations
return super()._batch_augment(inputs)
def _augment(self, inputs):
if self._should_augment():
return self._layer(inputs)
else:
return inputs
def get_config(self):
config = super().get_config()
config.update(
{
"rate": self._rate,
"layer": self._layer,
"seed": self.seed,
"batchwise": self.batchwise,
"auto_vectorize": self.auto_vectorize,
}
)
return config
| keras-cv/keras_cv/layers/preprocessing/random_apply.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_apply.py",
"repo_id": "keras-cv",
"token_count": 2283
} | 69 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
VectorizedBaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing as preprocessing_utils
@keras_cv_export("keras_cv.layers.RandomContrast")
class RandomContrast(VectorizedBaseImageAugmentationLayer):
"""RandomContrast randomly adjusts contrast.
This layer will randomly adjust the contrast of an image or images by a
random factor. Contrast is adjusted independently for each channel of each
image.
For each channel, this layer computes the mean of the image pixels in the
channel and then adjusts each component `x` of each pixel to
`(x - mean) * contrast_factor + mean`.
Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and
in integer or floating point dtype. By default, the layer will output
floats. The output value will be clipped to the range `[0, 255]`, the valid
range of RGB colors.
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format.
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format.
Args:
value_range: A tuple or a list of two elements. The first value
represents the lower bound for values in passed images, the second
represents the upper bound. Images passed to the layer should have
values within `value_range`.
factor: A positive float represented as fraction of value, or a tuple of
size 2 representing lower and upper bound. When represented as a
single float, lower = upper. The contrast factor will be randomly
picked between `[1.0 - lower, 1.0 + upper]`. For any pixel x in the
channel, the output will be `(x - mean) * factor + mean` where
`mean` is the mean value of the channel.
seed: Integer. Used to create a random seed.
Usage:
```python
(images, labels), _ = keras.datasets.cifar10.load_data()
random_contrast = keras_cv.layers.preprocessing.RandomContrast()
augmented_images = random_contrast(images)
```
"""
def __init__(self, value_range, factor, seed=None, **kwargs):
super().__init__(seed=seed, **kwargs)
if isinstance(factor, (tuple, list)):
min = 1 - factor[0]
max = 1 + factor[1]
else:
min = 1 - factor
max = 1 + factor
self.factor_input = factor
self.factor = preprocessing_utils.parse_factor(
(min, max), min_value=-1, max_value=2
)
self.value_range = value_range
self.seed = seed
def get_random_transformation_batch(self, batch_size, **kwargs):
return self.factor(shape=(batch_size, 1, 1, 1))
def augment_ragged_image(self, image, transformation, **kwargs):
return self.augment_images(
images=image, transformations=transformation, **kwargs
)
def augment_images(self, images, transformations, **kwargs):
contrast_factors = tf.cast(transformations, dtype=images.dtype)
means = tf.reduce_mean(images, axis=(1, 2), keepdims=True)
images = (images - means) * contrast_factors + means
images = tf.clip_by_value(
images, self.value_range[0], self.value_range[1]
)
return images
def augment_labels(self, labels, transformations, **kwargs):
return labels
def augment_segmentation_masks(
self, segmentation_masks, transformations, **kwargs
):
return segmentation_masks
def augment_bounding_boxes(self, bounding_boxes, transformations, **kwargs):
return bounding_boxes
def get_config(self):
config = {
"factor": self.factor_input,
"value_range": self.value_range,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
return cls(**config)
| keras-cv/keras_cv/layers/preprocessing/random_contrast.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_contrast.py",
"repo_id": "keras-cv",
"token_count": 1804
} | 70 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
from keras_cv import bounding_box
from keras_cv.api_export import keras_cv_export
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
VectorizedBaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing as preprocessing_utils
# In order to support both unbatched and batched inputs, the horizontal
# and vertical axis is reverse indexed
H_AXIS = -3
W_AXIS = -2
@keras_cv_export("keras_cv.layers.RandomRotation")
class RandomRotation(VectorizedBaseImageAugmentationLayer):
"""A preprocessing layer which randomly rotates images.
This layer will apply random rotations to each image, filling empty space
according to `fill_mode`.
Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and
of integer or floating point dtype. By default, the layer will output
floats.
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format
Arguments:
factor: a float represented as fraction of 2 Pi, or a tuple of size 2
representing lower and upper bound for rotating clockwise and
counter-clockwise. A positive value means rotating counter-clockwise,
while a negative value means rotating clockwise. When represented as a single
float, this value is used for both the upper and lower bound. For
instance, `factor=(-0.2, 0.3)` results in an output rotation by a random
amount in the range `[-20% * 2pi, 30% * 2pi]`. `factor=0.2` results in
an output rotating by a random amount in the range
`[-20% * 2pi, 20% * 2pi]`.
fill_mode: Points outside the boundaries of the input are filled according
to the given mode (one of `{"constant", "reflect", "wrap", "nearest"}`).
- *reflect*: `(d c b a | a b c d | d c b a)` The input is extended by
reflecting about the edge of the last pixel.
- *constant*: `(k k k k | a b c d | k k k k)` The input is extended by
filling all values beyond the edge with the same constant value k = 0.
- *wrap*: `(a b c d | a b c d | a b c d)` The input is extended by
wrapping around to the opposite edge.
- *nearest*: `(a a a a | a b c d | d d d d)` The input is extended by
the nearest pixel.
interpolation: Interpolation mode. Supported values: `"nearest"`,
`"bilinear"`.
seed: Integer. Used to create a random seed.
fill_value: a float represents the value to be filled outside the
boundaries when `fill_mode="constant"`.
bounding_box_format: The format of bounding boxes in the input dataset. Refer to
https://github.com/keras-team/keras-cv/blob/master/keras_cv/bounding_box/converters.py
for more details on supported bounding box formats.
segmentation_classes: an optional integer with the number of classes in
the input segmentation mask. Required iff augmenting data with sparse
(non one-hot) segmentation masks. Include the background class in this
count (e.g. for segmenting dog vs background, this should be set to 2).
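Usage (an illustrative sketch; with `factor=0.2` each image is rotated by a
random angle in `[-20% * 2pi, 20% * 2pi]`):
```python
images = tf.random.uniform((4, 64, 64, 3))
random_rotation = keras_cv.layers.RandomRotation(factor=0.2)
augmented_images = random_rotation(images)
```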
"""
def __init__(
self,
factor,
fill_mode="reflect",
interpolation="bilinear",
seed=None,
fill_value=0.0,
bounding_box_format=None,
segmentation_classes=None,
**kwargs,
):
super().__init__(seed=seed, **kwargs)
self.factor = factor
if isinstance(factor, (tuple, list)):
self.lower = factor[0]
self.upper = factor[1]
else:
self.lower = -factor
self.upper = factor
if self.upper < self.lower:
            raise ValueError(
                "`factor` upper bound must not be smaller than its lower "
                "bound (a single float `factor` must be non-negative), "
                "got {}".format(factor)
            )
preprocessing_utils.check_fill_mode_and_interpolation(
fill_mode, interpolation
)
self.fill_mode = fill_mode
self.fill_value = fill_value
self.interpolation = interpolation
self.seed = seed
self.bounding_box_format = bounding_box_format
self.segmentation_classes = segmentation_classes
def get_random_transformation_batch(self, batch_size, **kwargs):
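        # `factor` is expressed as a fraction of a full rotation (2 * pi), so
        # convert the bounds to radians before sampling one angle per image.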
min_angle = self.lower * 2.0 * np.pi
max_angle = self.upper * 2.0 * np.pi
angles = self._random_generator.uniform(
shape=[batch_size], minval=min_angle, maxval=max_angle
)
return {"angles": angles}
def augment_ragged_image(self, image, transformation, **kwargs):
image = tf.expand_dims(image, axis=0)
transformation = {
"angles": tf.expand_dims(transformation["angles"], axis=0),
}
image = self.augment_images(
images=image, transformations=transformation, **kwargs
)
return tf.squeeze(image, axis=0)
def augment_images(self, images, transformations, **kwargs):
return self._rotate_images(images, transformations)
def augment_labels(self, labels, transformations, **kwargs):
return labels
def augment_bounding_boxes(
self, bounding_boxes, transformations, raw_images=None, **kwargs
):
if self.bounding_box_format is None:
raise ValueError(
"`RandomRotation()` was called with bounding boxes,"
"but no `bounding_box_format` was specified in the constructor."
"Please specify a bounding box format in the constructor. i.e."
"`RandomRotation(bounding_box_format='xyxy')`"
)
bounding_boxes = bounding_box.to_dense(bounding_boxes)
bounding_boxes = bounding_box.convert_format(
bounding_boxes,
source=self.bounding_box_format,
target="xyxy",
images=raw_images,
)
image_shape = tf.shape(raw_images)
h = image_shape[H_AXIS]
w = image_shape[W_AXIS]
# origin coordinates, all the points on the image are rotated around
# this point
origin_x = tf.cast(w / 2, dtype=self.compute_dtype)
origin_y = tf.cast(h / 2, dtype=self.compute_dtype)
angles = -transformations["angles"]
angles = angles[:, tf.newaxis, tf.newaxis, tf.newaxis]
# calculate coordinates of all four corners of the bounding box
boxes = bounding_boxes["boxes"]
points = tf.stack(
[
tf.stack([boxes[:, :, 0], boxes[:, :, 1]], axis=2),
tf.stack([boxes[:, :, 2], boxes[:, :, 1]], axis=2),
tf.stack([boxes[:, :, 2], boxes[:, :, 3]], axis=2),
tf.stack([boxes[:, :, 0], boxes[:, :, 3]], axis=2),
],
axis=2,
)
# point_x : x coordinates of all corners of the bounding box
point_xs = tf.gather(points, [0], axis=3)
point_x_offsets = tf.cast((point_xs - origin_x), dtype=tf.float32)
# point_y : y coordinates of all corners of the bounding box
point_ys = tf.gather(points, [1], axis=3)
point_y_offsets = tf.cast((point_ys - origin_y), dtype=tf.float32)
# rotated bounding box coordinates
# new_x : new position of x coordinates of corners of bounding box
new_x = (
origin_x
+ tf.multiply(tf.cos(angles), point_x_offsets)
- tf.multiply(tf.sin(angles), point_y_offsets)
)
# new_y : new position of y coordinates of corners of bounding box
new_y = (
origin_y
+ tf.multiply(tf.sin(angles), point_x_offsets)
+ tf.multiply(tf.cos(angles), point_y_offsets)
)
# rotated bounding box coordinates
out = tf.concat([new_x, new_y], axis=3)
# find readjusted coordinates of bounding box to represent it in corners
# format
min_coordinates = tf.math.reduce_min(out, axis=2)
max_coordinates = tf.math.reduce_max(out, axis=2)
boxes = tf.concat([min_coordinates, max_coordinates], axis=2)
bounding_boxes = bounding_boxes.copy()
bounding_boxes["boxes"] = boxes
bounding_boxes = bounding_box.clip_to_image(
bounding_boxes,
bounding_box_format="xyxy",
images=raw_images,
)
        # convert the clipped boxes back to the original bounding box format
bounding_boxes = bounding_box.convert_format(
bounding_boxes,
source="xyxy",
target=self.bounding_box_format,
dtype=self.compute_dtype,
images=raw_images,
)
return bounding_boxes
def augment_segmentation_masks(
self, segmentation_masks, transformations, **kwargs
):
# If segmentation_classes is specified, we have a dense segmentation
# mask. We therefore one-hot encode before rotation to avoid bad
# interpolation during the rotation transformation. We then make the
# mask sparse again using tf.argmax.
if self.segmentation_classes:
one_hot_mask = tf.one_hot(
tf.squeeze(tf.cast(segmentation_masks, tf.int32), axis=-1),
self.segmentation_classes,
)
rotated_one_hot_mask = self._rotate_images(
one_hot_mask, transformations
)
rotated_mask = tf.argmax(rotated_one_hot_mask, axis=-1)
return tf.expand_dims(rotated_mask, axis=-1)
else:
if segmentation_masks.shape[-1] == 1:
raise ValueError(
"Segmentation masks must be one-hot encoded, or "
"RandomRotate must be initialized with "
"`segmentation_classes`. `segmentation_classes` was not "
f"specified, and mask has shape {segmentation_masks.shape}"
)
rotated_mask = self._rotate_images(
segmentation_masks, transformations
)
# Round because we are in one-hot encoding, and we may have
# pixels with ambiguous value due to floating point math for
# rotation.
return tf.round(rotated_mask)
def _rotate_images(self, images, transformations):
images = preprocessing_utils.ensure_tensor(images, self.compute_dtype)
original_shape = images.shape
image_shape = tf.shape(images)
img_hd = tf.cast(image_shape[H_AXIS], tf.float32)
img_wd = tf.cast(image_shape[W_AXIS], tf.float32)
angles = transformations["angles"]
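        # Build one rotation (projective transform) matrix per sampled angle
        # and warp each image about its center.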
outputs = preprocessing_utils.transform(
images,
preprocessing_utils.get_rotation_matrix(angles, img_hd, img_wd),
fill_mode=self.fill_mode,
fill_value=self.fill_value,
interpolation=self.interpolation,
)
outputs.set_shape(original_shape)
return outputs
def get_config(self):
config = {
"factor": self.factor,
"fill_mode": self.fill_mode,
"fill_value": self.fill_value,
"interpolation": self.interpolation,
"bounding_box_format": self.bounding_box_format,
"segmentation_classes": self.segmentation_classes,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
return cls(**config)
| keras-cv/keras_cv/layers/preprocessing/random_rotation.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_rotation.py",
"repo_id": "keras-cv",
"token_count": 5244
} | 71 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv import bounding_box
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import tf_ops
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.utils import get_interpolation
H_AXIS = -3
W_AXIS = -2
supported_keys = [
"images",
"labels",
"targets",
"bounding_boxes",
"segmentation_masks",
]
@keras_cv_export("keras_cv.layers.Resizing")
class Resizing(BaseImageAugmentationLayer):
"""A preprocessing layer which resizes images.
This layer resizes an image input to a target height and width. The input
should be a 4D (batched) or 3D (unbatched) tensor in `"channels_last"`
format. Input pixel values can be of any range (e.g. `[0., 1.)` or `[0,
255]`) and of integer or floating point dtype. By default, the layer will
output floats.
This layer can be called on tf.RaggedTensor batches of input images of
distinct sizes, and will resize the outputs to dense tensors of uniform
size.
For an overview and full list of preprocessing layers, see the preprocessing
[guide](https://www.tensorflow.org/guide/keras/preprocessing_layers).
Args:
height: Integer, the height of the output shape.
width: Integer, the width of the output shape.
interpolation: String, the interpolation method, defaults to
`"bilinear"`. Supports `"bilinear"`, `"nearest"`, `"bicubic"`,
`"area"`, `"lanczos3"`, `"lanczos5"`, `"gaussian"`,
`"mitchellcubic"`.
crop_to_aspect_ratio: If True, resize the images without aspect ratio
distortion. When the original aspect ratio differs from the target
aspect ratio, the output image will be cropped to return the largest
possible window in the image (of size `(height, width)`) that
matches the target aspect ratio. By default,
(`crop_to_aspect_ratio=False`), aspect ratio may not be preserved.
pad_to_aspect_ratio: If True, resize the images without aspect ratio
distortion. When the original aspect ratio differs from the target
aspect ratio, the output image will be padded to return the largest
possible resize of the image (of size `(height, width)`) that
matches the target aspect ratio. By default,
(`pad_to_aspect_ratio=False`), aspect ratio may not be preserved.
bounding_box_format: The format of bounding boxes of input dataset.
Refer to
https://github.com/keras-team/keras-cv/blob/master/keras_cv/bounding_box/converters.py
for more details on supported bounding box formats.
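    Example:
    ```python
    # A minimal usage sketch: resize a batch of images to a fixed 256x256.
    images = tf.random.uniform((4, 300, 400, 3))
    layer = keras_cv.layers.Resizing(height=256, width=256)
    resized = layer(images)
    # resized.shape == (4, 256, 256, 3)
    ```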
"""
def __init__(
self,
height,
width,
interpolation="bilinear",
crop_to_aspect_ratio=False,
pad_to_aspect_ratio=False,
bounding_box_format=None,
**kwargs,
):
self.height = height
self.width = width
self.interpolation = interpolation
self.crop_to_aspect_ratio = crop_to_aspect_ratio
self.pad_to_aspect_ratio = pad_to_aspect_ratio
self._interpolation_method = get_interpolation(interpolation)
self.bounding_box_format = bounding_box_format
self.force_output_dense_images = True
if pad_to_aspect_ratio and crop_to_aspect_ratio:
raise ValueError(
"`Resizing()` expects at most one of `crop_to_aspect_ratio` or "
"`pad_to_aspect_ratio` to be True."
)
if not pad_to_aspect_ratio and bounding_box_format:
raise ValueError(
"Resizing() only supports bounding boxes when in "
"`pad_to_aspect_ratio=True` mode. "
"Please pass `pad_to_aspect_ratio=True`"
"when processing bounding boxes with `Resizing()`"
)
super().__init__(**kwargs)
def compute_image_signature(self, images):
return tf.TensorSpec(
shape=(self.height, self.width, images.shape[-1]),
dtype=self.compute_dtype,
)
def _augment(self, inputs):
images = inputs.get("images", None)
bounding_boxes = inputs.get("bounding_boxes", None)
segmentation_masks = inputs.get("segmentation_masks", None)
if images is not None:
images = tf.expand_dims(images, axis=0)
inputs["images"] = images
if bounding_boxes is not None:
bounding_boxes = bounding_boxes.copy()
bounding_boxes["classes"] = tf.expand_dims(
bounding_boxes["classes"], axis=0
)
bounding_boxes["boxes"] = tf.expand_dims(
bounding_boxes["boxes"], axis=0
)
inputs["bounding_boxes"] = bounding_boxes
if segmentation_masks is not None:
segmentation_masks = tf.expand_dims(segmentation_masks, axis=0)
inputs["segmentation_masks"] = segmentation_masks
outputs = self._batch_augment(inputs)
if images is not None:
images = tf.squeeze(outputs["images"], axis=0)
inputs["images"] = images
if bounding_boxes is not None:
outputs["bounding_boxes"]["classes"] = tf.squeeze(
outputs["bounding_boxes"]["classes"], axis=0
)
outputs["bounding_boxes"]["boxes"] = tf.squeeze(
outputs["bounding_boxes"]["boxes"], axis=0
)
inputs["bounding_boxes"] = outputs["bounding_boxes"]
if segmentation_masks is not None:
segmentation_masks = tf.squeeze(
outputs["segmentation_masks"], axis=0
)
inputs["segmentation_masks"] = segmentation_masks
return inputs
def _resize_with_distortion(self, inputs):
images = inputs.get("images", None)
segmentation_masks = inputs.get("segmentation_masks", None)
size = [self.height, self.width]
images = tf.image.resize(
images, size=size, method=self._interpolation_method
)
images = tf.cast(images, self.compute_dtype)
if segmentation_masks is not None:
segmentation_masks = tf.image.resize(
segmentation_masks, size=size, method="nearest"
)
inputs["images"] = images
inputs["segmentation_masks"] = segmentation_masks
return inputs
def _resize_with_pad(self, inputs):
def resize_single_with_pad_to_aspect(x):
image = x.get("images", None)
bounding_boxes = x.get("bounding_boxes", None)
segmentation_masks = x.get("segmentation_masks", None)
# images must be dense-able at this point.
if isinstance(image, tf.RaggedTensor):
image = image.to_tensor()
img_size = tf.shape(image)
img_height = tf.cast(img_size[H_AXIS], self.compute_dtype)
img_width = tf.cast(img_size[W_AXIS], self.compute_dtype)
if bounding_boxes is not None:
bounding_boxes = bounding_box.to_dense(bounding_boxes)
bounding_boxes = bounding_box.convert_format(
bounding_boxes,
image_shape=img_size,
source=self.bounding_box_format,
target="rel_xyxy",
)
# how much we scale height by to hit target height
height_scale = self.height / img_height
width_scale = self.width / img_width
resize_scale = tf.math.minimum(height_scale, width_scale)
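            # Use the smaller of the two scales so the resized image fits
            # entirely inside the (height, width) canvas; the remaining area
            # is zero-padded below.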
target_height = img_height * resize_scale
target_width = img_width * resize_scale
image = tf.image.resize(
image,
size=(target_height, target_width),
method=self._interpolation_method,
)
if bounding_boxes is not None:
bounding_boxes = bounding_box.convert_format(
bounding_boxes,
images=image,
source="rel_xyxy",
target="xyxy",
)
image = tf.image.pad_to_bounding_box(
image, 0, 0, self.height, self.width
)
if bounding_boxes is not None:
bounding_boxes = bounding_box.clip_to_image(
bounding_boxes, images=image, bounding_box_format="xyxy"
)
bounding_boxes = bounding_box.convert_format(
bounding_boxes,
images=image,
source="xyxy",
target=self.bounding_box_format,
)
inputs["images"] = image
if bounding_boxes is not None:
inputs["bounding_boxes"] = bounding_box.to_ragged(
bounding_boxes
)
if segmentation_masks is not None:
segmentation_masks = tf.image.resize(
segmentation_masks,
size=(target_height, target_width),
method="nearest",
)
segmentation_masks = tf.image.pad_to_bounding_box(
tf.cast(segmentation_masks, dtype="float32"),
0,
0,
self.height,
self.width,
)
inputs["segmentation_masks"] = segmentation_masks
return inputs
size_as_shape = tf.TensorShape((self.height, self.width))
shape = size_as_shape + inputs["images"].shape[-1:]
img_spec = tf.TensorSpec(shape, self.compute_dtype)
fn_output_signature = {"images": img_spec}
bounding_boxes = inputs.get("bounding_boxes", None)
if bounding_boxes is not None:
boxes_spec = self._compute_bounding_box_signature(bounding_boxes)
fn_output_signature["bounding_boxes"] = boxes_spec
segmentation_masks = inputs.get("segmentation_masks", None)
if segmentation_masks is not None:
seg_map_shape = (
size_as_shape + inputs["segmentation_masks"].shape[-1:]
)
seg_map_spec = tf.TensorSpec(seg_map_shape, self.compute_dtype)
fn_output_signature["segmentation_masks"] = seg_map_spec
return tf.map_fn(
resize_single_with_pad_to_aspect,
inputs,
fn_output_signature=fn_output_signature,
)
def _resize_with_crop(self, inputs):
images = inputs.get("images", None)
bounding_boxes = inputs.get("bounding_boxes", None)
segmentation_masks = inputs.get("segmentation_masks", None)
if bounding_boxes is not None:
raise ValueError(
"Resizing(crop_to_aspect_ratio=True) does not support "
"bounding box inputs. Please use `pad_to_aspect_ratio=True` "
"when processing bounding boxes with Resizing()."
)
inputs["images"] = images
size = [self.height, self.width]
# tf.image.resize will always output float32 and operate more
# efficiently on float32 unless interpolation is nearest, in which case
# output type matches input type.
if self.interpolation == "nearest":
input_dtype = self.compute_dtype
else:
input_dtype = tf.float32
def resize_with_crop_to_aspect(x, interpolation_method):
if isinstance(x, tf.RaggedTensor):
x = x.to_tensor()
return tf_ops.smart_resize(
x,
size=size,
interpolation=interpolation_method,
)
def resize_with_crop_to_aspect_images(x):
return resize_with_crop_to_aspect(
x, interpolation_method=self._interpolation_method
)
def resize_with_crop_to_aspect_masks(x):
return resize_with_crop_to_aspect(x, interpolation_method="nearest")
if isinstance(images, tf.RaggedTensor):
size_as_shape = tf.TensorShape(size)
shape = size_as_shape + images.shape[-1:]
spec = tf.TensorSpec(shape, input_dtype)
images = tf.map_fn(
resize_with_crop_to_aspect_images,
images,
fn_output_signature=spec,
)
else:
images = resize_with_crop_to_aspect_images(images)
inputs["images"] = images
if segmentation_masks is not None:
if isinstance(segmentation_masks, tf.RaggedTensor):
size_as_shape = tf.TensorShape(size)
shape = size_as_shape + segmentation_masks.shape[-1:]
spec = tf.TensorSpec(shape, input_dtype)
segmentation_masks = tf.map_fn(
resize_with_crop_to_aspect_masks,
segmentation_masks,
fn_output_signature=spec,
)
else:
segmentation_masks = resize_with_crop_to_aspect_masks(
segmentation_masks
)
inputs["segmentation_masks"] = segmentation_masks
return inputs
def _check_inputs(self, inputs):
for key in inputs:
if key not in supported_keys:
raise ValueError(
"Resizing() currently only supports keys "
f"[{', '.join(supported_keys)}]. "
f"Key `{key}` found in inputs to `Resizing()`. "
)
def _batch_augment(self, inputs):
if (
inputs.get("bounding_boxes", None) is not None
and self.bounding_box_format is None
):
raise ValueError(
"Resizing requires `bounding_box_format` to be set when "
"augmenting bounding boxes, but "
"`self.bounding_box_format=None`."
)
if self.crop_to_aspect_ratio:
return self._resize_with_crop(inputs)
if self.pad_to_aspect_ratio:
return self._resize_with_pad(inputs)
return self._resize_with_distortion(inputs)
def get_config(self):
config = {
"height": self.height,
"width": self.width,
"interpolation": self.interpolation,
"crop_to_aspect_ratio": self.crop_to_aspect_ratio,
"pad_to_aspect_ratio": self.pad_to_aspect_ratio,
"bounding_box_format": self.bounding_box_format,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
| keras-cv/keras_cv/layers/preprocessing/resizing.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/resizing.py",
"repo_id": "keras-cv",
"token_count": 7352
} | 72 |
# Copyright 2022 Waymo LLC.
#
# Licensed under the terms in https://github.com/keras-team/keras-cv/blob/master/keras_cv/layers/preprocessing_3d/waymo/LICENSE # noqa: E501
import numpy as np
from keras_cv.layers.preprocessing_3d import base_augmentation_layer_3d
from keras_cv.layers.preprocessing_3d.waymo.frustum_random_dropping_points import ( # noqa: E501
FrustumRandomDroppingPoints,
)
from keras_cv.tests.test_case import TestCase
POINT_CLOUDS = base_augmentation_layer_3d.POINT_CLOUDS
BOUNDING_BOXES = base_augmentation_layer_3d.BOUNDING_BOXES
class FrustumRandomDroppingPointTest(TestCase):
def test_augment_point_clouds_and_bounding_boxes(self):
add_layer = FrustumRandomDroppingPoints(
r_distance=0, theta_width=1, phi_width=1, drop_rate=0.5
)
point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertNotAllClose(inputs, outputs)
def test_not_augment_drop_rate0_point_clouds_and_bounding_boxes(self):
add_layer = FrustumRandomDroppingPoints(
r_distance=0, theta_width=1, phi_width=1, drop_rate=0.0
)
point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertAllClose(inputs, outputs)
def test_not_augment_drop_rate1_frustum_empty_point_clouds_and_bounding_boxes( # noqa: E501
self,
):
add_layer = FrustumRandomDroppingPoints(
r_distance=10, theta_width=0, phi_width=0, drop_rate=1.0
)
point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertAllClose(inputs, outputs)
def test_drop_rate1_large_frustum_drop_all_point_clouds(self):
add_layer = FrustumRandomDroppingPoints(
r_distance=0, theta_width=np.pi, phi_width=np.pi, drop_rate=1.0
)
point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertAllClose(inputs[POINT_CLOUDS] * 0.0, outputs[POINT_CLOUDS])
def test_exclude_all_points(self):
add_layer = FrustumRandomDroppingPoints(
r_distance=0,
theta_width=np.pi,
phi_width=np.pi,
drop_rate=1.0,
exclude_classes=1,
)
point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
exclude_classes = np.ones(shape=(2, 50, 1)).astype("float32")
point_clouds = np.concatenate([point_clouds, exclude_classes], axis=-1)
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertAllClose(inputs, outputs)
def test_exclude_the_first_half_points(self):
add_layer = FrustumRandomDroppingPoints(
r_distance=0,
theta_width=np.pi,
phi_width=np.pi,
drop_rate=1.0,
exclude_classes=[1, 2],
)
point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
class_1 = np.ones(shape=(2, 10, 1)).astype("float32")
class_2 = np.ones(shape=(2, 15, 1)).astype("float32") * 2
classes = np.concatenate(
[class_1, class_2, np.zeros(shape=(2, 25, 1)).astype("float32")],
axis=1,
)
point_clouds = np.concatenate([point_clouds, classes], axis=-1)
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertAllClose(
inputs[POINT_CLOUDS][:, 25:, :] * 0.0,
outputs[POINT_CLOUDS][:, 25:, :],
)
self.assertAllClose(
inputs[POINT_CLOUDS][:, :25, :], outputs[POINT_CLOUDS][:, :25, :]
)
def test_augment_batch_point_clouds_and_bounding_boxes(self):
add_layer = FrustumRandomDroppingPoints(
r_distance=0, theta_width=1, phi_width=1, drop_rate=0.5
)
point_clouds = np.random.random(size=(3, 2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(3, 2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertNotAllClose(inputs, outputs)
| keras-cv/keras_cv/layers/preprocessing_3d/waymo/frustum_random_dropping_points_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing_3d/waymo/frustum_random_dropping_points_test.py",
"repo_id": "keras-cv",
"token_count": 2383
} | 73 |
# Copyright 2022 Waymo LLC.
#
# Licensed under the terms in https://github.com/keras-team/keras-cv/blob/master/keras_cv/layers/preprocessing_3d/waymo/LICENSE # noqa: E501
import os
import numpy as np
import pytest
from keras_cv.layers.preprocessing_3d import base_augmentation_layer_3d
from keras_cv.layers.preprocessing_3d.waymo.random_copy_paste import (
RandomCopyPaste,
)
from keras_cv.tests.test_case import TestCase
POINT_CLOUDS = base_augmentation_layer_3d.POINT_CLOUDS
BOUNDING_BOXES = base_augmentation_layer_3d.BOUNDING_BOXES
OBJECT_POINT_CLOUDS = base_augmentation_layer_3d.OBJECT_POINT_CLOUDS
OBJECT_BOUNDING_BOXES = base_augmentation_layer_3d.OBJECT_BOUNDING_BOXES
class RandomCopyPasteTest(TestCase):
@pytest.mark.skipif(
"TEST_CUSTOM_OPS" not in os.environ
or os.environ["TEST_CUSTOM_OPS"] != "true",
reason="Requires binaries compiled from source",
)
def test_augment_point_clouds_and_bounding_boxes(self):
add_layer = RandomCopyPaste(
label_index=1,
min_paste_bounding_boxes=1,
max_paste_bounding_boxes=1,
)
# point_clouds: 3D (multi frames) float32 Tensor with shape
# [num of frames, num of points, num of point features].
# The first 5 features are [x, y, z, class, range].
point_clouds = np.array(
[
[
[0, 1, 2, 3, 4],
[10, 1, 2, 3, 4],
[0, -1, 2, 3, 4],
[100, 100, 2, 3, 4],
[0, 0, 0, 0, 0],
]
]
* 2
).astype("float32")
# bounding_boxes: 3D (multi frames) float32 Tensor with shape
# [num of frames, num of boxes, num of box features].
# The first 8 features are [x, y, z, dx, dy, dz, phi, box class].
bounding_boxes = np.array(
[
[
[0, 0, 0, 4, 4, 4, 0, 1],
[20, 20, 20, 1, 1, 1, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
]
* 2
).astype("float32")
object_point_clouds = np.array(
[
[
[[0, 1, 2, 3, 4], [0, 1, 1, 3, 4]],
[[100, 101, 2, 3, 4], [0, 0, 0, 0, 0]],
]
]
* 2
).astype("float32")
object_bounding_boxes = np.array(
[
[
[0, 0, 1, 4, 4, 4, 0, 1],
[100, 100, 2, 5, 5, 5, 0, 1],
]
]
* 2
).astype("float32")
inputs = {
POINT_CLOUDS: point_clouds,
BOUNDING_BOXES: bounding_boxes,
OBJECT_POINT_CLOUDS: object_point_clouds,
OBJECT_BOUNDING_BOXES: object_bounding_boxes,
}
outputs = add_layer(inputs)
# The first object bounding box [0, 0, 1, 4, 4, 4, 0, 1] overlaps with
# existing bounding box [0, 0, 0, 4, 4, 4, 0, 1], thus not used.
# The second object bounding box [100, 100, 2, 5, 5, 5, 0, 1] and object
# point clouds [100, 101, 2, 3, 4] are pasted.
augmented_point_clouds = np.array(
[
[
[100, 101, 2, 3, 4],
[0, 1, 2, 3, 4],
[10, 1, 2, 3, 4],
[0, -1, 2, 3, 4],
[0, 0, 0, 0, 0],
]
]
* 2
).astype("float32")
augmented_bounding_boxes = np.array(
[
[
[100, 100, 2, 5, 5, 5, 0, 1],
[0, 0, 0, 4, 4, 4, 0, 1],
[20, 20, 20, 1, 1, 1, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0],
]
]
* 2
).astype("float32")
self.assertAllClose(
inputs[OBJECT_POINT_CLOUDS], outputs[OBJECT_POINT_CLOUDS]
)
self.assertAllClose(
inputs[OBJECT_BOUNDING_BOXES], outputs[OBJECT_BOUNDING_BOXES]
)
self.assertAllClose(outputs[POINT_CLOUDS], augmented_point_clouds)
self.assertAllClose(outputs[BOUNDING_BOXES], augmented_bounding_boxes)
@pytest.mark.skipif(
"TEST_CUSTOM_OPS" not in os.environ
or os.environ["TEST_CUSTOM_OPS"] != "true",
reason="Requires binaries compiled from source",
)
def test_augment_batch_point_clouds_and_bounding_boxes(self):
add_layer = RandomCopyPaste(
label_index=1,
min_paste_bounding_boxes=1,
max_paste_bounding_boxes=1,
)
point_clouds = np.array(
[
[
[
[0, 1, 2, 3, 4],
[10, 1, 2, 3, 4],
[0, -1, 2, 3, 4],
[100, 100, 2, 3, 4],
[0, 0, 0, 0, 0],
]
]
* 2
]
* 3
).astype("float32")
bounding_boxes = np.array(
[
[
[
[0, 0, 0, 4, 4, 4, 0, 1],
[20, 20, 20, 1, 1, 1, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
]
* 2
]
* 3
).astype("float32")
object_point_clouds = np.array(
[
[
[
[[0, 1, 2, 3, 4], [0, 1, 1, 3, 4]],
[[100, 101, 2, 3, 4], [0, 0, 0, 0, 0]],
]
]
* 2
]
* 3
).astype("float32")
object_bounding_boxes = np.array(
[
[
[
[0, 0, 1, 4, 4, 4, 0, 1],
[100, 100, 2, 5, 5, 5, 0, 1],
]
]
* 2
]
* 3
).astype("float32")
inputs = {
POINT_CLOUDS: point_clouds,
BOUNDING_BOXES: bounding_boxes,
OBJECT_POINT_CLOUDS: object_point_clouds,
OBJECT_BOUNDING_BOXES: object_bounding_boxes,
}
outputs = add_layer(inputs)
# The first object bounding box [0, 0, 1, 4, 4, 4, 0, 1] overlaps with
# existing bounding box [0, 0, 0, 4, 4, 4, 0, 1], thus not used.
# The second object bounding box [100, 100, 2, 5, 5, 5, 0, 1] and object
# point clouds [100, 101, 2, 3, 4] are pasted.
augmented_point_clouds = np.array(
[
[
[
[100, 101, 2, 3, 4],
[0, 1, 2, 3, 4],
[10, 1, 2, 3, 4],
[0, -1, 2, 3, 4],
[0, 0, 0, 0, 0],
]
]
* 2
]
* 3
).astype("float32")
augmented_bounding_boxes = np.array(
[
[
[
[100, 100, 2, 5, 5, 5, 0, 1],
[0, 0, 0, 4, 4, 4, 0, 1],
[20, 20, 20, 1, 1, 1, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0],
]
]
* 2
]
* 3
).astype("float32")
self.assertAllClose(
inputs[OBJECT_POINT_CLOUDS], outputs[OBJECT_POINT_CLOUDS]
)
self.assertAllClose(
inputs[OBJECT_BOUNDING_BOXES], outputs[OBJECT_BOUNDING_BOXES]
)
self.assertAllClose(outputs[POINT_CLOUDS], augmented_point_clouds)
self.assertAllClose(outputs[BOUNDING_BOXES], augmented_bounding_boxes)
| keras-cv/keras_cv/layers/preprocessing_3d/waymo/random_copy_paste_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing_3d/waymo/random_copy_paste_test.py",
"repo_id": "keras-cv",
"token_count": 5017
} | 74 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any
from typing import List
from typing import Mapping
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.backend.config import keras_3
@keras_cv_export("keras_cv.layers.SpatialPyramidPooling")
class SpatialPyramidPooling(keras.layers.Layer):
"""Implements the Atrous Spatial Pyramid Pooling.
References:
[Rethinking Atrous Convolution for Semantic Image Segmentation](https://arxiv.org/pdf/1706.05587.pdf)
[Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation](https://arxiv.org/pdf/1802.02611.pdf)
    Example:
    ```python
    inp = keras.layers.Input((384, 384, 3))
    backbone = keras.applications.EfficientNetB0(
        input_tensor=inp,
        include_top=False)
    output = backbone(inp)
    output = keras_cv.layers.SpatialPyramidPooling(
        dilation_rates=[6, 12, 18])(output)
    # output.shape = (None, 12, 12, 256), since EfficientNetB0 downsamples
    # by 32 (384 / 32 = 12) and `num_channels` defaults to 256.
    ```
""" # noqa: E501
def __init__(
self,
dilation_rates: List[int],
num_channels: int = 256,
activation: str = "relu",
dropout: float = 0.0,
**kwargs,
):
"""Initializes an Atrous Spatial Pyramid Pooling layer.
Args:
dilation_rates: A `list` of integers for parallel dilated conv.
Usually a sample choice of rates are [6, 12, 18].
num_channels: An `int` number of output channels, defaults to 256.
activation: A `str` activation to be used, defaults to 'relu'.
dropout: A `float` for the dropout rate of the final projection
output after the activations and batch norm, defaults to 0.0,
which means no dropout is applied to the output.
**kwargs: Additional keyword arguments to be passed.
"""
super().__init__(**kwargs)
self.dilation_rates = dilation_rates
self.num_channels = num_channels
self.activation = activation
self.dropout = dropout
# TODO(ianstenbit): Remove this once TF 2.14 is released which adds
# XLA support for resizing with bilinear interpolation.
if keras_3() and keras.backend.backend() == "tensorflow":
self.supports_jit = False
def build(self, input_shape):
channels = input_shape[3]
# This is the parallel networks that process the input features with
# different dilation rates. The output from each channel will be merged
# together and feed to the output.
self.aspp_parallel_channels = []
# Channel1 with Conv2D and 1x1 kernel size.
conv_sequential = keras.Sequential(
[
keras.layers.Conv2D(
filters=self.num_channels,
kernel_size=(1, 1),
use_bias=False,
),
keras.layers.BatchNormalization(),
keras.layers.Activation(self.activation),
]
)
conv_sequential.build(input_shape)
self.aspp_parallel_channels.append(conv_sequential)
# Channel 2 and afterwards are based on self.dilation_rates, and each of
# them will have conv2D with 3x3 kernel size.
for dilation_rate in self.dilation_rates:
conv_sequential = keras.Sequential(
[
keras.layers.Conv2D(
filters=self.num_channels,
kernel_size=(3, 3),
padding="same",
dilation_rate=dilation_rate,
use_bias=False,
),
keras.layers.BatchNormalization(),
keras.layers.Activation(self.activation),
]
)
conv_sequential.build(input_shape)
self.aspp_parallel_channels.append(conv_sequential)
# Last channel is the global average pooling with conv2D 1x1 kernel.
pool_sequential = keras.Sequential(
[
keras.layers.GlobalAveragePooling2D(),
keras.layers.Reshape((1, 1, channels)),
keras.layers.Conv2D(
filters=self.num_channels,
kernel_size=(1, 1),
use_bias=False,
),
keras.layers.BatchNormalization(),
keras.layers.Activation(self.activation),
]
)
pool_sequential.build(input_shape)
self.aspp_parallel_channels.append(pool_sequential)
# Final projection layers
projection = keras.Sequential(
[
keras.layers.Conv2D(
filters=self.num_channels,
kernel_size=(1, 1),
use_bias=False,
),
keras.layers.BatchNormalization(),
keras.layers.Activation(self.activation),
keras.layers.Dropout(rate=self.dropout),
],
)
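        # The projection input is the concatenation of one 1x1 conv branch,
        # len(dilation_rates) dilated 3x3 conv branches, and one pooling
        # branch, each producing `num_channels` channels.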
projection_input_channels = (
2 + len(self.dilation_rates)
) * self.num_channels
projection.build(tuple(input_shape[:-1]) + (projection_input_channels,))
self.projection = projection
def call(self, inputs, training=None):
"""Calls the Atrous Spatial Pyramid Pooling layer on an input.
Args:
inputs: A tensor of shape [batch, height, width, channels]
Returns:
A tensor of shape [batch, height, width, num_channels]
"""
result = []
for channel in self.aspp_parallel_channels:
temp = ops.cast(channel(inputs, training=training), inputs.dtype)
result.append(temp)
image_shape = ops.shape(inputs)
height, width = image_shape[1], image_shape[2]
result[-1] = keras.layers.Resizing(
height,
width,
interpolation="bilinear",
)(result[-1])
result = ops.concatenate(result, axis=-1)
result = self.projection(result, training=training)
return result
def compute_output_shape(self, input_shape):
return tuple(input_shape[:-1]) + (self.num_channels,)
def get_config(self) -> Mapping[str, Any]:
config = {
"dilation_rates": self.dilation_rates,
"num_channels": self.num_channels,
"activation": self.activation,
"dropout": self.dropout,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
| keras-cv/keras_cv/layers/spatial_pyramid.py/0 | {
"file_path": "keras-cv/keras_cv/layers/spatial_pyramid.py",
"repo_id": "keras-cv",
"token_count": 3324
} | 75 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.losses.giou_loss import GIoULoss
from keras_cv.tests.test_case import TestCase
class GIoUTest(TestCase):
def test_output_shape(self):
y_true = tf.random.uniform(
shape=(2, 2, 4), minval=0, maxval=10, dtype=tf.int32
)
y_pred = tf.random.uniform(
shape=(2, 2, 4), minval=0, maxval=20, dtype=tf.int32
)
giou_loss = GIoULoss(bounding_box_format="xywh")
self.assertAllEqual(giou_loss(y_true, y_pred).shape, ())
def test_output_shape_reduction_none(self):
y_true = tf.random.uniform(
shape=(2, 2, 4), minval=0, maxval=10, dtype=tf.int32
)
y_pred = tf.random.uniform(
shape=(2, 2, 4), minval=0, maxval=20, dtype=tf.int32
)
giou_loss = GIoULoss(bounding_box_format="xywh", reduction="none")
self.assertAllEqual(
giou_loss(y_true, y_pred).shape,
[
2,
],
)
def test_output_shape_relative_formats(self):
y_true = [
[0.0, 0.0, 0.1, 0.1],
[0.0, 0.0, 0.2, 0.3],
[0.4, 0.5, 0.5, 0.6],
[0.2, 0.2, 0.3, 0.3],
]
y_pred = [
[0.0, 0.0, 0.5, 0.6],
[0.0, 0.0, 0.7, 0.3],
[0.4, 0.5, 0.5, 0.6],
[0.2, 0.1, 0.3, 0.3],
]
giou_loss = GIoULoss(bounding_box_format="rel_xyxy")
self.assertAllEqual(giou_loss(y_true, y_pred).shape, ())
def test_output_value(self):
y_true = [
[0, 0, 1, 1],
[0, 0, 2, 3],
[4, 5, 3, 6],
[2, 2, 3, 3],
]
y_pred = [
[0, 0, 5, 6],
[0, 0, 7, 3],
[4, 5, 5, 6],
[2, 1, 3, 3],
]
        giou_loss = GIoULoss(bounding_box_format="xywh")
        # expected value for these values is 0.6452381
        self.assertAllClose(giou_loss(y_true, y_pred), 0.6452381)
| keras-cv/keras_cv/losses/giou_loss_test.py/0 | {
"file_path": "keras-cv/keras_cv/losses/giou_loss_test.py",
"repo_id": "keras-cv",
"token_count": 1347
} | 76 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import tensorflow as tf
from keras_cv import bounding_box
from keras_cv.metrics import BoxCOCOMetrics
from keras_cv.tests.test_case import TestCase
SAMPLE_FILE = (
os.path.dirname(os.path.abspath(__file__)) + "/test_data/sample_boxes.npz"
)
def load_samples(fname):
npzfile = np.load(fname)
y_true = npzfile["arr_0"].astype(np.float32)
y_pred = npzfile["arr_1"].astype(np.float32)
y_true = {
"boxes": y_true[:, :, :4],
"classes": y_true[:, :, 4],
}
y_pred = {
"boxes": y_pred[:, :, :4],
"classes": y_pred[:, :, 4],
"confidence": y_pred[:, :, 5],
}
y_true = bounding_box.convert_format(y_true, source="xywh", target="xyxy")
y_pred = bounding_box.convert_format(y_pred, source="xywh", target="xyxy")
categories = set(int(x) for x in y_true["classes"].flatten())
categories = [x for x in categories if x != -1]
return y_true, y_pred, categories
golden_metrics = {
"MaP": 0.61690974,
"MaP@[IoU=50]": 1.0,
"MaP@[IoU=75]": 0.70687747,
"MaP@[area=small]": 0.6041764,
"MaP@[area=medium]": 0.6262922,
"MaP@[area=large]": 0.61016285,
"Recall@[max_detections=1]": 0.47804594,
"Recall@[max_detections=10]": 0.6451851,
"Recall@[max_detections=100]": 0.6484465,
"Recall@[area=small]": 0.62842655,
"Recall@[area=medium]": 0.65336424,
"Recall@[area=large]": 0.6405466,
}
class BoxCOCOMetricsTest(TestCase):
def test_coco_metric_suite_returns_all_coco_metrics(self):
suite = BoxCOCOMetrics(bounding_box_format="xyxy", evaluate_freq=1)
y_true, y_pred, categories = load_samples(SAMPLE_FILE)
suite.update_state(y_true, y_pred)
metrics = suite.result()
for metric_name, metric_value in metrics.items():
self.assertEqual(metric_value, golden_metrics[metric_name])
def test_coco_metric_suite_evaluate_freq(self):
suite = BoxCOCOMetrics(bounding_box_format="xyxy", evaluate_freq=2)
y_true, y_pred, categories = load_samples(SAMPLE_FILE)
suite.update_state(y_true, y_pred)
metrics = suite.result()
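        # With evaluate_freq=2, a single update_state() call does not trigger
        # an evaluation, so result() still returns the initial zero values.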
self.assertAllEqual(metrics, {key: 0 for key in golden_metrics})
suite.update_state(y_true, y_pred)
metrics = suite.result()
#
for metric in metrics:
# The metrics do not match golden metrics because two batches were
# passed which actually modifies the final area under curve value.
self.assertNotEqual(metrics[metric], 0.0)
def test_coco_metric_graph_mode(self):
suite = BoxCOCOMetrics(bounding_box_format="xyxy", evaluate_freq=1)
y_true, y_pred, categories = load_samples(SAMPLE_FILE)
@tf.function()
def update_state(y_true, y_pred):
suite.update_state(y_true, y_pred)
@tf.function()
def result():
return suite.result()
metrics = result()
self.assertAllEqual(metrics, {key: 0 for key in golden_metrics})
update_state(y_true, y_pred)
metrics = result()
for metric in metrics:
self.assertNotEqual(metrics[metric], 0.0)
def test_coco_metric_suite_force_eval(self):
suite = BoxCOCOMetrics(bounding_box_format="xyxy", evaluate_freq=512)
y_true, y_pred, categories = load_samples(SAMPLE_FILE)
suite.update_state(y_true, y_pred)
metrics = suite.result()
self.assertAllEqual(metrics, {key: 0 for key in golden_metrics})
suite.update_state(y_true, y_pred)
metrics = suite.result(force=True)
for metric in metrics:
# The metrics do not match golden metrics because two batches were
# passed which actually modifies the final area under curve value.
self.assertNotEqual(metrics[metric], 0.0)
def test_name_parameter(self):
suite = BoxCOCOMetrics(
bounding_box_format="xyxy", evaluate_freq=1, name="coco_metrics"
)
y_true, y_pred, categories = load_samples(SAMPLE_FILE)
suite.update_state(y_true, y_pred)
metrics = suite.result()
for metric in golden_metrics:
self.assertAlmostEqual(
metrics["coco_metrics_" + metric], golden_metrics[metric]
)
def test_coco_metric_suite_ragged_prediction(self):
suite = BoxCOCOMetrics(bounding_box_format="xyxy", evaluate_freq=1)
ragged_bounding_boxes = {
# shape: (2, (2, 1), 4)
"boxes": tf.ragged.constant(
[
[[10, 10, 20, 20], [100, 100, 150, 150]], # small, medium
[[200, 200, 400, 400]], # large
],
ragged_rank=1,
dtype=tf.float32,
),
"classes": tf.ragged.constant(
[[0, 1], [2]],
ragged_rank=1,
dtype=tf.float32,
),
"confidence": tf.ragged.constant(
[[0.7, 0.8], [0.9]],
ragged_rank=1,
dtype=tf.float32,
),
}
different_ragged_bounding_boxes = {
# shape: (2, (2, 3), 4)
"boxes": tf.ragged.constant(
[
[[10, 10, 25, 25], [100, 105, 155, 155]],
[[200, 200, 450, 450], [1, 1, 5, 5], [50, 50, 300, 300]],
],
ragged_rank=1,
dtype=tf.float32,
),
"classes": tf.ragged.constant(
[[0, 1], [2, 3, 3]],
ragged_rank=1,
dtype=tf.float32,
),
"confidence": tf.ragged.constant(
[[0.7, 0.8], [0.9, 0.7, 0.7]],
ragged_rank=1,
dtype=tf.float32,
),
}
suite.update_state(
ragged_bounding_boxes,
bounding_box.to_dense(ragged_bounding_boxes),
)
metrics = suite.result()
for metric in metrics:
# The metrics will be all 1.0 because the predictions and ground
# truth values are identical.
self.assertEqual(metrics[metric], 1.0)
suite.reset_state()
suite.update_state(
ragged_bounding_boxes,
bounding_box.to_dense(different_ragged_bounding_boxes),
)
metrics = suite.result()
for metric in metrics:
# The metrics will not be 1.0 because the predictions and ground
# truth values are completely different.
self.assertNotEqual(metrics[metric], 1.0)
def test_coco_metric_suite_ragged_labels(self):
suite = BoxCOCOMetrics(bounding_box_format="xyxy", evaluate_freq=1)
ragged_bounding_boxes = {
# shape: (2, (2, 1), 4)
"boxes": tf.ragged.constant(
[
[[10, 10, 20, 20], [100, 100, 150, 150]], # small, medium
[[200, 200, 400, 400]], # large
],
ragged_rank=1,
dtype=tf.float32,
),
"classes": tf.ragged.constant(
[[0, 1], [2]],
ragged_rank=1,
dtype=tf.float32,
),
"confidence": tf.ragged.constant(
[[0.7, 0.8], [0.9]],
ragged_rank=1,
dtype=tf.float32,
),
}
different_ragged_bounding_boxes = {
# shape: (2, (2, 3), 4)
"boxes": tf.ragged.constant(
[
[[10, 10, 25, 25], [100, 105, 155, 155]],
[[200, 200, 450, 450], [1, 1, 5, 5], [50, 50, 300, 300]],
],
ragged_rank=1,
dtype=tf.float32,
),
"classes": tf.ragged.constant(
[[0, 1], [2, 3, 3]],
ragged_rank=1,
dtype=tf.float32,
),
"confidence": tf.ragged.constant(
[[0.7, 0.8], [0.9, 0.7, 0.7]],
ragged_rank=1,
dtype=tf.float32,
),
}
suite.update_state(ragged_bounding_boxes, ragged_bounding_boxes)
metrics = suite.result()
for metric in metrics:
# The metrics will be all 1.0 because the predictions and ground
# truth values are identical.
self.assertEqual(metrics[metric], 1.0)
suite.reset_state()
suite.update_state(
ragged_bounding_boxes, different_ragged_bounding_boxes
)
metrics = suite.result()
for metric in metrics:
# The metrics will not be 1.0 because the predictions and ground
# truth values are completely different.
self.assertNotEqual(metrics[metric], 1.0)
| keras-cv/keras_cv/metrics/object_detection/box_coco_metrics_test.py/0 | {
"file_path": "keras-cv/keras_cv/metrics/object_detection/box_coco_metrics_test.py",
"repo_id": "keras-cv",
"token_count": 4825
} | 77 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DenseNet model preset configurations."""
backbone_presets_no_weights = {
"densenet121": {
"metadata": {
"description": "DenseNet model with 121 layers.",
},
"kaggle_handle": "kaggle://keras/densenet/keras/densenet121/2",
},
"densenet169": {
"metadata": {
"description": "DenseNet model with 169 layers.",
},
"kaggle_handle": "kaggle://keras/densenet/keras/densenet169/2",
},
"densenet201": {
"metadata": {
"description": "DenseNet model with 201 layers.",
},
"kaggle_handle": "kaggle://keras/densenet/keras/densenet201/2",
},
}
backbone_presets_with_weights = {
"densenet121_imagenet": {
"metadata": {
"description": (
"DenseNet model with 121 layers. Trained on Imagenet 2012 "
"classification task."
),
},
"kaggle_handle": "kaggle://keras/densenet/keras/densenet121_imagenet/2",
},
"densenet169_imagenet": {
"metadata": {
"description": (
"DenseNet model with 169 layers. Trained on Imagenet 2012 "
"classification task."
),
},
"kaggle_handle": "kaggle://keras/densenet/keras/densenet169_imagenet/2",
},
"densenet201_imagenet": {
"metadata": {
"description": (
"DenseNet model with 201 layers. Trained on Imagenet 2012 "
"classification task."
),
},
"kaggle_handle": "kaggle://keras/densenet/keras/densenet201_imagenet/2",
},
}
backbone_presets = {
**backbone_presets_no_weights,
**backbone_presets_with_weights,
}
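# Example (illustrative): any preset name above can be passed to the backbone
# factory, e.g.
# `keras_cv.models.DenseNetBackbone.from_preset("densenet121_imagenet")`.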
| keras-cv/keras_cv/models/backbones/densenet/densenet_backbone_presets.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/densenet/densenet_backbone_presets.py",
"repo_id": "keras-cv",
"token_count": 1022
} | 78 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from keras_cv.api_export import keras_cv_export
from keras_cv.models.backbones.efficientnet_v2.efficientnet_v2_backbone import (
EfficientNetV2Backbone,
)
from keras_cv.models.backbones.efficientnet_v2.efficientnet_v2_backbone_presets import ( # noqa: E501
backbone_presets,
)
from keras_cv.utils.python_utils import classproperty
ALIAS_DOCSTRING = """Instantiates the {name} architecture.
Reference:
- [EfficientNetV2: Smaller Models and Faster Training](https://arxiv.org/abs/2104.00298)
(ICML 2021)
Args:
include_rescaling: bool, whether to rescale the inputs. If set
to `True`, inputs will be passed through a `Rescaling(1/255.0)`
layer.
input_shape: optional shape tuple, defaults to (None, None, 3).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
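    Example:
    ```python
    # A minimal usage sketch with a randomly initialized backbone.
    input_data = np.ones(shape=(8, 224, 224, 3))
    model = {name}Backbone()
    output = model(input_data)
    ```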
""" # noqa: E501
@keras_cv_export("keras_cv.models.EfficientNetV2SBackbone")
class EfficientNetV2SBackbone(EfficientNetV2Backbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return EfficientNetV2Backbone.from_preset("efficientnetv2_s", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {
"efficientnetv2_s_imagenet": copy.deepcopy(
backbone_presets["efficientnetv2_s_imagenet"]
),
}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return cls.presets
@keras_cv_export("keras_cv.models.EfficientNetV2MBackbone")
class EfficientNetV2MBackbone(EfficientNetV2Backbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return EfficientNetV2Backbone.from_preset("efficientnetv2_m", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return {}
@keras_cv_export("keras_cv.models.EfficientNetV2LBackbone")
class EfficientNetV2LBackbone(EfficientNetV2Backbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return EfficientNetV2Backbone.from_preset("efficientnetv2_l", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return {}
@keras_cv_export("keras_cv.models.EfficientNetV2B0Backbone")
class EfficientNetV2B0Backbone(EfficientNetV2Backbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return EfficientNetV2Backbone.from_preset("efficientnetv2_b0", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {
"efficientnetv2_b0_imagenet": copy.deepcopy(
backbone_presets["efficientnetv2_b0_imagenet"]
),
}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return cls.presets
@keras_cv_export("keras_cv.models.EfficientNetV2B1Backbone")
class EfficientNetV2B1Backbone(EfficientNetV2Backbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return EfficientNetV2Backbone.from_preset("efficientnetv2_b1", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {
"efficientnetv2_b1_imagenet": copy.deepcopy(
backbone_presets["efficientnetv2_b1_imagenet"]
),
}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return cls.presets
@keras_cv_export("keras_cv.models.EfficientNetV2B2Backbone")
class EfficientNetV2B2Backbone(EfficientNetV2Backbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return EfficientNetV2Backbone.from_preset("efficientnetv2_b2", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {
"efficientnetv2_b2_imagenet": copy.deepcopy(
backbone_presets["efficientnetv2_b2_imagenet"]
),
}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return cls.presets
@keras_cv_export("keras_cv.models.EfficientNetV2B3Backbone")
class EfficientNetV2B3Backbone(EfficientNetV2Backbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return EfficientNetV2Backbone.from_preset("efficientnetv2_b3", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return {}
setattr(
EfficientNetV2SBackbone,
"__doc__",
ALIAS_DOCSTRING.format(name="EfficientNetV2S"),
)
setattr(
EfficientNetV2MBackbone,
"__doc__",
ALIAS_DOCSTRING.format(name="EfficientNetV2M"),
)
setattr(
EfficientNetV2LBackbone,
"__doc__",
ALIAS_DOCSTRING.format(name="EfficientNetV2L"),
)
setattr(
EfficientNetV2B0Backbone,
"__doc__",
ALIAS_DOCSTRING.format(name="EfficientNetV2B0"),
)
setattr(
EfficientNetV2B1Backbone,
"__doc__",
ALIAS_DOCSTRING.format(name="EfficientNetV2B1"),
)
setattr(
EfficientNetV2B2Backbone,
"__doc__",
ALIAS_DOCSTRING.format(name="EfficientNetV2B2"),
)
setattr(
EfficientNetV2B3Backbone,
"__doc__",
ALIAS_DOCSTRING.format(name="EfficientNetV2B3"),
)
| keras-cv/keras_cv/models/backbones/efficientnet_v2/efficientnet_v2_aliases.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/efficientnet_v2/efficientnet_v2_aliases.py",
"repo_id": "keras-cv",
"token_count": 4141
} | 79 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import pytest
from absl.testing import parameterized
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.models.backbones.mobilenet_v3.mobilenet_v3_aliases import (
MobileNetV3SmallBackbone,
)
from keras_cv.models.backbones.mobilenet_v3.mobilenet_v3_backbone import (
MobileNetV3Backbone,
)
from keras_cv.tests.test_case import TestCase
from keras_cv.utils.train import get_feature_extractor
class MobileNetV3BackboneTest(TestCase):
def setUp(self):
self.input_batch = np.ones(shape=(2, 224, 224, 3))
def test_valid_call(self):
model = MobileNetV3SmallBackbone(
include_rescaling=False,
)
model(self.input_batch)
def test_valid_call_with_rescaling(self):
model = MobileNetV3SmallBackbone(
include_rescaling=True,
)
model(self.input_batch)
@pytest.mark.large # Saving is slow, so mark these large.
def test_saved_model(self):
model = MobileNetV3SmallBackbone()
model_output = model(self.input_batch)
save_path = os.path.join(
self.get_temp_dir(), "mobilenet_v3_backbone.keras"
)
model.save(save_path)
restored_model = keras.models.load_model(save_path)
# Check we got the real object back.
self.assertIsInstance(restored_model, MobileNetV3Backbone)
# Check that output matches.
restored_output = restored_model(self.input_batch)
self.assertAllClose(
ops.convert_to_numpy(model_output),
ops.convert_to_numpy(restored_output),
)
def test_feature_pyramid_inputs(self):
model = MobileNetV3SmallBackbone()
backbone_model = get_feature_extractor(
model,
model.pyramid_level_inputs.values(),
model.pyramid_level_inputs.keys(),
)
input_size = 256
inputs = keras.Input(shape=[input_size, input_size, 3])
outputs = backbone_model(inputs)
levels = ["P1", "P2", "P3", "P4", "P5"]
self.assertEquals(list(outputs.keys()), levels)
self.assertEquals(
outputs["P1"].shape,
(None, input_size // 2**1, input_size // 2**1, 16),
)
self.assertEquals(
outputs["P2"].shape,
(None, input_size // 2**2, input_size // 2**2, 16),
)
self.assertEquals(
outputs["P3"].shape,
(None, input_size // 2**3, input_size // 2**3, 24),
)
self.assertEquals(
outputs["P4"].shape,
(None, input_size // 2**4, input_size // 2**4, 48),
)
self.assertEquals(
outputs["P5"].shape,
(None, input_size // 2**5, input_size // 2**5, 96),
)
@parameterized.named_parameters(
("one_channel", 1),
("four_channels", 4),
)
def test_application_variable_input_channels(self, num_channels):
model = MobileNetV3SmallBackbone(
input_shape=(None, None, num_channels),
include_rescaling=False,
)
self.assertEqual(model.output_shape, (None, None, None, 576))
| keras-cv/keras_cv/models/backbones/mobilenet_v3/mobilenet_v3_backbone_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/mobilenet_v3/mobilenet_v3_backbone_test.py",
"repo_id": "keras-cv",
"token_count": 1656
} | 80 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from keras_cv.models.backbones.vit_det.vit_det_backbone import ViTDetBackbone
from keras_cv.models.backbones.vit_det.vit_det_backbone_presets import (
backbone_presets,
)
from keras_cv.utils.python_utils import classproperty
ALIAS_DOCSTRING = """VitDet{size}Backbone model.
Reference:
- [Detectron2](https://github.com/facebookresearch/detectron2)
- [Segment Anything paper](https://arxiv.org/abs/2304.02643)
- [Segment Anything GitHub](https://github.com/facebookresearch/segment-anything)
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](https://keras.io/guides/transfer_learning/).
Examples:
```python
input_data = np.ones(shape=(1, 1024, 1024, 3))
# Randomly initialized backbone
    model = ViTDet{size}Backbone()
output = model(input_data)
```
""" # noqa: E501
class ViTDetBBackbone(ViTDetBackbone):
def __new__(
cls,
**kwargs,
):
return ViTDetBackbone.from_preset("vitdet_base", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {
"vitdet_base_sa1b": copy.deepcopy(
backbone_presets["vitdet_base_sa1b"]
),
}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return cls.presets
class ViTDetLBackbone(ViTDetBackbone):
def __new__(
cls,
**kwargs,
):
return ViTDetBackbone.from_preset("vitdet_large", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {
"vitdet_large_sa1b": copy.deepcopy(
backbone_presets["vitdet_large_sa1b"]
),
}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return cls.presets
class ViTDetHBackbone(ViTDetBackbone):
def __new__(
cls,
**kwargs,
):
return ViTDetBackbone.from_preset("vitdet_huge", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {
"vitdet_huge_sa1b": copy.deepcopy(
backbone_presets["vitdet_huge_sa1b"]
),
}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return cls.presets
setattr(ViTDetBBackbone, "__doc__", ALIAS_DOCSTRING.format(size="B"))
setattr(ViTDetLBackbone, "__doc__", ALIAS_DOCSTRING.format(size="L"))
setattr(ViTDetHBackbone, "__doc__", ALIAS_DOCSTRING.format(size="H"))
| keras-cv/keras_cv/models/backbones/vit_det/vit_det_aliases.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/vit_det/vit_det_aliases.py",
"repo_id": "keras-cv",
"token_count": 1393
} | 81 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_nlp.layers import StartEndPacker
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.models.feature_extractor.clip.clip_tokenizer import CLIPTokenizer
@keras_cv_export("keras_cv.models.feature_extractors.CLIPProcessor")
class CLIPProcessor:
"""
CLIPProcessor is a utility class that provides functionality for processing
images and texts in the context of the CLIP (Contrastive Language-Image
Pretraining) model.
Args:
input_resolution (int): The resolution of input images.
        vocabulary (str): string or dict, maps tokens to integer ids. If it is
            a string, it should be the file path to a json file.
        merges: string or list, contains the merge rules. If it is a string,
            it should be the file path to the merge rules. The merge rule file
            should have one merge rule per line.
    Methods:
        process_images(images: Union[str, List[str]]): Loads and transforms
            one image path or a list of image paths, returning a batched image
            tensor.
        process_texts(texts: Union[str, List[str]], context_length: int = 77):
            Processes a single text or a list of texts, returning packed token
            sequences and padding masks.
    """
def __init__(self, input_resolution, vocabulary, merges, **kwargs):
self.input_resolution = input_resolution
self.vocabulary = vocabulary
self.merges = merges
self.image_transform = self.transform_image
self.tokenizer = CLIPTokenizer(
vocabulary=self.vocabulary,
merges=self.merges,
unsplittable_tokens=["</w>"],
)
self.packer = StartEndPacker(
start_value=self.tokenizer.token_to_id("<|startoftext|>"),
end_value=self.tokenizer.token_to_id("<|endoftext|>"),
pad_value=None,
sequence_length=77,
return_padding_mask=True,
)
def transform_image(self, image_path):
input_resolution = self.input_resolution
mean = ops.array([0.48145466, 0.4578275, 0.40821073])
std = ops.array([0.26862954, 0.26130258, 0.27577711])
image = keras.utils.load_img(image_path)
image = keras.utils.img_to_array(image)
image = (
ops.image.resize(
image,
(input_resolution, input_resolution),
interpolation="bicubic",
)
/ 255.0
)
central_fraction = input_resolution / image.shape[0]
width, height = image.shape[0], image.shape[1]
left = ops.cast((width - width * central_fraction) / 2, dtype="int32")
top = ops.cast((height - height * central_fraction) / 2, dtype="int32")
right = ops.cast((width + width * central_fraction) / 2, dtype="int32")
bottom = ops.cast(
(height + height * central_fraction) / 2, dtype="int32"
)
image = ops.slice(
image, [left, top, 0], [right - left, bottom - top, 3]
)
image = (image - mean) / std
return image
def process_images(self, images):
if isinstance(images, str):
images = [images]
        def process_image(image):
            if isinstance(image, str):
                return self.image_transform(image)
            # Non-string inputs are assumed to be already-loaded image
            # tensors/arrays and are passed through unchanged.
            return image
processed_images = list(map(process_image, images))
processed_images = ops.stack(processed_images)
return processed_images
def process_texts(self, texts, context_length: int = 77):
if isinstance(texts, str):
texts = [texts]
def pack_tokens(text):
return self.packer(
self.tokenizer(text),
sequence_length=context_length,
add_start_value=True,
add_end_value=True,
)
return pack_tokens(texts)
    def get_config(self):
        # `CLIPProcessor` is a plain Python class, so there is no parent
        # `get_config()` to extend here.
        config = {
            "input_resolution": self.input_resolution,
            "vocabulary": self.vocabulary,
            "merges": self.merges,
        }
        return config
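# Example usage (illustrative sketch only; the vocabulary/merges assets and
# image paths below are hypothetical placeholders that callers must supply):
#
#     processor = CLIPProcessor(
#         input_resolution=224,
#         vocabulary="path/to/vocab.json",
#         merges="path/to/merges.txt",
#     )
#     image_batch = processor.process_images(["cat.jpg", "dog.jpg"])
#     packed_tokens = processor.process_texts(
#         ["a photo of a cat", "a photo of a dog"]
#     )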
| keras-cv/keras_cv/models/feature_extractor/clip/clip_processor.py/0 | {
"file_path": "keras-cv/keras_cv/models/feature_extractor/clip/clip_processor.py",
"repo_id": "keras-cv",
"token_count": 2025
} | 82 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from absl import logging
from tensorflow import keras
from keras_cv import bounding_box
from keras_cv import layers as cv_layers
from keras_cv import models
from keras_cv.bounding_box.converters import _decode_deltas_to_boxes
from keras_cv.bounding_box.utils import _clip_boxes
from keras_cv.layers.object_detection.anchor_generator import AnchorGenerator
from keras_cv.layers.object_detection.box_matcher import BoxMatcher
from keras_cv.layers.object_detection.roi_align import _ROIAligner
from keras_cv.layers.object_detection.roi_generator import ROIGenerator
from keras_cv.layers.object_detection.roi_sampler import _ROISampler
from keras_cv.layers.object_detection.rpn_label_encoder import _RpnLabelEncoder
from keras_cv.models.object_detection import predict_utils
from keras_cv.models.object_detection.__internal__ import unpack_input
from keras_cv.utils.train import get_feature_extractor
BOX_VARIANCE = [0.1, 0.1, 0.2, 0.2]
class FeaturePyramid(keras.layers.Layer):
"""Builds the Feature Pyramid with the feature maps from the backbone."""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.conv_c2_1x1 = keras.layers.Conv2D(256, 1, 1, "same")
self.conv_c3_1x1 = keras.layers.Conv2D(256, 1, 1, "same")
self.conv_c4_1x1 = keras.layers.Conv2D(256, 1, 1, "same")
self.conv_c5_1x1 = keras.layers.Conv2D(256, 1, 1, "same")
self.conv_c2_3x3 = keras.layers.Conv2D(256, 3, 1, "same")
self.conv_c3_3x3 = keras.layers.Conv2D(256, 3, 1, "same")
self.conv_c4_3x3 = keras.layers.Conv2D(256, 3, 1, "same")
self.conv_c5_3x3 = keras.layers.Conv2D(256, 3, 1, "same")
self.conv_c6_3x3 = keras.layers.Conv2D(256, 3, 1, "same")
self.conv_c6_pool = keras.layers.MaxPool2D()
self.upsample_2x = keras.layers.UpSampling2D(2)
def call(self, inputs, training=None):
c2_output = inputs["P2"]
c3_output = inputs["P3"]
c4_output = inputs["P4"]
c5_output = inputs["P5"]
c6_output = self.conv_c6_pool(c5_output)
p6_output = c6_output
p5_output = self.conv_c5_1x1(c5_output)
p4_output = self.conv_c4_1x1(c4_output)
p3_output = self.conv_c3_1x1(c3_output)
p2_output = self.conv_c2_1x1(c2_output)
p4_output = p4_output + self.upsample_2x(p5_output)
p3_output = p3_output + self.upsample_2x(p4_output)
p2_output = p2_output + self.upsample_2x(p3_output)
p6_output = self.conv_c6_3x3(p6_output)
p5_output = self.conv_c5_3x3(p5_output)
p4_output = self.conv_c4_3x3(p4_output)
p3_output = self.conv_c3_3x3(p3_output)
p2_output = self.conv_c2_3x3(p2_output)
return {
"P2": p2_output,
"P3": p3_output,
"P4": p4_output,
"P5": p5_output,
"P6": p6_output,
}
def get_config(self):
config = {}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
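# Example shapes (sketch): for a 512x512 input image, a ResNet-style backbone
# yields P2..P5 feature maps at strides 4..32; the pyramid maps them all to
# 256 channels and adds a pooled P6 level. The channel counts below are
# illustrative placeholders.
#
#     fpn = FeaturePyramid()
#     feats = fpn({
#         "P2": tf.ones((1, 128, 128, 256)),
#         "P3": tf.ones((1, 64, 64, 512)),
#         "P4": tf.ones((1, 32, 32, 1024)),
#         "P5": tf.ones((1, 16, 16, 2048)),
#     })
#     # feats["P2"].shape == (1, 128, 128, 256)
#     # feats["P6"].shape == (1, 8, 8, 256)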
class RPNHead(keras.layers.Layer):
def __init__(
self,
num_anchors_per_location=3,
**kwargs,
):
super().__init__(**kwargs)
self.num_anchors = num_anchors_per_location
def build(self, input_shape):
if isinstance(input_shape, (dict, list, tuple)):
input_shape = tf.nest.flatten(input_shape)
input_shape = input_shape[0]
filters = input_shape[-1]
self.conv = keras.layers.Conv2D(
filters=filters,
kernel_size=3,
strides=1,
padding="same",
activation="relu",
kernel_initializer="truncated_normal",
)
self.objectness_logits = keras.layers.Conv2D(
filters=self.num_anchors * 1,
kernel_size=1,
strides=1,
padding="same",
kernel_initializer="truncated_normal",
)
self.anchor_deltas = keras.layers.Conv2D(
filters=self.num_anchors * 4,
kernel_size=1,
strides=1,
padding="same",
kernel_initializer="truncated_normal",
)
def call(self, feature_map, training=None):
def call_single_level(f_map):
batch_size = f_map.get_shape().as_list()[0] or tf.shape(f_map)[0]
# [BS, H, W, C]
t = self.conv(f_map)
# [BS, H, W, K]
rpn_scores = self.objectness_logits(t)
# [BS, H, W, K * 4]
rpn_boxes = self.anchor_deltas(t)
# [BS, H*W*K, 4]
rpn_boxes = tf.reshape(rpn_boxes, [batch_size, -1, 4])
# [BS, H*W*K, 1]
rpn_scores = tf.reshape(rpn_scores, [batch_size, -1, 1])
return rpn_boxes, rpn_scores
if not isinstance(feature_map, (dict, list, tuple)):
return call_single_level(feature_map)
elif isinstance(feature_map, (list, tuple)):
rpn_boxes = []
rpn_scores = []
for f_map in feature_map:
rpn_box, rpn_score = call_single_level(f_map)
rpn_boxes.append(rpn_box)
rpn_scores.append(rpn_score)
return rpn_boxes, rpn_scores
else:
rpn_boxes = {}
rpn_scores = {}
for lvl, f_map in feature_map.items():
rpn_box, rpn_score = call_single_level(f_map)
rpn_boxes[lvl] = rpn_box
rpn_scores[lvl] = rpn_score
return rpn_boxes, rpn_scores
def get_config(self):
config = {
"num_anchors_per_location": self.num_anchors,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
# R-CNN head with class-agnostic box regression (a single box delta per RoI)
class RCNNHead(keras.layers.Layer):
def __init__(
self,
num_classes,
conv_dims=[],
fc_dims=[1024, 1024],
**kwargs,
):
super().__init__(**kwargs)
self.num_classes = num_classes
self.conv_dims = conv_dims
self.fc_dims = fc_dims
self.convs = []
for conv_dim in conv_dims:
layer = keras.layers.Conv2D(
filters=conv_dim,
kernel_size=3,
strides=1,
padding="same",
activation="relu",
)
self.convs.append(layer)
self.fcs = []
for fc_dim in fc_dims:
layer = keras.layers.Dense(units=fc_dim, activation="relu")
self.fcs.append(layer)
self.box_pred = keras.layers.Dense(units=4)
self.cls_score = keras.layers.Dense(
units=num_classes + 1, activation="softmax"
)
def call(self, feature_map, training=None):
x = feature_map
for conv in self.convs:
x = conv(x)
for fc in self.fcs:
x = fc(x)
rcnn_boxes = self.box_pred(x)
rcnn_scores = self.cls_score(x)
return rcnn_boxes, rcnn_scores
def get_config(self):
config = {
"num_classes": self.num_classes,
"conv_dims": self.conv_dims,
"fc_dims": self.fc_dims,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
# TODO(tanzheny): add more configurations
@keras.utils.register_keras_serializable(package="keras_cv")
class FasterRCNN(keras.Model):
"""A Keras model implementing the FasterRCNN architecture.
    Implements the FasterRCNN architecture for object detection. The
    constructor requires `num_classes` and `bounding_box_format`, and
    optionally accepts a `backbone` (a ResNet50 backbone is used by default).
References:
- [FasterRCNN](https://arxiv.org/pdf/1506.01497.pdf)
Usage:
```python
    faster_rcnn = keras_cv.models.FasterRCNN(
num_classes=20,
bounding_box_format="xywh",
backbone=None,
)
```
Args:
num_classes: the number of classes in your dataset excluding the
background class. classes should be represented by integers in the
range [0, num_classes).
bounding_box_format: The format of bounding boxes of model output. Refer
[to the keras.io docs](https://keras.io/api/keras_cv/bounding_box/formats/)
for more details on supported bounding box formats.
backbone: Optional `keras.Model`. Must implement the
`pyramid_level_inputs` property with keys "P2", "P3", "P4", and "P5"
and layer names as values. If `None`, defaults to
`keras_cv.models.ResNet50Backbone()`.
        anchor_generator: (Optional) a `keras_cv.layers.AnchorGenerator`. It is
            used in the model to match ground truth boxes and labels with
            anchors, or with region proposals. By default it uses the sizes
            and ratios from the paper, which are optimized for image sizes
            between [640, 800]. Users should pass their own anchor generator
            if the input image size differs from the paper. For now, only
            anchor generators with per-level dict output are supported.
        label_encoder: (Optional) a keras.Layer that accepts an anchors Tensor,
            a bounding box Tensor and a bounding box class Tensor to its
            `call()` method, and returns RPN training targets: box and class
            targets as well as sample weights.
rcnn_head: (Optional) a `keras.layers.Layer` that takes input feature
map and returns a box delta prediction (in reference to rois) and
multi-class prediction (all foreground classes + one background
class). By default it uses the rcnn head from paper, which is 2 FC
layer with 1024 dimension, 1 box regressor and 1 softmax classifier.
prediction_decoder: (Optional) a `keras.layers.Layer` that takes input
box prediction and softmaxed score prediction, and returns NMSed box
prediction, NMSed softmaxed score prediction, NMSed class
prediction, and NMSed valid detection.
""" # noqa: E501
def __init__(
self,
num_classes,
bounding_box_format,
backbone=None,
anchor_generator=None,
label_encoder=None,
rcnn_head=None,
prediction_decoder=None,
**kwargs,
):
self.bounding_box_format = bounding_box_format
super().__init__(**kwargs)
scales = [2**x for x in [0]]
aspect_ratios = [0.5, 1.0, 2.0]
self.anchor_generator = anchor_generator or AnchorGenerator(
bounding_box_format="yxyx",
sizes={
"P2": 32.0,
"P3": 64.0,
"P4": 128.0,
"P5": 256.0,
"P6": 512.0,
},
scales=scales,
aspect_ratios=aspect_ratios,
strides={f"P{i}": 2**i for i in range(2, 7)},
clip_boxes=True,
)
self.rpn_head = RPNHead(
num_anchors_per_location=len(scales) * len(aspect_ratios)
)
self.roi_generator = ROIGenerator(
bounding_box_format="yxyx",
nms_score_threshold_train=float("-inf"),
nms_score_threshold_test=float("-inf"),
)
self.box_matcher = BoxMatcher(
thresholds=[0.0, 0.5], match_values=[-2, -1, 1]
)
self.roi_sampler = _ROISampler(
bounding_box_format="yxyx",
roi_matcher=self.box_matcher,
background_class=num_classes,
num_sampled_rois=512,
)
self.roi_pooler = _ROIAligner(bounding_box_format="yxyx")
self.rcnn_head = rcnn_head or RCNNHead(num_classes)
self.backbone = backbone or models.ResNet50Backbone()
extractor_levels = ["P2", "P3", "P4", "P5"]
extractor_layer_names = [
self.backbone.pyramid_level_inputs[i] for i in extractor_levels
]
self.feature_extractor = get_feature_extractor(
self.backbone, extractor_layer_names, extractor_levels
)
self.feature_pyramid = FeaturePyramid()
self.rpn_labeler = label_encoder or _RpnLabelEncoder(
anchor_format="yxyx",
ground_truth_box_format="yxyx",
positive_threshold=0.7,
negative_threshold=0.3,
samples_per_image=256,
positive_fraction=0.5,
box_variance=BOX_VARIANCE,
)
self._prediction_decoder = (
prediction_decoder
or cv_layers.MultiClassNonMaxSuppression(
bounding_box_format=bounding_box_format,
from_logits=False,
max_detections_per_class=10,
max_detections=10,
)
)
def _call_rpn(self, images, anchors, training=None):
image_shape = tf.shape(images[0])
backbone_outputs = self.feature_extractor(images, training=training)
feature_map = self.feature_pyramid(backbone_outputs, training=training)
# [BS, num_anchors, 4], [BS, num_anchors, 1]
rpn_boxes, rpn_scores = self.rpn_head(feature_map, training=training)
# the decoded format is center_xywh, convert to yxyx
decoded_rpn_boxes = _decode_deltas_to_boxes(
anchors=anchors,
boxes_delta=rpn_boxes,
anchor_format="yxyx",
box_format="yxyx",
variance=BOX_VARIANCE,
)
rois, _ = self.roi_generator(
decoded_rpn_boxes, rpn_scores, training=training
)
rois = _clip_boxes(rois, "yxyx", image_shape)
rpn_boxes = tf.concat(tf.nest.flatten(rpn_boxes), axis=1)
rpn_scores = tf.concat(tf.nest.flatten(rpn_scores), axis=1)
return rois, feature_map, rpn_boxes, rpn_scores
def _call_rcnn(self, rois, feature_map, training=None):
feature_map = self.roi_pooler(feature_map, rois)
# [BS, H*W*K, pool_shape*C]
feature_map = tf.reshape(
feature_map, tf.concat([tf.shape(rois)[:2], [-1]], axis=0)
)
# [BS, H*W*K, 4], [BS, H*W*K, num_classes + 1]
rcnn_box_pred, rcnn_cls_pred = self.rcnn_head(
feature_map, training=training
)
return rcnn_box_pred, rcnn_cls_pred
def call(self, images, training=None):
image_shape = tf.shape(images[0])
anchors = self.anchor_generator(image_shape=image_shape)
rois, feature_map, _, _ = self._call_rpn(
images, anchors, training=training
)
box_pred, cls_pred = self._call_rcnn(
rois, feature_map, training=training
)
if not training:
# box_pred is on "center_yxhw" format, convert to target format.
box_pred = _decode_deltas_to_boxes(
anchors=rois,
boxes_delta=box_pred,
anchor_format="yxyx",
box_format=self.bounding_box_format,
variance=[0.1, 0.1, 0.2, 0.2],
)
return box_pred, cls_pred
# TODO(tanzhenyu): Support compile with metrics.
def compile(
self,
box_loss=None,
classification_loss=None,
rpn_box_loss=None,
rpn_classification_loss=None,
weight_decay=0.0001,
loss=None,
**kwargs,
):
# TODO(tanzhenyu): Add metrics support once COCOMap issue is addressed.
# https://github.com/keras-team/keras-cv/issues/915
if "metrics" in kwargs.keys():
raise ValueError(
"`FasterRCNN` does not currently support the use of "
"`metrics` due to performance and distribution concerns. "
"Please use the `PyCOCOCallback` to evaluate COCO metrics."
)
if loss is not None:
raise ValueError(
"`FasterRCNN` does not accept a `loss` to `compile()`. "
"Instead, please pass `box_loss` and `classification_loss`. "
"`loss` will be ignored during training."
)
box_loss = _validate_and_get_loss(box_loss, "box_loss")
classification_loss = _validate_and_get_loss(
classification_loss, "classification_loss"
)
rpn_box_loss = _validate_and_get_loss(rpn_box_loss, "rpn_box_loss")
if rpn_classification_loss == "BinaryCrossentropy":
rpn_classification_loss = keras.losses.BinaryCrossentropy(
from_logits=True, reduction=keras.losses.Reduction.SUM
)
rpn_classification_loss = _validate_and_get_loss(
rpn_classification_loss, "rpn_cls_loss"
)
if not rpn_classification_loss.from_logits:
raise ValueError(
"`rpn_classification_loss` must come with `from_logits`=True"
)
self.rpn_box_loss = rpn_box_loss
self.rpn_cls_loss = rpn_classification_loss
self.box_loss = box_loss
self.cls_loss = classification_loss
self.weight_decay = weight_decay
losses = {
"box": self.box_loss,
"classification": self.cls_loss,
"rpn_box": self.rpn_box_loss,
"rpn_classification": self.rpn_cls_loss,
}
super().compile(loss=losses, **kwargs)
def compute_loss(self, images, boxes, classes, training):
local_batch = images.get_shape().as_list()[0]
if tf.distribute.has_strategy():
num_sync = tf.distribute.get_strategy().num_replicas_in_sync
else:
num_sync = 1
global_batch = local_batch * num_sync
anchors = self.anchor_generator(image_shape=tuple(images[0].shape))
(
rpn_box_targets,
rpn_box_weights,
rpn_cls_targets,
rpn_cls_weights,
) = self.rpn_labeler(
tf.concat(tf.nest.flatten(anchors), axis=0), boxes, classes
)
rpn_box_weights /= (
self.rpn_labeler.samples_per_image * global_batch * 0.25
)
rpn_cls_weights /= self.rpn_labeler.samples_per_image * global_batch
rois, feature_map, rpn_box_pred, rpn_cls_pred = self._call_rpn(
images, anchors, training=training
)
rois = tf.stop_gradient(rois)
(
rois,
box_targets,
box_weights,
cls_targets,
cls_weights,
) = self.roi_sampler(rois, boxes, classes)
box_weights /= self.roi_sampler.num_sampled_rois * global_batch * 0.25
cls_weights /= self.roi_sampler.num_sampled_rois * global_batch
box_pred, cls_pred = self._call_rcnn(
rois, feature_map, training=training
)
y_true = {
"rpn_box": rpn_box_targets,
"rpn_classification": rpn_cls_targets,
"box": box_targets,
"classification": cls_targets,
}
y_pred = {
"rpn_box": rpn_box_pred,
"rpn_classification": rpn_cls_pred,
"box": box_pred,
"classification": cls_pred,
}
weights = {
"rpn_box": rpn_box_weights,
"rpn_classification": rpn_cls_weights,
"box": box_weights,
"classification": cls_weights,
}
return super().compute_loss(
x=images, y=y_true, y_pred=y_pred, sample_weight=weights
)
def train_step(self, data):
images, y = unpack_input(data)
boxes = y["boxes"]
if len(y["classes"].shape) != 2:
raise ValueError(
"Expected 'classes' to be a tf.Tensor of rank 2. "
f"Got y['classes'].shape={y['classes'].shape}."
)
# TODO(tanzhenyu): remove this hack and perform broadcasting elsewhere
classes = tf.expand_dims(y["classes"], axis=-1)
with tf.GradientTape() as tape:
total_loss = self.compute_loss(
images, boxes, classes, training=True
)
reg_losses = []
if self.weight_decay:
for var in self.trainable_variables:
if "bn" not in var.name:
reg_losses.append(
self.weight_decay * tf.nn.l2_loss(var)
)
l2_loss = tf.math.add_n(reg_losses)
total_loss += l2_loss
self.optimizer.minimize(total_loss, self.trainable_variables, tape=tape)
return self.compute_metrics(images, {}, {}, sample_weight={})
def test_step(self, data):
images, y = unpack_input(data)
boxes = y["boxes"]
if len(y["classes"].shape) != 2:
raise ValueError(
"Expected 'classes' to be a tf.Tensor of rank 2. "
f"Got y['classes'].shape={y['classes'].shape}."
)
classes = tf.expand_dims(y["classes"], axis=-1)
self.compute_loss(images, boxes, classes, training=False)
return self.compute_metrics(images, {}, {}, sample_weight={})
def make_predict_function(self, force=False):
return predict_utils.make_predict_function(self, force=force)
@property
def prediction_decoder(self):
return self._prediction_decoder
@prediction_decoder.setter
def prediction_decoder(self, prediction_decoder):
self._prediction_decoder = prediction_decoder
self.make_predict_function(force=True)
def decode_predictions(self, predictions, images):
# no-op if default decoder is used.
box_pred, scores_pred = predictions
box_pred = bounding_box.convert_format(
box_pred,
source=self.bounding_box_format,
target=self.prediction_decoder.bounding_box_format,
images=images,
)
y_pred = self.prediction_decoder(box_pred, scores_pred[..., :-1])
box_pred = bounding_box.convert_format(
y_pred["boxes"],
source=self.prediction_decoder.bounding_box_format,
target=self.bounding_box_format,
images=images,
)
y_pred["boxes"] = box_pred
return y_pred
def get_config(self):
return {
"num_classes": self.num_classes,
"bounding_box_format": self.bounding_box_format,
"backbone": self.backbone,
"anchor_generator": self.anchor_generator,
"label_encoder": self.rpn_labeler,
"prediction_decoder": self._prediction_decoder,
"feature_pyramid": self.feature_pyramid,
"rcnn_head": self.rcnn_head,
}
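# Example (sketch): compiling the model with the four required losses. The
# concrete loss choices below are only illustrative; `_validate_and_get_loss`
# requires each to be a `keras.losses.Loss` with `SUM` reduction, and the RPN
# classification loss must be computed from logits.
#
#     model = FasterRCNN(num_classes=20, bounding_box_format="xywh")
#     model.compile(
#         box_loss=keras.losses.Huber(reduction=keras.losses.Reduction.SUM),
#         classification_loss=keras.losses.CategoricalCrossentropy(
#             reduction=keras.losses.Reduction.SUM
#         ),
#         rpn_box_loss=keras.losses.Huber(
#             reduction=keras.losses.Reduction.SUM
#         ),
#         rpn_classification_loss="BinaryCrossentropy",
#         optimizer=keras.optimizers.SGD(),
#     )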
def _validate_and_get_loss(loss, loss_name):
if isinstance(loss, str):
loss = keras.losses.get(loss)
if loss is None or not isinstance(loss, keras.losses.Loss):
raise ValueError(
f"FasterRCNN only accepts `keras.losses.Loss` for {loss_name}, "
f"got {loss}"
)
if loss.reduction != keras.losses.Reduction.SUM:
logging.info(
f"FasterRCNN only accepts `SUM` reduction, got {loss.reduction}, "
"automatically converted."
)
loss.reduction = keras.losses.Reduction.SUM
return loss
| keras-cv/keras_cv/models/legacy/object_detection/faster_rcnn/faster_rcnn.py/0 | {
"file_path": "keras-cv/keras_cv/models/legacy/object_detection/faster_rcnn/faster_rcnn.py",
"repo_id": "keras-cv",
"token_count": 11857
} | 83 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Label encoder for YOLOV8. This uses the TOOD Task Aligned Assigner approach.
See https://arxiv.org/abs/2108.07755 for more info, as well as a reference
implementation at https://github.com/fcjian/TOOD/blob/master/mmdet/core/bbox/assigners/task_aligned_assigner.py
""" # noqa: E501
import tensorflow as tf
from keras_cv import bounding_box
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.bounding_box.iou import compute_ciou
def is_anchor_center_within_box(anchors, gt_bboxes):
return ops.all(
ops.logical_and(
gt_bboxes[:, :, None, :2] < anchors,
gt_bboxes[:, :, None, 2:] > anchors,
),
axis=-1,
)
@keras_cv_export("keras_cv.models.yolov8.LabelEncoder")
class YOLOV8LabelEncoder(keras.layers.Layer):
"""
Encodes ground truth boxes to target boxes and class labels for training a
YOLOV8 model. This is an implementation of the Task-aligned sample
assignment scheme proposed in https://arxiv.org/abs/2108.07755.
Args:
num_classes: integer, the number of classes in the training dataset
max_anchor_matches: optional integer, the maximum number of anchors to
match with any given ground truth box. For example, when the default
10 is used, the 10 candidate anchor points with the highest
alignment score are matched with a ground truth box. If less than 10
candidate anchors exist, all candidates will be matched to the box.
alpha: float, a parameter to control the influence of class predictions
on the alignment score of an anchor box. This is the alpha parameter
in equation 9 of https://arxiv.org/pdf/2108.07755.pdf.
beta: float, a parameter to control the influence of box IOUs on the
alignment score of an anchor box. This is the beta parameter in
equation 9 of https://arxiv.org/pdf/2108.07755.pdf.
epsilon: float, a small number used for numerical stability in division
            (to avoid dividing by zero), and used as a threshold to eliminate very
small matches based on alignment scores of approximately zero.
"""
def __init__(
self,
num_classes,
max_anchor_matches=10,
alpha=0.5,
beta=6.0,
epsilon=1e-9,
**kwargs,
):
super().__init__(**kwargs)
self.max_anchor_matches = max_anchor_matches
self.num_classes = num_classes
self.alpha = alpha
self.beta = beta
self.epsilon = epsilon
def assign(
self, scores, decode_bboxes, anchors, gt_labels, gt_bboxes, gt_mask
):
"""Assigns ground-truth boxes to anchors.
Uses the task-aligned assignment strategy for matching ground truth
and anchor boxes based on prediction scores and IoU.
"""
num_anchors = anchors.shape[0]
        # Box scores are the predicted scores for each anchor, ground truth
        # box pair. Only the predicted score for the class of the GT box is
        # included. Shape: (B, num_gt_boxes, num_anchors) (after transpose).
bbox_scores = ops.take_along_axis(
scores,
ops.cast(ops.maximum(gt_labels[:, None, :], 0), "int32"),
axis=-1,
)
bbox_scores = ops.transpose(bbox_scores, (0, 2, 1))
# Overlaps are the IoUs of each predicted box and each GT box.
# Shape: (B, num_gt_boxes, num_anchors)
overlaps = compute_ciou(
ops.expand_dims(gt_bboxes, axis=2),
ops.expand_dims(decode_bboxes, axis=1),
bounding_box_format="xyxy",
)
# Alignment metrics are a combination of box scores and overlaps, per
# the task-aligned-assignment formula.
# Metrics are forced to 0 for boxes which have been masked in the GT
# input (e.g. due to padding)
alignment_metrics = ops.power(bbox_scores, self.alpha) * ops.power(
overlaps, self.beta
)
alignment_metrics = ops.where(gt_mask, alignment_metrics, 0)
# Only anchors which are inside of relevant GT boxes are considered
# for assignment.
# This is a boolean tensor of shape (B, num_gt_boxes, num_anchors)
matching_anchors_in_gt_boxes = is_anchor_center_within_box(
anchors, gt_bboxes
)
alignment_metrics = ops.where(
matching_anchors_in_gt_boxes, alignment_metrics, 0
)
# The top-k highest alignment metrics are used to select K candidate
# anchors for each GT box.
candidate_metrics, candidate_idxs = ops.top_k(
alignment_metrics, self.max_anchor_matches
)
candidate_idxs = ops.where(candidate_metrics > 0, candidate_idxs, -1)
        # We now compute a dense grid of anchors and GT boxes. This is useful
        # for picking a GT box when an anchor matches two boxes, as well as
        # returning to a dense format for a mask of which anchors have been
        # matched.
anchors_matched_gt_box = ops.zeros_like(overlaps)
for k in range(self.max_anchor_matches):
anchors_matched_gt_box += ops.one_hot(
candidate_idxs[:, :, k], num_anchors
)
# We zero-out the overlap for anchor, GT box pairs which don't match.
overlaps *= anchors_matched_gt_box
        # In cases where one anchor matches two GT boxes, we keep the GT box
        # with the highest overlap.
gt_box_matches_per_anchor = ops.argmax(overlaps, axis=1)
gt_box_matches_per_anchor_mask = ops.max(overlaps, axis=1) > 0
# TODO(ianstenbit): Once ops.take_along_axis supports -1 in Torch,
# replace gt_box_matches_per_anchor with
# ops.where(
# ops.max(overlaps, axis=1) > 0, ops.argmax(overlaps, axis=1), -1
# )
# and get rid of the manual masking
gt_box_matches_per_anchor = ops.cast(gt_box_matches_per_anchor, "int32")
# We select the GT boxes and labels that correspond to anchor matches.
bbox_labels = ops.take_along_axis(
gt_bboxes, gt_box_matches_per_anchor[:, :, None], axis=1
)
bbox_labels = ops.where(
gt_box_matches_per_anchor_mask[:, :, None], bbox_labels, -1
)
class_labels = ops.take_along_axis(
gt_labels, gt_box_matches_per_anchor, axis=1
)
class_labels = ops.where(
gt_box_matches_per_anchor_mask, class_labels, -1
)
class_labels = ops.one_hot(
ops.cast(class_labels, "int32"), self.num_classes
)
# Finally, we normalize an anchor's class labels based on the relative
# strength of the anchors match with the corresponding GT box.
alignment_metrics *= anchors_matched_gt_box
max_alignment_per_gt_box = ops.max(
alignment_metrics, axis=-1, keepdims=True
)
max_overlap_per_gt_box = ops.max(overlaps, axis=-1, keepdims=True)
normalized_alignment_metrics = ops.max(
alignment_metrics
* max_overlap_per_gt_box
/ (max_alignment_per_gt_box + self.epsilon),
axis=-2,
)
class_labels *= normalized_alignment_metrics[:, :, None]
# On TF backend, the final "4" becomes a dynamic shape so we include
# this to force it to a static shape of 4. This does not actually
# reshape the Tensor.
bbox_labels = ops.reshape(bbox_labels, (-1, num_anchors, 4))
return (
ops.stop_gradient(bbox_labels),
ops.stop_gradient(class_labels),
ops.stop_gradient(
ops.cast(gt_box_matches_per_anchor > -1, "float32")
),
)
def call(
self, scores, decode_bboxes, anchors, gt_labels, gt_bboxes, gt_mask
):
"""Computes target boxes and classes for anchors.
Args:
scores: a Float Tensor of shape (batch_size, num_anchors,
num_classes) representing predicted class scores for each
anchor.
decode_bboxes: a Float Tensor of shape (batch_size, num_anchors, 4)
representing predicted boxes for each anchor.
anchors: a Float Tensor of shape (batch_size, num_anchors, 2)
representing the xy coordinates of the center of each anchor.
gt_labels: a Float Tensor of shape (batch_size, num_gt_boxes)
representing the classes of ground truth boxes.
gt_bboxes: a Float Tensor of shape (batch_size, num_gt_boxes, 4)
representing the ground truth bounding boxes in xyxy format.
gt_mask: A Boolean Tensor of shape (batch_size, num_gt_boxes)
representing whether a box in `gt_bboxes` is a real box or a
non-box that exists due to padding.
Returns:
A tuple of the following:
- A Float Tensor of shape (batch_size, num_anchors, 4)
representing box targets for the model.
- A Float Tensor of shape (batch_size, num_anchors, num_classes)
representing class targets for the model.
- A Boolean Tensor of shape (batch_size, num_anchors)
representing whether each anchor was a match with a ground
truth box. Anchors that didn't match with a ground truth
box should be excluded from both class and box losses.
"""
if isinstance(gt_bboxes, tf.RaggedTensor):
dense_bounding_boxes = bounding_box.to_dense(
{"boxes": gt_bboxes, "classes": gt_labels},
)
gt_bboxes = dense_bounding_boxes["boxes"]
gt_labels = dense_bounding_boxes["classes"]
if isinstance(gt_mask, tf.RaggedTensor):
gt_mask = gt_mask.to_tensor()
max_num_boxes = ops.shape(gt_bboxes)[1]
# If there are no GT boxes in the batch, we short-circuit and return
# empty targets to avoid NaNs.
return ops.cond(
ops.array(max_num_boxes > 0),
lambda: self.assign(
scores, decode_bboxes, anchors, gt_labels, gt_bboxes, gt_mask
),
lambda: (
ops.zeros_like(decode_bboxes),
ops.zeros_like(scores),
ops.zeros_like(scores[..., 0]),
),
)
def count_params(self):
# The label encoder has no weights, so we short-circuit the weight
# counting to avoid having to `build` this layer unnecessarily.
return 0
def get_config(self):
config = {
"max_anchor_matches": self.max_anchor_matches,
"num_classes": self.num_classes,
"alpha": self.alpha,
"beta": self.beta,
"epsilon": self.epsilon,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
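# Example shapes (sketch): every tensor below is a placeholder. For a batch of
# 2 images with 8400 anchors, 20 classes and up to 30 padded GT boxes, the
# encoder returns dense per-anchor box targets, class targets and a match mask.
#
#     encoder = YOLOV8LabelEncoder(num_classes=20)
#     box_targets, class_targets, anchor_mask = encoder(
#         scores,         # (2, 8400, 20) predicted class scores
#         decode_bboxes,  # (2, 8400, 4) decoded boxes in "xyxy" format
#         anchors,        # (8400, 2) anchor center coordinates
#         gt_labels,      # (2, 30) ground truth class ids
#         gt_bboxes,      # (2, 30, 4) ground truth boxes in "xyxy" format
#         gt_mask,        # (2, 30) True for real (non-padding) boxes
#     )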
| keras-cv/keras_cv/models/object_detection/yolo_v8/yolo_v8_label_encoder.py/0 | {
"file_path": "keras-cv/keras_cv/models/object_detection/yolo_v8/yolo_v8_label_encoder.py",
"repo_id": "keras-cv",
"token_count": 5092
} | 84 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.models.stable_diffusion.padded_conv2d import PaddedConv2D
@keras_cv_export("keras_cv.models.stable_diffusion.DiffusionModel")
class DiffusionModel(keras.Model):
def __init__(
self,
img_height,
img_width,
max_text_length,
name=None,
download_weights=True,
):
context = keras.layers.Input((max_text_length, 768), name="context")
t_embed_input = keras.layers.Input((320,), name="timestep_embedding")
latent = keras.layers.Input(
(img_height // 8, img_width // 8, 4), name="latent"
)
t_emb = keras.layers.Dense(1280)(t_embed_input)
t_emb = keras.layers.Activation("swish")(t_emb)
t_emb = keras.layers.Dense(1280)(t_emb)
# Downsampling flow
outputs = []
x = PaddedConv2D(320, kernel_size=3, padding=1)(latent)
outputs.append(x)
for _ in range(2):
x = ResBlock(320)([x, t_emb])
x = SpatialTransformer(8, 40, fully_connected=False)([x, context])
outputs.append(x)
x = PaddedConv2D(320, 3, strides=2, padding=1)(x) # Downsample 2x
outputs.append(x)
for _ in range(2):
x = ResBlock(640)([x, t_emb])
x = SpatialTransformer(8, 80, fully_connected=False)([x, context])
outputs.append(x)
x = PaddedConv2D(640, 3, strides=2, padding=1)(x) # Downsample 2x
outputs.append(x)
for _ in range(2):
x = ResBlock(1280)([x, t_emb])
x = SpatialTransformer(8, 160, fully_connected=False)([x, context])
outputs.append(x)
x = PaddedConv2D(1280, 3, strides=2, padding=1)(x) # Downsample 2x
outputs.append(x)
for _ in range(2):
x = ResBlock(1280)([x, t_emb])
outputs.append(x)
# Middle flow
x = ResBlock(1280)([x, t_emb])
x = SpatialTransformer(8, 160, fully_connected=False)([x, context])
x = ResBlock(1280)([x, t_emb])
# Upsampling flow
for _ in range(3):
x = keras.layers.Concatenate()([x, outputs.pop()])
x = ResBlock(1280)([x, t_emb])
x = Upsample(1280)(x)
for _ in range(3):
x = keras.layers.Concatenate()([x, outputs.pop()])
x = ResBlock(1280)([x, t_emb])
x = SpatialTransformer(8, 160, fully_connected=False)([x, context])
x = Upsample(1280)(x)
for _ in range(3):
x = keras.layers.Concatenate()([x, outputs.pop()])
x = ResBlock(640)([x, t_emb])
x = SpatialTransformer(8, 80, fully_connected=False)([x, context])
x = Upsample(640)(x)
for _ in range(3):
x = keras.layers.Concatenate()([x, outputs.pop()])
x = ResBlock(320)([x, t_emb])
x = SpatialTransformer(8, 40, fully_connected=False)([x, context])
# Exit flow
x = keras.layers.GroupNormalization(epsilon=1e-5)(x)
x = keras.layers.Activation("swish")(x)
output = PaddedConv2D(4, kernel_size=3, padding=1)(x)
super().__init__([latent, t_embed_input, context], output, name=name)
if download_weights:
diffusion_model_weights_fpath = keras.utils.get_file(
origin="https://huggingface.co/fchollet/stable-diffusion/resolve/main/kcv_diffusion_model.h5", # noqa: E501
file_hash="8799ff9763de13d7f30a683d653018e114ed24a6a819667da4f5ee10f9e805fe", # noqa: E501
)
self.load_weights(diffusion_model_weights_fpath)
class DiffusionModelV2(keras.Model):
def __init__(
self,
img_height,
img_width,
max_text_length,
name=None,
download_weights=True,
):
context = keras.layers.Input((max_text_length, 1024), name="context")
t_embed_input = keras.layers.Input((320,), name="timestep_embedding")
latent = keras.layers.Input(
(img_height // 8, img_width // 8, 4), name="latent"
)
t_emb = keras.layers.Dense(1280)(t_embed_input)
t_emb = keras.layers.Activation("swish")(t_emb)
t_emb = keras.layers.Dense(1280)(t_emb)
# Downsampling flow
outputs = []
x = PaddedConv2D(320, kernel_size=3, padding=1)(latent)
outputs.append(x)
for _ in range(2):
x = ResBlock(320)([x, t_emb])
x = SpatialTransformer(5, 64, fully_connected=True)([x, context])
outputs.append(x)
x = PaddedConv2D(320, 3, strides=2, padding=1)(x) # Downsample 2x
outputs.append(x)
for _ in range(2):
x = ResBlock(640)([x, t_emb])
x = SpatialTransformer(10, 64, fully_connected=True)([x, context])
outputs.append(x)
x = PaddedConv2D(640, 3, strides=2, padding=1)(x) # Downsample 2x
outputs.append(x)
for _ in range(2):
x = ResBlock(1280)([x, t_emb])
x = SpatialTransformer(20, 64, fully_connected=True)([x, context])
outputs.append(x)
x = PaddedConv2D(1280, 3, strides=2, padding=1)(x) # Downsample 2x
outputs.append(x)
for _ in range(2):
x = ResBlock(1280)([x, t_emb])
outputs.append(x)
# Middle flow
x = ResBlock(1280)([x, t_emb])
x = SpatialTransformer(20, 64, fully_connected=True)([x, context])
x = ResBlock(1280)([x, t_emb])
# Upsampling flow
for _ in range(3):
x = keras.layers.Concatenate()([x, outputs.pop()])
x = ResBlock(1280)([x, t_emb])
x = Upsample(1280)(x)
for _ in range(3):
x = keras.layers.Concatenate()([x, outputs.pop()])
x = ResBlock(1280)([x, t_emb])
x = SpatialTransformer(20, 64, fully_connected=True)([x, context])
x = Upsample(1280)(x)
for _ in range(3):
x = keras.layers.Concatenate()([x, outputs.pop()])
x = ResBlock(640)([x, t_emb])
x = SpatialTransformer(10, 64, fully_connected=True)([x, context])
x = Upsample(640)(x)
for _ in range(3):
x = keras.layers.Concatenate()([x, outputs.pop()])
x = ResBlock(320)([x, t_emb])
x = SpatialTransformer(5, 64, fully_connected=True)([x, context])
# Exit flow
x = keras.layers.GroupNormalization(epsilon=1e-5)(x)
x = keras.layers.Activation("swish")(x)
output = PaddedConv2D(4, kernel_size=3, padding=1)(x)
super().__init__([latent, t_embed_input, context], output, name=name)
if download_weights:
diffusion_model_weights_fpath = keras.utils.get_file(
origin="https://huggingface.co/ianstenbit/keras-sd2.1/resolve/main/diffusion_model_v2_1.h5", # noqa: E501
file_hash="c31730e91111f98fe0e2dbde4475d381b5287ebb9672b1821796146a25c5132d", # noqa: E501
)
self.load_weights(diffusion_model_weights_fpath)
class ResBlock(keras.layers.Layer):
def __init__(self, output_dim, **kwargs):
super().__init__(**kwargs)
self.output_dim = output_dim
self.entry_flow = [
keras.layers.GroupNormalization(epsilon=1e-5),
keras.layers.Activation("swish"),
PaddedConv2D(output_dim, 3, padding=1),
]
self.embedding_flow = [
keras.layers.Activation("swish"),
keras.layers.Dense(output_dim),
]
self.exit_flow = [
keras.layers.GroupNormalization(epsilon=1e-5),
keras.layers.Activation("swish"),
PaddedConv2D(output_dim, 3, padding=1),
]
def build(self, input_shape):
if input_shape[0][-1] != self.output_dim:
self.residual_projection = PaddedConv2D(self.output_dim, 1)
else:
self.residual_projection = lambda x: x
def call(self, inputs):
inputs, embeddings = inputs
x = inputs
for layer in self.entry_flow:
x = layer(x)
for layer in self.embedding_flow:
embeddings = layer(embeddings)
x = x + embeddings[:, None, None]
for layer in self.exit_flow:
x = layer(x)
return x + self.residual_projection(inputs)
class SpatialTransformer(keras.layers.Layer):
def __init__(self, num_heads, head_size, fully_connected=False, **kwargs):
super().__init__(**kwargs)
self.norm = keras.layers.GroupNormalization(epsilon=1e-5)
channels = num_heads * head_size
if fully_connected:
self.proj1 = keras.layers.Dense(num_heads * head_size)
else:
self.proj1 = PaddedConv2D(num_heads * head_size, 1)
self.transformer_block = BasicTransformerBlock(
channels, num_heads, head_size
)
if fully_connected:
self.proj2 = keras.layers.Dense(channels)
else:
self.proj2 = PaddedConv2D(channels, 1)
def call(self, inputs):
inputs, context = inputs
_, h, w, c = inputs.shape
x = self.norm(inputs)
x = self.proj1(x)
x = ops.reshape(x, (-1, h * w, c))
x = self.transformer_block([x, context])
x = ops.reshape(x, (-1, h, w, c))
return self.proj2(x) + inputs
class BasicTransformerBlock(keras.layers.Layer):
def __init__(self, dim, num_heads, head_size, **kwargs):
super().__init__(**kwargs)
self.norm1 = keras.layers.LayerNormalization(epsilon=1e-5)
self.attn1 = CrossAttention(num_heads, head_size)
self.norm2 = keras.layers.LayerNormalization(epsilon=1e-5)
self.attn2 = CrossAttention(num_heads, head_size)
self.norm3 = keras.layers.LayerNormalization(epsilon=1e-5)
self.geglu = GEGLU(dim * 4)
self.dense = keras.layers.Dense(dim)
def call(self, inputs):
inputs, context = inputs
x = self.attn1(self.norm1(inputs), context=None) + inputs
x = self.attn2(self.norm2(x), context=context) + x
return self.dense(self.geglu(self.norm3(x))) + x
class CrossAttention(keras.layers.Layer):
def __init__(self, num_heads, head_size, **kwargs):
super().__init__(**kwargs)
self.to_q = keras.layers.Dense(num_heads * head_size, use_bias=False)
self.to_k = keras.layers.Dense(num_heads * head_size, use_bias=False)
self.to_v = keras.layers.Dense(num_heads * head_size, use_bias=False)
self.scale = head_size**-0.5
self.num_heads = num_heads
self.head_size = head_size
self.out_proj = keras.layers.Dense(num_heads * head_size)
def call(self, inputs, context=None):
if context is None:
context = inputs
q, k, v = self.to_q(inputs), self.to_k(context), self.to_v(context)
q = ops.reshape(
q, (-1, inputs.shape[1], self.num_heads, self.head_size)
)
k = ops.reshape(
k, (-1, context.shape[1], self.num_heads, self.head_size)
)
v = ops.reshape(
v, (-1, context.shape[1], self.num_heads, self.head_size)
)
q = ops.transpose(q, (0, 2, 1, 3)) # (bs, num_heads, time, head_size)
k = ops.transpose(k, (0, 2, 3, 1)) # (bs, num_heads, head_size, time)
v = ops.transpose(v, (0, 2, 1, 3)) # (bs, num_heads, time, head_size)
score = td_dot(q, k) * self.scale
weights = keras.activations.softmax(
score
) # (bs, num_heads, time, time)
attn = td_dot(weights, v)
attn = ops.transpose(
attn, (0, 2, 1, 3)
) # (bs, time, num_heads, head_size)
out = ops.reshape(
attn, (-1, inputs.shape[1], self.num_heads * self.head_size)
)
return self.out_proj(out)
class Upsample(keras.layers.Layer):
def __init__(self, channels, **kwargs):
super().__init__(**kwargs)
self.ups = keras.layers.UpSampling2D(2)
self.conv = PaddedConv2D(channels, 3, padding=1)
def call(self, inputs):
return self.conv(self.ups(inputs))
class GEGLU(keras.layers.Layer):
def __init__(self, output_dim, **kwargs):
super().__init__(**kwargs)
self.output_dim = output_dim
self.dense = keras.layers.Dense(output_dim * 2)
def call(self, inputs):
x = self.dense(inputs)
x, gate = x[..., : self.output_dim], x[..., self.output_dim :]
tanh_res = keras.activations.tanh(
gate * 0.7978845608 * (1 + 0.044715 * (gate**2))
)
return x * 0.5 * gate * (1 + tanh_res)
def td_dot(a, b):
aa = ops.reshape(a, (-1, a.shape[2], a.shape[3]))
bb = ops.reshape(b, (-1, b.shape[2], b.shape[3]))
cc = keras.layers.Dot(axes=(2, 1))([aa, bb])
return ops.reshape(cc, (-1, a.shape[1], cc.shape[1], cc.shape[2]))
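# Example (sketch): instantiating the UNet without downloading pretrained
# weights and running one denoising step on dummy inputs. Shapes follow the
# Input layers declared above; the values are placeholders.
#
#     unet = DiffusionModel(
#         img_height=512,
#         img_width=512,
#         max_text_length=77,
#         download_weights=False,
#     )
#     latent = ops.ones((1, 64, 64, 4))
#     timestep_embedding = ops.ones((1, 320))
#     context = ops.ones((1, 77, 768))
#     noise_pred = unet([latent, timestep_embedding, context])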
| keras-cv/keras_cv/models/stable_diffusion/diffusion_model.py/0 | {
"file_path": "keras-cv/keras_cv/models/stable_diffusion/diffusion_model.py",
"repo_id": "keras-cv",
"token_count": 6703
} | 85 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import pytest
import tensorflow as tf
from absl.testing import parameterized
from keras_cv import point_cloud
from keras_cv.tests.test_case import TestCase
class AngleTest(TestCase):
def test_wrap_angle_radians(self):
self.assertAllClose(
-np.pi + 0.1, point_cloud.wrap_angle_radians(np.pi + 0.1)
)
self.assertAllClose(0.0, point_cloud.wrap_angle_radians(2 * np.pi))
class Boxes3DTestCase(TestCase):
def test_convert_center_to_corners(self):
boxes = tf.constant(
[
[[1, 2, 3, 4, 3, 6, 0], [1, 2, 3, 4, 3, 6, 0]],
[
[1, 2, 3, 4, 3, 6, np.pi / 2.0],
[1, 2, 3, 4, 3, 6, np.pi / 2.0],
],
]
)
corners = point_cloud._center_xyzWHD_to_corner_xyz(boxes)
self.assertEqual((2, 2, 8, 3), corners.shape)
for i in [0, 1]:
self.assertAllClose(-1, np.min(corners[0, i, :, 0]))
self.assertAllClose(3, np.max(corners[0, i, :, 0]))
self.assertAllClose(0.5, np.min(corners[0, i, :, 1]))
self.assertAllClose(3.5, np.max(corners[0, i, :, 1]))
self.assertAllClose(0, np.min(corners[0, i, :, 2]))
self.assertAllClose(6, np.max(corners[0, i, :, 2]))
for i in [0, 1]:
self.assertAllClose(-0.5, np.min(corners[1, i, :, 0]))
self.assertAllClose(2.5, np.max(corners[1, i, :, 0]))
self.assertAllClose(0.0, np.min(corners[1, i, :, 1]))
self.assertAllClose(4.0, np.max(corners[1, i, :, 1]))
self.assertAllClose(0, np.min(corners[1, i, :, 2]))
self.assertAllClose(6, np.max(corners[1, i, :, 2]))
def test_within_box2d(self):
boxes = np.array(
[[[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]]], dtype="float32"
)
points = np.array(
[
[-0.5, -0.5],
[0.5, -0.5],
[1.5, -0.5],
[1.5, 0.5],
[1.5, 1.5],
[0.5, 1.5],
[-0.5, 1.5],
[-0.5, 0.5],
[1.0, 1.0],
[0.5, 0.5],
],
dtype="float32",
)
is_inside = point_cloud.is_within_box2d(points, boxes)
expected = [[False]] * 8 + [[True]] * 2
self.assertAllEqual(expected, is_inside)
def test_within_zero_box2d(self):
bbox = np.array(
[[[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]]], dtype="float32"
)
points = np.array(
[
[-0.5, -0.5],
[0.5, -0.5],
[1.5, -0.5],
[1.5, 0.5],
[1.5, 1.5],
[0.5, 1.5],
[-0.5, 1.5],
[-0.5, 0.5],
[1.0, 1.0],
[0.5, 0.5],
],
dtype="float32",
)
is_inside = point_cloud.is_within_box2d(points, bbox)
expected = [[False]] * 10
self.assertAllEqual(expected, is_inside)
def test_is_on_lefthand_side(self):
v1 = np.array([[0.0, 0.0]], dtype="float32")
v2 = np.array([[1.0, 0.0]], dtype="float32")
p = np.array([[0.5, 0.5], [-1.0, -3], [-1.0, 1.0]], dtype="float32")
res = point_cloud._is_on_lefthand_side(p, v1, v2)
self.assertAllEqual([[True, False, True]], res)
res = point_cloud._is_on_lefthand_side(v1, v1, v2)
self.assertAllEqual([[True]], res)
res = point_cloud._is_on_lefthand_side(v2, v1, v2)
self.assertAllEqual([[True]], res)
@parameterized.named_parameters(
("without_rotation", 0.0),
("with_rotation_1_rad", 1.0),
("with_rotation_2_rad", 2.0),
("with_rotation_3_rad", 3.0),
)
def test_box_area(self, angle):
boxes = np.array(
[
[[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]],
[[0.0, 0.0], [2.0, 0.0], [2.0, 1.0], [0.0, 1.0]],
[[0.0, 0.0], [2.0, 0.0], [2.0, 2.0], [0.0, 2.0]],
],
dtype="float32",
)
expected = [[1.0], [2.0], [4.0]]
def _rotate(bbox, theta):
rotation_matrix = tf.reshape(
[tf.cos(theta), -tf.sin(theta), tf.sin(theta), tf.cos(theta)],
shape=(2, 2),
)
return tf.matmul(bbox, rotation_matrix)
rotated_bboxes = _rotate(boxes, angle)
res = point_cloud._box_area(rotated_bboxes)
self.assertAllClose(expected, res)
def test_within_box3d(self):
num_points, num_boxes = 19, 4
# rotate the first box by pi / 2 so dim_x and dim_y are swapped.
# The last box is a cube rotated by 45 degrees.
bboxes = np.array(
[
[1.0, 2.0, 3.0, 6.0, 0.4, 6.0, np.pi / 2],
[4.0, 5.0, 6.0, 7.0, 0.8, 7.0, 0.0],
[0.4, 0.3, 0.2, 0.1, 0.1, 0.2, 0.0],
[-10.0, -10.0, -10.0, 3.0, 3.0, 3.0, np.pi / 4],
],
dtype="float32",
)
points = np.array(
[
[1.0, 2.0, 3.0], # box 0 (centroid)
[0.8, 2.0, 3.0], # box 0 (below x)
[1.1, 2.0, 3.0], # box 0 (above x)
[1.3, 2.0, 3.0], # box 0 (too far x)
[0.7, 2.0, 3.0], # box 0 (too far x)
[4.0, 5.0, 6.0], # box 1 (centroid)
[4.0, 4.6, 6.0], # box 1 (below y)
[4.0, 5.4, 6.0], # box 1 (above y)
[4.0, 4.5, 6.0], # box 1 (too far y)
[4.0, 5.5, 6.0], # box 1 (too far y)
[0.4, 0.3, 0.2], # box 2 (centroid)
[0.4, 0.3, 0.1], # box 2 (below z)
[0.4, 0.3, 0.3], # box 2 (above z)
[0.4, 0.3, 0.0], # box 2 (too far z)
[0.4, 0.3, 0.4], # box 2 (too far z)
[5.0, 7.0, 8.0], # none
[1.0, 5.0, 3.6], # box0, box1
[-11.6, -10.0, -10.0], # box3 (rotated corner point).
[-11.4, -11.4, -10.0], # not in box3, would be if not rotated.
],
dtype="float32",
)
expected_is_inside = np.array(
[
[True, False, False, False],
[True, False, False, False],
[True, False, False, False],
[False, False, False, False],
[False, False, False, False],
[False, True, False, False],
[False, True, False, False],
[False, True, False, False],
[False, False, False, False],
[False, False, False, False],
[False, False, True, False],
[False, False, True, False],
[False, False, True, False],
[False, False, False, False],
[False, False, False, False],
[False, False, False, False],
[True, True, False, False],
[False, False, False, True],
[False, False, False, False],
]
)
assert points.shape[0] == num_points
assert bboxes.shape[0] == num_boxes
assert expected_is_inside.shape[0] == num_points
assert expected_is_inside.shape[1] == num_boxes
is_inside = point_cloud.is_within_box3d(points, bboxes)
self.assertAllEqual([num_points, num_boxes], is_inside.shape)
self.assertAllEqual(expected_is_inside, is_inside)
# Add a batch dimension to the data and see that it still works
# as expected.
batch_size = 3
points = tf.tile(points[tf.newaxis, ...], [batch_size, 1, 1])
bboxes = tf.tile(bboxes[tf.newaxis, ...], [batch_size, 1, 1])
is_inside = point_cloud.is_within_box3d(points, bboxes)
self.assertAllEqual(
[batch_size, num_points, num_boxes], is_inside.shape
)
for batch_idx in range(batch_size):
self.assertAllEqual(expected_is_inside, is_inside[batch_idx])
def testCoordinateTransform(self):
# This is a validated test case from a real scene.
#
        # A single point with shape [1, 1, 3].
point = np.array(
[[[5736.94580078, 1264.85168457, 45.0271225]]], dtype="float32"
)
# Replicate the point to test broadcasting behavior.
replicated_points = tf.tile(point, [2, 4, 1])
# Pose of the car (x, y, z, yaw, roll, pitch).
#
# We negate the translations so that the coordinates are translated
# such that the car is at the origin.
pose = np.array(
[
-5728.77148438,
-1264.42236328,
-45.06399918,
-3.10496902,
0.03288471,
0.00115049,
],
dtype="float32",
)
result = point_cloud.coordinate_transform(replicated_points, pose)
# We expect the point to be translated close to the car, and then
        # rotated mostly around the x-axis. The result is device-dependent;
        # skip or ignore this test locally if it fails.
expected = np.tile([[[-8.184512, -0.13086952, -0.04200769]]], [2, 4, 1])
self.assertAllClose(expected, result)
def testSphericalCoordinatesTransform(self):
np_xyz = np.random.randn(5, 6, 3)
points = np.array(np_xyz, dtype="float32")
spherical_coordinates = point_cloud.spherical_coordinate_transform(
points
)
# Convert coordinates back to xyz to verify.
dist = spherical_coordinates[..., 0]
theta = spherical_coordinates[..., 1]
phi = spherical_coordinates[..., 2]
x = dist * np.sin(theta) * np.cos(phi)
y = dist * np.sin(theta) * np.sin(phi)
z = dist * np.cos(theta)
self.assertAllClose(x, np_xyz[..., 0])
self.assertAllClose(y, np_xyz[..., 1])
self.assertAllClose(z, np_xyz[..., 2])
@pytest.mark.skipif(
"TEST_CUSTOM_OPS" not in os.environ
or os.environ["TEST_CUSTOM_OPS"] != "true",
reason="Requires binaries compiled from source",
)
def test_group_points(self):
# rotate the first box by pi / 2 so dim_x and dim_y are swapped.
# The last box is a cube rotated by 45 degrees.
with tf.device("cpu:0"):
bboxes = np.array(
[
[1.0, 2.0, 3.0, 6.0, 0.4, 6.0, np.pi / 2],
[4.0, 5.0, 6.0, 7.0, 0.8, 7.0, 0.0],
[0.4, 0.3, 0.2, 0.1, 0.1, 0.2, 0.0],
[-10.0, -10.0, -10.0, 3.0, 3.0, 3.0, np.pi / 4],
],
dtype="float32",
)
points = np.array(
[
[1.0, 2.0, 3.0], # box 0 (centroid)
[0.8, 2.0, 3.0], # box 0 (below x)
[1.1, 2.0, 3.0], # box 0 (above x)
[1.3, 2.0, 3.0], # box 0 (too far x)
[0.7, 2.0, 3.0], # box 0 (too far x)
[4.0, 5.0, 6.0], # box 1 (centroid)
[4.0, 4.61, 6.0], # box 1 (below y)
[4.0, 5.39, 6.0], # box 1 (above y)
[4.0, 4.5, 6.0], # box 1 (too far y)
[4.0, 5.5, 6.0], # box 1 (too far y)
[0.4, 0.3, 0.2], # box 2 (centroid)
[0.4, 0.3, 0.1], # box 2 (below z)
[0.4, 0.3, 0.29], # box 2 (above z)
[0.4, 0.3, 0.0], # box 2 (too far z)
[0.4, 0.3, 0.4], # box 2 (too far z)
[5.0, 7.0, 8.0], # none
[1.0, 5.0, 3.6], # box0, box1
[-11.6, -10.0, -10.0], # box3 (rotated corner point).
[
-11.4,
-11.4,
-10.0,
], # not in box3, would be if not rotated.
],
dtype="float32",
)
res = point_cloud.group_points_by_boxes(points, bboxes)
expected_result = tf.ragged.constant(
[[0, 1, 2], [5, 6, 7, 16], [10, 11, 12], [17]]
)
self.assertAllClose(expected_result.flat_values, res.flat_values)
def testWithinAFrustum(self):
center = np.array([1.0, 1.0, 1.0])
points = np.array([[0.0, 0.0, 0.0], [1.0, 2.0, 1.0], [1.0, 0.0, 1.0]])
point_mask = point_cloud.within_a_frustum(
points, center, r_distance=1.0, theta_width=1.0, phi_width=1.0
)
target_point_mask = np.array([False, True, False])
self.assertAllClose(point_mask, target_point_mask)
point_mask = point_cloud.within_a_frustum(
points, center, r_distance=1.0, theta_width=3.14, phi_width=3.14
)
target_point_mask = np.array([False, True, True])
self.assertAllClose(point_mask, target_point_mask)
point_mask = point_cloud.within_a_frustum(
points, center, r_distance=3.0, theta_width=1.0, phi_width=1.0
)
target_point_mask = np.array([False, False, False])
self.assertAllClose(point_mask, target_point_mask)
| keras-cv/keras_cv/point_cloud/point_cloud_test.py/0 | {
"file_path": "keras-cv/keras_cv/point_cloud/point_cloud_test.py",
"repo_id": "keras-cv",
"token_count": 7984
} | 86 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv import bounding_box
def _axis_mask(starts, ends, mask_len):
# index range of axis
batch_size = tf.shape(starts)[0]
axis_indices = tf.range(mask_len, dtype=starts.dtype)
axis_indices = tf.expand_dims(axis_indices, 0)
axis_indices = tf.tile(axis_indices, [batch_size, 1])
# mask of index bounds
axis_mask = tf.greater_equal(axis_indices, starts) & tf.less(
axis_indices, ends
)
return axis_mask
def corners_to_mask(bounding_boxes, mask_shape):
"""Converts bounding boxes in corners format to boolean masks
Args:
bounding_boxes: tensor of rectangle coordinates with shape
(batch_size, 4) in corners format (x0, y0, x1, y1).
mask_shape: a tuple or list of shape (width, height) indicating the
output width and height of masks.
Returns:
boolean masks with shape (batch_size, width, height) where True values
indicate positions within bounding box coordinates.
"""
mask_width, mask_height = mask_shape
x0, y0, x1, y1 = tf.split(bounding_boxes, [1, 1, 1, 1], axis=-1)
w_mask = _axis_mask(x0, x1, mask_width)
h_mask = _axis_mask(y0, y1, mask_height)
w_mask = tf.expand_dims(w_mask, axis=1)
h_mask = tf.expand_dims(h_mask, axis=2)
masks = tf.logical_and(w_mask, h_mask)
return masks
def fill_rectangle(images, centers_x, centers_y, widths, heights, fill_values):
"""Fill rectangles with fill value into images.
Args:
images: Tensor of images to fill rectangles into
centers_x: Tensor of positions of the rectangle centers on the x-axis
centers_y: Tensor of positions of the rectangle centers on the y-axis
widths: Tensor of widths of the rectangles
heights: Tensor of heights of the rectangles
fill_values: Tensor with same shape as images to get rectangle fill from
Returns:
images with filled rectangles.
"""
images_shape = tf.shape(images)
images_height = images_shape[1]
images_width = images_shape[2]
xywh = tf.stack([centers_x, centers_y, widths, heights], axis=1)
xywh = tf.cast(xywh, tf.float32)
corners = bounding_box.convert_format(
xywh, source="center_xywh", target="xyxy"
)
mask_shape = (images_width, images_height)
is_rectangle = corners_to_mask(corners, mask_shape)
is_rectangle = tf.expand_dims(is_rectangle, -1)
images = tf.where(is_rectangle, fill_values, images)
return images
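# A minimal usage sketch (not part of the original module); the shapes and
# values below are illustrative assumptions only:
#
#   import tensorflow as tf
#   images = tf.zeros([2, 8, 8, 3])
#   fill_values = tf.ones([2, 8, 8, 3])
#   centers_x = tf.constant([2, 4])
#   centers_y = tf.constant([2, 4])
#   widths = tf.constant([2, 3])
#   heights = tf.constant([2, 3])
#   patched = fill_rectangle(
#       images, centers_x, centers_y, widths, heights, fill_values)
#   # `patched` equals `fill_values` inside each rectangle and `images` elsewhere.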
| keras-cv/keras_cv/utils/fill_utils.py/0 | {
"file_path": "keras-cv/keras_cv/utils/fill_utils.py",
"repo_id": "keras-cv",
"token_count": 1152
} | 87 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Small utility script to count parameters in our preset checkpoints.
Usage:
python shell/count_preset_params.py
python shell/count_preset_params.py --model ResNetV2Backbone
python shell/count_preset_params.py --preset resnet50_v2_imagenet
"""
import inspect
from absl import app
from absl import flags
from keras.utils.layer_utils import count_params
import keras_cv
FLAGS = flags.FLAGS
flags.DEFINE_string(
"model", None, "The name of a model, e.g. ResNetV2Backbone."
)
flags.DEFINE_string(
"preset", None, "The name of a preset, e.g. resnet50_v2_imagenet."
)
def main(_):
for name, symbol in keras_cv.models.__dict__.items():
if FLAGS.model and name != FLAGS.model:
continue
if not hasattr(symbol, "from_preset"):
continue
if not inspect.isclass(symbol):
continue
if not issubclass(
symbol,
(
keras_cv.models.backbones.backbone.Backbone,
keras_cv.models.task.Task,
),
):
continue
for preset in symbol.presets:
if FLAGS.preset and preset != FLAGS.preset:
continue
# Avoid printing all backbone presets of each task.
if issubclass(symbol, keras_cv.models.task.Task) and (
preset
in keras_cv.models.backbones.backbone_presets.backbone_presets
):
continue
if symbol in (
keras_cv.models.RetinaNet,
keras_cv.models.YOLOV8Detector,
):
model = symbol.from_preset(preset, bounding_box_format="xywh")
else:
model = symbol.from_preset(preset)
params = count_params(model.weights)
print(f"{name} {preset} {params}")
if __name__ == "__main__":
app.run(main)
| keras-cv/shell/count_preset_params.py/0 | {
"file_path": "keras-cv/shell/count_preset_params.py",
"repo_id": "keras-cv",
"token_count": 1060
} | 88 |
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/advanced_activations.py#L18)</span>
### LeakyReLU
```python
keras.layers.LeakyReLU(alpha=0.3)
```
ユニットがアクティブでないときに微少な勾配を可能とするRectified Linear Unitの特別なバージョン:
`f(x) = alpha * x for x < 0`,
`f(x) = x for x >= 0`.
__入力のshape__
任意.このレイヤーをモデルの最初のレイヤーとして利用する場合,
`input_shape`というキーワード引数(サンプル数の軸を含まない整数のタプル)を指定してください.
__出力のshape__
入力のshapeと同じ.
__引数__
- __alpha__:0以上の浮動小数点数.負の部分の傾き.
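__例__

簡単な使用例です(層のサイズや`alpha`の値は説明のための仮の値です).

```python
from keras.models import Sequential
from keras.layers import Dense, LeakyReLU

model = Sequential()
model.add(Dense(64, input_shape=(32,)))
model.add(LeakyReLU(alpha=0.1))
```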
__参考文献__
- [Rectifier Nonlinearities Improve Neural Network Acoustic Models](https://web.stanford.edu/~awni/papers/relu_hybrid_icml2013_final.pdf)
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/advanced_activations.py#L57)</span>
### PReLU
```python
keras.layers.PReLU(alpha_initializer='zeros', alpha_regularizer=None, alpha_constraint=None, shared_axes=None)
```
Parametric Rectified Linear Unit:
`f(x) = alphas * x for x < 0`,
`f(x) = x for x >= 0`,
`alphas`はxと同じshapeを持つ学習対象の配列です.
__入力のshape__
任意.このレイヤーをモデルの最初のレイヤーとして利用する場合,
`input_shape`というキーワード引数(サンプル数の軸を含まない整数のタプル)を指定してください.
__出力のshape__
入力のshapeと同じ.
__引数__
- __alpha_initializer__:重みを初期化する関数.
- __alpha_regularizer__:重みを正則化する関数.
- __alpha_constraint__:重みに対する制約.
- __shared_axes__:活性化関数で共有する学習パラメータの軸.
    例えば,incoming feature mapsが,出力shapeとして`(batch, height, width, channels)`を持つ2Dコンボリューションから得られるもので,空間全体で各フィルターごとに一組のパラメータを共有したければ,`shared_axes=[1, 2]`とセットして下さい.
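__例__

`shared_axes`の簡単な使用例です(層のサイズは説明のための仮の値です).

```python
from keras.models import Sequential
from keras.layers import Conv2D, PReLU

model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=(64, 64, 3)))
model.add(PReLU(shared_axes=[1, 2]))  # 空間次元(height, width)でパラメータを共有
```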
__参考文献__
- [Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification](https://arxiv.org/abs/1502.01852)
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/advanced_activations.py#L152)</span>
### ELU
```python
keras.layers.ELU(alpha=1.0)
```
Exponential Linear Unit:
`f(x) = alpha * (exp(x) - 1.) for x < 0`,
`f(x) = x for x >= 0`.
__入力のshape__
任意.このレイヤーをモデルの最初のレイヤーとして利用する場合,
`input_shape`というキーワード引数(サンプル数の軸を含まない整数のタプル)を指定してください.
__出力のshape__
入力のshapeと同じ.
__引数__
- __alpha__:負の部分のscale.
__参考文献__
- [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)](https://arxiv.org/abs/1511.07289v1)
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/advanced_activations.py#L191)</span>
### ThresholdedReLU
```python
keras.layers.ThresholdedReLU(theta=1.0)
```
Thresholded Rectified Linear Unit:
`f(x) = x for x > theta`
`f(x) = 0 otherwise`.
__入力のshape__
任意.このレイヤーをモデルの最初のレイヤーとして利用する場合,
`input_shape`というキーワード引数(サンプル数の軸を含まない整数のタプル)を指定してください.
__出力のshape__
入力のshapeと同じ.
__引数__
- __theta__:0以上の浮動小数点数.活性化する閾値.
__参考文献__
- [Zero-Bias Autoencoders and the Benefits of Co-Adapting Features](http://arxiv.org/abs/1402.3337)
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/advanced_activations.py#L230)</span>
### Softmax
```python
keras.layers.Softmax(axis=-1)
```
Softmax関数.
__入力のshape__
任意.このレイヤーをモデルの最初のレイヤーとして利用する場合,
`input_shape`というキーワード引数(サンプル数の軸を含まない整数のタプル)を指定してください.
__出力のshape__
入力のshapeと同じ.
__引数__
- __axis__:整数,softmax正規化が適用される軸.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/advanced_activations.py#L262)</span>
### ReLU
```python
keras.layers.ReLU(max_value=None)
```
Rectified Linear Unit activation function.
__入力のshape__
任意.このレイヤーをモデルの最初のレイヤーとして利用する場合,
`input_shape`というキーワード引数(サンプル数の軸を含まない整数のタプル)を指定してください.
__出力のshape__
入力のshapeと同じ.
__引数__
- __max_value__:浮動小数点数,最大の出力値.
| keras-docs-ja/sources/layers/advanced-activations.md/0 | {
"file_path": "keras-docs-ja/sources/layers/advanced-activations.md",
"repo_id": "keras-docs-ja",
"token_count": 2416
} | 89 |
# SequentialモデルAPI
はじめに,[KerasのSequentialモデルのガイド](/getting-started/sequential-model-guide) を参照してください.
## モデルの有用な属性
- `model.layers`は,モデルに加えたレイヤーのリストです.
----
## Sequentialモデルのメソッド
### compile
```python
compile(self, optimizer, loss, metrics=None, sample_weight_mode=None, weighted_metrics=None, target_tensors=None)
```
訓練過程の設定.
__引数__
- __optimizer__: 文字列(optimizer名)あるいは optimizer のオブジェクト.
[optimizers](/optimizers) を参照してください.
- __loss__: 文字列(目的関数名)あるいは目的関数.
[losses](/losses) を参照してください.
モデルが複数の出力を持つ場合は,オブジェクトの辞書かリストを渡すことで,各出力に異なる損失を用いることができます.
モデルによって最小化される損失値は全ての個々の損失の合計になります.
- __metrics__: 訓練やテストの際にモデルを評価するための評価関数のリスト.
典型的には `metrics=['accuracy']`を使用するでしょう.
多出力モデルの各出力のための各評価関数を指定するために,`metrics={'output_a': 'accuracy'}`のような辞書を渡すこともできます.
- __sample_weight_mode__: もし時間ごとのサンプルの重み付け(2次元の重み)を行う必要があれば`"temporal"`と設定してください.
`"None"`の場合,サンプルへの(1次元)重み付けをデフォルトとしています.
モデルに複数の出力がある場合,モードとして辞書かリストを渡すことで,各出力に異なる`sample_weight_mode`を使うことができます.
- __weighted_metrics__: 訓練やテストの際にsample_weightまたはclass_weightにより評価と重み付けされるメトリクスのリスト.
- __target_tensors__: Kerasはデフォルトでモデルのターゲットためのプレースホルダを作成します.
これは訓練中にターゲットデータが入力されるものです.
代わりの自分のターゲットテンソルを利用したい場合(訓練時にKerasはこれらのターゲットに対して外部のNumpyデータを必要としません)は,それらを`target_tensors`引数で指定することができます.
単一出力の`Sequential`モデルの場合,これは単一のテンソルでなければなりません.
- __**kwargs__: Theano/CNTKがバックエンドの場合, これらは K.function に渡されます.
Tensorflowバックエンドの場合は`tf.Session.run`に渡されます.
__Raises__
- __ValueError__: `optimizer`,`loss`,`metrics`,または`sample_weight_mode`に対して無効な引数が与えられた場合.
__例__
```python
model = Sequential()
model.add(Dense(32, input_shape=(500,)))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
```
----
### fit
```python
fit(self, x=None, y=None, batch_size=None, epochs=1, verbose=1, callbacks=None, validation_split=0.0, validation_data=None, shuffle=True, class_weight=None, sample_weight=None, initial_epoch=0, steps_per_epoch=None, validation_steps=None)
```
固定のエポック数でモデルを訓練する.
__引数__
- __x__: 訓練データのNumpy配列.
モデルの入力レイヤーに名前がついていれば,入力の名前とNumpy配列をマップした辞書を渡すことも可能です.
フレームワーク固有のテンソル(例えばTensorFlowデータテンソル)からフィードする場合は`x`を(デフォルトの)`None`にすることもできます.
- __y__: ターゲット(ラベル)データのNumpy配列.
モデルの出力レイヤーに名前がついていれば,出力の名前とNumpy配列をマップした辞書を渡すことも可能です.
フレームワーク固有のテンソル(例えばTensorFlowデータテンソル)からフィードする場合は`y`を(デフォルトの)`None`にすることもできます.
- __batch_size__: 整数または`None`.設定したサンプル数ごとに勾配の更新を行います.
指定しなければデフォルトで32になります.
- __epochs__: 整数で,モデルを訓練するエポック数.
エポックは,提供される`x`および`y`データ全体の反復です.
`initial_epoch`と組み合わせると,`epochs`は「最終エポック」として理解されることに注意してください.
このモデルは`epochs`で与えられた反復回数の訓練をするわけではなく,単に`epochs`という指標に試行が達するまで訓練します.
- __verbose__: 0, 1または2.詳細表示モード.0とすると標準出力にログを出力しません.
1の場合はログをプログレスバーで標準出力,2の場合はエポックごとに1行のログを出力します.
- __callbacks__: `keras.callbacks.Callback` にあるインスタンスのリスト.
訓練中にcallbacksのリストを適用します.
[callbacks](/callbacks)を参照してください.
- __validation_split__: 0から1までの浮動小数点数.
訓練データの中で検証データとして使う割合.
訓練データの中から検証データとして設定されたデータは,訓練時に使用されず,各エポックの最後に計算される損失関数や何らかのモデルの評価関数で使われます.
- __validation_data__: 各エポックの損失関数や評価関数で用いられるタプル`(x_val, y_val)`か`(x_val, y_val, val_sample_weights)`.
このデータは訓練には使われません.
設定すると`validation_split`を上書きします.
- __shuffle__: 真理値(各エポックの前に訓練データをシャッフルするか)か文字列('batch').
'batch' は HDF5 データだけに使える特別なオプションです.バッチサイズのチャンクの中においてシャッフルします.
    `steps_per_epoch`が`None`でない場合は効果がありません.
- __class_weight__: 辞書で,クラス毎の重みを格納します.
(訓練の間だけ)損失関数をスケーリングするために使います.
過小評価されたクラスのサンプルに「より注意を向ける」ようにしたい時に便利です.
- __sample_weight__: 入力サンプルと同じ長さの1次元のNumpy 配列で,訓練のサンプルに対する重みを格納します.
これは損失関数をスケーリングするために(訓練の間だけ)使用します.
(重みとサンプルが1:1対応するように)入力サンプルと同じ長さのフラットな(1次元の)Numpy配列を渡すことができます.
あるいは系列データの場合において,2次元配列の`(samples, sequence_length)`という形式で,
すべてのサンプルの各時間において異なる重みを適用できます.
この場合,`compile()`の中で`sample_weight_mode="temporal"`と確実に明記すべきです.
- __initial_epoch__: 訓練開始時のepoch(前の学習から再開する際に便利です).
- __steps_per_epoch__: 終了した1エポックを宣言して次のエポックを始めるまでのステップ数の合計(サンプルのバッチ).TensorFlowのデータテンソルのような入力テンソルを使用して訓練する場合,デフォルトの`None`はデータセットのサンプル数をバッチサイズで割ったものに等しくなります.それが決定できない場合は1になります.
- __validation_steps__: `steps_per_epoch`を指定している場合のみ関係します.停止する前にバリデーションするステップの総数(サンプルのバッチ).
__戻り値__
`History` オブジェクト. `History.history` 属性は
実行に成功したエポックにおける訓練の損失値と評価関数値の記録と,(適用可能ならば)検証における損失値と評価関数値も記録しています.
__Raises__
- __RuntimeError__: モデルが1度もcompileされていないとき.
- __ValueError__: 与えられた入力データがモデルの期待するものと異なる場合.
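__例__

上の`compile`の例で定義したモデルを想定した,仮のデータによる簡単な例です.

```python
import numpy as np
import keras

data = np.random.random((1000, 500))
labels = np.random.randint(10, size=(1000, 1))
one_hot_labels = keras.utils.to_categorical(labels, num_classes=10)

history = model.fit(data, one_hot_labels,
                    epochs=10, batch_size=32, validation_split=0.2)
```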
----
### evaluate
```python
evaluate(self, x=None, y=None, batch_size=None, verbose=1, sample_weight=None, steps=None)
```
バッチごとにある入力データにおける損失値を計算します.
__引数__
- __x__: 入力データ,Numpy 配列あるいは Numpy 配列のリスト
(モデルに複数の入力がある場合).
(TensorFlowのデータテンソルのような)フレームワーク固有のテンソルを与える場合には`x`をデフォルトの`None`にすることもできます.
- __y__: ラベル,Numpy 配列.
(TensorFlowのデータテンソルのような)フレームワーク固有のテンソルを与える場合には`y`をデフォルトの`None`にすることもできます.
- __batch_size__: 整数.指定しなければデフォルトで32になります.
- __verbose__: 進行状況メッセージ出力モードで,0か1.
- __sample_weight__: サンプルの重み,Numpy 配列.
- __steps__: 整数または`None`.評価ラウンド終了を宣言するまでの総ステップ数(サンプルのバッチ).
デフォルト値の`None`ならば無視されます.
__戻り値__
スカラーで,テストデータの損失値(モデルの評価関数を設定していない場合)
あるいはスカラーのリスト(モデルが他の評価関数を計算している場合).
`model.metrics_names`属性により,スカラーの出力でラベルを表示します.
__Raises__
- __RuntimeError__: モデルが1度もcompileされていないとき.
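__例__

`x_test`と`y_test`をNumpy配列と仮定した簡単な例です.

```python
loss_and_metrics = model.evaluate(x_test, y_test, batch_size=128)
```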
----
### predict
```python
predict(self, x, batch_size=None, verbose=0, steps=None)
```
入力サンプルに対する予測値の出力を生成します.
入力サンプルごとにバッチごとに処理します.
__引数__
- __x__: 入力データで,Numpy 配列の形式.
- __batch_size__: 整数.指定しなければデフォルトで32になります.
- __verbose__: 進行状況メッセージ出力モード,0または1.
- __steps__: 評価ラウンド終了を宣言するまでの総ステップ数(サンプルのバッチ).
デフォルト値の`None`ならば無視されます.
__戻り値__
予測値を格納した Numpy 配列.
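__例__

`x_test`をNumpy配列と仮定した簡単な例です.

```python
predictions = model.predict(x_test, batch_size=128)
```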
----
### train_on_batch
```python
train_on_batch(self, x, y, class_weight=None, sample_weight=None)
```
サンプル中の1つのバッチで勾配を更新します.
__引数__
- __x__: 入力データ,Numpy 配列または Numpy 配列のリスト(モデルに複数の入力がある場合).
- __y__: ラベル,Numpy 配列.
- __class_weight__: 辞書で,クラス毎の重みを格納します.
(訓練の間だけ)損失関数をスケーリングするために使います.
- __sample_weight__: サンプルの重み,Numpy 配列.
__戻り値__
スカラーでトレーニングの損失値(モデルに評価関数が設定されていない場合)
あるいはスカラーのリスト(モデルが他の評価関数を計算している場合).
`model.metrics_names` 属性により,スカラーの出力でラベルを表示する.
__Raises__
- __RuntimeError__: モデルが1度もcompileされていないとき.
----
### test_on_batch
```python
test_on_batch(self, x, y, sample_weight=None)
```
サンプルの単一バッチにおけるモデルの評価を行います.
__引数__
- __x__: 入力データ,Numpy 配列または Numpy 配列のリスト(モデルに複数の入力がある場合).
- __y__: ラベル,Numpy 配列の形式.
- __sample_weight__: サンプルの重み,Numpy 配列の形式.
__戻り値__
スカラーで,テストの損失値(モデルに評価関数が設定されていない場合)
あるいはスカラーのリスト(モデルが他の評価関数を計算している場合).
`model.metrics_names` 属性により,スカラーの出力でラベルを表示する.
__Raises__
- __RuntimeError__: モデルが1度もcompileされていないとき.
----
### predict_on_batch
```python
predict_on_batch(self, x)
```
サンプルの単一のバッチに対する予測値を返します.
__引数__
- __x__: 入力データ,Numpy 配列または Numpy 配列のリスト(モデルに複数の入力がある場合).
__戻り値__
予測値を格納した Numpy 配列.
----
### fit_generator
```python
fit_generator(self, generator, steps_per_epoch=None, epochs=1, verbose=1, callbacks=None, validation_data=None, validation_steps=None, class_weight=None, max_queue_size=10, workers=1, use_multiprocessing=False, shuffle=True, initial_epoch=0)
```
Python のジェネレータにより,バッチごとに生成されるデータでモデルを学習させます.
ジェネレータは効率化のために,モデルを並列に実行します.
たとえば,これを使えば CPU 上でリアルタイムに画像データを拡大しながら,それと並行して GPU 上でモデルを学習できます.
__引数__
- __generator__: ジェネレータ.
ジェネレータの出力は以下のいずれかでなければならず,どの配列も同数のサンプルを含まなければなりません.
- タプル (inputs, targets)
- タプル (inputs, targets, sample_weights).
全ての配列はサンプル数と同じ個数の値が含まれている必要があります.
ジェネレータは永遠にそのデータを繰り返すことを期待されています.
モデルによって`steps_per_epoch`のバッチが確認されたときにエポックが終了します.
- __steps_per_epoch__: 1エポックを宣言してから次のエポックの開始前までに`generator`から生成されるサンプル (サンプルのバッチ) の総数.
典型的には,データにおけるユニークなサンプル数をバッチサイズで割った値です.
`Sequence`でのオプション:指定されていない場合は,`len(generator)`をステップ数として使用します.
- __epochs__: 整数で,イテレーションの総数.`initial_epoch`と組み合わせると,`epochs`は「最終エポック」として理解されることに注意してください.
このモデルは`epochs`で与えられたnステップの訓練をするわけではなく,epochが`epochs`に達するまで訓練します.
- __verbose__: 進行状況メッセージ出力モードで,0,1,あるいは 2.
- __callbacks__: callbacks のリストで,訓練の際に呼び出されます.
- __validation_data__: 以下のいずれかです.
- 検証用データのジェネレータ
- タプル (inputs, targets)
- タプル (inputs, targets, sample_weights).
- __validation_steps__: `validation_data` がジェネレータである場合だけ関係があります.
各エポックの終わりに検証用ジェネレータから使用するステップ数です.典型的には,検証用データにおけるユニークなサンプル数をバッチサイズで割った値です.
`Sequence`におけるオプション:指定しなければ`len(validation_data)`がステップ数として用いられます.
- __class_weight__: 辞書で,クラス毎の重みを格納します.
- __max_queue_size__: ジェネレータのキューの最大サイズ.
- __workers__: スレッドベースのプロセス使用時の最大プロセス数
- __use_multiprocessing__: Trueならスレッドベースのプロセスを使います.実装がmultiprocessingに依存しているため,子プロセスに簡単に渡すことができないものとしてPickableでない引数をgeneratorに渡すべきではないことに注意してください.
- __shuffle__: 各試行の初めにバッチの順番をシャッフルするかどうか.`Sequence` (keras.utils.Sequence)インスタンスの時のみ使用されます.
- __initial_epoch__: 訓練開始時のepoch(前の学習から再開する際に便利です).
__戻り値__
`History` オブジェクト.
__Raises__
- __RuntimeError__: モデルが1度もcompileされていないとき.
__例__
```python
def generate_arrays_from_file(path):
while 1:
with open(path) as f:
for line in f:
# create Numpy arrays of input data
# and labels, from each line in the file
x, y = process_line(line)
yield (x, y)
model.fit_generator(generate_arrays_from_file('/my_file.txt'),
steps_per_epoch=1000, epochs=10)
```
----
### evaluate_generator
```python
evaluate_generator(self, generator, steps=None, max_queue_size=10, workers=1, use_multiprocessing=False)
```
ジェネレータのデータによってモデルを評価します.
ジェネレータは`test_on_batch`が受け取るデータと同じ種類のデータを返すべきです.
__引数__
- __generator__:
(inputs, targets)あるいは(inputs, targets, sample_weights)のタプルを生成するジェネレータ.
- __steps__: `generator`が停止するまでに生成するサンプル(サンプルのバッチ)の総数.
`Sequence`におけるオプション:指定しなければ`len(generator)`がステップ数として用いられます.
- __max_queue_size__: ジェネレータのキューの最大サイズ
- __workers__: スレッドベースのプロセス使用時の最大プロセス数
- __use_multiprocessing__: Trueならスレッドベースのプロセスを使います.実装がmultiprocessingに依存しているため,子プロセスに簡単に渡すことができないものとしてPickableでない引数をgeneratorに渡すべきではないことに注意してください.
__戻り値__
スカラーで,テストの損失値(モデルに評価関数が設定されていない場合)
あるいはスカラーのリスト(モデルが他の評価関数を計算している場合).
`model.metrics_names`属性により,スカラーの出力でラベルを表示する.
__Raises__
- __RuntimeError__: モデルが1度もcompileされていないとき.
----
### predict_generator
```python
predict_generator(self, generator, steps=None, max_queue_size=10, workers=1, use_multiprocessing=False, verbose=0)
```
ジェネレータから生成されたデータに対して予測します.
ジェネレータは`predict_on_batch`が受け取るデータと同じ種類のデータを返すべきです.
__引数__
- __generator__:
入力サンプルのバッチを生成するジェネレータ.
- __steps__: `generator`が停止するまでに生成するサンプル(サンプルのバッチ)の総数.
`Sequence`におけるオプション:指定しなければ`len(generator)`がステップ数として用いられます.
- __max_queue_size__: ジェネレータのキューの最大サイズ
- __workers__: スレッドベースのプロセス使用時の最大プロセス数
- __use_multiprocessing__: Trueならスレッドベースのプロセスを使います.実装がmultiprocessingに依存しているため,子プロセスに簡単に渡すことができないものとしてPickableでない引数をgeneratorに渡すべきではないことに注意してください.
- __verbose__: 進行状況メッセージ出力モード,0または1.
__戻り値__
予測値の Numpy 配列.
----
### get_layer
```python
get_layer(self, name=None, index=None)
```
モデルの一部分であるレイヤーを探します.
(ユニークな)名前かグラフのインデックスに基づいてレイヤーを返します.インデックスはボトムアップの幅優先探索の順番に基づきます.
__引数__
- __name__: 文字列,レイヤーの名前.
- __index__: 整数,レイヤーのインデックス.
__戻り値__
レイヤーインスタンス.
| keras-docs-ja/sources/models/sequential.md/0 | {
"file_path": "keras-docs-ja/sources/models/sequential.md",
"repo_id": "keras-docs-ja",
"token_count": 9995
} | 90 |
# 케라스 Sequential 모델 시작하기
`Sequential` 모델은 층<sub>layer</sub>을 순서대로 쌓은 것입니다.
아래와 같이 각 층 인스턴스를 리스트 형식으로 나열하여 생성자<sub>constructor</sub>인 `Sequential`로 넘겨주면 모델이 만들어집니다.
```python
from keras.models import Sequential # Sequential 생성자를 불러옵니다.
from keras.layers import Dense, Activation # Dense와 Activation 두 층 인스턴스를 불러옵니다.
# Sequential 생성자에 층을 순서대로 담은 리스트를 전달하여 모델을 만듭니다.
model = Sequential([
Dense(32, input_shape=(784,)), # 784 차원의 입력을 받아 32 차원으로 출력하는 완전 연결 신경망 층입니다.
Activation('relu'), # 'relu' 활성화 함수를 적용하는 층입니다.
Dense(10), # 입력을 10차원으로 출력하는 완전 연결 신경망 층입니다.
Activation('softmax'), # 'softmax' 활성화 함수를 적용하는 층입니다.
])
```
각 층을 리스트 형식으로 입력하는 방법 외에도, `Sequential` 생성자로 만든 모델에 `.add()` 메소드를 사용하면 손쉽게 새 층을 덧붙일 수 있습니다.
```python
model = Sequential() # 먼저 Sequential 생성자를 이용하여 빈 모델을 만들고,
model.add(Dense(32, input_dim=784)) # Dense 층을 추가하고,
model.add(Activation('relu')) # Activation 층을 추가합니다.
```
----
## 입력 형태 지정하기
각 모델은 어떤 형태<sub>shape</sub>의 값이 입력될지 미리 알아야 합니다. 때문에 `Sequential` 모델의 첫 번째 층은 입력할 데이터의 형태 정보를 받습니다(이후의 층들은 자동으로 이전 층의 출력 정보를 입력 정보로 채택하여 형태를 추정합니다). 형태 정보는 다음과 같은 방법으로 입력할 수 있습니다.
- 첫 번째 층의 `input_shape` 인자<sub>argument</sub>에 형태를 입력하는 방법입니다. `input_shape` 인자는 입력 데이터의 각 차원별 크기를 나타내는 정수값들이 나열된 튜플이며, 정수 대신 `None`을 쓸 경우 아직 정해지지 않은 양의 정수를 나타냅니다. 배치 크기는 `input_shape` 인자에 포함되지 않습니다.
- `input_shape` 인자는 입력 값의 크기와 시계열 입력의 길이를 포괄합니다. 따라서 `Dense`와 같이 2D 처리를 하는 층의 경우 `input_shape` 대신에 `input_dim` 인자를 통해서도 입력 크기를 지정할 수 있으며, 시계열과 같이 3D 처리를 하는 층은 `input_dim`과 `input_length`의 두 인자를 사용해서 입력 차원의 크기와 시계열 길이를 각각 지정할 수 있습니다.
- 배치 크기를 고정해야 하는 경우 `batch_size` 인자를 사용합니다(순환 신경망<sub>Recurrent Neural Network</sub>과 같이 현 시점의 결과를 저장하여 다음 시점으로 넘기는 처리를 하는 경우 배치 크기 고정이 필요합니다). 예를 들어, `batch_size=32`와 `input_shape=(6, 8)`을 층에 입력하면 이후의 모든 입력을 `(32, 6, 8)`의 형태로 처리합니다.
이에 따라, 아래의 두 코드는 완전히 동일하게 작동합니다.
```python
model = Sequential()
model.add(Dense(32, input_shape=(784,))) # input_shape를 이용하여 입력 차원(input_dim)을 784로 지정
```
```python
model = Sequential()
model.add(Dense(32, input_dim=784)) # input_dim을 이용하여 입력 차원을 784로 지정
```
----
## 컴파일하기
모델을 학습시키기 전에 `compile` 메소드를 통해서 학습과정의 세부 사항을 설정합니다. `compile` 메소드는 다음 세 개의 인자를 입력받습니다.
- 최적화 함수<sub>optimizer</sub>: 기존의 최적화 함수를(예: `rmsprop`, `adagrad` 등) 문자열로 된 식별자<sub>identifier</sub>를 통해 불러오거나 `Optimizer` 클래스의 인스턴스를 만들어서 사용할 수 있습니다([최적화 함수](/optimizers) 참고).
- 손실 함수<sub>loss function</sub>: 모델이 학습을 통해 최소화하고자 하는 목적 함수<sub>objective function</sub>입니다. 이 또한 기존 손실 함수의 문자열 식별자(예: `categorical_crossentropy`, `mse` 등)를 입력하거나 별도의 목적 함수를 지정하여 사용할 수 있습니다([손실 함수](/losses) 참고).
- 평가 지표<sub>metric</sub> 리스트: 모델의 성능을 평가할 지표를 리스트 형식으로 입력합니다. 예컨대 분류문제라면 `metrics=['accuracy']`를 통해 정확도<sub>accuracy</sub>를 산출할 수 있습니다. 평가 지표는 기존 지표 함수를 문자열 식별자로 불러오거나 사용자가 함수를 정의하여 지정할 수 있습니다([평가 지표](/metrics) 참고).
```python
# 다중 분류 문제 예시
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
# 이진 분류 문제 예시
model.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['accuracy'])
# 회귀분석에 사용할 평균 제곱근 오차 계산
model.compile(optimizer='rmsprop',
loss='mse')
# 사용자 정의 평가 지표 예시
import keras.backend as K
def mean_pred(y_true, y_pred): # y_true와 y_pred 두 개의 인자를 받는 지표 함수 mean_pred를 정의합니다.
return K.mean(y_pred) # y_pred의 평균값을 반환합니다.
model.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['accuracy', mean_pred]) # metrics에 'accuracy'와 앞서 정의한 mean_pred를 리스트 형식으로 입력합니다.
```
----
## 학습시키기
케라스 모델들은 데이터와 레이블로 구성된 NumPy 배열을 입력받아 학습합니다. 모델의 학습에는 일반적으로 `fit` 함수를 사용합니다. [여기서 자세한 정보를 볼 수 있습니다](/models/sequential).
```python
# 하나의 데이터를 입력받아 두 개의 클래스로 분류하는 이진 분류 모델의 경우:
model = Sequential()
model.add(Dense(32, activation='relu', input_dim=100)) # 입력 100차원, 출력 32차원에 'relu' 함수를 적용하는 Dense 층입니다.
model.add(Dense(1, activation='sigmoid')) # 1차원 출력에 'sigmoid' 함수를 적용하는 Dense 층입니다.
model.compile(optimizer='rmsprop', # 최적화 함수 = 'rmsprop'
loss='binary_crossentropy', # 손실 함수 = 'binary_crossentropy'
metrics=['accuracy']) # 평가 지표 = 'accuracy'
# 예제를 위한 더미 데이터 생성
import numpy as np
data = np.random.random((1000, 100)) # 0과 1 사이 값을 갖는 1000 x 100 차원의 난수 행렬을 무작위로 생성합니다.
labels = np.random.randint(2, size=(1000, 1)) # 0 또는 1의 값을 갖는 1000 x 1 차원의 레이블 행렬을 무작위로 생성합니다.
# 학습시키기
model.fit(data, labels, epochs=10, batch_size=32) # 생성된 데이터를 32개씩의 배치로 나누어 전체를 총 10회 학습시킵니다.
```
```python
# 하나의 데이터를 입력받아 열 개의 클래스로 분류하는 다중 분류 모델의 경우:
model = Sequential()
model.add(Dense(32, activation='relu', input_dim=100))
model.add(Dense(10, activation='softmax')) # 10차원 출력에 'softmax' 함수를 적용하는 Dense 층입니다.
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy', # 손실 함수 = 'categorical_crossentropy'
metrics=['accuracy'])
# 예제를 위한 더미 데이터 생성
import numpy as np
data = np.random.random((1000, 100))
labels = np.random.randint(10, size=(1000, 1)) # 0과 9 사이 정수값을 갖는 1000 x 1 차원 레이블 행렬을 무작위로 생성합니다.
# 레이블을 열 개 클래스의 원-핫 인코딩 데이터로 변환
one_hot_labels = keras.utils.to_categorical(labels, num_classes=10)
# 각 32개씩의 배치로 나누어 총 10회 학습
model.fit(data, one_hot_labels, epochs=10, batch_size=32)
```
----
## 예시
바로 실험해볼 수 있는 예시들입니다!
[여기서](https://github.com/keras-team/keras/tree/master/examples) 실제 데이터셋을 다루는 예제들을 볼 수 있습니다.
- CIFAR10 소형 이미지 분류: 실시간 데이터 증강을 포함하는 합성곱 신경망<sub>Convolutional Neural Network</sub>
- IMDB 영화 후기 감정 분류: 순서를 가진 문자열을 다루는 LSTM<sub>Long Short-Term Memory</sub> 모형
- 로이터 뉴스<sub>Reuters Newswires</sub> 주제 분류: 다층 퍼셉트론<sub>Multilayer Perceptron</sub> 모형
- MNIST 손으로 쓴 숫자 이미지 분류: 다층 신경망과 합성곱 신경망
- LSTM을 이용한 문자 단위 텍스트 생성기
...등등.
### 다중 소프트맥스 분류<sub>Multi-class Softmax Classification</sub>를 위한 다층 신경망(MLP)
```python
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.optimizers import SGD
# 예제를 위한 더미 데이터 생성
import numpy as np
x_train = np.random.random((1000, 20))
y_train = keras.utils.to_categorical(np.random.randint(10, size=(1000, 1)), num_classes=10)
x_test = np.random.random((100, 20))
y_test = keras.utils.to_categorical(np.random.randint(10, size=(100, 1)), num_classes=10)
model = Sequential()
# 아래의 Dense(64)는 64개의 은닉 유닛을 갖는 완전 연결 신경망입니다.
# 첫번째 층에서 반드시 입력될 데이터의 형태를 명시해야 합니다.
# 본 예시에서는 20차원 벡터가 입력됩니다.
model.add(Dense(64, activation='relu', input_dim=20))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy',
optimizer=sgd,
metrics=['accuracy'])
model.fit(x_train, y_train,
epochs=20,
batch_size=128)
score = model.evaluate(x_test, y_test, batch_size=128)
```
### 이진 분류<sub>Binary Classification</sub>를 위한 다층 신경망(MLP)
```python
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout
# 예제를 위한 더미 데이터 생성
x_train = np.random.random((1000, 20))
y_train = np.random.randint(2, size=(1000, 1))
x_test = np.random.random((100, 20))
y_test = np.random.randint(2, size=(100, 1))
model = Sequential()
model.add(Dense(64, input_dim=20, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
model.fit(x_train, y_train,
epochs=20,
batch_size=128)
score = model.evaluate(x_test, y_test, batch_size=128)
```
### VGG 유형의 합성곱 신경망
```python
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.optimizers import SGD
# 예제를 위한 더미 데이터 생성
x_train = np.random.random((100, 100, 100, 3))
y_train = keras.utils.to_categorical(np.random.randint(10, size=(100, 1)), num_classes=10)
x_test = np.random.random((20, 100, 100, 3))
y_test = keras.utils.to_categorical(np.random.randint(10, size=(20, 1)), num_classes=10)
model = Sequential()
# 입력 데이터: R, G, B 3개의 색상 채널을 가진 100x100 사이즈 이미지 -> (100, 100, 3)의 텐서
# 이 입력 데이터에 3x3 크기를 가진 서로 다른 합성곱 필터 32개를 적용합니다.
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(100, 100, 3)))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd)
model.fit(x_train, y_train, batch_size=32, epochs=10)
score = model.evaluate(x_test, y_test, batch_size=32)
```
### LSTM을 사용한 순서형<sub>sequence</sub> 데이터의 분류
```python
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.layers import Embedding
from keras.layers import LSTM
max_features = 1024
model = Sequential()
model.add(Embedding(max_features, output_dim=256))
model.add(LSTM(128))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=16, epochs=10)
score = model.evaluate(x_test, y_test, batch_size=16)
```
### 1차원 합성곱<sub>1D Convolution</sub>을 활용한 순서형 데이터 분류
```python
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.layers import Embedding
from keras.layers import Conv1D, GlobalAveragePooling1D, MaxPooling1D
seq_length = 64
model = Sequential()
model.add(Conv1D(64, 3, activation='relu', input_shape=(seq_length, 100)))
model.add(Conv1D(64, 3, activation='relu'))
model.add(MaxPooling1D(3))
model.add(Conv1D(128, 3, activation='relu'))
model.add(Conv1D(128, 3, activation='relu'))
model.add(GlobalAveragePooling1D())
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=16, epochs=10)
score = model.evaluate(x_test, y_test, batch_size=16)
```
### 순서형 데이터 분류를 위한 LSTM층 쌓기
시계열 데이터의 고차원적인 요인들을 학습할 수 있도록 3개의 LSTM층을 연결한 모델을 만듭니다.
처음의 두 LSTM 층은 순서의 모든 지점에서 결과값을 출력합니다 `return_sequences=True`. 즉, 입력값의 순서 개수와 출력값의 순서 개수가 같습니다. 하지만 마지막 LSTM 층은 출력 시퀀스의 최종 시점에서 결과를 출력합니다. 따라서 앞서 구성한 LSTM층의 최종 출력은 시계열 차원이 없어진 크기를 가집니다(이 과정은 길이를 가진 여러 벡터의 입력 시퀀스를 하나의 벡터로 변환하는 것과도 같습니다).
<img src="https://keras.io/img/regular_stacked_lstm.png" alt="stacked LSTM" style="width: 300px;"/>
```python
from keras.models import Sequential
from keras.layers import LSTM, Dense
import numpy as np
data_dim = 16 # 입력 데이터는 16 차원입니다.
timesteps = 8 # 8개의 순서 시점을 갖습니다.
num_classes = 10 # 목표 클래스는 10개 입니다.
# 입력 데이터의 형태: (batch_size, timesteps, data_dim)
model = Sequential()
model.add(LSTM(32, return_sequences=True,
input_shape=(timesteps, data_dim))) # 32 차원의 순서형 벡터를 반환합니다.
model.add(LSTM(32, return_sequences=True)) # 32 차원의 순서형 벡터를 반환합니다.
model.add(LSTM(32)) # 32 차원의 단일 벡터를 반환합니다.
model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
# 예제를 위한 더미 훈련 데이터 생성
x_train = np.random.random((1000, timesteps, data_dim))
y_train = np.random.random((1000, num_classes))
# 예제를 위한 더미 검증 데이터 생성
x_val = np.random.random((100, timesteps, data_dim))
y_val = np.random.random((100, num_classes))
model.fit(x_train, y_train,
batch_size=64, epochs=5,
validation_data=(x_val, y_val))
```
### 층을 쌓으면서, 저장한 상태를 다음 순서로 넘기는<sub>stateful</sub> LSTM 모델
상태 저장 순환 신경망<sub>stateful Recurrent Model</sub>은 입력된 배치를 처리하여 얻은 내부 상태(메모리)를 다음 배치의 초기 상태로 재사용합니다. 이를 통해서 계산 복잡도가 지나치게 높지 않게끔 유지하면서 보다 긴 시퀀스를 처리할 수 있도록 합니다. (예를 들어, 하나의 매우 긴 시계열을 보다 짧은 시계열 길이로 쪼갠 뒤 연속된 배치로 바꾸어 처리하는 경우를 생각해볼 수 있습니다. 이 경우 상태 저장 옵션은 이전 배치의 결과를 다음 배치로 연결해주기 때문에 서로 다른 배치가 마치 하나의 시계열로 이어진 것과 같은 효과를 냅니다.)
[FAQ에서 상태 저장 순환 신경망에 대한 정보를 더 보실 수 있습니다.](/getting-started/faq/#how-can-i-use-stateful-rnns)
```python
from keras.models import Sequential
from keras.layers import LSTM, Dense
import numpy as np
data_dim = 16
timesteps = 8
num_classes = 10
batch_size = 32
# 입력 데이터의 형태: (batch_size, timesteps, data_dim)
# 상태 저장을 활용하기 위해서는 모든 배치의 크기가 같아야 합니다.
# 이 경우 input_shape 인자 대신 batch_input_shape 인자를 사용하여 배치 크기를 함께 명시합니다.
# k번째 배치의 i번째 표본은 k-1번째 배치의 i번째 표본으로부터 상태를 이어받습니다.
model = Sequential()
model.add(LSTM(32, return_sequences=True, stateful=True,
batch_input_shape=(batch_size, timesteps, data_dim)))
model.add(LSTM(32, return_sequences=True, stateful=True))
model.add(LSTM(32, stateful=True))
model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
# 예제를 위한 더미 훈련 데이터 생성
x_train = np.random.random((batch_size * 10, timesteps, data_dim))
y_train = np.random.random((batch_size * 10, num_classes))
# 예제를 위한 더미 검증 데이터 생성
x_val = np.random.random((batch_size * 3, timesteps, data_dim))
y_val = np.random.random((batch_size * 3, num_classes))
model.fit(x_train, y_train,
batch_size=batch_size, epochs=5, shuffle=False,
validation_data=(x_val, y_val))
```
| keras-docs-ko/sources/getting-started/sequential-model-guide.md/0 | {
"file_path": "keras-docs-ko/sources/getting-started/sequential-model-guide.md",
"repo_id": "keras-docs-ko",
"token_count": 12104
} | 91 |
## 손실 함수의 사용
손실 함수(목적 함수 또는 최적화 스코어 함수)는 모델을 컴파일하기 위해 필요한 두 개의 매개 변수 중 하나입니다.
```python
model.compile(loss='mean_squared_error', optimizer='sgd')
```
```python
from keras import losses
model.compile(loss=losses.mean_squared_error, optimizer='sgd')
```
케라스가 제공하는 손실 함수의 이름 문자열<sub>string</sub> 또는 TensorFlow/Theano의 심볼릭 함수<sub>symbolic function</sub>를 매개 변수로 전달할 수 있습니다. 심볼릭 함수는 다음의 두 인자를 받아 각각의 데이터 포인트에 대한 스칼라를 반환합니다.
- __y_true__: 정답 레이블. TensorFlow/Theano 텐서.
- __y_pred__: 예측값. `y_true`와 같은 크기<sub>shape</sub>의 TensorFlow/Theano 텐서.
실제로 최적화되는 값은 모든 데이터 포인트에서의 출력값의 평균값입니다.
손실 함수의 예시는 [여기](https://github.com/keras-team/keras/blob/master/keras/losses.py)에서 확인할 수 있습니다.
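다음은 위에서 설명한 형식을 따르는 간단한 사용자 정의 손실 함수의 예시입니다(문서에 포함되지 않은 가상의 예시입니다).

```python
import keras.backend as K

def custom_mean_squared_error(y_true, y_pred):
    # 각 데이터 포인트에 대해 하나의 스칼라 값을 반환합니다.
    return K.mean(K.square(y_pred - y_true), axis=-1)

model.compile(loss=custom_mean_squared_error, optimizer='sgd')
```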
## 사용 가능한 손실 함수
### mean_squared_error
```python
keras.losses.mean_squared_error(y_true, y_pred)
```
예측값과 목표값의 평균 제곱 오차<sub>(MSE, mean squared error)</sub>를 계산합니다.
`mean(square(y_pred - y_true))`
__인자__
- __y_true__: 목표값 텐서.
- __y_pred__: 예측값 텐서.
__반환값__
샘플당 하나의 스칼라 손실값 텐서.
----
### mean_absolute_error
```python
keras.losses.mean_absolute_error(y_true, y_pred)
```
예측값과 목표값의 평균 절대 오차<sub>(MAE, mean absolute error)</sub>를 계산합니다.
`mean(abs(y_pred - y_true))`
__인자__
- __y_true__: 목표값 텐서.
- __y_pred__: 예측값 텐서.
__반환값__
샘플당 하나의 스칼라 손실값 텐서.
----
### mean_absolute_percentage_error
```python
keras.losses.mean_absolute_percentage_error(y_true, y_pred)
```
예측값과 목표값의 평균 절대 퍼센트 오차<sub>(MAPE, mean absolute percentage error)</sub>를 계산합니다.
`100. * mean(abs((y_true - y_pred) / y_true))`
__인자__
- __y_true__: 목표값 텐서.
- __y_pred__: 예측값 텐서.
__반환값__
샘플당 하나의 스칼라 손실값 텐서.
----
### mean_squared_logarithmic_error
```python
keras.losses.mean_squared_logarithmic_error(y_true, y_pred)
```
예측값과 목표값의 평균 제곱 로그 오차<sub>(MSLE, mean squared logarithmic error)</sub>를 계산합니다.
`mean(square(log(y_pred + 1) - log(y_true + 1)))`
__인자__
- __y_true__: 목표값 텐서.
- __y_pred__: 예측값 텐서.
__반환값__
샘플당 하나의 스칼라 손실값 텐서.
----
### squared_hinge
```python
keras.losses.squared_hinge(y_true, y_pred)
```
예측값과 목표값의 'squared hinge' 손실값을 계산합니다.
`mean(square(maximum(1 - y_true * y_pred, 0)))`
__인자__
- __y_true__: 목표값 텐서.
- __y_pred__: 예측값 텐서.
__반환값__
샘플당 하나의 스칼라 손실값 텐서.
----
### hinge
```python
keras.losses.hinge(y_true, y_pred)
```
예측값과 목표값의 'hinge' 손실값을 계산합니다.
`mean(maximum(1 - y_true * y_pred, 0))`
__인자__
- __y_true__: 목표값 텐서.
- __y_pred__: 예측값 텐서.
__반환값__
샘플당 하나의 스칼라 손실값 텐서.
----
### categorical_hinge
```python
keras.losses.categorical_hinge(y_true, y_pred)
```
예측값과 목표값의 'categorical hinge' 손실값을 계산합니다.
`maximum(0, max((1 - y_true) * y_pred) - sum(y_true * y_pred) + 1)`
__인자__
- __y_true__: 목표값 텐서.
- __y_pred__: 예측값 텐서.
__반환값__
샘플당 하나의 스칼라 손실값 텐서.
----
### logcosh
```python
keras.losses.logcosh(y_true, y_pred)
```
예측 오차의 하이퍼볼릭 코사인 로그값.
`log(cosh(x))`는 `x`가 작은 경우에는 `(x ** 2) / 2`, `x`가 큰 경우에는
`abs(x) - log(2)`와 거의 같은 값을 가집니다. 다시 말해 `logcosh`는 대부분
평균 제곱 오차와 비슷한 양상을 보이지만, 가끔 예측이 틀리더라도 영향을 크게
받지 않습니다.
__인자__
- __y_true__: 목표값 텐서.
- __y_pred__: 예측값 텐서.
__반환값__
샘플당 하나의 스칼라 손실값 텐서.
----
### categorical_crossentropy
```python
keras.losses.categorical_crossentropy(y_true, y_pred)
```
예측값과 목표값 사이의 크로스 엔트로피값을 계산합니다.
입/출력은 원-핫 인코딩<sub>one-hot encoding</sub> 형태를 가져야 합니다.
__인자__
- __y_true__: 목표값 텐서.
- __y_pred__: 예측값 텐서.
__반환값__
샘플당 하나의 스칼라 손실값 텐서.
----
### sparse_categorical_crossentropy
```python
keras.losses.sparse_categorical_crossentropy(y_true, y_pred)
```
예측값과 목표값 사이의 크로스 엔트로피값을 계산합니다.
입/출력은 `int` 형태를 가져야 합니다.
__인자__
- __y_true__: 목표값 텐서.
- __y_pred__: 예측값 텐서.
__반환값__
샘플당 하나의 스칼라 손실값 텐서.
----
### binary_crossentropy
```python
keras.losses.binary_crossentropy(y_true, y_pred)
```
예측값과 목표값 사이의 크로스 엔트로피값을 계산합니다.
입/출력은 이진<sub>binary</sub> 형태를 가져야 합니다.
__인자__
- __y_true__: 목표값 텐서.
- __y_pred__: 예측값 텐서.
__반환값__
샘플당 하나의 스칼라 손실값 텐서.
----
### kullback_leibler_divergence
```python
keras.losses.kullback_leibler_divergence(y_true, y_pred)
```
예측값과 목표값 사이의 KL 발산<sub>kullback Leibler divergence</sub> 값을 계산합니다.
`sum(y_true * log(y_true / y_pred))`
__인자__
- __y_true__: 목표값 텐서.
- __y_pred__: 예측값 텐서.
__반환값__
샘플당 하나의 스칼라 손실값 텐서.
----
### poisson
```python
keras.losses.poisson(y_true, y_pred)
```
예측값과 목표값 사이의 포아송 손실값<sub>poisson loss</sub>을 계산합니다.
목표값이 포아송 분포를 따른다고 생각될 때 사용합니다.
`mean(y_pred - y_true * log(y_pred + epsilon()))`
__인자__
- __y_true__: 목표값 텐서.
- __y_pred__: 예측값 텐서.
__반환값__
샘플당 하나의 스칼라 손실값 텐서.
----
### cosine_proximity
```python
keras.losses.cosine_proximity(y_true, y_pred)
```
예측값과 목표값 사이의 코사인 유사도<sub>cosine proximity</sub> 값을 계산합니다.
__인자__
- __y_true__: 목표값 텐서.
- __y_pred__: 예측값 텐서.
__반환값__
샘플당 하나의 스칼라 손실값 텐서.
----
### is_categorical_crossentropy
```python
keras.losses.is_categorical_crossentropy(loss)
```
----
**Note**: 손실 함수 `categorical_crossentropy`의 목표값은 범주 형식<sub>categorical format</sub>을 따라야 합니다. 예를 들어 범주(클래스)가 10개라면, 각 샘플의 목표값은 샘플 클래스에 해당하는 인덱스에서는 1이고 나머지 인덱스에서는 0인 10차원 벡터가 되어야 합니다.
케라스의 `to_categorical`을 통해 정수형 목표값(*integer target*)을 범주형 목표값(*categorical target*)으로 변환할 수 있습니다.
```python
from keras.utils import to_categorical
categorical_labels = to_categorical(int_labels, num_classes=None)
```
| keras-docs-ko/sources/losses.md/0 | {
"file_path": "keras-docs-ko/sources/losses.md",
"repo_id": "keras-docs-ko",
"token_count": 5128
} | 92 |
# 关于 Github Issues 和 Pull Requests
找到一个漏洞?有一个新的功能建议?想要对代码库做出贡献?请务必先阅读这些。
## 漏洞报告
你的代码不起作用,你确定问题在于Keras?请按照以下步骤报告错误。
1. 你的漏洞可能已经被修复了。确保更新到目前的Keras master分支,以及最新的 Theano/TensorFlow/CNTK master 分支。
轻松更新 Theano 的方法:`pip install git+git://github.com/Theano/Theano.git --upgrade`
2. 搜索相似问题。 确保在搜索已经解决的 Issue 时删除 `is:open` 标签。有可能已经有人遇到了这个漏洞。同时记得检查 Keras [FAQ](/faq/)。仍然有问题?在 Github 上开一个 Issue,让我们知道。
3. 确保你向我们提供了有关你的配置的有用信息:什么操作系统?什么 Keras 后端?你是否在 GPU 上运行,Cuda 和 cuDNN 的版本是多少?GPU型号是什么?
4. 为我们提供一个脚本来重现这个问题。该脚本应该可以按原样运行,并且不应该要求下载外部数据(如果需要在某些测试数据上运行模型,请使用随机生成的数据)。我们建议你使用 Github Gists 来张贴你的代码。任何无法重现的问题都会被关闭。
5. 如果可能的话,自己动手修复这个漏洞 - 如果可以的话!
你提供的信息越多,我们就越容易验证存在错误,并且我们可以采取更快的行动。如果你想快速解决你的问题,遵循上述步骤操作是至关重要的。
---
## 请求新功能
你也可以使用 [TensorFlow Github issues](https://github.com/tensorflow/tensorflow/issues) 来请求你希望在 Keras 中看到的功能,或者在 Keras API 中的更改。
1. 提供你想要的功能的清晰和详细的解释,以及为什么添加它很重要。请记住,我们需要的功能是对于大多数用户而言的,不仅仅是一小部分人。如果你只是针对少数用户,请考虑为 Keras 编写附加库。对 Keras 来说,避免臃肿的 API 和代码库是至关重要的。
2. 提供代码片段,演示您所需的 API 并说明您的功能的用例。 当然,在这一点上你不需要写任何真正的代码!
3. 讨论完需要在 `tf.keras` 中添加该功能后,你可以选择尝试提一个 Pull Request。如果你有能力,就开始写一些代码。相比可用的时间,我们总是有更多的工作要做。如果你能写一些代码,那么这将加速这个过程。
---
## 请求贡献代码
在[这个板块](https://github.com/keras-team/keras/projects/1)我们会列出当前需要添加的出色的问题和新功能。如果你想要为 Keras 做贡献,这就是可以开始的地方。
---
## Pull Requests 合并请求
**我应该在哪里提交我的合并请求?**
#### 注意:
我们不再会向多后端 Keras 添加新功能(我们仅仅修复漏洞),因为我们正在将开发精力重新投入到 tf.keras 上。如果你仍然想提交新的功能申请,请直接将它提交到 TensorFlow 仓库的 tf.keras 中。
1. **Keras 改进与漏洞修复**, 请到 [Keras `master` 分支](https://github.com/keras-team/keras/tree/master)。
2. **测试新功能**, 例如网络层和数据集,请到 [keras-contrib](https://github.com/farizrahman4u/keras-contrib)。除非它是一个在 [Requests for Contributions](https://github.com/keras-team/keras/projects/1) 中列出的新功能,它属于 Keras 的核心部分。如果你觉得你的功能属于 Keras 核心,你可以提交一个设计文档,来解释你的功能,并争取它(请看以下解释)。
请注意任何有关 **代码风格**(而不是修复漏洞,改进文档或添加新功能)的 PR 都会被拒绝。
以下是提交你的改进的快速指南:
1. 如果你的 PR 引入了功能上的改变,确保你从撰写设计文档并将其发给 Keras 邮件列表开始,以讨论是否应该修改,以及如何处理。这可以避免你的 PR 被关闭。当然,如果你的 PR 只是一个简单的漏洞修复,那就不需要这样做。撰写与提交设计文档的过程如下所示:
- 从这个 [Google 文档模版](https://docs.google.com/document/d/1ZXNfce77LDW9tFAj6U5ctaJmI5mT7CQXOFMEAZo-mAA/edit#) 开始,将它复制为一个新的 Google 文档。
    - 填写内容。注意你需要插入代码样例。要插入代码,请使用 Google 文档插件,例如 [CodePretty](https://chrome.google.com/webstore/detail/code-pretty/igjbncgfgnfpbnifnnlcmjfbnidkndnh?hl=en)(有许多可用的插件)。
- 将共享设置为 「每个有链接的人都可以发表评论」。
- 将文档发给 `[email protected]`,主题从 `[API DESIGN REVIEW]` (全大写) 开始,这样我们才会注意到它。
- 等待评论,回复评论。必要时修改提案。
- 该提案最终将被批准或拒绝。一旦获得批准,您可以发出合并请求或要求他人撰写合并请求。
2. 撰写代码(或者让别人写)。这是最难的一部分。
3. 确保你引入的任何新功能或类都有适当的文档。确保你修改过的任何代码仍具有最新的文档。**应该严格遵循 Docstring 风格**。尤其是,它们应该在 MarkDown 中格式化,并且应该有 `Arguments`,`Returns`,`Raises` 部分(如果适用)。查看代码示例中的其他文档以做参考。
4. 撰写测试。你的代码应该有完整的单元测试覆盖。如果你想看到你的 PR 迅速合并,这是至关重要的。
5. 在本地运行测试套件。这很简单:在 Keras 目录下,直接运行: `py.test tests/`。
- 您还需要安装测试包: `pip install -e .[tests]`。
6. 确保通过所有测试:
- 使用 Theano 后端,Python 2.7 和 Python 3.5。确保你有 Theano 的开发版本。
- 使用 TensorFlow 后端,Python 2.7 和 Python 3.5。确保你有 TensorFlow 的开发版本。
- 使用 CNTK 后端, Python 2.7 和 Python 3.5。确保你有 CNTK 的开发版本。
7. 我们使用 PEP8 语法约定,但是当涉及到行长时,我们不是教条式的。尽管如此,确保你的行保持合理的大小。为了让您的生活更轻松,我们推荐使用 PEP8 linter:
- 安装 PEP8 包:`pip install pep8 pytest-pep8 autopep8`
- 运行独立的 PEP8 检查: `py.test --pep8 -m pep8`
- 你可以通过运行这个命令自动修复一些 PEP8 错误: `autopep8 -i --select <errors> <FILENAME>`。
例如: `autopep8 -i --select E128 tests/keras/backend/test_backends.py`
8. 提交时,请使用适当的描述性提交消息。
9. 更新文档。如果引入新功能,请确保包含演示新功能用法的代码片段。
10. 提交你的 PR。如果你的更改已在之前的讨论中获得批准,并且你有完整(并通过)的单元测试以及正确的 docstring/文档,则你的 PR 可能会立即合并。
---
## 添加新的样例
即使你不贡献 Keras 源代码,如果你有一个简洁而强大的 Keras 应用,请考虑将它添加到我们的样例集合中。[现有的例子](https://github.com/keras-team/keras/tree/master/examples)展示惯用的 Keras 代码:确保保持自己的脚本具有相同的风格。
| keras-docs-zh/sources/contributing.md/0 | {
"file_path": "keras-docs-zh/sources/contributing.md",
"repo_id": "keras-docs-zh",
"token_count": 4713
} | 93 |
# 在 IMDB 情绪分类任务上训练循环卷积网络。
2 个轮次后达到 0.8498 的测试精度。K520 GPU 上为 41 秒/轮次。
```python
from __future__ import print_function
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers import Embedding
from keras.layers import LSTM
from keras.layers import Conv1D, MaxPooling1D
from keras.datasets import imdb
# Embedding
max_features = 20000
maxlen = 100
embedding_size = 128
# Convolution
kernel_size = 5
filters = 64
pool_size = 4
# LSTM
lstm_output_size = 70
# Training
batch_size = 30
epochs = 2
'''
注意:
batch_size 是高度敏感的
由于数据集非常小,因此仅需要 2 个轮次。
'''
print('Loading data...')
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
print(len(x_train), 'train sequences')
print(len(x_test), 'test sequences')
print('Pad sequences (samples x time)')
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
print('Build model...')
model = Sequential()
model.add(Embedding(max_features, embedding_size, input_length=maxlen))
model.add(Dropout(0.25))
model.add(Conv1D(filters,
kernel_size,
padding='valid',
activation='relu',
strides=1))
model.add(MaxPooling1D(pool_size=pool_size))
model.add(LSTM(lstm_output_size))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
print('Train...')
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test))
score, acc = model.evaluate(x_test, y_test, batch_size=batch_size)
print('Test score:', score)
print('Test accuracy:', acc)
``` | keras-docs-zh/sources/examples/imdb_cnn_lstm.md/0 | {
"file_path": "keras-docs-zh/sources/examples/imdb_cnn_lstm.md",
"repo_id": "keras-docs-zh",
"token_count": 894
} | 94 |
# 训练基于 MNIST 数据集上残差块的堆叠式自动编码器。
它举例说明了过去几年开发的两种有影响力的方法。
首先是适当地 "分拆" 的想法。在任何最大池化期间,都会丢失合并的接收场中最大值的确切位置(where),但是在输入图像的整体重建中可能非常有用。
因此,如果将 "位置" 从编码器传递到相应的解码器层,则可以将要解码的特征 "放置" 在正确的位置,从而可以实现更高保真度的重构。
# 参考文献
- [Visualizing and Understanding Convolutional Networks, Matthew D Zeiler, Rob Fergus](https://arxiv.org/abs/1311.2901v3)
- [Stacked What-Where Auto-encoders, Junbo Zhao, Michael Mathieu, Ross Goroshin, Yann LeCun](https://arxiv.org/abs/1506.02351v8)
这里利用的第二个想法是残差学习的想法。残差块通过允许跳过连接使网络能够按照数据认为合适的线性(或非线性)能力简化训练过程。
这样可以轻松地训练很多深度的网络。残差元素在该示例的上下文中似乎是有利的,因为它允许编码器和解码器之间的良好对称性。
通常,在解码器中,对重构图像的空间的最终投影是线性的,但是对于残差块,则不必如此,因为其输出是线性还是非线性的程度取决于被馈送的像素数据。
但是,为了将此示例中的重建限制在有效的像素范围内,因为我们知道 MNIST 数字映射到 [0, 1],所以最后使用了 hard sigmoid 激活。
# 参考文献
- [Deep Residual Learning for Image Recognition, Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun](https://arxiv.org/abs/1512.03385v1)
- [Identity Mappings in Deep Residual Networks, Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun](https://arxiv.org/abs/1603.05027v3)
```python
from __future__ import print_function
import numpy as np
from keras.datasets import mnist
from keras.models import Model
from keras.layers import Activation
from keras.layers import UpSampling2D, Conv2D, MaxPooling2D
from keras.layers import Input, BatchNormalization, ELU
import matplotlib.pyplot as plt
import keras.backend as K
from keras import layers
def convresblock(x, nfeats=8, ksize=3, nskipped=2, elu=True):
"""[4] 中提出的残差块。
以 elu=True 运行将使用 ELU 非线性,而以 elu=False 运行将使用 BatchNorm+RELU 非线性。
尽管 ELU 由于不受 BatchNorm 开销的困扰而很快,但它们可能会过拟合,
因为它们不提供 BatchNorm 批处理过程的随机因素,而后者是一个很好的正则化工具。
# 参数
x: 4D 张量, 穿过块的张量
nfeats: 整数。卷积层的特征图大小。
ksize: 整数,第一个卷积中 conv 核的宽度和高度。
nskipped: 整数,残差函数的卷积层数。
elu: 布尔值,是使用 ELU 还是 BN+RELU。
# 输入尺寸
4D 张量,尺寸为:
`(batch, channels, rows, cols)`
# 输出尺寸
4D 张量,尺寸为:
`(batch, filters, rows, cols)`
"""
y0 = Conv2D(nfeats, ksize, padding='same')(x)
y = y0
for i in range(nskipped):
if elu:
y = ELU()(y)
else:
y = BatchNormalization(axis=1)(y)
y = Activation('relu')(y)
y = Conv2D(nfeats, 1, padding='same')(y)
return layers.add([y0, y])
def getwhere(x):
'''计算包含开关的 'where' 掩码,该掩码指示应用 MaxPool2D 时哪个索引包含最大值。
使用总和的梯度是使所有内容保持高水平的不错的技巧。'''
y_prepool, y_postpool = x
return K.gradients(K.sum(y_postpool), y_prepool)
# 本示例假定 'channels_first' 数据格式。
K.set_image_data_format('channels_first')
# 输入图像尺寸
img_rows, img_cols = 28, 28
# 数据,分为训练集和测试集
(x_train, _), (x_test, _) = mnist.load_data()
x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# MaxPooling2D 使用的内核大小
pool_size = 2
# 每层特征图的总数
nfeats = [8, 16, 32, 64, 128]
# 每层池化内核的大小
pool_sizes = np.array([1, 1, 1, 1, 1]) * pool_size
# 卷积核大小
ksize = 3
# 要训练的轮次数
epochs = 5
# 训练期间的批次大小
batch_size = 128
if pool_size == 2:
# 如果使用 pool_size = 2 的 5 层网络
x_train = np.pad(x_train, [[0, 0], [0, 0], [2, 2], [2, 2]],
mode='constant')
x_test = np.pad(x_test, [[0, 0], [0, 0], [2, 2], [2, 2]], mode='constant')
nlayers = 5
elif pool_size == 3:
    # 如果使用 pool_size = 3 的 3 层网络
x_train = x_train[:, :, :-1, :-1]
x_test = x_test[:, :, :-1, :-1]
nlayers = 3
else:
import sys
sys.exit('Script supports pool_size of 2 and 3.')
# 训练输入的形状(请注意,模型是完全卷积的)
input_shape = x_train.shape[1:]
# axis=1 的所有层的尺寸最终大小,包括输入
nfeats_all = [input_shape[0]] + nfeats
# 首先构建编码器,同时始终跟踪 'where' 掩码
img_input = Input(shape=input_shape)
# 我们将 'where' 掩码推到下面的列表中
wheres = [None] * nlayers
y = img_input
for i in range(nlayers):
y_prepool = convresblock(y, nfeats=nfeats_all[i + 1], ksize=ksize)
y = MaxPooling2D(pool_size=(pool_sizes[i], pool_sizes[i]))(y_prepool)
wheres[i] = layers.Lambda(
getwhere, output_shape=lambda x: x[0])([y_prepool, y])
# 现在构建解码器,并使用存储的 'where' 掩码放置特征
for i in range(nlayers):
ind = nlayers - 1 - i
y = UpSampling2D(size=(pool_sizes[ind], pool_sizes[ind]))(y)
y = layers.multiply([y, wheres[ind]])
y = convresblock(y, nfeats=nfeats_all[ind], ksize=ksize)
# 使用 hard_sigmoid 裁剪重建范围
y = Activation('hard_sigmoid')(y)
# 定义模型及其均方误差损失,并使用 Adam 进行编译
model = Model(img_input, y)
model.compile('adam', 'mse')
# 拟合模型
model.fit(x_train, x_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, x_test))
# 绘图
x_recon = model.predict(x_test[:25])
x_plot = np.concatenate((x_test[:25], x_recon), axis=1)
x_plot = x_plot.reshape((5, 10, input_shape[-2], input_shape[-1]))
x_plot = np.vstack([np.hstack(x) for x in x_plot])
plt.figure()
plt.axis('off')
plt.title('Test Samples: Originals/Reconstructions')
plt.imshow(x_plot, interpolation='none', cmap='gray')
plt.savefig('reconstructions.png')
``` | keras-docs-zh/sources/examples/mnist_swwae.md/0 | {
"file_path": "keras-docs-zh/sources/examples/mnist_swwae.md",
"repo_id": "keras-docs-zh",
"token_count": 3808
} | 95 |
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/recurrent.py#L238)</span>
### RNN
```python
keras.layers.RNN(cell, return_sequences=False, return_state=False, go_backwards=False, stateful=False, unroll=False)
```
循环神经网络层基类。
__参数__
- __cell__: 一个 RNN 单元实例。RNN 单元是一个具有以下几项的类:
- 一个 `call(input_at_t, states_at_t)` 方法,
它返回 `(output_at_t, states_at_t_plus_1)`。
      单元的调用方法也可以接受可选参数 `constants`,
详见下面的小节「关于给 RNN 传递外部常量的说明」。
- 一个 `state_size` 属性。这可以是单个整数(单个状态),
在这种情况下,它是循环层状态的大小(应该与单元输出的大小相同)。
这也可以是整数表示的列表/元组(每个状态一个大小)。
- 一个 `output_size` 属性。 这可以是单个整数或者是一个 TensorShape,
它表示输出的尺寸。出于向后兼容的原因,如果此属性对于当前单元不可用,
则该值将由 `state_size` 的第一个元素推断。
`cell` 也可能是 RNN 单元实例的列表,在这种情况下,RNN 的单元将堆叠在另一个单元上,实现高效的堆叠 RNN。
- __return_sequences__: 布尔值。是返回输出序列中的最后一个输出,还是全部序列。
- __return_state__: 布尔值。除了输出之外是否返回最后一个状态。
- __go_backwards__: 布尔值 (默认 False)。
如果为 True,则向后处理输入序列并返回相反的序列。
- __stateful__: 布尔值 (默认 False)。
如果为 True,则批次中索引 i 处的每个样品的最后状态将用作下一批次中索引 i 样品的初始状态。
- __unroll__: 布尔值 (默认 False)。
如果为 True,则网络将展开,否则将使用符号循环。
展开可以加速 RNN,但它往往会占用更多的内存。
展开只适用于短序列。
- __input_dim__: 输入的维度(整数)。
将此层用作模型中的第一层时,此参数(或者,关键字参数 `input_shape`)是必需的。
- __input_length__: 输入序列的长度,在恒定时指定。
如果你要在上游连接 `Flatten` 和 `Dense` 层,
则需要此参数(如果没有它,无法计算全连接输出的尺寸)。
请注意,如果循环神经网络层不是模型中的第一层,
则需要在第一层的层级指定输入长度(例如,通过 `input_shape` 参数)。
__输入尺寸__
3D 张量,尺寸为 `(batch_size, timesteps, input_dim)`。
__输出尺寸__
- 如果 `return_state`:返回张量列表。
第一个张量为输出。剩余的张量为最后的状态,
每个张量的尺寸为 `(batch_size, units)`。例如,对于 RNN/GRU,状态张量数目为 1,对 LSTM 为 2。
- 如果 `return_sequences`:返回 3D 张量,
尺寸为 `(batch_size, timesteps, units)`。
- 否则,返回尺寸为 `(batch_size, units)` 的 2D 张量。
__Masking__
该层支持以可变数量的时间步对输入数据进行 masking。
要将 masking 引入你的数据,请使用 [Embedding](embeddings.md) 层,
并将 `mask_zero` 参数设置为 `True`。
__关于在 RNN 中使用「状态(statefulness)」的说明__
你可以将 RNN 层设置为 `stateful`(有状态的),
这意味着针对一个批次的样本计算的状态将被重新用作下一批样本的初始状态。
这假定在不同连续批次的样品之间有一对一的映射。
为了使状态有效:
- 在层构造器中指定 `stateful=True`。
- 为你的模型指定一个固定的批次大小,
如果是顺序模型,为你的模型的第一层传递一个 `batch_input_shape=(...)`。
如果是带有 1 个或多个 Input 层的函数式模型,为你的模型的所有第一层传递一个 `batch_shape=(...)`。
这是你的输入的预期尺寸,*包括批量维度*。
它应该是整数的元组,例如 `(32, 10, 100)`。
- 在调用 `fit()` 时指定 `shuffle=False`。
要重置模型的状态,请在特定图层或整个模型上调用 `.reset_states()`。
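下面是一个按照上述步骤构建有状态 RNN 的简单示例(其中的各个尺寸均为假设值):

```python
from keras.models import Sequential
from keras.layers import LSTM, Dense

batch_size, timesteps, data_dim = 32, 8, 16  # 假设的尺寸

model = Sequential()
model.add(LSTM(32, stateful=True,
               batch_input_shape=(batch_size, timesteps, data_dim)))
model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

# 在处理完一个完整的长序列之后重置状态
model.reset_states()
```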
__关于指定 RNN 初始状态的说明__
您可以通过使用关键字参数 `initial_state` 调用它们来符号化地指定 RNN 层的初始状态。
`initial_state` 的值应该是表示 RNN 层初始状态的张量或张量列表。
您可以通过调用带有关键字参数 `states` 的 `reset_states` 方法来数字化地指定 RNN 层的初始状态。
`states` 的值应该是一个代表 RNN 层初始状态的 Numpy 数组或者 Numpy 数组列表。
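下面的片段演示了如何符号化地传递 `initial_state`(示例中的尺寸为假设值):

```python
from keras.layers import Input, LSTM

encoder_inputs = Input(shape=(None, 16))
encoder_outputs, state_h, state_c = LSTM(32, return_state=True)(encoder_inputs)

decoder_inputs = Input(shape=(None, 16))
decoder_outputs = LSTM(32)(decoder_inputs, initial_state=[state_h, state_c])
```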
__关于给 RNN 传递外部常量的说明__
你可以使用 `RNN.__call__`(以及 `RNN.call`)的 `constants` 关键字参数将「外部」常量传递给单元。
这要求 `cell.call` 方法接受相同的关键字参数 `constants`。
这些常数可用于调节附加静态输入(不随时间变化)上的单元转换,也可用于注意力机制。
__示例__
```python
# 首先,让我们定义一个 RNN 单元,作为网络层子类。
class MinimalRNNCell(keras.layers.Layer):
def __init__(self, units, **kwargs):
self.units = units
self.state_size = units
super(MinimalRNNCell, self).__init__(**kwargs)
def build(self, input_shape):
self.kernel = self.add_weight(shape=(input_shape[-1], self.units),
initializer='uniform',
name='kernel')
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
initializer='uniform',
name='recurrent_kernel')
self.built = True
def call(self, inputs, states):
prev_output = states[0]
h = K.dot(inputs, self.kernel)
output = h + K.dot(prev_output, self.recurrent_kernel)
return output, [output]
# 让我们在 RNN 层使用这个单元:
cell = MinimalRNNCell(32)
x = keras.Input((None, 5))
layer = RNN(cell)
y = layer(x)
# 以下是如何使用单元格构建堆叠的 RNN的方法:
cells = [MinimalRNNCell(32), MinimalRNNCell(64)]
x = keras.Input((None, 5))
layer = RNN(cells)
y = layer(x)
```
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/recurrent.py#L947)</span>
### SimpleRNN
```python
keras.layers.SimpleRNN(units, activation='tanh', use_bias=True, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, dropout=0.0, recurrent_dropout=0.0, return_sequences=False, return_state=False, go_backwards=False, stateful=False, unroll=False)
```
全连接的 RNN,其输出将被反馈到输入。
__参数__
- __units__: 正整数,输出空间的维度。
- __activation__: 要使用的激活函数
(详见 [activations](../activations.md))。
默认:双曲正切(`tanh`)。
如果传入 `None`,则不使用激活函数
(即 线性激活:`a(x) = x`)。
- __use_bias__: 布尔值,该层是否使用偏置向量。
- __kernel_initializer__: `kernel` 权值矩阵的初始化器,
用于输入的线性转换
(详见 [initializers](../initializers.md))。
- __recurrent_initializer__: `recurrent_kernel` 权值矩阵
的初始化器,用于循环层状态的线性转换
(详见 [initializers](../initializers.md))。
- __bias_initializer__:偏置向量的初始化器
(详见[initializers](../initializers.md)).
- __kernel_regularizer__: 运用到 `kernel` 权值矩阵的正则化函数
(详见 [regularizer](../regularizers.md))。
- __recurrent_regularizer__: 运用到 `recurrent_kernel` 权值矩阵的正则化函数
(详见 [regularizer](../regularizers.md))。
- __bias_regularizer__: 运用到偏置向量的正则化函数
(详见 [regularizer](../regularizers.md))。
- __activity_regularizer__: 运用到层输出(它的激活值)的正则化函数
(详见 [regularizer](../regularizers.md))。
- __kernel_constraint__: 运用到 `kernel` 权值矩阵的约束函数
(详见 [constraints](../constraints.md))。
- __recurrent_constraint__: 运用到 `recurrent_kernel` 权值矩阵的约束函数
(详见 [constraints](../constraints.md))。
- __bias_constraint__: 运用到偏置向量的约束函数
(详见 [constraints](../constraints.md))。
- __dropout__: 在 0 和 1 之间的浮点数。
单元的丢弃比例,用于输入的线性转换。
- __recurrent_dropout__: 在 0 和 1 之间的浮点数。
单元的丢弃比例,用于循环层状态的线性转换。
- __return_sequences__: 布尔值。是返回输出序列中的最后一个输出,还是全部序列。
- __return_state__: 布尔值。除了输出之外是否返回最后一个状态。
- __go_backwards__: 布尔值 (默认 False)。
如果为 True,则向后处理输入序列并返回相反的序列。
- __stateful__: 布尔值 (默认 False)。
如果为 True,则批次中索引 i 处的每个样品
的最后状态将用作下一批次中索引 i 样品的初始状态。
- __unroll__: 布尔值 (默认 False)。
如果为 True,则网络将展开,否则将使用符号循环。
展开可以加速 RNN,但它往往会占用更多的内存。
展开只适用于短序列。
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/recurrent.py#L1524)</span>
### GRU
```python
keras.layers.GRU(units, activation='tanh', recurrent_activation='sigmoid', use_bias=True, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, dropout=0.0, recurrent_dropout=0.0, implementation=2, return_sequences=False, return_state=False, go_backwards=False, stateful=False, unroll=False, reset_after=False)
```
门限循环单元网络(Gated Recurrent Unit) - Cho et al. 2014.
有两种变体。默认的是基于 1406.1078v3 的实现,同时在矩阵乘法之前将复位门应用于隐藏状态。
另一种则是基于 1406.1078v1 的实现,它包括顺序倒置的操作。
第二种变体与 CuDNNGRU(GPU-only) 兼容并且允许在 CPU 上进行推理。
因此它对于 `kernel` 和 `recurrent_kernel` 有可分离偏置。
使用 `'reset_after'=True` 和 `recurrent_activation='sigmoid'` 。
__参数__
- __units__: 正整数,输出空间的维度。
- __activation__: 要使用的激活函数
(详见 [activations](../activations.md))。
默认:双曲正切 (`tanh`)。
如果传入 `None`,则不使用激活函数
(即 线性激活:`a(x) = x`)。
- __recurrent_activation__: 用于循环时间步的激活函数
(详见 [activations](../activations.md))。
默认:分段线性近似 sigmoid (`hard_sigmoid`)。
如果传入 None,则不使用激活函数
(即 线性激活:`a(x) = x`)。
- __use_bias__: 布尔值,该层是否使用偏置向量。
- __kernel_initializer__: `kernel` 权值矩阵的初始化器,
用于输入的线性转换
(详见 [initializers](../initializers.md))。
- __recurrent_initializer__: `recurrent_kernel` 权值矩阵
的初始化器,用于循环层状态的线性转换
(详见 [initializers](../initializers.md))。
- __bias_initializer__:偏置向量的初始化器
(详见[initializers](../initializers.md)).
- __kernel_regularizer__: 运用到 `kernel` 权值矩阵的正则化函数
(详见 [regularizer](../regularizers.md))。
- __recurrent_regularizer__: 运用到 `recurrent_kernel` 权值矩阵的正则化函数
(详见 [regularizer](../regularizers.md))。
- __bias_regularizer__: 运用到偏置向量的正则化函数
(详见 [regularizer](../regularizers.md))。
- __activity_regularizer__: 运用到层输出(它的激活值)的正则化函数
(详见 [regularizer](../regularizers.md))。
- __kernel_constraint__: 运用到 `kernel` 权值矩阵的约束函数
(详见 [constraints](../constraints.md))。
- __recurrent_constraint__: 运用到 `recurrent_kernel` 权值矩阵的约束函数
(详见 [constraints](../constraints.md))。
- __bias_constraint__: 运用到偏置向量的约束函数
(详见 [constraints](../constraints.md))。
- __dropout__: 在 0 和 1 之间的浮点数。
单元的丢弃比例,用于输入的线性转换。
- __recurrent_dropout__: 在 0 和 1 之间的浮点数。
单元的丢弃比例,用于循环层状态的线性转换。
- __implementation__: 实现模式,1 或 2。
模式 1 将把它的操作结构化为更多的小的点积和加法操作,
而模式 2 将把它们分批到更少,更大的操作中。
这些模式在不同的硬件和不同的应用中具有不同的性能配置文件。
- __return_sequences__: 布尔值。是返回输出序列中的最后一个输出,还是全部序列。
- __return_state__: 布尔值。除了输出之外是否返回最后一个状态。
- __go_backwards__: 布尔值 (默认 False)。
如果为 True,则向后处理输入序列并返回相反的序列。
- __stateful__: 布尔值 (默认 False)。
如果为 True,则批次中索引 i 处的每个样品的最后状态
将用作下一批次中索引 i 样品的初始状态。
- __unroll__: 布尔值 (默认 False)。
如果为 True,则网络将展开,否则将使用符号循环。
展开可以加速 RNN,但它往往会占用更多的内存。
展开只适用于短序列。
- __reset_after__: GRU 公约 (是否在矩阵乘法之前或者之后使用重置门)。
False =「之前」(默认),Ture =「之后」( CuDNN 兼容)。
__参考文献__
- [Learning Phrase Representations using RNN Encoder-Decoder for Statistical Machine Translation](https://arxiv.org/abs/1406.1078)
- [On the Properties of Neural Machine Translation: Encoder-Decoder Approaches](https://arxiv.org/abs/1409.1259)
- [Empirical Evaluation of Gated Recurrent Neural Networks on Sequence Modeling](http://arxiv.org/abs/1412.3555v1)
- [A Theoretically Grounded Application of Dropout in Recurrent Neural Networks](http://arxiv.org/abs/1512.05287)
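
A minimal usage sketch for illustration (the unit count and input shape below are arbitrary, not part of the original reference):

```python
from keras.models import Sequential
from keras.layers import GRU

# 32 GRU units over sequences of 10 timesteps with 8 features per timestep.
model = Sequential()
model.add(GRU(32, input_shape=(10, 8)))
# Output shape: (batch_size, 32).
# With return_sequences=True the output would be (batch_size, 10, 32) instead.
```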
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/recurrent.py#L2086)</span>
### LSTM
```python
keras.layers.LSTM(units, activation='tanh', recurrent_activation='sigmoid', use_bias=True, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', unit_forget_bias=True, kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, dropout=0.0, recurrent_dropout=0.0, implementation=2, return_sequences=False, return_state=False, go_backwards=False, stateful=False, unroll=False)
```
Long Short-Term Memory layer - Hochreiter & Schmidhuber, 1997.

__Arguments__

- __units__: Positive integer, dimensionality of the output space.
- __activation__: Activation function to use (see [activations](../activations.md)). Default: hyperbolic tangent (`tanh`). If you pass `None`, no activation is applied (i.e. "linear" activation: `a(x) = x`).
- __recurrent_activation__: Activation function to use for the recurrent step (see [activations](../activations.md)). Default: sigmoid (`sigmoid`). If you pass `None`, no activation is applied (i.e. "linear" activation: `a(x) = x`).
- __use_bias__: Boolean, whether the layer uses a bias vector.
- __kernel_initializer__: Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs (see [initializers](../initializers.md)).
- __recurrent_initializer__: Initializer for the `recurrent_kernel` weights matrix, used for the linear transformation of the recurrent state (see [initializers](../initializers.md)).
- __bias_initializer__: Initializer for the bias vector (see [initializers](../initializers.md)).
- __unit_forget_bias__: Boolean. If True, add 1 to the bias of the forget gate at initialization. Setting it to True will also force `bias_initializer="zeros"`. This is recommended in [Jozefowicz et al. (2015)](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf).
- __kernel_regularizer__: Regularizer function applied to the `kernel` weights matrix (see [regularizer](../regularizers.md)).
- __recurrent_regularizer__: Regularizer function applied to the `recurrent_kernel` weights matrix (see [regularizer](../regularizers.md)).
- __bias_regularizer__: Regularizer function applied to the bias vector (see [regularizer](../regularizers.md)).
- __activity_regularizer__: Regularizer function applied to the output of the layer (its "activation") (see [regularizer](../regularizers.md)).
- __kernel_constraint__: Constraint function applied to the `kernel` weights matrix (see [constraints](../constraints.md)).
- __recurrent_constraint__: Constraint function applied to the `recurrent_kernel` weights matrix (see [constraints](../constraints.md)).
- __bias_constraint__: Constraint function applied to the bias vector (see [constraints](../constraints.md)).
- __dropout__: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the inputs.
- __recurrent_dropout__: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the recurrent state.
- __implementation__: Implementation mode, 1 or 2. Mode 1 will structure its operations as a larger number of smaller dot products and additions, whereas mode 2 will batch them into fewer, larger operations. These modes will have different performance profiles on different hardware and for different applications.
- __return_sequences__: Boolean. Whether to return the last output in the output sequence, or the full sequence.
- __return_state__: Boolean. Whether to return the last state in addition to the output. The returned elements of the states list are the hidden state and the cell state, respectively.
- __go_backwards__: Boolean (default False). If True, process the input sequence backwards and return the reversed sequence.
- __stateful__: Boolean (default False). If True, the last state for each sample at index i in a batch will be used as the initial state for the sample of index i in the following batch.
- __unroll__: Boolean (default False). If True, the network will be unrolled, else a symbolic loop will be used. Unrolling can speed up an RNN, although it tends to be more memory-intensive. Unrolling is only suitable for short sequences.

__References__
- [Long short-term memory](http://www.bioinf.jku.at/publications/older/2604.pdf)
- [Learning to forget: Continual prediction with LSTM](http://www.mitpressjournals.org/doi/pdf/10.1162/089976600300015015)
- [Supervised sequence labeling with recurrent neural networks](http://www.cs.toronto.edu/~graves/preprint.pdf)
- [A Theoretically Grounded Application of Dropout in Recurrent Neural Networks](http://arxiv.org/abs/1512.05287)
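
For illustration, a minimal sketch of `return_sequences` and `return_state` (the names and shapes are arbitrary, not from the original reference):

```python
from keras.models import Model
from keras.layers import Input, LSTM

inputs = Input(shape=(10, 8))
# outputs: (batch, 10, 16); state_h and state_c: (batch, 16) each.
outputs, state_h, state_c = LSTM(16, return_sequences=True, return_state=True)(inputs)
model = Model(inputs, [outputs, state_h, state_c])
```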
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/convolutional_recurrent.py#L795)</span>
### ConvLSTM2D
```python
keras.layers.ConvLSTM2D(filters, kernel_size, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1), activation='tanh', recurrent_activation='hard_sigmoid', use_bias=True, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', unit_forget_bias=True, kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, return_sequences=False, go_backwards=False, stateful=False, dropout=0.0, recurrent_dropout=0.0)
```
Convolutional LSTM.

It is similar to an LSTM layer, but the input transformations and recurrent transformations are both convolutional.

__Arguments__

- __filters__: Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution).
- __kernel_size__: An integer or tuple/list of n integers, specifying the dimensions of the convolution window.
- __strides__: An integer or tuple/list of n integers, specifying the strides of the convolution. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1.
- __padding__: One of `"valid"` or `"same"` (case-insensitive).
- __data_format__: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, time, ..., channels)` while `channels_first` corresponds to inputs with shape `(batch, time, channels, ...)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be `"channels_last"`.
- __dilation_rate__: An integer or tuple/list of n integers, specifying the dilation rate to use for dilated convolution. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any stride value != 1.
- __activation__: Activation function to use (see [activations](../activations.md)). If you pass `None`, no activation is applied (i.e. "linear" activation: `a(x) = x`).
- __recurrent_activation__: Activation function to use for the recurrent step (see [activations](../activations.md)).
- __use_bias__: Boolean, whether the layer uses a bias vector.
- __kernel_initializer__: Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs (see [initializers](../initializers.md)).
- __recurrent_initializer__: Initializer for the `recurrent_kernel` weights matrix, used for the linear transformation of the recurrent state (see [initializers](../initializers.md)).
- __bias_initializer__: Initializer for the bias vector (see [initializers](../initializers.md)).
- __unit_forget_bias__: Boolean. If True, add 1 to the bias of the forget gate at initialization. Setting it to True will also force `bias_initializer="zeros"`. This is recommended in [Jozefowicz et al. (2015)](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf).
- __kernel_regularizer__: Regularizer function applied to the `kernel` weights matrix (see [regularizer](../regularizers.md)).
- __recurrent_regularizer__: Regularizer function applied to the `recurrent_kernel` weights matrix (see [regularizer](../regularizers.md)).
- __bias_regularizer__: Regularizer function applied to the bias vector (see [regularizer](../regularizers.md)).
- __activity_regularizer__: Regularizer function applied to the output of the layer (its "activation") (see [regularizer](../regularizers.md)).
- __kernel_constraint__: Constraint function applied to the `kernel` weights matrix (see [constraints](../constraints.md)).
- __recurrent_constraint__: Constraint function applied to the `recurrent_kernel` weights matrix (see [constraints](../constraints.md)).
- __bias_constraint__: Constraint function applied to the bias vector (see [constraints](../constraints.md)).
- __return_sequences__: Boolean. Whether to return the last output in the output sequence, or the full sequence.
- __go_backwards__: Boolean (default False). If True, process the input sequence backwards and return the reversed sequence.
- __stateful__: Boolean (default False). If True, the last state for each sample at index i in a batch will be used as the initial state for the sample of index i in the following batch.
- __dropout__: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the inputs.
- __recurrent_dropout__: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the recurrent state.

__Input shape__

- If data_format='channels_first', 5D tensor with shape: `(samples, time, channels, rows, cols)`.
- If data_format='channels_last', 5D tensor with shape: `(samples, time, rows, cols, channels)`.

__Output shape__

- If `return_sequences`,
    - if data_format='channels_first', 5D tensor with shape: `(samples, time, filters, output_row, output_col)`.
    - if data_format='channels_last', 5D tensor with shape: `(samples, time, output_row, output_col, filters)`.
- Else,
    - if data_format='channels_first', 4D tensor with shape: `(samples, filters, output_row, output_col)`.
    - if data_format='channels_last', 4D tensor with shape: `(samples, output_row, output_col, filters)`.

where `output_row` and `output_col` depend on the shape of the filter and the padding.

__Raises__

- __ValueError__: in case of invalid constructor arguments.

__References__

- [Convolutional LSTM Network: A Machine Learning Approach for Precipitation Nowcasting](http://arxiv.org/abs/1506.04214v1). The current implementation does not include the feedback loop on the cells output.
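
A minimal usage sketch with `channels_last` data (the shapes below are arbitrary, chosen only for illustration):

```python
from keras.models import Sequential
from keras.layers import ConvLSTM2D

# Input: (samples, time, rows, cols, channels) = (None, 10, 64, 64, 1).
model = Sequential()
model.add(ConvLSTM2D(filters=32, kernel_size=(3, 3), padding='same',
                     input_shape=(10, 64, 64, 1)))
# Output with return_sequences=False: (batch_size, 64, 64, 32).
```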
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/convolutional_recurrent.py#L479)</span>
### ConvLSTM2DCell
```python
keras.layers.ConvLSTM2DCell(filters, kernel_size, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1), activation='tanh', recurrent_activation='hard_sigmoid', use_bias=True, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', unit_forget_bias=True, kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, dropout=0.0, recurrent_dropout=0.0)
```
Cell class for the ConvLSTM2D layer.

__Arguments__

- __filters__: Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution).
- __kernel_size__: An integer or tuple/list of n integers, specifying the dimensions of the convolution window.
- __strides__: An integer or tuple/list of n integers, specifying the strides of the convolution. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1.
- __padding__: One of `"valid"` or `"same"` (case-insensitive).
- __data_format__: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, time, ..., channels)` while `channels_first` corresponds to inputs with shape `(batch, time, channels, ...)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be `"channels_last"`.
- __dilation_rate__: An integer or tuple/list of n integers, specifying the dilation rate to use for dilated convolution. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any stride value != 1.
- __activation__: Activation function to use (see [activations](../activations.md)). If you pass `None`, no activation is applied (i.e. "linear" activation: `a(x) = x`).
- __recurrent_activation__: Activation function to use for the recurrent step (see [activations](../activations.md)).
- __use_bias__: Boolean, whether the layer uses a bias vector.
- __kernel_initializer__: Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs (see [initializers](../initializers.md)).
- __recurrent_initializer__: Initializer for the `recurrent_kernel` weights matrix, used for the linear transformation of the recurrent state (see [initializers](../initializers.md)).
- __bias_initializer__: Initializer for the bias vector (see [initializers](../initializers.md)).
- __unit_forget_bias__: Boolean. If True, add 1 to the bias of the forget gate at initialization. Setting it to True will also force `bias_initializer="zeros"`. This is recommended in [Jozefowicz et al. (2015)](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf).
- __kernel_regularizer__: Regularizer function applied to the `kernel` weights matrix (see [regularizer](../regularizers.md)).
- __recurrent_regularizer__: Regularizer function applied to the `recurrent_kernel` weights matrix (see [regularizer](../regularizers.md)).
- __bias_regularizer__: Regularizer function applied to the bias vector (see [regularizer](../regularizers.md)).
- __kernel_constraint__: Constraint function applied to the `kernel` weights matrix (see [constraints](../constraints.md)).
- __recurrent_constraint__: Constraint function applied to the `recurrent_kernel` weights matrix (see [constraints](../constraints.md)).
- __bias_constraint__: Constraint function applied to the bias vector (see [constraints](../constraints.md)).
- __dropout__: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the inputs.
- __recurrent_dropout__: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the recurrent state.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/recurrent.py#L812)</span>
### SimpleRNNCell
```python
keras.layers.SimpleRNNCell(units, activation='tanh', use_bias=True, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, dropout=0.0, recurrent_dropout=0.0)
```
Cell class for SimpleRNN.

__Arguments__

- __units__: Positive integer, dimensionality of the output space.
- __activation__: Activation function to use (see [activations](../activations.md)). Default: hyperbolic tangent (`tanh`). If you pass `None`, no activation is applied (i.e. "linear" activation: `a(x) = x`).
- __use_bias__: Boolean, whether the layer uses a bias vector.
- __kernel_initializer__: Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs (see [initializers](../initializers.md)).
- __recurrent_initializer__: Initializer for the `recurrent_kernel` weights matrix, used for the linear transformation of the recurrent state (see [initializers](../initializers.md)).
- __bias_initializer__: Initializer for the bias vector (see [initializers](../initializers.md)).
- __kernel_regularizer__: Regularizer function applied to the `kernel` weights matrix (see [regularizer](../regularizers.md)).
- __recurrent_regularizer__: Regularizer function applied to the `recurrent_kernel` weights matrix (see [regularizer](../regularizers.md)).
- __bias_regularizer__: Regularizer function applied to the bias vector (see [regularizer](../regularizers.md)).
- __kernel_constraint__: Constraint function applied to the `kernel` weights matrix (see [constraints](../constraints.md)).
- __recurrent_constraint__: Constraint function applied to the `recurrent_kernel` weights matrix (see [constraints](../constraints.md)).
- __bias_constraint__: Constraint function applied to the bias vector (see [constraints](../constraints.md)).
- __dropout__: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the inputs.
- __recurrent_dropout__: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the recurrent state.
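
Cells are not used on their own; they are wrapped in a `keras.layers.RNN` layer. A minimal sketch (the sizes below are arbitrary):

```python
from keras.models import Sequential
from keras.layers import RNN, SimpleRNNCell

model = Sequential()
# Equivalent to SimpleRNN(32) for sequences of 10 timesteps with 8 features.
model.add(RNN(SimpleRNNCell(32), input_shape=(10, 8)))
```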
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/recurrent.py#L1196)</span>
### GRUCell
```python
keras.layers.GRUCell(units, activation='tanh', recurrent_activation='sigmoid', use_bias=True, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, dropout=0.0, recurrent_dropout=0.0, implementation=2, reset_after=False)
```
Cell class for the GRU layer.

__Arguments__

- __units__: Positive integer, dimensionality of the output space.
- __activation__: Activation function to use (see [activations](../activations.md)). Default: hyperbolic tangent (`tanh`). If you pass `None`, no activation is applied (i.e. "linear" activation: `a(x) = x`).
- __recurrent_activation__: Activation function to use for the recurrent step (see [activations](../activations.md)). Default: sigmoid (`sigmoid`). If you pass `None`, no activation is applied (i.e. "linear" activation: `a(x) = x`).
- __use_bias__: Boolean, whether the layer uses a bias vector.
- __kernel_initializer__: Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs (see [initializers](../initializers.md)).
- __recurrent_initializer__: Initializer for the `recurrent_kernel` weights matrix, used for the linear transformation of the recurrent state (see [initializers](../initializers.md)).
- __bias_initializer__: Initializer for the bias vector (see [initializers](../initializers.md)).
- __kernel_regularizer__: Regularizer function applied to the `kernel` weights matrix (see [regularizer](../regularizers.md)).
- __recurrent_regularizer__: Regularizer function applied to the `recurrent_kernel` weights matrix (see [regularizer](../regularizers.md)).
- __bias_regularizer__: Regularizer function applied to the bias vector (see [regularizer](../regularizers.md)).
- __kernel_constraint__: Constraint function applied to the `kernel` weights matrix (see [constraints](../constraints.md)).
- __recurrent_constraint__: Constraint function applied to the `recurrent_kernel` weights matrix (see [constraints](../constraints.md)).
- __bias_constraint__: Constraint function applied to the bias vector (see [constraints](../constraints.md)).
- __dropout__: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the inputs.
- __recurrent_dropout__: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the recurrent state.
- __implementation__: Implementation mode, 1 or 2. Mode 1 will structure its operations as a larger number of smaller dot products and additions, whereas mode 2 will batch them into fewer, larger operations. These modes will have different performance profiles on different hardware and for different applications.
- __reset_after__: GRU convention (whether to apply the reset gate before or after the matrix multiplication). False = "before" (default), True = "after" (CuDNN compatible).
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/recurrent.py#L1798)</span>
### LSTMCell
```python
keras.layers.LSTMCell(units, activation='tanh', recurrent_activation='sigmoid', use_bias=True, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', unit_forget_bias=True, kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, dropout=0.0, recurrent_dropout=0.0, implementation=2)
```
Cell class for the LSTM layer.

__Arguments__

- __units__: Positive integer, dimensionality of the output space.
- __activation__: Activation function to use (see [activations](../activations.md)). Default: hyperbolic tangent (`tanh`). If you pass `None`, no activation is applied (i.e. "linear" activation: `a(x) = x`).
- __recurrent_activation__: Activation function to use for the recurrent step (see [activations](../activations.md)). Default: sigmoid (`sigmoid`).
- __use_bias__: Boolean, whether the layer uses a bias vector.
- __kernel_initializer__: Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs (see [initializers](../initializers.md)).
- __recurrent_initializer__: Initializer for the `recurrent_kernel` weights matrix, used for the linear transformation of the recurrent state (see [initializers](../initializers.md)).
- __bias_initializer__: Initializer for the bias vector (see [initializers](../initializers.md)).
- __unit_forget_bias__: Boolean. If True, add 1 to the bias of the forget gate at initialization. Setting it to True will also force `bias_initializer="zeros"`. This is recommended in [Jozefowicz et al. (2015)](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf).
- __kernel_regularizer__: Regularizer function applied to the `kernel` weights matrix (see [regularizer](../regularizers.md)).
- __recurrent_regularizer__: Regularizer function applied to the `recurrent_kernel` weights matrix (see [regularizer](../regularizers.md)).
- __bias_regularizer__: Regularizer function applied to the bias vector (see [regularizer](../regularizers.md)).
- __kernel_constraint__: Constraint function applied to the `kernel` weights matrix (see [constraints](../constraints.md)).
- __recurrent_constraint__: Constraint function applied to the `recurrent_kernel` weights matrix (see [constraints](../constraints.md)).
- __bias_constraint__: Constraint function applied to the bias vector (see [constraints](../constraints.md)).
- __dropout__: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the inputs.
- __recurrent_dropout__: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the recurrent state.
- __implementation__: Implementation mode, 1 or 2. Mode 1 will structure its operations as a larger number of smaller dot products and additions, whereas mode 2 will batch them into fewer, larger operations. These modes will have different performance profiles on different hardware and for different applications.
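
A minimal sketch of stacking cells inside a single `RNN` layer (arbitrary sizes, for illustration only):

```python
from keras.models import Sequential
from keras.layers import RNN, LSTMCell

model = Sequential()
# Two stacked LSTM cells processed as one recurrent layer.
model.add(RNN([LSTMCell(64), LSTMCell(64)], input_shape=(10, 8)))
```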
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/cudnn_recurrent.py#L135)</span>
### CuDNNGRU
```python
keras.layers.CuDNNGRU(units, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, return_sequences=False, return_state=False, stateful=False)
```
Fast GRU implementation backed by [CuDNN](https://developer.nvidia.com/cudnn).

Can only be run on GPU, with the TensorFlow backend.

__Arguments__

- __units__: Positive integer, dimensionality of the output space.
- __kernel_initializer__: Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs (see [initializers](../initializers.md)).
- __recurrent_initializer__: Initializer for the `recurrent_kernel` weights matrix, used for the linear transformation of the recurrent state (see [initializers](../initializers.md)).
- __bias_initializer__: Initializer for the bias vector (see [initializers](../initializers.md)).
- __kernel_regularizer__: Regularizer function applied to the `kernel` weights matrix (see [regularizer](../regularizers.md)).
- __recurrent_regularizer__: Regularizer function applied to the `recurrent_kernel` weights matrix (see [regularizer](../regularizers.md)).
- __bias_regularizer__: Regularizer function applied to the bias vector (see [regularizer](../regularizers.md)).
- __activity_regularizer__: Regularizer function applied to the output of the layer (its "activation") (see [regularizer](../regularizers.md)).
- __kernel_constraint__: Constraint function applied to the `kernel` weights matrix (see [constraints](../constraints.md)).
- __recurrent_constraint__: Constraint function applied to the `recurrent_kernel` weights matrix (see [constraints](../constraints.md)).
- __bias_constraint__: Constraint function applied to the bias vector (see [constraints](../constraints.md)).
- __return_sequences__: Boolean. Whether to return the last output in the output sequence, or the full sequence.
- __return_state__: Boolean. Whether to return the last state in addition to the output.
- __stateful__: Boolean (default False). If True, the last state for each sample at index i in a batch will be used as the initial state for the sample of index i in the following batch.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/cudnn_recurrent.py#L328)</span>
### CuDNNLSTM
```python
keras.layers.CuDNNLSTM(units, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', unit_forget_bias=True, kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, return_sequences=False, return_state=False, stateful=False)
```
Fast LSTM implementation backed by [CuDNN](https://developer.nvidia.com/cudnn).

Can only be run on GPU, with the TensorFlow backend.

__Arguments__

- __units__: Positive integer, dimensionality of the output space.
- __kernel_initializer__: Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs (see [initializers](../initializers.md)).
- __recurrent_initializer__: Initializer for the `recurrent_kernel` weights matrix, used for the linear transformation of the recurrent state (see [initializers](../initializers.md)).
- __bias_initializer__: Initializer for the bias vector (see [initializers](../initializers.md)).
- __unit_forget_bias__: Boolean. If True, add 1 to the bias of the forget gate at initialization. Setting it to True will also force `bias_initializer="zeros"`. This is recommended in [Jozefowicz et al. (2015)](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf).
- __kernel_regularizer__: Regularizer function applied to the `kernel` weights matrix (see [regularizer](../regularizers.md)).
- __recurrent_regularizer__: Regularizer function applied to the `recurrent_kernel` weights matrix (see [regularizer](../regularizers.md)).
- __bias_regularizer__: Regularizer function applied to the bias vector (see [regularizer](../regularizers.md)).
- __activity_regularizer__: Regularizer function applied to the output of the layer (its "activation") (see [regularizer](../regularizers.md)).
- __kernel_constraint__: Constraint function applied to the `kernel` weights matrix (see [constraints](../constraints.md)).
- __recurrent_constraint__: Constraint function applied to the `recurrent_kernel` weights matrix (see [constraints](../constraints.md)).
- __bias_constraint__: Constraint function applied to the bias vector (see [constraints](../constraints.md)).
- __return_sequences__: Boolean. Whether to return the last output in the output sequence, or the full sequence.
- __return_state__: Boolean. Whether to return the last state in addition to the output.
- __stateful__: Boolean (default False). If True, the last state for each sample at index i in a batch will be used as the initial state for the sample of index i in the following batch.
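
A minimal sketch (requires a GPU and the TensorFlow backend; the sizes below are arbitrary and only for illustration):

```python
from keras.models import Sequential
from keras.layers import CuDNNLSTM

model = Sequential()
# Intended as a faster drop-in replacement for LSTM(64) when running on GPU.
model.add(CuDNNLSTM(64, input_shape=(100, 32)))
```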
| keras-docs-zh/sources/layers/recurrent.md/0 | {
"file_path": "keras-docs-zh/sources/layers/recurrent.md",
"repo_id": "keras-docs-zh",
"token_count": 22379
} | 96 |
# Why use Keras?

Among the countless deep learning frameworks available today, why use Keras rather than any other? Here are some comparisons between Keras and existing alternatives.

---

## Keras prioritizes developer experience

- Keras is an API designed for human beings, not machines. [Keras follows best practices for reducing cognitive load](https://blog.keras.io/user-experience-design-for-apis.html): it offers consistent and simple APIs, it minimizes the number of user actions required for common use cases, and it provides clear and actionable feedback upon user error.
- This makes Keras easy to learn and easy to use. As a Keras user, you are more productive and can try more ideas faster than your competition, which in turn [helps you win machine learning competitions](https://www.quora.com/Why-has-Keras-been-so-successful-lately-at-Kaggle-competitions).
- This ease of use does not come at the cost of reduced flexibility: because Keras integrates with lower-level deep learning languages (in particular TensorFlow), it enables you to implement anything you could have built in the base language. In particular, as `tf.keras`, the Keras API integrates seamlessly with your TensorFlow workflows.

---

## Keras has broad adoption in industry and the research community
<img src='https://s3.amazonaws.com/keras.io/img/dl_frameworks_power_scores.png' style='width:500px; display: block; margin: 0 auto;'/>
<p style='font-style: italic; font-size: 10pt; text-align: center;'>
Deep learning framework rankings, computed by Jeff Hale from 11 data sources across 7 categories
</p>

As of mid-2018, Keras had over 250,000 individual users. Keras has stronger adoption in both industry and the research community than any other deep learning framework except TensorFlow itself (and the Keras API is the official frontend of TensorFlow, via the `tf.keras` module).

You are already constantly interacting with features built with Keras: it is in use at Netflix, Uber, Yelp, Instacart, Zocdoc, Square, and many others. It is especially popular among startups that place deep learning at the core of their products.

Keras is also a favorite among deep learning researchers, coming in second in terms of mentions in scientific papers uploaded to the preprint server [arXiv.org](https://arxiv.org/archive/cs). Keras has also been adopted by researchers at large scientific organizations, in particular CERN and NASA.

---

## Keras makes it easy to turn models into products

Compared to any other deep learning framework, your Keras models can be deployed on a wider range of platforms:

- On iOS, via [Apple's CoreML](https://developer.apple.com/documentation/coreml) (Keras support officially provided by Apple). Here is [a tutorial](https://www.pyimagesearch.com/2018/04/23/running-keras-models-on-ios-with-coreml/).
- On Android, via the TensorFlow Android runtime, e.g. the [Not Hotdog app](https://medium.com/@timanglade/how-hbos-silicon-valley-built-not-hotdog-with-mobile-tensorflow-keras-react-native-ef03260747f3).
- In the browser, via GPU-accelerated JavaScript runtimes such as [Keras.js](https://transcranial.github.io/keras-js/#/) and [WebDNN](https://mil-tokyo.github.io/webdnn/).
- On Google Cloud, via [TensorFlow-Serving](https://www.tensorflow.org/serving/).
- [In a Python webapp backend (such as a Flask app)](https://blog.keras.io/building-a-simple-keras-deep-learning-rest-api.html).
- On the JVM, via [DL4J model import provided by SkyMind](https://deeplearning4j.org/model-import-keras).
- On Raspberry Pi.

---

## Keras supports multiple backend engines and does not lock you into one ecosystem

Your Keras models can be developed with a range of different [deep learning backends](https://keras.io/zh/backend/). Importantly, any Keras model that only leverages built-in layers is portable across all these backends: you can train a model with one backend, and load it with another (e.g. for deployment). Available backends include:

- The TensorFlow backend (from Google)
- The CNTK backend (from Microsoft)
- The Theano backend

Amazon also maintains [a Keras fork with an MXNet backend](https://github.com/awslabs/keras-apache-mxnet).

As such, your Keras models can be trained on a number of different hardware platforms beyond CPUs:

- [NVIDIA GPUs](https://developer.nvidia.com/deep-learning)
- [Google TPUs](https://cloud.google.com/tpu/), via the TensorFlow backend and Google Cloud
- OpenCL-enabled GPUs, such as those from AMD, via [the PlaidML Keras backend](https://github.com/plaidml/plaidml)

---

## Keras has strong multi-GPU and distributed training support

- Keras has [built-in support for multi-GPU data parallelism](https://keras.io/zh/utils/#multi_gpu_model).
- [Horovod](https://github.com/uber/horovod), from Uber, has first-class support for Keras models.
- Keras models [can be turned into TensorFlow Estimators](https://www.tensorflow.org/versions/master/api_docs/python/tf/keras/estimator/model_to_estimator) and trained on [clusters of GPUs on Google Cloud](https://cloud.google.com/solutions/running-distributed-tensorflow-on-compute-engine).
- Keras can be run on Spark via [Dist-Keras](https://github.com/cerndb/dist-keras) (from CERN) and [Elephas](https://github.com/maxpumperla/elephas).

---

## Keras development is backed by key companies in the deep learning ecosystem

Keras development is backed primarily by Google, and the Keras API comes packaged in TensorFlow as `tf.keras`. Additionally, Microsoft maintains the CNTK Keras backend, and Amazon AWS is developing MXNet support. Other contributing companies include NVIDIA, Uber, and Apple (with CoreML).
<img src='/img/google-logo.png' style='width:200px; margin-right:15px;'/>
<img src='/img/microsoft-logo.png' style='width:200px; margin-right:15px;'/>
<img src='/img/nvidia-logo.png' style='width:200px; margin-right:15px;'/>
<img src='/img/aws-logo.png' style='width:110px; margin-right:15px;'/>
| keras-docs-zh/sources/why-use-keras.md/0 | {
"file_path": "keras-docs-zh/sources/why-use-keras.md",
"repo_id": "keras-docs-zh",
"token_count": 3204
} | 97 |
"""
Title: MelGAN-based spectrogram inversion using feature matching
Author: [Darshan Deshpande](https://twitter.com/getdarshan)
Date created: 02/09/2021
Last modified: 15/09/2021
Description: Inversion of audio from mel-spectrograms using the MelGAN architecture and feature matching.
Accelerator: GPU
"""
"""
## Introduction
Autoregressive vocoders have been ubiquitous for a majority of the history of speech processing,
but for most of their existence they have lacked parallelism.
[MelGAN](https://arxiv.org/abs/1910.06711) is a
non-autoregressive, fully convolutional vocoder architecture used for purposes ranging
from spectral inversion and speech enhancement to present-day state-of-the-art
speech synthesis when used as a decoder
with models like Tacotron2 or FastSpeech that convert text to mel spectrograms.
In this tutorial, we will have a look at the MelGAN architecture and how it can achieve
fast spectral inversion, i.e. conversion of spectrograms to audio waves. The MelGAN
implemented in this tutorial is similar to the original implementation; the only
difference is the padding method for the convolutions, where we use 'same' padding
instead of reflect padding.
"""
"""
## Importing and Defining Hyperparameters
"""
"""shell
pip install -qqq tensorflow_addons
pip install -qqq tensorflow-io
"""
import tensorflow as tf
import tensorflow_io as tfio
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow_addons import layers as addon_layers
# Setting logger level to avoid input shape warnings
tf.get_logger().setLevel("ERROR")
# Defining hyperparameters
DESIRED_SAMPLES = 8192
LEARNING_RATE_GEN = 1e-5
LEARNING_RATE_DISC = 1e-6
BATCH_SIZE = 16
mse = keras.losses.MeanSquaredError()
mae = keras.losses.MeanAbsoluteError()
"""
## Loading the Dataset
This example uses the [LJSpeech dataset](https://keithito.com/LJ-Speech-Dataset/).
The LJSpeech dataset is primarily used for text-to-speech and consists of 13,100 discrete
speech samples taken from 7 non-fiction books, having a total length of approximately 24
hours. The MelGAN training is only concerned with the audio waves so we process only the
WAV files and ignore the audio annotations.
"""
"""shell
wget https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2
tar -xf /content/LJSpeech-1.1.tar.bz2
"""
"""
We create a `tf.data.Dataset` to load and process the audio files on the fly.
The `preprocess()` function takes the file path as input and returns two instances of the
wave, one for input and one as the ground truth for comparison. The input wave will be
mapped to a spectrogram using the custom `MelSpec` layer as shown later in this example.
"""
# Splitting the dataset into training and testing splits
wavs = tf.io.gfile.glob("LJSpeech-1.1/wavs/*.wav")
print(f"Number of audio files: {len(wavs)}")
# Mapper function for loading the audio. This function returns two instances of the wave
def preprocess(filename):
audio = tf.audio.decode_wav(tf.io.read_file(filename), 1, DESIRED_SAMPLES).audio
return audio, audio
# Create tf.data.Dataset objects and apply preprocessing
train_dataset = tf.data.Dataset.from_tensor_slices((wavs,))
train_dataset = train_dataset.map(preprocess, num_parallel_calls=tf.data.AUTOTUNE)
"""
## Defining custom layers for MelGAN
The MelGAN architecture consists of 3 main modules:
1. The residual block
2. Dilated convolutional block
3. Discriminator block
![MelGAN](https://i.imgur.com/ZdxwzPG.png)
"""
"""
Since the network takes a mel-spectrogram as input, we will create an additional custom
layer
which can convert the raw audio wave to a spectrogram on-the-fly. We use the raw audio
tensor from `train_dataset` and map it to a mel-spectrogram using the `MelSpec` layer
below.
"""
# Custom keras layer for on-the-fly audio to spectrogram conversion
class MelSpec(layers.Layer):
def __init__(
self,
frame_length=1024,
frame_step=256,
fft_length=None,
sampling_rate=22050,
num_mel_channels=80,
freq_min=125,
freq_max=7600,
**kwargs,
):
super().__init__(**kwargs)
self.frame_length = frame_length
self.frame_step = frame_step
self.fft_length = fft_length
self.sampling_rate = sampling_rate
self.num_mel_channels = num_mel_channels
self.freq_min = freq_min
self.freq_max = freq_max
# Defining mel filter. This filter will be multiplied with the STFT output
self.mel_filterbank = tf.signal.linear_to_mel_weight_matrix(
num_mel_bins=self.num_mel_channels,
num_spectrogram_bins=self.frame_length // 2 + 1,
sample_rate=self.sampling_rate,
lower_edge_hertz=self.freq_min,
upper_edge_hertz=self.freq_max,
)
def call(self, audio, training=True):
# We will only perform the transformation during training.
if training:
# Taking the Short Time Fourier Transform. Ensure that the audio is padded.
# In the paper, the STFT output is padded using the 'REFLECT' strategy.
stft = tf.signal.stft(
tf.squeeze(audio, -1),
self.frame_length,
self.frame_step,
self.fft_length,
pad_end=True,
)
# Taking the magnitude of the STFT output
magnitude = tf.abs(stft)
# Multiplying the Mel-filterbank with the magnitude and scaling it using the db scale
mel = tf.matmul(tf.square(magnitude), self.mel_filterbank)
log_mel_spec = tfio.audio.dbscale(mel, top_db=80)
return log_mel_spec
else:
return audio
def get_config(self):
config = super().get_config()
config.update(
{
"frame_length": self.frame_length,
"frame_step": self.frame_step,
"fft_length": self.fft_length,
"sampling_rate": self.sampling_rate,
"num_mel_channels": self.num_mel_channels,
"freq_min": self.freq_min,
"freq_max": self.freq_max,
}
)
return config
"""
The residual convolutional block extensively uses dilations and has a total receptive
field of 27 timesteps per block. The dilations must grow as a power of the `kernel_size`
to ensure reduction of hissing noise in the output. The network proposed by the paper is
as follows:
![ConvBlock](https://i.imgur.com/sFnnsCll.jpg)
"""
# Creating the residual stack block
def residual_stack(input, filters):
"""Convolutional residual stack with weight normalization.
Args:
filters: int, determines filter size for the residual stack.
Returns:
Residual stack output.
"""
c1 = addon_layers.WeightNormalization(
layers.Conv1D(filters, 3, dilation_rate=1, padding="same"), data_init=False
)(input)
lrelu1 = layers.LeakyReLU()(c1)
c2 = addon_layers.WeightNormalization(
layers.Conv1D(filters, 3, dilation_rate=1, padding="same"), data_init=False
)(lrelu1)
add1 = layers.Add()([c2, input])
lrelu2 = layers.LeakyReLU()(add1)
c3 = addon_layers.WeightNormalization(
layers.Conv1D(filters, 3, dilation_rate=3, padding="same"), data_init=False
)(lrelu2)
lrelu3 = layers.LeakyReLU()(c3)
c4 = addon_layers.WeightNormalization(
layers.Conv1D(filters, 3, dilation_rate=1, padding="same"), data_init=False
)(lrelu3)
add2 = layers.Add()([add1, c4])
lrelu4 = layers.LeakyReLU()(add2)
c5 = addon_layers.WeightNormalization(
layers.Conv1D(filters, 3, dilation_rate=9, padding="same"), data_init=False
)(lrelu4)
lrelu5 = layers.LeakyReLU()(c5)
c6 = addon_layers.WeightNormalization(
layers.Conv1D(filters, 3, dilation_rate=1, padding="same"), data_init=False
)(lrelu5)
add3 = layers.Add()([c6, add2])
return add3
"""
Each convolutional block uses the dilations offered by the residual stack
and upsamples the input data by the `upsampling_factor`.
"""
# Dilated convolutional block consisting of the Residual stack
def conv_block(input, conv_dim, upsampling_factor):
"""Dilated Convolutional Block with weight normalization.
Args:
conv_dim: int, determines filter size for the block.
upsampling_factor: int, scale for upsampling.
Returns:
Dilated convolution block.
"""
conv_t = addon_layers.WeightNormalization(
layers.Conv1DTranspose(conv_dim, 16, upsampling_factor, padding="same"),
data_init=False,
)(input)
lrelu1 = layers.LeakyReLU()(conv_t)
res_stack = residual_stack(lrelu1, conv_dim)
lrelu2 = layers.LeakyReLU()(res_stack)
return lrelu2
"""
The discriminator block consists of convolutions and downsampling layers. This block is
essential for the implementation of the feature matching technique.
Each discriminator outputs a list of feature maps that will be compared during training
to compute the feature matching loss.
"""
def discriminator_block(input):
conv1 = addon_layers.WeightNormalization(
layers.Conv1D(16, 15, 1, "same"), data_init=False
)(input)
lrelu1 = layers.LeakyReLU()(conv1)
conv2 = addon_layers.WeightNormalization(
layers.Conv1D(64, 41, 4, "same", groups=4), data_init=False
)(lrelu1)
lrelu2 = layers.LeakyReLU()(conv2)
conv3 = addon_layers.WeightNormalization(
layers.Conv1D(256, 41, 4, "same", groups=16), data_init=False
)(lrelu2)
lrelu3 = layers.LeakyReLU()(conv3)
conv4 = addon_layers.WeightNormalization(
layers.Conv1D(1024, 41, 4, "same", groups=64), data_init=False
)(lrelu3)
lrelu4 = layers.LeakyReLU()(conv4)
conv5 = addon_layers.WeightNormalization(
layers.Conv1D(1024, 41, 4, "same", groups=256), data_init=False
)(lrelu4)
lrelu5 = layers.LeakyReLU()(conv5)
conv6 = addon_layers.WeightNormalization(
layers.Conv1D(1024, 5, 1, "same"), data_init=False
)(lrelu5)
lrelu6 = layers.LeakyReLU()(conv6)
conv7 = addon_layers.WeightNormalization(
layers.Conv1D(1, 3, 1, "same"), data_init=False
)(lrelu6)
return [lrelu1, lrelu2, lrelu3, lrelu4, lrelu5, lrelu6, conv7]
"""
### Create the generator
"""
def create_generator(input_shape):
inp = keras.Input(input_shape)
x = MelSpec()(inp)
x = layers.Conv1D(512, 7, padding="same")(x)
x = layers.LeakyReLU()(x)
x = conv_block(x, 256, 8)
x = conv_block(x, 128, 8)
x = conv_block(x, 64, 2)
x = conv_block(x, 32, 2)
x = addon_layers.WeightNormalization(
layers.Conv1D(1, 7, padding="same", activation="tanh")
)(x)
return keras.Model(inp, x)
# We use a dynamic input shape for the generator since the model is fully convolutional
generator = create_generator((None, 1))
generator.summary()
"""
### Create the discriminator
"""
def create_discriminator(input_shape):
inp = keras.Input(input_shape)
out_map1 = discriminator_block(inp)
pool1 = layers.AveragePooling1D()(inp)
out_map2 = discriminator_block(pool1)
pool2 = layers.AveragePooling1D()(pool1)
out_map3 = discriminator_block(pool2)
return keras.Model(inp, [out_map1, out_map2, out_map3])
# We use a dynamic input shape for the discriminator
# This is done because the input shape for the generator is unknown
discriminator = create_discriminator((None, 1))
discriminator.summary()
"""
## Defining the loss functions
**Generator Loss**
The generator architecture uses a combination of two losses
1. Mean Squared Error:
This is the standard MSE generator loss calculated between ones and the outputs from the
discriminator with _N_ layers.
<p align="center">
<img src="https://i.imgur.com/dz4JS3I.png" width=300px;></img>
</p>
2. Feature Matching Loss:
This loss involves extracting the outputs of every layer from the discriminator for both
the generator and ground truth and compare each layer output _k_ using Mean Absolute Error.
<p align="center">
<img src="https://i.imgur.com/gEpSBar.png" width=400px;></img>
</p>
**Discriminator Loss**
The discriminator uses the Mean Squared Error, comparing the real data predictions
with ones and the generated predictions with zeros (see `discriminator_loss` below).
<p align="center">
<img src="https://i.imgur.com/bbEnJ3t.png" width=425px;></img>
</p>
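
Putting these together, a compact restatement of the losses as implemented in the code
below (the notation is ours, not from the paper: $D_k$ is the $k$-th discriminator scale,
$D_k^{(i)}$ its $i$-th intermediate feature map, $x$ the real audio and $\hat{x}$ the
generated audio):

$$\mathcal{L}_{G} = \operatorname{mean}_k \, \mathrm{MSE}\big(1, D_k(\hat{x})\big), \qquad
\mathcal{L}_{FM} = \operatorname{mean}_{k,i} \, \big\| D_k^{(i)}(x) - D_k^{(i)}(\hat{x}) \big\|_1,$$

$$\mathcal{L}_{D} = \operatorname{mean}_k \, \mathrm{MSE}\big(1, D_k(x)\big)
+ \operatorname{mean}_k \, \mathrm{MSE}\big(0, D_k(\hat{x})\big),$$

and the generator is trained on $\mathcal{L}_{G} + 10\,\mathcal{L}_{FM}$ (see `train_step` below).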
"""
# Generator loss
def generator_loss(real_pred, fake_pred):
"""Loss function for the generator.
Args:
real_pred: Tensor, output of the ground truth wave passed through the discriminator.
fake_pred: Tensor, output of the generator prediction passed through the discriminator.
Returns:
Loss for the generator.
"""
gen_loss = []
for i in range(len(fake_pred)):
gen_loss.append(mse(tf.ones_like(fake_pred[i][-1]), fake_pred[i][-1]))
return tf.reduce_mean(gen_loss)
def feature_matching_loss(real_pred, fake_pred):
"""Implements the feature matching loss.
Args:
real_pred: Tensor, output of the ground truth wave passed through the discriminator.
fake_pred: Tensor, output of the generator prediction passed through the discriminator.
Returns:
Feature Matching Loss.
"""
fm_loss = []
for i in range(len(fake_pred)):
for j in range(len(fake_pred[i]) - 1):
fm_loss.append(mae(real_pred[i][j], fake_pred[i][j]))
return tf.reduce_mean(fm_loss)
def discriminator_loss(real_pred, fake_pred):
"""Implements the discriminator loss.
Args:
real_pred: Tensor, output of the ground truth wave passed through the discriminator.
fake_pred: Tensor, output of the generator prediction passed through the discriminator.
Returns:
Discriminator Loss.
"""
real_loss, fake_loss = [], []
for i in range(len(real_pred)):
real_loss.append(mse(tf.ones_like(real_pred[i][-1]), real_pred[i][-1]))
fake_loss.append(mse(tf.zeros_like(fake_pred[i][-1]), fake_pred[i][-1]))
# Calculating the final discriminator loss after scaling
disc_loss = tf.reduce_mean(real_loss) + tf.reduce_mean(fake_loss)
return disc_loss
"""
Defining the MelGAN model for training.
This subclass overrides the `train_step()` method to implement the training logic.
"""
class MelGAN(keras.Model):
def __init__(self, generator, discriminator, **kwargs):
"""MelGAN trainer class
Args:
generator: keras.Model, Generator model
discriminator: keras.Model, Discriminator model
"""
super().__init__(**kwargs)
self.generator = generator
self.discriminator = discriminator
def compile(
self,
gen_optimizer,
disc_optimizer,
generator_loss,
feature_matching_loss,
discriminator_loss,
):
"""MelGAN compile method.
Args:
gen_optimizer: keras.optimizer, optimizer to be used for training
disc_optimizer: keras.optimizer, optimizer to be used for training
generator_loss: callable, loss function for generator
feature_matching_loss: callable, loss function for feature matching
discriminator_loss: callable, loss function for discriminator
"""
super().compile()
# Optimizers
self.gen_optimizer = gen_optimizer
self.disc_optimizer = disc_optimizer
# Losses
self.generator_loss = generator_loss
self.feature_matching_loss = feature_matching_loss
self.discriminator_loss = discriminator_loss
# Trackers
self.gen_loss_tracker = keras.metrics.Mean(name="gen_loss")
self.disc_loss_tracker = keras.metrics.Mean(name="disc_loss")
    def train_step(self, batch):
        x_batch_train, y_batch_train = batch

        with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
            # Generating the audio wave
            gen_audio_wave = self.generator(x_batch_train, training=True)

            # Generating the features using the discriminator
            real_pred = self.discriminator(y_batch_train)
            fake_pred = self.discriminator(gen_audio_wave)

            # Calculating the generator losses
            gen_loss = self.generator_loss(real_pred, fake_pred)
            fm_loss = self.feature_matching_loss(real_pred, fake_pred)

            # Calculating final generator loss
            gen_fm_loss = gen_loss + 10 * fm_loss

            # Calculating the discriminator losses
            disc_loss = self.discriminator_loss(real_pred, fake_pred)

        # Calculating and applying the gradients for generator and discriminator
        grads_gen = gen_tape.gradient(gen_fm_loss, self.generator.trainable_weights)
        grads_disc = disc_tape.gradient(disc_loss, self.discriminator.trainable_weights)
        self.gen_optimizer.apply_gradients(zip(grads_gen, self.generator.trainable_weights))
        self.disc_optimizer.apply_gradients(zip(grads_disc, self.discriminator.trainable_weights))

        self.gen_loss_tracker.update_state(gen_fm_loss)
        self.disc_loss_tracker.update_state(disc_loss)

        return {
            "gen_loss": self.gen_loss_tracker.result(),
            "disc_loss": self.disc_loss_tracker.result(),
        }
"""
## Training
The paper suggests that the training with dynamic shapes takes around 400,000 steps (~500
epochs). For this example, we will run it only for a single epoch (819 steps).
Longer training time (greater than 300 epochs) will almost certainly provide better results.
"""
gen_optimizer = keras.optimizers.Adam(
LEARNING_RATE_GEN, beta_1=0.5, beta_2=0.9, clipnorm=1
)
disc_optimizer = keras.optimizers.Adam(
LEARNING_RATE_DISC, beta_1=0.5, beta_2=0.9, clipnorm=1
)
# Start training
generator = create_generator((None, 1))
discriminator = create_discriminator((None, 1))
mel_gan = MelGAN(generator, discriminator)
mel_gan.compile(
gen_optimizer,
disc_optimizer,
generator_loss,
feature_matching_loss,
discriminator_loss,
)
mel_gan.fit(
train_dataset.shuffle(200).batch(BATCH_SIZE).prefetch(tf.data.AUTOTUNE), epochs=1
)
"""
## Testing the model
The trained model can now be used for real time text-to-speech translation tasks.
To test how fast the MelGAN inference can be, let us take a sample audio mel-spectrogram
and convert it. Note that the actual model pipeline will not include the `MelSpec` layer
and hence this layer will be disabled during inference. The inference input will be a
mel-spectrogram processed similar to the `MelSpec` layer configuration.
For testing this, we will create a randomly uniformly distributed tensor to simulate the
behavior of the inference pipeline.
"""
# Sampling a random tensor to mimic a batch of 128 spectrograms of shape [50, 80]
audio_sample = tf.random.uniform([128, 50, 80])
"""
Timing the inference speed of a single sample. Running this, you can see that the average
inference time per spectrogram ranges from 8 milliseconds to 10 milliseconds on a K80 GPU which is
pretty fast.
"""
pred = generator.predict(audio_sample, batch_size=32, verbose=1)
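
"""
A rough sketch of how the latency quoted above could actually be measured. Wall-clock
timing is hardware dependent, so the numbers you get are illustrative and not part of the
original benchmark.
"""

import time

start = time.time()
_ = generator.predict(audio_sample, batch_size=32, verbose=0)
per_spectrogram_ms = (time.time() - start) / audio_sample.shape[0] * 1000
print(f"Average inference time per spectrogram: {per_spectrogram_ms:.2f} ms")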
"""
## Conclusion
MelGAN is a highly effective architecture for spectral inversion: it achieves a Mean
Opinion Score (MOS) of 3.61, considerably outperforming the Griffin-Lim algorithm,
which has a MOS of just 1.57. Beyond spectral inversion, MelGAN is also competitive with
the state-of-the-art WaveGlow and WaveNet architectures on text-to-speech and speech
enhancement tasks on the LJSpeech and VCTK datasets <sup>[1]</sup>.
This tutorial highlights:
1. The advantages of using dilated convolutions that grow with the filter size
2. Implementation of a custom layer for on-the-fly conversion of audio waves to
mel-spectrograms
3. Effectiveness of using the feature matching loss function for training GAN generators.
Further reading
1. [MelGAN paper](https://arxiv.org/abs/1910.06711) (Kundan Kumar et al.) to
understand the reasoning behind the architecture and training process
2. For in-depth understanding of the feature matching loss, you can refer to [Improved
Techniques for Training GANs](https://arxiv.org/abs/1606.03498) (Tim Salimans et
al.).
"""
| keras-io/examples/audio/melgan_spectrogram_inversion.py/0 | {
"file_path": "keras-io/examples/audio/melgan_spectrogram_inversion.py",
"repo_id": "keras-io",
"token_count": 7617
} | 98 |
"""
Title: GauGAN for conditional image generation
Author: [Soumik Rakshit](https://github.com/soumik12345), [Sayak Paul](https://twitter.com/RisingSayak)
Date created: 2021/12/26
Last modified: 2022/01/03
Description: Implementing a GauGAN for conditional image generation.
Accelerator: GPU
"""
"""
## Introduction
In this example, we present an implementation of the GauGAN architecture proposed in
[Semantic Image Synthesis with Spatially-Adaptive Normalization](https://arxiv.org/abs/1903.07291).
Briefly, GauGAN uses a Generative Adversarial Network (GAN) to generate realistic images
that are conditioned on cue images and segmentation maps, as shown below
([image source](https://nvlabs.github.io/SPADE/)):
![](https://i.ibb.co/p305dzv/image.png)
The main components of a GauGAN are:
- **SPADE (aka spatially-adaptive normalization)** : The authors of GauGAN argue that the
more conventional normalization layers (such as
[Batch Normalization](https://arxiv.org/abs/1502.03167))
destroy the semantic information obtained from segmentation maps that
are provided as inputs. To address this problem, the authors introduce SPADE, a
normalization layer particularly suitable for learning affine parameters (scale and bias)
that are spatially adaptive. This is done by learning different sets of scaling and
bias parameters for each semantic label.
- **Variational encoder**: Inspired by
[Variational Autoencoders](https://arxiv.org/abs/1312.6114), GauGAN uses a
variational formulation wherein an encoder learns the mean and variance of a
normal (Gaussian) distribution from the cue images. This is where GauGAN gets its name
from. The generator of GauGAN takes as inputs the latents sampled from the Gaussian
distribution as well as the one-hot encoded semantic segmentation label maps. The cue
images act as style images that guide the generator to stylistic generation. This
variational formulation helps GauGAN achieve image diversity as well as fidelity.
- **Multi-scale patch discriminator** : Inspired by the
[PatchGAN](https://paperswithcode.com/method/patchgan) model,
GauGAN uses a discriminator that assesses a given image on a patch basis
and produces an averaged score.
As we proceed with the example, we will discuss each of the different
components in further detail.
For a thorough review of GauGAN, please refer to
[this article](https://blog.paperspace.com/nvidia-gaugan-introduction/).
We also encourage you to check out
[the official GauGAN website](https://nvlabs.github.io/SPADE/), which
has many creative applications of GauGAN. This example assumes that the reader is already
familiar with the fundamental concepts of GANs. If you need a refresher, the following
resources might be useful:
* [Chapter on GANs](https://livebook.manning.com/book/deep-learning-with-python/chapter-8)
from the Deep Learning with Python book by François Chollet.
* GAN implementations on keras.io:
* [Data efficient GANs](https://keras.io/examples/generative/gan_ada)
* [CycleGAN](https://keras.io/examples/generative/cyclegan)
* [Conditional GAN](https://keras.io/examples/generative/conditional_gan)
"""
"""
## Data collection
We will be using the
[Facades dataset](https://cmp.felk.cvut.cz/~tylecr1/facade/)
for training our GauGAN model. Let's first download it.
"""
"""shell
wget https://drive.google.com/uc?id=1q4FEjQg1YSb4mPx2VdxL7LXKYu3voTMj -O facades_data.zip
unzip -q facades_data.zip
"""
"""
## Imports
"""
import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import keras
from keras import ops
from keras import layers
from glob import glob
"""
## Data splitting
"""
PATH = "./facades_data/"
SPLIT = 0.2
files = glob(PATH + "*.jpg")
np.random.shuffle(files)
split_index = int(len(files) * (1 - SPLIT))
train_files = files[:split_index]
val_files = files[split_index:]
print(f"Total samples: {len(files)}.")
print(f"Total training samples: {len(train_files)}.")
print(f"Total validation samples: {len(val_files)}.")
"""
## Data loader
"""
BATCH_SIZE = 4
IMG_HEIGHT = IMG_WIDTH = 256
NUM_CLASSES = 12
AUTOTUNE = tf.data.AUTOTUNE
def load(image_files, batch_size, is_train=True):
def _random_crop(
segmentation_map,
image,
labels,
crop_size=(IMG_HEIGHT, IMG_WIDTH),
):
crop_size = tf.convert_to_tensor(crop_size)
image_shape = tf.shape(image)[:2]
margins = image_shape - crop_size
y1 = tf.random.uniform(shape=(), maxval=margins[0], dtype=tf.int32)
x1 = tf.random.uniform(shape=(), maxval=margins[1], dtype=tf.int32)
y2 = y1 + crop_size[0]
x2 = x1 + crop_size[1]
cropped_images = []
images = [segmentation_map, image, labels]
for img in images:
cropped_images.append(img[y1:y2, x1:x2])
return cropped_images
def _load_data_tf(image_file, segmentation_map_file, label_file):
image = tf.image.decode_png(tf.io.read_file(image_file), channels=3)
segmentation_map = tf.image.decode_png(
tf.io.read_file(segmentation_map_file), channels=3
)
labels = tf.image.decode_bmp(tf.io.read_file(label_file), channels=0)
labels = tf.squeeze(labels)
image = tf.cast(image, tf.float32) / 127.5 - 1
segmentation_map = tf.cast(segmentation_map, tf.float32) / 127.5 - 1
return segmentation_map, image, labels
def _one_hot(segmentation_maps, real_images, labels):
labels = tf.one_hot(labels, NUM_CLASSES)
labels.set_shape((None, None, NUM_CLASSES))
return segmentation_maps, real_images, labels
segmentation_map_files = [
image_file.replace("images", "segmentation_map").replace("jpg", "png")
for image_file in image_files
]
label_files = [
image_file.replace("images", "segmentation_labels").replace("jpg", "bmp")
for image_file in image_files
]
dataset = tf.data.Dataset.from_tensor_slices(
(image_files, segmentation_map_files, label_files)
)
dataset = dataset.shuffle(batch_size * 10) if is_train else dataset
dataset = dataset.map(_load_data_tf, num_parallel_calls=AUTOTUNE)
dataset = dataset.map(_random_crop, num_parallel_calls=AUTOTUNE)
dataset = dataset.map(_one_hot, num_parallel_calls=AUTOTUNE)
dataset = dataset.batch(batch_size, drop_remainder=True)
return dataset
train_dataset = load(train_files, batch_size=BATCH_SIZE, is_train=True)
val_dataset = load(val_files, batch_size=BATCH_SIZE, is_train=False)
"""
Now, let's visualize a few samples from the training set.
"""
sample_train_batch = next(iter(train_dataset))
print(f"Segmentation map batch shape: {sample_train_batch[0].shape}.")
print(f"Image batch shape: {sample_train_batch[1].shape}.")
print(f"One-hot encoded label map shape: {sample_train_batch[2].shape}.")
# Plot a few samples from the training set.
for segmentation_map, real_image in zip(sample_train_batch[0], sample_train_batch[1]):
fig = plt.figure(figsize=(10, 10))
fig.add_subplot(1, 2, 1).set_title("Segmentation Map")
plt.imshow((segmentation_map + 1) / 2)
fig.add_subplot(1, 2, 2).set_title("Real Image")
plt.imshow((real_image + 1) / 2)
plt.show()
"""
Note that in the rest of this example, we use a couple of figures from the
[original GauGAN paper](https://arxiv.org/abs/1903.07291) for convenience.
"""
"""
## Custom layers
In the following section, we implement the following layers:
* SPADE
* Residual block including SPADE
* Gaussian sampler
"""
"""
### Some more notes on SPADE
![](https://i.imgur.com/DgMWrrs.png)
**SPatially-Adaptive (DE) normalization** or **SPADE** is a simple but effective layer
for synthesizing photorealistic images given an input semantic layout. Previous methods
for conditional image generation from semantic input such as
Pix2Pix ([Isola et al.](https://arxiv.org/abs/1611.07004))
or Pix2PixHD ([Wang et al.](https://arxiv.org/abs/1711.11585))
directly feed the semantic layout as input to the deep network, which is then processed
through stacks of convolution, normalization, and nonlinearity layers. This is often
suboptimal as the normalization layers have a tendency to wash away semantic information.
In SPADE, the segmentation mask is first projected onto an embedding space, and then
convolved to produce the modulation parameters `γ` and `β`. Unlike prior conditional
normalization methods, `γ` and `β` are not vectors, but tensors with spatial dimensions.
The produced `γ` and `β` are multiplied and added to the normalized activation
element-wise. As the modulation parameters are adaptive to the input segmentation mask,
SPADE is better suited for semantic image synthesis.
"""
class SPADE(layers.Layer):
def __init__(self, filters, epsilon=1e-5, **kwargs):
super().__init__(**kwargs)
self.epsilon = epsilon
self.conv = layers.Conv2D(128, 3, padding="same", activation="relu")
self.conv_gamma = layers.Conv2D(filters, 3, padding="same")
self.conv_beta = layers.Conv2D(filters, 3, padding="same")
def build(self, input_shape):
self.resize_shape = input_shape[1:3]
def call(self, input_tensor, raw_mask):
mask = ops.image.resize(raw_mask, self.resize_shape, interpolation="nearest")
x = self.conv(mask)
gamma = self.conv_gamma(x)
beta = self.conv_beta(x)
mean, var = ops.moments(input_tensor, axes=(0, 1, 2), keepdims=True)
std = ops.sqrt(var + self.epsilon)
normalized = (input_tensor - mean) / std
output = gamma * normalized + beta
return output
class ResBlock(layers.Layer):
def __init__(self, filters, **kwargs):
super().__init__(**kwargs)
self.filters = filters
def build(self, input_shape):
input_filter = input_shape[-1]
self.spade_1 = SPADE(input_filter)
self.spade_2 = SPADE(self.filters)
self.conv_1 = layers.Conv2D(self.filters, 3, padding="same")
self.conv_2 = layers.Conv2D(self.filters, 3, padding="same")
self.learned_skip = False
if self.filters != input_filter:
self.learned_skip = True
self.spade_3 = SPADE(input_filter)
self.conv_3 = layers.Conv2D(self.filters, 3, padding="same")
def call(self, input_tensor, mask):
x = self.spade_1(input_tensor, mask)
x = self.conv_1(keras.activations.leaky_relu(x, 0.2))
x = self.spade_2(x, mask)
x = self.conv_2(keras.activations.leaky_relu(x, 0.2))
skip = (
self.conv_3(
keras.activations.leaky_relu(self.spade_3(input_tensor, mask), 0.2)
)
if self.learned_skip
else input_tensor
)
output = skip + x
return output
class GaussianSampler(layers.Layer):
def __init__(self, batch_size, latent_dim, **kwargs):
super().__init__(**kwargs)
self.batch_size = batch_size
self.latent_dim = latent_dim
self.seed_generator = keras.random.SeedGenerator(1337)
def call(self, inputs):
means, variance = inputs
epsilon = keras.random.normal(
shape=(self.batch_size, self.latent_dim),
mean=0.0,
stddev=1.0,
seed=self.seed_generator,
)
samples = means + ops.exp(0.5 * variance) * epsilon
return samples
"""
Next, we implement the downsampling block for the encoder.
"""
def downsample(
channels,
kernels,
strides=2,
apply_norm=True,
apply_activation=True,
apply_dropout=False,
):
block = keras.Sequential()
block.add(
layers.Conv2D(
channels,
kernels,
strides=strides,
padding="same",
use_bias=False,
kernel_initializer=keras.initializers.GlorotNormal(),
)
)
if apply_norm:
block.add(layers.GroupNormalization(groups=-1))
if apply_activation:
block.add(layers.LeakyReLU(0.2))
if apply_dropout:
block.add(layers.Dropout(0.5))
return block
"""
The GauGAN encoder consists of a few downsampling blocks. It outputs the mean and
variance of a distribution.
![](https://i.imgur.com/JgAv1EW.png)
"""
def build_encoder(image_shape, encoder_downsample_factor=64, latent_dim=256):
input_image = keras.Input(shape=image_shape)
x = downsample(encoder_downsample_factor, 3, apply_norm=False)(input_image)
x = downsample(2 * encoder_downsample_factor, 3)(x)
x = downsample(4 * encoder_downsample_factor, 3)(x)
x = downsample(8 * encoder_downsample_factor, 3)(x)
x = downsample(8 * encoder_downsample_factor, 3)(x)
x = layers.Flatten()(x)
mean = layers.Dense(latent_dim, name="mean")(x)
variance = layers.Dense(latent_dim, name="variance")(x)
return keras.Model(input_image, [mean, variance], name="encoder")
"""
Next, we implement the generator, which consists of the modified residual blocks and
upsampling blocks. It takes latent vectors and one-hot encoded segmentation labels, and
produces new images.
![](https://i.imgur.com/9iP1TsB.png)
With SPADE, there is no need to feed the segmentation map to the first layer of the
generator, since the latent inputs have enough structural information about the style we
want the generator to emulate. We also discard the encoder part of the generator, which is
commonly used in prior architectures. This results in a more lightweight
generator network, which can also take a random vector as input, enabling a simple and
natural path to multi-modal synthesis.
"""
def build_generator(mask_shape, latent_dim=256):
latent = keras.Input(shape=(latent_dim,))
mask = keras.Input(shape=mask_shape)
x = layers.Dense(16384)(latent)
x = layers.Reshape((4, 4, 1024))(x)
x = ResBlock(filters=1024)(x, mask)
x = layers.UpSampling2D((2, 2))(x)
x = ResBlock(filters=1024)(x, mask)
x = layers.UpSampling2D((2, 2))(x)
x = ResBlock(filters=1024)(x, mask)
x = layers.UpSampling2D((2, 2))(x)
x = ResBlock(filters=512)(x, mask)
x = layers.UpSampling2D((2, 2))(x)
x = ResBlock(filters=256)(x, mask)
x = layers.UpSampling2D((2, 2))(x)
x = ResBlock(filters=128)(x, mask)
x = layers.UpSampling2D((2, 2))(x)
x = keras.activations.leaky_relu(x, 0.2)
output_image = keras.activations.tanh(layers.Conv2D(3, 4, padding="same")(x))
return keras.Model([latent, mask], output_image, name="generator")
"""
The discriminator takes a segmentation map and an image and concatenates them. It
then predicts if patches of the concatenated image are real or fake.
![](https://i.imgur.com/rn71PlM.png)
"""
def build_discriminator(image_shape, downsample_factor=64):
input_image_A = keras.Input(shape=image_shape, name="discriminator_image_A")
input_image_B = keras.Input(shape=image_shape, name="discriminator_image_B")
x = layers.Concatenate()([input_image_A, input_image_B])
x1 = downsample(downsample_factor, 4, apply_norm=False)(x)
x2 = downsample(2 * downsample_factor, 4)(x1)
x3 = downsample(4 * downsample_factor, 4)(x2)
x4 = downsample(8 * downsample_factor, 4, strides=1)(x3)
x5 = layers.Conv2D(1, 4)(x4)
outputs = [x1, x2, x3, x4, x5]
return keras.Model([input_image_A, input_image_B], outputs)
"""
## Loss functions
GauGAN uses the following loss functions:
* Generator:
* Expectation over the discriminator predictions.
* [KL divergence](https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence)
for learning the mean and variance predicted by the encoder.
* Minimization between the discriminator predictions on original and generated
images to align the feature space of the generator.
* [Perceptual loss](https://arxiv.org/abs/1603.08155) for encouraging the generated
images to have perceptual quality.
* Discriminator:
* [Hinge loss](https://en.wikipedia.org/wiki/Hinge_loss).
"""
def generator_loss(y):
return -ops.mean(y)
def kl_divergence_loss(mean, variance):
return -0.5 * ops.sum(1 + variance - ops.square(mean) - ops.exp(variance))
class FeatureMatchingLoss(keras.losses.Loss):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.mae = keras.losses.MeanAbsoluteError()
def call(self, y_true, y_pred):
loss = 0
for i in range(len(y_true) - 1):
loss += self.mae(y_true[i], y_pred[i])
return loss
class VGGFeatureMatchingLoss(keras.losses.Loss):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.encoder_layers = [
"block1_conv1",
"block2_conv1",
"block3_conv1",
"block4_conv1",
"block5_conv1",
]
self.weights = [1.0 / 32, 1.0 / 16, 1.0 / 8, 1.0 / 4, 1.0]
vgg = keras.applications.VGG19(include_top=False, weights="imagenet")
layer_outputs = [vgg.get_layer(x).output for x in self.encoder_layers]
self.vgg_model = keras.Model(vgg.input, layer_outputs, name="VGG")
self.mae = keras.losses.MeanAbsoluteError()
def call(self, y_true, y_pred):
y_true = keras.applications.vgg19.preprocess_input(127.5 * (y_true + 1))
y_pred = keras.applications.vgg19.preprocess_input(127.5 * (y_pred + 1))
real_features = self.vgg_model(y_true)
fake_features = self.vgg_model(y_pred)
loss = 0
for i in range(len(real_features)):
loss += self.weights[i] * self.mae(real_features[i], fake_features[i])
return loss
class DiscriminatorLoss(keras.losses.Loss):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.hinge_loss = keras.losses.Hinge()
def call(self, y, is_real):
return self.hinge_loss(is_real, y)
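"""
For reference, `keras.losses.Hinge` computes `mean(maximum(1 - y_true * y_pred, 0))`, with
`y_true` expected to be -1 or 1. Passing `is_real = 1.0` therefore pushes the discriminator
to output values above +1 on real patches, while `is_real = -1.0` pushes it below -1 on
generated patches, i.e.

$$
L_D = \tfrac{1}{2}\,\mathbb{E}\big[\max(0,\, 1 - D(s, x))\big]
    + \tfrac{1}{2}\,\mathbb{E}\big[\max(0,\, 1 + D(s, G(z, m)))\big],
$$

where $s$ is the segmentation map and the factor of 1/2 comes from averaging the real and
fake terms in `train_discriminator` below.
"""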
"""
## GAN monitor callback
Next, we implement a callback to monitor the GauGAN results while it is training.
"""
class GanMonitor(keras.callbacks.Callback):
def __init__(self, val_dataset, n_samples, epoch_interval=5):
self.val_images = next(iter(val_dataset))
self.n_samples = n_samples
self.epoch_interval = epoch_interval
self.seed_generator = keras.random.SeedGenerator(42)
def infer(self):
latent_vector = keras.random.normal(
shape=(self.model.batch_size, self.model.latent_dim),
mean=0.0,
stddev=2.0,
seed=self.seed_generator,
)
return self.model.predict([latent_vector, self.val_images[2]])
def on_epoch_end(self, epoch, logs=None):
if epoch % self.epoch_interval == 0:
generated_images = self.infer()
for _ in range(self.n_samples):
grid_row = min(generated_images.shape[0], 3)
f, axarr = plt.subplots(grid_row, 3, figsize=(18, grid_row * 6))
for row in range(grid_row):
ax = axarr if grid_row == 1 else axarr[row]
ax[0].imshow((self.val_images[0][row] + 1) / 2)
ax[0].axis("off")
ax[0].set_title("Mask", fontsize=20)
ax[1].imshow((self.val_images[1][row] + 1) / 2)
ax[1].axis("off")
ax[1].set_title("Ground Truth", fontsize=20)
ax[2].imshow((generated_images[row] + 1) / 2)
ax[2].axis("off")
ax[2].set_title("Generated", fontsize=20)
plt.show()
"""
## Subclassed GauGAN model
Finally, we put everything together inside a subclassed `keras.Model`, overriding its
`train_step()` and `test_step()` methods.
"""
class GauGAN(keras.Model):
def __init__(
self,
image_size,
num_classes,
batch_size,
latent_dim,
feature_loss_coeff=10,
vgg_feature_loss_coeff=0.1,
kl_divergence_loss_coeff=0.1,
**kwargs,
):
super().__init__(**kwargs)
self.image_size = image_size
self.latent_dim = latent_dim
self.batch_size = batch_size
self.num_classes = num_classes
self.image_shape = (image_size, image_size, 3)
self.mask_shape = (image_size, image_size, num_classes)
self.feature_loss_coeff = feature_loss_coeff
self.vgg_feature_loss_coeff = vgg_feature_loss_coeff
self.kl_divergence_loss_coeff = kl_divergence_loss_coeff
self.discriminator = build_discriminator(self.image_shape)
self.generator = build_generator(self.mask_shape)
self.encoder = build_encoder(self.image_shape)
self.sampler = GaussianSampler(batch_size, latent_dim)
self.patch_size, self.combined_model = self.build_combined_generator()
self.disc_loss_tracker = keras.metrics.Mean(name="disc_loss")
self.gen_loss_tracker = keras.metrics.Mean(name="gen_loss")
self.feat_loss_tracker = keras.metrics.Mean(name="feat_loss")
self.vgg_loss_tracker = keras.metrics.Mean(name="vgg_loss")
self.kl_loss_tracker = keras.metrics.Mean(name="kl_loss")
@property
def metrics(self):
return [
self.disc_loss_tracker,
self.gen_loss_tracker,
self.feat_loss_tracker,
self.vgg_loss_tracker,
self.kl_loss_tracker,
]
def build_combined_generator(self):
# This method builds a model that takes as inputs the following:
# latent vector, one-hot encoded segmentation label map, and
# a segmentation map. It then (i) generates an image with the generator,
# (ii) passes the generated images and segmentation map to the discriminator.
# Finally, the model produces the following outputs: (a) discriminator outputs,
# (b) generated image.
# We will be using this model to simplify the implementation.
self.discriminator.trainable = False
mask_input = keras.Input(shape=self.mask_shape, name="mask")
image_input = keras.Input(shape=self.image_shape, name="image")
latent_input = keras.Input(shape=(self.latent_dim,), name="latent")
generated_image = self.generator([latent_input, mask_input])
discriminator_output = self.discriminator([image_input, generated_image])
combined_outputs = discriminator_output + [generated_image]
patch_size = discriminator_output[-1].shape[1]
combined_model = keras.Model(
[latent_input, mask_input, image_input], combined_outputs
)
return patch_size, combined_model
def compile(self, gen_lr=1e-4, disc_lr=4e-4, **kwargs):
super().compile(**kwargs)
self.generator_optimizer = keras.optimizers.Adam(
gen_lr, beta_1=0.0, beta_2=0.999
)
self.discriminator_optimizer = keras.optimizers.Adam(
disc_lr, beta_1=0.0, beta_2=0.999
)
self.discriminator_loss = DiscriminatorLoss()
self.feature_matching_loss = FeatureMatchingLoss()
self.vgg_loss = VGGFeatureMatchingLoss()
def train_discriminator(self, latent_vector, segmentation_map, real_image, labels):
fake_images = self.generator([latent_vector, labels])
with tf.GradientTape() as gradient_tape:
pred_fake = self.discriminator([segmentation_map, fake_images])[-1]
pred_real = self.discriminator([segmentation_map, real_image])[-1]
loss_fake = self.discriminator_loss(pred_fake, -1.0)
loss_real = self.discriminator_loss(pred_real, 1.0)
total_loss = 0.5 * (loss_fake + loss_real)
self.discriminator.trainable = True
gradients = gradient_tape.gradient(
total_loss, self.discriminator.trainable_variables
)
self.discriminator_optimizer.apply_gradients(
zip(gradients, self.discriminator.trainable_variables)
)
return total_loss
def train_generator(
self, latent_vector, segmentation_map, labels, image, mean, variance
):
# The generator learns through the signal provided by the discriminator. During
# backpropagation, we only update the generator and encoder parameters; the
# discriminator is kept frozen.
self.discriminator.trainable = False
with tf.GradientTape() as tape:
real_d_output = self.discriminator([segmentation_map, image])
combined_outputs = self.combined_model(
[latent_vector, labels, segmentation_map]
)
fake_d_output, fake_image = combined_outputs[:-1], combined_outputs[-1]
pred = fake_d_output[-1]
# Compute generator losses.
g_loss = generator_loss(pred)
kl_loss = self.kl_divergence_loss_coeff * kl_divergence_loss(mean, variance)
vgg_loss = self.vgg_feature_loss_coeff * self.vgg_loss(image, fake_image)
feature_loss = self.feature_loss_coeff * self.feature_matching_loss(
real_d_output, fake_d_output
)
total_loss = g_loss + kl_loss + vgg_loss + feature_loss
all_trainable_variables = (
self.combined_model.trainable_variables + self.encoder.trainable_variables
)
gradients = tape.gradient(total_loss, all_trainable_variables)
self.generator_optimizer.apply_gradients(
zip(gradients, all_trainable_variables)
)
return total_loss, feature_loss, vgg_loss, kl_loss
def train_step(self, data):
segmentation_map, image, labels = data
mean, variance = self.encoder(image)
latent_vector = self.sampler([mean, variance])
discriminator_loss = self.train_discriminator(
latent_vector, segmentation_map, image, labels
)
(generator_loss, feature_loss, vgg_loss, kl_loss) = self.train_generator(
latent_vector, segmentation_map, labels, image, mean, variance
)
# Report progress.
self.disc_loss_tracker.update_state(discriminator_loss)
self.gen_loss_tracker.update_state(generator_loss)
self.feat_loss_tracker.update_state(feature_loss)
self.vgg_loss_tracker.update_state(vgg_loss)
self.kl_loss_tracker.update_state(kl_loss)
results = {m.name: m.result() for m in self.metrics}
return results
def test_step(self, data):
segmentation_map, image, labels = data
# Obtain the learned moments of the real image distribution.
mean, variance = self.encoder(image)
# Sample a latent from the distribution defined by the learned moments.
latent_vector = self.sampler([mean, variance])
# Generate the fake images.
fake_images = self.generator([latent_vector, labels])
# Calculate the losses.
pred_fake = self.discriminator([segmentation_map, fake_images])[-1]
pred_real = self.discriminator([segmentation_map, image])[-1]
loss_fake = self.discriminator_loss(pred_fake, -1.0)
loss_real = self.discriminator_loss(pred_real, 1.0)
total_discriminator_loss = 0.5 * (loss_fake + loss_real)
real_d_output = self.discriminator([segmentation_map, image])
combined_outputs = self.combined_model(
[latent_vector, labels, segmentation_map]
)
fake_d_output, fake_image = combined_outputs[:-1], combined_outputs[-1]
pred = fake_d_output[-1]
g_loss = generator_loss(pred)
kl_loss = self.kl_divergence_loss_coeff * kl_divergence_loss(mean, variance)
vgg_loss = self.vgg_feature_loss_coeff * self.vgg_loss(image, fake_image)
feature_loss = self.feature_loss_coeff * self.feature_matching_loss(
real_d_output, fake_d_output
)
total_generator_loss = g_loss + kl_loss + vgg_loss + feature_loss
# Report progress.
self.disc_loss_tracker.update_state(total_discriminator_loss)
self.gen_loss_tracker.update_state(total_generator_loss)
self.feat_loss_tracker.update_state(feature_loss)
self.vgg_loss_tracker.update_state(vgg_loss)
self.kl_loss_tracker.update_state(kl_loss)
results = {m.name: m.result() for m in self.metrics}
return results
def call(self, inputs):
latent_vectors, labels = inputs
return self.generator([latent_vectors, labels])
"""
## GauGAN training
"""
gaugan = GauGAN(IMG_HEIGHT, NUM_CLASSES, BATCH_SIZE, latent_dim=256)
gaugan.compile()
history = gaugan.fit(
train_dataset,
validation_data=val_dataset,
epochs=15,
callbacks=[GanMonitor(val_dataset, BATCH_SIZE)],
)
def plot_history(item):
plt.plot(history.history[item], label=item)
plt.plot(history.history["val_" + item], label="val_" + item)
plt.xlabel("Epochs")
plt.ylabel(item)
plt.title("Train and Validation {} Over Epochs".format(item), fontsize=14)
plt.legend()
plt.grid()
plt.show()
plot_history("disc_loss")
plot_history("gen_loss")
plot_history("feat_loss")
plot_history("vgg_loss")
plot_history("kl_loss")
"""
## Inference
"""
val_iterator = iter(val_dataset)
for _ in range(5):
val_images = next(val_iterator)
# Sample latent from a normal distribution.
latent_vector = keras.random.normal(
shape=(gaugan.batch_size, gaugan.latent_dim), mean=0.0, stddev=2.0
)
# Generate fake images.
fake_images = gaugan.predict([latent_vector, val_images[2]])
real_images = val_images
grid_row = min(fake_images.shape[0], 3)
grid_col = 3
f, axarr = plt.subplots(grid_row, grid_col, figsize=(grid_col * 6, grid_row * 6))
for row in range(grid_row):
ax = axarr if grid_row == 1 else axarr[row]
ax[0].imshow((real_images[0][row] + 1) / 2)
ax[0].axis("off")
ax[0].set_title("Mask", fontsize=20)
ax[1].imshow((real_images[1][row] + 1) / 2)
ax[1].axis("off")
ax[1].set_title("Ground Truth", fontsize=20)
ax[2].imshow((fake_images[row] + 1) / 2)
ax[2].axis("off")
ax[2].set_title("Generated", fontsize=20)
plt.show()
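"""
If you want to reuse the trained generator outside this script, you can export it on its
own. The path below is just an example; because the generator contains custom layers, the
most dependable route is to save its weights and rebuild the architecture before loading
them back:
"""
# Save only the generator weights (example path).
gaugan.generator.save_weights("gaugan_generator.weights.h5")
# Rebuild an identical generator and restore the weights.
reloaded_generator = build_generator(mask_shape=gaugan.mask_shape)
reloaded_generator.load_weights("gaugan_generator.weights.h5")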
"""
## Final words
* The dataset we used in this example is a small one. To obtain even better results,
we recommend using a larger dataset. GauGAN results were originally demonstrated with the
[COCO-Stuff](https://github.com/nightrome/cocostuff) and
[CityScapes](https://www.cityscapes-dataset.com/) datasets.
* This example was inspired by Chapter 6 of
[Hands-On Image Generation with TensorFlow](https://www.packtpub.com/product/hands-on-image-generation-with-tensorflow/9781838826789)
by [Soon-Yau Cheong](https://www.linkedin.com/in/soonyau/) and
[Implementing SPADE using fastai](https://towardsdatascience.com/implementing-spade-using-fastai-6ad86b94030a) by
[Divyansh Jha](https://medium.com/@divyanshj.16).
* If you found this example interesting and exciting, you might want to check out
[our repository](https://github.com/soumik12345/tf2_gans), which we are
currently building. It will include reimplementations of popular GANs and pretrained
models. Our focus will be on readability and making the code as accessible as possible.
Our plan is to first train our implementation of GauGAN (following the code of
this example) on a larger dataset and then make the repository public. We welcome
contributions!
* Recently GauGAN2 was also released. You can check it out
[here](https://blogs.nvidia.com/blog/2021/11/22/gaugan2-ai-art-demo/).
"""
"""
Example available on HuggingFace.
| Trained Model | Demo |
| :--: | :--: |
| [![Generic badge](https://img.shields.io/badge/%F0%9F%A4%97%20Model-GauGAN%20Image%20Generation-black.svg)](https://huggingface.co/keras-io/GauGAN-Image-generation) | [![Generic badge](https://img.shields.io/badge/%F0%9F%A4%97%20Spaces-GauGAN%20Image%20Generation-black.svg)](https://huggingface.co/spaces/keras-io/GauGAN_Conditional_Image_Generation) |
"""
| keras-io/examples/generative/gaugan.py/0 | {
"file_path": "keras-io/examples/generative/gaugan.py",
"repo_id": "keras-io",
"token_count": 13074
} | 99 |