sudo pip install --upgrade pip
sudo pip install -e ".[tests]"
echo "sh shell/lint.sh" > .git/hooks/pre-commit
chmod a+x .git/hooks/pre-commit

--- File: autokeras/.devcontainer/setup.sh ---
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from tensorflow import nest
from autokeras.blocks import basic
from autokeras.blocks import preprocessing
from autokeras.blocks import reduction
from autokeras.engine import block as block_module
BLOCK_TYPE = "block_type"
RESNET = "resnet"
XCEPTION = "xception"
VANILLA = "vanilla"
EFFICIENT = "efficient"
NORMALIZE = "normalize"
AUGMENT = "augment"
TRANSFORMER = "transformer"
MAX_TOKENS = "max_tokens"
NGRAM = "ngram"
BERT = "bert"
class ImageBlock(block_module.Block):
"""Block for image data.
    The image block is a block choosing from ResNetBlock, XceptionBlock,
    ConvBlock and EfficientNetBlock, which is controlled by the 'block_type'
    hyperparameter.
    # Arguments
        block_type: String. 'resnet', 'xception', 'vanilla' or 'efficient'.
            The type of Block to use. If unspecified, it will be tuned
            automatically.
normalize: Boolean. Whether to channel-wise normalize the images.
If unspecified, it will be tuned automatically.
augment: Boolean. Whether to do image augmentation. If unspecified,
it will be tuned automatically.
"""
def __init__(
self,
block_type: Optional[str] = None,
normalize: Optional[bool] = None,
augment: Optional[bool] = None,
**kwargs
):
super().__init__(**kwargs)
self.block_type = block_type
self.normalize = normalize
self.augment = augment
def get_config(self):
config = super().get_config()
config.update(
{
BLOCK_TYPE: self.block_type,
NORMALIZE: self.normalize,
AUGMENT: self.augment,
}
)
return config
def _build_block(self, hp, output_node, block_type):
if block_type == RESNET:
return basic.ResNetBlock().build(hp, output_node)
elif block_type == XCEPTION:
return basic.XceptionBlock().build(hp, output_node)
elif block_type == VANILLA:
return basic.ConvBlock().build(hp, output_node)
elif block_type == EFFICIENT:
return basic.EfficientNetBlock().build(hp, output_node)
def build(self, hp, inputs=None):
input_node = nest.flatten(inputs)[0]
output_node = input_node
if self.normalize is None and hp.Boolean(NORMALIZE):
with hp.conditional_scope(NORMALIZE, [True]):
output_node = preprocessing.Normalization().build(
hp, output_node
)
elif self.normalize:
output_node = preprocessing.Normalization().build(hp, output_node)
if self.augment is None and hp.Boolean(AUGMENT):
with hp.conditional_scope(AUGMENT, [True]):
output_node = preprocessing.ImageAugmentation().build(
hp, output_node
)
elif self.augment:
output_node = preprocessing.ImageAugmentation().build(
hp, output_node
)
if self.block_type is None:
block_type = hp.Choice(
BLOCK_TYPE, [RESNET, XCEPTION, VANILLA, EFFICIENT]
)
with hp.conditional_scope(BLOCK_TYPE, [block_type]):
output_node = self._build_block(hp, output_node, block_type)
else:
output_node = self._build_block(hp, output_node, self.block_type)
return output_node
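# Illustrative usage sketch (commented out; not part of this module). It
# assumes the public `autokeras` API exports ImageBlock alongside
# ImageInput, ClassificationHead, and AutoModel:
#
#   import autokeras as ak
#
#   input_node = ak.ImageInput()
#   output_node = ak.ImageBlock(block_type="resnet", normalize=True)(input_node)
#   output_node = ak.ClassificationHead()(output_node)
#   auto_model = ak.AutoModel(
#       inputs=input_node, outputs=output_node, max_trials=3
#   )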
class TextBlock(block_module.Block):
"""Block for text data.
# Arguments
        block_type: String. 'vanilla', 'transformer', 'ngram' or 'bert'. The
            type of Block to use. 'vanilla' and 'transformer' use a
            TextToIntSequence vectorizer, 'ngram' uses TextToNgramVector, and
            'bert' uses BertBlock. If unspecified, it will be tuned
            automatically.
        max_tokens: Int. The maximum size of the vocabulary.
            If left unspecified, it will be tuned automatically.
        pretraining: String. 'random' (use random weights instead of any
            pretrained model), 'glove', 'fasttext' or 'word2vec'. Whether to
            use pretrained word embeddings. If left unspecified, it will be
            tuned automatically.
"""
def __init__(
self,
block_type: Optional[str] = None,
max_tokens: Optional[int] = None,
pretraining: Optional[str] = None,
**kwargs
):
super().__init__(**kwargs)
self.block_type = block_type
self.max_tokens = max_tokens
self.pretraining = pretraining
def get_config(self):
config = super().get_config()
config.update(
{
BLOCK_TYPE: self.block_type,
MAX_TOKENS: self.max_tokens,
"pretraining": self.pretraining,
}
)
return config
def build(self, hp, inputs=None):
input_node = nest.flatten(inputs)[0]
output_node = input_node
if self.block_type is None:
block_type = hp.Choice(
BLOCK_TYPE, [VANILLA, TRANSFORMER, NGRAM, BERT]
)
with hp.conditional_scope(BLOCK_TYPE, [block_type]):
output_node = self._build_block(hp, output_node, block_type)
else:
output_node = self._build_block(hp, output_node, self.block_type)
return output_node
def _build_block(self, hp, output_node, block_type):
max_tokens = self.max_tokens or hp.Choice(
MAX_TOKENS, [500, 5000, 20000], default=5000
)
if block_type == NGRAM:
output_node = preprocessing.TextToNgramVector(
max_tokens=max_tokens
).build(hp, output_node)
return basic.DenseBlock().build(hp, output_node)
if block_type == BERT:
output_node = basic.BertBlock().build(hp, output_node)
else:
output_node = preprocessing.TextToIntSequence(
max_tokens=max_tokens
).build(hp, output_node)
if block_type == TRANSFORMER:
output_node = basic.Transformer(
max_features=max_tokens + 1,
pretraining=self.pretraining,
).build(hp, output_node)
else:
output_node = basic.Embedding(
max_features=max_tokens + 1,
pretraining=self.pretraining,
).build(hp, output_node)
output_node = basic.ConvBlock().build(hp, output_node)
output_node = reduction.SpatialReduction().build(hp, output_node)
output_node = basic.DenseBlock().build(hp, output_node)
return output_node
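# Illustrative usage sketch (commented out; assumes the public `autokeras`
# API): selecting the BERT path explicitly instead of letting the tuner
# choose among 'vanilla', 'transformer', 'ngram', and 'bert'.
#
#   import autokeras as ak
#
#   input_node = ak.TextInput()
#   output_node = ak.TextBlock(block_type="bert")(input_node)
#   output_node = ak.ClassificationHead()(output_node)
#   auto_model = ak.AutoModel(
#       inputs=input_node, outputs=output_node, max_trials=1
#   )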
class StructuredDataBlock(block_module.Block):
"""Block for structured data.
# Arguments
categorical_encoding: Boolean. Whether to use the CategoricalToNumerical
to encode the categorical features to numerical features. Defaults
to True.
normalize: Boolean. Whether to normalize the features.
If unspecified, it will be tuned automatically.
seed: Int. Random seed.
"""
def __init__(
self,
categorical_encoding: bool = True,
normalize: Optional[bool] = None,
seed: Optional[int] = None,
**kwargs
):
super().__init__(**kwargs)
self.categorical_encoding = categorical_encoding
self.normalize = normalize
self.seed = seed
self.column_types = None
self.column_names = None
@classmethod
def from_config(cls, config):
column_types = config.pop("column_types")
column_names = config.pop("column_names")
instance = cls(**config)
instance.column_types = column_types
instance.column_names = column_names
return instance
def get_config(self):
config = super().get_config()
config.update(
{
"categorical_encoding": self.categorical_encoding,
"normalize": self.normalize,
"seed": self.seed,
"column_types": self.column_types,
"column_names": self.column_names,
}
)
return config
def build(self, hp, inputs=None):
input_node = nest.flatten(inputs)[0]
output_node = input_node
if self.categorical_encoding:
block = preprocessing.CategoricalToNumerical()
block.column_types = self.column_types
block.column_names = self.column_names
output_node = block.build(hp, output_node)
if self.normalize is None and hp.Boolean(NORMALIZE):
with hp.conditional_scope(NORMALIZE, [True]):
output_node = preprocessing.Normalization().build(
hp, output_node
)
elif self.normalize:
output_node = preprocessing.Normalization().build(hp, output_node)
output_node = basic.DenseBlock().build(hp, output_node)
return output_node
class TimeseriesBlock(block_module.Block):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def get_config(self):
return super().get_config()
def build(self, hp, inputs=None):
input_node = nest.flatten(inputs)[0]
output_node = input_node
output_node = basic.RNNBlock().build(hp, output_node)
return output_node
class GeneralBlock(block_module.Block):
"""A general neural network block when the input type is unknown.
    When the input type is unknown, the GeneralBlock searches a large space
    for a good model.
# Arguments
name: String.
"""
def build(self, hp, inputs=None):
raise NotImplementedError
--- File: autokeras/autokeras/blocks/wrapper.py ---
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from autokeras.engine import serializable
class Preprocessor(serializable.Serializable):
"""A preprocessor for tf.data.Dataset.
A preprocessor transforms the dataset using `tf.data` operations.
"""
def fit(self, dataset):
"""Fit the preprocessor with the dataset.
# Arguments
dataset: an instance of `tf.data.Dataset`.
"""
# TODO: may need to change to a streaming way of fit to reduce the
# number of iterations through the dataset for speed. Need to be
# decided when we have more use cases for this fit.
pass
def transform(self, dataset):
"""Transform the dataset wth the preprocessor.
# Arguments
dataset: an instance of `tf.data.Dataset`.
# Returns
The transformed dataset.
"""
raise NotImplementedError
def get_config(self):
return {}
class TargetPreprocessor(Preprocessor):
"""Preprocessor for target data."""
def postprocess(self, dataset):
"""Postprocess the output of the Keras model.
# Arguments
dataset: numpy.ndarray. The corresponding output of the model.
# Returns
numpy.ndarray. The postprocessed data.
"""
raise NotImplementedError
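# Minimal sketch of a concrete Preprocessor subclass (commented out;
# illustrative only -- the `tf` import and the 255.0 scaling constant are
# assumptions, not part of this module):
#
#   import tensorflow as tf
#
#   class Rescale(Preprocessor):
#       """Scale image values from [0, 255] to [0, 1]."""
#
#       def transform(self, dataset):
#           return dataset.map(lambda x: tf.cast(x, tf.float32) / 255.0)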
--- File: autokeras/autokeras/engine/preprocessor.py ---
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow import keras
from tensorflow import nest
from autokeras import preprocessors as preprocessors_module
from autokeras.engine import hyper_preprocessor as hpps_module
from autokeras.engine import preprocessor as pps_module
from autokeras.utils import data_utils
from autokeras.utils import io_utils
class HyperPipeline(hpps_module.HyperPreprocessor):
"""A search space consists of HyperPreprocessors.
# Arguments
inputs: a list of lists of HyperPreprocessors.
outputs: a list of lists of HyperPreprocessors.
"""
def __init__(self, inputs, outputs, **kwargs):
super().__init__(**kwargs)
self.inputs = inputs
self.outputs = outputs
@staticmethod
def _build_preprocessors(hp, hpps_lists, dataset):
sources = data_utils.unzip_dataset(dataset)
preprocessors_list = []
for source, hpps_list in zip(sources, hpps_lists):
data = source
preprocessors = []
for hyper_preprocessor in hpps_list:
preprocessor = hyper_preprocessor.build(hp, data)
preprocessor.fit(data)
data = preprocessor.transform(data)
preprocessors.append(preprocessor)
preprocessors_list.append(preprocessors)
return preprocessors_list
def build(self, hp, dataset):
"""Build a Pipeline by Hyperparameters.
# Arguments
hp: Hyperparameters.
dataset: tf.data.Dataset.
# Returns
An instance of Pipeline.
"""
x = dataset.map(lambda x, y: x)
y = dataset.map(lambda x, y: y)
return Pipeline(
inputs=self._build_preprocessors(hp, self.inputs, x),
outputs=self._build_preprocessors(hp, self.outputs, y),
)
def load_pipeline(filepath, custom_objects=None):
"""Load a Pipeline instance from disk."""
if custom_objects is None:
custom_objects = {}
with keras.utils.custom_object_scope(custom_objects):
return Pipeline.from_config(io_utils.load_json(filepath))
class Pipeline(pps_module.Preprocessor):
"""A data pipeline for transform the entire dataset.
# Arguments
inputs: A list of lists of Preprocessors. For the input datasets for
the model.
outputs: A list of lists of Preprocessors. For the target datasets for
the model.
"""
def __init__(self, inputs, outputs, **kwargs):
super().__init__(**kwargs)
self.inputs = inputs
self.outputs = outputs
def fit(self, dataset):
"""Fit the Preprocessors."""
x = dataset.map(lambda x, y: x)
sources_x = data_utils.unzip_dataset(x)
for pps_list, data in zip(self.inputs, sources_x):
for preprocessor in pps_list:
preprocessor.fit(data)
data = preprocessor.transform(data)
y = dataset.map(lambda x, y: y)
sources_y = data_utils.unzip_dataset(y)
for pps_list, data in zip(self.outputs, sources_y):
for preprocessor in pps_list:
preprocessor.fit(data)
data = preprocessor.transform(data)
return
def transform(self, dataset):
"""Transform the dataset to be ready for the model.
# Arguments
dataset: tf.data.Dataset.
# Returns
An instance of tf.data.Dataset. The transformed dataset.
"""
x = dataset.map(lambda x, y: x)
y = dataset.map(lambda x, y: y)
x = self.transform_x(x)
y = self.transform_y(y)
return tf.data.Dataset.zip((x, y))
def transform_x(self, dataset):
"""Transform the input dataset for the model.
# Arguments
dataset: tf.data.Dataset. The input dataset for the model.
# Returns
An instance of tf.data.Dataset. The transformed dataset.
"""
return self._transform_data(dataset, self.inputs)
def transform_y(self, dataset):
"""Transform the target dataset for the model.
# Arguments
dataset: tf.data.Dataset. The target dataset for the model.
# Returns
An instance of tf.data.Dataset. The transformed dataset.
"""
return self._transform_data(dataset, self.outputs)
def _transform_data(self, dataset, pps_lists):
sources = data_utils.unzip_dataset(dataset)
transformed = []
for pps_list, data in zip(pps_lists, sources):
for preprocessor in pps_list:
data = preprocessor.transform(data)
transformed.append(data)
if len(transformed) == 1:
return transformed[0]
return tf.data.Dataset.zip(tuple(transformed))
def save(self, filepath):
io_utils.save_json(filepath, self.get_config())
def get_config(self):
return {
"inputs": [
[
preprocessors_module.serialize(preprocessor)
for preprocessor in preprocessors
]
for preprocessors in self.inputs
],
"outputs": [
[
preprocessors_module.serialize(preprocessor)
for preprocessor in preprocessors
]
for preprocessors in self.outputs
],
}
@classmethod
def from_config(cls, config):
return cls(
inputs=[
[
preprocessors_module.deserialize(preprocessor)
for preprocessor in preprocessors
]
for preprocessors in config["inputs"]
],
outputs=[
[
preprocessors_module.deserialize(preprocessor)
for preprocessor in preprocessors
]
for preprocessors in config["outputs"]
],
)
def postprocess(self, y):
"""Postprocess the outputs of the model.
# Arguments
y: numpy.ndarray or a list of numpy.ndarrays. The output of the
Keras model.
# Returns
A list or an instance of numpy.ndarray. The postprocessed data for
the heads.
"""
outputs = []
for data, preprocessors in zip(nest.flatten(y), self.outputs):
for preprocessor in preprocessors[::-1]:
if isinstance(preprocessor, pps_module.TargetPreprocessor):
data = preprocessor.postprocess(data)
outputs.append(data)
if len(outputs) == 1:
return outputs[0]
return outputs
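# Illustrative round-trip sketch (commented out; the file path and the
# fitted `pipeline` instance are assumptions): persist a Pipeline as JSON
# with `save` and restore it with the module-level `load_pipeline`.
#
#   pipeline.save("pipeline.json")
#   restored = load_pipeline("pipeline.json")
#   transformed_dataset = restored.transform(dataset)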
--- File: autokeras/autokeras/pipeline.py ---
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
import keras_tuner
import numpy as np
from autokeras.engine import tuner as tuner_module
class TrieNode(object):
def __init__(self):
super().__init__()
self.num_leaves = 0
self.children = {}
self.hp_name = None
def is_leaf(self):
return len(self.children) == 0
class Trie(object):
def __init__(self):
super().__init__()
self.root = TrieNode()
def insert(self, hp_name):
names = hp_name.split("/")
new_word = False
current_node = self.root
nodes_on_path = [current_node]
for name in names:
if name not in current_node.children:
current_node.children[name] = TrieNode()
new_word = True
current_node = current_node.children[name]
nodes_on_path.append(current_node)
current_node.hp_name = hp_name
if new_word:
for node in nodes_on_path:
node.num_leaves += 1
@property
def nodes(self):
return self._get_all_nodes(self.root)
def _get_all_nodes(self, node):
ret = [node]
for key, value in node.children.items():
ret += self._get_all_nodes(value)
return ret
def get_hp_names(self, node):
if node.is_leaf():
return [node.hp_name]
ret = []
for key, value in node.children.items():
ret += self.get_hp_names(value)
return ret
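# Illustrative sketch of how the Trie groups hyperparameter names by their
# "/"-delimited prefixes (commented out; the names below are assumptions):
#
#   trie = Trie()
#   trie.insert("image_block/conv_block/filters")
#   trie.insert("image_block/conv_block/kernel_size")
#   trie.insert("optimizer/learning_rate")
#   assert trie.root.num_leaves == 3
#   # Picking the "conv_block" node yields only the two conv hp names:
#   # ["image_block/conv_block/filters",
#   #  "image_block/conv_block/kernel_size"]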
class GreedyOracle(keras_tuner.Oracle):
"""An oracle combining random search and greedy algorithm.
It groups the HyperParameters into several categories, namely, HyperGraph,
Preprocessor, Architecture, and Optimization. The oracle tunes each group
    separately using random search. In each trial, it uses a greedy strategy
    to generate new values for one of the categories of HyperParameters and
    uses the best trial so far for the rest of the HyperParameter values.
# Arguments
initial_hps: A list of dictionaries in the form of
{HyperParameter name (String): HyperParameter value}.
Each dictionary is one set of HyperParameters, which are used as the
initial trials for the search. Defaults to None.
seed: Int. Random seed.
"""
def __init__(self, initial_hps=None, seed=None, **kwargs):
super().__init__(seed=seed, **kwargs)
self.initial_hps = copy.deepcopy(initial_hps) or []
self._tried_initial_hps = [False] * len(self.initial_hps)
def get_state(self):
state = super().get_state()
state.update(
{
"initial_hps": self.initial_hps,
"tried_initial_hps": self._tried_initial_hps,
}
)
return state
def set_state(self, state):
super().set_state(state)
self.initial_hps = state["initial_hps"]
self._tried_initial_hps = state["tried_initial_hps"]
def _select_hps(self):
trie = Trie()
best_hps = self._get_best_hps()
for hp in best_hps.space:
# Not picking the fixed hps for generating new values.
if best_hps.is_active(hp) and not isinstance(
hp, keras_tuner.engine.hyperparameters.Fixed
):
trie.insert(hp.name)
all_nodes = trie.nodes
if len(all_nodes) <= 1:
return []
probabilities = np.array([1 / node.num_leaves for node in all_nodes])
sum_p = np.sum(probabilities)
probabilities = probabilities / sum_p
node = np.random.choice(all_nodes, p=probabilities)
return trie.get_hp_names(node)
def _next_initial_hps(self):
for index, hps in enumerate(self.initial_hps):
if not self._tried_initial_hps[index]:
self._tried_initial_hps[index] = True
return hps
def populate_space(self, trial_id):
if not all(self._tried_initial_hps):
values = self._next_initial_hps()
return {
"status": keras_tuner.engine.trial.TrialStatus.RUNNING,
"values": values,
}
for _ in range(self._max_collisions):
hp_names = self._select_hps()
values = self._generate_hp_values(hp_names)
# Reached max collisions.
if values is None:
continue
# Values found.
return {
"status": keras_tuner.engine.trial.TrialStatus.RUNNING,
"values": values,
}
# All stages reached max collisions.
return {
"status": keras_tuner.engine.trial.TrialStatus.STOPPED,
"values": None,
}
def _get_best_hps(self):
best_trials = self.get_best_trials()
if best_trials:
return best_trials[0].hyperparameters.copy()
else:
return self.hyperparameters.copy()
def _generate_hp_values(self, hp_names):
best_hps = self._get_best_hps()
collisions = 0
while True:
hps = keras_tuner.HyperParameters()
# Generate a set of random values.
for hp in self.hyperparameters.space:
hps.merge([hp])
# if not active, do nothing.
# if active, check if selected to be changed.
if hps.is_active(hp):
# if was active and not selected, do nothing.
if best_hps.is_active(hp.name) and hp.name not in hp_names:
hps.values[hp.name] = best_hps.values[hp.name]
continue
# if was not active or selected, sample.
hps.values[hp.name] = hp.random_sample(self._seed_state)
self._seed_state += 1
values = hps.values
# Keep trying until the set of values is unique,
# or until we exit due to too many collisions.
values_hash = self._compute_values_hash(values)
if values_hash in self._tried_so_far:
collisions += 1
if collisions <= self._max_collisions:
continue
return None
self._tried_so_far.add(values_hash)
break
return values
class Greedy(tuner_module.AutoTuner):
def __init__(
self,
hypermodel: keras_tuner.HyperModel,
objective: str = "val_loss",
max_trials: int = 10,
initial_hps: Optional[List[Dict[str, Any]]] = None,
seed: Optional[int] = None,
hyperparameters: Optional[keras_tuner.HyperParameters] = None,
tune_new_entries: bool = True,
allow_new_entries: bool = True,
**kwargs
):
self.seed = seed
oracle = GreedyOracle(
objective=objective,
max_trials=max_trials,
initial_hps=initial_hps,
seed=seed,
hyperparameters=hyperparameters,
tune_new_entries=tune_new_entries,
allow_new_entries=allow_new_entries,
)
super().__init__(oracle=oracle, hypermodel=hypermodel, **kwargs)
--- File: autokeras/autokeras/tuners/greedy.py ---
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import warnings
import keras_tuner
import tensorflow as tf
from packaging.version import parse
from tensorflow import nest
def validate_num_inputs(inputs, num):
inputs = nest.flatten(inputs)
if not len(inputs) == num:
raise ValueError(
"Expected {num} elements in the inputs list "
"but received {len} inputs.".format(num=num, len=len(inputs))
)
def to_snake_case(name):
intermediate = re.sub("(.)([A-Z][a-z0-9]+)", r"\1_\2", name)
insecure = re.sub("([a-z])([A-Z])", r"\1_\2", intermediate).lower()
return insecure
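# e.g., to_snake_case("ImageBlock") returns "image_block", and
# to_snake_case("RNNBlock") returns "rnn_block" (illustrative examples).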
def check_tf_version() -> None:
if parse(tf.__version__) < parse("2.7.0"):
warnings.warn(
"The Tensorflow package version needs to be at least 2.7.0 \n"
"for AutoKeras to run. Currently, your TensorFlow version is \n"
f"{tf.__version__}. Please upgrade with \n"
"`$ pip install --upgrade tensorflow`. \n"
"You can use `pip freeze` to check afterwards "
"that everything is ok.",
ImportWarning,
)
def check_kt_version() -> None:
if parse(keras_tuner.__version__) < parse("1.1.0"):
warnings.warn(
"The Keras Tuner package version needs to be at least 1.1.0 \n"
"for AutoKeras to run. Currently, your Keras Tuner version is \n"
f"{keras_tuner.__version__}. Please upgrade with \n"
"`$ pip install --upgrade keras-tuner`. \n"
"You can use `pip freeze` to check afterwards "
"that everything is ok.",
ImportWarning,
)
def contain_instance(instance_list, instance_type):
return any(
[isinstance(instance, instance_type) for instance in instance_list]
)
def evaluate_with_adaptive_batch_size(
model, batch_size, verbose=1, **fit_kwargs
):
return run_with_adaptive_batch_size(
batch_size,
lambda x, validation_data, **kwargs: model.evaluate(
x, verbose=verbose, **kwargs
),
**fit_kwargs,
)
def predict_with_adaptive_batch_size(
model, batch_size, verbose=1, **fit_kwargs
):
return run_with_adaptive_batch_size(
batch_size,
lambda x, validation_data, **kwargs: model.predict(
x, verbose=verbose, **kwargs
),
**fit_kwargs,
)
def fit_with_adaptive_batch_size(model, batch_size, **fit_kwargs):
history = run_with_adaptive_batch_size(
batch_size, lambda **kwargs: model.fit(**kwargs), **fit_kwargs
)
return model, history
def run_with_adaptive_batch_size(batch_size, func, **fit_kwargs):
x = fit_kwargs.pop("x")
validation_data = None
if "validation_data" in fit_kwargs:
validation_data = fit_kwargs.pop("validation_data")
while batch_size > 0:
try:
history = func(x=x, validation_data=validation_data, **fit_kwargs)
break
except tf.errors.ResourceExhaustedError as e:
if batch_size == 1:
raise e
batch_size //= 2
print(
"Not enough memory, reduce batch size to {batch_size}.".format(
batch_size=batch_size
)
)
x = x.unbatch().batch(batch_size)
if validation_data is not None:
validation_data = validation_data.unbatch().batch(batch_size)
return history
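# Illustrative sketch (commented out; the dataset and model objects are
# assumptions): `fit_with_adaptive_batch_size` retries with a halved batch
# size whenever the GPU runs out of memory, down to a batch size of 1.
#
#   dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(1024)
#   model, history = fit_with_adaptive_batch_size(
#       model, batch_size=1024, x=dataset, epochs=2
#   )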
def get_hyperparameter(value, hp, dtype):
if value is None:
return hp
return value
def add_to_hp(hp, hps, name=None):
"""Add the HyperParameter (self) to the HyperParameters.
# Arguments
hp: keras_tuner.HyperParameters.
name: String. If left unspecified, the hp name is used.
"""
if not isinstance(hp, keras_tuner.engine.hyperparameters.HyperParameter):
return hp
kwargs = hp.get_config()
if name is None:
name = hp.name
kwargs.pop("conditions")
kwargs.pop("name")
class_name = hp.__class__.__name__
func = getattr(hps, class_name)
return func(name=name, **kwargs)
def serialize_keras_object(obj):
if hasattr(tf.keras.utils, "legacy"):
return tf.keras.utils.legacy.serialize_keras_object(
obj
) # pragma: no cover
else:
return tf.keras.utils.serialize_keras_object(obj) # pragma: no cover
def deserialize_keras_object(
config, module_objects=None, custom_objects=None, printable_module_name=None
):
if hasattr(tf.keras.utils, "legacy"):
return (
tf.keras.utils.legacy.deserialize_keras_object( # pragma: no cover
config, custom_objects, module_objects, printable_module_name
)
)
else:
return tf.keras.utils.deserialize_keras_object( # pragma: no cover
config, custom_objects, module_objects, printable_module_name
)
--- File: autokeras/autokeras/utils/utils.py ---
FROM python:3.7
RUN pip install flake8 black isort
WORKDIR /autokeras
CMD ["python", "docker/pre_commit.py"]
--- File: autokeras/docker/pre-commit.Dockerfile ---
import pathlib
import shutil
from inspect import getdoc
from inspect import isclass
from typing import Dict
from typing import List
from typing import Union
from typing import get_type_hints
from . import utils
from .docstring import process_docstring
from .examples import copy_examples
from .get_signatures import get_signature
class DocumentationGenerator:
"""Generates the documentation.
# Arguments
pages: A dictionary. The keys are the files' paths, the values
are lists of strings, functions /classes / methods names
with dotted access to the object. For example,
`pages = {'my_file.md': ['keras.layers.Dense']}` is valid.
project_url: The url pointing to the module directory of your project on
GitHub. This will be used to make a `[Sources]` link.
template_dir: Where to put the markdown files which will be copied and
filled in the destination directory. You should put files like
`index.md` inside. If you want a markdown file to be filled with
the docstring of a function, use the `{{autogenerated}}` tag inside,
and then add the markdown file to the `pages` dictionary.
        examples_dir: Where you store examples in your project. Usually
            standalone files with a markdown docstring at the top. Will be
            inserted in the docs.
extra_aliases: When displaying type hints, it's possible that the full
dotted path is displayed instead of alias. The aliases present in
`pages` are used, but it may happen if you're using a third-party
library. For example `tensorflow.python.ops.variables.Variable` is
            displayed instead of `tensorflow.Variable`. Here you have two
            solutions: either you provide the import keras-autodoc should
            follow,
            `extra_aliases=["tensorflow.Variable"]`, or you provide a
            mapping to use,
            `extra_aliases={"tensorflow.python.ops.variables.Variable":
            "tf.Variable"}`. The second option should be used if you want
            more control and you don't want to respect the alias
            corresponding to the import (you can't do `import tf.Variable`).
            When giving a list, keras-autodoc will try to import the object
            from the string to understand what object you want to replace.
max_signature_line_length: When displaying class and function
signatures, keras-autodoc formats them using Black. This parameter
controls the maximum line length of these signatures, and is passed
directly through to Black.
titles_size: `"#"` signs to put before a title in the generated
markdown.
"""
def __init__(
self,
pages: Dict[str, list] = {},
project_url: Union[str, Dict[str, str]] = None,
template_dir=None,
examples_dir=None,
extra_aliases: Union[List[str], Dict[str, str]] = None,
max_signature_line_length: int = 110,
titles_size="###",
):
self.pages = pages
self.project_url = project_url
self.template_dir = template_dir
self.examples_dir = examples_dir
self.class_aliases = {}
self._fill_aliases(extra_aliases)
self.max_signature_line_length = max_signature_line_length
self.titles_size = titles_size
def generate(self, dest_dir):
"""Generate the docs.
# Arguments
dest_dir: Where to put the resulting markdown files.
"""
dest_dir = pathlib.Path(dest_dir)
print("Cleaning up existing sources directory.")
if dest_dir.exists():
shutil.rmtree(dest_dir)
print("Populating sources directory with templates.")
if self.template_dir:
shutil.copytree(self.template_dir, dest_dir)
for file_path, elements in self.pages.items():
markdown_text = ""
for element in elements:
markdown_text += self._render(element)
utils.insert_in_file(markdown_text, dest_dir / file_path)
if self.examples_dir is not None:
copy_examples(self.examples_dir, dest_dir / "examples")
def process_docstring(self, docstring, types: dict = None):
"""Can be overridden."""
        processed = process_docstring(docstring, types, self.class_aliases)
        return processed
def process_signature(self, signature):
"""Can be overridden."""
return signature
def _render(self, element):
if isinstance(element, str):
object_ = utils.import_object(element)
if utils.ismethod(object_):
# we remove the modules when displaying the methods
signature_override = ".".join(element.split(".")[-2:])
else:
signature_override = element
else:
signature_override = None
object_ = element
return self._render_from_object(object_, signature_override)
def _render_from_object(self, object_, signature_override: str):
subblocks = []
if self.project_url is not None:
subblocks.append(utils.make_source_link(object_, self.project_url))
signature = get_signature(
object_, signature_override, self.max_signature_line_length
)
signature = self.process_signature(signature)
subblocks.append(f"{self.titles_size} {object_.__name__}\n")
subblocks.append(utils.code_snippet(signature))
docstring = getdoc(object_)
if docstring:
if isclass(object_):
type_hints = get_type_hints(object_.__init__)
else:
type_hints = get_type_hints(object_)
docstring = self.process_docstring(docstring, type_hints)
subblocks.append(docstring)
return "\n\n".join(subblocks) + "\n\n----\n\n"
def _fill_aliases(self, extra_aliases):
for list_elements in self.pages.values():
for element_as_str in list_elements:
element = utils.import_object(element_as_str)
if not isclass(element):
continue
true_dotted_path = utils.get_dotted_path(element)
self.class_aliases[true_dotted_path] = element_as_str
if isinstance(extra_aliases, dict):
self.class_aliases.update(extra_aliases)
elif isinstance(extra_aliases, list):
for alias in extra_aliases:
full_dotted_path = utils.get_dotted_path(
utils.import_object(alias)
)
self.class_aliases[full_dotted_path] = alias
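# Illustrative sketch (commented out; the page mapping and URLs are
# assumptions): generate markdown docs for one symbol.
#
#   doc_generator = DocumentationGenerator(
#       pages={"image_classifier.md": ["autokeras.ImageClassifier"]},
#       project_url="https://github.com/keras-team/autokeras/blob/master",
#   )
#   doc_generator.generate("docs/sources")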
--- File: autokeras/docs/keras_autodoc/autogen.py ---
"""shell
pip install autokeras
"""
import os
import numpy as np
import tensorflow as tf
from sklearn.datasets import load_files
import autokeras as ak
"""
To make this tutorial easy to follow, we just treat the IMDB dataset as a
regression dataset. This means we will treat the prediction targets of the
IMDB dataset, which are 0s and 1s, as numerical values, so that they can be
directly used as regression targets.
## A Simple Example
The first step is to prepare your data. Here we use the [IMDB
dataset](https://keras.io/datasets/#imdb-movie-reviews-sentiment-classification)
as an example.
"""
dataset = tf.keras.utils.get_file(
fname="aclImdb.tar.gz",
origin="http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz",
extract=True,
)
# set path to dataset
IMDB_DATADIR = os.path.join(os.path.dirname(dataset), "aclImdb")
classes = ["pos", "neg"]
train_data = load_files(
os.path.join(IMDB_DATADIR, "train"), shuffle=True, categories=classes
)
test_data = load_files(
os.path.join(IMDB_DATADIR, "test"), shuffle=False, categories=classes
)
x_train = np.array(train_data.data)
y_train = np.array(train_data.target)
x_test = np.array(test_data.data)
y_test = np.array(test_data.target)
print(x_train.shape)  # (25000,)
print(y_train.shape)  # (25000,)
print(x_train[0][:50])  # the first 50 characters of the first review
"""
The second step is to run the [TextRegressor](/text_regressor). As a quick
demo, we set epochs to 2. You can also leave the epochs unspecified for an
adaptive number of epochs.
"""
# Initialize the text regressor.
reg = ak.TextRegressor(
overwrite=True, max_trials=10 # It tries 10 different models.
)
# Feed the text regressor with training data.
reg.fit(x_train, y_train, epochs=2)
# Predict with the best model.
predicted_y = reg.predict(x_test)
# Evaluate the best model with testing data.
print(reg.evaluate(x_test, y_test))
"""
## Validation Data
By default, AutoKeras uses the last 20% of training data as validation data. As
shown in the example below, you can use `validation_split` to specify the
percentage.
"""
reg.fit(
x_train,
y_train,
# Split the training data and use the last 15% as validation data.
validation_split=0.15,
)
"""
You can also use your own validation set instead of splitting it from the
training data with `validation_data`.
"""
split = 5000
x_val = x_train[split:]
y_val = y_train[split:]
x_train = x_train[:split]
y_train = y_train[:split]
reg.fit(
x_train,
y_train,
epochs=2,
# Use your own validation set.
validation_data=(x_val, y_val),
)
"""
## Customized Search Space
For advanced users, you may customize your search space by using
[AutoModel](/auto_model/#automodel-class) instead of
[TextRegressor](/text_regressor). You can configure the
[TextBlock](/block/#textblock-class) for some high-level configurations, e.g.,
`block_type` for the type of text vectorization method to use. You can use
'vanilla' or 'transformer', which use
[TextToIntSequence](/block/#texttointsequence-class) to
convert the words to integers and [Embedding](/block/#embedding-class) to
embed the integer sequences, or you can use 'ngram', which uses
[TextToNgramVector](/block/#texttongramvector-class) to vectorize the
sentences. You can also leave these arguments unspecified, which leaves the
different choices to be tuned automatically. See the following example for
details.
"""
input_node = ak.TextInput()
output_node = ak.TextBlock(block_type="ngram")(input_node)
output_node = ak.RegressionHead()(output_node)
reg = ak.AutoModel(
inputs=input_node, outputs=output_node, overwrite=True, max_trials=1
)
reg.fit(x_train, y_train, epochs=2)
"""
The usage of [AutoModel](/auto_model/#automodel-class) is similar to the
[functional API](https://www.tensorflow.org/guide/keras/functional) of Keras.
Basically, you are building a graph, whose edges are blocks and the nodes are
intermediate outputs of blocks. You can add an edge from `input_node` to
`output_node` with `output_node = ak.[some_block]([block_args])(input_node)`.
You can also use finer-grained blocks to customize the search space even
even further. See the following example.
"""
input_node = ak.TextInput()
output_node = ak.TextToIntSequence()(input_node)
output_node = ak.Embedding()(output_node)
# Use separable Conv layers in Keras.
output_node = ak.ConvBlock(separable=True)(output_node)
output_node = ak.RegressionHead()(output_node)
reg = ak.AutoModel(
inputs=input_node, outputs=output_node, overwrite=True, max_trials=1
)
reg.fit(x_train, y_train, epochs=2)
"""
## Data Format
The AutoKeras TextRegressor is quite flexible about the data format.
For the text, the input data should be one-dimensional. For the regression
targets, it should be a vector of numerical values. AutoKeras accepts
numpy.ndarray.
We also support using [tf.data.Dataset](
https://www.tensorflow.org/api_docs/python/tf/data/Dataset?version=stable)
format for the training data.
"""
train_set = tf.data.Dataset.from_tensor_slices(((x_train,), (y_train,))).batch(
32
)
test_set = tf.data.Dataset.from_tensor_slices(((x_test,), (y_test,))).batch(32)
reg = ak.TextRegressor(overwrite=True, max_trials=2)
# Feed the tensorflow Dataset to the regressor.
reg.fit(train_set, epochs=2)
# Predict with the best model.
predicted_y = reg.predict(test_set)
# Evaluate the best model with testing data.
print(reg.evaluate(test_set))
"""
## Reference
[TextRegressor](/text_regressor),
[AutoModel](/auto_model/#automodel-class),
[TextBlock](/block/#textblock-class),
[TextToIntSequence](/block/#texttointsequence-class),
[Embedding](/block/#embedding-class),
[TextToNgramVector](/block/#texttongramvector-class),
[ConvBlock](/block/#convblock-class),
[TextInput](/node/#textinput-class),
[RegressionHead](/block/#regressionhead-class).
"""
--- File: autokeras/docs/py/text_regression.py ---
:root>* {
--md-primary-fg-color: #d00000;
--md-accent-fg-color: #d00000;
}

--- File: autokeras/docs/templates/stylesheets/extra.css ---
from pathlib import Path
from setuptools import find_packages
from setuptools import setup
this_file = Path(__file__).resolve()
readme = this_file.parent / "README.md"
setup(
name="autokeras",
description="AutoML for deep learning",
package_data={"": ["README.md"]},
long_description=readme.read_text(encoding="utf-8"),
long_description_content_type="text/markdown",
author="DATA Lab, Keras Team",
author_email="[email protected]",
url="http://autokeras.com",
keywords=["AutoML", "Keras"],
install_requires=[
"packaging",
"tensorflow>=2.8.0",
"keras-tuner>=1.1.0",
"keras-nlp>=0.4.0",
"pandas",
],
extras_require={
"tests": [
"pytest>=4.4.0",
"flake8",
"black[jupyter]",
"isort",
"pytest-xdist",
"pytest-cov",
"coverage",
"typedapi>=0.2,<0.3",
"scikit-learn",
],
},
python_requires=">=3.8",
classifiers=[
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Topic :: Scientific/Engineering :: Mathematics",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Software Development :: Libraries",
],
license="Apache License 2.0",
packages=find_packages(exclude=("*test*",)),
)
--- File: autokeras/setup.py ---
# keras-cv Single Stage Two-Dimensional Object Detection API
| Status | Proposed |
|:-------------- |:---------------------------------------------------- |
| **Author(s)** | Zhenyu Tan ([email protected]), Francois Chollet ([email protected])|
| **Contributor(s)** | Pengchong Jin ([email protected])|
| **Updated** | 2020-09-28 |
## Objective
We aim to provide the core primitive components for training and serving single-stage two-dimensional object
detection models, such as Single-Shot MultiBox Detector (SSD), RetinaNet, and You-Only-Look-Once (YOLO).
Pretrained models will also be provided, similar to keras-applications.
## Key Benefits
Single-stage object detection models are a state-of-the-art technique that powers many computer vision tasks; they provide
faster detection than two-stage models (such as Faster R-CNN) while maintaining comparable performance.
With this proposal, Keras users will be able to build end-to-end models with a simple API.
## Design overview
This proposal includes the specific core components for building single-stage object detection models. It does not, however, include:
1. Data augmentation, such as image and groundtruth box preprocessing
2. Model backbone, such as DarkNet, or functions to generate feature maps
3. Detection heads, such as Feature Pyramid
4. Metrics utilities such as COCO Evaluator, or visualization utils.
Data augmentation will be included as a separate RFC that handles a
broader context than object detection.
Model backbone and detection heads are model-specific; we anticipate them to be analyzed and proposed in
`keras.applications` for heavily used patterns. However, the user can build them easily using Keras.
#### Training
Case where a user wants to train from scratch:
```python
import tensorflow as tf
import tensorflow_datasets as tfds
import keras_cv
# Considering a COCO dataset
coco_dataset = tfds.load('coco/2017')
train_ds, eval_ds = coco_dataset['train'], coco_dataset['validation']
def preprocess(features):
image, gt_boxes, gt_labels = features['image'], features['objects']['bbox'], features['objects']['label']
# preprocess image, gt_boxes, gt_labels, such as flip, resize, and padding, and reserve 0 for background label.
return image, gt_boxes, gt_labels
anchor_generator = keras_cv.ops.AnchorGenerator(anchor_sizes, scales, aspect_ratios, strides)
similarity_calculator = keras_cv.layers.IOUSimilarity()
box_matcher = keras_cv.ops.BoxMatcher(positive_threshold, negative_threshold)
target_gather = keras_cv.ops.TargetGather()
box_coder = keras_cv.ops.BoxCoder(offset='sigmoid')
def encode_label(image, gt_boxes, gt_labels):
anchor_boxes = anchor_generator(image_size)
iou = similarity_calculator(gt_boxes, anchor_boxes)
match_indices, match_indicators = box_matcher(iou)
mask = tf.less_equal(match_indicators, 0)
class_mask = tf.expand_dims(mask, -1)
box_mask = tf.tile(class_mask, [1, 4])
class_targets = target_gather(gt_labels, match_indices, class_mask, -1)
box_targets = target_gather(gt_boxes, match_indices, box_mask, 0.0)
box_targets = box_coder.encode(box_targets, anchor_boxes)
weights = tf.squeeze(tf.ones_like(gt_labels), axis=-1)
ignore_mask = tf.equal(match_indicators, -2)
class_weights = target_gather(weights, match_indices, ignore_mask, 0.0)
box_weights = target_gather(weights, match_indices, mask, 0.0)
return (image, {'classification': class_targets, 'regression': box_targets},
{'classification': class_weights, 'regression': box_weights})
class RetinaNet(tf.keras.Model):
# includes backbone and feature pyramid head.
def __init__(self):
# self.backbone = Model Backbone that returns dict of feature map
# self.fpn = Feature Pyramid Heads that
# self.head = classification and regression heads
def call(self, image, training=None):
feature_map = self.backbone(image, training)
feature_map = self.fpn(feature_map, training)
class_scores, boxes = self.head(feature_map, training)
return {'classification': class_scores, 'regression': boxes}
transformed_train_ds = train_ds.map(preprocess).map(encode_label).batch(128).shuffle(1024)
transformed_eval_ds = eval_ds.map(preprocess).map(encode_label).batch(128)
strategy = tf.distribute.TPUStrategy(...)
with strategy.scope():
optimizer = tf.keras.optimizers.SGD(lr_scheduler)
model = RetinaNet()
model.compile(optimizer=optimizer,
loss={'classification': keras_cv.losses.Focal(), 'regression': tf.keras.losses.Huber()},
metrics=[])
model.fit(transformed_train_ds, epochs=120, validation_data=transformed_eval_ds)
model.save(file_path)
```
#### Serving
Case where a user wants to serve the trained model for a single image.
```python
loaded_model = tf.keras.models.load(file_path)
box_coder = keras_cv.ops.BoxCoder(offset='sigmoid')
anchor_generator = keras_cv.ops.AnchorGenerator()
anchor_boxes = anchor_generator(image_size)
detection_generator = keras_cv.layers.NMSDetectionDecoder()
@tf.function
def serving_fn(image):
batched_image = tf.expand_dims(image)
raw_boxes, scores = loaded_model(batched_image, training=False)
decoded_boxes = box_coder.decode(raw_boxes, anchor_boxes)
classes, scores, boxes, _ = detection_generator(scores, decoded_boxes)
return {'classes': classes, 'scores': scores, 'boxes': boxes}
```
## Detailed Design
For the rest of the design, we denote `B` as batch size, `N` as the number of ground truth boxes, and `M` as the number
of anchor boxes.
We propose 2 layers, 1 loss and 4 ops in this RFC.
#### Layers -- IouSimilarity
We propose the IouSimilarity layer to support ragged tensors directly; however, the user can also pad ground truth
boxes or anchor boxes and pass a mask.
```python
class IouSimilarity(tf.keras.layers.Layer):
"""Class to compute similarity based on Intersection over Union (IOU) metric."""
def __init__(self, mask_value):
"""Initializes IouSimilarity layer.
Args:
mask_value: A float mask value to fill where `mask` is True.
"""
def call(self, groundtruth_boxes, anchors, mask=None):
"""Compute pairwise IOU similarity between ground truth boxes and anchors.
Args:
groundtruth_boxes: A float Tensor [N], or [B, N] represent coordinates.
anchors: A float Tensor [M], or [B, M] represent coordinates.
mask: A boolean tensor with [N, M] or [B, N, M].
Returns:
      A float tensor with shape [M, N] or [B, M, N] representing pairwise
      iou scores, with anchors per row and groundtruth boxes per column.
Input shape:
groundtruth_boxes: [N, 4], or [B, N, 4]
anchors: [M, 4], or [B, M, 4]
Output shape:
[M, N], or [B, M, N]
"""
```
#### Layers -- NMSDetectionDecoder
```python
class NMSDetectionDecoder(tf.keras.layers.Layer):
"""Generates detected boxes with scores and classes for one-stage detector."""
def __init__(self,
pre_nms_top_k=5000,
pre_nms_score_threshold=0.05,
nms_iou_threshold=0.5,
max_num_detections=100,
use_batched_nms=False,
**kwargs):
"""Initializes a detection generator.
Args:
pre_nms_top_k: int, the number of top scores proposals to be kept before
applying NMS.
pre_nms_score_threshold: float, the score threshold to apply before
applying NMS. Proposals whose scores are below this threshold are
thrown away.
nms_iou_threshold: float in [0, 1], the NMS IoU threshold.
max_num_detections: int, the final number of total detections to generate.
use_batched_nms: bool, whether or not use
`tf.image.combined_non_max_suppression`.
**kwargs: other key word arguments passed to Layer.
"""
def call(self, raw_boxes, raw_scores, anchor_boxes, image_shape):
"""Generate final detections.
Args:
raw_boxes: a single Tensor or dict with keys representing FPN levels and values
representing box tenors of shape
[batch, feature_h, feature_w, num_anchors * 4].
raw_scores: a single Tensor or dict with keys representing FPN levels and values
representing logit tensors of shape
[batch, feature_h, feature_w, num_anchors].
anchor_boxes: a tensor of shape of [batch_size, K, 4] representing the
corresponding anchor boxes w.r.t `box_outputs`.
image_shape: a tensor of shape of [batch_size, 2] storing the image height
and width w.r.t. the scaled image, i.e. the same image space as
`box_outputs` and `anchor_boxes`.
Returns:
`detection_boxes`: float Tensor of shape [B, max_num_detections, 4]
representing top detected boxes in [y1, x1, y2, x2].
`detection_scores`: float Tensor of shape [B, max_num_detections]
representing sorted confidence scores for detected boxes. The values
are between [0, 1].
`detection_classes`: int Tensor of shape [B, max_num_detections]
representing classes for detected boxes.
`num_detections`: int Tensor of shape [B] only the first
`num_detections` boxes are valid detections
"""
```
#### Losses -- Focal
```python
class FocalLoss(tf.keras.losses.Loss):
"""Implements a Focal loss for classification problems.
Reference:
[Focal Loss for Dense Object Detection](https://arxiv.org/abs/1708.02002).
"""
def __init__(self,
alpha=0.25,
gamma=2.0,
reduction=tf.keras.losses.Reduction.AUTO,
name=None):
"""Initializes `FocalLoss`.
Arguments:
alpha: The `alpha` weight factor for binary class imbalance.
gamma: The `gamma` focusing parameter to re-weight loss.
reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
loss. Default value is `AUTO`. `AUTO` indicates that the reduction
option will be determined by the usage context. For almost all cases
this defaults to `SUM_OVER_BATCH_SIZE`. When used with
`tf.distribute.Strategy`, outside of built-in training loops such as
`tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
will raise an error. Please see this custom training [tutorial](
https://www.tensorflow.org/tutorials/distribute/custom_training) for
more details.
name: Optional name for the op. Defaults to 'retinanet_class_loss'.
"""
def call(self, y_true, y_pred):
"""Invokes the `FocalLoss`.
Arguments:
y_true: A tensor of size [batch, num_anchors, num_classes]
y_pred: A tensor of size [batch, num_anchors, num_classes]
Returns:
Summed loss float `Tensor`.
"""
```
#### Ops -- AnchorGenerator
```python
class AnchorGenerator:
"""Utility to generate anchors for a multiple feature maps."""
def __init__(self,
anchor_sizes,
scales,
aspect_ratios,
strides,
clip_boxes=False):
"""Constructs multiscale anchors.
Args:
anchor_sizes: A list/dict of int represents the anchor size for each scale. The
anchor height will be `anchor_size / sqrt(aspect_ratio)`, anchor width
will be `anchor_size * sqrt(aspect_ratio)` for each scale.
scales: A list/tuple/dict, or a list/tuple/dict of a list/tuple of positive
floats representing the actual anchor size to the base `anchor_size`.
aspect_ratios: A list/tuple/dict, or a list/tuple/dict of a list/tuple of positive
floats representing the ratio of anchor width to anchor height.
strides: A list/tuple of ints represent the anchor stride size between
center of anchors at each scale.
clip_boxes: Boolean to represents whether the anchor coordinates should be
clipped to the image size. Defaults to `False`.
Input shape: the size of the image, `[H, W, C]`
Output shape: the size of anchors concat on each level, `[(H /
strides) * (W / strides), K * 4]`
"""
def __call__(self, image_size):
"""
Args:
image_size: a tuple of 2 for image_height and image_width.
Returns:
anchors: a dict or single Tensor.
"""
```
#### Ops -- BoxMatcher
```python
class BoxMatcher:
"""Matcher based on highest value.
This class computes matches from a similarity matrix. Each column is matched
to a single row.
To support object detection target assignment this class enables setting both
positive_threshold (upper threshold) and negative_threshold (lower thresholds)
defining three categories of similarity which define whether examples are
positive, negative, or ignored:
(1) similarity >= positive_threshold: Highest similarity. Matched/Positive!
(2) positive_threshold > similarity >= negative_threshold: Medium similarity.
This is Ignored.
(3) negative_threshold > similarity: Lowest similarity for Negative Match.
For ignored matches this class sets the values in the Match object to -2.
"""
def __init__(
self,
positive_threshold,
negative_threshold=None,
force_match_for_each_col=False,
positive_value=1,
negative_value=-1,
ignore_value=-2):
"""Construct BoxMatcher.
Args:
positive_threshold: Threshold for positive matches. Positive if
sim >= positive_threshold, where sim is the maximum value of the
similarity matrix for a given column. Set to None for no threshold.
negative_threshold: Threshold for negative matches. Negative if
sim < negative_threshold. Defaults to positive_threshold when set to None.
force_match_for_each_col: If True, ensures that each column is matched to
at least one row (which is not guaranteed otherwise if the
positive_threshold is high). Defaults to False.
positive_value: An integer to fill for positive match indicators.
negative_value: An integer to fill for negative match indicators.
ignore_value: An integer to fill for ignored match indicators.
Raises:
ValueError: If negative_threshold > positive_threshold.
"""
def __call__(self, similarity_matrix):
"""Tries to match each column of the similarity matrix to a row.
Args:
similarity_matrix: A float tensor of shape [N, M], or [Batch_size, N, M]
representing any similarity metric.
Returns:
      matched_indices: An integer tensor of shape [N] or [B, N] with corresponding match indices for each
        of the M columns; each value represents the column index of the argmax match in the matrix.
      matched_indicators: An integer tensor of shape [N] or [B, N]. For a positive match, the match
        result will be `positive_value`; for a negative match, it will be
        `negative_value`; for an ignored match, it will be
        `ignore_value`.
"""
```
#### Ops -- TargetGather
```python
class TargetGather:
"""Labeler for dense object detector."""
def __init__(self):
"""Constructs Anchor Labeler."""
def __call__(self, labels, match_indices, mask, mask_val=0.0):
"""Labels anchors with ground truth inputs.
Args:
labels: An integer tensor with shape [N, dim], or [B, N, dim] representing
groundtruth classes.
match_indices: An integer tensor with shape [N] or [B, N] representing match
ground truth box index.
mask: An integer tensor with shape [N] representing match
labels, e.g., 1 for positive, -1 for negative, -2 for ignore.
mask_val: An python primitive to fill in places where mask is True.
Returns:
targets: A tensor with [M, dim] or [B, M, dim] selected from the `match_indices`.
"""
```
#### Ops -- BoxCoder
```python
class BoxCoder:
"""box coder for RetinaNet, FasterRcnn, SSD, and YOLO."""
  def __init__(self, scale_factors=None, offset='sigmoid'):
"""Constructor for BoxCoder.
Args:
scale_factors: List of 4 positive scalars to scale ty, tx, th and tw. If
set to None, does not perform scaling. For Faster RCNN, the open-source
implementation recommends using [10.0, 10.0, 5.0, 5.0].
offset: The offset used to code the box coordinates, it can be 'sigmoid',
i.e., coded_coord = coord + sigmoid(tx) which
is used for RetinaNet, FasterRcnn, and SSD, or it can be 'linear',
i.e., encoded_coord = coord + width * tx which is used for YOLO.
"""
def encode(self, boxes, anchors):
"""Compute coded_coord from coord."""
def decode(self, boxes, anchors):
"""Compute coord from coded_coord."""
```
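As a rough sketch of the intended round trip (the variable names below are assumptions, consistent with the training and serving examples above), encoding ground truth boxes against anchors and decoding them back should recover the originals up to numerical precision:

```python
box_coder = keras_cv.ops.BoxCoder(scale_factors=[10.0, 10.0, 5.0, 5.0])
encoded = box_coder.encode(gt_boxes, anchor_boxes)
decoded = box_coder.decode(encoded, anchor_boxes)  # approximately gt_boxes
```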
## Questions and Discussion Topics
* Whether `BoxMatcher` should take a list of thresholds (e.g., size 2) and a list of values (e.g., size 3).
* Gathering feedback on arguments & naming conventions.
* How to better generalize box coding, to differentiate RCNN-family encoding and YOLO-family encoding.
* Whether to have BoxCoder(inverse=False) and a single call method, or BoxCoder with `encode` and `decode` methods.

--- File: governance/rfcs/20200928-keras-cv-single-stage-2d-object-detection.md ---
sudo: required
dist: trusty
language: python
matrix:
include:
- python: 2.7
env: KERAS_BACKEND=tensorflow TEST_MODE=PEP8
- python: 2.7
env: KERAS_BACKEND=tensorflow
- python: 2.7
env: KERAS_BACKEND=tensorflow KERAS_HEAD=true
- python: 3.6
env: KERAS_BACKEND=tensorflow
- python: 2.7
env: KERAS_BACKEND=theano KERAS_HEAD=true THEANO_FLAGS=optimizer=fast_compile
- python: 3.6
env: KERAS_BACKEND=theano THEANO_FLAGS=optimizer=fast_compile
- python: 2.7
env: KERAS_BACKEND=cntk KERAS_HEAD=true PYTHONWARNINGS=ignore
- python: 3.6
env: KERAS_BACKEND=cntk PYTHONWARNINGS=ignore
install:
# code below is taken from http://conda.pydata.org/docs/travis.html
# We do this conditionally because it saves us some downloading if the
# version is the same.
- if [[ "$TRAVIS_PYTHON_VERSION" == "2.7" ]]; then
wget https://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh -O miniconda.sh;
else
wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh;
fi
- bash miniconda.sh -b -p $HOME/miniconda
- export PATH="$HOME/miniconda/bin:$PATH"
- hash -r
- conda config --set always_yes yes --set changeps1 no
- conda update -q conda
# Useful for debugging any issues with conda
- conda info -a
- conda create -q -n test-environment python=$TRAVIS_PYTHON_VERSION pytest pandas
- source activate test-environment
- pip install --only-binary=numpy,scipy numpy nose scipy matplotlib h5py theano keras==2.2.4
- conda install mkl mkl-service
# set library path
- export LD_LIBRARY_PATH=$HOME/miniconda/envs/test-environment/lib/:$LD_LIBRARY_PATH
# install PIL
- if [[ "$TRAVIS_PYTHON_VERSION" == "2.7" ]]; then
conda install pil;
elif [[ "$TRAVIS_PYTHON_VERSION" == "3.6" ]]; then
conda install Pillow;
fi
#- if [[ $KERAS_HEAD == "true" ]]; then
# pip install --no-deps git+https://github.com/keras-team/keras.git;
# fi
- pip install -e .[tests]
# install TensorFlow (CPU version).
- pip install tensorflow==1.9
# install cntk
- if [[ "$TRAVIS_PYTHON_VERSION" == "2.7" ]]; then
pip install https://cntk.ai/PythonWheel/CPU-Only/cntk-2.3.1-cp27-cp27mu-linux_x86_64.whl;
elif [[ "$TRAVIS_PYTHON_VERSION" == "3.6" ]]; then
pip install https://cntk.ai/PythonWheel/CPU-Only/cntk-2.3.1-cp36-cp36m-linux_x86_64.whl;
fi
# install pydot for visualization tests
- conda install pydot graphviz
# detect one of markdown files is changed or not
- export DOC_ONLY_CHANGED=False;
- if [ $(git diff --name-only HEAD~1 | wc -l) == "1" ] && [[ "$(git diff --name-only HEAD~1)" == *"md" ]]; then
export DOC_ONLY_CHANGED=True;
fi
# install Open MPI
- rm -rf ~/mpi
- mkdir ~/mpi
- pushd ~/mpi
- wget http://cntk.ai/PythonWheel/ForKeras/depends/openmpi_1.10-3.zip
- unzip ./openmpi_1.10-3.zip
- sudo dpkg -i openmpi_1.10-3.deb
- popd
# command to run tests
script:
- export MKL_THREADING_LAYER="GNU"
# run keras backend init to initialize backend config
- python -c "import keras.backend"
# create models directory to avoid concurrent directory creation at runtime
- mkdir ~/.keras/models
# set up keras backend
- sed -i -e 's/"backend":[[:space:]]*"[^"]*/"backend":\ "'$KERAS_BACKEND'/g' ~/.keras/keras.json;
- echo -e "Running tests with the following config:\n$(cat ~/.keras/keras.json)"
- if [[ "$DOC_ONLY_CHANGED" == "False" ]]; then
if [[ "$TEST_MODE" == "PEP8" ]]; then
PYTHONPATH=$PWD:$PYTHONPATH py.test --pep8 -m pep8 -n0;
else
PYTHONPATH=$PWD:$PYTHONPATH py.test tests/ --cov-config .coveragerc --cov=keras_applications tests/;
fi;
fi
| keras-applications/.travis.yml/0 | {
"file_path": "keras-applications/.travis.yml",
"repo_id": "keras-applications",
"token_count": 1665
} | 12 |
"""ResNet models for Keras.
# Reference paper
- [Deep Residual Learning for Image Recognition]
(https://arxiv.org/abs/1512.03385) (CVPR 2016 Best Paper Award)
# Reference implementations
- [TensorNets]
(https://github.com/taehoonlee/tensornets/blob/master/tensornets/resnets.py)
- [Caffe ResNet]
(https://github.com/KaimingHe/deep-residual-networks/tree/master/prototxt)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from . import imagenet_utils
from .imagenet_utils import decode_predictions
from .resnet_common import ResNet50
from .resnet_common import ResNet101
from .resnet_common import ResNet152
def preprocess_input(x, **kwargs):
"""Preprocesses a numpy array encoding a batch of images.
# Arguments
x: a 4D numpy array consisting of RGB values within [0, 255].
data_format: data format of the image tensor.
# Returns
Preprocessed array.
"""
return imagenet_utils.preprocess_input(x, mode='caffe', **kwargs)
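# Illustrative usage sketch (keras_applications expects the host Keras modules
# to be injected via keyword arguments; the exact call below is an assumption,
# not a documented API):
#
#   import numpy as np
#   import keras
#   from keras_applications import resnet
#
#   images = np.random.uniform(0, 255, size=(2, 224, 224, 3)).astype('float32')
#   x = resnet.preprocess_input(images, backend=keras.backend)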
| keras-applications/keras_applications/resnet.py/0 | {
"file_path": "keras-applications/keras_applications/resnet.py",
"repo_id": "keras-applications",
"token_count": 363
} | 13 |
# keras-contrib : Keras community contributions
Keras-contrib is deprecated. Use [TensorFlow Addons](https://github.com/tensorflow/addons).
## The future of Keras-contrib:
We're migrating to [tensorflow/addons](https://github.com/tensorflow/addons). See the announcement [here](https://github.com/keras-team/keras-contrib/issues/519).
[![Build Status](https://travis-ci.org/keras-team/keras-contrib.svg?branch=master)](https://travis-ci.org/keras-team/keras-contrib)
This library is the official extension repository for the Python deep learning library [Keras](http://www.keras.io). It contains additional layers, activations, loss functions, optimizers, etc. which are not yet available within Keras itself. All of these additional modules can be used in conjunction with core Keras models and modules.
As the community contributions in Keras-Contrib are tested, used, validated, and their utility proven, they may be integrated into the Keras core repository. In the interest of keeping Keras succinct, clean, and powerfully simple, only the most useful contributions make it into Keras. This contribution repository is both the proving ground for new functionality, and the archive for functionality that (while useful) may not fit well into the Keras paradigm.
---
## Installation
#### Install keras_contrib for keras-team/keras
For instructions on how to install Keras,
see [the Keras installation page](https://keras.io/#installation).
```shell
git clone https://www.github.com/keras-team/keras-contrib.git
cd keras-contrib
python setup.py install
```
Alternatively, using pip:
```shell
sudo pip install git+https://www.github.com/keras-team/keras-contrib.git
```
to uninstall:
```shell
pip uninstall keras_contrib
```
#### Install keras_contrib for tensorflow.keras
```shell
git clone https://www.github.com/keras-team/keras-contrib.git
cd keras-contrib
python convert_to_tf_keras.py
USE_TF_KERAS=1 python setup.py install
```
to uninstall:
```shell
pip uninstall tf_keras_contrib
```
For contributor guidelines see [CONTRIBUTING.md](https://github.com/keras-team/keras-contrib/blob/master/CONTRIBUTING.md)
---
## Example Usage
Modules from the Keras-Contrib library are used in the same way as modules within Keras itself.
```python
from keras.models import Sequential
from keras.layers import Dense
import numpy as np
# I wish Keras had the Parametric Exponential Linear activation..
# Oh, wait..!
from keras_contrib.layers.advanced_activations import PELU
# Create the Keras model, including the PELU advanced activation
model = Sequential()
model.add(Dense(100, input_shape=(10,)))
model.add(PELU())
# Compile and fit on random data
model.compile(loss='mse', optimizer='adam')
model.fit(x=np.random.random((100, 10)), y=np.random.random((100, 100)), epochs=5, verbose=0)
# Save our model
model.save('example.h5')
```
### A Common "Gotcha"
As Keras-Contrib is external to the Keras core, loading a model requires a bit more work. While a pure Keras model is loadable with nothing more than an import of `keras.models.load_model`, a model which contains a contributed module requires an additional import of `keras_contrib`:
```python
# Required, as usual
from keras.models import load_model
# Recommended method; requires knowledge of the underlying architecture of the model
from keras_contrib.layers import PELU
from keras_contrib.layers import GroupNormalization
# Load our model
custom_objects = {'PELU': PELU, 'GroupNormalization': GroupNormalization}
model = load_model('example.h5', custom_objects)
```
| keras-contrib/README.md/0 | {
"file_path": "keras-contrib/README.md",
"repo_id": "keras-contrib",
"token_count": 1072
} | 14 |
{%- for toc_item in toc_item.children %}
<li class="toctree-l{{ navlevel}}"><a class="reference internal" href="{% if not nav_item == page %}{{ nav_item.url|url }}{% endif %}{{ toc_item.url }}">{{ toc_item.title }}</a>
{%- set navlevel = navlevel + 1 %}
{%- if navlevel <= config.theme.navigation_depth and toc_item.children %}
<ul>
{%- include 'toc.html' %}
</ul>
{%- endif %}
{%- set navlevel = navlevel - 1 %}
</li>
{%- endfor %}
| keras-contrib/contrib_docs/theme/toc.html/0 | {
"file_path": "keras-contrib/contrib_docs/theme/toc.html",
"repo_id": "keras-contrib",
"token_count": 215
} | 15 |
from .densenet import DenseNet
from .resnet import ResNet, ResNet18, ResNet34, ResNet50, ResNet101, ResNet152
from .wide_resnet import WideResidualNetwork
from .nasnet import NASNet, NASNetLarge, NASNetMobile
| keras-contrib/keras_contrib/applications/__init__.py/0 | {
"file_path": "keras-contrib/keras_contrib/applications/__init__.py",
"repo_id": "keras-contrib",
"token_count": 67
} | 16 |
from __future__ import absolute_import
from keras import backend as K
from keras.constraints import Constraint
class Clip(Constraint):
"""Clips weights to [-c, c].
# Arguments
c: Clipping parameter.
"""
def __init__(self, c=0.01):
self.c = c
def __call__(self, p):
return K.clip(p, -self.c, self.c)
def get_config(self):
return {'name': self.__class__.__name__,
'c': self.c}
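# Minimal usage sketch (illustrative; assumes `Clip` is re-exported from
# `keras_contrib.constraints`):
#
#   from keras.layers import Dense
#   from keras_contrib.constraints import Clip
#
#   layer = Dense(10, kernel_constraint=Clip(0.01))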
| keras-contrib/keras_contrib/constraints/clip.py/0 | {
"file_path": "keras-contrib/keras_contrib/constraints/clip.py",
"repo_id": "keras-contrib",
"token_count": 202
} | 17 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from keras.layers import Layer
from keras_contrib import backend as KC
from keras_contrib.utils.conv_utils import normalize_data_format
class SubPixelUpscaling(Layer):
""" Sub-pixel convolutional upscaling layer.
This layer requires a Convolution2D prior to it,
having output filters computed according to
the formula :
filters = k * (scale_factor * scale_factor)
where k = a user defined number of filters (generally larger than 32)
scale_factor = the upscaling factor (generally 2)
This layer performs the depth to space operation on
the convolution filters, and returns a
tensor with the size as defined below.
# Example
```python
# A standard subpixel upscaling block
x = Conv2D(256, (3, 3), padding='same', activation='relu')(...)
u = SubPixelUpscaling(scale_factor=2)(x)
# Optional
x = Conv2D(256, (3, 3), padding='same', activation='relu')(u)
```
In practice, it is useful to have a second convolution layer after the
SubPixelUpscaling layer to speed up the learning process.
However, if you are stacking multiple
SubPixelUpscaling blocks, it may increase
the number of parameters greatly, so the
Convolution layer after SubPixelUpscaling
layer can be removed.
# Arguments
scale_factor: Upscaling factor.
data_format: Can be None, 'channels_first' or 'channels_last'.
# Input shape
4D tensor with shape:
`(samples, k * (scale_factor * scale_factor) channels, rows, cols)`
if data_format='channels_first'
or 4D tensor with shape:
`(samples, rows, cols, k * (scale_factor * scale_factor) channels)`
if data_format='channels_last'.
# Output shape
4D tensor with shape:
`(samples, k channels, rows * scale_factor, cols * scale_factor))`
if data_format='channels_first'
or 4D tensor with shape:
`(samples, rows * scale_factor, cols * scale_factor, k channels)`
if data_format='channels_last'.
# References
- [Real-Time Single Image and Video Super-Resolution Using an
Efficient Sub-Pixel Convolutional Neural Network](
https://arxiv.org/abs/1609.05158)
"""
def __init__(self, scale_factor=2, data_format=None, **kwargs):
super(SubPixelUpscaling, self).__init__(**kwargs)
self.scale_factor = scale_factor
self.data_format = normalize_data_format(data_format)
def build(self, input_shape):
pass
def call(self, x, mask=None):
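# Rearrange data from the channel dimension into spatial blocks
# (depth_to_space semantics): rows/cols are upscaled by `scale_factor`.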
y = KC.depth_to_space(x, self.scale_factor, self.data_format)
return y
def compute_output_shape(self, input_shape):
if self.data_format == 'channels_first':
b, k, r, c = input_shape
new_k = k // (self.scale_factor ** 2)
new_r = r * self.scale_factor
new_c = c * self.scale_factor
return b, new_k, new_r, new_c
else:
b, r, c, k = input_shape
new_r = r * self.scale_factor
new_c = c * self.scale_factor
new_k = k // (self.scale_factor ** 2)
return b, new_r, new_c, new_k
def get_config(self):
config = {'scale_factor': self.scale_factor,
'data_format': self.data_format}
base_config = super(SubPixelUpscaling, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
| keras-contrib/keras_contrib/layers/convolutional/subpixelupscaling.py/0 | {
"file_path": "keras-contrib/keras_contrib/layers/convolutional/subpixelupscaling.py",
"repo_id": "keras-contrib",
"token_count": 1456
} | 18 |
from keras import backend as K
from keras.optimizers import Optimizer
class Yogi(Optimizer):
"""Yogi optimizer.
Yogi is a variation of Adam that controls the increase in effective
learning rate, which (according to the paper) leads to even better
performance than Adam with similar theoretical guarantees on convergence.
Default parameters follow those provided in the original paper (Table 1).
# Arguments
lr: float >= 0. Learning rate.
beta_1: float, 0 < beta < 1. Generally close to 1.
beta_2: float, 0 < beta < 1. Generally close to 1.
epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
decay: float >= 0. Learning rate decay over each update.
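# Update rule (as implemented in `get_updates` below, with gradient `g`)
m_t = beta_1 * m + (1 - beta_1) * g
v_t = v - (1 - beta_2) * sign(v - g^2) * g^2
p_t = p - lr_t * m_t / (sqrt(v_t) + epsilon)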
# References
- [Adaptive Methods for Nonconvex Optimization](
https://papers.nips.cc/paper/8186-adaptive-methods-for-nonconvex-optimization)
If you open an issue or a pull request about the Yogi optimizer,
please add 'cc @MarcoAndreaBuchmann' to notify him.
"""
def __init__(self, lr=0.01, beta_1=0.9, beta_2=0.999,
epsilon=1e-3, decay=0., **kwargs):
super(Yogi, self).__init__(**kwargs)
if beta_1 <= 0 or beta_1 >= 1:
raise ValueError("beta_1 has to be in ]0, 1[")
if beta_2 <= 0 or beta_2 >= 1:
raise ValueError("beta_2 has to be in ]0, 1[")
with K.name_scope(self.__class__.__name__):
self.iterations = K.variable(0, dtype='int64', name='iterations')
self.lr = K.variable(lr, name='lr')
self.beta_1 = K.variable(beta_1, name='beta_1')
self.beta_2 = K.variable(beta_2, name='beta_2')
self.decay = K.variable(decay, name='decay')
if epsilon is None:
epsilon = K.epsilon()
if epsilon <= 0:
raise ValueError("epsilon has to be larger than 0")
self.epsilon = epsilon
self.initial_decay = decay
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
self.updates = [K.update_add(self.iterations, 1)]
lr = self.lr
if self.initial_decay > 0:
lr = lr * (1. / (1. + self.decay * K.cast(self.iterations,
K.dtype(self.decay))))
t = K.cast(self.iterations, K.floatx()) + 1
lr_t = lr * (K.sqrt(1. - K.pow(self.beta_2, t)) /
(1. - K.pow(self.beta_1, t)))
ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
vhats = [K.zeros(1) for _ in params]
self.weights = [self.iterations] + ms + vs + vhats
for p, g, m, v, vhat in zip(params, grads, ms, vs, vhats):
g2 = K.square(g)
m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
v_t = v - (1. - self.beta_2) * K.sign(v - g2) * g2
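# Yogi's key difference from Adam: v moves toward g2 additively,
# gated by sign(v - g2), instead of an exponential moving average.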
p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)
self.updates.append(K.update(m, m_t))
self.updates.append(K.update(v, v_t))
new_p = p_t
# Apply constraints.
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
self.updates.append(K.update(p, new_p))
return self.updates
def get_config(self):
config = {'lr': float(K.get_value(self.lr)),
'beta_1': float(K.get_value(self.beta_1)),
'beta_2': float(K.get_value(self.beta_2)),
'decay': float(K.get_value(self.decay)),
'epsilon': self.epsilon}
base_config = super(Yogi, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
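# Minimal usage sketch (illustrative; assumes `Yogi` is re-exported from
# `keras_contrib.optimizers`):
#
#   from keras.models import Sequential
#   from keras.layers import Dense
#   from keras_contrib.optimizers import Yogi
#
#   model = Sequential([Dense(1, input_shape=(8,))])
#   model.compile(optimizer=Yogi(lr=0.01), loss='mse')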
| keras-contrib/keras_contrib/optimizers/yogi.py/0 | {
"file_path": "keras-contrib/keras_contrib/optimizers/yogi.py",
"repo_id": "keras-contrib",
"token_count": 1858
} | 19 |
import numpy as np
import pytest
from keras import backend as K
from keras.layers import Input
from keras.models import Sequential, Model
from numpy.testing import assert_allclose
from keras_contrib.layers import InstanceNormalization
from keras_contrib.utils.test_utils import layer_test
input_1 = np.arange(10)
input_2 = np.zeros(10)
input_3 = np.ones(10)
input_shapes = [np.ones((10, 10)), np.ones((10, 10, 10))]
def basic_instancenorm_test():
from keras import regularizers
layer_test(InstanceNormalization,
kwargs={'epsilon': 0.1,
'gamma_regularizer': regularizers.l2(0.01),
'beta_regularizer': regularizers.l2(0.01)},
input_shape=(3, 4, 2))
layer_test(InstanceNormalization,
kwargs={'gamma_initializer': 'ones',
'beta_initializer': 'ones'},
input_shape=(3, 4, 2))
layer_test(InstanceNormalization,
kwargs={'scale': False, 'center': False},
input_shape=(3, 3))
@pytest.mark.parametrize('input_shape,axis', [((10, 1), -1),
((10,), None)])
def test_instancenorm_correctness_rank2(input_shape, axis):
model = Sequential()
norm = InstanceNormalization(input_shape=input_shape, axis=axis)
model.add(norm)
model.compile(loss='mse', optimizer='sgd')
# centered on 5.0, variance 10.0
x = np.random.normal(loc=5.0, scale=10.0, size=(1000,) + input_shape)
model.fit(x, x, epochs=4, verbose=0)
out = model.predict(x)
out -= K.eval(norm.beta)
out /= K.eval(norm.gamma)
assert_allclose(out.mean(), 0.0, atol=1e-1)
assert_allclose(out.std(), 1.0, atol=1e-1)
def test_instancenorm_training_argument():
bn1 = InstanceNormalization(input_shape=(10,))
x1 = Input(shape=(10,))
y1 = bn1(x1, training=True)
model1 = Model(x1, y1)
np.random.seed(123)
x = np.random.normal(loc=5.0, scale=10.0, size=(20, 10))
output_a = model1.predict(x)
model1.compile(loss='mse', optimizer='rmsprop')
model1.fit(x, x, epochs=1, verbose=0)
output_b = model1.predict(x)
assert np.abs(np.sum(output_a - output_b)) > 0.1
assert_allclose(output_b.mean(), 0.0, atol=1e-1)
assert_allclose(output_b.std(), 1.0, atol=1e-1)
bn2 = InstanceNormalization(input_shape=(10,))
x2 = Input(shape=(10,))
bn2(x2, training=False)
def test_instancenorm_convnet():
model = Sequential()
norm = InstanceNormalization(axis=1, input_shape=(3, 4, 4))
model.add(norm)
model.compile(loss='mse', optimizer='sgd')
# centered on 5.0, variance 10.0
x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 3, 4, 4))
model.fit(x, x, epochs=4, verbose=0)
out = model.predict(x)
out -= np.reshape(K.eval(norm.beta), (1, 3, 1, 1))
out /= np.reshape(K.eval(norm.gamma), (1, 3, 1, 1))
assert_allclose(np.mean(out, axis=(0, 2, 3)), 0.0, atol=1e-1)
assert_allclose(np.std(out, axis=(0, 2, 3)), 1.0, atol=1e-1)
def test_shared_instancenorm():
'''Test that an IN layer can be shared
across different data streams.
'''
# Test single layer reuse
bn = InstanceNormalization(input_shape=(10,))
x1 = Input(shape=(10,))
bn(x1)
x2 = Input(shape=(10,))
y2 = bn(x2)
x = np.random.normal(loc=5.0, scale=10.0, size=(2, 10))
model = Model(x2, y2)
model.compile('sgd', 'mse')
model.train_on_batch(x, x)
# Test model-level reuse
x3 = Input(shape=(10,))
y3 = model(x3)
new_model = Model(x3, y3)
new_model.compile('sgd', 'mse')
new_model.train_on_batch(x, x)
def test_instancenorm_perinstancecorrectness():
model = Sequential()
norm = InstanceNormalization(input_shape=(10,))
model.add(norm)
model.compile(loss='mse', optimizer='sgd')
# bimodal distribution
z = np.random.normal(loc=5.0, scale=10.0, size=(2, 10))
y = np.random.normal(loc=-5.0, scale=17.0, size=(2, 10))
x = np.append(z, y)
x = np.reshape(x, (4, 10))
model.fit(x, x, epochs=4, batch_size=4, verbose=1)
out = model.predict(x)
out -= K.eval(norm.beta)
out /= K.eval(norm.gamma)
# verify that each instance in the batch is individually normalized
for i in range(4):
instance = out[i]
assert_allclose(instance.mean(), 0.0, atol=1e-1)
assert_allclose(instance.std(), 1.0, atol=1e-1)
# if each instance is normalized, so should the batch
assert_allclose(out.mean(), 0.0, atol=1e-1)
assert_allclose(out.std(), 1.0, atol=1e-1)
def test_instancenorm_perchannel_correctness():
# have each channel with a different average and std
x = np.random.normal(loc=5.0, scale=2.0, size=(10, 1, 4, 4))
y = np.random.normal(loc=10.0, scale=3.0, size=(10, 1, 4, 4))
z = np.random.normal(loc=-5.0, scale=5.0, size=(10, 1, 4, 4))
batch = np.append(x, y, axis=1)
batch = np.append(batch, z, axis=1)
# this model does not provide a normalization axis
model = Sequential()
norm = InstanceNormalization(axis=None,
input_shape=(3, 4, 4),
center=False,
scale=False)
model.add(norm)
model.compile(loss='mse', optimizer='sgd')
model.fit(batch, batch, epochs=4, verbose=0)
out = model.predict(batch)
# values will not be normalized per-channel
for instance in range(10):
for channel in range(3):
activations = out[instance, channel]
assert abs(activations.mean()) > 1e-2
assert abs(activations.std() - 1.0) > 1e-6
# but values are still normalized per-instance
activations = out[instance]
assert_allclose(activations.mean(), 0.0, atol=1e-1)
assert_allclose(activations.std(), 1.0, atol=1e-1)
# this model sets the channel as a normalization axis
model = Sequential()
norm = InstanceNormalization(axis=1,
input_shape=(3, 4, 4),
center=False,
scale=False)
model.add(norm)
model.compile(loss='mse', optimizer='sgd')
model.fit(batch, batch, epochs=4, verbose=0)
out = model.predict(batch)
# values are now normalized per-channel
for instance in range(10):
for channel in range(3):
activations = out[instance, channel]
assert_allclose(activations.mean(), 0.0, atol=1e-1)
assert_allclose(activations.std(), 1.0, atol=1e-1)
if __name__ == '__main__':
pytest.main([__file__])
| keras-contrib/tests/keras_contrib/layers/normalization/test_instancenormalization.py/0 | {
"file_path": "keras-contrib/tests/keras_contrib/layers/normalization/test_instancenormalization.py",
"repo_id": "keras-contrib",
"token_count": 3078
} | 20 |
from markdown import markdown
from docs import autogen
import pytest
test_doc1 = {
'doc': """Base class for recurrent layers.
# Arguments
cell: A RNN cell instance. A RNN cell is a class that has:
- a `call(input_at_t, states_at_t)` method, returning
`(output_at_t, states_at_t_plus_1)`. The call method of the
cell can also take the optional argument `constants`, see
section "Note on passing external constants" below.
- a `state_size` attribute. This can be a single integer
(single state) in which case it is
the size of the recurrent state
(which should be the same as the size of the cell output).
This can also be a list/tuple of integers
(one size per state). In this case, the first entry
(`state_size[0]`) should be the same as
the size of the cell output.
It is also possible for `cell` to be a list of RNN cell instances,
in which cases the cells get stacked on after the other in the RNN,
implementing an efficient stacked RNN.
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
return_state: Boolean. Whether to return the last state
in addition to the output.
go_backwards: Boolean (default False).
If True, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
unroll: Boolean (default False).
If True, the network will be unrolled,
else a symbolic loop will be used.
Unrolling can speed-up a RNN,
although it tends to be more memory-intensive.
Unrolling is only suitable for short sequences.
input_dim: dimensionality of the input (integer).
This argument (or alternatively,
the keyword argument `input_shape`)
is required when using this layer as the first layer in a model.
input_length: Length of input sequences, to be specified
when it is constant.
This argument is required if you are going to connect
`Flatten` then `Dense` layers upstream
(without it, the shape of the dense outputs cannot be computed).
Note that if the recurrent layer is not the first layer
in your model, you would need to specify the input length
at the level of the first layer
(e.g. via the `input_shape` argument)
# Input shape
3D tensor with shape `(batch_size, timesteps, input_dim)`.
# Output shape
- if `return_state`: a list of tensors. The first tensor is
the output. The remaining tensors are the last states,
each with shape `(batch_size, units)`.
- if `return_sequences`: 3D tensor with shape
`(batch_size, timesteps, units)`.
- else, 2D tensor with shape `(batch_size, units)`.
# Masking
This layer supports masking for input data with a variable number
of timesteps. To introduce masks to your data,
use an [Embedding](embeddings.md) layer with the `mask_zero` parameter
set to `True`.
# Note on using statefulness in RNNs
You can set RNN layers to be 'stateful', which means that the states
computed for the samples in one batch will be reused as initial states
for the samples in the next batch. This assumes a one-to-one mapping
between samples in different successive batches.
To enable statefulness:
- specify `stateful=True` in the layer constructor.
- specify a fixed batch size for your model, by passing
if sequential model:
`batch_input_shape=(...)` to the first layer in your model.
else for functional model with 1 or more Input layers:
`batch_shape=(...)` to all the first layers in your model.
This is the expected shape of your inputs
*including the batch size*.
It should be a tuple of integers, e.g. `(32, 10, 100)`.
- specify `shuffle=False` when calling fit().
To reset the states of your model, call `.reset_states()` on either
a specific layer, or on your entire model.
# Note on specifying the initial state of RNNs
Note: that
One: You can specify the initial state of RNN layers symbolically by
calling them with the keyword argument `initial_state`.
Two: The value of `initial_state` should be a tensor or list of
tensors representing
the initial state of the RNN layer.
You can specify the initial state of RNN layers numerically by:
One: calling `reset_states`
- With the keyword argument `states`.
- The value of
`states` should be a numpy array or
list of numpy arrays representing
the initial state of the RNN layer.
# Note on passing external constants to RNNs
You can pass "external" constants to the cell using the `constants`
keyword: argument of `RNN.__call__` (as well as `RNN.call`) method.
This: requires that the `cell.call` method accepts the same keyword argument
`constants`. Such constants can be used to condition the cell
transformation on additional static inputs (not changing over time),
a.k.a. an attention mechanism.
# Examples
```python
# First, let's define a RNN Cell, as a layer subclass.
class MinimalRNNCell(keras.layers.Layer):
def __init__(self, units, **kwargs):
self.units = units
self.state_size = units
super(MinimalRNNCell, self).__init__(**kwargs)
def build(self, input_shape):
self.kernel = self.add_weight(shape=(input_shape[-1], self.units),
initializer='uniform',
name='kernel')
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
initializer='uniform',
name='recurrent_kernel')
self.built = True
def call(self, inputs, states):
prev_output = states[0]
h = K.dot(inputs, self.kernel)
output = h + K.dot(prev_output, self.recurrent_kernel)
return output, [output]
# Let's use this cell in a RNN layer:
cell = MinimalRNNCell(32)
x = keras.Input((None, 5))
layer = RNN(cell)
y = layer(x)
# Here's how to use the cell to build a stacked RNN:
cells = [MinimalRNNCell(32), MinimalRNNCell(64)]
x = keras.Input((None, 5))
layer = RNN(cells)
y = layer(x)
```
""",
'result': '''Base class for recurrent layers.
__Arguments__
- __cell__: A RNN cell instance. A RNN cell is a class that has:
- a `call(input_at_t, states_at_t)` method, returning
`(output_at_t, states_at_t_plus_1)`. The call method of the
cell can also take the optional argument `constants`, see
section "Note on passing external constants" below.
- a `state_size` attribute. This can be a single integer
(single state) in which case it is
the size of the recurrent state
(which should be the same as the size of the cell output).
This can also be a list/tuple of integers
(one size per state). In this case, the first entry
(`state_size[0]`) should be the same as
the size of the cell output.
It is also possible for `cell` to be a list of RNN cell instances,
in which cases the cells get stacked on after the other in the RNN,
implementing an efficient stacked RNN.
- __return_sequences__: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
- __return_state__: Boolean. Whether to return the last state
in addition to the output.
- __go_backwards__: Boolean (default False).
If True, process the input sequence backwards and return the
reversed sequence.
- __stateful__: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
- __unroll__: Boolean (default False).
If True, the network will be unrolled,
else a symbolic loop will be used.
Unrolling can speed-up a RNN,
although it tends to be more memory-intensive.
Unrolling is only suitable for short sequences.
- __input_dim__: dimensionality of the input (integer).
This argument (or alternatively,
the keyword argument `input_shape`)
is required when using this layer as the first layer in a model.
- __input_length__: Length of input sequences, to be specified
when it is constant.
This argument is required if you are going to connect
`Flatten` then `Dense` layers upstream
(without it, the shape of the dense outputs cannot be computed).
Note that if the recurrent layer is not the first layer
in your model, you would need to specify the input length
at the level of the first layer
(e.g. via the `input_shape` argument)
__Input shape__
3D tensor with shape `(batch_size, timesteps, input_dim)`.
__Output shape__
- if `return_state`: a list of tensors. The first tensor is
the output. The remaining tensors are the last states,
each with shape `(batch_size, units)`.
- if `return_sequences`: 3D tensor with shape
`(batch_size, timesteps, units)`.
- else, 2D tensor with shape `(batch_size, units)`.
__Masking__
This layer supports masking for input data with a variable number
of timesteps. To introduce masks to your data,
use an [Embedding](embeddings.md) layer with the `mask_zero` parameter
set to `True`.
__Note on using statefulness in RNNs__
You can set RNN layers to be 'stateful', which means that the states
computed for the samples in one batch will be reused as initial states
for the samples in the next batch. This assumes a one-to-one mapping
between samples in different successive batches.
To enable statefulness:
- specify `stateful=True` in the layer constructor.
- specify a fixed batch size for your model, by passing
if sequential model:
`batch_input_shape=(...)` to the first layer in your model.
else for functional model with 1 or more Input layers:
`batch_shape=(...)` to all the first layers in your model.
This is the expected shape of your inputs
*including the batch size*.
It should be a tuple of integers, e.g. `(32, 10, 100)`.
- specify `shuffle=False` when calling fit().
To reset the states of your model, call `.reset_states()` on either
a specific layer, or on your entire model.
__Note on specifying the initial state of RNNs__
Note: that
- __One__: You can specify the initial state of RNN layers symbolically by
calling them with the keyword argument `initial_state`.
- __Two__: The value of `initial_state` should be a tensor or list of
tensors representing
the initial state of the RNN layer.
You can specify the initial state of RNN layers numerically by:
- __One__: calling `reset_states`
- With the keyword argument `states`.
- The value of
`states` should be a numpy array or
list of numpy arrays representing
the initial state of the RNN layer.
__Note on passing external constants to RNNs__
You can pass "external" constants to the cell using the `constants`
- __keyword__: argument of `RNN.__call__` (as well as `RNN.call`) method.
- __This__: requires that the `cell.call` method accepts the same keyword argument
`constants`. Such constants can be used to condition the cell
transformation on additional static inputs (not changing over time),
a.k.a. an attention mechanism.
__Examples__
```python
# First, let's define a RNN Cell, as a layer subclass.
class MinimalRNNCell(keras.layers.Layer):
def __init__(self, units, **kwargs):
self.units = units
self.state_size = units
super(MinimalRNNCell, self).__init__(**kwargs)
def build(self, input_shape):
self.kernel = self.add_weight(shape=(input_shape[-1], self.units),
initializer='uniform',
name='kernel')
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
initializer='uniform',
name='recurrent_kernel')
self.built = True
def call(self, inputs, states):
prev_output = states[0]
h = K.dot(inputs, self.kernel)
output = h + K.dot(prev_output, self.recurrent_kernel)
return output, [output]
# Let's use this cell in a RNN layer:
cell = MinimalRNNCell(32)
x = keras.Input((None, 5))
layer = RNN(cell)
y = layer(x)
# Here's how to use the cell to build a stacked RNN:
cells = [MinimalRNNCell(32), MinimalRNNCell(64)]
x = keras.Input((None, 5))
layer = RNN(cells)
y = layer(x)
```
'''}
def test_doc_lists():
docstring = autogen.process_docstring(test_doc1['doc'])
assert markdown(docstring) == markdown(test_doc1['result'])
dummy_docstring = """Multiplies 2 tensors (and/or variables) and returns a *tensor*.
When attempting to multiply a nD tensor
with a nD tensor, it reproduces the Theano behavior.
(e.g. `(2, 3) * (4, 3, 5) -> (2, 4, 5)`)
# Examples
```python
# Theano-like behavior example
>>> x = K.random_uniform_variable(shape=(2, 3), low=0, high=1)
>>> y = K.ones((4, 3, 5))
>>> xy = K.dot(x, y)
>>> K.int_shape(xy)
(2, 4, 5)
```
# Numpy implementation
```python
def dot(x, y):
return dot(x, y)
```
"""
def test_doc_multiple_sections_code():
""" Checks that we can have code blocks in multiple sections."""
generated = autogen.process_docstring(dummy_docstring)
assert '# Theano-like behavior example' in generated
assert 'def dot(x, y):' in generated
if __name__ == '__main__':
pytest.main([__file__])
| keras-contrib/tests/tooling/test_doc_auto_generation.py/0 | {
"file_path": "keras-contrib/tests/tooling/test_doc_auto_generation.py",
"repo_id": "keras-contrib",
"token_count": 5533
} | 21 |
# Keras Core is becoming Keras 3 and has moved to keras-team/keras
Multi-backend Keras has a new repo: [keras-team/keras](https://github.com/keras-team/keras).
Open any issues / PRs there. `keras-team/keras-core` is no longer in use.
**Keras Core** was the codename of the multi-backend Keras project throughout its initial development
(April 2023 - July 2023) and its public beta test (July 2023 - September 2023). Now, Keras Core
is gearing up to become Keras 3, to be released under the `keras` name. As such, we've moved development
back to [keras-team/keras](https://github.com/keras-team/keras).
Meanwhile, the legacy `tf.keras` codebase is now available at
[keras-team/tf-keras](https://github.com/keras-team/tf-keras).
If you were a contributor to Keras Core -- thank you! Your commits have been reapplied in
`keras-team/keras` under your own authorship, so that your contribution record is fully preserved.
Likewise, if you were a contributor to `keras-team/keras` before the swap, your contribution
history has been fully preserved: the new commits have been reapplied on top of the old git tree.
| keras-core/README.md/0 | {
"file_path": "keras-core/README.md",
"repo_id": "keras-core",
"token_count": 331
} | 22 |
import time
import keras_core
class BenchmarkMetricsCallback(keras_core.callbacks.Callback):
def __init__(self, start_batch=1, stop_batch=None):
self.start_batch = start_batch
self.stop_batch = stop_batch
# Store the throughput of each epoch.
self.state = {"throughput": []}
def on_train_batch_begin(self, batch, logs=None):
if batch == self.start_batch:
self.state["epoch_begin_time"] = time.time()
def on_train_batch_end(self, batch, logs=None):
if batch == self.stop_batch:
epoch_end_time = time.time()
throughput = (self.stop_batch - self.start_batch + 1) / (
epoch_end_time - self.state["epoch_begin_time"]
)
self.state["throughput"].append(throughput)
| keras-core/benchmarks/model_benchmark/benchmark_utils.py/0 | {
"file_path": "keras-core/benchmarks/model_benchmark/benchmark_utils.py",
"repo_id": "keras-core",
"token_count": 348
} | 23 |
import numpy as np
import keras_core
from keras_core import layers
from keras_core.utils import to_categorical
# Model / data parameters
num_classes = 10
input_shape = (28, 28, 1)
# Load the data and split it between train and test sets
(x_train, y_train), (x_test, y_test) = keras_core.datasets.mnist.load_data()
# Scale images to the [0, 1] range
x_train = x_train.astype("float32") / 255
x_test = x_test.astype("float32") / 255
# Make sure images have shape (28, 28, 1)
x_train = np.expand_dims(x_train, -1)
x_test = np.expand_dims(x_test, -1)
print("x_train shape:", x_train.shape)
print(x_train.shape[0], "train samples")
print(x_test.shape[0], "test samples")
# convert class vectors to binary class matrices
y_train = to_categorical(y_train, num_classes)
y_test = to_categorical(y_test, num_classes)
batch_size = 128
epochs = 3
model = keras_core.Sequential(
[
layers.Input(shape=input_shape),
layers.Conv2D(32, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Flatten(),
layers.Dropout(0.5),
layers.Dense(num_classes, activation="softmax"),
]
)
model.summary()
model.compile(
loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"]
)
model.fit(
x_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.1
)
score = model.evaluate(x_test, y_test, verbose=0)
print("Test loss:", score[0])
print("Test accuracy:", score[1])
| keras-core/examples/demo_mnist_convnet.py/0 | {
"file_path": "keras-core/examples/demo_mnist_convnet.py",
"repo_id": "keras-core",
"token_count": 645
} | 24 |
"""
Title: English speaker accent recognition using Transfer Learning
Author: [Fadi Badine](https://twitter.com/fadibadine)
Converted to Keras Core by: [Fadi Badine](https://twitter.com/fadibadine)
Date created: 2022/04/16
Last modified: 2023/07/19
Description: Training a model to classify UK & Ireland accents using feature extraction from Yamnet.
Accelerator: GPU
"""
"""
## Introduction
The following example shows how to use feature extraction in order to
train a model to classify the English accent spoken in an audio wave.
Instead of training a model from scratch, transfer learning enables us to
take advantage of existing state-of-the-art deep learning models and use them as feature extractors.
Our process:
* Use a TF Hub pre-trained model (Yamnet) and apply it as part of the tf.data pipeline which transforms
the audio files into feature vectors.
* Train a dense model on the feature vectors.
* Use the trained model for inference on a new audio file.
Note:
* We need to install TensorFlow IO in order to resample audio files to 16 kHz, as required by the Yamnet model.
* In the test section, ffmpeg is used to convert the mp3 file to wav.
You can install TensorFlow IO with the following command:
"""
"""shell
pip install -U -q tensorflow_io
"""
"""
## Configuration
"""
SEED = 1337
EPOCHS = 1
BATCH_SIZE = 64
VALIDATION_RATIO = 0.1
MODEL_NAME = "uk_irish_accent_recognition"
# Location where the dataset will be downloaded.
# By default (None), keras.utils.get_file will use ~/.keras/ as the CACHE_DIR
CACHE_DIR = None
# The location of the dataset
URL_PATH = "https://www.openslr.org/resources/83/"
# List of datasets compressed files that contain the audio files
zip_files = {
0: "irish_english_male.zip",
1: "midlands_english_female.zip",
2: "midlands_english_male.zip",
3: "northern_english_female.zip",
4: "northern_english_male.zip",
5: "scottish_english_female.zip",
6: "scottish_english_male.zip",
7: "southern_english_female.zip",
8: "southern_english_male.zip",
9: "welsh_english_female.zip",
10: "welsh_english_male.zip",
}
# We see that there are 2 compressed files for each accent (except Irish):
# - One for male speakers
# - One for female speakers
# However, we will be using a gender agnostic dataset.
# List of gender agnostic categories
gender_agnostic_categories = [
"ir", # Irish
"mi", # Midlands
"no", # Northern
"sc", # Scottish
"so", # Southern
"we", # Welsh
]
class_names = [
"Irish",
"Midlands",
"Northern",
"Scottish",
"Southern",
"Welsh",
"Not a speech",
]
"""
## Imports
"""
import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import io
import csv
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_io as tfio
import keras_core as keras
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
from IPython.display import Audio
# Set all random seeds in order to get reproducible results
keras.utils.set_random_seed(SEED)
# Where to download the dataset
DATASET_DESTINATION = os.path.join(
CACHE_DIR if CACHE_DIR else "~/.keras/", "datasets"
)
"""
## Yamnet Model
Yamnet is an audio event classifier trained on the AudioSet dataset to predict audio
events from the AudioSet ontology. It is available on TensorFlow Hub.
Yamnet accepts a 1-D tensor of audio samples with a sample rate of 16 kHz.
As output, the model returns a 3-tuple:
* Scores of shape `(N, 521)` representing the scores of the 521 classes.
* Embeddings of shape `(N, 1024)`.
* The log-mel spectrogram of the entire audio frame.
We will use the embeddings, which are the features extracted from the audio samples, as the input to our dense model.
For more detailed information about Yamnet, please refer to its [TensorFlow Hub](https://tfhub.dev/google/yamnet/1) page.
"""
yamnet_model = hub.load("https://tfhub.dev/google/yamnet/1")
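# Optional sanity check (illustrative): run Yamnet on one second of silence
# to inspect the shapes of the 3-tuple described above.
#
#   scores, embeddings, spectrogram = yamnet_model(tf.zeros(16000))
#   print(scores.shape, embeddings.shape)  # -> (N, 521) (N, 1024)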
"""
## Dataset
The dataset used is the
[Crowdsourced high-quality UK and Ireland English Dialect speech data set](https://openslr.org/83/)
which consists of a total of 17,877 high-quality audio wav files.
This dataset includes over 31 hours of recording from 120 volunteers who self-identify as
native speakers of Southern England, Midlands, Northern England, Wales, Scotland and Ireland.
For more info, please refer to the above link or to the following paper:
[Open-source Multi-speaker Corpora of the English Accents in the British Isles](https://aclanthology.org/2020.lrec-1.804.pdf)
"""
"""
## Download the data
"""
# CSV file that contains information about the dataset. For each entry, we have:
# - ID
# - wav file name
# - transcript
line_index_file = keras.utils.get_file(
fname="line_index_file", origin=URL_PATH + "line_index_all.csv"
)
# Download the list of compressed files that contain the audio wav files
for i in zip_files:
fname = zip_files[i].split(".")[0]
url = URL_PATH + zip_files[i]
zip_file = keras.utils.get_file(fname=fname, origin=url, extract=True)
os.remove(zip_file)
"""
## Load the data in a Dataframe
Of the 3 columns (ID, filename and transcript), we are only interested in the filename column in order to read the audio file.
We will ignore the other two.
"""
dataframe = pd.read_csv(
line_index_file,
names=["id", "filename", "transcript"],
usecols=["filename"],
)
dataframe.head()
"""
Let's now preprocess the dataset by:
* Adjusting the filename (removing a leading space & adding ".wav" extension to the
filename).
* Creating a label using the first 2 characters of the filename which indicate the
accent.
* Shuffling the samples.
"""
# The purpose of this function is to preprocess the dataframe by applying the following:
# - Cleaning the filename from a leading space
# - Generating a label column that is gender agnostic i.e.
# welsh english male and welsh english female for example are both labeled as
# welsh english
# - Add extension .wav to the filename
# - Shuffle samples
def preprocess_dataframe(dataframe):
# Remove leading space in filename column
dataframe["filename"] = dataframe.apply(
lambda row: row["filename"].strip(), axis=1
)
# Create gender agnostic labels based on the filename first 2 letters
dataframe["label"] = dataframe.apply(
lambda row: gender_agnostic_categories.index(row["filename"][:2]),
axis=1,
)
# Add the file path to the name
dataframe["filename"] = dataframe.apply(
lambda row: os.path.join(DATASET_DESTINATION, row["filename"] + ".wav"),
axis=1,
)
# Shuffle the samples
dataframe = dataframe.sample(frac=1, random_state=SEED).reset_index(
drop=True
)
return dataframe
dataframe = preprocess_dataframe(dataframe)
dataframe.head()
"""
## Prepare training & validation sets
Let's split the samples creating training and validation sets.
"""
split = int(len(dataframe) * (1 - VALIDATION_RATIO))
train_df = dataframe[:split]
valid_df = dataframe[split:]
print(
f"We have {train_df.shape[0]} training samples & {valid_df.shape[0]} validation ones"
)
"""
## Prepare a TensorFlow Dataset
Next, we need to create a `tf.data.Dataset`.
This is done by creating a `dataframe_to_dataset` function that does the following:
* Create a dataset using filenames and labels.
* Get the Yamnet embeddings by calling another function `filepath_to_embeddings`.
* Apply caching, batching and prefetching.
The `filepath_to_embeddings` does the following:
* Load audio file.
* Resample audio to 16 kHz.
* Generate scores and embeddings from Yamnet model.
* Since Yamnet generates multiple samples for each audio file,
this function also duplicates the label for all the generated samples
whose top-scoring Yamnet class is 0 (speech), and sets the label of the
others to 'other', indicating that the audio segment is not speech and we won't label it as one of the accents.
The below `load_16k_audio_wav` function is copied from the following tutorial
[Transfer learning with YAMNet for environmental sound classification](https://www.tensorflow.org/tutorials/audio/transfer_learning_audio)
"""
@tf.function
def load_16k_audio_wav(filename):
# Read file content
file_content = tf.io.read_file(filename)
# Decode audio wave
audio_wav, sample_rate = tf.audio.decode_wav(
file_content, desired_channels=1
)
audio_wav = tf.squeeze(audio_wav, axis=-1)
sample_rate = tf.cast(sample_rate, dtype=tf.int64)
# Resample to 16k
audio_wav = tfio.audio.resample(
audio_wav, rate_in=sample_rate, rate_out=16000
)
return audio_wav
def filepath_to_embeddings(filename, label):
# Load 16k audio wave
audio_wav = load_16k_audio_wav(filename)
# Get audio embeddings & scores.
# The embeddings are the audio features extracted using transfer learning
# while scores will be used to identify time slots that are not speech
# which will then be gathered into a specific new category 'other'
scores, embeddings, _ = yamnet_model(audio_wav)
# Number of embeddings in order to know how many times to repeat the label
embeddings_num = tf.shape(embeddings)[0]
labels = tf.repeat(label, embeddings_num)
# Change labels for time-slots that are not speech into a new category 'other'
labels = tf.where(
tf.argmax(scores, axis=1) == 0, label, len(class_names) - 1
)
# Using one-hot in order to use AUC
return (embeddings, tf.one_hot(labels, len(class_names)))
def dataframe_to_dataset(dataframe, batch_size=64):
dataset = tf.data.Dataset.from_tensor_slices(
(dataframe["filename"], dataframe["label"])
)
dataset = dataset.map(
lambda x, y: filepath_to_embeddings(x, y),
num_parallel_calls=tf.data.experimental.AUTOTUNE,
).unbatch()
return dataset.cache().batch(batch_size).prefetch(tf.data.AUTOTUNE)
train_ds = dataframe_to_dataset(train_df)
valid_ds = dataframe_to_dataset(valid_df)
"""
## Build the model
The model that we use consists of:
* An input layer which is the embedding output of the Yamnet classifier.
* 4 dense hidden layers and 4 dropout layers.
* An output dense layer.
The model's hyperparameters were selected using
[KerasTuner](https://keras.io/keras_tuner/).
"""
keras.backend.clear_session()
def build_and_compile_model():
inputs = keras.layers.Input(shape=(1024,), name="embedding")
x = keras.layers.Dense(256, activation="relu", name="dense_1")(inputs)
x = keras.layers.Dropout(0.15, name="dropout_1")(x)
x = keras.layers.Dense(384, activation="relu", name="dense_2")(x)
x = keras.layers.Dropout(0.2, name="dropout_2")(x)
x = keras.layers.Dense(192, activation="relu", name="dense_3")(x)
x = keras.layers.Dropout(0.25, name="dropout_3")(x)
x = keras.layers.Dense(384, activation="relu", name="dense_4")(x)
x = keras.layers.Dropout(0.2, name="dropout_4")(x)
outputs = keras.layers.Dense(
len(class_names), activation="softmax", name="ouput"
)(x)
model = keras.Model(
inputs=inputs, outputs=outputs, name="accent_recognition"
)
model.compile(
optimizer=keras.optimizers.Adam(learning_rate=1.9644e-5),
loss=keras.losses.CategoricalCrossentropy(),
metrics=["accuracy", keras.metrics.AUC(name="auc")],
)
return model
model = build_and_compile_model()
model.summary()
"""
## Class weights calculation
Since the dataset is quite unbalanced, we will use the `class_weight` argument during training.
Getting the class weights is a little tricky because even though we know the number of
audio files for each class, it does not represent the number of samples for that class
since Yamnet transforms each audio file into multiple audio samples of 0.96 seconds each.
So every audio file will be split into a number of samples that is proportional to its length.
Therefore, to get those weights, we have to calculate the number of samples for each class
after preprocessing through Yamnet.
"""
class_counts = tf.zeros(shape=(len(class_names),), dtype=tf.int32)
for x, y in iter(train_ds):
class_counts = class_counts + tf.math.bincount(
tf.cast(tf.math.argmax(y, axis=1), tf.int32), minlength=len(class_names)
)
class_weight = {
i: tf.math.reduce_sum(class_counts).numpy() / class_counts[i].numpy()
for i in range(len(class_counts))
}
print(class_weight)
"""
## Callbacks
We use Keras callbacks in order to:
* Stop whenever the validation AUC stops improving.
* Save the best model.
* Call TensorBoard in order to later view the training and validation logs.
"""
early_stopping_cb = keras.callbacks.EarlyStopping(
monitor="val_auc", patience=10, restore_best_weights=True
)
model_checkpoint_cb = keras.callbacks.ModelCheckpoint(
MODEL_NAME + ".keras", monitor="val_auc", save_best_only=True
)
tensorboard_cb = keras.callbacks.TensorBoard(
os.path.join(os.curdir, "logs", model.name)
)
callbacks = [early_stopping_cb, model_checkpoint_cb, tensorboard_cb]
"""
## Training
"""
history = model.fit(
train_ds,
epochs=EPOCHS,
validation_data=valid_ds,
class_weight=class_weight,
callbacks=callbacks,
verbose=2,
)
"""
## Results
Let's plot the training and validation AUC and accuracy.
"""
fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(14, 5))
axs[0].plot(range(EPOCHS), history.history["accuracy"], label="Training")
axs[0].plot(range(EPOCHS), history.history["val_accuracy"], label="Validation")
axs[0].set_xlabel("Epochs")
axs[0].set_title("Training & Validation Accuracy")
axs[0].legend()
axs[0].grid(True)
axs[1].plot(range(EPOCHS), history.history["auc"], label="Training")
axs[1].plot(range(EPOCHS), history.history["val_auc"], label="Validation")
axs[1].set_xlabel("Epochs")
axs[1].set_title("Training & Validation AUC")
axs[1].legend()
axs[1].grid(True)
plt.show()
"""
## Evaluation
"""
train_loss, train_acc, train_auc = model.evaluate(train_ds)
valid_loss, valid_acc, valid_auc = model.evaluate(valid_ds)
"""
Let's try to compare our model's performance to Yamnet's using one of Yamnet's
metrics (d-prime). Yamnet achieved a d-prime value of 2.318.
Let's check our model's performance.
"""
# The following function calculates the d-prime score from the AUC
def d_prime(auc):
standard_normal = stats.norm()
d_prime = standard_normal.ppf(auc) * np.sqrt(2.0)
return d_prime
print(
"train d-prime: {0:.3f}, validation d-prime: {1:.3f}".format(
d_prime(train_auc), d_prime(valid_auc)
)
)
"""
We can see that the model achieves the following results:
Results | Training | Validation
-----------|-----------|------------
Accuracy | 54% | 51%
AUC | 0.91 | 0.89
d-prime | 1.882 | 1.740
"""
"""
## Confusion Matrix
Let's now plot the confusion matrix for the validation dataset.
The confusion matrix lets us see, for every class, not only how many samples were correctly classified,
but also which other classes the samples were confused with.
It allows us to calculate the precision and recall for every class.
"""
# Create x and y tensors
x_valid = None
y_valid = None
for x, y in iter(valid_ds):
if x_valid is None:
x_valid = x.numpy()
y_valid = y.numpy()
else:
x_valid = np.concatenate((x_valid, x.numpy()), axis=0)
y_valid = np.concatenate((y_valid, y.numpy()), axis=0)
# Generate predictions
y_pred = model.predict(x_valid)
# Calculate confusion matrix
confusion_mtx = tf.math.confusion_matrix(
np.argmax(y_valid, axis=1), np.argmax(y_pred, axis=1)
)
# Plot the confusion matrix
plt.figure(figsize=(10, 8))
sns.heatmap(
confusion_mtx,
xticklabels=class_names,
yticklabels=class_names,
annot=True,
fmt="g",
)
plt.xlabel("Prediction")
plt.ylabel("Label")
plt.title("Validation Confusion Matrix")
plt.show()
"""
## Precision & recall
For every class:
* Recall is the ratio of correctly classified samples, i.e. it shows how many
samples of this specific class the model is able to detect.
It is the ratio of the diagonal element to the sum of all elements in the row.
* Precision shows the accuracy of the classifier. It is the ratio of correctly
predicted samples among the ones classified as belonging to this class.
It is the ratio of the diagonal element to the sum of all elements in the column.
"""
for i, label in enumerate(class_names):
precision = confusion_mtx[i, i] / np.sum(confusion_mtx[:, i])
recall = confusion_mtx[i, i] / np.sum(confusion_mtx[i, :])
print(
"{0:15} Precision:{1:.2f}%; Recall:{2:.2f}%".format(
label, precision * 100, recall * 100
)
)
"""
## Run inference on test data
Let's now run a test on a single audio file.
Let's check this example from [The Scottish Voice](https://www.thescottishvoice.org.uk/home/)
We will:
* Download the mp3 file.
* Convert it to a 16k wav file.
* Run the model on the wav file.
* Plot the results.
"""
filename = "audio-sample-Stuart"
url = "https://www.thescottishvoice.org.uk/files/cm/files/"
if not os.path.exists(filename + ".wav"):
print(f"Downloading {filename}.mp3 from {url}")
command = f"wget {url}{filename}.mp3"
os.system(command)
print("Converting mp3 to wav and resampling to 16 kHz")
command = (
f"ffmpeg -hide_banner -loglevel panic -y -i {filename}.mp3 -acodec "
f"pcm_s16le -ac 1 -ar 16000 {filename}.wav"
)
os.system(command)
filename = filename + ".wav"
"""
The below function `yamnet_class_names_from_csv` was copied and very slightly changed
from this [Yamnet Notebook](https://colab.research.google.com/github/tensorflow/hub/blob/master/examples/colab/yamnet.ipynb).
"""
def yamnet_class_names_from_csv(yamnet_class_map_csv_text):
"""Returns list of class names corresponding to score vector."""
yamnet_class_map_csv = io.StringIO(yamnet_class_map_csv_text)
yamnet_class_names = [
name for (class_index, mid, name) in csv.reader(yamnet_class_map_csv)
]
yamnet_class_names = yamnet_class_names[1:] # Skip CSV header
return yamnet_class_names
yamnet_class_map_path = yamnet_model.class_map_path().numpy()
yamnet_class_names = yamnet_class_names_from_csv(
tf.io.read_file(yamnet_class_map_path).numpy().decode("utf-8")
)
def calculate_number_of_non_speech(scores):
number_of_non_speech = tf.math.reduce_sum(
tf.where(
tf.math.argmax(scores, axis=1, output_type=tf.int32) != 0, 1, 0
)
)
return number_of_non_speech
def filename_to_predictions(filename):
# Load 16k audio wave
audio_wav = load_16k_audio_wav(filename)
# Get audio embeddings & scores.
scores, embeddings, mel_spectrogram = yamnet_model(audio_wav)
print(
"Out of {} samples, {} are not speech".format(
scores.shape[0], calculate_number_of_non_speech(scores)
)
)
# Predict the output of the accent recognition model with embeddings as input
predictions = model.predict(embeddings)
return audio_wav, predictions, mel_spectrogram
"""
Let's run the model on the audio file:
"""
audio_wav, predictions, mel_spectrogram = filename_to_predictions(filename)
infered_class = class_names[predictions.mean(axis=0).argmax()]
print(f"The main accent is: {infered_class} English")
"""
Listen to the audio
"""
Audio(audio_wav, rate=16000)
"""
The plotting code below was copied from this [Yamnet notebook](tinyurl.com/4a8xn7at) and adjusted to our needs.
It plots the following:
* Audio waveform
* Mel spectrogram
* Predictions for every time step
"""
plt.figure(figsize=(10, 6))
# Plot the waveform.
plt.subplot(3, 1, 1)
plt.plot(audio_wav)
plt.xlim([0, len(audio_wav)])
# Plot the log-mel spectrogram (returned by the model).
plt.subplot(3, 1, 2)
plt.imshow(
mel_spectrogram.numpy().T,
aspect="auto",
interpolation="nearest",
origin="lower",
)
# Plot and label the model output scores for the top-scoring classes.
mean_predictions = np.mean(predictions, axis=0)
top_class_indices = np.argsort(mean_predictions)[::-1]
plt.subplot(3, 1, 3)
plt.imshow(
predictions[:, top_class_indices].T,
aspect="auto",
interpolation="nearest",
cmap="gray_r",
)
# patch_padding = (PATCH_WINDOW_SECONDS / 2) / PATCH_HOP_SECONDS
# values from the model documentation
patch_padding = (0.025 / 2) / 0.01
plt.xlim([-patch_padding - 0.5, predictions.shape[0] + patch_padding - 0.5])
# Label the top_N classes.
yticks = range(0, len(class_names), 1)
plt.yticks(yticks, [class_names[top_class_indices[x]] for x in yticks])
_ = plt.ylim(-0.5 + np.array([len(class_names), 0]))
| keras-core/examples/keras_io/tensorflow/audio/uk_ireland_accent_recognition.py/0 | {
"file_path": "keras-core/examples/keras_io/tensorflow/audio/uk_ireland_accent_recognition.py",
"repo_id": "keras-core",
"token_count": 7353
} | 25 |
"""
Title: End-to-end Masked Language Modeling with BERT
Author: [Ankur Singh](https://twitter.com/ankur310794)
Converted to Keras-Core: [Mrutyunjay Biswal](https://twitter.com/LearnStochastic)
Date created: 2020/09/18
Last modified: 2023/09/06
Description: Implement a Masked Language Model (MLM) with BERT and fine-tune it on the IMDB Reviews dataset.
Accelerator: GPU
"""
"""
## Introduction
Masked Language Modeling is a fill-in-the-blank task,
where a model uses the context words surrounding a mask token to try to predict what the
masked word should be.
For an input that contains one or more mask tokens,
the model will generate the most likely substitution for each.
Example:
- Input: "I have watched this [MASK] and it was awesome."
- Output: "I have watched this movie and it was awesome."
Masked language modeling is a great way to train a language
model in a self-supervised setting (without human-annotated labels).
Such a model can then be fine-tuned to accomplish various supervised
NLP tasks.
This example teaches you how to build a BERT model from scratch,
train it with the masked language modeling task,
and then fine-tune this model on a sentiment classification task.
We will use the Keras-Core `TextVectorization` and `MultiHeadAttention` layers
to create a BERT Transformer-Encoder network architecture.
Note: This example is compatible with the TensorFlow backend only.
"""
"""
## Setup
"""
import os
import re
import glob
import numpy as np
import pandas as pd
from pathlib import Path
from dataclasses import dataclass
import tensorflow as tf
import keras_core as keras
from keras_core import layers
"""
## Configuration
"""
@dataclass
class Config:
MAX_LEN = 256
BATCH_SIZE = 32
LR = 0.001
VOCAB_SIZE = 30000
EMBED_DIM = 128
NUM_HEAD = 8 # used in bert model
FF_DIM = 128 # used in bert model
NUM_LAYERS = 1
NUM_EPOCHS = 1
STEPS_PER_EPOCH = 2
config = Config()
"""
## Download the Data: IMDB Movie Review Sentiment Classification
Download the IMDB data and load into a Pandas DataFrame.
"""
fpath = keras.utils.get_file(
origin="https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz"
)
dirpath = Path(fpath).parent.absolute()
os.system(f"tar -xf {fpath} -C {dirpath}")
"""
The `aclImdb` folder contains a `train` and `test` subfolder:
"""
os.system(f"ls {dirpath}/aclImdb")
os.system(f"ls {dirpath}/aclImdb/train")
os.system(f"ls {dirpath}/aclImdb/test")
"""
We are only interested in the `pos` and `neg` subfolders, so let's delete the rest:
"""
os.system(f"rm -r {dirpath}/aclImdb/train/unsup")
os.system(f"rm -r {dirpath}/aclImdb/train/*.feat")
os.system(f"rm -r {dirpath}/aclImdb/train/*.txt")
os.system(f"rm -r {dirpath}/aclImdb/test/*.feat")
os.system(f"rm -r {dirpath}/aclImdb/test/*.txt")
"""
Let's read the dataset from the text files to a DataFrame.
"""
def get_text_list_from_files(files):
text_list = []
for name in files:
with open(name) as f:
for line in f:
text_list.append(line)
return text_list
def get_data_from_text_files(folder_name):
pos_files = glob.glob(f"{dirpath}/aclImdb/" + folder_name + "/pos/*.txt")
pos_texts = get_text_list_from_files(pos_files)
neg_files = glob.glob(f"{dirpath}/aclImdb/" + folder_name + "/neg/*.txt")
neg_texts = get_text_list_from_files(neg_files)
df = pd.DataFrame(
{
"review": pos_texts + neg_texts,
"sentiment": [0] * len(pos_texts) + [1] * len(neg_texts),
}
)
df = df.sample(len(df)).reset_index(drop=True)
return df
train_df = get_data_from_text_files("train")
test_df = get_data_from_text_files("test")
all_data = pd.concat([train_df, test_df], axis=0).reset_index(drop=True)
assert len(all_data) != 0, "all_data must not be empty"
"""
## Dataset preparation
We will use the `TextVectorization` layer to vectorize the text into integer token ids.
It transforms a batch of strings into either
a sequence of token indices (one sample = 1D array of integer token indices, in order)
or a dense representation (one sample = 1D array of float values encoding an unordered set of tokens).
Below, we define 3 preprocessing functions.
1. The `get_vectorize_layer` function builds the `TextVectorization` layer.
2. The `encode` function encodes raw text into integer token ids.
3. The `get_masked_input_and_labels` function will mask input token ids. It masks 15% of all input tokens in each sequence at random.
"""
def custom_standardization(input_data):
lowercase = tf.strings.lower(input_data)
stripped_html = tf.strings.regex_replace(lowercase, "<br />", " ")
return tf.strings.regex_replace(
stripped_html, "[%s]" % re.escape("!#$%&'()*+,-./:;<=>?@\^_`{|}~"), ""
)
def get_vectorize_layer(texts, vocab_size, max_seq, special_tokens=["[MASK]"]):
"""Build Text vectorization layer
Args:
        texts (list): List of strings, i.e., the input texts.
        vocab_size (int): Vocabulary size.
        max_seq (int): Maximum sequence length.
special_tokens (list, optional): List of special tokens. Defaults to `['[MASK]']`.
Returns:
layers.Layer: Return TextVectorization Keras Layer
"""
vectorize_layer = layers.TextVectorization(
max_tokens=vocab_size,
output_mode="int",
standardize=custom_standardization,
output_sequence_length=max_seq,
)
vectorize_layer.adapt(texts)
# Insert mask token in vocabulary
vocab = vectorize_layer.get_vocabulary()
vocab = vocab[2 : vocab_size - len(special_tokens)] + ["[mask]"]
vectorize_layer.set_vocabulary(vocab)
return vectorize_layer
vectorize_layer = get_vectorize_layer(
all_data.review.values.tolist(),
config.VOCAB_SIZE,
config.MAX_LEN,
special_tokens=["[mask]"],
)
# Get mask token id for masked language model
mask_token_id = vectorize_layer(["[mask]"]).numpy()[0][0]
def encode(texts):
encoded_texts = vectorize_layer(texts)
return encoded_texts.numpy()
def get_masked_input_and_labels(encoded_texts):
# 15% BERT masking
inp_mask = np.random.rand(*encoded_texts.shape) < 0.15
# Do not mask special tokens
inp_mask[encoded_texts <= 2] = False
    # Set targets to -1 by default; -1 means "ignore"
    labels = -1 * np.ones(encoded_texts.shape, dtype=int)
# Set labels for masked tokens
labels[inp_mask] = encoded_texts[inp_mask]
# Prepare input
encoded_texts_masked = np.copy(encoded_texts)
    # Set input to [MASK] for 90% of the selected tokens;
    # the remaining 10% are left unchanged
    inp_mask_2mask = inp_mask & (np.random.rand(*encoded_texts.shape) < 0.90)
    encoded_texts_masked[
        inp_mask_2mask
    ] = mask_token_id  # mask token is the last in the dict
    # Of the masked tokens, replace 1/9 (i.e. 10% of all selected tokens)
    # with a random token, yielding the usual 80/10/10 split
    inp_mask_2random = inp_mask_2mask & (np.random.rand(*encoded_texts.shape) < 1 / 9)
encoded_texts_masked[inp_mask_2random] = np.random.randint(
3, mask_token_id, inp_mask_2random.sum()
)
# Prepare sample_weights to pass to .fit() method
sample_weights = np.ones(labels.shape)
sample_weights[labels == -1] = 0
    # y_labels is the same as encoded_texts, i.e. the original input tokens
y_labels = np.copy(encoded_texts)
return encoded_texts_masked, y_labels, sample_weights
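"""
As a quick, illustrative sanity check of the masking logic (the exact token
ids depend on the adapted vocabulary and on randomness):
"""
check_sample = encode(["I have watched this movie and it was awesome"])
check_masked, check_labels, check_weights = get_masked_input_and_labels(
    check_sample
)
print("Masked input:", check_masked[0][:12])
print("Labels:      ", check_labels[0][:12])
print("Weights:     ", check_weights[0][:12])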
# We have 25000 examples for training
x_train = encode(train_df.review.values) # encode reviews with vectorizer
y_train = train_df.sentiment.values
train_classifier_ds = (
tf.data.Dataset.from_tensor_slices((x_train, y_train))
.shuffle(1000)
.batch(config.BATCH_SIZE)
)
# We have 25000 examples for testing
x_test = encode(test_df.review.values)
y_test = test_df.sentiment.values
test_classifier_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(
config.BATCH_SIZE
)
# Build dataset for end to end model input (will be used at the end)
test_raw_classifier_ds = tf.data.Dataset.from_tensor_slices(
(test_df.review.values, y_test)
).batch(config.BATCH_SIZE)
# Prepare data for masked language model
x_all_review = encode(all_data.review.values)
x_masked_train, y_masked_labels, sample_weights = get_masked_input_and_labels(
x_all_review
)
mlm_ds = tf.data.Dataset.from_tensor_slices(
(x_masked_train, y_masked_labels, sample_weights)
)
mlm_ds = mlm_ds.shuffle(1000).batch(config.BATCH_SIZE)
id2token = dict(enumerate(vectorize_layer.get_vocabulary()))
token2id = {y: x for x, y in id2token.items()}
class MaskedTextGenerator(keras.callbacks.Callback):
def __init__(self, sample_tokens, top_k=5):
self.sample_tokens = sample_tokens
self.k = top_k
def decode(self, tokens):
return " ".join([id2token[t] for t in tokens if t != 0])
def convert_ids_to_tokens(self, id):
return id2token[id]
def on_epoch_end(self, epoch, logs=None):
prediction = self.model.predict(self.sample_tokens)
masked_index = np.where(self.sample_tokens == mask_token_id)
masked_index = masked_index[1]
mask_prediction = prediction[0][masked_index]
top_indices = mask_prediction[0].argsort()[-self.k :][::-1]
values = mask_prediction[0][top_indices]
for i in range(len(top_indices)):
p = top_indices[i]
v = values[i]
tokens = np.copy(self.sample_tokens[0])
tokens[masked_index[0]] = p
            result = {
                "input_text": self.decode(self.sample_tokens[0]),
                "prediction": self.decode(tokens),
                "probability": v,
                "predicted mask token": self.convert_ids_to_tokens(p),
            }
            print(result)
sample_tokens = vectorize_layer(["I have watched this [mask] and it was awesome"])
generator_callback = MaskedTextGenerator(sample_tokens.numpy())
"""
## Create BERT model (Pretraining Model) for masked language modeling
We will create a BERT-like pretraining model architecture
using the `MultiHeadAttention` layer.
It will take token ids as inputs (including masked tokens)
and it will predict the correct ids for the masked input tokens.
"""
def bert_module(query, key, value, layer_num):
# Multi headed self-attention
attention_output = layers.MultiHeadAttention(
num_heads=config.NUM_HEAD,
key_dim=config.EMBED_DIM // config.NUM_HEAD,
name=f"encoder_{layer_num}_multiheadattention",
)(query, key, value)
attention_output = layers.Dropout(0.1, name=f"encoder_{layer_num}_att_dropout")(
attention_output
)
attention_output = layers.LayerNormalization(
epsilon=1e-6, name=f"encoder_{layer_num}_att_layernormalization"
)(query + attention_output)
# Feed-forward layer
ffn = keras.Sequential(
[
layers.Dense(config.FF_DIM, activation="relu"),
layers.Dense(config.EMBED_DIM),
],
name=f"encoder_{layer_num}_ffn",
)
ffn_output = ffn(attention_output)
ffn_output = layers.Dropout(0.1, name=f"encoder_{layer_num}_ffn_dropout")(
ffn_output
)
sequence_output = layers.LayerNormalization(
epsilon=1e-6, name=f"encoder_{layer_num}_ffn_layernormalization"
)(attention_output + ffn_output)
return sequence_output
def get_pos_encoding_matrix(max_len, d_emb):
pos_enc = np.array(
[
[pos / np.power(10000, 2 * (j // 2) / d_emb) for j in range(d_emb)]
if pos != 0
else np.zeros(d_emb)
for pos in range(max_len)
]
)
pos_enc[1:, 0::2] = np.sin(pos_enc[1:, 0::2]) # dim 2i
pos_enc[1:, 1::2] = np.cos(pos_enc[1:, 1::2]) # dim 2i+1
return pos_enc
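"""
`get_pos_encoding_matrix` builds the standard fixed sinusoidal position
encoding from "Attention Is All You Need": even dimensions hold
`sin(pos / 10000^(2i / d))` and odd dimensions hold the matching cosine.
A quick shape check (illustrative only):
"""
assert get_pos_encoding_matrix(config.MAX_LEN, config.EMBED_DIM).shape == (
    config.MAX_LEN,
    config.EMBED_DIM,
)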
loss_fn = keras.losses.SparseCategoricalCrossentropy(reduction=None)
loss_tracker = keras.metrics.Mean(name="loss")
class MaskedLanguageModel(keras.Model):
def train_step(self, inputs):
if len(inputs) == 3:
features, labels, sample_weight = inputs
else:
features, labels = inputs
sample_weight = None
with tf.GradientTape() as tape:
predictions = self(features, training=True)
loss = loss_fn(labels, predictions, sample_weight=sample_weight)
# Compute gradients
trainable_vars = self.trainable_variables
gradients = tape.gradient(loss, trainable_vars)
# Update weights
self.optimizer.apply_gradients(zip(gradients, trainable_vars))
# Compute our own metrics
loss_tracker.update_state(loss, sample_weight=sample_weight)
# Return a dict mapping metric names to current value
return {"loss": loss_tracker.result()}
@property
def metrics(self):
# We list our `Metric` objects here so that `reset_states()` can be
# called automatically at the start of each epoch
# or at the start of `evaluate()`.
# If you don't implement this property, you have to call
# `reset_states()` yourself at the time of your choosing.
return [loss_tracker]
def create_masked_language_bert_model():
inputs = layers.Input((config.MAX_LEN,), dtype=tf.int64)
word_embeddings = layers.Embedding(
config.VOCAB_SIZE, config.EMBED_DIM, name="word_embedding"
)(inputs)
position_embeddings = layers.Embedding(
input_dim=config.MAX_LEN,
output_dim=config.EMBED_DIM,
        embeddings_initializer=keras.initializers.Constant(
            get_pos_encoding_matrix(config.MAX_LEN, config.EMBED_DIM)
        ),
name="position_embedding",
)(tf.range(start=0, limit=config.MAX_LEN, delta=1))
embeddings = word_embeddings + position_embeddings
encoder_output = embeddings
for i in range(config.NUM_LAYERS):
encoder_output = bert_module(encoder_output, encoder_output, encoder_output, i)
mlm_output = layers.Dense(config.VOCAB_SIZE, name="mlm_cls", activation="softmax")(
encoder_output
)
mlm_model = MaskedLanguageModel(inputs, mlm_output, name="masked_bert_model")
optimizer = keras.optimizers.Adam(learning_rate=config.LR)
mlm_model.compile(optimizer=optimizer)
return mlm_model
bert_masked_model = create_masked_language_bert_model()
bert_masked_model.summary()
"""
## Train and Save
"""
bert_masked_model.fit(
    mlm_ds,
    epochs=config.NUM_EPOCHS,
    steps_per_epoch=config.STEPS_PER_EPOCH,
    callbacks=[generator_callback],
)
bert_masked_model.save("bert_mlm_imdb.keras")
"""
## Fine-tune a sentiment classification model
We will fine-tune our self-supervised model on a downstream task of sentiment classification.
To do this, let's create a classifier by adding a pooling layer and a `Dense` layer on top of the
pretrained BERT features.
"""
# Load pretrained bert model
mlm_model = keras.models.load_model(
"bert_mlm_imdb.keras", custom_objects={"MaskedLanguageModel": MaskedLanguageModel}
)
pretrained_bert_model = keras.Model(
mlm_model.input, mlm_model.get_layer("encoder_0_ffn_layernormalization").output
)
# Freeze it
pretrained_bert_model.trainable = False
def create_classifier_bert_model():
inputs = layers.Input((config.MAX_LEN,), dtype=tf.int64)
sequence_output = pretrained_bert_model(inputs)
pooled_output = layers.GlobalMaxPooling1D()(sequence_output)
hidden_layer = layers.Dense(64, activation="relu")(pooled_output)
outputs = layers.Dense(1, activation="sigmoid")(hidden_layer)
    classifier_model = keras.Model(inputs, outputs, name="classification")
    optimizer = keras.optimizers.Adam()
    classifier_model.compile(
        optimizer=optimizer, loss="binary_crossentropy", metrics=["accuracy"]
    )
    return classifier_model
classifier_model = create_classifier_bert_model()
classifier_model.summary()
# Train the classifier with frozen BERT stage
classifier_model.fit(
    train_classifier_ds,
    epochs=config.NUM_EPOCHS,
    steps_per_epoch=config.STEPS_PER_EPOCH,
validation_data=test_classifier_ds,
)
# Unfreeze the BERT model for fine-tuning
pretrained_bert_model.trainable = True
optimizer = keras.optimizers.Adam()
classifier_model.compile(
optimizer=optimizer, loss="binary_crossentropy", metrics=["accuracy"]
)
classifier_model.fit(
    train_classifier_ds,
    epochs=config.NUM_EPOCHS,
    steps_per_epoch=config.STEPS_PER_EPOCH,
validation_data=test_classifier_ds,
)
"""
## Create an end-to-end model and evaluate it
When you want to deploy a model, it's best if it already includes its preprocessing
pipeline, so that you don't have to reimplement the preprocessing logic in your
production environment. Let's create an end-to-end model that incorporates
the `TextVectorization` layer, and let's evaluate. Our model will accept raw strings
as input.
"""
def get_end_to_end(model):
inputs_string = keras.Input(shape=(1,), dtype="string")
indices = vectorize_layer(inputs_string)
outputs = model(indices)
end_to_end_model = keras.Model(inputs_string, outputs, name="end_to_end_model")
optimizer = keras.optimizers.Adam(learning_rate=config.LR)
end_to_end_model.compile(
optimizer=optimizer, loss="binary_crossentropy", metrics=["accuracy"]
)
return end_to_end_model
end_to_end_classification_model = get_end_to_end(classifier_model)
end_to_end_classification_model.evaluate(test_raw_classifier_ds) | keras-core/examples/keras_io/tensorflow/nlp/end_to_end_mlm_with_bert.py/0 | {
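"""
As an illustrative usage example (the predicted values depend on how long the
model was trained; note that with the labeling used above, outputs close to 1
indicate the *negative* class):
"""
sample_reviews = tf.constant(
    [["what a wonderful movie"], ["an awful waste of time"]]
)
print(end_to_end_classification_model.predict(sample_reviews))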
"file_path": "keras-core/examples/keras_io/tensorflow/nlp/end_to_end_mlm_with_bert.py",
"repo_id": "keras-core",
"token_count": 6779
} | 26 |
"""
Title: Zero-DCE for low-light image enhancement
Author: [Soumik Rakshit](http://github.com/soumik12345)
Converted to Keras Core by: [Soumik Rakshit](http://github.com/soumik12345)
Date created: 2021/09/18
Last modified: 2023/07/15
Description: Implementing Zero-Reference Deep Curve Estimation for low-light image enhancement.
Accelerator: GPU
"""
"""
## Introduction
**Zero-Reference Deep Curve Estimation** or **Zero-DCE** formulates low-light image
enhancement as the task of estimating an image-specific
[*tonal curve*](https://en.wikipedia.org/wiki/Curve_(tonality)) with a deep neural network.
In this example, we train a lightweight deep network, **DCE-Net**, to estimate
pixel-wise and high-order tonal curves for dynamic range adjustment of a given image.
Zero-DCE takes a low-light image as input and produces high-order tonal curves as its output.
These curves are then used for pixel-wise adjustment on the dynamic range of the input to
obtain an enhanced image. The curve estimation process is done in such a way that it maintains
the range of the enhanced image and preserves the contrast of neighboring pixels. This
curve estimation is inspired by curves adjustment used in photo editing software such as
Adobe Photoshop where users can adjust points throughout an image’s tonal range.
Zero-DCE is appealing because of its relaxed assumptions with regard to reference images:
it does not require any input/output image pairs during training.
This is achieved through a set of carefully formulated non-reference loss functions,
which implicitly measure the enhancement quality and guide the training of the network.
### References
- [Zero-Reference Deep Curve Estimation for Low-Light Image Enhancement](https://arxiv.org/pdf/2001.06826.pdf)
- [Curves adjustment in Adobe Photoshop](https://helpx.adobe.com/photoshop/using/curves-adjustment.html)
"""
"""
## Downloading LOLDataset
The **LoL Dataset** has been created for low-light image enhancement. It provides 485
images for training and 15 for testing. Each image pair in the dataset consists of a
low-light input image and its corresponding well-exposed reference image.
"""
import numpy as np
from glob import glob
from PIL import Image, ImageOps
import matplotlib.pyplot as plt
import keras_core as keras
from keras_core import layers
import tensorflow as tf
"""shell
wget https://huggingface.co/datasets/geekyrakshit/LoL-Dataset/resolve/main/lol_dataset.zip
unzip -q lol_dataset.zip && rm lol_dataset.zip
"""
"""
## Creating a TensorFlow Dataset
We use 400 low-light images from the LoL Dataset training set for training, and we use
the remaining 85 low-light images for validation. We resize the images to size `256 x
256` to be used for both training and validation. Note that in order to train the DCE-Net,
we will not require the corresponding enhanced images.
"""
IMAGE_SIZE = 256
BATCH_SIZE = 16
MAX_TRAIN_IMAGES = 400
def load_data(image_path):
image = tf.io.read_file(image_path)
image = tf.image.decode_png(image, channels=3)
image = tf.image.resize(images=image, size=[IMAGE_SIZE, IMAGE_SIZE])
image = image / 255.0
return image
def data_generator(low_light_images):
dataset = tf.data.Dataset.from_tensor_slices((low_light_images))
dataset = dataset.map(load_data, num_parallel_calls=tf.data.AUTOTUNE)
dataset = dataset.batch(BATCH_SIZE, drop_remainder=True)
return dataset
train_low_light_images = sorted(glob("./lol_dataset/our485/low/*"))[
:MAX_TRAIN_IMAGES
]
val_low_light_images = sorted(glob("./lol_dataset/our485/low/*"))[
MAX_TRAIN_IMAGES:
]
test_low_light_images = sorted(glob("./lol_dataset/eval15/low/*"))
train_dataset = data_generator(train_low_light_images)
val_dataset = data_generator(val_low_light_images)
print("Train Dataset:", train_dataset)
print("Validation Dataset:", val_dataset)
"""
## The Zero-DCE Framework
The goal of DCE-Net is to estimate a set of best-fitting light-enhancement curves
(LE-curves) given an input image. The framework then maps all pixels of the input’s RGB
channels by applying the curves iteratively to obtain the final enhanced image.
### Understanding light-enhancement curves
A light-enhancement curve is a kind of curve that can automatically map a low-light image
to its enhanced version,
where the self-adaptive curve parameters are solely dependent on the input image.
When designing such a curve, three objectives should be taken into account:
- Each pixel value of the enhanced image should be in the normalized range `[0,1]`, in order to
avoid information loss induced by overflow truncation.
- It should be monotonous, to preserve the contrast between neighboring pixels.
- The shape of this curve should be as simple as possible,
and the curve should be differentiable to allow backpropagation.
The light-enhancement curve is separately applied to three RGB channels instead of solely on the
illumination channel. The three-channel adjustment can better preserve the inherent color and reduce
the risk of over-saturation.
![](https://li-chongyi.github.io/Zero-DCE_files/framework.png)
### DCE-Net
The DCE-Net is a lightweight deep neural network that learns the mapping between an input
image and its best-fitting curve parameter maps. The input to the DCE-Net is a low-light
image while the outputs are a set of pixel-wise curve parameter maps for corresponding
higher-order curves. It is a plain CNN of seven convolutional layers with symmetrical
concatenation. Each layer consists of 32 convolutional kernels of size 3×3 and stride 1
followed by the ReLU activation function. The last convolutional layer is followed by the
Tanh activation function, which produces 24 parameter maps for 8 iterations, where each
iteration requires three curve parameter maps for the three channels.
![](https://i.imgur.com/HtIg34W.png)
"""
def build_dce_net():
input_img = keras.Input(shape=[None, None, 3])
conv1 = layers.Conv2D(
32, (3, 3), strides=(1, 1), activation="relu", padding="same"
)(input_img)
conv2 = layers.Conv2D(
32, (3, 3), strides=(1, 1), activation="relu", padding="same"
)(conv1)
conv3 = layers.Conv2D(
32, (3, 3), strides=(1, 1), activation="relu", padding="same"
)(conv2)
conv4 = layers.Conv2D(
32, (3, 3), strides=(1, 1), activation="relu", padding="same"
)(conv3)
int_con1 = layers.Concatenate(axis=-1)([conv4, conv3])
conv5 = layers.Conv2D(
32, (3, 3), strides=(1, 1), activation="relu", padding="same"
)(int_con1)
int_con2 = layers.Concatenate(axis=-1)([conv5, conv2])
conv6 = layers.Conv2D(
32, (3, 3), strides=(1, 1), activation="relu", padding="same"
)(int_con2)
int_con3 = layers.Concatenate(axis=-1)([conv6, conv1])
x_r = layers.Conv2D(
24, (3, 3), strides=(1, 1), activation="tanh", padding="same"
)(int_con3)
return keras.Model(inputs=input_img, outputs=x_r)
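"""
A quick check of the network contract (illustrative only): for any input
resolution, the DCE-Net outputs 24 channels, i.e. 8 curve iterations times
3 color channels.
"""
print(build_dce_net().output_shape)  # (None, None, None, 24)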
"""
## Loss functions
To enable zero-reference learning in DCE-Net, we use a set of differentiable
zero-reference losses that allow us to evaluate the quality of enhanced images.
"""
"""
### Color constancy loss
The *color constancy loss* is used to correct the potential color deviations in the
enhanced image.
"""
def color_constancy_loss(x):
mean_rgb = tf.reduce_mean(x, axis=(1, 2), keepdims=True)
mr, mg, mb = (
mean_rgb[:, :, :, 0],
mean_rgb[:, :, :, 1],
mean_rgb[:, :, :, 2],
)
d_rg = tf.square(mr - mg)
d_rb = tf.square(mr - mb)
d_gb = tf.square(mb - mg)
return tf.sqrt(tf.square(d_rg) + tf.square(d_rb) + tf.square(d_gb))
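"""
As an illustrative check of this loss (not part of the original pipeline):
a uniformly gray batch has identical channel means, so its color constancy
loss should be (near) zero.
"""
print(color_constancy_loss(tf.fill((1, 32, 32, 3), 0.5)).numpy())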
"""
### Exposure loss
To restrain under-/over-exposed regions, we use the *exposure control loss*.
It measures the distance between the average intensity value of a local region
and a preset well-exposedness level (set to `0.6`).
"""
def exposure_loss(x, mean_val=0.6):
x = tf.reduce_mean(x, axis=3, keepdims=True)
mean = tf.nn.avg_pool2d(x, ksize=16, strides=16, padding="VALID")
return tf.reduce_mean(tf.square(mean - mean_val))
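"""
Illustrative check: an image whose local mean intensity already sits at the
well-exposedness level of `0.6` incurs (near) zero exposure loss.
"""
print(exposure_loss(tf.fill((1, 64, 64, 3), 0.6)).numpy())  # ~0.0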
"""
### Illumination smoothness loss
To preserve the monotonicity relations between neighboring pixels, the
*illumination smoothness loss* is added to each curve parameter map.
"""
def illumination_smoothness_loss(x):
batch_size = tf.shape(x)[0]
h_x = tf.shape(x)[1]
w_x = tf.shape(x)[2]
    # Number of vertical / horizontal neighbor pairs (NHWC layout)
    count_h = (h_x - 1) * w_x
    count_w = h_x * (w_x - 1)
h_tv = tf.reduce_sum(tf.square((x[:, 1:, :, :] - x[:, : h_x - 1, :, :])))
w_tv = tf.reduce_sum(tf.square((x[:, :, 1:, :] - x[:, :, : w_x - 1, :])))
batch_size = tf.cast(batch_size, dtype=tf.float32)
count_h = tf.cast(count_h, dtype=tf.float32)
count_w = tf.cast(count_w, dtype=tf.float32)
return 2 * (h_tv / count_h + w_tv / count_w) / batch_size
"""
### Spatial consistency loss
The *spatial consistency loss* encourages spatial coherence of the enhanced image by
preserving the contrast between neighboring regions across the input image and its enhanced version.
"""
class SpatialConsistencyLoss(keras.losses.Loss):
def __init__(self, **kwargs):
super().__init__(reduction="none")
self.left_kernel = tf.constant(
[[[[0, 0, 0]], [[-1, 1, 0]], [[0, 0, 0]]]], dtype=tf.float32
)
self.right_kernel = tf.constant(
[[[[0, 0, 0]], [[0, 1, -1]], [[0, 0, 0]]]], dtype=tf.float32
)
self.up_kernel = tf.constant(
[[[[0, -1, 0]], [[0, 1, 0]], [[0, 0, 0]]]], dtype=tf.float32
)
self.down_kernel = tf.constant(
[[[[0, 0, 0]], [[0, 1, 0]], [[0, -1, 0]]]], dtype=tf.float32
)
def call(self, y_true, y_pred):
original_mean = tf.reduce_mean(y_true, 3, keepdims=True)
enhanced_mean = tf.reduce_mean(y_pred, 3, keepdims=True)
original_pool = tf.nn.avg_pool2d(
original_mean, ksize=4, strides=4, padding="VALID"
)
enhanced_pool = tf.nn.avg_pool2d(
enhanced_mean, ksize=4, strides=4, padding="VALID"
)
d_original_left = tf.nn.conv2d(
original_pool,
self.left_kernel,
strides=[1, 1, 1, 1],
padding="SAME",
)
d_original_right = tf.nn.conv2d(
original_pool,
self.right_kernel,
strides=[1, 1, 1, 1],
padding="SAME",
)
d_original_up = tf.nn.conv2d(
original_pool, self.up_kernel, strides=[1, 1, 1, 1], padding="SAME"
)
d_original_down = tf.nn.conv2d(
original_pool,
self.down_kernel,
strides=[1, 1, 1, 1],
padding="SAME",
)
d_enhanced_left = tf.nn.conv2d(
enhanced_pool,
self.left_kernel,
strides=[1, 1, 1, 1],
padding="SAME",
)
d_enhanced_right = tf.nn.conv2d(
enhanced_pool,
self.right_kernel,
strides=[1, 1, 1, 1],
padding="SAME",
)
d_enhanced_up = tf.nn.conv2d(
enhanced_pool, self.up_kernel, strides=[1, 1, 1, 1], padding="SAME"
)
d_enhanced_down = tf.nn.conv2d(
enhanced_pool,
self.down_kernel,
strides=[1, 1, 1, 1],
padding="SAME",
)
d_left = tf.square(d_original_left - d_enhanced_left)
d_right = tf.square(d_original_right - d_enhanced_right)
d_up = tf.square(d_original_up - d_enhanced_up)
d_down = tf.square(d_original_down - d_enhanced_down)
return d_left + d_right + d_up + d_down
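"""
Illustrative check: when the "enhanced" image equals the input, every
neighborhood difference matches, so the spatial consistency loss is zero.
"""
sample_batch = tf.random.uniform((1, 64, 64, 3))
print(
    tf.reduce_mean(SpatialConsistencyLoss()(sample_batch, sample_batch)).numpy()
)  # ~0.0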
"""
### Deep curve estimation model
We implement the Zero-DCE framework as a Keras subclassed model.
"""
class ZeroDCE(keras.Model):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.dce_model = build_dce_net()
def compile(self, learning_rate, **kwargs):
super().compile(**kwargs)
self.optimizer = keras.optimizers.Adam(learning_rate=learning_rate)
        self.spatial_constancy_loss = SpatialConsistencyLoss()
self.total_loss_tracker = keras.metrics.Mean(name="total_loss")
self.illumination_smoothness_loss_tracker = keras.metrics.Mean(
name="illumination_smoothness_loss"
)
self.spatial_constancy_loss_tracker = keras.metrics.Mean(
name="spatial_constancy_loss"
)
self.color_constancy_loss_tracker = keras.metrics.Mean(
name="color_constancy_loss"
)
self.exposure_loss_tracker = keras.metrics.Mean(name="exposure_loss")
@property
def metrics(self):
return [
self.total_loss_tracker,
self.illumination_smoothness_loss_tracker,
self.spatial_constancy_loss_tracker,
self.color_constancy_loss_tracker,
self.exposure_loss_tracker,
]
def get_enhanced_image(self, data, output):
r1 = output[:, :, :, :3]
r2 = output[:, :, :, 3:6]
r3 = output[:, :, :, 6:9]
r4 = output[:, :, :, 9:12]
r5 = output[:, :, :, 12:15]
r6 = output[:, :, :, 15:18]
r7 = output[:, :, :, 18:21]
r8 = output[:, :, :, 21:24]
x = data + r1 * (tf.square(data) - data)
x = x + r2 * (tf.square(x) - x)
x = x + r3 * (tf.square(x) - x)
enhanced_image = x + r4 * (tf.square(x) - x)
x = enhanced_image + r5 * (tf.square(enhanced_image) - enhanced_image)
x = x + r6 * (tf.square(x) - x)
x = x + r7 * (tf.square(x) - x)
enhanced_image = x + r8 * (tf.square(x) - x)
return enhanced_image
def call(self, data):
dce_net_output = self.dce_model(data)
return self.get_enhanced_image(data, dce_net_output)
def compute_losses(self, data, output):
enhanced_image = self.get_enhanced_image(data, output)
loss_illumination = 200 * illumination_smoothness_loss(output)
loss_spatial_constancy = tf.reduce_mean(
self.spatial_constancy_loss(enhanced_image, data)
)
loss_color_constancy = 5 * tf.reduce_mean(
color_constancy_loss(enhanced_image)
)
loss_exposure = 10 * tf.reduce_mean(exposure_loss(enhanced_image))
total_loss = (
loss_illumination
+ loss_spatial_constancy
+ loss_color_constancy
+ loss_exposure
)
return {
"total_loss": total_loss,
"illumination_smoothness_loss": loss_illumination,
"spatial_constancy_loss": loss_spatial_constancy,
"color_constancy_loss": loss_color_constancy,
"exposure_loss": loss_exposure,
}
def train_step(self, data):
with tf.GradientTape() as tape:
output = self.dce_model(data)
losses = self.compute_losses(data, output)
gradients = tape.gradient(
losses["total_loss"], self.dce_model.trainable_weights
)
self.optimizer.apply_gradients(
zip(gradients, self.dce_model.trainable_weights)
)
self.total_loss_tracker.update_state(losses["total_loss"])
self.illumination_smoothness_loss_tracker.update_state(
losses["illumination_smoothness_loss"]
)
self.spatial_constancy_loss_tracker.update_state(
losses["spatial_constancy_loss"]
)
self.color_constancy_loss_tracker.update_state(
losses["color_constancy_loss"]
)
self.exposure_loss_tracker.update_state(losses["exposure_loss"])
return {metric.name: metric.result() for metric in self.metrics}
def test_step(self, data):
output = self.dce_model(data)
losses = self.compute_losses(data, output)
self.total_loss_tracker.update_state(losses["total_loss"])
self.illumination_smoothness_loss_tracker.update_state(
losses["illumination_smoothness_loss"]
)
self.spatial_constancy_loss_tracker.update_state(
losses["spatial_constancy_loss"]
)
self.color_constancy_loss_tracker.update_state(
losses["color_constancy_loss"]
)
self.exposure_loss_tracker.update_state(losses["exposure_loss"])
return {metric.name: metric.result() for metric in self.metrics}
def save_weights(
self, filepath, overwrite=True, save_format=None, options=None
):
"""While saving the weights, we simply save the weights of the DCE-Net"""
self.dce_model.save_weights(
filepath,
overwrite=overwrite,
save_format=save_format,
options=options,
)
def load_weights(
self, filepath, by_name=False, skip_mismatch=False, options=None
):
"""While loading the weights, we simply load the weights of the DCE-Net"""
self.dce_model.load_weights(
filepath=filepath,
by_name=by_name,
skip_mismatch=skip_mismatch,
options=options,
)
"""
## Training
"""
zero_dce_model = ZeroDCE()
zero_dce_model.compile(learning_rate=1e-4)
history = zero_dce_model.fit(
train_dataset, validation_data=val_dataset, epochs=100
)
def plot_result(item):
plt.plot(history.history[item], label=item)
plt.plot(history.history["val_" + item], label="val_" + item)
plt.xlabel("Epochs")
plt.ylabel(item)
plt.title("Train and Validation {} Over Epochs".format(item), fontsize=14)
plt.legend()
plt.grid()
plt.show()
plot_result("total_loss")
plot_result("illumination_smoothness_loss")
plot_result("spatial_constancy_loss")
plot_result("color_constancy_loss")
plot_result("exposure_loss")
"""
## Inference
"""
def plot_results(images, titles, figure_size=(12, 12)):
fig = plt.figure(figsize=figure_size)
for i in range(len(images)):
fig.add_subplot(1, len(images), i + 1).set_title(titles[i])
_ = plt.imshow(images[i])
plt.axis("off")
plt.show()
def infer(original_image):
image = keras.utils.img_to_array(original_image)
image = image.astype("float32") / 255.0
image = np.expand_dims(image, axis=0)
output_image = zero_dce_model(image)
output_image = tf.cast((output_image[0, :, :, :] * 255), dtype=np.uint8)
output_image = Image.fromarray(output_image.numpy())
return output_image
"""
### Inference on test images
We compare the test images from LOLDataset enhanced by Zero-DCE with images enhanced via
the `PIL.ImageOps.autocontrast()` function.
You can use the trained model hosted on [Hugging Face Hub](https://huggingface.co/keras-io/low-light-image-enhancement)
and try the demo on [Hugging Face Spaces](https://huggingface.co/spaces/keras-io/low-light-image-enhancement).
"""
for val_image_file in test_low_light_images:
original_image = Image.open(val_image_file)
enhanced_image = infer(original_image)
plot_results(
[original_image, ImageOps.autocontrast(original_image), enhanced_image],
["Original", "PIL Autocontrast", "Enhanced"],
(20, 12),
)
| keras-core/examples/keras_io/tensorflow/vision/zero_dce.py/0 | {
"file_path": "keras-core/examples/keras_io/tensorflow/vision/zero_dce.py",
"repo_id": "keras-core",
"token_count": 7947
} | 27 |