# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""RMSprop optimizer implementation."""
import tensorflow.compat.v2 as tf
from tf_keras.optimizers import optimizer
from tf_keras.saving.object_registration import register_keras_serializable
# isort: off
from tensorflow.python.util.tf_export import keras_export
@register_keras_serializable()
@keras_export(
"keras.optimizers.RMSprop",
"keras.optimizers.experimental.RMSprop",
"keras.dtensor.experimental.optimizers.RMSprop",
v1=[],
)
class RMSprop(optimizer.Optimizer):
r"""Optimizer that implements the RMSprop algorithm.
The gist of RMSprop is to:
- Maintain a moving (discounted) average of the square of gradients
- Divide the gradient by the root of this average
This implementation of RMSprop uses plain momentum, not Nesterov momentum.
The centered version additionally maintains a moving average of the
gradients, and uses that average to estimate the variance.
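    In pseudocode, the dense update implemented in `update_step` below is,
    per variable (with momentum disabled):
        velocity = rho * velocity + (1 - rho) * gradient ** 2
        if centered:
            average_grad = rho * average_grad + (1 - rho) * gradient
            denominator = velocity - average_grad ** 2 + epsilon
        else:
            denominator = velocity + epsilon
        variable -= learning_rate * gradient / sqrt(denominator)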
Args:
learning_rate: Initial value for the learning rate:
either a floating point value,
or a `tf.keras.optimizers.schedules.LearningRateSchedule` instance.
Defaults to 0.001.
rho: float, defaults to 0.9. Discounting factor for the old gradients.
        momentum: float, defaults to 0.0. If not 0.0, the optimizer tracks
            the momentum value, with a decay rate equal to `1 - momentum`.
        epsilon: A small constant added to the denominator for numerical
            stability. Defaults to `1e-7`.
        centered: Boolean. If `True`, gradients are normalized by the
            estimated variance of the gradient; if `False`, by the uncentered
            second moment. Setting this to `True` may help with training, but
            is slightly more expensive in terms of computation and memory.
            Defaults to `False`.
{{base_optimizer_keyword_args}}
Usage:
>>> opt = tf.keras.optimizers.RMSprop(learning_rate=0.1)
>>> var1 = tf.Variable(10.0)
>>> loss = lambda: (var1 ** 2) / 2.0 # d(loss) / d(var1) = var1
>>> opt.minimize(loss, [var1])
>>> var1.numpy()
9.683772
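    A sketch of the centered, momentum variant (the exact result of one
    step depends on these settings; here we only check that the variable
    moved toward the minimum):
    >>> opt = tf.keras.optimizers.RMSprop(
    ...     learning_rate=0.1, momentum=0.9, centered=True)
    >>> var1 = tf.Variable(10.0)
    >>> loss = lambda: (var1 ** 2) / 2.0
    >>> opt.minimize(loss, [var1])
    >>> var1.numpy() < 10.0
    True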
Reference:
- [Hinton, 2012](http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf) # noqa: E501
"""
def __init__(
self,
learning_rate=0.001,
rho=0.9,
momentum=0.0,
epsilon=1e-7,
centered=False,
weight_decay=None,
clipnorm=None,
clipvalue=None,
global_clipnorm=None,
use_ema=False,
ema_momentum=0.99,
ema_overwrite_frequency=100,
jit_compile=True,
name="RMSprop",
**kwargs
):
super().__init__(
weight_decay=weight_decay,
clipnorm=clipnorm,
clipvalue=clipvalue,
global_clipnorm=global_clipnorm,
use_ema=use_ema,
ema_momentum=ema_momentum,
ema_overwrite_frequency=ema_overwrite_frequency,
jit_compile=jit_compile,
name=name,
**kwargs
)
self._learning_rate = self._build_learning_rate(learning_rate)
self.rho = rho
self.momentum = momentum
self.epsilon = epsilon
self.centered = centered
def build(self, var_list):
super().build(var_list)
if hasattr(self, "_built") and self._built:
return
self._built = True
self._velocities = []
for var in var_list:
self._velocities.append(
self.add_variable_from_reference(var, "velocity")
)
self._momentums = []
if self.momentum > 0:
for var in var_list:
self._momentums.append(
self.add_variable_from_reference(var, "momentum")
)
self._average_gradients = []
if self.centered:
for var in var_list:
self._average_gradients.append(
self.add_variable_from_reference(var, "average_gradient")
)
def update_step(self, gradient, variable):
"""Update step given gradient and the associated model variable."""
lr = tf.cast(self.learning_rate, variable.dtype)
var_key = self._var_key(variable)
velocity = self._velocities[self._index_dict[var_key]]
momentum = None
if self.momentum > 0:
momentum = self._momentums[self._index_dict[var_key]]
average_grad = None
if self.centered:
average_grad = self._average_gradients[self._index_dict[var_key]]
rho = self.rho
if isinstance(gradient, tf.IndexedSlices):
# Sparse gradients.
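            # All rows of the running averages are decayed first; the new
            # (1 - rho)-weighted statistics are then scattered only into
            # the rows listed in `gradient.indices`.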
velocity.assign(rho * velocity)
velocity.scatter_add(
tf.IndexedSlices(
tf.square(gradient.values) * (1 - rho), gradient.indices
)
)
if self.centered:
average_grad.assign(rho * average_grad)
average_grad.scatter_add(
tf.IndexedSlices(
gradient.values * (1 - rho), gradient.indices
)
)
denominator = velocity - tf.square(average_grad) + self.epsilon
else:
denominator = velocity + self.epsilon
denominator_slices = tf.gather(denominator, gradient.indices)
increment = tf.IndexedSlices(
lr * gradient.values * tf.math.rsqrt(denominator_slices),
gradient.indices,
)
if self.momentum > 0:
momentum.assign(self.momentum * momentum)
momentum.scatter_add(increment)
variable.assign_add(-momentum)
else:
variable.scatter_add(-increment)
else:
# Dense gradients.
velocity.assign(rho * velocity + (1 - rho) * tf.square(gradient))
if self.centered:
average_grad.assign(rho * average_grad + (1 - rho) * gradient)
denominator = velocity - tf.square(average_grad) + self.epsilon
else:
denominator = velocity + self.epsilon
increment = lr * gradient * tf.math.rsqrt(denominator)
if self.momentum > 0:
momentum.assign(self.momentum * momentum + increment)
variable.assign_add(-momentum)
else:
variable.assign_add(-increment)
def get_config(self):
config = super().get_config()
config.update(
{
"learning_rate": self._serialize_hyperparameter(
self._learning_rate
),
"rho": self.rho,
"momentum": self.momentum,
"epsilon": self.epsilon,
"centered": self.centered,
}
)
return config
RMSprop.__doc__ = RMSprop.__doc__.replace(
"{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)
# Source: tf-keras/tf_keras/optimizers/rmsprop.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for image preprocessing utils."""
import os
import random
import shutil
import tempfile
import numpy as np
import pandas as pd
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
from tf_keras import layers
from tf_keras.engine import sequential
from tf_keras.preprocessing import image
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
from tf_keras.utils import image_utils
try:
import PIL
except ImportError:
PIL = None
def _generate_test_images(
include_rgba=False, include_16bit=False, include_32bit=False
):
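    """Builds small random PIL test images for the tests below.
    Always returns a list holding a list of RGB images and a list of 8-bit
    grayscale images; RGBA, 16-bit and 32-bit grayscale lists are appended
    when the corresponding flags are set.
    """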
img_w = img_h = 20
rgb_images = []
rgba_images = []
gray_images = []
gray_images_16bit = []
gray_images_32bit = []
for _ in range(8):
bias = np.random.rand(img_w, img_h, 1) * 64
variance = np.random.rand(img_w, img_h, 1) * (255 - 64)
# RGB
imarray = np.random.rand(img_w, img_h, 3) * variance + bias
im = PIL.Image.fromarray(imarray.astype("uint8")).convert("RGB")
rgb_images.append(im)
# RGBA
imarray = np.random.rand(img_w, img_h, 4) * variance + bias
im = PIL.Image.fromarray(imarray.astype("uint8")).convert("RGBA")
rgba_images.append(im)
# 8-bit grayscale
imarray = np.random.rand(img_w, img_h, 1) * variance + bias
im = PIL.Image.fromarray(imarray.astype("uint8").squeeze()).convert("L")
gray_images.append(im)
# 16-bit grayscale
imarray = np.array(
np.random.randint(-2147483648, 2147483647, (img_w, img_h))
)
im = PIL.Image.fromarray(imarray.astype("uint16"))
gray_images_16bit.append(im)
# 32-bit grayscale
im = PIL.Image.fromarray(imarray.astype("uint32"))
gray_images_32bit.append(im)
ret = [rgb_images, gray_images]
if include_rgba:
ret.append(rgba_images)
if include_16bit:
ret.append(gray_images_16bit)
if include_32bit:
ret.append(gray_images_32bit)
return ret
@test_utils.run_v2_only
class TestImage(test_combinations.TestCase):
def test_iterator_empty_directory(self):
# Testing with different batch sizes
for batch_size in [0, 32]:
data_iterator = image.Iterator(0, batch_size, False, 0)
ret = next(data_iterator.index_generator)
self.assertEqual(ret.size, 0)
def test_image(self):
if PIL is None:
return # Skip test if PIL is not available.
for test_images in _generate_test_images():
img_list = []
for im in test_images:
img_list.append(image_utils.img_to_array(im)[None, ...])
images = np.vstack(img_list)
generator = image.ImageDataGenerator(
featurewise_center=True,
samplewise_center=True,
featurewise_std_normalization=True,
samplewise_std_normalization=True,
zca_whitening=True,
rotation_range=90.0,
width_shift_range=0.1,
height_shift_range=0.1,
shear_range=0.5,
zoom_range=0.2,
channel_shift_range=0.0,
brightness_range=(1, 5),
fill_mode="nearest",
cval=0.5,
horizontal_flip=True,
vertical_flip=True,
)
# Basic test before fit
x = np.random.random((32, 10, 10, 3))
generator.flow(x)
# Fit
generator.fit(images, augment=True)
for x, _ in generator.flow(
images, np.arange(images.shape[0]), shuffle=True
):
self.assertEqual(x.shape[1:], images.shape[1:])
break
def test_image_with_split_value_error(self):
with self.assertRaises(ValueError):
image.ImageDataGenerator(validation_split=5)
def test_image_invalid_data(self):
generator = image.ImageDataGenerator(
featurewise_center=True,
samplewise_center=True,
featurewise_std_normalization=True,
samplewise_std_normalization=True,
zca_whitening=True,
data_format="channels_last",
)
# Test fit with invalid data
with self.assertRaises(ValueError):
x = np.random.random((3, 10, 10))
generator.fit(x)
# Test flow with invalid data
with self.assertRaises(ValueError):
generator.flow(np.arange(5))
# Invalid number of channels: will work but raise a warning
x = np.random.random((32, 10, 10, 5))
generator.flow(x)
with self.assertRaises(ValueError):
generator = image.ImageDataGenerator(data_format="unknown")
generator = image.ImageDataGenerator(zoom_range=(2.0, 2.0))
def test_image_fit(self):
generator = image.ImageDataGenerator(
featurewise_center=True,
samplewise_center=True,
featurewise_std_normalization=True,
samplewise_std_normalization=True,
zca_whitening=True,
data_format="channels_last",
)
# Test grayscale
x = np.random.random((32, 10, 10, 1))
generator.fit(x)
        # Test RGB
x = np.random.random((32, 10, 10, 3))
generator.fit(x)
generator = image.ImageDataGenerator(
featurewise_center=True,
samplewise_center=True,
featurewise_std_normalization=True,
samplewise_std_normalization=True,
zca_whitening=True,
data_format="channels_first",
)
# Test grayscale
x = np.random.random((32, 1, 10, 10))
generator.fit(x)
        # Test RGB
x = np.random.random((32, 3, 10, 10))
generator.fit(x)
def test_directory_iterator(self):
if PIL is None:
return # Skip test if PIL is not available.
num_classes = 2
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
# create folders and subfolders
paths = []
for cl in range(num_classes):
class_directory = f"class-{cl}"
classpaths = [
class_directory,
os.path.join(class_directory, "subfolder-1"),
os.path.join(class_directory, "subfolder-2"),
os.path.join(class_directory, "subfolder-1", "sub-subfolder"),
]
for path in classpaths:
os.mkdir(os.path.join(temp_dir, path))
paths.append(classpaths)
# save the images in the paths
count = 0
filenames = []
for test_images in _generate_test_images():
for im in test_images:
# rotate image class
im_class = count % num_classes
# rotate subfolders
classpaths = paths[im_class]
filename = os.path.join(
classpaths[count % len(classpaths)],
f"image-{count}.jpg",
)
filenames.append(filename)
im.save(os.path.join(temp_dir, filename))
count += 1
# Test image loading util
fname = os.path.join(temp_dir, filenames[0])
_ = image_utils.load_img(fname)
_ = image_utils.load_img(fname, grayscale=True)
_ = image_utils.load_img(fname, target_size=(10, 10))
_ = image_utils.load_img(
fname, target_size=(10, 10), interpolation="bilinear"
)
# create iterator
generator = image.ImageDataGenerator()
dir_iterator = generator.flow_from_directory(temp_dir)
# check number of classes and images
self.assertEqual(len(dir_iterator.class_indices), num_classes)
self.assertEqual(len(dir_iterator.classes), count)
self.assertEqual(set(dir_iterator.filenames), set(filenames))
        def preprocessing_function(x):
            """This will fail if `x` is not a Numpy array.
            Note: this check enforces backward compatibility.
Args:
x: A numpy array.
Returns:
An array of zeros with the same shape as the given array.
"""
self.assertEqual(x.shape, (26, 26, 3))
self.assertIs(type(x), np.ndarray)
return np.zeros_like(x)
# Test usage as Sequence
generator = image.ImageDataGenerator(
preprocessing_function=preprocessing_function
)
dir_seq = generator.flow_from_directory(
str(temp_dir),
target_size=(26, 26),
color_mode="rgb",
batch_size=3,
class_mode="categorical",
)
self.assertEqual(len(dir_seq), count // 3 + 1)
x1, y1 = dir_seq[1]
self.assertEqual(x1.shape, (3, 26, 26, 3))
self.assertEqual(y1.shape, (3, num_classes))
x1, y1 = dir_seq[5]
self.assertTrue((x1 == 0).all())
def directory_iterator_with_validation_split_test_helper(
self, validation_split
):
if PIL is None:
return # Skip test if PIL is not available.
num_classes = 2
tmp_folder = tempfile.mkdtemp(prefix="test_images")
# create folders and subfolders
paths = []
for cl in range(num_classes):
class_directory = f"class-{cl}"
classpaths = [
class_directory,
os.path.join(class_directory, "subfolder-1"),
os.path.join(class_directory, "subfolder-2"),
os.path.join(class_directory, "subfolder-1", "sub-subfolder"),
]
for path in classpaths:
os.mkdir(os.path.join(tmp_folder, path))
paths.append(classpaths)
# save the images in the paths
count = 0
filenames = []
for test_images in _generate_test_images():
for im in test_images:
# rotate image class
im_class = count % num_classes
# rotate subfolders
classpaths = paths[im_class]
filename = os.path.join(
classpaths[count % len(classpaths)],
f"image-{count}.jpg",
)
filenames.append(filename)
im.save(os.path.join(tmp_folder, filename))
count += 1
# create iterator
generator = image.ImageDataGenerator(validation_split=validation_split)
with self.assertRaises(ValueError):
generator.flow_from_directory(tmp_folder, subset="foo")
num_validation = int(count * validation_split)
num_training = count - num_validation
train_iterator = generator.flow_from_directory(
tmp_folder, subset="training"
)
self.assertEqual(train_iterator.samples, num_training)
valid_iterator = generator.flow_from_directory(
tmp_folder, subset="validation"
)
self.assertEqual(valid_iterator.samples, num_validation)
# check number of classes and images
self.assertEqual(len(train_iterator.class_indices), num_classes)
self.assertEqual(len(train_iterator.classes), num_training)
self.assertEqual(
len(set(train_iterator.filenames) & set(filenames)), num_training
)
model = sequential.Sequential([layers.Flatten(), layers.Dense(2)])
model.compile(optimizer="sgd", loss="mse")
model.fit(train_iterator, epochs=1)
shutil.rmtree(tmp_folder)
@test_combinations.run_all_keras_modes
def test_directory_iterator_with_validation_split_25_percent(self):
self.directory_iterator_with_validation_split_test_helper(0.25)
@test_combinations.run_all_keras_modes
def test_directory_iterator_with_validation_split_40_percent(self):
self.directory_iterator_with_validation_split_test_helper(0.40)
@test_combinations.run_all_keras_modes
def test_directory_iterator_with_validation_split_50_percent(self):
self.directory_iterator_with_validation_split_test_helper(0.50)
def test_batch_standardize(self):
if PIL is None:
return # Skip test if PIL is not available.
# ImageDataGenerator.standardize should work on batches
for test_images in _generate_test_images():
img_list = []
for im in test_images:
img_list.append(image_utils.img_to_array(im)[None, ...])
images = np.vstack(img_list)
generator = image.ImageDataGenerator(
featurewise_center=True,
samplewise_center=True,
featurewise_std_normalization=True,
samplewise_std_normalization=True,
zca_whitening=True,
rotation_range=90.0,
width_shift_range=0.1,
height_shift_range=0.1,
shear_range=0.5,
zoom_range=0.2,
channel_shift_range=0.0,
brightness_range=(1, 5),
fill_mode="nearest",
cval=0.5,
horizontal_flip=True,
vertical_flip=True,
)
generator.fit(images, augment=True)
transformed = np.copy(images)
for i, im in enumerate(transformed):
transformed[i] = generator.random_transform(im)
transformed = generator.standardize(transformed)
def test_img_transforms(self):
x = np.random.random((3, 200, 200))
_ = image.random_rotation(x, 20)
_ = image.random_shift(x, 0.2, 0.2)
_ = image.random_shear(x, 2.0)
_ = image.random_zoom(x, (0.5, 0.5))
_ = image.apply_channel_shift(x, 2, 2)
_ = image.apply_affine_transform(x, 2)
with self.assertRaises(ValueError):
image.random_zoom(x, (0, 0, 0))
_ = image.random_channel_shift(x, 2.0)
@test_utils.run_v2_only
class TestDirectoryIterator(test_combinations.TestCase):
def test_directory_iterator(self):
tmpdir = self.create_tempdir()
all_test_images = _generate_test_images(
include_rgba=True, include_16bit=True, include_32bit=True
)
num_classes = 2
# create folders and subfolders
paths = []
for cl in range(num_classes):
class_directory = f"class-{cl}"
classpaths = [
class_directory,
os.path.join(class_directory, "subfolder-1"),
os.path.join(class_directory, "subfolder-2"),
os.path.join(class_directory, "subfolder-1", "sub-subfolder"),
]
for path in classpaths:
os.mkdir(os.path.join(tmpdir.full_path, path))
paths.append(classpaths)
# save the images in the paths
count = 0
filenames = []
for test_images in all_test_images:
for im in test_images:
# rotate image class
im_class = count % num_classes
# rotate subfolders
classpaths = paths[im_class]
filename = os.path.join(
classpaths[count % len(classpaths)],
f"image-{count}.png",
)
filenames.append(filename)
im.save(os.path.join(tmpdir.full_path, filename))
count += 1
# create iterator
generator = image.ImageDataGenerator()
dir_iterator = generator.flow_from_directory(tmpdir.full_path)
# check number of classes and images
self.assertLen(dir_iterator.class_indices, num_classes)
self.assertLen(dir_iterator.classes, count)
self.assertEqual(set(dir_iterator.filenames), set(filenames))
# Test invalid use cases
with self.assertRaises(ValueError):
generator.flow_from_directory(tmpdir.full_path, color_mode="cmyk")
with self.assertRaises(ValueError):
generator.flow_from_directory(tmpdir.full_path, class_mode="output")
def preprocessing_function(x):
            # This will fail if `x` is not a Numpy array.
            # Note: this check enforces backward compatibility.
self.assertEqual(x.shape, (26, 26, 3))
self.assertIsInstance(x, np.ndarray)
return np.zeros_like(x)
# Test usage as Sequence
generator = image.ImageDataGenerator(
preprocessing_function=preprocessing_function
)
dir_seq = generator.flow_from_directory(
tmpdir.full_path,
target_size=(26, 26),
color_mode="rgb",
batch_size=3,
class_mode="categorical",
)
self.assertLen(dir_seq, np.ceil(count / 3.0))
x1, y1 = dir_seq[1]
self.assertEqual(x1.shape, (3, 26, 26, 3))
self.assertEqual(y1.shape, (3, num_classes))
x1, y1 = dir_seq[5]
self.assertTrue((x1 == 0).all())
with self.assertRaises(ValueError):
x1, y1 = dir_seq[14] # there are 40 images and batch size is 3
def test_directory_iterator_class_mode_input(self):
tmpdir = self.create_tempdir()
os.mkdir(os.path.join(tmpdir.full_path, "class-1"))
all_test_images = _generate_test_images(
include_rgba=True, include_16bit=True, include_32bit=True
)
# save the images in the paths
count = 0
for test_images in all_test_images:
for im in test_images:
filename = os.path.join(tmpdir, "class-1", f"image-{count}.png")
im.save(filename)
count += 1
# create iterator
generator = image.ImageDataGenerator()
dir_iterator = generator.flow_from_directory(
tmpdir.full_path, class_mode="input"
)
batch = next(dir_iterator)
# check if input and output have the same shape
self.assertEqual(batch[0].shape, batch[1].shape)
# check if the input and output images are not the same numpy array
input_img = batch[0][0]
output_img = batch[1][0]
output_img[0][0][0] += 1
self.assertNotEqual(input_img[0][0][0], output_img[0][0][0])
@parameterized.parameters(
[
(0.25, 30),
(0.50, 20),
(0.75, 10),
]
)
def test_directory_iterator_with_validation_split(
self, validation_split, num_training
):
tmpdir = self.create_tempdir()
all_test_images = _generate_test_images(
include_rgba=True, include_16bit=True, include_32bit=True
)
num_classes = 2
# create folders and subfolders
paths = []
for cl in range(num_classes):
class_directory = f"class-{cl}"
classpaths = [
class_directory,
os.path.join(class_directory, "subfolder-1"),
os.path.join(class_directory, "subfolder-2"),
os.path.join(class_directory, "subfolder-1", "sub-subfolder"),
]
for path in classpaths:
os.mkdir(os.path.join(tmpdir.full_path, path))
paths.append(classpaths)
# save the images in the paths
count = 0
filenames = []
for test_images in all_test_images:
for im in test_images:
# rotate image class
im_class = count % num_classes
# rotate subfolders
classpaths = paths[im_class]
filename = os.path.join(
classpaths[count % len(classpaths)],
f"image-{count}.png",
)
filenames.append(filename)
im.save(os.path.join(tmpdir.full_path, filename))
count += 1
# create iterator
generator = image.ImageDataGenerator(validation_split=validation_split)
with self.assertRaises(ValueError):
generator.flow_from_directory(tmpdir.full_path, subset="foo")
train_iterator = generator.flow_from_directory(
tmpdir.full_path, subset="training"
)
self.assertEqual(train_iterator.samples, num_training)
valid_iterator = generator.flow_from_directory(
tmpdir.full_path, subset="validation"
)
self.assertEqual(valid_iterator.samples, count - num_training)
# check number of classes and images
self.assertLen(train_iterator.class_indices, num_classes)
self.assertLen(train_iterator.classes, num_training)
self.assertLen(
set(train_iterator.filenames) & set(filenames), num_training
)
@test_utils.run_v2_only
class TestNumpyArrayIterator(test_combinations.TestCase):
def test_numpy_array_iterator(self):
tmpdir = self.create_tempdir()
all_test_images = _generate_test_images(include_rgba=True)
image_data_generator = image.ImageDataGenerator(
featurewise_center=True,
samplewise_center=True,
featurewise_std_normalization=True,
samplewise_std_normalization=True,
zca_whitening=True,
rotation_range=90.0,
width_shift_range=0.1,
height_shift_range=0.1,
shear_range=0.5,
zoom_range=0.2,
channel_shift_range=0.0,
brightness_range=(1, 5),
fill_mode="nearest",
cval=0.5,
horizontal_flip=True,
vertical_flip=True,
interpolation_order=1,
)
for test_images in all_test_images:
img_list = []
for im in test_images:
img_list.append(image_utils.img_to_array(im)[None, ...])
images = np.vstack(img_list)
dsize = images.shape[0]
iterator = image.NumpyArrayIterator(
images,
np.arange(images.shape[0]),
image_data_generator,
shuffle=False,
save_to_dir=tmpdir.full_path,
batch_size=3,
)
x, y = next(iterator)
self.assertEqual(x.shape, images[:3].shape)
self.assertEqual(list(y), [0, 1, 2])
# Test with sample weights
iterator = image.NumpyArrayIterator(
images,
np.arange(images.shape[0]),
image_data_generator,
shuffle=False,
sample_weight=np.arange(images.shape[0]) + 1,
save_to_dir=tmpdir.full_path,
batch_size=3,
)
x, y, w = iterator.next()
self.assertEqual(x.shape, images[:3].shape)
self.assertEqual(list(y), [0, 1, 2])
self.assertEqual(list(w), [1, 2, 3])
# Test with `shuffle=True`
iterator = image.NumpyArrayIterator(
images,
np.arange(images.shape[0]),
image_data_generator,
shuffle=True,
save_to_dir=tmpdir.full_path,
batch_size=3,
seed=42,
)
x, y = iterator.next()
self.assertEqual(x.shape, images[:3].shape)
# Check that the sequence is shuffled.
self.assertNotEqual(list(y), [0, 1, 2])
# Test without y
iterator = image.NumpyArrayIterator(
images,
None,
image_data_generator,
shuffle=True,
save_to_dir=tmpdir.full_path,
batch_size=3,
)
x = iterator.next()
self.assertIsInstance(x, np.ndarray)
self.assertEqual(x.shape, images[:3].shape)
# Test with a single miscellaneous input data array
x_misc1 = np.random.random(dsize)
iterator = image.NumpyArrayIterator(
(images, x_misc1),
np.arange(dsize),
image_data_generator,
shuffle=False,
batch_size=2,
)
for i, (x, y) in enumerate(iterator):
self.assertEqual(x[0].shape, images[:2].shape)
self.assertTrue(
(x[1] == x_misc1[(i * 2) : ((i + 1) * 2)]).all()
)
if i == 2:
break
# Test with two miscellaneous inputs
x_misc2 = np.random.random((dsize, 3, 3))
iterator = image.NumpyArrayIterator(
(images, [x_misc1, x_misc2]),
np.arange(dsize),
image_data_generator,
shuffle=False,
batch_size=2,
)
for i, (x, y) in enumerate(iterator):
self.assertEqual(x[0].shape, images[:2].shape)
self.assertTrue(
(x[1] == x_misc1[(i * 2) : ((i + 1) * 2)]).all()
)
self.assertTrue(
(x[2] == x_misc2[(i * 2) : ((i + 1) * 2)]).all()
)
if i == 2:
break
# Test cases with `y = None`
iterator = image.NumpyArrayIterator(
images, None, image_data_generator, batch_size=3
)
x = iterator.next()
self.assertIsInstance(x, np.ndarray)
self.assertEqual(x.shape, images[:3].shape)
iterator = image.NumpyArrayIterator(
(images, x_misc1),
None,
image_data_generator,
batch_size=3,
shuffle=False,
)
x = iterator.next()
self.assertIsInstance(x, list)
self.assertEqual(x[0].shape, images[:3].shape)
self.assertTrue((x[1] == x_misc1[:3]).all())
iterator = image.NumpyArrayIterator(
(images, [x_misc1, x_misc2]),
None,
image_data_generator,
batch_size=3,
shuffle=False,
)
x = iterator.next()
self.assertIsInstance(x, list)
self.assertEqual(x[0].shape, images[:3].shape)
self.assertTrue((x[1] == x_misc1[:3]).all())
self.assertTrue((x[2] == x_misc2[:3]).all())
# Test with validation split
generator = image.ImageDataGenerator(validation_split=0.2)
iterator = image.NumpyArrayIterator(
images, None, generator, batch_size=3
)
x = iterator.next()
self.assertIsInstance(x, np.ndarray)
self.assertEqual(x.shape, images[:3].shape)
# Test some failure cases:
x_misc_err = np.random.random((dsize + 1, 3, 3))
with self.assertRaisesRegex(ValueError, "All of the arrays in"):
image.NumpyArrayIterator(
(images, x_misc_err),
np.arange(dsize),
generator,
batch_size=3,
)
with self.assertRaisesRegex(
ValueError, r"`x` \(images tensor\) and `y` \(labels\)"
):
image.NumpyArrayIterator(
(images, x_misc1),
np.arange(dsize + 1),
generator,
batch_size=3,
)
# Test `flow` behavior as Sequence
seq = image.NumpyArrayIterator(
images,
np.arange(images.shape[0]),
generator,
shuffle=False,
save_to_dir=tmpdir.full_path,
batch_size=3,
)
self.assertLen(seq, images.shape[0] // 3 + 1)
x, y = seq[0]
self.assertEqual(x.shape, images[:3].shape)
self.assertEqual(list(y), [0, 1, 2])
# Test with `shuffle=True`
seq = image.NumpyArrayIterator(
images,
np.arange(images.shape[0]),
generator,
shuffle=True,
save_to_dir=tmpdir.full_path,
batch_size=3,
seed=123,
)
x, y = seq[0]
# Check that the sequence is shuffled.
self.assertNotEqual(list(y), [0, 1, 2])
# `on_epoch_end` should reshuffle the sequence.
seq.on_epoch_end()
_, y2 = seq[0]
self.assertNotEqual(list(y), list(y2))
            # test interpolation_order
labels = np.array(
[
[2, 2, 0, 2, 2],
[1, 3, 2, 3, 1],
[2, 1, 0, 1, 2],
[3, 1, 0, 2, 0],
[3, 1, 3, 2, 1],
]
)
label_generator = image.ImageDataGenerator(
rotation_range=90.0, interpolation_order=0
)
labels_gen = image.NumpyArrayIterator(
labels[np.newaxis, ..., np.newaxis], None, label_generator, seed=123
)
self.assertTrue(
(np.unique(labels) == np.unique(next(labels_gen))).all()
)
@test_utils.run_v2_only
class TestDataFrameIterator(test_combinations.TestCase):
def test_dataframe_iterator(self):
tmpdir = self.create_tempdir()
all_test_images = _generate_test_images(include_rgba=True)
num_classes = 2
# save the images in the tmpdir
count = 0
filenames = []
filepaths = []
filenames_without = []
for test_images in all_test_images:
for im in test_images:
filename = f"image-{count}.png"
filename_without = f"image-{count}"
filenames.append(filename)
filepaths.append(os.path.join(tmpdir.full_path, filename))
filenames_without.append(filename_without)
im.save(os.path.join(tmpdir.full_path, filename))
count += 1
df = pd.DataFrame(
{
"filename": filenames,
"class": [str(random.randint(0, 1)) for _ in filenames],
"filepaths": filepaths,
}
)
# create iterator
iterator = image.DataFrameIterator(df, tmpdir.full_path)
batch = next(iterator)
self.assertLen(batch, 2)
self.assertIsInstance(batch[0], np.ndarray)
self.assertIsInstance(batch[1], np.ndarray)
generator = image.ImageDataGenerator()
df_iterator = generator.flow_from_dataframe(df, x_col="filepaths")
df_iterator_dir = generator.flow_from_dataframe(df, tmpdir.full_path)
df_sparse_iterator = generator.flow_from_dataframe(
df, tmpdir.full_path, class_mode="sparse"
)
self.assertFalse(np.isnan(df_sparse_iterator.classes).any())
# check number of classes and images
self.assertLen(df_iterator.class_indices, num_classes)
self.assertLen(df_iterator.classes, count)
self.assertEqual(set(df_iterator.filenames), set(filepaths))
self.assertLen(df_iterator_dir.class_indices, num_classes)
self.assertLen(df_iterator_dir.classes, count)
self.assertEqual(set(df_iterator_dir.filenames), set(filenames))
# test without shuffle
_, batch_y = next(
generator.flow_from_dataframe(
df, tmpdir.full_path, shuffle=False, class_mode="sparse"
)
)
self.assertTrue(
(batch_y == df["class"].astype("float")[: len(batch_y)]).all()
)
# Test invalid use cases
with self.assertRaises(ValueError):
generator.flow_from_dataframe(
df, tmpdir.full_path, color_mode="cmyk"
)
with self.assertRaises(ValueError):
generator.flow_from_dataframe(
df, tmpdir.full_path, class_mode="output"
)
with self.assertWarns(DeprecationWarning):
generator.flow_from_dataframe(df, tmpdir.full_path, has_ext=True)
with self.assertWarns(DeprecationWarning):
generator.flow_from_dataframe(df, tmpdir.full_path, has_ext=False)
def preprocessing_function(x):
            # This will fail if `x` is not a Numpy array.
            # Note: this check enforces backward compatibility.
self.assertEqual(x.shape, (26, 26, 3))
self.assertIsInstance(x, np.ndarray)
return np.zeros_like(x)
# Test usage as Sequence
generator = image.ImageDataGenerator(
preprocessing_function=preprocessing_function
)
dir_seq = generator.flow_from_dataframe(
df,
tmpdir.full_path,
target_size=(26, 26),
color_mode="rgb",
batch_size=3,
class_mode="categorical",
)
self.assertLen(dir_seq, np.ceil(count / 3))
x1, y1 = dir_seq[1]
self.assertEqual(x1.shape, (3, 26, 26, 3))
self.assertEqual(y1.shape, (3, num_classes))
x1, y1 = dir_seq[5]
self.assertTrue((x1 == 0).all())
with self.assertRaises(ValueError):
x1, y1 = dir_seq[9]
def test_dataframe_iterator_validate_filenames(self):
tmpdir = self.create_tempdir()
all_test_images = _generate_test_images(include_rgba=True)
# save the images in the paths
count = 0
filenames = []
for test_images in all_test_images:
for im in test_images:
filename = f"image-{count}.png"
im.save(os.path.join(tmpdir.full_path, filename))
filenames.append(filename)
count += 1
df = pd.DataFrame({"filename": filenames + ["test.jpp", "test.jpg"]})
generator = image.ImageDataGenerator()
df_iterator = generator.flow_from_dataframe(
df, tmpdir.full_path, class_mode="input"
)
self.assertLen(df_iterator.filenames, len(df["filename"]) - 2)
df_iterator = generator.flow_from_dataframe(
df, tmpdir.full_path, class_mode="input", validate_filenames=False
)
self.assertLen(df_iterator.filenames, len(df["filename"]))
def test_dataframe_iterator_sample_weights(self):
tmpdir = self.create_tempdir()
all_test_images = _generate_test_images(include_rgba=True)
# save the images in the paths
count = 0
filenames = []
for test_images in all_test_images:
for im in test_images:
filename = f"image-{count}.png"
im.save(os.path.join(tmpdir.full_path, filename))
filenames.append(filename)
count += 1
df = pd.DataFrame({"filename": filenames})
df["weight"] = ([2, 5] * len(df))[: len(df)]
generator = image.ImageDataGenerator()
df_iterator = generator.flow_from_dataframe(
df,
tmpdir.full_path,
x_col="filename",
y_col=None,
shuffle=False,
batch_size=5,
weight_col="weight",
class_mode="input",
)
batch = next(df_iterator)
self.assertLen(batch, 3) # (x, y, weights)
# check if input and output have the same shape and they're the same
        self.assertAllClose(batch[0], batch[1])
# check if the input and output images are not the same numpy array
input_img = batch[0][0]
output_img = batch[1][0]
output_img[0][0][0] += 1
self.assertNotEqual(input_img[0][0][0], output_img[0][0][0])
self.assertAllEqual(np.array([2, 5, 2, 5, 2]), batch[2])
        # Fails: the weight column values must be numeric.
df["weight"] = (["2", "5"] * len(df))[: len(df)]
with self.assertRaises(TypeError):
image.ImageDataGenerator().flow_from_dataframe(
df, weight_col="weight", class_mode="input"
)
def test_dataframe_iterator_class_mode_input(self):
tmpdir = self.create_tempdir()
all_test_images = _generate_test_images(include_rgba=True)
# save the images in the paths
count = 0
filenames = []
for test_images in all_test_images:
for im in test_images:
filename = f"image-{count}.png"
im.save(os.path.join(tmpdir.full_path, filename))
filenames.append(filename)
count += 1
df = pd.DataFrame({"filename": filenames})
generator = image.ImageDataGenerator()
df_autoencoder_iterator = generator.flow_from_dataframe(
df,
tmpdir.full_path,
x_col="filename",
y_col=None,
class_mode="input",
)
batch = next(df_autoencoder_iterator)
# check if input and output have the same shape and they're the same
self.assertAllClose(batch[0], batch[1])
# check if the input and output images are not the same numpy array
input_img = batch[0][0]
output_img = batch[1][0]
output_img[0][0][0] += 1
self.assertNotEqual(input_img[0][0][0], output_img[0][0][0])
df_autoencoder_iterator = generator.flow_from_dataframe(
df,
tmpdir.full_path,
x_col="filename",
y_col="class",
class_mode="input",
)
batch = next(df_autoencoder_iterator)
# check if input and output have the same shape and they're the same
        self.assertAllClose(batch[0], batch[1])
# check if the input and output images are not the same numpy array
input_img = batch[0][0]
output_img = batch[1][0]
output_img[0][0][0] += 1
self.assertNotEqual(input_img[0][0][0], output_img[0][0][0])
def test_dataframe_iterator_class_mode_categorical_multi_label(self):
tmpdir = self.create_tempdir()
all_test_images = _generate_test_images(include_rgba=True)
# save the images in the paths
filenames = []
count = 0
for test_images in all_test_images:
for im in test_images:
filename = f"image-{count}.png"
im.save(os.path.join(tmpdir.full_path, filename))
filenames.append(filename)
count += 1
label_opt = ["a", "b", ["a"], ["b"], ["a", "b"], ["b", "a"]]
df = pd.DataFrame(
{
"filename": filenames,
"class": [random.choice(label_opt) for _ in filenames[:-2]]
+ ["b", "a"],
}
)
generator = image.ImageDataGenerator()
df_iterator = generator.flow_from_dataframe(df, tmpdir.full_path)
batch_x, batch_y = next(df_iterator)
self.assertIsInstance(batch_x, np.ndarray)
self.assertLen(batch_x.shape, 4)
self.assertIsInstance(batch_y, np.ndarray)
self.assertEqual(batch_y.shape, (len(batch_x), 2))
for labels in batch_y:
self.assertTrue(all(label in {0, 1} for label in labels))
# on first 3 batches
df = pd.DataFrame(
{
"filename": filenames,
"class": [["b", "a"]]
+ ["b"]
+ [["c"]]
+ [random.choice(label_opt) for _ in filenames[:-3]],
}
)
generator = image.ImageDataGenerator()
df_iterator = generator.flow_from_dataframe(
df, tmpdir.full_path, shuffle=False
)
batch_x, batch_y = next(df_iterator)
self.assertIsInstance(batch_x, np.ndarray)
self.assertLen(batch_x.shape, 4)
self.assertIsInstance(batch_y, np.ndarray)
self.assertEqual(batch_y.shape, (len(batch_x), 3))
for labels in batch_y:
self.assertTrue(all(label in {0, 1} for label in labels))
self.assertTrue((batch_y[0] == np.array([1, 1, 0])).all())
self.assertTrue((batch_y[1] == np.array([0, 1, 0])).all())
self.assertTrue((batch_y[2] == np.array([0, 0, 1])).all())
def test_dataframe_iterator_class_mode_multi_output(self):
tmpdir = self.create_tempdir()
all_test_images = _generate_test_images(include_rgba=True)
# save the images in the paths
filenames = []
count = 0
for test_images in all_test_images:
for im in test_images:
filename = f"image-{count}.png"
im.save(os.path.join(tmpdir.full_path, filename))
filenames.append(filename)
count += 1
        # case where both outputs are a single number
df = pd.DataFrame({"filename": filenames}).assign(
output_0=np.random.uniform(size=len(filenames)),
output_1=np.random.uniform(size=len(filenames)),
)
df_iterator = image.ImageDataGenerator().flow_from_dataframe(
df,
y_col=["output_0", "output_1"],
directory=tmpdir.full_path,
batch_size=3,
shuffle=False,
class_mode="multi_output",
)
batch_x, batch_y = next(df_iterator)
self.assertIsInstance(batch_x, np.ndarray)
self.assertLen(batch_x.shape, 4)
self.assertIsInstance(batch_y, list)
self.assertLen(batch_y, 2)
self.assertAllEqual(batch_y[0], np.array(df["output_0"].tolist()[:3]))
self.assertAllEqual(batch_y[1], np.array(df["output_1"].tolist()[:3]))
# if one of the outputs is a 1D array
df["output_1"] = [
np.random.uniform(size=(2, 2, 1)).flatten() for _ in range(len(df))
]
df_iterator = image.ImageDataGenerator().flow_from_dataframe(
df,
y_col=["output_0", "output_1"],
directory=tmpdir.full_path,
batch_size=3,
shuffle=False,
class_mode="multi_output",
)
batch_x, batch_y = next(df_iterator)
self.assertIsInstance(batch_x, np.ndarray)
self.assertLen(batch_x.shape, 4)
self.assertIsInstance(batch_y, list)
self.assertLen(batch_y, 2)
self.assertAllEqual(batch_y[0], np.array(df["output_0"].tolist()[:3]))
self.assertAllEqual(batch_y[1], np.array(df["output_1"].tolist()[:3]))
# if one of the outputs is a 2D array
df["output_1"] = [
np.random.uniform(size=(2, 2, 1)) for _ in range(len(df))
]
df_iterator = image.ImageDataGenerator().flow_from_dataframe(
df,
y_col=["output_0", "output_1"],
directory=tmpdir.full_path,
batch_size=3,
shuffle=False,
class_mode="multi_output",
)
batch_x, batch_y = next(df_iterator)
self.assertIsInstance(batch_x, np.ndarray)
self.assertLen(batch_x.shape, 4)
self.assertIsInstance(batch_y, list)
self.assertLen(batch_y, 2)
self.assertAllEqual(batch_y[0], np.array(df["output_0"].tolist()[:3]))
self.assertAllEqual(batch_y[1], np.array(df["output_1"].tolist()[:3]))
# fail if single column
with self.assertRaises(TypeError):
image.ImageDataGenerator().flow_from_dataframe(
df,
y_col="output_0",
directory=tmpdir.full_path,
class_mode="multi_output",
)
def test_dataframe_iterator_class_mode_raw(self):
tmpdir = self.create_tempdir()
all_test_images = _generate_test_images(include_rgba=True)
# save the images in the paths
filenames = []
count = 0
for test_images in all_test_images:
for im in test_images:
filename = f"image-{count}.png"
im.save(os.path.join(tmpdir.full_path, filename))
filenames.append(filename)
count += 1
# case for 1D output
df = pd.DataFrame({"filename": filenames}).assign(
output_0=np.random.uniform(size=len(filenames)),
output_1=np.random.uniform(size=len(filenames)),
)
df_iterator = image.ImageDataGenerator().flow_from_dataframe(
df,
y_col="output_0",
directory=tmpdir.full_path,
batch_size=3,
shuffle=False,
class_mode="raw",
)
batch_x, batch_y = next(df_iterator)
self.assertIsInstance(batch_x, np.ndarray)
self.assertLen(batch_x.shape, 4)
self.assertIsInstance(batch_y, np.ndarray)
self.assertEqual(batch_y.shape, (3,))
self.assertAllEqual(batch_y, df["output_0"].values[:3])
# case with a 2D output
df_iterator = image.ImageDataGenerator().flow_from_dataframe(
df,
y_col=["output_0", "output_1"],
directory=tmpdir.full_path,
batch_size=3,
shuffle=False,
class_mode="raw",
)
batch_x, batch_y = next(df_iterator)
self.assertIsInstance(batch_x, np.ndarray)
self.assertLen(batch_x.shape, 4)
self.assertIsInstance(batch_y, np.ndarray)
self.assertEqual(batch_y.shape, (3, 2))
self.assertAllEqual(batch_y, df[["output_0", "output_1"]].values[:3])
@parameterized.parameters(
[
(0.25, 18),
(0.50, 12),
(0.75, 6),
]
)
def test_dataframe_iterator_with_validation_split(
self, validation_split, num_training
):
tmpdir = self.create_tempdir()
all_test_images = _generate_test_images(include_rgba=True)
num_classes = 2
# save the images in the tmpdir
count = 0
filenames = []
filenames_without = []
for test_images in all_test_images:
for im in test_images:
filename = f"image-{count}.png"
filename_without = f"image-{count}"
filenames.append(filename)
filenames_without.append(filename_without)
im.save(os.path.join(tmpdir.full_path, filename))
count += 1
df = pd.DataFrame(
{
"filename": filenames,
"class": [str(random.randint(0, 1)) for _ in filenames],
}
)
# create iterator
generator = image.ImageDataGenerator(validation_split=validation_split)
df_sparse_iterator = generator.flow_from_dataframe(
df, tmpdir.full_path, class_mode="sparse"
)
if np.isnan(next(df_sparse_iterator)[:][1]).any():
raise ValueError("Invalid values.")
with self.assertRaises(ValueError):
generator.flow_from_dataframe(df, tmpdir.full_path, subset="foo")
train_iterator = generator.flow_from_dataframe(
df, tmpdir.full_path, subset="training"
)
self.assertEqual(train_iterator.samples, num_training)
valid_iterator = generator.flow_from_dataframe(
df, tmpdir.full_path, subset="validation"
)
self.assertEqual(valid_iterator.samples, count - num_training)
# check number of classes and images
self.assertLen(train_iterator.class_indices, num_classes)
self.assertLen(train_iterator.classes, num_training)
self.assertLen(
set(train_iterator.filenames) & set(filenames), num_training
)
def test_dataframe_iterator_with_custom_indexed_dataframe(self):
tmpdir = self.create_tempdir()
all_test_images = _generate_test_images(include_rgba=True)
num_classes = 2
# save the images in the tmpdir
count = 0
filenames = []
for test_images in all_test_images:
for im in test_images:
filename = f"image-{count}.png"
filenames.append(filename)
im.save(os.path.join(tmpdir.full_path, filename))
count += 1
# create dataframes
classes = np.random.randint(num_classes, size=len(filenames))
classes = [str(c) for c in classes]
df = pd.DataFrame({"filename": filenames, "class": classes})
df2 = pd.DataFrame(
{"filename": filenames, "class": classes},
index=np.arange(1, len(filenames) + 1),
)
df3 = pd.DataFrame(
{"filename": filenames, "class": classes}, index=filenames
)
# create iterators
seed = 1
generator = image.ImageDataGenerator()
df_iterator = generator.flow_from_dataframe(
df, tmpdir.full_path, seed=seed
)
df2_iterator = generator.flow_from_dataframe(
df2, tmpdir.full_path, seed=seed
)
df3_iterator = generator.flow_from_dataframe(
df3, tmpdir.full_path, seed=seed
)
# Test all iterators return same pairs of arrays
for _ in range(len(filenames)):
a1, c1 = next(df_iterator)
a2, c2 = next(df2_iterator)
a3, c3 = next(df3_iterator)
self.assertAllEqual(a1, a2)
self.assertAllEqual(a1, a3)
self.assertAllEqual(c1, c2)
self.assertAllEqual(c1, c3)
def test_dataframe_iterator_n(self):
tmpdir = self.create_tempdir()
all_test_images = _generate_test_images(include_rgba=True)
# save the images in the tmpdir
count = 0
filenames = []
for test_images in all_test_images:
for im in test_images:
filename = f"image-{count}.png"
filenames.append(filename)
im.save(os.path.join(tmpdir.full_path, filename))
count += 1
# exclude first two items
n_files = len(filenames)
input_filenames = filenames[2:]
# create dataframes
classes = np.random.randint(2, size=len(input_filenames))
classes = [str(c) for c in classes]
df = pd.DataFrame({"filename": input_filenames})
df2 = pd.DataFrame({"filename": input_filenames, "class": classes})
# create iterators
generator = image.ImageDataGenerator()
df_iterator = generator.flow_from_dataframe(
df, tmpdir.full_path, class_mode=None
)
df2_iterator = generator.flow_from_dataframe(
df2, tmpdir.full_path, class_mode="binary"
)
# Test the number of items in iterators
self.assertEqual(df_iterator.n, n_files - 2)
self.assertEqual(df2_iterator.n, n_files - 2)
def test_dataframe_iterator_absolute_path(self):
tmpdir = self.create_tempdir()
all_test_images = _generate_test_images(include_rgba=True)
# save the images in the tmpdir
count = 0
file_paths = []
for test_images in all_test_images:
for im in test_images:
filename = f"image-{count:0>5}.png"
file_path = os.path.join(tmpdir.full_path, filename)
file_paths.append(file_path)
im.save(file_path)
count += 1
# prepare an image with a forbidden extension.
file_path_fbd = os.path.join(tmpdir.full_path, "image-forbid.fbd")
shutil.copy(file_path, file_path_fbd)
# create dataframes
classes = np.random.randint(2, size=len(file_paths))
classes = [str(c) for c in classes]
df = pd.DataFrame({"filename": file_paths})
df2 = pd.DataFrame({"filename": file_paths, "class": classes})
df3 = pd.DataFrame({"filename": ["image-not-exist.png"] + file_paths})
df4 = pd.DataFrame({"filename": file_paths + [file_path_fbd]})
# create iterators
generator = image.ImageDataGenerator()
df_iterator = generator.flow_from_dataframe(
df, None, class_mode=None, shuffle=False, batch_size=1
)
df2_iterator = generator.flow_from_dataframe(
df2, None, class_mode="binary", shuffle=False, batch_size=1
)
df3_iterator = generator.flow_from_dataframe(
df3, None, class_mode=None, shuffle=False, batch_size=1
)
df4_iterator = generator.flow_from_dataframe(
df4, None, class_mode=None, shuffle=False, batch_size=1
)
validation_split = 0.2
generator_split = image.ImageDataGenerator(
validation_split=validation_split
)
df_train_iterator = generator_split.flow_from_dataframe(
df,
None,
class_mode=None,
shuffle=False,
subset="training",
batch_size=1,
)
df_val_iterator = generator_split.flow_from_dataframe(
df,
None,
class_mode=None,
shuffle=False,
subset="validation",
batch_size=1,
)
# Test the number of items in iterators
self.assertLen(file_paths, df_iterator.n)
self.assertLen(file_paths, df2_iterator.n)
self.assertLen(file_paths, df3_iterator.n)
self.assertLen(file_paths, df4_iterator.n)
self.assertEqual(
df_val_iterator.n, int(validation_split * len(file_paths))
)
self.assertLen(file_paths, df_train_iterator.n + df_val_iterator.n)
# Test flow_from_dataframe
for i in range(len(file_paths)):
a1 = next(df_iterator)
a2, _ = next(df2_iterator)
a3 = next(df3_iterator)
a4 = next(df4_iterator)
if i < df_val_iterator.n:
a5 = next(df_val_iterator)
else:
a5 = next(df_train_iterator)
self.assertAllEqual(a1, a2)
self.assertAllEqual(a1, a3)
self.assertAllEqual(a1, a4)
self.assertAllEqual(a1, a5)
def test_dataframe_iterator_with_subdirs(self):
tmpdir = self.create_tempdir()
all_test_images = _generate_test_images(include_rgba=True)
num_classes = 2
# create folders and subfolders
paths = []
for cl in range(num_classes):
class_directory = f"class-{cl}"
classpaths = [
class_directory,
os.path.join(class_directory, "subfolder-1"),
os.path.join(class_directory, "subfolder-2"),
os.path.join(class_directory, "subfolder-1", "sub-subfolder"),
]
for path in classpaths:
os.mkdir(os.path.join(tmpdir, path))
paths.append(classpaths)
# save the images in the paths
count = 0
filenames = []
for test_images in all_test_images:
for im in test_images:
# rotate image class
im_class = count % num_classes
# rotate subfolders
classpaths = paths[im_class]
filename = os.path.join(
classpaths[count % len(classpaths)],
f"image-{count}.png",
)
filenames.append(filename)
im.save(os.path.join(tmpdir.full_path, filename))
count += 1
# create dataframe
classes = np.random.randint(num_classes, size=len(filenames))
classes = [str(c) for c in classes]
df = pd.DataFrame({"filename": filenames, "class": classes})
# create iterator
generator = image.ImageDataGenerator()
df_iterator = generator.flow_from_dataframe(
df, tmpdir.full_path, class_mode="binary"
)
# Test the number of items in iterator
self.assertLen(filenames, df_iterator.n)
self.assertEqual(set(df_iterator.filenames), set(filenames))
def test_dataframe_iterator_classes_indices_order(self):
tmpdir = self.create_tempdir()
all_test_images = _generate_test_images(include_rgba=True)
# save the images in the paths
count = 0
filenames = []
for test_images in all_test_images:
for im in test_images:
filename = f"image-{count}.png"
im.save(os.path.join(tmpdir.full_path, filename))
filenames.append(filename)
count += 1
# Test the class_indices without classes input
generator = image.ImageDataGenerator()
label_opt = ["a", "b", ["a"], ["b"], ["a", "b"], ["b", "a"]]
df_f = pd.DataFrame(
{
"filename": filenames,
"class": ["a", "b"]
+ [random.choice(label_opt) for _ in filenames[:-2]],
}
)
flow_forward_iter = generator.flow_from_dataframe(
df_f, tmpdir.full_path
)
label_rev = ["b", "a", ["b"], ["a"], ["b", "a"], ["a", "b"]]
df_r = pd.DataFrame(
{
"filename": filenames,
"class": ["b", "a"]
+ [random.choice(label_rev) for _ in filenames[:-2]],
}
)
flow_backward_iter = generator.flow_from_dataframe(
df_r, tmpdir.full_path
)
# check class_indices
self.assertEqual(
flow_forward_iter.class_indices, flow_backward_iter.class_indices
)
# Test the class_indices with classes input
generator_2 = image.ImageDataGenerator()
df_f2 = pd.DataFrame(
[["data/A.jpg", "A"], ["data/B.jpg", "B"]],
columns=["filename", "class"],
)
flow_forward = generator_2.flow_from_dataframe(
df_f2, classes=["A", "B"]
)
df_b2 = pd.DataFrame(
[["data/A.jpg", "A"], ["data/B.jpg", "B"]],
columns=["filename", "class"],
)
flow_backward = generator_2.flow_from_dataframe(
df_b2, classes=["B", "A"]
)
# check class_indices
self.assertNotEqual(
flow_forward.class_indices, flow_backward.class_indices
)
@test_utils.run_v2_only
class TestImageDataGenerator(test_combinations.TestCase):
def test_image_data_generator(self):
all_test_images = _generate_test_images(include_rgba=True)
for test_images in all_test_images:
img_list = []
for im in test_images:
img_list.append(image_utils.img_to_array(im)[None, ...])
image.ImageDataGenerator(
featurewise_center=True,
samplewise_center=True,
featurewise_std_normalization=True,
samplewise_std_normalization=True,
zca_whitening=True,
rotation_range=90.0,
width_shift_range=0.1,
height_shift_range=0.1,
shear_range=0.5,
zoom_range=0.2,
channel_shift_range=0.0,
brightness_range=(1, 5),
fill_mode="nearest",
cval=0.5,
horizontal_flip=True,
vertical_flip=True,
interpolation_order=1,
)
def test_image_data_generator_with_validation_split(self):
all_test_images = _generate_test_images(include_rgba=True)
for test_images in all_test_images:
img_list = []
for im in test_images:
img_list.append(image_utils.img_to_array(im)[None, ...])
images = np.vstack(img_list)
labels = np.concatenate(
[
np.zeros((int(len(images) / 2),)),
np.ones((int(len(images) / 2),)),
]
)
generator = image.ImageDataGenerator(validation_split=0.5)
# training and validation sets would have different
# number of classes, because labels are sorted
with self.assertRaisesRegex(
ValueError,
"Training and validation subsets have "
"different number of classes",
):
generator.flow(
images,
labels,
shuffle=False,
batch_size=10,
subset="validation",
)
# test non categorical labels with validation split
generator.flow(
images,
labels,
shuffle=False,
batch_size=10,
ignore_class_split=True,
subset="validation",
)
labels = np.concatenate(
[
np.zeros((int(len(images) / 4),)),
np.ones((int(len(images) / 4),)),
np.zeros((int(len(images) / 4),)),
np.ones((int(len(images) / 4),)),
]
)
seq = generator.flow(
images,
labels,
shuffle=False,
batch_size=10,
subset="validation",
)
_, y = seq[0]
self.assertLen(np.unique(y), 2)
seq = generator.flow(
images, labels, shuffle=False, batch_size=10, subset="training"
)
_, y2 = seq[0]
self.assertLen(np.unique(y2), 2)
with self.assertRaises(ValueError):
generator.flow(
images,
np.arange(images.shape[0]),
shuffle=False,
batch_size=3,
subset="foo",
)
def test_image_data_generator_with_split_value_error(self):
with self.assertRaises(ValueError):
image.ImageDataGenerator(validation_split=5)
def test_image_data_generator_invalid_data(self):
generator = image.ImageDataGenerator(
featurewise_center=True,
samplewise_center=True,
featurewise_std_normalization=True,
samplewise_std_normalization=True,
zca_whitening=True,
data_format="channels_last",
)
# Test fit with invalid data
with self.assertRaises(ValueError):
x = np.random.random((3, 10, 10))
generator.fit(x)
# Test flow with invalid data
with self.assertRaises(ValueError):
x = np.random.random((32, 10, 10))
generator.flow(np.arange(x.shape[0]))
def test_image_data_generator_fit(self):
generator = image.ImageDataGenerator(
featurewise_center=True,
samplewise_center=True,
featurewise_std_normalization=True,
samplewise_std_normalization=True,
zca_whitening=True,
rotation_range=90.0,
width_shift_range=0.1,
height_shift_range=0.1,
shear_range=0.5,
zoom_range=(0.2, 0.2),
channel_shift_range=0.0,
brightness_range=(1, 5),
fill_mode="nearest",
cval=0.5,
horizontal_flip=True,
vertical_flip=True,
interpolation_order=1,
data_format="channels_last",
)
x = np.random.random((32, 10, 10, 3))
generator.fit(x, augment=True)
# Test grayscale
x = np.random.random((32, 10, 10, 1))
generator.fit(x)
        # Test RGB
x = np.random.random((32, 10, 10, 3))
generator.fit(x)
# Test more samples than dims
x = np.random.random((32, 4, 4, 1))
generator.fit(x)
generator = image.ImageDataGenerator(
featurewise_center=True,
samplewise_center=True,
featurewise_std_normalization=True,
samplewise_std_normalization=True,
zca_whitening=True,
rotation_range=90.0,
width_shift_range=0.1,
height_shift_range=0.1,
shear_range=0.5,
zoom_range=(0.2, 0.2),
channel_shift_range=0.0,
brightness_range=(1, 5),
fill_mode="nearest",
cval=0.5,
horizontal_flip=True,
vertical_flip=True,
interpolation_order=1,
data_format="channels_first",
)
x = np.random.random((32, 10, 10, 3))
generator.fit(x, augment=True)
# Test grayscale
x = np.random.random((32, 1, 10, 10))
generator.fit(x)
        # Test RGB
x = np.random.random((32, 3, 10, 10))
generator.fit(x)
# Test more samples than dims
x = np.random.random((32, 1, 4, 4))
generator.fit(x)
def test_image_data_generator_flow(self):
tmpdir = self.create_tempdir()
all_test_images = _generate_test_images(include_rgba=True)
for test_images in all_test_images:
img_list = []
for im in test_images:
img_list.append(image_utils.img_to_array(im)[None, ...])
images = np.vstack(img_list)
dsize = images.shape[0]
generator = image.ImageDataGenerator(
featurewise_center=True,
samplewise_center=True,
featurewise_std_normalization=True,
samplewise_std_normalization=True,
zca_whitening=True,
rotation_range=90.0,
width_shift_range=0.1,
height_shift_range=0.1,
shear_range=0.5,
zoom_range=0.2,
channel_shift_range=0.0,
brightness_range=(1, 5),
fill_mode="nearest",
cval=0.5,
horizontal_flip=True,
vertical_flip=True,
interpolation_order=1,
)
generator.flow(
images,
np.arange(images.shape[0]),
shuffle=False,
save_to_dir=tmpdir.full_path,
batch_size=3,
)
generator.flow(
images,
np.arange(images.shape[0]),
shuffle=False,
sample_weight=np.arange(images.shape[0]) + 1,
save_to_dir=tmpdir.full_path,
batch_size=3,
)
# Test with `shuffle=True`
generator.flow(
images,
np.arange(images.shape[0]),
shuffle=True,
save_to_dir=tmpdir.full_path,
batch_size=3,
seed=42,
)
# Test without y
generator.flow(
images,
None,
shuffle=True,
save_to_dir=tmpdir.full_path,
batch_size=3,
)
# Test with a single miscellaneous input data array
x_misc1 = np.random.random(dsize)
generator.flow(
(images, x_misc1), np.arange(dsize), shuffle=False, batch_size=2
)
# Test with two miscellaneous inputs
x_misc2 = np.random.random((dsize, 3, 3))
generator.flow(
(images, [x_misc1, x_misc2]),
np.arange(dsize),
shuffle=False,
batch_size=2,
)
# Test cases with `y = None`
generator.flow(images, None, batch_size=3)
generator.flow((images, x_misc1), None, batch_size=3, shuffle=False)
generator.flow(
(images, [x_misc1, x_misc2]), None, batch_size=3, shuffle=False
)
generator = image.ImageDataGenerator(validation_split=0.2)
generator.flow(images, batch_size=3)
# Test some failure cases:
x_misc_err = np.random.random((dsize + 1, 3, 3))
with self.assertRaisesRegex(ValueError, "All of the arrays in"):
generator.flow(
(images, x_misc_err), np.arange(dsize), batch_size=3
)
with self.assertRaisesRegex(
ValueError, r"`x` \(images tensor\) and `y` \(labels\)"
):
generator.flow(
(images, x_misc1), np.arange(dsize + 1), batch_size=3
)
# Test `flow` behavior as Sequence
generator.flow(
images,
np.arange(images.shape[0]),
shuffle=False,
save_to_dir=tmpdir.full_path,
batch_size=3,
)
# Test with `shuffle=True`
generator.flow(
images,
np.arange(images.shape[0]),
shuffle=True,
save_to_dir=tmpdir.full_path,
batch_size=3,
seed=123,
)
        # Test order_interpolation
labels = np.array(
[
[2, 2, 0, 2, 2],
[1, 3, 2, 3, 1],
[2, 1, 0, 1, 2],
[3, 1, 0, 2, 0],
[3, 1, 3, 2, 1],
]
)
label_generator = image.ImageDataGenerator(
rotation_range=90.0, interpolation_order=0
)
label_generator.flow(x=labels[np.newaxis, ..., np.newaxis], seed=123)
def test_valid_args(self):
with self.assertRaises(ValueError):
image.ImageDataGenerator(brightness_range=0.1)
def test_batch_standardize(self):
all_test_images = _generate_test_images(include_rgba=True)
# ImageDataGenerator.standardize should work on batches
for test_images in all_test_images:
img_list = []
for im in test_images:
img_list.append(image_utils.img_to_array(im)[None, ...])
images = np.vstack(img_list)
generator = image.ImageDataGenerator(
featurewise_center=True,
samplewise_center=True,
featurewise_std_normalization=True,
samplewise_std_normalization=True,
zca_whitening=True,
rotation_range=90.0,
width_shift_range=0.1,
height_shift_range=0.1,
shear_range=0.5,
zoom_range=0.2,
channel_shift_range=0.0,
brightness_range=(1, 5),
fill_mode="nearest",
cval=0.5,
horizontal_flip=True,
vertical_flip=True,
)
generator.fit(images, augment=True)
transformed = np.copy(images)
for i, im in enumerate(transformed):
transformed[i] = generator.random_transform(im)
transformed = generator.standardize(transformed)
def test_deterministic_transform(self):
generator = image.ImageDataGenerator(
rotation_range=90, fill_mode="constant"
)
x = np.random.random((32, 32, 3))
self.assertAllClose(
generator.apply_transform(x, {"flip_vertical": True}), x[::-1, :, :]
)
self.assertAllClose(
generator.apply_transform(x, {"flip_horizontal": True}),
x[:, ::-1, :],
)
x = np.ones((3, 3, 3))
x_rotated = np.array(
[
[[0.0, 0.0, 0.0], [1.0, 1.0, 1.0], [0.0, 0.0, 0.0]],
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
[[0.0, 0.0, 0.0], [1.0, 1.0, 1.0], [0.0, 0.0, 0.0]],
]
)
self.assertAllClose(
generator.apply_transform(x, {"theta": 45}), x_rotated
)
def test_random_transforms(self):
x = np.random.random((2, 28, 28))
# Test get_random_transform with predefined seed
seed = 1
generator = image.ImageDataGenerator(
rotation_range=90.0,
width_shift_range=0.1,
height_shift_range=0.1,
shear_range=0.5,
zoom_range=0.2,
channel_shift_range=0.1,
brightness_range=(1, 5),
horizontal_flip=True,
vertical_flip=True,
)
transform_dict = generator.get_random_transform(x.shape, seed)
transform_dict2 = generator.get_random_transform(x.shape, seed * 2)
self.assertNotEqual(transform_dict["theta"], 0)
self.assertNotEqual(transform_dict["theta"], transform_dict2["theta"])
self.assertNotEqual(transform_dict["tx"], 0)
self.assertNotEqual(transform_dict["tx"], transform_dict2["tx"])
self.assertNotEqual(transform_dict["ty"], 0)
self.assertNotEqual(transform_dict["ty"], transform_dict2["ty"])
self.assertNotEqual(transform_dict["shear"], 0)
self.assertNotEqual(transform_dict["shear"], transform_dict2["shear"])
self.assertNotEqual(transform_dict["zx"], 0)
self.assertNotEqual(transform_dict["zx"], transform_dict2["zx"])
self.assertNotEqual(transform_dict["zy"], 0)
self.assertNotEqual(transform_dict["zy"], transform_dict2["zy"])
self.assertNotEqual(transform_dict["channel_shift_intensity"], 0)
self.assertNotEqual(
transform_dict["channel_shift_intensity"],
transform_dict2["channel_shift_intensity"],
)
self.assertNotEqual(transform_dict["brightness"], 0)
self.assertNotEqual(
transform_dict["brightness"], transform_dict2["brightness"]
)
# Test get_random_transform without any randomness
generator = image.ImageDataGenerator()
transform_dict = generator.get_random_transform(x.shape, seed)
self.assertEqual(transform_dict["theta"], 0)
self.assertEqual(transform_dict["tx"], 0)
self.assertEqual(transform_dict["ty"], 0)
self.assertEqual(transform_dict["shear"], 0)
self.assertEqual(transform_dict["zx"], 1)
self.assertEqual(transform_dict["zy"], 1)
self.assertIsNone(transform_dict["channel_shift_intensity"], None)
self.assertIsNone(transform_dict["brightness"], None)
def test_fit_rescale(self):
all_test_images = _generate_test_images(include_rgba=True)
rescale = 1.0 / 255
for test_images in all_test_images:
img_list = []
for im in test_images:
img_list.append(image_utils.img_to_array(im)[None, ...])
images = np.vstack(img_list)
# featurewise_center test
generator = image.ImageDataGenerator(
rescale=rescale, featurewise_center=True, dtype="float64"
)
generator.fit(images)
batch = generator.flow(images, batch_size=8).next()
self.assertLess(abs(np.mean(batch)), 1e-6)
# featurewise_std_normalization test
generator = image.ImageDataGenerator(
rescale=rescale,
featurewise_center=True,
featurewise_std_normalization=True,
dtype="float64",
)
generator.fit(images)
batch = generator.flow(images, batch_size=8).next()
self.assertLess(abs(np.mean(batch)), 1e-6)
self.assertLess(abs(1 - np.std(batch)), 1e-5)
# zca_whitening test
generator = image.ImageDataGenerator(
rescale=rescale,
featurewise_center=True,
zca_whitening=True,
dtype="float64",
)
generator.fit(images)
batch = generator.flow(images, batch_size=8).next()
batch = np.reshape(
batch,
(
batch.shape[0],
batch.shape[1] * batch.shape[2] * batch.shape[3],
),
)
# Y * Y_T = n * I, where Y = W * X
identity = np.dot(batch, batch.T) / batch.shape[0]
            self.assertTrue(
                (
                    np.abs(identity - np.identity(identity.shape[0])) < 1e-6
                ).all()
            )
@test_utils.run_v2_only
class TestAffineTransformations(test_combinations.TestCase):
def test_random_transforms(self):
x = np.random.random((2, 28, 28))
self.assertEqual(image.random_rotation(x, 45).shape, (2, 28, 28))
self.assertEqual(image.random_shift(x, 1, 1).shape, (2, 28, 28))
self.assertEqual(image.random_shear(x, 20).shape, (2, 28, 28))
self.assertEqual(image.random_channel_shift(x, 20).shape, (2, 28, 28))
def test_deterministic_transform(self):
x = np.ones((3, 3, 3))
x_rotated = np.array(
[
[[0.0, 0.0, 0.0], [1.0, 1.0, 1.0], [0.0, 0.0, 0.0]],
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
[[0.0, 0.0, 0.0], [1.0, 1.0, 1.0], [0.0, 0.0, 0.0]],
]
)
self.assertAllClose(
image.apply_affine_transform(
x,
theta=45,
row_axis=0,
col_axis=1,
channel_axis=2,
fill_mode="constant",
),
x_rotated,
)
def test_matrix_center(self):
x = np.expand_dims(
np.array(
[
[0, 1],
[0, 0],
]
),
-1,
)
x_rotated90 = np.expand_dims(
np.array(
[
[1, 0],
[0, 0],
]
),
-1,
)
self.assertAllClose(
image.apply_affine_transform(
x, theta=90, row_axis=0, col_axis=1, channel_axis=2
),
x_rotated90,
)
def test_translation(self):
x = np.array(
[
[0, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 0],
]
)
x_up = np.array(
[
[0, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
]
)
x_dn = np.array(
[
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 1, 0, 0],
]
)
x_left = np.array(
[
[0, 0, 0, 0],
[1, 0, 0, 0],
[0, 0, 0, 0],
]
)
x_right = np.array(
[
[0, 0, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 0],
]
)
# Channels first
x_test = np.expand_dims(x, 0)
# Horizontal translation
self.assertAllEqual(
x_left, np.squeeze(image.apply_affine_transform(x_test, tx=1))
)
self.assertAllEqual(
x_right, np.squeeze(image.apply_affine_transform(x_test, tx=-1))
)
# change axes: x<->y
self.assertAllEqual(
x_left,
np.squeeze(
image.apply_affine_transform(
x_test, ty=1, row_axis=2, col_axis=1
)
),
)
self.assertAllEqual(
x_right,
np.squeeze(
image.apply_affine_transform(
x_test, ty=-1, row_axis=2, col_axis=1
)
),
)
# Vertical translation
self.assertAllEqual(
x_up, np.squeeze(image.apply_affine_transform(x_test, ty=1))
)
self.assertAllEqual(
x_dn, np.squeeze(image.apply_affine_transform(x_test, ty=-1))
)
# change axes: x<->y
self.assertAllEqual(
x_up,
np.squeeze(
image.apply_affine_transform(
x_test, tx=1, row_axis=2, col_axis=1
)
),
)
self.assertAllEqual(
x_dn,
np.squeeze(
image.apply_affine_transform(
x_test, tx=-1, row_axis=2, col_axis=1
)
),
)
# Channels last
x_test = np.expand_dims(x, -1)
# Horizontal translation
self.assertAllEqual(
x_left,
np.squeeze(
image.apply_affine_transform(
x_test, tx=1, row_axis=0, col_axis=1, channel_axis=2
)
),
)
self.assertAllEqual(
x_right,
np.squeeze(
image.apply_affine_transform(
x_test, tx=-1, row_axis=0, col_axis=1, channel_axis=2
)
),
)
# change axes: x<->y
self.assertAllEqual(
x_left,
np.squeeze(
image.apply_affine_transform(
x_test, ty=1, row_axis=1, col_axis=0, channel_axis=2
)
),
)
self.assertAllEqual(
x_right,
np.squeeze(
image.apply_affine_transform(
x_test, ty=-1, row_axis=1, col_axis=0, channel_axis=2
)
),
)
# Vertical translation
self.assertAllEqual(
x_up,
np.squeeze(
image.apply_affine_transform(
x_test, ty=1, row_axis=0, col_axis=1, channel_axis=2
)
),
)
self.assertAllEqual(
x_dn,
np.squeeze(
image.apply_affine_transform(
x_test, ty=-1, row_axis=0, col_axis=1, channel_axis=2
)
),
)
# change axes: x<->y
self.assertAllEqual(
x_up,
np.squeeze(
image.apply_affine_transform(
x_test, tx=1, row_axis=1, col_axis=0, channel_axis=2
)
),
)
self.assertAllEqual(
x_dn,
np.squeeze(
image.apply_affine_transform(
x_test, tx=-1, row_axis=1, col_axis=0, channel_axis=2
)
),
)
def test_random_zoom(self):
x = np.random.random((2, 28, 28))
self.assertEqual(image.random_zoom(x, (5, 5)).shape, (2, 28, 28))
self.assertAllClose(x, image.random_zoom(x, (1, 1)))
def test_random_zoom_error(self):
with self.assertRaises(ValueError):
image.random_zoom(0, zoom_range=[0])
def test_random_brightness_error(self):
with self.assertRaises(ValueError):
image.random_brightness(0, [0])
def test_random_brightness_scale(self):
img = np.ones((1, 1, 3)) * 128
zeros = np.zeros((1, 1, 3))
must_be_128 = image.random_brightness(img, [1, 1], False)
self.assertAllEqual(img, must_be_128)
must_be_0 = image.random_brightness(img, [1, 1], True)
self.assertAllEqual(zeros, must_be_0)
def test_random_brightness_scale_outside_range_positive(self):
img = np.ones((1, 1, 3)) * 1024
zeros = np.zeros((1, 1, 3))
must_be_1024 = image.random_brightness(img, [1, 1], False)
self.assertAllEqual(img, must_be_1024)
must_be_0 = image.random_brightness(img, [1, 1], True)
self.assertAllEqual(zeros, must_be_0)
def test_random_brightness_scale_outside_range_negative(self):
img = np.ones((1, 1, 3)) * -1024
zeros = np.zeros((1, 1, 3))
must_be_neg_1024 = image.random_brightness(img, [1, 1], False)
self.assertAllEqual(img, must_be_neg_1024)
must_be_0 = image.random_brightness(img, [1, 1], True)
self.assertAllEqual(zeros, must_be_0)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/preprocessing/image_test.py/0 | {
"file_path": "tf-keras/tf_keras/preprocessing/image_test.py",
"repo_id": "tf-keras",
"token_count": 44625
} | 200 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TF-Keras metrics serialization."""
import os
import shutil
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import tf_keras as keras
from tf_keras import layers
from tf_keras import metrics
from tf_keras.optimizers import legacy as optimizer_legacy
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
from tf_keras.utils import custom_object_scope
try:
import h5py
except ImportError:
h5py = None
# Custom metric
class MyMeanAbsoluteError(metrics.MeanMetricWrapper):
def __init__(self, name="my_mae", dtype=None):
super().__init__(_my_mae, name, dtype=dtype)
# Custom metric function
def _my_mae(y_true, y_pred):
return keras.backend.mean(tf.abs(y_pred - y_true), axis=-1)
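# Commented sketch (illustrative; not exercised directly by the tests below):
# either form above can be passed straight to `Model.compile`, e.g.
#
#   model.compile("sgd", loss="mae", metrics=[MyMeanAbsoluteError(), _my_mae])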
def _get_multi_io_model():
inp_1 = layers.Input(shape=(1,), name="input_1")
inp_2 = layers.Input(shape=(1,), name="input_2")
d = test_utils.Bias(name="output")
out_1 = d(inp_1)
out_2 = d(inp_2)
return keras.Model([inp_1, inp_2], [out_1, out_2])
@test_combinations.run_all_keras_modes
@parameterized.named_parameters(
dict(testcase_name="string", value=["mae"]),
dict(testcase_name="built_in_fn", value=[metrics.mae]),
dict(testcase_name="built_in_class", value=[metrics.MeanAbsoluteError]),
dict(testcase_name="custom_fn", value=[_my_mae]),
dict(testcase_name="custom_class", value=[MyMeanAbsoluteError]),
dict(
testcase_name="list_of_built_in_fn_and_list",
value=[metrics.mae, [metrics.mae]],
),
dict(
testcase_name="list_of_built_in_class_and_list",
value=[metrics.MeanAbsoluteError, [metrics.MeanAbsoluteError]],
),
dict(
testcase_name="list_of_custom_fn_and_list", value=[_my_mae, [_my_mae]]
),
dict(
testcase_name="list_of_custom_class_and_list",
value=[MyMeanAbsoluteError, [MyMeanAbsoluteError]],
),
dict(
testcase_name="list_of_lists_of_custom_fns",
value=[[_my_mae], [_my_mae, "mae"]],
),
dict(
testcase_name="list_of_lists_of_custom_classes",
value=[[MyMeanAbsoluteError], [MyMeanAbsoluteError, "mae"]],
),
dict(
testcase_name="dict_of_list_of_string",
value={
"output": ["mae"],
"output_1": ["mae"],
},
),
dict(
testcase_name="dict_of_list_of_built_in_fn",
value={
"output": [metrics.mae],
"output_1": [metrics.mae],
},
),
dict(
testcase_name="dict_of_list_of_built_in_class",
value={
"output": [metrics.MeanAbsoluteError],
"output_1": [metrics.MeanAbsoluteError],
},
),
dict(
testcase_name="dict_of_list_of_custom_fn",
value={
"output": [_my_mae],
"output_1": [_my_mae],
},
),
dict(
testcase_name="dict_of_list_of_custom_class",
value={
"output": [MyMeanAbsoluteError],
"output_1": [MyMeanAbsoluteError],
},
),
dict(
testcase_name="dict_of_string",
value={
"output": "mae",
"output_1": "mae",
},
),
dict(
testcase_name="dict_of_built_in_fn",
value={
"output": metrics.mae,
"output_1": metrics.mae,
},
),
dict(
testcase_name="dict_of_built_in_class",
value={
"output": metrics.MeanAbsoluteError,
"output_1": metrics.MeanAbsoluteError,
},
),
dict(
testcase_name="dict_of_custom_fn",
value={"output": _my_mae, "output_1": _my_mae},
),
dict(
testcase_name="dict_of_custom_class",
value={
"output": MyMeanAbsoluteError,
"output_1": MyMeanAbsoluteError,
},
),
)
class MetricsSerialization(test_combinations.TestCase):
def setUp(self):
        super().setUp()
tmpdir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, tmpdir)
self.model_filename = os.path.join(tmpdir, "tmp_model_metric.h5")
self.x = np.array([[0.0], [1.0], [2.0]], dtype="float32")
self.y = np.array([[0.5], [2.0], [3.5]], dtype="float32")
self.w = np.array([1.25, 0.5, 1.25], dtype="float32")
def test_serializing_model_with_metric_with_custom_object_scope(
self, value
):
def get_instance(x):
if isinstance(x, str):
return x
if isinstance(x, type) and issubclass(x, metrics.Metric):
return x()
return x
metric_input = tf.nest.map_structure(get_instance, value)
weighted_metric_input = tf.nest.map_structure(get_instance, value)
with custom_object_scope(
{
"MyMeanAbsoluteError": MyMeanAbsoluteError,
"_my_mae": _my_mae,
"Bias": test_utils.Bias,
}
):
model = _get_multi_io_model()
model.compile(
optimizer_legacy.gradient_descent.SGD(0.1),
"mae",
metrics=metric_input,
weighted_metrics=weighted_metric_input,
run_eagerly=test_utils.should_run_eagerly(),
)
history = model.fit(
[self.x, self.x],
[self.y, self.y],
batch_size=3,
epochs=3,
sample_weight=[self.w, self.w],
)
# Assert training.
self.assertAllClose(history.history["loss"], [2.0, 1.6, 1.2], 1e-3)
eval_results = model.evaluate(
[self.x, self.x],
[self.y, self.y],
sample_weight=[self.w, self.w],
)
if h5py is None:
return
model.save(self.model_filename)
loaded_model = keras.models.load_model(self.model_filename)
loaded_model.predict([self.x, self.x])
loaded_eval_results = loaded_model.evaluate(
[self.x, self.x],
[self.y, self.y],
sample_weight=[self.w, self.w],
)
# Assert all evaluation results are the same.
self.assertAllClose(eval_results, loaded_eval_results, 1e-9)
def test_serializing_model_with_metric_with_custom_objects(self, value):
def get_instance(x):
if isinstance(x, str):
return x
if isinstance(x, type) and issubclass(x, metrics.Metric):
return x()
return x
metric_input = tf.nest.map_structure(get_instance, value)
weighted_metric_input = tf.nest.map_structure(get_instance, value)
model = _get_multi_io_model()
model.compile(
optimizer_legacy.gradient_descent.SGD(0.1),
"mae",
metrics=metric_input,
weighted_metrics=weighted_metric_input,
run_eagerly=test_utils.should_run_eagerly(),
)
history = model.fit(
[self.x, self.x],
[self.y, self.y],
batch_size=3,
epochs=3,
sample_weight=[self.w, self.w],
)
# Assert training.
self.assertAllClose(history.history["loss"], [2.0, 1.6, 1.2], 1e-3)
eval_results = model.evaluate(
[self.x, self.x], [self.y, self.y], sample_weight=[self.w, self.w]
)
if h5py is None:
return
model.save(self.model_filename)
loaded_model = keras.models.load_model(
self.model_filename,
custom_objects={
"MyMeanAbsoluteError": MyMeanAbsoluteError,
"_my_mae": _my_mae,
"Bias": test_utils.Bias,
},
)
loaded_model.predict([self.x, self.x])
loaded_eval_results = loaded_model.evaluate(
[self.x, self.x], [self.y, self.y], sample_weight=[self.w, self.w]
)
# Assert all evaluation results are the same.
self.assertAllClose(eval_results, loaded_eval_results, 1e-9)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/saving/legacy/metrics_serialization_test.py/0 | {
"file_path": "tf-keras/tf_keras/saving/legacy/metrics_serialization_test.py",
"repo_id": "tf-keras",
"token_count": 4432
} | 201 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes and functions implementing Metrics SavedModel serialization."""
import tensorflow.compat.v2 as tf
from tf_keras.saving import object_registration
from tf_keras.saving.legacy.saved_model import constants
from tf_keras.saving.legacy.saved_model import layer_serialization
class MetricSavedModelSaver(layer_serialization.LayerSavedModelSaver):
"""Metric serialization."""
@property
def object_identifier(self):
return constants.METRIC_IDENTIFIER
def _python_properties_internal(self):
metadata = dict(
class_name=object_registration.get_registered_name(type(self.obj)),
name=self.obj.name,
dtype=self.obj.dtype,
)
metadata.update(layer_serialization.get_serialized(self.obj))
if self.obj._build_input_shape is not None:
metadata["build_input_shape"] = self.obj._build_input_shape
return metadata
def _get_serialized_attributes_internal(self, unused_serialization_cache):
return (
dict(variables=tf.__internal__.tracking.wrap(self.obj.variables)),
# TODO(b/135550038): save functions to enable saving custom metrics.
{},
)
| tf-keras/tf_keras/saving/legacy/saved_model/metric_serialization.py/0 | {
"file_path": "tf-keras/tf_keras/saving/legacy/saved_model/metric_serialization.py",
"repo_id": "tf-keras",
"token_count": 634
} | 202 |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for pickling / deepcopying of TF-Keras Models."""
import copy
import pickle
import numpy as np
import tensorflow.compat.v2 as tf
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
@test_utils.run_v2_only
class TestPickleProtocol(test_combinations.TestCase):
"""Tests pickle protocol support."""
@test_combinations.run_with_all_model_types
@test_combinations.parameterized.named_parameters(
("copy", copy.copy),
("deepcopy", copy.deepcopy),
*(
(
f"pickle_protocol_level_{protocol}",
                # Bind `protocol` at definition time via a default argument;
                # a bare closure would late-bind to the loop's final value.
                lambda model, protocol=protocol: pickle.loads(
                    pickle.dumps(model, protocol=protocol)
                ),
)
for protocol in range(pickle.HIGHEST_PROTOCOL + 1)
),
)
def test_built_models(self, serializer):
"""Built models should be copyable and pickleable for all model
types."""
if not tf.__internal__.tf2.enabled():
self.skipTest(
"pickle model only available in v2 when tf format is used."
)
model = test_utils.get_small_mlp(
num_hidden=1, num_classes=2, input_dim=3
)
model.compile(optimizer="sgd", loss="sparse_categorical_crossentropy")
# train
x = np.random.random(size=(10, 3))
y = np.random.randint(low=0, high=2, size=(10,))
model.fit(x, y) # builds model
y1 = model.predict(x)
# roundtrip with training
model = serializer(model)
y2 = model.predict(x)
# check that the predictions are the same
self.assertAllClose(y1, y2)
# and that we can continue training
model.fit(x, y)
y3 = model.predict(x)
        # check that the predictions changed after further training
self.assertNotAllClose(y2, y3)
@test_combinations.run_with_all_model_types
@test_combinations.parameterized.named_parameters(
("copy", copy.copy),
("deepcopy", copy.deepcopy),
)
def test_unbuilt_models(self, serializer):
"""Unbuilt models should be copyable & deepcopyable for all model
types."""
if not tf.__internal__.tf2.enabled():
self.skipTest(
"pickle model only available in v2 when tf format is used."
)
original_model = test_utils.get_small_mlp(
num_hidden=1, num_classes=2, input_dim=3
)
# roundtrip without compiling or training
model = serializer(original_model)
# compile
model.compile(optimizer="sgd", loss="sparse_categorical_crossentropy")
if hasattr(model.optimizer, "_distribution_strategy"):
model.optimizer._distribution_strategy = None
# roundtrip compiled but not trained
model = serializer(model)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/saving/pickle_utils_test.py/0 | {
"file_path": "tf-keras/tf_keras/saving/pickle_utils_test.py",
"repo_id": "tf-keras",
"token_count": 1471
} | 203 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for automatic outside compilation for TF 2.0/Keras."""
import collections
import os
import numpy as np
import tensorflow.compat.v2 as tf
from absl import flags
from tf_keras import callbacks
from tf_keras.distribute import distribute_strategy_test
from tf_keras.engine import base_layer
from tf_keras.engine import sequential as sequential_model_lib
from tf_keras.engine import training
from tf_keras.layers import convolutional as conv_layer_lib
from tf_keras.layers import core as layer_lib
from tf_keras.layers import pooling as pool_layer_lib
from tf_keras.layers import regularization as regularization_layer_lib
from tf_keras.layers import reshaping as reshaping_layer_lib
from tf_keras.testing_infra import test_utils
# isort: off
from tensorboard.plugins.histogram import (
summary_v2 as histogram_summary_v2,
)
from tensorboard.plugins.image import (
summary_v2 as image_summary_v2,
)
from tensorboard.plugins.scalar import (
summary_v2 as scalar_summary_v2,
)
from tensorflow.python.eager.context import (
set_soft_device_placement,
)
from tensorflow.python.framework import (
test_util as tf_test_utils,
)
NUM_CLASSES = 4
FLAGS = flags.FLAGS
flags.DEFINE_string("tpu", "", "Name of TPU to connect to.")
flags.DEFINE_string("project", None, "Name of GCP project with TPU.")
flags.DEFINE_string("zone", None, "Name of GCP zone with TPU.")
def get_tpu_cluster_resolver():
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
tpu=FLAGS.tpu,
zone=FLAGS.zone,
project=FLAGS.project,
)
return resolver
def get_tpu_strategy():
resolver = get_tpu_cluster_resolver()
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
return tf.distribute.experimental.TPUStrategy(resolver)
class LayerForScalarSummary(base_layer.Layer):
"""A pass-through layer that only records scalar values to summary."""
def call(self, x):
# Add summary scalar using compat v2 implementation.
scalar_summary_v2.scalar("custom_scalar_summary_v2", tf.reduce_sum(x))
return x
class LayerForImageSummary(base_layer.Layer):
"""A pass-through layer that only records image values to summary."""
def call(self, x):
# Add summary image using compat v2 implementation.
image_summary_v2.image("custom_image_summary_v2", x)
return x
class LayerForHistogramSummary(base_layer.Layer):
"""A pass-through layer that records histogram values to summary."""
def call(self, x):
# Add summary histogram using compat v2 implementation.
histogram_summary_v2.histogram("custom_histogram_summary_v2", x)
return x
class CustomModel(training.Model):
"""Custom model with summary ops in model call definition."""
def __init__(self, name=None, enable_histograms=True):
super().__init__()
self._my_layers = [
layer_lib.Dense(
4096,
name="dense1",
kernel_initializer=tf.compat.v1.glorot_normal_initializer(
seed=0
),
use_bias=False,
),
layer_lib.Dense(
4,
name="dense2",
kernel_initializer=tf.compat.v1.glorot_normal_initializer(
seed=0
),
use_bias=False,
),
]
if enable_histograms:
self.histogram_summary_layer = LayerForHistogramSummary()
else:
self.histogram_summary_layer = (
base_layer.Layer()
) # no-op pass through
self.scalar_summary_layer = LayerForScalarSummary()
def call(self, x):
for layer in self._my_layers:
x = layer(x)
x = self.scalar_summary_layer(x)
return self.histogram_summary_layer(x)
def get_image_dataset():
inputs = np.zeros((10, 28, 28, 3), dtype=np.float32)
targets = np.zeros((10, NUM_CLASSES), dtype=np.float32)
dataset = tf.data.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
dataset = dataset.batch(10, drop_remainder=True)
return dataset
def mnist_model(input_shape, enable_histograms=True):
"""Creates a MNIST model."""
model = sequential_model_lib.Sequential()
# Adding custom pass-through layer to visualize input images.
model.add(LayerForImageSummary())
model.add(
conv_layer_lib.Conv2D(
32, kernel_size=(3, 3), activation="relu", input_shape=input_shape
)
)
model.add(conv_layer_lib.Conv2D(64, (3, 3), activation="relu"))
model.add(pool_layer_lib.MaxPooling2D(pool_size=(2, 2)))
model.add(regularization_layer_lib.Dropout(0.25))
model.add(reshaping_layer_lib.Flatten())
model.add(layer_lib.Dense(128, activation="relu"))
model.add(regularization_layer_lib.Dropout(0.5))
model.add(layer_lib.Dense(NUM_CLASSES, activation="softmax"))
# Adding custom pass-through layer for summary recording.
if enable_histograms:
model.add(LayerForHistogramSummary())
return model
@test_utils.run_v2_only
class AutoOutsideCompilationWithKerasTest(tf.test.TestCase):
def setUp(self):
super().setUp()
set_soft_device_placement(True)
self.summary_dir = self.get_temp_dir()
    def validate_recorded_summary_file(
        self, event_files, expected_event_counts
    ):
event_counts = collections.defaultdict(int)
for event_file in event_files:
for e in tf.compat.v1.train.summary_iterator(event_file):
for v in e.summary.value:
event_counts[v.tag] += 1
event_counts = dict(
event_counts
) # Avoid defaultdict type in repr below.
# Populate a count of 0 for tags that were expected but not found.
actual_event_counts = {
tag: event_counts.get(tag, 0) for tag in expected_event_counts
}
self.assertEqual(
expected_event_counts,
actual_event_counts,
msg="expected counts not found; all event counts: %r"
% event_counts,
)
def testV2SummaryWithKerasSequentialModel(self):
# Histogram summaries require the MLIR bridge; see
# b/178826597#comment107.
# TODO(https://github.com/tensorflow/tensorboard/issues/2885): remove
# this if histogram summaries are supported fully on non-MLIR bridge or
# non-MLIR bridge is no longer run.
enable_histograms = tf_test_utils.is_mlir_bridge_enabled()
strategy = get_tpu_strategy()
with strategy.scope():
model = mnist_model(
(28, 28, 3), enable_histograms=enable_histograms
)
model.compile("sgd", "mse")
dataset = get_image_dataset()
tensorboard_callback = callbacks.TensorBoard(
self.summary_dir, update_freq=2
)
model.fit(
dataset,
steps_per_epoch=10,
epochs=1,
callbacks=[tensorboard_callback],
)
event_files = tf.io.gfile.glob(
os.path.join(self.summary_dir, "train", "event*")
)
        # Since a total of 10 steps is run and summary ops are invoked
        # every 2 batches, we should see a total of 5 event logs for each
        # summary.
expected_event_counts = {
"sequential/layer_for_histogram_summary/custom_histogram_summary_v2": 5 # noqa: E501
if enable_histograms
else 0,
"sequential/layer_for_image_summary/custom_image_summary_v2": 5,
}
        self.validate_recorded_summary_file(
            event_files, expected_event_counts
        )
def testV2SummaryWithKerasSubclassedModel(self):
# Histogram summaries require the MLIR bridge; see
# b/178826597#comment107.
# TODO(https://github.com/tensorflow/tensorboard/issues/2885): remove
# this if histogram summaries are supported fully on non-MLIR bridge or
# non-MLIR bridge is no longer run.
enable_histograms = tf_test_utils.is_mlir_bridge_enabled()
strategy = get_tpu_strategy()
with strategy.scope():
model = CustomModel(enable_histograms=enable_histograms)
model.compile("sgd", "mse")
dataset = distribute_strategy_test.get_dataset(strategy)
tensorboard_callback = callbacks.TensorBoard(
self.summary_dir, update_freq=2
)
model.fit(
dataset,
steps_per_epoch=10,
epochs=1,
callbacks=[tensorboard_callback],
)
event_files = tf.io.gfile.glob(
os.path.join(self.summary_dir, "train", "event*")
)
        # Since a total of 10 steps is run and summary ops are invoked
        # every 2 batches, we should see a total of 5 event logs for each
        # summary.
expected_event_counts = {
(
"custom_model/layer_for_scalar_summary/"
"custom_scalar_summary_v2"
): 5,
(
"custom_model/layer_for_histogram_summary/"
"custom_histogram_summary_v2"
): 5
if enable_histograms
else 0,
}
        self.validate_recorded_summary_file(
            event_files, expected_event_counts
        )
def testSummaryWithCustomTrainingLoop(self):
strategy = get_tpu_strategy()
writer = tf.summary.create_file_writer(self.summary_dir)
with strategy.scope():
model = distribute_strategy_test.get_model()
model.compile("sgd", "mse")
@tf.function
def custom_function(dataset):
def _custom_step(features, labels):
del labels
logits = model(features)
with tf.summary.record_if(True), writer.as_default():
scalar_summary_v2.scalar(
"logits",
tf.reduce_sum(logits),
step=model.optimizer.iterations,
)
return logits
iterator = iter(dataset)
            # `next(iterator)` yields a (features, labels) tuple, which is
            # forwarded as the positional args of `_custom_step`.
            output = strategy.unwrap(
                strategy.run(_custom_step, args=next(iterator))
            )
return output
dataset = strategy.experimental_distribute_dataset(
distribute_strategy_test.get_dataset(strategy)
)
custom_function(dataset)
writer.close()
event_files = tf.io.gfile.glob(os.path.join(self.summary_dir, "event*"))
expected_event_counts = {
"logits": 1,
}
        self.validate_recorded_summary_file(
            event_files, expected_event_counts
        )
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/tests/automatic_outside_compilation_test.py/0 | {
"file_path": "tf-keras/tf_keras/tests/automatic_outside_compilation_test.py",
"repo_id": "tf-keras",
"token_count": 5303
} | 204 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for tensorflow.python.training.saver.py."""
import functools
import os
import tensorflow.compat.v2 as tf
from tf_keras.engine import training
from tf_keras.layers import core
# isort: off
from tensorflow.python.checkpoint import (
checkpoint as trackable_utils,
)
class NonLayerTrackable(tf.Module):
def __init__(self):
super().__init__()
self.a_variable = trackable_utils.add_variable(
self, name="a_variable", shape=[]
)
class MyModel(training.Model):
"""A concrete Model for testing."""
def __init__(self):
super().__init__()
self._named_dense = core.Dense(1, use_bias=True)
self._second = core.Dense(1, use_bias=False)
# We can still track Trackables which aren't Layers.
self._non_layer = NonLayerTrackable()
def call(self, values):
ret = self._second(self._named_dense(values))
return ret
class TrackableCompatibilityTests(tf.test.TestCase):
def _initialized_model(self):
input_value = tf.constant([[3.0]])
model = MyModel()
optimizer = tf.compat.v1.train.AdamOptimizer(0.001)
optimizer_step = tf.compat.v1.train.get_or_create_global_step()
root_trackable = tf.train.Checkpoint(
optimizer=optimizer, model=model, optimizer_step=optimizer_step
)
train_op = optimizer.minimize(
functools.partial(model, input_value), global_step=optimizer_step
)
self.evaluate(trackable_utils.gather_initializers(root_trackable))
self.evaluate(train_op)
# A regular variable, a slot variable, and a non-slot Optimizer variable
# with known values to check when loading.
self.evaluate(model._named_dense.bias.assign([1.0]))
self.evaluate(
optimizer.get_slot(var=model._named_dense.bias, name="m").assign(
[2.0]
)
)
beta1_power, _ = optimizer._get_beta_accumulators()
self.evaluate(beta1_power.assign(3.0))
return root_trackable
def _set_sentinels(self, root_trackable):
self.evaluate(root_trackable.model._named_dense.bias.assign([101.0]))
self.evaluate(
root_trackable.optimizer.get_slot(
var=root_trackable.model._named_dense.bias, name="m"
).assign([102.0])
)
beta1_power, _ = root_trackable.optimizer._get_beta_accumulators()
self.evaluate(beta1_power.assign(103.0))
def _check_sentinels(self, root_trackable):
self.assertAllEqual(
[1.0], self.evaluate(root_trackable.model._named_dense.bias)
)
self.assertAllEqual(
[2.0],
self.evaluate(
root_trackable.optimizer.get_slot(
var=root_trackable.model._named_dense.bias, name="m"
)
),
)
beta1_power, _ = root_trackable.optimizer._get_beta_accumulators()
self.assertAllEqual(3.0, self.evaluate(beta1_power))
def testLoadFromObjectBasedGraph(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
save_graph = tf.Graph()
with save_graph.as_default(), self.session(graph=save_graph) as sess:
root = self._initialized_model()
object_saver = tf.train.Checkpoint(root=root)
save_path = object_saver.save(file_prefix=checkpoint_prefix)
# An incompatible object-based checkpoint to check error messages
var = tf.Variable(1.0, name="a")
self.evaluate(var.initializer)
second_saver = tf.train.Checkpoint(v=var)
second_path = second_saver.save(
file_prefix=os.path.join(checkpoint_directory, "second")
)
restore_graph = tf.Graph()
with restore_graph.as_default(), self.session(
graph=restore_graph
) as sess:
root = self._initialized_model()
self._set_sentinels(root)
saver = tf.compat.v1.train.Saver()
saver.restore(sess=sess, save_path=save_path)
self._check_sentinels(root)
before_second_restore_ops = restore_graph.get_operations()
# Test that multiple restores do not pollute the graph
saver.restore(sess=sess, save_path=save_path)
self.assertEqual(
before_second_restore_ops, restore_graph.get_operations()
)
with self.assertRaisesRegex(
tf.errors.NotFoundError, "Could not find some variables"
):
saver.restore(sess=sess, save_path=second_path)
def testLoadFromObjectBasedEager(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
save_graph = tf.Graph()
with save_graph.as_default(), self.session(graph=save_graph):
root = self._initialized_model()
object_saver = tf.train.Checkpoint(root=root)
save_path = object_saver.save(file_prefix=checkpoint_prefix)
with tf.__internal__.eager_context.eager_mode():
root = self._initialized_model()
self._set_sentinels(root)
saver = tf.compat.v1.train.Saver(
root.model.variables + root.optimizer.variables()
)
saver.restore(sess=None, save_path=save_path)
self._check_sentinels(root)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/tests/saver_test.py/0 | {
"file_path": "tf-keras/tf_keras/tests/saver_test.py",
"repo_id": "tf-keras",
"token_count": 2756
} | 205 |
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Public TF-Keras utilities."""
# isort: off
# Serialization related
from tf_keras.saving.serialization_lib import deserialize_keras_object
from tf_keras.saving.serialization_lib import serialize_keras_object
from tf_keras.saving.object_registration import CustomObjectScope
from tf_keras.saving.object_registration import custom_object_scope
from tf_keras.saving.object_registration import get_custom_objects
from tf_keras.saving.object_registration import get_registered_name
from tf_keras.saving.object_registration import register_keras_serializable
# Dataset related
from tf_keras.utils.audio_dataset import audio_dataset_from_directory
from tf_keras.utils.text_dataset import text_dataset_from_directory
from tf_keras.utils.timeseries_dataset import timeseries_dataset_from_array
from tf_keras.utils.image_dataset import image_dataset_from_directory
from tf_keras.utils.dataset_utils import split_dataset
# Sequence related
from tf_keras.utils.data_utils import GeneratorEnqueuer
from tf_keras.utils.data_utils import OrderedEnqueuer
from tf_keras.utils.data_utils import Sequence
from tf_keras.utils.data_utils import SequenceEnqueuer
# Image related
from tf_keras.utils.image_utils import array_to_img
from tf_keras.utils.image_utils import img_to_array
from tf_keras.utils.image_utils import load_img
from tf_keras.utils.image_utils import save_img
# Python utils
from tf_keras.utils.tf_utils import set_random_seed
from tf_keras.utils.generic_utils import Progbar
from tf_keras.utils.data_utils import get_file
# Preprocessing utils
from tf_keras.utils.feature_space import FeatureSpace
# Internal
from tf_keras.utils.layer_utils import get_source_inputs
from tf_keras.utils.layer_utils import warmstart_embedding_matrix
# Deprecated
from tf_keras.utils.np_utils import normalize
from tf_keras.utils.np_utils import to_categorical
from tf_keras.utils.np_utils import to_ordinal
from tf_keras.utils.data_utils import pad_sequences
# Evaluation related
from tf_keras.utils.sidecar_evaluator import SidecarEvaluator
from tf_keras.utils.sidecar_evaluator import SidecarEvaluatorModelExport
# Timed Thread
from tf_keras.utils.timed_threads import TimedThread
# Visualization related
from tf_keras.utils.vis_utils import model_to_dot
from tf_keras.utils.vis_utils import plot_model
| tf-keras/tf_keras/utils/__init__.py/0 | {
"file_path": "tf-keras/tf_keras/utils/__init__.py",
"repo_id": "tf-keras",
"token_count": 901
} | 206 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python utilities required by TF-Keras."""
import binascii
import codecs
import importlib
import marshal
import os
import re
import sys
import time
import types as python_types
import numpy as np
import tensorflow.compat.v2 as tf
from tf_keras.utils import io_utils
from tf_keras.utils import tf_inspect
# isort: off
from tensorflow.python.util.tf_export import keras_export
def func_dump(func):
"""Serializes a user defined function.
Args:
func: the function to serialize.
Returns:
A tuple `(code, defaults, closure)`.
"""
if os.name == "nt":
raw_code = marshal.dumps(func.__code__).replace(b"\\", b"/")
code = codecs.encode(raw_code, "base64").decode("ascii")
else:
raw_code = marshal.dumps(func.__code__)
code = codecs.encode(raw_code, "base64").decode("ascii")
defaults = func.__defaults__
if func.__closure__:
closure = tuple(c.cell_contents for c in func.__closure__)
else:
closure = None
return code, defaults, closure
def func_load(code, defaults=None, closure=None, globs=None):
"""Deserializes a user defined function.
Args:
code: bytecode of the function.
defaults: defaults of the function.
closure: closure of the function.
globs: dictionary of global objects.
Returns:
A function object.
"""
if isinstance(code, (tuple, list)): # unpack previous dump
code, defaults, closure = code
if isinstance(defaults, list):
defaults = tuple(defaults)
def ensure_value_to_cell(value):
"""Ensures that a value is converted to a python cell object.
Args:
value: Any value that needs to be casted to the cell type
Returns:
A value wrapped as a cell object (see function "func_load")
"""
def dummy_fn():
value # just access it so it gets captured in .__closure__
cell_value = dummy_fn.__closure__[0]
if not isinstance(value, type(cell_value)):
return cell_value
return value
if closure is not None:
closure = tuple(ensure_value_to_cell(_) for _ in closure)
try:
raw_code = codecs.decode(code.encode("ascii"), "base64")
except (UnicodeEncodeError, binascii.Error):
raw_code = code.encode("raw_unicode_escape")
code = marshal.loads(raw_code)
if globs is None:
globs = globals()
return python_types.FunctionType(
code, globs, name=code.co_name, argdefs=defaults, closure=closure
)
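# Commented round-trip sketch for `func_dump`/`func_load`; the function below
# is illustrative, not part of this module:
#
#   def add_one(x, y=1):
#       return x + y
#
#   payload = func_dump(add_one)   # -> (code, defaults, closure)
#   restored = func_load(payload)  # the tuple form is unpacked internally
#   assert restored(2) == 3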
def has_arg(fn, name, accept_all=False):
"""Checks if a callable accepts a given keyword argument.
Args:
fn: Callable to inspect.
name: Check if `fn` can be called with `name` as a keyword argument.
accept_all: What to return if there is no parameter called `name` but
the function accepts a `**kwargs` argument.
Returns:
bool, whether `fn` accepts a `name` keyword argument.
"""
arg_spec = tf_inspect.getfullargspec(fn)
if accept_all and arg_spec.varkw is not None:
return True
return name in arg_spec.args or name in arg_spec.kwonlyargs
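# Commented sketch of `has_arg` semantics (the function is illustrative):
#
#   def g(a, b=2, **kwargs):
#       return a + b
#
#   has_arg(g, "b")                   # -> True: named parameter
#   has_arg(g, "c")                   # -> False: only reachable via **kwargs
#   has_arg(g, "c", accept_all=True)  # -> True: **kwargs is accepted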
@keras_export("keras.utils.Progbar")
class Progbar:
"""Displays a progress bar.
Args:
target: Total number of steps expected, None if unknown.
width: Progress bar width on screen.
verbose: Verbosity mode, 0 (silent), 1 (verbose), 2 (semi-verbose)
stateful_metrics: Iterable of string names of metrics that should *not*
be averaged over time. Metrics in this list will be displayed as-is.
All others will be averaged by the progbar before display.
interval: Minimum visual progress update interval (in seconds).
unit_name: Display name for step counts (usually "step" or "sample").
"""
def __init__(
self,
target,
width=30,
verbose=1,
interval=0.05,
stateful_metrics=None,
unit_name="step",
):
self.target = target
self.width = width
self.verbose = verbose
self.interval = interval
self.unit_name = unit_name
if stateful_metrics:
self.stateful_metrics = set(stateful_metrics)
else:
self.stateful_metrics = set()
self._dynamic_display = (
(hasattr(sys.stdout, "isatty") and sys.stdout.isatty())
or "ipykernel" in sys.modules
or "posix" in sys.modules
or "PYCHARM_HOSTED" in os.environ
)
self._total_width = 0
self._seen_so_far = 0
# We use a dict + list to avoid garbage collection
# issues found in OrderedDict
self._values = {}
self._values_order = []
self._start = time.time()
self._last_update = 0
self._time_at_epoch_start = self._start
self._time_at_epoch_end = None
self._time_after_first_step = None
def update(self, current, values=None, finalize=None):
"""Updates the progress bar.
Args:
current: Index of current step.
values: List of tuples: `(name, value_for_last_step)`. If `name` is
in `stateful_metrics`, `value_for_last_step` will be displayed
as-is. Else, an average of the metric over time will be
displayed.
finalize: Whether this is the last update for the progress bar. If
`None`, uses `current >= self.target`. Defaults to `None`.
"""
if finalize is None:
if self.target is None:
finalize = False
else:
finalize = current >= self.target
values = values or []
for k, v in values:
if k not in self._values_order:
self._values_order.append(k)
if k not in self.stateful_metrics:
# In the case that progress bar doesn't have a target value in
# the first epoch, both on_batch_end and on_epoch_end will be
# called, which will cause 'current' and 'self._seen_so_far' to
# have the same value. Force the minimal value to 1 here,
# otherwise stateful_metric will be 0s.
value_base = max(current - self._seen_so_far, 1)
if k not in self._values:
self._values[k] = [v * value_base, value_base]
else:
self._values[k][0] += v * value_base
self._values[k][1] += value_base
else:
# Stateful metrics output a numeric value. This representation
# means "take an average from a single value" but keeps the
# numeric formatting.
self._values[k] = [v, 1]
self._seen_so_far = current
message = ""
now = time.time()
info = f" - {now - self._start:.0f}s"
if current == self.target:
self._time_at_epoch_end = now
if self.verbose == 1:
if now - self._last_update < self.interval and not finalize:
return
prev_total_width = self._total_width
if self._dynamic_display:
message += "\b" * prev_total_width
message += "\r"
else:
message += "\n"
if self.target is not None:
numdigits = int(np.log10(self.target)) + 1
bar = ("%" + str(numdigits) + "d/%d [") % (current, self.target)
prog = float(current) / self.target
prog_width = int(self.width * prog)
if prog_width > 0:
bar += "=" * (prog_width - 1)
if current < self.target:
bar += ">"
else:
bar += "="
bar += "." * (self.width - prog_width)
bar += "]"
else:
bar = "%7d/Unknown" % current
self._total_width = len(bar)
message += bar
time_per_unit = self._estimate_step_duration(current, now)
if self.target is None or finalize:
info += self._format_time(time_per_unit, self.unit_name)
else:
eta = time_per_unit * (self.target - current)
if eta > 3600:
eta_format = "%d:%02d:%02d" % (
eta // 3600,
(eta % 3600) // 60,
eta % 60,
)
elif eta > 60:
eta_format = "%d:%02d" % (eta // 60, eta % 60)
else:
eta_format = "%ds" % eta
info = f" - ETA: {eta_format}"
for k in self._values_order:
info += f" - {k}:"
if isinstance(self._values[k], list):
avg = np.mean(
self._values[k][0] / max(1, self._values[k][1])
)
if abs(avg) > 1e-3:
info += f" {avg:.4f}"
else:
info += f" {avg:.4e}"
else:
info += f" {self._values[k]}"
self._total_width += len(info)
if prev_total_width > self._total_width:
info += " " * (prev_total_width - self._total_width)
if finalize:
info += "\n"
message += info
io_utils.print_msg(message, line_break=False)
message = ""
elif self.verbose == 2:
if finalize:
numdigits = int(np.log10(self.target)) + 1
count = ("%" + str(numdigits) + "d/%d") % (current, self.target)
info = count + info
for k in self._values_order:
info += f" - {k}:"
avg = np.mean(
self._values[k][0] / max(1, self._values[k][1])
)
                    if abs(avg) > 1e-3:
info += f" {avg:.4f}"
else:
info += f" {avg:.4e}"
if self._time_at_epoch_end:
time_per_epoch = (
self._time_at_epoch_end - self._time_at_epoch_start
)
avg_time_per_step = time_per_epoch / self.target
self._time_at_epoch_start = now
self._time_at_epoch_end = None
info += " -" + self._format_time(time_per_epoch, "epoch")
info += " -" + self._format_time(
avg_time_per_step, self.unit_name
)
info += "\n"
message += info
io_utils.print_msg(message, line_break=False)
message = ""
self._last_update = now
def add(self, n, values=None):
self.update(self._seen_so_far + n, values)
def _format_time(self, time_per_unit, unit_name):
"""format a given duration to display to the user.
Given the duration, this function formats it in either milliseconds
or seconds and displays the unit (i.e. ms/step or s/epoch)
Args:
time_per_unit: the duration to display
unit_name: the name of the unit to display
Returns:
a string with the correctly formatted duration and units
"""
formatted = ""
if time_per_unit >= 1 or time_per_unit == 0:
formatted += f" {time_per_unit:.0f}s/{unit_name}"
elif time_per_unit >= 1e-3:
formatted += f" {time_per_unit * 1000.0:.0f}ms/{unit_name}"
else:
formatted += f" {time_per_unit * 1000000.0:.0f}us/{unit_name}"
return formatted
def _estimate_step_duration(self, current, now):
"""Estimate the duration of a single step.
Given the step number `current` and the corresponding time `now` this
function returns an estimate for how long a single step takes. If this
is called before one step has been completed (i.e. `current == 0`) then
zero is given as an estimate. The duration estimate ignores the duration
of the (assumed to be non-representative) first step for estimates when
more steps are available (i.e. `current>1`).
Args:
current: Index of current step.
now: The current time.
Returns: Estimate of the duration of a single step.
"""
if current:
# there are a few special scenarios here:
# 1) somebody is calling the progress bar without ever supplying
# step 1
# 2) somebody is calling the progress bar and supplies step one
# multiple times, e.g. as part of a finalizing call
# in these cases, we just fall back to the simple calculation
if self._time_after_first_step is not None and current > 1:
time_per_unit = (now - self._time_after_first_step) / (
current - 1
)
else:
time_per_unit = (now - self._start) / current
if current == 1:
self._time_after_first_step = now
return time_per_unit
else:
return 0
def _update_stateful_metrics(self, stateful_metrics):
self.stateful_metrics = self.stateful_metrics.union(stateful_metrics)
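# Minimal commented usage sketch for `Progbar`; the loop body and metric
# values are placeholders:
#
#   progbar = Progbar(target=100, unit_name="sample")
#   for step in range(100):
#       loss = 1.0 / (step + 1)  # stand-in for real work
#       progbar.update(step + 1, values=[("loss", loss)])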
def make_batches(size, batch_size):
"""Returns a list of batch indices (tuples of indices).
Args:
size: Integer, total size of the data to slice into batches.
batch_size: Integer, batch size.
Returns:
A list of tuples of array indices.
"""
num_batches = int(np.ceil(size / float(batch_size)))
return [
(i * batch_size, min(size, (i + 1) * batch_size))
for i in range(0, num_batches)
]
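# For example (commented sketch): `make_batches(10, 3)` returns
# [(0, 3), (3, 6), (6, 9), (9, 10)]; the final batch is truncated to fit.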
def slice_arrays(arrays, start=None, stop=None):
"""Slice an array or list of arrays.
This takes an array-like, or a list of
array-likes, and outputs:
- arrays[start:stop] if `arrays` is an array-like
- [x[start:stop] for x in arrays] if `arrays` is a list
Can also work on list/array of indices: `slice_arrays(x, indices)`
Args:
arrays: Single array or list of arrays.
start: can be an integer index (start index) or a list/array of indices
stop: integer (stop index); should be None if `start` was a list.
Returns:
A slice of the array(s).
Raises:
ValueError: If the value of start is a list and stop is not None.
"""
if arrays is None:
return [None]
if isinstance(start, list) and stop is not None:
raise ValueError(
"The stop argument has to be None if the value of start "
f"is a list. Received start={start}, stop={stop}"
)
elif isinstance(arrays, list):
if hasattr(start, "__len__"):
# hdf5 datasets only support list objects as indices
if hasattr(start, "shape"):
start = start.tolist()
return [None if x is None else x[start] for x in arrays]
return [
None
if x is None
else None
if not hasattr(x, "__getitem__")
else x[start:stop]
for x in arrays
]
else:
if hasattr(start, "__len__"):
if hasattr(start, "shape"):
start = start.tolist()
return arrays[start]
if hasattr(start, "__getitem__"):
return arrays[start:stop]
return [None]
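# Commented sketch of the two calling conventions (arrays are illustrative):
#
#   a = np.arange(10)
#   slice_arrays([a, a], start=2, stop=5)  # -> [a[2:5], a[2:5]]
#   slice_arrays([a, a], start=[0, 3])     # index list -> [a[[0, 3]], ...]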
def to_list(x):
"""Normalizes a list/tensor into a list.
If a tensor is passed, we return
a list of size 1 containing the tensor.
Args:
x: target object to be normalized.
Returns:
A list.
"""
if isinstance(x, list):
return x
return [x]
def to_snake_case(name):
intermediate = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
insecure = re.sub("([a-z])([A-Z])", r"\1_\2", intermediate).lower()
# If the class is private the name starts with "_" which is not secure
# for creating scopes. We prefix the name with "private" in this case.
if insecure[0] != "_":
return insecure
return "private" + insecure
def is_all_none(structure):
iterable = tf.nest.flatten(structure)
# We cannot use Python's `any` because the iterable may return Tensors.
for element in iterable:
if element is not None:
return False
return True
def check_for_unexpected_keys(name, input_dict, expected_values):
unknown = set(input_dict.keys()).difference(expected_values)
if unknown:
raise ValueError(
f"Unknown entries in {name} dictionary: {list(unknown)}. "
f"Only expected following keys: {expected_values}"
)
def validate_kwargs(
kwargs, allowed_kwargs, error_message="Keyword argument not understood:"
):
"""Checks that all keyword arguments are in the set of allowed keys."""
for kwarg in kwargs:
if kwarg not in allowed_kwargs:
raise TypeError(error_message, kwarg)
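# Commented sketch: a misspelled keyword surfaces as a TypeError, e.g.
#
#   validate_kwargs({"nmae": 0}, allowed_kwargs={"name", "dtype"})
#   # -> TypeError: ('Keyword argument not understood:', 'nmae')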
def default(method):
"""Decorates a method to detect overrides in subclasses."""
method._is_default = True
return method
def is_default(method):
"""Check if a method is decorated with the `default` wrapper."""
return getattr(method, "_is_default", False)
def populate_dict_with_module_objects(target_dict, modules, obj_filter):
for module in modules:
for name in dir(module):
obj = getattr(module, name)
if obj_filter(obj):
target_dict[name] = obj
class LazyLoader(python_types.ModuleType):
"""Lazily import a module, mainly to avoid pulling in large dependencies."""
def __init__(self, local_name, parent_module_globals, name):
self._local_name = local_name
self._parent_module_globals = parent_module_globals
super().__init__(name)
def _load(self):
"""Load the module and insert it into the parent's globals."""
# Import the target module and insert it into the parent's namespace
module = importlib.import_module(self.__name__)
self._parent_module_globals[self._local_name] = module
# Update this object's dict so that if someone keeps a reference to the
# LazyLoader, lookups are efficient (__getattr__ is only called on
# lookups that fail).
self.__dict__.update(module.__dict__)
return module
def __getattr__(self, item):
module = self._load()
return getattr(module, item)
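# Commented usage sketch (the module name is illustrative): the import is
# deferred until the first attribute lookup.
#
#   json = LazyLoader("json", globals(), "json")  # nothing imported yet
#   json.dumps({"a": 1})                          # triggers the real import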
| tf-keras/tf_keras/utils/generic_utils.py/0 | {
"file_path": "tf-keras/tf_keras/utils/generic_utils.py",
"repo_id": "tf-keras",
"token_count": 8957
} | 207 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for losses_utils."""
import tensorflow.compat.v2 as tf
from tf_keras.testing_infra import test_combinations
from tf_keras.utils import losses_utils
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class RemoveSqueezableTest(tf.test.TestCase):
"""Test remove_squeezable_dimensions"""
def test_ragged_3d_same_shape(self):
"""shape (2, (sequence={1, 2}), 3)"""
x = tf.ragged.constant([[[1, 2, 3]], [[4, 5, 6], [7, 8, 9]]])
rank = x.shape.ndims
x_p, _ = losses_utils.remove_squeezable_dimensions(x, x)
self.assertEqual(x_p.shape.ndims, rank)
def test_ragged_3d_4d_squeezable(self):
"""shapes:
x: (2, (sequence={1, 2}), 3)
y: (2, (sequence={1, 2}), 3, 1)
"""
x = tf.ragged.constant([[[1, 2, 3]], [[4, 5, 6], [7, 8, 9]]])
y = tf.expand_dims(x, axis=-1)
self.assertEqual(x.shape.ndims, 3)
self.assertEqual(y.shape.ndims, 4)
_, y_p = losses_utils.remove_squeezable_dimensions(x, y)
y_p.shape.assert_is_compatible_with(x.shape)
self.assertEqual(y_p.shape.ndims, 3)
x_p, _ = losses_utils.remove_squeezable_dimensions(y, x)
x_p.shape.assert_is_compatible_with(x.shape)
self.assertEqual(x_p.shape.ndims, 3)
def test_dense_2d_3d_squeezable(self):
x = tf.constant([[1, 2], [3, 4]])
y = tf.constant([[[1], [2]], [[3], [4]]])
_, y_p = losses_utils.remove_squeezable_dimensions(x, y)
y_p.shape.assert_is_compatible_with(x.shape)
self.assertEqual(y_p.shape.ndims, x.shape.ndims)
x_p, _ = losses_utils.remove_squeezable_dimensions(y, x)
x_p.shape.assert_is_compatible_with(x.shape)
class RemoveSqueezableTestGraphOnly(tf.test.TestCase):
"""Test remove_squeezable_dimensions (graph-mode only)."""
def test_placeholder(self):
"""Test dynamic rank tensors."""
with tf.Graph().as_default():
x = tf.compat.v1.placeholder_with_default(
[1.0, 2.0, 3.0], shape=None
)
y = tf.compat.v1.placeholder_with_default(
[[1.0], [2.0], [3.0]], shape=None
)
_, y_p = losses_utils.remove_squeezable_dimensions(x, y)
y_p.shape.assert_is_compatible_with(x.shape)
self.assertAllEqual(tf.shape(x), tf.shape(y_p))
x_p, _ = losses_utils.remove_squeezable_dimensions(y, x)
x_p.shape.assert_is_compatible_with(x.shape)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/utils/losses_utils_test.py/0 | {
"file_path": "tf-keras/tf_keras/utils/losses_utils_test.py",
"repo_id": "tf-keras",
"token_count": 1436
} | 208 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TF-Keras TF utils."""
from unittest.mock import MagicMock
from unittest.mock import patch
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import tf_keras as keras
from tf_keras.testing_infra import test_combinations
from tf_keras.utils import tf_utils
try:
import attr
except ImportError:
attr = None
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class TestIsSymbolicTensor(tf.test.TestCase, parameterized.TestCase):
def test_default_behavior(self):
if tf.executing_eagerly():
self.assertFalse(
tf_utils.is_symbolic_tensor(
tf.Variable(name="blah", initial_value=0.0)
)
)
self.assertFalse(
tf_utils.is_symbolic_tensor(tf.convert_to_tensor(0.0))
)
self.assertFalse(
tf_utils.is_symbolic_tensor(
tf.SparseTensor(
indices=[[0, 0], [1, 2]],
values=[1, 2],
dense_shape=[3, 4],
)
)
)
else:
self.assertTrue(
tf_utils.is_symbolic_tensor(
tf.Variable(name="blah", initial_value=0.0)
)
)
self.assertTrue(
tf_utils.is_symbolic_tensor(tf.convert_to_tensor(0.0))
)
self.assertTrue(
tf_utils.is_symbolic_tensor(
tf.SparseTensor(
indices=[[0, 0], [1, 2]],
values=[1, 2],
dense_shape=[3, 4],
)
)
)
def test_works_with_registered(self):
class CustomClass:
def value(self):
return tf.convert_to_tensor(42.0)
tf.register_tensor_conversion_function(
CustomClass, lambda value, **_: value.value()
)
tf_utils.register_symbolic_tensor_type(CustomClass)
if tf.executing_eagerly():
self.assertFalse(
tf_utils.is_symbolic_tensor(
tf.Variable(name="blah", initial_value=0.0)
)
)
self.assertFalse(
tf_utils.is_symbolic_tensor(tf.convert_to_tensor(0.0))
)
self.assertFalse(
tf_utils.is_symbolic_tensor(
tf.SparseTensor(
indices=[[0, 0], [1, 2]],
values=[1, 2],
dense_shape=[3, 4],
)
)
)
self.assertFalse(tf_utils.is_symbolic_tensor(CustomClass()))
else:
self.assertTrue(
tf_utils.is_symbolic_tensor(
tf.Variable(name="blah", initial_value=0.0)
)
)
self.assertTrue(
tf_utils.is_symbolic_tensor(tf.convert_to_tensor(0.0))
)
self.assertTrue(
tf_utils.is_symbolic_tensor(
tf.SparseTensor(
indices=[[0, 0], [1, 2]],
values=[1, 2],
dense_shape=[3, 4],
)
)
)
self.assertTrue(tf_utils.is_symbolic_tensor(CustomClass()))
def test_enables_nontensor_plumbing(self):
if tf.executing_eagerly():
self.skipTest("`compile` functionality changed.")
# Setup.
class Foo:
def __init__(self, input_):
self._input = input_
self.value = tf.convert_to_tensor([[42.0]])
@property
def dtype(self):
return self.value.dtype
tf.register_tensor_conversion_function(
Foo, lambda x, *args, **kwargs: x.value
)
tf_utils.register_symbolic_tensor_type(Foo)
class PlumbingLayer(keras.layers.Lambda):
def __init__(self, fn, **kwargs):
def _fn(*fargs, **fkwargs):
d = fn(*fargs, **fkwargs)
x = tf.convert_to_tensor(d)
d.shape = x.shape
d.get_shape = x.get_shape
return d, x
super().__init__(_fn, **kwargs)
self._enter_dunder_call = False
def __call__(self, inputs, *args, **kwargs):
self._enter_dunder_call = True
d, _ = super().__call__(inputs, *args, **kwargs)
self._enter_dunder_call = False
return d
def call(self, inputs, *args, **kwargs):
d, v = super().call(inputs, *args, **kwargs)
if self._enter_dunder_call:
return d, v
return d
# User-land.
model = keras.Sequential(
[
keras.layers.InputLayer((1,)),
PlumbingLayer(Foo), # Makes a `Foo` object.
]
)
# Let's ensure TF-Keras graph history is preserved by composing models.
model = keras.Model(model.inputs, model(model.outputs))
# Now we instantiate the model and verify we have a `Foo` object, not a
# `Tensor`.
y = model(tf.convert_to_tensor([[7.0]]))
self.assertIsInstance(y, Foo)
# Confirm that (custom) loss sees `Foo` instance, not Tensor.
obtained_prediction_box = [None]
def custom_loss(y_obs, y_pred):
del y_obs
obtained_prediction_box[0] = y_pred
return y_pred
# Apparently `compile` calls the loss function enough to trigger the
# side-effect.
model.compile("SGD", loss=custom_loss)
self.assertIsInstance(obtained_prediction_box[0], Foo)
class ConvertInnerNodeDataTest(tf.test.TestCase):
def test_convert_inner_node_data(self):
data = tf_utils.convert_inner_node_data(
(
tf_utils.ListWrapper(["l", 2, 3]),
tf_utils.ListWrapper(["l", 5, 6]),
)
)
self.assertEqual(data, (["l", 2, 3], ["l", 5, 6]))
data = tf_utils.convert_inner_node_data(
((["l", 2, 3], ["l", 5, 6])), wrap=True
)
self.assertTrue(
all(isinstance(ele, tf_utils.ListWrapper) for ele in data)
)
class AttrsTest(tf.test.TestCase):
def test_map_structure_with_atomic_accept_attr(self):
if attr is None:
self.skipTest("attr module is unavailable.")
@attr.s(frozen=True)
class Foo:
bar = attr.ib()
self.assertEqual(
Foo(2),
tf_utils.map_structure_with_atomic(
is_atomic_fn=lambda x: isinstance(x, int),
map_fn=lambda x: x + 1,
nested=Foo(1),
),
)
class TestIsRagged(tf.test.TestCase):
def test_is_ragged_return_true_for_ragged_tensor(self):
tensor = tf.RaggedTensor.from_row_splits(
values=[3, 1, 4, 1, 5, 9, 2, 6], row_splits=[0, 4, 4, 7, 8, 8]
)
self.assertTrue(tf_utils.is_ragged(tensor))
def test_is_ragged_return_false_for_list(self):
tensor = [1.0, 2.0, 3.0]
self.assertFalse(tf_utils.is_ragged(tensor))
class TestIsSparse(tf.test.TestCase):
def test_is_sparse_return_true_for_sparse_tensor(self):
tensor = tf.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4]
)
self.assertTrue(tf_utils.is_sparse(tensor))
def test_is_sparse_return_true_for_sparse_tensor_value(self):
tensor = tf.compat.v1.SparseTensorValue(
indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4]
)
self.assertTrue(tf_utils.is_sparse(tensor))
def test_is_sparse_return_false_for_list(self):
tensor = [1.0, 2.0, 3.0]
self.assertFalse(tf_utils.is_sparse(tensor))
class TestIsExtensionType(tf.test.TestCase):
def test_is_extension_type_return_true_for_ragged_tensor(self):
self.assertTrue(
tf_utils.is_extension_type(tf.ragged.constant([[1, 2], [3]]))
)
def test_is_extension_type_return_true_for_sparse_tensor(self):
self.assertTrue(
tf_utils.is_extension_type(tf.sparse.from_dense([[1, 2], [3, 4]]))
)
def test_is_extension_type_return_false_for_dense_tensor(self):
self.assertFalse(
tf_utils.is_extension_type(tf.constant([[1, 2], [3, 4]]))
)
def test_is_extension_type_return_false_for_list(self):
tensor = [1.0, 2.0, 3.0]
self.assertFalse(tf_utils.is_extension_type(tensor))
class TestIsTensorOrExtensionType(tf.test.TestCase):
def test_is_tensor_or_extension_type_return_true_for_ragged_tensor(self):
self.assertTrue(
tf_utils.is_tensor_or_extension_type(
tf.ragged.constant([[1, 2], [3]])
)
)
def test_is_tensor_or_extension_type_return_true_for_sparse_tensor(self):
self.assertTrue(
tf_utils.is_tensor_or_extension_type(
tf.sparse.from_dense([[1, 2], [3, 4]])
)
)
def test_is_tensor_or_extension_type_return_true_for_dense_tensor(self):
self.assertTrue(
tf_utils.is_tensor_or_extension_type(tf.constant([[1, 2], [3, 4]]))
)
def test_is_tensor_or_extension_type_return_true_for_custom_ext_types(self):
class DummyExtensionType(tf.experimental.ExtensionType):
...
self.assertTrue(
tf_utils.is_tensor_or_extension_type(DummyExtensionType())
)
def test_is_tensor_or_extension_type_return_false_for_list(self):
self.assertFalse(tf_utils.is_tensor_or_extension_type([1.0, 2.0, 3.0]))
@test_combinations.generate(test_combinations.combine(mode=["eager"]))
class TestConvertVariablesToTensors(tf.test.TestCase):
def test_convert_variables_to_tensors(self):
x = tf.Variable([1.0])
result = tf_utils.convert_variables_to_tensors(x)
self.assertIsInstance(result, tf.Tensor)
self.assertAllEqual(result, [1.0])
def test_convert_variables_in_list_to_tensors(self):
x = [tf.Variable([1.0]), tf.constant([2.0])]
result = tf_utils.convert_variables_to_tensors(x)
self.assertLen(result, 2)
self.assertIsInstance(result[0], tf.Tensor)
self.assertAllEqual(result[0], [1.0])
self.assertIs(result[1], x[1])
def test_convert_variables_in_composite_tensor_to_tensors(self):
class Spec(tf.TypeSpec):
value_type = property(lambda self: CompositeVariable)
def _serialize(self):
pass
def _component_specs(self):
pass
def _to_components(self, value):
return value.variables
def _from_components(self, variable_list):
return CompositeVariable(variable_list)
class CompositeVariable(tf.__internal__.CompositeTensor):
def __init__(self, variable_list):
self.variables = variable_list
@property
def _type_spec(self):
return Spec()
def _convert_variables_to_tensors(self):
self.variables = tf.nest.map_structure(
tf_utils.convert_variables_to_tensors, self.variables
)
return self
cv = CompositeVariable([tf.Variable([1.0])])
self.assertIsInstance(cv.variables[0], tf.Variable)
result = tf_utils.convert_variables_to_tensors(cv)
self.assertLen(result.variables, 1)
self.assertIsInstance(result.variables[0], tf.Tensor)
self.assertAllEqual(result.variables[0], [1.0])
class TestRandomSeedSetting(tf.test.TestCase):
def test_seeds(self):
if not tf.__internal__.tf2.enabled():
self.skipTest("set_random_seed() is only expected to work in tf2.")
def get_model_output():
model = keras.Sequential(
[
keras.layers.Dense(10),
keras.layers.Dropout(0.5),
keras.layers.Dense(10),
]
)
x = np.random.random((32, 10)).astype("float32")
ds = tf.data.Dataset.from_tensor_slices(x).shuffle(32).batch(16)
return model.predict(ds)
tf_utils.set_random_seed(42)
y1 = get_model_output()
tf_utils.set_random_seed(42)
y2 = get_model_output()
self.assertAllClose(y1, y2, atol=1e-6)
class CustomTypeSpec(tf.TypeSpec):
"""Stubbed-out custom type spec, for testing."""
def __init__(self, shape, dtype):
self.shape = tf.TensorShape(shape)
self.dtype = tf.dtypes.as_dtype(dtype)
def with_shape(self, new_shape):
return CustomTypeSpec(new_shape, self.dtype)
# Stub implementations for all the TypeSpec methods:
value_type = None
_to_components = lambda self, value: None
_from_components = lambda self, components: None
_component_specs = property(lambda self: None)
_serialize = lambda self: (self.shape, self.dtype)
class TestGetTensorSpec(parameterized.TestCase):
@parameterized.parameters(
[
(lambda: tf.constant([[1, 2]]), [1, 2]),
(tf.TensorSpec([8, 3], tf.int32), [8, 3]),
(tf.TensorSpec([8], tf.int32), [8]),
(tf.TensorSpec([], tf.int32), []),
(tf.TensorSpec(None, tf.int32), None),
(tf.RaggedTensorSpec([8, 3], tf.int32), [8, 3]),
(tf.SparseTensorSpec([8, 3], tf.int32), [8, 3]),
]
)
def test_without_dynamic_batch(self, t, expected_shape):
if callable(t):
t = t()
result = tf_utils.get_tensor_spec(t)
self.assertTrue(result.is_compatible_with(t))
if expected_shape is None:
self.assertIsNone(result.shape.rank)
else:
self.assertEqual(result.shape.as_list(), expected_shape)
@parameterized.parameters(
[
(lambda: tf.constant([[1, 2]]), [None, 2]),
(tf.TensorSpec([8, 3], tf.int32), [None, 3]),
(tf.TensorSpec([8], tf.int32), [None]),
(tf.TensorSpec([], tf.int32), []),
(tf.TensorSpec(None, tf.int32), None),
(tf.RaggedTensorSpec([8, 3], tf.int32), [None, 3]),
(tf.SparseTensorSpec([8, 3], tf.int32), [None, 3]),
]
)
def test_with_dynamic_batch(self, t, expected_shape):
if callable(t):
t = t()
result = tf_utils.get_tensor_spec(t, True)
self.assertTrue(result.is_compatible_with(t))
if expected_shape is None:
self.assertIsNone(result.shape.rank)
else:
self.assertEqual(result.shape.as_list(), expected_shape)
def test_with_keras_tensor_with_ragged_spec(self):
t = keras.engine.keras_tensor.KerasTensor(
tf.RaggedTensorSpec(shape=(None, None, 1))
)
self.assertIsInstance(tf_utils.get_tensor_spec(t), tf.RaggedTensorSpec)
class TestSyncToNumpyOrPythonType(parameterized.TestCase):
@parameterized.parameters(
[
(0.5,),
(b"string value",),
]
)
def test_types(self, value):
if not tf.executing_eagerly():
self.skipTest("`sync_to_numpy_or_python_type` only works in eager")
tensor = tf.constant(value)
self.assertEqual(tf_utils.sync_to_numpy_or_python_type(tensor), value)
class TestCanJitCompile(tf.test.TestCase):
def test_darwin_arm_xla(self):
with patch("platform.processor", MagicMock(return_value="arm")):
with patch("platform.system", MagicMock(return_value="Darwin")):
self.assertFalse(tf_utils.can_jit_compile())
def test_linux_xla(self):
with patch("platform.system", MagicMock(return_value="Linux")):
self.assertTrue(tf_utils.can_jit_compile())
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/utils/tf_utils_test.py/0 | {
"file_path": "tf-keras/tf_keras/utils/tf_utils_test.py",
"repo_id": "tf-keras",
"token_count": 8719
} | 209 |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
import pytest
import tensorflow as tf
from autokeras import test_utils
from autokeras.adapters import input_adapters
from autokeras.utils import data_utils
def test_structured_data_input_unsupported_type_error():
with pytest.raises(TypeError) as info:
adapter = input_adapters.StructuredDataAdapter()
adapter.adapt("unknown", batch_size=32)
assert "Unsupported type" in str(info.value)
def test_structured_data_input_transform_to_dataset():
x = tf.data.Dataset.from_tensor_slices(
pd.read_csv(test_utils.TRAIN_CSV_PATH).to_numpy().astype(str)
)
adapter = input_adapters.StructuredDataAdapter()
x = adapter.adapt(x, batch_size=32)
assert isinstance(x, tf.data.Dataset)
def test_image_input_adapter_transform_to_dataset():
x = test_utils.generate_data()
adapter = input_adapters.ImageAdapter()
assert isinstance(adapter.adapt(x, batch_size=32), tf.data.Dataset)
def test_image_input_unsupported_type():
x = "unknown"
adapter = input_adapters.ImageAdapter()
with pytest.raises(TypeError) as info:
x = adapter.adapt(x, batch_size=32)
assert "Expect the data to ImageInput to be numpy" in str(info.value)
def test_image_input_numerical():
x = np.array([[["unknown"]]])
adapter = input_adapters.ImageAdapter()
with pytest.raises(TypeError) as info:
x = adapter.adapt(x, batch_size=32)
assert "Expect the data to ImageInput to be numerical" in str(info.value)
def test_input_type_error():
x = "unknown"
adapter = input_adapters.InputAdapter()
with pytest.raises(TypeError) as info:
x = adapter.adapt(x, batch_size=32)
assert "Expect the data to Input to be numpy" in str(info.value)
def test_input_numerical():
x = np.array([[["unknown"]]])
adapter = input_adapters.InputAdapter()
with pytest.raises(TypeError) as info:
x = adapter.adapt(x, batch_size=32)
assert "Expect the data to Input to be numerical" in str(info.value)
def test_text_adapt_unbatched_dataset():
x = tf.data.Dataset.from_tensor_slices(np.array(["a b c", "b b c"]))
adapter = input_adapters.TextAdapter()
x = adapter.adapt(x, batch_size=32)
assert data_utils.dataset_shape(x).as_list() == [None]
assert isinstance(x, tf.data.Dataset)
def test_text_adapt_batched_dataset():
x = tf.data.Dataset.from_tensor_slices(np.array(["a b c", "b b c"])).batch(
32
)
adapter = input_adapters.TextAdapter()
x = adapter.adapt(x, batch_size=32)
assert data_utils.dataset_shape(x).as_list() == [None]
assert isinstance(x, tf.data.Dataset)
def test_text_adapt_np():
x = np.array(["a b c", "b b c"])
adapter = input_adapters.TextAdapter()
x = adapter.adapt(x, batch_size=32)
assert data_utils.dataset_shape(x).as_list() == [None]
assert isinstance(x, tf.data.Dataset)
def test_text_input_type_error():
x = "unknown"
adapter = input_adapters.TextAdapter()
with pytest.raises(TypeError) as info:
x = adapter.adapt(x, batch_size=32)
assert "Expect the data to TextInput to be numpy" in str(info.value)
def test_time_series_input_type_error():
x = "unknown"
adapter = input_adapters.TimeseriesAdapter()
with pytest.raises(TypeError) as info:
x = adapter.adapt(x, batch_size=32)
assert "Expect the data in TimeseriesInput to be numpy" in str(info.value)
def test_time_series_input_transform_df_to_dataset():
adapter = input_adapters.TimeseriesAdapter()
x = adapter.adapt(pd.DataFrame(np.random.rand(100, 32)), batch_size=32)
assert isinstance(x, tf.data.Dataset)
| autokeras/autokeras/adapters/input_adapters_test.py/0 | {
"file_path": "autokeras/autokeras/adapters/input_adapters_test.py",
"repo_id": "autokeras",
"token_count": 1599
} | 0 |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import keras_tuner
import tensorflow as tf
from keras_tuner.engine import hyperparameters
from tensorflow import keras
from tensorflow import nest
from autokeras import blocks
from autokeras import test_utils
def test_augment_build_return_tensor():
block = blocks.ImageAugmentation(rotation_factor=0.2)
outputs = block.build(
keras_tuner.HyperParameters(),
keras.Input(shape=(32, 32, 3), dtype=tf.float32),
)
assert len(nest.flatten(outputs)) == 1
def test_augment_build_with_translation_factor_range_return_tensor():
block = blocks.ImageAugmentation(translation_factor=(0, 0.1))
outputs = block.build(
keras_tuner.HyperParameters(),
keras.Input(shape=(32, 32, 3), dtype=tf.float32),
)
assert len(nest.flatten(outputs)) == 1
def test_augment_build_with_no_flip_return_tensor():
block = blocks.ImageAugmentation(vertical_flip=False, horizontal_flip=False)
outputs = block.build(
keras_tuner.HyperParameters(),
keras.Input(shape=(32, 32, 3), dtype=tf.float32),
)
assert len(nest.flatten(outputs)) == 1
def test_augment_build_with_vflip_only_return_tensor():
block = blocks.ImageAugmentation(vertical_flip=True, horizontal_flip=False)
outputs = block.build(
keras_tuner.HyperParameters(),
keras.Input(shape=(32, 32, 3), dtype=tf.float32),
)
assert len(nest.flatten(outputs)) == 1
def test_augment_build_with_zoom_factor_return_tensor():
block = blocks.ImageAugmentation(zoom_factor=0.1)
outputs = block.build(
keras_tuner.HyperParameters(),
keras.Input(shape=(32, 32, 3), dtype=tf.float32),
)
assert len(nest.flatten(outputs)) == 1
def test_augment_build_with_contrast_factor_return_tensor():
block = blocks.ImageAugmentation(contrast_factor=0.1)
outputs = block.build(
keras_tuner.HyperParameters(),
keras.Input(shape=(32, 32, 3), dtype=tf.float32),
)
assert len(nest.flatten(outputs)) == 1
def test_augment_deserialize_to_augment():
serialized_block = blocks.serialize(
blocks.ImageAugmentation(
zoom_factor=0.1,
contrast_factor=hyperparameters.Float("contrast_factor", 0.1, 0.5),
)
)
block = blocks.deserialize(serialized_block)
assert isinstance(block, blocks.ImageAugmentation)
assert block.zoom_factor == 0.1
assert isinstance(block.contrast_factor, hyperparameters.Float)
def test_augment_get_config_has_all_attributes():
block = blocks.ImageAugmentation()
config = block.get_config()
assert test_utils.get_func_args(blocks.ImageAugmentation.__init__).issubset(
config.keys()
)
def test_ngram_build_return_tensor():
block = blocks.TextToNgramVector()
outputs = block.build(
keras_tuner.HyperParameters(), keras.Input(shape=(1,), dtype=tf.string)
)
assert len(nest.flatten(outputs)) == 1
def test_ngram_build_with_ngrams_return_tensor():
block = blocks.TextToNgramVector(ngrams=2)
outputs = block.build(
keras_tuner.HyperParameters(), keras.Input(shape=(1,), dtype=tf.string)
)
assert len(nest.flatten(outputs)) == 1
def test_ngram_deserialize_to_ngram():
serialized_block = blocks.serialize(blocks.TextToNgramVector())
block = blocks.deserialize(serialized_block)
assert isinstance(block, blocks.TextToNgramVector)
def test_ngram_get_config_has_all_attributes():
block = blocks.TextToNgramVector()
config = block.get_config()
assert test_utils.get_func_args(blocks.TextToNgramVector.__init__).issubset(
config.keys()
)
def test_int_seq_build_return_tensor():
block = blocks.TextToIntSequence()
outputs = block.build(
keras_tuner.HyperParameters(), keras.Input(shape=(1,), dtype=tf.string)
)
assert len(nest.flatten(outputs)) == 1
def test_int_seq_build_with_seq_len_return_tensor():
block = blocks.TextToIntSequence(output_sequence_length=50)
outputs = block.build(
keras_tuner.HyperParameters(), keras.Input(shape=(1,), dtype=tf.string)
)
assert len(nest.flatten(outputs)) == 1
def test_int_seq_deserialize_to_int_seq():
serialized_block = blocks.serialize(blocks.TextToIntSequence())
block = blocks.deserialize(serialized_block)
assert isinstance(block, blocks.TextToIntSequence)
def test_int_seq_get_config_has_all_attributes():
block = blocks.TextToIntSequence()
config = block.get_config()
assert test_utils.get_func_args(blocks.TextToIntSequence.__init__).issubset(
config.keys()
)
def test_cat_to_num_build_return_tensor():
block = blocks.CategoricalToNumerical()
block.column_names = ["a"]
block.column_types = {"a": "num"}
outputs = block.build(
keras_tuner.HyperParameters(), keras.Input(shape=(1,), dtype=tf.string)
)
assert len(nest.flatten(outputs)) == 1
def test_cat_to_num_deserialize_to_cat_to_num():
serialized_block = blocks.serialize(blocks.CategoricalToNumerical())
block = blocks.deserialize(serialized_block)
assert isinstance(block, blocks.CategoricalToNumerical)
def test_cat_to_num_get_config_has_all_attributes():
block = blocks.CategoricalToNumerical()
config = block.get_config()
assert test_utils.get_func_args(
blocks.CategoricalToNumerical.__init__
).issubset(config.keys())
| autokeras/autokeras/blocks/preprocessing_test.py/0 | {
"file_path": "autokeras/autokeras/blocks/preprocessing_test.py",
"repo_id": "autokeras",
"token_count": 2285
} | 1 |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import keras_tuner
from tensorflow import keras
from autokeras.engine import serializable
from autokeras.utils import utils
class NamedHyperModel(keras_tuner.HyperModel, serializable.Serializable):
"""
# Arguments
name: String. The name of the HyperModel. If unspecified, it will be set
automatically with the class name.
"""
def __init__(self, name: str = None, **kwargs):
if not name:
prefix = self.__class__.__name__
name = prefix + "_" + str(keras.backend.get_uid(prefix))
name = utils.to_snake_case(name)
super().__init__(name=name, **kwargs)
def get_config(self):
"""Get the configuration of the preprocessor.
# Returns
A dictionary of configurations of the preprocessor.
"""
return {"name": self.name, "tunable": self.tunable}
| autokeras/autokeras/engine/named_hypermodel.py/0 | {
"file_path": "autokeras/autokeras/engine/named_hypermodel.py",
"repo_id": "autokeras",
"token_count": 503
} | 2 |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import tensorflow as tf
from tensorflow import keras
from autokeras import keras_layers as layer_module
def test_multi_cat_encode_strings_correctly(tmp_path):
x_train = np.array([["a", "ab", 2.1], ["b", "bc", 1.0], ["a", "bc", "nan"]])
layer = layer_module.MultiCategoryEncoding(
[layer_module.INT, layer_module.INT, layer_module.NONE]
)
dataset = tf.data.Dataset.from_tensor_slices(x_train).batch(32)
layer.adapt(tf.data.Dataset.from_tensor_slices(x_train).batch(32))
for data in dataset:
result = layer(data)
assert result[0][0] == result[2][0]
assert result[0][0] != result[1][0]
assert result[0][1] != result[1][1]
assert result[0][1] != result[2][1]
assert result[2][2] == 0
assert result.dtype == tf.float32
def test_model_save_load_output_same(tmp_path):
x_train = np.array([["a", "ab", 2.1], ["b", "bc", 1.0], ["a", "bc", "nan"]])
layer = layer_module.MultiCategoryEncoding(
encoding=[layer_module.INT, layer_module.INT, layer_module.NONE]
)
layer.adapt(tf.data.Dataset.from_tensor_slices(x_train).batch(32))
model = keras.Sequential([keras.Input(shape=(3,), dtype=tf.string), layer])
model.save(os.path.join(tmp_path, "model"))
model2 = keras.models.load_model(os.path.join(tmp_path, "model"))
assert np.array_equal(model.predict(x_train), model2.predict(x_train))
def test_init_multi_one_hot_encode():
layer_module.MultiCategoryEncoding(
encoding=[layer_module.ONE_HOT, layer_module.INT, layer_module.NONE]
)
# TODO: add more content when it is implemented
def test_call_multi_with_single_column_return_right_shape():
x_train = np.array([["a"], ["b"], ["a"]])
layer = layer_module.MultiCategoryEncoding(encoding=[layer_module.INT])
layer.adapt(tf.data.Dataset.from_tensor_slices(x_train).batch(32))
assert layer(x_train).shape == (3, 1)
def get_text_data():
train = np.array(
[
["This is a test example"],
["This is another text example"],
["Is this another example?"],
[""],
["Is this a long long long long long long example?"],
],
dtype=str,
)
test = np.array(
[
["This is a test example"],
["This is another text example"],
["Is this another example?"],
],
dtype=str,
)
y = np.random.rand(3, 1)
return train, test, y
def test_cast_to_float32_return_float32_tensor(tmp_path):
layer = layer_module.CastToFloat32()
tensor = layer(tf.constant(["0.3"], dtype=tf.string))
assert tf.float32 == tensor.dtype
def test_expand_last_dim_return_tensor_with_more_dims(tmp_path):
layer = layer_module.ExpandLastDim()
tensor = layer(tf.constant([0.1, 0.2], dtype=tf.float32))
assert 2 == len(tensor.shape.as_list())
| autokeras/autokeras/keras_layers_test.py/0 | {
"file_path": "autokeras/autokeras/keras_layers_test.py",
"repo_id": "autokeras",
"token_count": 1396
} | 3 |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import os
import numpy as np
import tensorflow as tf
from tensorflow import keras
import autokeras as ak
SEED = 5
COLUMN_NAMES = [
"sex",
"age",
"n_siblings_spouses",
"parch",
"fare",
"class",
"deck",
"embark_town",
"alone",
]
COLUMN_TYPES = {
"sex": "categorical",
"age": "numerical",
"n_siblings_spouses": "categorical",
"parch": "categorical",
"fare": "numerical",
"class": "categorical",
"deck": "categorical",
"embark_town": "categorical",
"alone": "categorical",
}
TRAIN_DATA_URL = "https://storage.googleapis.com/tf-datasets/titanic/train.csv"
TEST_DATA_URL = "https://storage.googleapis.com/tf-datasets/titanic/eval.csv"
TRAIN_CSV_PATH = keras.utils.get_file(
fname=os.path.basename(TRAIN_DATA_URL), origin=TRAIN_DATA_URL
)
TEST_CSV_PATH = keras.utils.get_file(
fname=os.path.basename(TEST_DATA_URL), origin=TEST_DATA_URL
)
def generate_data(num_instances=100, shape=(32, 32, 3), dtype="np"):
np.random.seed(SEED)
data = np.random.rand(*((num_instances,) + shape))
if data.dtype == np.float64:
data = data.astype(np.float32)
if dtype == "np":
return data
if dtype == "dataset":
return tf.data.Dataset.from_tensor_slices(data)
def generate_one_hot_labels(num_instances=100, num_classes=10, dtype="np"):
np.random.seed(SEED)
labels = np.random.randint(num_classes, size=num_instances)
data = keras.utils.to_categorical(labels, num_classes=num_classes)
if dtype == "np":
return data
if dtype == "dataset":
return tf.data.Dataset.from_tensor_slices(data).batch(32)
def generate_text_data(num_instances=100):
vocab = np.array(
[
["adorable", "clueless", "dirty", "odd", "stupid"],
["puppy", "car", "rabbit", "girl", "monkey"],
["runs", "hits", "jumps", "drives", "barfs"],
[
"crazily.",
"dutifully.",
"foolishly.",
"merrily.",
"occasionally.",
],
]
)
return np.array(
[
" ".join([vocab[j][np.random.randint(0, 5)] for j in range(4)])
for i in range(num_instances)
]
)
def generate_data_with_categorical(
num_instances=100,
num_numerical=10,
num_categorical=3,
num_classes=5,
dtype="np",
):
categorical_data = np.random.randint(
num_classes, size=(num_instances, num_categorical)
)
numerical_data = np.random.rand(num_instances, num_numerical)
data = np.concatenate((numerical_data, categorical_data), axis=1)
if data.dtype == np.float64:
data = data.astype(np.float32)
if dtype == "np":
return data
if dtype == "dataset":
return tf.data.Dataset.from_tensor_slices(data)
def build_graph():
keras.backend.clear_session()
image_input = ak.ImageInput(shape=(32, 32, 3))
image_input.batch_size = 32
image_input.num_samples = 1000
merged_outputs = ak.SpatialReduction()(image_input)
head = ak.ClassificationHead(num_classes=10, shape=(10,))
classification_outputs = head(merged_outputs)
return ak.graph.Graph(inputs=image_input, outputs=classification_outputs)
def get_func_args(func):
params = inspect.signature(func).parameters.keys()
return set(params) - set(["self", "args", "kwargs"])
def get_object_detection_data():
images = generate_data(num_instances=2, shape=(32, 32, 3))
bbox_0 = np.random.rand(3, 4)
class_id_0 = np.random.rand(
3,
)
bbox_1 = np.random.rand(5, 4)
class_id_1 = np.random.rand(
5,
)
labels = np.array(
[(bbox_0, class_id_0), (bbox_1, class_id_1)], dtype=object
)
return images, labels
| autokeras/autokeras/test_utils.py/0 | {
"file_path": "autokeras/autokeras/test_utils.py",
"repo_id": "autokeras",
"token_count": 1934
} | 4 |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import pytest
import tensorflow as tf
from tensorflow import keras
from autokeras import test_utils
from autokeras.utils import io_utils
IMG_DATA_DIR = os.path.join(
os.path.dirname(
keras.utils.get_file(
origin="https://storage.googleapis.com/"
+ "download.tensorflow.org/example_images/flower_photos.tgz",
fname="image_data",
extract=True,
)
),
"flower_photos",
)
def test_load_imdb_dataset():
data_dir = os.path.join(
os.path.dirname(
keras.utils.get_file(
fname="text_data",
origin="https://github.com/keras-team/autokeras/releases/download/1.0.19/aclImdb_v1.tar.gz", # noqa: E501
extract=True,
)
),
"aclImdb",
)
shutil.rmtree(os.path.join(data_dir, "train/unsup"))
dataset = io_utils.text_dataset_from_directory(
os.path.join(data_dir, "train"), max_length=20
)
for data in dataset:
assert data[0].dtype == tf.string
assert data[1].dtype == tf.string
break
def test_load_image_data():
dataset = io_utils.image_dataset_from_directory(
IMG_DATA_DIR,
image_size=(180, 180),
validation_split=0.2,
subset="training",
seed=test_utils.SEED,
)
val_dataset = io_utils.image_dataset_from_directory(
IMG_DATA_DIR,
image_size=(180, 180),
validation_split=0.2,
subset="validation",
seed=test_utils.SEED,
)
for data in dataset:
assert data[0].numpy().shape == (32, 180, 180, 3)
assert data[1].dtype == tf.string
break
for data in val_dataset:
assert data[0].numpy().shape == (32, 180, 180, 3)
assert data[1].dtype == tf.string
break
def test_load_image_data_raise_subset_error():
with pytest.raises(ValueError) as info:
io_utils.image_dataset_from_directory(
IMG_DATA_DIR,
image_size=(180, 180),
validation_split=0.2,
subset="abcd",
seed=test_utils.SEED,
)
assert "`subset` must be either" in str(info.value)
def test_load_image_data_raise_color_mode_error():
with pytest.raises(ValueError) as info:
io_utils.image_dataset_from_directory(
IMG_DATA_DIR, image_size=(180, 180), color_mode="abcd"
)
assert "`color_mode` must be one of" in str(info.value)
def test_load_image_data_rgba():
io_utils.image_dataset_from_directory(
IMG_DATA_DIR, image_size=(180, 180), color_mode="rgba"
)
def test_load_image_data_grey_scale():
io_utils.image_dataset_from_directory(
IMG_DATA_DIR, image_size=(180, 180), color_mode="grayscale"
)
def test_path_to_image():
img_dir = os.path.join(IMG_DATA_DIR, "roses")
assert isinstance(
io_utils.path_to_image(
os.path.join(img_dir, os.listdir(img_dir)[5]),
num_channels=3,
image_size=(180, 180),
interpolation="bilinear",
),
tf.Tensor,
)
| autokeras/autokeras/utils/io_utils_test.py/0 | {
"file_path": "autokeras/autokeras/utils/io_utils_test.py",
"repo_id": "autokeras",
"token_count": 1679
} | 5 |
help:
@cat Makefile
DATA?="${HOME}/Data"
GPUS?=all
DOCKER_FILE?=Dockerfile
DOCKER=docker
TF_VERSION=2.3.0
TEST=tests/
SRC?=$(shell dirname `pwd`)
build:
docker build -t autokeras --build-arg TF_VERSION=$(TF_VERSION) -f $(DOCKER_FILE) .
bash: build
$(DOCKER) run --gpus $(GPUS) -it -v $(SRC):/src/workspace -v $(DATA):/data autokeras bash
ipython: build
	$(DOCKER) run --gpus $(GPUS) -it -v $(SRC):/src/workspace -v $(DATA):/data autokeras ipython
notebook: build
	$(DOCKER) run --gpus $(GPUS) -it -v $(SRC):/src/workspace -v $(DATA):/data --net=host autokeras
test: build
	$(DOCKER) run --gpus $(GPUS) -it -v $(SRC):/src/workspace -v $(DATA):/data autokeras py.test $(TEST)
| autokeras/docker/Makefile/0 | {
"file_path": "autokeras/docker/Makefile",
"repo_id": "autokeras",
"token_count": 309
} | 6 |
<jupyter_start><jupyter_code>!pip install autokeras
import os
import numpy as np
import tensorflow as tf
from sklearn.datasets import load_files
import autokeras as ak<jupyter_output><empty_output><jupyter_text>To make this tutorial easy to follow, we just treat the IMDB dataset as a regression dataset. That is, we treat the prediction targets of the IMDB dataset, which are 0s and 1s, as numerical values, so that they can be directly used as regression targets. A Simple Example: The first step is to prepare your data. Here we use the [IMDB dataset](https://keras.io/datasets/imdb-movie-reviews-sentiment-classification) as an example.<jupyter_code>dataset = tf.keras.utils.get_file(
fname="aclImdb.tar.gz",
origin="http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz",
extract=True,
)
# set path to dataset
IMDB_DATADIR = os.path.join(os.path.dirname(dataset), "aclImdb")
classes = ["pos", "neg"]
train_data = load_files(
os.path.join(IMDB_DATADIR, "train"), shuffle=True, categories=classes
)
test_data = load_files(
os.path.join(IMDB_DATADIR, "test"), shuffle=False, categories=classes
)
x_train = np.array(train_data.data)
y_train = np.array(train_data.target)
x_test = np.array(test_data.data)
y_test = np.array(test_data.target)
print(x_train.shape) # (25000,)
print(y_train.shape) # (25000,)
print(x_train[0][:50]) # <START> this film was just brilliant casting <UNK><jupyter_output><empty_output><jupyter_text>The second step is to run the [TextRegressor](/text_regressor). As a quick demo, we set epochs to 2. You can also leave the epochs unspecified for an adaptive number of epochs.<jupyter_code># Initialize the text regressor.
reg = ak.TextRegressor(
overwrite=True, max_trials=10 # It tries 10 different models.
)
# Feed the text regressor with training data.
reg.fit(x_train, y_train, epochs=2)
# Predict with the best model.
predicted_y = reg.predict(x_test)
# Evaluate the best model with testing data.
print(reg.evaluate(x_test, y_test))<jupyter_output><empty_output><jupyter_text>Validation Data: By default, AutoKeras uses the last 20% of the training data as validation data. As shown in the example below, you can use `validation_split` to specify the percentage.<jupyter_code>reg.fit(
x_train,
y_train,
# Split the training data and use the last 15% as validation data.
validation_split=0.15,
)<jupyter_output><empty_output><jupyter_text>You can also use your own validation set instead of splitting it from the training data with `validation_data`.<jupyter_code>split = 5000
x_val = x_train[split:]
y_val = y_train[split:]
x_train = x_train[:split]
y_train = y_train[:split]
reg.fit(
x_train,
y_train,
epochs=2,
# Use your own validation set.
validation_data=(x_val, y_val),
)<jupyter_output><empty_output><jupyter_text>Customized Search Space: For advanced users, you may customize your search space by using [AutoModel](/auto_model/automodel-class) instead of [TextRegressor](/text_regressor). You can configure the [TextBlock](/block/textblock-class) for some high-level configurations, e.g., `vectorizer` for the type of text vectorization method to use. You can use 'sequence', which uses [TextToIntSequence](/block/texttointsequence-class) to convert the words to integers and [Embedding](/block/embedding-class) to embed the integer sequences, or you can use 'ngram', which uses [TextToNgramVector](/block/texttongramvector-class) to vectorize the sentences. You can also leave these arguments unspecified, so that the different choices are tuned automatically. See the following example for details.<jupyter_code>input_node = ak.TextInput()
output_node = ak.TextBlock(block_type="ngram")(input_node)
output_node = ak.RegressionHead()(output_node)
reg = ak.AutoModel(
inputs=input_node, outputs=output_node, overwrite=True, max_trials=1
)
reg.fit(x_train, y_train, epochs=2)<jupyter_output><empty_output><jupyter_text>The usage of [AutoModel](/auto_model/automodel-class) is similar to the [functional API](https://www.tensorflow.org/guide/keras/functional) of Keras. Basically, you are building a graph whose edges are blocks and whose nodes are intermediate outputs of blocks. You can add an edge from `input_node` to `output_node` with `output_node = ak.[some_block]([block_args])(input_node)`. You can also use more fine-grained blocks to customize the search space even further. See the following example.<jupyter_code>input_node = ak.TextInput()
output_node = ak.TextToIntSequence()(input_node)
output_node = ak.Embedding()(output_node)
# Use separable Conv layers in Keras.
output_node = ak.ConvBlock(separable=True)(output_node)
output_node = ak.RegressionHead()(output_node)
reg = ak.AutoModel(
inputs=input_node, outputs=output_node, overwrite=True, max_trials=1
)
reg.fit(x_train, y_train, epochs=2)<jupyter_output><empty_output><jupyter_text>Data Format: The AutoKeras TextRegressor is quite flexible about the data format. For the text, the input data should be one-dimensional. For the regression targets, it should be a vector of numerical values. AutoKeras accepts numpy.ndarray. We also support using the [tf.data.Dataset](https://www.tensorflow.org/api_docs/python/tf/data/Dataset?version=stable) format for the training data.<jupyter_code>train_set = tf.data.Dataset.from_tensor_slices(((x_train,), (y_train,))).batch(
32
)
test_set = tf.data.Dataset.from_tensor_slices(((x_test,), (y_test,))).batch(32)
reg = ak.TextRegressor(overwrite=True, max_trials=2)
# Feed the tensorflow Dataset to the regressor.
reg.fit(train_set, epochs=2)
# Predict with the best model.
predicted_y = reg.predict(test_set)
# Evaluate the best model with testing data.
print(reg.evaluate(test_set))<jupyter_output><empty_output> | autokeras/docs/ipynb/text_regression.ipynb/0 | {
"file_path": "autokeras/docs/ipynb/text_regression.ipynb",
"repo_id": "autokeras",
"token_count": 1951
} | 7 |
"""shell
pip install autokeras
"""
import pandas as pd
import tensorflow as tf
import autokeras as ak
"""
## A Simple Example
The first step is to prepare your data. Here we use the [Titanic
dataset](https://www.kaggle.com/c/titanic) as an example.
"""
TRAIN_DATA_URL = "https://storage.googleapis.com/tf-datasets/titanic/train.csv"
TEST_DATA_URL = "https://storage.googleapis.com/tf-datasets/titanic/eval.csv"
train_file_path = tf.keras.utils.get_file("train.csv", TRAIN_DATA_URL)
test_file_path = tf.keras.utils.get_file("eval.csv", TEST_DATA_URL)
"""
The second step is to run the
[StructuredDataClassifier](/structured_data_classifier).
As a quick demo, we set epochs to 10.
You can also leave the epochs unspecified for an adaptive number of epochs.
"""
# Initialize the structured data classifier.
clf = ak.StructuredDataClassifier(
overwrite=True, max_trials=3
) # It tries 3 different models.
# Feed the structured data classifier with training data.
clf.fit(
# The path to the train.csv file.
train_file_path,
# The name of the label column.
"survived",
epochs=10,
)
# Predict with the best model.
predicted_y = clf.predict(test_file_path)
# Evaluate the best model with testing data.
print(clf.evaluate(test_file_path, "survived"))
"""
## Data Format
The AutoKeras StructuredDataClassifier is quite flexible for the data format.
The example above shows how to use the CSV files directly. Besides CSV files,
it also supports numpy.ndarray, pandas.DataFrame or [tf.data.Dataset](
https://www.tensorflow.org/api_docs/python/tf/data/Dataset?version=stable). The
data should be two-dimensional with numerical or categorical values.
For the classification labels, AutoKeras accepts both plain labels, i.e. strings
or integers, and one-hot encoded labels, i.e. vectors of 0s and 1s. The
labels can be numpy.ndarray, pandas.DataFrame, or pandas.Series.
The following examples show how the data can be prepared with numpy.ndarray,
pandas.DataFrame, and tensorflow.data.Dataset.
"""
# x_train as pandas.DataFrame, y_train as pandas.Series
x_train = pd.read_csv(train_file_path)
print(type(x_train)) # pandas.DataFrame
y_train = x_train.pop("survived")
print(type(y_train)) # pandas.Series
# You can also use pandas.DataFrame for y_train.
y_train = pd.DataFrame(y_train)
print(type(y_train)) # pandas.DataFrame
# You can also use numpy.ndarray for x_train and y_train.
x_train = x_train.to_numpy()
y_train = y_train.to_numpy()
print(type(x_train)) # numpy.ndarray
print(type(y_train)) # numpy.ndarray
# Preparing testing data.
x_test = pd.read_csv(test_file_path)
y_test = x_test.pop("survived")
# It tries 3 different models.
clf = ak.StructuredDataClassifier(overwrite=True, max_trials=3)
# Feed the structured data classifier with training data.
clf.fit(x_train, y_train, epochs=10)
# Predict with the best model.
predicted_y = clf.predict(x_test)
# Evaluate the best model with testing data.
print(clf.evaluate(x_test, y_test))
"""
The following code shows how to convert numpy.ndarray to tf.data.Dataset.
"""
train_set = tf.data.Dataset.from_tensor_slices((x_train.astype(str), y_train))
test_set = tf.data.Dataset.from_tensor_slices(
(x_test.to_numpy().astype(str), y_test)
)
clf = ak.StructuredDataClassifier(overwrite=True, max_trials=3)
# Feed the tensorflow Dataset to the classifier.
clf.fit(train_set, epochs=10)
# Predict with the best model.
predicted_y = clf.predict(test_set)
# Evaluate the best model with testing data.
print(clf.evaluate(test_set))
"""
You can also specify the column names and types for the data as follows. The
`column_names` argument is optional if the training data already has column
names, e.g. a pandas.DataFrame or CSV file. Any column whose type is not
specified will be inferred from the training data.
"""
# Initialize the structured data classifier.
clf = ak.StructuredDataClassifier(
column_names=[
"sex",
"age",
"n_siblings_spouses",
"parch",
"fare",
"class",
"deck",
"embark_town",
"alone",
],
column_types={"sex": "categorical", "fare": "numerical"},
max_trials=10, # It tries 10 different models.
overwrite=True,
)
"""
## Validation Data
By default, AutoKeras uses the last 20% of the training data as validation
data. As
shown in the example below, you can use `validation_split` to specify the
percentage.
"""
clf.fit(
x_train,
y_train,
# Split the training data and use the last 15% as validation data.
validation_split=0.15,
epochs=10,
)
"""
You can also use your own validation set
instead of splitting it from the training data with `validation_data`.
"""
split = 500
x_val = x_train[split:]
y_val = y_train[split:]
x_train = x_train[:split]
y_train = y_train[:split]
clf.fit(
x_train,
y_train,
# Use your own validation set.
validation_data=(x_val, y_val),
epochs=10,
)
"""
## Customized Search Space
For advanced users, you may customize your search space by using
[AutoModel](/auto_model/#automodel-class) instead of
[StructuredDataClassifier](/structured_data_classifier). You can configure the
[StructuredDataBlock](/block/#structureddatablock-class) for some high-level
configurations, e.g., `categorical_encoding` for whether to use the
[CategoricalToNumerical](/block/#categoricaltonumerical-class). You can also
leave these arguments unspecified, so that the different choices are tuned
automatically. See the following example for details.
"""
input_node = ak.StructuredDataInput()
output_node = ak.StructuredDataBlock(categorical_encoding=True)(input_node)
output_node = ak.ClassificationHead()(output_node)
clf = ak.AutoModel(
inputs=input_node, outputs=output_node, overwrite=True, max_trials=3
)
clf.fit(x_train, y_train, epochs=10)
"""
The usage of [AutoModel](/auto_model/#automodel-class) is similar to the
[functional API](https://www.tensorflow.org/guide/keras/functional) of Keras.
Basically, you are building a graph whose edges are blocks and whose nodes are
intermediate outputs of blocks.
You can add an edge from `input_node` to `output_node` with
`output_node = ak.[some_block]([block_args])(input_node)`.
You can also use more fine-grained blocks to customize the search space even
further. See the following example.
"""
input_node = ak.StructuredDataInput()
output_node = ak.CategoricalToNumerical()(input_node)
output_node = ak.DenseBlock()(output_node)
output_node = ak.ClassificationHead()(output_node)
clf = ak.AutoModel(
inputs=input_node, outputs=output_node, overwrite=True, max_trials=1
)
clf.fit(x_train, y_train, epochs=1)
clf.predict(x_train)
"""
You can also export the best model found by AutoKeras as a Keras Model.
"""
model = clf.export_model()
model.summary()
print(x_train.dtype)
# numpy array in object (mixed type) is not supported.
# convert it to unicode.
model.predict(x_train.astype(str))
"""
## Reference
[StructuredDataClassifier](/structured_data_classifier),
[AutoModel](/auto_model/#automodel-class),
[StructuredDataBlock](/block/#structureddatablock-class),
[DenseBlock](/block/#denseblock-class),
[StructuredDataInput](/node/#structureddatainput-class),
[ClassificationHead](/block/#classificationhead-class),
[CategoricalToNumerical](/block/#categoricaltonumerical-class).
"""
| autokeras/docs/py/structured_data_classification.py/0 | {
"file_path": "autokeras/docs/py/structured_data_classification.py",
"repo_id": "autokeras",
"token_count": 2552
} | 8 |
<?xml version="1.0" standalone="no"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd"><svg id="logo" width="600" height="100" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<g fill="#d00000">
<g transform="translate(-58, -40) scale(0.8)">
<path d="M150 50 L150 174.8528137423857 L130 154.8528137423857 L130 70 "/>
<path d="M131 69 L73.4314575050762 126.5685424949238 L73.4314575050762 154.8528137423857 L131 97.2842712474619 "/>
<path d="M131 155.8528137423857 L105.04906208587143 129.90187582825715 L119.19119770960238 115.75974020452618 L131 127.5685424949238 "/>
<path d="M73.4314575050762 98.2842712474619 L73.4314575050762 70 L98.38239541920477 94.95093791412857 L84.24025979547382 109.09307353785952 "/>
<path d="M154.71404520791032 50 L154.71404520791032 174.8528137423857 L174.71404520791032 154.8528137423857 L174.71404520791032 70 "/>
<path d="M173.71404520791032 127.5685424949238 L231.28258770283412 70 L202.99831645537222 70 L173.71404520791032 99.2842712474619 "/>
<path d="M206.33164978870556 101.61760458079523 L231.28258770283412 126.5685424949238 L231.28258770283412 154.8528137423857 L192.1895141649746 115.75974020452618 "/>
</g>
<defs>
<style type="text/css">
@import url('https://fonts.googleapis.com/css2?family=Roboto');
</style>
</defs>
<g transform="translate(155, 85)">
<text font-size="95" style="font-family: 'Roboto', sans-serif;">
AutoKeras
</text>
</g>
</g>
</svg>
| autokeras/docs/templates/img/row_red.svg/0 | {
"file_path": "autokeras/docs/templates/img/row_red.svg",
"repo_id": "autokeras",
"token_count": 689
} | 9 |
"""
Run the following commands first
pip3 install git+https://github.com/keras-team/[email protected]
pip3 install autokeras==1.0.5
This script searches for a model for the wine dataset.
Source and description of data:
"""
import os
import pandas as pd
import tensorflow as tf
import autokeras as ak
dataset_url = (
"https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data"
)
# save data
data_file_path = tf.keras.utils.get_file(
fname=os.path.basename(dataset_url), origin=dataset_url
)
column_names = [
"Wine",
"Alcohol",
"Malic.acid",
"Ash",
"Acl",
"Mg",
"Phenols",
"Flavanoids",
"Nonflavanoid.phenols",
"Proanth",
"Color.int",
"Hue",
"OD",
"Proline",
]
feature_names = column_names[1:]
label_name = column_names[0] # Wine
data = pd.read_csv(data_file_path, header=0, names=column_names)
# Shuffling
data = data.sample(frac=1)
split_length = int(data.shape[0] * 0.8) # 141
# train and test
train_data = data.iloc[:split_length]
test_data = data.iloc[split_length:]
# Initialize the classifier.
clf = ak.StructuredDataClassifier(max_trials=5)
# Evaluate
clf.fit(x=train_data[feature_names], y=train_data[label_name])
print(
"Accuracy: {accuracy}".format(
accuracy=clf.evaluate(
x=test_data[feature_names], y=test_data[label_name]
)
)
)
| autokeras/examples/wine.py/0 | {
"file_path": "autokeras/examples/wine.py",
"repo_id": "autokeras",
"token_count": 590
} | 10 |
# Keras NLP
| Status | Proposed |
:-------------- |:---------------------------------------------------- |
| **Author(s)** | Zhenyu Tan ([email protected]), Mark Omernick ([email protected]), Francois Chollet ([email protected]), Hongkun Yu ([email protected])|
| **Updated** | 2020-09-11 |
## Objective
We aim at describing the scope of [keras-nlp](https://github.com/keras-team/keras-nlp), especially:
- What use cases `keras-nlp` should cover
- Boundaries between `keras-nlp` and [tensorflow addons](https://github.com/tensorflow/addons)
- Boundaries between `keras-nlp` and [tensorflow model garden](https://github.com/tensorflow/models)
- Boundaries between `keras-nlp` and [tf.keras](https://www.tensorflow.org/api_docs/python/tf/keras).
- Boundaries between `keras-nlp` and [tf.text](https://www.tensorflow.org/tutorials/tensorflow_text/intro).
## Motivation
Natural Language Processing (NLP) is a major application area for our users.
In recent years, Transformer-based models have become the foundation of many NLP workflows.
These workflows tend to reuse similar components, for which in some cases third-party packages
have been developed by the open-source community.
These third-party solutions are not always kept up to date or up to the same quality standards as core Keras.
They also raise the issue of API standardization.
To fix this, we want machine learning engineers to have access to a standard Keras-native,
optimized, and well-tested set of components to build their Transformer-based (and beyond) NLP workflows.
This provides key user benefits:
- The package would be first-party and thus always up to date with modern best practices.
- High code quality and testing standards and strict quality control: same level of trust as core Keras
- A shared API standard across the community
- Ability for the open-source community to build more advanced solutions *on top* of this package instead of reinventing it
- Ability for research scientists to benefit from subclassing and customizing base components to quickly test new research ideas
## Design Proposal
`keras-nlp` will include most standard Transformer-based modules, specifically:
- Keras layer components such as Transformer encoder and decoder blocks.
- Keras task components such as masked language, span labeler and named entity recognition.
- Tensorflow operations such as beam search.
- Keras optimizer utilities such as widely used learning rate schedules.
- Data loaders and preprocessing for different datasets, such as SQuAD and GLUE.
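To make the intended granularity concrete, the following is a purely
illustrative sketch of how such building blocks might compose in user code;
all `keras_nlp` names below are hypothetical placeholders, not a committed API
surface:

```python
import tensorflow as tf
from keras_nlp import layers as nlp_layers  # hypothetical import path

# A toy classifier assembled from reusable, Keras-native NLP components.
token_ids = tf.keras.Input(shape=(None,), dtype="int32")
x = tf.keras.layers.Embedding(input_dim=30522, output_dim=256)(token_ids)
x = nlp_layers.TransformerEncoder(num_heads=4, intermediate_dim=1024)(x)
outputs = tf.keras.layers.Dense(2)(x[:, 0, :])  # classify from the first token
model = tf.keras.Model(token_ids, outputs)
```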
### Success criteria for keras-nlp
- Reusable and standardized components that cover the above
- Easy-to-use API
- Models run on CPU/GPU/TPU seamlessly
- State of the art performance
- Models can be readily deployed to production
### Boundaries between keras-nlp and tf.text
- `tf.text` will contain all pre-processing operations, such as WordPiece Tokenizer, n-grams, that handles strings.
- `keras-nlp` will contain modeling components that cover workflows past the tokenization stage.
### Boundaries between keras-nlp and TensorFlow Addons
- Highly experimental modeling, layers, losses, etc, live in Addons (e.g. newly published research code).
- Components from Addons will graduate to Model Garden, given they get sufficient usage,
and given that they work on CPU/GPU/TPU. The API interface will remain experimental for a short time after graduation,
so as to leave us the option to make changes based on user feedback.
### Boundaries between keras-nlp and Model Garden
- End to end modeling workflow and model specific details live in Model Garden
- Model garden will re-use most of the building blocks from keras-nlp
- Components from Model Garden can graduate to keras-nlp, given they get sufficient usage,
and given that they work on CPU/GPU/TPU. The API interface should remain stable after graduation.
### Boundaries between keras-nlp and core Keras
- `keras-nlp` will contain NLP-specific components
(e.g. the `MultiHeadAttention` layer may be used outside of NLP, and thus is shipping in core Keras).
- Components from keras-nlp can graduate to Keras core, given their usage expands beyond
natural language processing.
## Dependencies
- TensorFlow version >= 2.4
- TensorFlow Datasets
## Backwards compatibility
We propose to guarantee major release backwards compatibility.
## Maintenance
The `keras-nlp` codebase will be primarily maintained by the Keras team at Google,
with help and contributions from the community. The codebase will be developed
on GitHub as part of the `keras-team` organization. The same process for tracking
issues and reviewing PRs will be used as for the core Keras repository.
## Performance Benchmark
We will set up Keras benchmark utilities to help users contribute to this repository.
Detailed design will be shared in a separate document (this document only focuses on scope).
## Questions and Discussion Topics
Please share any questions or suggestions.
| governance/rfcs/20200826-keras-nlp-scoping-design.md/0 | {
"file_path": "governance/rfcs/20200826-keras-nlp-scoping-design.md",
"repo_id": "governance",
"token_count": 1325
} | 11 |
"""MobileNet v2 models for Keras.
MobileNetV2 is a general architecture and can be used for multiple use cases.
Depending on the use case, it can use different input layer sizes and
different width factors. This allows different width models to reduce
the number of multiply-adds and thereby
reduce inference cost on mobile devices.
MobileNetV2 is very similar to the original MobileNet,
except that it uses inverted residual blocks with
bottlenecking features. It has a drastically lower
parameter count than the original MobileNet.
MobileNets support any input size greater
than 32 x 32, with larger image sizes
offering better performance.
The number of parameters and number of multiply-adds
can be modified by using the `alpha` parameter,
which increases/decreases the number of filters in each layer.
By altering the image size and `alpha` parameter,
all 22 models from the paper can be built, with ImageNet weights provided.
The paper demonstrates the performance of MobileNets using `alpha` values of
0.35, 0.5, 0.75, 1.0 (also called 100% MobileNet), 1.3, and 1.4.
For `alpha` values up to 1.0, weights for 5 different input image sizes
are provided (224, 192, 160, 128, and 96); for 1.3 and 1.4, only 224 is provided.
The following table describes the performance of
MobileNet on various input sizes:
------------------------------------------------------------------------
MACs stands for Multiply-Adds

Classification Checkpoint | MACs (M) | Parameters (M) | Top 1 Accuracy | Top 5 Accuracy
--------------------------|----------|----------------|----------------|---------------
| [mobilenet_v2_1.4_224] | 582 | 6.06 | 75.0 | 92.5 |
| [mobilenet_v2_1.3_224] | 509 | 5.34 | 74.4 | 92.1 |
| [mobilenet_v2_1.0_224] | 300 | 3.47 | 71.8 | 91.0 |
| [mobilenet_v2_1.0_192] | 221 | 3.47 | 70.7 | 90.1 |
| [mobilenet_v2_1.0_160] | 154 | 3.47 | 68.8 | 89.0 |
| [mobilenet_v2_1.0_128] | 99 | 3.47 | 65.3 | 86.9 |
| [mobilenet_v2_1.0_96] | 56 | 3.47 | 60.3 | 83.2 |
| [mobilenet_v2_0.75_224] | 209 | 2.61 | 69.8 | 89.6 |
| [mobilenet_v2_0.75_192] | 153 | 2.61 | 68.7 | 88.9 |
| [mobilenet_v2_0.75_160] | 107 | 2.61 | 66.4 | 87.3 |
| [mobilenet_v2_0.75_128] | 69 | 2.61 | 63.2 | 85.3 |
| [mobilenet_v2_0.75_96] | 39 | 2.61 | 58.8 | 81.6 |
| [mobilenet_v2_0.5_224] | 97 | 1.95 | 65.4 | 86.4 |
| [mobilenet_v2_0.5_192] | 71 | 1.95 | 63.9 | 85.4 |
| [mobilenet_v2_0.5_160] | 50 | 1.95 | 61.0 | 83.2 |
| [mobilenet_v2_0.5_128] | 32 | 1.95 | 57.7 | 80.8 |
| [mobilenet_v2_0.5_96] | 18 | 1.95 | 51.2 | 75.8 |
| [mobilenet_v2_0.35_224] | 59 | 1.66 | 60.3 | 82.9 |
| [mobilenet_v2_0.35_192] | 43 | 1.66 | 58.2 | 81.2 |
| [mobilenet_v2_0.35_160] | 30 | 1.66 | 55.7 | 79.1 |
| [mobilenet_v2_0.35_128] | 20 | 1.66 | 50.8 | 75.0 |
| [mobilenet_v2_0.35_96] | 11 | 1.66 | 45.5 | 70.4 |
The weights for all 22 models are obtained and translated from the
TensorFlow checkpoints found [here]
(https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/README.md).
# Reference
This file contains building code for MobileNetV2, based on
[MobileNetV2: Inverted Residuals and Linear Bottlenecks]
(https://arxiv.org/abs/1801.04381) (CVPR 2018)
Tests comparing this model to the existing Tensorflow model can be
found at [mobilenet_v2_keras]
(https://github.com/JonathanCMitchell/mobilenet_v2_keras)
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import os
import warnings
import numpy as np
from . import correct_pad
from . import get_submodules_from_kwargs
from . import imagenet_utils
from .imagenet_utils import decode_predictions
from .imagenet_utils import _obtain_input_shape
# TODO Change path to v1.1
BASE_WEIGHT_PATH = ('https://github.com/JonathanCMitchell/mobilenet_v2_keras/'
'releases/download/v1.1/')
backend = None
layers = None
models = None
keras_utils = None
def preprocess_input(x, **kwargs):
"""Preprocesses a numpy array encoding a batch of images.
# Arguments
x: a 4D numpy array consists of RGB values within [0, 255].
# Returns
Preprocessed array.
"""
return imagenet_utils.preprocess_input(x, mode='tf', **kwargs)
# This function is taken from the original tf repo.
# It ensures that all layers have a channel number that is divisible by 8
# It can be seen here:
# https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
def _make_divisible(v, divisor, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
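# Illustration (added for exposition): `_make_divisible` rounds a scaled
# channel count to the nearest multiple of `divisor`, bumping the result up
# one step whenever plain rounding would drop it more than 10% below the
# requested value.
assert _make_divisible(32 * 1.0, 8) == 32    # already a multiple of 8
assert _make_divisible(32 * 0.75, 8) == 24   # 24.0 rounds to 24
assert _make_divisible(32 * 0.35, 8) == 16   # 11.2 -> 8 would lose >10%, so bump to 16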
def MobileNetV2(input_shape=None,
alpha=1.0,
include_top=True,
weights='imagenet',
input_tensor=None,
pooling=None,
classes=1000,
**kwargs):
"""Instantiates the MobileNetV2 architecture.
# Arguments
input_shape: optional shape tuple, to be specified if you would
like to use a model with an input image resolution that is not
(224, 224, 3).
It should have exactly 3 input channels.
You can also omit this option if you would like
to infer input_shape from an input_tensor.
If you choose to include both input_tensor and input_shape, then
input_shape will be used if they match; if the shapes
do not match, an error is raised.
E.g. `(160, 160, 3)` would be one valid value.
alpha: controls the width of the network. This is known as the
width multiplier in the MobileNetV2 paper, but the name is kept for
consistency with MobileNetV1 in Keras.
- If `alpha` < 1.0, proportionally decreases the number
of filters in each layer.
- If `alpha` > 1.0, proportionally increases the number
of filters in each layer.
- If `alpha` = 1, default number of filters from the paper
are used at each layer.
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor (i.e. output of
`layers.Input()`)
to use as image input for the model.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model
will be the 4D tensor output of the
last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a
2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
# Returns
A Keras model instance.
# Raises
ValueError: in case of invalid argument for `weights`,
invalid input shape, or invalid `alpha` or `rows` when
weights='imagenet'.
"""
global backend, layers, models, keras_utils
backend, layers, models, keras_utils = get_submodules_from_kwargs(kwargs)
if not (weights in {'imagenet', None} or os.path.exists(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as `"imagenet"` with `include_top` '
'as true, `classes` should be 1000')
# Determine proper input shape and default size.
# If both input_shape and input_tensor are used, they should match
if input_shape is not None and input_tensor is not None:
try:
is_input_t_tensor = backend.is_keras_tensor(input_tensor)
except ValueError:
try:
is_input_t_tensor = backend.is_keras_tensor(
keras_utils.get_source_inputs(input_tensor))
except ValueError:
raise ValueError('input_tensor: ', input_tensor,
'is not type input_tensor')
if is_input_t_tensor:
if backend.image_data_format() == 'channels_first':
if backend.int_shape(input_tensor)[1] != input_shape[1]:
raise ValueError('input_shape: ', input_shape,
'and input_tensor: ', input_tensor,
'do not meet the same shape requirements')
else:
if backend.int_shape(input_tensor)[2] != input_shape[1]:
raise ValueError('input_shape: ', input_shape,
'and input_tensor: ', input_tensor,
'do not meet the same shape requirements')
else:
raise ValueError('input_tensor specified: ', input_tensor,
'is not a keras tensor')
# If input_shape is None, infer shape from input_tensor
if input_shape is None and input_tensor is not None:
try:
backend.is_keras_tensor(input_tensor)
except ValueError:
raise ValueError('input_tensor: ', input_tensor,
'is type: ', type(input_tensor),
'which is not a valid type')
if input_shape is None and not backend.is_keras_tensor(input_tensor):
default_size = 224
elif input_shape is None and backend.is_keras_tensor(input_tensor):
if backend.image_data_format() == 'channels_first':
rows = backend.int_shape(input_tensor)[2]
cols = backend.int_shape(input_tensor)[3]
else:
rows = backend.int_shape(input_tensor)[1]
cols = backend.int_shape(input_tensor)[2]
if rows == cols and rows in [96, 128, 160, 192, 224]:
default_size = rows
else:
default_size = 224
# If input_shape is None and no input_tensor
elif input_shape is None:
default_size = 224
# If input_shape is not None, assume default size
else:
if backend.image_data_format() == 'channels_first':
rows = input_shape[1]
cols = input_shape[2]
else:
rows = input_shape[0]
cols = input_shape[1]
if rows == cols and rows in [96, 128, 160, 192, 224]:
default_size = rows
else:
default_size = 224
input_shape = _obtain_input_shape(input_shape,
default_size=default_size,
min_size=32,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights)
if backend.image_data_format() == 'channels_last':
row_axis, col_axis = (0, 1)
else:
row_axis, col_axis = (1, 2)
rows = input_shape[row_axis]
cols = input_shape[col_axis]
if weights == 'imagenet':
if alpha not in [0.35, 0.50, 0.75, 1.0, 1.3, 1.4]:
raise ValueError('If imagenet weights are being loaded, '
'alpha can be one of `0.35`, `0.50`, `0.75`, '
'`1.0`, `1.3` or `1.4` only.')
if rows != cols or rows not in [96, 128, 160, 192, 224]:
rows = 224
warnings.warn('`input_shape` is undefined or non-square, '
'or `rows` is not in [96, 128, 160, 192, 224].'
' Weights for input shape (224, 224) will be'
' loaded as the default.')
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1
first_block_filters = _make_divisible(32 * alpha, 8)
x = layers.ZeroPadding2D(padding=correct_pad(backend, img_input, 3),
name='Conv1_pad')(img_input)
x = layers.Conv2D(first_block_filters,
kernel_size=3,
strides=(2, 2),
padding='valid',
use_bias=False,
name='Conv1')(x)
x = layers.BatchNormalization(axis=channel_axis,
epsilon=1e-3,
momentum=0.999,
name='bn_Conv1')(x)
x = layers.ReLU(6., name='Conv1_relu')(x)
x = _inverted_res_block(x, filters=16, alpha=alpha, stride=1,
expansion=1, block_id=0)
x = _inverted_res_block(x, filters=24, alpha=alpha, stride=2,
expansion=6, block_id=1)
x = _inverted_res_block(x, filters=24, alpha=alpha, stride=1,
expansion=6, block_id=2)
x = _inverted_res_block(x, filters=32, alpha=alpha, stride=2,
expansion=6, block_id=3)
x = _inverted_res_block(x, filters=32, alpha=alpha, stride=1,
expansion=6, block_id=4)
x = _inverted_res_block(x, filters=32, alpha=alpha, stride=1,
expansion=6, block_id=5)
x = _inverted_res_block(x, filters=64, alpha=alpha, stride=2,
expansion=6, block_id=6)
x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1,
expansion=6, block_id=7)
x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1,
expansion=6, block_id=8)
x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1,
expansion=6, block_id=9)
x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1,
expansion=6, block_id=10)
x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1,
expansion=6, block_id=11)
x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1,
expansion=6, block_id=12)
x = _inverted_res_block(x, filters=160, alpha=alpha, stride=2,
expansion=6, block_id=13)
x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1,
expansion=6, block_id=14)
x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1,
expansion=6, block_id=15)
x = _inverted_res_block(x, filters=320, alpha=alpha, stride=1,
expansion=6, block_id=16)
# no alpha applied to last conv as stated in the paper:
# if the width multiplier is greater than 1 we
# increase the number of output channels
if alpha > 1.0:
last_block_filters = _make_divisible(1280 * alpha, 8)
else:
last_block_filters = 1280
x = layers.Conv2D(last_block_filters,
kernel_size=1,
use_bias=False,
name='Conv_1')(x)
x = layers.BatchNormalization(axis=channel_axis,
epsilon=1e-3,
momentum=0.999,
name='Conv_1_bn')(x)
x = layers.ReLU(6., name='out_relu')(x)
if include_top:
x = layers.GlobalAveragePooling2D()(x)
x = layers.Dense(classes, activation='softmax',
use_bias=True, name='Logits')(x)
else:
if pooling == 'avg':
x = layers.GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = layers.GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = keras_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = models.Model(inputs, x,
name='mobilenetv2_%0.2f_%s' % (alpha, rows))
# Load weights.
if weights == 'imagenet':
if include_top:
model_name = ('mobilenet_v2_weights_tf_dim_ordering_tf_kernels_' +
str(alpha) + '_' + str(rows) + '.h5')
weight_path = BASE_WEIGHT_PATH + model_name
weights_path = keras_utils.get_file(
model_name, weight_path, cache_subdir='models')
else:
model_name = ('mobilenet_v2_weights_tf_dim_ordering_tf_kernels_' +
str(alpha) + '_' + str(rows) + '_no_top' + '.h5')
weight_path = BASE_WEIGHT_PATH + model_name
weights_path = keras_utils.get_file(
model_name, weight_path, cache_subdir='models')
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model
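# An inverted residual block expands its input with a 1x1 pointwise
# convolution, filters it with a 3x3 depthwise convolution, and projects it
# back down to a narrow bottleneck with another 1x1 convolution; a residual
# shortcut is added only when stride == 1 and the input and output channel
# counts match (see the final `Add` below).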
def _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id):
channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1
in_channels = backend.int_shape(inputs)[channel_axis]
pointwise_conv_filters = int(filters * alpha)
pointwise_filters = _make_divisible(pointwise_conv_filters, 8)
x = inputs
prefix = 'block_{}_'.format(block_id)
if block_id:
# Expand
x = layers.Conv2D(expansion * in_channels,
kernel_size=1,
padding='same',
use_bias=False,
activation=None,
name=prefix + 'expand')(x)
x = layers.BatchNormalization(axis=channel_axis,
epsilon=1e-3,
momentum=0.999,
name=prefix + 'expand_BN')(x)
x = layers.ReLU(6., name=prefix + 'expand_relu')(x)
else:
prefix = 'expanded_conv_'
# Depthwise
if stride == 2:
x = layers.ZeroPadding2D(padding=correct_pad(backend, x, 3),
name=prefix + 'pad')(x)
x = layers.DepthwiseConv2D(kernel_size=3,
strides=stride,
activation=None,
use_bias=False,
padding='same' if stride == 1 else 'valid',
name=prefix + 'depthwise')(x)
x = layers.BatchNormalization(axis=channel_axis,
epsilon=1e-3,
momentum=0.999,
name=prefix + 'depthwise_BN')(x)
x = layers.ReLU(6., name=prefix + 'depthwise_relu')(x)
# Project
x = layers.Conv2D(pointwise_filters,
kernel_size=1,
padding='same',
use_bias=False,
activation=None,
name=prefix + 'project')(x)
x = layers.BatchNormalization(axis=channel_axis,
epsilon=1e-3,
momentum=0.999,
name=prefix + 'project_BN')(x)
if in_channels == pointwise_filters and stride == 1:
return layers.Add(name=prefix + 'add')([inputs, x])
return x
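# Usage sketch (illustrative, not part of the original module). The function
# expects the Keras submodules to be passed via kwargs, as done by the
# keras_applications entry points:
#
#   import keras
#   model = MobileNetV2(input_shape=(160, 160, 3), alpha=0.75,
#                       include_top=False, weights='imagenet',
#                       backend=keras.backend, layers=keras.layers,
#                       models=keras.models, utils=keras.utils)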
| keras-applications/keras_applications/mobilenet_v2.py/0 | {
"file_path": "keras-applications/keras_applications/mobilenet_v2.py",
"repo_id": "keras-applications",
"token_count": 10071
} | 12 |
[report]
# Regexes for lines to exclude from consideration
exclude_lines =
os.remove
except ImportError
# Don't complain if tests don't hit defensive assertion code:
raise ImportError
raise NotImplementedError
# Don't complain if legacy support code paths are not executed:
if original_keras_version == '1':
show_missing = True
omit =
keras_contrib/backend/theano_backend.py
keras_contrib/backend/tensorflow_backend.py
keras_contrib/backend/cntk_backend.py
| keras-contrib/.coveragerc/0 | {
"file_path": "keras-contrib/.coveragerc",
"repo_id": "keras-contrib",
"token_count": 173
} | 13 |
# Maintainer Guidelines
## Maintainers:
Following are the users with write-access to this repository (maintainers) :
* [athundt](https://www.github.com/athundt)
* [bstriner](https://www.github.com/bstriner)
* [farizrahman4u](https://www.github.com/farizrahman4u)
* [fchollet](https://www.github.com/fchollet)
* [kemaswill](https://www.github.com/kemaswill)
* [lukedeo](https://www.github.com/lukedeo)
* [patyork](https://www.github.com/patyork)
* [tboquet](https://www.github.com/tboquet)
* [the-moliver](https://www.github.com/the-moliver)
## Addition of new features
* Addition of new features require submitting a pull request, even for those who have write access to this repository.
* Maintainers should not merge their own pull requests.
* Whenever possible, multiple Maintainers should review a pull request before it is merged.
* All incoming new features should be accompanied by documentation and unit tests.
## Becoming a maintainer
* To become a maintainer, you should be a recognized contributor to either Keras-contrib or Keras core.
* If you think you are eligible to be a maintainer, you can contact one of the existing maintainers to join the team.
## Versioning
* Keras-contrib is tested only against the bleeding-edge version of Keras.
* In case the Travis build fails due to a change in Keras core, the maintainers are responsible for rectifying the issue.
| keras-contrib/GUIDELINES.md/0 | {
"file_path": "keras-contrib/GUIDELINES.md",
"repo_id": "keras-contrib",
"token_count": 409
} | 14 |
<a class="{% if not nav_item.is_link %}reference internal{% endif %}{% if nav_item.active%} current{%endif%}" href="{% if not nav_item.is_section %}{{ nav_item.url|url }}{% else %}#{% endif %}">{{ nav_item.title }}</a>
{%- set navlevel = navlevel + 1 %}
{%- if navlevel <= config.theme.navigation_depth
and ((nav_item.is_page and nav_item.toc.items
and (not config.theme.titles_only
and (nav_item == page or not config.theme.collapse_navigation)))
or (nav_item.is_section and nav_item.children)) %}
<ul{% if nav_item.active %} class="current"{% endif %}>
{%- if nav_item.is_page %}
{#- Skip first level of toc which is page title. #}
{%- set toc_item = nav_item.toc.items[0] %}
{%- include 'toc.html' %}
{%- elif nav_item.is_section %}
{%- for nav_item in nav_item.children %}
<li class="toctree-l{{ navlevel }}{% if nav_item.active%} current{%endif%}">
{%- include 'nav.html' %}
</li>
{%- endfor %}
{%- endif %}
</ul>
{%- endif %}
{%- set navlevel = navlevel - 1 %}
| keras-contrib/contrib_docs/theme/nav.html/0 | {
"file_path": "keras-contrib/contrib_docs/theme/nav.html",
"repo_id": "keras-contrib",
"token_count": 522
} | 15 |
from __future__ import absolute_import
from . import backend
from . import datasets
from . import layers
from . import preprocessing
from . import utils
from . import wrappers
from . import callbacks
from . import constraints
from . import initializers
from . import metrics
from . import losses
from . import optimizers
from . import regularizers
__version__ = '0.0.2'
| keras-contrib/keras_contrib/__init__.py/0 | {
"file_path": "keras-contrib/keras_contrib/__init__.py",
"repo_id": "keras-contrib",
"token_count": 94
} | 16 |
from __future__ import absolute_import
from __future__ import print_function
import os
import numpy as np
from keras.callbacks import Callback, ModelCheckpoint, LearningRateScheduler
try:
import requests
except ImportError:
requests = None
class SnapshotModelCheckpoint(Callback):
"""Callback that saves the snapshot weights of the model.
Saves the model weights on certain epochs (which can be considered the
snapshot of the model at that epoch).
Should be used with the cosine annealing learning rate schedule to save
the weights just before the learning rate is sharply increased.
# Arguments:
nb_epochs: total number of epochs that the model will be trained for.
nb_snapshots: number of times the weights of the model will be saved.
fn_prefix: prefix for the filename of the weights.
"""
def __init__(self, nb_epochs, nb_snapshots, fn_prefix='Model'):
super(SnapshotModelCheckpoint, self).__init__()
self.check = nb_epochs // nb_snapshots
self.fn_prefix = fn_prefix
def on_epoch_end(self, epoch, logs={}):
if epoch != 0 and (epoch + 1) % self.check == 0:
filepath = self.fn_prefix + '-%d.h5' % ((epoch + 1) // self.check)
self.model.save_weights(filepath, overwrite=True)
# print("Saved snapshot at %s-%d.h5" % (self.fn_prefix, (epoch + 1) // self.check))
class SnapshotCallbackBuilder:
"""Callback builder for snapshot ensemble training of a model.
From the paper "Snapshot Ensembles: Train 1, Get M For Free" (
https://openreview.net/pdf?id=BJYwwY9ll)
Creates a list of callbacks, which are provided when training a model
so as to save the model weights at certain epochs, and then sharply
increase the learning rate.
"""
def __init__(self, nb_epochs, nb_snapshots, init_lr=0.1):
"""
Initialize a snapshot callback builder.
# Arguments:
nb_epochs: total number of epochs that the model will be trained for.
nb_snapshots: number of times the weights of the model will be saved.
init_lr: initial learning rate
"""
self.T = nb_epochs
self.M = nb_snapshots
self.alpha_zero = init_lr
def get_callbacks(self, model_prefix='Model'):
"""
Creates a list of callbacks that can be used during training to create a
snapshot ensemble of the model.
Args:
model_prefix: prefix for the filename of the weights.
Returns: list of 3 callbacks [ModelCheckpoint, LearningRateScheduler,
SnapshotModelCheckpoint] which can be provided to the 'fit' function
"""
if not os.path.exists('weights/'):
os.makedirs('weights/')
callback_list = [ModelCheckpoint('weights/%s-Best.h5' % model_prefix,
monitor='val_acc',
save_best_only=True, save_weights_only=True),
LearningRateScheduler(schedule=self._cosine_anneal_schedule),
SnapshotModelCheckpoint(self.T,
self.M,
fn_prefix='weights/%s' % model_prefix)]
return callback_list
def _cosine_anneal_schedule(self, t):
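# Cosine annealing within each of the M cycles, following the
# snapshot-ensembles paper: the learning rate starts at alpha_zero,
# decays along a half cosine to ~0 over T // M epochs, then jumps
# back up at the start of the next cycle.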
cos_inner = np.pi * (t % (self.T // self.M))
cos_inner /= self.T // self.M
cos_out = np.cos(cos_inner) + 1
return float(self.alpha_zero / 2 * cos_out)
| keras-contrib/keras_contrib/callbacks/snapshot.py/0 | {
"file_path": "keras-contrib/keras_contrib/callbacks/snapshot.py",
"repo_id": "keras-contrib",
"token_count": 1495
} | 17 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from keras import backend as K
from keras import activations
from keras import regularizers
from keras import initializers
from keras import constraints
from keras.layers import Layer
from keras_contrib.utils.test_utils import to_tuple
class Capsule(Layer):
"""Capsule Layer implementation in Keras
This implementation is based on Dynamic Routing of Capsules,
Geoffrey Hinton et. al.
The Capsule Layer is a Neural Network Layer which helps
modeling relationships in image and sequential data better
than just CNNs or RNNs. It achieves this by understanding
the spatial relationships between objects (in images)
or words (in text) by encoding additional information
about the image or text, such as angle of rotation,
thickness and brightness, relative proportions etc.
This layer can be used instead of pooling layers to
lower dimensions and still capture important information
about the relationships and structures within the data.
A normal pooling layer would lose a lot of
this information.
This layer can be used on the output of any layer
which has a 3-D output (including batch_size). For example,
in image classification, it can be used on the output of a
Conv2D layer for Computer Vision applications. Also,
it can be used on the output of a GRU or LSTM Layer
(Bidirectional or Unidirectional) for NLP applications.
The default activation function is 'linear'. But, this layer
is generally used with the 'squash' activation function
(recommended). To use the squash activation function, do :
from keras_contrib.activations import squash
capsule = Capsule(num_capsule=10,
dim_capsule=10,
routings=3,
share_weights=True,
activation=squash)
# Example usage :
1). COMPUTER VISION
input_image = Input(shape=(None, None, 3))
conv_2d = Conv2D(64,
(3, 3),
activation='relu')(input_image)
capsule = Capsule(num_capsule=10,
dim_capsule=16,
routings=3,
activation='relu',
share_weights=True)(conv_2d)
2). NLP
maxlen = 72
max_features = 120000
input_text = Input(shape=(maxlen,))
embedding = Embedding(max_features,
embed_size,
weights=[embedding_matrix],
trainable=False)(input_text)
bi_gru = Bidirectional(GRU(64,
return_sequences=True))(embedding)
capsule = Capsule(num_capsule=5,
dim_capsule=5,
routings=4,
activation='sigmoid',
share_weights=True)(bi_gru)
# Arguments
num_capsule : Number of Capsules (int)
dim_capsule : Dimensions of the vector output of each Capsule (int)
routings : Number of dynamic routings in the Capsule Layer (int)
share_weights : Whether to share weights between Capsules or not
(boolean)
activation : Activation function for the Capsules
regularizer : Regularizer for the weights of the Capsules
initializer : Initializer for the weights of the Capsules
constraint : Constraint for the weights of the Capsules
# Input shape
3D tensor with shape:
(batch_size, input_num_capsule, input_dim_capsule)
[any 3-D Tensor with the first dimension as batch_size]
# Output shape
3D tensor with shape:
(batch_size, num_capsule, dim_capsule)
# References
- [Dynamic-Routing-Between-Capsules]
(https://arxiv.org/pdf/1710.09829.pdf)
- [Keras-Examples-CIFAR10-CNN-Capsule]"""
def __init__(self,
num_capsule,
dim_capsule,
routings=3,
share_weights=True,
initializer='glorot_uniform',
activation=None,
regularizer=None,
constraint=None,
**kwargs):
super(Capsule, self).__init__(**kwargs)
self.num_capsule = num_capsule
self.dim_capsule = dim_capsule
self.routings = routings
self.share_weights = share_weights
self.activation = activations.get(activation)
self.regularizer = regularizers.get(regularizer)
self.initializer = initializers.get(initializer)
self.constraint = constraints.get(constraint)
def build(self, input_shape):
input_shape = to_tuple(input_shape)
input_dim_capsule = input_shape[-1]
if self.share_weights:
self.W = self.add_weight(name='capsule_kernel',
shape=(1,
input_dim_capsule,
self.num_capsule *
self.dim_capsule),
initializer=self.initializer,
regularizer=self.regularizer,
constraint=self.constraint,
trainable=True)
else:
input_num_capsule = input_shape[-2]
self.W = self.add_weight(name='capsule_kernel',
shape=(input_num_capsule,
input_dim_capsule,
self.num_capsule *
self.dim_capsule),
initializer=self.initializer,
regularizer=self.regularizer,
constraint=self.constraint,
trainable=True)
self.built = True
def call(self, inputs):
if self.share_weights:
u_hat_vectors = K.conv1d(inputs, self.W)
else:
u_hat_vectors = K.local_conv1d(inputs, self.W, [1], [1])
# u_hat_vectors : The spatially transformed input vectors (with local_conv_1d)
batch_size = K.shape(inputs)[0]
input_num_capsule = K.shape(inputs)[1]
u_hat_vectors = K.reshape(u_hat_vectors, (batch_size,
input_num_capsule,
self.num_capsule,
self.dim_capsule))
u_hat_vectors = K.permute_dimensions(u_hat_vectors, (0, 2, 1, 3))
routing_weights = K.zeros_like(u_hat_vectors[:, :, :, 0])
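# Dynamic routing: `routing_weights` are the raw routing logits (b_ij in
# the paper). Each iteration turns them into coupling coefficients via a
# softmax, forms candidate outputs as weighted sums of the prediction
# vectors `u_hat_vectors`, and then raises the logits of couplings whose
# predictions agree with the current outputs.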
for i in range(self.routings):
capsule_weights = K.softmax(routing_weights, 1)
outputs = K.batch_dot(capsule_weights, u_hat_vectors, [2, 2])
if K.ndim(outputs) == 4:
outputs = K.sum(outputs, axis=1)
if i < self.routings - 1:
outputs = K.l2_normalize(outputs, -1)
routing_weights = K.batch_dot(outputs, u_hat_vectors, [2, 3])
if K.ndim(routing_weights) == 4:
routing_weights = K.sum(routing_weights, axis=1)
return self.activation(outputs)
def compute_output_shape(self, input_shape):
return (None, self.num_capsule, self.dim_capsule)
def get_config(self):
config = {'num_capsule': self.num_capsule,
'dim_capsule': self.dim_capsule,
'routings': self.routings,
'share_weights': self.share_weights,
'activation': activations.serialize(self.activation),
'regularizer': regularizers.serialize(self.regularizer),
'initializer': initializers.serialize(self.initializer),
'constraint': constraints.serialize(self.constraint)}
base_config = super(Capsule, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
| keras-contrib/keras_contrib/layers/capsule.py/0 | {
"file_path": "keras-contrib/keras_contrib/layers/capsule.py",
"repo_id": "keras-contrib",
"token_count": 4334
} | 18 |
from __future__ import absolute_import
from keras.optimizers import Optimizer
from keras import backend as K
class FTML(Optimizer):
"""FTML optimizer.
# Arguments
lr: float >= 0. Learning rate.
beta_1: float, 0 < beta < 1. Generally close to 0.5.
beta_2: float, 0 < beta < 1. Generally close to 1.
epsilon: float >= 0. Fuzz factor.
decay: float >= 0. Learning rate decay over each update.
# References
- [FTML - Follow the Moving Leader in Deep Learning](
http://www.cse.ust.hk/~szhengac/papers/icml17.pdf)
"""
def __init__(self, lr=0.0025, beta_1=0.6, beta_2=0.999,
epsilon=1e-8, decay=0., **kwargs):
super(FTML, self).__init__(**kwargs)
self.__dict__.update(locals())
self.iterations = K.variable(0)
self.lr = K.variable(lr)
self.beta_1 = K.variable(beta_1)
self.beta_2 = K.variable(beta_2)
self.decay = K.variable(decay)
self.epsilon = epsilon
self.initial_decay = decay
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
self.updates = [K.update_add(self.iterations, 1)]
lr = self.lr
if self.initial_decay > 0:
lr *= (1. / (1. + self.decay * self.iterations))
t = self.iterations + 1
lr_t = lr / (1. - K.pow(self.beta_1, t))
shapes = [K.int_shape(p) for p in params]
zs = [K.zeros(shape) for shape in shapes]
vs = [K.zeros(shape) for shape in shapes]
ds = [K.zeros(shape) for shape in shapes]
self.weights = [self.iterations] + zs + vs + ds
for p, g, z, v, d in zip(params, grads, zs, vs, ds):
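# FTML recursions, in the paper's notation:
#   v_t     : moving average of squared gradients (as in Adam)
#   d_t     : bias-corrected denominator, (sqrt(v_hat_t) + epsilon) / lr_t
#   sigma_t : d_t - beta_1 * d_{t-1}
#   z_t     : accumulated linearized past information; the new parameter
#             value -z_t / d_t is the "follow the moving leader" step.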
v_t = self.beta_2 * v + (1. - self.beta_2) * K.square(g)
d_t = (K.sqrt(v_t / (1. - K.pow(self.beta_2, t)))
+ self.epsilon) / lr_t
sigma_t = d_t - self.beta_1 * d
z_t = self.beta_1 * z + (1. - self.beta_1) * g - sigma_t * p
p_t = - z_t / d_t
self.updates.append(K.update(z, z_t))
self.updates.append(K.update(v, v_t))
self.updates.append(K.update(d, d_t))
new_p = p_t
# Apply constraints.
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
self.updates.append(K.update(p, new_p))
return self.updates
def get_config(self):
config = {'lr': float(K.get_value(self.lr)),
'beta_1': float(K.get_value(self.beta_1)),
'beta_2': float(K.get_value(self.beta_2)),
'decay': float(K.get_value(self.decay)),
'epsilon': self.epsilon}
base_config = super(FTML, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
| keras-contrib/keras_contrib/optimizers/ftml.py/0 | {
"file_path": "keras-contrib/keras_contrib/optimizers/ftml.py",
"repo_id": "keras-contrib",
"token_count": 1471
} | 19 |
# Configuration of py.test
[pytest]
addopts=-v
-n 2
--durations=10
--cov-report term-missing
# Do not run tests in the build folder
norecursedirs= build
# PEP-8 The following are ignored:
# E402 module level import not at top of file - temporary measure to continue adding ros python packages in sys.path
# E731 do not assign a lambda expression, use a def
pep8ignore=* E402 \
* E731 \
* W503
pep8maxlinelength = 88
| keras-contrib/pytest.ini/0 | {
"file_path": "keras-contrib/pytest.ini",
"repo_id": "keras-contrib",
"token_count": 182
} | 20 |
import numpy as np
import pytest
from keras import backend as K
from keras.models import Sequential
from numpy.testing import assert_allclose
from keras_contrib.utils.test_utils import layer_test
from keras_contrib.layers import CosineConvolution2D
# TensorFlow does not support full convolution.
if K.backend() == 'theano':
_convolution_border_modes = ['valid', 'same']
data_format = 'channels_first'
else:
_convolution_border_modes = ['valid', 'same']
data_format = 'channels_last'
@pytest.mark.parametrize('border_mode', _convolution_border_modes)
@pytest.mark.parametrize('subsample', [(1, 1), (2, 2)])
@pytest.mark.parametrize('use_bias_mode', [True, False])
@pytest.mark.parametrize('use_regularizer', [True, False])
def test_cosineconvolution_2d(border_mode,
subsample,
use_bias_mode,
use_regularizer):
num_samples = 2
num_filter = 2
stack_size = 3
num_row = 10
num_col = 6
if border_mode == 'same' and subsample != (1, 1):
return
kwargs = {'filters': num_filter,
'kernel_size': (3, 3),
'padding': border_mode,
'strides': subsample,
'use_bias': use_bias_mode,
'data_format': data_format}
if use_regularizer:
kwargs.update({'kernel_regularizer': 'l2',
'bias_regularizer': 'l2',
'activity_regularizer': 'l2'})
layer_test(CosineConvolution2D,
kwargs=kwargs,
input_shape=(num_samples, num_row, num_col, stack_size))
def test_cosineconvolution_2d_correctness():
if data_format == 'channels_first':
X = np.random.randn(1, 3, 5, 5)
input_dim = (3, 5, 5)
W0 = X[:, :, ::-1, ::-1]
elif data_format == 'channels_last':
X = np.random.randn(1, 5, 5, 3)
input_dim = (5, 5, 3)
W0 = X[0, :, :, :, None]
model = Sequential()
model.add(CosineConvolution2D(1, (5, 5), use_bias=True,
input_shape=input_dim,
data_format=data_format))
model.compile(loss='mse', optimizer='rmsprop')
W = model.get_weights()
W[0] = W0
W[1] = np.asarray([1.])
model.set_weights(W)
out = model.predict(X)
assert_allclose(out, np.ones((1, 1, 1, 1), dtype=K.floatx()), atol=1e-5)
model = Sequential()
model.add(CosineConvolution2D(1, (5, 5),
use_bias=False,
input_shape=input_dim,
data_format=data_format))
model.compile(loss='mse', optimizer='rmsprop')
W = model.get_weights()
W[0] = -2 * W0
model.set_weights(W)
out = model.predict(X)
assert_allclose(out, -np.ones((1, 1, 1, 1), dtype=K.floatx()), atol=1e-5)
if __name__ == '__main__':
pytest.main([__file__])
| keras-contrib/tests/keras_contrib/layers/convolutional/test_cosineconvolution2d.py/0 | {
"file_path": "keras-contrib/tests/keras_contrib/layers/convolutional/test_cosineconvolution2d.py",
"repo_id": "keras-contrib",
"token_count": 1498
} | 21 |
import pytest
import os
from keras import backend as K
from keras.layers import Input, Dense
from keras.models import Model
from numpy.testing import assert_allclose
from keras_contrib.utils.save_load_utils import save_all_weights, load_all_weights
@pytest.mark.skipif(K.backend() != 'tensorflow',
reason='save_all_weights and load_all_weights only '
'supported on TensorFlow')
def test_save_and_load_all_weights():
'''
Test save_all_weights and load_all_weights.
Save and load optimizer and model weights but not configuration.
'''
def make_model():
_x = Input((10,))
_y = Dense(10)(_x)
_m = Model(_x, _y)
_m.compile('adam', 'mean_squared_error')
_m._make_train_function()
return _m
# make a model
m1 = make_model()
# set weights
w1 = m1.layers[1].kernel # dense layer
w1value = K.get_value(w1)
w1value[0, 0:4] = [1, 3, 3, 7]
K.set_value(w1, w1value)
# set optimizer weights
ow1 = m1.optimizer.weights[3] # momentum weights
ow1value = K.get_value(ow1)
ow1value[0, 0:3] = [4, 2, 0]
K.set_value(ow1, ow1value)
# save all weights
save_all_weights(m1, 'model.h5')
# new model
m2 = make_model()
# load all weights
load_all_weights(m2, 'model.h5')
# check weights
assert_allclose(K.get_value(m2.layers[1].kernel)[0, 0:4], [1, 3, 3, 7])
# check optimizer weights
assert_allclose(K.get_value(m2.optimizer.weights[3])[0, 0:3], [4, 2, 0])
os.remove('model.h5')
if __name__ == '__main__':
pytest.main([__file__])
| keras-contrib/tests/keras_contrib/utils/save_load_utils_test.py/0 | {
"file_path": "keras-contrib/tests/keras_contrib/utils/save_load_utils_test.py",
"repo_id": "keras-contrib",
"token_count": 734
} | 22 |
"""
Title: OCR model for reading Captchas
Author: [A_K_Nain](https://twitter.com/A_K_Nain)
Date created: 2020/06/14
Last modified: 2020/06/26
Description: How to implement an OCR model using CNNs, RNNs and CTC loss.
Accelerator: GPU
"""
"""
## Introduction
This example demonstrates a simple OCR model built with the Functional API. Apart from
combining CNN and RNN, it also illustrates how you can instantiate a new layer
and use it as an "Endpoint layer" for implementing CTC loss. For a detailed
guide to layer subclassing, please check out
[this page](https://keras.io/guides/making_new_layers_and_models_via_subclassing/)
in the developer guides.
"""
"""
## Setup
"""
import os
import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path
from collections import Counter
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
"""
## Load the data: [Captcha Images](https://www.kaggle.com/fournierp/captcha-version-2-images)
Let's download the data.
"""
"""shell
curl -LO https://github.com/AakashKumarNain/CaptchaCracker/raw/master/captcha_images_v2.zip
unzip -qq captcha_images_v2.zip
"""
"""
The dataset contains 1040 captcha files as `png` images. The label for each sample is a string,
the name of the file (minus the file extension).
We will map each character in the string to an integer for training the model. Similarly,
we will need to map the predictions of the model back to strings. For this purpose
we will maintain two dictionaries, mapping characters to integers, and integers to characters,
respectively.
"""
# Path to the data directory
data_dir = Path("./captcha_images_v2/")
# Get list of all the images
images = sorted(list(map(str, list(data_dir.glob("*.png")))))
labels = [img.split(os.path.sep)[-1].split(".png")[0] for img in images]
characters = set(char for label in labels for char in label)
characters = sorted(list(characters))
print("Number of images found: ", len(images))
print("Number of labels found: ", len(labels))
print("Number of unique characters: ", len(characters))
print("Characters present: ", characters)
# Batch size for training and validation
batch_size = 16
# Desired image dimensions
img_width = 200
img_height = 50
# Factor by which the image is going to be downsampled
# by the convolutional blocks. We will be using two
# convolution blocks and each block will have
# a pooling layer which downsamples the features by a factor of 2.
# Hence the total downsampling factor is 4.
downsample_factor = 4
# Maximum length of any captcha in the dataset
max_length = max([len(label) for label in labels])
"""
## Preprocessing
"""
# Mapping characters to integers
char_to_num = layers.StringLookup(vocabulary=list(characters), mask_token=None)
# Mapping integers back to original characters
num_to_char = layers.StringLookup(
vocabulary=char_to_num.get_vocabulary(), mask_token=None, invert=True
)
def split_data(images, labels, train_size=0.9, shuffle=True):
# 1. Get the total size of the dataset
size = len(images)
# 2. Make an indices array and shuffle it, if required
indices = np.arange(size)
if shuffle:
np.random.shuffle(indices)
# 3. Get the size of training samples
train_samples = int(size * train_size)
# 4. Split data into training and validation sets
x_train, y_train = (
images[indices[:train_samples]],
labels[indices[:train_samples]],
)
x_valid, y_valid = (
images[indices[train_samples:]],
labels[indices[train_samples:]],
)
return x_train, x_valid, y_train, y_valid
# Splitting data into training and validation sets
x_train, x_valid, y_train, y_valid = split_data(
np.array(images), np.array(labels)
)
def encode_single_sample(img_path, label):
# 1. Read image
img = tf.io.read_file(img_path)
# 2. Decode and convert to grayscale
img = tf.io.decode_png(img, channels=1)
# 3. Convert to float32 in [0, 1] range
img = tf.image.convert_image_dtype(img, tf.float32)
# 4. Resize to the desired size
img = tf.image.resize(img, [img_height, img_width])
# 5. Transpose the image because we want the time
# dimension to correspond to the width of the image.
img = tf.transpose(img, perm=[1, 0, 2])
# 6. Map the characters in label to numbers
label = char_to_num(tf.strings.unicode_split(label, input_encoding="UTF-8"))
# 7. Return a dict as our model is expecting two inputs
return {"image": img, "label": label}
"""
## Create `Dataset` objects
"""
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = (
train_dataset.map(encode_single_sample, num_parallel_calls=tf.data.AUTOTUNE)
.batch(batch_size)
.prefetch(buffer_size=tf.data.AUTOTUNE)
)
validation_dataset = tf.data.Dataset.from_tensor_slices((x_valid, y_valid))
validation_dataset = (
validation_dataset.map(
encode_single_sample, num_parallel_calls=tf.data.AUTOTUNE
)
.batch(batch_size)
.prefetch(buffer_size=tf.data.AUTOTUNE)
)
"""
## Visualize the data
"""
_, ax = plt.subplots(4, 4, figsize=(10, 5))
for batch in train_dataset.take(1):
images = batch["image"]
labels = batch["label"]
for i in range(16):
img = (images[i] * 255).numpy().astype("uint8")
label = (
tf.strings.reduce_join(num_to_char(labels[i]))
.numpy()
.decode("utf-8")
)
ax[i // 4, i % 4].imshow(img[:, :, 0].T, cmap="gray")
ax[i // 4, i % 4].set_title(label)
ax[i // 4, i % 4].axis("off")
plt.show()
"""
## Model
"""
class CTCLayer(layers.Layer):
def __init__(self, name=None):
super().__init__(name=name)
self.loss_fn = keras.backend.ctc_batch_cost
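# `ctc_batch_cost(y_true, y_pred, input_length, label_length)` runs the
# CTC forward algorithm and returns one loss value per batch element;
# it needs the per-sample lengths of both predictions and labels, which
# `call` below derives from the tensor shapes.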
def call(self, y_true, y_pred):
# Compute the training-time loss value and add it
# to the layer using `self.add_loss()`.
batch_len = tf.cast(tf.shape(y_true)[0], dtype="int64")
input_length = tf.cast(tf.shape(y_pred)[1], dtype="int64")
label_length = tf.cast(tf.shape(y_true)[1], dtype="int64")
input_length = input_length * tf.ones(
shape=(batch_len, 1), dtype="int64"
)
label_length = label_length * tf.ones(
shape=(batch_len, 1), dtype="int64"
)
loss = self.loss_fn(y_true, y_pred, input_length, label_length)
self.add_loss(loss)
# At test time, just return the computed predictions
return y_pred
def build_model():
# Inputs to the model
input_img = layers.Input(
shape=(img_width, img_height, 1), name="image", dtype="float32"
)
labels = layers.Input(name="label", shape=(None,), dtype="float32")
# First conv block
x = layers.Conv2D(
32,
(3, 3),
activation="relu",
kernel_initializer="he_normal",
padding="same",
name="Conv1",
)(input_img)
x = layers.MaxPooling2D((2, 2), name="pool1")(x)
# Second conv block
x = layers.Conv2D(
64,
(3, 3),
activation="relu",
kernel_initializer="he_normal",
padding="same",
name="Conv2",
)(x)
x = layers.MaxPooling2D((2, 2), name="pool2")(x)
# We have used two max pooling layers with pool size and strides of 2.
# Hence, the downsampled feature maps are 4x smaller. The number of
# filters in the last layer is 64. Reshape accordingly before
# passing the output to the RNN part of the model
new_shape = ((img_width // 4), (img_height // 4) * 64)
x = layers.Reshape(target_shape=new_shape, name="reshape")(x)
x = layers.Dense(64, activation="relu", name="dense1")(x)
x = layers.Dropout(0.2)(x)
# RNNs
x = layers.Bidirectional(
layers.LSTM(128, return_sequences=True, dropout=0.25)
)(x)
x = layers.Bidirectional(
layers.LSTM(64, return_sequences=True, dropout=0.25)
)(x)
# Output layer
x = layers.Dense(
len(char_to_num.get_vocabulary()) + 1,
activation="softmax",
name="dense2",
)(x)
# Add CTC layer for calculating CTC loss at each step
output = CTCLayer(name="ctc_loss")(labels, x)
# Define the model
model = keras.models.Model(
inputs=[input_img, labels], outputs=output, name="ocr_model_v1"
)
# Optimizer
opt = keras.optimizers.Adam()
# Compile the model and return
model.compile(optimizer=opt)
return model
# Get the model
model = build_model()
model.summary()
"""
## Training
"""
epochs = 1
early_stopping_patience = 10
# Add early stopping
early_stopping = keras.callbacks.EarlyStopping(
monitor="val_loss",
patience=early_stopping_patience,
restore_best_weights=True,
)
# Train the model
history = model.fit(
train_dataset,
validation_data=validation_dataset,
epochs=epochs,
callbacks=[early_stopping],
)
"""
## Inference
You can use the trained model hosted on [Hugging Face Hub](https://huggingface.co/keras-io/ocr-for-captcha)
and try the demo on [Hugging Face Spaces](https://huggingface.co/spaces/keras-io/ocr-for-captcha).
"""
# Get the prediction model by extracting layers till the output layer
prediction_model = keras.models.Model(
model.get_layer(name="image").input, model.get_layer(name="dense2").output
)
prediction_model.summary()
# A utility function to decode the output of the network
def decode_batch_predictions(pred):
input_len = np.ones(pred.shape[0]) * pred.shape[1]
# Use greedy search. For complex tasks, you can use beam search
results = keras.backend.ctc_decode(
pred, input_length=input_len, greedy=True
)[0][0][:, :max_length]
# Iterate over the results and get back the text
output_text = []
for res in results:
res = tf.strings.reduce_join(num_to_char(res)).numpy().decode("utf-8")
output_text.append(res)
return output_text
# Let's check results on some validation samples
for batch in validation_dataset.take(1):
batch_images = batch["image"]
batch_labels = batch["label"]
preds = prediction_model.predict(batch_images)
pred_texts = decode_batch_predictions(preds)
orig_texts = []
for label in batch_labels:
label = (
tf.strings.reduce_join(num_to_char(label)).numpy().decode("utf-8")
)
orig_texts.append(label)
_, ax = plt.subplots(4, 4, figsize=(15, 5))
for i in range(len(pred_texts)):
img = (batch_images[i, :, :, 0] * 255).numpy().astype(np.uint8)
img = img.T
title = f"Prediction: {pred_texts[i]}"
ax[i // 4, i % 4].imshow(img, cmap="gray")
ax[i // 4, i % 4].set_title(title)
ax[i // 4, i % 4].axis("off")
plt.show()
| keras-core/examples/keras_io/tensorflow/vision/captcha_ocr.py/0 | {
"file_path": "keras-core/examples/keras_io/tensorflow/vision/captcha_ocr.py",
"repo_id": "keras-core",
"token_count": 4216
} | 23 |
"""
Title: The Functional API
Author: [fchollet](https://twitter.com/fchollet)
Date created: 2019/03/01
Last modified: 2020/04/12
Description: Complete guide to the functional API.
Accelerator: GPU
"""
"""
## Setup
"""
import numpy as np
import keras_core as keras
from keras_core import layers
from keras_core import ops
"""
## Introduction
The Keras *functional API* is a way to create models that are more flexible
than the `keras.Sequential` API. The functional API can handle models
with non-linear topology, shared layers, and even multiple inputs or outputs.
The main idea is that a deep learning model is usually
a directed acyclic graph (DAG) of layers.
So the functional API is a way to build *graphs of layers*.
Consider the following model:
<div class="k-default-codeblock">
```
(input: 784-dimensional vectors)
↧
[Dense (64 units, relu activation)]
↧
[Dense (64 units, relu activation)]
↧
[Dense (10 units, softmax activation)]
↧
(output: logits of a probability distribution over 10 classes)
```
</div>
This is a basic graph with three layers.
To build this model using the functional API, start by creating an input node:
"""
inputs = keras.Input(shape=(784,))
"""
The shape of the data is set as a 784-dimensional vector.
The batch size is always omitted since only the shape of each sample is specified.
If, for example, you have an image input with a shape of `(32, 32, 3)`,
you would use:
"""
# Just for demonstration purposes.
img_inputs = keras.Input(shape=(32, 32, 3))
"""
The `inputs` that is returned contains information about the shape and `dtype`
of the input data that you feed to your model.
Here's the shape:
"""
inputs.shape
"""
Here's the dtype:
"""
inputs.dtype
"""
You create a new node in the graph of layers by calling a layer on this `inputs`
object:
"""
dense = layers.Dense(64, activation="relu")
x = dense(inputs)
"""
The "layer call" action is like drawing an arrow from "inputs" to this layer
you created.
You're "passing" the inputs to the `dense` layer, and you get `x` as the output.
Let's add a few more layers to the graph of layers:
"""
x = layers.Dense(64, activation="relu")(x)
outputs = layers.Dense(10)(x)
"""
At this point, you can create a `Model` by specifying its inputs and outputs
in the graph of layers:
"""
model = keras.Model(inputs=inputs, outputs=outputs, name="mnist_model")
"""
Let's check out what the model summary looks like:
"""
model.summary()
"""
You can also plot the model as a graph:
"""
keras.utils.plot_model(model, "my_first_model.png")
"""
And, optionally, display the input and output shapes of each layer
in the plotted graph:
"""
keras.utils.plot_model(
model, "my_first_model_with_shape_info.png", show_shapes=True
)
"""
This figure and the code are almost identical. In the code version,
the connection arrows are replaced by the call operation.
A "graph of layers" is an intuitive mental image for a deep learning model,
and the functional API is a way to create models that closely mirrors this.
"""
"""
## Training, evaluation, and inference
Training, evaluation, and inference work exactly in the same way for models
built using the functional API as for `Sequential` models.
The `Model` class offers a built-in training loop (the `fit()` method)
and a built-in evaluation loop (the `evaluate()` method). Note
that you can easily [customize these loops](/guides/customizing_what_happens_in_fit/)
to implement training routines beyond supervised learning
(e.g. [GANs](https://keras.io/examples/generative/dcgan_overriding_train_step/)).
Here, load the MNIST image data, reshape it into vectors,
fit the model on the data (while monitoring performance on a validation split),
then evaluate the model on the test data:
"""
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
x_train = x_train.reshape(60000, 784).astype("float32") / 255
x_test = x_test.reshape(10000, 784).astype("float32") / 255
model.compile(
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer=keras.optimizers.RMSprop(),
metrics=["accuracy"],
)
history = model.fit(
x_train, y_train, batch_size=64, epochs=2, validation_split=0.2
)
test_scores = model.evaluate(x_test, y_test, verbose=2)
print("Test loss:", test_scores[0])
print("Test accuracy:", test_scores[1])
"""
For further reading, see the [training and evaluation](/guides/training_with_built_in_methods/) guide.
"""
"""
## Save and serialize
Saving the model and serialization work the same way for models built using
the functional API as they do for `Sequential` models. The standard way
to save a functional model is to call `model.save()`
to save the entire model as a single file. You can later recreate the same model
from this file, even if the code that built the model is no longer available.
This saved file includes the:
- model architecture
- model weight values (that were learned during training)
- model training config, if any (as passed to `compile()`)
- optimizer and its state, if any (to restart training where you left off)
"""
model.save("my_model.keras")
del model
# Recreate the exact same model purely from the file:
model = keras.models.load_model("my_model.keras")
"""
For details, read the model [serialization & saving](
/guides/serialization_and_saving/) guide.
"""
"""
## Use the same graph of layers to define multiple models
In the functional API, models are created by specifying their inputs
and outputs in a graph of layers. That means that a single
graph of layers can be used to generate multiple models.
In the example below, you use the same stack of layers to instantiate two models:
an `encoder` model that turns image inputs into 16-dimensional vectors,
and an end-to-end `autoencoder` model for training.
"""
encoder_input = keras.Input(shape=(28, 28, 1), name="img")
x = layers.Conv2D(16, 3, activation="relu")(encoder_input)
x = layers.Conv2D(32, 3, activation="relu")(x)
x = layers.MaxPooling2D(3)(x)
x = layers.Conv2D(32, 3, activation="relu")(x)
x = layers.Conv2D(16, 3, activation="relu")(x)
encoder_output = layers.GlobalMaxPooling2D()(x)
encoder = keras.Model(encoder_input, encoder_output, name="encoder")
encoder.summary()
x = layers.Reshape((4, 4, 1))(encoder_output)
x = layers.Conv2DTranspose(16, 3, activation="relu")(x)
x = layers.Conv2DTranspose(32, 3, activation="relu")(x)
x = layers.UpSampling2D(3)(x)
x = layers.Conv2DTranspose(16, 3, activation="relu")(x)
decoder_output = layers.Conv2DTranspose(1, 3, activation="relu")(x)
autoencoder = keras.Model(encoder_input, decoder_output, name="autoencoder")
autoencoder.summary()
"""
Here, the decoding architecture is strictly symmetrical
to the encoding architecture, so the output shape is the same as
the input shape `(28, 28, 1)`.
The reverse of a `Conv2D` layer is a `Conv2DTranspose` layer,
and the reverse of a `MaxPooling2D` layer is an `UpSampling2D` layer.
"""
"""
## All models are callable, just like layers
You can treat any model as if it were a layer by invoking it on an `Input` or
on the output of another layer. By calling a model you aren't just reusing
the architecture of the model, you're also reusing its weights.
To see this in action, here's a different take on the autoencoder example that
creates an encoder model, a decoder model, and chains them in two calls
to obtain the autoencoder model:
"""
encoder_input = keras.Input(shape=(28, 28, 1), name="original_img")
x = layers.Conv2D(16, 3, activation="relu")(encoder_input)
x = layers.Conv2D(32, 3, activation="relu")(x)
x = layers.MaxPooling2D(3)(x)
x = layers.Conv2D(32, 3, activation="relu")(x)
x = layers.Conv2D(16, 3, activation="relu")(x)
encoder_output = layers.GlobalMaxPooling2D()(x)
encoder = keras.Model(encoder_input, encoder_output, name="encoder")
encoder.summary()
decoder_input = keras.Input(shape=(16,), name="encoded_img")
x = layers.Reshape((4, 4, 1))(decoder_input)
x = layers.Conv2DTranspose(16, 3, activation="relu")(x)
x = layers.Conv2DTranspose(32, 3, activation="relu")(x)
x = layers.UpSampling2D(3)(x)
x = layers.Conv2DTranspose(16, 3, activation="relu")(x)
decoder_output = layers.Conv2DTranspose(1, 3, activation="relu")(x)
decoder = keras.Model(decoder_input, decoder_output, name="decoder")
decoder.summary()
autoencoder_input = keras.Input(shape=(28, 28, 1), name="img")
encoded_img = encoder(autoencoder_input)
decoded_img = decoder(encoded_img)
autoencoder = keras.Model(autoencoder_input, decoded_img, name="autoencoder")
autoencoder.summary()
"""
As you can see, the model can be nested: a model can contain sub-models
(since a model is just like a layer).
A common use case for model nesting is *ensembling*.
For example, here's how to ensemble a set of models into a single model
that averages their predictions:
"""
def get_model():
inputs = keras.Input(shape=(128,))
outputs = layers.Dense(1)(inputs)
return keras.Model(inputs, outputs)
model1 = get_model()
model2 = get_model()
model3 = get_model()
inputs = keras.Input(shape=(128,))
y1 = model1(inputs)
y2 = model2(inputs)
y3 = model3(inputs)
outputs = layers.average([y1, y2, y3])
ensemble_model = keras.Model(inputs=inputs, outputs=outputs)
"""
## Manipulate complex graph topologies
### Models with multiple inputs and outputs
The functional API makes it easy to manipulate multiple inputs and outputs.
This cannot be handled with the `Sequential` API.
For example, if you're building a system for ranking customer issue tickets by
priority and routing them to the correct department,
then the model will have three inputs:
- the title of the ticket (text input),
- the text body of the ticket (text input), and
- any tags added by the user (categorical input)
This model will have two outputs:
- the priority score between 0 and 1 (scalar sigmoid output), and
- the department that should handle the ticket (softmax output
over the set of departments).
You can build this model in a few lines with the functional API:
"""
num_tags = 12 # Number of unique issue tags
num_words = 10000 # Size of vocabulary obtained when preprocessing text data
num_departments = 4 # Number of departments for predictions
title_input = keras.Input(
shape=(None,), name="title"
) # Variable-length sequence of ints
body_input = keras.Input(
shape=(None,), name="body"
) # Variable-length sequence of ints
tags_input = keras.Input(
shape=(num_tags,), name="tags"
) # Binary vectors of size `num_tags`
# Embed each word in the title into a 64-dimensional vector
title_features = layers.Embedding(num_words, 64)(title_input)
# Embed each word in the text into a 64-dimensional vector
body_features = layers.Embedding(num_words, 64)(body_input)
# Reduce sequence of embedded words in the title into a single 128-dimensional vector
title_features = layers.LSTM(128)(title_features)
# Reduce sequence of embedded words in the body into a single 32-dimensional vector
body_features = layers.LSTM(32)(body_features)
# Merge all available features into a single large vector via concatenation
x = layers.concatenate([title_features, body_features, tags_input])
# Stick a logistic regression for priority prediction on top of the features
priority_pred = layers.Dense(1, name="priority")(x)
# Stick a department classifier on top of the features
department_pred = layers.Dense(num_departments, name="department")(x)
# Instantiate an end-to-end model predicting both priority and department
model = keras.Model(
inputs=[title_input, body_input, tags_input],
outputs={"priority": priority_pred, "department": department_pred},
)
"""
Now plot the model:
"""
keras.utils.plot_model(
model, "multi_input_and_output_model.png", show_shapes=True
)
"""
When compiling this model, you can assign different losses to each output.
You can even assign different weights to each loss -- to modulate
their contribution to the total training loss.
"""
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss=[
keras.losses.BinaryCrossentropy(from_logits=True),
keras.losses.CategoricalCrossentropy(from_logits=True),
],
loss_weights=[1.0, 0.2],
)
"""
Since the output layers have different names, you could also specify
the losses and loss weights with the corresponding layer names:
"""
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss={
"priority": keras.losses.BinaryCrossentropy(from_logits=True),
"department": keras.losses.CategoricalCrossentropy(from_logits=True),
},
loss_weights={"priority": 1.0, "department": 0.2},
)
"""
Train the model by passing NumPy arrays of inputs and targets (here, as dicts keyed by input and output names):
"""
# Dummy input data
title_data = np.random.randint(num_words, size=(1280, 10))
body_data = np.random.randint(num_words, size=(1280, 100))
tags_data = np.random.randint(2, size=(1280, num_tags)).astype("float32")
# Dummy target data
priority_targets = np.random.random(size=(1280, 1))
dept_targets = np.random.randint(2, size=(1280, num_departments))
model.fit(
{"title": title_data, "body": body_data, "tags": tags_data},
{"priority": priority_targets, "department": dept_targets},
epochs=2,
batch_size=32,
)
"""
When calling `fit` with a `Dataset` object, it should yield either a
tuple of lists like `([title_data, body_data, tags_data], [priority_targets, dept_targets])`
or a tuple of dictionaries like
`({'title': title_data, 'body': body_data, 'tags': tags_data}, {'priority': priority_targets, 'department': dept_targets})`.
For more detailed explanation, refer to the [training and evaluation](/guides/training_with_built_in_methods/) guide.
"""
"""
### A toy ResNet model
In addition to models with multiple inputs and outputs,
the functional API makes it easy to manipulate non-linear connectivity
topologies -- these are models with layers that are not connected sequentially,
which the `Sequential` API cannot handle.
A common use case for this is residual connections.
Let's build a toy ResNet model for CIFAR10 to demonstrate this:
"""
inputs = keras.Input(shape=(32, 32, 3), name="img")
x = layers.Conv2D(32, 3, activation="relu")(inputs)
x = layers.Conv2D(64, 3, activation="relu")(x)
block_1_output = layers.MaxPooling2D(3)(x)
x = layers.Conv2D(64, 3, activation="relu", padding="same")(block_1_output)
x = layers.Conv2D(64, 3, activation="relu", padding="same")(x)
block_2_output = layers.add([x, block_1_output])
x = layers.Conv2D(64, 3, activation="relu", padding="same")(block_2_output)
x = layers.Conv2D(64, 3, activation="relu", padding="same")(x)
block_3_output = layers.add([x, block_2_output])
x = layers.Conv2D(64, 3, activation="relu")(block_3_output)
x = layers.GlobalAveragePooling2D()(x)
x = layers.Dense(256, activation="relu")(x)
x = layers.Dropout(0.5)(x)
outputs = layers.Dense(10)(x)
model = keras.Model(inputs, outputs, name="toy_resnet")
model.summary()
"""
Plot the model:
"""
keras.utils.plot_model(model, "mini_resnet.png", show_shapes=True)
"""
Now train the model:
"""
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
x_train = x_train.astype("float32") / 255.0
x_test = x_test.astype("float32") / 255.0
y_train = keras.utils.to_categorical(y_train, 10)
y_test = keras.utils.to_categorical(y_test, 10)
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss=keras.losses.CategoricalCrossentropy(from_logits=True),
metrics=["acc"],
)
# We restrict the data to the first 1000 samples so as to limit execution time
# on Colab. Try to train on the entire dataset until convergence!
model.fit(
x_train[:1000],
y_train[:1000],
batch_size=64,
epochs=1,
validation_split=0.2,
)
"""
## Shared layers
Another good use for the functional API is models that use *shared layers*.
Shared layers are layer instances that are reused multiple times in the same model --
they learn features that correspond to multiple paths in the graph-of-layers.
Shared layers are often used to encode inputs from similar spaces
(say, two different pieces of text that feature similar vocabulary).
They enable sharing of information across these different inputs,
and they make it possible to train such a model on less data.
If a given word is seen in one of the inputs,
that will benefit the processing of all inputs that pass through the shared layer.
To share a layer in the functional API, call the same layer instance multiple times.
For instance, here's an `Embedding` layer shared across two different text inputs:
"""
# Embedding for 1000 unique words mapped to 128-dimensional vectors
shared_embedding = layers.Embedding(1000, 128)
# Variable-length sequence of integers
text_input_a = keras.Input(shape=(None,), dtype="int32")
# Variable-length sequence of integers
text_input_b = keras.Input(shape=(None,), dtype="int32")
# Reuse the same layer to encode both inputs
encoded_input_a = shared_embedding(text_input_a)
encoded_input_b = shared_embedding(text_input_b)
"""
## Extract and reuse nodes in the graph of layers
Because the graph of layers you are manipulating is a static data structure,
it can be accessed and inspected. And this is how you are able to plot
functional models as images.
This also means that you can access the activations of intermediate layers
("nodes" in the graph) and reuse them elsewhere --
which is very useful for something like feature extraction.
Let's look at an example. This is a VGG19 model with weights pretrained on ImageNet:
"""
vgg19 = keras.applications.VGG19()
"""
And these are the intermediate activations of the model,
obtained by querying the graph data structure:
"""
features_list = [layer.output for layer in vgg19.layers]
"""
Use these features to create a new feature-extraction model that returns
the values of the intermediate layer activations:
"""
feat_extraction_model = keras.Model(inputs=vgg19.input, outputs=features_list)
img = np.random.random((1, 224, 224, 3)).astype("float32")
extracted_features = feat_extraction_model(img)
"""
This comes in handy for tasks like
[neural style transfer](https://keras.io/examples/generative/neural_style_transfer/),
among other things.
"""
"""
## Extend the API using custom layers
`keras` includes a wide range of built-in layers, for example:
- Convolutional layers: `Conv1D`, `Conv2D`, `Conv3D`, `Conv2DTranspose`
- Pooling layers: `MaxPooling1D`, `MaxPooling2D`, `MaxPooling3D`, `AveragePooling1D`
- RNN layers: `GRU`, `LSTM`, `ConvLSTM2D`
- `BatchNormalization`, `Dropout`, `Embedding`, etc.
But if you don't find what you need, it's easy to extend the API by creating
your own layers. All layers subclass the `Layer` class and implement:
- a `call` method, which specifies the computation done by the layer.
- a `build` method, which creates the weights of the layer (this is just a style
convention, since you can also create weights in `__init__`).
To learn more about creating layers from scratch, read the
[custom layers and models](/guides/making_new_layers_and_models_via_subclassing) guide.
The following is a basic implementation of `keras.layers.Dense`:
"""
class CustomDense(layers.Layer):
def __init__(self, units=32):
super().__init__()
self.units = units
def build(self, input_shape):
self.w = self.add_weight(
shape=(input_shape[-1], self.units),
initializer="random_normal",
trainable=True,
)
self.b = self.add_weight(
shape=(self.units,), initializer="random_normal", trainable=True
)
def call(self, inputs):
return ops.matmul(inputs, self.w) + self.b
inputs = keras.Input((4,))
outputs = CustomDense(10)(inputs)
model = keras.Model(inputs, outputs)
"""
For serialization support in your custom layer, define a `get_config()`
method that returns the constructor arguments of the layer instance:
"""
class CustomDense(layers.Layer):
def __init__(self, units=32):
super().__init__()
self.units = units
def build(self, input_shape):
self.w = self.add_weight(
shape=(input_shape[-1], self.units),
initializer="random_normal",
trainable=True,
)
self.b = self.add_weight(
shape=(self.units,), initializer="random_normal", trainable=True
)
def call(self, inputs):
return ops.matmul(inputs, self.w) + self.b
def get_config(self):
return {"units": self.units}
inputs = keras.Input((4,))
outputs = CustomDense(10)(inputs)
model = keras.Model(inputs, outputs)
config = model.get_config()
new_model = keras.Model.from_config(
config, custom_objects={"CustomDense": CustomDense}
)
"""
Optionally, implement the class method `from_config(cls, config)` which is used
when recreating a layer instance given its config dictionary.
The default implementation of `from_config` is:
```python
def from_config(cls, config):
return cls(**config)
```
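If the constructor arguments need processing beyond being passed straight
through (for example, a nested object stored in the config), you can override
it. A hypothetical sketch (the `initializer` key is illustrative, not part of
the `CustomDense` above):
```python
@classmethod
def from_config(cls, config):
    # Hypothetical: the layer stores its initializer as a nested config,
    # so rebuild it before calling the constructor.
    config["initializer"] = keras.initializers.deserialize(
        config["initializer"]
    )
    return cls(**config)
```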
"""
"""
## When to use the functional API
Should you use the Keras functional API to create a new model,
or just subclass the `Model` class directly? In general, the functional API
is higher-level, easier and safer, and has a number of
features that subclassed models do not support.
However, model subclassing provides greater flexibility when building models
that are not easily expressible as directed acyclic graphs of layers.
For example, you could not implement a Tree-RNN with the functional API
and would have to subclass `Model` directly.
For an in-depth look at the differences between the functional API and
model subclassing, read
[What are Symbolic and Imperative APIs in TensorFlow 2.0?](https://blog.tensorflow.org/2019/01/what-are-symbolic-and-imperative-apis.html).
### Functional API strengths:
The following properties are also true for Sequential models
(which are also data structures), but are not true for subclassed models
(which are Python bytecode, not data structures).
#### Less verbose
There is no `super().__init__(...)`, no `def call(self, ...):`, etc.
Compare:
```python
inputs = keras.Input(shape=(32,))
x = layers.Dense(64, activation='relu')(inputs)
outputs = layers.Dense(10)(x)
mlp = keras.Model(inputs, outputs)
```
With the subclassed version:
```python
class MLP(keras.Model):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.dense_1 = layers.Dense(64, activation='relu')
self.dense_2 = layers.Dense(10)
def call(self, inputs):
x = self.dense_1(inputs)
return self.dense_2(x)
# Instantiate the model.
mlp = MLP()
# Necessary to create the model's state.
# The model doesn't have a state until it's called at least once.
_ = mlp(ops.zeros((1, 32)))
```
#### Model validation while defining its connectivity graph
In the functional API, the input specification (shape and dtype) is created
in advance (using `Input`). Every time you call a layer,
the layer checks that the specification passed to it matches its assumptions,
and it will raise a helpful error message if not.
This guarantees that any model you can build with the functional API will run.
All debugging -- other than convergence-related debugging --
happens statically during the model construction and not at execution time.
This is similar to type checking in a compiler.
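For example (a minimal sketch), a shape mismatch is caught as soon as the
layer is called, not when training starts:
```python
inputs = keras.Input(shape=(32,))
dense = layers.Dense(64)
x = dense(inputs)  # OK: the last axis matches what `dense` was built for.
other_inputs = keras.Input(shape=(32, 3))
# dense(other_inputs) would raise an error here, at graph-definition time,
# because `dense` now expects inputs with a last axis of size 32.
```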
#### A functional model is plottable and inspectable
You can plot the model as a graph, and you can easily access intermediate nodes
in this graph. For example, to extract and reuse the activations of intermediate
layers (as seen in a previous example):
```python
features_list = [layer.output for layer in vgg19.layers]
feat_extraction_model = keras.Model(inputs=vgg19.input, outputs=features_list)
```
#### A functional model can be serialized or cloned
Because a functional model is a data structure rather than a piece of code,
it is safely serializable and can be saved as a single file
that allows you to recreate the exact same model
without having access to any of the original code.
See the [serialization & saving guide](/guides/serialization_and_saving/).
To serialize a subclassed model, it is necessary for the implementer
to specify a `get_config()`
and `from_config()` method at the model level.
### Functional API weakness:
#### It does not support dynamic architectures
The functional API treats models as DAGs of layers.
This is true for most deep learning architectures, but not all -- for example,
recursive networks or Tree RNNs do not follow this assumption and cannot
be implemented in the functional API.
"""
"""
## Mix-and-match API styles
Choosing between the functional API or Model subclassing isn't a
binary decision that restricts you into one category of models.
All models in the `keras` API can interact with each other, whether they're
`Sequential` models, functional models, or subclassed models that are written
from scratch.
You can always use a functional model or `Sequential` model
as part of a subclassed model or layer:
"""
units = 32
timesteps = 10
input_dim = 5
# Define a Functional model
inputs = keras.Input((None, units))
x = layers.GlobalAveragePooling1D()(inputs)
outputs = layers.Dense(1)(x)
model = keras.Model(inputs, outputs)
class CustomRNN(layers.Layer):
def __init__(self):
super().__init__()
self.units = units
self.projection_1 = layers.Dense(units=units, activation="tanh")
self.projection_2 = layers.Dense(units=units, activation="tanh")
# Our previously-defined Functional model
self.classifier = model
def call(self, inputs):
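        # Manually unroll a simple RNN: project each timestep's input and
        # mix in the projected previous state.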
outputs = []
state = ops.zeros(shape=(inputs.shape[0], self.units))
for t in range(inputs.shape[1]):
x = inputs[:, t, :]
h = self.projection_1(x)
y = h + self.projection_2(state)
state = y
outputs.append(y)
features = ops.stack(outputs, axis=1)
print(features.shape)
return self.classifier(features)
rnn_model = CustomRNN()
_ = rnn_model(ops.zeros((1, timesteps, input_dim)))
"""
You can use any subclassed layer or model in the functional API
as long as it implements a `call` method that matches one of the following patterns:
- `call(self, inputs, **kwargs)` --
Where `inputs` is a tensor or a nested structure of tensors (e.g. a list of tensors),
and where `**kwargs` are non-tensor arguments (non-inputs).
- `call(self, inputs, training=None, **kwargs)` --
Where `training` is a boolean indicating whether the layer should behave
in training mode or in inference mode.
- `call(self, inputs, mask=None, **kwargs)` --
Where `mask` is a boolean mask tensor (useful for RNNs, for instance).
- `call(self, inputs, training=None, mask=None, **kwargs)` --
Of course, you can have both masking and training-specific behavior at the same time.
Additionally, if you implement the `get_config` method on your custom Layer or model,
the functional models you create will still be serializable and cloneable.
Here's a quick example of a custom RNN, written from scratch,
being used in a functional model:
"""
units = 32
timesteps = 10
input_dim = 5
batch_size = 16
class CustomRNN(layers.Layer):
def __init__(self):
super().__init__()
self.units = units
self.projection_1 = layers.Dense(units=units, activation="tanh")
self.projection_2 = layers.Dense(units=units, activation="tanh")
self.classifier = layers.Dense(1)
def call(self, inputs):
outputs = []
state = ops.zeros(shape=(inputs.shape[0], self.units))
for t in range(inputs.shape[1]):
x = inputs[:, t, :]
h = self.projection_1(x)
y = h + self.projection_2(state)
state = y
outputs.append(y)
features = ops.stack(outputs, axis=1)
return self.classifier(features)
# Note that you specify a static batch size for the inputs with the `batch_shape`
# arg, because the inner computation of `CustomRNN` requires a static batch size
# (when you create the `state` zeros tensor).
inputs = keras.Input(batch_shape=(batch_size, timesteps, input_dim))
x = layers.Conv1D(32, 3)(inputs)
outputs = CustomRNN()(x)
model = keras.Model(inputs, outputs)
rnn_model = CustomRNN()
_ = rnn_model(ops.zeros((1, 10, 5)))
| keras-core/guides/functional_api.py/0 | {
"file_path": "keras-core/guides/functional_api.py",
"repo_id": "keras-core",
"token_count": 9292
} | 24 |
import types
from keras_core.activations.activations import elu
from keras_core.activations.activations import exponential
from keras_core.activations.activations import gelu
from keras_core.activations.activations import hard_sigmoid
from keras_core.activations.activations import leaky_relu
from keras_core.activations.activations import linear
from keras_core.activations.activations import log_softmax
from keras_core.activations.activations import mish
from keras_core.activations.activations import relu
from keras_core.activations.activations import relu6
from keras_core.activations.activations import selu
from keras_core.activations.activations import sigmoid
from keras_core.activations.activations import silu
from keras_core.activations.activations import softmax
from keras_core.activations.activations import softplus
from keras_core.activations.activations import softsign
from keras_core.activations.activations import tanh
from keras_core.api_export import keras_core_export
from keras_core.saving import object_registration
from keras_core.saving import serialization_lib
ALL_OBJECTS = {
relu,
leaky_relu,
relu6,
softmax,
elu,
selu,
softplus,
softsign,
silu,
gelu,
tanh,
sigmoid,
exponential,
hard_sigmoid,
linear,
mish,
log_softmax,
}
ALL_OBJECTS_DICT = {fn.__name__: fn for fn in ALL_OBJECTS}
# Additional aliases
ALL_OBJECTS_DICT["swish"] = silu
@keras_core_export("keras_core.activations.serialize")
def serialize(activation):
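    """Return the config (a name string or dict) of an activation function."""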
fn_config = serialization_lib.serialize_keras_object(activation)
if "config" not in fn_config:
raise ValueError(
f"Unknown activation function '{activation}' cannot be "
"serialized due to invalid function name. Make sure to use "
"an activation name that matches the references defined in "
"activations.py or use "
"`@keras_core.saving.register_keras_serializable()`"
"to register any custom activations. "
f"config={fn_config}"
)
if not isinstance(activation, types.FunctionType):
# Case for additional custom activations represented by objects
return fn_config
if (
isinstance(fn_config["config"], str)
and fn_config["config"] not in globals()
):
# Case for custom activation functions from external activations modules
fn_config["config"] = object_registration.get_registered_name(
activation
)
return fn_config
# Case for keras.activations builtins (simply return name)
return fn_config["config"]
@keras_core_export("keras_core.activations.deserialize")
def deserialize(config, custom_objects=None):
"""Return a Keras activation function via its config."""
return serialization_lib.deserialize_keras_object(
config,
module_objects=ALL_OBJECTS_DICT,
custom_objects=custom_objects,
)
@keras_core_export("keras_core.activations.get")
def get(identifier):
"""Retrieve a Keras activation function via an identifier."""
if identifier is None:
return linear
if isinstance(identifier, (str, dict)):
obj = deserialize(identifier)
else:
obj = identifier
if callable(obj):
return obj
raise ValueError(
f"Could not interpret activation function identifier: {identifier}"
)
| keras-core/keras_core/activations/__init__.py/0 | {
"file_path": "keras-core/keras_core/activations/__init__.py",
"repo_id": "keras-core",
"token_count": 1249
} | 25 |
import jax
from keras_core.backend.config import floatx
from keras_core.random.seed_generator import SeedGenerator
from keras_core.random.seed_generator import draw_seed
from keras_core.random.seed_generator import make_default_seed
def jax_draw_seed(seed):
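    # JAX PRNG keys are passed through unchanged; anything else goes
    # through the backend-agnostic seed-drawing helper.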
if isinstance(seed, jax.Array):
return seed
else:
return draw_seed(seed)
def normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
dtype = dtype or floatx()
seed = jax_draw_seed(seed)
sample = jax.random.normal(seed, shape=shape, dtype=dtype)
return sample * stddev + mean
def uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None):
dtype = dtype or floatx()
seed = jax_draw_seed(seed)
return jax.random.uniform(
seed, shape=shape, dtype=dtype, minval=minval, maxval=maxval
)
def categorical(logits, num_samples, dtype="int32", seed=None):
seed = jax_draw_seed(seed)
output_shape = list(logits.shape)
output_shape[1] = num_samples
output_shape = tuple(output_shape)
output = jax.random.categorical(
seed, logits[..., None], shape=output_shape, axis=1
)
return output.astype(dtype)
def randint(shape, minval, maxval, dtype="int32", seed=None):
seed = jax_draw_seed(seed)
return jax.random.randint(
seed, shape=shape, dtype=dtype, minval=minval, maxval=maxval
)
def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
dtype = dtype or floatx()
seed = jax_draw_seed(seed)
sample = jax.random.truncated_normal(
seed, shape=shape, lower=-2.0, upper=2.0, dtype=dtype
)
return sample * stddev + mean
def _get_concrete_noise_shape(inputs, noise_shape):
if noise_shape is None:
return inputs.shape
concrete_inputs_shape = inputs.shape
concrete_noise_shape = []
for i, value in enumerate(noise_shape):
concrete_noise_shape.append(
concrete_inputs_shape[i] if value is None else value
)
return concrete_noise_shape
def dropout(inputs, rate, noise_shape=None, seed=None):
seed = jax_draw_seed(seed)
keep_prob = 1.0 - rate
# The `noise_shape` may contain `None` so we need to convert it
# into a concrete shape before passing it on to jax.
noise_shape = _get_concrete_noise_shape(inputs, noise_shape)
mask = jax.random.bernoulli(seed, p=keep_prob, shape=noise_shape)
mask = jax.numpy.broadcast_to(mask, inputs.shape)
return jax.lax.select(
mask, inputs / keep_prob, jax.numpy.zeros_like(inputs)
)
def shuffle(x, axis=0, seed=None):
seed = jax_draw_seed(seed)
return jax.random.shuffle(seed, x, axis)
| keras-core/keras_core/backend/jax/random.py/0 | {
"file_path": "keras-core/keras_core/backend/jax/random.py",
"repo_id": "keras-core",
"token_count": 1107
} | 26 |
import torch
from keras_core import optimizers
from keras_core.backend.torch.optimizers import torch_parallel_optimizer
class SGD(torch_parallel_optimizer.TorchParallelOptimizer, optimizers.SGD):
def _parallel_update_step(
self,
grads,
variables,
learning_rate,
):
keras_variables = variables
variables = [v.value for v in variables]
if self.momentum != 0:
bufs = [
self.momentums[self._get_variable_index(variable)].value
for variable in keras_variables
]
for i in range(len(bufs)):
if bufs[i] is None:
bufs[i] = torch.clone(grads[i]).detach()
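            # Fused momentum update across all variables:
            # buf <- momentum * buf - learning_rate * grad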
torch._foreach_mul_(bufs, self.momentum)
torch._foreach_add_(bufs, grads, alpha=-learning_rate)
if self.nesterov:
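                # Nesterov look-ahead:
                # var <- var - learning_rate * grad + momentum * buf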
torch._foreach_add_(variables, grads, alpha=-learning_rate)
torch._foreach_add_(variables, bufs, alpha=self.momentum)
else:
torch._foreach_add_(variables, bufs)
else:
torch._foreach_add_(variables, grads, alpha=-learning_rate)
| keras-core/keras_core/backend/torch/optimizers/torch_sgd.py/0 | {
"file_path": "keras-core/keras_core/backend/torch/optimizers/torch_sgd.py",
"repo_id": "keras-core",
"token_count": 584
} | 27 |
import numpy as np
from keras_core import backend
from keras_core import constraints
from keras_core import testing
def get_example_array():
np.random.seed(3537)
example_array = np.random.random((100, 100)) * 100.0 - 50.0
example_array[0, 0] = 0.0 # Possible edge case
return example_array
class ConstraintsTest(testing.TestCase):
def test_max_norm(self):
constraint_fn = constraints.MaxNorm(2.0)
x = np.array([[0, 0, 0], [1.0, 0, 0], [3, 0, 0], [3, 3, 3]]).T
target = np.array(
[
[0, 0, 0],
[1.0, 0, 0],
[2.0, 0, 0],
[2.0 / np.sqrt(3), 2.0 / np.sqrt(3), 2.0 / np.sqrt(3)],
]
).T
output = constraint_fn(x)
self.assertAllClose(target, output)
def test_non_neg(self):
constraint_fn = constraints.NonNeg()
output = constraint_fn(get_example_array())
output = backend.convert_to_numpy(output)
self.assertTrue((np.min(output, axis=1) >= 0.0).all())
def test_unit_norm(self):
constraint_fn = constraints.UnitNorm()
output = constraint_fn(get_example_array())
output = backend.convert_to_numpy(output)
l2 = np.sqrt(np.sum(np.square(output), axis=0))
self.assertAllClose(l2, 1.0)
def test_min_max_norm(self):
constraint_fn = constraints.MinMaxNorm(min_value=0.2, max_value=0.5)
output = constraint_fn(get_example_array())
output = backend.convert_to_numpy(output)
l2 = np.sqrt(np.sum(np.square(output), axis=0))
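        # Indexing with a boolean mask yields an empty (falsy) array when no
        # element matches, so these assert that all norms lie in [0.2, 0.5].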
self.assertFalse(l2[l2 < 0.2])
self.assertFalse(l2[l2 > 0.5 + 1e-6])
def test_get_method(self):
obj = constraints.get("unit_norm")
self.assertTrue(obj, constraints.UnitNorm)
obj = constraints.get(None)
self.assertEqual(obj, None)
with self.assertRaises(ValueError):
constraints.get("typo")
| keras-core/keras_core/constraints/constraints_test.py/0 | {
"file_path": "keras-core/keras_core/constraints/constraints_test.py",
"repo_id": "keras-core",
"token_count": 928
} | 28 |
import numpy as np
import pytest
from keras_core import testing
from keras_core.layers.activations import prelu
class PReLUTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_prelu(self):
self.run_layer_test(
prelu.PReLU,
init_kwargs={
"alpha_initializer": "zeros",
"alpha_regularizer": "L1",
"alpha_constraint": "MaxNorm",
"shared_axes": 1,
},
input_shape=(2, 3, 4),
supports_masking=True,
)
def test_prelu_correctness(self):
def np_prelu(x, alpha):
return (x > 0) * x + (x <= 0) * alpha * x
inputs = np.random.randn(2, 10, 5, 3)
prelu_layer = prelu.PReLU(
alpha_initializer="glorot_uniform",
alpha_regularizer="l1",
alpha_constraint="non_neg",
shared_axes=(1, 2),
)
prelu_layer.build(inputs.shape)
weights = np.random.random((1, 1, 3))
prelu_layer.alpha.assign(weights)
ref_out = np_prelu(inputs, weights)
self.assertAllClose(prelu_layer(inputs), ref_out)
| keras-core/keras_core/layers/activations/prelu_test.py/0 | {
"file_path": "keras-core/keras_core/layers/activations/prelu_test.py",
"repo_id": "keras-core",
"token_count": 609
} | 29 |
from keras_core import activations
from keras_core import constraints
from keras_core import initializers
from keras_core import ops
from keras_core import regularizers
from keras_core.api_export import keras_core_export
from keras_core.layers.input_spec import InputSpec
from keras_core.layers.layer import Layer
@keras_core_export("keras_core.layers.Dense")
class Dense(Layer):
"""Just your regular densely-connected NN layer.
`Dense` implements the operation:
`output = activation(dot(input, kernel) + bias)`
where `activation` is the element-wise activation function
passed as the `activation` argument, `kernel` is a weights matrix
created by the layer, and `bias` is a bias vector created by the layer
(only applicable if `use_bias` is `True`).
Note: If the input to the layer has a rank greater than 2, `Dense`
computes the dot product between the `inputs` and the `kernel` along the
last axis of the `inputs` and axis 0 of the `kernel` (using `tf.tensordot`).
For example, if input has dimensions `(batch_size, d0, d1)`, then we create
a `kernel` with shape `(d1, units)`, and the `kernel` operates along axis 2
of the `input`, on every sub-tensor of shape `(1, 1, d1)` (there are
`batch_size * d0` such sub-tensors). The output in this case will have
shape `(batch_size, d0, units)`.
Args:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
Input shape:
N-D tensor with shape: `(batch_size, ..., input_dim)`.
The most common situation would be
a 2D input with shape `(batch_size, input_dim)`.
Output shape:
N-D tensor with shape: `(batch_size, ..., units)`.
For instance, for a 2D input with shape `(batch_size, input_dim)`,
the output would have shape `(batch_size, units)`.
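    Example:
    >>> # A minimal usage sketch: project 16-dim features to 32 dims.
    >>> x = np.random.random((4, 16))
    >>> y = keras_core.layers.Dense(32, activation="relu")(x)
    >>> y.shape
    (4, 32)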
"""
def __init__(
self,
units,
activation=None,
use_bias=True,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs,
):
super().__init__(activity_regularizer=activity_regularizer, **kwargs)
self.units = units
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.input_spec = InputSpec(min_ndim=2)
self.supports_masking = True
def build(self, input_shape):
input_dim = input_shape[-1]
self.kernel = self.add_weight(
name="kernel",
shape=(input_dim, self.units),
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
)
if self.use_bias:
self.bias = self.add_weight(
name="bias",
shape=(self.units,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
)
self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})
self.built = True
def call(self, inputs):
x = ops.matmul(inputs, self.kernel)
if self.use_bias:
x = x + self.bias
if self.activation:
x = self.activation(x)
return x
def compute_output_shape(self, input_shape):
output_shape = list(input_shape)
output_shape[-1] = self.units
return tuple(output_shape)
def get_config(self):
base_config = super().get_config()
config = {
"units": self.units,
"activation": activations.serialize(self.activation),
"use_bias": self.use_bias,
"kernel_initializer": initializers.serialize(
self.kernel_initializer
),
"bias_initializer": initializers.serialize(self.bias_initializer),
"kernel_regularizer": regularizers.serialize(
self.kernel_regularizer
),
"bias_regularizer": regularizers.serialize(self.bias_regularizer),
"kernel_constraint": constraints.serialize(self.kernel_constraint),
"bias_constraint": constraints.serialize(self.bias_constraint),
}
return {**base_config, **config}
| keras-core/keras_core/layers/core/dense.py/0 | {
"file_path": "keras-core/keras_core/layers/core/dense.py",
"repo_id": "keras-core",
"token_count": 2298
} | 30 |
import numpy as np
import pytest
from absl.testing import parameterized
from tensorflow import data as tf_data
from keras_core import layers
from keras_core import testing
class CenterCropTest(testing.TestCase, parameterized.TestCase):
def np_center_crop(self, img, h_new, w_new):
img = np.array(img)
if img.ndim == 4:
_, h, w = img.shape[:3]
else:
h, w = img.shape[:2]
h_start = (h - h_new) // 2
w_start = (w - w_new) // 2
return img[..., h_start : h_start + h_new, w_start : w_start + w_new, :]
@pytest.mark.requires_trainable_backend
def test_center_crop_basics(self):
self.run_layer_test(
layers.CenterCrop,
init_kwargs={
"height": 6,
"width": 6,
"data_format": "channels_last",
},
input_shape=(2, 12, 12, 3),
expected_output_shape=(2, 6, 6, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
)
self.run_layer_test(
layers.CenterCrop,
init_kwargs={
"height": 7,
"width": 7,
"data_format": "channels_first",
},
input_shape=(2, 3, 13, 13),
expected_output_shape=(2, 3, 7, 7),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
)
@parameterized.parameters(
[
((5, 7), "channels_first"),
((5, 7), "channels_last"),
((4, 9), "channels_first"),
((9, 4), "channels_last"),
]
)
def test_center_crop_correctness(self, size, data_format):
# batched case
if data_format == "channels_first":
img = np.random.random((2, 3, 9, 11))
else:
img = np.random.random((2, 9, 11, 3))
out = layers.CenterCrop(
size[0],
size[1],
data_format=data_format,
)(img)
if data_format == "channels_first":
img_transpose = np.transpose(img, (0, 2, 3, 1))
ref_out = np.transpose(
self.np_center_crop(img_transpose, size[0], size[1]),
(0, 3, 1, 2),
)
else:
ref_out = self.np_center_crop(img, size[0], size[1])
self.assertAllClose(ref_out, out)
# unbatched case
if data_format == "channels_first":
img = np.random.random((3, 9, 11))
else:
img = np.random.random((9, 11, 3))
out = layers.CenterCrop(
size[0],
size[1],
data_format=data_format,
)(img)
if data_format == "channels_first":
img_transpose = np.transpose(img, (1, 2, 0))
ref_out = np.transpose(
self.np_center_crop(
img_transpose,
size[0],
size[1],
),
(2, 0, 1),
)
else:
ref_out = self.np_center_crop(
img,
size[0],
size[1],
)
self.assertAllClose(ref_out, out)
@parameterized.parameters(
[
((15, 10), "channels_first"),
((10, 17), "channels_last"),
]
)
def test_input_smaller_than_crop_box(self, size, data_format):
"""Output should equal resizing with crop_to_aspect ratio."""
# batched case
if data_format == "channels_first":
img = np.random.random((2, 3, 9, 11))
else:
img = np.random.random((2, 9, 11, 3))
out = layers.CenterCrop(
size[0],
size[1],
data_format=data_format,
)(img)
ref_out = layers.Resizing(
size[0], size[1], data_format=data_format, crop_to_aspect_ratio=True
)(img)
self.assertAllClose(ref_out, out)
# unbatched case
if data_format == "channels_first":
img = np.random.random((3, 9, 11))
else:
img = np.random.random((9, 11, 3))
out = layers.CenterCrop(
size[0],
size[1],
data_format=data_format,
)(img)
ref_out = layers.Resizing(
size[0], size[1], data_format=data_format, crop_to_aspect_ratio=True
)(img)
self.assertAllClose(ref_out, out)
def test_tf_data_compatibility(self):
layer = layers.CenterCrop(8, 9)
input_data = np.random.random((2, 10, 12, 3))
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
for output in ds.take(1):
output = output.numpy()
self.assertEqual(list(output.shape), [2, 8, 9, 3])
def test_list_compatibility(self):
images = [
np.random.rand(10, 10, 3),
np.random.rand(10, 10, 3),
]
output = layers.CenterCrop(height=6, width=5)(images)
ref_output = self.np_center_crop(images, 6, 5)
self.assertListEqual(list(output.shape), [2, 6, 5, 3])
self.assertAllClose(ref_output, output)
| keras-core/keras_core/layers/preprocessing/center_crop_test.py/0 | {
"file_path": "keras-core/keras_core/layers/preprocessing/center_crop_test.py",
"repo_id": "keras-core",
"token_count": 2967
} | 31 |
import numpy as np
import pytest
from absl.testing import parameterized
from tensorflow import data as tf_data
from keras_core import Sequential
from keras_core import backend
from keras_core import layers
from keras_core import testing
class ResizingTest(testing.TestCase, parameterized.TestCase):
def test_resizing_basics(self):
self.run_layer_test(
layers.Resizing,
init_kwargs={
"height": 6,
"width": 6,
"data_format": "channels_last",
"interpolation": "bicubic",
"crop_to_aspect_ratio": True,
},
input_shape=(2, 12, 12, 3),
expected_output_shape=(2, 6, 6, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
run_training_check=False,
)
self.run_layer_test(
layers.Resizing,
init_kwargs={
"height": 6,
"width": 6,
"data_format": "channels_first",
"interpolation": "bilinear",
"crop_to_aspect_ratio": True,
},
input_shape=(2, 3, 12, 12),
expected_output_shape=(2, 3, 6, 6),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
run_training_check=False,
)
self.run_layer_test(
layers.Resizing,
init_kwargs={
"height": 6,
"width": 6,
"data_format": "channels_last",
"interpolation": "nearest",
"crop_to_aspect_ratio": False,
},
input_shape=(2, 12, 12, 3),
expected_output_shape=(2, 6, 6, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
run_training_check=False,
)
@pytest.mark.skipif(
backend.backend() == "torch", reason="Torch does not support lanczos."
)
def test_resizing_basics_lanczos5(self):
self.run_layer_test(
layers.Resizing,
init_kwargs={
"height": 6,
"width": 6,
"data_format": "channels_first",
"interpolation": "lanczos5",
"crop_to_aspect_ratio": False,
},
input_shape=(2, 3, 12, 12),
expected_output_shape=(2, 3, 6, 6),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
run_training_check=False,
)
@parameterized.parameters([("channels_first",), ("channels_last",)])
def test_down_sampling_numeric(self, data_format):
img = np.reshape(np.arange(0, 16), (1, 4, 4, 1)).astype(np.float32)
if data_format == "channels_first":
img = img.transpose(0, 3, 1, 2)
out = layers.Resizing(
height=2, width=2, interpolation="nearest", data_format=data_format
)(img)
ref_out = (
np.asarray([[5, 7], [13, 15]])
.astype(np.float32)
.reshape((1, 2, 2, 1))
)
if data_format == "channels_first":
ref_out = ref_out.transpose(0, 3, 1, 2)
self.assertAllClose(ref_out, out)
@parameterized.parameters([("channels_first",), ("channels_last",)])
def test_up_sampling_numeric(self, data_format):
img = np.reshape(np.arange(0, 4), (1, 2, 2, 1)).astype(np.float32)
if data_format == "channels_first":
img = img.transpose(0, 3, 1, 2)
out = layers.Resizing(
height=4,
width=4,
interpolation="nearest",
data_format=data_format,
)(img)
ref_out = (
np.asarray([[0, 0, 1, 1], [0, 0, 1, 1], [2, 2, 3, 3], [2, 2, 3, 3]])
.astype(np.float32)
.reshape((1, 4, 4, 1))
)
if data_format == "channels_first":
ref_out = ref_out.transpose(0, 3, 1, 2)
self.assertAllClose(ref_out, out)
@parameterized.parameters([("channels_first",), ("channels_last",)])
def test_crop_to_aspect_ratio(self, data_format):
img = np.reshape(np.arange(0, 16), (1, 4, 4, 1)).astype("float32")
if data_format == "channels_first":
img = img.transpose(0, 3, 1, 2)
out = layers.Resizing(
height=4,
width=2,
interpolation="nearest",
data_format=data_format,
crop_to_aspect_ratio=True,
)(img)
ref_out = (
np.asarray(
[
[1, 2],
[5, 6],
[9, 10],
[13, 14],
]
)
.astype("float32")
.reshape((1, 4, 2, 1))
)
if data_format == "channels_first":
ref_out = ref_out.transpose(0, 3, 1, 2)
self.assertAllClose(ref_out, out)
@parameterized.parameters([("channels_first",), ("channels_last",)])
def test_unbatched_image(self, data_format):
img = np.reshape(np.arange(0, 16), (4, 4, 1)).astype("float32")
if data_format == "channels_first":
img = img.transpose(2, 0, 1)
out = layers.Resizing(
2, 2, interpolation="nearest", data_format=data_format
)(img)
ref_out = (
np.asarray(
[
[5, 7],
[13, 15],
]
)
.astype("float32")
.reshape((2, 2, 1))
)
if data_format == "channels_first":
ref_out = ref_out.transpose(2, 0, 1)
self.assertAllClose(ref_out, out)
def test_tf_data_compatibility(self):
layer = layers.Resizing(8, 9)
input_data = np.random.random((2, 10, 12, 3))
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
for output in ds.take(1):
output = output.numpy()
self.assertEqual(list(output.shape), [2, 8, 9, 3])
@pytest.mark.skipif(
backend.backend() != "tensorflow",
reason="Sequential + tf.data only works with TF backend",
)
def test_tf_data_compatibility_sequential(self):
# Test compatibility when wrapping in a Sequential
# https://github.com/keras-team/keras-core/issues/347
layer = layers.Resizing(8, 9)
input_data = np.random.random((2, 10, 12, 3))
ds = (
tf_data.Dataset.from_tensor_slices(input_data)
.batch(2)
.map(Sequential([layer]))
)
for output in ds.take(1):
output = output.numpy()
self.assertEqual(list(output.shape), [2, 8, 9, 3])
| keras-core/keras_core/layers/preprocessing/resizing_test.py/0 | {
"file_path": "keras-core/keras_core/layers/preprocessing/resizing_test.py",
"repo_id": "keras-core",
"token_count": 3932
} | 32 |
from keras_core import ops
from keras_core.api_export import keras_core_export
from keras_core.layers.input_spec import InputSpec
from keras_core.layers.layer import Layer
@keras_core_export("keras_core.layers.UpSampling1D")
class UpSampling1D(Layer):
"""Upsampling layer for 1D inputs.
Repeats each temporal step `size` times along the time axis.
Examples:
>>> input_shape = (2, 2, 3)
>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
>>> x
[[[ 0 1 2]
[ 3 4 5]]
[[ 6 7 8]
[ 9 10 11]]]
>>> y = keras_core.layers.UpSampling1D(size=2)(x)
>>> y
[[[ 0. 1. 2.]
[ 0. 1. 2.]
[ 3. 4. 5.]
[ 3. 4. 5.]]
[[ 6. 7. 8.]
[ 6. 7. 8.]
[ 9. 10. 11.]
[ 9. 10. 11.]]]
Args:
size: Integer. Upsampling factor.
Input shape:
3D tensor with shape: `(batch_size, steps, features)`.
Output shape:
3D tensor with shape: `(batch_size, upsampled_steps, features)`.
"""
def __init__(self, size=2, **kwargs):
super().__init__(**kwargs)
self.size = int(size)
self.input_spec = InputSpec(ndim=3)
def compute_output_shape(self, input_shape):
size = (
self.size * input_shape[1] if input_shape[1] is not None else None
)
return [input_shape[0], size, input_shape[2]]
def call(self, inputs):
return ops.repeat(x=inputs, repeats=self.size, axis=1)
def get_config(self):
config = {"size": self.size}
base_config = super().get_config()
return {**base_config, **config}
| keras-core/keras_core/layers/reshaping/up_sampling1d.py/0 | {
"file_path": "keras-core/keras_core/layers/reshaping/up_sampling1d.py",
"repo_id": "keras-core",
"token_count": 747
} | 33 |
from keras_core.models.functional import Functional
from keras_core.models.model import Model
from keras_core.models.sequential import Sequential
| keras-core/keras_core/models/__init__.py/0 | {
"file_path": "keras-core/keras_core/models/__init__.py",
"repo_id": "keras-core",
"token_count": 38
} | 34 |
from keras_core import backend
from keras_core.api_export import keras_core_export
from keras_core.backend import KerasTensor
from keras_core.backend import any_symbolic_tensors
from keras_core.ops.operation import Operation
from keras_core.ops.operation_utils import compute_conv_output_shape
class Resize(Operation):
def __init__(
self,
size,
interpolation="bilinear",
antialias=False,
data_format="channels_last",
):
super().__init__()
self.size = tuple(size)
self.interpolation = interpolation
self.antialias = antialias
self.data_format = data_format
def call(self, image):
return backend.image.resize(
image,
self.size,
interpolation=self.interpolation,
antialias=self.antialias,
data_format=self.data_format,
)
def compute_output_spec(self, image):
if len(image.shape) == 3:
return KerasTensor(
self.size + (image.shape[-1],), dtype=image.dtype
)
elif len(image.shape) == 4:
if self.data_format == "channels_last":
return KerasTensor(
(image.shape[0],) + self.size + (image.shape[-1],),
dtype=image.dtype,
)
else:
return KerasTensor(
(image.shape[0], image.shape[1]) + self.size,
dtype=image.dtype,
)
raise ValueError(
"Invalid input rank: expected rank 3 (single image) "
"or rank 4 (batch of images). Received input with shape: "
f"image.shape={image.shape}"
)
@keras_core_export("keras_core.ops.image.resize")
def resize(
image,
size,
interpolation="bilinear",
antialias=False,
data_format="channels_last",
):
"""Resize images to size using the specified interpolation method.
Args:
image: Input image or batch of images. Must be 3D or 4D.
size: Size of output image in `(height, width)` format.
interpolation: Interpolation method. Available methods are `"nearest"`,
`"bilinear"`, and `"bicubic"`. Defaults to `"bilinear"`.
antialias: Whether to use an antialiasing filter when downsampling an
image. Defaults to `False`.
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape `(batch, height, width, channels)`
while `"channels_first"` corresponds to inputs with shape
        `(batch, channels, height, width)`. It defaults to the
`image_data_format` value found in your Keras config file at
`~/.keras/keras.json`. If you never set it, then it will be
`"channels_last"`.
Returns:
Resized image or batch of images.
Examples:
>>> x = np.random.random((2, 4, 4, 3)) # batch of 2 RGB images
>>> y = keras_core.ops.image.resize(x, (2, 2))
>>> y.shape
(2, 2, 2, 3)
>>> x = np.random.random((4, 4, 3)) # single RGB image
>>> y = keras_core.ops.image.resize(x, (2, 2))
>>> y.shape
(2, 2, 3)
>>> x = np.random.random((2, 3, 4, 4)) # batch of 2 RGB images
>>> y = keras_core.ops.image.resize(x, (2, 2),
... data_format="channels_first")
>>> y.shape
(2, 3, 2, 2)
"""
if any_symbolic_tensors((image,)):
return Resize(
size,
interpolation=interpolation,
antialias=antialias,
data_format=data_format,
).symbolic_call(image)
return backend.image.resize(
image,
size,
interpolation=interpolation,
antialias=antialias,
data_format=data_format,
)
class AffineTransform(Operation):
def __init__(
self,
interpolation="bilinear",
fill_mode="constant",
fill_value=0,
data_format="channels_last",
):
super().__init__()
self.interpolation = interpolation
self.fill_mode = fill_mode
self.fill_value = fill_value
self.data_format = data_format
def call(self, image, transform):
return backend.image.affine_transform(
image,
transform,
interpolation=self.interpolation,
fill_mode=self.fill_mode,
fill_value=self.fill_value,
data_format=self.data_format,
)
def compute_output_spec(self, image, transform):
if len(image.shape) not in (3, 4):
raise ValueError(
"Invalid image rank: expected rank 3 (single image) "
"or rank 4 (batch of images). Received input with shape: "
f"image.shape={image.shape}"
)
if len(transform.shape) not in (1, 2):
raise ValueError(
"Invalid transform rank: expected rank 1 (single transform) "
"or rank 2 (batch of transforms). Received input with shape: "
f"transform.shape={transform.shape}"
)
return KerasTensor(image.shape, dtype=image.dtype)
@keras_core_export("keras_core.ops.image.affine_transform")
def affine_transform(
image,
transform,
interpolation="bilinear",
fill_mode="constant",
fill_value=0,
data_format="channels_last",
):
"""Applies the given transform(s) to the image(s).
Args:
image: Input image or batch of images. Must be 3D or 4D.
transform: Projective transform matrix/matrices. A vector of length 8 or
tensor of size N x 8. If one row of transform is
`[a0, a1, a2, b0, b1, b2, c0, c1]`, then it maps the output point
`(x, y)` to a transformed input point
`(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`,
where `k = c0 x + c1 y + 1`. The transform is inverted compared to
the transform mapping input points to output points. Note that
gradients are not backpropagated into transformation parameters.
Note that `c0` and `c1` are only effective when using TensorFlow
backend and will be considered as `0` when using other backends.
interpolation: Interpolation method. Available methods are `"nearest"`,
and `"bilinear"`. Defaults to `"bilinear"`.
fill_mode: Points outside the boundaries of the input are filled
according to the given mode. Available methods are `"constant"`,
`"nearest"`, `"wrap"` and `"reflect"`. Defaults to `"constant"`.
- `"reflect"`: `(d c b a | a b c d | d c b a)`
The input is extended by reflecting about the edge of the last
pixel.
- `"constant"`: `(k k k k | a b c d | k k k k)`
The input is extended by filling all values beyond
the edge with the same constant value k specified by
`fill_value`.
- `"wrap"`: `(a b c d | a b c d | a b c d)`
The input is extended by wrapping around to the opposite edge.
- `"nearest"`: `(a a a a | a b c d | d d d d)`
The input is extended by the nearest pixel.
fill_value: Value used for points outside the boundaries of the input if
`fill_mode="constant"`. Defaults to `0`.
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape `(batch, height, width, channels)`
while `"channels_first"` corresponds to inputs with shape
        `(batch, channels, height, width)`. It defaults to the
`image_data_format` value found in your Keras config file at
`~/.keras/keras.json`. If you never set it, then it will be
`"channels_last"`.
Returns:
Applied affine transform image or batch of images.
Examples:
>>> x = np.random.random((2, 64, 80, 3)) # batch of 2 RGB images
>>> transform = np.array(
... [
... [1.5, 0, -20, 0, 1.5, -16, 0, 0], # zoom
... [1, 0, -20, 0, 1, -16, 0, 0], # translation
... ]
... )
>>> y = keras_core.ops.image.affine_transform(x, transform)
>>> y.shape
(2, 64, 80, 3)
>>> x = np.random.random((64, 80, 3)) # single RGB image
>>> transform = np.array([1.0, 0.5, -20, 0.5, 1.0, -16, 0, 0]) # shear
>>> y = keras_core.ops.image.affine_transform(x, transform)
>>> y.shape
(64, 80, 3)
>>> x = np.random.random((2, 3, 64, 80)) # batch of 2 RGB images
>>> transform = np.array(
... [
... [1.5, 0, -20, 0, 1.5, -16, 0, 0], # zoom
... [1, 0, -20, 0, 1, -16, 0, 0], # translation
... ]
... )
>>> y = keras_core.ops.image.affine_transform(x, transform,
... data_format="channels_first")
>>> y.shape
(2, 3, 64, 80)
"""
if any_symbolic_tensors((image, transform)):
return AffineTransform(
interpolation=interpolation,
fill_mode=fill_mode,
fill_value=fill_value,
data_format=data_format,
).symbolic_call(image, transform)
return backend.image.affine_transform(
image,
transform,
interpolation=interpolation,
fill_mode=fill_mode,
fill_value=fill_value,
data_format=data_format,
)
class ExtractPatches(Operation):
def __init__(
self,
size,
strides=None,
dilation_rate=1,
padding="valid",
data_format="channels_last",
):
super().__init__()
if isinstance(size, int):
size = (size, size)
self.size = size
self.strides = strides
self.dilation_rate = dilation_rate
self.padding = padding
self.data_format = data_format
def call(self, image):
return _extract_patches(
image=image,
size=self.size,
strides=self.strides,
dilation_rate=self.dilation_rate,
padding=self.padding,
data_format=self.data_format,
)
def compute_output_spec(self, image):
image_shape = image.shape
        if self.strides:
            strides = self.strides
        else:
            strides = (self.size[0], self.size[1])
if self.data_format == "channels_last":
channels_in = image.shape[-1]
else:
channels_in = image.shape[-3]
if len(image.shape) == 3:
image_shape = (1,) + image_shape
filters = self.size[0] * self.size[1] * channels_in
kernel_size = (self.size[0], self.size[1])
out_shape = compute_conv_output_shape(
image_shape,
filters,
kernel_size,
strides=strides,
padding=self.padding,
data_format=self.data_format,
dilation_rate=self.dilation_rate,
)
if len(image.shape) == 3:
out_shape = out_shape[1:]
return KerasTensor(shape=out_shape, dtype=image.dtype)
@keras_core_export("keras_core.ops.image.extract_patches")
def extract_patches(
image,
size,
strides=None,
dilation_rate=1,
padding="valid",
data_format="channels_last",
):
"""Extracts patches from the image(s).
Args:
image: Input image or batch of images. Must be 3D or 4D.
        size: Patch size, an int or a tuple `(patch_height, patch_width)`.
strides: strides along height and width. If not specified, or
if `None`, it defaults to the same value as `size`.
dilation_rate: This is the input stride, specifying how far two
            consecutive patch samples are in the input. For values other than
            1, `strides` must be 1. NOTE: `strides > 1` is not supported in
            conjunction with `dilation_rate > 1`.
padding: The type of padding algorithm to use: `"same"` or `"valid"`.
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape `(batch, height, width, channels)`
while `"channels_first"` corresponds to inputs with shape
        `(batch, channels, height, width)`. It defaults to the
`image_data_format` value found in your Keras config file at
`~/.keras/keras.json`. If you never set it, then it will be
`"channels_last"`.
Returns:
Extracted patches 3D (if not batched) or 4D (if batched)
Examples:
>>> image = np.random.random(
... (2, 20, 20, 3)
... ).astype("float32") # batch of 2 RGB images
>>> patches = keras_core.ops.image.extract_patches(image, (5, 5))
>>> patches.shape
(2, 4, 4, 75)
>>> image = np.random.random((20, 20, 3)).astype("float32") # 1 RGB image
>>> patches = keras_core.ops.image.extract_patches(image, (3, 3), (1, 1))
>>> patches.shape
(18, 18, 27)
"""
if any_symbolic_tensors((image,)):
return ExtractPatches(
size=size,
strides=strides,
dilation_rate=dilation_rate,
padding=padding,
data_format=data_format,
).symbolic_call(image)
return _extract_patches(
image, size, strides, dilation_rate, padding, data_format=data_format
)
def _extract_patches(
image,
size,
strides=None,
dilation_rate=1,
padding="valid",
data_format="channels_last",
):
if isinstance(size, int):
patch_h = patch_w = size
elif len(size) == 2:
patch_h, patch_w = size[0], size[1]
else:
raise TypeError(
"Invalid `size` argument. Expected an "
f"int or a tuple of length 2. Received: size={size}"
)
if data_format == "channels_last":
channels_in = image.shape[-1]
elif data_format == "channels_first":
channels_in = image.shape[-3]
if not strides:
strides = size
out_dim = patch_h * patch_w * channels_in
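    # Extract patches as a convolution with an identity kernel: each output
    # channel copies exactly one (row, column, channel) position of the
    # receptive field, so the conv output is the flattened patch.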
kernel = backend.numpy.eye(out_dim)
kernel = backend.numpy.reshape(
kernel, (patch_h, patch_w, channels_in, out_dim)
)
_unbatched = False
if len(image.shape) == 3:
_unbatched = True
image = backend.numpy.expand_dims(image, axis=0)
patches = backend.nn.conv(
inputs=image,
kernel=kernel,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
)
if _unbatched:
patches = backend.numpy.squeeze(patches, axis=0)
return patches
class MapCoordinates(Operation):
def __init__(self, order, fill_mode="constant", fill_value=0):
super().__init__()
self.order = order
self.fill_mode = fill_mode
self.fill_value = fill_value
def call(self, image, coordinates):
return backend.image.map_coordinates(
image,
coordinates,
order=self.order,
fill_mode=self.fill_mode,
fill_value=self.fill_value,
)
def compute_output_spec(self, image, coordinates):
if coordinates.shape[0] != len(image.shape):
raise ValueError(
"First dim of `coordinates` must be the same as the rank of "
"`image`. "
f"Received image with shape: {image.shape} and coordinate "
f"leading dim of {coordinates.shape[0]}"
)
if len(coordinates.shape) < 2:
raise ValueError(
"Invalid coordinates rank: expected at least rank 2."
f" Received input with shape: {coordinates.shape}"
)
return KerasTensor(coordinates.shape[1:], dtype=image.dtype)
@keras_core_export("keras_core.ops.image.map_coordinates")
def map_coordinates(
input, coordinates, order, fill_mode="constant", fill_value=0
):
"""Map the input array to new coordinates by interpolation..
Note that interpolation near boundaries differs from the scipy function,
because we fixed an outstanding bug
[scipy/issues/2640](https://github.com/scipy/scipy/issues/2640).
Args:
input: The input array.
coordinates: The coordinates at which input is evaluated.
        order: The order of the spline interpolation. The order must be `0`
            or `1`. `0` selects nearest-neighbor interpolation and `1`
            selects linear interpolation.
fill_mode: Points outside the boundaries of the input are filled
            according to the given mode. Available modes are `"constant"`,
            `"nearest"`, `"wrap"`, `"mirror"` and `"reflect"`. Defaults to
`"constant"`.
- `"constant"`: `(k k k k | a b c d | k k k k)`
The input is extended by filling all values beyond
the edge with the same constant value k specified by
`fill_value`.
- `"nearest"`: `(a a a a | a b c d | d d d d)`
The input is extended by the nearest pixel.
- `"wrap"`: `(a b c d | a b c d | a b c d)`
The input is extended by wrapping around to the opposite edge.
- `"mirror"`: `(c d c b | a b c d | c b a b)`
The input is extended by mirroring about the edge.
- `"reflect"`: `(d c b a | a b c d | d c b a)`
The input is extended by reflecting about the edge of the last
pixel.
fill_value: Value used for points outside the boundaries of the input if
`fill_mode="constant"`. Defaults to `0`.
Returns:
Output image or batch of images.
"""
if any_symbolic_tensors((input, coordinates)):
return MapCoordinates(
order,
fill_mode,
fill_value,
).symbolic_call(input, coordinates)
return backend.image.map_coordinates(
input,
coordinates,
order,
fill_mode,
fill_value,
)
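if __name__ == "__main__":
    # A minimal sketch of `map_coordinates` usage; values are illustrative.
    import numpy as np
    image = np.arange(16, dtype="float32").reshape(4, 4)
    # `coordinates` has shape (rank(image), num_points); the output has
    # shape coordinates.shape[1:], here (2,).
    coords = np.array([[0.5, 2.0], [0.5, 3.0]])
    print(map_coordinates(image, coords, order=1))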
| keras-core/keras_core/ops/image.py/0 | {
"file_path": "keras-core/keras_core/ops/image.py",
"repo_id": "keras-core",
"token_count": 8317
} | 35 |
from keras_core.api_export import keras_core_export
from keras_core.optimizers.adadelta import Adadelta
from keras_core.optimizers.adafactor import Adafactor
from keras_core.optimizers.adagrad import Adagrad
from keras_core.optimizers.adam import Adam
from keras_core.optimizers.adamax import Adamax
from keras_core.optimizers.adamw import AdamW
from keras_core.optimizers.ftrl import Ftrl
from keras_core.optimizers.lion import Lion
from keras_core.optimizers.loss_scale_optimizer import LossScaleOptimizer
from keras_core.optimizers.nadam import Nadam
from keras_core.optimizers.optimizer import Optimizer
from keras_core.optimizers.rmsprop import RMSprop
from keras_core.optimizers.sgd import SGD
from keras_core.saving import serialization_lib
ALL_OBJECTS = {
Optimizer,
Adam,
SGD,
RMSprop,
Adadelta,
AdamW,
Adagrad,
Adamax,
Adafactor,
Nadam,
Ftrl,
Lion,
LossScaleOptimizer,
}
ALL_OBJECTS_DICT = {cls.__name__.lower(): cls for cls in ALL_OBJECTS}
@keras_core_export("keras_core.optimizers.serialize")
def serialize(optimizer):
"""Returns the optimizer configuration as a Python dict.
Args:
optimizer: An `Optimizer` instance to serialize.
Returns:
Python dict which contains the configuration of the optimizer.
"""
return serialization_lib.serialize_keras_object(optimizer)
@keras_core_export("keras_core.optimizers.deserialize")
def deserialize(config, custom_objects=None):
"""Returns a Keras optimizer object via its configuration.
Args:
config: Optimizer configuration dictionary.
custom_objects: Optional dictionary mapping names (strings) to custom
objects (classes and functions) to be considered during
deserialization.
Returns:
A Keras Optimizer instance.
"""
# Make deserialization case-insensitive for built-in optimizers.
if config["class_name"].lower() in ALL_OBJECTS_DICT:
config["class_name"] = config["class_name"].lower()
return serialization_lib.deserialize_keras_object(
config,
module_objects=ALL_OBJECTS_DICT,
custom_objects=custom_objects,
)
@keras_core_export("keras_core.optimizers.get")
def get(identifier):
"""Retrieves a Keras Optimizer instance.
Args:
identifier: Optimizer identifier, one of:
- String: name of an optimizer
- Dictionary: configuration dictionary.
- Keras Optimizer instance (it will be returned unchanged).
Returns:
A Keras Optimizer instance.
"""
if identifier is None:
return None
elif isinstance(identifier, dict):
obj = deserialize(identifier)
elif isinstance(identifier, str):
config = {"class_name": identifier, "config": {}}
obj = deserialize(config)
else:
obj = identifier
if isinstance(obj, Optimizer):
return obj
raise ValueError(f"Could not interpret optimizer identifier: {identifier}")
| keras-core/keras_core/optimizers/__init__.py/0 | {
"file_path": "keras-core/keras_core/optimizers/__init__.py",
"repo_id": "keras-core",
"token_count": 1126
} | 36 |
from keras_core import backend
from keras_core.api_export import keras_core_export
@keras_core_export("keras_core.random.normal")
def normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
"""Draw random samples from a normal (Gaussian) distribution.
Args:
shape: The shape of the random values to generate.
        mean: Float, defaults to 0. Mean of the random values to generate.
        stddev: Float, defaults to 1. Standard deviation of the random
            values to generate.
dtype: Optional dtype of the tensor. Only floating point types are
supported. If not specified, `keras_core.config.floatx()` is used,
which defaults to `float32` unless you configured it otherwise (via
`keras_core.config.set_floatx(float_dtype)`).
seed: A Python integer or instance of
`keras_core.random.SeedGenerator`.
Used to make the behavior of the initializer
deterministic. Note that an initializer seeded with an integer
or None (unseeded) will produce the same random values
across multiple calls. To get different random values
across multiple calls, use as seed an instance
of `keras_core.random.SeedGenerator`.
"""
return backend.random.normal(
shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed
)
@keras_core_export("keras_core.random.categorical")
def categorical(logits, num_samples, dtype="int32", seed=None):
"""Draws samples from a categorical distribution.
This function takes as input `logits`, a 2-D input tensor with shape
(batch_size, num_classes). Each row of the input represents a categorical
distribution, with each column index containing the log-probability for a
given class.
The function will output a 2-D tensor with shape (batch_size, num_samples),
where each row contains samples from the corresponding row in `logits`.
    Each column contains an independent sample drawn from the corresponding
    input distribution.
Args:
        logits: 2-D Tensor with shape (batch_size, num_classes). Each row
            should define a categorical distribution with the unnormalized
            log-probabilities for all classes.
num_samples: Int, the number of independent samples to draw for each
row of the input. This will be the second dimension of the output
tensor's shape.
dtype: Optional dtype of the output tensor.
seed: A Python integer or instance of
`keras_core.random.SeedGenerator`.
Used to make the behavior of the initializer
deterministic. Note that an initializer seeded with an integer
or None (unseeded) will produce the same random values
across multiple calls. To get different random values
across multiple calls, use as seed an instance
of `keras_core.random.SeedGenerator`.
Returns:
        A 2-D tensor with shape (batch_size, num_samples).
"""
logits_shape = list(backend.convert_to_tensor(logits).shape)
if len(logits_shape) != 2:
raise ValueError(
"`logits` should be a 2-D tensor with shape "
f"[batch_size, num_classes]. Received: logits={logits}"
)
return backend.random.categorical(
logits, num_samples, dtype=dtype, seed=seed
)
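if __name__ == "__main__":
    # A sketch: draw 5 samples per row from two 3-class distributions
    # defined by unnormalized log-probabilities.
    import numpy as np
    logits = np.log(np.array([[0.1, 0.1, 0.8], [0.9, 0.05, 0.05]]))
    samples = categorical(logits, num_samples=5)  # shape (2, 5), int32
    print(samples)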
@keras_core_export("keras_core.random.uniform")
def uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None):
"""Draw samples from a uniform distribution.
The generated values follow a uniform distribution in the range
`[minval, maxval)`. The lower bound `minval` is included in the range,
while the upper bound `maxval` is excluded.
    `dtype` must be a floating point type; the default range is `[0, 1)`.
Args:
shape: The shape of the random values to generate.
        minval: Float, defaults to 0. Lower bound of the range of
            random values to generate (inclusive).
        maxval: Float, defaults to 1. Upper bound of the range of
            random values to generate (exclusive).
dtype: Optional dtype of the tensor. Only floating point types are
supported. If not specified, `keras_core.config.floatx()` is used,
which defaults to `float32` unless you configured it otherwise (via
`keras_core.config.set_floatx(float_dtype)`)
seed: A Python integer or instance of
`keras_core.random.SeedGenerator`.
Used to make the behavior of the initializer
deterministic. Note that an initializer seeded with an integer
or None (unseeded) will produce the same random values
across multiple calls. To get different random values
across multiple calls, use as seed an instance
of `keras_core.random.SeedGenerator`.
"""
if dtype and not backend.is_float_dtype(dtype):
raise ValueError(
"`keras_core.random.uniform` requires a floating point `dtype`. "
f"Received: dtype={dtype} "
)
return backend.random.uniform(
shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed
)
@keras_core_export("keras_core.random.randint")
def randint(shape, minval, maxval, dtype="int32", seed=None):
"""Draw random integers from a uniform distribution.
The generated values follow a uniform distribution in the range
`[minval, maxval)`. The lower bound `minval` is included in the range,
while the upper bound `maxval` is excluded.
`dtype` must be an integer type.
Args:
shape: The shape of the random values to generate.
        minval: Int. Lower bound of the range of
            random values to generate (inclusive).
        maxval: Int. Upper bound of the range of
            random values to generate (exclusive).
        dtype: Optional dtype of the tensor. Only integer types are
            supported. Defaults to `"int32"`.
seed: A Python integer or instance of
`keras_core.random.SeedGenerator`.
Used to make the behavior of the initializer
deterministic. Note that an initializer seeded with an integer
or None (unseeded) will produce the same random values
across multiple calls. To get different random values
across multiple calls, use as seed an instance
of `keras_core.random.SeedGenerator`.
"""
if dtype and not backend.is_int_dtype(dtype):
raise ValueError(
"`keras_core.random.randint` requires an integer `dtype`. "
f"Received: dtype={dtype} "
)
return backend.random.randint(
shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed
)
@keras_core_export("keras_core.random.truncated_normal")
def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
"""Draw samples from a truncated normal distribution.
The values are drawn from a normal distribution with specified mean and
standard deviation, discarding and re-drawing any samples that are more
than two standard deviations from the mean.
Args:
shape: The shape of the random values to generate.
        mean: Float, defaults to 0. Mean of the random values to generate.
        stddev: Float, defaults to 1. Standard deviation of the random
            values to generate.
        dtype: Optional dtype of the tensor. Only floating point types are
            supported. If not specified, `keras_core.config.floatx()` is
            used, which defaults to `float32` unless you configured it
            otherwise (via `keras_core.config.set_floatx(float_dtype)`)
seed: A Python integer or instance of
`keras_core.random.SeedGenerator`.
Used to make the behavior of the initializer
deterministic. Note that an initializer seeded with an integer
or None (unseeded) will produce the same random values
across multiple calls. To get different random values
across multiple calls, use as seed an instance
of `keras_core.random.SeedGenerator`.
"""
return backend.random.truncated_normal(
shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed
)
@keras_core_export("keras_core.random.dropout")
def dropout(inputs, rate, noise_shape=None, seed=None):
    """Randomly set a fraction `rate` of the input units to zero.
    Kept units are scaled by `1 / (1 - rate)` so the expected sum of the
    inputs is preserved; `seed` behaves as in the other ops in this module.
    """
    return backend.random.dropout(
        inputs, rate, noise_shape=noise_shape, seed=seed
    )
@keras_core_export("keras_core.random.shuffle")
def shuffle(x, axis=0, seed=None):
"""Shuffle the elements of a tensor uniformly at random along an axis.
Args:
x: The tensor to be shuffled.
axis: An integer specifying the axis along which to shuffle. Defaults to
`0`.
seed: A Python integer or instance of
`keras_core.random.SeedGenerator`.
Used to make the behavior of the initializer
deterministic. Note that an initializer seeded with an integer
or None (unseeded) will produce the same random values
across multiple calls. To get different random values
across multiple calls, use as seed an instance
of `keras_core.random.SeedGenerator`.
"""
return backend.random.shuffle(x, axis=axis, seed=seed)
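if __name__ == "__main__":
    # A sketch of the seeding behavior documented above: an integer seed
    # produces identical values across calls, while a `SeedGenerator`
    # advances its state between calls.
    from keras_core.random import SeedGenerator
    a = normal((2,), seed=1337)
    b = normal((2,), seed=1337)  # same values as `a`
    gen = SeedGenerator(1337)
    c = normal((2,), seed=gen)
    d = normal((2,), seed=gen)  # different values from `c`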
| keras-core/keras_core/random/random.py/0 | {
"file_path": "keras-core/keras_core/random/random.py",
"repo_id": "keras-core",
"token_count": 3608
} | 37 |
import json
import shutil
import tempfile
import unittest
import numpy as np
import tree
from keras_core import backend
from keras_core import ops
from keras_core import utils
from keras_core.backend.common import is_float_dtype
from keras_core.backend.common import standardize_dtype
from keras_core.backend.common.keras_tensor import KerasTensor
from keras_core.models import Model
from keras_core.utils import traceback_utils
class TestCase(unittest.TestCase):
maxDiff = None
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if traceback_utils.is_traceback_filtering_enabled():
traceback_utils.disable_traceback_filtering()
def get_temp_dir(self):
temp_dir = tempfile.mkdtemp()
self.addCleanup(lambda: shutil.rmtree(temp_dir))
return temp_dir
def assertAllClose(self, x1, x2, atol=1e-6, rtol=1e-6, msg=None):
if not isinstance(x1, np.ndarray):
x1 = backend.convert_to_numpy(x1)
if not isinstance(x2, np.ndarray):
x2 = backend.convert_to_numpy(x2)
np.testing.assert_allclose(x1, x2, atol=atol, rtol=rtol)
def assertNotAllClose(self, x1, x2, atol=1e-6, rtol=1e-6, msg=None):
try:
self.assertAllClose(x1, x2, atol=atol, rtol=rtol, msg=msg)
except AssertionError:
return
msg = msg or ""
raise AssertionError(
f"The two values are close at all elements. \n"
f"{msg}.\n"
f"Values: {x1}"
)
def assertAlmostEqual(self, x1, x2, decimal=3, msg=None):
if not isinstance(x1, np.ndarray):
x1 = backend.convert_to_numpy(x1)
if not isinstance(x2, np.ndarray):
x2 = backend.convert_to_numpy(x2)
np.testing.assert_almost_equal(x1, x2, decimal=decimal)
def assertAllEqual(self, x1, x2, msg=None):
self.assertEqual(len(x1), len(x2), msg=msg)
for e1, e2 in zip(x1, x2):
if isinstance(e1, (list, tuple)) or isinstance(e2, (list, tuple)):
self.assertAllEqual(e1, e2, msg=msg)
else:
e1 = backend.convert_to_numpy(e1)
e2 = backend.convert_to_numpy(e2)
self.assertEqual(e1, e2, msg=msg)
def assertLen(self, iterable, expected_len, msg=None):
self.assertEqual(len(iterable), expected_len, msg=msg)
def run_class_serialization_test(self, instance, custom_objects=None):
from keras_core.saving import custom_object_scope
from keras_core.saving import deserialize_keras_object
from keras_core.saving import serialize_keras_object
# get_config roundtrip
cls = instance.__class__
config = instance.get_config()
config_json = json.dumps(config, sort_keys=True, indent=4)
ref_dir = dir(instance)[:]
with custom_object_scope(custom_objects):
revived_instance = cls.from_config(config)
revived_config = revived_instance.get_config()
revived_config_json = json.dumps(
revived_config, sort_keys=True, indent=4
)
self.assertEqual(config_json, revived_config_json)
self.assertEqual(ref_dir, dir(revived_instance))
# serialization roundtrip
serialized = serialize_keras_object(instance)
serialized_json = json.dumps(serialized, sort_keys=True, indent=4)
with custom_object_scope(custom_objects):
revived_instance = deserialize_keras_object(
json.loads(serialized_json)
)
revived_config = revived_instance.get_config()
revived_config_json = json.dumps(
revived_config, sort_keys=True, indent=4
)
self.assertEqual(config_json, revived_config_json)
new_dir = dir(revived_instance)[:]
for lst in [ref_dir, new_dir]:
if "__annotations__" in lst:
lst.remove("__annotations__")
self.assertEqual(ref_dir, new_dir)
return revived_instance
def run_layer_test(
self,
layer_cls,
init_kwargs,
input_shape=None,
input_dtype="float32",
input_sparse=False,
input_data=None,
call_kwargs=None,
expected_output_shape=None,
expected_output_dtype=None,
expected_output_sparse=False,
expected_output=None,
expected_num_trainable_weights=None,
expected_num_non_trainable_weights=None,
expected_num_non_trainable_variables=None,
expected_num_seed_generators=None,
expected_num_losses=None,
supports_masking=None,
expected_mask_shape=None,
custom_objects=None,
run_training_check=True,
run_mixed_precision_check=True,
):
"""Run basic checks on a layer.
Args:
layer_cls: The class of the layer to test.
init_kwargs: Dict of arguments to be used to
instantiate the layer.
input_shape: Shape tuple (or list/dict of shape tuples)
to call the layer on.
input_dtype: Corresponding input dtype.
input_sparse: Whether the input is a sparse tensor (this requires
the backend to support sparse tensors).
input_data: Tensor (or list/dict of tensors)
to call the layer on.
call_kwargs: Dict of arguments to use when calling the
layer (does not include the first input tensor argument)
expected_output_shape: Shape tuple
(or list/dict of shape tuples)
expected as output.
expected_output_dtype: dtype expected as output.
expected_output_sparse: Whether the output is expected to be sparse
(this requires the backend to support sparse tensors).
expected_output: Expected output tensor -- only
to be specified if input_data is provided.
expected_num_trainable_weights: Expected number
of trainable weights of the layer once built.
            expected_num_non_trainable_weights: Expected number
                of non-trainable weights of the layer once built.
            expected_num_non_trainable_variables: Expected number
                of non-trainable variables of the layer once built.
            expected_num_seed_generators: Expected number of
                SeedGenerator objects of the layer once built.
expected_num_losses: Expected number of loss tensors
produced when calling the layer.
supports_masking: If True, will check that the layer
supports masking.
expected_mask_shape: Expected mask shape tuple
returned by compute_mask() (only supports 1 shape).
custom_objects: Dict of any custom objects to be
considered during deserialization.
run_training_check: Whether to attempt to train the layer
(if an input shape or input data was provided).
run_mixed_precision_check: Whether to test the layer with a mixed
precision dtype policy.
"""
if input_shape is not None and input_data is not None:
raise ValueError(
"input_shape and input_data cannot be passed "
"at the same time."
)
if expected_output_shape is not None and expected_output is not None:
raise ValueError(
"expected_output_shape and expected_output cannot be passed "
"at the same time."
)
if expected_output is not None and input_data is None:
raise ValueError(
"In order to use expected_output, input_data must be provided."
)
if expected_mask_shape is not None and supports_masking is not True:
            raise ValueError(
                "In order to use expected_mask_shape, supports_masking "
                "must be True."
            )
init_kwargs = init_kwargs or {}
call_kwargs = call_kwargs or {}
# Serialization test.
layer = layer_cls(**init_kwargs)
self.run_class_serialization_test(layer, custom_objects)
# Basic masking test.
if supports_masking is not None:
self.assertEqual(
layer.supports_masking,
supports_masking,
msg="Unexpected supports_masking value",
)
def run_build_asserts(layer):
self.assertTrue(layer.built)
if expected_num_trainable_weights is not None:
self.assertLen(
layer.trainable_weights,
expected_num_trainable_weights,
msg="Unexpected number of trainable_weights",
)
if expected_num_non_trainable_weights is not None:
self.assertLen(
layer.non_trainable_weights,
expected_num_non_trainable_weights,
msg="Unexpected number of non_trainable_weights",
)
if expected_num_non_trainable_variables is not None:
self.assertLen(
layer.non_trainable_variables,
expected_num_non_trainable_variables,
msg="Unexpected number of non_trainable_variables",
)
if expected_num_seed_generators is not None:
self.assertLen(
layer._seed_generators,
expected_num_seed_generators,
msg="Unexpected number of _seed_generators",
)
def run_output_asserts(layer, output, eager=False):
if expected_output_shape is not None:
if isinstance(expected_output_shape, tuple):
self.assertEqual(
expected_output_shape,
output.shape,
msg="Unexpected output shape",
)
elif isinstance(expected_output_shape, dict):
self.assertIsInstance(output, dict)
self.assertEqual(
set(output.keys()),
set(expected_output_shape.keys()),
msg="Unexpected output dict keys",
)
                    output_shape = {
                        k: v.shape for k, v in output.items()
                    }
self.assertEqual(
expected_output_shape,
output_shape,
msg="Unexpected output shape",
)
elif isinstance(expected_output_shape, list):
self.assertIsInstance(output, list)
self.assertEqual(
len(output),
len(expected_output_shape),
msg="Unexpected number of outputs",
)
                    output_shape = [v.shape for v in output]
self.assertEqual(
expected_output_shape,
output_shape,
msg="Unexpected output shape",
)
if expected_output_dtype is not None:
output_dtype = tree.flatten(output)[0].dtype
self.assertEqual(
expected_output_dtype,
backend.standardize_dtype(output_dtype),
msg="Unexpected output dtype",
)
if expected_output_sparse:
import tensorflow as tf
for x in tree.flatten(output):
if isinstance(x, KerasTensor):
self.assertTrue(x.sparse)
else:
self.assertIsInstance(x, tf.SparseTensor)
if eager:
if expected_output is not None:
self.assertEqual(type(expected_output), type(output))
for ref_v, v in zip(
tree.flatten(expected_output), tree.flatten(output)
):
self.assertAllClose(
ref_v, v, msg="Unexpected output value"
)
if expected_num_losses is not None:
self.assertLen(layer.losses, expected_num_losses)
def run_training_step(layer, input_data, output_data):
class TestModel(Model):
def __init__(self, layer):
super().__init__()
self.layer = layer
def call(self, x):
return self.layer(x)
model = TestModel(layer)
if input_sparse:
import tensorflow as tf
dataset = tf.data.Dataset.from_tensors(
(input_data, output_data)
)
model.compile(optimizer="sgd", loss="mse", jit_compile=False)
model.fit(dataset, verbose=0)
else:
input_data = tree.map_structure(
lambda x: backend.convert_to_numpy(x), input_data
)
output_data = tree.map_structure(
lambda x: backend.convert_to_numpy(x), output_data
)
model.compile(optimizer="sgd", loss="mse", jit_compile=True)
model.fit(input_data, output_data, verbose=0)
# Build test.
if input_data is not None or input_shape is not None:
if input_shape is None:
build_shape = tree.map_structure(
lambda x: ops.shape(x), input_data
)
else:
build_shape = input_shape
layer = layer_cls(**init_kwargs)
if isinstance(build_shape, dict):
layer.build(**build_shape)
else:
layer.build(build_shape)
run_build_asserts(layer)
# Symbolic call test.
if input_shape is None:
keras_tensor_inputs = tree.map_structure(
lambda x: create_keras_tensors(
ops.shape(x), x.dtype, input_sparse
),
input_data,
)
else:
keras_tensor_inputs = create_keras_tensors(
input_shape, input_dtype, input_sparse
)
layer = layer_cls(**init_kwargs)
if isinstance(keras_tensor_inputs, dict):
keras_tensor_outputs = layer(
**keras_tensor_inputs, **call_kwargs
)
else:
keras_tensor_outputs = layer(keras_tensor_inputs, **call_kwargs)
run_build_asserts(layer)
run_output_asserts(layer, keras_tensor_outputs, eager=False)
if expected_mask_shape is not None:
output_mask = layer.compute_mask(keras_tensor_inputs)
self.assertEqual(expected_mask_shape, output_mask.shape)
# Eager call test and compiled training test.
if input_data is not None or input_shape is not None:
if input_data is None:
input_data = create_eager_tensors(
input_shape, input_dtype, input_sparse
)
layer = layer_cls(**init_kwargs)
if isinstance(input_data, dict):
output_data = layer(**input_data, **call_kwargs)
else:
output_data = layer(input_data, **call_kwargs)
run_output_asserts(layer, output_data, eager=True)
if run_training_check:
run_training_step(layer, input_data, output_data)
# Never test mixed precision on torch CPU. Torch lacks support.
if run_mixed_precision_check and backend.backend() == "torch":
import torch
run_mixed_precision_check = torch.cuda.is_available()
if run_mixed_precision_check:
layer = layer_cls(**{**init_kwargs, "dtype": "mixed_float16"})
if isinstance(input_data, dict):
output_data = layer(**input_data, **call_kwargs)
else:
output_data = layer(input_data, **call_kwargs)
for tensor in tree.flatten(output_data):
dtype = standardize_dtype(tensor.dtype)
if is_float_dtype(dtype):
self.assertEqual(dtype, "float16")
for weight in layer.weights:
dtype = standardize_dtype(weight.dtype)
if is_float_dtype(dtype):
self.assertEqual(dtype, "float32")
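    # Example invocation of `run_layer_test` (a sketch; the layer and the
    # expected counts are illustrative, not tied to a specific layer):
    #
    #   self.run_layer_test(
    #       layers.Dense,
    #       init_kwargs={"units": 4},
    #       input_shape=(2, 3),
    #       expected_output_shape=(2, 4),
    #       expected_num_trainable_weights=2,
    #   )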
def create_keras_tensors(input_shape, dtype, sparse):
if isinstance(input_shape, tuple):
return KerasTensor(input_shape, dtype=dtype, sparse=sparse)
if isinstance(input_shape, list):
return [KerasTensor(s, dtype=dtype, sparse=sparse) for s in input_shape]
if isinstance(input_shape, dict):
return {
utils.removesuffix(k, "_shape"): KerasTensor(
v, dtype=dtype, sparse=sparse
)
for k, v in input_shape.items()
}
raise ValueError(f"Unsupported type for `input_shape`: {type(input_shape)}")
def create_eager_tensors(input_shape, dtype, sparse):
from keras_core.backend import random
if dtype not in [
"float16",
"float32",
"float64",
"int16",
"int32",
"int64",
]:
raise ValueError(
"dtype must be a standard float or int dtype. "
f"Received: dtype={dtype}"
)
if sparse:
import tensorflow as tf
def create_fn(shape, dtype):
min_dim = min(dim for dim in shape if dim > 1)
x = random.uniform(shape, dtype="float32") * 3 / min_dim
x = tf.nn.dropout(x, 1.0 / min_dim)
x = tf.cast(x, dtype=dtype)
return tf.sparse.from_dense(x)
else:
def create_fn(shape, dtype):
return ops.cast(
random.uniform(shape, dtype="float32") * 3, dtype=dtype
)
if isinstance(input_shape, tuple):
return create_fn(input_shape, dtype=dtype)
if isinstance(input_shape, list):
return [create_fn(s, dtype=dtype) for s in input_shape]
if isinstance(input_shape, dict):
return {
utils.removesuffix(k, "_shape"): create_fn(v, dtype=dtype)
for k, v in input_shape.items()
}
| keras-core/keras_core/testing/test_case.py/0 | {
"file_path": "keras-core/keras_core/testing/test_case.py",
"repo_id": "keras-core",
"token_count": 9582
} | 38 |
import tree
from keras_core.trainers.data_adapters.data_adapter import DataAdapter
class TorchDataLoaderAdapter(DataAdapter):
"""Adapter that handles `torch.utils.data.DataLoader`."""
def __init__(self, dataloader):
import torch
if not isinstance(dataloader, torch.utils.data.DataLoader):
            raise ValueError(
                "Expected argument `dataloader` to be an instance of "
                f"`torch.utils.data.DataLoader`. Received: {dataloader}"
            )
self._dataloader = dataloader
self._batch_size = dataloader.batch_size
self._size = len(dataloader)
self._partial_batch_size = len(dataloader.dataset) % self._batch_size
def get_numpy_iterator(self):
for batch in self._dataloader:
yield tuple(tree.map_structure(lambda x: x.cpu().numpy(), batch))
def get_torch_dataloader(self):
return self._dataloader
def get_tf_dataset(self):
from keras_core.utils.module_utils import tensorflow as tf
output_signature = self.peek_and_get_tensor_spec()
return tf.data.Dataset.from_generator(
self.get_numpy_iterator,
output_signature=output_signature,
)
def peek_and_get_tensor_spec(self):
from keras_core.utils.module_utils import tensorflow as tf
batch_data = next(iter(self._dataloader))
def get_tensor_spec(x):
shape = x.shape
if len(shape) < 1:
raise ValueError(
"When passing a Pytorch DataLoader to a Keras model, "
"the arrays returned by the generator "
"must be at least rank 1. Received: "
f"{x} of rank {len(x.shape)}"
)
shape = list(shape)
shape[0] = None # The batch size is not guaranteed to be static.
# No easy way to get string representation of dtype in torch
# TODO: Figure out a better way to achieve this
dtype = str(x.dtype).replace("torch.", "")
return tf.TensorSpec(shape=shape, dtype=dtype)
return tuple(tree.map_structure(get_tensor_spec, batch_data))
@property
def num_batches(self):
return self._size
@property
def batch_size(self):
return self._batch_size
@property
def has_partial_batch(self):
if self._partial_batch_size:
return self._partial_batch_size > 0
else:
return None
@property
def partial_batch_size(self):
return self._partial_batch_size
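if __name__ == "__main__":
    # A sketch of adapting a `DataLoader`; the dataset contents are
    # illustrative.
    import torch
    x = torch.randn(10, 2)
    y = torch.randn(10, 1)
    loader = torch.utils.data.DataLoader(
        torch.utils.data.TensorDataset(x, y), batch_size=4
    )
    adapter = TorchDataLoaderAdapter(loader)
    assert adapter.num_batches == 3  # ceil(10 / 4)
    assert adapter.partial_batch_size == 2  # 10 % 4
    first_batch = next(adapter.get_numpy_iterator())  # tuple of arrays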
| keras-core/keras_core/trainers/data_adapters/torch_data_adapter.py/0 | {
"file_path": "keras-core/keras_core/trainers/data_adapters/torch_data_adapter.py",
"repo_id": "keras-core",
"token_count": 1192
} | 39 |
import numpy as np
import pytest
import torch
from keras_core import backend
from keras_core import layers
from keras_core import models
from keras_core import testing
from keras_core.utils.torch_utils import TorchModuleWrapper
class Classifier(models.Model):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fc = TorchModuleWrapper(torch.nn.Linear(2, 4))
def call(self, x):
return self.fc(x)
class ClassifierWithNoSpecialCasing(models.Model):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fc = torch.nn.Linear(2, 4)
self.fc2 = layers.Dense(2)
def call(self, x):
return self.fc(self.fc2(x))
class TorchUtilsTest(testing.TestCase):
@pytest.mark.skipif(
backend.backend() != "torch", reason="Requires torch backend"
)
def test_basic_usage(self):
model = Classifier()
self.assertEqual(len(model.layers), 1)
self.assertEqual(len(model.trainable_weights), 2)
model(np.random.random((3, 2)))
model.compile(optimizer="sgd", loss="mse")
model.fit(np.random.random((3, 2)), np.random.random((3, 4)))
@pytest.mark.skipif(
backend.backend() != "torch", reason="Requires torch backend"
)
def test_module_autowrapping(self):
model = ClassifierWithNoSpecialCasing()
self.assertIsInstance(model.fc, TorchModuleWrapper)
self.assertFalse(isinstance(model.fc2, TorchModuleWrapper))
self.assertEqual(len(model.fc.trainable_weights), 2)
model(np.random.random((3, 2)))
self.assertEqual(len(model._layers), 2)
self.assertEqual(len(model.fc2.trainable_weights), 2)
self.assertEqual(len(model.trainable_weights), 4)
model.compile(optimizer="sgd", loss="mse")
model.fit(np.random.random((3, 2)), np.random.random((3, 4)))
| keras-core/keras_core/utils/torch_utils_test.py/0 | {
"file_path": "keras-core/keras_core/utils/torch_utils_test.py",
"repo_id": "keras-core",
"token_count": 801
} | 40 |
{
"name": "Keras-cv",
"build": {
"dockerfile": "Dockerfile",
"args": {
"VERSION": "2.11.0"
// Uncomment this if GPU support is required
// "VERSION": "2.11.0-gpu",
}
},
"customizations": {
"vscode": {
"settings": {
"python.linting.enabled": true,
"python.linting.flake8Enabled": true,
"python.linting.pylintEnabled": false,
"python.testing.pytestEnabled": true,
"editor.formatOnSave": true,
"editor.codeActionsOnSave": {
"source.organizeImports": true
},
"[python]": {
"editor.defaultFormatter": "ms-python.black-formatter"
},
"editor.rulers": [
80
]
},
"extensions": [
"ms-python.python",
"ms-python.isort",
"ms-python.flake8",
"ms-python.black-formatter",
"ms-vscode.cpptools",
"xaver.clang-format"
]
}
},
"features": {
"ghcr.io/devcontainers/features/github-cli:1": {}
},
// TODO: Improve to allow dynamic runArgs, see microsoft/vscode-remote-release#3972
// Uncomment this if GPU support is required
// "runArgs": [
// "--gpus=all"
// ],
"onCreateCommand": "locale-gen \"en_US.UTF-8\"",
// Optional: install pre-commit hooks
// "postCreateCommand": "git config core.hooksPath .github/.githooks"
"postCreateCommand": "sh /setup.sh"
} | keras-cv/.devcontainer/devcontainer.json/0 | {
"file_path": "keras-cv/.devcontainer/devcontainer.json",
"repo_id": "keras-cv",
"token_count": 571
} | 41 |
import math
import random
import time
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import tensorflow as tf
import keras_cv
from keras_cv.metrics import coco
def produce_random_data(
include_confidence=False, num_images=128, num_classes=20
):
"""Generates a fake list of bounding boxes for use in this test.
Returns:
a tensor list of size [128, 25, 5/6]. This represents 128 images, 25
bboxes and 5/6 dimensions to represent each bbox depending on if
confidence is set.
"""
images = []
for _ in range(num_images):
num_boxes = math.floor(25 * random.uniform(0, 1))
classes_in_image = np.floor(np.random.rand(num_boxes, 1) * num_classes)
bboxes = np.random.rand(num_boxes, 4)
boxes = np.concatenate([bboxes, classes_in_image], axis=-1)
if include_confidence:
confidence = np.random.rand(num_boxes, 1)
boxes = np.concatenate([boxes, confidence], axis=-1)
images.append(
keras_cv.utils.bounding_box.xywh_to_corners(
tf.constant(boxes, dtype=tf.float32)
)
)
images = [keras_cv.bounding_box.to_dense(x, max_boxes=25) for x in images]
return tf.stack(images, axis=0)
y_true = produce_random_data()
y_pred = produce_random_data(include_confidence=True)
class_ids = list(range(20))
bucket_values = [500, 1000, 2000, 3500, 5000, 7500, 10000]
update_state_runtimes = []
result_runtimes = []
end_to_end_runtimes = []
for buckets in bucket_values:
metric = coco._COCOMeanAveragePrecision(class_ids, num_buckets=buckets)
# warm up
metric.update_state(y_true, y_pred)
metric.result()
start = time.time()
metric.update_state(y_true, y_pred)
update_state_done = time.time()
r = metric.result()
end = time.time()
update_state_runtimes.append(update_state_done - start)
result_runtimes.append(end - update_state_done)
end_to_end_runtimes.append(end - start)
print("end_to_end_runtimes", end_to_end_runtimes)
data = pd.DataFrame(
{
"bucket_values": bucket_values,
"update_state_runtimes": update_state_runtimes,
"result_runtimes": result_runtimes,
"end_to_end_runtimes": end_to_end_runtimes,
}
)
sns.lineplot(data=data, x="bucket_values", y="update_state_runtimes")
plt.xlabel("Number of Confidence Buckets")
plt.ylabel("update_state() runtime (seconds)")
plt.title("Runtime of update_state()")
plt.show()
sns.lineplot(data=data, x="bucket_values", y="result_runtimes")
plt.xlabel("Number of Confidence Buckets")
plt.ylabel("result() runtime (seconds)")
plt.title("Runtime of result()")
plt.show()
sns.lineplot(data=data, x="bucket_values", y="end_to_end_runtimes")
plt.xlabel("Number of Confidence Buckets")
plt.ylabel("End to end runtime (seconds)")
plt.title("Runtimes of update_state() followed by result()")
plt.show()
| keras-cv/benchmarks/metrics/coco/mean_average_precision_bucket_performance.py/0 | {
"file_path": "keras-cv/benchmarks/metrics/coco/mean_average_precision_bucket_performance.py",
"repo_id": "keras-cv",
"token_count": 1209
} | 42 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow import keras
from keras_cv.layers import RandomSaturation
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing as preprocessing_utils
class OldRandomSaturation(BaseImageAugmentationLayer):
"""Randomly adjusts the saturation on given images.
This layer will randomly increase/reduce the saturation for the input RGB
images. At inference time, the output will be identical to the input.
Call the layer with `training=True` to adjust the saturation of the input.
Args:
factor: A tuple of two floats, a single float or
`keras_cv.FactorSampler`. `factor` controls the extent to which the
image saturation is impacted. `factor=0.5` makes this layer perform
            a no-op operation. `factor=0.0` makes the image fully
            grayscale. `factor=1.0` makes the image fully saturated.
Values should be between `0.0` and `1.0`. If a tuple is used, a
`factor` is sampled between the two values for every image
augmented. If a single float is used, a value between `0.0` and the
passed float is sampled. In order to ensure the value is always the
same, please pass a tuple with two identical floats: `(0.5, 0.5)`.
seed: Integer. Used to create a random seed.
"""
def __init__(self, factor, seed=None, **kwargs):
super().__init__(seed=seed, **kwargs)
self.factor = preprocessing_utils.parse_factor(
factor,
min_value=0.0,
max_value=1.0,
)
self.seed = seed
def get_random_transformation(self, **kwargs):
return self.factor()
def augment_image(self, image, transformation=None, **kwargs):
# Convert the factor range from [0, 1] to [0, +inf]. Note that the
# tf.image.adjust_saturation is trying to apply the following math
# formula `output_saturation = input_saturation * factor`. We use the
# following method to the do the mapping.
# `y = x / (1 - x)`.
# This will ensure:
# y = +inf when x = 1 (full saturation)
# y = 1 when x = 0.5 (no augmentation)
# y = 0 when x = 0 (full gray scale)
        # Convert the transformation to a tensor in case it is a float. When
        # the transformation is 1.0, a Python float would trigger a
        # divide-by-zero error, but a tensor handles the division gracefully
        # (yielding +inf).
transformation = tf.convert_to_tensor(transformation)
adjust_factor = transformation / (1 - transformation)
return tf.image.adjust_saturation(
image, saturation_factor=adjust_factor
)
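    # Worked example of the mapping y = x / (1 - x) used above:
    #   x = 0.50 -> y = 1.0 (identity saturation)
    #   x = 0.75 -> y = 3.0 (more saturated)
    #   x = 0.25 -> y = 1/3 (less saturated)
    #   x = 0.00 -> y = 0.0 (fully grayscale)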
def augment_bounding_boxes(
self, bounding_boxes, transformation=None, **kwargs
):
return bounding_boxes
def augment_label(self, label, transformation=None, **kwargs):
return label
def augment_segmentation_mask(
self, segmentation_mask, transformation, **kwargs
):
return segmentation_mask
def get_config(self):
config = {
"factor": self.factor,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
if isinstance(config["factor"], dict):
config["factor"] = keras.utils.deserialize_keras_object(
config["factor"]
)
return cls(**config)
(x_train, _), _ = keras.datasets.cifar10.load_data()
x_train = x_train.astype(np.float32)
x_train.shape
num_images = [1000, 2000, 5000, 10000]
results = {}
aug_candidates = [RandomSaturation, OldRandomSaturation]
aug_args = {"factor": (0.5)}
for aug in aug_candidates:
c = aug.__name__
layer = aug(**aug_args)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
layer(x_train[:n_images])
t0 = time.time()
r1 = layer(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
c = aug.__name__ + " Graph Mode"
layer = aug(**aug_args)
@tf.function()
def apply_aug(inputs):
return layer(inputs)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
apply_aug(x_train[:n_images])
t0 = time.time()
r1 = apply_aug(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison.png")
# Drop the slow eager-mode baseline so the remaining curves are legible.
del results[aug_candidates[1].__name__]
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison_no_old_eager.png")
| keras-cv/benchmarks/vectorized_random_saturation.py/0 | {
"file_path": "keras-cv/benchmarks/vectorized_random_saturation.py",
"repo_id": "keras-cv",
"token_count": 2335
} | 43 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import demo_utils
import tensorflow as tf
from keras_cv import layers as cv_layers
def _default_anchor_generator(bounding_box_format):
strides = [50]
sizes = [100.0]
scales = [1.0]
aspect_ratios = [1.0]
return cv_layers.AnchorGenerator(
bounding_box_format=bounding_box_format,
anchor_sizes=sizes,
aspect_ratios=aspect_ratios,
scales=scales,
strides=strides,
clip_boxes=True,
)
generator = _default_anchor_generator(bounding_box_format="xywh")
def pair_with_anchor_boxes(inputs):
images = inputs["images"]
anchor_boxes = generator(images[0])
anchor_boxes = anchor_boxes[0]
anchor_boxes = tf.expand_dims(anchor_boxes, axis=0)
anchor_boxes = tf.tile(anchor_boxes, [tf.shape(images)[0], 1, 1])
inputs["bounding_boxes"] = anchor_boxes
return inputs
if __name__ == "__main__":
dataset = demo_utils.load_voc_dataset(bounding_box_format="xywh")
result = dataset.map(
pair_with_anchor_boxes, num_parallel_calls=tf.data.AUTOTUNE
)
demo_utils.visualize_data(result, bounding_box_format="xywh")
| keras-cv/examples/layers/object_detection/anchor_generator_configuration.py/0 | {
"file_path": "keras-cv/examples/layers/object_detection/anchor_generator_configuration.py",
"repo_id": "keras-cv",
"token_count": 627
} | 44 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for preprocessing demos."""
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow import keras
def resize(image, label, img_size=(224, 224), num_classes=10):
image = tf.image.resize(image, img_size)
label = tf.one_hot(label, num_classes)
return {"images": image, "labels": label}
def load_oxford_dataset(
name="oxford_flowers102",
batch_size=64,
img_size=(224, 224),
as_supervised=True,
):
# Load dataset.
data, ds_info = tfds.load(name, as_supervised=as_supervised, with_info=True)
train_ds = data["train"]
num_classes = ds_info.features["label"].num_classes
# Get tf dataset.
train_ds = train_ds.map(
lambda x, y: resize(x, y, img_size=img_size, num_classes=num_classes)
).batch(batch_size)
return train_ds
def visualize_dataset(ds):
outputs = next(iter(ds.take(1)))
images = outputs["images"]
plt.figure(figsize=(8, 8))
for i in range(9):
plt.subplot(3, 3, i + 1)
plt.imshow(images[i].numpy().astype("uint8"))
plt.axis("off")
plt.show()
def gallery_show(images):
images = images.astype(int)
for i in range(9):
image = images[i]
plt.subplot(3, 3, i + 1)
plt.imshow(image.astype("uint8"))
plt.axis("off")
plt.show()
def load_elephant_tensor(output_size=(300, 300)):
elephants = keras.utils.get_file(
"african_elephant.jpg", "https://i.imgur.com/Bvro0YD.png"
)
elephants = keras.utils.load_img(elephants, target_size=output_size)
elephants = keras.utils.img_to_array(elephants)
many_elephants = tf.repeat(tf.expand_dims(elephants, axis=0), 9, axis=0)
return many_elephants
| keras-cv/examples/layers/preprocessing/classification/demo_utils.py/0 | {
"file_path": "keras-cv/examples/layers/preprocessing/classification/demo_utils.py",
"repo_id": "keras-cv",
"token_count": 909
} | 45 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""resize_demo.py shows how to use the Resizing preprocessing layer.
Uses the oxford_iiit_pet dataset. In this script the pet images
are loaded, passed through the preprocessing layers, and finally
displayed using matplotlib.
"""
import tensorflow as tf
import tensorflow_datasets as tfds
from keras_cv.layers import preprocessing
from keras_cv.visualization import plot_image_gallery
def load_data():
ds = tfds.load(
name="oxford_iiit_pet",
split="train",
)
return ds.map(
lambda inputs: {
"images": tf.cast(inputs["image"], dtype=tf.float32),
"segmentation_masks": inputs["segmentation_mask"] - 1,
}
)
def map_fn_for_visualization(inputs):
masks = tf.cast(inputs["segmentation_masks"], dtype=tf.float32) / 2.0
images = tf.expand_dims(inputs["images"], axis=0)
masks = tf.expand_dims(masks, axis=0)
masks = tf.repeat(masks, repeats=3, axis=-1)
image_masks = tf.concat([images, masks], axis=2)
return image_masks[0]
def main():
ds = load_data()
resize = preprocessing.Resizing(
256,
256,
interpolation="bilinear",
crop_to_aspect_ratio=False,
pad_to_aspect_ratio=False,
bounding_box_format=None,
)
resize_crop = preprocessing.Resizing(
256,
256,
interpolation="bilinear",
crop_to_aspect_ratio=True,
pad_to_aspect_ratio=False,
bounding_box_format=None,
)
resize_pad = preprocessing.Resizing(
256,
256,
interpolation="bilinear",
crop_to_aspect_ratio=False,
pad_to_aspect_ratio=True,
bounding_box_format=None,
)
ds_resize = ds.map(resize, num_parallel_calls=tf.data.AUTOTUNE)
ds_crop = ds.map(resize_crop, num_parallel_calls=tf.data.AUTOTUNE)
ds_pad = ds.map(resize_pad, num_parallel_calls=tf.data.AUTOTUNE)
ds_resize = ds_resize.map(map_fn_for_visualization).batch(8)
ds_crop = ds_crop.map(map_fn_for_visualization).batch(8)
ds_pad = ds_pad.map(map_fn_for_visualization).batch(8)
plot_image_gallery(
next(iter(ds_resize.take(1))),
value_range=(0, 1),
scale=3,
rows=2,
cols=4,
path="resize.png",
)
plot_image_gallery(
next(iter(ds_crop.take(1))),
value_range=(0, 1),
scale=3,
rows=2,
cols=4,
path="resize_crop.png",
)
plot_image_gallery(
next(iter(ds_pad.take(1))),
value_range=(0, 1),
scale=3,
rows=2,
cols=4,
path="resize_pad.png",
)
if __name__ == "__main__":
main()
| keras-cv/examples/layers/preprocessing/segmentation/resize_demo.py/0 | {
"file_path": "keras-cv/examples/layers/preprocessing/segmentation/resize_demo.py",
"repo_id": "keras-cv",
"token_count": 1440
} | 46 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import types
from keras_cv.backend import keras
try:
import namex
except ImportError:
namex = None
def maybe_register_serializable(symbol, package):
if isinstance(symbol, types.FunctionType) or hasattr(symbol, "get_config"):
keras.saving.register_keras_serializable(package=package)(symbol)
if namex:
class keras_cv_export(namex.export):
def __init__(self, path, package="keras_cv"):
super().__init__(package="keras_cv", path=path)
self.package = package
def __call__(self, symbol):
maybe_register_serializable(symbol, self.package)
return super().__call__(symbol)
else:
class keras_cv_export:
def __init__(self, path, package="keras_cv"):
self.package = package
def __call__(self, symbol):
maybe_register_serializable(symbol, self.package)
return symbol
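# Example usage (a sketch; the export path and class are illustrative):
#
#   @keras_cv_export("keras_cv.layers.MyCustomLayer")
#   class MyCustomLayer(keras.layers.Layer):
#       ...
#
# The decorator registers the symbol as Keras-serializable (when it is a
# function or has a `get_config` method) and, if `namex` is installed,
# also exports it under the given public path.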
| keras-cv/keras_cv/api_export.py/0 | {
"file_path": "keras-cv/keras_cv/api_export.py",
"repo_id": "keras-cv",
"token_count": 539
} | 47 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for iou functions."""
import numpy as np
from keras_cv.bounding_box import iou as iou_lib
from keras_cv.tests.test_case import TestCase
class IoUTest(TestCase):
def test_compute_single_iou(self):
bb1 = np.array([[100, 101, 200, 201]])
bb1_off_by_1 = np.array([[101, 102, 201, 202]])
# area of bb1 and bb1_off_by_1 are each 10000.
# intersection area is 99*99=9801
# iou=9801/(2*10000 - 9801)=0.96097656633
self.assertAllClose(
iou_lib.compute_iou(bb1, bb1_off_by_1, "yxyx")[0], [0.96097656633]
)
def test_compute_iou(self):
bb1 = [100, 101, 200, 201]
bb1_off_by_1_pred = [101, 102, 201, 202]
iou_bb1_bb1_off = 0.96097656633
top_left_bounding_box = [0, 2, 1, 3]
far_away_box = [1300, 1400, 1500, 1401]
another_far_away_pred = [1000, 1400, 1200, 1401]
# Rows represent predictions, columns ground truths
expected_result = np.array(
[[iou_bb1_bb1_off, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]],
dtype=np.float32,
)
sample_y_true = np.array([bb1, top_left_bounding_box, far_away_box])
sample_y_pred = np.array(
[bb1_off_by_1_pred, top_left_bounding_box, another_far_away_pred],
)
result = iou_lib.compute_iou(sample_y_true, sample_y_pred, "yxyx")
self.assertAllClose(expected_result, result)
def test_batched_compute_iou(self):
bb1 = [100, 101, 200, 201]
bb1_off_by_1_pred = [101, 102, 201, 202]
iou_bb1_bb1_off = 0.96097656633
top_left_bounding_box = [0, 2, 1, 3]
far_away_box = [1300, 1400, 1500, 1401]
another_far_away_pred = [1000, 1400, 1200, 1401]
# Rows represent predictions, columns ground truths
expected_result = np.array(
[
[[iou_bb1_bb1_off, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]],
[[iou_bb1_bb1_off, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]],
],
)
sample_y_true = np.array(
[
[bb1, top_left_bounding_box, far_away_box],
[bb1, top_left_bounding_box, far_away_box],
],
)
sample_y_pred = np.array(
[
[
bb1_off_by_1_pred,
top_left_bounding_box,
another_far_away_pred,
],
[
bb1_off_by_1_pred,
top_left_bounding_box,
another_far_away_pred,
],
],
)
result = iou_lib.compute_iou(sample_y_true, sample_y_pred, "yxyx")
self.assertAllClose(expected_result, result)
def test_batched_boxes1_unbatched_boxes2(self):
bb1 = [100, 101, 200, 201]
bb1_off_by_1_pred = [101, 102, 201, 202]
iou_bb1_bb1_off = 0.96097656633
top_left_bounding_box = [0, 2, 1, 3]
far_away_box = [1300, 1400, 1500, 1401]
another_far_away_pred = [1000, 1400, 1200, 1401]
# Rows represent predictions, columns ground truths
expected_result = np.array(
[
[[iou_bb1_bb1_off, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]],
[[iou_bb1_bb1_off, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]],
],
)
sample_y_true = np.array(
[
[bb1, top_left_bounding_box, far_away_box],
[bb1, top_left_bounding_box, far_away_box],
],
)
sample_y_pred = np.array(
[bb1_off_by_1_pred, top_left_bounding_box, another_far_away_pred],
)
result = iou_lib.compute_iou(sample_y_true, sample_y_pred, "yxyx")
self.assertAllClose(expected_result, result)
def test_unbatched_boxes1_batched_boxes2(self):
bb1 = [100, 101, 200, 201]
bb1_off_by_1_pred = [101, 102, 201, 202]
iou_bb1_bb1_off = 0.96097656633
top_left_bounding_box = [0, 2, 1, 3]
far_away_box = [1300, 1400, 1500, 1401]
another_far_away_pred = [1000, 1400, 1200, 1401]
# Rows represent predictions, columns ground truths
expected_result = np.array(
[
[[iou_bb1_bb1_off, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]],
[[iou_bb1_bb1_off, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]],
],
)
sample_y_true = np.array(
[
[bb1, top_left_bounding_box, far_away_box],
],
)
sample_y_pred = np.array(
[
[
bb1_off_by_1_pred,
top_left_bounding_box,
another_far_away_pred,
],
[
bb1_off_by_1_pred,
top_left_bounding_box,
another_far_away_pred,
],
],
)
result = iou_lib.compute_iou(sample_y_true, sample_y_pred, "yxyx")
self.assertAllClose(expected_result, result)
| keras-cv/keras_cv/bounding_box/iou_test.py/0 | {
"file_path": "keras-cv/keras_cv/bounding_box/iou_test.py",
"repo_id": "keras-cv",
"token_count": 3167
} | 48 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow.keras.callbacks import Callback
from keras_cv.api_export import keras_cv_export
from keras_cv.bounding_box_3d import CENTER_XYZ_DXDYDZ_PHI
from keras_cv.utils import assert_waymo_open_dataset_installed
try:
from waymo_open_dataset import label_pb2
from waymo_open_dataset.metrics.python.wod_detection_evaluator import (
WODDetectionEvaluator,
)
from waymo_open_dataset.protos import breakdown_pb2
from waymo_open_dataset.protos import metrics_pb2
except ImportError:
WODDetectionEvaluator = None
@keras_cv_export("keras_cv.callbacks.WaymoEvaluationCallback")
class WaymoEvaluationCallback(Callback):
def __init__(self, validation_data, config=None, **kwargs):
"""Creates a callback to evaluate Waymo Open Dataset (WOD) metrics on a
validation dataset.
Args:
validation_data: a tf.data.Dataset containing validation data.
Entries should have the form `(point_clouds, {"bounding_boxes":
bounding_boxes}`. Padded bounding box should have a class of -1
to be correctly filtered out.
config: an optional `metrics_pb2.Config` object from WOD to specify
what metrics should be evaluated.
"""
assert_waymo_open_dataset_installed(
"keras_cv.callbacks.WaymoEvaluationCallback()"
)
self.val_data = validation_data
self.evaluator = WODDetectionEvaluator(
config=config or self._get_default_config()
)
super().__init__(**kwargs)
def _get_default_config(self):
"""Returns the default Config proto for detection."""
config = metrics_pb2.Config()
config.breakdown_generator_ids.append(
breakdown_pb2.Breakdown.OBJECT_TYPE
)
difficulty = config.difficulties.add()
difficulty.levels.append(label_pb2.Label.LEVEL_1)
difficulty.levels.append(label_pb2.Label.LEVEL_2)
config.matcher_type = metrics_pb2.MatcherProto.TYPE_HUNGARIAN
config.iou_thresholds.append(0.0) # Unknown
config.iou_thresholds.append(0.7) # Vehicle
config.iou_thresholds.append(0.5) # Pedestrian
config.iou_thresholds.append(0.5) # Sign
config.iou_thresholds.append(0.5) # Cyclist
config.box_type = label_pb2.Label.Box.TYPE_3D
for i in range(100):
config.score_cutoffs.append(i * 0.01)
config.score_cutoffs.append(1.0)
return config
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
gt, preds = self._eval_dataset(self.val_data)
self.evaluator.update_state(gt, preds)
metrics = self.evaluator.result()
metrics_dict = {
"average_precision_vehicle_l1": metrics.average_precision[0],
"average_precision_vehicle_l2": metrics.average_precision[1],
"average_precision_ped_l1": metrics.average_precision[2],
"average_precision_ped_l2": metrics.average_precision[3],
}
logs.update(metrics_dict)
def _eval_dataset(self, dataset):
def point_clouds_only(point_clouds, target):
return point_clouds
def boxes_only(point_clouds, target):
return target["3d_boxes"]
model_outputs = self.model.predict(dataset.map(point_clouds_only))[
"3d_boxes"
]
def flatten_target(boxes):
return tf.concat(
[
boxes["boxes"],
tf.expand_dims(
tf.cast(boxes["classes"], tf.float32), axis=-1
),
tf.expand_dims(
tf.cast(boxes["difficulty"], tf.float32), axis=-1
),
],
axis=-1,
)
gt_boxes = tf.concat(
[flatten_target(x) for x in iter(dataset.map(boxes_only))], axis=0
)
boxes_per_gt_frame = gt_boxes.shape[1]
num_frames = gt_boxes.shape[0]
gt_boxes = tf.reshape(gt_boxes, (num_frames * boxes_per_gt_frame, 9))
# Remove padded boxes
gt_real_boxes = tf.concat(
[x["mask"] for x in iter(dataset.map(boxes_only))], axis=0
)
gt_real_boxes = tf.reshape(
gt_real_boxes, (num_frames * boxes_per_gt_frame)
)
gt_boxes = tf.boolean_mask(gt_boxes, gt_real_boxes)
frame_ids = tf.cast(tf.linspace(1, num_frames, num_frames), tf.int64)
ground_truth = {
"ground_truth_frame_id": tf.boolean_mask(
tf.repeat(frame_ids, boxes_per_gt_frame), gt_real_boxes
),
"ground_truth_bbox": gt_boxes[:, : CENTER_XYZ_DXDYDZ_PHI.PHI + 1],
"ground_truth_type": tf.cast(
gt_boxes[:, CENTER_XYZ_DXDYDZ_PHI.CLASS], tf.uint8
),
"ground_truth_difficulty": tf.cast(
gt_boxes[:, CENTER_XYZ_DXDYDZ_PHI.CLASS + 1], tf.uint8
),
}
boxes_per_pred_frame = model_outputs["boxes"].shape[1]
total_predicted_boxes = boxes_per_pred_frame * num_frames
predicted_boxes = tf.reshape(
model_outputs["boxes"], (total_predicted_boxes, 7)
)
predicted_classes = tf.cast(
tf.reshape(model_outputs["classes"], (total_predicted_boxes, 1)),
tf.uint8,
)
prediction_scores = tf.reshape(
model_outputs["confidence"], (total_predicted_boxes, 1)
)
# Remove boxes that come from padding
pred_real_boxes = tf.squeeze(prediction_scores > 0)
predicted_boxes = tf.boolean_mask(predicted_boxes, pred_real_boxes)
predicted_classes = tf.boolean_mask(predicted_classes, pred_real_boxes)
prediction_scores = tf.boolean_mask(prediction_scores, pred_real_boxes)
predictions = {
"prediction_frame_id": tf.boolean_mask(
tf.repeat(frame_ids, boxes_per_pred_frame), pred_real_boxes
),
"prediction_bbox": predicted_boxes,
"prediction_type": tf.squeeze(predicted_classes),
"prediction_score": tf.squeeze(prediction_scores),
"prediction_overlap_nlz": tf.cast(
tf.zeros(predicted_boxes.shape[0]), tf.bool
),
}
return ground_truth, predictions
| keras-cv/keras_cv/callbacks/waymo_evaluation_callback.py/0 | {
"file_path": "keras-cv/keras_cv/callbacks/waymo_evaluation_callback.py",
"repo_id": "keras-cv",
"token_count": 3284
} | 49 |
/* Copyright 2023 The KerasCV Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#define EIGEN_USE_THREADS
#include "keras_cv/custom_ops/box_util.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/lib/core/errors.h"
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
namespace kerascv {
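// WithinAnyBoxOp marks, for each input point, whether it lies inside any of
// the given upright 3D boxes. The output is initialized to false; the work
// is then sharded across boxes, and each shard flags the points contained in
// its boxes, skipping points that were already flagged.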
class WithinAnyBoxOp : public OpKernel {
public:
explicit WithinAnyBoxOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}
void Compute(OpKernelContext* ctx) override {
const Tensor& points = ctx->input(0);
const Tensor& boxes = ctx->input(1);
const int num_points = points.dim_size(0);
const int num_boxes = boxes.dim_size(0);
Tensor* within_any_box = nullptr;
OP_REQUIRES_OK(
ctx, ctx->allocate_output("within_any_box", TensorShape({num_points}),
&within_any_box));
auto within_any_box_t = within_any_box->flat<bool>();
for (auto i = 0; i < num_points; ++i) within_any_box_t(i) = false;
std::vector<box::Upright3DBox> boxes_vec = box::ParseBoxesFromTensor(boxes);
std::vector<box::Vertex> points_vec = box::ParseVerticesFromTensor(points);
auto within_fn = [&boxes_vec, &points_vec, &within_any_box_t](int64_t begin,
int64_t end) {
for (int64_t idx = begin; idx < end; ++idx) {
box::Upright3DBox& box = boxes_vec[idx];
for (uint64_t p_idx = 0; p_idx < points_vec.size(); ++p_idx) {
if (within_any_box_t(p_idx)) {
continue;
}
auto point = points_vec[p_idx];
if (box.WithinBox3D(point)) {
within_any_box_t(p_idx) = true;
}
}
}
};
const CPUDevice& device = ctx->eigen_device<CPUDevice>();
const Eigen::TensorOpCost cost(num_points, num_boxes, 3);
device.parallelFor(num_boxes, cost, within_fn);
}
};
REGISTER_KERNEL_BUILDER(Name("KcvWithinAnyBox").Device(DEVICE_CPU),
WithinAnyBoxOp);
} // namespace kerascv
} // namespace tensorflow
| keras-cv/keras_cv/custom_ops/kernels/within_any_box_op.cc/0 | {
"file_path": "keras-cv/keras_cv/custom_ops/kernels/within_any_box_op.cc",
"repo_id": "keras-cv",
"token_count": 1136
} | 50 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tensorflow import keras
from keras_cv.api_export import keras_cv_export
@keras_cv_export("keras_cv.layers.FeaturePyramid")
class FeaturePyramid(keras.layers.Layer):
"""Implements a Feature Pyramid Network.
This implements the paper:
Tsung-Yi Lin, Piotr Dollar, Ross Girshick, Kaiming He, Bharath Hariharan,
and Serge Belongie. Feature Pyramid Networks for Object Detection.
(https://arxiv.org/pdf/1612.03144)
Feature Pyramid Networks (FPNs) are basic components that are added to an
existing feature extractor (CNN) to combine features at different scales.
For the basic FPN, the inputs are features `Ci` from different levels of a
CNN, which is usually the last block for each level, where the feature is
scaled from the image by a factor of `1/2^i`.
There is an output associated with each level in the basic FPN. The output
Pi at level `i` (corresponding to Ci) is given by performing a merge
operation on the outputs of:
1) a lateral operation on Ci (usually a conv2D layer with kernel = 1 and
strides = 1)
    2) a top-down upsampling operation from Pi+1 (except for the topmost level)
The final output of each level will also have a conv2D operation
(typically with kernel = 3 and strides = 1).
    The inputs to the layer should be a dict with int keys that match the
    pyramid_levels, e.g. for `pyramid_levels` = [2,3,4,5], the expected input
    dict should be `{2:c2, 3:c3, 4:c4, 5:c5}`.
    The output of the layer will have the same structure as the inputs: a dict
    with int keys and one value for each level.
Args:
min_level: a python int for the lowest level of the pyramid for
feature extraction.
max_level: a python int for the highest level of the pyramid for
feature extraction.
num_channels: an integer representing the number of channels for the FPN
operations, defaults to 256.
lateral_layers: a python dict with int keys that matches to each of the
pyramid level. The values of the dict should be `keras.Layer`, which
will be called with feature activation outputs from backbone at each
level. Defaults to None, and a `keras.Conv2D` layer with kernel 1x1
will be created for each pyramid level.
output_layers: a python dict with int keys that matches to each of the
pyramid level. The values of the dict should be `keras.Layer`, which
will be called with feature inputs and merged result from upstream
levels. Defaults to None, and a `keras.Conv2D` layer with kernel 3x3
will be created for each pyramid level.
Sample Usage:
```python
inp = keras.layers.Input((384, 384, 3))
backbone = keras.applications.EfficientNetB0(
input_tensor=inp,
include_top=False
)
layer_names = ['block2b_add',
'block3b_add',
'block5c_add',
'top_activation'
]
backbone_outputs = {}
for i, layer_name in enumerate(layer_names):
backbone_outputs[i+2] = backbone.get_layer(layer_name).output
# output_dict is a dict with 2, 3, 4, 5 as keys
output_dict = keras_cv.layers.FeaturePyramid(
min_level=2,
max_level=5
)(backbone_outputs)
```
"""
def __init__(
self,
min_level,
max_level,
num_channels=256,
lateral_layers=None,
output_layers=None,
**kwargs,
):
super().__init__(**kwargs)
self.min_level = min_level
self.max_level = max_level
self.pyramid_levels = list(range(min_level, max_level + 1))
self.num_channels = num_channels
# required for successful serialization
self.lateral_layers_passed = lateral_layers
self.output_layers_passed = output_layers
if not lateral_layers:
            # populate self.lateral_layers with default FPN Conv2D 1X1 layers
self.lateral_layers = {}
for i in self.pyramid_levels:
self.lateral_layers[i] = keras.layers.Conv2D(
self.num_channels,
kernel_size=1,
strides=1,
padding="same",
name=f"lateral_P{i}",
)
else:
self._validate_user_layers(lateral_layers, "lateral_layers")
self.lateral_layers = lateral_layers
# Output conv2d layers.
if not output_layers:
self.output_layers = {}
for i in self.pyramid_levels:
self.output_layers[i] = keras.layers.Conv2D(
self.num_channels,
kernel_size=3,
strides=1,
padding="same",
name=f"output_P{i}",
)
else:
self._validate_user_layers(output_layers, "output_layers")
self.output_layers = output_layers
# the same upsampling layer is used for all levels
self.top_down_op = keras.layers.UpSampling2D(size=2)
# the same merge layer is used for all levels
self.merge_op = keras.layers.Add()
def _validate_user_layers(self, user_input, param_name):
if (
not isinstance(user_input, dict)
or sorted(user_input.keys()) != self.pyramid_levels
):
raise ValueError(
f"Expect {param_name} to be a dict with keys as "
f"{self.pyramid_levels}, got {user_input}"
)
def call(self, features):
        # Note that this assertion might not be true for all the subclasses.
        # It is possible for an FPN to have more levels than the backbone
        # provides outputs for.
if (
not isinstance(features, dict)
or sorted(features.keys()) != self.pyramid_levels
):
raise ValueError(
"FeaturePyramid expects input features to be a dict with int "
"keys that match the values provided in pyramid_levels. "
f"Expect feature keys: {self.pyramid_levels}, got: {features}"
)
return self.build_feature_pyramid(features)
def build_feature_pyramid(self, input_features):
# To illustrate the connection/topology, the basic flow for a FPN with
# level 3, 4, 5 is like below:
#
# input_l5 -> conv2d_1x1_l5 ----V---> conv2d_3x3_l5 -> output_l5
# V
# upsample2d
# V
# input_l4 -> conv2d_1x1_l4 -> Add -> conv2d_3x3_l4 -> output_l4
# V
# upsample2d
# V
# input_l3 -> conv2d_1x1_l3 -> Add -> conv2d_3x3_l3 -> output_l3
output_features = {}
reversed_levels = list(sorted(input_features.keys(), reverse=True))
top_level = reversed_levels[0]
for level in reversed_levels:
output = self.lateral_layers[level](input_features[level])
if level < top_level:
                # The topmost level has no upstream output to merge with;
                # every other level merges with the upsampled output of the
                # level above.
upstream_output = self.top_down_op(output_features[level + 1])
output = self.merge_op([output, upstream_output])
output_features[level] = output
        # Apply the output layers only after the top-down pass, so that their
        # results are not fed into the downstream levels
for level in reversed_levels:
output_features[level] = self.output_layers[level](
output_features[level]
)
return output_features
def get_config(self):
config = {
"min_level": self.min_level,
"max_level": self.max_level,
"num_channels": self.num_channels,
"lateral_layers": self.lateral_layers_passed,
"output_layers": self.output_layers_passed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
| keras-cv/keras_cv/layers/feature_pyramid.py/0 | {
"file_path": "keras-cv/keras_cv/layers/feature_pyramid.py",
"repo_id": "keras-cv",
"token_count": 3879
} | 51 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
from typing import Mapping
from typing import Optional
from typing import Tuple
import tensorflow as tf
from tensorflow import keras
from keras_cv import bounding_box
from keras_cv.backend import assert_tf_keras
def _feature_bilinear_interpolation(
features: tf.Tensor, kernel_y: tf.Tensor, kernel_x: tf.Tensor
) -> tf.Tensor:
"""
Feature bilinear interpolation.
The RoIAlign feature f can be computed by bilinear interpolation
of four neighboring feature points f0, f1, f2, and f3.
    f(y, x) = [hy, ly] * [[f00, f01],  * [hx, lx]^T
                          [f10, f11]]
f(y, x) = (hy*hx)f00 + (hy*lx)f01 + (ly*hx)f10 + (lx*ly)f11
f(y, x) = w00*f00 + w01*f01 + w10*f10 + w11*f11
kernel_y = [hy, ly]
kernel_x = [hx, lx]
Args:
features: The features are in shape of [batch_size, num_boxes,
output_size * 2, output_size * 2, num_filters].
kernel_y: Tensor of size [batch_size, boxes, output_size, 2, 1].
kernel_x: Tensor of size [batch_size, boxes, output_size, 2, 1].
Returns:
A 5-D tensor representing feature crop of shape
[batch_size, num_boxes, output_size, output_size, num_filters].
"""
features_shape = tf.shape(features)
batch_size, num_boxes, output_size, num_filters = (
features_shape[0],
features_shape[1],
features_shape[2],
features_shape[4],
)
output_size = output_size // 2
kernel_y = tf.reshape(kernel_y, [batch_size, num_boxes, output_size * 2, 1])
kernel_x = tf.reshape(kernel_x, [batch_size, num_boxes, 1, output_size * 2])
# Use implicit broadcast to generate the interpolation kernel. The
# multiplier `4` is for avg pooling.
interpolation_kernel = kernel_y * kernel_x * 4
# Interpolate the gathered features with computed interpolation kernels.
features *= tf.cast(
tf.expand_dims(interpolation_kernel, axis=-1), dtype=features.dtype
)
features = tf.reshape(
features,
[batch_size * num_boxes, output_size * 2, output_size * 2, num_filters],
)
features = tf.nn.avg_pool(features, [1, 2, 2, 1], [1, 2, 2, 1], "VALID")
features = tf.reshape(
features, [batch_size, num_boxes, output_size, output_size, num_filters]
)
return features
def _compute_grid_positions(
boxes: tf.Tensor,
boundaries: tf.Tensor,
output_size: int,
sample_offset: float,
) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]:
"""
Computes the grid position w.r.t. the corresponding feature map.
Args:
boxes: a 3-D tensor of shape [batch_size, num_boxes, 4] encoding the
information of each box w.r.t. the corresponding feature map.
boxes[:, :, 0:2] are the grid position in (y, x) (float) of the top-left
corner of each box. boxes[:, :, 2:4] are the box sizes in (h, w) (float)
in terms of the number of pixels of the corresponding feature map
size.
boundaries: a 3-D tensor of shape [batch_size, num_boxes, 2] representing
the boundary (in (y, x)) of the corresponding feature map for each box.
Any resampled grid points that go beyond the boundary will be clipped.
output_size: a scalar indicating the output crop size.
      sample_offset: a float number in [0, 1] indicating the subpixel sample
        offset from the grid point.
Returns:
kernel_y: Tensor of size [batch_size, boxes, output_size, 2, 1].
kernel_x: Tensor of size [batch_size, boxes, output_size, 2, 1].
box_grid_y0y1: Tensor of size [batch_size, boxes, output_size, 2]
box_grid_x0x1: Tensor of size [batch_size, boxes, output_size, 2]
"""
boxes_shape = tf.shape(boxes)
batch_size, num_boxes = boxes_shape[0], boxes_shape[1]
if batch_size is None:
batch_size = tf.shape(boxes)[0]
box_grid_x = []
box_grid_y = []
for i in range(output_size):
box_grid_x.append(
boxes[:, :, 1] + (i + sample_offset) * boxes[:, :, 3] / output_size
)
box_grid_y.append(
boxes[:, :, 0] + (i + sample_offset) * boxes[:, :, 2] / output_size
)
box_grid_x = tf.stack(box_grid_x, axis=2)
box_grid_y = tf.stack(box_grid_y, axis=2)
box_grid_y0 = tf.floor(box_grid_y)
box_grid_x0 = tf.floor(box_grid_x)
box_grid_x0 = tf.maximum(tf.cast(0.0, dtype=box_grid_x0.dtype), box_grid_x0)
box_grid_y0 = tf.maximum(tf.cast(0.0, dtype=box_grid_y0.dtype), box_grid_y0)
box_grid_x0 = tf.minimum(
box_grid_x0, tf.expand_dims(boundaries[:, :, 1], -1)
)
box_grid_x1 = tf.minimum(
box_grid_x0 + 1, tf.expand_dims(boundaries[:, :, 1], -1)
)
box_grid_y0 = tf.minimum(
box_grid_y0, tf.expand_dims(boundaries[:, :, 0], -1)
)
box_grid_y1 = tf.minimum(
box_grid_y0 + 1, tf.expand_dims(boundaries[:, :, 0], -1)
)
box_gridx0x1 = tf.stack([box_grid_x0, box_grid_x1], axis=-1)
box_gridy0y1 = tf.stack([box_grid_y0, box_grid_y1], axis=-1)
# The RoIAlign feature f can be computed by bilinear interpolation of four
# neighboring feature points f0, f1, f2, and f3.
    # f(y, x) = [hy, ly] * [[f00, f01],  * [hx, lx]^T
    #                       [f10, f11]]
# f(y, x) = (hy*hx)f00 + (hy*lx)f01 + (ly*hx)f10 + (lx*ly)f11
# f(y, x) = w00*f00 + w01*f01 + w10*f10 + w11*f11
ly = box_grid_y - box_grid_y0
lx = box_grid_x - box_grid_x0
hy = 1.0 - ly
hx = 1.0 - lx
kernel_y = tf.reshape(
tf.stack([hy, ly], axis=3), [batch_size, num_boxes, output_size, 2, 1]
)
kernel_x = tf.reshape(
tf.stack([hx, lx], axis=3), [batch_size, num_boxes, output_size, 2, 1]
)
return kernel_y, kernel_x, box_gridy0y1, box_gridx0x1
def multilevel_crop_and_resize(
features: Dict[str, tf.Tensor],
boxes: tf.Tensor,
output_size: int = 7,
sample_offset: float = 0.5,
) -> tf.Tensor:
"""
Crop and resize on multilevel feature pyramid.
Generate the (output_size, output_size) set of pixels for each input box
by first locating the box into the correct feature level, and then cropping
and resizing it using the corresponding feature map of that level.
Args:
features: A dictionary with key as pyramid level and value as features.
The pyramid level keys need to be represented by strings like so:
"P2", "P3", "P4", and so on.
The features are in shape of [batch_size, height_l, width_l,
num_filters].
boxes: A 3-D Tensor of shape [batch_size, num_boxes, 4]. Each row
represents a box with [y1, x1, y2, x2] in un-normalized coordinates.
output_size: A scalar to indicate the output crop size.
      sample_offset: a float number in [0, 1] indicating the subpixel sample
        offset from the grid point.
Returns:
A 5-D tensor representing feature crop of shape
[batch_size, num_boxes, output_size, output_size, num_filters].
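    Example (an illustrative sketch; the shapes below are assumptions chosen
    for the example, not requirements of this function):
    ```python
    features = {
        f"P{level}": tf.random.normal(
            (2, 256 // 2**level, 256 // 2**level, 64)
        )
        for level in range(2, 6)
    }
    # One box per image, [y1, x1, y2, x2] in un-normalized coordinates.
    boxes = tf.constant([[[8.0, 8.0, 72.0, 72.0]], [[4.0, 4.0, 36.0, 36.0]]])
    crops = multilevel_crop_and_resize(features, boxes, output_size=7)
    # crops.shape == (2, 1, 7, 7, 64)
    ```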
"""
with tf.name_scope("multilevel_crop_and_resize"):
levels_str = list(features.keys())
# Levels are represented by strings with a prefix "P" to represent
# pyramid levels. The integer level can be obtained by looking at
# the value that follows the "P".
levels = [int(level_str[1:]) for level_str in levels_str]
min_level = min(levels)
max_level = max(levels)
features_shape = tf.shape(features[f"P{min_level}"])
batch_size, max_feature_height, max_feature_width, num_filters = (
features_shape[0],
features_shape[1],
features_shape[2],
features_shape[3],
)
num_boxes = tf.shape(boxes)[1]
# Stack feature pyramid into a features_all of shape
# [batch_size, levels, height, width, num_filters].
features_all = []
feature_heights = []
feature_widths = []
for level in range(min_level, max_level + 1):
shape = features[f"P{level}"].get_shape().as_list()
feature_heights.append(shape[1])
feature_widths.append(shape[2])
# Concat tensor of [batch_size, height_l * width_l, num_filters] for
# each level.
features_all.append(
tf.reshape(features[f"P{level}"], [batch_size, -1, num_filters])
)
features_r2 = tf.reshape(tf.concat(features_all, 1), [-1, num_filters])
# Calculate height_l * width_l for each level.
level_dim_sizes = [
feature_widths[i] * feature_heights[i]
for i in range(len(feature_widths))
]
# level_dim_offsets is accumulated sum of level_dim_size.
level_dim_offsets = [0]
for i in range(len(feature_widths) - 1):
level_dim_offsets.append(level_dim_offsets[i] + level_dim_sizes[i])
batch_dim_size = level_dim_offsets[-1] + level_dim_sizes[-1]
level_dim_offsets = tf.constant(level_dim_offsets, tf.int32)
height_dim_sizes = tf.constant(feature_widths, tf.int32)
# Assigns boxes to the right level.
box_width = boxes[:, :, 3] - boxes[:, :, 1]
box_height = boxes[:, :, 2] - boxes[:, :, 0]
areas_sqrt = tf.sqrt(
tf.cast(box_height, tf.float32) * tf.cast(box_width, tf.float32)
)
# following the FPN paper to divide by 224.
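        # i.e. level = floor(log2(sqrt(box_area) / 224)) + 4, so a box of
        # roughly 224x224 pixels at the input resolution maps to level 4.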
levels = tf.cast(
tf.math.floordiv(
tf.math.log(tf.math.divide_no_nan(areas_sqrt, 224.0)),
tf.math.log(2.0),
)
+ 4.0,
dtype=tf.int32,
)
# Maps levels between [min_level, max_level].
levels = tf.minimum(max_level, tf.maximum(levels, min_level))
# Projects box location and sizes to corresponding feature levels.
scale_to_level = tf.cast(
tf.pow(tf.constant(2.0), tf.cast(levels, tf.float32)),
dtype=boxes.dtype,
)
boxes /= tf.expand_dims(scale_to_level, axis=2)
box_width /= scale_to_level
box_height /= scale_to_level
boxes = tf.concat(
[
boxes[:, :, 0:2],
tf.expand_dims(box_height, -1),
tf.expand_dims(box_width, -1),
],
axis=-1,
)
# Maps levels to [0, max_level-min_level].
levels -= min_level
level_strides = tf.pow([[2.0]], tf.cast(levels, tf.float32))
boundary = tf.cast(
tf.concat(
[
tf.expand_dims(
[[tf.cast(max_feature_height, tf.float32)]]
/ level_strides
- 1,
axis=-1,
),
tf.expand_dims(
[[tf.cast(max_feature_width, tf.float32)]]
/ level_strides
- 1,
axis=-1,
),
],
axis=-1,
),
boxes.dtype,
)
# Compute grid positions.
(
kernel_y,
kernel_x,
box_gridy0y1,
box_gridx0x1,
) = _compute_grid_positions(boxes, boundary, output_size, sample_offset)
x_indices = tf.cast(
tf.reshape(box_gridx0x1, [batch_size, num_boxes, output_size * 2]),
dtype=tf.int32,
)
y_indices = tf.cast(
tf.reshape(box_gridy0y1, [batch_size, num_boxes, output_size * 2]),
dtype=tf.int32,
)
batch_size_offset = tf.tile(
tf.reshape(
tf.range(batch_size) * batch_dim_size, [batch_size, 1, 1, 1]
),
[1, num_boxes, output_size * 2, output_size * 2],
)
# Get level offset for each box. Each box belongs to one level.
levels_offset = tf.tile(
tf.reshape(
tf.gather(level_dim_offsets, levels),
[batch_size, num_boxes, 1, 1],
),
[1, 1, output_size * 2, output_size * 2],
)
y_indices_offset = tf.tile(
tf.reshape(
y_indices
* tf.expand_dims(tf.gather(height_dim_sizes, levels), -1),
[batch_size, num_boxes, output_size * 2, 1],
),
[1, 1, 1, output_size * 2],
)
x_indices_offset = tf.tile(
tf.reshape(x_indices, [batch_size, num_boxes, 1, output_size * 2]),
[1, 1, output_size * 2, 1],
)
indices = tf.reshape(
batch_size_offset
+ levels_offset
+ y_indices_offset
+ x_indices_offset,
[-1],
)
# TODO(tanzhenyu): replace tf.gather with tf.gather_nd and try to get
# similar performance.
features_per_box = tf.reshape(
tf.gather(features_r2, indices),
[
batch_size,
num_boxes,
output_size * 2,
output_size * 2,
num_filters,
],
)
# Bilinear interpolation.
features_per_box = _feature_bilinear_interpolation(
features_per_box, kernel_y, kernel_x
)
return features_per_box
# TODO(tanzhenyu): Remove this implementation once roi_pool has better
# performance as this is mostly a duplicate of
# https://github.com/tensorflow/models/blob/master/official/legacy/detection/ops/spatial_transform_ops.py#L324
@keras.utils.register_keras_serializable(package="keras_cv")
class _ROIAligner(keras.layers.Layer):
"""Performs ROIAlign for the second stage processing."""
def __init__(
self,
bounding_box_format,
target_size=7,
sample_offset: float = 0.5,
**kwargs,
):
"""
Generates ROI Aligner.
Args:
bounding_box_format: the input format for boxes.
          target_size: An `int` of the output size of the cropped features.
sample_offset: A `float` in [0, 1] of the subpixel sample offset.
**kwargs: Additional keyword arguments passed to Layer.
"""
assert_tf_keras("keras_cv.layers._ROIAligner")
self._config_dict = {
"bounding_box_format": bounding_box_format,
"crop_size": target_size,
"sample_offset": sample_offset,
}
super().__init__(**kwargs)
def call(
self,
features: Mapping[str, tf.Tensor],
boxes: tf.Tensor,
training: Optional[bool] = None,
):
"""
Args:
features: A dictionary with key as pyramid level and value as
features. The features are in shape of
[batch_size, height_l, width_l, num_filters].
boxes: A 3-D `tf.Tensor` of shape [batch_size, num_boxes, 4]. Each row
represents a box with [y1, x1, y2, x2] in un-normalized coordinates.
training: A `bool` of whether it is in training mode.
Returns:
A 5-D `tf.Tensor` representing feature crop of shape
[batch_size, num_boxes, crop_size, crop_size, num_filters].
"""
boxes = bounding_box.convert_format(
boxes,
source=self._config_dict["bounding_box_format"],
target="yxyx",
)
roi_features = multilevel_crop_and_resize(
features,
boxes,
output_size=self._config_dict["crop_size"],
sample_offset=self._config_dict["sample_offset"],
)
return roi_features
def get_config(self):
return self._config_dict
| keras-cv/keras_cv/layers/object_detection/roi_align.py/0 | {
"file_path": "keras-cv/keras_cv/layers/object_detection/roi_align.py",
"repo_id": "keras-cv",
"token_count": 7772
} | 52 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.layers.object_detection_3d import voxel_utils
from keras_cv.tests.test_case import TestCase
class PadOrTrimToTest(TestCase):
"""Tests for pad_or_trim_to, branched from
https://github.com/tensorflow/lingvo/blob/master/lingvo/core/py_utils_test.py.
"""
def test_2D_constant_shape_pad(self):
x = tf.random.normal(shape=(3, 3), seed=123456)
shape = [4, 6]
padded_x_right = voxel_utils._pad_or_trim_to(x, shape, pad_val=0)
padded_x_left = voxel_utils._pad_or_trim_to(
x, shape, pad_val=0, pad_after_contents=False
)
self.assertEqual(padded_x_right.shape.as_list(), [4, 6])
self.assertEqual(padded_x_left.shape.as_list(), [4, 6])
real_x_right, real_x_left = self.evaluate(
[padded_x_right, padded_x_left]
)
expected_x_right = [
[0.38615, 2.975221, -0.852826, 0.0, 0.0, 0.0],
[-0.571142, -0.432439, 0.413158, 0.0, 0.0, 0.0],
[0.255314, -0.985647, 1.461641, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
]
self.assertAllClose(expected_x_right, real_x_right)
expected_x_left = [
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.38615, 2.975221, -0.852826],
[0.0, 0.0, 0.0, -0.571142, -0.432439, 0.413158],
[0.0, 0.0, 0.0, 0.255314, -0.985647, 1.461641],
]
self.assertAllClose(expected_x_left, real_x_left)
def test_2D_constant_shape_trim(self):
x = tf.random.normal(shape=(3, 3), seed=123456)
shape = [1, 3]
trimmed_x_right = voxel_utils._pad_or_trim_to(x, shape, pad_val=0)
trimmed_x_left = voxel_utils._pad_or_trim_to(
x, shape, pad_val=0, pad_after_contents=False
)
self.assertEqual(trimmed_x_right.shape.as_list(), [1, 3])
self.assertEqual(trimmed_x_left.shape.as_list(), [1, 3])
real_x_right, real_x_left = self.evaluate(
[trimmed_x_right, trimmed_x_left]
)
expected_x_right = [[0.38615, 2.975221, -0.852826]]
self.assertAllClose(expected_x_right, real_x_right)
expected_x_left = [[0.255314, -0.985647, 1.461641]]
self.assertAllClose(expected_x_left, real_x_left)
| keras-cv/keras_cv/layers/object_detection_3d/voxel_utils_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/object_detection_3d/voxel_utils_test.py",
"repo_id": "keras-cv",
"token_count": 1406
} | 53 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
VectorizedBaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing
@keras_cv_export("keras_cv.layers.Equalization")
class Equalization(VectorizedBaseImageAugmentationLayer):
"""Equalization performs histogram equalization on a channel-wise basis.
Args:
value_range: a tuple or a list of two elements. The first value
represents the lower bound for values in passed images, the second
represents the upper bound. Images passed to the layer should have
values within `value_range`.
bins: Integer indicating the number of bins to use in histogram
equalization. Should be in the range [0, 256].
Usage:
```python
equalize = Equalization()
(images, labels), _ = keras.datasets.cifar10.load_data()
    # Note that images are a uint8 Tensor with values in the range [0, 255]
images = equalize(images)
```
Call arguments:
images: Tensor of pixels in range [0, 255], in RGB format. Can be
of type float or int. Should be in NHWC format.
"""
def __init__(self, value_range, bins=256, **kwargs):
super().__init__(**kwargs)
self.bins = bins
self.value_range = value_range
def equalize_channel(self, images, channel_index):
"""equalize_channel performs histogram equalization on a single channel.
Args:
            images: int Tensor with pixels in range [0, 255], RGB format,
                with channels last
channel_index: channel to equalize
"""
is_single_image = tf.rank(images) == 4 and tf.shape(images)[0] == 1
images = images[..., channel_index]
# Compute the histogram of the image channel.
# If the input is not a batch of images, directly using
# tf.histogram_fixed_width is much faster than using tf.vectorized_map
if is_single_image:
histogram = tf.histogram_fixed_width(
images, [0, 255], nbins=self.bins
)
histogram = tf.expand_dims(histogram, axis=0)
else:
partial_hist = partial(
tf.histogram_fixed_width, value_range=[0, 255], nbins=self.bins
)
histogram = tf.vectorized_map(
partial_hist, images, fallback_to_while_loop=True, warn=True
)
        # For the purposes of computing the step, filter out the zeros.
# Zeroes are replaced by a big number while calculating min to keep
# shape constant across input sizes for compatibility with
# vectorized_map
big_number = 1410065408
histogram_without_zeroes = tf.where(
tf.equal(histogram, 0),
big_number,
histogram,
)
step = (
tf.reduce_sum(histogram, axis=-1)
- tf.reduce_min(histogram_without_zeroes, axis=-1)
) // (self.bins - 1)
def build_mapping(histogram, step):
            batch_size = tf.shape(histogram)[0]
# Replace where step is 0 with 1 to avoid division by 0.
# This doesn't change the result, because where step==0 the
# original image is returned
_step = tf.where(
tf.equal(step, 0),
1,
step,
)
_step = tf.expand_dims(_step, -1)
            # Compute the cumulative sum, shifted by step // 2
            # and then normalized by step.
lookup_table = (
tf.cumsum(histogram, axis=-1) + (_step // 2)
) // _step
# Shift lookup_table, prepending with 0.
lookup_table = tf.concat(
                [tf.tile([[0]], [batch_size, 1]), lookup_table[..., :-1]],
axis=1,
)
# Clip the counts to be in range. This is done
# in the C code for image.point.
return tf.clip_by_value(lookup_table, 0, 255)
# If step is zero, return the original image. Otherwise, build
# lookup table from the full histogram and step and then index from it.
# The lookup table is built for all images,
# regardless of the corresponding value of step.
result = tf.where(
tf.reshape(tf.equal(step, 0), (-1, 1, 1)),
images,
tf.gather(
build_mapping(histogram, step), images, batch_dims=1, axis=1
),
)
return result
def augment_images(self, images, transformations=None, **kwargs):
images = preprocessing.transform_value_range(
images, self.value_range, (0, 255), dtype=self.compute_dtype
)
images = tf.cast(images, tf.int32)
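        # Equalize each channel independently; `tf.map_fn` stacks the
        # per-channel results along axis 0 ([channels, batch, height, width]),
        # so the result is transposed back to NHWC below.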
images = tf.map_fn(
lambda channel: self.equalize_channel(images, channel),
tf.range(tf.shape(images)[-1]),
)
images = tf.transpose(images, [1, 2, 3, 0])
images = tf.cast(images, self.compute_dtype)
images = preprocessing.transform_value_range(
images, (0, 255), self.value_range, dtype=self.compute_dtype
)
return images
def augment_bounding_boxes(self, bounding_boxes, **kwargs):
return bounding_boxes
def augment_labels(self, labels, transformations=None, **kwargs):
return labels
def augment_segmentation_masks(
self, segmentation_masks, transformations, **kwargs
):
return segmentation_masks
def augment_keypoints(self, keypoints, transformations, **kwargs):
return keypoints
def augment_targets(self, targets, transformations, **kwargs):
return targets
def augment_ragged_image(self, image, transformation, **kwargs):
image = tf.expand_dims(image, axis=0)
image = self.augment_images(
images=image, transformations=transformation, **kwargs
)
return tf.squeeze(image, axis=0)
def get_config(self):
config = super().get_config()
config.update({"bins": self.bins, "value_range": self.value_range})
return config
| keras-cv/keras_cv/layers/preprocessing/equalization.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/equalization.py",
"repo_id": "keras-cv",
"token_count": 2893
} | 54 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
from absl.testing import parameterized
from keras_cv import layers
from keras_cv.tests.test_case import TestCase
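# Layers expected to preserve the ragged/dense structure of their inputs.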
CONSISTENT_OUTPUT_TEST_CONFIGURATIONS = [
("AutoContrast", layers.AutoContrast, {"value_range": (0, 255)}),
("ChannelShuffle", layers.ChannelShuffle, {}),
("Equalization", layers.Equalization, {"value_range": (0, 255)}),
("Grayscale", layers.Grayscale, {}),
("GridMask", layers.GridMask, {}),
(
"Posterization",
layers.Posterization,
{"bits": 3, "value_range": (0, 255)},
),
(
"RandomColorDegeneration",
layers.RandomColorDegeneration,
{"factor": 0.5},
),
(
"RandomCutout",
layers.RandomCutout,
{"height_factor": 0.2, "width_factor": 0.2},
),
(
"RandomHue",
layers.RandomHue,
{"factor": 0.5, "value_range": (0, 255)},
),
(
"RandomChannelShift",
layers.RandomChannelShift,
{"value_range": (0, 255), "factor": 0.5},
),
(
"RandomColorJitter",
layers.RandomColorJitter,
{
"value_range": (0, 255),
"brightness_factor": (-0.2, 0.5),
"contrast_factor": (0.5, 0.9),
"saturation_factor": (0.5, 0.9),
"hue_factor": (0.5, 0.9),
"seed": 1,
},
),
(
"RandomContrast",
layers.RandomContrast,
{"value_range": (0, 255), "factor": 0.5},
),
(
"RandomGaussianBlur",
layers.RandomGaussianBlur,
{"kernel_size": 3, "factor": (0.0, 3.0)},
),
("RandomFlip", layers.RandomFlip, {"mode": "horizontal"}),
("RandomJpegQuality", layers.RandomJpegQuality, {"factor": (75, 100)}),
("RandomRotation", layers.RandomRotation, {"factor": 0.5}),
("RandomSaturation", layers.RandomSaturation, {"factor": 0.5}),
(
"RandomSharpness",
layers.RandomSharpness,
{"factor": 0.5, "value_range": (0, 255)},
),
("RandomShear", layers.RandomShear, {"x_factor": 0.3, "y_factor": 0.3}),
(
"RandomTranslation",
layers.RandomTranslation,
{"height_factor": 0.5, "width_factor": 0.5},
),
(
"RandomZoom",
layers.RandomZoom,
{"height_factor": 0.2, "width_factor": 0.5},
),
("Solarization", layers.Solarization, {"value_range": (0, 255)}),
(
"RandomBrightness",
layers.RandomBrightness,
{"factor": (1, 1), "value_range": (0, 1)},
),
]
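# Layers expected to return dense outputs, even when given ragged inputs.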
DENSE_OUTPUT_TEST_CONFIGURATIONS = [
(
"JitteredResize",
layers.JitteredResize,
{
"target_size": (224, 224),
"scale_factor": (0.8, 1.25),
"bounding_box_format": "xywh",
},
),
(
"RandomCrop",
layers.RandomCrop,
{"height": 2, "width": 2},
),
(
"RandomCropAndResize",
layers.RandomCropAndResize,
{
"target_size": (224, 224),
"crop_area_factor": (0.8, 1.0),
"aspect_ratio_factor": (3 / 4, 4 / 3),
},
),
(
"Resizing",
layers.Resizing,
{
"height": 224,
"width": 224,
},
),
]
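# Layers expected to return ragged outputs, even when given dense inputs.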
RAGGED_OUTPUT_TEST_CONFIGURATIONS = [
("RandomAspectRatio", layers.RandomAspectRatio, {"factor": (0.9, 1.1)}),
]
class RaggedImageTest(TestCase):
@parameterized.named_parameters(*CONSISTENT_OUTPUT_TEST_CONFIGURATIONS)
def test_preserves_ragged_status(self, layer_cls, init_args):
layer = layer_cls(**init_args)
inputs = tf.ragged.stack(
[
np.ones((5, 5, 3)),
np.ones((8, 8, 3)),
]
)
outputs = layer(inputs)
self.assertTrue(isinstance(outputs, tf.RaggedTensor))
@parameterized.named_parameters(*DENSE_OUTPUT_TEST_CONFIGURATIONS)
def test_converts_ragged_to_dense(self, layer_cls, init_args):
layer = layer_cls(**init_args)
inputs = tf.ragged.stack(
[
np.ones((5, 5, 3)),
np.ones((8, 8, 3)),
]
)
outputs = layer(inputs)
self.assertTrue(isinstance(outputs, tf.Tensor))
@parameterized.named_parameters(*RAGGED_OUTPUT_TEST_CONFIGURATIONS)
def test_dense_to_ragged(self, layer_cls, init_args):
layer = layer_cls(**init_args)
inputs = np.ones((8, 512, 512, 3))
outputs = layer(inputs)
self.assertTrue(isinstance(outputs, tf.RaggedTensor))
| keras-cv/keras_cv/layers/preprocessing/ragged_image_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/ragged_image_test.py",
"repo_id": "keras-cv",
"token_count": 2445
} | 55 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
from keras_cv.backend import ops
from keras_cv.layers import preprocessing
from keras_cv.tests.test_case import TestCase
class RandomColorDegenerationTest(TestCase):
def test_random_color_degeneration_base_case(self):
img_shape = (50, 50, 3)
xs = tf.stack(
[2 * np.ones(img_shape), np.ones(img_shape)],
axis=0,
)
layer = preprocessing.RandomColorDegeneration(0.0)
ys = layer(xs)
self.assertEqual(xs.shape, ys.shape)
def test_color_degeneration_full_factor(self):
img_shape = (50, 50, 1)
r = np.ones(img_shape)
g = 2 * np.ones(img_shape)
b = 3 * np.ones(img_shape)
xs = tf.concat([r, g, b], axis=-1)
layer = preprocessing.RandomColorDegeneration(factor=(1, 1))
ys = ops.convert_to_numpy(layer(xs))
# Color degeneration uses standard luma conversion for RGB->Grayscale.
# The formula for luma is result= 0.2989*r + 0.5870*g + 0.1140*b
luma_result = 0.2989 + 2 * 0.5870 + 3 * 0.1140
self.assertAllClose(ys, np.ones_like(ys) * luma_result)
def test_color_degeneration_70p_factor(self):
img_shape = (50, 50, 1)
r = np.ones(img_shape)
g = 2 * np.ones(img_shape)
b = 3 * np.ones(img_shape)
xs = tf.concat([r, g, b], axis=-1)
layer = preprocessing.RandomColorDegeneration(factor=(0.7, 0.7))
ys = ops.convert_to_numpy(layer(xs))
# Color degeneration uses standard luma conversion for RGB->Grayscale.
# The formula for luma is result= 0.2989*r + 0.5870*g + 0.1140*b
luma_result = 0.2989 + 2 * 0.5870 + 3 * 0.1140
# with factor=0.7, luma_result should be blended at a 70% rate with the
# original
r_result = luma_result * 0.7 + 1 * 0.3
g_result = luma_result * 0.7 + 2 * 0.3
b_result = luma_result * 0.7 + 3 * 0.3
r = ys[..., 0]
g = ys[..., 1]
b = ys[..., 2]
self.assertAllClose(r, np.ones_like(r) * r_result)
self.assertAllClose(g, np.ones_like(g) * g_result)
self.assertAllClose(b, np.ones_like(b) * b_result)
| keras-cv/keras_cv/layers/preprocessing/random_color_degeneration_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_color_degeneration_test.py",
"repo_id": "keras-cv",
"token_count": 1212
} | 56 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from absl.testing import parameterized
from keras_cv import core
from keras_cv.backend import ops
from keras_cv.layers import preprocessing
from keras_cv.tests.test_case import TestCase
class RandomHueTest(TestCase):
def test_preserves_output_shape(self):
image_shape = (4, 8, 8, 3)
image = np.random.uniform(size=image_shape) * 255.0
layer = preprocessing.RandomHue(factor=(0.3, 0.8), value_range=(0, 255))
output = layer(image)
self.assertEqual(image.shape, output.shape)
self.assertNotAllClose(image, output)
def test_adjust_no_op(self):
image_shape = (4, 8, 8, 3)
image = np.random.uniform(size=image_shape) * 255.0
layer = preprocessing.RandomHue(factor=(0.0, 0.0), value_range=(0, 255))
output = layer(image)
self.assertAllClose(image, output, atol=1e-5, rtol=1e-5)
def test_adjust_full_opposite_hue(self):
image_shape = (4, 8, 8, 3)
image = np.random.uniform(size=image_shape) * 255.0
layer = preprocessing.RandomHue(factor=(1.0, 1.0), value_range=(0, 255))
output = ops.convert_to_numpy(layer(image))
channel_max = np.max(output, axis=-1)
channel_min = np.min(output, axis=-1)
        # Make sure the max and min channels are the same between input and
        # output. Meanwhile, the remaining channels swap with each other.
self.assertAllClose(
channel_max,
np.max(image, axis=-1),
atol=1e-5,
rtol=1e-5,
)
self.assertAllClose(
channel_min,
np.min(image, axis=-1),
atol=1e-5,
rtol=1e-5,
)
@parameterized.named_parameters(
("025", 0.25), ("05", 0.5), ("075", 0.75), ("100", 1.0)
)
def test_adjusts_all_values_for_factor(self, factor):
image_shape = (4, 8, 8, 3)
# Value range (0, 100)
image = np.random.uniform(size=image_shape) * 100.0
layer = preprocessing.RandomHue(
factor=(factor, factor), value_range=(0, 255)
)
output = layer(image)
self.assertNotAllClose(image, output, atol=1e-5, rtol=1e-5)
def test_adjustment_for_non_rgb_value_range(self):
image_shape = (4, 8, 8, 3)
# Value range (0, 100)
image = np.random.uniform(size=image_shape) * 100.0
layer = preprocessing.RandomHue(factor=(0.0, 0.0), value_range=(0, 255))
output = layer(image)
self.assertAllClose(image, output, atol=1e-5, rtol=1e-5)
layer = preprocessing.RandomHue(factor=(0.3, 0.8), value_range=(0, 255))
output = layer(image)
self.assertNotAllClose(image, output)
def test_with_uint8(self):
image_shape = (4, 8, 8, 3)
image = (np.random.uniform(size=image_shape) * 255.0).astype(np.uint8)
layer = preprocessing.RandomHue(factor=(0.0, 0.0), value_range=(0, 255))
output = layer(image)
self.assertAllClose(image, output, atol=1e-5, rtol=1e-5)
layer = preprocessing.RandomHue(factor=(0.3, 0.8), value_range=(0, 255))
output = layer(image)
self.assertNotAllClose(image, output)
def test_config(self):
layer = preprocessing.RandomHue(factor=(0.3, 0.8), value_range=(0, 255))
config = layer.get_config()
self.assertTrue(isinstance(config["factor"], core.UniformFactorSampler))
self.assertEqual(config["factor"].get_config()["lower"], 0.3)
self.assertEqual(config["factor"].get_config()["upper"], 0.8)
self.assertEqual(config["value_range"], (0, 255))
| keras-cv/keras_cv/layers/preprocessing/random_hue_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_hue_test.py",
"repo_id": "keras-cv",
"token_count": 1825
} | 57 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import tensorflow as tf
import keras_cv.layers as cv_layers
from keras_cv.backend.config import keras_3
from keras_cv.tests.test_case import TestCase
class RepeatedAugmentationTest(TestCase):
@pytest.mark.skipif(keras_3(), reason="Disabled for Keras 3")
def test_output_shapes(self):
repeated_augment = cv_layers.RepeatedAugmentation(
augmenters=[
cv_layers.RandAugment(value_range=(0, 255)),
cv_layers.RandomFlip(),
]
)
inputs = {
"images": tf.ones((8, 512, 512, 3)),
"labels": tf.ones((8,)),
}
outputs = repeated_augment(inputs)
self.assertEqual(outputs["images"].shape, (16, 512, 512, 3))
self.assertEqual(outputs["labels"].shape, (16,))
@pytest.mark.skipif(keras_3(), reason="disabling test for Keras 3")
def test_with_mix_up(self):
repeated_augment = cv_layers.RepeatedAugmentation(
augmenters=[
cv_layers.RandAugment(value_range=(0, 255)),
cv_layers.MixUp(),
]
)
inputs = {
"images": tf.ones((8, 512, 512, 3)),
"labels": tf.ones((8, 10)),
}
outputs = repeated_augment(inputs)
self.assertEqual(outputs["images"].shape, (16, 512, 512, 3))
self.assertEqual(outputs["labels"].shape, (16, 10))
| keras-cv/keras_cv/layers/preprocessing/repeated_augmentation_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/repeated_augmentation_test.py",
"repo_id": "keras-cv",
"token_count": 841
} | 58 |
Copyright (c) 2023 Waymo LLC. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
Additional IP Rights Grant (Patents)
"Works" means the code located at keras_cv/layers/preprocessing_3d/waymo
licensed from Waymo LLC ("Waymo") for inclusion in the KerasCV project at
github.com/keras-team/keras-cv. "Patents" means the pending U.S. Patent App.
No. 63/418,259 and any issued patents arising therefrom. Subject to the terms
and conditions of this license, Waymo hereby grants to you a limited worldwide,
non-exclusive, royalty-free, personal patent license to make, have made, use,
and import the Works, where such license applies only to those Patent claims
that are necessarily infringed by the Works executing the "preprocessing_3d"
augmentation library on 3D perception tasks using the
"lidaraugment_keraspolicy.py" file. This grant does not include claims that
would be infringed by combining the Works with other works, utilizing the Works
on other tasks, or as a consequence of further modification of the Works. If
you or your agent or exclusive licensee institute or order or agree to the
institution of patent litigation or any other patent enforcement activity
against any entity (including a cross-claim or counterclaim in a lawsuit)
alleging that the Works or any activity using the Works to execute functions for
3D perception tasks constitutes direct or contributory patent infringement, or
inducement of patent infringement, then any patent rights granted to you under
this license for the Works shall terminate as of the date such litigation is
filed.
DISCLAIMER
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| keras-cv/keras_cv/layers/preprocessing_3d/waymo/LICENSE/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing_3d/waymo/LICENSE",
"repo_id": "keras-cv",
"token_count": 768
} | 59 |
# Copyright 2022 Waymo LLC.
#
# Licensed under the terms in https://github.com/keras-team/keras-cv/blob/master/keras_cv/layers/preprocessing_3d/waymo/LICENSE # noqa: E501
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.bounding_box_3d import CENTER_XYZ_DXDYDZ_PHI
from keras_cv.layers.preprocessing_3d import base_augmentation_layer_3d
from keras_cv.point_cloud import group_points_by_boxes
from keras_cv.point_cloud import is_within_box3d
POINT_CLOUDS = base_augmentation_layer_3d.POINT_CLOUDS
BOUNDING_BOXES = base_augmentation_layer_3d.BOUNDING_BOXES
OBJECT_POINT_CLOUDS = base_augmentation_layer_3d.OBJECT_POINT_CLOUDS
OBJECT_BOUNDING_BOXES = base_augmentation_layer_3d.OBJECT_BOUNDING_BOXES
@keras_cv_export("keras_cv.layers.GroupPointsByBoundingBoxes")
class GroupPointsByBoundingBoxes(
base_augmentation_layer_3d.BaseAugmentationLayer3D
):
"""A preprocessing layer which groups point clouds based on bounding boxes
during training.
This layer will group point clouds based on bounding boxes and generate
OBJECT_POINT_CLOUDS and OBJECT_BOUNDING_BOXES tensors.
Input shape:
point_clouds: 3D (multi frames) float32 Tensor with shape
[num of frames, num of points, num of point features].
The first 5 features are [x, y, z, class, range].
bounding_boxes: 3D (multi frames) float32 Tensor with shape
[num of frames, num of boxes, num of box features]. Boxes are expected
to follow the CENTER_XYZ_DXDYDZ_PHI format. Refer to
https://github.com/keras-team/keras-cv/blob/master/keras_cv/bounding_box_3d/formats.py
Output shape:
A dictionary of Tensors with the same shape as input Tensors and two
additional items for OBJECT_POINT_CLOUDS (shape [num of frames, num of
valid boxes, max num of points, num of point features]) and
OBJECT_BOUNDING_BOXES (shape [num of frames, num of valid boxes, num of
box features]).
Arguments:
      label_index: An optional int scalar that sets the target object index.
Bounding boxes and corresponding point clouds with box class ==
label_index will be saved as OBJECT_BOUNDING_BOXES and
OBJECT_POINT_CLOUDS. If label index is None, all valid bounding boxes
(box class !=0) are used.
      min_points_per_bounding_boxes: an int scalar that sets the min number of
        points in a bounding box. If a bounding box contains fewer than
        min_points_per_bounding_boxes points, the bounding box is filtered out.
      max_points_per_bounding_boxes: an int scalar that sets the max number of
        points in a bounding box. All the object point clouds will be padded or
        trimmed to the same shape, where the number of points dimension is
        max_points_per_bounding_boxes.
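    Example (an illustrative sketch; the input tensors and the string keys
    shown are assumptions based on the constants imported above):
    ```python
    layer = GroupPointsByBoundingBoxes(
        min_points_per_bounding_boxes=10,
        max_points_per_bounding_boxes=1000,
    )
    outputs = layer(
        {
            "point_clouds": point_clouds,  # [frames, points, point features]
            "bounding_boxes": bounding_boxes,  # [frames, boxes, box features]
        }
    )
    object_point_clouds = outputs["object_point_clouds"]
    object_bounding_boxes = outputs["object_bounding_boxes"]
    ```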
"""
def __init__(
self,
label_index=None,
min_points_per_bounding_boxes=0,
max_points_per_bounding_boxes=2000,
**kwargs
):
super().__init__(**kwargs)
if label_index and label_index < 0:
raise ValueError("label_index must be >=0 or None.")
if min_points_per_bounding_boxes < 0:
raise ValueError("min_points_per_bounding_boxes must be >=0.")
if max_points_per_bounding_boxes < 0:
raise ValueError("max_points_per_bounding_boxes must be >=0.")
if min_points_per_bounding_boxes > max_points_per_bounding_boxes:
raise ValueError(
"max_paste_bounding_boxes must be >= "
"min_points_per_bounding_boxes."
)
self._label_index = label_index
self._min_points_per_bounding_boxes = min_points_per_bounding_boxes
self._max_points_per_bounding_boxes = max_points_per_bounding_boxes
self._auto_vectorize = False
def get_config(self):
return {
"label_index": self._label_index,
"min_points_per_bounding_boxes": self._min_points_per_bounding_boxes, # noqa: E501
"max_points_per_bounding_boxes": self._max_points_per_bounding_boxes, # noqa: E501
}
def augment_point_clouds_bounding_boxes(
self, point_clouds, bounding_boxes, **kwargs
):
if self._label_index:
bounding_boxes_mask = (
bounding_boxes[0, :, CENTER_XYZ_DXDYDZ_PHI.CLASS]
== self._label_index
)
object_bounding_boxes = tf.boolean_mask(
bounding_boxes, bounding_boxes_mask, axis=1
)
else:
bounding_boxes_mask = (
bounding_boxes[0, :, CENTER_XYZ_DXDYDZ_PHI.CLASS] > 0.0
)
object_bounding_boxes = tf.boolean_mask(
bounding_boxes, bounding_boxes_mask, axis=1
)
points_in_bounding_boxes = is_within_box3d(
point_clouds[:, :, :3], object_bounding_boxes[:, :, :7]
)
# Filter bounding boxes using the current frame.
# [num_boxes]
min_points_filter = (
tf.reduce_sum(
tf.cast(points_in_bounding_boxes[0], dtype=tf.int32), axis=0
)
>= self._min_points_per_bounding_boxes
)
object_bounding_boxes = tf.boolean_mask(
object_bounding_boxes, min_points_filter, axis=1
)
points_in_bounding_boxes = tf.boolean_mask(
points_in_bounding_boxes, min_points_filter, axis=2
)
# [num of frames, num of boxes, num of points].
points_in_bounding_boxes = tf.transpose(
points_in_bounding_boxes, [0, 2, 1]
)
points_in_bounding_boxes = tf.cast(points_in_bounding_boxes, tf.int32)
sort_valid_index = tf.argsort(
points_in_bounding_boxes, axis=-1, direction="DESCENDING"
)
sort_valid_mask = tf.gather(
points_in_bounding_boxes, sort_valid_index, axis=2, batch_dims=2
)[:, :, : self._max_points_per_bounding_boxes]
# [num of frames, num of boxes, self._max_points_per_bounding_boxes, num
# of point features].
object_point_clouds = point_clouds[:, tf.newaxis, :, :]
num_valid_bounding_boxes = tf.shape(object_bounding_boxes)[1]
object_point_clouds = tf.tile(
object_point_clouds, [1, num_valid_bounding_boxes, 1, 1]
)
object_point_clouds = tf.gather(
object_point_clouds, sort_valid_index, axis=2, batch_dims=2
)[:, :, : self._max_points_per_bounding_boxes, :]
object_point_clouds = tf.where(
sort_valid_mask[:, :, :, tf.newaxis] > 0, object_point_clouds, 0.0
)
return (
object_point_clouds,
object_bounding_boxes,
)
def augment_point_clouds_bounding_boxes_v2(
self, point_clouds, bounding_boxes, **kwargs
):
if self._label_index:
bounding_boxes_mask = (
bounding_boxes[0, :, CENTER_XYZ_DXDYDZ_PHI.CLASS]
== self._label_index
)
object_bounding_boxes = tf.boolean_mask(
bounding_boxes, bounding_boxes_mask, axis=1
)
else:
bounding_boxes_mask = (
bounding_boxes[0, :, CENTER_XYZ_DXDYDZ_PHI.CLASS] > 0.0
)
object_bounding_boxes = tf.boolean_mask(
bounding_boxes, bounding_boxes_mask, axis=1
)
# [frames, num_boxes, ragged_points]
points_in_bounding_boxes = group_points_by_boxes(
point_clouds[:, :, :3], object_bounding_boxes[:, :, :7]
)
# Filter bounding boxes using the current frame.
# [num_boxes]
min_points_filter = (
points_in_bounding_boxes.row_lengths(-1)
>= self._min_points_per_bounding_boxes
)
# [frames, num_valid_boxes, box_feature]
object_bounding_boxes = tf.ragged.boolean_mask(
object_bounding_boxes, min_points_filter
)
# [frames, num_valid_boxes, ragged_points]
points_in_bounding_boxes = tf.ragged.boolean_mask(
points_in_bounding_boxes, min_points_filter
)
# point_clouds: [frames, num_points, point_feature]
# object_point_clouds: [frames, num_valid_boxes, ragged_points,
# point_feature]
object_point_clouds = tf.gather(
point_clouds, points_in_bounding_boxes, axis=1, batch_dims=1
)
return (object_point_clouds, object_bounding_boxes)
def _augment(self, inputs):
result = inputs
point_clouds = inputs[POINT_CLOUDS]
bounding_boxes = inputs[BOUNDING_BOXES]
transformation = self.get_random_transformation(
point_clouds=point_clouds,
bounding_boxes=bounding_boxes,
)
(
object_point_clouds,
object_bounding_boxes,
) = self.augment_point_clouds_bounding_boxes(
point_clouds,
bounding_boxes=bounding_boxes,
transformation=transformation,
)
result.update(
{
OBJECT_POINT_CLOUDS: object_point_clouds,
OBJECT_BOUNDING_BOXES: object_bounding_boxes,
}
)
return result
def call(self, inputs):
# TODO(ianstenbit): Support the model input format.
point_clouds = inputs[POINT_CLOUDS]
bounding_boxes = inputs[BOUNDING_BOXES]
if point_clouds.shape.rank == 3 and bounding_boxes.shape.rank == 3:
return self._augment(inputs)
elif point_clouds.shape.rank == 4 and bounding_boxes.shape.rank == 4:
batch = point_clouds.get_shape().as_list()[0]
object_point_clouds_list = []
object_bounding_boxes_list = []
for i in range(batch):
(
object_point_clouds,
object_bounding_boxes,
) = self.augment_point_clouds_bounding_boxes(
inputs[POINT_CLOUDS][i], inputs[BOUNDING_BOXES][i]
)
object_point_clouds_list += [object_point_clouds]
object_bounding_boxes_list += [object_bounding_boxes]
# object_point_clouds shape [num of frames, num of valid boxes,
# max num of points, num of point features].
inputs[OBJECT_POINT_CLOUDS] = tf.concat(
object_point_clouds_list, axis=-3
)
# object_bounding_boxes shape [num of frames, num of valid
# boxes, num of box features].
inputs[OBJECT_BOUNDING_BOXES] = tf.concat(
object_bounding_boxes_list, axis=-2
)
return inputs
else:
raise ValueError(
"Point clouds augmentation layers are expecting inputs "
"point clouds and bounding boxes to be rank 3D (Frame, "
"Point, Feature) or 4D (Batch, Frame, Point, Feature) "
"tensors. Got shape: {} and {}".format(
point_clouds.shape, bounding_boxes.shape
)
)
| keras-cv/keras_cv/layers/preprocessing_3d/waymo/group_points_by_bounding_boxes.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing_3d/waymo/group_points_by_bounding_boxes.py",
"repo_id": "keras-cv",
"token_count": 5266
} | 60 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.layers.regularization.stochastic_depth import StochasticDepth
from keras_cv.tests.test_case import TestCase
class StochasticDepthTest(TestCase):
FEATURE_SHAPE = (1, 14, 14, 256)
def test_inputs_have_two_elements(self):
inputs = tf.random.uniform(self.FEATURE_SHAPE, 0, 1)
inputs = [inputs, inputs, inputs]
with self.assertRaisesRegex(
ValueError,
"Input must be a list of length 2. " "Got input with length=3.",
):
StochasticDepth()(inputs)
def test_eval_mode(self):
inputs = tf.random.uniform(self.FEATURE_SHAPE, 0, 1)
inputs = [inputs, inputs]
rate = 0.5
outputs = StochasticDepth(rate=rate)(inputs, training=False)
self.assertAllClose(inputs[0] * (1 + rate), outputs)
def test_training_mode(self):
inputs = tf.random.uniform(self.FEATURE_SHAPE, 0, 1)
inputs = [inputs, inputs]
rate = 0.5
outputs = StochasticDepth(rate=rate)(inputs, training=True)
outputs_sum = tf.math.reduce_sum(outputs)
inputs_sum = tf.math.reduce_sum(inputs[0])
self.assertIn(outputs_sum, [inputs_sum, inputs_sum * 2])
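# A minimal usage sketch of the behavior asserted above (illustrative only;
# the shapes and rate are arbitrary). `StochasticDepth` combines a
# `[shortcut, residual]` pair: at inference time the residual branch is
# scaled deterministically, while during training it is either kept whole
# or dropped entirely.
if __name__ == "__main__":
    x = tf.random.uniform((1, 14, 14, 256))
    layer = StochasticDepth(rate=0.5)
    eval_out = layer([x, x], training=False)  # deterministic blend
    train_out = layer([x, x], training=True)  # residual kept or dropped
    print(eval_out.shape, train_out.shape)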
| keras-cv/keras_cv/layers/regularization/stochastic_depth_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/regularization/stochastic_depth_test.py",
"repo_id": "keras-cv",
"token_count": 688
} | 61 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
@keras_cv_export("keras_cv.losses.FocalLoss")
class FocalLoss(keras.losses.Loss):
"""Implements Focal loss
Focal loss is a modified cross-entropy designed to perform better with
class imbalance. For this reason, it's commonly used with object detectors.
Args:
alpha: a float value between 0 and 1 representing a weighting factor
used to deal with class imbalance. Positive classes and negative
classes have alpha and (1 - alpha) as their weighting factors
respectively. Defaults to 0.25.
gamma: a positive float value representing the tunable focusing
parameter, defaults to 2.
from_logits: Whether `y_pred` is expected to be a logits tensor. By
default, `y_pred` is assumed to encode a probability distribution.
            Defaults to `False`.
label_smoothing: Float in `[0, 1]`. If higher than 0 then smooth the
labels by squeezing them towards `0.5`, i.e., using
`1. - 0.5 * label_smoothing` for the target class and
`0.5 * label_smoothing` for the non-target class.
References:
- [Focal Loss paper](https://arxiv.org/abs/1708.02002)
Standalone usage:
```python
    y_true = np.random.uniform(size=[10], low=0, high=1)
    y_pred = np.random.uniform(size=[10], low=0, high=1)
loss = FocalLoss()
loss(y_true, y_pred)
```
Usage with the `compile()` API:
```python
model.compile(optimizer='adam', loss=keras_cv.losses.FocalLoss())
```
"""
def __init__(
self,
alpha=0.25,
gamma=2,
from_logits=False,
label_smoothing=0,
**kwargs,
):
super().__init__(**kwargs)
self.alpha = float(alpha)
self.gamma = float(gamma)
self.from_logits = from_logits
self.label_smoothing = label_smoothing
def _smooth_labels(self, y_true):
return (
y_true * (1.0 - self.label_smoothing) + 0.5 * self.label_smoothing
)
def call(self, y_true, y_pred):
y_pred = ops.convert_to_tensor(y_pred)
y_true = ops.cast(y_true, y_pred.dtype)
if self.label_smoothing:
y_true = self._smooth_labels(y_true)
if self.from_logits:
y_pred = ops.sigmoid(y_pred)
cross_entropy = ops.binary_crossentropy(y_true, y_pred)
alpha = ops.where(
ops.equal(y_true, 1.0), self.alpha, (1.0 - self.alpha)
)
pt = y_true * y_pred + (1.0 - y_true) * (1.0 - y_pred)
loss = (
alpha
* ops.cast(ops.power(1.0 - pt, self.gamma), alpha.dtype)
* ops.cast(cross_entropy, alpha.dtype)
)
        # In most losses, you mean over the final axis to achieve a scalar.
        # Focal loss, however, is a special case in that it is meant to focus
        # on a small number of hard examples in a batch. Most of the time this
# comes in the form of thousands of background class boxes and a few
# positive boxes.
# If you mean over the final axis you will get a number close to 0,
# which will encourage your model to exclusively predict background
# class boxes.
return ops.sum(loss, axis=-1)
def get_config(self):
config = super().get_config()
config.update(
{
"alpha": self.alpha,
"gamma": self.gamma,
"from_logits": self.from_logits,
"label_smoothing": self.label_smoothing,
}
)
return config
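# A small numeric sketch (illustrative only; the values are arbitrary) of
# the focusing behavior described above: because of the
# `(1 - pt) ** gamma` modulating factor, a confident correct prediction
# contributes far less loss than an uncertain one.
if __name__ == "__main__":
    import numpy as np

    loss_fn = FocalLoss(reduction="none")
    y_true = np.array([[1.0]])
    print(float(loss_fn(y_true, np.array([[0.95]]))))  # easy example
    print(float(loss_fn(y_true, np.array([[0.30]]))))  # hard example, larger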
| keras-cv/keras_cv/losses/focal.py/0 | {
"file_path": "keras-cv/keras_cv/losses/focal.py",
"repo_id": "keras-cv",
"token_count": 1822
} | 62 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import numpy as np
try:
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
except ImportError:
COCO = object
COCOeval = None
from keras_cv.utils.conditional_imports import assert_pycocotools_installed
METRIC_NAMES = [
"AP",
"AP50",
"AP75",
"APs",
"APm",
"APl",
"ARmax1",
"ARmax10",
"ARmax100",
"ARs",
"ARm",
"ARl",
]
class PyCOCOWrapper(COCO):
"""COCO wrapper class.
This class wraps COCO API object, which provides the following additional
functionalities:
1. Support string type image id.
2. Support loading the groundtruth dataset using the external annotation
dictionary.
3. Support loading the prediction results using the external annotation
dictionary.
"""
def __init__(self, gt_dataset=None):
"""Instantiates a COCO-style API object.
Args:
gt_dataset: the groundtruth eval dataset in COCO API format.
"""
assert_pycocotools_installed("PyCOCOWrapper")
COCO.__init__(self, annotation_file=None)
self._eval_type = "box"
if gt_dataset:
self.dataset = gt_dataset
self.createIndex()
def loadRes(self, predictions):
"""Loads result file and return a result api object.
Args:
predictions: a list of dictionary each representing an annotation in
COCO format. The required fields are `image_id`, `category_id`,
`score`, `bbox`, `segmentation`.
Returns:
res: result COCO api object.
Raises:
ValueError: if the set of image id from predictions is not the subset
of the set of image id of the groundtruth dataset.
"""
res = COCO()
res.dataset["images"] = copy.deepcopy(self.dataset["images"])
res.dataset["categories"] = copy.deepcopy(self.dataset["categories"])
image_ids = [ann["image_id"] for ann in predictions]
if set(image_ids) != (set(image_ids) & set(self.getImgIds())):
raise ValueError(
"Results do not correspond to the current dataset!"
)
for ann in predictions:
x1, x2, y1, y2 = [
ann["bbox"][0],
ann["bbox"][0] + ann["bbox"][2],
ann["bbox"][1],
ann["bbox"][1] + ann["bbox"][3],
]
ann["area"] = ann["bbox"][2] * ann["bbox"][3]
ann["segmentation"] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
res.dataset["annotations"] = copy.deepcopy(predictions)
res.createIndex()
return res
def _yxyx_to_xywh(boxes):
if boxes.shape[-1] != 4:
raise ValueError(
"boxes.shape[-1] is {:d}, but must be 4.".format(boxes.shape[-1])
)
boxes_ymin = boxes[..., 0]
boxes_xmin = boxes[..., 1]
boxes_width = boxes[..., 3] - boxes[..., 1]
boxes_height = boxes[..., 2] - boxes[..., 0]
new_boxes = np.stack(
[boxes_xmin, boxes_ymin, boxes_width, boxes_height], axis=-1
)
return new_boxes
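# A quick numeric check of the conversion above (illustrative only): a
# [ymin, xmin, ymax, xmax] box becomes COCO's [xmin, ymin, width, height].
if __name__ == "__main__":
    box = np.array([10.0, 20.0, 50.0, 100.0])  # ymin, xmin, ymax, xmax
    print(_yxyx_to_xywh(box))  # expected: [20. 10. 80. 40.]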
def _convert_predictions_to_coco_annotations(predictions):
coco_predictions = []
num_batches = len(predictions["source_id"])
for i in range(num_batches):
batch_size = predictions["source_id"][i].shape[0]
predictions["detection_boxes"][i] = predictions["detection_boxes"][
i
].copy()
for j in range(batch_size):
max_num_detections = predictions["num_detections"][i][j]
predictions["detection_boxes"][i][j] = _yxyx_to_xywh(
predictions["detection_boxes"][i][j]
)
for k in range(max_num_detections):
ann = {}
ann["image_id"] = predictions["source_id"][i][j]
ann["category_id"] = predictions["detection_classes"][i][j][k]
ann["bbox"] = predictions["detection_boxes"][i][j][k]
ann["score"] = predictions["detection_scores"][i][j][k]
coco_predictions.append(ann)
for i, ann in enumerate(coco_predictions):
ann["id"] = i + 1
return coco_predictions
def _convert_groundtruths_to_coco_dataset(groundtruths, label_map=None):
source_ids = np.concatenate(groundtruths["source_id"], axis=0)
gt_images = [{"id": i} for i in source_ids]
gt_annotations = []
num_batches = len(groundtruths["source_id"])
for i in range(num_batches):
max_num_instances = max(x.shape[0] for x in groundtruths["classes"][i])
batch_size = groundtruths["source_id"][i].shape[0]
for j in range(batch_size):
num_instances = groundtruths["num_detections"][i][j]
if num_instances > max_num_instances:
num_instances = max_num_instances
for k in range(int(num_instances)):
ann = {}
ann["image_id"] = groundtruths["source_id"][i][j]
ann["iscrowd"] = 0
ann["category_id"] = int(groundtruths["classes"][i][j][k])
boxes = groundtruths["boxes"][i]
ann["bbox"] = [
float(boxes[j][k][1]),
float(boxes[j][k][0]),
float(boxes[j][k][3] - boxes[j][k][1]),
float(boxes[j][k][2] - boxes[j][k][0]),
]
ann["area"] = float(
(boxes[j][k][3] - boxes[j][k][1])
* (boxes[j][k][2] - boxes[j][k][0])
)
gt_annotations.append(ann)
for i, ann in enumerate(gt_annotations):
ann["id"] = i + 1
if label_map:
gt_categories = [{"id": i, "name": label_map[i]} for i in label_map]
else:
category_ids = [gt["category_id"] for gt in gt_annotations]
gt_categories = [{"id": i} for i in set(category_ids)]
gt_dataset = {
"images": gt_images,
"categories": gt_categories,
"annotations": copy.deepcopy(gt_annotations),
}
return gt_dataset
def _concat_numpy(groundtruths, predictions):
"""Converts tensors to numpy arrays."""
numpy_groundtruths = {}
for key, val in groundtruths.items():
if isinstance(val, tuple):
val = np.concatenate(val)
numpy_groundtruths[key] = val
numpy_predictions = {}
for key, val in predictions.items():
if isinstance(val, tuple):
val = np.concatenate(val)
numpy_predictions[key] = val
return numpy_groundtruths, numpy_predictions
def compute_pycoco_metrics(groundtruths, predictions):
assert_pycocotools_installed("compute_pycoco_metrics")
groundtruths, predictions = _concat_numpy(groundtruths, predictions)
gt_dataset = _convert_groundtruths_to_coco_dataset(groundtruths)
coco_gt = PyCOCOWrapper(gt_dataset=gt_dataset)
coco_predictions = _convert_predictions_to_coco_annotations(predictions)
coco_dt = coco_gt.loadRes(predictions=coco_predictions)
image_ids = [ann["image_id"] for ann in coco_predictions]
coco_eval = COCOeval(coco_gt, coco_dt, iouType="bbox")
coco_eval.params.imgIds = image_ids
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
coco_metrics = coco_eval.stats
metrics = coco_metrics
metrics_dict = {}
for i, name in enumerate(METRIC_NAMES):
metrics_dict[name] = metrics[i].astype(np.float32)
return metrics_dict
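# A hedged end-to-end sketch (illustrative only; requires pycocotools).
# The dict layouts are inferred from the converters above: every value is
# a list with one entry per batch, and boxes use the [ymin, xmin, ymax,
# xmax] layout expected by `_yxyx_to_xywh`.
if __name__ == "__main__":
    groundtruths = {
        "source_id": [np.array([1])],
        "num_detections": [np.array([1])],
        "classes": [np.array([[1]])],
        "boxes": [np.array([[[10.0, 10.0, 50.0, 50.0]]])],
    }
    predictions = {
        "source_id": [np.array([1])],
        "num_detections": [np.array([1])],
        "detection_boxes": [np.array([[[10.0, 10.0, 50.0, 50.0]]])],
        "detection_classes": [np.array([[1]])],
        "detection_scores": [np.array([[0.9]])],
    }
    # A perfect single-box match should score AP close to 1.0.
    print(compute_pycoco_metrics(groundtruths, predictions))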
| keras-cv/keras_cv/metrics/coco/pycoco_wrapper.py/0 | {
"file_path": "keras-cv/keras_cv/metrics/coco/pycoco_wrapper.py",
"repo_id": "keras-cv",
"token_count": 3808
} | 63 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MobileNet v3 backbone model.
References:
- [Searching for MobileNetV3](https://arxiv.org/pdf/1905.02244.pdf)
(ICCV 2019)
- [Based on the original keras.applications MobileNetv3](https://github.com/keras-team/keras/blob/master/keras/applications/mobilenet_v3.py)
""" # noqa: E501
import copy
from keras_cv import layers as cv_layers
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.models import utils
from keras_cv.models.backbones.backbone import Backbone
from keras_cv.models.backbones.mobilenet_v3.mobilenet_v3_backbone_presets import ( # noqa: E501
backbone_presets,
)
from keras_cv.models.backbones.mobilenet_v3.mobilenet_v3_backbone_presets import ( # noqa: E501
backbone_presets_with_weights,
)
from keras_cv.utils.python_utils import classproperty
CHANNEL_AXIS = -1
BN_EPSILON = 1e-3
BN_MOMENTUM = 0.999
@keras_cv_export("keras_cv.models.MobileNetV3Backbone")
class MobileNetV3Backbone(Backbone):
"""Instantiates the MobileNetV3 architecture.
References:
- [Searching for MobileNetV3](https://arxiv.org/pdf/1905.02244.pdf)
(ICCV 2019)
- [Based on the Original keras.applications MobileNetv3](https://github.com/keras-team/keras/blob/master/keras/applications/mobilenet_v3.py)
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](https://keras.io/guides/transfer_learning/).
Args:
stackwise_expansion: list of ints or floats, the expansion ratio for
each inverted residual block in the model.
stackwise_filters: list of ints, number of filters for each inverted
residual block in the model.
stackwise_stride: list of ints, stride length for each inverted
residual block in the model.
include_rescaling: bool, whether to rescale the inputs. If set to True,
inputs will be passed through a `Rescaling(scale=1 / 255)`
layer.
input_shape: optional shape tuple, defaults to (None, None, 3).
input_tensor: optional Keras tensor (i.e., output of `layers.Input()`)
to use as image input for the model.
alpha: float, controls the width of the network. This is known as the
depth multiplier in the MobileNetV3 paper, but the name is kept for
consistency with MobileNetV1 in Keras.
- If `alpha` < 1.0, proportionally decreases the number
of filters in each layer.
- If `alpha` > 1.0, proportionally increases the number
of filters in each layer.
- If `alpha` = 1, default number of filters from the paper
are used at each layer.
Examples:
```python
input_data = tf.ones(shape=(8, 224, 224, 3))
# Randomly initialized backbone with a custom config
model = MobileNetV3Backbone(
stackwise_expansion=[1, 72.0 / 16, 88.0 / 24, 4, 6, 6, 3, 3, 6, 6, 6],
stackwise_filters=[16, 24, 24, 40, 40, 40, 48, 48, 96, 96, 96],
stackwise_kernel_size=[3, 3, 3, 5, 5, 5, 5, 5, 5, 5, 5],
stackwise_stride=[2, 2, 1, 2, 1, 1, 1, 1, 2, 1, 1],
stackwise_se_ratio=[0.25, None, None, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25],
stackwise_activation=["relu", "relu", "relu", "hard_swish", "hard_swish", "hard_swish", "hard_swish", "hard_swish", "hard_swish", "hard_swish", "hard_swish"],
include_rescaling=False,
)
output = model(input_data)
```
""" # noqa: E501
def __init__(
self,
*,
stackwise_expansion,
stackwise_filters,
stackwise_kernel_size,
stackwise_stride,
stackwise_se_ratio,
stackwise_activation,
include_rescaling,
input_shape=(None, None, 3),
input_tensor=None,
alpha=1.0,
**kwargs,
):
inputs = utils.parse_model_inputs(input_shape, input_tensor)
x = inputs
if include_rescaling:
x = keras.layers.Rescaling(scale=1 / 255)(x)
x = keras.layers.Conv2D(
16,
kernel_size=3,
strides=(2, 2),
padding="same",
use_bias=False,
name="Conv",
)(x)
x = keras.layers.BatchNormalization(
axis=CHANNEL_AXIS,
epsilon=BN_EPSILON,
momentum=BN_MOMENTUM,
name="Conv_BatchNorm",
)(x)
x = apply_hard_swish(x)
pyramid_level_inputs = []
for stack_index in range(len(stackwise_filters)):
if stackwise_stride[stack_index] != 1:
pyramid_level_inputs.append(utils.get_tensor_input_name(x))
x = apply_inverted_res_block(
x,
expansion=stackwise_expansion[stack_index],
filters=adjust_channels(
(stackwise_filters[stack_index]) * alpha
),
kernel_size=stackwise_kernel_size[stack_index],
stride=stackwise_stride[stack_index],
se_ratio=stackwise_se_ratio[stack_index],
activation=stackwise_activation[stack_index],
expansion_index=stack_index,
)
pyramid_level_inputs.append(utils.get_tensor_input_name(x))
last_conv_ch = adjust_channels(x.shape[CHANNEL_AXIS] * 6)
x = keras.layers.Conv2D(
last_conv_ch,
kernel_size=1,
padding="same",
use_bias=False,
name="Conv_1",
)(x)
x = keras.layers.BatchNormalization(
axis=CHANNEL_AXIS,
epsilon=BN_EPSILON,
momentum=BN_MOMENTUM,
name="Conv_1_BatchNorm",
)(x)
x = apply_hard_swish(x)
super().__init__(inputs=inputs, outputs=x, **kwargs)
self.pyramid_level_inputs = {
f"P{i + 1}": name for i, name in enumerate(pyramid_level_inputs)
}
self.stackwise_expansion = stackwise_expansion
self.stackwise_filters = stackwise_filters
self.stackwise_kernel_size = stackwise_kernel_size
self.stackwise_stride = stackwise_stride
self.stackwise_se_ratio = stackwise_se_ratio
self.stackwise_activation = stackwise_activation
self.include_rescaling = include_rescaling
self.input_tensor = input_tensor
self.alpha = alpha
def get_config(self):
config = super().get_config()
config.update(
{
"stackwise_expansion": self.stackwise_expansion,
"stackwise_filters": self.stackwise_filters,
"stackwise_kernel_size": self.stackwise_kernel_size,
"stackwise_stride": self.stackwise_stride,
"stackwise_se_ratio": self.stackwise_se_ratio,
"stackwise_activation": self.stackwise_activation,
"include_rescaling": self.include_rescaling,
"input_shape": self.input_shape[1:],
"input_tensor": self.input_tensor,
"alpha": self.alpha,
}
)
return config
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return copy.deepcopy(backbone_presets)
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return copy.deepcopy(backbone_presets_with_weights)
class HardSigmoidActivation(keras.layers.Layer):
def __init__(self):
super().__init__()
def call(self, x):
return apply_hard_sigmoid(x)
def get_config(self):
return super().get_config()
def adjust_channels(x, divisor=8, min_value=None):
"""Ensure that all layers have a channel number divisible by the `divisor`.
Args:
x: integer, input value.
divisor: integer, the value by which a channel number should be
divisible, defaults to 8.
        min_value: integer, optional lower bound for the adjusted value. If
            None, defaults to the value of `divisor`.
    Returns:
        the input rounded to a nearby multiple of `divisor`, never more than
        10% below the input.
"""
if min_value is None:
min_value = divisor
new_x = max(min_value, int(x + divisor / 2) // divisor * divisor)
# make sure that round down does not go down by more than 10%.
if new_x < 0.9 * x:
new_x += divisor
return new_x
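# A few sample values for the rounding rule above (illustrative only):
# 90 rounds down to 88 (a drop of under 10%), while 9 would round down to
# 8 -- more than a 10% drop -- and is therefore bumped up to 16.
if __name__ == "__main__":
    print(adjust_channels(90), adjust_channels(9), adjust_channels(30))
    # -> 88 16 32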
def apply_hard_sigmoid(x):
activation = keras.layers.ReLU(6.0)
return activation(x + 3.0) * (1.0 / 6.0)
def apply_hard_swish(x):
return keras.layers.Multiply()([x, apply_hard_sigmoid(x)])
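# Sample values for the two activations above (illustrative only):
# hard_sigmoid(x) = relu6(x + 3) / 6 and hard_swish(x) = x * hard_sigmoid(x),
# cheap piecewise-linear stand-ins for sigmoid and swish.
if __name__ == "__main__":
    import numpy as np

    x = np.array([-4.0, 0.0, 4.0], dtype="float32")
    print(apply_hard_sigmoid(x))  # ~[0.0, 0.5, 1.0]
    print(apply_hard_swish(x))  # ~[-0.0, 0.0, 4.0]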
def apply_inverted_res_block(
x,
expansion,
filters,
kernel_size,
stride,
se_ratio,
activation,
expansion_index,
):
"""An Inverted Residual Block.
Args:
x: input tensor.
expansion: integer, the expansion ratio, multiplied with infilters to
get the minimum value passed to adjust_channels.
filters: integer, number of filters for convolution layer.
kernel_size: integer, the kernel size for DepthWise Convolutions.
stride: integer, the stride length for DepthWise Convolutions.
se_ratio: float, ratio for bottleneck filters. Number of bottleneck
filters = filters * se_ratio.
activation: the activation layer to use.
        expansion_index: integer, a unique identifier for the block. If
            greater than 0, it is appended to the layer-name prefix so that
            repeated blocks receive distinct layer names.
Returns:
the updated input tensor.
"""
if isinstance(activation, str):
if activation == "hard_swish":
activation = apply_hard_swish
else:
activation = keras.activations.get(activation)
shortcut = x
prefix = "expanded_conv_"
infilters = x.shape[CHANNEL_AXIS]
if expansion_index > 0:
prefix = f"expanded_conv_{expansion_index}_"
x = keras.layers.Conv2D(
adjust_channels(infilters * expansion),
kernel_size=1,
padding="same",
use_bias=False,
name=prefix + "expand",
)(x)
x = keras.layers.BatchNormalization(
axis=CHANNEL_AXIS,
epsilon=BN_EPSILON,
momentum=BN_MOMENTUM,
name=prefix + "expand_BatchNorm",
)(x)
x = activation(x)
if stride == 2:
x = keras.layers.ZeroPadding2D(
padding=utils.correct_pad_downsample(x, kernel_size),
name=prefix + "depthwise_pad",
)(x)
x = keras.layers.DepthwiseConv2D(
kernel_size,
strides=stride,
padding="same" if stride == 1 else "valid",
use_bias=False,
name=prefix + "depthwise",
)(x)
x = keras.layers.BatchNormalization(
axis=CHANNEL_AXIS,
epsilon=BN_EPSILON,
momentum=BN_MOMENTUM,
name=prefix + "depthwise_BatchNorm",
)(x)
x = activation(x)
if se_ratio:
se_filters = adjust_channels(infilters * expansion)
x = cv_layers.SqueezeAndExcite2D(
filters=se_filters,
bottleneck_filters=adjust_channels(se_filters * se_ratio),
squeeze_activation="relu",
excite_activation=HardSigmoidActivation(),
)(x)
x = keras.layers.Conv2D(
filters,
kernel_size=1,
padding="same",
use_bias=False,
name=prefix + "project",
)(x)
x = keras.layers.BatchNormalization(
axis=CHANNEL_AXIS,
epsilon=BN_EPSILON,
momentum=BN_MOMENTUM,
name=prefix + "project_BatchNorm",
)(x)
if stride == 1 and infilters == filters:
x = keras.layers.Add(name=prefix + "Add")([shortcut, x])
return x
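# A shape trace of the block above (illustrative only; the sizes are
# arbitrary): expand -> depthwise -> squeeze-excite -> project, with the
# residual add applied because stride == 1 and the channel counts match.
if __name__ == "__main__":
    inputs = keras.layers.Input((32, 32, 16))
    outputs = apply_inverted_res_block(
        inputs,
        expansion=4,
        filters=16,
        kernel_size=3,
        stride=1,
        se_ratio=0.25,
        activation="relu",
        expansion_index=1,
    )
    print(outputs.shape)  # (None, 32, 32, 16)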
| keras-cv/keras_cv/models/backbones/mobilenet_v3/mobilenet_v3_backbone.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/mobilenet_v3/mobilenet_v3_backbone.py",
"repo_id": "keras-cv",
"token_count": 5692
} | 64 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.models.feature_extractor.clip.clip_image_model import (
CLIPImageEncoder,
)
from keras_cv.models.feature_extractor.clip.clip_presets import ( # noqa: E501
clip_presets,
)
from keras_cv.models.feature_extractor.clip.clip_text_model import (
CLIPTextEncoder,
)
from keras_cv.models.task import Task
from keras_cv.utils.python_utils import classproperty
try:
import keras_nlp
except ImportError:
keras_nlp = None
@keras_cv_export(["keras_cv.models.CLIP"])
class CLIP(Task):
"""
CLIP implements the Contrastive Language-Image Pretraining (CLIP)
architecture, which enables joint learning of visual and textual
    representations for various downstream tasks. The default base model
    architecture is clip-vit-base-patch32.
Args:
embed_dim (int): The dimensionality of the joint embedding space for
images and texts.
image_resolution (int): The resolution of the input images (both height
and width).
vision_layers (int): The number of layers in the vision (image) encoder.
vision_width (int): The width of the hidden layers in the vision
encoder.
vision_patch_size (int): The size of each square patch in the input
images.
context_length (int): The maximum length of the contextualized text
sequences.
vocab_size (int): The size of the vocabulary for tokenization.
transformer_width (int): The width of the hidden layers in the
transformer-based text encoder.
transformer_heads (int): The number of attention heads in the
transformer-based text encoder.
transformer_layers (int): The number of layers in the transformer-based
text encoder.
"""
def __init__(
self,
embed_dim=512,
image_resolution=224,
vision_layers=12,
vision_width=768,
vision_patch_size=32,
context_length=77,
vocab_size=49408,
transformer_width=768,
transformer_heads=8,
transformer_layers=12,
**kwargs,
):
super().__init__(**kwargs)
if keras_nlp is None:
raise ValueError(
"ClipTokenizer requires keras-nlp. Please install "
"using pip `pip install -U keras-nlp && pip install -U keras`"
)
self.embed_dim = embed_dim
self.image_resolution = image_resolution
self.vision_layers = vision_layers
self.vision_width = vision_width
self.vision_patch_size = vision_patch_size
self.context_length = context_length
self.vocab_size = vocab_size
self.transformer_width = transformer_width
self.transformer_heads = transformer_heads
self.transformer_layers = transformer_layers
vision_heads = self.vision_width // 64
self.image_encoder = CLIPImageEncoder(
input_resolution=self.image_resolution,
patch_size=self.vision_patch_size,
width=self.vision_width,
num_layers=self.vision_layers,
heads=vision_heads,
output_dim=self.embed_dim,
name="image_encoder",
)
self.text_encoder = CLIPTextEncoder(
transformer_width=self.transformer_width,
transformer_layers=self.transformer_layers,
transformer_heads=self.transformer_heads,
vocab_size=self.vocab_size,
embed_dim=self.embed_dim,
context_length=self.context_length,
name="text_encoder",
)
self.logit_scale = keras.Variable(
ops.ones([]) * ops.log(1 / 0.07), name="logit_scale"
)
self.image_embeddings = None
self.text_embeddings = None
def build(self, input_shape):
super().build(input_shape)
self.text_encoder.build([None, self.context_length])
self.image_encoder.build(
[None, self.image_resolution, self.image_resolution, 3]
)
def encode_images(self, image):
return self.image_encoder(image)
def encode_text(self, text, attention_mask=None):
return self.text_encoder(text, attention_mask=attention_mask)
def call(self, image, text, attention_mask=None):
self.image_embeddings = self.encode_images(image)
self.text_embeddings = self.encode_text(
text, attention_mask=attention_mask
)
        # L2-normalize each embedding along the feature axis.
        normalize_image_features = ops.sqrt(
            ops.sum(
                ops.power(self.image_embeddings, 2), axis=-1, keepdims=True
            )
        )
        normalize_text_features = ops.sqrt(
            ops.sum(
                ops.power(self.text_embeddings, 2), axis=-1, keepdims=True
            )
        )
self.image_embeddings = self.image_embeddings / normalize_image_features
self.text_embeddings = self.text_embeddings / normalize_text_features
logit_scale = ops.exp(self.logit_scale)
logits_per_image = (
ops.matmul(
self.image_embeddings,
ops.transpose(self.text_embeddings),
)
* logit_scale
)
logits_per_text = ops.transpose(logits_per_image)
return logits_per_image, logits_per_text
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return copy.deepcopy({**clip_presets})
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return copy.deepcopy({**clip_presets})
def get_config(self):
config = super().get_config()
config.update(
{
"embed_dim": self.embed_dim,
"image_resolution": self.image_resolution,
"vision_layers": self.vision_layers,
"vision_width": self.vision_width,
"vision_patch_size": self.vision_patch_size,
"context_length": self.context_length,
"vocab_size": self.vocab_size,
"transformer_width": self.transformer_width,
"transformer_heads": self.transformer_heads,
"transformer_layers": self.transformer_layers,
}
)
return config
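# A plain-numpy restatement of what `call` computes (illustrative only;
# the embedding sizes are arbitrary): embeddings are L2-normalized per
# sample, so the logits are cosine similarities scaled by exp(logit_scale).
if __name__ == "__main__":
    import numpy as np

    image_emb = np.random.randn(2, 512)
    text_emb = np.random.randn(3, 512)
    image_emb /= np.linalg.norm(image_emb, axis=-1, keepdims=True)
    text_emb /= np.linalg.norm(text_emb, axis=-1, keepdims=True)
    logit_scale = np.exp(np.log(1 / 0.07))
    logits_per_image = image_emb @ text_emb.T * logit_scale  # (2, 3)
    logits_per_text = logits_per_image.T  # (3, 2)
    print(logits_per_image.shape, logits_per_text.shape)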
| keras-cv/keras_cv/models/feature_extractor/clip/clip_model.py/0 | {
"file_path": "keras-cv/keras_cv/models/feature_extractor/clip/clip_model.py",
"repo_id": "keras-cv",
"token_count": 2998
} | 65 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration tests for KerasCV models."""
import os
import pytest
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import backend
class ModelsTest:
def assertShapeEqual(self, shape1, shape2):
self.assertEqual(tf.TensorShape(shape1), tf.TensorShape(shape2))
@pytest.fixture(autouse=True)
def cleanup_global_session(self):
# Code before yield runs before the test
yield
keras.backend.clear_session()
def _test_application_base(self, app, _, args):
# Can be instantiated with default arguments
model = app(
include_top=True, num_classes=10, include_rescaling=False, **args
)
# Can be serialized and deserialized
config = model.get_config()
reconstructed_model = model.__class__.from_config(config)
self.assertEqual(len(model.weights), len(reconstructed_model.weights))
        # There is no rescaling layer because include_rescaling=False
with self.assertRaises(ValueError):
model.get_layer(name="rescaling")
def _test_application_with_rescaling(self, app, last_dim, args):
model = app(include_rescaling=True, include_top=False, **args)
self.assertIsNotNone(model.get_layer(name="rescaling"))
def _test_application_pooling(self, app, last_dim, args):
model = app(
include_rescaling=False, include_top=False, pooling="avg", **args
)
self.assertShapeEqual(model.output_shape, (None, last_dim))
def _test_application_variable_input_channels(self, app, last_dim, args):
# Make a local copy of args because we modify them in the test
args = dict(args)
input_shape = (None, None, 3)
# Avoid passing this parameter twice to the app function
if "input_shape" in args:
input_shape = args["input_shape"]
del args["input_shape"]
single_channel_input_shape = (input_shape[0], input_shape[1], 1)
model = app(
include_rescaling=False,
include_top=False,
input_shape=single_channel_input_shape,
**args
)
output_shape = model.output_shape
if "Mixer" not in app.__name__ and "ViT" not in app.__name__:
self.assertShapeEqual(output_shape, (None, None, None, last_dim))
elif "MixerB16" in app.__name__ or "MixerL16" in app.__name__:
num_patches = 196
self.assertShapeEqual(output_shape, (None, num_patches, last_dim))
elif "MixerB32" in app.__name__:
num_patches = 49
self.assertShapeEqual(output_shape, (None, num_patches, last_dim))
elif (
"ViTTiny16" in app.__name__
or "ViTS16" in app.__name__
or "ViTB16" in app.__name__
or "ViTL16" in app.__name__
or "ViTH16" in app.__name__
):
num_patches = 197
self.assertShapeEqual(output_shape, (None, num_patches, last_dim))
elif (
"ViTTiny32" in app.__name__
or "ViTS32" in app.__name__
or "ViTB32" in app.__name__
or "ViTL32" in app.__name__
or "ViTH32" in app.__name__
):
num_patches = 50
self.assertShapeEqual(output_shape, (None, num_patches, last_dim))
backend.clear_session()
four_channel_input_shape = (input_shape[0], input_shape[1], 4)
model = app(
include_rescaling=False,
include_top=False,
input_shape=four_channel_input_shape,
**args
)
output_shape = model.output_shape
if "Mixer" not in app.__name__ and "ViT" not in app.__name__:
self.assertShapeEqual(output_shape, (None, None, None, last_dim))
elif "MixerB16" in app.__name__ or "MixerL16" in app.__name__:
num_patches = 196
self.assertShapeEqual(output_shape, (None, num_patches, last_dim))
elif "MixerB32" in app.__name__:
num_patches = 49
self.assertShapeEqual(output_shape, (None, num_patches, last_dim))
elif (
"ViTTiny16" in app.__name__
or "ViTS16" in app.__name__
or "ViTB16" in app.__name__
or "ViTL16" in app.__name__
or "ViTH16" in app.__name__
):
num_patches = 197
self.assertShapeEqual(output_shape, (None, num_patches, last_dim))
elif (
"ViTTiny32" in app.__name__
or "ViTS32" in app.__name__
or "ViTB32" in app.__name__
or "ViTL32" in app.__name__
or "ViTH32" in app.__name__
):
num_patches = 50
self.assertShapeEqual(output_shape, (None, num_patches, last_dim))
def _test_model_can_be_used_as_backbone(self, app, last_dim, args):
inputs = keras.layers.Input(shape=(224, 224, 3))
backbone = app(
include_rescaling=False,
include_top=False,
input_tensor=inputs,
pooling="avg",
**args
)
x = inputs
x = backbone(x)
backbone_output = backbone.get_layer(index=-1).output
model = keras.Model(inputs=inputs, outputs=[backbone_output])
model.compile()
@pytest.mark.large # Saving is slow, so mark these large.
def _test_model_serialization(self, app, _, args, save_format, filename):
model = app(include_rescaling=True, include_top=False, **args)
input_batch = tf.ones(shape=(16, 224, 224, 3))
model_output = model(input_batch)
save_path = os.path.join(self.get_temp_dir(), filename)
model.save(save_path, save_format=save_format)
restored_model = keras.models.load_model(save_path)
# Check that output matches.
restored_output = restored_model(input_batch)
self.assertAllClose(model_output, restored_output)
if __name__ == "__main__":
tf.test.main()
| keras-cv/keras_cv/models/legacy/models_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/legacy/models_test.py",
"repo_id": "keras-cv",
"token_count": 2989
} | 66 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow.keras import utils
def parse_weights(weights, include_top, model_type):
if not weights:
return weights
if weights.startswith("gs://"):
weights = weights.replace("gs://", "https://storage.googleapis.com/")
return utils.get_file(
origin=weights,
cache_subdir="models",
)
if tf.io.gfile.exists(weights):
return weights
if weights in ALIASES[model_type]:
weights = ALIASES[model_type][weights]
if weights in WEIGHTS_CONFIG[model_type]:
if not include_top:
weights = weights + "-notop"
return utils.get_file(
origin=f"{BASE_PATH}/{model_type}/{weights}.h5",
cache_subdir="models",
file_hash=WEIGHTS_CONFIG[model_type][weights],
)
raise ValueError(
"The `weights` argument should be either `None`, a the path to the "
"weights file to be loaded, or the name of pre-trained weights from "
"https://github.com/keras-team/keras-cv/blob/master/keras_cv/models/weights.py. " # noqa: E501
f"Invalid `weights` argument: {weights}"
)
BASE_PATH = "https://storage.googleapis.com/keras-cv/models"
ALIASES = {
"convmixer_512_16": {
"imagenet": "imagenet/classification-v0",
"imagenet/classification": "imagenet/classification-v0",
},
"cspdarknetl": {
"imagenet": "imagenet/classification-v0",
"imagenet/classification": "imagenet/classification-v0",
},
"cspdarknettiny": {
"imagenet": "imagenet/classification-v0",
"imagenet/classification": "imagenet/classification-v0",
},
"darknet53": {
"imagenet": "imagenet/classification-v0",
"imagenet/classification": "imagenet/classification-v0",
},
"deeplabv3": {
"voc": "voc/segmentation-v0",
},
"densenet121": {
"imagenet": "imagenet/classification-v0",
"imagenet/classification": "imagenet/classification-v0",
},
"densenet169": {
"imagenet": "imagenet/classification-v0",
"imagenet/classification": "imagenet/classification-v0",
},
"densenet201": {
"imagenet": "imagenet/classification-v0",
"imagenet/classification": "imagenet/classification-v0",
},
"resnet50": {
"imagenet": "imagenet/classification-v0",
"imagenet/classification": "imagenet/classification-v0",
},
"resnet50v2": {
"imagenet": "imagenet/classification-v2",
"imagenet/classification": "imagenet/classification-v2",
},
"vittiny16": {
"imagenet": "imagenet/classification-v0",
"imagenet/classification": "imagenet/classification-v0",
},
"vits16": {
"imagenet": "imagenet/classification-v0",
"imagenet/classification": "imagenet/classification-v0",
},
"vitb16": {
"imagenet": "imagenet/classification-v0",
"imagenet/classification": "imagenet/classification-v0",
},
"vitl16": {
"imagenet": "imagenet/classification-v0",
"imagenet/classification": "imagenet/classification-v0",
},
"vits32": {
"imagenet": "imagenet/classification-v0",
"imagenet/classification": "imagenet/classification-v0",
},
"vitb32": {
"imagenet": "imagenet/classification-v0",
"imagenet/classification": "imagenet/classification-v0",
},
}
WEIGHTS_CONFIG = {
"convmixer_512_16": {
"imagenet/classification-v0": "861f3080dc383f7936d3df89691aadea05eee6acaa4a0b60aa70dd657df915ee", # noqa: E501
"imagenet/classification-v0-notop": "aa08c7fa9ca6ec045c4783e1248198dbe1bc141e2ae788e712de471c0370822c", # noqa: E501
},
"cspdarknetl": {
"imagenet/classification-v0": "8bdc3359222f0d26f77aa42c4e97d67a05a1431fe6c448ceeab9a9c5a34ff804", # noqa: E501
"imagenet/classification-v0-notop": "9303aabfadffbff8447171fce1e941f96d230d8f3cef30d3f05a9c85097f8f1e", # noqa: E501
},
"cspdarknettiny": {
"imagenet/classification-v0": "c17fe6d7b597f2eb25e42fbd97ec58fb1dad753ba18920cc27820953b7947704", # noqa: E501
"imagenet/classification-v0-notop": "0007ae82c95be4d4aef06368a7c38e006381324d77e5df029b04890e18a8ad19", # noqa: E501
},
"darknet53": {
"imagenet/classification-v0": "7bc5589f7f7f7ee3878e61ab9323a71682bfb617eb57f530ca8757c742f00c77", # noqa: E501
"imagenet/classification-v0-notop": "8dcce43163e4b4a63e74330ba1902e520211db72d895b0b090b6bfe103e7a8a5", # noqa: E501
},
"deeplabv3": {
"voc/segmentation-v0": "732042e8b6c9ddba3d51c861f26dc41865187e9f85a0e5d43dfef75a405cca18", # noqa: E501
},
"densenet121": {
"imagenet/classification-v0": "13de3d077ad9d9816b9a0acc78215201d9b6e216c7ed8e71d69cc914f8f0775b", # noqa: E501
"imagenet/classification-v0-notop": "709afe0321d9f2b2562e562ff9d0dc44cca10ed09e0e2cfba08d783ff4dab6bf", # noqa: E501
},
"densenet169": {
"imagenet/classification-v0": "4cd2a661d0cb2378574073b23129ee4d06ea53c895c62a8863c44ee039e236a1", # noqa: E501
"imagenet/classification-v0-notop": "a99d1bb2cbe1a59a1cdd1f435fb265453a97c2a7b723d26f4ebee96e5fb49d62", # noqa: E501
},
"densenet201": {
"imagenet/classification-v0": "3b6032e744e5e5babf7457abceaaba11fcd449fe2d07016ae5076ac3c3c6cf0c", # noqa: E501
"imagenet/classification-v0-notop": "c1189a934f12c1a676a9cf52238e5994401af925e2adfc0365bad8133c052060", # noqa: E501
},
"resnet50": {
"imagenet/classification-v0": "1525dc1ce580239839ba6848c0f1b674dc89cb9ed73c4ed49eba355b35eac3ce", # noqa: E501
"imagenet/classification-v0-notop": "dc5f6d8f929c78d0fc192afecc67b11ac2166e9d8b9ef945742368ae254c07af", # noqa: E501
},
"resnet50v2": {
"imagenet/classification-v0": "11bde945b54d1dca65101be2648048abca8a96a51a42820d87403486389790db", # noqa: E501
"imagenet/classification-v0-notop": "5b4aca4932c433d84f6aef58135472a4312ed2fa565d53fedcd6b0c24b54ab4a", # noqa: E501
"imagenet/classification-v1": "a32e5d9998e061527f6f947f36d8e794ad54dad71edcd8921cda7804912f3ee7", # noqa: E501
"imagenet/classification-v1-notop": "ac46b82c11070ab2f69673c41fbe5039c9eb686cca4f34cd1d79412fd136f1ae", # noqa: E501
"imagenet/classification-v2": "5ee5a8ac650aaa59342bc48ffe770e6797a5550bcc35961e1d06685292c15921", # noqa: E501
"imagenet/classification-v2-notop": "e711c83d6db7034871f6d345a476c8184eab99dbf3ffcec0c1d8445684890ad9", # noqa: E501
},
"vittiny16": {
"imagenet/classification-v0": "c8227fde16ec8c2e7ab886169b11b4f0ca9af2696df6d16767db20acc9f6e0dd", # noqa: E501
"imagenet/classification-v0-notop": "aa4d727e3c6bd30b20f49d3fa294fb4bbef97365c7dcb5cee9c527e4e83c8f5b", # noqa: E501
},
"vits16": {
"imagenet/classification-v0": "4a66a1a70a879ff33a3ca6ca30633b9eadafea84b421c92174557eee83e088b5", # noqa: E501
"imagenet/classification-v0-notop": "8d0111eda6692096676a5453abfec5d04c79e2de184b04627b295f10b1949745", # noqa: E501
},
"vitb16": {
"imagenet/classification-v0": "6ab4e08c773e08de42023d963a97e905ccba710e2c05ef60c0971978d4a8c41b", # noqa: E501
"imagenet/classification-v0-notop": "4a1bdd32889298471cb4f30882632e5744fd519bf1a1525b1fa312fe4ea775ed", # noqa: E501
},
"vitl16": {
"imagenet/classification-v0": "5a98000f848f2e813ea896b2528983d8d956f8c4b76ceed0b656219d5b34f7fb", # noqa: E501
"imagenet/classification-v0-notop": "40d237c44f14d20337266fce6192c00c2f9b890a463fd7f4cb17e8e35b3f5448", # noqa: E501
},
"vits32": {
"imagenet/classification-v0": "f5836e3aff2bab202eaee01d98337a08258159d3b718e0421834e98b3665e10a", # noqa: E501
"imagenet/classification-v0-notop": "f3907845eff780a4d29c1c56e0ae053411f02fff6fdce1147c4c3bb2124698cd", # noqa: E501
},
"vitb32": {
"imagenet/classification-v0": "73025caa78459dc8f9b1de7b58f1d64e24a823f170d17e25fcc8eb6179bea179", # noqa: E501
"imagenet/classification-v0-notop": "f07b80c03336d731a2a3a02af5cac1e9fc9aa62659cd29e2e7e5c7474150cc71", # noqa: E501
},
}
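# A sketch of the lookup order implemented by `parse_weights` above
# (illustrative only; downloads the file when run): local path -> alias ->
# named config, with "-notop" appended when the classification head is
# excluded.
if __name__ == "__main__":
    # "imagenet" resolves via ALIASES["resnet50v2"] to
    # "imagenet/classification-v2", then to the "-notop" variant because
    # include_top=False.
    path = parse_weights(
        "imagenet", include_top=False, model_type="resnet50v2"
    )
    print(path)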
| keras-cv/keras_cv/models/legacy/weights.py/0 | {
"file_path": "keras-cv/keras_cv/models/legacy/weights.py",
"repo_id": "keras-cv",
"token_count": 4397
} | 67 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import warnings
from keras_cv import bounding_box
from keras_cv import layers
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.losses.ciou_loss import CIoULoss
from keras_cv.losses.iou_loss import IoULoss
from keras_cv.models.backbones.backbone_presets import backbone_presets
from keras_cv.models.backbones.backbone_presets import (
backbone_presets_with_weights,
)
from keras_cv.models.object_detection.__internal__ import unpack_input
from keras_cv.models.object_detection.yolo_v8.yolo_v8_detector_presets import (
yolo_v8_detector_presets,
)
from keras_cv.models.object_detection.yolo_v8.yolo_v8_label_encoder import (
YOLOV8LabelEncoder,
)
from keras_cv.models.object_detection.yolo_v8.yolo_v8_layers import (
apply_conv_bn,
)
from keras_cv.models.object_detection.yolo_v8.yolo_v8_layers import (
apply_csp_block,
)
from keras_cv.models.task import Task
from keras_cv.utils.python_utils import classproperty
from keras_cv.utils.train import get_feature_extractor
BOX_REGRESSION_CHANNELS = 64
def get_anchors(
image_shape,
strides=[8, 16, 32],
base_anchors=[0.5, 0.5],
):
"""Gets anchor points for YOLOV8.
YOLOV8 uses anchor points representing the center of proposed boxes, and
matches ground truth boxes to anchors based on center points.
Args:
image_shape: tuple or list of two integers representing the height and
width of input images, respectively.
        strides: tuple or list of integers, the size of the strides across the
            image size that should be used to create anchors.
        base_anchors: tuple or list of two floats representing the offset from
(0,0) to start creating the center of anchor boxes, relative to the
stride. For example, using the default (0.5, 0.5) creates the first
anchor box for each stride such that its center is half of a stride
from the edge of the image.
Returns:
A tuple of anchor centerpoints and anchor strides. Multiplying the
two together will yield the centerpoints in absolute x,y format.
"""
base_anchors = ops.array(base_anchors, dtype="float32")
all_anchors = []
all_strides = []
for stride in strides:
hh_centers = ops.arange(0, image_shape[0], stride)
ww_centers = ops.arange(0, image_shape[1], stride)
ww_grid, hh_grid = ops.meshgrid(ww_centers, hh_centers)
grid = ops.cast(
ops.reshape(ops.stack([hh_grid, ww_grid], 2), [-1, 1, 2]),
"float32",
)
anchors = (
ops.expand_dims(
base_anchors * ops.array([stride, stride], "float32"), 0
)
+ grid
)
anchors = ops.reshape(anchors, [-1, 2])
all_anchors.append(anchors)
all_strides.append(ops.repeat(stride, anchors.shape[0]))
all_anchors = ops.cast(ops.concatenate(all_anchors, axis=0), "float32")
all_strides = ops.cast(ops.concatenate(all_strides, axis=0), "float32")
all_anchors = all_anchors / all_strides[:, None]
# Swap the x and y coordinates of the anchors.
all_anchors = ops.concatenate(
[all_anchors[:, 1, None], all_anchors[:, 0, None]], axis=-1
)
return all_anchors, all_strides
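# A quick shape check for the function above (illustrative only): a 64x64
# image with the default strides yields 8*8 + 4*4 + 2*2 = 84 anchor centers.
if __name__ == "__main__":
    anchors, strides = get_anchors(image_shape=(64, 64))
    print(anchors.shape, strides.shape)  # (84, 2) (84,)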
def apply_path_aggregation_fpn(features, depth=3, name="fpn"):
"""Applies the Feature Pyramid Network (FPN) to the outputs of a backbone.
Args:
features: list of tensors representing the P3, P4, and P5 outputs of the
backbone.
depth: integer, the depth of the CSP blocks used in the FPN.
name: string, a prefix for names of layers used by the FPN.
Returns:
A list of three tensors whose shapes are the same as the three inputs,
but which are dependent on each of the three inputs to combine the high
resolution of the P3 inputs with the strong feature representations of
the P5 inputs.
"""
p3, p4, p5 = features
# Upsample P5 and concatenate with P4, then apply a CSPBlock.
p5_upsampled = ops.repeat(ops.repeat(p5, 2, axis=1), 2, axis=2)
p4p5 = ops.concatenate([p5_upsampled, p4], axis=-1)
p4p5 = apply_csp_block(
p4p5,
channels=p4.shape[-1],
depth=depth,
shortcut=False,
activation="swish",
name=f"{name}_p4p5",
)
# Upsample P4P5 and concatenate with P3, then apply a CSPBlock.
p4p5_upsampled = ops.repeat(ops.repeat(p4p5, 2, axis=1), 2, axis=2)
p3p4p5 = ops.concatenate([p4p5_upsampled, p3], axis=-1)
p3p4p5 = apply_csp_block(
p3p4p5,
channels=p3.shape[-1],
depth=depth,
shortcut=False,
activation="swish",
name=f"{name}_p3p4p5",
)
# Downsample P3P4P5, concatenate with P4P5, and apply a CSP Block.
p3p4p5_d1 = apply_conv_bn(
p3p4p5,
p3p4p5.shape[-1],
kernel_size=3,
strides=2,
activation="swish",
name=f"{name}_p3p4p5_downsample1",
)
p3p4p5_d1 = ops.concatenate([p3p4p5_d1, p4p5], axis=-1)
p3p4p5_d1 = apply_csp_block(
p3p4p5_d1,
channels=p4p5.shape[-1],
shortcut=False,
activation="swish",
name=f"{name}_p3p4p5_downsample1_block",
)
# Downsample the resulting P3P4P5 again, concatenate with P5, and apply
# another CSP Block.
p3p4p5_d2 = apply_conv_bn(
p3p4p5_d1,
p3p4p5_d1.shape[-1],
kernel_size=3,
strides=2,
activation="swish",
name=f"{name}_p3p4p5_downsample2",
)
p3p4p5_d2 = ops.concatenate([p3p4p5_d2, p5], axis=-1)
p3p4p5_d2 = apply_csp_block(
p3p4p5_d2,
channels=p5.shape[-1],
shortcut=False,
activation="swish",
name=f"{name}_p3p4p5_downsample2_block",
)
return [p3p4p5, p3p4p5_d1, p3p4p5_d2]
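# A shape trace of the FPN above (illustrative only; the channel counts
# are arbitrary rather than tied to a particular backbone). Each output
# keeps the spatial size of the corresponding input.
if __name__ == "__main__":
    p3 = ops.ones((1, 80, 80, 64))
    p4 = ops.ones((1, 40, 40, 128))
    p5 = ops.ones((1, 20, 20, 256))
    for feature in apply_path_aggregation_fpn([p3, p4, p5], depth=1):
        print(feature.shape)  # (1, 80, 80, 64) / (1, 40, 40, 128) / (1, 20, 20, 256)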
def apply_yolo_v8_head(
inputs,
num_classes,
name="yolo_v8_head",
):
"""Applies a YOLOV8 head.
Makes box and class predictions based on the output of a feature pyramid
network.
Args:
inputs: list of tensors output by the Feature Pyramid Network, should
have the same shape as the P3, P4, and P5 outputs of the backbone.
num_classes: integer, the number of classes that a bounding box could
possibly be assigned to.
name: string, a prefix for names of layers used by the head.
Returns: A dictionary with two entries. The "boxes" entry contains box
regression predictions, while the "classes" entry contains class
predictions.
"""
# 64 is the default number of channels, as 16 components are used to predict
# each of the 4 offsets for corner points of a bounding box with respect
# to the center point. In cases where the input has much higher resolution
# (e.g. the P3 input has >256 channels), we use additional channels for
# the intermediate conv layers. This is only true for very large backbones.
box_channels = max(BOX_REGRESSION_CHANNELS, inputs[0].shape[-1] // 4)
# We use at least num_classes channels for intermediate conv layer for class
# predictions. In most cases, the P3 input has many more channels than the
# number of classes, so we preserve those channels until the final layer.
class_channels = max(num_classes, inputs[0].shape[-1])
# We compute box and class predictions for each of the feature maps from
# the FPN and then combine them.
outputs = []
    for index, feature in enumerate(inputs):
        cur_name = f"{name}_{index + 1}"
box_predictions = apply_conv_bn(
feature,
box_channels,
kernel_size=3,
activation="swish",
name=f"{cur_name}_box_1",
)
box_predictions = apply_conv_bn(
box_predictions,
box_channels,
kernel_size=3,
activation="swish",
name=f"{cur_name}_box_2",
)
box_predictions = keras.layers.Conv2D(
filters=BOX_REGRESSION_CHANNELS,
kernel_size=1,
name=f"{cur_name}_box_3_conv",
)(box_predictions)
class_predictions = apply_conv_bn(
feature,
class_channels,
kernel_size=3,
activation="swish",
name=f"{cur_name}_class_1",
)
class_predictions = apply_conv_bn(
class_predictions,
class_channels,
kernel_size=3,
activation="swish",
name=f"{cur_name}_class_2",
)
class_predictions = keras.layers.Conv2D(
filters=num_classes,
kernel_size=1,
name=f"{cur_name}_class_3_conv",
)(class_predictions)
class_predictions = keras.layers.Activation(
"sigmoid", name=f"{cur_name}_classifier"
)(class_predictions)
out = ops.concatenate([box_predictions, class_predictions], axis=-1)
out = keras.layers.Reshape(
[-1, out.shape[-1]], name=f"{cur_name}_output_reshape"
)(out)
outputs.append(out)
outputs = ops.concatenate(outputs, axis=1)
outputs = keras.layers.Activation(
"linear", dtype="float32", name="box_outputs"
)(outputs)
return {
"boxes": outputs[:, :, :BOX_REGRESSION_CHANNELS],
"classes": outputs[:, :, BOX_REGRESSION_CHANNELS:],
}
def decode_regression_to_boxes(preds):
"""Decodes the results of the YOLOV8Detector forward-pass into boxes.
Returns left / top / right / bottom predictions with respect to anchor
points.
Each coordinate is encoded with 16 predicted values. Those predictions are
softmaxed and multiplied by [0..15] to make predictions. The resulting
predictions are relative to the stride of an anchor box (and correspondingly
relative to the scale of the feature map from which the predictions came).
"""
preds_bbox = keras.layers.Reshape((-1, 4, BOX_REGRESSION_CHANNELS // 4))(
preds
)
preds_bbox = ops.nn.softmax(preds_bbox, axis=-1) * ops.arange(
BOX_REGRESSION_CHANNELS // 4, dtype="float32"
)
return ops.sum(preds_bbox, axis=-1)
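# A numeric sketch of the decoding above (illustrative only): each of the
# 4 box sides is a softmax over 16 bins, decoded as the expected bin index.
# Uniform logits therefore decode to (0 + 1 + ... + 15) / 16 = 7.5.
if __name__ == "__main__":
    preds = ops.zeros((1, 1, BOX_REGRESSION_CHANNELS))
    print(decode_regression_to_boxes(preds))  # ~[[[7.5 7.5 7.5 7.5]]]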
def dist2bbox(distance, anchor_points):
"""Decodes distance predictions into xyxy boxes.
Input left / top / right / bottom predictions are transformed into xyxy box
predictions based on anchor points.
The resulting xyxy predictions must be scaled by the stride of their
corresponding anchor points to yield an absolute xyxy box.
"""
left_top, right_bottom = ops.split(distance, 2, axis=-1)
x1y1 = anchor_points - left_top
x2y2 = anchor_points + right_bottom
return ops.concatenate((x1y1, x2y2), axis=-1) # xyxy bbox
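# A numeric check of the decoding above (illustrative only): an anchor at
# (10, 10) with distances (left, top, right, bottom) = (2, 3, 4, 5)
# decodes to the xyxy box (8, 7, 14, 15).
if __name__ == "__main__":
    distance = ops.array([[2.0, 3.0, 4.0, 5.0]])
    anchor_points = ops.array([[10.0, 10.0]])
    print(dist2bbox(distance, anchor_points))  # [[ 8.  7. 14. 15.]]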
@keras_cv_export(
[
"keras_cv.models.YOLOV8Detector",
"keras_cv.models.object_detection.YOLOV8Detector",
]
)
class YOLOV8Detector(Task):
"""Implements the YOLOV8 architecture for object detection.
Args:
backbone: `keras.Model`, must implement the `pyramid_level_inputs`
property with keys "P2", "P3", and "P4" and layer names as values.
A sensible backbone to use is the `keras_cv.models.YOLOV8Backbone`.
num_classes: integer, the number of classes in your dataset excluding the
background class. Classes should be represented by integers in the
range [0, num_classes).
bounding_box_format: string, the format of bounding boxes of input dataset.
Refer
[to the keras.io docs](https://keras.io/api/keras_cv/bounding_box/formats/)
for more details on supported bounding box formats.
fpn_depth: integer, a specification of the depth of the CSP blocks in
the Feature Pyramid Network. This is usually 1, 2, or 3, depending
on the size of your YOLOV8Detector model. We recommend using 3 for
"yolo_v8_l_backbone" and "yolo_v8_xl_backbone". Defaults to 2.
label_encoder: (Optional) A `YOLOV8LabelEncoder` that is
responsible for transforming input boxes into trainable labels for
YOLOV8Detector. If not provided, a default is provided.
prediction_decoder: (Optional) A `keras.layers.Layer` that is
responsible for transforming YOLOV8 predictions into usable
bounding boxes. If not provided, a default is provided. The
default `prediction_decoder` layer is a
            `keras_cv.layers.NonMaxSuppression` layer, which uses
            Non-Max Suppression for box pruning.
Examples:
```python
images = tf.ones(shape=(1, 512, 512, 3))
labels = {
"boxes": tf.constant([
[
[0, 0, 100, 100],
[100, 100, 200, 200],
[300, 300, 100, 100],
]
], dtype=tf.float32),
"classes": tf.constant([[1, 1, 1]], dtype=tf.int64),
}
model = keras_cv.models.YOLOV8Detector(
num_classes=20,
bounding_box_format="xywh",
backbone=keras_cv.models.YOLOV8Backbone.from_preset(
"yolo_v8_m_backbone_coco"
),
fpn_depth=2
)
# Evaluate model without box decoding and NMS
model(images)
# Prediction with box decoding and NMS
model.predict(images)
# Train model
model.compile(
classification_loss='binary_crossentropy',
box_loss='ciou',
optimizer=tf.optimizers.SGD(global_clipnorm=10.0),
jit_compile=False,
)
model.fit(images, labels)
```
""" # noqa: E501
def __init__(
self,
backbone,
num_classes,
bounding_box_format,
fpn_depth=2,
label_encoder=None,
prediction_decoder=None,
**kwargs,
):
extractor_levels = ["P3", "P4", "P5"]
extractor_layer_names = [
backbone.pyramid_level_inputs[i] for i in extractor_levels
]
feature_extractor = get_feature_extractor(
backbone, extractor_layer_names, extractor_levels
)
images = keras.layers.Input(feature_extractor.input_shape[1:])
features = list(feature_extractor(images).values())
fpn_features = apply_path_aggregation_fpn(
features, depth=fpn_depth, name="pa_fpn"
)
outputs = apply_yolo_v8_head(
fpn_features,
num_classes,
)
# To make loss metrics pretty, we use a no-op layer with a good name.
boxes = keras.layers.Concatenate(axis=1, name="box")([outputs["boxes"]])
scores = keras.layers.Concatenate(axis=1, name="class")(
[outputs["classes"]]
)
outputs = {"boxes": boxes, "classes": scores}
super().__init__(inputs=images, outputs=outputs, **kwargs)
self.bounding_box_format = bounding_box_format
self._prediction_decoder = (
prediction_decoder
or layers.NonMaxSuppression(
bounding_box_format=bounding_box_format,
from_logits=False,
confidence_threshold=0.2,
iou_threshold=0.7,
)
)
self.backbone = backbone
self.fpn_depth = fpn_depth
self.num_classes = num_classes
self.label_encoder = label_encoder or YOLOV8LabelEncoder(
num_classes=num_classes
)
def compile(
self,
box_loss,
classification_loss,
box_loss_weight=7.5,
classification_loss_weight=0.5,
metrics=None,
**kwargs,
):
"""Compiles the YOLOV8Detector.
`compile()` mirrors the standard Keras `compile()` method, but has one
key distinction -- two losses must be provided: `box_loss` and
`classification_loss`.
Args:
box_loss: a Keras loss to use for box offset regression. A
preconfigured loss is provided when the string "ciou" is passed.
classification_loss: a Keras loss to use for box classification. A
preconfigured loss is provided when the string
"binary_crossentropy" is passed.
box_loss_weight: (optional) float, a scaling factor for the box
loss. Defaults to 7.5.
classification_loss_weight: (optional) float, a scaling factor for
the classification loss. Defaults to 0.5.
kwargs: most other `keras.Model.compile()` arguments are supported
and propagated to the `keras.Model` class.
"""
if metrics is not None:
raise ValueError("User metrics not yet supported for YOLOV8")
if isinstance(box_loss, str):
if box_loss == "ciou":
box_loss = CIoULoss(bounding_box_format="xyxy", reduction="sum")
elif box_loss == "iou":
warnings.warn(
"YOLOV8 recommends using CIoU loss, but was configured to "
"use standard IoU. Consider using `box_loss='ciou'` "
"instead."
)
else:
raise ValueError(
f"Invalid box loss for YOLOV8Detector: {box_loss}. Box "
"loss should be a keras.Loss or the string 'ciou'."
)
if isinstance(classification_loss, str):
if classification_loss == "binary_crossentropy":
classification_loss = keras.losses.BinaryCrossentropy(
reduction="sum"
)
else:
raise ValueError(
"Invalid classification loss for YOLOV8Detector: "
f"{classification_loss}. Classification loss should be a "
"keras.Loss or the string 'binary_crossentropy'."
)
self.box_loss = box_loss
self.classification_loss = classification_loss
self.box_loss_weight = box_loss_weight
self.classification_loss_weight = classification_loss_weight
losses = {
"box": self.box_loss,
"class": self.classification_loss,
}
super().compile(loss=losses, **kwargs)
def train_step(self, *args):
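        # The training data is always the last positional argument; any
        # leading arguments (e.g. backend-specific state) are forwarded to
        # the parent `train_step` unchanged.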
data = args[-1]
args = args[:-1]
x, y = unpack_input(data)
return super().train_step(*args, (x, y))
def test_step(self, *args):
data = args[-1]
args = args[:-1]
x, y = unpack_input(data)
return super().test_step(*args, (x, y))
def compute_loss(self, x, y, y_pred, sample_weight=None, **kwargs):
box_pred, cls_pred = y_pred["boxes"], y_pred["classes"]
pred_boxes = decode_regression_to_boxes(box_pred)
pred_scores = cls_pred
anchor_points, stride_tensor = get_anchors(image_shape=x.shape[1:])
stride_tensor = ops.expand_dims(stride_tensor, axis=-1)
gt_labels = y["classes"]
mask_gt = ops.all(y["boxes"] > -1.0, axis=-1, keepdims=True)
gt_bboxes = bounding_box.convert_format(
y["boxes"],
source=self.bounding_box_format,
target="xyxy",
images=x,
)
pred_bboxes = dist2bbox(pred_boxes, anchor_points)
target_bboxes, target_scores, fg_mask = self.label_encoder(
pred_scores,
ops.cast(pred_bboxes * stride_tensor, gt_bboxes.dtype),
anchor_points * stride_tensor,
gt_labels,
gt_bboxes,
mask_gt,
)
target_bboxes /= stride_tensor
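        # Clamp the normalizer to at least 1 to avoid dividing by zero when
        # no anchors were assigned to any ground-truth box.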
target_scores_sum = ops.maximum(ops.sum(target_scores), 1)
box_weight = ops.expand_dims(
ops.sum(target_scores, axis=-1) * fg_mask,
axis=-1,
)
y_true = {
"box": target_bboxes * fg_mask[..., None],
"class": target_scores,
}
y_pred = {
"box": pred_bboxes * fg_mask[..., None],
"class": pred_scores,
}
sample_weights = {
"box": self.box_loss_weight * box_weight / target_scores_sum,
"class": self.classification_loss_weight / target_scores_sum,
}
return super().compute_loss(
x=x, y=y_true, y_pred=y_pred, sample_weight=sample_weights, **kwargs
)
def decode_predictions(
self,
pred,
images,
):
boxes = pred["boxes"]
scores = pred["classes"]
boxes = decode_regression_to_boxes(boxes)
anchor_points, stride_tensor = get_anchors(image_shape=images.shape[1:])
stride_tensor = ops.expand_dims(stride_tensor, axis=-1)
box_preds = dist2bbox(boxes, anchor_points) * stride_tensor
box_preds = bounding_box.convert_format(
box_preds,
source="xyxy",
target=self.bounding_box_format,
images=images,
)
return self.prediction_decoder(box_preds, scores)
def predict_step(self, *args):
outputs = super().predict_step(*args)
if isinstance(outputs, tuple):
return self.decode_predictions(outputs[0], args[-1]), outputs[1]
else:
return self.decode_predictions(outputs, args[-1])
@property
def prediction_decoder(self):
return self._prediction_decoder
@prediction_decoder.setter
def prediction_decoder(self, prediction_decoder):
if prediction_decoder.bounding_box_format != self.bounding_box_format:
raise ValueError(
"Expected `prediction_decoder` and YOLOV8Detector to "
"use the same `bounding_box_format`, but got "
"`prediction_decoder.bounding_box_format="
f"{prediction_decoder.bounding_box_format}`, and "
"`self.bounding_box_format="
f"{self.bounding_box_format}`."
)
self._prediction_decoder = prediction_decoder
self.make_predict_function(force=True)
self.make_train_function(force=True)
self.make_test_function(force=True)
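    # Example (sketch): swapping in a custom decoder after construction,
    # assuming a detector built with `bounding_box_format="xywh"`:
    #   detector.prediction_decoder = keras_cv.layers.NonMaxSuppression(
    #       bounding_box_format="xywh",
    #       from_logits=False,
    #   )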
def get_config(self):
return {
"num_classes": self.num_classes,
"bounding_box_format": self.bounding_box_format,
"fpn_depth": self.fpn_depth,
"backbone": keras.saving.serialize_keras_object(self.backbone),
"label_encoder": keras.saving.serialize_keras_object(
self.label_encoder
),
"prediction_decoder": keras.saving.serialize_keras_object(
self._prediction_decoder
),
}
@classmethod
def from_config(cls, config):
config["backbone"] = keras.saving.deserialize_keras_object(
config["backbone"]
)
label_encoder = config.get("label_encoder")
if label_encoder is not None and isinstance(label_encoder, dict):
config["label_encoder"] = keras.saving.deserialize_keras_object(
label_encoder
)
prediction_decoder = config.get("prediction_decoder")
if prediction_decoder is not None and isinstance(
prediction_decoder, dict
):
config["prediction_decoder"] = (
keras.saving.deserialize_keras_object(prediction_decoder)
)
return cls(**config)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return copy.deepcopy({**backbone_presets, **yolo_v8_detector_presets})
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return copy.deepcopy(
{**backbone_presets_with_weights, **yolo_v8_detector_presets}
)
@classproperty
def backbone_presets(cls):
"""Dictionary of preset names and configurations of compatible
backbones."""
return copy.deepcopy(backbone_presets)
| keras-cv/keras_cv/models/object_detection/yolo_v8/yolo_v8_detector.py/0 | {
"file_path": "keras-cv/keras_cv/models/object_detection/yolo_v8/yolo_v8_detector.py",
"repo_id": "keras-cv",
"token_count": 11170
} | 68 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.layers.object_detection_3d.heatmap_decoder import HeatmapDecoder
from keras_cv.models.object_detection_3d.center_pillar_backbone_presets import (
backbone_presets,
)
from keras_cv.models.task import Task
from keras_cv.utils.python_utils import classproperty
@keras_cv_export("keras_cv.models.MultiHeadCenterPillar")
class MultiHeadCenterPillar(Task):
"""Multi headed model based on CenterNet heatmap and PointPillar.
This model builds box classification and regression for each class
separately. It voxelizes the point cloud feature, applies feature extraction
on top of voxelized feature, and applies multi-class classification and
regression heads on the feature map.
Args:
backbone: the backbone to apply to voxelized features.
voxel_net: the voxel_net that takes point cloud feature and convert
to voxelized features. KerasCV offers a `DynamicVoxelization` layer
in `keras_cv.layers` which is a reasonable default for most
detection use cases.
multiclass_head: A keras.layers.Layer which takes the backbone output
and returns a dict of heatmap prediction and regression prediction
per class.
prediction_decoder: a multi class heatmap prediction decoder that
returns a dict of decoded boxes, box class, and box confidence score
per class.
"""
def __init__(
self,
backbone,
voxel_net,
multiclass_head,
prediction_decoder,
**kwargs,
):
point_xyz = keras.layers.Input((None, 3), name="point_xyz")
point_feature = keras.layers.Input((None, 4), name="point_feature")
point_mask = keras.layers.Input(
(None, 1), name="point_mask", dtype="bool"
)
inputs = {
"point_xyz": point_xyz,
"point_feature": point_feature,
"point_mask": point_mask,
}
voxel_feature = voxel_net(point_xyz, point_feature, point_mask[..., 0])
voxel_feature = backbone(voxel_feature)
predictions = multiclass_head(voxel_feature)
# A slight hack to get the output names in the model outputs for a
# functional model.
for head_name in multiclass_head._head_names:
predictions[f"box_{head_name}"] = keras.layers.Identity(
name=f"box_{head_name}"
)(predictions[head_name])
predictions[f"heatmap_{head_name}"] = keras.layers.Identity(
name=f"heatmap_{head_name}"
)(predictions[head_name])
super().__init__(inputs=inputs, outputs=predictions, **kwargs)
self._backbone = backbone
self._multiclass_head = multiclass_head
self._prediction_decoder = prediction_decoder
self._head_names = self._multiclass_head._head_names
def compile(self, heatmap_loss=None, box_loss=None, **kwargs):
"""Compiles the MultiHeadCenterPillar.
`compile()` mirrors the standard Keras `compile()` method, but allows
for specification of heatmap and box-specific losses.
Args:
heatmap_loss: a Keras loss to use for heatmap regression.
box_loss: a Keras loss to use for box regression, or a list of Keras
losses for box regression, one for each class. If only one loss
is specified, it will be used for all classes, otherwise exactly
one loss should be specified per class.
kwargs: other `keras.Model.compile()` arguments are supported and
propagated to the `keras.Model` class.
"""
losses = {}
if box_loss is not None and not isinstance(box_loss, list):
box_loss = [
box_loss for _ in range(self._multiclass_head._num_classes)
]
for i in range(self._multiclass_head._num_classes):
losses[f"heatmap_class_{i+1}"] = heatmap_loss
losses[f"box_class_{i+1}"] = box_loss[i]
super().compile(loss=losses, **kwargs)
def compute_loss(self, x, y, y_pred, sample_weight=None, **kwargs):
predictions = y_pred
targets = y
y_pred = {}
y_true = {}
sample_weight = {}
for head_name in self._head_names:
prediction = predictions[head_name]
heatmap_pred = ops.softmax(prediction[..., :2])[..., 1]
box_pred = prediction[..., 2:]
box = targets[head_name]["boxes"]
heatmap = targets[head_name]["heatmap"]
index = targets[head_name]["top_k_index"]
# the prediction returns 2 outputs for background vs object
y_pred["heatmap_" + head_name] = heatmap_pred
y_true["heatmap_" + head_name] = heatmap
# TODO(ianstenbit): loss heatmap threshold should be configurable.
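            # Gather the ground-truth heatmap values at the top-k box-center
            # indices (row-major into the flattened heatmap); only predictions
            # whose center confidence exceeds the threshold contribute to the
            # box regression loss.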
box_regression_mask = (
ops.take_along_axis(
ops.reshape(heatmap, (heatmap.shape[0], -1)),
index[..., 0] * heatmap.shape[1] + index[..., 1],
axis=1,
)
> 0.95
)
box = ops.take_along_axis(
ops.reshape(box, (ops.shape(box)[0], -1, 7)),
ops.expand_dims(
index[..., 0] * ops.shape(box)[1] + index[..., 1], axis=-1
),
axis=1,
)
box_pred = ops.take_along_axis(
ops.reshape(
box_pred,
(ops.shape(box_pred)[0], -1, ops.shape(box_pred)[-1]),
),
ops.expand_dims(
index[..., 0] * ops.shape(box_pred)[1] + index[..., 1],
axis=-1,
),
axis=1,
)
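            # Normalize both losses by the number of (near-)exact box centers,
            # clamped to at least one.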
box_center_mask = heatmap > 0.99
num_boxes = ops.maximum(
ops.sum(ops.cast(box_center_mask, "float32"), axis=[1, 2]), 1
)
sample_weight["box_" + head_name] = ops.cast(
box_regression_mask, "float32"
) / ops.broadcast_to(
ops.expand_dims(num_boxes, axis=-1),
ops.shape(box_regression_mask),
)
sample_weight["heatmap_" + head_name] = ops.ones_like(
heatmap
) / ops.broadcast_to(
ops.expand_dims(ops.expand_dims(num_boxes, axis=-1), axis=-1),
heatmap.shape,
)
y_pred["box_" + head_name] = box_pred
y_true["box_" + head_name] = box
return super().compute_loss(
x={}, y=y_true, y_pred=y_pred, sample_weight=sample_weight
)
def predict_step(self, *args):
outputs = super().predict_step(*args)
if isinstance(outputs, tuple):
return self._prediction_decoder(outputs[0]), outputs[1]
else:
return self._prediction_decoder(outputs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return copy.deepcopy(backbone_presets)
@classproperty
def backbone_presets(cls):
"""Dictionary of preset names and configurations of compatible
backbones."""
return copy.deepcopy(backbone_presets)
class MultiClassDetectionHead(keras.layers.Layer):
"""Multi-class object detection head for CenterPillar.
This head includes a 1x1 convolution layer for each class which is called
on the output of the CenterPillar's backbone. The outputs are per-class
prediction heatmaps which must be decoded into 3D boxes.
Args:
num_classes: int, the number of box classes to predict.
num_head_bin: list of ints, the number of heading bins to use for each
respective box class.
"""
def __init__(
self,
num_classes,
num_head_bin,
name="detection_head",
):
super().__init__(name=name)
self._heads = {}
self._head_names = []
self._num_classes = num_classes
self._num_head_bin = num_head_bin
for i in range(num_classes):
self._head_names.append(f"class_{i + 1}")
# 1x1 conv for each voxel/pixel.
self._heads[self._head_names[i]] = keras.layers.Conv2D(
# 2 for class, 3 for location, 3 for size, 2N for heading
filters=8 + 2 * num_head_bin[i],
kernel_size=(1, 1),
name=f"head_{i + 1}",
)
def call(self, feature, training=True):
del training
outputs = {}
for head_name in self._head_names:
outputs[head_name] = self._heads[head_name](feature)
return outputs
class MultiClassHeatmapDecoder(keras.layers.Layer):
"""Heatmap decoder for CenterPillar models.
The heatmap decoder converts a sparse heatmap of box predictions into a
padded dense set of decoded predicted boxes.
The input to the heatmap decoder is a spatial heatmap of encoded box
predictions, and the output is decoded 3D boxes in CENTER_XYZ_DXDYDZ_PHI
format.
Args:
num_classes: int, the number of box classes to predict.
num_head_bin: list of ints, the number of heading bins for each
respective class.
anchor_size: list of length-3 lists of floats, the 3D anchor sizes for
each respective class.
max_pool_size: list of ints, the 2D pooling size for the heatmap, to be
used before box decoding.
max_num_box: list of ints, the maximum number of boxes to return for
each class. The top K boxes will be returned, and if fewer than K
boxes are predicted, the outputs will be padded to contain K boxes.
heatmap_threshold: list of floats, the heatmap confidence threshold to
be used for each respective class to determine whether or not a box
prediction is strong enough to decode and return.
voxel_size: list of floats, the size of the voxels that were used to
voxelize inputs to the CenterPillar model for each respective class.
spatial_size: list of floats, the global 3D size of the heatmap for each
respective class. `spatial_size[i] / voxel_size[i]` equals the
size of the `i`th rank of the input heatmap.
"""
def __init__(
self,
num_classes,
num_head_bin,
anchor_size,
max_pool_size,
max_num_box,
heatmap_threshold,
voxel_size,
spatial_size,
**kwargs,
):
super().__init__(**kwargs)
self.num_classes = num_classes
self.class_ids = list(range(1, num_classes + 1))
self.num_head_bin = num_head_bin
self.anchor_size = anchor_size
self.max_pool_size = max_pool_size
self.max_num_box = max_num_box
self.heatmap_threshold = heatmap_threshold
self.voxel_size = voxel_size
self.spatial_size = spatial_size
self.decoders = {}
for i, class_id in enumerate(self.class_ids):
self.decoders[f"class_{class_id}"] = HeatmapDecoder(
class_id=class_id,
num_head_bin=self.num_head_bin[i],
anchor_size=self.anchor_size[i],
max_pool_size=self.max_pool_size[i],
max_num_box=self.max_num_box[i],
heatmap_threshold=self.heatmap_threshold[i],
voxel_size=self.voxel_size,
spatial_size=self.spatial_size,
)
def call(self, predictions):
box_predictions = []
class_predictions = []
box_confidence = []
for class_id in self.class_ids:
class_tag = f"class_{class_id}"
boxes, classes, confidence = self.decoders[class_tag](
predictions[class_tag]
)
box_predictions.append(boxes)
class_predictions.append(classes)
box_confidence.append(confidence)
return {
"3d_boxes": {
"boxes": ops.concatenate(box_predictions, axis=1),
"classes": ops.concatenate(class_predictions, axis=1),
"confidence": ops.concatenate(box_confidence, axis=1),
}
}
| keras-cv/keras_cv/models/object_detection_3d/center_pillar.py/0 | {
"file_path": "keras-cv/keras_cv/models/object_detection_3d/center_pillar.py",
"repo_id": "keras-cv",
"token_count": 5943
} | 69 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from keras_cv.models.segmentation.segformer.segformer import SegFormer
from keras_cv.models.segmentation.segformer.segformer_presets import presets
from keras_cv.utils.python_utils import classproperty
ALIAS_DOCSTRING = """SegFormer model.
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](https://keras.io/guides/transfer_learning/).
Args:
backbone: a KerasCV backbone for feature extraction.
num_classes: the number of classes for segmentation, including the background class.
Examples:
```python
input_data = tf.ones(shape=(8, 224, 224, 3))
# Build a SegFormer on top of a pretrained MiT backbone
backbone = keras_cv.models.MiTBackbone.from_preset("mit_b0_imagenet")
segformer = keras_cv.models.SegFormer(backbone=backbone, num_classes=19)
output = segformer(input_data)
```
""" # noqa: E501
class SegFormerB0(SegFormer):
def __new__(
cls,
num_classes,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"num_classes": num_classes,
}
)
return SegFormer.from_preset("segformer_b0", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {
"segformer_b0": copy.deepcopy(presets["segformer_b0"]),
}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return cls.presets
class SegFormerB1(SegFormer):
def __new__(
cls,
num_classes,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"num_classes": num_classes,
}
)
return SegFormer.from_preset("segformer_b1", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {
"segformer_b1": copy.deepcopy(presets["segformer_b1"]),
}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return cls.presets
class SegFormerB2(SegFormer):
def __new__(
cls,
num_classes,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"num_classes": num_classes,
}
)
return SegFormer.from_preset("segformer_b2", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {
"segformer_b2": copy.deepcopy(presets["segformer_b2"]),
}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return cls.presets
class SegFormerB3(SegFormer):
def __new__(
cls,
num_classes,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"num_classes": num_classes,
}
)
return SegFormer.from_preset("segformer_b3", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {
"segformer_b3": copy.deepcopy(presets["segformer_b3"]),
}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return cls.presets
class SegFormerB4(SegFormer):
def __new__(
cls,
num_classes,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"num_classes": num_classes,
}
)
return SegFormer.from_preset("segformer_b4", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {
"segformer_b4": copy.deepcopy(presets["segformer_b4"]),
}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return cls.presets
class SegFormerB5(SegFormer):
def __new__(
cls,
num_classes,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"num_classes": num_classes,
}
)
return SegFormer.from_preset("segformer_b5", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {
"segformer_b5": copy.deepcopy(presets["segformer_b5"]),
}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return cls.presets
setattr(
SegFormerB0,
"__doc__",
ALIAS_DOCSTRING.format(name="SegFormerB0"),
)
setattr(
SegFormerB1,
"__doc__",
ALIAS_DOCSTRING.format(name="SegFormerB1"),
)
setattr(
SegFormerB2,
"__doc__",
ALIAS_DOCSTRING.format(name="SegFormerB2"),
)
setattr(
SegFormerB3,
"__doc__",
ALIAS_DOCSTRING.format(name="SegFormerB3"),
)
setattr(
SegFormerB4,
"__doc__",
ALIAS_DOCSTRING.format(name="SegFormerB4"),
)
setattr(
SegFormerB5,
"__doc__",
ALIAS_DOCSTRING.format(name="SegFormerB5"),
)
| keras-cv/keras_cv/models/segmentation/segformer/segformer_aliases.py/0 | {
"file_path": "keras-cv/keras_cv/models/segmentation/segformer/segformer_aliases.py",
"repo_id": "keras-cv",
"token_count": 2719
} | 70 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code is taken nearly verbatim from
https://github.com/divamgupta/stable-diffusion-tensorflow."""
import gzip
import html
from functools import lru_cache
import regex as re
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
@lru_cache()
def bytes_to_unicode():
"""Return a list of utf-8 bytes and a corresponding list of unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you
want to avoid UNKs. When you're at something like a 10B token dataset you
end up needing around 5K for decent coverage. This is a significant
percentage of your normal, say, 32K bpe vocab. To avoid that, we want
lookup tables between utf-8 bytes and unicode strings.
And avoids mapping to whitespace/control characters the bpe code barfs on.
"""
bs = (
list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
)
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8 + n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
def get_pairs(word):
"""Return set of symbol pairs in a word.
A word is represented as tuple of symbols(symbols being variable-length
strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
def basic_clean(text):
text = html.unescape(html.unescape(text))
return text.strip()
def whitespace_clean(text):
text = re.sub(r"\s+", " ", text)
text = text.strip()
return text
@keras_cv_export("keras_cv.models.stable_diffusion.SimpleTokenizer")
class SimpleTokenizer:
def __init__(self, bpe_path=None):
bpe_path = bpe_path or keras.utils.get_file(
"bpe_simple_vocab_16e6.txt.gz",
"https://github.com/openai/CLIP/blob/main/clip/bpe_simple_vocab_16e6.txt.gz?raw=true", # noqa: E501
file_hash="924691ac288e54409236115652ad4aa250f48203de50a9e4722a6ecd48d6804a", # noqa: E501
)
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
merges = gzip.open(bpe_path).read().decode("utf-8").split("\n")
merges = merges[1 : 49152 - 256 - 2 + 1]
merges = [tuple(merge.split()) for merge in merges]
vocab = list(bytes_to_unicode().values())
vocab = vocab + [v + "</w>" for v in vocab]
for merge in merges:
vocab.append("".join(merge))
vocab.extend(["<|startoftext|>", "<|endoftext|>"])
self.vocab = vocab
self.encoder = self._create_encoder(self.vocab)
self.decoder = self._create_decoder(self.encoder)
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.special_tokens = {
"<|startoftext|>": "<|startoftext|>",
"<|endoftext|>": "<|endoftext|>",
}
self.cache = {
"<|startoftext|>": "<|startoftext|>",
"<|endoftext|>": "<|endoftext|>",
}
self.pat = self._create_pat()
def _create_encoder(self, vocab):
return dict(zip(vocab, range(len(vocab))))
def _create_decoder(self, encoder):
return {v: k for k, v in encoder.items()}
def _create_pat(self):
return re.compile(
"|".join([re.escape(key) for key in self.special_tokens.keys()])
+ r"""|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""",
re.IGNORECASE,
)
@property
def end_of_text(self):
return self.encoder["<|endoftext|>"]
@property
def start_of_text(self):
return self.encoder["<|startoftext|>"]
def add_tokens(self, tokens):
if isinstance(tokens, str):
tokens = [tokens]
tokens_added = 0
for token in tokens:
if token in self.vocab:
continue
tokens_added += 1
self.vocab.append(token)
self.special_tokens[token] = token
self.cache[token] = token
self.encoder = self._create_encoder(self.vocab)
self.decoder = self._create_decoder(self.encoder)
self.pat = self._create_pat()
return tokens_added
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token[:-1]) + (token[-1] + "</w>",)
pairs = get_pairs(word)
if not pairs:
return token + "</w>"
while True:
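            # Repeatedly merge the highest-ranked (most frequent) adjacent
            # pair until no learned merge applies to the word.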
bigram = min(
pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))
)
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
                try:
                    # Find the next occurrence of `first` at or after `i`.
                    j = word.index(first, i)
                    new_word.extend(word[i:j])
                    i = j
                except ValueError:
                    # `first` does not occur again; keep the rest verbatim.
                    new_word.extend(word[i:])
                    break
if (
word[i] == first
and i < len(word) - 1
and word[i + 1] == second
):
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = " ".join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
text = whitespace_clean(basic_clean(text)).lower()
for token in re.findall(self.pat, text):
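            # Map each UTF-8 byte of the token to its printable unicode proxy,
            # then apply BPE merges and look up the resulting subwords.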
token = "".join(self.byte_encoder[b] for b in token.encode("utf-8"))
bpe_tokens.extend(
self.encoder[bpe_token]
for bpe_token in self.bpe(token).split(" ")
)
return [self.start_of_text] + bpe_tokens + [self.end_of_text]
def decode(self, tokens):
text = "".join([self.decoder[token] for token in tokens])
text = (
bytearray([self.byte_decoder[c] for c in text])
.decode("utf-8", errors="replace")
.replace("</w>", " ")
)
return text
| keras-cv/keras_cv/models/stable_diffusion/clip_tokenizer.py/0 | {
"file_path": "keras-cv/keras_cv/models/stable_diffusion/clip_tokenizer.py",
"repo_id": "keras-cv",
"token_count": 3452
} | 71 |
# Copyright 2022 The KerasCV Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for IoU3D using custom op."""
import math
import os
import pytest
from keras_cv.ops import iou_3d
from keras_cv.tests.test_case import TestCase
class IoU3DTest(TestCase):
@pytest.mark.skipif(
"TEST_CUSTOM_OPS" not in os.environ
or os.environ["TEST_CUSTOM_OPS"] != "true",
reason="Requires binaries compiled from source",
)
def testOpCall(self):
# Predicted boxes:
# 0: a 2x2x2 box centered at 0,0,0, rotated 0 degrees
# 1: a 2x2x2 box centered at 1,1,1, rotated 135 degrees
# Ground Truth boxes:
# 0: a 2x2x2 box centered at 1,1,1, rotated 45 degrees
# (identical to predicted box 1)
# 1: a 2x2x2 box centered at 1,1,1, rotated 0 degrees
box_preds = [[0, 0, 0, 2, 2, 2, 0], [1, 1, 1, 2, 2, 2, 3 * math.pi / 4]]
box_gt = [[1, 1, 1, 2, 2, 2, math.pi / 4], [1, 1, 1, 2, 2, 2, 0]]
        # Predicted box 0 and both ground truth boxes overlap by 1/8th of the
        # box: a 1x1x1 corner cube of volume 1, against a union of
        # 8 + 8 - 1 = 15. Therefore, IoU is 1/15.
        # Predicted box 1 is the same as ground truth box 0, so its IoU is 1.
# Predicted box 1 shares an origin with ground truth box 1, but is
# rotated by 135 degrees.
# Their IoU can be reduced to that of two overlapping squares that
# share a center with the same offset of 135 degrees, which reduces to
# the square root of 0.5.
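        # Concretely: two 2x2 squares rotated 45 degrees (135 mod 90) about a
        # shared center intersect in a regular octagon of area
        # 8 * (sqrt(2) - 1) ~= 3.314, so IoU = 3.314 / (4 + 4 - 3.314)
        # ~= 0.707 = sqrt(0.5). The identical z-extents cancel out.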
expected_ious = [[1 / 15, 1 / 15], [1, 0.5**0.5]]
self.assertAllClose(iou_3d(box_preds, box_gt), expected_ious)
| keras-cv/keras_cv/ops/iou_3d_test.py/0 | {
"file_path": "keras-cv/keras_cv/ops/iou_3d_test.py",
"repo_id": "keras-cv",
"token_count": 820
} | 72 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.utils.conditional_imports import assert_cv2_installed
from keras_cv.utils.conditional_imports import assert_matplotlib_installed
from keras_cv.utils.conditional_imports import (
assert_waymo_open_dataset_installed,
)
from keras_cv.utils.fill_utils import fill_rectangle
from keras_cv.utils.preprocessing import blend
from keras_cv.utils.preprocessing import ensure_tensor
from keras_cv.utils.preprocessing import get_interpolation
from keras_cv.utils.preprocessing import parse_factor
from keras_cv.utils.preprocessing import transform
from keras_cv.utils.preprocessing import transform_value_range
from keras_cv.utils.to_numpy import to_numpy
from keras_cv.utils.train import convert_inputs_to_tf_dataset
from keras_cv.utils.train import scale_loss_for_distribution
| keras-cv/keras_cv/utils/__init__.py/0 | {
"file_path": "keras-cv/keras_cv/utils/__init__.py",
"repo_id": "keras-cv",
"token_count": 398
} | 73 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""KerasCV Version check."""
try:
import tensorflow as tf
except ModuleNotFoundError:
raise ModuleNotFoundError(
"KerasCV uses TensorFlow for its "
"preprocessing layers. While this dependency "
"will be dropped in the future, please install "
"TensorFlow with `pip install tensorflow` to "
"use KerasCV"
)
from packaging.version import parse
MIN_VERSION = "2.11.0"
def check_tf_version():
if parse(tf.__version__) < parse(MIN_VERSION):
raise RuntimeError(
"The Tensorflow package version needs to be at least "
f"{MIN_VERSION} for KerasCV to run. Currently, your TensorFlow "
f"version is {tf.__version__}. Please upgrade with `$ pip install "
"--upgrade tensorflow`. You can use `pip freeze` to check "
"afterwards that everything is ok."
)
| keras-cv/keras_cv/version_check.py/0 | {
"file_path": "keras-cv/keras_cv/version_check.py",
"repo_id": "keras-cv",
"token_count": 501
} | 74 |
# Copyright 2019 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup script."""
import os
import pathlib
from setuptools import find_packages
from setuptools import setup
from setuptools.dist import Distribution
def read(rel_path):
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, rel_path)) as fp:
return fp.read()
def get_version(rel_path):
for line in read(rel_path).splitlines():
if line.startswith("__version__"):
delim = '"' if '"' in line else "'"
return line.split(delim)[1]
raise RuntimeError("Unable to find version string.")
BUILD_WITH_CUSTOM_OPS = (
"BUILD_WITH_CUSTOM_OPS" in os.environ
and os.environ["BUILD_WITH_CUSTOM_OPS"] == "true"
)
HERE = pathlib.Path(__file__).parent
README = (HERE / "README.md").read_text()
if os.path.exists("keras_cv/version_utils.py"):
VERSION = get_version("keras_cv/version_utils.py")
else:
VERSION = get_version("keras_cv/src/version_utils.py")
class BinaryDistribution(Distribution):
"""This class is needed in order to create OS specific wheels."""
def has_ext_modules(self):
return BUILD_WITH_CUSTOM_OPS
def is_pure(self):
return not BUILD_WITH_CUSTOM_OPS
setup(
name="keras-cv",
    description="Industry-strength Computer Vision extensions for Keras.",
long_description=README,
long_description_content_type="text/markdown",
version=VERSION,
url="https://github.com/keras-team/keras-cv",
author="Keras team",
author_email="[email protected]",
license="Apache License 2.0",
install_requires=[
"packaging",
"absl-py",
"regex",
"tensorflow-datasets",
"keras-core",
"kagglehub",
],
extras_require={
"tests": [
"flake8",
"isort",
"black[jupyter]",
"pytest",
"pycocotools",
],
"examples": ["tensorflow_datasets", "matplotlib"],
},
distclass=BinaryDistribution,
# Supported Python versions
python_requires=">=3.9",
classifiers=[
"Development Status :: 3 - Alpha",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3 :: Only",
"Operating System :: Unix",
"Operating System :: Microsoft :: Windows",
"Operating System :: MacOS",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering",
"Topic :: Software Development",
],
packages=find_packages(exclude=("*_test.py",)),
include_package_data=True,
)
| keras-cv/setup.py/0 | {
"file_path": "keras-cv/setup.py",
"repo_id": "keras-cv",
"token_count": 1293
} | 75 |
# Keras: Deep Learning library for Python

## What is Keras?

Keras is a high-level neural networks library, written in Python and capable of running on top of [TensorFlow](https://github.com/tensorflow/tensorflow), [CNTK](https://github.com/Microsoft/cntk), or [Theano](https://github.com/Theano/Theano).

Keras was developed with a focus on enabling fast experimentation.
*Being able to go from idea to result with the least possible delay is key to doing good research.*

Use Keras if you need a deep learning library that:

- Allows for easy and fast prototyping (through user friendliness, modularity, and extensibility).
- Supports both convolutional networks and recurrent networks, as well as combinations of the two.
- Runs seamlessly on CPU and GPU.

Read the documentation at [Keras.io](https://keras.io/ja/).

Keras is compatible with **Python 2.7-3.6**.
------------------
## Multi-backend Keras and tf.keras:

**We recommend that users of multi-backend Keras who use the TensorFlow backend switch to `tf.keras` in TensorFlow 2.0.**

`tf.keras` is better maintained and has better integration with TensorFlow features (such as eager execution and distribution support).

Keras 2.2.5 was the last release to implement the 2.2.* API, and the last release to only support TensorFlow 1 (the same goes for Theano and CNTK).

The current release is Keras 2.3.0, which makes significant API changes and adds support for TensorFlow 2.0. The 2.3.0 release is planned to be the last major release of multi-backend Keras; multi-backend Keras is superseded by `tf.keras`.

Bugs present in multi-backend Keras will be fixed until April 2020 (as part of minor releases).

For more information about the future of Keras, see [the Keras meeting notes](http://bit.ly/keras-meeting-notes).
------------------
## Guiding principles

- __User friendliness__: Keras is a library designed for human beings, not machines. It puts user experience front and center. Keras follows best practices for reducing cognitive load: it offers consistent and simple APIs, minimizes the number of user actions required for common use cases, and provides clear and actionable feedback upon user error.
- __Modularity__: A model is understood as a sequence or a graph of standalone, fully configurable modules that can be plugged together with as few restrictions as possible. In particular, neural network layers, loss functions, optimizers, initialization schemes, activation functions, and regularization schemes are all standalone modules that can be combined to create new models.
- __Extensibility__: New modules are simple to add (as new classes and functions), and existing modules provide ample implementation examples. Being able to easily create new modules allows for total expressiveness, making Keras suitable for advanced research.
- __Implemented in Python__: There are no separate model configuration files in a declarative format. Models are described in Python code, which is compact, easier to debug, and easy to extend.
------------------
## Getting started: 30 seconds to Keras

The core data structure of Keras is the __model__, a way to organize layers.
The main type of model is the [`Sequential`](http://keras.io/ja/getting-started/sequential-model-guide) model, a linear stack of layers.
For more complex architectures, you should use the [Keras functional API](http://keras.io/ja/getting-started/functional-api-guide), which allows you to build arbitrary graphs of layers (a minimal sketch follows).
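As a quick taste, here is a minimal sketch of the functional API, assuming the same 100-dimensional inputs and 10 output classes as the `Sequential` example below:

```python
from keras.layers import Input, Dense
from keras.models import Model

# Define the graph of layers explicitly, then wrap it in a Model.
inputs = Input(shape=(100,))
x = Dense(64, activation='relu')(inputs)
outputs = Dense(10, activation='softmax')(x)
model = Model(inputs=inputs, outputs=outputs)
```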
Here is an example of the `Sequential` model:
```python
from keras.models import Sequential
model = Sequential()
```
Stacking layers is as easy as `.add()`:
```python
from keras.layers import Dense
model.add(Dense(units=64, activation='relu', input_dim=100))
model.add(Dense(units=10, activation='softmax'))
```
Once your model looks good, configure its learning process with `.compile()`:
```python
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
```
If you need to, you can further configure your optimizer. A core design principle of Keras is to make things reasonably simple, while allowing the user to be fully in control when they need to be (the ultimate control being the easy extensibility of the source code):
```python
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.SGD(lr=0.01, momentum=0.9, nesterov=True))
```
You can now iterate on your training data in batches:
```python
# x_train and y_train are Numpy arrays --just like in the Scikit-Learn API.
model.fit(x_train, y_train, epochs=5, batch_size=32)
```
Alternatively, you can feed batches to your model manually:
```python
model.train_on_batch(x_batch, y_batch)
```
Evaluate your performance in one line:
```python
loss_and_metrics = model.evaluate(x_test, y_test, batch_size=128)
```
Or generate predictions on new data:
```python
classes = model.predict(x_test, batch_size=128)
```
Building a question answering system, an image classification model, a Neural Turing Machine, a word2vec embedder, or many other models is just as fast. The ideas behind deep learning are simple, so why should their implementation be painful?

For a more in-depth tutorial about Keras, check out:

- [Getting started with the Sequential model](http://keras.io/ja/getting-started/sequential-model-guide)
- [Getting started with the functional API](http://keras.io/ja/getting-started/functional-api-guide)

In the [examples folder](https://github.com/keras-team/keras/tree/master/examples) of the repository, you will find more advanced models: question answering with memory networks, text generation with stacked LSTMs, and more.
------------------
## Installation

Before installing Keras, please install one of its backend engines: TensorFlow, Theano, or CNTK. We recommend the TensorFlow backend.

- [TensorFlow installation instructions](https://www.tensorflow.org/install/).
- [Theano installation instructions](http://deeplearning.net/software/theano/install.html#install).
- [CNTK installation instructions](https://docs.microsoft.com/en-us/cognitive-toolkit/setup-cntk-on-your-machine).

You may also consider installing the following **optional dependencies**:

- cuDNN (recommended if you plan on running Keras on GPU).
- HDF5 and h5py (required if you plan on saving Keras models to disk).
- graphviz and pydot (used by [visualization utilities](https://keras.io/ja/visualization/) to plot model graphs).

Then, you can install Keras itself. There are two ways to install Keras:

- **Install Keras from PyPI (recommended):**
```sh
sudo pip install keras
```
If you are using a virtualenv, you can avoid sudo:
```sh
pip install keras
```
- **Alternatively: install Keras from the GitHub source:**

First, clone Keras using `git`:
```sh
git clone https://github.com/keras-team/keras.git
```
Then, `cd` to the Keras folder and run the install command:
```sh
cd keras
sudo python setup.py install
```
------------------
## Switching from TensorFlow to CNTK or Theano

By default, Keras uses TensorFlow as its tensor manipulation library. Follow [these instructions](http://keras.io/ja/backend/) to configure the Keras backend.
------------------
## Support

You can ask questions and join the development discussion:

- On the [Keras Google group](https://groups.google.com/forum/#!forum/keras-users).
- On the [Keras Slack channel](https://kerasteam.slack.com). Use [this link](https://keras-slack-autojoin.herokuapp.com/) to request an invitation to the channel.

You can also post **bug reports and feature requests** in [GitHub issues](https://github.com/keras-team/keras/issues). Make sure to read [our guidelines](https://github.com/keras-team/keras/blob/master/CONTRIBUTING.md) first.
------------------
## Why this name, Keras?

Keras (κέρας) means **horn** in Greek. It is a reference to a literary image from ancient Greek and Latin literature, first found in the _Odyssey_, where dream spirits (_Oneiroi_, singular _Oneiros_) are divided between those who deceive men with false visions, arriving to Earth through a gate of ivory, and those who announce a future that will come to pass, arriving through a gate of horn. It is a play on the words κέρας (horn) / κραίνω (fulfill), and ἐλέφας (ivory) / ἐλεφαίρομαι (deceive).

Keras was initially developed as part of the research effort of project ONEIROS (Open-ended Neuro-Electronic Intelligent Robot Operating System).
>_"Oneiroi are beyond our unravelling --who can be sure what tale they tell? Not all that men look for comes to pass. Two gates there are that give passage to fleeting Oneiroi; one is made of horn, one of ivory. The Oneiroi that pass through sawn ivory are deceitful, bearing a message that will not be fulfilled; those that come out through polished horn have truth behind them, to be accomplished for men who see them."_ Homer, Odyssey 19. 562 ff (Shewring translation).
------------------
| keras-docs-ja/sources/index.md/0 | {
"file_path": "keras-docs-ja/sources/index.md",
"repo_id": "keras-docs-ja",
"token_count": 4967
} | 76 |
## Usage of metrics

A metric is a function that is used to judge the performance of your model.
Metric functions are supplied in the `metrics` parameter when a model is compiled, as in the code below:
```python
model.compile(loss='mean_squared_error',
optimizer='sgd',
metrics=['mae', 'acc'])
```
```python
from keras import metrics
model.compile(loss='mean_squared_error',
optimizer='sgd',
metrics=[metrics.mae, metrics.categorical_accuracy])
```
A metric function is similar to a [loss function](/losses), except that the results from evaluating a metric are not used directly when training the model.
You may pass the name of an existing metric in the `metrics` parameter, or pass your own custom metric function (see [Custom metrics](#_3)).
#### Arguments

- __y_true__: True labels. Theano/TensorFlow tensor.
- __y_pred__: Predictions. Theano/TensorFlow tensor of the same shape as y_true.

#### Returns

A single scalar representing the mean over all datapoints.
---
## Available metrics
### binary_accuracy
```python
binary_accuracy(y_true, y_pred)
```
---
### categorical_accuracy
```python
categorical_accuracy(y_true, y_pred)
```
---
### sparse_categorical_accuracy
```python
sparse_categorical_accuracy(y_true, y_pred)
```
---
### top_k_categorical_accuracy
```python
top_k_categorical_accuracy(y_true, y_pred, k=5)
```
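All of these accuracy variants share the `(y_true, y_pred)` signature described above; `top_k_categorical_accuracy` additionally takes `k`. As a minimal sketch (the `top5_acc` name is ours, not part of the API), a parameterized metric can be passed to `compile()` like this:

```python
from functools import partial

from keras import metrics

# Bind k=5 and give the metric a display name for training logs.
top5_acc = partial(metrics.top_k_categorical_accuracy, k=5)
top5_acc.__name__ = 'top5_acc'

model.compile(loss='categorical_crossentropy',
              optimizer='sgd',
              metrics=[top5_acc])
```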
## Custom metrics

A custom metric can be any function that takes `(y_true, y_pred)` as arguments and returns a single scalar per datapoint:

- __y_true__: True labels. Theano/TensorFlow tensor.
- __y_pred__: Predictions. Theano/TensorFlow tensor of the same shape as y_true.
```python
import keras.backend as K
def mean_pred(y_true, y_pred):
return K.mean(y_pred)
model.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['accuracy', mean_pred])
```
| keras-docs-ja/sources/metrics.md/0 | {
"file_path": "keras-docs-ja/sources/metrics.md",
"repo_id": "keras-docs-ja",
"token_count": 1036
} | 77 |
# Datasets

## CIFAR10 small image classification

Dataset of 50,000 32x32 color training images, labeled over 10 categories, and 10,000 test images.

### Usage:
```python
from keras.datasets import cifar10
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
```
- __Returns:__
    - 2 tuples:
        - __x_train, x_test__: uint8 array of RGB image data with shape (num_samples, 3, 32, 32) or (num_samples, 32, 32, 3), depending on whether the `image_data_format` backend setting is `channels_first` or `channels_last` respectively.
        - __y_train, y_test__: uint8 array of category labels (integers in range 0-9) with shape (num_samples,).
---
## CIFAR100 small image classification

Dataset of 50,000 32x32 color training images, labeled over 100 categories, and 10,000 test images.

### Usage:
```python
from keras.datasets import cifar100
(x_train, y_train), (x_test, y_test) = cifar100.load_data(label_mode='fine')
```
- __Returns:__
    - 2 tuples:
        - __x_train, x_test__: uint8 array of RGB image data with shape (num_samples, 3, 32, 32) or (num_samples, 32, 32, 3), depending on whether the `image_data_format` backend setting is `channels_first` or `channels_last` respectively.
        - __y_train, y_test__: uint8 array of category labels (integers in range 0-99 for `label_mode="fine"`) with shape (num_samples,).
- __Arguments:__
    - __label_mode__: "fine" or "coarse".
---
## IMDB movie review sentiment classification

Dataset of 25,000 movie reviews from IMDB, labeled by sentiment (positive/negative). Reviews have been preprocessed, and each review is encoded as a [sequence](preprocessing/sequence.md) of word indexes (integers). For convenience, words are indexed by overall frequency in the dataset, so that for instance the integer "3" encodes the third most frequent word in the data. This allows for quick filtering operations such as "only consider the top 10,000 most common words, but eliminate the top 20 most common words".

As a convention, "0" does not stand for a specific word, but instead encodes any unknown word.

### Usage:
```python
from keras.datasets import imdb
(x_train, y_train), (x_test, y_test) = imdb.load_data(path="imdb.npz",
num_words=None,
skip_top=0,
maxlen=None,
seed=113,
start_char=1,
oov_char=2,
index_from=3)
```
- __Returns:__
    - 2 tuples:
        - __x_train, x_test__: list of sequences, which are lists of indexes (integers). If the num_words argument was specified, the maximum possible index value is num_words-1. If the maxlen argument was specified, the largest possible sequence length is maxlen.
        - __y_train, y_test__: list of integer labels (1 or 0).
- __Arguments:__
    - __path__: if you do not have the data locally (at `'~/.keras/datasets/' + path`), it will be downloaded to this location.
    - __num_words__: integer or None. Top most frequent words to consider. Any less frequent word will appear as the `oov_char` value in the sequence data.
    - __skip_top__: integer. Top most frequent words to ignore. These words will appear as the `oov_char` value in the sequence data.
    - __maxlen__: integer. Maximum sequence length. Any longer sequence will be truncated.
    - __seed__: integer. Seed for reproducible data shuffling.
    - __start_char__: integer. The start of a sequence will be marked with this character. Set to 1 because 0 is usually the padding character.
    - __oov_char__: integer. Words that were cut out because of the `num_words` or `skip_top` limits will be replaced with this character.
    - __index_from__: integer. Index actual words with this index and higher.
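Given the conventions above, here is a minimal sketch of decoding a review back into words with `imdb.get_word_index()`. The `index_from` offset and the reserved indices (`start_char`, `oov_char`) must be undone; the variable names are ours:

```python
from keras.datasets import imdb

(x_train, y_train), _ = imdb.load_data(num_words=10000)
word_index = imdb.get_word_index()

# Shift indices by index_from (default 3); 0, 1, 2 are reserved for
# padding, start_char, and oov_char respectively.
reverse_index = {value + 3: key for key, value in word_index.items()}
decoded = ' '.join(reverse_index.get(i, '?') for i in x_train[0])
print(decoded)
```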
---
## Reuters newswire topics classification

Dataset of 11,228 newswires from Reuters, labeled over 46 topics. As with the IMDB dataset, each wire is encoded as a sequence of word indexes (same conventions).

### Usage:
```python
from keras.datasets import reuters
(x_train, y_train), (x_test, y_test) = reuters.load_data(path="reuters.npz",
num_words=None,
skip_top=0,
maxlen=None,
test_split=0.2,
seed=113,
start_char=1,
oov_char=2,
index_from=3)
```
The specifications are the same as those of the IMDB dataset, with the addition of:

- __test_split__: float. Fraction of the dataset to be used as test data.

This dataset also makes available the word index used for encoding the sequences:
```python
word_index = reuters.get_word_index(path="reuters_word_index.json")
```
- __Returns:__ A dictionary where keys are words (str) and values are indexes (integer), e.g. `word_index["giraffe"]` might return `1234`.
- __Arguments:__
    - __path__: if you do not have the index file locally (at `'~/.keras/datasets/' + path`), it will be downloaded to this location.
---
## MNIST database of handwritten digits

Dataset of 60,000 28x28 grayscale images of the 10 digits, along with a test set of 10,000 images.

### Usage:
```python
from keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
```
- __Returns:__
    - 2 tuples:
        - __x_train, x_test__: uint8 array of grayscale image data with shape (num_samples, 28, 28).
        - __y_train, y_test__: uint8 array of digit labels (integers in range 0-9) with shape (num_samples,).
- __Arguments:__
    - __path__: if you do not have the data locally (at `'~/.keras/datasets/' + path`), it will be downloaded to this location.
---
## Fashion-MNIST database of fashion articles

Dataset of 60,000 28x28 grayscale images of 10 fashion categories, along with a test set of 10,000 images. This dataset can be used as a drop-in replacement for MNIST. The class labels are:

| Label | Description |
| --- | --- |
| 0 | T-shirt/top |
| 1 | Trouser |
| 2 | Pullover |
| 3 | Dress |
| 4 | Coat |
| 5 | Sandal |
| 6 | Shirt |
| 7 | Sneaker |
| 8 | Bag |
| 9 | Ankle boot |

### Usage:
```python
from keras.datasets import fashion_mnist
(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
```
- __Returns:__
    - 2 tuples:
        - __x_train, x_test__: uint8 array of grayscale image data with shape (num_samples, 28, 28).
        - __y_train, y_test__: uint8 array of labels (integers in range 0-9) with shape (num_samples,).
---
## Boston housing price regression dataset

Dataset taken from the StatLib library, which is maintained at Carnegie Mellon University.

Samples contain 13 attributes of houses at different locations around the Boston suburbs in the 1970s.
Targets are the median values of the houses at a location (in $1,000s).

### Usage:
```python
from keras.datasets import boston_housing
(x_train, y_train), (x_test, y_test) = boston_housing.load_data()
```
- __Arguments:__
    - __path__: path where to cache the dataset locally
      (relative to ~/.keras/datasets).
    - __seed__: random seed for shuffling the data before computing the test split.
    - __test_split__: fraction of the data to reserve as test set.
- __Returns:__
    Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
| keras-docs-ko/sources/datasets.md/0 | {
"file_path": "keras-docs-ko/sources/datasets.md",
"repo_id": "keras-docs-ko",
"token_count": 6582
} | 78 |
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/recurrent.py#L237)</span>
### RNN
```python
keras.layers.RNN(cell, return_sequences=False, return_state=False, go_backwards=False, stateful=False, unroll=False)
```
Base class for recurrent neural network (RNN) layers.
__Arguments__

- __cell__: An RNN cell instance. An RNN is a network that repeats a particular computation for as many steps as the length of the input sequence; the instance responsible for that repeated computation is assigned to the `cell` argument. A cell is a class that has the following members:
    - A `call` method (`call(input_at_t, states_at_t)`) that takes the input and state at timestep `t` and returns the output at `t` together with the state at `t+1`: `(output_at_t, states_at_t_plus_1)`. The cell's `call` method can also take an optional `constants` argument to receive additional external values; see "Note on passing external constants to an RNN" below for details.
    - A `state_size` attribute. The size of the state passed from step to step within the network (it must match the size of the cell's output). It takes a single integer if the cell has one state, or a list/tuple of integers if it has several states.
    - An `output_size` attribute. The size of the output, given as an integer or a `TensorShape`. If this attribute is missing, it is inferred from the first element of `state_size`.

    It is also possible to specify a list of cell instances as `cell` if you want to stack several cells on top of one another, which is useful for building a stacked RNN.
- __return_sequences__: `bool`. Whether to return the output at every timestep of the sequence or only the output at the last timestep. Defaults to `False`.
- __return_state__: `bool`. Whether to return the state at the last timestep in addition to the output. Defaults to `False`.
- __go_backwards__: `bool`. Defaults to `False`. If `True`, the input sequence is processed backwards and the reversed sequence is returned.
- __stateful__: `bool`. Defaults to `False`. If `True`, the last state for the sample at each index `i` in the current batch is used as the initial state for the sample at index `i` in the next batch. Useful when processing one very long sequence as a series of consecutive batches.
- __unroll__: `bool`. Defaults to `False`. If `True`, the recurrent loop is unrolled so that part of the computation runs in parallel. This can be faster than a fully symbolic loop, but it must hold more intermediate values at once and therefore uses more memory. Suitable for short sequences.
- __input_dim__: `int`. The dimensionality of the input features. This argument (or alternatively the `input_shape` argument) is required only when the layer is used as the first layer of a model, taking the model input directly.
- __input_length__: The length of the input sequences (number of timesteps), given as a single constant when all batches have the same length. This argument is required if you intend to connect `Flatten` and then `Dense` layers downstream (without it, the output shape of the `Dense` layer cannot be computed). If the layer is not the first layer of the model, specify the length in the arguments of the first layer instead (e.g. via `input_shape`).
__Input shape__

3D tensor with shape `(batch_size, timesteps, input_dim)`.

__Output shape__

- If `return_state=True`: a list of tensors. The first tensor is the output, and the remaining tensors are the states at the last timestep, each with shape `(batch_size, units)`. RNN and GRU return 1 state tensor, while LSTM returns 2.
- If `return_sequences=True`: 3D tensor with shape `(batch_size, timesteps, units)`.
- Otherwise: 2D tensor with shape `(batch_size, units)`.
__Masking__

RNN layers support masking for input data that has been padded to a uniform sequence length. To enable masking, set `mask_zero=True` on an [Embedding](embeddings.md) layer. A minimal sketch follows.
__Note on using statefulness in RNNs__

When an RNN layer is set to stateful mode, the final states computed for the samples of one batch are reused as the initial states for the samples of the next batch. Since the indices of the previous batch must map 1:1 onto the indices of the next batch, using statefulness requires a fixed batch size throughout the model.

To use a stateful RNN, configure the following:

- Specify `stateful=True` when constructing the layer.
- Fix the batch size of your model. For a Sequential model, pass `batch_input_shape=(...)` to the first layer; for a functional model, pass `batch_shape=(...)` to every first layer that takes data input. This is the expected shape of the input batch, a tuple of integers of the form (batch size, sequence length, input dimensionality), e.g. `(32, 10, 100)`.

To reset the stored states, call `layer.reset_states()` on a specific layer, or `model.reset_states()` on the entire model. A minimal sketch follows.
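The sizes in this sketch are placeholders:

```python
from keras.layers import LSTM, Dense
from keras.models import Sequential

# Batch size is fixed at 32; every batch fed to this model must match it.
model = Sequential()
model.add(LSTM(64, batch_input_shape=(32, 10, 16), stateful=True))
model.add(Dense(1))

# ... after training on one long sequence, clear the carried-over states:
model.reset_states()
```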
__Note on specifying the initial state of RNNs__

Using the `initial_state` argument, you can symbolically set the initial state of an RNN layer with tensors. The value used must be a tensor, or list of tensors, matching the shape of the initial state the RNN layer expects.

Using the `states` argument of a `reset_states` call, you can numerically set the initial state of an RNN layer with NumPy arrays. The value used must be a NumPy array, or list of arrays, matching the shape of the initial state the RNN layer expects.
__Note on passing external constants to an RNN__

By specifying the `constants` argument of `RNN.__call__` (or `RNN.call`), you can feed additional constants into the cell besides the layer's inputs. To use this, the `cell.call` method operating inside the layer must be defined to accept the same `constants` input. The `constants` argument allows the cell to condition its transformation on static, externally supplied values, as required by attention mechanisms for example.

__Example__
# First, let's define an RNN cell, as a layer subclass.
class MinimalRNNCell(keras.layers.Layer):
def __init__(self, units, **kwargs):
self.units = units
self.state_size = units
super(MinimalRNNCell, self).__init__(**kwargs)
def build(self, input_shape):
self.kernel = self.add_weight(shape=(input_shape[-1], self.units),
initializer='uniform',
name='kernel')
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
initializer='uniform',
name='recurrent_kernel')
self.built = True
def call(self, inputs, states):
prev_output = states[0]
h = K.dot(inputs, self.kernel)
output = h + K.dot(prev_output, self.recurrent_kernel)
return output, [output]
# Let's use this cell in an RNN layer:
cell = MinimalRNNCell(32)
x = keras.Input((None, 5))
layer = RNN(cell)
y = layer(x)
# Here's how to use the cell to build a stacked RNN:
cells = [MinimalRNNCell(32), MinimalRNNCell(64)]
x = keras.Input((None, 5))
layer = RNN(cells)
y = layer(x)
```
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/recurrent.py#L945)</span>
### SimpleRNN
```python
keras.layers.SimpleRNN(units, activation='tanh', use_bias=True, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, dropout=0.0, recurrent_dropout=0.0, return_sequences=False, return_state=False, go_backwards=False, stateful=False, unroll=False)
```
Fully-connected RNN where the output of the previous timestep is fed back as input at the current timestep.
__Arguments__
- __units__: Positive integer, dimensionality of the output space.
- __activation__: Activation function to use. Defaults to hyperbolic tangent (`tanh`). If you pass `None`, no activation is applied (i.e. linear activation: `a(x) = x`). See: [activations](../activations.md)
- __use_bias__: `bool`. Whether the layer applies a bias vector in its computation.
- __kernel_initializer__: Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs. See: [initializers](../initializers.md)
- __recurrent_initializer__: Initializer for the `recurrent_kernel` weights matrix, used for the linear transformation of the recurrent state received from the previous timestep. See: [initializers](../initializers.md)
- __bias_initializer__: Initializer for the bias vector. See: [initializers](../initializers.md)
- __kernel_regularizer__: Regularizer function applied to the `kernel` weights matrix. See: [regularizers](../regularizers.md)
- __recurrent_regularizer__: Regularizer function applied to the `recurrent_kernel` weights matrix. See: [regularizers](../regularizers.md)
- __bias_regularizer__: Regularizer function applied to the bias vector. See: [regularizers](../regularizers.md)
- __activity_regularizer__: Regularizer function applied to the output of the layer. See: [regularizers](../regularizers.md)
- __kernel_constraint__: Constraint function applied to the `kernel` weights matrix. See: [constraints](../constraints.md)
- __recurrent_constraint__: Constraint function applied to the `recurrent_kernel` weights matrix. See: [constraints](../constraints.md)
- __bias_constraint__: Constraint function applied to the bias vector. See: [constraints](../constraints.md)
- __dropout__: `float` between 0 and 1. Fraction of the units to randomly drop (set to 0) in the linear transformation of the inputs (the `kernel` weights).
- __recurrent_dropout__: `float` between 0 and 1. Fraction of the units to randomly drop in the linear transformation of the recurrent state (the `recurrent_kernel` weights).
- __return_sequences__: `bool`. Whether to return the output at every timestep of the sequence, or only the output at the last timestep. Defaults to `False`.
- __return_state__: `bool`. Whether to return the state at the last timestep in addition to the output. Defaults to `False`.
- __go_backwards__: `bool`, defaults to `False`. If `True`, the input sequence is processed backwards, and the reversed sequence is returned.
- __stateful__: `bool`, defaults to `False`. If `True`, the last state of the sample at each index `i` in the current batch is used as the initial state for the sample at index `i` in the following batch. Useful when processing very long sequences split into consecutive batches.
- __unroll__: `bool`, defaults to `False`. If `True`, the network is unrolled, processing part of the recurrent computation in parallel. Unrolling can speed things up compared to the fully recurrent loop, but it is more memory-intensive since more intermediate values must be held at once. It is only suitable for short sequences.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/recurrent.py#L1491)</span>
### GRU
```python
keras.layers.GRU(units, activation='tanh', recurrent_activation='sigmoid', use_bias=True, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, dropout=0.0, recurrent_dropout=0.0, implementation=2, return_sequences=False, return_state=False, go_backwards=False, stateful=False, unroll=False, reset_after=False)
```
Gated Recurrent Unit - Cho et al. 2014.
Keras supports two variants of the GRU. The default one is based on the later version, 1406.1078v3, in which the reset gate is applied to the hidden state received from the previous timestep before it is multiplied by the recurrent weights (`recurrent_kernel`). The other is based on the original version, 1406.1078v1, in which the hidden state is first multiplied by the weights and the reset gate is applied afterwards.
Of the two, the original version is the one compatible with `CuDNNGRU` (a GPU-only implementation) while also supporting computation on CPU. In this version the `kernel` and `recurrent_kernel` computations are performed separately, so a separate bias vector exists for each. To use the original version, set `reset_after=True` and `recurrent_activation='sigmoid'`.
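For instance, selecting the CuDNN-compatible variant might look like this (a sketch; the unit count is arbitrary):
```python
from keras.layers import GRU

# reset_after=True with a sigmoid recurrent activation selects the variant
# whose weights are interchangeable with CuDNNGRU.
gru = GRU(64, reset_after=True, recurrent_activation='sigmoid')
```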
__Arguments__
- __units__: Positive integer, dimensionality of the output space.
- __activation__: Activation function used to compute the candidate state from the current timestep's input. Defaults to hyperbolic tangent (`tanh`). If you pass `None`, no activation is applied (i.e. linear activation: `a(x) = x`). See: [activations](../activations.md)
- __recurrent_activation__: Activation function used to compute the update gate and the reset gate of the GRU. Defaults to sigmoid (`'sigmoid'`). If you pass `None`, no activation is applied (i.e. linear activation: `a(x) = x`). See: [activations](../activations.md)
- __use_bias__: `bool`. Whether the layer applies a bias vector in its computation.
- __kernel_initializer__: Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs. See: [initializers](../initializers.md)
- __recurrent_initializer__: Initializer for the `recurrent_kernel` weights matrix, used for the linear transformation of the recurrent state received from the previous timestep. See: [initializers](../initializers.md)
- __bias_initializer__: Initializer for the bias vector. See: [initializers](../initializers.md)
- __kernel_regularizer__: Regularizer function applied to the `kernel` weights matrix. See: [regularizers](../regularizers.md)
- __recurrent_regularizer__: Regularizer function applied to the `recurrent_kernel` weights matrix. See: [regularizers](../regularizers.md)
- __bias_regularizer__: Regularizer function applied to the bias vector. See: [regularizers](../regularizers.md)
- __activity_regularizer__: Regularizer function applied to the output of the layer. See: [regularizers](../regularizers.md)
- __kernel_constraint__: Constraint function applied to the `kernel` weights matrix. See: [constraints](../constraints.md)
- __recurrent_constraint__: Constraint function applied to the `recurrent_kernel` weights matrix. See: [constraints](../constraints.md)
- __bias_constraint__: Constraint function applied to the bias vector. See: [constraints](../constraints.md)
- __dropout__: `float` between 0 and 1. Fraction of the units to randomly drop (set to 0) in the linear transformation of the inputs (the `kernel` weights).
- __recurrent_dropout__: `float` between 0 and 1. Fraction of the units to randomly drop in the linear transformation of the recurrent state (the `recurrent_kernel` weights).
- __implementation__: `1` or `2`. Implementation mode. Mode `1` structures the computation as a larger number of smaller dot products and additions, whereas mode `2` batches them into fewer, larger operations. These modes have different performance profiles depending on the hardware and the application. Defaults to `2`.
- __return_sequences__: `bool`. Whether to return the output at every timestep of the sequence, or only the output at the last timestep. Defaults to `False`.
- __return_state__: `bool`. Whether to return the state at the last timestep in addition to the output. Defaults to `False`.
- __go_backwards__: `bool`, defaults to `False`. If `True`, the input sequence is processed backwards, and the reversed sequence is returned.
- __stateful__: `bool`, defaults to `False`. If `True`, the last state of the sample at each index `i` in the current batch is used as the initial state for the sample at index `i` in the following batch. Useful when processing very long sequences split into consecutive batches.
- __unroll__: `bool`, defaults to `False`. If `True`, the network is unrolled, processing part of the recurrent computation in parallel. Unrolling can speed things up compared to the fully recurrent loop, but it is more memory-intensive since more intermediate values must be held at once. It is only suitable for short sequences.
- __reset_after__: GRU convention for when the reset gate is applied to the hidden state. `False` applies it before the multiplication with `recurrent_kernel`; `True` applies it to the result after the multiplication (the convention compatible with `CuDNN`).
__References__
- [Learning Phrase Representations using RNN Encoder-Decoder for
Statistical Machine Translation](https://arxiv.org/abs/1406.1078)
- [On the Properties of Neural Machine Translation:
Encoder-Decoder Approaches](https://arxiv.org/abs/1409.1259)
- [Empirical Evaluation of Gated Recurrent Neural Networks on
Sequence Modeling](https://arxiv.org/abs/1412.3555v1)
- [A Theoretically Grounded Application of Dropout in
Recurrent Neural Networks](https://arxiv.org/abs/1512.05287)
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/recurrent.py#L2051)</span>
### LSTM
```python
keras.layers.LSTM(units, activation='tanh', recurrent_activation='hard_sigmoid', use_bias=True, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', unit_forget_bias=True, kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, dropout=0.0, recurrent_dropout=0.0, implementation=1, return_sequences=False, return_state=False, go_backwards=False, stateful=False, unroll=False)
```
Long Short-Term Memory - Hochreiter 1997.
__Arguments__
- __units__: Positive integer, dimensionality of the output space.
- __activation__: Activation function used both to compute the candidate values from the current timestep's input and to compute the current output from the cell state. Defaults to hyperbolic tangent (`tanh`). If you pass `None`, no activation is applied (i.e. linear activation: `a(x) = x`). See: [activations](../activations.md)
- __recurrent_activation__: Activation function used to compute the input gate, forget gate, and output gate of the LSTM. Defaults to hard sigmoid (`'hard_sigmoid'`). If you pass `None`, no activation is applied (i.e. linear activation: `a(x) = x`). See: [activations](../activations.md)
- __use_bias__: `bool`. Whether the layer applies a bias vector in its computation.
- __kernel_initializer__: Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs. See: [initializers](../initializers.md)
- __recurrent_initializer__: Initializer for the `recurrent_kernel` weights matrix, used for the linear transformation of the recurrent state received from the previous timestep. See: [initializers](../initializers.md)
- __bias_initializer__: Initializer for the bias vector. See: [initializers](../initializers.md)
- __unit_forget_bias__: `bool`. If `True`, add 1 to the bias of the forget gate at initialization, following the recommendation of [Jozefowicz et al. (2015)](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf). Setting this to `True` also forces `bias_initializer='zeros'`, so the biases of the remaining gates start at 0. Defaults to `True`.
- __kernel_regularizer__: Regularizer function applied to the `kernel` weights matrix. See: [regularizers](../regularizers.md)
- __recurrent_regularizer__: Regularizer function applied to the `recurrent_kernel` weights matrix. See: [regularizers](../regularizers.md)
- __bias_regularizer__: Regularizer function applied to the bias vector. See: [regularizers](../regularizers.md)
- __activity_regularizer__: Regularizer function applied to the output of the layer. See: [regularizers](../regularizers.md)
- __kernel_constraint__: Constraint function applied to the `kernel` weights matrix. See: [constraints](../constraints.md)
- __recurrent_constraint__: Constraint function applied to the `recurrent_kernel` weights matrix. See: [constraints](../constraints.md)
- __bias_constraint__: Constraint function applied to the bias vector. See: [constraints](../constraints.md)
- __dropout__: `float` between 0 and 1. Fraction of the units to randomly drop (set to 0) in the linear transformation of the inputs (the `kernel` weights).
- __recurrent_dropout__: `float` between 0 and 1. Fraction of the units to randomly drop in the linear transformation of the recurrent state (the `recurrent_kernel` weights).
- __implementation__: `1` or `2`. Implementation mode. Mode `1` structures the computation as a larger number of smaller dot products and additions, whereas mode `2` batches them into fewer, larger operations. These modes have different performance profiles depending on the hardware and the application. Defaults to `1`.
- __return_sequences__: `bool`. Whether to return the output at every timestep of the sequence, or only the output at the last timestep. Defaults to `False`.
- __return_state__: `bool`. Whether to return the state at the last timestep in addition to the output. Defaults to `False`.
- __go_backwards__: `bool`, defaults to `False`. If `True`, the input sequence is processed backwards, and the reversed sequence is returned.
- __stateful__: `bool`, defaults to `False`. If `True`, the last state of the sample at each index `i` in the current batch is used as the initial state for the sample at index `i` in the following batch. Useful when processing very long sequences split into consecutive batches.
- __unroll__: `bool`, defaults to `False`. If `True`, the network is unrolled, processing part of the recurrent computation in parallel. Unrolling can speed things up compared to the fully recurrent loop, but it is more memory-intensive since more intermediate values must be held at once. It is only suitable for short sequences.
__References__
- [Long short-term memory](
http://www.bioinf.jku.at/publications/older/2604.pdf)
- [Learning to forget: Continual prediction with LSTM](
http://www.mitpressjournals.org/doi/pdf/10.1162/089976600300015015)
- [Supervised sequence labeling with recurrent neural networks](
http://www.cs.toronto.edu/~graves/preprint.pdf)
- [A Theoretically Grounded Application of Dropout in
Recurrent Neural Networks](https://arxiv.org/abs/1512.05287)
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/convolutional_recurrent.py#L788)</span>
### ConvLSTM2D
```python
keras.layers.ConvLSTM2D(filters, kernel_size, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1), activation='tanh', recurrent_activation='hard_sigmoid', use_bias=True, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', unit_forget_bias=True, kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, return_sequences=False, go_backwards=False, stateful=False, dropout=0.0, recurrent_dropout=0.0)
```
Convolutional LSTM network.
It is similar to an LSTM, but convolutions are applied to both the input transformations and the state transformations.
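As a quick sketch, with the default `channels_last` data format the layer consumes 5D input of shape `(samples, time, rows, cols, channels)` (all sizes below are illustrative):
```python
from keras.models import Sequential
from keras.layers import ConvLSTM2D

model = Sequential()
# 10 frames of 40x40 single-channel images per sample.
model.add(ConvLSTM2D(filters=16, kernel_size=(3, 3),
                     input_shape=(10, 40, 40, 1),
                     padding='same', return_sequences=True))
```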
__Arguments__
- __filters__: `int`. Number of convolution filters, i.e. the dimensionality of the output space.
- __kernel_size__: `int`, or tuple/list of `int`s. Size of the convolution window.
- __strides__: `int`, or tuple/list of `int`s. Strides of the convolution. Defaults to `(1, 1)`. If you want to use dilated convolution with a stride greater than `1`, note that `dilation_rate` must then be kept at `1`.
- __padding__: `str`, one of `'valid'` or `'same'` (case-insensitive), specifying the padding of the input. `'valid'` means no padding; `'same'` pads the input so the output has the same spatial shape as the input.
- __data_format__: `str`, one of `'channels_last'` (default) or `'channels_first'`, defining the ordering of the dimensions in the inputs. Use `'channels_last'` for input shape `(batch, time, ..., channels)` (channels last) and `'channels_first'` for `(batch, time, channels, ...)` (channels first). It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`; if you never set it, `'channels_last'` is used.
- __dilation_rate__: `int`, or tuple/list of `int`s, specifying the dilation rate for dilated convolution. A dilated convolution takes the usual dense convolution filter and spreads it over a wider region by inserting horizontal and vertical gaps between its elements, so it covers a larger receptive field with the same number of parameters. See [Multi-Scale Context Aggregation by Dilated Convolutions](https://arxiv.org/abs/1511.07122v3) for details. Defaults to `(1, 1)`; currently, a `dilation_rate` greater than `1` cannot be combined with `strides` greater than `1`.
- __activation__: Activation function used both to compute the candidate values from the current timestep's input and to compute the current output from the cell state. Defaults to hyperbolic tangent (`tanh`). If you pass `None`, no activation is applied (i.e. linear activation: `a(x) = x`). See: [activations](../activations.md)
- __recurrent_activation__: Activation function used to compute the input gate, forget gate, and output gate of the LSTM. Defaults to hard sigmoid (`'hard_sigmoid'`). If you pass `None`, no activation is applied (i.e. linear activation: `a(x) = x`). See: [activations](../activations.md)
- __use_bias__: `bool`. Whether the layer applies a bias vector in its computation.
- __kernel_initializer__: Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs. See: [initializers](../initializers.md)
- __recurrent_initializer__: Initializer for the `recurrent_kernel` weights matrix, used for the linear transformation of the recurrent state received from the previous timestep. See: [initializers](../initializers.md)
- __bias_initializer__: Initializer for the bias vector. See: [initializers](../initializers.md)
- __unit_forget_bias__: `bool`. If `True`, add 1 to the bias of the forget gate at initialization, following the recommendation of [Jozefowicz et al. (2015)](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf). Setting this to `True` also forces `bias_initializer='zeros'`, so the biases of the remaining gates start at 0. Defaults to `True`.
- __kernel_regularizer__: Regularizer function applied to the `kernel` weights matrix. See: [regularizers](../regularizers.md)
- __recurrent_regularizer__: Regularizer function applied to the `recurrent_kernel` weights matrix. See: [regularizers](../regularizers.md)
- __bias_regularizer__: Regularizer function applied to the bias vector. See: [regularizers](../regularizers.md)
- __activity_regularizer__: Regularizer function applied to the output of the layer. See: [regularizers](../regularizers.md)
- __kernel_constraint__: Constraint function applied to the `kernel` weights matrix. See: [constraints](../constraints.md)
- __recurrent_constraint__: Constraint function applied to the `recurrent_kernel` weights matrix. See: [constraints](../constraints.md)
- __bias_constraint__: Constraint function applied to the bias vector. See: [constraints](../constraints.md)
- __return_sequences__: `bool`. Whether to return the output at every timestep of the sequence, or only the output at the last timestep. Defaults to `False`.
- __go_backwards__: `bool`, defaults to `False`. If `True`, the input sequence is processed backwards, and the reversed sequence is returned.
- __stateful__: `bool`, defaults to `False`. If `True`, the last state of the sample at each index `i` in the current batch is used as the initial state for the sample at index `i` in the following batch. Useful when processing very long sequences split into consecutive batches.
- __dropout__: `float` between 0 and 1. Fraction of the units to randomly drop (set to 0) in the linear transformation of the inputs (the `kernel` weights).
- __recurrent_dropout__: `float` between 0 and 1. Fraction of the units to randomly drop in the linear transformation of the recurrent state (the `recurrent_kernel` weights).
__Input shape__
- If `data_format='channels_first'`: 5D tensor with shape `(samples, time, channels, rows, cols)`.
- If `data_format='channels_last'`: 5D tensor with shape `(samples, time, rows, cols, channels)`.
__Output shape__
- If `return_sequences=True`
    - If `data_format='channels_first'`: 5D tensor with shape `(samples, time, filters, output_row, output_col)`.
    - If `data_format='channels_last'`: 5D tensor with shape `(samples, time, output_row, output_col, filters)`.
- Otherwise
    - If `data_format='channels_first'`: 4D tensor with shape `(samples, filters, output_row, output_col)`.
    - If `data_format='channels_last'`: 4D tensor with shape `(samples, output_row, output_col, filters)`.
The values of `output_row` and `output_col` depend on the shape of the filters and on the padding.
__Raises__
- __ValueError__: raised in case of invalid constructor arguments.
__References__
- [Convolutional LSTM Network: A Machine Learning Approach for Precipitation Nowcasting](http://arxiv.org/abs/1506.04214v1)
The current implementation does not include the feedback loop on the cells' output.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/recurrent.py#L780)</span>
### SimpleRNNCell
```python
keras.layers.SimpleRNNCell(units, activation='tanh', use_bias=True, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, dropout=0.0, recurrent_dropout=0.0)
```
Cell class for the `SimpleRNN` layer. A usage sketch follows the argument list below.
__Arguments__
- __units__: Positive integer, dimensionality of the output space.
- __activation__: Activation function to use. Defaults to hyperbolic tangent (`tanh`). If you pass `None`, no activation is applied (i.e. linear activation: `a(x) = x`). See: [activations](../activations.md)
- __use_bias__: `bool`. Whether the cell applies a bias vector in its computation.
- __kernel_initializer__: Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs. See: [initializers](../initializers.md)
- __recurrent_initializer__: Initializer for the `recurrent_kernel` weights matrix, used for the linear transformation of the recurrent state received from the previous timestep. See: [initializers](../initializers.md)
- __bias_initializer__: Initializer for the bias vector. See: [initializers](../initializers.md)
- __kernel_regularizer__: Regularizer function applied to the `kernel` weights matrix. See: [regularizers](../regularizers.md)
- __recurrent_regularizer__: Regularizer function applied to the `recurrent_kernel` weights matrix. See: [regularizers](../regularizers.md)
- __bias_regularizer__: Regularizer function applied to the bias vector. See: [regularizers](../regularizers.md)
- __kernel_constraint__: Constraint function applied to the `kernel` weights matrix. See: [constraints](../constraints.md)
- __recurrent_constraint__: Constraint function applied to the `recurrent_kernel` weights matrix. See: [constraints](../constraints.md)
- __bias_constraint__: Constraint function applied to the bias vector. See: [constraints](../constraints.md)
- __dropout__: `float` between 0 and 1. Fraction of the units to randomly drop (set to 0) in the linear transformation of the inputs (the `kernel` weights).
- __recurrent_dropout__: `float` between 0 and 1. Fraction of the units to randomly drop in the linear transformation of the recurrent state (the `recurrent_kernel` weights).
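A cell only defines the computation for a single timestep; to process a sequence it is wrapped in an `RNN` layer, e.g. (a sketch):
```python
from keras.layers import Input, RNN, SimpleRNNCell

x = Input(shape=(None, 5))
y = RNN(SimpleRNNCell(32))(x)  # equivalent to SimpleRNN(32)
```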
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/recurrent.py#L1164)</span>
### GRUCell
```python
keras.layers.GRUCell(units, activation='tanh', recurrent_activation='sigmoid', use_bias=True, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, dropout=0.0, recurrent_dropout=0.0, implementation=2, reset_after=False)
```
Cell class for the `GRU` layer.
__Arguments__
- __units__: Positive integer, dimensionality of the output space.
- __activation__: Activation function used to compute the candidate state from the current timestep's input. Defaults to hyperbolic tangent (`tanh`). If you pass `None`, no activation is applied (i.e. linear activation: `a(x) = x`). See: [activations](../activations.md)
- __recurrent_activation__: Activation function used to compute the update gate and the reset gate of the GRU. Defaults to sigmoid (`'sigmoid'`). If you pass `None`, no activation is applied (i.e. linear activation: `a(x) = x`). See: [activations](../activations.md)
- __use_bias__: `bool`. Whether the cell applies a bias vector in its computation.
- __kernel_initializer__: Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs. See: [initializers](../initializers.md)
- __recurrent_initializer__: Initializer for the `recurrent_kernel` weights matrix, used for the linear transformation of the recurrent state received from the previous timestep. See: [initializers](../initializers.md)
- __bias_initializer__: Initializer for the bias vector. See: [initializers](../initializers.md)
- __kernel_regularizer__: Regularizer function applied to the `kernel` weights matrix. See: [regularizers](../regularizers.md)
- __recurrent_regularizer__: Regularizer function applied to the `recurrent_kernel` weights matrix. See: [regularizers](../regularizers.md)
- __bias_regularizer__: Regularizer function applied to the bias vector. See: [regularizers](../regularizers.md)
- __kernel_constraint__: Constraint function applied to the `kernel` weights matrix. See: [constraints](../constraints.md)
- __recurrent_constraint__: Constraint function applied to the `recurrent_kernel` weights matrix. See: [constraints](../constraints.md)
- __bias_constraint__: Constraint function applied to the bias vector. See: [constraints](../constraints.md)
- __dropout__: `float` between 0 and 1. Fraction of the units to randomly drop (set to 0) in the linear transformation of the inputs (the `kernel` weights).
- __recurrent_dropout__: `float` between 0 and 1. Fraction of the units to randomly drop in the linear transformation of the recurrent state (the `recurrent_kernel` weights).
- __implementation__: `1` or `2`. Implementation mode. Mode `1` structures the computation as a larger number of smaller dot products and additions, whereas mode `2` batches them into fewer, larger operations. These modes have different performance profiles depending on the hardware and the application. Defaults to `2`.
- __return_sequences__: `bool`. Whether to return the output at every timestep of the sequence, or only the output at the last timestep. Defaults to `False`.
- __return_state__: `bool`. Whether to return the state at the last timestep in addition to the output. Defaults to `False`.
- __go_backwards__: `bool`, defaults to `False`. If `True`, the input sequence is processed backwards, and the reversed sequence is returned.
- __stateful__: `bool`, defaults to `False`. If `True`, the last state of the sample at each index `i` in the current batch is used as the initial state for the sample at index `i` in the following batch. Useful when processing very long sequences split into consecutive batches.
- __unroll__: `bool`, defaults to `False`. If `True`, the network is unrolled, processing part of the recurrent computation in parallel. Unrolling can speed things up compared to the fully recurrent loop, but it is more memory-intensive since more intermediate values must be held at once. It is only suitable for short sequences.
- __reset_after__: GRU convention for when the reset gate is applied to the hidden state. `False` applies it before the multiplication with `recurrent_kernel`; `True` applies it to the result after the multiplication (the convention compatible with `CuDNN`).
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/recurrent.py#L1765)</span>
### LSTMCell
```python
keras.layers.LSTMCell(units, activation='tanh', recurrent_activation='hard_sigmoid', use_bias=True, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', unit_forget_bias=True, kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, dropout=0.0, recurrent_dropout=0.0, implementation=1)
```
Cell class for the `LSTM` layer.
__Arguments__
- __units__: Positive integer, dimensionality of the output space.
- __activation__: Activation function used both to compute the candidate values from the current timestep's input and to compute the current output from the cell state. Defaults to hyperbolic tangent (`tanh`). If you pass `None`, no activation is applied (i.e. linear activation: `a(x) = x`). See: [activations](../activations.md)
- __recurrent_activation__: Activation function used to compute the input gate, forget gate, and output gate of the LSTM. Defaults to hard sigmoid (`'hard_sigmoid'`). If you pass `None`, no activation is applied (i.e. linear activation: `a(x) = x`). See: [activations](../activations.md)
- __use_bias__: `bool`. Whether the cell applies a bias vector in its computation.
- __kernel_initializer__: Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs. See: [initializers](../initializers.md)
- __recurrent_initializer__: Initializer for the `recurrent_kernel` weights matrix, used for the linear transformation of the recurrent state received from the previous timestep. See: [initializers](../initializers.md)
- __bias_initializer__: Initializer for the bias vector. See: [initializers](../initializers.md)
- __unit_forget_bias__: `bool`. If `True`, add 1 to the bias of the forget gate at initialization, following the recommendation of [Jozefowicz et al. (2015)](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf). Setting this to `True` also forces `bias_initializer='zeros'`, so the biases of the remaining gates start at 0. Defaults to `True`.
- __kernel_regularizer__: Regularizer function applied to the `kernel` weights matrix. See: [regularizers](../regularizers.md)
- __recurrent_regularizer__: Regularizer function applied to the `recurrent_kernel` weights matrix. See: [regularizers](../regularizers.md)
- __bias_regularizer__: Regularizer function applied to the bias vector. See: [regularizers](../regularizers.md)
- __kernel_constraint__: Constraint function applied to the `kernel` weights matrix. See: [constraints](../constraints.md)
- __recurrent_constraint__: Constraint function applied to the `recurrent_kernel` weights matrix. See: [constraints](../constraints.md)
- __bias_constraint__: Constraint function applied to the bias vector. See: [constraints](../constraints.md)
- __dropout__: `float` between 0 and 1. Fraction of the units to randomly drop (set to 0) in the linear transformation of the inputs (the `kernel` weights).
- __recurrent_dropout__: `float` between 0 and 1. Fraction of the units to randomly drop in the linear transformation of the recurrent state (the `recurrent_kernel` weights).
- __implementation__: `1` or `2`. Implementation mode. Mode `1` structures the computation as a larger number of smaller dot products and additions, whereas mode `2` batches them into fewer, larger operations. These modes have different performance profiles depending on the hardware and the application. Defaults to `1`.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/cudnn_recurrent.py#L135)</span>
### CuDNNGRU
```python
keras.layers.CuDNNGRU(units, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, return_sequences=False, return_state=False, stateful=False)
```
Fast GRU implementation backed by [CuDNN](https://developer.nvidia.com/cudnn).
It can only be run on a GPU, with the TensorFlow backend.
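Usage is otherwise analogous to `GRU`, minus the activation-related arguments; a sketch:
```python
from keras.layers import CuDNNGRU

# Requires a CuDNN-enabled GPU and the TensorFlow backend.
layer = CuDNNGRU(64, return_sequences=True)
```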
__Arguments__
- __units__: Positive integer, dimensionality of the output space.
- __kernel_initializer__: Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs. See: [initializers](../initializers.md)
- __recurrent_initializer__: Initializer for the `recurrent_kernel` weights matrix, used for the linear transformation of the recurrent state received from the previous timestep. See: [initializers](../initializers.md)
- __bias_initializer__: Initializer for the bias vector. See: [initializers](../initializers.md)
- __kernel_regularizer__: Regularizer function applied to the `kernel` weights matrix. See: [regularizers](../regularizers.md)
- __recurrent_regularizer__: Regularizer function applied to the `recurrent_kernel` weights matrix. See: [regularizers](../regularizers.md)
- __bias_regularizer__: Regularizer function applied to the bias vector. See: [regularizers](../regularizers.md)
- __activity_regularizer__: Regularizer function applied to the output of the layer. See: [regularizers](../regularizers.md)
- __kernel_constraint__: Constraint function applied to the `kernel` weights matrix. See: [constraints](../constraints.md)
- __recurrent_constraint__: Constraint function applied to the `recurrent_kernel` weights matrix. See: [constraints](../constraints.md)
- __bias_constraint__: Constraint function applied to the bias vector. See: [constraints](../constraints.md)
- __return_sequences__: `bool`. Whether to return the output at every timestep of the sequence, or only the output at the last timestep. Defaults to `False`.
- __return_state__: `bool`. Whether to return the state at the last timestep in addition to the output. Defaults to `False`.
- __stateful__: `bool`, defaults to `False`. If `True`, the last state of the sample at each index `i` in the current batch is used as the initial state for the sample at index `i` in the following batch. Useful when processing very long sequences split into consecutive batches.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/cudnn_recurrent.py#L328)</span>
### CuDNNLSTM
```python
keras.layers.CuDNNLSTM(units, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', unit_forget_bias=True, kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, return_sequences=False, return_state=False, stateful=False)
```
Fast LSTM implementation backed by [CuDNN](https://developer.nvidia.com/cudnn).
It can only be run on a GPU, with the TensorFlow backend.
__Arguments__
- __units__: Positive integer, dimensionality of the output space.
- __kernel_initializer__: Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs. See: [initializers](../initializers.md)
- __unit_forget_bias__: `bool`. If `True`, add 1 to the bias of the forget gate at initialization, following the recommendation of [Jozefowicz et al. (2015)](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf). Setting this to `True` also forces `bias_initializer="zeros"`, so the biases of the remaining gates start at 0. Defaults to `True`.
- __recurrent_initializer__: Initializer for the `recurrent_kernel` weights matrix, used for the linear transformation of the recurrent state received from the previous timestep. See: [initializers](../initializers.md)
- __bias_initializer__: Initializer for the bias vector. See: [initializers](../initializers.md)
- __kernel_regularizer__: Regularizer function applied to the `kernel` weights matrix. See: [regularizers](../regularizers.md)
- __recurrent_regularizer__: Regularizer function applied to the `recurrent_kernel` weights matrix. See: [regularizers](../regularizers.md)
- __bias_regularizer__: Regularizer function applied to the bias vector. See: [regularizers](../regularizers.md)
- __activity_regularizer__: Regularizer function applied to the output of the layer. See: [regularizers](../regularizers.md)
- __kernel_constraint__: Constraint function applied to the `kernel` weights matrix. See: [constraints](../constraints.md)
- __recurrent_constraint__: Constraint function applied to the `recurrent_kernel` weights matrix. See: [constraints](../constraints.md)
- __bias_constraint__: Constraint function applied to the bias vector. See: [constraints](../constraints.md)
- __return_sequences__: `bool`. Whether to return the output at every timestep of the sequence, or only the output at the last timestep. Defaults to `False`.
- __return_state__: `bool`. Whether to return the state at the last timestep in addition to the output. Defaults to `False`.
- __stateful__: `bool`, defaults to `False`. If `True`, the last state of the sample at each index `i` in the current batch is used as the initial state for the sample at index `i` in the following batch. Useful when processing very long sequences split into consecutive batches.
# Why use Keras?
Among the many deep learning frameworks available today, why Keras? Here are some of the reasons to prefer Keras over the alternatives.
---
## Keras prioritizes the user experience
- Keras is a tool designed for humans, not machines. [To minimize the burden on its users](https://blog.keras.io/user-experience-design-for-apis.html), Keras offers a consistent and concise API, minimizes the number of user actions required for common use cases, and provides clear and actionable feedback upon error.
- This design philosophy makes Keras easy to learn and easy to use. With Keras you can try out more ideas, faster, which [helps you win machine learning competitions](https://www.quora.com/Why-has-Keras-been-so-successful-lately-at-Kaggle-competitions).
- While providing an easy high-level API, Keras also interoperates with lower-level APIs such as TensorFlow's, so you can build any kind of network architecture. In particular, `tf.keras` integrates seamlessly into TensorFlow-based workflows.
---
## Keras has broad adoption in both industry and the research community
<a href='https://towardsdatascience.com/deep-learning-framework-power-scores-2018-23607ddf297a'>
<img style='width: 80%; margin-left: 10%;' src='https://s3.amazonaws.com/keras.io/img/dl_frameworks_power_scores.png'/>
</a>
<p style='font-style: italic; font-size: 10pt; text-align: center;'>
Deep learning framework rankings, computed from 11 data sources across 7 categories, by Jeff Hale.
</p>
With over 250,000 individual users (as of 2018), Keras has stronger adoption in both industry and the research community than any other deep learning framework except TensorFlow itself. The Keras API is also available as the official frontend of TensorFlow, via the `tf.keras` module.
Features built with Keras power services at Netflix, Uber, Yelp, Instacart, Zocdoc, Square, and many others. Keras is especially popular among startups that place deep learning at the core of their products.
Keras is also a favorite among deep learning researchers, ranking second in the number of mentions in scientific papers uploaded to [arXiv.org](https://arxiv.org/archive/cs). Keras has also been adopted by researchers at large scientific organizations such as CERN and NASA.
---
## Keras makes it easy to turn models into products
Keras models can be deployed across a wider range of platforms than any other deep learning framework, including:
- On iOS, via [Apple's CoreML](https://developer.apple.com/documentation/coreml); Keras support is officially provided by Apple ([tutorial](https://www.pyimagesearch.com/2018/04/23/running-keras-models-on-ios-with-coreml/)).
- On Android, via the TensorFlow Android runtime (e.g. the [Not Hotdog app](https://medium.com/@timanglade/how-hbos-silicon-valley-built-not-hotdog-with-mobile-tensorflow-keras-react-native-ef03260747f3)).
- In the browser, via GPU-accelerated JavaScript runtimes such as [Keras.js](https://transcranial.github.io/keras-js/#/) and [WebDNN](https://mil-tokyo.github.io/webdnn/).
- On Google Cloud, via [TensorFlow-Serving](https://www.tensorflow.org/serving/).
- [In a Python web app backend, such as a Flask app](https://blog.keras.io/building-a-simple-keras-deep-learning-rest-api.html).
- On the JVM, via [DL4J model import provided by SkyMind](https://deeplearning4j.org/model-import-keras).
- On Raspberry Pi.
---
## Keras supports multiple backend engines and does not lock you into one ecosystem
Keras models can be developed with several [deep learning backends](https://keras.io/backend/). Notably, any Keras model that only uses built-in layers is portable across all of these backends: you can train a model with one backend and load it with another (e.g. for deployment). The available backends include:
- The TensorFlow backend (from Google)
- The CNTK backend (from Microsoft)
- The Theano backend
Amazon also provides [a fork of Keras](https://github.com/awslabs/keras-apache-mxnet) that uses MXNet as a backend.
As a result, Keras models can be trained not only on CPUs but also on several different hardware platforms:
- [NVIDIA GPUs](https://developer.nvidia.com/deep-learning)
- [Google TPUs](https://cloud.google.com/tpu/) (via the TensorFlow backend and Google Cloud)
- OpenCL-enabled GPUs, such as those from AMD (via [the PlaidML Keras backend](https://github.com/plaidml/plaidml))
---
## Keras has strong support for multi-GPU and distributed training
- Keras has [built-in support for multi-GPU data parallelism](/utils/#multi_gpu_model).
- Uber's [Horovod](https://github.com/uber/horovod) has first-class support for Keras models.
- Keras models [can be turned into TensorFlow Estimators](https://www.tensorflow.org/versions/master/api_docs/python/tf/keras/estimator/model_to_estimator) and trained on [clusters of GPUs on Google Cloud](https://cloud.google.com/solutions/running-distributed-tensorflow-on-compute-engine).
- Keras can be run on Spark via [Dist-Keras](https://github.com/cerndb/dist-keras) and [Elephas](https://github.com/maxpumperla/elephas).
---
## Keras development is backed by key companies in the deep learning ecosystem
Keras development is backed primarily by Google, and the Keras API is packaged in TensorFlow as `tf.keras`. Microsoft maintains the CNTK Keras backend. Amazon AWS maintains the MXNet-backed fork. Additional contributors include NVIDIA, Uber, and Apple (with CoreML).
<img src='https://keras.io/img/google-logo.png' style='width:200px; margin-right:15px;'/>
<img src='https://keras.io/img/microsoft-logo.png' style='width:200px; margin-right:15px;'/>
<img src='https://keras.io/img/nvidia-logo.png' style='width:200px; margin-right:15px;'/>
<img src='https://keras.io/img/aws-logo.png' style='width:110px; margin-right:15px;'/>
# Keras backends
## What is a "backend"?
Keras is a model-level library, providing high-level building blocks for developing deep learning models. It does not itself handle low-level operations such as tensor products and convolutions. Instead, it relies on a specialized, well-optimized tensor manipulation library to do so, serving as the "backend engine" of Keras. Rather than picking a single tensor library and tying the implementation of Keras to that library, Keras handles the problem in a modular way: several different backend engines can be plugged seamlessly into Keras.
At this time, Keras has three backend implementations available: the **TensorFlow** backend, the **Theano** backend, and the **CNTK** backend.
- [TensorFlow](http://www.tensorflow.org/) is an open-source symbolic tensor manipulation framework developed by Google.
- [Theano](http://deeplearning.net/software/theano/) is an open-source symbolic tensor manipulation framework developed by LISA Lab at Université de Montréal.
- [CNTK](https://www.microsoft.com/en-us/cognitive-toolkit/) is an open-source toolkit for deep learning developed by Microsoft.
In the future, we are likely to add more backend options.
----
## Switching from one backend to another
If you have run Keras at least once, you will find the Keras configuration file at:
`$HOME/.keras/keras.json`
If it isn't there, you can create it.
**NOTE for Windows users:** Please replace `$HOME` with `%USERPROFILE%`.
The default configuration file looks like this:
```
{
"image_data_format": "channels_last",
"epsilon": 1e-07,
"floatx": "float32",
"backend": "tensorflow"
}
```
Simply change the field `backend` to `"theano"`, `"tensorflow"`, or `"cntk"`, and Keras will use the new configuration the next time you run any Keras code.
You can also define the environment variable `KERAS_BACKEND`, which will override what is defined in your config file:
```bash
KERAS_BACKEND=tensorflow python -c "from keras import backend"
Using TensorFlow backend.
```
In Keras it is also possible to load backends beyond `"tensorflow"`, `"theano"`, and `"cntk"`. Keras can use an external backend, configured via the `"backend"` setting in the `keras.json` configuration file. Suppose you have a Python module called `my_module` that you would like to use as your external backend; the `keras.json` configuration file would be changed as follows:
```
{
"image_data_format": "channels_last",
"epsilon": 1e-07,
"floatx": "float32",
"backend": "my_package.my_module"
}
```
An external backend must be validated before it can be used: a valid backend must implement at least the following functions: `placeholder`, `variable` and `function`.
If an external backend is not valid because required entries are missing, an error is logged listing the missing entries.
----
## keras.json details
The `keras.json` configuration file contains the following settings:
```
{
"image_data_format": "channels_last",
"epsilon": 1e-07,
"floatx": "float32",
"backend": "tensorflow"
}
```
You can change these settings by editing `$HOME/.keras/keras.json`.
- `image_data_format`: String, either `"channels_last"` or `"channels_first"`. It specifies which data format convention Keras will follow. (`keras.backend.image_data_format()` returns it.)
    - For 2D data (e.g. images), `"channels_last"` assumes `(rows, cols, channels)` while `"channels_first"` assumes `(channels, rows, cols)`.
    - For 3D data, `"channels_last"` assumes `(conv_dim1, conv_dim2, conv_dim3, channels)` while `"channels_first"` assumes `(channels, conv_dim1, conv_dim2, conv_dim3)`.
- `epsilon`: Float, a numeric fuzzing constant used to avoid dividing by zero in some operations.
- `floatx`: String, `"float16"`, `"float32"`, or `"float64"`. Default float precision.
- `backend`: String, `"tensorflow"`, `"theano"`, or `"cntk"`.
## ไฝฟ็จๆฝ่ฑก Keras ๅ็ซฏ็ผๅๆฐไปฃ็
ๅฆๆไฝ ๅธๆไฝ ็ผๅ็ Keras ๆจกๅไธ Theano (`th`) ๅ TensorFlow (`tf`) ๅ
ผๅฎน๏ผๅๅฟ
้กป้่ฟๆฝ่ฑก Keras ๅ็ซฏ API ๆฅ็ผๅๅฎไปฌใไปฅไธๆฏไธไธชไป็ปใ
ๆจๅฏไปฅ้่ฟไปฅไธๆนๅผๅฏผๅ
ฅๅ็ซฏๆจกๅ๏ผ
```python
from keras import backend as K
```
The code below instantiates an input placeholder. It's equivalent to `tf.placeholder()` or `th.tensor.matrix()`, `th.tensor.tensor3()`, etc.
```python
inputs = K.placeholder(shape=(2, 4, 5))
# also works:
inputs = K.placeholder(shape=(None, 4, 5))
# also works:
inputs = K.placeholder(ndim=3)
```
The code below instantiates a variable. It's equivalent to `tf.Variable()` or `th.shared()`.
```python
import numpy as np
val = np.random.random((3, 4, 5))
var = K.variable(value=val)
# all-zeros variable:
var = K.zeros(shape=(3, 4, 5))
# all-ones variable:
var = K.ones(shape=(3, 4, 5))
```
Most tensor operations you will need can be done as you would in TensorFlow or Theano:
```python
# Initializing tensors with random numbers
b = K.random_uniform_variable(shape=(3, 4), low=0, high=1) # uniform distribution
c = K.random_normal_variable(shape=(3, 4), mean=0, scale=1) # Gaussian distribution
d = K.random_normal_variable(shape=(3, 4), mean=0, scale=1)
# Tensor arithmetic
a = b + c * K.abs(d)
c = K.dot(a, K.transpose(b))
a = K.sum(b, axis=1)
a = K.softmax(b)
a = K.concatenate([b, c], axis=-1)
# and so on...
```
----
## Backend functions
### backend
```python
keras.backend.backend()
```
Returns the name of the current backend (e.g. "tensorflow").
__Returns__
String, the name of the backend Keras is currently using.
__Example__
```python
>>> keras.backend.backend()
'tensorflow'
```
----
### symbolic
```python
keras.backend.symbolic(func)
```
Decorator used in TensorFlow 2.0 to enter the Keras graph.
__Arguments__
- __func__: Function to decorate.
__Returns__
Decorated function.
----
### eager
```python
keras.backend.eager(func)
```
Decorator used in TensorFlow 2.0 to exit the Keras graph.
__Arguments__
- __func__: Function to decorate.
__Returns__
Decorated function.
----
### get_uid
```python
keras.backend.get_uid(prefix='')
```
Provides a unique UID given a string prefix.
__Arguments__
- __prefix__: String.
__Returns__
An integer.
__Example__
```python
>>> keras.backend.get_uid('dense')
1
>>> keras.backend.get_uid('dense')
2
```
----
### manual_variable_initialization
```python
keras.backend.manual_variable_initialization(value)
```
Sets the manual variable initialization flag.
This boolean flag determines whether variables should be initialized as they are instantiated (default), or whether the user should handle the initialization themselves.
__Arguments__
- __value__: Python boolean.
----
### epsilon
```python
keras.backend.epsilon()
```
Returns the value of the fuzz factor used in numeric expressions.
__Returns__
A float.
__Example__
```python
>>> keras.backend.epsilon()
1e-07
```
----
### reset_uids
```python
keras.backend.reset_uids()
```
Resets graph identifiers.
----
### set_epsilon
```python
keras.backend.set_epsilon(e)
```
Sets the value of the fuzz factor used in numeric expressions.
__Arguments__
- __e__: Float. New value of epsilon.
__Example__
```python
>>> from keras import backend as K
>>> K.epsilon()
1e-07
>>> K.set_epsilon(1e-05)
>>> K.epsilon()
1e-05
```
----
### floatx
```python
keras.backend.floatx()
```
Returns the default float type, as a string.
(e.g. 'float16', 'float32', 'float64').
__Returns__
String, the current default float type.
__Example__
```python
>>> keras.backend.floatx()
'float32'
```
----
### set_floatx
```python
keras.backend.set_floatx(floatx)
```
Sets the default float type.
__Arguments__
- __floatx__: String, 'float16', 'float32', or 'float64'.
__Example__
```python
>>> from keras import backend as K
>>> K.floatx()
'float32'
>>> K.set_floatx('float16')
>>> K.floatx()
'float16'
```
----
### cast_to_floatx
```python
keras.backend.cast_to_floatx(x)
```
Casts a Numpy array to the default Keras float type.
__Arguments__
- __x__: Numpy array.
__Returns__
The same Numpy array, cast to its new type.
__Example__
```python
>>> from keras import backend as K
>>> K.floatx()
'float32'
>>> arr = numpy.array([1.0, 2.0], dtype='float64')
>>> arr.dtype
dtype('float64')
>>> new_arr = K.cast_to_floatx(arr)
>>> new_arr
array([ 1., 2.], dtype=float32)
>>> new_arr.dtype
dtype('float32')
```
----
### image_data_format
```python
keras.backend.image_data_format()
```
Returns the default image data format convention.
__Returns__
A string, either `'channels_first'` or `'channels_last'`.
__Example__
```python
>>> keras.backend.image_data_format()
'channels_first'
```
----
### set_image_data_format
```python
keras.backend.set_image_data_format(data_format)
```
Sets the value of the data format convention.
__Arguments__
- __data_format__: String. `'channels_first'` or `'channels_last'`.
__Example__
```python
>>> from keras import backend as K
>>> K.image_data_format()
'channels_first'
>>> K.set_image_data_format('channels_last')
>>> K.image_data_format()
'channels_last'
```
----
### learning_phase
```python
keras.backend.learning_phase()
```
Returns the learning phase flag.
The learning phase flag is a bool tensor (0 = test, 1 = train) to be passed as input to any Keras function that uses a different behavior at train time and test time.
__Returns__
Learning phase (scalar integer tensor or Python integer).
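A sketch of feeding the flag explicitly when evaluating a function that behaves differently at train and test time, such as one containing dropout (`model` and `x_batch` are assumed to exist already):
```python
from keras import backend as K

get_output = K.function([model.input, K.learning_phase()],
                        [model.output])

train_out = get_output([x_batch, 1])[0]  # 1 = training-time behavior
test_out = get_output([x_batch, 0])[0]   # 0 = test-time behavior
```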
----
### set_learning_phase
```python
keras.backend.set_learning_phase(value)
```
Sets the learning phase to a fixed value.
__Arguments__
- __value__: Learning phase value, either 0 or 1 (integer).
__Raises__
- __ValueError__: if `value` is neither `0` nor `1`.
----
### clear_session
```python
keras.backend.clear_session()
```
Destroys the current Keras graph and creates a new one.
Useful to avoid clutter from old models / layers.
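For example, when building many models in a loop (a sketch; `build_model` is a hypothetical factory function):
```python
from keras import backend as K

for _ in range(10):
    model = build_model()  # hypothetical: returns a freshly compiled model
    # ... train / evaluate the model here ...
    K.clear_session()      # drop the old graph before the next iteration
```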
----
### is_sparse
```python
keras.backend.is_sparse(tensor)
```
Returns whether a tensor is a sparse tensor.
__Arguments__
- __tensor__: A tensor instance.
__Returns__
A boolean.
__Example__
```python
>>> from keras import backend as K
>>> a = K.placeholder((2, 2), sparse=False)
>>> print(K.is_sparse(a))
False
>>> b = K.placeholder((2, 2), sparse=True)
>>> print(K.is_sparse(b))
True
```
----
### to_dense
```python
keras.backend.to_dense(tensor)
```
Converts a sparse tensor into a dense tensor and returns it.
__Arguments__
- __tensor__: A tensor instance (potentially sparse).
__Returns__
A dense tensor.
__Example__
```python
>>> from keras import backend as K
>>> b = K.placeholder((2, 2), sparse=True)
>>> print(K.is_sparse(b))
True
>>> c = K.to_dense(b)
>>> print(K.is_sparse(c))
False
```
----
### variable
```python
keras.backend.variable(value, dtype=None, name=None, constraint=None)
```
Instantiates a variable and returns it.
__Arguments__
- __value__: Numpy array, initial value of the tensor.
- __dtype__: Tensor type.
- __name__: Optional name string for the tensor.
- __constraint__: Optional projection function to be applied to the variable after an optimizer update.
__Returns__
A variable instance (with Keras metadata included).
__Example__
```python
>>> from keras import backend as K
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = K.variable(value=val, dtype='float64', name='example_var')
>>> K.dtype(kvar)
'float64'
>>> print(kvar)
example_var
>>> K.eval(kvar)
array([[ 1., 2.],
[ 3., 4.]])
```
----
### is_variable
```python
keras.backend.is_variable(x)
```
----
### constant
```python
keras.backend.constant(value, dtype=None, shape=None, name=None)
```
Creates a constant tensor.
__Arguments__
- __value__: A constant value (or list).
- __dtype__: The type of the elements of the resulting tensor.
- __shape__: Optional dimensions of the resulting tensor.
- __name__: Optional name for the tensor.
__Returns__
A constant tensor.
----
### is_keras_tensor
```python
keras.backend.is_keras_tensor(x)
```
Returns whether `x` is a Keras tensor.
A "Keras tensor" is a tensor that was returned by a Keras layer (`Layer` class) or by `Input`.
__Arguments__
- __x__: A candidate tensor.
__Returns__
A boolean: whether the argument is a Keras tensor.
__Raises__
- __ValueError__: In case `x` is not a symbolic tensor.
__Example__
```python
>>> from keras import backend as K
>>> from keras.layers import Input, Dense
>>> np_var = numpy.array([1, 2])
>>> K.is_keras_tensor(np_var) # A numpy array is not a symbolic tensor.
ValueError
>>> k_var = tf.placeholder('float32', shape=(1,1))
# A variable indirectly created outside of Keras is not a Keras tensor.
>>> K.is_keras_tensor(k_var)
False
>>> keras_var = K.variable(np_var)
# A variable created with the Keras backend is not a Keras tensor.
>>> K.is_keras_tensor(keras_var)
False
>>> keras_placeholder = K.placeholder(shape=(2, 4, 5))
# A placeholder is not a Keras tensor.
>>> K.is_keras_tensor(keras_placeholder)
False
>>> keras_input = Input([10])
>>> K.is_keras_tensor(keras_input) # An Input is a Keras tensor.
True
>>> keras_layer_output = Dense(10)(keras_input)
# Any Keras layer output is a Keras tensor.
>>> K.is_keras_tensor(keras_layer_output)
True
```
----
### is_tensor
```python
keras.backend.is_tensor(x)
```
----
### placeholder
```python
keras.backend.placeholder(shape=None, ndim=None, dtype=None, sparse=False, name=None)
```
Instantiates a placeholder tensor and returns it.
__Arguments__
- __shape__: Shape of the placeholder (integer tuple, may include `None` entries).
- __ndim__: Number of axes of the tensor. At least one of {`shape`, `ndim`} must be specified. If both are specified, `shape` is used.
- __dtype__: Placeholder type.
- __sparse__: Boolean, whether the placeholder should have a sparse type.
- __name__: Optional name string for the placeholder.
__Returns__
Tensor instance (with Keras metadata included).
__Example__
```python
>>> from keras import backend as K
>>> input_ph = K.placeholder(shape=(2, 4, 5))
>>> input_ph._keras_shape
(2, 4, 5)
>>> input_ph
<tf.Tensor 'Placeholder_4:0' shape=(2, 4, 5) dtype=float32>
```
----
### is_placeholder
```python
keras.backend.is_placeholder(x)
```
Returns whether `x` is a placeholder.
__Arguments__
- __x__: A candidate placeholder.
__Returns__
A boolean.
----
### shape
```python
keras.backend.shape(x)
```
Returns the symbolic shape of a tensor or variable.
__Arguments__
- __x__: A tensor or variable.
__Returns__
A symbolic shape (which is itself a tensor).
__Example__
```python
# TensorFlow example
>>> from keras import backend as K
>>> tf_session = K.get_session()
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = K.variable(value=val)
>>> inputs = keras.backend.placeholder(shape=(2, 4, 5))
>>> K.shape(kvar)
<tf.Tensor 'Shape_8:0' shape=(2,) dtype=int32>
>>> K.shape(inputs)
<tf.Tensor 'Shape_9:0' shape=(3,) dtype=int32>
# To get an integer shape (instead, you can use K.int_shape(x))
>>> K.shape(kvar).eval(session=tf_session)
array([2, 2], dtype=int32)
>>> K.shape(inputs).eval(session=tf_session)
array([2, 4, 5], dtype=int32)
```
----
### int_shape
```python
keras.backend.int_shape(x)
```
Returns the shape of a tensor or variable as a tuple of int or None entries.
__Arguments__
- __x__: Tensor or variable.
__Returns__
A tuple of integers (or None entries).
__Example__
```python
>>> from keras import backend as K
>>> inputs = K.placeholder(shape=(2, 4, 5))
>>> K.int_shape(inputs)
(2, 4, 5)
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = K.variable(value=val)
>>> K.int_shape(kvar)
(2, 2)
```
__Numpy implementation__
```python
def int_shape(x):
return x.shape
```
----
### ndim
```python
keras.backend.ndim(x)
```
Returns the number of axes in a tensor, as an integer.
__Arguments__
- __x__: Tensor or variable.
__Returns__
Integer (scalar), number of axes.
__Example__
```python
>>> from keras import backend as K
>>> inputs = K.placeholder(shape=(2, 4, 5))
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = K.variable(value=val)
>>> K.ndim(inputs)
3
>>> K.ndim(kvar)
2
```
__Numpy implementation__
```python
def ndim(x):
return x.ndim
```
----
### size
```python
keras.backend.size(x, name=None)
```
Returns the size of a tensor.
__Arguments__
- __x__: Tensor or variable.
- __name__: A name for the operation (optional).
__Returns__
Size of the tensor.
__Example__
```python
>>> from keras import backend as K
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = K.variable(value=val)
>>> K.size(kvar)
<tf.Tensor: id=9, shape=(), dtype=int32, numpy=4>
```
----
### dtype
```python
keras.backend.dtype(x)
```
Returns the dtype of a Keras tensor or variable, as a string.
__Arguments__
- __x__: Tensor or variable.
__Returns__
String, the dtype of `x`.
__Example__
```python
>>> from keras import backend as K
>>> K.dtype(K.placeholder(shape=(2,4,5)))
'float32'
>>> K.dtype(K.placeholder(shape=(2,4,5), dtype='float32'))
'float32'
>>> K.dtype(K.placeholder(shape=(2,4,5), dtype='float64'))
'float64'
# Keras variable
>>> kvar = K.variable(np.array([[1, 2], [3, 4]]))
>>> K.dtype(kvar)
'float32_ref'
>>> kvar = K.variable(np.array([[1, 2], [3, 4]]), dtype='float32')
>>> K.dtype(kvar)
'float32_ref'
```
__Numpy implementation__
```python
def dtype(x):
return x.dtype.name
```
----
### eval
```python
keras.backend.eval(x)
```
Evaluates the value of a tensor.
__Arguments__
- __x__: A tensor.
__Returns__
A Numpy array.
__Example__
```python
>>> from keras import backend as K
>>> kvar = K.variable(np.array([[1, 2], [3, 4]]), dtype='float32')
>>> K.eval(kvar)
array([[ 1., 2.],
[ 3., 4.]], dtype=float32)
```
__Numpy implementation__
```python
def eval(x):
return x
```
----
### zeros
```python
keras.backend.zeros(shape, dtype=None, name=None)
```
Instantiates an all-zeros variable and returns it.
__Arguments__
- __shape__: Tuple of integers, shape of the returned Keras variable.
- __dtype__: String, data type of the returned Keras variable.
- __name__: String, name of the returned Keras variable.
__Returns__
A variable (including Keras metadata), filled with `0.0`.
Note that if `shape` is symbolic, we cannot return a variable,
and will return a dynamically-shaped tensor instead.
__Example__
```python
>>> from keras import backend as K
>>> kvar = K.zeros((3,4))
>>> K.eval(kvar)
array([[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.]], dtype=float32)
```
__Numpy implementation__
```python
def zeros(shape, dtype=floatx(), name=None):
return np.zeros(shape, dtype=dtype)
```
----
### ones
```python
keras.backend.ones(shape, dtype=None, name=None)
```
Instantiates an all-ones variable and returns it.
__Arguments__
- __shape__: Tuple of integers, shape of the returned Keras variable.
- __dtype__: String, data type of the returned Keras variable.
- __name__: String, name of the returned Keras variable.
__Returns__
A Keras variable, filled with `1.0`.
Note that if `shape` is symbolic, we cannot return a variable,
and will return a dynamically-shaped tensor instead.
__Example__
```python
>>> from keras import backend as K
>>> kvar = K.ones((3,4))
>>> K.eval(kvar)
array([[ 1., 1., 1., 1.],
[ 1., 1., 1., 1.],
[ 1., 1., 1., 1.]], dtype=float32)
```
__Numpy ๅฎ็ฐ__
```python
def ones(shape, dtype=floatx(), name=None):
return np.ones(shape, dtype=dtype)
```
----
### eye
```python
keras.backend.eye(size, dtype=None, name=None)
```
ๅฎไพๅไธไธชๅไฝ็ฉ้ตๅนถ่ฟๅๅฎใ
__ๅๆฐ__
- __size__: ๅ
็ป๏ผ่กๅๅ็ๆฐ็ฎใๅฆๆๆฏๆดๆฐ๏ผๅไธบ่กๆฐใ
- __dtype__: ๅญ็ฌฆไธฒ๏ผ่ฟๅ็ Keras ๅ้็ๆฐๆฎ็ฑปๅใ
- __name__: ๅญ็ฌฆไธฒ๏ผ่ฟๅ็ Keras ๅ้็ๅ็งฐใ
__่ฟๅ__
Keras ๅ้๏ผไธไธชๅไฝ็ฉ้ตใ
__็คบไพ__
```python
>>> from keras import backend as K
>>> K.eval(K.eye(3))
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]], dtype=float32)
>>> K.eval(K.eye((2, 3)))
array([[1., 0., 0.],
       [0., 1., 0.]], dtype=float32)
```
__Numpy ๅฎ็ฐ__
```python
def eye(size, dtype=None, name=None):
if isinstance(size, (list, tuple)):
n, m = size
else:
n, m = size, size
return np.eye(n, m, dtype=dtype)
```
----
### zeros_like
```python
keras.backend.zeros_like(x, dtype=None, name=None)
```
实例化与另一个张量相同尺寸的全零变量。
__ๅๆฐ__
- __x__: Keras ๅ้ๆ Keras ๅผ ้ใ
- __dtype__: ๅญ็ฌฆไธฒ๏ผ่ฟๅ็ Keras ๅ้็็ฑปๅใ
ๅฆๆไธบ None๏ผๅไฝฟ็จ x ็็ฑปๅใ
- __name__: ๅญ็ฌฆไธฒ๏ผๆๅๅปบ็ๅ้็ๅ็งฐใ
__่ฟๅ__
ไธไธช Keras ๅ้๏ผๅ
ถๅฝข็ถไธบ x๏ผ็จ้ถๅกซๅ
ใ
__็คบไพ__
```python
>>> from keras import backend as K
>>> kvar = K.variable(np.random.random((2,3)))
>>> kvar_zeros = K.zeros_like(kvar)
>>> K.eval(kvar_zeros)
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
```
__Numpy ๅฎ็ฐ__
```python
def zeros_like(x, dtype=floatx(), name=None):
return np.zeros_like(x, dtype=dtype)
```
----
### ones_like
```python
keras.backend.ones_like(x, dtype=None, name=None)
```
实例化与另一个张量相同形状的全一变量。
__ๅๆฐ__
- __x__: Keras ๅ้ๆๅผ ้ใ
- __dtype__: ๅญ็ฌฆไธฒ๏ผ่ฟๅ็ Keras ๅ้็็ฑปๅใ
ๅฆๆไธบ None๏ผๅไฝฟ็จ x ็็ฑปๅใ
- __name__: ๅญ็ฌฆไธฒ๏ผๆๅๅปบ็ๅ้็ๅ็งฐใ
__่ฟๅ__
ไธไธช Keras ๅ้๏ผๅ
ถๅฝข็ถไธบ x๏ผ็จไธๅกซๅ
ใ
__็คบไพ__
```python
>>> from keras import backend as K
>>> kvar = K.variable(np.random.random((2,3)))
>>> kvar_ones = K.ones_like(kvar)
>>> K.eval(kvar_ones)
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
```
__Numpy ๅฎ็ฐ__
```python
def ones_like(x, dtype=floatx(), name=None):
return np.ones_like(x, dtype=dtype)
```
----
### identity
```python
keras.backend.identity(x, name=None)
```
่ฟๅไธ่พๅ
ฅๅผ ้็ธๅๅ
ๅฎน็ๅผ ้ใ
__ๅๆฐ__
- __x__: ่พๅ
ฅๅผ ้ใ
- __name__: ๅญ็ฌฆไธฒ๏ผๆๅๅปบ็ๅ้็ๅ็งฐใ
__่ฟๅ__
一个相同尺寸、类型和内容的张量。
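__Numpy 参考草稿__
一个最小的 Numpy 草稿,仅作示意(假设输入为 Numpy 数组,并非任一后端的实际实现):
```python
import numpy as np
def identity(x, name=None):
    # name 参数仅为与后端签名保持一致,Numpy 下不使用
    return np.copy(x)
```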
----
### random_uniform_variable
```python
keras.backend.random_uniform_variable(shape, low, high, dtype=None, name=None, seed=None)
```
ไฝฟ็จไปๅๅๅๅธไธญๆฝๆ ทๅบๆฅ็ๅผๆฅๅฎไพๅๅ้ใ
__ๅๆฐ__
- __shape__: ๆดๆฐๅ
็ป๏ผ่ฟๅ็ Keras ๅ้็ๅฐบๅฏธใ
- __low__: ๆตฎ็นๆฐ๏ผ่พๅบ้ด้็ไธ็ใ
- __high__: ๆตฎ็นๆฐ๏ผ่พๅบ้ด้็ไธ็ใ
- __dtype__: ๅญ็ฌฆไธฒ๏ผ่ฟๅ็ Keras ๅ้็ๆฐๆฎ็ฑปๅใ
- __name__: ๅญ็ฌฆไธฒ๏ผ่ฟๅ็ Keras ๅ้็ๅ็งฐใ
- __seed__: ๆดๆฐ๏ผ้ๆบ็งๅญใ
__่ฟๅ__
一个 Keras 变量,以抽取的样本填充。
__็คบไพ__
```python
# TensorFlow ็คบไพ
>>> kvar = K.random_uniform_variable((2,3), 0, 1)
>>> kvar
<tensorflow.python.ops.variables.Variable object at 0x10ab40b10>
>>> K.eval(kvar)
array([[ 0.10940075, 0.10047495, 0.476143 ],
[ 0.66137183, 0.00869417, 0.89220798]], dtype=float32)
```
__Numpy ๅฎ็ฐ__
```python
def random_uniform_variable(shape, low, high, dtype=None, name=None, seed=None):
return (high - low) * np.random.random(shape).astype(dtype) + low
```
----
### random_normal_variable
```python
keras.backend.random_normal_variable(shape, mean, scale, dtype=None, name=None, seed=None)
```
ไฝฟ็จไปๆญฃๆๅๅธไธญๆฝๅ็ๅผๅฎไพๅไธไธชๅ้ใ
__ๅๆฐ__
- __shape__: ๆดๆฐๅ
็ป๏ผ่ฟๅ็Kerasๅ้็ๅฐบๅฏธใ
- __mean__: ๆตฎ็นๅ๏ผๆญฃๆๅๅธๅนณๅๅผใ
- __scale__: ๆตฎ็นๅ๏ผๆญฃๆๅๅธๆ ๅๅทฎใ
- __dtype__: ๅญ็ฌฆไธฒ๏ผ่ฟๅ็Kerasๅ้็ dtypeใ
- __name__: ๅญ็ฌฆไธฒ๏ผ่ฟๅ็Kerasๅ้็ๅ็งฐใ
- __seed__: ๆดๆฐ๏ผ้ๆบ็งๅญใ
__่ฟๅ__
一个 Keras 变量,以抽取的样本填充。
__็คบไพ__
```python
# TensorFlow ็คบไพ
>>> kvar = K.random_normal_variable((2,3), 0, 1)
>>> kvar
<tensorflow.python.ops.variables.Variable object at 0x10ab12dd0>
>>> K.eval(kvar)
array([[ 1.19591331, 0.68685907, -0.63814116],
[ 0.92629528, 0.28055015, 1.70484698]], dtype=float32)
```
__Numpy ๅฎ็ฐ__
```python
def random_normal_variable(shape, mean, scale, dtype=None, name=None, seed=None):
return scale * np.random.randn(*shape).astype(dtype) + mean
```
----
### count_params
```python
keras.backend.count_params(x)
```
返回 Keras 变量或张量中的静态元素数。
__ๅๆฐ__
- __x__: Keras ๅ้ๆๅผ ้ใ
__่ฟๅ__
整数,`x` 中的元素数量,即,数组中静态维度的乘积。
__็คบไพ__
```python
>>> kvar = K.zeros((2,3))
>>> K.count_params(kvar)
6
>>> K.eval(kvar)
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
```
__Numpy ๅฎ็ฐ__
```python
def count_params(x):
return x.size
```
----
### cast
```python
keras.backend.cast(x, dtype)
```
ๅฐๅผ ้่ฝฌๆขๅฐไธๅ็ dtype ๅนถ่ฟๅใ
ไฝ ๅฏไปฅ่ฝฌๆขไธไธช Keras ๅ้๏ผไฝๅฎไป็ถ่ฟๅไธไธช Keras ๅผ ้ใ
__ๅๆฐ__
- __x__: Keras ๅผ ้๏ผๆๅ้๏ผใ
- __dtype__: ๅญ็ฌฆไธฒ๏ผ (`'float16'`, `'float32'` ๆ `'float64'`)ใ
__่ฟๅ__
Keras ๅผ ้๏ผ็ฑปๅไธบ `dtype`ใ
__็คบไพ__
```python
>>> from keras import backend as K
>>> input = K.placeholder((2, 3), dtype='float32')
>>> input
<tf.Tensor 'Placeholder_2:0' shape=(2, 3) dtype=float32>
# 如下所示,转换不会原地进行。
>>> K.cast(input, dtype='float16')
<tf.Tensor 'Cast_1:0' shape=(2, 3) dtype=float16>
>>> input
<tf.Tensor 'Placeholder_2:0' shape=(2, 3) dtype=float32>
# 你需要重新赋值。
>>> input = K.cast(input, dtype='float16')
>>> input
<tf.Tensor 'Cast_2:0' shape=(2, 3) dtype=float16>
```
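__Numpy 参考草稿__
同样仅作示意的 Numpy 草稿:`astype` 返回新数组而非原地转换,与上面示例中「需要重新赋值」的行为一致。
```python
import numpy as np
def cast(x, dtype):
    # 返回转换为目标 dtype 的新数组
    return np.asarray(x).astype(dtype)
```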
----
### update
```python
keras.backend.update(x, new_x)
```
ๅฐ `x` ็ๅผๆดๆฐไธบ `new_x`ใ
__ๅๆฐ__
- __x__: ไธไธชย `Variable`ใ
- __new_x__: ไธไธชไธ `x` ๅฐบๅฏธ็ธๅ็ๅผ ้ใ
__่ฟๅ__
ๆดๆฐๅ็ๅ้ `x`ใ
----
### update_add
```python
keras.backend.update_add(x, increment)
```
้่ฟๅขๅ `increment` ๆฅๆดๆฐ `x` ็ๅผใ
__ๅๆฐ__
- __x__: ไธไธช `Variable`ใ
- __increment__: ไธ `x` ๅฝข็ถ็ธๅ็ๅผ ้ใ
__่ฟๅ__
ๆดๆฐๅ็ๅ้ `x`ใ
----
### update_sub
```python
keras.backend.update_sub(x, decrement)
```
้่ฟๅ `decrement` ๆฅๆดๆฐ `x` ็ๅผใ
__ๅๆฐ__
- __x__: ไธไธช `Variable`ใ
- __decrement__: ไธ `x` ๅฝข็ถ็ธๅ็ๅผ ้ใ
__่ฟๅ__
ๆดๆฐๅ็ๅ้ `x`ใ
----
### moving_average_update
```python
keras.backend.moving_average_update(x, value, momentum)
```
่ฎก็ฎๅ้็็งปๅจๅนณๅๅผใ
__ๅๆฐ__
- __x__: ไธไธช `Variable`ใ
- __value__: ไธ `x` ๅฝข็ถ็ธๅ็ๅผ ้ใ
- __momentum__: ็งปๅจๅนณๅๅจ้ใ
__่ฟๅ__
ๆดๆฐๅ้็ๆไฝใ
----
### dot
```python
keras.backend.dot(x, y)
```
ๅฐ 2 ไธชๅผ ้๏ผๅ/ๆๅ้๏ผ็ธไนๅนถ่ฟๅไธไธช*ๅผ ้*ใ
ๅฝ่ฏๅพๅฐ nD ๅผ ้ไธ nD ๅผ ้็ธไนๆถ๏ผ
ๅฎไผ้็ฐ Theano ่กไธบใ
(ไพๅฆ `(2, 3) * (4, 3, 5) -> (2, 4, 5)`)
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
- __y__: ๅผ ้ๆๅ้ใ
__่ฟๅ__
ไธไธชๅผ ้๏ผ`x` ๅ `y` ็็น็งฏใ
__็คบไพ__
```python
# ๅผ ้ไน้ด็็น็งฏ
>>> x = K.placeholder(shape=(2, 3))
>>> y = K.placeholder(shape=(3, 4))
>>> xy = K.dot(x, y)
>>> xy
<tf.Tensor 'MatMul_9:0' shape=(2, 4) dtype=float32>
```
```python
# ๅผ ้ไน้ด็็น็งฏ
>>> x = K.placeholder(shape=(32, 28, 3))
>>> y = K.placeholder(shape=(3, 4))
>>> xy = K.dot(x, y)
>>> xy
<tf.Tensor 'MatMul_9:0' shape=(32, 28, 4) dtype=float32>
```
```python
# ็ฑป Theano ่กไธบ็็คบไพ
>>> x = K.random_uniform_variable(shape=(2, 3), low=0, high=1)
>>> y = K.ones((4, 3, 5))
>>> xy = K.dot(x, y)
>>> K.int_shape(xy)
(2, 4, 5)
```
__Numpy ๅฎ็ฐ__
```python
def dot(x, y):
return np.dot(x, y)
```
----
### batch_dot
```python
keras.backend.batch_dot(x, y, axes=None)
```
ๆน้ๅ็็น็งฏใ
ๅฝ `x` ๅ `y` ๆฏๆน้ๆฐๆฎๆถ๏ผ
`batch_dot` ็จไบ่ฎก็ฎ `x` ๅ `y` ็็น็งฏ๏ผ
ๅณๅฐบๅฏธไธบ `(batch_size, :)`ใ
`batch_dot` 产生一个比输入尺寸更小的张量或变量。
ๅฆๆ็ปดๆฐๅๅฐๅฐ 1๏ผๆไปฌไฝฟ็จ `expand_dims` ๆฅ็กฎไฟ ndim ่ณๅฐไธบ 2ใ
__ๅๆฐ__
- __x__: `ndim >= 2` ็ Keras ๅผ ้ๆๅ้ใ
- __y__: `ndim >= 2` ็ Keras ๅผ ้ๆๅ้ใ
- __axes__: ๆดๆฐๆๅ
็ป (int, int)ใ ้่ฆๅฝ็บฆ็็ฎๆ ็ปดๅบฆใ
__่ฟๅ__
ไธไธชๅฐบๅฏธ็ญไบ `x` ็ๅฐบๅฏธ๏ผๅๅปๆปๅ็็ปดๅบฆ๏ผๅ `y` ็ๅฐบๅฏธ๏ผๅๅปๆนๆฌก็ปดๅบฆๅๆปๅ็็ปดๅบฆ๏ผ็่ฟๆฅ็ๅผ ้ใ
ๅฆๆๆๅ็็งฉไธบ 1๏ผๆไปฌๅฐๅฎ้ๆฐ่ฝฌๆขไธบ `(batch_size, 1)`ใ
__็คบไพ__
ๅ่ฎพ `x = [[1, 2], [3, 4]]` ๅ `y = [[5, 6], [7, 8]]`๏ผ
`batch_dot(x, y, axes=1) = [[17], [53]]` ๆฏ `x.dot(y.T)` ็ไธปๅฏน่ง็บฟ๏ผ
尽管我们不需要计算非对角元素。
ไผชไปฃ็ ๏ผ
```
inner_products = []
for xi, yi in zip(x, y):
inner_products.append(xi.dot(yi))
result = stack(inner_products)
```
ๅฐบๅฏธๆจๆญ๏ผ
่ฎฉ `x` ็ๅฐบๅฏธไธบ `(100, 20)`๏ผไปฅๅ `y` ็ๅฐบๅฏธไธบ `(100, 30, 20)`ใ
ๅฆๆ `axes` ๆฏ (1, 2)๏ผ่ฆๆพๅบ็ปๆๅผ ้็ๅฐบๅฏธ๏ผ
ๅพช็ฏ `x` ๅย `y` ็ๅฐบๅฏธ็ๆฏไธไธช็ปดๅบฆใ
* `x.shape[0]` : 100 : ้ๅ ๅฐ่พๅบๅฝข็ถ๏ผ
* `x.shape[1]` : 20 : ไธ้ๅ ๅฐ่พๅบๅฝข็ถ๏ผ
`x` ็็ฌฌไธไธช็ปดๅบฆๅทฒ็ป่ขซๅ ๅไบ (`dot_axes[0]` = 1)ใ
* `y.shape[0]` : 100 : ไธ้ๅ ๅฐ่พๅบๅฝข็ถ๏ผๆปๆฏๅฟฝ็ฅ `y` ็็ฌฌไธ็ปด
* `y.shape[1]` : 30 : ้ๅ ๅฐ่พๅบๅฝข็ถ๏ผ
* `y.shape[2]` : 20 : ไธ้ๅ ๅฐ่พๅบๅฝข็ถ๏ผ
`y` ็็ฌฌไบไธช็ปดๅบฆๅทฒ็ป่ขซๅ ๅไบ (`dot_axes[0]` = 2)ใ
`output_shape` = `(100, 30)`
```python
>>> x_batch = K.ones(shape=(32, 20, 1))
>>> y_batch = K.ones(shape=(32, 30, 20))
>>> xy_batch_dot = K.batch_dot(x_batch, y_batch, axes=(1, 2))
>>> K.int_shape(xy_batch_dot)
(32, 1, 30)
```
__Numpy ๅฎ็ฐ__
<details>
<summary>ๅฑ็คบ Numpy ๅฎ็ฐ</summary>
```python
def batch_dot(x, y, axes=None):
if x.ndim < 2 or y.ndim < 2:
raise ValueError('Batch dot requires inputs of rank 2 or more.')
if isinstance(axes, int):
axes = [axes, axes]
elif isinstance(axes, tuple):
axes = list(axes)
if axes is None:
if y.ndim == 2:
axes = [x.ndim - 1, y.ndim - 1]
else:
axes = [x.ndim - 1, y.ndim - 2]
if any([isinstance(a, (list, tuple)) for a in axes]):
raise ValueError('Multiple target dimensions are not supported. ' +
'Expected: None, int, (int, int), ' +
'Provided: ' + str(axes))
# ๅค็่ด่ฝด
if axes[0] < 0:
axes[0] += x.ndim
if axes[1] < 0:
axes[1] += y.ndim
if 0 in axes:
raise ValueError('Can not perform batch dot over axis 0.')
if x.shape[0] != y.shape[0]:
raise ValueError('Can not perform batch dot on inputs'
' with different batch sizes.')
d1 = x.shape[axes[0]]
d2 = y.shape[axes[1]]
if d1 != d2:
raise ValueError('Can not do batch_dot on inputs with shapes ' +
str(x.shape) + ' and ' + str(y.shape) +
' with axes=' + str(axes) + '. x.shape[%d] != '
'y.shape[%d] (%d != %d).' % (axes[0], axes[1], d1, d2))
result = []
axes = [axes[0] - 1, axes[1] - 1] # ๅฟฝ็ฅๆนๆฌก็ปดๅบฆ
for xi, yi in zip(x, y):
result.append(np.tensordot(xi, yi, axes))
result = np.array(result)
if result.ndim == 1:
result = np.expand_dims(result, -1)
return result
```
</details>
----
### transpose
```python
keras.backend.transpose(x)
```
ๅฐๅผ ้่ฝฌ็ฝฎๅนถ่ฟๅใ
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
__่ฟๅ__
ไธไธชๅผ ้ใ
__็คบไพ__
```python
>>> var = K.variable([[1, 2, 3], [4, 5, 6]])
>>> K.eval(var)
array([[ 1., 2., 3.],
[ 4., 5., 6.]], dtype=float32)
>>> var_transposed = K.transpose(var)
>>> K.eval(var_transposed)
array([[ 1., 4.],
[ 2., 5.],
[ 3., 6.]], dtype=float32)
```
```python
>>> inputs = K.placeholder((2, 3))
>>> inputs
<tf.Tensor 'Placeholder_11:0' shape=(2, 3) dtype=float32>
>>> input_transposed = K.transpose(inputs)
>>> input_transposed
<tf.Tensor 'transpose_4:0' shape=(3, 2) dtype=float32>
```
__Numpy ๅฎ็ฐ__
```python
def transpose(x):
return np.transpose(x)
```
----
### gather
```python
keras.backend.gather(reference, indices)
```
ๅจๅผ ้ `reference` ไธญๆฃ็ดข็ดขๅผ `indices` ็ๅ
็ด ใ
__ๅๆฐ__
- __reference__: ไธไธชๅผ ้ใ
- __indices__: ็ดขๅผ็ๆดๆฐๅผ ้ใ
__่ฟๅ__
ไธ `reference` ็ฑปๅ็ธๅ็ๅผ ้ใ
__Numpy ๅฎ็ฐ__
```python
def gather(reference, indices):
return reference[indices]
```
----
### max
```python
keras.backend.max(x, axis=None, keepdims=False)
```
ๅผ ้ไธญ็ๆๅคงๅผใ
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
- __axis__: ไธไธชๆดๆฐ๏ผ้่ฆๅจๅชไธช่ฝดๅฏปๆพๆๅคงๅผใ
- __keepdims__: ๅธๅฐๅผ๏ผๆฏๅฆไฟ็ๅๅฐบๅฏธใ
ๅฆๆ `keepdims` ไธบ `False`๏ผๅๅผ ้็็งฉๅ 1ใ
ๅฆๆ `keepdims` ไธบ `True`๏ผ็ผฉๅฐ็็ปดๅบฆไฟ็ไธบ้ฟๅบฆ 1ใ
__่ฟๅ__
`x` ไธญๆๅคงๅผ็ๅผ ้ใ
__Numpy ๅฎ็ฐ__
```python
def max(x, axis=None, keepdims=False):
if isinstance(axis, list):
axis = tuple(axis)
return np.max(x, axis=axis, keepdims=keepdims)
```
----
### min
```python
keras.backend.min(x, axis=None, keepdims=False)
```
ๅผ ้ไธญ็ๆๅฐๅผใ
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
- __axis__: ไธไธชๆดๆฐ๏ผ้่ฆๅจๅชไธช่ฝดๅฏปๆพๆๅคงๅผใ
- __keepdims__: ๅธๅฐๅผ๏ผๆฏๅฆไฟ็ๅๅฐบๅฏธใ
ๅฆๆ `keepdims` ไธบ `False`๏ผๅๅผ ้็็งฉๅ 1ใ
ๅฆๆ `keepdims` ไธบ `True`๏ผ็ผฉๅฐ็็ปดๅบฆไฟ็ไธบ้ฟๅบฆ 1ใ
__่ฟๅ__
`x` ไธญๆๅฐๅผ็ๅผ ้ใ
__Numpy ๅฎ็ฐ__
```python
def min(x, axis=None, keepdims=False):
if isinstance(axis, list):
axis = tuple(axis)
return np.min(x, axis=axis, keepdims=keepdims)
```
----
### sum
```python
keras.backend.sum(x, axis=None, keepdims=False)
```
่ฎก็ฎๅผ ้ๅจๆไธๆๅฎ่ฝด็ๅใ
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
- __axis__: ไธไธชๆดๆฐ๏ผ้่ฆๅ ๅ็่ฝดใ
- __keepdims__: ๅธๅฐๅผ๏ผๆฏๅฆไฟ็ๅๅฐบๅฏธใ
ๅฆๆ `keepdims` ไธบ `False`๏ผๅๅผ ้็็งฉๅ 1ใ
ๅฆๆ `keepdims` ไธบ `True`๏ผ็ผฉๅฐ็็ปดๅบฆไฟ็ไธบ้ฟๅบฆ 1ใ
__่ฟๅ__
`x` ็ๅ็ๅผ ้ใ
__Numpy ๅฎ็ฐ__
```python
def sum(x, axis=None, keepdims=False):
if isinstance(axis, list):
axis = tuple(axis)
return np.sum(x, axis=axis, keepdims=keepdims)
```
----
### prod
```python
keras.backend.prod(x, axis=None, keepdims=False)
```
ๅจๆไธๆๅฎ่ฝด๏ผ่ฎก็ฎๅผ ้ไธญ็ๅผ็ไน็งฏใ
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
- __axis__: ไธไธชๆดๆฐ้่ฆ่ฎก็ฎไน็งฏ็่ฝดใ
- __keepdims__: ๅธๅฐๅผ๏ผๆฏๅฆไฟ็ๅๅฐบๅฏธใ
ๅฆๆ `keepdims` ไธบ `False`๏ผๅๅผ ้็็งฉๅ 1ใ
ๅฆๆ `keepdims` ไธบ `True`๏ผ็ผฉๅฐ็็ปดๅบฆไฟ็ไธบ้ฟๅบฆ 1ใ
__่ฟๅ__
`x` ็ๅ
็ด ็ไน็งฏ็ๅผ ้ใ
__Numpy ๅฎ็ฐ__
```python
def prod(x, axis=None, keepdims=False):
if isinstance(axis, list):
axis = tuple(axis)
return np.prod(x, axis=axis, keepdims=keepdims)
```
----
### cumsum
```python
keras.backend.cumsum(x, axis=0)
```
ๅจๆไธๆๅฎ่ฝด๏ผ่ฎก็ฎๅผ ้ไธญ็ๅผ็็ดฏๅ ๅใ
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
- __axis__: ไธไธชๆดๆฐ๏ผ้่ฆๅ ๅ็่ฝดใ
__่ฟๅ__
`x` ๅจ `axis` ่ฝด็็ดฏๅ ๅ็ๅผ ้ใ
__Numpy ๅฎ็ฐ__
```python
def cumsum(x, axis=0):
return np.cumsum(x, axis=axis)
```
----
### cumprod
```python
keras.backend.cumprod(x, axis=0)
```
ๅจๆไธๆๅฎ่ฝด๏ผ่ฎก็ฎๅผ ้ไธญ็ๅผ็็ดฏ็งฏไน็งฏใ
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
- __axis__: ไธไธชๆดๆฐ๏ผ้่ฆ่ฎก็ฎไน็งฏ็่ฝดใ
__่ฟๅ__
`x` ๅจ `axis` ่ฝด็็ดฏไน็ๅผ ้ใ
__Numpy ๅฎ็ฐ__
```python
def cumprod(x, axis=0):
return np.cumprod(x, axis=axis)
```
----
### var
```python
keras.backend.var(x, axis=None, keepdims=False)
```
ๅผ ้ๅจๆไธๆๅฎ่ฝด็ๆนๅทฎใ
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
- __axis__: ไธไธชๆดๆฐ๏ผ่ฆ่ฎก็ฎๆนๅทฎ็่ฝดใ
- __keepdims__: ๅธๅฐๅผ๏ผๆฏๅฆไฟ็ๅๅฐบๅฏธใ
ๅฆๆ `keepdims` ไธบ `False`๏ผๅๅผ ้็็งฉๅ 1ใ
ๅฆๆ `keepdims` ไธบ `True`๏ผ็ผฉๅฐ็็ปดๅบฆไฟ็ไธบ้ฟๅบฆ 1ใ
__่ฟๅ__
`x` ๅ
็ด ็ๆนๅทฎ็ๅผ ้ใ
__Numpy ๅฎ็ฐ__
```python
def var(x, axis=None, keepdims=False):
if isinstance(axis, list):
axis = tuple(axis)
return np.var(x, axis=axis, keepdims=keepdims)
```
----
### std
```python
keras.backend.std(x, axis=None, keepdims=False)
```
ๅผ ้ๅจๆไธๆๅฎ่ฝด็ๆ ๅๅทฎใ
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
- __axis__: ไธไธชๆดๆฐ๏ผ่ฆ่ฎก็ฎๆ ๅๅทฎ็่ฝดใ
- __keepdims__: ๅธๅฐๅผ๏ผๆฏๅฆไฟ็ๅๅฐบๅฏธใ
ๅฆๆ `keepdims` ไธบ `False`๏ผๅๅผ ้็็งฉๅ 1ใ
ๅฆๆ `keepdims` ไธบ `True`๏ผ็ผฉๅฐ็็ปดๅบฆไฟ็ไธบ้ฟๅบฆ 1ใ
__่ฟๅ__
`x` ๅ
็ด ็ๆ ๅๅทฎ็ๅผ ้ใ
__Numpy ๅฎ็ฐ__
```python
def std(x, axis=None, keepdims=False):
if isinstance(axis, list):
axis = tuple(axis)
return np.std(x, axis=axis, keepdims=keepdims)
```
----
### mean
```python
keras.backend.mean(x, axis=None, keepdims=False)
```
ๅผ ้ๅจๆไธๆๅฎ่ฝด็ๅๅผใ
__ๅๆฐ__
- __x__: 张量或变量。
- __axis__: ๆดๆฐๆๅ่กจใ้่ฆ่ฎก็ฎๅๅผ็่ฝดใ
- __keepdims__: ๅธๅฐๅผ๏ผๆฏๅฆไฟ็ๅๅฐบๅฏธใ
ๅฆๆ `keepdims` ไธบ `False`๏ผๅ `axis` ไธญๆฏไธ้กน็ๅผ ้็งฉๅ 1ใ
ๅฆๆ `keepdims` ไธบ `True`๏ผๅ็ผฉๅฐ็็ปดๅบฆไฟ็ไธบ้ฟๅบฆ 1ใ
__่ฟๅ__
`x` ๅ
็ด ็ๅๅผ็ๅผ ้ใ
__Numpy ๅฎ็ฐ__
```python
def mean(x, axis=None, keepdims=False):
if isinstance(axis, list):
axis = tuple(axis)
return np.mean(x, axis=axis, keepdims=keepdims)
```
----
### any
```python
keras.backend.any(x, axis=None, keepdims=False)
```
按位归约(逻辑 OR)。
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
- __axis__: ๆง่กๅฝ็บฆๆไฝ็่ฝดใ
- __keepdims__: ๆฏๅฆๆพๅผๆๅนฟๆญๅฝ็บฆ็่ฝดใ
__่ฟๅ__
ไธไธช uint8 ๅผ ้ (0s ๅ 1s)ใ
__Numpy ๅฎ็ฐ__
```python
def any(x, axis=None, keepdims=False):
if isinstance(axis, list):
axis = tuple(axis)
return np.any(x, axis=axis, keepdims=keepdims)
```
----
### all
```python
keras.backend.all(x, axis=None, keepdims=False)
```
按位归约(逻辑 AND)。
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
- __axis__: ๆง่กๅฝ็บฆๆไฝ็่ฝดใ
- __keepdims__: ๆฏๅฆๆพๅผๆๅนฟๆญๅฝ็บฆ็่ฝดใ
__่ฟๅ__
ไธไธช uint8 ๅผ ้ (0s ๅ 1s)ใ
__Numpy ๅฎ็ฐ__
```python
def all(x, axis=None, keepdims=False):
if isinstance(axis, list):
axis = tuple(axis)
return np.all(x, axis=axis, keepdims=keepdims)
```
----
### argmax
```python
keras.backend.argmax(x, axis=-1)
```
่ฟๅๆๅฎ่ฝด็ๆๅคงๅผ็็ดขๅผใ
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
- __axis__: ๆง่กๅฝ็บฆๆไฝ็่ฝดใ
__่ฟๅ__
ไธไธชๅผ ้ใ
__Numpy ๅฎ็ฐ__
```python
def argmax(x, axis=-1):
return np.argmax(x, axis=axis)
```
----
### argmin
```python
keras.backend.argmin(x, axis=-1)
```
่ฟๅๆๅฎ่ฝด็ๆๅฐๅผ็็ดขๅผใ
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
- __axis__: ๆง่กๅฝ็บฆๆไฝ็่ฝดใ
__่ฟๅ__
ไธไธชๅผ ้ใ
__Numpy ๅฎ็ฐ__
```python
def argmin(x, axis=-1):
return np.argmin(x, axis=axis)
```
----
### square
```python
keras.backend.square(x)
```
ๅ
็ด ็บง็ๅนณๆนๆไฝใ
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
__่ฟๅ__
ไธไธชๅผ ้ใ
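__Numpy 参考草稿__
一个仅作示意的 Numpy 草稿:
```python
import numpy as np
def square(x):
    # 逐元素平方
    return np.square(x)
```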
----
### abs
```python
keras.backend.abs(x)
```
ๅ
็ด ็บง็็ปๅฏนๅผๆไฝใ
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
__่ฟๅ__
ไธไธชๅผ ้ใ
----
### sqrt
```python
keras.backend.sqrt(x)
```
ๅ
็ด ็บง็ๅนณๆนๆ นๆไฝใ
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
__่ฟๅ__
ไธไธชๅผ ้ใ
__Numpy ๅฎ็ฐ__
```python
def sqrt(x):
y = np.sqrt(x)
y[np.isnan(y)] = 0.
return y
```
----
### exp
```python
keras.backend.exp(x)
```
ๅ
็ด ็บง็ๆๆฐ่ฟ็ฎๆไฝใ
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
__่ฟๅ__
ไธไธชๅผ ้ใ
----
### log
```python
keras.backend.log(x)
```
ๅ
็ด ็บง็ๅฏนๆฐ่ฟ็ฎๆไฝใ
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
__่ฟๅ__
ไธไธชๅผ ้ใ
----
### logsumexp
```python
keras.backend.logsumexp(x, axis=None, keepdims=False)
```
计算 log(sum(exp(张量在某一轴的元素)))。
这个函数在数值上比 log(sum(exp(x))) 更稳定。
它避免了求大输入的指数造成的上溢,以及求小输入的对数造成的下溢。
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
- __axis__: ไธไธชๆดๆฐ๏ผ้่ฆๅฝ็บฆ็่ฝดใ
- __keepdims__: ๅธๅฐๅผ๏ผๆฏๅฆไฟ็ๅๅฐบๅฏธใ
ๅฆๆ `keepdims` ไธบ `False`๏ผๅๅผ ้็็งฉๅ 1ใ
ๅฆๆ `keepdims` ไธบ `True`๏ผ็ผฉๅฐ็็ปดๅบฆไฟ็ไธบ้ฟๅบฆ 1ใ
__่ฟๅ__
ๅฝ็บฆๅ็ๅผ ้ใ
__Numpy ๅฎ็ฐ__
```python
import scipy as sp
import scipy.special  # 使 sp.special 可用
def logsumexp(x, axis=None, keepdims=False):
if isinstance(axis, list):
axis = tuple(axis)
return sp.special.logsumexp(x, axis=axis, keepdims=keepdims)
```
----
### round
```python
keras.backend.round(x)
```
ๅ
็ด ็บงๅฐๅ่ไบๅ
ฅๅฐๆๆฅ่ฟ็ๆดๆฐใ
ๅจๅนณๅฑ็ๆ
ๅตไธ๏ผไฝฟ็จ็่ๅ
ฅๆจกๅผๆฏใๅถๆฐ็ไธๅใใ
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
__่ฟๅ__
ไธไธชๅผ ้ใ
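__Numpy 参考草稿__
一个仅作示意的 Numpy 草稿:`np.round` 对 .5 同样采用「舍入到最近偶数」(银行家舍入)模式,与上述行为一致。
```python
import numpy as np
def round(x):
    # np.round(0.5) -> 0.0, np.round(1.5) -> 2.0
    return np.round(x)
```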
----
### sign
```python
keras.backend.sign(x)
```
ๅ
็ด ็บง็็ฌฆๅท่ฟ็ฎใ
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
__่ฟๅ__
ไธไธชๅผ ้ใ
----
### pow
```python
keras.backend.pow(x, a)
```
ๅ
็ด ็บง็ๆๆฐ่ฟ็ฎๆไฝใ
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
- __a__: Python ๆดๆฐใ
__่ฟๅ__
ไธไธชๅผ ้ใ
__Numpy ๅฎ็ฐ__
```python
def pow(x, a=1.):
return np.power(x, a)
```
----
### clip
```python
keras.backend.clip(x, min_value, max_value)
```
ๅ
็ด ็บง่ฃๅชใ
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
- __min_value__: Python ๆตฎ็น๏ผๆดๆฐๆๅผ ้ใ
- __max_value__: Python ๆตฎ็น๏ผๆดๆฐๆๅผ ้ใ
__่ฟๅ__
ไธไธชๅผ ้ใ
__Numpy ๅฎ็ฐ__
```python
def clip(x, min_value, max_value):
return np.clip(x, min_value, max_value)
```
----
### equal
```python
keras.backend.equal(x, y)
```
้ไธชๅ
็ด ๅฏนๆฏไธคไธชๅผ ้็็ธ็ญๆ
ๅตใ
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
- __y__: ๅผ ้ๆๅ้ใ
__่ฟๅ__
ไธไธชๅธๅฐๅผ ้ใ
__Numpy ๅฎ็ฐ__
```python
def equal(x, y):
return x == y
```
----
### not_equal
```python
keras.backend.not_equal(x, y)
```
้ไธชๅ
็ด ๅฏนๆฏไธคไธชๅผ ้็ไธ็ธ็ญๆ
ๅตใ
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
- __y__: ๅผ ้ๆๅ้ใ
__่ฟๅ__
ไธไธชๅธๅฐๅผ ้ใ
__Numpy ๅฎ็ฐ__
```python
def not_equal(x, y):
return x != y
```
----
### greater
```python
keras.backend.greater(x, y)
```
้ไธชๅ
็ด ๆฏๅฏน (x > y) ็็ๅผใ
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
- __y__: ๅผ ้ๆๅ้ใ
__่ฟๅ__
ไธไธชๅธๅฐๅผ ้ใ
__Numpy ๅฎ็ฐ__
```python
def greater(x, y):
return x > y
```
----
### greater_equal
```python
keras.backend.greater_equal(x, y)
```
้ไธชๅ
็ด ๆฏๅฏน (x >= y) ็็ๅผใ
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
- __y__: ๅผ ้ๆๅ้ใ
__่ฟๅ__
ไธไธชๅธๅฐๅผ ้ใ
__Numpy ๅฎ็ฐ__
```python
def greater_equal(x, y):
return x >= y
```
----
### less
```python
keras.backend.less(x, y)
```
้ไธชๅ
็ด ๆฏๅฏน (x < y) ็็ๅผใ
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
- __y__: ๅผ ้ๆๅ้ใ
__่ฟๅ__
ไธไธชๅธๅฐๅผ ้ใ
__Numpy ๅฎ็ฐ__
```python
def less(x, y):
return x < y
```
----
### less_equal
```python
keras.backend.less_equal(x, y)
```
้ไธชๅ
็ด ๆฏๅฏน (x <= y) ็็ๅผใ
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
- __y__: ๅผ ้ๆๅ้ใ
__่ฟๅ__
ไธไธชๅธๅฐๅผ ้ใ
__Numpy ๅฎ็ฐ__
```python
def less_equal(x, y):
return x <= y
```
----
### maximum
```python
keras.backend.maximum(x, y)
```
้ไธชๅ
็ด ๆฏๅฏนไธคไธชๅผ ้็ๆๅคงๅผใ
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
- __y__: ๅผ ้ๆๅ้ใ
__่ฟๅ__
ไธไธชๅผ ้ใ
__Numpy ๅฎ็ฐ__
```python
def maximum(x, y):
return np.maximum(x, y)
```
----
### minimum
```python
keras.backend.minimum(x, y)
```
้ไธชๅ
็ด ๆฏๅฏนไธคไธชๅผ ้็ๆๅฐๅผใ
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
- __y__: ๅผ ้ๆๅ้ใ
__่ฟๅ__
ไธไธชๅผ ้ใ
__Numpy ๅฎ็ฐ__
```python
def minimum(x, y):
return np.minimum(x, y)
```
----
### sin
```python
keras.backend.sin(x)
```
้ไธชๅ
็ด ่ฎก็ฎ x ็ sin ๅผใ
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
__่ฟๅ__
ไธไธชๅผ ้ใ
----
### cos
```python
keras.backend.cos(x)
```
้ไธชๅ
็ด ่ฎก็ฎ x ็ cos ๅผใ
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
__่ฟๅ__
ไธไธชๅผ ้ใ
----
### normalize_batch_in_training
```python
keras.backend.normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=0.001)
```
่ฎก็ฎๆนๆฌก็ๅๅผๅๆ ๅๅทฎ๏ผ็ถๅๅจๆนๆฌกไธๅบ็จๆนๆฌกๆ ๅๅใ
__ๅๆฐ__
- __x__: ่พๅ
ฅๅผ ้ๆๅ้ใ
- __gamma__: ็จไบ็ผฉๆพ่พๅ
ฅ็ๅผ ้ใ
- __beta__: ็จไบไธญๅฟๅ่พๅ
ฅ็ๅผ ้ใ
- __reduction_axes__: ๆดๆฐ่ฟญไปฃ๏ผ้่ฆๆ ๅๅ็่ฝดใ
- __epsilon__: ๆจก็ณๅ ๅญใ
__่ฟๅ__
长度为 3 的元组:`(normalized_tensor, mean, variance)`。
----
### batch_normalization
```python
keras.backend.batch_normalization(x, mean, var, beta, gamma, epsilon=0.001)
```
ๅจ็ปๅฎ็ mean๏ผvar๏ผbeta ๅ gamma ไธๅบ็จๆน้ๆ ๅๅใ
ๅณ๏ผ่ฟๅ๏ผ
`output = (x - mean) / (sqrt(var) + epsilon) * gamma + beta`
__ๅๆฐ__
- __x__: ่พๅ
ฅๅผ ้ๆๅ้ใ
- __mean__: ๆนๆฌก็ๅๅผใ
- __var__: ๆนๆฌก็ๆนๅทฎใ
- __beta__: ็จไบไธญๅฟๅ่พๅ
ฅ็ๅผ ้ใ
- __gamma__: ็จไบ็ผฉๆพ่พๅ
ฅ็ๅผ ้ใ
- __epsilon__: ๆจก็ณๅ ๅญใ
__่ฟๅ__
ไธไธชๅผ ้ใ
----
### concatenate
```python
keras.backend.concatenate(tensors, axis=-1)
```
ๅบไบๆๅฎ็่ฝด๏ผ่ฟๆฅๅผ ้็ๅ่กจใ
__ๅๆฐ__
- __tensors__: ้่ฆ่ฟๆฅ็ๅผ ้ๅ่กจใ
- __axis__: ่ฟๆฅ็่ฝดใ
__่ฟๅ__
ไธไธชๅผ ้ใ
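__Numpy 参考草稿__
一个仅作示意的 Numpy 草稿:
```python
import numpy as np
def concatenate(tensors, axis=-1):
    # 沿指定轴连接张量列表
    return np.concatenate(tensors, axis=axis)
```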
----
### reshape
```python
keras.backend.reshape(x, shape)
```
ๅฐๅผ ้้ๅกไธบๆๅฎ็ๅฐบๅฏธใ
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
- __shape__: 目标尺寸元组。
__่ฟๅ__
ไธไธชๅผ ้ใ
----
### permute_dimensions
```python
keras.backend.permute_dimensions(x, pattern)
```
้ๆฐๆๅๅผ ้็่ฝดใ
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
- __pattern__: 维度索引的元组,例如 `(0, 2, 1)`。
__่ฟๅ__
ไธไธชๅผ ้ใ
----
### resize_images
```python
keras.backend.resize_images(x, height_factor, width_factor, data_format)
```
调整 4D 张量中包含的图像的大小。
__ๅๆฐ__
- __x__: ้่ฆ่ฐๆด็ๅผ ้ๆๅ้ใ
- __height_factor__: ๆญฃๆดๆฐใ
- __width_factor__: ๆญฃๆดๆฐใ
- __data_format__: ๅญ็ฌฆไธฒ๏ผ`"channels_last"` ๆ `"channels_first"`ใ
__่ฟๅ__
ไธไธชๅผ ้ใ
__ๅผๅธธ__
- __ValueError__: ๅฆๆ `data_format` ๆขไธๆฏย `"channels_last"` ไนไธๆฏ `"channels_first"`ใ
----
### resize_volumes
```python
keras.backend.resize_volumes(x, depth_factor, height_factor, width_factor, data_format)
```
调整 5D 张量中包含的体积。
__ๅๆฐ__
- __x__: ้่ฆ่ฐๆด็ๅผ ้ๆๅ้ใ
- __depth_factor__: ๆญฃๆดๆฐใ
- __height_factor__: ๆญฃๆดๆฐใ
- __width_factor__: ๆญฃๆดๆฐใ
- __data_format__: ๅญ็ฌฆไธฒ๏ผ`"channels_last"` ๆ `"channels_first"`ใ
__่ฟๅ__
ไธไธชๅผ ้ใ
__ๅผๅธธ__
- __ValueError__: ๅฆๆ `data_format` ๆขไธๆฏย `"channels_last"` ไนไธๆฏ `"channels_first"`ใ
----
### repeat_elements
```python
keras.backend.repeat_elements(x, rep, axis)
```
沿某一轴重复张量的元素,如 `np.repeat`。
如果 `x` 的尺寸为 `(s1, s2, s3)` 而 `axis` 为 `1`,
则输出尺寸为 `(s1, s2 * rep, s3)`。
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
- __rep__: Python ๆดๆฐ๏ผ้ๅคๆฌกๆฐใ
- __axis__: ้่ฆ้ๅค็่ฝดใ
__่ฟๅ__
ไธไธชๅผ ้ใ
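__Numpy 参考草稿__
一个仅作示意的 Numpy 草稿:
```python
import numpy as np
def repeat_elements(x, rep, axis):
    # 例如 x.shape == (s1, s2, s3) 且 axis=1 时,输出尺寸为 (s1, s2 * rep, s3)
    return np.repeat(x, rep, axis=axis)
```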
----
### repeat
```python
keras.backend.repeat(x, n)
```
้ๅคไธไธช 2D ๅผ ้ใ
ๅฆๆ `x` ็ๅฐบๅฏธไธบ `(samples, dim)` ๅนถไธย `n` ไธบ `2`๏ผ
ๅ่พๅบ็ๅฐบๅฏธไธบ `(samples, 2, dim)`ใ
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
- __n__: Python ๆดๆฐ๏ผ้ๅคๆฌกๆฐใ
__่ฟๅ__
ไธไธชๅผ ้ใ
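__Numpy 参考草稿__
一个仅作示意的 Numpy 草稿:
```python
import numpy as np
def repeat(x, n):
    # (samples, dim) -> (samples, n, dim)
    return np.repeat(np.expand_dims(x, 1), n, axis=1)
```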
----
### arange
```python
keras.backend.arange(start, stop=None, step=1, dtype='int32')
```
ๅๅปบไธไธชๅ
ๅซๆดๆฐๅบๅ็ 1D ๅผ ้ใ
่ฏฅๅฝๆฐๅๆฐไธ Theano ็ `arange` ๅฝๆฐ็็บฆๅฎ็ธๅ๏ผ
ๅฆๆๅชๆไพไบไธไธชๅๆฐ๏ผ้ฃๅฎๅฐฑๆฏ `stop` ๅๆฐใ
่ฟๅ็ๅผ ้็้ป่ฎค็ฑปๅๆฏ `int32`๏ผไปฅๅน้
TensorFlow ็้ป่ฎคๅผใ
__ๅๆฐ__
- __start__: ่ตทๅงๅผใ
- __stop__: ็ปๆๅผใ
- __step__: ไธคไธช่ฟ็ปญๅผไน้ด็ๅทฎใ
- __dtype__: ่ฆไฝฟ็จ็ๆดๆฐ็ฑปๅใ
__่ฟๅ__
ไธไธชๆดๆฐๅผ ้ใ
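__Numpy 参考草稿__
一个仅作示意的 Numpy 草稿:
```python
import numpy as np
def arange(start, stop=None, step=1, dtype='int32'):
    # 与上文约定一致:只提供一个参数时,它被视为 stop
    if stop is None:
        start, stop = 0, start
    return np.arange(start, stop, step, dtype=dtype)
```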
----
### tile
```python
keras.backend.tile(x, n)
```
创建一个用 `n` 平铺的 `x` 张量。
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
- __n__: 整数列表。长度必须与 `x` 中的维数相同。
__่ฟๅ__
ไธไธชๅนณ้บ็ๅผ ้ใ
__็คบไพ__
```python
>>> from keras import backend as K
>>> kvar = K.variable(np.random.random((2, 3)))
>>> kvar_tile = K.tile(K.eye(2), (2, 3))
>>> K.eval(kvar_tile)
array([[1., 0., 1., 0., 1., 0.],
[0., 1., 0., 1., 0., 1.],
[1., 0., 1., 0., 1., 0.],
[0., 1., 0., 1., 0., 1.]], dtype=float32)
```
__Numpy ๅฎ็ฐ__
```python
def tile(x, n):
return np.tile(x, n)
```
----
### flatten
```python
keras.backend.flatten(x)
```
ๅฑๅนณไธไธชๅผ ้ใ
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
__่ฟๅ__
ไธไธช้ๆฐ่ฐๆดไธบ 1D ็ๅผ ้ใ
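__Numpy 参考草稿__
一个仅作示意的 Numpy 草稿:
```python
import numpy as np
def flatten(x):
    # 展平为 1D
    return np.reshape(x, -1)
```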
----
### batch_flatten
```python
keras.backend.batch_flatten(x)
```
将一个 nD 张量变成一个第 0 维相同的 2D 张量。
ๆขๅฅ่ฏ่ฏด๏ผๅฎๅฐๆนๆฌกไธญ็ๆฏไธไธชๆ ทๆฌๅฑๅนณใ
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
__่ฟๅ__
ไธไธชๅผ ้ใ
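__Numpy 参考草稿__
一个仅作示意的 Numpy 草稿:
```python
import numpy as np
def batch_flatten(x):
    # 保留第 0 维(批次维),展平其余所有维度
    return np.reshape(x, (x.shape[0], -1))
```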
----
### expand_dims
```python
keras.backend.expand_dims(x, axis=-1)
```
ๅจ็ดขๅผ `axis` ่ฝด๏ผๆทปๅ 1 ไธชๅฐบๅฏธ็็ปดๅบฆใ
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
- __axis__: ้่ฆๆทปๅ ๆฐ็่ฝด็ไฝ็ฝฎใ
__่ฟๅ__
一个扩展了维度的张量。
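__Numpy 参考草稿__
一个仅作示意的 Numpy 草稿:
```python
import numpy as np
def expand_dims(x, axis=-1):
    # 在 axis 位置插入一个长度为 1 的新维度
    return np.expand_dims(x, axis)
```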
----
### squeeze
```python
keras.backend.squeeze(x, axis)
```
ๅจ็ดขๅผ `axis` ่ฝด๏ผ็งป้ค 1 ไธชๅฐบๅฏธ็็ปดๅบฆใ
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
- __axis__: ้่ฆไธขๅผ็่ฝดใ
__่ฟๅ__
ไธไธชไธ `x` ๆฐๆฎ็ธๅไฝ็ปดๅบฆ้ไฝ็ๅผ ้ใ
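__Numpy 参考草稿__
一个仅作示意的 Numpy 草稿:
```python
import numpy as np
def squeeze(x, axis):
    # 移除 axis 位置长度为 1 的维度
    return np.squeeze(x, axis=axis)
```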
----
### temporal_padding
```python
keras.backend.temporal_padding(x, padding=(1, 1))
```
ๅกซๅ
3D ๅผ ้็ไธญ้ด็ปดๅบฆใ
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
- __padding__: 2 个整数的元组,在第一个维度的开始和结束处添加多少个零。
__返回__
一个填充的 3D 张量。
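__Numpy 参考草稿__
一个仅作示意的 Numpy 草稿(假设 x 为 3D 数组):
```python
import numpy as np
def temporal_padding(x, padding=(1, 1)):
    # 仅在中间维度(时间步维)的前后补零
    return np.pad(x, ((0, 0), padding, (0, 0)), mode='constant')
```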
----
### spatial_2d_padding
```python
keras.backend.spatial_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None)
```
ๅกซๅ
4D ๅผ ้็็ฌฌไบ็ปดๅ็ฌฌไธ็ปดใ
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
- __padding__: 2 ๅ
็ป็ๅ
็ป๏ผๅกซๅ
ๆจกๅผใ
- __data_format__: ๅญ็ฌฆไธฒ๏ผ`"channels_last"` ๆ `"channels_first"`ใ
__่ฟๅ__
ไธไธชๅกซๅ
็ 4D ๅผ ้ใ
__ๅผๅธธ__
- __ValueError__: ๅฆๆ `data_format` ๆขไธๆฏ `"channels_last"` ไนไธๆฏ `"channels_first"`ใ
----
### spatial_3d_padding
```python
keras.backend.spatial_3d_padding(x, padding=((1, 1), (1, 1), (1, 1)), data_format=None)
```
沿着深度、高度和宽度三个维度填充 5D 张量。
分别使用 "padding[0]"、"padding[1]" 和 "padding[2]" 来左右填充这些维度。
对于 'channels_last' 数据格式,第 2、3、4 维将被填充。
对于 'channels_first' 数据格式,第 3、4、5 维将被填充。
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
- __padding__: 3 ๅ
็ป็ๅ
็ป๏ผๅกซๅ
ๆจกๅผใ
- __data_format__: ๅญ็ฌฆไธฒ๏ผ`"channels_last"` ๆ `"channels_first"`ใ
__่ฟๅ__
ไธไธชๅกซๅ
็ 5D ๅผ ้ใ
__ๅผๅธธ__
- __ValueError__: ๅฆๆ `data_format` ๆขไธๆฏ `"channels_last"` ไนไธๆฏ `"channels_first"`ใ
----
### stack
```python
keras.backend.stack(x, axis=0)
```
将秩为 `R` 的张量列表堆叠成秩为 `R + 1` 的张量。
__ๅๆฐ__
- __x__: ๅผ ้ๅ่กจใ
- __axis__: ้่ฆๆง่กๅ ๅ ็่ฝดใ
__่ฟๅ__
ไธไธชๅผ ้ใ
__Numpy ๅฎ็ฐ__
```python
def stack(x, axis=0):
return np.stack(x, axis=axis)
```
----
### one_hot
```python
keras.backend.one_hot(indices, num_classes)
```
่ฎก็ฎไธไธชๆดๆฐๅผ ้็ one-hot ่กจ็คบใ
__ๅๆฐ__
- __indices__: nD 整数张量,尺寸为 `(batch_size, dim1, dim2, ... dim(n-1))`。
- __num_classes__: ๆดๆฐ๏ผ้่ฆ่่็็ฑปๅซๆฐใ
__่ฟๅ__
่พๅ
ฅ็ (n + 1)D one-hot ่กจ็คบ๏ผ
ๅฐบๅฏธไธบ `(batch_size, dim1, dim2, ... dim(n-1), num_classes)`ใ
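__Numpy 参考草稿__
一个仅作示意的 Numpy 草稿(假设 indices 为非负整数数组):
```python
import numpy as np
def one_hot(indices, num_classes):
    # 输出尺寸为 indices.shape + (num_classes,)
    return np.eye(num_classes)[np.asarray(indices)]
```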
----
### reverse
```python
keras.backend.reverse(x, axes)
```
ๆฒฟๆๅฎ็่ฝดๅ่ฝฌๅผ ้ใ
__ๅๆฐ__
- __x__: ้่ฆๅ่ฝฌ็ๅผ ้ใ
- __axes__: ๆดๆฐๆๆดๆฐ่ฟญไปฃใ้่ฆๅ่ฝฌ็่ฝดใ
__่ฟๅ__
ไธไธชๅผ ้ใ
__Numpy ๅฎ็ฐ__
```python
def reverse(x, axes):
if isinstance(axes, list):
axes = tuple(axes)
return np.flip(x, axes)
```
----
### slice
```python
keras.backend.slice(x, start, size)
```
ไปๅผ ้ไธญๆๅไธไธชๅ็ใ
__ๅๆฐ__
- __x__: ่พๅ
ฅๅผ ้ใ
- __start__: ๆดๆฐๅ่กจ/ๅ
็ป๏ผ่กจๆๆฏไธช่ฝด็่ตทๅงๅ็็ดขๅผไฝ็ฝฎใ
- __size__: ๆดๆฐๅ่กจ/ๅ
็ป๏ผ่กจๆๆฏไธช่ฝดไธๅ็ๅคๅฐ็ปดๅบฆใ
__่ฟๅ__
ไธไธชๅ็ๅผ ้๏ผ
```python
new_x = x[start[0]: start[0] + size[0], ..., start[-1]: start[-1] + size[-1]]
```
__ๅผๅธธ__
- __ValueError__: 如果维度和索引的尺寸不匹配。
__Numpy ๅฎ็ฐ__
```python
py_slice = slice  # 预先保存内建 slice 的引用,因为下面的函数名会将其覆盖
def slice(x, start, size):
    slices = [py_slice(i, i + j) for i, j in zip(start, size)]
    return x[tuple(slices)]
```
----
### get_value
```python
keras.backend.get_value(x)
```
่ฟๅไธไธชๅ้็ๅผใ
__ๅๆฐ__
- __x__: ่พๅ
ฅๅ้ใ
__่ฟๅ__
ไธไธช Numpy ๆฐ็ปใ
----
### batch_get_value
```python
keras.backend.batch_get_value(ops)
```
่ฟๅๅคไธชๅผ ้ๅ้็ๅผใ
__ๅๆฐ__
- __ops__: ่ฆ่ฟ่ก็ๆไฝๅ่กจใ
__่ฟๅ__
ไธไธช Numpy ๆฐ็ป็ๅ่กจใ
----
### set_value
```python
keras.backend.set_value(x, value)
```
ไฝฟ็จ Numpy ๆฐ็ป่ฎพ็ฝฎๅ้็ๅผใ
__ๅๆฐ__
- __x__: ้่ฆ่ฎพ็ฝฎๆฐๅผ็ๅ้ใ
- __value__: ้่ฆ่ฎพ็ฝฎ็ๅผ๏ผ
ไธไธชๅฐบๅฏธ็ธๅ็ Numpy ๆฐ็ปใ
----
### batch_set_value
```python
keras.backend.batch_set_value(tuples)
```
ไธๆฌก่ฎพ็ฝฎๅคไธชๅผ ้ๅ้็ๅผใ
__ๅๆฐ__
- __tuples__: ๅ
็ป `(tensor, value)` ็ๅ่กจใ
`value` ๅบ่ฏฅๆฏไธไธช Numpy ๆฐ็ปใ
----
### print_tensor
```python
keras.backend.print_tensor(x, message='')
```
ๅจ่ฏไผฐๆถๆๅฐ `message` ๅๅผ ้็ๅผใ
่ฏทๆณจๆ๏ผ`print_tensor` ่ฟๅไธไธชไธ `x` ็ธๅ็ๆฐๅผ ้๏ผๅบ่ฏฅๅจๅ้ข็ไปฃ็ ไธญไฝฟ็จๅฎใๅฆๅๅจ่ฏไผฐ่ฟ็จไธญไธไผ่่ๆๅฐๆไฝใ
__็คบไพ__
```python
>>> x = K.print_tensor(x, message="x is: ")
```
__ๅๆฐ__
- __x__: ้่ฆๆๅฐ็ๅผ ้ใ
- __message__: ้่ฆไธๅผ ้ไธ่ตทๆๅฐ็ๆถๆฏใ
__่ฟๅ__
ๅไธไธชไธๅ็ๅผ ้ย `x`ใ
----
### function
```python
keras.backend.function(inputs, outputs, updates=None)
```
ๅฎไพๅ Keras ๅฝๆฐใ
----
### gradients
```python
keras.backend.gradients(loss, variables)
```
่ฟๅ `variables` ๅจ `loss` ไธ็ๆขฏๅบฆใ
__ๅๆฐ__
- __loss__: ้่ฆๆๅฐๅ็ๆ ้ๅผ ้ใ
- __variables__: ๅ้ๅ่กจใ
__่ฟๅ__
ไธไธชๆขฏๅบฆๅผ ้ใ
----
### stop_gradient
```python
keras.backend.stop_gradient(variables)
```
返回 `variables`,但是对于其他变量,其梯度为零。
__ๅๆฐ__
- __variables__: 需要考虑的张量或张量列表,任何的其他变量保持不变。
__่ฟๅ__
单个张量或张量列表(取决于传递的参数),
与任何其他变量具有恒定的梯度。
----
### rnn
```python
keras.backend.rnn(step_function, inputs, initial_states, go_backwards=False, mask=None, constants=None, unroll=False, input_length=None)
```
ๅจๅผ ้็ๆถ้ด็ปดๅบฆ่ฟญไปฃใ
__ๅๆฐ__
- __step_function__: RNN ๆญฅ้ชคๅฝๆฐ๏ผ
- __inputs__: 尺寸为 `(samples, ...)` 的张量
(不含时间维度), 表示批次样本在某个时间步的输入。
- __states__: ๅผ ้ๅ่กจใ
- __outputs__: ๅฐบๅฏธไธบ `(samples, output_dim)` ็ๅผ ้
(ไธๅซๆถ้ด็ปดๅบฆ)
- __new_states__: ๅผ ้ๅ่กจ๏ผไธ `states` ้ฟๅบฆๅๅฐบๅฏธ็ธๅใ
列表中的第一个状态必须是前一个时间步的输出张量。
- __inputs__: ๆถๅบๆฐๆฎๅผ ้ `(samples, time, ...)`
(ๆๅฐ 3D)ใ
- __initial_states__: 尺寸为 `(samples, output_dim)` 的张量
(不含时间维度),包含步骤函数中使用的状态的初始值。
- __go_backwards__: ๅธๅฐๅผใๅฆๆไธบ True๏ผไปฅ็ธๅ็้กบๅบๅจๆถ้ด็ปดไธ่ฟ่ก่ฟญไปฃๅนถ่ฟๅ็ธๅ็ๅบๅใ
- __mask__: 尺寸为 `(samples, time, 1)` 的二进制张量,对于被屏蔽的每个元素都为零。
- __constants__: ๆฏไธชๆญฅ้ชคไผ ้็ๅธธ้ๅผๅ่กจใ
- __unroll__: ๆฏๅฆๅฑๅผ RNN ๆไฝฟ็จ็ฌฆๅทๅพช็ฏ๏ผไพ่ตไบๅ็ซฏ็ `while_loop`ๆ `scan`๏ผใ
- __input_length__: 与 TensorFlow 实现不相关。如果使用 Theano 展开,则必须指定。
__่ฟๅ__
ไธไธชๅ
็ป๏ผ`(last_output, outputs, new_states)`ใ
- __last_output__: rnn ็ๆๅ่พๅบ๏ผๅฐบๅฏธไธบ `(samples, ...)`ใ
- __outputs__: 尺寸为 `(samples, time, ...)` 的张量,其中
每一项 `outputs[s, t]` 是样本 `s` 在时间 `t` 的步骤函数输出值。
- __new_states__: ๅผ ้ๅ่กจ๏ผๆๆญฅ้ชคๅฝๆฐ่ฟๅ็ๆๅ็ถๆ๏ผ
ๅฐบๅฏธไธบ `(samples, ...)`ใ
__ๅผๅธธ__
- __ValueError__: ๅฆๆ่พๅ
ฅ็็ปดๅบฆๅฐไบ 3ใ
- __ValueError__: ๅฆๆย `unroll` ไธบ `True` ไฝ่พๅ
ฅๆถ้ดๆญฅๅนถไธๆฏๅบๅฎ็ๆฐๅญใ
- __ValueError__: ๅฆๆๆไพไบ `mask` (้ `None`) ไฝๆชๆไพ `states` (`len(states)` == 0)ใ
__Numpy ๅฎ็ฐ__
<details>
<summary>ๅฑ็คบ Numpy ๅฎ็ฐ</summary>
```python
def rnn(step_function, inputs, initial_states,
go_backwards=False, mask=None, constants=None,
unroll=False, input_length=None):
if constants is None:
constants = []
output_sample, _ = step_function(inputs[:, 0], initial_states + constants)
if mask is not None:
        if mask.dtype != bool:
            mask = mask.astype(bool)
if mask.shape != inputs.shape[:2]:
raise ValueError(
'mask should have `shape=(samples, time)`, '
'got {}'.format(mask.shape))
def expand_mask(mask_, x):
# expand mask so that `mask[:, t].ndim == x.ndim`
while mask_.ndim < x.ndim + 1:
mask_ = np.expand_dims(mask_, axis=-1)
return mask_
output_mask = expand_mask(mask, output_sample)
states_masks = [expand_mask(mask, state) for state in initial_states]
if input_length is None:
input_length = inputs.shape[1]
assert input_length == inputs.shape[1]
time_index = range(input_length)
if go_backwards:
time_index = time_index[::-1]
outputs = []
states_tm1 = initial_states # tm1 means "t minus one" as in "previous timestep"
output_tm1 = np.zeros(output_sample.shape)
for t in time_index:
output_t, states_t = step_function(inputs[:, t], states_tm1 + constants)
if mask is not None:
output_t = np.where(output_mask[:, t], output_t, output_tm1)
states_t = [np.where(state_mask[:, t], state_t, state_tm1)
for state_mask, state_t, state_tm1
in zip(states_masks, states_t, states_tm1)]
outputs.append(output_t)
states_tm1 = states_t
output_tm1 = output_t
return outputs[-1], np.stack(outputs, axis=1), states_tm1
```
</details>
----
### switch
```python
keras.backend.switch(condition, then_expression, else_expression)
```
ๆ นๆฎไธไธชๆ ้ๅผๅจไธคไธชๆไฝไน้ดๅๆขใ
่ฏทๆณจๆ๏ผ`then_expression` ๅ `else_expression`
้ฝๅบ่ฏฅๆฏ*็ธๅๅฐบๅฏธ*็็ฌฆๅทๅผ ้ใ
__ๅๆฐ__
- __condition__: ๅผ ้ (`int` ๆ `bool`)ใ
- __then_expression__: ๅผ ้ๆ่ฟๅๅผ ้็ๅฏ่ฐ็จๅฝๆฐใ
- __else_expression__: ๅผ ้ๆ่ฟๅๅผ ้็ๅฏ่ฐ็จๅฝๆฐใ
__่ฟๅ__
้ๆฉ็ๅผ ้ใ
__ๅผๅธธ__
- __ValueError__: 如果 `condition` 的秩大于两个表达式的秩。
__Numpy ๅฎ็ฐ__
```python
def switch(condition, then_expression, else_expression):
cond_float = condition.astype(floatx())
while cond_float.ndim < then_expression.ndim:
cond_float = cond_float[..., np.newaxis]
return cond_float * then_expression + (1 - cond_float) * else_expression
```
----
### in_train_phase
```python
keras.backend.in_train_phase(x, alt, training=None)
```
在训练阶段选择 `x`,其他阶段选择 `alt`。
่ฏทๆณจๆ `alt` ๅบ่ฏฅไธ `x` ๅฐบๅฏธ็ธๅใ
__ๅๆฐ__
- __x__: ๅจ่ฎญ็ป้ถๆฎต้่ฆ่ฟๅ็ x
(ๅผ ้ๆ่ฟๅๅผ ้็ๅฏ่ฐ็จๅฝๆฐ)ใ
- __alt__: ๅจๅ
ถไป้ถๆฎต้่ฆ่ฟๅ็ alt
(ๅผ ้ๆ่ฟๅๅผ ้็ๅฏ่ฐ็จๅฝๆฐ)ใ
- __training__: 可选的标量张量
(或 Python 布尔值,或者 Python 整数),
以指定学习阶段。
__่ฟๅ__
ๅบไบ `training` ๆ ๅฟ๏ผ่ฆไน่ฟๅ `x`๏ผ่ฆไน่ฟๅ `alt`ใ
`training` ๆ ๅฟ้ป่ฎคไธบ `K.learning_phase()`ใ
----
### in_test_phase
```python
keras.backend.in_test_phase(x, alt, training=None)
```
在测试阶段选择 `x`,其他阶段选择 `alt`。
่ฏทๆณจๆ `alt` ๅบ่ฏฅไธ `x` ๅฐบๅฏธ็ธๅใ
__ๅๆฐ__
- __x__: 在测试阶段需要返回的 x
(ๅผ ้ๆ่ฟๅๅผ ้็ๅฏ่ฐ็จๅฝๆฐ)ใ
- __alt__: ๅจๅ
ถไป้ถๆฎต้่ฆ่ฟๅ็ alt
(ๅผ ้ๆ่ฟๅๅผ ้็ๅฏ่ฐ็จๅฝๆฐ)ใ
- __training__: 可选的标量张量
(或 Python 布尔值,或者 Python 整数),
以指定学习阶段。
__่ฟๅ__
ๅบไบ `K.learning_phase`๏ผ่ฆไน่ฟๅ `x`๏ผ่ฆไน่ฟๅ `alt`ใ
----
### relu
```python
keras.backend.relu(x, alpha=0.0, max_value=None)
```
ReLU 整流线性单元。
้ป่ฎคๆ
ๅตไธ๏ผๅฎ่ฟๅ้ไธชๅ
็ด ็ `max(x, 0)` ๅผใ
__ๅๆฐ__
- __x__: ไธไธชๅผ ้ๆๅ้ใ
- __alpha__: ไธไธชๆ ้๏ผ่ดๆฐ้จๅ็ๆ็๏ผ้ป่ฎคไธบ `0.`๏ผใ
- __max_value__: ้ฅฑๅๅบฆ้ๅผใ
__่ฟๅ__
ไธไธชๅผ ้ใ
__Numpy ๅฎ็ฐ__
```python
def relu(x, alpha=0., max_value=None, threshold=0.):
if max_value is None:
max_value = np.inf
above_threshold = x * (x >= threshold)
above_threshold = np.clip(above_threshold, 0.0, max_value)
below_threshold = alpha * (x - threshold) * (x < threshold)
return below_threshold + above_threshold
```
----
### elu
```python
keras.backend.elu(x, alpha=1.0)
```
ๆๆฐ็บฟๆงๅๅ
ใ
__ๅๆฐ__
- __x__: ็จไบ่ฎก็ฎๆฟๆดปๅฝๆฐ็ๅผ ้ๆๅ้ใ
- __alpha__: ไธไธชๆ ้๏ผ่ดๆฐ้จๅ็ๆ็ใ
__่ฟๅ__
ไธไธชๅผ ้ใ
__Numpy ๅฎ็ฐ__
```python
def elu(x, alpha=1.):
return x * (x > 0) + alpha * (np.exp(x) - 1.) * (x < 0)
```
----
### softmax
```python
keras.backend.softmax(x)
```
ๅผ ้็ Softmax ๅผใ
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
__่ฟๅ__
ไธไธชๅผ ้ใ
__Numpy ๅฎ็ฐ__
```python
def softmax(x, axis=-1):
y = np.exp(x - np.max(x, axis, keepdims=True))
return y / np.sum(y, axis, keepdims=True)
```
----
### softplus
```python
keras.backend.softplus(x)
```
ๅผ ้็ Softplus ๅผใ
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
__่ฟๅ__
ไธไธชๅผ ้ใ
__Numpy ๅฎ็ฐ__
```python
def softplus(x):
return np.log(1. + np.exp(x))
```
----
### softsign
```python
keras.backend.softsign(x)
```
ๅผ ้็ Softsign ๅผใ
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
__่ฟๅ__
ไธไธชๅผ ้ใ
__Numpy ๅฎ็ฐ__
```python
def softsign(x):
return x / (1 + np.abs(x))
```
----
### categorical_crossentropy
```python
keras.backend.categorical_crossentropy(target, output, from_logits=False)
```
่พๅบๅผ ้ไธ็ฎๆ ๅผ ้ไน้ด็ๅ็ฑปไบคๅ็ตใ
__ๅๆฐ__
- __target__: ไธ `output` ๅฐบๅฏธ็ธๅ็ๅผ ้ใ
- __output__: ็ฑ softmax ไบง็็ๅผ ้
(้ค้ `from_logits` ไธบ True๏ผ
ๅจ่ฟ็งๆ
ๅตไธ `output` ๅบ่ฏฅๆฏๅฏนๆฐๅฝขๅผ)ใ
- __from_logits__: ๅธๅฐๅผ๏ผ`output` ๆฏ softmax ็็ปๆ๏ผ
่ฟๆฏๅฏนๆฐๅฝขๅผ็ๅผ ้ใ
__่ฟๅ__
่พๅบๅผ ้ใ
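__Numpy 参考草稿__
一个仅作示意的 Numpy 草稿(数值细节与各后端实现可能略有差异):
```python
import numpy as np
def categorical_crossentropy(target, output, from_logits=False):
    if from_logits:
        # 先做数值稳定的 softmax
        e = np.exp(output - np.max(output, axis=-1, keepdims=True))
        output = e / np.sum(e, axis=-1, keepdims=True)
    output = np.clip(output, 1e-7, 1 - 1e-7)  # 避免 log(0)
    return -np.sum(target * np.log(output), axis=-1)
```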
----
### sparse_categorical_crossentropy
```python
keras.backend.sparse_categorical_crossentropy(target, output, from_logits=False)
```
็จ็่กจ็คบ็ๆดๆฐๅผ็ฎๆ ็ๅ็ฑปไบคๅ็ตใ
__ๅๆฐ__
- __target__: ไธไธชๆดๆฐๅผ ้ใ
- __output__: ็ฑ softmax ไบง็็ๅผ ้
(้ค้ `from_logits` ไธบ True๏ผ
ๅจ่ฟ็งๆ
ๅตไธ `output` ๅบ่ฏฅๆฏๅฏนๆฐๅฝขๅผ)ใ
- __from_logits__: ๅธๅฐๅผ๏ผ`output` ๆฏ softmax ็็ปๆ๏ผ
่ฟๆฏๅฏนๆฐๅฝขๅผ็ๅผ ้ใ
__่ฟๅ__
่พๅบๅผ ้ใ
----
### binary_crossentropy
```python
keras.backend.binary_crossentropy(target, output, from_logits=False)
```
่พๅบๅผ ้ไธ็ฎๆ ๅผ ้ไน้ด็ไบ่ฟๅถไบคๅ็ตใ
__ๅๆฐ__
- __target__: ไธ `output` ๅฐบๅฏธ็ธๅ็ๅผ ้ใ
- __output__: ไธไธชๅผ ้ใ
- __from_logits__: `output` ๆฏๅฆๆฏๅฏนๆฐๅผ ้ใ
้ป่ฎคๆ
ๅตไธ๏ผๆไปฌ่ฎคไธบ `output` ็ผ็ ไบๆฆ็ๅๅธใ
__่ฟๅ__
ไธไธชๅผ ้ใ
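__Numpy 参考草稿__
一个仅作示意的 Numpy 草稿(数值细节与各后端实现可能略有差异):
```python
import numpy as np
def binary_crossentropy(target, output, from_logits=False):
    if from_logits:
        output = 1. / (1. + np.exp(-output))  # sigmoid
    output = np.clip(output, 1e-7, 1 - 1e-7)  # 避免 log(0)
    return -(target * np.log(output) + (1. - target) * np.log(1. - output))
```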
----
### sigmoid
```python
keras.backend.sigmoid(x)
```
้ไธชๅ
็ด ๆฑ sigmoid ๅผใ
__ๅๆฐ__
- __x__: ไธไธชๅผ ้ๆๅ้ใ
__่ฟๅ__
ไธไธชๅผ ้ใ
__Numpy ๅฎ็ฐ__
```python
def sigmoid(x):
return 1. / (1. + np.exp(-x))
```
----
### hard_sigmoid
```python
keras.backend.hard_sigmoid(x)
```
ๅๆฎต็ sigmoid ็บฟๆง่ฟไผผใ้ๅบฆๆฏ sigmoid ๆดๅฟซใ
- ๅฆๆ `x < -2.5`๏ผ่ฟๅ `0`ใ
- ๅฆๆ `x > 2.5`๏ผ่ฟๅ `1`ใ
- ๅฆๆ `-2.5 <= x <= 2.5`๏ผ่ฟๅ `0.2 * x + 0.5`ใ
__ๅๆฐ__
- __x__: ไธไธชๅผ ้ๆๅ้ใ
__่ฟๅ__
ไธไธชๅผ ้ใ
__Numpy ๅฎ็ฐ__
```python
def hard_sigmoid(x):
y = 0.2 * x + 0.5
return np.clip(y, 0, 1)
```
----
### tanh
```python
keras.backend.tanh(x)
```
้ไธชๅ
็ด ๆฑ tanh ๅผใ
__ๅๆฐ__
- __x__: ไธไธชๅผ ้ๆๅ้ใ
__่ฟๅ__
ไธไธชๅผ ้ใ
__Numpy ๅฎ็ฐ__
```python
def tanh(x):
return np.tanh(x)
```
----
### dropout
```python
keras.backend.dropout(x, level, noise_shape=None, seed=None)
```
ๅฐ `x` ไธญ็ๆไบ้กน้ๆบ่ฎพ็ฝฎไธบ้ถ๏ผๅๆถ็ผฉๆพๆดไธชๅผ ้ใ
__ๅๆฐ__
- __x__: ๅผ ้
- __level__: ๅผ ้ไธญๅฐ่ขซ่ฎพ็ฝฎไธบ 0 ็้กน็ๆฏไพใ
- __noise_shape__: 随机生成的保留/丢弃标志的尺寸,
必须可以广播到 `x` 的尺寸。
- __seed__: ไฟ่ฏ็กฎๅฎๆง็้ๆบ็งๅญใ
__่ฟๅ__
ไธไธชๅผ ้ใ
__Numpy ๅฎ็ฐ__
<details>
<summary>ๅฑ็คบ Numpy ๅฎ็ฐ</summary>
```python
def dropout(x, level, noise_shape=None, seed=None):
if noise_shape is None:
noise_shape = x.shape
if learning_phase():
noise = np.random.choice([0, 1],
noise_shape,
replace=True,
p=[level, 1 - level])
return x * noise / (1 - level)
else:
return x
```
</details>
----
### l2_normalize
```python
keras.backend.l2_normalize(x, axis=None)
```
ๅจๆๅฎ็่ฝดไฝฟ็จ L2 ่ๅผ ๆ ๅๅไธไธชๅผ ้ใ
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
- __axis__: ้่ฆๆง่กๆ ๅๅ็่ฝดใ
__่ฟๅ__
ไธไธชๅผ ้ใ
__Numpy ๅฎ็ฐ__
```python
def l2_normalize(x, axis=-1):
y = np.max(np.sum(x ** 2, axis, keepdims=True), axis, keepdims=True)
return x / np.sqrt(y)
```
----
### in_top_k
```python
keras.backend.in_top_k(predictions, targets, k)
```
ๅคๆญ `targets` ๆฏๅฆๅจ `predictions` ็ๅ `k` ไธชไธญใ
__ๅๆฐ__
- __predictions__: ไธไธชๅผ ้๏ผๅฐบๅฏธไธบ `(batch_size, classes)`๏ผ็ฑปๅไธบ `float32`ใ
- __targets__: ไธไธช 1D ๅผ ้๏ผ้ฟๅบฆไธบ `batch_size`๏ผ็ฑปๅไธบ `int32` ๆ `int64`ใ
- __k__: 一个 `int`,要考虑的顶部元素的数量。
__่ฟๅ__
ไธไธช 1D ๅผ ้๏ผ้ฟๅบฆไธบ `batch_size`๏ผ็ฑปๅไธบ `bool`ใ
ๅฆๆ `predictions[i, targets[i]]` ๅจ
`predictions[i]` ็ top-`k` ๅผไธญ๏ผ
ๅ `output[i]` ไธบ `True`ใ
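__Numpy 参考草稿__
一个仅作示意的 Numpy 草稿(注意:对并列值的处理可能与各后端不完全一致):
```python
import numpy as np
def in_top_k(predictions, targets, k):
    # 取每行前 k 大的类别索引,检查对应目标是否在其中
    top_k = np.argsort(-predictions, axis=-1)[:, :k]
    return np.any(top_k == np.asarray(targets)[:, None], axis=-1)
```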
----
### conv1d
```python
keras.backend.conv1d(x, kernel, strides=1, padding='valid', data_format=None, dilation_rate=1)
```
1D ๅท็งฏใ
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
- __kernel__: ๆ ธๅผ ้ใ
- __strides__: ๆญฅ้ฟๆดๅใ
- __padding__: ๅญ็ฌฆไธฒ๏ผ`"same"`, `"causal"` ๆ `"valid"`ใ
- __data_format__: ๅญ็ฌฆไธฒ๏ผ`"channels_last"` ๆ `"channels_first"`ใ
- __dilation_rate__: ๆดๆฐ่จ่็ใ
__่ฟๅ__
ไธไธชๅผ ้๏ผ1D ๅท็งฏ็ปๆใ
__ๅผๅธธ__
- __ValueError__: ๅฆๆ `data_format` ๆขไธๆฏ `channels_last` ไนไธๆฏ `channels_first`ใ
----
### conv2d
```python
keras.backend.conv2d(x, kernel, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1))
```
2D ๅท็งฏใ
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
- __kernel__: ๆ ธๅผ ้ใ
- __strides__: ๆญฅ้ฟๅ
็ปใ
- __padding__: ๅญ็ฌฆไธฒ๏ผ`"same"` ๆ `"valid"`ใ
- __data_format__: ๅญ็ฌฆไธฒ๏ผ`"channels_last"` ๆ `"channels_first"`ใ
ๅฏนไบ่พๅ
ฅ/ๅท็งฏๆ ธ/่พๅบ๏ผๆฏๅฆไฝฟ็จ Theano ๆ TensorFlow/CNTKๆฐๆฎๆ ผๅผใ
- __dilation_rate__: 2 个整数的元组。
__่ฟๅ__
ไธไธชๅผ ้๏ผ2D ๅท็งฏ็ปๆใ
__ๅผๅธธ__
- __ValueError__: ๅฆๆ `data_format` ๆขไธๆฏ `channels_last` ไนไธๆฏ `channels_first`ใ
----
### conv2d_transpose
```python
keras.backend.conv2d_transpose(x, kernel, output_shape, strides=(1, 1), padding='valid', data_format=None)
```
2D ๅๅท็งฏ (ๅณ่ฝฌ็ฝฎๅท็งฏ)ใ
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
- __kernel__: ๆ ธๅผ ้ใ
- __output_shape__: ่กจ็คบ่พๅบๅฐบๅฏธ็ 1D ๆดๅๅผ ้ใ
- __strides__: ๆญฅ้ฟๅ
็ปใ
- __padding__: ๅญ็ฌฆไธฒ๏ผ`"same"` ๆ `"valid"`ใ
- __data_format__: ๅญ็ฌฆไธฒ๏ผ`"channels_last"` ๆ `"channels_first"`ใ
ๅฏนไบ่พๅ
ฅ/ๅท็งฏๆ ธ/่พๅบ๏ผๆฏๅฆไฝฟ็จ Theano ๆ TensorFlow/CNTKๆฐๆฎๆ ผๅผใ
__่ฟๅ__
ไธไธชๅผ ้๏ผ่ฝฌ็ฝฎ็ 2D ๅท็งฏ็็ปๆใ
__ๅผๅธธ__
- __ValueError__: ๅฆๆ `data_format` ๆขไธๆฏ `channels_last` ไนไธๆฏ `channels_first`ใ
----
### separable_conv1d
```python
keras.backend.separable_conv1d(x, depthwise_kernel, pointwise_kernel, strides=1, padding='valid', data_format=None, dilation_rate=1)
```
ๅธฆๅฏๅ็ฆปๆปคๆณขๅจ็ 1D ๅท็งฏใ
__ๅๆฐ__
- __x__: ่พๅ
ฅๅผ ้ใ
- __depthwise_kernel__: ็จไบๆทฑๅบฆๅท็งฏ็ๅท็งฏๆ ธใ
- __pointwise_kernel__: 1x1 ๅท็งฏๆ ธใ
- __strides__: ๆญฅ้ฟๆดๆฐใ
- __padding__: ๅญ็ฌฆไธฒ๏ผ`"same"` ๆ `"valid"`ใ
- __data_format__: ๅญ็ฌฆไธฒ๏ผ`"channels_last"` ๆ `"channels_first"`ใ
- __dilation_rate__: ๆดๆฐ่จ่็ใ
__่ฟๅ__
่พๅบๅผ ้ใ
__ๅผๅธธ__
- __ValueError__: ๅฆๆ `data_format` ๆขไธๆฏ `channels_last` ไนไธๆฏ `channels_first`ใ
----
### separable_conv2d
```python
keras.backend.separable_conv2d(x, depthwise_kernel, pointwise_kernel, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1))
```
ๅธฆๅฏๅ็ฆปๆปคๆณขๅจ็ 2D ๅท็งฏใ
__ๅๆฐ__
- __x__: ่พๅ
ฅๅผ ้ใ
- __depthwise_kernel__: ็จไบๆทฑๅบฆๅท็งฏ็ๅท็งฏๆ ธใ
- __pointwise_kernel__: 1x1 ๅท็งฏๆ ธใ
- __strides__: ๆญฅ้ฟๅ
็ป (้ฟๅบฆไธบ 2)ใ
- __padding__: ๅญ็ฌฆไธฒ๏ผ`"same"` ๆ `"valid"`ใ
- __data_format__: ๅญ็ฌฆไธฒ๏ผ`"channels_last"` ๆ `"channels_first"`ใ
- __dilation_rate__: ๆดๆฐๅ
็ป๏ผๅฏๅ็ฆปๅท็งฏ็่จ่็ใ
__่ฟๅ__
่พๅบๅผ ้ใ
__ๅผๅธธ__
- __ValueError__: ๅฆๆ `data_format` ๆขไธๆฏ `channels_last` ไนไธๆฏ `channels_first`ใ
----
### depthwise_conv2d
```python
keras.backend.depthwise_conv2d(x, depthwise_kernel, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1))
```
ๅธฆๅฏๅ็ฆปๆปคๆณขๅจ็ 2D ๅท็งฏใ
__ๅๆฐ__
- __x__: ่พๅ
ฅๅผ ้ใ
- __depthwise_kernel__: ็จไบๆทฑๅบฆๅท็งฏ็ๅท็งฏๆ ธใ
- __strides__: ๆญฅ้ฟๅ
็ป (้ฟๅบฆไธบ 2)ใ
- __padding__: ๅญ็ฌฆไธฒ๏ผ`"same"` ๆ `"valid"`ใ
- __data_format__: ๅญ็ฌฆไธฒ๏ผ`"channels_last"` ๆ `"channels_first"`ใ
- __dilation_rate__: ๆดๆฐๅ
็ป๏ผๅฏๅ็ฆปๅท็งฏ็่จ่็ใ
__่ฟๅ__
่พๅบๅผ ้ใ
__ๅผๅธธ__
- __ValueError__: ๅฆๆ `data_format` ๆขไธๆฏ `channels_last` ไนไธๆฏ `channels_first`ใ
----
### conv3d
```python
keras.backend.conv3d(x, kernel, strides=(1, 1, 1), padding='valid', data_format=None, dilation_rate=(1, 1, 1))
```
3D ๅท็งฏใ
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
- __kernel__: ๆ ธๅผ ้ใ
- __strides__: ๆญฅ้ฟๅ
็ปใ
- __padding__: ๅญ็ฌฆไธฒ๏ผ`"same"` ๆ `"valid"`ใ
- __data_format__: ๅญ็ฌฆไธฒ๏ผ`"channels_last"` ๆ `"channels_first"`ใ
- __dilation_rate__: 3 个整数的元组。
__่ฟๅ__
ไธไธชๅผ ้๏ผ3D ๅท็งฏ็็ปๆใ
__ๅผๅธธ__
- __ValueError__: ๅฆๆ `data_format` ๆขไธๆฏ `channels_last` ไนไธๆฏ `channels_first`ใ
----
### conv3d_transpose
```python
keras.backend.conv3d_transpose(x, kernel, output_shape, strides=(1, 1, 1), padding='valid', data_format=None)
```
3D ๅๅท็งฏ (ๅณ่ฝฌ็ฝฎๅท็งฏ)ใ
__ๅๆฐ__
- __x__: ่พๅ
ฅๅผ ้ใ
- __kernel__: ๆ ธๅผ ้ใ
- __output_shape__: ่กจ็คบ่พๅบๅฐบๅฏธ็ 1D ๆดๆฐๅผ ้ใ
- __strides__: ๆญฅ้ฟๅ
็ปใ
- __padding__: ๅญ็ฌฆไธฒ๏ผ`"same"` ๆ `"valid"`ใ
- __data_format__: ๅญ็ฌฆไธฒ๏ผ`"channels_last"` ๆ `"channels_first"`ใ
ๅฏนไบ่พๅ
ฅ/ๅท็งฏๆ ธ/่พๅบ๏ผๆฏๅฆไฝฟ็จ Theano ๆ TensorFlow/CNTKๆฐๆฎๆ ผๅผใ
__่ฟๅ__
ไธไธชๅผ ้๏ผ3D ่ฝฌ็ฝฎๅท็งฏ็็ปๆใ
__ๅผๅธธ__
- __ValueError__: ๅฆๆ `data_format` ๆขไธๆฏ `channels_last` ไนไธๆฏ `channels_first`ใ
----
### pool2d
```python
keras.backend.pool2d(x, pool_size, strides=(1, 1), padding='valid', data_format=None, pool_mode='max')
```
2D ๆฑ ๅใ
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
- __pool_size__: 2 个整数的元组。
- __strides__: 2 个整数的元组。
- __padding__: ๅญ็ฌฆไธฒ๏ผ`"same"` ๆ `"valid"`ใ
- __data_format__: ๅญ็ฌฆไธฒ๏ผ`"channels_last"` ๆ `"channels_first"`ใ
- __pool_mode__: ๅญ็ฌฆไธฒ๏ผ`"max"` ๆ `"avg"`ใ
__่ฟๅ__
ไธไธชๅผ ้๏ผ2D ๆฑ ๅ็็ปๆใ
__ๅผๅธธ__
- __ValueError__: ๅฆๆ `data_format` ๆขไธๆฏ `channels_last` ไนไธๆฏ `channels_first`ใ
- __ValueError__: 如果 `pool_mode` 既不是 `"max"` 也不是 `"avg"`。
----
### pool3d
```python
keras.backend.pool3d(x, pool_size, strides=(1, 1, 1), padding='valid', data_format=None, pool_mode='max')
```
3D ๆฑ ๅใ
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
- __pool_size__: 3 个整数的元组。
- __strides__: 3 个整数的元组。
- __padding__: ๅญ็ฌฆไธฒ๏ผ`"same"` ๆ `"valid"`ใ
- __data_format__: ๅญ็ฌฆไธฒ๏ผ`"channels_last"` ๆ `"channels_first"`ใ
- __pool_mode__: ๅญ็ฌฆไธฒ๏ผ`"max"` ๆ `"avg"`ใ
__่ฟๅ__
ไธไธชๅผ ้๏ผ3D ๆฑ ๅ็็ปๆใ
__ๅผๅธธ__
- __ValueError__: ๅฆๆ `data_format` ๆขไธๆฏ `channels_last` ไนไธๆฏ `channels_first`ใ
- __ValueError__: 如果 `pool_mode` 既不是 `"max"` 也不是 `"avg"`。
----
### local_conv1d
```python
keras.backend.local_conv1d(inputs, kernel, kernel_size, strides, data_format=None)
```
ๅจไธๅ
ฑไบซๆๅผ็ๆ
ๅตไธ๏ผ่ฟ็จ 1D ๅท็งฏใ
__ๅๆฐ__
- __inputs__: 3D ๅผ ้๏ผๅฐบๅฏธไธบ (batch_size, steps, input_dim)
- __kernel__: ๅท็งฏ็้ๅ
ฑไบซๆ้,
ๅฐบๅฏธไธบ (output_items, feature_dim, filters)
- __kernel_size__: ไธไธชๆดๆฐ็ๅ
็ป๏ผ
ๆๅฎ 1D ๅท็งฏ็ชๅฃ็้ฟๅบฆใ
- __strides__: ไธไธชๆดๆฐ็ๅ
็ป๏ผ
ๆๅฎๅท็งฏๆญฅ้ฟใ
- __data_format__: ๆฐๆฎๆ ผๅผ๏ผchannels_first ๆ channels_lastใ
__่ฟๅ__
่ฟ็จไธๅ
ฑไบซๆ้็ 1D ๅท็งฏไนๅ็ๅผ ้๏ผๅฐบๅฏธไธบ (batch_size, output_length, filters)ใ
__ๅผๅธธ__
- __ValueError__: ๅฆๆ `data_format` ๆขไธๆฏ `channels_last` ไนไธๆฏ `channels_first`ใ
----
### local_conv2d
```python
keras.backend.local_conv2d(inputs, kernel, kernel_size, strides, output_shape, data_format=None)
```
ๅจไธๅ
ฑไบซๆๅผ็ๆ
ๅตไธ๏ผ่ฟ็จ 2D ๅท็งฏใ
__ๅๆฐ__
- __inputs__: ๅฆๆ `data_format='channels_first'`๏ผ
ๅไธบๅฐบๅฏธไธบ (batch_size, filters, new_rows, new_cols) ็ 4D ๅผ ้ใ
ๅฆๆ `data_format='channels_last'`๏ผ
ๅไธบๅฐบๅฏธไธบ (batch_size, new_rows, new_cols, filters) ็ 4D ๅผ ้ใ
- __kernel__: ๅท็งฏ็้ๅ
ฑไบซๆ้,
ๅฐบๅฏธไธบ (output_items, feature_dim, filters)
- __kernel_size__: 2 ไธชๆดๆฐ็ๅ
็ป๏ผ
ๆๅฎ 2D ๅท็งฏ็ชๅฃ็ๅฎฝๅบฆๅ้ซๅบฆใ
- __strides__: 2 ไธชๆดๆฐ็ๅ
็ป๏ผ
ๆๅฎ 2D ๅท็งฏๆฒฟๅฎฝๅบฆๅ้ซๅบฆๆนๅ็ๆญฅ้ฟใ
- __output_shape__: ๅ
็ป (output_row, output_col) ใ
- __data_format__: ๆฐๆฎๆ ผๅผ๏ผchannels_first ๆ channels_lastใ
__่ฟๅ__
ไธไธช 4D ๅผ ้ใ
- ๅฆๆ `data_format='channels_first'`๏ผๅฐบๅฏธไธบ (batch_size, filters, new_rows, new_cols)ใ
- ๅฆๆ `data_format='channels_last'`๏ผๅฐบๅฏธไธบ (batch_size, new_rows, new_cols, filters)
__ๅผๅธธ__
- __ValueError__: ๅฆๆ `data_format` ๆขไธๆฏ `channels_last` ไนไธๆฏ `channels_first`ใ
----
### bias_add
```python
keras.backend.bias_add(x, bias, data_format=None)
```
็ปๅผ ้ๆทปๅ ไธไธชๅ็ฝฎๅ้ใ
__ๅๆฐ__
- __x__: ๅผ ้ๆๅ้ใ
- __bias__: ้่ฆๆทปๅ ็ๅ็ฝฎๅ้ใ
- __data_format__: ๅญ็ฌฆไธฒ๏ผ`"channels_last"` ๆ `"channels_first"`ใ
__่ฟๅ__
่พๅบๅผ ้ใ
__ๅผๅธธ__
- __ValueError__: 以下两种情况之一:
1. ๆ ๆ็ `data_format` ๅๆฐใ
2. ๆ ๆ็ๅ็ฝฎๅ้ๅฐบๅฏธใ
ๅ็ฝฎๅบ่ฏฅๆฏไธไธช `ndim(x)-1` ็ปด็ๅ้ๆๅผ ้ใ
__Numpy ๅฎ็ฐ__
<details>
<summary>ๅฑ็คบ Numpy ๅฎ็ฐ</summary>
```python
def bias_add(x, y, data_format):
if data_format == 'channels_first':
if y.ndim > 1:
y = np.reshape(y, y.shape[::-1])
for _ in range(x.ndim - y.ndim - 1):
y = np.expand_dims(y, -1)
else:
for _ in range(x.ndim - y.ndim - 1):
y = np.expand_dims(y, 0)
return x + y
```
</details>
----
### random_normal
```python
keras.backend.random_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None)
```
่ฟๅๆญฃๆๅๅธๅผ็ๅผ ้ใ
__ๅๆฐ__
- __shape__: ไธไธชๆดๆฐๅ
็ป๏ผ้่ฆๅๅปบ็ๅผ ้็ๅฐบๅฏธใ
- __mean__: ไธไธชๆตฎ็นๆฐ๏ผๆฝๆ ท็ๆญฃๆๅๅธๅนณๅๅผใ
- __stddev__: ไธไธชๆตฎ็นๆฐ๏ผๆฝๆ ท็ๆญฃๆๅๅธๆ ๅๅทฎใ
- __dtype__: ๅญ็ฌฆไธฒ๏ผ่ฟๅ็ๅผ ้็ๆฐๆฎ็ฑปๅใ
- __seed__: ๆดๆฐ๏ผ้ๆบ็งๅญใ
__่ฟๅ__
ไธไธชๅผ ้ใ
----
### random_uniform
```python
keras.backend.random_uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None)
```
่ฟๅๅๅๅๅธๅผ็ๅผ ้ใ
__ๅๆฐ__
- __shape__: ไธไธชๆดๆฐๅ
็ป๏ผ้่ฆๅๅปบ็ๅผ ้็ๅฐบๅฏธใ
- __minval__: ไธไธชๆตฎ็นๆฐ๏ผๆฝๆ ท็ๅๅๅๅธไธ็ใ
- __maxval__: ไธไธชๆตฎ็นๆฐ๏ผๆฝๆ ท็ๅๅๅๅธไธ็ใ
- __dtype__: ๅญ็ฌฆไธฒ๏ผ่ฟๅ็ๅผ ้็ๆฐๆฎ็ฑปๅใ
- __seed__: ๆดๆฐ๏ผ้ๆบ็งๅญใ
__่ฟๅ__
ไธไธชๅผ ้ใ
----
### random_binomial
```python
keras.backend.random_binomial(shape, p=0.0, dtype=None, seed=None)
```
่ฟๅ้ๆบไบ้กนๅๅธๅผ็ๅผ ้ใ
__ๅๆฐ__
- __shape__: ไธไธชๆดๆฐๅ
็ป๏ผ้่ฆๅๅปบ็ๅผ ้็ๅฐบๅฏธใ
- __p__: ไธไธชๆตฎ็นๆฐ๏ผ`0. <= p <= 1`๏ผไบ้กนๅๅธ็ๆฆ็ใ
- __dtype__: ๅญ็ฌฆไธฒ๏ผ่ฟๅ็ๅผ ้็ๆฐๆฎ็ฑปๅใ
- __seed__: ๆดๆฐ๏ผ้ๆบ็งๅญใ
__่ฟๅ__
ไธไธชๅผ ้ใ
----
### truncated_normal
```python
keras.backend.truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None)
```
่ฟๅๆชๆญ็้ๆบๆญฃๆๅๅธๅผ็ๅผ ้ใ
็ๆ็ๅผ้ตๅพชๅ
ทๆๆๅฎๅนณๅๅผๅๆ ๅๅทฎ็ๆญฃๆๅๅธ๏ผ
ๆญคๅค๏ผๅ
ถไธญๆฐๅผๅคงไบๅนณๅๅผไธคไธชๆ ๅๅทฎ็ๅฐ่ขซไธขๅผๅ้ๆฐๆ้ใ
__ๅๆฐ__
- __shape__: ไธไธชๆดๆฐๅ
็ป๏ผ้่ฆๅๅปบ็ๅผ ้็ๅฐบๅฏธใ
- __mean__: ๅนณๅๅผใ
- __stddev__: ๆ ๅๅทฎใ
- __dtype__: ๅญ็ฌฆไธฒ๏ผ่ฟๅ็ๅผ ้็ๆฐๆฎ็ฑปๅใ
- __seed__: ๆดๆฐ๏ผ้ๆบ็งๅญใ
__่ฟๅ__
ไธไธชๅผ ้ใ
----
### ctc_label_dense_to_sparse
```python
keras.backend.ctc_label_dense_to_sparse(labels, label_lengths)
```
ๅฐ CTC ๆ ็ญพไปๅฏ้่ฝฌๆขไธบ็จ็่กจ็คบใ
__ๅๆฐ__
- __labels__: ๅฏ้ CTC ๆ ็ญพใ
- __label_lengths__: ๆ ็ญพ้ฟๅบฆใ
__่ฟๅ__
ไธไธช่กจ็คบๆ ็ญพ็็จ็ๅผ ้ใ
----
### ctc_batch_cost
```python
keras.backend.ctc_batch_cost(y_true, y_pred, input_length, label_length)
```
ๅจๆฏไธชๆนๆฌกๅ
็ด ไธ่ฟ่ก CTC ๆๅคฑ็ฎๆณใ
__ๅๆฐ__
- __y_true__: 张量 `(samples, max_string_length)`,
包含真实标签。
- __y_pred__: 张量 `(samples, time_steps, num_categories)`,
包含预测值,或 softmax 输出。
- __input_length__: 张量 `(samples, 1)`,
包含 `y_pred` 中每个批次样本的序列长度。
- __label_length__: 张量 `(samples, 1)`,
包含 `y_true` 中每个批次样本的序列长度。
__่ฟๅ__
尺寸为 (samples, 1) 的张量,包含每一个元素的 CTC 损失。
----
### ctc_decode
```python
keras.backend.ctc_decode(y_pred, input_length, greedy=True, beam_width=100, top_paths=1, merge_repeated=False)
```
่งฃ็ softmax ็่พๅบใ
可以使用贪心搜索(也称为最优路径)或受限字典搜索。
__ๅๆฐ__
- __y_pred__: 张量 `(samples, time_steps, num_categories)`,
包含预测值,或 softmax 输出。
- __input_length__: 张量 `(samples,)`,
包含 `y_pred` 中每个批次样本的序列长度。
- __greedy__: 如果为 `True`,则执行更快速的最优路径搜索,而不使用字典。
- __beam_width__: ๅฆๆ `greedy` ไธบ `False`๏ผๅฐไฝฟ็จ่ฏฅๅฎฝๅบฆ็ beam ๆ็ดข่งฃ็ ๅจๆ็ดขใ
- __top_paths__: 如果 `greedy` 为 `False`,
将返回多少条最可能的路径。
__่ฟๅ__
- __Tuple__:
- __List__: 如果 `greedy` 为 `True`,返回包含解码序列的一个元素的列表。
如果为 `False`,返回最可能解码序列的 `top_paths`。
- __Important__: 空白标签返回为 `-1`。包含每个解码序列的对数概率的张量 `(top_paths,)`。
----
### control_dependencies
```python
keras.backend.control_dependencies(control_inputs)
```
ไธไธชๆๅฎๆงๅถไพ่ต็ไธไธๆ็ฎก็ๅจใ
__ๅๆฐ__
- __control_inputs__: 一系列的操作或张量对象,它们必须在执行上下文中定义的操作之前执行。它也可以是 None,表示清空控制依赖。
__返回__
ไธไธชไธไธๆ็ฎก็ๅจใ
----
### map_fn
```python
keras.backend.map_fn(fn, elems, name=None, dtype=None)
```
将函数 fn 映射到元素 `elems` 上并返回输出。
__ๅๆฐ__
- __fn__: ๅฐๅจๆฏไธชๅ
็ด ไธ่ฐ็จ็ๅฏ่ฐ็จๅฝๆฐใ
- __elems__: ๅผ ้ใ
- __name__: ๆ ๅฐ่็นๅจๅพไธญ็ๅญ็ฌฆไธฒๅ็งฐใ
- __dtype__: ่พๅบๆฐๆฎๆ ผๅผใ
__่ฟๅ__
ๆฐๆฎ็ฑปๅไธบ `dtype` ็ๅผ ้ใ
----
### foldl
```python
keras.backend.foldl(fn, elems, initializer=None, name=None)
```
ไฝฟ็จ fn ๅฝ็บฆ elems๏ผไปฅไปๅทฆๅฐๅณ็ปๅๅฎไปฌใ
__ๅๆฐ__
- __fn__: ๅฐๅจๆฏไธชๅ
็ด ๅไธไธช็ดฏๅ ๅจไธ่ฐ็จ็ๅฏ่ฐ็จๅฝๆฐ๏ผไพๅฆ `lambda acc, x: acc + x`ใ
- __elems__: ๅผ ้ใ
- __initializer__: ็ฌฌไธไธชไฝฟ็จ็ๅผ (ๅฆๆไธบ None๏ผไฝฟ็จ`elems[0]`)ใ
- __name__: foldl ่็นๅจๅพไธญ็ๅญ็ฌฆไธฒๅ็งฐใ
__่ฟๅ__
ไธ `initializer` ็ฑปๅๅๅฐบๅฏธ็ธๅ็ๅผ ้ใ
----
### foldr
```python
keras.backend.foldr(fn, elems, initializer=None, name=None)
```
ไฝฟ็จ fn ๅฝ็บฆ elems๏ผไปฅไปๅณๅฐๅทฆ็ปๅๅฎไปฌใ
__ๅๆฐ__
- __fn__: ๅฐๅจๆฏไธชๅ
็ด ๅไธไธช็ดฏๅ ๅจไธ่ฐ็จ็ๅฏ่ฐ็จๅฝๆฐ๏ผไพๅฆ `lambda acc, x: acc + x`ใ
- __elems__: ๅผ ้ใ
- __initializer__: ็ฌฌไธไธชไฝฟ็จ็ๅผ (ๅฆๆไธบ None๏ผไฝฟ็จ`elems[-1]`)ใ
- __name__: foldr ่็นๅจๅพไธญ็ๅญ็ฌฆไธฒๅ็งฐใ
__่ฟๅ__
ไธ `initializer` ็ฑปๅๅๅฐบๅฏธ็ธๅ็ๅผ ้ใ
# ๅ
ๅญฆๅญ็ฌฆ่ฏๅซ
此示例使用卷积堆栈,后跟递归堆栈和 CTC logloss 函数,以对生成的文本图像进行光学字符识别。
我没有证据表明它实际上是学习文本的一般形状,还是仅仅能够识别所遇到的所有不同字体……它的目的更多是为了在 Keras 中演示 CTC。
请注意,可能需要针对使用中的特定操作系统更新字体列表。
它从 4 个字母词开始。对于前 12 个轮次,使用 TextImageGenerator 类(同时是测试/训练数据的生成器类和 Keras 回调类)会逐渐增加难度。
20 个轮次后,通过重新编译模型以处理更宽的图像并重建单词列表以包含两个以空格分隔的单词,将抛出更长的序列。
下表显示了标准化的编辑距离值。Theano 使用的 CTC 实现略有不同,因此结果也有所不同。
Epoch | TF | TH
-----:|-------:|-------:
10| 0.027 | 0.064
15| 0.038 | 0.035
20| 0.043 | 0.045
25| 0.014 | 0.019
# ๅ
ถไปไพ่ต
้่ฆ ```cairo``` ๅ ```editdistance``` ๅ
:
้ฆๅ
๏ผๅฎ่ฃ
Cairo ๅบ: https://cairographics.org/
็ถๅๅฎ่ฃ
Python ไพ่ต:
```python
pip install cairocffi
pip install editdistance
```
Created by Mike Henry
https://github.com/mbhenry/
```python
import os
import itertools
import codecs
import re
import datetime
import cairocffi as cairo
import editdistance
import numpy as np
from scipy import ndimage
import pylab
from keras import backend as K
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.layers import Input, Dense, Activation
from keras.layers import Reshape, Lambda
from keras.layers.merge import add, concatenate
from keras.models import Model
from keras.layers.recurrent import GRU
from keras.optimizers import SGD
from keras.utils.data_utils import get_file
from keras.preprocessing import image
import keras.callbacks
OUTPUT_DIR = 'image_ocr'
# 字符类型匹配的正则表达式过滤器
regex = r'^[a-z ]+$'
alphabet = u'abcdefghijklmnopqrstuvwxyz '
np.random.seed(55)
# ่ฟไผไบง็ๆดๅคง็ "ๆ็น" ๅชๅฃฐ๏ผ
# 看起来比仅添加高斯噪声更为真实,
# ๅฎๅๅฎๅ็ด ็็ฐๅบฆ่ๅดไธบ 0 ๅฐ 1
def speckle(img):
severity = np.random.uniform(0, 0.6)
blur = ndimage.gaussian_filter(np.random.randn(*img.shape) * severity, 1)
img_speck = (img + blur)
img_speck[img_speck > 1] = 1
img_speck[img_speck <= 0] = 0
return img_speck
# ๅจ้ๆบไฝ็ฝฎ็ปๅถๅญ็ฌฆไธฒ๏ผ่พน็ๆกไนไฝฟ็จ้ๆบๅญไฝใ่ฝปๅพฎ็้ๆบๆ่ฝฌๅ้ๆบ็ๆ็นๅชๅฃฐ
def paint_text(text, w, h, rotate=False, ud=False, multi_fonts=False):
surface = cairo.ImageSurface(cairo.FORMAT_RGB24, w, h)
with cairo.Context(surface) as context:
context.set_source_rgb(1, 1, 1) # ็ฝ่ฒ
context.paint()
# ๆญคๅญไฝๅ่กจๅฏๅจ CentOS 7 ไธญไฝฟ็จ
if multi_fonts:
fonts = [
'Century Schoolbook', 'Courier', 'STIX',
'URW Chancery L', 'FreeMono']
context.select_font_face(
np.random.choice(fonts),
cairo.FONT_SLANT_NORMAL,
np.random.choice([cairo.FONT_WEIGHT_BOLD, cairo.FONT_WEIGHT_NORMAL]))
else:
context.select_font_face('Courier',
cairo.FONT_SLANT_NORMAL,
cairo.FONT_WEIGHT_BOLD)
context.set_font_size(25)
box = context.text_extents(text)
border_w_h = (4, 4)
if box[2] > (w - 2 * border_w_h[1]) or box[3] > (h - 2 * border_w_h[0]):
raise IOError(('Could not fit string into image.'
'Max char count is too large for given image width.'))
# ้่ฟๅจ็ปๅธไธ้ๆบๆพ็ฝฎๆๆฌๆกๅนถๆ่ฝฌไธไบ็ฉบ้ดๆฅๆไผ RNN ๅนณ็งปไธๅๆง
max_shift_x = w - box[2] - border_w_h[0]
max_shift_y = h - box[3] - border_w_h[1]
top_left_x = np.random.randint(0, int(max_shift_x))
if ud:
top_left_y = np.random.randint(0, int(max_shift_y))
else:
top_left_y = h // 2
context.move_to(top_left_x - int(box[0]), top_left_y - int(box[1]))
context.set_source_rgb(0, 0, 0)
context.show_text(text)
buf = surface.get_data()
a = np.frombuffer(buf, np.uint8)
a.shape = (h, w, 4)
a = a[:, :, 0] # ๆๅๅไธช้้
a = a.astype(np.float32) / 255
a = np.expand_dims(a, 0)
if rotate:
a = image.random_rotation(a, 3 * (w - top_left_x) / w + 1)
a = speckle(a)
return a
def shuffle_mats_or_lists(matrix_list, stop_ind=None):
ret = []
assert all([len(i) == len(matrix_list[0]) for i in matrix_list])
len_val = len(matrix_list[0])
if stop_ind is None:
stop_ind = len_val
assert stop_ind <= len_val
a = list(range(stop_ind))
np.random.shuffle(a)
a += list(range(stop_ind, len_val))
for mat in matrix_list:
if isinstance(mat, np.ndarray):
ret.append(mat[a])
elif isinstance(mat, list):
ret.append([mat[i] for i in a])
else:
raise TypeError('`shuffle_mats_or_lists` only supports '
'numpy.array and list objects.')
return ret
# ๅฐๅญ็ฌฆ่ฝฌๆขไธบๅฏไธ็ๆดๆฐๅผ
def text_to_labels(text):
ret = []
for char in text:
ret.append(alphabet.find(char))
return ret
# ๅฐๆฐๅญ็ฑปๅๅ่ฝฌๆขๅๅญ็ฌฆ
def labels_to_text(labels):
ret = []
for c in labels:
if c == len(alphabet): # CTC ็ฉบ็ฝ
ret.append("")
else:
ret.append(alphabet[c])
return "".join(ret)
# ไป
a-z ๅ็ฉบๆ ผ..ๅฏ่ฝไธ้พๆฉๅฑไธบๅคงๅๅ็ฌฆๅท
def is_valid_str(in_str):
search = re.compile(regex, re.UNICODE).search
return bool(search(in_str))
# Uses generator functions to supply train/test data. Image renderings
# and the text are created on the fly each time, using random numbers.
class TextImageGenerator(keras.callbacks.Callback):
def __init__(self, monogram_file, bigram_file, minibatch_size,
img_w, img_h, downsample_factor, val_split,
absolute_max_string_len=16):
self.minibatch_size = minibatch_size
self.img_w = img_w
self.img_h = img_h
self.monogram_file = monogram_file
self.bigram_file = bigram_file
self.downsample_factor = downsample_factor
self.val_split = val_split
self.blank_label = self.get_output_size() - 1
self.absolute_max_string_len = absolute_max_string_len
def get_output_size(self):
return len(alphabet) + 1
    # Since a generator is used, num_words can be independent of the
    # epoch size; as max_string_len grows, num_words can grow with it.
def build_word_list(self, num_words, max_string_len=None, mono_fraction=0.5):
assert max_string_len <= self.absolute_max_string_len
assert num_words % self.minibatch_size == 0
assert (self.val_split * num_words) % self.minibatch_size == 0
self.num_words = num_words
self.string_list = [''] * self.num_words
tmp_string_list = []
self.max_string_len = max_string_len
self.Y_data = np.ones([self.num_words, self.absolute_max_string_len]) * -1
self.X_text = []
self.Y_len = [0] * self.num_words
def _is_length_of_word_valid(word):
return (max_string_len == -1 or
max_string_len is None or
len(word) <= max_string_len)
        # The monogram file is sorted by frequency in English speech.
with codecs.open(self.monogram_file, mode='r', encoding='utf-8') as f:
for line in f:
if len(tmp_string_list) == int(self.num_words * mono_fraction):
break
word = line.rstrip()
if _is_length_of_word_valid(word):
tmp_string_list.append(word)
        # The bigram file contains common word pairings in English speech.
with codecs.open(self.bigram_file, mode='r', encoding='utf-8') as f:
lines = f.readlines()
for line in lines:
if len(tmp_string_list) == self.num_words:
break
columns = line.lower().split()
word = columns[0] + ' ' + columns[1]
if is_valid_str(word) and _is_length_of_word_valid(word):
tmp_string_list.append(word)
if len(tmp_string_list) != self.num_words:
raise IOError('Could not pull enough words'
'from supplied monogram and bigram files.')
        # Interlace to mix up the easy and hard words.
self.string_list[::2] = tmp_string_list[:self.num_words // 2]
self.string_list[1::2] = tmp_string_list[self.num_words // 2:]
for i, word in enumerate(self.string_list):
self.Y_len[i] = len(word)
self.Y_data[i, 0:len(word)] = text_to_labels(word)
self.X_text.append(word)
self.Y_len = np.expand_dims(np.array(self.Y_len), 1)
self.cur_val_index = self.val_split
self.cur_train_index = 0
    # Each time an image is requested from train/val/test, a new random
    # painting of the text is performed.
def get_batch(self, index, size, train):
        # width and height are backwards from the typical Keras convention,
        # because width is the time dimension when it gets fed into the RNN.
if K.image_data_format() == 'channels_first':
X_data = np.ones([size, 1, self.img_w, self.img_h])
else:
X_data = np.ones([size, self.img_w, self.img_h, 1])
labels = np.ones([size, self.absolute_max_string_len])
input_length = np.zeros([size, 1])
label_length = np.zeros([size, 1])
source_str = []
for i in range(size):
            # Mix in some blank inputs. This seems to be important for
            # achieving translational invariance.
if train and i > size - 4:
if K.image_data_format() == 'channels_first':
X_data[i, 0, 0:self.img_w, :] = self.paint_func('')[0, :, :].T
else:
X_data[i, 0:self.img_w, :, 0] = self.paint_func('',)[0, :, :].T
labels[i, 0] = self.blank_label
input_length[i] = self.img_w // self.downsample_factor - 2
label_length[i] = 1
source_str.append('')
else:
if K.image_data_format() == 'channels_first':
X_data[i, 0, 0:self.img_w, :] = (
self.paint_func(self.X_text[index + i])[0, :, :].T)
else:
X_data[i, 0:self.img_w, :, 0] = (
self.paint_func(self.X_text[index + i])[0, :, :].T)
labels[i, :] = self.Y_data[index + i]
input_length[i] = self.img_w // self.downsample_factor - 2
label_length[i] = self.Y_len[index + i]
source_str.append(self.X_text[index + i])
inputs = {'the_input': X_data,
'the_labels': labels,
'input_length': input_length,
'label_length': label_length,
                  'source_str': source_str  # used for visualization only
                  }
        outputs = {'ctc': np.zeros([size])}  # dummy data for the dummy loss function
return (inputs, outputs)
def next_train(self):
while 1:
ret = self.get_batch(self.cur_train_index,
self.minibatch_size, train=True)
self.cur_train_index += self.minibatch_size
if self.cur_train_index >= self.val_split:
self.cur_train_index = self.cur_train_index % 32
(self.X_text, self.Y_data, self.Y_len) = shuffle_mats_or_lists(
[self.X_text, self.Y_data, self.Y_len], self.val_split)
yield ret
def next_val(self):
while 1:
ret = self.get_batch(self.cur_val_index,
self.minibatch_size, train=False)
self.cur_val_index += self.minibatch_size
if self.cur_val_index >= self.num_words:
self.cur_val_index = self.val_split + self.cur_val_index % 32
yield ret
def on_train_begin(self, logs={}):
self.build_word_list(16000, 4, 1)
self.paint_func = lambda text: paint_text(
text, self.img_w, self.img_h,
rotate=False, ud=False, multi_fonts=False)
def on_epoch_begin(self, epoch, logs={}):
        # Rebind the paint function to implement curriculum learning.
if 3 <= epoch < 6:
self.paint_func = lambda text: paint_text(
text, self.img_w, self.img_h,
rotate=False, ud=True, multi_fonts=False)
elif 6 <= epoch < 9:
self.paint_func = lambda text: paint_text(
text, self.img_w, self.img_h,
rotate=False, ud=True, multi_fonts=True)
elif epoch >= 9:
self.paint_func = lambda text: paint_text(
text, self.img_w, self.img_h,
rotate=True, ud=True, multi_fonts=True)
if epoch >= 21 and self.max_string_len < 12:
self.build_word_list(32000, 12, 0.5)
# The actual loss calculation occurs here, despite it not being
# an internal Keras loss function.
def ctc_lambda_func(args):
y_pred, labels, input_length, label_length = args
    # The 2 is critical here, since the first couple of outputs
    # of the RNN tend to be garbage:
    y_pred = y_pred[:, 2:, :]
return K.ctc_batch_cost(labels, y_pred, input_length, label_length)
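# For reference, K.ctc_batch_cost (used above) receives the following
# shapes in this example (as produced by get_batch):
#   labels:       (batch_size, absolute_max_string_len)
#   y_pred:       (batch_size, time_steps, num_categories)  -- softmax output
#   input_length: (batch_size, 1)
#   label_length: (batch_size, 1)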
# For a real OCR application, this should be beam search with a dictionary
# and language model. For this example, best path is sufficient.
def decode_batch(test_func, word_batch):
out = test_func([word_batch])[0]
ret = []
for j in range(out.shape[0]):
out_best = list(np.argmax(out[j, 2:], 1))
out_best = [k for k, g in itertools.groupby(out_best)]
outstr = labels_to_text(out_best)
ret.append(outstr)
return ret
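# Note: Keras also provides a built-in decoder, K.ctc_decode(y_pred,
# input_length, greedy=True), which performs the same best-path decoding
# (and beam search when greedy=False). The manual argmax + groupby above
# keeps the decoding step transparent.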
class VizCallback(keras.callbacks.Callback):
def __init__(self, run_name, test_func, text_img_gen, num_display_words=6):
self.test_func = test_func
self.output_dir = os.path.join(
OUTPUT_DIR, run_name)
self.text_img_gen = text_img_gen
self.num_display_words = num_display_words
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
def show_edit_distance(self, num):
num_left = num
mean_norm_ed = 0.0
mean_ed = 0.0
while num_left > 0:
word_batch = next(self.text_img_gen)[0]
num_proc = min(word_batch['the_input'].shape[0], num_left)
decoded_res = decode_batch(self.test_func,
word_batch['the_input'][0:num_proc])
for j in range(num_proc):
edit_dist = editdistance.eval(decoded_res[j],
word_batch['source_str'][j])
mean_ed += float(edit_dist)
mean_norm_ed += float(edit_dist) / len(word_batch['source_str'][j])
num_left -= num_proc
mean_norm_ed = mean_norm_ed / num
mean_ed = mean_ed / num
print('\nOut of %d samples: Mean edit distance:'
'%.3f Mean normalized edit distance: %0.3f'
% (num, mean_ed, mean_norm_ed))
def on_epoch_end(self, epoch, logs={}):
self.model.save_weights(
os.path.join(self.output_dir, 'weights%02d.h5' % (epoch)))
self.show_edit_distance(256)
word_batch = next(self.text_img_gen)[0]
res = decode_batch(self.test_func,
word_batch['the_input'][0:self.num_display_words])
if word_batch['the_input'][0].shape[0] < 256:
cols = 2
else:
cols = 1
for i in range(self.num_display_words):
pylab.subplot(self.num_display_words // cols, cols, i + 1)
if K.image_data_format() == 'channels_first':
the_input = word_batch['the_input'][i, 0, :, :]
else:
the_input = word_batch['the_input'][i, :, :, 0]
pylab.imshow(the_input.T, cmap='Greys_r')
pylab.xlabel(
'Truth = \'%s\'\nDecoded = \'%s\'' %
(word_batch['source_str'][i], res[i]))
fig = pylab.gcf()
fig.set_size_inches(10, 13)
pylab.savefig(os.path.join(self.output_dir, 'e%02d.png' % (epoch)))
pylab.close()
def train(run_name, start_epoch, stop_epoch, img_w):
    # Input parameters
img_h = 64
words_per_epoch = 16000
val_split = 0.2
val_words = int(words_per_epoch * (val_split))
    # Network parameters
conv_filters = 16
kernel_size = (3, 3)
pool_size = 2
time_dense_size = 32
rnn_size = 512
minibatch_size = 32
if K.image_data_format() == 'channels_first':
input_shape = (1, img_w, img_h)
else:
input_shape = (img_w, img_h, 1)
fdir = os.path.dirname(
get_file('wordlists.tgz',
origin='http://www.mythic-ai.com/datasets/wordlists.tgz',
untar=True))
img_gen = TextImageGenerator(
monogram_file=os.path.join(fdir, 'wordlist_mono_clean.txt'),
bigram_file=os.path.join(fdir, 'wordlist_bi_clean.txt'),
minibatch_size=minibatch_size,
img_w=img_w,
img_h=img_h,
downsample_factor=(pool_size ** 2),
val_split=words_per_epoch - val_words)
act = 'relu'
input_data = Input(name='the_input', shape=input_shape, dtype='float32')
inner = Conv2D(conv_filters, kernel_size, padding='same',
activation=act, kernel_initializer='he_normal',
name='conv1')(input_data)
inner = MaxPooling2D(pool_size=(pool_size, pool_size), name='max1')(inner)
inner = Conv2D(conv_filters, kernel_size, padding='same',
activation=act, kernel_initializer='he_normal',
name='conv2')(inner)
inner = MaxPooling2D(pool_size=(pool_size, pool_size), name='max2')(inner)
conv_to_rnn_dims = (img_w // (pool_size ** 2),
(img_h // (pool_size ** 2)) * conv_filters)
inner = Reshape(target_shape=conv_to_rnn_dims, name='reshape')(inner)
    # Cuts down the input size going into the RNN:
inner = Dense(time_dense_size, activation=act, name='dense1')(inner)
    # Two layers of bidirectional GRUs.
    # A single layer of GRU seems to work as well, if not better than LSTM:
gru_1 = GRU(rnn_size, return_sequences=True,
kernel_initializer='he_normal', name='gru1')(inner)
gru_1b = GRU(rnn_size, return_sequences=True,
go_backwards=True, kernel_initializer='he_normal',
name='gru1_b')(inner)
gru1_merged = add([gru_1, gru_1b])
gru_2 = GRU(rnn_size, return_sequences=True,
kernel_initializer='he_normal', name='gru2')(gru1_merged)
gru_2b = GRU(rnn_size, return_sequences=True, go_backwards=True,
kernel_initializer='he_normal', name='gru2_b')(gru1_merged)
    # Transforms RNN output to character activations:
inner = Dense(img_gen.get_output_size(), kernel_initializer='he_normal',
name='dense2')(concatenate([gru_2, gru_2b]))
y_pred = Activation('softmax', name='softmax')(inner)
Model(inputs=input_data, outputs=y_pred).summary()
labels = Input(name='the_labels',
shape=[img_gen.absolute_max_string_len], dtype='float32')
input_length = Input(name='input_length', shape=[1], dtype='int64')
label_length = Input(name='label_length', shape=[1], dtype='int64')
    # Keras doesn't currently support loss functions with extra parameters,
    # so CTC loss is implemented in a Lambda layer.
loss_out = Lambda(
ctc_lambda_func, output_shape=(1,),
name='ctc')([y_pred, labels, input_length, label_length])
    # clipnorm seems to speed up convergence.
    sgd = SGD(learning_rate=0.02,
              decay=1e-6,
              momentum=0.9,
              nesterov=True,
              clipnorm=5)
model = Model(inputs=[input_data, labels, input_length, label_length],
outputs=loss_out)
    # The loss calculation occurs elsewhere, so use a dummy lambda function
    # for the loss.
model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=sgd)
if start_epoch > 0:
weight_file = os.path.join(
OUTPUT_DIR,
os.path.join(run_name, 'weights%02d.h5' % (start_epoch - 1)))
model.load_weights(weight_file)
    # Captures the output of softmax so we can decode the output during
    # visualization.
test_func = K.function([input_data], [y_pred])
viz_cb = VizCallback(run_name, test_func, img_gen.next_val())
model.fit_generator(
generator=img_gen.next_train(),
steps_per_epoch=(words_per_epoch - val_words) // minibatch_size,
epochs=stop_epoch,
validation_data=img_gen.next_val(),
validation_steps=val_words // minibatch_size,
callbacks=[viz_cb, img_gen],
initial_epoch=start_epoch)
if __name__ == '__main__':
run_name = datetime.datetime.now().strftime('%Y:%m:%d:%H:%M:%S')
train(run_name, 0, 20, 128)
    # Increase to wider images and start at epoch 20.
    # The learned weights are reloaded.
train(run_name, 20, 25, 512)
``` | keras-docs-zh/sources/examples/image_ocr.md/0 | {
"file_path": "keras-docs-zh/sources/examples/image_ocr.md",
"repo_id": "keras-docs-zh",
"token_count": 11542
} | 82 |
This is an implementation of the Net2Net experiment with MNIST from
"Net2Net: Accelerating Learning via Knowledge Transfer"
by Tianqi Chen, Ian Goodfellow, and Jonathon Shlens.

arXiv:1511.05641v4 [cs.LG] 23 Apr 2016
http://arxiv.org/abs/1511.05641

# Notes

- What:
    + Net2Net is a group of methods to transfer knowledge from a teacher
      neural net to a student net, so that the student net can be trained
      faster than training from scratch.
    + The paper discusses two specific Net2Net methods, namely Net2WiderNet
      and Net2DeeperNet.
    + Net2WiderNet replaces a model with an equivalent wider model that has
      more units in each hidden layer.
    + Net2DeeperNet replaces a model with an equivalent deeper model.
    + Both are based on the idea of "function-preserving transformations
      of neural nets".
- Why:
    + Enables fast exploration of multiple neural nets during experimentation
      and design, by creating a series of wider and deeper models with
      transferable knowledge.
    + Enables "lifelong learning systems" by gradually adjusting model
      complexity to data availability, and reusing transferable knowledge.

# Experiments

- Teacher model: a basic CNN model trained on MNIST for 3 epochs.
- Net2WiderNet experiment:
    + The student model has a wider Conv2D layer and a wider FC layer.
    + Compares 'random-padding' vs. 'net2wider' weight initialization.
    + With both methods, after 1 epoch, the student model performs as well
      as the teacher model, but 'net2wider' is slightly better.
- Net2DeeperNet experiment:
    + The student model has an extra Conv2D layer and an extra FC layer.
    + Compares 'random-init' vs. 'net2deeper' weight initialization.
    + After 1 epoch, 'net2deeper' performs better than 'random-init'.
- Hyper-parameters:
    + SGD with momentum=0.9 is used for training the teacher and student models.
    + Learning rate adjustment: it is suggested to reduce the student model's
      learning rate to 1/10 of the teacher's.
    + Noise is added in 'net2wider' to break weight symmetry and thus enable
      the full capacity of the student model. It is optional when a Dropout
      layer is used.

# Results

- Tested with the TF backend and the 'channels_last' image_data_format.
- Run on a GeForce GTX Titan X (Maxwell) GPU.
- Performance comparison - validation loss values for the first 3 epochs:

Teacher model ...
(0) teacher_model:       0.0537  0.0354  0.0356
Net2WiderNet experiment ...
(1) wider_random_pad:    0.0320  0.0317  0.0289
(2) wider_net2wider:     0.0271  0.0274  0.0270
Net2DeeperNet experiment ...
(3) deeper_random_init:  0.0682  0.0506  0.0468
(4) deeper_net2deeper:   0.0292  0.0294  0.0286
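Before diving into the code, here is a tiny NumPy sketch (not part of the
original script) of the "function-preserving transformation" idea behind
Net2WiderNet for a fully connected layer: copying a unit and splitting its
outgoing weights leaves the output unchanged. The real `wider2net_fc` below
additionally adds noise to break symmetry.

```python
import numpy as np

rng = np.random.RandomState(0)
x = rng.randn(1, 4)                      # one input example
w1, b1 = rng.randn(4, 3), rng.randn(3)   # fc layer to be widened (3 units)
w2 = rng.randn(3, 2)                     # next connected fc layer

h = x.dot(w1) + b1                       # linear activation, for simplicity
y_teacher = h.dot(w2)

# Widen unit 0: copy its incoming weights/bias, halve its outgoing weights.
w1_wide = np.concatenate([w1, w1[:, :1]], axis=1)
b1_wide = np.concatenate([b1, b1[:1]])
w2_wide = np.concatenate([w2, w2[:1]], axis=0)
w2_wide[0] /= 2.0
w2_wide[3] /= 2.0

y_student = (x.dot(w1_wide) + b1_wide).dot(w2_wide)
print(np.allclose(y_teacher, y_student))  # True
```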
```python
from __future__ import print_function
import numpy as np
import keras
from keras import backend as K
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dense, Flatten
from keras.optimizers import SGD
from keras.datasets import mnist
if K.image_data_format() == 'channels_first':
    input_shape = (1, 28, 28)  # image shape
else:
    input_shape = (28, 28, 1)  # image shape
num_classes = 10  # number of classes
epochs = 3
# Load and pre-process the data.
def preprocess_input(x):
return x.astype('float32').reshape((-1,) + input_shape) / 255
def preprocess_output(y):
return keras.utils.to_categorical(y)
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = map(preprocess_input, [x_train, x_test])
y_train, y_test = map(preprocess_output, [y_train, y_test])
print('Loading MNIST data...')
print('x_train shape:', x_train.shape, 'y_train shape:', y_train.shape)
print('x_test shape:', x_test.shape, 'y_test shape', y_test.shape)
# Knowledge transfer algorithms.
def wider2net_conv2d(teacher_w1, teacher_b1, teacher_w2, new_width, init):
    '''Get initial weights for a wider conv2d layer with a bigger number
    of filters, by 'random-padding' or 'net2wider'.
    # Arguments
        teacher_w1: `weight` of the conv2d layer to become wider,
          of shape (kh1, kw1, num_channel1, filters1)
        teacher_b1: `bias` of the conv2d layer to become wider,
          of shape (filters1, )
        teacher_w2: `weight` of the next connected conv2d layer,
          of shape (kh2, kw2, num_channel2, filters2)
        new_width: new `filters` for the wider conv2d layer
        init: initialization algorithm for the new weights,
          either 'random-pad' or 'net2wider'
    '''
assert teacher_w1.shape[0] == teacher_w2.shape[1], (
'successive layers from teacher model should have compatible shapes')
assert teacher_w1.shape[3] == teacher_b1.shape[0], (
'weight and bias from same layer should have compatible shapes')
assert new_width > teacher_w1.shape[3], (
'new width (filters) should be bigger than the existing one')
n = new_width - teacher_w1.shape[3]
if init == 'random-pad':
new_w1 = np.random.normal(0, 0.1, size=teacher_w1.shape[:3] + (n,))
new_b1 = np.ones(n) * 0.1
new_w2 = np.random.normal(
0, 0.1,
size=teacher_w2.shape[:2] + (n, teacher_w2.shape[3]))
elif init == 'net2wider':
index = np.random.randint(teacher_w1.shape[3], size=n)
factors = np.bincount(index)[index] + 1.
new_w1 = teacher_w1[:, :, :, index]
new_b1 = teacher_b1[index]
new_w2 = teacher_w2[:, :, index, :] / factors.reshape((1, 1, -1, 1))
else:
raise ValueError('Unsupported weight initializer: %s' % init)
student_w1 = np.concatenate((teacher_w1, new_w1), axis=3)
if init == 'random-pad':
student_w2 = np.concatenate((teacher_w2, new_w2), axis=2)
elif init == 'net2wider':
        # Add small noise to break symmetry, so that the student model
        # can make full use of its capacity later.
noise = np.random.normal(0, 5e-2 * new_w2.std(), size=new_w2.shape)
student_w2 = np.concatenate((teacher_w2, new_w2 + noise), axis=2)
student_w2[:, :, index, :] = new_w2
student_b1 = np.concatenate((teacher_b1, new_b1), axis=0)
return student_w1, student_b1, student_w2
def wider2net_fc(teacher_w1, teacher_b1, teacher_w2, new_width, init):
    '''Get initial weights for a wider fully connected (dense) layer
    with a bigger nout, by 'random-padding' or 'net2wider'.
    # Arguments
        teacher_w1: `weight` of the fc layer to become wider,
          of shape (nin1, nout1)
        teacher_b1: `bias` of the fc layer to become wider,
          of shape (nout1, )
        teacher_w2: `weight` of the next connected fc layer,
          of shape (nin2, nout2)
        new_width: new `nout` for the wider fc layer
        init: initialization algorithm for the new weights,
          either 'random-pad' or 'net2wider'
    '''
assert teacher_w1.shape[1] == teacher_w2.shape[0], (
'successive layers from teacher model should have compatible shapes')
assert teacher_w1.shape[1] == teacher_b1.shape[0], (
'weight and bias from same layer should have compatible shapes')
assert new_width > teacher_w1.shape[1], (
'new width (nout) should be bigger than the existing one')
n = new_width - teacher_w1.shape[1]
if init == 'random-pad':
new_w1 = np.random.normal(0, 0.1, size=(teacher_w1.shape[0], n))
new_b1 = np.ones(n) * 0.1
new_w2 = np.random.normal(0, 0.1, size=(n, teacher_w2.shape[1]))
elif init == 'net2wider':
index = np.random.randint(teacher_w1.shape[1], size=n)
factors = np.bincount(index)[index] + 1.
new_w1 = teacher_w1[:, index]
new_b1 = teacher_b1[index]
new_w2 = teacher_w2[index, :] / factors[:, np.newaxis]
else:
raise ValueError('Unsupported weight initializer: %s' % init)
student_w1 = np.concatenate((teacher_w1, new_w1), axis=1)
if init == 'random-pad':
student_w2 = np.concatenate((teacher_w2, new_w2), axis=0)
elif init == 'net2wider':
        # Add small noise to break symmetry, so that the student model
        # can make full use of its capacity later.
noise = np.random.normal(0, 5e-2 * new_w2.std(), size=new_w2.shape)
student_w2 = np.concatenate((teacher_w2, new_w2 + noise), axis=0)
student_w2[index, :] = new_w2
student_b1 = np.concatenate((teacher_b1, new_b1), axis=0)
return student_w1, student_b1, student_w2
def deeper2net_conv2d(teacher_w):
    '''Get initial weights for a deeper conv2d layer by 'net2deeper'.
    # Arguments
        teacher_w: `weight` of the previous conv2d layer,
          of shape (kh, kw, num_channel, filters)
    '''
kh, kw, num_channel, filters = teacher_w.shape
student_w = np.zeros_like(teacher_w)
for i in range(filters):
student_w[(kh - 1) // 2, (kw - 1) // 2, i, i] = 1.
student_b = np.zeros(filters)
return student_w, student_b
def copy_weights(teacher_model, student_model, layer_names):
    '''Copy the weights of the layers listed in layer_names
    from teacher_model to student_model.
    '''
for name in layer_names:
weights = teacher_model.get_layer(name=name).get_weights()
student_model.get_layer(name=name).set_weights(weights)
# Methods to construct teacher_model and student_model.
def make_teacher_model(x_train, y_train,
x_test, y_test,
epochs):
    '''Train a simple CNN and benchmark its performance.
    (0) Teacher model
    '''
model = Sequential()
model.add(Conv2D(64, 3, input_shape=input_shape,
padding='same', name='conv1'))
model.add(MaxPooling2D(2, name='pool1'))
model.add(Conv2D(64, 3, padding='same', name='conv2'))
model.add(MaxPooling2D(2, name='pool2'))
model.add(Flatten(name='flatten'))
model.add(Dense(64, activation='relu', name='fc1'))
model.add(Dense(num_classes, activation='softmax', name='fc2'))
model.compile(loss='categorical_crossentropy',
optimizer=SGD(learning_rate=0.01, momentum=0.9),
metrics=['accuracy'])
model.fit(x_train, y_train,
epochs=epochs,
validation_data=(x_test, y_test))
return model
def make_wider_student_model(teacher_model,
x_train, y_train,
x_test, y_test,
init, epochs):
    '''Train a wider student model based on teacher_model,
    with either 'random-pad' (baseline) or 'net2wider'.
    '''
new_conv1_width = 128
new_fc1_width = 128
model = Sequential()
    # A wider conv1 compared to teacher_model.
model.add(Conv2D(new_conv1_width, 3, input_shape=input_shape,
padding='same', name='conv1'))
model.add(MaxPooling2D(2, name='pool1'))
model.add(Conv2D(64, 3, padding='same', name='conv2'))
model.add(MaxPooling2D(2, name='pool2'))
model.add(Flatten(name='flatten'))
    # A wider fc1 compared to teacher_model.
model.add(Dense(new_fc1_width, activation='relu', name='fc1'))
model.add(Dense(num_classes, activation='softmax', name='fc2'))
    # The weights of the other layers need to be copied from teacher_model
    # to student_model, except for the widened layers and their immediate
    # downstream layers, which are initialized separately.
    # For this example, there are no other layers that need to be copied.
w_conv1, b_conv1 = teacher_model.get_layer('conv1').get_weights()
w_conv2, b_conv2 = teacher_model.get_layer('conv2').get_weights()
new_w_conv1, new_b_conv1, new_w_conv2 = wider2net_conv2d(
w_conv1, b_conv1, w_conv2, new_conv1_width, init)
model.get_layer('conv1').set_weights([new_w_conv1, new_b_conv1])
model.get_layer('conv2').set_weights([new_w_conv2, b_conv2])
w_fc1, b_fc1 = teacher_model.get_layer('fc1').get_weights()
w_fc2, b_fc2 = teacher_model.get_layer('fc2').get_weights()
new_w_fc1, new_b_fc1, new_w_fc2 = wider2net_fc(
w_fc1, b_fc1, w_fc2, new_fc1_width, init)
model.get_layer('fc1').set_weights([new_w_fc1, new_b_fc1])
model.get_layer('fc2').set_weights([new_w_fc2, b_fc2])
model.compile(loss='categorical_crossentropy',
optimizer=SGD(learning_rate=0.001, momentum=0.9),
metrics=['accuracy'])
model.fit(x_train, y_train,
epochs=epochs,
validation_data=(x_test, y_test))
def make_deeper_student_model(teacher_model,
x_train, y_train,
x_test, y_test,
init, epochs):
    '''Train a deeper student model based on teacher_model,
    with either 'random-init' (baseline) or 'net2deeper'.
    '''
model = Sequential()
model.add(Conv2D(64, 3, input_shape=input_shape,
padding='same', name='conv1'))
model.add(MaxPooling2D(2, name='pool1'))
model.add(Conv2D(64, 3, padding='same', name='conv2'))
    # Add another conv2d layer to make the original conv2 deeper.
if init == 'net2deeper':
prev_w, _ = model.get_layer('conv2').get_weights()
new_weights = deeper2net_conv2d(prev_w)
model.add(Conv2D(64, 3, padding='same',
name='conv2-deeper', weights=new_weights))
elif init == 'random-init':
model.add(Conv2D(64, 3, padding='same', name='conv2-deeper'))
else:
raise ValueError('Unsupported weight initializer: %s' % init)
model.add(MaxPooling2D(2, name='pool2'))
model.add(Flatten(name='flatten'))
model.add(Dense(64, activation='relu', name='fc1'))
    # Add another fc layer to make the original fc1 deeper.
if init == 'net2deeper':
        # net2deeper for an fc layer with relu is just an identity initializer.
model.add(Dense(64, kernel_initializer='identity',
activation='relu', name='fc1-deeper'))
elif init == 'random-init':
model.add(Dense(64, activation='relu', name='fc1-deeper'))
else:
raise ValueError('Unsupported weight initializer: %s' % init)
model.add(Dense(num_classes, activation='softmax', name='fc2'))
    # Copy the weights of the other layers.
copy_weights(teacher_model, model, layer_names=[
'conv1', 'conv2', 'fc1', 'fc2'])
model.compile(loss='categorical_crossentropy',
optimizer=SGD(learning_rate=0.001, momentum=0.9),
metrics=['accuracy'])
model.fit(x_train, y_train,
epochs=epochs,
validation_data=(x_test, y_test))
# Experiment setup.
def net2wider_experiment():
    '''Benchmark the performance of
    (1) a wider student model with the 'random-pad' initializer
    (2) a wider student model with the 'net2wider' initializer
    '''
print('\nExperiment of Net2WiderNet ...')
print('\n(1) building wider student model by random padding ...')
make_wider_student_model(teacher_model,
x_train, y_train,
x_test, y_test,
init='random-pad',
epochs=epochs)
print('\n(2) building wider student model by net2wider ...')
make_wider_student_model(teacher_model,
x_train, y_train,
x_test, y_test,
init='net2wider',
epochs=epochs)
def net2deeper_experiment():
    '''Benchmark the performance of
    (3) a deeper student model with the 'random-init' initializer
    (4) a deeper student model with the 'net2deeper' initializer
    '''
print('\nExperiment of Net2DeeperNet ...')
print('\n(3) building deeper student model by random init ...')
make_deeper_student_model(teacher_model,
x_train, y_train,
x_test, y_test,
init='random-init',
epochs=epochs)
print('\n(4) building deeper student model by net2deeper ...')
make_deeper_student_model(teacher_model,
x_train, y_train,
x_test, y_test,
init='net2deeper',
epochs=epochs)
print('\n(0) building teacher model ...')
teacher_model = make_teacher_model(x_train, y_train,
x_test, y_test,
epochs=epochs)
# Run the experiments.
net2wider_experiment()
net2deeper_experiment()
``` | keras-docs-zh/sources/examples/mnist_net2net.md/0 | {
"file_path": "keras-docs-zh/sources/examples/mnist_net2net.md",
"repo_id": "keras-docs-zh",
"token_count": 8765
} | 83 |
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/noise.py#L14)</span>
### GaussianNoise
```python
keras.layers.GaussianNoise(stddev)
```
Apply additive zero-centered Gaussian noise.

This is useful to mitigate overfitting
(you could see it as a form of random data augmentation).
Gaussian Noise (GS) is a natural choice as a corruption process
for real-valued inputs.

As it is a regularization layer, it is only active at training time.

__Arguments__

- __stddev__: float, standard deviation of the noise distribution.

__Input shape__

Arbitrary.
Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.

__Output shape__

Same shape as input.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/noise.py#L58)</span>
### GaussianDropout
```python
keras.layers.GaussianDropout(rate)
```
Apply multiplicative 1-centered Gaussian noise.

As it is a regularization layer, it is only active at training time.

__Arguments__

- __rate__: float, drop probability (as with `Dropout`).
    The multiplicative noise will have
    standard deviation `sqrt(rate / (1 - rate))`.

__Input shape__

Arbitrary.
Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.

__Output shape__

Same shape as input.

__References__
- [Dropout: A Simple Way to Prevent Neural Networks from Overfitting Srivastava, Hinton, et al. 2014](http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf)
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/noise.py#L106)</span>
### AlphaDropout
```python
keras.layers.AlphaDropout(rate, noise_shape=None, seed=None)
```
Applies Alpha Dropout to the input.

Alpha Dropout is a `Dropout` that keeps the mean and variance of its inputs
equal to their original values, in order to ensure the self-normalizing
property even after this dropout.
Alpha Dropout fits well with Scaled Exponential Linear Units (SELU)
by randomly setting activations to the negative saturation value.

__Arguments__

- __rate__: float, drop probability (as with `Dropout`).
    The multiplicative noise will have
    standard deviation `sqrt(rate / (1 - rate))`.
- __noise_shape__: A 1-D `Tensor` of type `int32`, representing the shape
    of the randomly generated keep/drop flags.
- __seed__: A Python integer to use as random seed.

__Input shape__

Arbitrary.
Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.

__Output shape__

Same shape as input.

__References__
- [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)
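For reference, a minimal sketch of `AlphaDropout` combined with SELU
activations (the layer sizes are illustrative):

```python
from keras.models import Sequential
from keras.layers import Dense, AlphaDropout

model = Sequential()
# 'lecun_normal' initialization and 'selu' activations are the usual
# companions of AlphaDropout in self-normalizing networks.
model.add(Dense(64, activation='selu',
                kernel_initializer='lecun_normal', input_shape=(20,)))
model.add(AlphaDropout(0.1))
model.add(Dense(10, activation='softmax'))
```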
| keras-docs-zh/sources/layers/noise.md/0 | {
"file_path": "keras-docs-zh/sources/layers/noise.md",
"repo_id": "keras-docs-zh",
"token_count": 1595
} | 84 |
# Wrappers for the Scikit-Learn API

You can use `Sequential` Keras models (single-input only) as part of your Scikit-Learn workflow via the wrappers found in `keras.wrappers.scikit_learn.py`.

There are two wrappers available:

`keras.wrappers.scikit_learn.KerasClassifier(build_fn=None, **sk_params)`, which implements the Scikit-Learn classifier interface,

`keras.wrappers.scikit_learn.KerasRegressor(build_fn=None, **sk_params)`, which implements the Scikit-Learn regressor interface.

### Arguments

- __build_fn__: callable function or class instance
- __sk_params__: model parameters & fitting parameters

`build_fn` should construct, compile, and return a Keras model, which will then be used to fit/predict. One of the following three values can be passed to `build_fn`:

1. A function;
2. An instance of a class that implements the `__call__` method;
3. None. This means you implement a class that inherits from either `KerasClassifier` or `KerasRegressor`. The `__call__` method of that class will then be treated as the default `build_fn`.

`sk_params` takes both model parameters and fitting parameters. Legal model parameters are the arguments of `build_fn`. Note that, like all other estimators in scikit-learn, `build_fn` should provide default values for its arguments, so that you can create the estimator without passing any values to `sk_params`.

`sk_params` can also accept parameters for calling the `fit`, `predict`, `predict_proba`, and `score` methods (e.g., `epochs`, `batch_size`). Fitting (predicting) parameters are selected in the following order:

1. Values passed to the dictionary arguments of the `fit`, `predict`, `predict_proba`, and `score` methods;
2. Values passed to `sk_params`;
3. The default values of the `keras.models.Sequential` `fit`, `predict`, `predict_proba`, and `score` methods.

When using scikit-learn's `grid_search` API, legal tunable parameters are those you can pass to `sk_params`, including fitting parameters. In other words, you can use `grid_search` to search for the best `batch_size` or `epochs` as well as the model parameters.
| keras-docs-zh/sources/scikit-learn-api.md/0 | {
"file_path": "keras-docs-zh/sources/scikit-learn-api.md",
"repo_id": "keras-docs-zh",
"token_count": 1196
} | 85 |
# Keras.io code examples contributor guide
This guide offers general tips to be followed when writing code examples for [keras.io](https://keras.io).
Make sure to read it before opening a PR.
## Code style
### Variable names
Make sure to use fully-spelled out variable names. Do not use single-letter variable names.
Do not use abbreviations unless they're completely obvious (e.g. `num_layers` is ok).
This is bad:
```python
m = get_model(u=32, d=0.5)
```
This is good:
```python
model = get_model(units=32, dropout_rate=0.5)
```
### Imports
Import modules, not individual objects. In particular, don't import individual layers. Typically
you should import the following:
```python
import tensorflow as tf
import keras
from keras import layers
```
Then access objects from these modules:
```python
tf.Variable(...)
tf.reshape(...)
keras.Input(...)
keras.Model(...)
keras.optimizers.Adam(...)
layers.Layer(...)
layers.Conv2D(...)
```
Note: As of `2.13` prefer `import keras` over `from tensorflow import keras`.
### Extra dependencies
If your example requires extra dependencies, don't include installation commands as part of the code of your example.
Instead, mention the dependencies in the text, alongside an example of the pip command to install them, e.g.
```md
This example requires XYZ. You can install it via the following command: `pip install XYZ`
```
---
## Model development best practices
### Model types
**Use Functional models wherever possible.**
Only use subclassing if your model cannot straightforwardly be implemented as a Functional model.
If writing a subclassed Model or Layer, do not instantiate any Layer as part of the `call()` method.
Any layer you use in `call()` should have been instantiated beforehand, either in `__init__()` or in `build()`.
This is bad:
```python
class MyLayer(layers.Layer):
def call(self, x):
...
x = layers.Add()([x, y])
...
```
This is good:
```python
class MyLayer(layers.Layer):
def call(self, inputs):
...
features += other_features
...
```
### Training loop types
**Use `model.fit()` whenever possible.** If you cannot use the built-in `fit()`
(e.g. in the case of a GAN, VAE, similarity model, etc.) then use
a custom `train_step()` method to customize `fit()` ([see guide](https://keras.io/guides/customizing_what_happens_in_fit/)).
If you need to customize how the model iterates on the data (e.g. in the case of an RL algorithm or a curriculum learning algorithm),
then write a training loop from scratch using `tf.GradientTape`.
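For reference, a minimal `train_step()` override typically looks like this
(a sketch, not a required template):

```python
import tensorflow as tf
import keras


class CustomModel(keras.Model):
    def train_step(self, data):
        x, y = data
        with tf.GradientTape() as tape:
            y_pred = self(x, training=True)
            loss = self.compiled_loss(y, y_pred)
        gradients = tape.gradient(loss, self.trainable_variables)
        self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))
        self.compiled_metrics.update_state(y, y_pred)
        return {m.name: m.result() for m in self.metrics}
```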
### Demonstrate generalization power
Whenever you call `fit()` (or otherwise run a training loop), make sure to
use a validation dataset (`validation_data` argument) to monitor the model's performance
on data it has not seen during training. Likewise, when showing inference results,
use samples from a validation or test set, not training samples.
The only exception to this rule is in the case of generative models.
### Demonstrate the full power of your model, but keep the run time short
We need to keep the run time of the notebooks short (typically no more than 20 minutes on a V100 GPU).
However, many models need to be trained for much longer in order to achieve good results. In such
cases:
- Keep the run time short by limiting the number of epochs (e.g. train for a single epoch).
- Highlight the fact that the model should actually be trained for `N` epochs to achieve the expected results
(in a text paragraph and in code comments).
- Showcase the results of the full model trained for `N` epochs, by providing accuracy numbers and showing
inference results of the full model. You can simply insert images in the text paragraphs, hosted on
[imgur.com](https://imgur.com).
### Argument validation
In general, user-provided input argument validation is not required in custom classes / functions in a keras.io code example.
If you want to add input validation, do so with `ValueError`; do not use `assert` statements.
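For example (an illustrative helper):

```python
def apply_rate(rate):
    if not 0 <= rate <= 1:
        raise ValueError(f"`rate` must be in [0, 1]. Received: rate={rate}")
    return rate
```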
### Data input
Prefer using either NumPy arrays or a `tf.data.Dataset` for data input whenever possible.
If impossible, then use a `keras.utils.Sequence` subclass. Do not use regular Python generators.
When using `.map()` with a `tf.data.Dataset`, make sure to pass a value for `num_parallel_calls`.
Typically, you can set the value to be 4, 8, or `tf.data.AUTOTUNE`.
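For example (assuming a `preprocess` function defined elsewhere):

```python
dataset = dataset.map(preprocess, num_parallel_calls=tf.data.AUTOTUNE)
```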
---
## Text style
### Length
Examples should be clear and detailed, but not overly verbose. You can add as much text content as you want, as
long as each additional sentence / paragraph provides useful information that helps with understanding the example.
Never use any "filler" content.
### Style
- Use present tense ("We present... we implement...")
- Always define abbreviations / acronyms the first time you use them ("We implement a Graph Attention Network (GAT)...")
- All and any sentence should convey a useful idea; avoid filler at all costs.
### Proofreading
Make sure to proofread your text paragraphs to avoid typos.
Every sentence should start with a capital letter and should end with a period. This applies to code comments as well.
### Introduction and conclusion
There should be an introduction that explains what the reader should expect to find in the example,
and why it is useful/interesting.
If the example presents a specific technique,
the introduction should also include an overview of the technique as well as links to external references.
There should be a conclusion section that recapitulates key takeaways from the example, and offers pointers to next steps.
### Code elements
All code keywords should be formatted with backticks, e.g. `like_this` (standard Markdown code formatting).
When referring to a function or method name, it should be followed with parens, like this: `my_function()` or `my_method()`.
### Mathematical notation
Do not use any LaTeX notation. Explain math operations with pseudocode.
If you really must have an equation, then embed it as an image.
### Line length
Keep text lines relatively short (about 80 characters), unless it's a link.
### Markdown links
Each markdown link should fit on a single line, unbroken, like this:
```md
Here's a link:
[This is the link text](https://github.com/keras-team/keras-io/blob/master/contributor_guide.md)
```
Do not break the link like this (or in any other way):
```md
[This is the link text](
https://github.com/keras-team/keras-io/blob/master/contributor_guide.md)
```
### Markdown lists
There should be a line break before the first item in any list, e.g.
This is good:
```md
Here's a list:
- First item
- Second item
```
This is bad:
```md
Here's a badly formatted list:
- First item
- Second item
```
| keras-io/contributor_guide.md/0 | {
"file_path": "keras-io/contributor_guide.md",
"repo_id": "keras-io",
"token_count": 1877
} | 86 |
<jupyter_start><jupyter_text>DreamBooth
**Author:** [Sayak Paul](https://twitter.com/RisingSayak), [Chansung Park](https://twitter.com/algo_diver)
**Date created:** 2023/02/01
**Last modified:** 2023/02/05
**Description:** Implementing DreamBooth.

Introduction

In this example, we implement DreamBooth, a fine-tuning technique to teach new visual
concepts to text-conditioned Diffusion models with just 3 - 5 images. DreamBooth was
proposed in
[DreamBooth: Fine Tuning Text-to-Image Diffusion Models for Subject-Driven Generation](https://arxiv.org/abs/2208.12242)
by Ruiz et al.

DreamBooth, in a sense, is similar to the
[traditional way of fine-tuning a text-conditioned Diffusion model](https://keras.io/examples/generative/finetune_stable_diffusion/)
except for a few gotchas. This example assumes that you have basic familiarity with
Diffusion models and how to fine-tune them. Here are some reference examples that might
help you to get familiarized quickly:

* [High-performance image generation using Stable Diffusion in KerasCV](https://keras.io/guides/keras_cv/generate_images_with_stable_diffusion/)
* [Teach StableDiffusion new concepts via Textual Inversion](https://keras.io/examples/generative/fine_tune_via_textual_inversion/)
* [Fine-tuning Stable Diffusion](https://keras.io/examples/generative/finetune_stable_diffusion/)

First, let's install the latest versions of KerasCV and TensorFlow.<jupyter_code>!pip install -q -U keras_cv==0.6.0
!pip install -q -U tensorflow<jupyter_output><empty_output><jupyter_text>If you're running the code, please ensure you're using a GPU with at least 24 GBs of VRAM.

Initial imports<jupyter_code>import math
import keras_cv
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from imutils import paths
from tensorflow import keras<jupyter_output><empty_output><jupyter_text>Usage of DreamBooth

... is very versatile. By teaching Stable Diffusion about your favorite visual
concepts, you can

* Recontextualize objects in interesting ways:
* Generate artistic renderings of the underlying visual concept:

And many other applications. We welcome you to check out the original
[DreamBooth paper](https://arxiv.org/abs/2208.12242) in this regard.

Download the instance and class images

DreamBooth uses a technique called "prior preservation" to meaningfully guide the
training procedure such that the fine-tuned models can still preserve some of the prior
semantics of the visual concept you're introducing. To know more about the idea of
"prior preservation" refer to [this document](https://dreambooth.github.io/).

Here, we need to introduce a few key terms specific to DreamBooth:

* **Unique class**: Examples include "dog", "person", etc. In this example, we use "dog".
* **Unique identifier**: A unique identifier that is prepended to the unique class while
forming the "instance prompts". In this example, we use "sks" as this unique identifier.
* **Instance prompt**: Denotes a prompt that best describes the "instance images". An
example prompt could be - "f"a photo of {unique_id} {unique_class}". So, for our example,
this becomes - "a photo of sks dog".
* **Class prompt**: Denotes a prompt without the unique identifier. This prompt is used
for generating "class images" for prior preservation. For our example, this prompt is -
"a photo of dog".
* **Instance images**: Denote the images that represent the visual concept you're trying
to teach aka the "instance prompt". This number is typically just 3 - 5. We typically
gather these images ourselves.
* **Class images**: Denote the images generated using the "class prompt" for using prior
preservation in DreamBooth training. We leverage the pre-trained model before fine-tuning
it to generate these class images. Typically, 200 - 300 class images are enough.

In code, this generation process looks quite simple:

```py
from tqdm import tqdm
import numpy as np
import hashlib
import keras_cv
import PIL
import os

class_images_dir = "class-images"
os.makedirs(class_images_dir, exist_ok=True)

model = keras_cv.models.StableDiffusion(img_width=512, img_height=512, jit_compile=True)

class_prompt = "a photo of dog"
num_imgs_to_generate = 200
for i in tqdm(range(num_imgs_to_generate)):
    images = model.text_to_image(
        class_prompt,
        batch_size=3,
    )
    idx = np.random.choice(len(images))
    selected_image = PIL.Image.fromarray(images[idx])
    hash_image = hashlib.sha1(selected_image.tobytes()).hexdigest()
    image_filename = os.path.join(class_images_dir, f"{hash_image}.jpg")
    selected_image.save(image_filename)
```

To keep the runtime of this example short, the authors of this example have gone ahead
and generated some class images using
[this notebook](https://colab.research.google.com/gist/sayakpaul/6b5de345d29cf5860f84b6d04d958692/generate_class_priors.ipynb).

**Note** that prior preservation is an optional technique used in DreamBooth, but it
almost always helps in improving the quality of the generated images.<jupyter_code>instance_images_root = tf.keras.utils.get_file(
origin="https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/instance-images.tar.gz",
untar=True,
)
class_images_root = tf.keras.utils.get_file(
origin="https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/class-images.tar.gz",
untar=True,
)<jupyter_output><empty_output><jupyter_text>Visualize images

First, let's load the image paths.<jupyter_code>instance_image_paths = list(paths.list_images(instance_images_root))
class_image_paths = list(paths.list_images(class_images_root))<jupyter_output><empty_output><jupyter_text>Then we load the images from the paths.<jupyter_code>def load_images(image_paths):
images = [np.array(keras.utils.load_img(path)) for path in image_paths]
    return images<jupyter_output><empty_output><jupyter_text>And then we make use of a utility function to plot the loaded images.<jupyter_code>def plot_images(images, title=None):
plt.figure(figsize=(20, 20))
for i in range(len(images)):
ax = plt.subplot(1, len(images), i + 1)
if title is not None:
plt.title(title)
plt.imshow(images[i])
plt.axis("off")<jupyter_output><empty_output><jupyter_text>**Instance images**:<jupyter_code>plot_images(load_images(instance_image_paths[:5]))<jupyter_output><empty_output><jupyter_text>**Class images**:<jupyter_code>plot_images(load_images(class_image_paths[:5]))<jupyter_output><empty_output><jupyter_text>Prepare datasetsDataset preparation includes two stages: (1): preparing the captions, (2) processing theimages. Prepare the captions<jupyter_code># Since we're using prior preservation, we need to match the number
# of instance images we're using. We just repeat the instance image paths
# to do so.
new_instance_image_paths = []
for index in range(len(class_image_paths)):
instance_image = instance_image_paths[index % len(instance_image_paths)]
new_instance_image_paths.append(instance_image)
# We just repeat the prompts / captions per images.
unique_id = "sks"
class_label = "dog"
instance_prompt = f"a photo of {unique_id} {class_label}"
instance_prompts = [instance_prompt] * len(new_instance_image_paths)
class_prompt = f"a photo of {class_label}"
class_prompts = [class_prompt] * len(class_image_paths)<jupyter_output><empty_output><jupyter_text>Next, we embed the prompts to save some compute.<jupyter_code>import itertools
# The padding token and maximum prompt length are specific to the text encoder.
# If you're using a different text encoder be sure to change them accordingly.
padding_token = 49407
max_prompt_length = 77
# Load the tokenizer.
tokenizer = keras_cv.models.stable_diffusion.SimpleTokenizer()
# Method to tokenize and pad the tokens.
def process_text(caption):
tokens = tokenizer.encode(caption)
tokens = tokens + [padding_token] * (max_prompt_length - len(tokens))
return np.array(tokens)
# Collate the tokenized captions into an array.
tokenized_texts = np.empty(
(len(instance_prompts) + len(class_prompts), max_prompt_length)
)
for i, caption in enumerate(itertools.chain(instance_prompts, class_prompts)):
tokenized_texts[i] = process_text(caption)
# We also pre-compute the text embeddings to save some memory during training.
POS_IDS = tf.convert_to_tensor([list(range(max_prompt_length))], dtype=tf.int32)
text_encoder = keras_cv.models.stable_diffusion.TextEncoder(max_prompt_length)
gpus = tf.config.list_logical_devices("GPU")
# Ensure the computation takes place on a GPU.
# Note that it's done automatically when there's a GPU present.
# This example just attempts at showing how you can do it
# more explicitly.
with tf.device(gpus[0].name):
embedded_text = text_encoder(
[tf.convert_to_tensor(tokenized_texts), POS_IDS], training=False
).numpy()
# To ensure text_encoder doesn't occupy any GPU space.
del text_encoder<jupyter_output><empty_output><jupyter_text>Prepare the images<jupyter_code>resolution = 512
auto = tf.data.AUTOTUNE
augmenter = keras.Sequential(
layers=[
keras_cv.layers.CenterCrop(resolution, resolution),
keras_cv.layers.RandomFlip(),
keras.layers.Rescaling(scale=1.0 / 127.5, offset=-1),
]
)
def process_image(image_path, tokenized_text):
image = tf.io.read_file(image_path)
image = tf.io.decode_png(image, 3)
image = tf.image.resize(image, (resolution, resolution))
return image, tokenized_text
def apply_augmentation(image_batch, embedded_tokens):
return augmenter(image_batch), embedded_tokens
def prepare_dict(instance_only=True):
def fn(image_batch, embedded_tokens):
if instance_only:
batch_dict = {
"instance_images": image_batch,
"instance_embedded_texts": embedded_tokens,
}
return batch_dict
else:
batch_dict = {
"class_images": image_batch,
"class_embedded_texts": embedded_tokens,
}
return batch_dict
return fn
def assemble_dataset(image_paths, embedded_texts, instance_only=True, batch_size=1):
dataset = tf.data.Dataset.from_tensor_slices((image_paths, embedded_texts))
dataset = dataset.map(process_image, num_parallel_calls=auto)
dataset = dataset.shuffle(5, reshuffle_each_iteration=True)
dataset = dataset.batch(batch_size)
dataset = dataset.map(apply_augmentation, num_parallel_calls=auto)
prepare_dict_fn = prepare_dict(instance_only=instance_only)
dataset = dataset.map(prepare_dict_fn, num_parallel_calls=auto)
return dataset<jupyter_output><empty_output><jupyter_text>Assemble dataset<jupyter_code>instance_dataset = assemble_dataset(
new_instance_image_paths,
embedded_text[: len(new_instance_image_paths)],
)
class_dataset = assemble_dataset(
class_image_paths,
embedded_text[len(new_instance_image_paths) :],
instance_only=False,
)
train_dataset = tf.data.Dataset.zip((instance_dataset, class_dataset))<jupyter_output><empty_output><jupyter_text>Check shapes

Now that the dataset has been prepared, let's quickly check what's inside it.<jupyter_code>sample_batch = next(iter(train_dataset))
print(sample_batch[0].keys(), sample_batch[1].keys())
for k in sample_batch[0]:
print(k, sample_batch[0][k].shape)
for k in sample_batch[1]:
    print(k, sample_batch[1][k].shape)<jupyter_output><empty_output><jupyter_text>During training, we make use of these keys to gather the images and text embeddings and
concat them accordingly.

DreamBooth training loop

Our DreamBooth training loop is very much inspired by
[this script](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth.py)
provided by the Diffusers team at Hugging Face. However, there is an important
difference to note. We only fine-tune the UNet (the model responsible for predicting
noise) and don't fine-tune the text encoder in this example. If you're looking for an
implementation that also performs the additional fine-tuning of the text encoder, refer
to [this repository](https://github.com/sayakpaul/dreambooth-keras/).<jupyter_code>import tensorflow.experimental.numpy as tnp
class DreamBoothTrainer(tf.keras.Model):
# Reference:
# https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth.py
def __init__(
self,
diffusion_model,
vae,
noise_scheduler,
use_mixed_precision=False,
prior_loss_weight=1.0,
max_grad_norm=1.0,
**kwargs,
):
super().__init__(**kwargs)
self.diffusion_model = diffusion_model
self.vae = vae
self.noise_scheduler = noise_scheduler
self.prior_loss_weight = prior_loss_weight
self.max_grad_norm = max_grad_norm
self.use_mixed_precision = use_mixed_precision
self.vae.trainable = False
def train_step(self, inputs):
instance_batch = inputs[0]
class_batch = inputs[1]
instance_images = instance_batch["instance_images"]
instance_embedded_text = instance_batch["instance_embedded_texts"]
class_images = class_batch["class_images"]
class_embedded_text = class_batch["class_embedded_texts"]
images = tf.concat([instance_images, class_images], 0)
embedded_texts = tf.concat([instance_embedded_text, class_embedded_text], 0)
batch_size = tf.shape(images)[0]
with tf.GradientTape() as tape:
# Project image into the latent space and sample from it.
latents = self.sample_from_encoder_outputs(self.vae(images, training=False))
# Know more about the magic number here:
# https://keras.io/examples/generative/fine_tune_via_textual_inversion/
latents = latents * 0.18215
# Sample noise that we'll add to the latents.
noise = tf.random.normal(tf.shape(latents))
# Sample a random timestep for each image.
timesteps = tnp.random.randint(
0, self.noise_scheduler.train_timesteps, (batch_size,)
)
# Add noise to the latents according to the noise magnitude at each timestep
# (this is the forward diffusion process).
noisy_latents = self.noise_scheduler.add_noise(
tf.cast(latents, noise.dtype), noise, timesteps
)
# Get the target for loss depending on the prediction type
# just the sampled noise for now.
target = noise # noise_schedule.predict_epsilon == True
# Predict the noise residual and compute loss.
timestep_embedding = tf.map_fn(
lambda t: self.get_timestep_embedding(t), timesteps, dtype=tf.float32
)
model_pred = self.diffusion_model(
[noisy_latents, timestep_embedding, embedded_texts], training=True
)
loss = self.compute_loss(target, model_pred)
if self.use_mixed_precision:
loss = self.optimizer.get_scaled_loss(loss)
# Update parameters of the diffusion model.
trainable_vars = self.diffusion_model.trainable_variables
gradients = tape.gradient(loss, trainable_vars)
if self.use_mixed_precision:
gradients = self.optimizer.get_unscaled_gradients(gradients)
gradients = [tf.clip_by_norm(g, self.max_grad_norm) for g in gradients]
self.optimizer.apply_gradients(zip(gradients, trainable_vars))
return {m.name: m.result() for m in self.metrics}
def get_timestep_embedding(self, timestep, dim=320, max_period=10000):
half = dim // 2
log_max_period = tf.math.log(tf.cast(max_period, tf.float32))
freqs = tf.math.exp(
-log_max_period * tf.range(0, half, dtype=tf.float32) / half
)
args = tf.convert_to_tensor([timestep], dtype=tf.float32) * freqs
embedding = tf.concat([tf.math.cos(args), tf.math.sin(args)], 0)
return embedding
def sample_from_encoder_outputs(self, outputs):
mean, logvar = tf.split(outputs, 2, axis=-1)
logvar = tf.clip_by_value(logvar, -30.0, 20.0)
std = tf.exp(0.5 * logvar)
sample = tf.random.normal(tf.shape(mean), dtype=mean.dtype)
return mean + std * sample
def compute_loss(self, target, model_pred):
# Chunk the noise and model_pred into two parts and compute the loss
# on each part separately.
# Since the first half of the inputs has instance samples and the second half
# has class samples, we do the chunking accordingly.
model_pred, model_pred_prior = tf.split(
model_pred, num_or_size_splits=2, axis=0
)
target, target_prior = tf.split(target, num_or_size_splits=2, axis=0)
# Compute instance loss.
loss = self.compiled_loss(target, model_pred)
# Compute prior loss.
prior_loss = self.compiled_loss(target_prior, model_pred_prior)
# Add the prior loss to the instance loss.
loss = loss + self.prior_loss_weight * prior_loss
return loss
def save_weights(self, filepath, overwrite=True, save_format=None, options=None):
# Overriding this method will allow us to use the `ModelCheckpoint`
# callback directly with this trainer class. In this case, it will
# only checkpoint the `diffusion_model` since that's what we're training
# during fine-tuning.
self.diffusion_model.save_weights(
filepath=filepath,
overwrite=overwrite,
save_format=save_format,
options=options,
)
def load_weights(self, filepath, by_name=False, skip_mismatch=False, options=None):
# Similarly override `load_weights()` so that we can directly call it on
# the trainer class object.
self.diffusion_model.load_weights(
filepath=filepath,
by_name=by_name,
skip_mismatch=skip_mismatch,
options=options,
)<jupyter_output><empty_output><jupyter_text>Trainer initialization<jupyter_code># Comment it if you are not using a GPU having tensor cores.
tf.keras.mixed_precision.set_global_policy("mixed_float16")
use_mp = True # Set it to False if you're not using a GPU with tensor cores.
image_encoder = keras_cv.models.stable_diffusion.ImageEncoder()
dreambooth_trainer = DreamBoothTrainer(
diffusion_model=keras_cv.models.stable_diffusion.DiffusionModel(
resolution, resolution, max_prompt_length
),
# Remove the top layer from the encoder, which cuts off the variance and only
# returns the mean.
vae=tf.keras.Model(
image_encoder.input,
image_encoder.layers[-2].output,
),
noise_scheduler=keras_cv.models.stable_diffusion.NoiseScheduler(),
use_mixed_precision=use_mp,
)
# These hyperparameters come from this tutorial by Hugging Face:
# https://github.com/huggingface/diffusers/tree/main/examples/dreambooth
learning_rate = 5e-6
beta_1, beta_2 = 0.9, 0.999
weight_decay = 1e-2
epsilon = 1e-08
optimizer = tf.keras.optimizers.experimental.AdamW(
learning_rate=learning_rate,
weight_decay=weight_decay,
beta_1=beta_1,
beta_2=beta_2,
epsilon=epsilon,
)
dreambooth_trainer.compile(optimizer=optimizer, loss="mse")<jupyter_output><empty_output><jupyter_text>Train!

We first calculate the number of epochs we need to train for.<jupyter_code>num_update_steps_per_epoch = train_dataset.cardinality()
max_train_steps = 800
epochs = math.ceil(max_train_steps / num_update_steps_per_epoch)
print(f"Training for {epochs} epochs.")<jupyter_output><empty_output><jupyter_text>And then we start training!<jupyter_code>ckpt_path = "dreambooth-unet.h5"
ckpt_callback = tf.keras.callbacks.ModelCheckpoint(
ckpt_path,
save_weights_only=True,
monitor="loss",
mode="min",
)
dreambooth_trainer.fit(train_dataset, epochs=epochs, callbacks=[ckpt_callback])<jupyter_output><empty_output><jupyter_text>Experiments and inference

We ran various experiments with a slightly modified version of this example. Our
experiments are based on
[this repository](https://github.com/sayakpaul/dreambooth-keras/) and are inspired by
[this blog post](https://huggingface.co/blog/dreambooth) from Hugging Face.

First, let's see how we can use the fine-tuned checkpoint for running inference.<jupyter_code># Initialize a new Stable Diffusion model.
dreambooth_model = keras_cv.models.StableDiffusion(
img_width=resolution, img_height=resolution, jit_compile=True
)
dreambooth_model.diffusion_model.load_weights(ckpt_path)
# Note how the unique identifier and the class have been used in the prompt.
prompt = f"A photo of {unique_id} {class_label} in a bucket"
num_imgs_to_gen = 3
images_dreamboothed = dreambooth_model.text_to_image(prompt, batch_size=num_imgs_to_gen)
plot_images(images_dreamboothed, prompt)<jupyter_output><empty_output><jupyter_text>Now, let's load checkpoints from a different experiment we conducted where we also
fine-tuned the text encoder along with the UNet:<jupyter_code>unet_weights = tf.keras.utils.get_file(
origin="https://huggingface.co/chansung/dreambooth-dog/resolve/main/lr%409e-06-max_train_steps%40200-train_text_encoder%40True-unet.h5"
)
text_encoder_weights = tf.keras.utils.get_file(
origin="https://huggingface.co/chansung/dreambooth-dog/resolve/main/lr%409e-06-max_train_steps%40200-train_text_encoder%40True-text_encoder.h5"
)
dreambooth_model.diffusion_model.load_weights(unet_weights)
dreambooth_model.text_encoder.load_weights(text_encoder_weights)
images_dreamboothed = dreambooth_model.text_to_image(prompt, batch_size=num_imgs_to_gen)
plot_images(images_dreamboothed, prompt)<jupyter_output><empty_output><jupyter_text>The default number of steps for generating an image in `text_to_image()`
[is 50](https://github.com/keras-team/keras-cv/blob/3575bc3b944564fe15b46b917e6555aa6a9d7be0/keras_cv/models/stable_diffusion/stable_diffusion.py#L73).
Let's increase it to 100.<jupyter_code>images_dreamboothed = dreambooth_model.text_to_image(
prompt, batch_size=num_imgs_to_gen, num_steps=100
)
plot_images(images_dreamboothed, prompt)<jupyter_output><empty_output> | keras-io/examples/generative/ipynb/dreambooth.ipynb/0 | {
"file_path": "keras-io/examples/generative/ipynb/dreambooth.ipynb",
"repo_id": "keras-io",
"token_count": 8262
} | 87 |
# Graph representation learning with node2vec
**Author:** [Khalid Salama](https://www.linkedin.com/in/khalid-salama-24403144/)<br>
**Date created:** 2021/05/15<br>
**Last modified:** 2021/05/15<br>
**Description:** Implementing the node2vec model to generate embeddings for movies from the MovieLens dataset.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/graph/ipynb/node2vec_movielens.ipynb) <span class="k-dot">โข</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/graph/node2vec_movielens.py)
---
## Introduction
Learning useful representations of objects structured as graphs benefits
a variety of machine learning (ML) applications, such as the analysis of social and
communication networks, biomedical studies, and recommendation systems.
[Graph representation learning](https://www.cs.mcgill.ca/~wlh/grl_book/) aims to
learn embeddings for the graph nodes, which can be used for a variety of ML tasks
such as node label prediction (e.g. categorizing an article based on its citations)
and link prediction (e.g. recommending an interest group to a user in a social network).
[node2vec](https://arxiv.org/abs/1607.00653) is a simple, yet scalable and effective
technique for learning low-dimensional embeddings for nodes in a graph by optimizing
a neighborhood-preserving objective. The aim is to learn similar embeddings for
neighboring nodes, with respect to the graph structure.
Given your data items structured as a graph (where the items are represented as
nodes and the relationships between items are represented as edges),
node2vec works as follows:
1. Generate item sequences using (biased) random walks.
2. Create positive and negative training examples from these sequences.
3. Train a [word2vec](https://www.tensorflow.org/tutorials/text/word2vec) model
(skip-gram) to learn embeddings for the items.
In this example, we demonstrate the node2vec technique on the
[small version of the Movielens dataset](https://files.grouplens.org/datasets/movielens/ml-latest-small-README.html)
to learn movie embeddings. Such a dataset can be represented as a graph by treating
the movies as nodes, and creating edges between movies that have similar ratings
by the users. The learnt movie embeddings can be used for tasks such as movie recommendation,
or movie genres prediction.
This example requires the `networkx` package, which can be installed using the following command:
```shell
pip install networkx
```
---
## Setup
```python
import os
from collections import defaultdict
import math
import networkx as nx
import random
from tqdm import tqdm
from zipfile import ZipFile
from urllib.request import urlretrieve
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import matplotlib.pyplot as plt
```
---
## Download the MovieLens dataset and prepare the data
The small version of the MovieLens dataset includes around 100k ratings
from 610 users on 9,742 movies.
First, let's download the dataset. The downloaded folder contains several data
files; in this example, we will only need the `movies.csv` and `ratings.csv` files.
```python
urlretrieve(
"http://files.grouplens.org/datasets/movielens/ml-latest-small.zip", "movielens.zip"
)
ZipFile("movielens.zip", "r").extractall()
```
Then, we load the data into a Pandas DataFrame and perform some basic preprocessing.
```python
# Load movies to a DataFrame.
movies = pd.read_csv("ml-latest-small/movies.csv")
# Create a `movieId` string.
movies["movieId"] = movies["movieId"].apply(lambda x: f"movie_{x}")
# Load ratings to a DataFrame.
ratings = pd.read_csv("ml-latest-small/ratings.csv")
# Convert the `rating` column to floating point.
ratings["rating"] = ratings["rating"].apply(lambda x: float(x))
# Create the `movieId` string.
ratings["movieId"] = ratings["movieId"].apply(lambda x: f"movie_{x}")
print("Movies data shape:", movies.shape)
print("Ratings data shape:", ratings.shape)
```
<div class="k-default-codeblock">
```
Movies data shape: (9742, 3)
Ratings data shape: (100836, 4)
```
</div>
Let's inspect a sample instance of the `ratings` DataFrame.
```python
ratings.head()
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
<div class="k-default-codeblock">
```
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
```
</div>
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>userId</th>
<th>movieId</th>
<th>rating</th>
<th>timestamp</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>1</td>
<td>movie_1</td>
<td>4.0</td>
<td>964982703</td>
</tr>
<tr>
<th>1</th>
<td>1</td>
<td>movie_3</td>
<td>4.0</td>
<td>964981247</td>
</tr>
<tr>
<th>2</th>
<td>1</td>
<td>movie_6</td>
<td>4.0</td>
<td>964982224</td>
</tr>
<tr>
<th>3</th>
<td>1</td>
<td>movie_47</td>
<td>5.0</td>
<td>964983815</td>
</tr>
<tr>
<th>4</th>
<td>1</td>
<td>movie_50</td>
<td>5.0</td>
<td>964982931</td>
</tr>
</tbody>
</table>
</div>
Next, let's check a sample instance of the `movies` DataFrame.
```python
movies.head()
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
<div class="k-default-codeblock">
```
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
```
</div>
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>movieId</th>
<th>title</th>
<th>genres</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>movie_1</td>
<td>Toy Story (1995)</td>
<td>Adventure|Animation|Children|Comedy|Fantasy</td>
</tr>
<tr>
<th>1</th>
<td>movie_2</td>
<td>Jumanji (1995)</td>
<td>Adventure|Children|Fantasy</td>
</tr>
<tr>
<th>2</th>
<td>movie_3</td>
<td>Grumpier Old Men (1995)</td>
<td>Comedy|Romance</td>
</tr>
<tr>
<th>3</th>
<td>movie_4</td>
<td>Waiting to Exhale (1995)</td>
<td>Comedy|Drama|Romance</td>
</tr>
<tr>
<th>4</th>
<td>movie_5</td>
<td>Father of the Bride Part II (1995)</td>
<td>Comedy</td>
</tr>
</tbody>
</table>
</div>
Implement two utility functions for the `movies` DataFrame.
```python
def get_movie_title_by_id(movieId):
return list(movies[movies.movieId == movieId].title)[0]
def get_movie_id_by_title(title):
return list(movies[movies.title == title].movieId)[0]
```
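As a quick sanity check of these helpers (based on the `movies` sample shown
above, where `movie_1` is *Toy Story (1995)*), the two lookups round-trip:

```python
# Round-trip check: id -> title and title -> id.
assert get_movie_title_by_id("movie_1") == "Toy Story (1995)"
assert get_movie_id_by_title("Toy Story (1995)") == "movie_1"
```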
---
## Construct the Movies graph
We create an edge between two movie nodes in the graph if both movies were rated
by the same user with a rating >= `min_rating`. The weight of the edge is based on the
[pointwise mutual information](https://en.wikipedia.org/wiki/Pointwise_mutual_information)
between the two movies, which is computed as: `log(xy) - log(x) - log(y) + log(D)`, where:

* `xy` is how many users rated both movie `x` and movie `y` with >= `min_rating`.
* `x` is how many users rated movie `x` with >= `min_rating`.
* `y` is how many users rated movie `y` with >= `min_rating`.
* `D` is the total number of movie ratings >= `min_rating`.
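As a quick sanity check of the formula, here is a minimal sketch with made-up
counts (all numbers below are hypothetical, chosen only to illustrate the computation):

```python
import math

# Hypothetical counts: 20 users co-rated x and y highly, 100 rated x highly,
# 80 rated y highly, out of 10,000 qualifying ratings overall.
xy, x, y, D = 20, 100, 80, 10_000
pmi = math.log(xy) - math.log(x) - math.log(y) + math.log(D)
print(round(pmi, 3))  # 3.219: positive, so x and y co-occur more often than chance
```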
### Step 1: create the weighted edges between movies.
```python
min_rating = 5
pair_frequency = defaultdict(int)
item_frequency = defaultdict(int)
# Filter instances where rating is greater than or equal to min_rating.
rated_movies = ratings[ratings.rating >= min_rating]
# Group instances by user.
movies_grouped_by_users = list(rated_movies.groupby("userId"))
for group in tqdm(
movies_grouped_by_users,
position=0,
leave=True,
desc="Compute movie rating frequencies",
):
# Get a list of movies rated by the user.
current_movies = list(group[1]["movieId"])
for i in range(len(current_movies)):
item_frequency[current_movies[i]] += 1
for j in range(i + 1, len(current_movies)):
x = min(current_movies[i], current_movies[j])
y = max(current_movies[i], current_movies[j])
pair_frequency[(x, y)] += 1
```
<div class="k-default-codeblock">
```
Compute movie rating frequencies: 100%|████████████████████████████████████████| 573/573 [00:00<00:00, 1049.83it/s]
```
</div>
### Step 2: create the graph with the nodes and the edges
To reduce the number of edges between nodes, we only add an edge between movies
if the weight of the edge is greater than or equal to `min_weight`.
```python
min_weight = 10
D = math.log(sum(item_frequency.values()))
# Create the movies undirected graph.
movies_graph = nx.Graph()
# Add weighted edges between movies.
# This automatically adds the movie nodes to the graph.
for pair in tqdm(
pair_frequency, position=0, leave=True, desc="Creating the movie graph"
):
x, y = pair
xy_frequency = pair_frequency[pair]
x_frequency = item_frequency[x]
y_frequency = item_frequency[y]
pmi = math.log(xy_frequency) - math.log(x_frequency) - math.log(y_frequency) + D
weight = pmi * xy_frequency
# Only include edges with weight >= min_weight.
if weight >= min_weight:
movies_graph.add_edge(x, y, weight=weight)
```
<div class="k-default-codeblock">
```
Creating the movie graph: 100%|████████████████████████████████████████| 298586/298586 [00:00<00:00, 552893.62it/s]
```
</div>
Let's display the total number of nodes and edges in the graph.
Note that the number of nodes is less than the total number of movies,
since only the movies that have edges to other movies are added.
```python
print("Total number of graph nodes:", movies_graph.number_of_nodes())
print("Total number of graph edges:", movies_graph.number_of_edges())
```
<div class="k-default-codeblock">
```
Total number of graph nodes: 1405
Total number of graph edges: 40043
```
</div>
Let's display the average node degree (number of neighbours) in the graph.
```python
degrees = []
for node in movies_graph.nodes:
degrees.append(movies_graph.degree[node])
print("Average node degree:", round(sum(degrees) / len(degrees), 2))
```
<div class="k-default-codeblock">
```
Average node degree: 57.0
```
</div>
### Step 3: Create vocabulary and a mapping from tokens to integer indices
The vocabulary consists of a reserved `"NA"` token (at index 0) followed by the nodes (movie IDs) in the graph.
```python
vocabulary = ["NA"] + list(movies_graph.nodes)
vocabulary_lookup = {token: idx for idx, token in enumerate(vocabulary)}
```
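For instance, the lookup round-trips between tokens and integer indices (a
minimal check; `"NA"` is reserved for index 0):

```python
assert vocabulary_lookup["NA"] == 0
# Any node in the graph maps to a unique integer index >= 1 and back.
some_node = next(iter(movies_graph.nodes))
assert vocabulary[vocabulary_lookup[some_node]] == some_node
```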
---
## Implement the biased random walk
A random walk starts from a given node, and randomly picks a neighbour node to move to.
If the edges are weighted, the neighbour is selected *probabilistically* with
respect to the weights of the edges between the current node and its neighbours.
This procedure is repeated for `num_steps` to generate a sequence of *related* nodes.
The [*biased* random walk](https://en.wikipedia.org/wiki/Biased_random_walk_on_a_graph) balances between **breadth-first sampling**
(where only local neighbours are visited) and **depth-first sampling**
(where distant neighbours are visited) by introducing the following two parameters:
1. **Return parameter** (`p`): Controls the likelihood of immediately revisiting
a node in the walk. Setting it to a high value encourages moderate exploration,
while setting it to a low value keeps the walk local.
2. **In-out parameter** (`q`): Allows the search to differentiate
between *inward* and *outward* nodes. Setting it to a high value biases the
random walk towards local nodes, while setting it to a low value biases the walk
to visit nodes which are further away.
```python
def next_step(graph, previous, current, p, q):
neighbors = list(graph.neighbors(current))
weights = []
# Adjust the weights of the edges to the neighbors with respect to p and q.
for neighbor in neighbors:
if neighbor == previous:
# Control the probability to return to the previous node.
weights.append(graph[current][neighbor]["weight"] / p)
elif graph.has_edge(neighbor, previous):
# The probability of visiting a local node.
weights.append(graph[current][neighbor]["weight"])
else:
# Control the probability to move forward.
weights.append(graph[current][neighbor]["weight"] / q)
# Compute the probabilities of visiting each neighbor.
weight_sum = sum(weights)
probabilities = [weight / weight_sum for weight in weights]
    # Probabilistically select a neighbor to visit.
    next_node = np.random.choice(neighbors, size=1, p=probabilities)[0]
    return next_node
def random_walk(graph, num_walks, num_steps, p, q):
walks = []
nodes = list(graph.nodes())
# Perform multiple iterations of the random walk.
for walk_iteration in range(num_walks):
random.shuffle(nodes)
for node in tqdm(
nodes,
position=0,
leave=True,
desc=f"Random walks iteration {walk_iteration + 1} of {num_walks}",
):
# Start the walk with a random node from the graph.
walk = [node]
# Randomly walk for num_steps.
while len(walk) < num_steps:
current = walk[-1]
previous = walk[-2] if len(walk) > 1 else None
                # Compute the next node to visit.
                next_node = next_step(graph, previous, current, p, q)
                walk.append(next_node)
# Replace node ids (movie ids) in the walk with token ids.
walk = [vocabulary_lookup[token] for token in walk]
# Add the walk to the generated sequence.
walks.append(walk)
return walks
```
---
## Generate training data using the biased random walk
You can explore different configurations of `p` and `q` to observe different
sets of related movies.
```python
# Random walk return parameter.
p = 1
# Random walk in-out parameter.
q = 1
# Number of iterations of random walks.
num_walks = 5
# Number of steps of each random walk.
num_steps = 10
walks = random_walk(movies_graph, num_walks, num_steps, p, q)
print("Number of walks generated:", len(walks))
```
<div class="k-default-codeblock">
```
Random walks iteration 1 of 5: 100%|████████████████████████████████████████| 1405/1405 [00:04<00:00, 291.76it/s]
Random walks iteration 2 of 5: 100%|████████████████████████████████████████| 1405/1405 [00:04<00:00, 302.56it/s]
Random walks iteration 3 of 5: 100%|████████████████████████████████████████| 1405/1405 [00:04<00:00, 294.52it/s]
Random walks iteration 4 of 5: 100%|████████████████████████████████████████| 1405/1405 [00:04<00:00, 304.06it/s]
Random walks iteration 5 of 5: 100%|████████████████████████████████████████| 1405/1405 [00:04<00:00, 302.15it/s]
Number of walks generated: 7025
```
</div>
---
## Generate positive and negative examples
To train a skip-gram model, we use the generated walks to create positive and
negative training examples. Each example includes the following features:
1. `target`: A movie in a walk sequence.
2. `context`: Another movie in a walk sequence.
3. `weight`: How many times these two movies occurred in walk sequences.
4. `label`: The label is 1 if these two movies are sampled from the walk sequences,
otherwise (i.e., if the pair was randomly sampled) the label is 0.
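To make the pair generation concrete, here is a minimal sketch of what
`keras.preprocessing.sequence.skipgrams` returns for a toy walk (the token ids
below are made up; in this example, token `0` is reserved for `"NA"`):

```python
from tensorflow import keras

toy_walk = [1, 2, 3]
pairs, labels = keras.preprocessing.sequence.skipgrams(
    toy_walk, vocabulary_size=10, window_size=2, negative_samples=1.0
)
# Each pair is [target, context]; label 1 marks a true co-occurrence from the
# walk, label 0 marks a randomly drawn (negative) context.
for pair, label in zip(pairs, labels):
    print(pair, label)
```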
### Generate examples
```python
def generate_examples(sequences, window_size, num_negative_samples, vocabulary_size):
example_weights = defaultdict(int)
# Iterate over all sequences (walks).
for sequence in tqdm(
sequences,
position=0,
leave=True,
desc=f"Generating postive and negative examples",
):
# Generate positive and negative skip-gram pairs for a sequence (walk).
pairs, labels = keras.preprocessing.sequence.skipgrams(
sequence,
vocabulary_size=vocabulary_size,
window_size=window_size,
negative_samples=num_negative_samples,
)
for idx in range(len(pairs)):
pair = pairs[idx]
label = labels[idx]
target, context = min(pair[0], pair[1]), max(pair[0], pair[1])
if target == context:
continue
entry = (target, context, label)
example_weights[entry] += 1
targets, contexts, labels, weights = [], [], [], []
for entry in example_weights:
weight = example_weights[entry]
target, context, label = entry
targets.append(target)
contexts.append(context)
labels.append(label)
weights.append(weight)
return np.array(targets), np.array(contexts), np.array(labels), np.array(weights)
num_negative_samples = 4
targets, contexts, labels, weights = generate_examples(
sequences=walks,
window_size=num_steps,
num_negative_samples=num_negative_samples,
vocabulary_size=len(vocabulary),
)
```
<div class="k-default-codeblock">
```
Generating positive and negative examples: 100%|████████████████████████████████████████| 7025/7025 [00:11<00:00, 617.64it/s]
```
</div>
Let's display the shapes of the outputs.
```python
print(f"Targets shape: {targets.shape}")
print(f"Contexts shape: {contexts.shape}")
print(f"Labels shape: {labels.shape}")
print(f"Weights shape: {weights.shape}")
```
<div class="k-default-codeblock">
```
Targets shape: (881412,)
Contexts shape: (881412,)
Labels shape: (881412,)
Weights shape: (881412,)
```
</div>
### Convert the data into `tf.data.Dataset` objects
```python
batch_size = 1024
def create_dataset(targets, contexts, labels, weights, batch_size):
inputs = {
"target": targets,
"context": contexts,
}
dataset = tf.data.Dataset.from_tensor_slices((inputs, labels, weights))
dataset = dataset.shuffle(buffer_size=batch_size * 2)
dataset = dataset.batch(batch_size, drop_remainder=True)
dataset = dataset.prefetch(tf.data.AUTOTUNE)
return dataset
dataset = create_dataset(
targets=targets,
contexts=contexts,
labels=labels,
weights=weights,
batch_size=batch_size,
)
```
---
## Train the skip-gram model
Our skip-gram model is a simple binary classifier that works as follows:
1. An embedding is looked up for the `target` movie.
2. An embedding is looked up for the `context` movie.
3. The dot product is computed between these two embeddings.
4. The result (after a sigmoid activation) is compared to the label.
5. A binary crossentropy loss is used.
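In other words, the predicted probability that a pair is a true co-occurrence is
`sigmoid(dot(target_embedding, context_embedding))`. Here is a minimal NumPy
sketch of that scoring step, using made-up embedding values:

```python
import numpy as np

e_target = np.array([0.1, -0.2, 0.3])   # hypothetical target embedding
e_context = np.array([0.2, 0.1, -0.1])  # hypothetical context embedding
logit = np.dot(e_target, e_context)     # dot-product similarity
prob = 1.0 / (1.0 + np.exp(-logit))     # sigmoid -> probability of label 1
print(round(float(prob), 4))            # 0.4925
```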
```python
learning_rate = 0.001
embedding_dim = 50
num_epochs = 10
```
### Implement the model
```python
def create_model(vocabulary_size, embedding_dim):
inputs = {
"target": layers.Input(name="target", shape=(), dtype="int32"),
"context": layers.Input(name="context", shape=(), dtype="int32"),
}
# Initialize item embeddings.
embed_item = layers.Embedding(
input_dim=vocabulary_size,
output_dim=embedding_dim,
embeddings_initializer="he_normal",
embeddings_regularizer=keras.regularizers.l2(1e-6),
name="item_embeddings",
)
# Lookup embeddings for target.
target_embeddings = embed_item(inputs["target"])
# Lookup embeddings for context.
context_embeddings = embed_item(inputs["context"])
# Compute dot similarity between target and context embeddings.
logits = layers.Dot(axes=1, normalize=False, name="dot_similarity")(
[target_embeddings, context_embeddings]
)
# Create the model.
model = keras.Model(inputs=inputs, outputs=logits)
return model
```
### Train the model
We instantiate the model and compile it.
```python
model = create_model(len(vocabulary), embedding_dim)
model.compile(
optimizer=keras.optimizers.Adam(learning_rate),
loss=keras.losses.BinaryCrossentropy(from_logits=True),
)
```
Let's plot the model.
```python
keras.utils.plot_model(
model,
show_shapes=True,
show_dtype=True,
show_layer_names=True,
)
```
![png](/img/examples/graph/node2vec_movielens/node2vec_movielens_44_0.png)
Now we train the model on the `dataset`.
```python
history = model.fit(dataset, epochs=num_epochs)
```
<div class="k-default-codeblock">
```
Epoch 1/10
860/860 [==============================] - 5s 5ms/step - loss: 2.4527
Epoch 2/10
860/860 [==============================] - 4s 5ms/step - loss: 2.3431
Epoch 3/10
860/860 [==============================] - 4s 4ms/step - loss: 2.3351
Epoch 4/10
860/860 [==============================] - 4s 4ms/step - loss: 2.3301
Epoch 5/10
860/860 [==============================] - 4s 5ms/step - loss: 2.3259
Epoch 6/10
860/860 [==============================] - 4s 4ms/step - loss: 2.3223
Epoch 7/10
860/860 [==============================] - 4s 5ms/step - loss: 2.3191
Epoch 8/10
860/860 [==============================] - 4s 4ms/step - loss: 2.3160
Epoch 9/10
860/860 [==============================] - 4s 4ms/step - loss: 2.3130
Epoch 10/10
860/860 [==============================] - 4s 5ms/step - loss: 2.3104
```
</div>
Finally, we plot the learning history.
```python
plt.plot(history.history["loss"])
plt.ylabel("loss")
plt.xlabel("epoch")
plt.show()
```
![png](/img/examples/graph/node2vec_movielens/node2vec_movielens_48_0.png)
---
## Analyze the learnt embeddings
```python
movie_embeddings = model.get_layer("item_embeddings").get_weights()[0]
print("Embeddings shape:", movie_embeddings.shape)
```
<div class="k-default-codeblock">
```
Embeddings shape: (1406, 50)
```
</div>
### Find related movies
Define a list with some movies called `query_movies`.
```python
query_movies = [
"Matrix, The (1999)",
"Star Wars: Episode IV - A New Hope (1977)",
"Lion King, The (1994)",
"Terminator 2: Judgment Day (1991)",
"Godfather, The (1972)",
]
```
Get the embeddings of the movies in `query_movies`.
```python
query_embeddings = []
for movie_title in query_movies:
movieId = get_movie_id_by_title(movie_title)
token_id = vocabulary_lookup[movieId]
movie_embedding = movie_embeddings[token_id]
query_embeddings.append(movie_embedding)
query_embeddings = np.array(query_embeddings)
```
Compute the [cosine similarity](https://en.wikipedia.org/wiki/Cosine_similarity) between the embeddings of `query_movies`
and all the other movies, then pick the top k for each.
```python
similarities = tf.linalg.matmul(
tf.math.l2_normalize(query_embeddings),
tf.math.l2_normalize(movie_embeddings),
transpose_b=True,
)
_, indices = tf.math.top_k(similarities, k=5)
indices = indices.numpy().tolist()
```
Display the top related movies for each movie in `query_movies`.
```python
for idx, title in enumerate(query_movies):
print(title)
print("".rjust(len(title), "-"))
similar_tokens = indices[idx]
for token in similar_tokens:
similar_movieId = vocabulary[token]
similar_title = get_movie_title_by_id(similar_movieId)
print(f"- {similar_title}")
print()
```
<div class="k-default-codeblock">
```
Matrix, The (1999)
------------------
- Matrix, The (1999)
- Raiders of the Lost Ark (Indiana Jones and the Raiders of the Lost Ark) (1981)
- Schindler's List (1993)
- Star Wars: Episode IV - A New Hope (1977)
- Lord of the Rings: The Fellowship of the Ring, The (2001)
```
</div>
<div class="k-default-codeblock">
```
Star Wars: Episode IV - A New Hope (1977)
-----------------------------------------
- Star Wars: Episode IV - A New Hope (1977)
- Schindler's List (1993)
- Raiders of the Lost Ark (Indiana Jones and the Raiders of the Lost Ark) (1981)
- Matrix, The (1999)
- Pulp Fiction (1994)
```
</div>
<div class="k-default-codeblock">
```
Lion King, The (1994)
---------------------
- Lion King, The (1994)
- Jurassic Park (1993)
- Independence Day (a.k.a. ID4) (1996)
- Beauty and the Beast (1991)
- Mrs. Doubtfire (1993)
```
</div>
<div class="k-default-codeblock">
```
Terminator 2: Judgment Day (1991)
---------------------------------
- Schindler's List (1993)
- Jurassic Park (1993)
- Terminator 2: Judgment Day (1991)
- Star Wars: Episode IV - A New Hope (1977)
- Back to the Future (1985)
```
</div>
<div class="k-default-codeblock">
```
Godfather, The (1972)
---------------------
- Apocalypse Now (1979)
- Fargo (1996)
- Godfather, The (1972)
- Schindler's List (1993)
- Casablanca (1942)
```
</div>
### Visualize the embeddings using the Embedding Projector
```python
import io
out_v = io.open("embeddings.tsv", "w", encoding="utf-8")
out_m = io.open("metadata.tsv", "w", encoding="utf-8")
for idx, movie_id in enumerate(vocabulary[1:]):
    movie_title = list(movies[movies.movieId == movie_id].title)[0]
    # `idx` starts at 0 for vocabulary[1], so offset by 1 to skip the
    # embedding of the reserved "NA" token at row 0.
    vector = movie_embeddings[idx + 1]
    out_v.write("\t".join([str(x) for x in vector]) + "\n")
    out_m.write(movie_title + "\n")
out_v.close()
out_m.close()
```
Download the `embeddings.tsv` and `metadata.tsv` to analyze the obtained embeddings
in the [Embedding Projector](https://projector.tensorflow.org/).
**Example available on HuggingFace**
| Trained Model | Demo |
| :--: | :--: |
| [![Generic badge](https://img.shields.io/badge/%F0%9F%A4%97%20Model%3A%20-Node2Vec%20Movielens-black.svg)](https://huggingface.co/keras-io/Node2Vec_MovieLens) | [![Generic badge](https://img.shields.io/badge/%F0%9F%A4%97%20Spaces%3A-Node2Vec%20Movielens-black.svg)](https://huggingface.co/spaces/keras-io/Node2Vec_MovieLens) |