import numpy as np
from tensorflow import keras
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt

# Load the Fashion MNIST dataset (28x28 grayscale clothing images)
(X_train_full, y_train_full), (X_test, y_test) = keras.datasets.fashion_mnist.load_data()

# Scale pixel intensities to the [0, 1] range
X_train_full = X_train_full.astype('float32') / 255.
X_test = X_test.astype('float32') / 255.

# Hold out 20% of the training images for validation; the already-loaded
# Fashion MNIST test split is kept as the test set
X_train, X_val = train_test_split(X_train_full, test_size=0.2, random_state=42)

# Stacked autoencoder: a symmetric encoder/decoder with a 50-unit bottleneck.
# The encoder compresses each 28x28 image down to `latent_dim` values and the
# decoder reconstructs the image from that code.
input_shape = X_train.shape[1:]
latent_dim = 50

autoencoder = keras.models.Sequential([
    keras.layers.Flatten(input_shape=input_shape),
    keras.layers.Dense(256, activation='relu'),
    keras.layers.Dense(128, activation='relu'),
    keras.layers.Dense(latent_dim, activation='relu', name='latent_layer'),
    keras.layers.Dense(128, activation='relu'),
    keras.layers.Dense(256, activation='relu'),
    keras.layers.Dense(np.prod(input_shape), activation='sigmoid'),
    keras.layers.Reshape(input_shape)
])
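
# Optional check (not in the original script): print layer output shapes and
# parameter counts to verify the symmetric encoder/decoder structure.
autoencoder.summary()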

# Pixels lie in [0, 1], so binary cross-entropy serves as the reconstruction loss
autoencoder.compile(loss='binary_crossentropy', optimizer='adam')

# Train the autoencoder to reproduce its own input
history = autoencoder.fit(X_train, X_train,
                          epochs=50,
                          batch_size=128,
                          validation_data=(X_val, X_val))
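
# Optional sketch (not in the original script): plot the loss curves recorded in
# `history` to check that the reconstruction loss has converged. Uses only
# objects already defined above.
plt.figure(figsize=(6, 4))
plt.plot(history.history['loss'], label='training loss')
plt.plot(history.history['val_loss'], label='validation loss')
plt.xlabel('epoch')
plt.ylabel('binary cross-entropy')
plt.legend()
plt.show()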

# Compare original test images (top row) with their reconstructions (bottom row)
n = 7
decoded_imgs = autoencoder.predict(X_test[:n])

plt.figure(figsize=(10, 4.5))
for i in range(n):
    # Original image
    ax = plt.subplot(2, n, i + 1)
    plt.imshow(X_test[i], cmap='gray')
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)

    # Reconstruction
    ax = plt.subplot(2, n, i + 1 + n)
    plt.imshow(decoded_imgs[i], cmap='gray')
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)

plt.show()
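
# Optional sketch (not in the original script): reuse the trained layers up to the
# named 'latent_layer' as a standalone encoder, mapping test images to their
# 50-dimensional codes.
encoder = keras.Model(inputs=autoencoder.input,
                      outputs=autoencoder.get_layer('latent_layer').output)
latent_codes = encoder.predict(X_test[:n])
print(latent_codes.shape)  # expected: (n, latent_dim)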