import gradio as gr
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.applications import vgg19
from huggingface_hub import from_pretrained_keras
result_prefix = "paris_generated"

# Weights of the different loss components
total_variation_weight = 1e-6
style_weight = 1e-6
content_weight = 2.5e-8
# Dimensions of the generated picture. img_nrows is fixed; img_ncols is a
# placeholder that is recomputed from the uploaded base image inside get_imgs.
img_nrows = 400
img_ncols = 400
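
# NOTE: preprocess_image and deprocess_image are required by get_imgs below
# but are not defined elsewhere in this listing. The versions here are a
# minimal sketch following the standard Keras neural style transfer example
# (VGG19 preprocessing: mean-pixel centering and BGR channel order).
def preprocess_image(image_path):
    # Open, resize and format a picture into an appropriate tensor.
    img = keras.preprocessing.image.load_img(
        image_path, target_size=(img_nrows, img_ncols)
    )
    img = keras.preprocessing.image.img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = vgg19.preprocess_input(img)
    return tf.convert_to_tensor(img)


def deprocess_image(x):
    # Convert a tensor back into a valid uint8 image.
    x = x.reshape((img_nrows, img_ncols, 3))
    # Remove zero-center by mean pixel (inverse of vgg19.preprocess_input).
    x[:, :, 0] += 103.939
    x[:, :, 1] += 116.779
    x[:, :, 2] += 123.68
    # 'BGR' -> 'RGB'
    x = x[:, :, ::-1]
    x = np.clip(x, 0, 255).astype("uint8")
    return x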
# Build a VGG19 model loaded with pre-trained ImageNet weights
model = from_pretrained_keras("rushic24/keras-VGG19")

# Get the symbolic outputs of each "key" layer (we gave them unique names).
outputs_dict = dict([(layer.name, layer.output) for layer in model.layers])

# Set up a model that returns the activation values for every layer in
# VGG19 (as a dict).
feature_extractor = keras.Model(inputs=model.inputs, outputs=outputs_dict)
# List of layers to use for the style loss.
style_layer_names = [
    "block1_conv1",
    "block2_conv1",
    "block3_conv1",
    "block4_conv1",
    "block5_conv1",
]

# The layer to use for the content loss.
content_layer_name = "block5_conv2"
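
# NOTE: compute_loss (called by compute_loss_and_grads below) and its
# component losses are missing from this listing. The sketch below follows
# the standard Keras neural style transfer example, combining content,
# style (Gram-matrix) and total variation terms with the weights and layer
# names defined above.
def gram_matrix(x):
    # Gram matrix of an image tensor (feature-wise outer product).
    x = tf.transpose(x, (2, 0, 1))
    features = tf.reshape(x, (tf.shape(x)[0], -1))
    gram = tf.matmul(features, tf.transpose(features))
    return gram


def style_loss(style, combination):
    # Keep the generated image close to the local textures of the style image.
    S = gram_matrix(style)
    C = gram_matrix(combination)
    channels = 3
    size = img_nrows * img_ncols
    return tf.reduce_sum(tf.square(S - C)) / (4.0 * (channels ** 2) * (size ** 2))


def content_loss(base, combination):
    # Keep the high-level representation of the generated image close to the base image.
    return tf.reduce_sum(tf.square(combination - base))


def total_variation_loss(x):
    # Keep the generated image locally coherent.
    a = tf.square(
        x[:, : img_nrows - 1, : img_ncols - 1, :] - x[:, 1:, : img_ncols - 1, :]
    )
    b = tf.square(
        x[:, : img_nrows - 1, : img_ncols - 1, :] - x[:, : img_nrows - 1, 1:, :]
    )
    return tf.reduce_sum(tf.pow(a + b, 1.25))


def compute_loss(combination_image, base_image, style_reference_image):
    # Run base, style and combination images through the feature extractor
    # in a single batch and accumulate the weighted loss terms.
    input_tensor = tf.concat(
        [base_image, style_reference_image, combination_image], axis=0
    )
    features = feature_extractor(input_tensor)

    loss = tf.zeros(shape=())

    # Content loss
    layer_features = features[content_layer_name]
    base_image_features = layer_features[0, :, :, :]
    combination_features = layer_features[2, :, :, :]
    loss = loss + content_weight * content_loss(base_image_features, combination_features)

    # Style loss
    for layer_name in style_layer_names:
        layer_features = features[layer_name]
        style_reference_features = layer_features[1, :, :, :]
        combination_features = layer_features[2, :, :, :]
        sl = style_loss(style_reference_features, combination_features)
        loss += (style_weight / len(style_layer_names)) * sl

    # Total variation loss
    loss += total_variation_weight * total_variation_loss(combination_image)
    return loss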
def compute_loss_and_grads(combination_image, base_image, style_reference_image):
    with tf.GradientTape() as tape:
        loss = compute_loss(combination_image, base_image, style_reference_image)
    grads = tape.gradient(loss, combination_image)
    return loss, grads
optimizer = keras.optimizers.SGD(
    learning_rate=keras.optimizers.schedules.ExponentialDecay(
        initial_learning_rate=100.0, decay_steps=100, decay_rate=0.96
    )
)
def get_imgs(base_image_path, style_reference_image_path):
    # Derive the width of the generated picture from the uploaded base image,
    # keeping the fixed number of rows defined above.
    global img_ncols
    width, height = keras.preprocessing.image.load_img(base_image_path).size
    img_ncols = int(width * img_nrows / height)

    base_image = preprocess_image(base_image_path)
    style_reference_image = preprocess_image(style_reference_image_path)
    combination_image = tf.Variable(preprocess_image(base_image_path))

    # Iteratively update the combination image to minimize the total loss.
    iterations = 400
    for i in range(1, iterations + 1):
        loss, grads = compute_loss_and_grads(
            combination_image, base_image, style_reference_image
        )
        optimizer.apply_gradients([(grads, combination_image)])
        if i % 100 == 0:
            print("Iteration %d: loss=%.2f" % (i, loss))

    img = deprocess_image(combination_image.numpy())
    return img
title = "Neural style transfer" | |
description = "Gradio Demo for Neural style transfer. To use it, simply upload a base image and a style image" | |
content = gr.inputs.Image(type="filepath", label="Base (content) image")
style = gr.inputs.Image(type="filepath", label="Style image")
gr.Interface(
    get_imgs,
    inputs=[content, style],
    outputs=["image"],
    title=title,
    description=description,
    examples=[["base.jpg", "style.jpg"]],
).launch(enable_queue=True)