Spaces:
Runtime error
Runtime error
Create app.py
Browse files
app.py
ADDED
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
import torch
|
3 |
+
import torchvision.transforms as transforms
|
4 |
+
from PIL import Image
|
5 |
+
import matplotlib.pyplot as plt
|
6 |
+
|
7 |
+
# Load the trained generator model
# Inference runs on GPU when available; map_location below also lets a
# GPU-trained checkpoint load on a CPU-only host.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# BUG / NOTE(review): `Generator` is never imported or defined anywhere in
# this file, so this line raises NameError at startup — this is the likely
# cause of the Space's "Runtime error". Import the Generator class from the
# training/model module (e.g. `from model import Generator`) before this line.
generator_A2B = Generator().to(device)
# Restore trained A->B weights from the checkpoint shipped with the Space.
generator_A2B.load_state_dict(torch.load("generator_A2B.pth", map_location=device))
# Switch to eval mode (disables dropout / freezes batch-norm statistics).
generator_A2B.eval()
|
12 |
+
|
13 |
+
def transform_image(image):
    """Preprocess a PIL image into a model-ready batch tensor.

    Resizes to 256x256, converts to a tensor, and normalizes each channel
    with mean/std 0.5 (mapping pixel values into [-1, 1]), then adds a
    batch dimension and moves the result to the module-level `device`.
    """
    steps = [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
    ]
    pipeline = transforms.Compose(steps)
    batch = pipeline(image).unsqueeze(0)
    return batch.to(device)
|
20 |
+
|
21 |
+
def generate(image):
    """Translate an uploaded image with the A->B CycleGAN generator.

    Parameters
    ----------
    image : str
        Filesystem path to the uploaded image (``gr.Image(type="filepath")``).

    Returns
    -------
    numpy.ndarray
        H x W x 3 float array in [0, 1], rendered directly by ``gr.Image``.
    """
    pil_image = Image.open(image).convert("RGB")
    input_tensor = transform_image(pil_image)
    # Inference only — no autograd graph needed.
    with torch.no_grad():
        output_tensor = generator_A2B(input_tensor)

    # Map generator output back from [-1, 1] to [0, 1] (matches the
    # Normalize(0.5, 0.5) preprocessing in transform_image), and clip any
    # numerical overshoot so the array stays in the displayable range.
    output_image = (output_tensor.squeeze(0).permute(1, 2, 0).cpu().numpy() + 1) / 2
    output_image = output_image.clip(0.0, 1.0)
    # NOTE: removed plt.imshow()/plt.show() — opening a Matplotlib window
    # inside a server request handler blocks (or fails on a headless Space);
    # the returned array is what Gradio displays.
    return output_image
|
32 |
+
|
33 |
+
# Create Gradio Interface: one image upload in, one translated image out.
input_component = gr.Image(type="filepath")
output_component = gr.Image()

demo = gr.Interface(
    fn=generate,
    inputs=input_component,
    outputs=output_component,
    title="CycleGAN Image Translation",
    description="Upload an image and get the translated output from the CycleGAN model."
)

# Start the web server (blocking call).
demo.launch()
|