# Hugging Face Space: Anime2Sketch (running on ZeroGPU).
#!/usr/bin/env python
from __future__ import annotations
import functools
import sys
import gradio as gr
import huggingface_hub
import PIL.Image
import spaces
import torch
import torch.nn as nn
sys.path.insert(0, "Anime2Sketch")
from data import read_img_path, tensor_to_img
from model import UnetGenerator
# App title and description shown in the Gradio UI header.
TITLE = "Anime2Sketch"
DESCRIPTION = "https://github.com/Mukosame/Anime2Sketch"
def load_model(device: torch.device) -> nn.Module:
    """Build the Anime2Sketch U-Net generator and load its pretrained weights.

    Args:
        device: Device the loaded model is moved to.

    Returns:
        The generator in eval mode on ``device``.
    """
    norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
    model = UnetGenerator(3, 1, 8, 64, norm_layer=norm_layer, use_dropout=False)
    path = huggingface_hub.hf_hub_download("public-data/Anime2Sketch", "netG.pth")
    # map_location="cpu" avoids a hard failure on CPU-only hosts when the
    # checkpoint was saved from a GPU process; weights_only=True refuses to
    # unpickle arbitrary objects from the downloaded file.
    ckpt = torch.load(path, map_location="cpu", weights_only=True)
    # Strip the "module." prefix that nn.DataParallel prepends when saving.
    # Match only the prefix — a substring replace could corrupt keys that
    # merely contain "module." somewhere else.
    for key in list(ckpt.keys()):
        if key.startswith("module."):
            ckpt[key[len("module."):]] = ckpt.pop(key)
    model.load_state_dict(ckpt)
    model.to(device)
    model.eval()
    return model
# Pick the first CUDA device when one is present, otherwise stay on CPU.
if torch.cuda.is_available():
    device = torch.device("cuda:0")
else:
    device = torch.device("cpu")
# Load the generator once at import time so the Space serves requests
# without per-call model setup.
model = load_model(device)
@spaces.GPU
@torch.inference_mode()
def run(image_file: str, load_size: int = 512) -> PIL.Image.Image:
    """Produce a sketch image from the anime image stored at *image_file*.

    The input is resized to *load_size* for inference and the resulting
    sketch is resized back to the original dimensions.
    """
    img_tensor, original_size = read_img_path(image_file, load_size)
    prediction = model(img_tensor.to(device))
    sketch = PIL.Image.fromarray(tensor_to_img(prediction))
    return sketch.resize(original_size, PIL.Image.Resampling.BICUBIC)
# Gradio UI: a single image-in / image-out interface around run().
# type="filepath" hands run() a path string rather than a numpy array.
demo = gr.Interface(
    fn=run,
    inputs=gr.Image(label="Input", type="filepath"),
    outputs=gr.Image(label="Output"),
    examples=["Anime2Sketch/test_samples/madoka.jpg"],
    title=TITLE,
    description=DESCRIPTION,
)
if __name__ == "__main__":
    demo.queue().launch()