# NOTE(review): removed scraped Hugging Face file-viewer chrome that preceded
# this line ("Spaces:" header, runtime status, file size, per-line commit
# hashes, and a column of line numbers). It was not part of the source and
# made the file a SyntaxError.
import os
import gradio as gr
import util
# BUG FIX: `import run_cmd` bound the *module* object, so every call like
# run_cmd("...") below would raise `TypeError: 'module' object is not
# callable`. Bind the function from the module instead.
from run_cmd import run_cmd
from random import randint
from PIL import Image

# Running under Google Colab changes how the app is launched (share/inline).
is_colab = util.is_google_colab()

# Best-effort install of pngquant at startup (PNG optimization tooling).
run_cmd("pip install pngquant")
def inference(img, size, type):
    """Upscale *img* via the Real-ESRGAN CLI scripts and return the result.

    Args:
        img:  PIL input image; written to disk as JPEG for the CLI scripts.
        size: "x4" keeps the model's native 4x output; "x2" downsamples the
              4x result by half to yield an effective 2x upscale.
        type: "Manga" routes to inference_manga_v2.py; any other value
              ("Anime"/"General") is forwarded to inference.py as its model
              argument. (The name shadows the builtin ``type`` but is kept
              unchanged for interface compatibility with the Gradio wiring.)

    Returns:
        A single-element list ``[PIL.Image]`` for the Gallery output.
    """
    import shutil  # local import so this edit is self-contained

    # Per-request random id so concurrent requests use distinct directories.
    _id = randint(1, 10000)
    INPUT_DIR = "/tmp/input_image" + str(_id) + "/"
    OUTPUT_DIR = "/tmp/output_image" + str(_id) + "/"
    img_in_path = os.path.join(INPUT_DIR, "1.jpg")
    img_out_path = os.path.join(OUTPUT_DIR, "1_out.png")

    # Recreate clean work directories with the stdlib instead of shelling
    # out to `rm -rf` / `mkdir` (faster, portable, no shell dependence).
    shutil.rmtree(INPUT_DIR, ignore_errors=True)
    shutil.rmtree(OUTPUT_DIR, ignore_errors=True)
    os.makedirs(INPUT_DIR, exist_ok=True)
    os.makedirs(OUTPUT_DIR, exist_ok=True)

    # Save through img_in_path (was the duplicated `INPUT_DIR + "1.jpg"`,
    # which named the same file by a second spelling).
    img.save(img_in_path, "JPEG")

    if type == "Manga":
        run_cmd(f"python inference_manga_v2.py {img_in_path} {img_out_path}")
    else:
        run_cmd(f"python inference.py {img_in_path} {img_out_path} {type}")

    img_out = Image.open(img_out_path)
    # Pillow opens lazily; force the pixel data into memory now, BEFORE the
    # backing file is deleted below (in the "x4" path nothing else reads it).
    img_out.load()

    # The model always produces a 4x image; halve it for an effective 2x.
    if size == "x2":
        img_out = img_out.resize(
            (img_out.width // 2, img_out.height // 2),
            resample=Image.BICUBIC,
        )

    # Best-effort cleanup of the temp files (explicit, instead of `rm -f`).
    for path in (img_in_path, img_out_path):
        try:
            os.remove(path)
        except OSError:
            pass

    return [img_out]
# --- Gradio interface wiring ------------------------------------------------
input_image = gr.Image(type="pil", label="Input")
upscale_type = gr.Radio(["Manga", "Anime", "General"], label="Select the type of picture you want to upscale:", value="Manga")
upscale_size = gr.Radio(["x4", "x2"], label="Upscale by:", value="x4")
# Gallery output: inference() returns a one-element list of PIL images.
output_image = gr.Gallery(label="Generated images", show_label=False, elem_id="gallery").style(grid=[2], height="auto")

demo = gr.Interface(
    inference,
    inputs=[input_image, upscale_size, upscale_type],
    outputs=[output_image],
)

demo.queue()
# FIX: removed a stray trailing "|" (file-viewer scraping residue) that made
# this line a SyntaxError in the original.
demo.launch(debug=is_colab, share=is_colab, inline=is_colab)