import gradio as gr
import numpy as np
import torch
from PIL import Image
import constants
import utils

# Global predictor handle, initialized lazily in the __main__ block.
PREDICTOR = None


def inference(image: np.ndarray, text: str, center_crop: bool):
    # Fixed number of prediction steps used for the demo.
    num_steps = 10
    if not text.lower().startswith("remove the"):
        raise gr.Error("Instruction should start with 'Remove the'!")

    # Convert the numpy input to PIL and apply the (optional) center crop.
    image = Image.fromarray(image)
    cropped_image, image = utils.preprocess_image(image, center_crop=center_crop)

    # Seed RNGs so predictions are reproducible, then run the model.
    utils.seed_everything()
    prediction = PREDICTOR.predict(image, text, num_steps)
    print("Num steps:", num_steps)
    return cropped_image, prediction

if __name__ == "__main__":
    # One-time setup: prepare the environment and load the predictor.
    utils.setup_environment()
    if not PREDICTOR:
        PREDICTOR = utils.get_predictor()

    # Default widget values are taken from one of the bundled examples.
    sample_image, sample_instruction, sample_step = constants.EXAMPLES[3]

    gr.Interface(
        fn=inference,
        inputs=[
            gr.Image(type="numpy", value=sample_image, label="Source Image").style(
                height=256
            ),
            gr.Textbox(
                label="Instruction",
                lines=1,
                value=sample_instruction,
            ),
            gr.Checkbox(value=True, label="Center Crop", interactive=False),
        ],
        outputs=[
            gr.Image(type="pil", label="Cropped Image").style(height=256),
            gr.Image(type="pil", label="Output Image").style(height=256),
        ],
        allow_flagging="never",
        examples=constants.EXAMPLES,
        cache_examples=True,
        title=constants.TITLE,
        description=constants.DESCRIPTION,
    ).launch()