# I2I / app.py
import spaces
import gradio as gr
import re
from PIL import Image
import os
import numpy as np
import shutil
#shutil.rmtree("/home/user/app/.gradio/cached_examples/23")
import torch
from diffusers import FluxImg2ImgPipeline
dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"

pipe = FluxImg2ImgPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=dtype).to(device)
def sanitize_prompt(prompt):
    # Allow only alphanumeric characters, whitespace, and basic punctuation
    allowed_chars = re.compile(r"[^a-zA-Z0-9\s.,!?-]")
    sanitized_prompt = allowed_chars.sub("", prompt)
    return sanitized_prompt
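
# Usage sketch (illustrative input, not from the original app): characters outside the
# allow-list are dropped, e.g. sanitize_prompt("hello_world@2024!") -> "helloworld2024!"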
#@spaces.GPU
def process_img2img(image, mask_image, prompt="a person", model_id="black-forest-labs/FLUX.1-schnell", strength=0.75, seed=0, num_inference_steps=4):
    print("start process_img2img")
    if image is None:
        print("empty input image returned")
        return None
    generator = torch.Generator(device).manual_seed(seed)
    # for more parameters see https://huggingface.co/docs/diffusers/api/pipelines/flux#diffusers.FluxInpaintPipeline
    print(prompt)
    output = pipe(prompt=prompt, image=image, generator=generator, strength=strength,
                  guidance_scale=0, num_inference_steps=num_inference_steps, max_sequence_length=256)
    # TODO support mask
    return output.images[0]
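
# A minimal sketch of the "TODO support mask" above, assuming the FluxInpaintPipeline
# documented at the link in process_img2img takes the same arguments plus mask_image.
# The helper name process_inpaint is hypothetical and nothing in the UI calls it yet.
def process_inpaint(image, mask_image, prompt="a person", strength=0.75, seed=0, num_inference_steps=4):
    from diffusers import FluxInpaintPipeline  # lazy import so the unused sketch costs nothing at startup
    inpaint_pipe = FluxInpaintPipeline.from_pretrained(
        "black-forest-labs/FLUX.1-schnell", torch_dtype=dtype
    ).to(device)
    generator = torch.Generator(device).manual_seed(seed)
    output = inpaint_pipe(prompt=prompt, image=image, mask_image=mask_image,
                          generator=generator, strength=strength, guidance_scale=0,
                          num_inference_steps=num_inference_steps, max_sequence_length=256)
    return output.images[0]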
@spaces.GPU(duration=180)
def process_images(image, image2=None, prompt="a girl", inpaint_model="black-forest-labs/FLUX.1-schnell", strength=0.75, seed=0):
    print("start process_images")
    try:
        # gr.ImageEditor normally passes a dict; if a plain image arrives, rebuild the dict
        if not isinstance(image, dict):
            if image2 is None:
                print("empty mask")
                return image
            else:
                image = {'background': image, 'layers': [image2]}
        if image2 is not None:
            #print("use image2")
            mask = image2
        else:
            if len(image['layers']) == 0:
                print("empty mask")
                return image
            print("use layer")
            mask = image['layers'][0]
        output = process_img2img(image["background"], mask, prompt, inpaint_model, strength, seed)
    except Exception as e:
        print(f"An error occurred: {e}")
        # gr.Error must be raised (not just constructed) to surface in the UI
        raise gr.Error(str(e))
    print("end process_images")
    return output
def read_file(path: str) -> str:
    with open(path, 'r', encoding='utf-8') as f:
        content = f.read()
    return content
def example_out(image, image_mask, prompt, strength, example_id):
    # input
    #parent,file=os.path.split(image_mask) # image is complex dict
    #base,ext = os.path.splitext(file)
    #key = base.split("_")[0]
    return f"images/{example_id}.jpg"
    #loaded_image = Image.open(f"images/{example_id}.jpg")
    #return loaded_image
    #return np.array(loaded_image)
css="""
#col-left {
margin: 0 auto;
max-width: 640px;
}
#col-right {
margin: 0 auto;
max-width: 640px;
}
"""
demo_blocks = gr.Blocks(css=css, elem_id="demo-container")
with demo_blocks as demo:
    with gr.Column():
        gr.HTML(read_file("demo_header.html"))
        with gr.Row():
            with gr.Column():
                image = gr.ImageEditor(height=800, sources=['upload', 'clipboard'], transforms=[], image_mode='RGB', layers=False, elem_id="image_upload", type="pil", label="Upload", brush=gr.Brush(colors=["#fff"], color_mode="fixed"))
                with gr.Row(elem_id="prompt-container", equal_height=False):
                    with gr.Row():
                        prompt = gr.Textbox(label="Prompt", value="a eyes closed girl,shut eyes", placeholder="Your prompt (what you want in place of what is erased)", elem_id="prompt")
                        btn = gr.Button("Img2Img", elem_id="run_button", variant="primary")
                image_mask = gr.Image(sources=['upload', 'clipboard'], elem_id="mask_upload", type="pil", label="Mask_Upload", height=400, value=None)
                with gr.Accordion(label="Advanced Settings", open=False):
                    with gr.Row(equal_height=True):
                        strength = gr.Number(value=0.75, minimum=0, maximum=1.0, step=0.01, label="strength")
                        seed = gr.Number(value=0, minimum=0, step=1, label="seed")
                        models = ["black-forest-labs/FLUX.1-schnell"]
                        inpaint_model = gr.Dropdown(label="models", choices=models, value="black-forest-labs/FLUX.1-schnell")
                    id_input = gr.Text(label="Name", visible=False)
            with gr.Column():
                image_out = gr.Image(height=800, sources=[], label="Output", elem_id="output-img", format="jpg")
        btn.click(fn=process_images, inputs=[image, image_mask, prompt, inpaint_model, strength, seed], outputs=image_out, api_name='infer')
        gr.Examples(
            examples=[
                #["images/00547245_99.jpg", "images/00547245_99_mask.jpg","a beautiful girl,eyes closed",0.8,"images/00547245.jpg"],
                #["images/00538245_paint.jpg", "images/00538245_mask.jpg","a beautiful girl,wearing t-shirt",0.7,"images/00538245.jpg"],
                #["images/00207245_18.jpg", "images/00207245_18_mask.jpg","a beautiful girl,mouth opened",0.2,"images/00207245.jpg"]
            ],
            #fn=example_out,
            inputs=[image, image_mask, prompt, strength, image_out],
            #outputs=[test_out],
            #cache_examples=False,
        )
        gr.HTML(
            """
            """
        )
try:
    demo_blocks.queue(max_size=25).launch(share=False, debug=True)
except Exception as e:
    # there is no UI to show a gr.Error at launch time, so just log and re-raise
    print(f"A launch error occurred: {e}")
    raise