import spaces
import gradio as gr
import re
from PIL import Image
import os
import numpy as np
import torch
from diffusers import FluxImg2ImgPipeline
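
# Use bfloat16 weights; run on CUDA when available, otherwise fall back to CPU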
dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"
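
# Load the FLUX.1-schnell img2img pipeline once at startup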
pipe = FluxImg2ImgPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=dtype).to(device)
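
# Strip everything except alphanumerics, whitespace, and basic punctuation from user prompts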
def sanitize_prompt(prompt):
    allowed_chars = re.compile(r"[^a-zA-Z0-9\s.,!?-]")
    sanitized_prompt = allowed_chars.sub("", prompt)
    return sanitized_prompt
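
# Scale (width, height) down proportionally so the longer side fits within maximum_size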
def convert_to_fit_size(original_width_and_height, maximum_size=2048):
    width, height = original_width_and_height
    if width <= maximum_size and height <= maximum_size:
        return width, height
    scaling_factor = maximum_size / max(width, height)
    new_width = int(width * scaling_factor)
    new_height = int(height * scaling_factor)
    return new_width, new_height
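
# The pipeline expects dimensions on a 16-px grid, so rounding both sides down to a multiple of 32 stays safely within spec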
def adjust_to_multiple_of_32(width: int, height: int):
    width = width - (width % 32)
    height = height - (height % 32)
    return width, height
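
# On HF Spaces ZeroGPU, reserve a GPU for up to 120 seconds per call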
@spaces.GPU(duration=120)
def process_images(image, prompt="a girl", strength=0.75, seed=0, inference_step=4, progress=gr.Progress(track_tqdm=True)):
    progress(0, desc="Starting")
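
    # Inner helper performs the actual generation; the GPU-decorated wrapper stays thin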
    def process_img2img(image, prompt="a person", strength=0.75, seed=0, num_inference_steps=4):
        if image is None:
            return None
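        # gr.Number returns floats by default; manual_seed() requires an int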
        generator = torch.Generator(device).manual_seed(int(seed))
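        # Fit within 2048 px, then snap both sides to the model's 32-px grid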
        width, height = convert_to_fit_size(image.size)
        width, height = adjust_to_multiple_of_32(width, height)
        image = image.resize((width, height), Image.LANCZOS)
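        # guidance_scale=0: FLUX.1-schnell is distilled to run without classifier-free guidance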
        output = pipe(prompt=prompt, image=image, generator=generator, strength=strength,
                      width=width, height=height, guidance_scale=0,
                      num_inference_steps=int(num_inference_steps), max_sequence_length=256)
        return output.images[0]
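
    # Apply the prompt sanitizer defined above before handing off to the pipeline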
    output = process_img2img(image, sanitize_prompt(prompt), strength, seed, inference_step)
    return output
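
# Read a UTF-8 text file, used to inline the HTML partials below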
def read_file(path: str) -> str:
    with open(path, 'r', encoding='utf-8') as f:
        content = f.read()
    return content
css = """
#col-left, #col-right {
    margin: 0 auto;
    max-width: 640px;
}
.grid-container {
    display: flex;
    align-items: center;
    justify-content: center;
    gap: 10px;
}
.image, .button {
    border: 4px solid black;
    border-radius: 8px;
    font-weight: bold;
}
.image {
    width: 256px;
    height: 256px;
    object-fit: cover;
}
#run_button {
    font-weight: bold;
    border: 4px solid black;
    border-radius: 8px;
}
.text {
    font-size: 16px;
}
"""
with gr.Blocks(css=css, elem_id="demo-container") as demo:
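    # Layout: input column (image, prompt, settings) beside the output column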
    with gr.Column():
        gr.HTML(read_file("demo_header.html"))
        # gr.HTML(read_file("demo_tools.html"))  # optional tools panel, left disabled
        with gr.Row():
            with gr.Column():
                image = gr.Image(width=256, height=256, sources=['upload', 'clipboard'], image_mode='RGB', elem_id="image_upload", type="pil", label="Upload")
                prompt = gr.Textbox(label="Prompt", value="a woman", placeholder="Your prompt", elem_id="prompt")
                btn = gr.Button("Generate", elem_id="run_button", variant="primary")
                with gr.Accordion(label="Advanced Settings", open=False):
                    strength = gr.Number(value=0.75, minimum=0, maximum=0.75, step=0.01, label="Strength")
                    seed = gr.Number(value=100, minimum=0, step=1, precision=0, label="Seed")
                    inference_step = gr.Number(value=4, minimum=1, step=4, precision=0, label="Inference Steps")
            with gr.Column():
                image_out = gr.Image(width=256, height=256, label="Output", elem_id="output-img", format="jpg")
        gr.HTML(read_file("demo_footer.html"))
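
    # Fire on button click or Enter in the prompt textbox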
    gr.on(
        triggers=[btn.click, prompt.submit],
        fn=process_images,
        inputs=[image, prompt, strength, seed, inference_step],
        outputs=[image_out]
    )

if __name__ == "__main__":
    demo.launch()