import re

import gradio as gr
import torch
from diffusers import FluxImg2ImgPipeline
from PIL import Image

# Set up the device and pipeline
dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = FluxImg2ImgPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-schnell", torch_dtype=dtype
).to(device)


def sanitize_prompt(prompt):
    # Strip any character outside a conservative allow-list
    allowed_chars = re.compile(r"[^a-zA-Z0-9\s.,!?-]")
    return allowed_chars.sub("", prompt)


def convert_to_fit_size(original_width_and_height, maximum_size=2048):
    # Scale the image down so its longer side does not exceed maximum_size,
    # preserving the aspect ratio
    width, height = original_width_and_height
    if width <= maximum_size and height <= maximum_size:
        return width, height
    scaling_factor = maximum_size / max(width, height)
    return int(width * scaling_factor), int(height * scaling_factor)


def adjust_to_multiple_of_32(width, height):
    # The pipeline expects dimensions divisible by 32; round both sides down
    return width - (width % 32), height - (height % 32)


def process_images(image, prompt="a girl", strength=0.75, seed=0, inference_step=4,
                   progress=gr.Progress(track_tqdm=True)):
    def process_img2img(image, prompt, strength, seed, num_inference_steps):
        if image is None:
            return None
        generator = torch.Generator(device).manual_seed(seed)
        # Resize to a supported resolution before running the pipeline
        width, height = adjust_to_multiple_of_32(*convert_to_fit_size(image.size))
        image = image.resize((width, height), Image.LANCZOS)
        output = pipe(
            prompt=prompt,
            image=image,
            generator=generator,
            strength=strength,
            width=width,
            height=height,
            guidance_scale=0,  # the distilled schnell model runs without guidance
            num_inference_steps=num_inference_steps,
        )
        return output.images[0]

    return process_img2img(image, prompt, strength, seed, inference_step)


# Minimal CSS for black outline and container styling
css = """
#demo-container {
    border: 2px solid black;
    padding: 10px;
    width: 100%;
    max-width: 750px;
    margin: auto;
}
#image_upload, #output-img, #generate_button {
    border: 2px solid black;
}
"""

with gr.Blocks(css=css, elem_id="demo-container") as demo:
    with gr.Column():
        gr.HTML("