import torch
import spaces
import gradio as gr
from diffusers import FluxFillPipeline

pipe = FluxFillPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-Fill-dev", torch_dtype=torch.bfloat16
).to("cuda")


# reference https://huggingface.co/spaces/black-forest-labs/FLUX.1-Fill-dev/blob/main/app.py
def calculate_optimal_dimensions(image):
    # Extract the original dimensions
    original_width, original_height = image.size

    # Set constants
    MIN_ASPECT_RATIO = 9 / 16
    MAX_ASPECT_RATIO = 16 / 9
    FIXED_DIMENSION = 1024

    # Calculate the aspect ratio of the original image
    original_aspect_ratio = original_width / original_height

    # Determine which dimension to fix
    if original_aspect_ratio > 1:  # Wider than tall
        width = FIXED_DIMENSION
        height = round(FIXED_DIMENSION / original_aspect_ratio)
    else:  # Taller than wide
        height = FIXED_DIMENSION
        width = round(FIXED_DIMENSION * original_aspect_ratio)

    # Ensure dimensions are multiples of 8
    width = (width // 8) * 8
    height = (height // 8) * 8

    # Enforce aspect ratio limits (cast to int so the pipeline receives integer sizes)
    calculated_aspect_ratio = width / height
    if calculated_aspect_ratio > MAX_ASPECT_RATIO:
        width = int((height * MAX_ASPECT_RATIO) // 8) * 8
    elif calculated_aspect_ratio < MIN_ASPECT_RATIO:
        height = int((width / MIN_ASPECT_RATIO) // 8) * 8

    # Ensure width and height remain above the minimum dimensions
    width = max(width, 576) if width == FIXED_DIMENSION else width
    height = max(height, 576) if height == FIXED_DIMENSION else height

    return width, height


@spaces.GPU(duration=120)
def inpaint(
    image,
    mask,
    prompt="",
    num_inference_steps=28,
    guidance_scale=50,
):
    # The pipeline expects an RGB image and a single-channel (grayscale) mask
    image = image.convert("RGB")
    mask = mask.convert("L")

    width, height = calculate_optimal_dimensions(image)

    result = pipe(
        prompt=prompt,
        height=height,
        width=width,
        image=image,
        mask_image=mask,
        num_inference_steps=num_inference_steps,
        guidance_scale=guidance_scale,
    ).images[0]

    result = result.convert("RGBA")
    return result


demo = gr.Interface(
    fn=inpaint,
    inputs=[
        gr.Image(label="image", type="pil"),
        gr.Image(label="mask", type="pil"),
        gr.Text(label="prompt"),
        gr.Number(value=40, label="num_inference_steps"),
        gr.Number(value=28, label="guidance_scale"),
    ],
    outputs=["image"],
    api_name="inpaint",
    examples=[["./assets/rocket.png", "./assets/Inpainting mask.png"]],
    cache_examples=False,
    description="It is recommended that you use https://github.com/la-voliere/react-mask-editor when creating an image mask in JS, then invert it before sending it to this Space.",
)

demo.launch()
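
# --- Example client usage (sketch) ---
# A minimal sketch of calling the `inpaint` endpoint with gradio_client once this
# Space is deployed. The Space id ("user/flux-fill-inpaint") and the local file
# paths are placeholders; the positional arguments follow the input layout of the
# gr.Interface defined above.
#
# from gradio_client import Client, handle_file
#
# client = Client("user/flux-fill-inpaint")  # hypothetical Space id
# result = client.predict(
#     handle_file("image.png"),           # image to edit
#     handle_file("mask.png"),            # mask (white = region to repaint)
#     "a red rocket on the launch pad",   # prompt
#     40,                                 # num_inference_steps
#     28,                                 # guidance_scale
#     api_name="/inpaint",
# )
# print(result)  # local path to the generated image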