import gradio as gr
import numpy as np
import spaces
import torch
import random
from PIL import Image
from diffusers import FluxKontextPipeline
from diffusers import FluxTransformer2DModel
from diffusers.utils import load_image
from huggingface_hub import hf_hub_download

# Load the FLUX.1 Kontext [dev] pipeline and attach the relighting LoRA adapter
pipe = FluxKontextPipeline.from_pretrained("black-forest-labs/FLUX.1-Kontext-dev", torch_dtype=torch.bfloat16).to("cuda")
pipe.load_lora_weights("kontext-community/relighting-kontext-dev-lora-v3", weight_name="relighting-kontext-dev-lora-v3.safetensors", adapter_name="lora")
pipe.set_adapters(["lora"], adapter_weights=[1.0])

MAX_SEED = np.iinfo(np.int32).max

@spaces.GPU  # assumed: allocate a ZeroGPU device per call (the `spaces` import is otherwise unused)
def infer(input_image, prompt, seed=42, randomize_seed=False, guidance_scale=2.5, progress=gr.Progress(track_tqdm=True)):
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    input_image = input_image.convert("RGB")
    # Wrap the user prompt in a fixed relighting instruction template
    prompt_with_template = (
        f"Change the lighting conditions in this image and add {prompt}. "
        "Change the background details but maintain the foreground. "
        "Lighting determines how bright or dark different parts of the image appear, "
        "where shadows fall, and how colors look. When you relight an image, you're "
        "simulating what the photo would look like if it were taken under different lighting conditions."
    )
    image = pipe(
        image=input_image,
        prompt=prompt_with_template,
        guidance_scale=guidance_scale,
        width=input_image.size[0],
        height=input_image.size[1],
        generator=torch.Generator().manual_seed(seed),
    ).images[0]
    return image, seed

css=""" | |
#col-container { | |
margin: 0 auto; | |
max-width: 960px; | |
} | |
""" | |
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("# FLUX.1 Kontext [dev] Relight 💡")
        gr.Markdown("Kontext [dev] used for object relighting ✨")

        with gr.Row():
            with gr.Column():
                input_image = gr.Image(label="Upload the image for relighting", type="pil")
                with gr.Row():
                    prompt = gr.Text(
                        label="Prompt",
                        show_label=False,
                        max_lines=1,
                        placeholder="sci-fi RGB glowing, studio lighting",
                        container=False,
                    )
                    run_button = gr.Button("Run", scale=0)
                with gr.Accordion("Advanced Settings", open=False):
                    seed = gr.Slider(
                        label="Seed",
                        minimum=0,
                        maximum=MAX_SEED,
                        step=1,
                        value=0,
                    )
                    randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
                    guidance_scale = gr.Slider(
                        label="Guidance Scale",
                        minimum=1,
                        maximum=10,
                        step=0.1,
                        value=2.5,
                    )
            with gr.Column():
                result = gr.Image(label="Result", show_label=False, interactive=False)

        gr.Examples(
            examples=[
                ["./assets/5_before.png", "sunset over sea lighting coming from the top right part of the photo", 0, True, 2.5],
                ["./assets/3_before.png", "sci-fi RGB glowing, studio lighting", 0, True, 2.5],
                ["./assets/2_before.png", "neon light, city", 0, True, 2.5],
                ["./assets/before_6.png", "bright sunlight, warm, luminous", 0, True, 2.5],
            ],
            inputs=[input_image, prompt, seed, randomize_seed, guidance_scale],
            outputs=[result, seed],
            fn=infer,
            cache_examples="lazy",
        )

    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=infer,
        inputs=[input_image, prompt, seed, randomize_seed, guidance_scale],
        outputs=[result, seed],
    )

demo.launch()