kontext-relight / app.py
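# Gradio demo: relight an uploaded image with FLUX.1 Kontext [dev],
# using Kontext transformer weights downloaded from the diffusers/kontext-v2 repo.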
import gradio as gr
import numpy as np
import spaces
import torch
import random
from PIL import Image
#from kontext_pipeline import FluxKontextPipeline
from pipeline_flux_kontext import FluxKontextPipeline
from diffusers import FluxTransformer2DModel
from diffusers.utils import load_image
from huggingface_hub import hf_hub_download
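
# Fetch the Kontext transformer checkpoint and swap it into the FLUX.1-dev pipeline.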
kontext_path = hf_hub_download(repo_id="diffusers/kontext-v2", filename="dev-opt-2-a-3.safetensors")
MAX_SEED = np.iinfo(np.int32).max
transformer = FluxTransformer2DModel.from_single_file(kontext_path, torch_dtype=torch.bfloat16)
pipe = FluxKontextPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", transformer=transformer, torch_dtype=torch.bfloat16).to("cuda")
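
# Relighting inference: wrap the user prompt in a fixed lighting-editing instruction
# and run the Kontext pipeline at the input image's native resolution.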
@spaces.GPU
def infer(input_image, prompt, seed=42, randomize_seed=False, guidance_scale=2.5, progress=gr.Progress(track_tqdm=True)):
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    input_image = input_image.convert("RGB")
    prompt_with_template = f"Change the lighting conditions in this image and add {prompt}. Lighting determines how bright or dark different parts of the image appear, where shadows fall, and how colors look. When you relight an image, you're simulating what the photo would look like if it were taken under different lighting conditions."
    image = pipe(
        image=input_image,
        prompt=prompt_with_template,
        guidance_scale=guidance_scale,
        width=input_image.size[0],
        height=input_image.size[1],
        generator=torch.Generator().manual_seed(seed),
    ).images[0]
    return image, seed, gr.update(visible=True)
css="""
#col-container {
margin: 0 auto;
max-width: 960px;
}
"""
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("""# FLUX.1 Kontext [dev] Relight""")
        with gr.Row():
            with gr.Column():
                input_image = gr.Image(label="Upload the image for relighting", type="pil")
                with gr.Row():
                    prompt = gr.Text(
                        label="Prompt",
                        show_label=False,
                        max_lines=1,
                        placeholder="sci-fi RGB glowing, studio lighting",
                        container=False,
                    )
                    run_button = gr.Button("Run", scale=0)
with gr.Accordion("Advanced Settings", open=False):
seed = gr.Slider(
label="Seed",
minimum=0,
maximum=MAX_SEED,
step=1,
value=0,
)
randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
guidance_scale = gr.Slider(
label="Guidance Scale",
minimum=1,
maximum=10,
step=0.1,
value=2.5,
)
with gr.Column():
result = gr.Image(label="Result", show_label=False, interactive=False)
reuse_button = gr.Button("Reuse this image", visible=False)
        gr.Examples(
            examples=[
                ["./assets/5_before.png", "sunset over sea lighting coming from the top right part of the photo", 0, True, 2.5],
                ["./assets/3_before.png", "sci-fi RGB glowing, studio lighting", 0, True, 2.5],
                ["./assets/2_before.png", "neon light, city", 0, True, 2.5],
            ],
            inputs=[input_image, prompt, seed, randomize_seed, guidance_scale],
            outputs=[result, seed, reuse_button],
            fn=infer,
            cache_examples="lazy",
        )
    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=infer,
        inputs=[input_image, prompt, seed, randomize_seed, guidance_scale],
        outputs=[result, seed, reuse_button],
    )
    reuse_button.click(
        fn=lambda image: image,
        inputs=[result],
        outputs=[input_image],
    )

demo.launch()