import gradio as gr
import numpy as np
import spaces
import torch
import random
from PIL import Image
from diffusers import FluxKontextPipeline
from diffusers import FluxTransformer2DModel
from diffusers.utils import load_image
from huggingface_hub import hf_hub_download
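
# Load the FLUX.1 Kontext [dev] base pipeline and attach the relighting LoRA adapter.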
pipe = FluxKontextPipeline.from_pretrained("black-forest-labs/FLUX.1-Kontext-dev", torch_dtype=torch.bfloat16).to("cuda")
pipe.load_lora_weights("kontext-community/relighting-kontext-dev-lora-v3", weight_name="relighting-kontext-dev-lora-v3.safetensors", adapter_name="lora")
pipe.set_adapters(["lora"], adapter_weights=[1.0])
MAX_SEED = np.iinfo(np.int32).max
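
# ZeroGPU: the @spaces.GPU decorator allocates a GPU only while `infer` is running.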
@spaces.GPU
def infer(input_image, prompt, seed=42, randomize_seed=False, guidance_scale=2.5, progress=gr.Progress(track_tqdm=True)):
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    input_image = input_image.convert("RGB")
    # Embed the user prompt in a fixed relighting instruction template.
    prompt_with_template = f"Change the lighting conditions in this image and add {prompt}. Change the background details but maintain the foreground. Lighting determines how bright or dark different parts of the image appear, where shadows fall, and how colors look. When you relight an image, you're simulating what the photo would look like if it were taken under different lighting conditions."
    # Generate at the input resolution so the relit image matches the original size.
    image = pipe(
        image=input_image,
        prompt=prompt_with_template,
        guidance_scale=guidance_scale,
        width=input_image.size[0],
        height=input_image.size[1],
        generator=torch.Generator().manual_seed(seed),
    ).images[0]
    return image, seed
css="""
#col-container {
margin: 0 auto;
max-width: 960px;
}
"""
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("# FLUX.1 Kontext [dev] Relight 💡")
        gr.Markdown("Kontext [dev] used for object relighting ✨")
        with gr.Row():
            with gr.Column():
                input_image = gr.Image(label="Upload the image for relighting", type="pil")
                with gr.Row():
                    prompt = gr.Text(
                        label="Prompt",
                        show_label=False,
                        max_lines=1,
                        placeholder="sci-fi RGB glowing, studio lighting",
                        container=False,
                    )
                    run_button = gr.Button("Run", scale=0)
                with gr.Accordion("Advanced Settings", open=False):
                    seed = gr.Slider(
                        label="Seed",
                        minimum=0,
                        maximum=MAX_SEED,
                        step=1,
                        value=0,
                    )
                    randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
                    guidance_scale = gr.Slider(
                        label="Guidance Scale",
                        minimum=1,
                        maximum=10,
                        step=0.1,
                        value=2.5,
                    )
            with gr.Column():
                result = gr.Image(label="Result", show_label=False, interactive=False)
        gr.Examples(
            examples=[
                ["./assets/5_before.png", "sunset over sea lighting coming from the top right part of the photo", 0, True, 2.5],
                ["./assets/3_before.png", "sci-fi RGB glowing, studio lighting", 0, True, 2.5],
                ["./assets/2_before.png", "neon light, city", 0, True, 2.5],
                ["./assets/before_6.png", "bright sunlight, warm, luminous", 0, True, 2.5],
            ],
            inputs=[input_image, prompt, seed, randomize_seed, guidance_scale],
            outputs=[result, seed],
            fn=infer,
            cache_examples="lazy",
        )
    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=infer,
        inputs=[input_image, prompt, seed, randomize_seed, guidance_scale],
        outputs=[result, seed],
    )

demo.launch()