# Hugging Face Space — runs on ZeroGPU.
# (Page-scrape metadata removed: site header, commit hashes, line-number gutter.)
import torch

# Replace torch.jit.script with a no-op decorator.
# NOTE(review): presumably a workaround for TorchScript compilation failing
# under the ZeroGPU "spaces" runtime — confirm before removing.
torch.jit.script = lambda f: f

import spaces
import gradio as gr
from diffusers import FluxInpaintPipeline
from PIL import Image, ImageFile

# ImageFile.LOAD_TRUNCATED_IMAGES = True

# Initialize the pipeline once at import time: FLUX.1-dev inpainting in
# bfloat16 on CUDA, with the two-view in-context LoRA loaded on top.
pipe = FluxInpaintPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16
)
pipe.to("cuda")
pipe.load_lora_weights(
    "ysmao/multiview-incontext",
    weight_name="twoview-incontext-b01.safetensors",
)
def fractional_resize_image(img, target_size=864):
    """Scale *img* so its longest side equals ``target_size``, keeping aspect ratio.

    Args:
        img: Source ``PIL.Image.Image``. Any non-RGB mode is converted to RGB
            first (the original code only handled ``RGBA``/``P``, letting
            modes such as ``L``, ``LA`` or ``CMYK`` pass through unconverted).
        target_size: Length in pixels for the longest edge after resizing.

    Returns:
        A new RGB image resized with Lanczos resampling. Images smaller than
        ``target_size`` are upscaled; larger ones are downscaled.
    """
    # Normalize every mode to RGB so downstream compositing and the
    # inpainting pipeline always receive 3-channel input.
    if img.mode != "RGB":
        img = img.convert("RGB")
    width, height = img.size
    scale_factor = target_size / max(width, height)
    return img.resize(
        (int(width * scale_factor), int(height * scale_factor)),
        Image.Resampling.LANCZOS,
    )
def duplicate_horizontally(img):
    """Tile *img* twice side by side and build the matching inpaint mask.

    Returns:
        tuple: ``(canvas, mask)`` where ``canvas`` is an RGB image holding two
        horizontal copies of *img*, and ``mask`` is white (regenerate) on the
        right half and black (preserve) on the left half.
    """
    w, h = img.size
    canvas = Image.new("RGB", (w * 2, h))
    # Paste the same source into both halves of the double-width canvas.
    for x_offset in (0, w):
        canvas.paste(img, (x_offset, 0))
    # Start fully white (inpaint everything), then black out the left half
    # so the original view is kept untouched.
    mask = Image.new("RGB", (w * 2, h), (255, 255, 255))
    mask.paste(Image.new("RGB", (w, h), (0, 0, 0)), (0, 0))
    return canvas, mask
@spaces.GPU(duration=120)
def generate(
    image, prompt_description, prompt_user, progress=gr.Progress(track_tqdm=True)
):
    """Synthesize a second viewpoint of *image* via two-view in-context inpainting.

    Args:
        image: Source photo (PIL image from the Gradio input).
        prompt_description: Text describing the source image ([IMAGE1] slot).
        prompt_user: Optional extra description for the new viewpoint.
        progress: Gradio progress tracker (mirrors the pipeline's tqdm bars).

    Returns:
        tuple: ``(new_view, side_by_side)`` — the generated right half alone,
        and the full two-view output image.
    """
    # Assemble the [TWO-VIEWS] prompt expected by the LoRA.
    prompt = (
        "[TWO-VIEWS] This set of two images presents a scene from two different viewpoints. [IMAGE1] The first image shows "
        + prompt_description
        + " [IMAGE2] The second image shows the same room but in another viewpoint "
        + prompt_user
        + "."
    )

    # Left half holds the (resized) source; the mask tells the pipeline to
    # repaint only the right half as the second viewpoint.
    source = fractional_resize_image(image)
    paired, mask = duplicate_horizontally(source)
    pair_width, pair_height = paired.size

    result = pipe(
        prompt=prompt,
        image=paired,
        mask_image=mask,
        guidance_scale=3.5,
        height=pair_height,
        width=pair_width,
        num_inference_steps=28,
        max_sequence_length=256,
        strength=1,
    ).images[0]

    # Crop out the generated right half for the primary output.
    out_width, out_height = result.size
    new_view = result.crop((out_width // 2, 0, out_width, out_height))
    return new_view, result
# --- Gradio UI ------------------------------------------------------------
with gr.Blocks() as demo:
    gr.Markdown("# MultiView in Context")
    gr.Markdown(
        "### [In-Context LoRA](https://huggingface.co/ali-vilab/In-Context-LoRA) + Image-to-Image + Inpainting. Diffusers implementation based on the [workflow by WizardWhitebeard/klinter](https://civitai.com/articles/8779)"
    )
    gr.Markdown(
        "### Using [MultiView In-Context LoRA](https://huggingface.co/ysmao/multiview-incontext)"
    )

    with gr.Tab("Demo"):
        with gr.Row():
            # Left column: inputs (source image + text prompts).
            with gr.Column():
                input_image = gr.Image(
                    label="Upload Source Image", type="pil", height=384
                )
                prompt_description = gr.Textbox(
                    label="Describe the source image",
                    placeholder="a living room with a sofa set with cushions, side tables with table lamps, a flat screen television on a table, houseplants, wall hangings, electric lights, and a carpet on the floor",
                )
                prompt_input = gr.Textbox(
                    label="Any additional description to the new viewpoint?",
                    placeholder="",
                )
                generate_btn = gr.Button("Generate Application", variant="primary")
            # Right column: outputs (cropped new view + full two-view image).
            with gr.Column():
                output_image = gr.Image(label="Generated Application")
                output_side = gr.Image(label="Side by side")

        # Pre-baked examples; "lazy" caching renders them on first request.
        gr.Examples(
            examples=[
                [
                    "livingroom_fluxdev.jpg",
                    "a living room with a sofa set with cushions, side tables with table lamps, a flat screen television on a table, houseplants, wall hangings, electric lights, and a carpet on the floor",
                    "",
                ],
                [
                    "bedroom_fluxdev.jpg",
                    "a bedroom with a bed, dresser, and window. The bed is covered with a blanket and pillows, and there is a carpet on the floor. The walls are adorned with photo frames, and the windows have curtains. Through the window, we can see trees outside.",
                    "",
                ],
            ],
            inputs=[input_image, prompt_description, prompt_input],
            outputs=[output_image, output_side],
            fn=generate,
            cache_examples="lazy",
        )

        with gr.Row():
            gr.Markdown(
                """
                ### Instructions:
                1. Upload a source image
                2. Describe the source image
                3. Click 'Generate Application' and wait for the result
                Note: The generation process might take a few moments.
                """
            )

    # Set up the click event: button runs generate() on the GPU worker.
    generate_btn.click(
        fn=generate,
        inputs=[input_image, prompt_description, prompt_input],
        outputs=[output_image, output_side],
    )

demo.launch()