import os
import random
import uuid
from datetime import datetime

import gradio as gr
import numpy as np
import spaces
import torch
from diffusers import DiffusionPipeline
from PIL import Image

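# Generated images and a prompt log are stored in this local directory.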
SAVE_DIR = "saved_images"
os.makedirs(SAVE_DIR, exist_ok=True)

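# Compute device plus the FLUX.1-dev base model and the kim-korea LoRA adapter.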
device = "cuda" if torch.cuda.is_available() else "cpu"
repo_id = "black-forest-labs/FLUX.1-dev"
adapter_id = "seawolf2357/kim-korea"

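# Load the base pipeline in bfloat16, attach the LoRA weights, and move it to the target device.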
pipeline = DiffusionPipeline.from_pretrained(repo_id, torch_dtype=torch.bfloat16)
pipeline.load_lora_weights(adapter_id)
pipeline = pipeline.to(device)

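# Limits for the seed slider and the width/height sliders.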
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 1024


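# Save the image under a timestamped, unique filename and append a
# "filename|prompt|timestamp" record to metadata.txt alongside it.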
def save_generated_image(image, prompt):
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    unique_id = str(uuid.uuid4())[:8]
    filename = f"{timestamp}_{unique_id}.png"
    filepath = os.path.join(SAVE_DIR, filename)

    image.save(filepath)

    metadata_file = os.path.join(SAVE_DIR, "metadata.txt")
    with open(metadata_file, "a", encoding="utf-8") as f:
        f.write(f"{filename}|{prompt}|{timestamp}\n")

    return filepath


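# On Hugging Face ZeroGPU Spaces, this decorator allocates a GPU for the call (up to 60 seconds).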
@spaces.GPU(duration=60)
def inference(
    prompt,
    seed=42,
    randomize_seed=True,
    width=1024,
    height=768,
    guidance_scale=3.5,
    num_inference_steps=30,
    lora_scale=1.0,
    progress=None,
):
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator(device=device).manual_seed(int(seed))

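    # Run the FLUX pipeline; the "scale" entry controls how strongly the LoRA weights are applied.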
    image = pipeline(
        prompt=prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        width=width,
        height=height,
        generator=generator,
        joint_attention_kwargs={"scale": lora_scale},
    ).images[0]

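    # Persist the result locally before returning it to the UI.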
    filepath = save_generated_image(image, prompt)

    return image, seed


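# Example prompts surfaced in the UI; each references the LoRA's "Mr. KIM" subject.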
examples = [
    "Mr. KIM delivering a powerful speech in front of a large crowd with confident gestures and determined expression.",
    "Mr. KIM holding a press conference, facing flashing cameras, wearing a tailored suit and a subtle lapel pin.",
    "Mr. KIM visiting a rural area, warmly greeting local residents 'all women' (different faces) while discussing policies and improvements.",
    "Mr. KIM in a dynamic interview setting, passionately outlining his visions for the future.",
    "Mr. KIM preparing for an important debate, surrounded by paperwork, looking focused and resolute.",
    "Mr. KIM holding up a 'NO.1 KOREA' banner with both hands, showing patriotic pride and determination for national excellence.",
    "Mr. KIM jogging in a park wearing athletic gear, demonstrating healthy lifestyle and energetic leadership qualities.",
    "Mr. KIM raising both arms in celebration with a triumphant expression, showing victory and hope for the future.",
    "Mr. KIM warmly shaking hands with female citizens in a crowded street, showing genuine care and connection with women voters.",
    "Mr. KIM participating in a community event, surrounded by enthusiastic female supporters cheering and waving flags.",
    "Mr. KIM at a campaign rally, pointing toward the horizon with an inspiring gesture while female audience members applaud.",
    "Mr. KIM visiting a local market, engaging in friendly conversation with female vendors and shopkeepers.",
    "Mr. KIM at a town hall meeting, attentively listening to concerns raised by female constituents with a compassionate expression.",
    "Mr. KIM cutting a ribbon at a new facility opening, smiling broadly while female community leaders stand beside him.",
    "Mr. KIM walking through a university campus, discussing education policies with female students and professors.",
]

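# Custom CSS: red color palette, gradient page background, and card-style panels.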
custom_css = """ |
|
:root { |
|
--color-primary: #8F1A3A; /* ๋ถ์ ํค์ ๋ฉ์ธ ์ปฌ๋ฌ */ |
|
--color-secondary: #FF4B4B; /* ํฌ์ธํธ ์ปฌ๋ฌ(๋ฐ์ ๋นจ๊ฐ) */ |
|
--background-fill-primary: linear-gradient(to right, #FFF5F5, #FED7D7, #FEB2B2); |
|
} |
|
|
|
footer {
    visibility: hidden;
}

.gradio-container {
    background: var(--background-fill-primary);
}

.title {
    color: var(--color-primary) !important;
    font-size: 3rem !important;
    font-weight: 700 !important;
    text-align: center;
    margin: 1rem 0;
    text-shadow: 2px 2px 4px rgba(0,0,0,0.05);
    font-family: 'Playfair Display', serif;
}

.subtitle {
    color: #4A5568 !important;
    font-size: 1.2rem !important;
    text-align: center;
    margin-bottom: 1.5rem;
    font-style: italic;
}

.collection-link {
    text-align: center;
    margin-bottom: 2rem;
    font-size: 1.1rem;
}

.collection-link a {
    color: var(--color-primary);
    text-decoration: underline;
    transition: color 0.3s ease;
}

.collection-link a:hover {
    color: var(--color-secondary);
}

.model-description {
    background-color: rgba(255, 255, 255, 0.8);
    border-radius: 12px;
    padding: 24px;
    margin: 20px 0;
    box-shadow: 0 4px 12px rgba(0, 0, 0, 0.05);
    border-left: 5px solid var(--color-primary);
}

button.primary {
    background-color: var(--color-primary) !important;
    transition: all 0.3s ease;
    color: #fff !important;
}

button:hover {
    transform: translateY(-2px);
    box-shadow: 0 5px 15px rgba(0,0,0,0.1);
}

.input-container {
    border-radius: 10px;
    box-shadow: 0 2px 8px rgba(0,0,0,0.05);
    background-color: rgba(255, 255, 255, 0.6);
    padding: 20px;
    margin-bottom: 1rem;
}

.advanced-settings {
    margin-top: 1rem;
    padding: 1rem;
    border-radius: 10px;
    background-color: rgba(255, 255, 255, 0.6);
}

.example-region {
    background-color: rgba(255, 255, 255, 0.5);
    border-radius: 10px;
    padding: 1rem;
    margin-top: 1rem;
}
"""

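# Build the Gradio interface: prompt input, generated-image output, advanced settings, and examples.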
with gr.Blocks(css=custom_css, analytics_enabled=False) as demo:
    gr.HTML('<div class="title">Mr. KIM in KOREA</div>')

    gr.HTML('<div class="collection-link"><a href="https://huggingface.co/collections/openfree/painting-art-ai-681453484ec15ef5978bbeb1" target="_blank">Visit the LoRA Model Collection</a></div>')

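    # Usage notice displayed at the top of the page.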
with gr.Group(elem_classes="model-description"): |
|
gr.HTML(""" |
|
<p> |
|
๋ณธ ๋ชจ๋ธ์ ์ฐ๊ตฌ ๋ชฉ์ ์ผ๋ก ํน์ ์ธ์ ์ผ๊ตด๊ณผ ์ธ๋ชจ๋ฅผ ํ์ตํ LoRA ๋ชจ๋ธ์
๋๋ค.<br> |
|
๋ชฉ์ ์ธ์ ์ฉ๋๋ก ๋ฌด๋จ ์ฌ์ฉ ์๋๋ก ์ ์ํด ์ฃผ์ธ์.<br> |
|
(์์ prompt ์ฌ์ฉ ์ ๋ฐ๋์ 'kim'์ ํฌํจํ์ฌ์ผ ์ต์ ์ ๊ฒฐ๊ณผ๋ฅผ ์ป์ ์ ์์ต๋๋ค.) |
|
</p> |
|
""") |
|
|
|
|
|
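    # Main layout: prompt row, output image, seed readout, and advanced settings.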
with gr.Column(elem_id="col-container"): |
|
with gr.Row(elem_classes="input-container"): |
|
prompt = gr.Text( |
|
label="Prompt", |
|
max_lines=1, |
|
placeholder="Enter your prompt (add [trigger] at the end)", |
|
value=examples[0] |
|
) |
|
run_button = gr.Button("Generate", variant="primary", scale=0) |
|
|
|
        result = gr.Image(label="Generated Image")
        seed_output = gr.Number(label="Seed", visible=True)

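        # Generation controls: seed, resolution, guidance, steps, and LoRA strength.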
with gr.Accordion("Advanced Settings", open=False, elem_classes="advanced-settings"): |
|
seed = gr.Slider( |
|
label="Seed", |
|
minimum=0, |
|
maximum=MAX_SEED, |
|
step=1, |
|
value=42, |
|
) |
|
randomize_seed = gr.Checkbox(label="Randomize seed", value=True) |
|
|
|
            with gr.Row():
                width = gr.Slider(
                    label="Width",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=1024,
                )
                height = gr.Slider(
                    label="Height",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=768,
                )

            with gr.Row():
                guidance_scale = gr.Slider(
                    label="Guidance scale",
                    minimum=0.0,
                    maximum=10.0,
                    step=0.1,
                    value=3.5,
                )
                num_inference_steps = gr.Slider(
                    label="Number of inference steps",
                    minimum=1,
                    maximum=50,
                    step=1,
                    value=30,
                )
                lora_scale = gr.Slider(
                    label="LoRA scale",
                    minimum=0.0,
                    maximum=1.0,
                    step=0.1,
                    value=1.0,
                )

with gr.Group(elem_classes="example-region"): |
|
gr.Markdown("### Examples") |
|
gr.Examples( |
|
examples=examples, |
|
inputs=prompt, |
|
outputs=None, |
|
fn=None, |
|
cache_examples=False, |
|
) |
|
|
|
|
|
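    # Run inference when the Generate button is clicked or the prompt is submitted with Enter.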
    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=inference,
        inputs=[
            prompt,
            seed,
            randomize_seed,
            width,
            height,
            guidance_scale,
            num_inference_steps,
            lora_scale,
        ],
        outputs=[result, seed_output],
    )

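# Enable request queuing and start the app.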
demo.queue()
demo.launch()
|