import random
import os
import uuid
import re
import time
from datetime import datetime

import gradio as gr
import numpy as np
import spaces
import torch
from diffusers import DiffusionPipeline
from PIL import Image

from openai import OpenAI

# The API key for the translation model is read from the LLM_API environment variable.
client = OpenAI(api_key=os.getenv("LLM_API"))

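# Style preset suffixes appended to the (translated) user prompt to steer the look of the image.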
STYLE_PRESETS = {
    "None": "",
    "Realistic Photo": "photorealistic, 8k, ultra-detailed, cinematic lighting, realistic skin texture",
    "Oil Painting": "oil painting, rich brush strokes, canvas texture, baroque lighting",
    "Comic Book": "comic book style, bold ink outlines, cel shading, vibrant colors",
    "Watercolor": "watercolor illustration, soft gradients, splatter effect, pastel palette",
}

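# Generated images and a simple metadata log are written to this local directory.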
SAVE_DIR = "saved_images"
os.makedirs(SAVE_DIR, exist_ok=True)

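# Load the FLUX.1-dev base model and apply the kim-korea LoRA adapter on top of it.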
device = "cuda" if torch.cuda.is_available() else "cpu" |
|
repo_id = "black-forest-labs/FLUX.1-dev" |
|
adapter_id = "seawolf2357/kim-korea" |
|
|
|
pipeline = DiffusionPipeline.from_pretrained(repo_id, torch_dtype=torch.bfloat16) |
|
pipeline.load_lora_weights(adapter_id) |
|
pipeline = pipeline.to(device) |
|
|
|
MAX_SEED = np.iinfo(np.int32).max |
|
MAX_IMAGE_SIZE = 1024 |
|
|
|
|
|
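# Matches Hangul jamo and syllables; any match means the prompt needs translation to English.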
HANGUL_RE = re.compile(r"[\u3131-\u318E\uAC00-\uD7A3]+")


def is_korean(text: str) -> bool:
    return bool(HANGUL_RE.search(text))

def openai_translate(text: str, retries: int = 3) -> str:
    """Translate a Korean prompt into English via OpenAI GPT-4.1-mini. English input is returned unchanged."""
    if not is_korean(text):
        return text

    for attempt in range(retries):
        try:
            res = client.chat.completions.create(
                model="gpt-4.1-mini",
                messages=[
                    {
                        "role": "system",
                        "content": "Translate the following Korean prompt into concise, descriptive English suitable for an image generation model. Keep the meaning, do not add new concepts."
                    },
                    {"role": "user", "content": text}
                ],
                temperature=0.3,
                max_tokens=256,
            )
            return res.choices[0].message.content.strip()
        except Exception as e:
            print(f"[translate] attempt {attempt + 1} failed: {e}")
            time.sleep(2)
    # Fall back to the original text if all attempts fail.
    return text

def prepare_prompt(user_prompt: str, style_key: str) -> str:
    """Translate the prompt if it is Korean, then append the selected style preset to build the final prompt."""
    prompt_en = openai_translate(user_prompt)
    style_suffix = STYLE_PRESETS.get(style_key, "")
    if style_suffix:
        final_prompt = f"{prompt_en}, {style_suffix}"
    else:
        final_prompt = prompt_en
    return final_prompt

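# Save the image under SAVE_DIR and append a "filename|prompt|timestamp" line to the metadata log.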
def save_generated_image(image: Image.Image, prompt: str) -> str:
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    unique_id = str(uuid.uuid4())[:8]
    filename = f"{timestamp}_{unique_id}.png"
    filepath = os.path.join(SAVE_DIR, filename)
    image.save(filepath)

    metadata_file = os.path.join(SAVE_DIR, "metadata.txt")
    with open(metadata_file, "a", encoding="utf-8") as f:
        f.write(f"{filename}|{prompt}|{timestamp}\n")
    return filepath

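# Single FLUX text-to-image pass; joint_attention_kwargs["scale"] controls how strongly the LoRA is applied.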
def run_pipeline(prompt: str, seed: int, width: int, height: int, guidance_scale: float, num_steps: int, lora_scale: float):
    generator = torch.Generator(device=device).manual_seed(int(seed))
    result = pipeline(
        prompt=prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_steps,
        width=width,
        height=height,
        generator=generator,
        joint_attention_kwargs={"scale": lora_scale},
    ).images[0]
    return result

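# @spaces.GPU requests a ZeroGPU slot (up to 60 s) when running on Hugging Face Spaces.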
@spaces.GPU(duration=60)
def generate_image(
    user_prompt: str,
    style_key: str,
    seed: int = 42,
    randomize_seed: bool = True,
    width: int = 1024,
    height: int = 768,
    guidance_scale: float = 3.5,
    num_inference_steps: int = 30,
    lora_scale: float = 1.0,
    progress=None,
):
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    final_prompt = prepare_prompt(user_prompt, style_key)
    image = run_pipeline(final_prompt, seed, width, height, guidance_scale, num_inference_steps, lora_scale)
    save_generated_image(image, final_prompt)
    return image, seed

examples = [
    "Mr. KIM holding up a 'Fighting!' sign with both hands, showing his patriotism and commitment to national progress.",
    "Mr. KIM raising both arms and cheering with a triumphant expression, conveying hope for victory and the future.",
    "Mr. KIM jogging through a park in workout clothes, showing healthy habits and energetic leadership.",
    "Mr. KIM warmly shaking hands with female citizens on a busy street, showing genuine care for and communication with women voters.",
    "Mr. KIM at a campaign rally, pointing toward his supporters with an inspiring gesture while women and children applaud.",
    "Mr. KIM at a local community event, surrounded by enthusiastic female supporters cheering him on.",
    "Mr. KIM visiting a local market, engaging in friendly conversation with female vendors and shopkeepers.",
    "Mr. KIM walking through a university campus, discussing education policies with female students and professors.",
    "Mr. KIM delivering a powerful speech in front of a large crowd with confident gestures and determined expression.",
    "Mr. KIM in a dynamic interview setting, passionately outlining his visions for the future.",
    "Mr. KIM preparing for an important debate, surrounded by paperwork, looking focused and resolute.",
]

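# Custom red/white theme for the Gradio interface.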
custom_css = """ |
|
:root { |
|
--color-primary: #8F1A3A; |
|
--color-secondary: #FF4B4B; |
|
--background-fill-primary: linear-gradient(to right, #FFF5F5, #FED7D7, #FEB2B2); |
|
} |
|
footer {visibility: hidden;} |
|
.gradio-container {background: var(--background-fill-primary);} |
|
.title {color: var(--color-primary)!important; font-size:3rem!important; font-weight:700!important; text-align:center; margin:1rem 0; font-family:'Playfair Display',serif;} |
|
.subtitle {color:#4A5568!important; font-size:1.2rem!important; text-align:center; margin-bottom:1.5rem; font-style:italic;} |
|
.collection-link {text-align:center; margin-bottom:2rem; font-size:1.1rem;} |
|
.collection-link a {color:var(--color-primary); text-decoration:underline; transition:color .3s ease;} |
|
.collection-link a:hover {color:var(--color-secondary);} |
|
.model-description{background:rgba(255,255,255,.8); border-radius:12px; padding:24px; margin:20px 0; box-shadow:0 4px 12px rgba(0,0,0,.05); border-left:5px solid var(--color-primary);} |
|
button.primary{background:var(--color-primary)!important; color:#fff!important; transition:all .3s ease;} |
|
button:hover{transform:translateY(-2px); box-shadow:0 5px 15px rgba(0,0,0,.1);} |
|
.input-container{border-radius:10px; box-shadow:0 2px 8px rgba(0,0,0,.05); background:rgba(255,255,255,.6); padding:20px; margin-bottom:1rem;} |
|
.advanced-settings{margin-top:1rem; padding:1rem; border-radius:10px; background:rgba(255,255,255,.6);} |
|
.example-region{background:rgba(255,255,255,.5); border-radius:10px; padding:1rem; margin-top:1rem;} |
|
""" |
|
|
|
|
|
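# Gradio UI: prompt input, style selector, generation controls, and event wiring.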
with gr.Blocks(css=custom_css, analytics_enabled=False) as demo:
    gr.HTML('<div class="title">Mr. KIM in KOREA</div>')
    gr.HTML('<div class="collection-link"><a href="https://huggingface.co/collections/openfree/painting-art-ai-681453484ec15ef5978bbeb1" target="_blank">Visit the LoRA Model Collection</a></div>')

    with gr.Group(elem_classes="model-description"):
        gr.HTML("""
        <p>
        This LoRA model was trained on the face and appearance of a specific person for research purposes.<br>
        Please do not use it without permission for any purpose other than its intended one.<br>
        (When using the example prompts, be sure to include 'kim' to get the best results.)
        </p>
        """)

    with gr.Column():
        with gr.Row(elem_classes="input-container"):
            user_prompt = gr.Text(label="Prompt", max_lines=1, value=examples[0])
            style_select = gr.Radio(label="Style Preset", choices=list(STYLE_PRESETS.keys()), value="None", interactive=True)
            run_button = gr.Button("Generate", variant="primary")

        result_image = gr.Image(label="Generated Image")
        seed_output = gr.Number(label="Seed")

        with gr.Accordion("Advanced Settings", open=False, elem_classes="advanced-settings"):
            seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=42)
            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
            with gr.Row():
                width = gr.Slider(label="Width", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=1024)
                height = gr.Slider(label="Height", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=768)
            with gr.Row():
                guidance_scale = gr.Slider(label="Guidance scale", minimum=0.0, maximum=10.0, step=0.1, value=3.5)
                num_inference_steps = gr.Slider(label="Inference steps", minimum=1, maximum=50, step=1, value=30)
                lora_scale = gr.Slider(label="LoRA scale", minimum=0.0, maximum=1.0, step=0.1, value=1.0)

        with gr.Group(elem_classes="example-region"):
            gr.Markdown("### Examples")
            gr.Examples(examples=examples, inputs=user_prompt, cache_examples=False)

    run_button.click(
        fn=generate_image,
        inputs=[
            user_prompt,
            style_select,
            seed,
            randomize_seed,
            width,
            height,
            guidance_scale,
            num_inference_steps,
            lora_scale,
        ],
        outputs=[result_image, seed_output],
    )

demo.queue()
demo.launch()