|
|
|
# Hugging Face Spaces exposes a `spaces` package whose `GPU` decorator
# requests ZeroGPU hardware for the decorated function. Outside Spaces the
# import fails, so install a no-op stand-in with the same call signature.
try:
    import spaces

    HF_SPACES = True
except ImportError:

    def spaces_gpu_decorator(duration=60):
        """No-op replacement for ``spaces.GPU``: returns the function unchanged."""

        def decorator(func):
            return func

        return decorator

    # Wrap in staticmethod: a plain function stored in a class dict becomes a
    # bound method on instance access, so `spaces.GPU(duration=60)` would pass
    # the instance as an implicit first argument and raise
    # "got multiple values for argument 'duration'".
    spaces = type('spaces', (), {'GPU': staticmethod(spaces_gpu_decorator)})()
    HF_SPACES = False
    print("Warning: Running without Hugging Face Spaces GPU allocation")
|
|
|
|
|
import random |
|
import os |
|
import uuid |
|
import re |
|
import time |
|
from datetime import datetime |
|
|
|
import gradio as gr |
|
import numpy as np |
|
import requests |
|
import torch |
|
from diffusers import DiffusionPipeline |
|
from PIL import Image |
|
|
|
|
|
from openai import OpenAI |
|
|
|
|
|
# OpenAI client used for Korean->English translation and prompt enhancement.
# The key is read from the LLM_API environment variable; on any failure the
# client is left as None and the helpers below degrade to pass-through mode.
try:
    client = OpenAI(api_key=os.getenv("LLM_API"))
except Exception as e:
    print(f"Warning: OpenAI client initialization failed: {e}")
    client = None
|
|
|
|
|
# Style presets offered in the UI. Keys become the Radio options (insertion
# order is the display order); the value is a comma-separated keyword suffix
# appended to the final prompt ("None" appends nothing).
STYLE_PRESETS = {
    "None": "",
    "Realistic Photo": "photorealistic, 8k, ultra-detailed, cinematic lighting, realistic skin texture",
    "Oil Painting": "oil painting, rich brush strokes, canvas texture, baroque lighting",
    "Comic Book": "comic book style, bold ink outlines, cel shading, vibrant colors",
    "Watercolor": "watercolor illustration, soft gradients, splatter effect, pastel palette",
}
|
|
|
|
|
# Directory where generated images and their metadata log are persisted.
SAVE_DIR = "saved_images"
# exist_ok=True already makes creation idempotent, so the previous
# os.path.exists() pre-check was redundant (and a TOCTOU race).
os.makedirs(SAVE_DIR, exist_ok=True)
|
|
|
|
|
# Pick the compute device: prefer CUDA when a GPU is visible, else CPU.
device = "cpu"
if torch.cuda.is_available():
    device = "cuda"
print(f"Using device: {device}")

# Base FLUX model and the LoRA adapter layered on top of it.
repo_id = "black-forest-labs/FLUX.1-dev"
adapter_id = "seawolf2357/kim-korea"
|
|
|
|
|
# Load the diffusion pipeline once at startup. `pipeline` stays None unless
# every step (download, LoRA merge, device move) succeeds, so the UI can
# still launch and report the failure per-request instead of crashing here.
pipeline = None
try:
    _pipe = DiffusionPipeline.from_pretrained(repo_id, torch_dtype=torch.bfloat16)
    _pipe.load_lora_weights(adapter_id)
    pipeline = _pipe.to(device)
    print("Model loaded successfully")
except Exception as e:
    print(f"Error loading model: {e}")
|
|
|
# Upper bounds for user-controlled generation parameters.
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 1024

# Hangul compatibility jamo (U+3131-U+318E) plus precomposed syllables
# (U+AC00-U+D7A3) — enough to detect Korean text in a prompt.
HANGUL_RE = re.compile(r"[\u3131-\u318E\uAC00-\uD7A3]+")


def is_korean(text: str) -> bool:
    """Return True when *text* contains at least one Hangul character."""
    return HANGUL_RE.search(text) is not None
|
|
|
|
|
|
|
def openai_translate(text: str, retries: int = 3) -> str:
    """Translate a Korean prompt into English using OpenAI gpt-4o-mini.

    English (non-Hangul) input is returned unchanged. If the client is
    unavailable or all attempts fail, the original text is returned so
    image generation can still proceed.
    """
    if not is_korean(text):
        return text

    if client is None:
        print("Warning: OpenAI client not available, returning original text")
        return text

    for attempt in range(retries):
        try:
            res = client.chat.completions.create(
                model="gpt-4o-mini",
                messages=[
                    {
                        "role": "system",
                        "content": "Translate the following Korean prompt into concise, descriptive English suitable for an image generation model. Keep the meaning, do not add new concepts."
                    },
                    {"role": "user", "content": text}
                ],
                temperature=0.3,
                max_tokens=256,
            )
            return res.choices[0].message.content.strip()
        except Exception as e:
            print(f"[translate] attempt {attempt + 1} failed: {e}")
            # Back off only when another attempt remains; previously this
            # also slept 2s after the final failure before giving up.
            if attempt < retries - 1:
                time.sleep(2)
    return text
|
|
|
def enhance_prompt(text: str, retries: int = 3) -> str:
    """Expand a prompt with visual/quality details via OpenAI gpt-4o-mini.

    Returns the enhanced prompt, or the original text when the client is
    unavailable or every attempt fails.
    """
    if client is None:
        print("Warning: OpenAI client not available, returning original text")
        return text

    for attempt in range(retries):
        try:
            res = client.chat.completions.create(
                model="gpt-4o-mini",
                messages=[
                    {
                        "role": "system",
                        "content": """You are an expert prompt engineer for image generation models. Enhance the given prompt to create high-quality, detailed images.

Guidelines:
- Add specific visual details (lighting, composition, colors, textures)
- Include technical photography terms (depth of field, focal length, etc.)
- Add atmosphere and mood descriptors
- Specify image quality terms (4K, ultra-detailed, professional, etc.)
- Keep the core subject and meaning intact
- Make it comprehensive but not overly long
- Focus on visual elements that will improve image generation quality

Example:
Input: "A man giving a speech"
Output: "A professional man giving an inspiring speech at a podium, dramatic lighting with warm spotlights, confident posture and gestures, high-resolution 4K photography, sharp focus, cinematic composition, bokeh background with audience silhouettes, professional event setting, detailed facial expressions, realistic skin texture"
"""
                    },
                    {"role": "user", "content": f"Enhance this prompt for high-quality image generation: {text}"}
                ],
                temperature=0.7,
                max_tokens=512,
            )
            return res.choices[0].message.content.strip()
        except Exception as e:
            print(f"[enhance] attempt {attempt + 1} failed: {e}")
            # Only sleep between attempts; the final failure returns
            # immediately instead of wasting 2 seconds.
            if attempt < retries - 1:
                time.sleep(2)
    return text
|
|
|
def prepare_prompt(user_prompt: str, style_key: str, enhance_prompt_enabled: bool = False) -> str:
    """Build the final generation prompt.

    Korean input is translated to English first; when enhancement is
    enabled the prompt is expanded by the LLM; finally the selected style
    preset's keyword suffix (if any) is appended.
    """
    prompt_en = openai_translate(user_prompt)

    if enhance_prompt_enabled:
        prompt_en = enhance_prompt(prompt_en)
        print(f"Enhanced prompt: {prompt_en}")

    # Unknown style keys fall back to no suffix, same as the "None" preset.
    style_suffix = STYLE_PRESETS.get(style_key, "")
    return f"{prompt_en}, {style_suffix}" if style_suffix else prompt_en
|
|
|
|
|
|
|
def save_generated_image(image: Image.Image, prompt: str) -> str:
    """Save *image* under SAVE_DIR with a unique timestamped PNG name.

    Also appends a `filename|prompt|timestamp` record to metadata.txt so a
    saved file can be traced back to the prompt that produced it.
    Returns the path of the saved image.
    """
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    unique_id = str(uuid.uuid4())[:8]
    filename = f"{timestamp}_{unique_id}.png"
    filepath = os.path.join(SAVE_DIR, filename)
    image.save(filepath)

    metadata_file = os.path.join(SAVE_DIR, "metadata.txt")
    with open(metadata_file, "a", encoding="utf-8") as f:
        # Bug fix: this previously wrote a literal placeholder instead of
        # the generated filename, so records could not be matched to files.
        f.write(f"{filename}|{prompt}|{timestamp}\n")
    return filepath
|
|
|
|
|
|
|
def run_pipeline(prompt: str, seed: int, width: int, height: int, guidance_scale: float, num_steps: int, lora_scale: float):
    """Run one text-to-image inference and return the generated PIL image.

    Raises:
        ValueError: when the model pipeline failed to load at startup.
    """
    if pipeline is None:
        raise ValueError("Model pipeline not loaded")

    # Seeded generator keeps results reproducible for a given seed value.
    rng = torch.Generator(device=device).manual_seed(int(seed))
    output = pipeline(
        prompt=prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_steps,
        width=width,
        height=height,
        generator=rng,
        # Scales the LoRA contribution at inference time.
        joint_attention_kwargs={"scale": lora_scale},
    )
    return output.images[0]
|
|
|
|
|
|
|
@spaces.GPU(duration=60)
def generate_image(
    user_prompt: str,
    style_key: str,
    enhance_prompt_enabled: bool = False,
    seed: int = 42,
    randomize_seed: bool = True,
    width: int = 1024,
    height: int = 768,
    guidance_scale: float = 3.5,
    num_inference_steps: int = 30,
    lora_scale: float = 1.0,
    progress=None,
):
    """Gradio callback: build the prompt, run inference, persist the result.

    Returns a ``(image, seed)`` pair. Never raises: any failure yields a
    solid red placeholder image of the requested size instead.
    """
    try:
        if randomize_seed:
            seed = random.randint(0, MAX_SEED)

        prompt_text = prepare_prompt(user_prompt, style_key, enhance_prompt_enabled)
        print(f"Final prompt: {prompt_text}")

        img = run_pipeline(
            prompt_text,
            seed,
            width,
            height,
            guidance_scale,
            num_inference_steps,
            lora_scale,
        )
        save_generated_image(img, prompt_text)
        return img, seed
    except Exception as e:
        print(f"Error generating image: {e}")
        # A red canvas makes failure visually obvious without crashing the UI.
        fallback = Image.new('RGB', (width, height), color='red')
        return fallback, seed
|
|
|
|
|
|
|
# Example prompts shown in the UI (Korean and English). The first entry is
# also used as the default value of the prompt textbox.
examples = [
    "Mr. KIM์ด ๋ ์์ผ๋ก 'Fighting!' ํ์๋ง์ ๋ค๊ณ  ์๋ ๋ชจ์ต, ์ ๊ตญ์ฌ๊ณผ ๊ตญ๊ฐ ๋ฐ์ ์ ๋ํ ์์ง๋ฅผ ๋ณด์ฌ์ฃผ๊ณ  ์๋ค.",
    "Mr. KIM์ด ์ํ์ ๋ค์ด ์ฌ๋ฆฌ๋ฉฐ ์น๋ฆฌ์ ํ์ ์ผ๋ก ํํธํ๋ ๋ชจ์ต, ์น๋ฆฌ์ ๋ฏธ๋์ ๋ํ ํฌ๋ง์ ๋ณด์ฌ์ฃผ๊ณ  ์๋ค.",
    "Mr. KIM์ด ์ด๋๋ณต์ ์๊ณ  ๊ณต์์์ ์กฐ๊นํ๋ ๋ชจ์ต, ๊ฑด๊ฐํ ์ํ์ต๊ด๊ณผ ํ๊ธฐ์ฐฌ ๋ฆฌ๋์ญ์ ๋ณด์ฌ์ฃผ๊ณ  ์๋ค.",
    "Mr. KIM์ด ๋ถ๋น๋ ๊ฑฐ๋ฆฌ์์ ์ฌ์ฑ ์๋ฏผ๋ค๊ณผ ๋ฐ๋ปํ๊ฒ ์์ํ๋ ๋ชจ์ต, ์ฌ์ฑ ์ ๊ถ์๋ค์ ๋ํ ์ง์ ํ ๊ด์ฌ๊ณผ ์ํต์ ๋ณด์ฌ์ฃผ๊ณ  ์๋ค.",
    "Mr. KIM์ด ์ ๊ฑฐ ์ ์ธ์ฅ์์ ์งํ์ ์ ํฅํด ์๊ฐ๋ฝ์ผ๋ก ๊ฐ๋ฆฌํค๋ฉฐ ์๊ฐ์ ์ฃผ๋ ์ ์ค์ฒ๋ฅผ ์ทจํ๊ณ  ์๊ณ , ์ฌ์ฑ๋ค๊ณผ ์์ด๋ค์ด ๋ฐ์๋ฅผ ์น๊ณ  ์๋ค.",
    "Mr. KIM์ด ์ง์ญ ํ์ฌ์ ์ฐธ์ฌํ์ฌ ์ด์ ์ ์ผ๋ก ์์ํ๋ ์ฌ์ฑ ์ง์ง์๋ค์๊ฒ ๋๋ฌ์ธ์ฌ ์๋ ๋ชจ์ต.",
    "Mr. KIM visiting a local market, engaging in friendly conversation with female vendors and shopkeepers.",
    "Mr. KIM walking through a university campus, discussing education policies with female students and professors.",
    "Mr. KIM delivering a powerful speech in front of a large crowd with confident gestures and determined expression.",
    "Mr. KIM in a dynamic interview setting, passionately outlining his visions for the future.",
    "Mr. KIM preparing for an important debate, surrounded by paperwork, looking focused and resolute.",
]
|
|
|
|
|
# CSS injected into gr.Blocks: a deep-red glassmorphism theme (CSS variables
# in :root, blurred translucent panels, gold accents, fade-in animations).
# Class names here must match the elem_classes used in create_interface().
custom_css = """
:root {
    --color-primary: #8F1A3A;
    --color-secondary: #FF4B4B;
    --color-accent: #B91C3C;
    --color-dark-red: #7F1D1D;
    --color-gold: #D4AF37;
    --background-fill-primary: linear-gradient(135deg, #450A0A 0%, #7F1D1D 25%, #991B1B 50%, #B91C3C 75%, #DC2626 100%);
    --glass-bg: rgba(255, 255, 255, 0.1);
    --glass-border: rgba(255, 255, 255, 0.2);
    --shadow-primary: 0 25px 50px -12px rgba(0, 0, 0, 0.5);
    --shadow-secondary: 0 10px 25px -3px rgba(0, 0, 0, 0.3);
    --shadow-accent: 0 4px 15px rgba(185, 28, 60, 0.4);
}

/* ์ ์ฒด ๋ฐฐ๊ฒฝ */
footer {visibility: hidden;}
.gradio-container {
    background: var(--background-fill-primary) !important;
    min-height: 100vh;
    position: relative;
}

/* ๋ฐฐ๊ฒฝ์ ํจํด ์ถ๊ฐ */
.gradio-container::before {
    content: '';
    position: absolute;
    top: 0;
    left: 0;
    right: 0;
    bottom: 0;
    background-image:
        radial-gradient(circle at 25% 25%, rgba(255, 255, 255, 0.1) 0%, transparent 50%),
        radial-gradient(circle at 75% 75%, rgba(212, 175, 55, 0.1) 0%, transparent 50%);
    pointer-events: none;
    z-index: 1;
}

/* ๋ฉ์ธ ์ฝํ์ธ  z-index */
.gradio-container > * {
    position: relative;
    z-index: 2;
}

/* ํ์ดํ ์คํ์ผ */
.title {
    color: #FFFFFF !important;
    font-size: 3.5rem !important;
    font-weight: 800 !important;
    text-align: center;
    margin: 2rem 0;
    font-family: 'Playfair Display', serif;
    text-shadow: 0 4px 8px rgba(0, 0, 0, 0.5);
    background: linear-gradient(135deg, #FFFFFF 0%, var(--color-gold) 50%, #FFFFFF 100%);
    -webkit-background-clip: text;
    -webkit-text-fill-color: transparent;
    background-clip: text;
}

.subtitle {
    color: #F3F4F6 !important;
    font-size: 1.3rem !important;
    text-align: center;
    margin-bottom: 2rem;
    font-style: italic;
    text-shadow: 0 2px 4px rgba(0, 0, 0, 0.3);
}

.collection-link {
    text-align: center;
    margin-bottom: 2rem;
    font-size: 1.1rem;
}

.collection-link a {
    color: var(--color-gold);
    text-decoration: none;
    transition: all 0.3s ease;
    font-weight: 600;
    text-shadow: 0 2px 4px rgba(0, 0, 0, 0.3);
}

.collection-link a:hover {
    color: #F59E0B;
    text-shadow: 0 4px 8px rgba(245, 158, 11, 0.5);
}

/* ๊ณ ๊ธ์ค๋ฌ์ด 3D ๋ฐ์ค ์คํ์ผ */
.model-description {
    background: linear-gradient(135deg, var(--glass-bg) 0%, rgba(255, 255, 255, 0.05) 100%);
    backdrop-filter: blur(20px);
    -webkit-backdrop-filter: blur(20px);
    border: 1px solid var(--glass-border);
    border-radius: 20px;
    padding: 30px;
    margin: 25px 0;
    box-shadow:
        var(--shadow-primary),
        inset 0 1px 0 rgba(255, 255, 255, 0.2),
        0 0 30px rgba(185, 28, 60, 0.2);
    position: relative;
    overflow: hidden;
    transform: translateZ(0);
}

.model-description::before {
    content: '';
    position: absolute;
    top: 0;
    left: 0;
    right: 0;
    height: 1px;
    background: linear-gradient(90deg, transparent, var(--color-gold), transparent);
}

.model-description p {
    color: #F9FAFB !important;
    font-size: 1.1rem;
    line-height: 1.6;
    text-shadow: 0 1px 2px rgba(0, 0, 0, 0.5);
}

/* ๋ฒํผ ์คํ์ผ */
button.primary {
    background: linear-gradient(135deg, var(--color-primary) 0%, var(--color-accent) 100%) !important;
    color: #FFFFFF !important;
    border: none !important;
    box-shadow:
        var(--shadow-accent),
        inset 0 1px 0 rgba(255, 255, 255, 0.2) !important;
    transition: all 0.3s ease !important;
    font-weight: 600 !important;
    text-transform: uppercase !important;
    letter-spacing: 0.5px !important;
}

button.primary:hover {
    transform: translateY(-3px) !important;
    box-shadow:
        0 20px 40px rgba(185, 28, 60, 0.4),
        inset 0 1px 0 rgba(255, 255, 255, 0.2) !important;
    background: linear-gradient(135deg, var(--color-accent) 0%, #DC2626 100%) !important;
}

/* ์๋ ฅ ์ปจํ์ด๋ */
.input-container {
    background: linear-gradient(135deg, var(--glass-bg) 0%, rgba(255, 255, 255, 0.08) 100%);
    backdrop-filter: blur(25px);
    -webkit-backdrop-filter: blur(25px);
    border: 1px solid var(--glass-border);
    border-radius: 20px;
    padding: 25px;
    margin-bottom: 2rem;
    box-shadow: var(--shadow-secondary);
    position: relative;
    overflow: hidden;
}

.input-container::before {
    content: '';
    position: absolute;
    top: 0;
    left: 0;
    right: 0;
    height: 2px;
    background: linear-gradient(90deg, var(--color-primary), var(--color-gold), var(--color-primary));
}

/* ๊ณ ๊ธ ์ค์  */
.advanced-settings {
    background: linear-gradient(135deg, rgba(0, 0, 0, 0.3) 0%, rgba(0, 0, 0, 0.1) 100%);
    backdrop-filter: blur(15px);
    -webkit-backdrop-filter: blur(15px);
    border: 1px solid rgba(255, 255, 255, 0.1);
    border-radius: 16px;
    padding: 20px;
    margin-top: 1.5rem;
    box-shadow: var(--shadow-secondary);
}

/* ์์  ์์ญ */
.example-region {
    background: linear-gradient(135deg, rgba(0, 0, 0, 0.2) 0%, rgba(0, 0, 0, 0.05) 100%);
    backdrop-filter: blur(20px);
    -webkit-backdrop-filter: blur(20px);
    border: 1px solid rgba(255, 255, 255, 0.15);
    border-radius: 16px;
    padding: 20px;
    margin-top: 1.5rem;
    box-shadow: var(--shadow-secondary);
}

/* ํ๋กฌํํธ ์๋ ฅ์นธ ํฌ๊ธฐ 2๋ฐฐ ์ฆ๊ฐ + ์คํ์ผ */
.large-prompt textarea {
    min-height: 120px !important;
    font-size: 16px !important;
    line-height: 1.5 !important;
    background: rgba(0, 0, 0, 0.3) !important;
    border: 2px solid rgba(255, 255, 255, 0.2) !important;
    border-radius: 12px !important;
    color: #FFFFFF !important;
    backdrop-filter: blur(10px) !important;
    transition: all 0.3s ease !important;
}

.large-prompt textarea:focus {
    border-color: var(--color-gold) !important;
    box-shadow: 0 0 20px rgba(212, 175, 55, 0.3) !important;
}

.large-prompt textarea::placeholder {
    color: rgba(255, 255, 255, 0.6) !important;
}

/* ์์ฑ ๋ฒํผ ์๊ฒ ๋ง๋ค๊ธฐ + 3D ํจ๊ณผ */
.small-generate-btn {
    max-width: 120px !important;
    height: 45px !important;
    font-size: 14px !important;
    padding: 10px 20px !important;
    border-radius: 12px !important;
    position: relative !important;
    overflow: hidden !important;
}

.small-generate-btn::before {
    content: '';
    position: absolute;
    top: 0;
    left: -100%;
    width: 100%;
    height: 100%;
    background: linear-gradient(90deg, transparent, rgba(255, 255, 255, 0.2), transparent);
    transition: left 0.5s ease;
}

.small-generate-btn:hover::before {
    left: 100%;
}

/* ํ๋กฌํํธ ์ฆ๊ฐ ์น์ ์คํ์ผ */
.prompt-enhance-section {
    background: linear-gradient(135deg, rgba(212, 175, 55, 0.15) 0%, rgba(212, 175, 55, 0.05) 100%);
    backdrop-filter: blur(15px);
    -webkit-backdrop-filter: blur(15px);
    border: 1px solid rgba(212, 175, 55, 0.3);
    border-radius: 15px;
    padding: 20px;
    margin-top: 15px;
    box-shadow:
        0 8px 25px rgba(0, 0, 0, 0.2),
        inset 0 1px 0 rgba(255, 255, 255, 0.1);
    position: relative;
}

.prompt-enhance-section::before {
    content: '';
    position: absolute;
    top: 0;
    left: 20px;
    right: 20px;
    height: 1px;
    background: linear-gradient(90deg, transparent, var(--color-gold), transparent);
}

/* ์คํ์ผ ํ๋ฆฌ์ ์น์ */
.style-preset-section {
    background: linear-gradient(135deg, rgba(255, 255, 255, 0.1) 0%, rgba(255, 255, 255, 0.05) 100%);
    backdrop-filter: blur(15px);
    -webkit-backdrop-filter: blur(15px);
    border: 1px solid rgba(255, 255, 255, 0.2);
    border-radius: 15px;
    padding: 20px;
    margin-top: 15px;
    box-shadow: var(--shadow-secondary);
}

/* ๋ผ๋ฒจ ํ์คํธ ์์ */
label {
    color: #F9FAFB !important;
    font-weight: 600 !important;
    text-shadow: 0 1px 2px rgba(0, 0, 0, 0.5) !important;
}

/* ๋ผ๋์ค ๋ฒํผ ๋ฐ ์ฒดํฌ๋ฐ์ค ์คํ์ผ */
input[type="radio"], input[type="checkbox"] {
    accent-color: var(--color-gold) !important;
}

/* ์ฌ๋ผ์ด๋ ์คํ์ผ */
input[type="range"] {
    accent-color: var(--color-gold) !important;
}

/* ๊ฒฐ๊ณผ ์ด๋ฏธ์ง ์ปจํ์ด๋ */
.image-container {
    border-radius: 16px !important;
    overflow: hidden !important;
    box-shadow: var(--shadow-primary) !important;
    background: rgba(0, 0, 0, 0.3) !important;
    backdrop-filter: blur(10px) !important;
}

/* ์ ๋๋ฉ์ด์ ์ถ๊ฐ */
@keyframes shimmer {
    0% { transform: translateX(-100%); }
    100% { transform: translateX(100%); }
}

.model-description, .input-container, .prompt-enhance-section, .style-preset-section {
    animation: fadeInUp 0.6s ease-out;
}

@keyframes fadeInUp {
    from {
        opacity: 0;
        transform: translateY(30px);
    }
    to {
        opacity: 1;
        transform: translateY(0);
    }
}
"""
|
|
|
|
|
def create_interface():
    """Build and return the Gradio Blocks UI.

    Layout: header, model notice, prompt row with generate button,
    enhancement checkbox, style-preset radio, result image + seed,
    advanced-settings accordion, and examples. The generate button is
    wired to generate_image().
    """
    with gr.Blocks(css=custom_css, analytics_enabled=False) as demo:
        # Header and a link to the related LoRA model collection.
        gr.HTML('<div class="title">Mr. KIM in KOREA</div>')
        gr.HTML('<div class="collection-link"><a href="https://huggingface.co/collections/openfree/painting-art-ai-681453484ec15ef5978bbeb1" target="_blank">Visit the LoRA Model Collection</a></div>')

        # Usage/licensing notice for the person-specific LoRA model.
        with gr.Group(elem_classes="model-description"):
            gr.HTML("""
            <p>
            ๋ณธ ๋ชจ๋ธ์ ์ฐ๊ตฌ ๋ชฉ์ ์ผ๋ก ํน์  ์ธ์ ์ผ๊ตด๊ณผ ์ธ๋ชจ๋ฅผ ํ์ตํ LoRA ๋ชจ๋ธ์๋๋ค.<br>
            ๋ชฉ์  ์ธ์ ์ฉ๋๋ก ๋ฌด๋จ ์ฌ์ฉ ์๋๋ก ์ ์ํด ์ฃผ์ธ์.<br>
            (์์ prompt ์ฌ์ฉ ์ ๋ฐ๋์ 'kim'์ ํฌํจํ์ฌ์ผ ์ต์ ์ ๊ฒฐ๊ณผ๋ฅผ ์ป์ ์ ์์ต๋๋ค.)
            </p>
            """)

        with gr.Column():
            # Prompt textbox (defaults to the first example) + generate button.
            with gr.Row(elem_classes="input-container"):
                with gr.Column(scale=4):
                    user_prompt = gr.Text(
                        label="Prompt",
                        max_lines=5,
                        value=examples[0],
                        elem_classes="large-prompt"
                    )
                with gr.Column(scale=1):
                    run_button = gr.Button(
                        "์์ฑ",
                        variant="primary",
                        elem_classes="small-generate-btn"
                    )

            # Opt-in LLM prompt enhancement (see enhance_prompt()).
            with gr.Group(elem_classes="prompt-enhance-section"):
                enhance_prompt_checkbox = gr.Checkbox(
                    label="๐ ํ๋กฌํํธ ์ฆ๊ฐ (AI๋ก ํ๋กฌํํธ๋ฅผ ์๋์ผ๋ก ๊ฐ์ ํ์ฌ ๊ณ ํ์ง ์ด๋ฏธ์ง ์์ฑ)",
                    value=False,
                    info="OpenAI API๋ฅผ ์ฌ์ฉํ์ฌ ์๋ ฅํ ํ๋กฌํํธ๋ฅผ ๋์ฑ ์์ธํ๊ณ  ๊ณ ํ์ง์ ์ด๋ฏธ์ง๋ฅผ ์์ฑํ  ์ ์๋๋ก ์๋์ผ๋ก ์ฆ๊ฐํฉ๋๋ค."
                )

            # Style preset radio; choices come from STYLE_PRESETS keys.
            with gr.Group(elem_classes="style-preset-section"):
                style_select = gr.Radio(
                    label="๐จ Style Preset",
                    choices=list(STYLE_PRESETS.keys()),
                    value="None",
                    interactive=True
                )

            # Outputs: generated image and the seed actually used.
            result_image = gr.Image(label="Generated Image")
            seed_output = gr.Number(label="Seed")

            # Sampling parameters, collapsed by default.
            with gr.Accordion("Advanced Settings", open=False, elem_classes="advanced-settings"):
                seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=42)
                randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
                with gr.Row():
                    width = gr.Slider(label="Width", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=1024)
                    height = gr.Slider(label="Height", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=768)
                with gr.Row():
                    guidance_scale = gr.Slider(label="Guidance scale", minimum=0.0, maximum=10.0, step=0.1, value=3.5)
                    num_inference_steps = gr.Slider(label="Inference steps", minimum=1, maximum=50, step=1, value=30)
                lora_scale = gr.Slider(label="LoRA scale", minimum=0.0, maximum=1.0, step=0.1, value=1.0)

            # Clickable example prompts that fill the prompt textbox.
            with gr.Group(elem_classes="example-region"):
                gr.Markdown("### Examples")
                gr.Examples(examples=examples, inputs=user_prompt, cache_examples=False)

        # Wire the generate button; input order must match generate_image's
        # parameter order.
        run_button.click(
            fn=generate_image,
            inputs=[
                user_prompt,
                style_select,
                enhance_prompt_checkbox,
                seed,
                randomize_seed,
                width,
                height,
                guidance_scale,
                num_inference_steps,
                lora_scale,
            ],
            outputs=[result_image, seed_output],
        )

    return demo
|
|
|
|
|
if __name__ == "__main__":
    # Build the UI, enable request queuing (needed for long-running GPU
    # jobs on Spaces), then start the server.
    app = create_interface()
    app.queue()
    app.launch()