# ===== CRITICAL: Import spaces FIRST before any CUDA operations ===== try: import spaces HF_SPACES = True except ImportError: # If running locally, create a dummy decorator def spaces_gpu_decorator(duration=60): def decorator(func): return func return decorator spaces = type('spaces', (), {'GPU': spaces_gpu_decorator})() HF_SPACES = False print("Warning: Running without Hugging Face Spaces GPU allocation") # ===== Now import other libraries ===== import random import os import uuid import re import time from datetime import datetime import gradio as gr import numpy as np import requests import torch from diffusers import DiffusionPipeline from PIL import Image # ===== OpenAI 설정 ===== from openai import OpenAI # Add error handling for API key try: client = OpenAI(api_key=os.getenv("LLM_API")) except Exception as e: print(f"Warning: OpenAI client initialization failed: {e}") client = None # ===== 프롬프트 증강용 스타일 프리셋 ===== STYLE_PRESETS = { "None": "", "Realistic Photo": "photorealistic, 8k, ultra-detailed, cinematic lighting, realistic skin texture", "Oil Painting": "oil painting, rich brush strokes, canvas texture, baroque lighting", "Comic Book": "comic book style, bold ink outlines, cel shading, vibrant colors", "Watercolor": "watercolor illustration, soft gradients, splatter effect, pastel palette", } # ===== 저장 폴더 ===== SAVE_DIR = "saved_images" if not os.path.exists(SAVE_DIR): os.makedirs(SAVE_DIR, exist_ok=True) # ===== 디바이스 & 모델 로드 ===== device = "cuda" if torch.cuda.is_available() else "cpu" print(f"Using device: {device}") repo_id = "black-forest-labs/FLUX.1-dev" adapter_id = "seawolf2357/kim-korea" # Add error handling for model loading try: pipeline = DiffusionPipeline.from_pretrained(repo_id, torch_dtype=torch.bfloat16) pipeline.load_lora_weights(adapter_id) pipeline = pipeline.to(device) print("Model loaded successfully") except Exception as e: print(f"Error loading model: {e}") pipeline = None MAX_SEED = np.iinfo(np.int32).max MAX_IMAGE_SIZE = 1024 # ===== 한글 여부 판별 ===== HANGUL_RE = re.compile(r"[\u3131-\u318E\uAC00-\uD7A3]+") def is_korean(text: str) -> bool: return bool(HANGUL_RE.search(text)) # ===== 번역 & 증강 함수 ===== def openai_translate(text: str, retries: int = 3) -> str: """한글을 영어로 번역 (OpenAI GPT-4o-mini 사용). 영어 입력이면 그대로 반환.""" if not is_korean(text): return text if client is None: print("Warning: OpenAI client not available, returning original text") return text for attempt in range(retries): try: res = client.chat.completions.create( model="gpt-4o-mini", messages=[ { "role": "system", "content": "Translate the following Korean prompt into concise, descriptive English suitable for an image generation model. Keep the meaning, do not add new concepts." }, {"role": "user", "content": text} ], temperature=0.3, max_tokens=256, ) return res.choices[0].message.content.strip() except Exception as e: print(f"[translate] attempt {attempt + 1} failed: {e}") time.sleep(2) return text # 번역 실패 시 원문 그대로 def prepare_prompt(user_prompt: str, style_key: str) -> str: """한글이면 번역하고, 선택한 스타일 프리셋을 붙여서 최종 프롬프트를 만든다.""" prompt_en = openai_translate(user_prompt) style_suffix = STYLE_PRESETS.get(style_key, "") if style_suffix: final_prompt = f"{prompt_en}, {style_suffix}" else: final_prompt = prompt_en return final_prompt # ===== 이미지 저장 ===== def save_generated_image(image: Image.Image, prompt: str) -> str: timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") unique_id = str(uuid.uuid4())[:8] filename = f"{timestamp}_{unique_id}.png" filepath = os.path.join(SAVE_DIR, filename) image.save(filepath) # 메타데이터 저장 metadata_file = os.path.join(SAVE_DIR, "metadata.txt") with open(metadata_file, "a", encoding="utf-8") as f: f.write(f"{filename}|{prompt}|{timestamp}\n") return filepath # ===== Diffusion 호출 ===== def run_pipeline(prompt: str, seed: int, width: int, height: int, guidance_scale: float, num_steps: int, lora_scale: float): if pipeline is None: raise ValueError("Model pipeline not loaded") generator = torch.Generator(device=device).manual_seed(int(seed)) result = pipeline( prompt=prompt, guidance_scale=guidance_scale, num_inference_steps=num_steps, width=width, height=height, generator=generator, joint_attention_kwargs={"scale": lora_scale}, ).images[0] return result # ===== Gradio inference 래퍼 ===== @spaces.GPU(duration=60) def generate_image( user_prompt: str, style_key: str, seed: int = 42, randomize_seed: bool = True, width: int = 1024, height: int = 768, guidance_scale: float = 3.5, num_inference_steps: int = 30, lora_scale: float = 1.0, progress=None, ): try: if randomize_seed: seed = random.randint(0, MAX_SEED) # 1) 번역 + 증강 final_prompt = prepare_prompt(user_prompt, style_key) print(f"Final prompt: {final_prompt}") # 2) 파이프라인 호출 image = run_pipeline(final_prompt, seed, width, height, guidance_scale, num_inference_steps, lora_scale) # 3) 저장 save_generated_image(image, final_prompt) return image, seed except Exception as e: print(f"Error generating image: {e}") # Return a placeholder or error message error_image = Image.new('RGB', (width, height), color='red') return error_image, seed # ===== 예시 프롬프트 (한국어/영어 혼용 허용) ===== examples = [ "Mr. KIM이 두 손으로 'Fighting!' 현수막을 들고 있는 모습, 애국심과 국가 발전에 대한 의지를 보여주고 있다.", "Mr. KIM이 양팔을 들어 올리며 승리의 표정으로 환호하는 모습, 승리와 미래에 대한 희망을 보여주고 있다.", "Mr. KIM이 운동복을 입고 공원에서 조깅하는 모습, 건강한 생활습관과 활기찬 리더십을 보여주고 있다.", "Mr. KIM이 붐비는 거리에서 여성 시민들과 따뜻하게 악수하는 모습, 여성 유권자들에 대한 진정한 관심과 소통을 보여주고 있다.", "Mr. KIM이 선거 유세장에서 지평선을 향해 손가락으로 가리키며 영감을 주는 제스처를 취하고 있고, 여성들과 아이들이 박수를 치고 있다.", "Mr. KIM이 지역 행사에 참여하여 열정적으로 응원하는 여성 지지자들에게 둘러싸여 있는 모습.", "Mr. KIM visiting a local market, engaging in friendly conversation with female vendors and shopkeepers.", "Mr. KIM walking through a university campus, discussing education policies with female students and professors.", "Mr. KIM delivering a powerful speech in front of a large crowd with confident gestures and determined expression.", "Mr. KIM in a dynamic interview setting, passionately outlining his visions for the future.", "Mr. KIM preparing for an important debate, surrounded by paperwork, looking focused and resolute.", ] # ===== 커스텀 CSS (붉은 톤 유지) ===== custom_css = """ :root { --color-primary: #8F1A3A; --color-secondary: #FF4B4B; --background-fill-primary: linear-gradient(to right, #FFF5F5, #FED7D7, #FEB2B2); } footer {visibility: hidden;} .gradio-container {background: var(--background-fill-primary);} .title {color: var(--color-primary)!important; font-size:3rem!important; font-weight:700!important; text-align:center; margin:1rem 0; font-family:'Playfair Display',serif;} .subtitle {color:#4A5568!important; font-size:1.2rem!important; text-align:center; margin-bottom:1.5rem; font-style:italic;} .collection-link {text-align:center; margin-bottom:2rem; font-size:1.1rem;} .collection-link a {color:var(--color-primary); text-decoration:underline; transition:color .3s ease;} .collection-link a:hover {color:var(--color-secondary);} .model-description{background:rgba(255,255,255,.8); border-radius:12px; padding:24px; margin:20px 0; box-shadow:0 4px 12px rgba(0,0,0,.05); border-left:5px solid var(--color-primary);} button.primary{background:var(--color-primary)!important; color:#fff!important; transition:all .3s ease;} button:hover{transform:translateY(-2px); box-shadow:0 5px 15px rgba(0,0,0,.1);} .input-container{border-radius:10px; box-shadow:0 2px 8px rgba(0,0,0,.05); background:rgba(255,255,255,.6); padding:20px; margin-bottom:1rem;} .advanced-settings{margin-top:1rem; padding:1rem; border-radius:10px; background:rgba(255,255,255,.6);} .example-region{background:rgba(255,255,255,.5); border-radius:10px; padding:1rem; margin-top:1rem;} """ # ===== Gradio UI ===== def create_interface(): with gr.Blocks(css=custom_css, analytics_enabled=False) as demo: gr.HTML('
본 모델은 연구 목적으로 특정인의 얼굴과 외모를 학습한 LoRA 모델입니다.
목적외의 용도로 무단 사용 않도록 유의해 주세요.
(예시 prompt 사용 시 반드시 'kim'을 포함하여야 최적의 결과를 얻을 수 있습니다.)