# storyverse_weaver/core/image_services.py
import os
import requests
import base64
from io import BytesIO
from PIL import Image
from huggingface_hub import InferenceClient  # For HF fallback
from openai import OpenAI  # For DALL-E

# --- API Key Configuration ---
OPENAI_API_KEY = os.getenv("STORYVERSE_OPENAI_API_KEY")  # Primary, for DALL-E
HF_TOKEN = os.getenv("STORYVERSE_HF_TOKEN")  # For fallback images & text

OPENAI_DALLE_CONFIGURED = False
HF_IMAGE_API_CONFIGURED = False
hf_inference_image_client = None
openai_client = None

class ImageGenResponse:
    def __init__(self, image: Image.Image = None, image_url: str = None,
                 error: str = None, success: bool = True,
                 provider: str = "Unknown Image Gen", model_id_used: str = None,
                 raw_response=None):
        self.image = image
        self.image_url = image_url
        self.error = error
        self.success = success
        self.provider = provider
        self.model_id_used = model_id_used
        self.raw_response = raw_response  # Raw API response or exception, kept for debugging

    def __str__(self):
        status = "Success" if self.success else "Failed"
        details = f"Image URL: {self.image_url}" if self.image_url else ("Image data present" if self.image else "No image data")
        if self.error:
            details = f"Error: {self.error}"
        return f"ImageGenResponse(Provider: {self.provider}, Model: {self.model_id_used or 'N/A'}, Status: {status}, Details: {details})"
def initialize_image_llms():  # "LLMs" is a slight misnomer for image services, but kept for naming consistency
    global OPENAI_DALLE_CONFIGURED, HF_IMAGE_API_CONFIGURED, hf_inference_image_client, openai_client, OPENAI_API_KEY, HF_TOKEN

    # Re-fetch keys here in case the environment was populated after module import
    OPENAI_API_KEY = os.getenv("STORYVERSE_OPENAI_API_KEY")
    HF_TOKEN = os.getenv("STORYVERSE_HF_TOKEN")

    print("INFO: image_services.py - Initializing Image Generation services (DALL-E primary, HF fallback)...")

    # OpenAI DALL-E (Primary)
    if OPENAI_API_KEY and OPENAI_API_KEY.strip():
        print("INFO: image_services.py - STORYVERSE_OPENAI_API_KEY found.")
        try:
            openai_client = OpenAI(api_key=OPENAI_API_KEY)
            # A lightweight way to verify the key would be a quick API call, e.g.:
            # try:
            #     openai_client.models.list()
            # except Exception as test_e:
            #     raise Exception(f"OpenAI client initialized but test call failed: {test_e}") from test_e
            OPENAI_DALLE_CONFIGURED = True
            print("SUCCESS: image_services.py - OpenAI DALL-E client configured.")
        except Exception as e:
            OPENAI_DALLE_CONFIGURED = False
            openai_client = None
            print(f"ERROR: image_services.py - Failed to configure OpenAI DALL-E client: {type(e).__name__} - {e}")
    else:
        OPENAI_DALLE_CONFIGURED = False
        print("WARNING: image_services.py - STORYVERSE_OPENAI_API_KEY not found or empty. DALL-E disabled.")

    # Hugging Face Image Models (Fallback)
    if HF_TOKEN and HF_TOKEN.strip():
        print("INFO: image_services.py - STORYVERSE_HF_TOKEN found (for fallback image model).")
        try:
            hf_inference_image_client = InferenceClient(token=HF_TOKEN)
            HF_IMAGE_API_CONFIGURED = True
            print("SUCCESS: image_services.py - Hugging Face InferenceClient (for fallback images) ready.")
        except Exception as e:
            HF_IMAGE_API_CONFIGURED = False
            hf_inference_image_client = None
            print(f"ERROR: image_services.py - Failed to initialize HF InferenceClient for fallback images: {type(e).__name__} - {e}")
    else:
        HF_IMAGE_API_CONFIGURED = False
        print("WARNING: image_services.py - STORYVERSE_HF_TOKEN not found or empty. HF fallback image model disabled.")

    print(f"INFO: image_services.py - Image Service Init complete. DALL-E Ready: {OPENAI_DALLE_CONFIGURED}, HF Image (Fallback) Ready: {HF_IMAGE_API_CONFIGURED}")

def is_dalle_ready():
    return OPENAI_DALLE_CONFIGURED

def is_hf_image_api_ready():
    return HF_IMAGE_API_CONFIGURED

# --- OpenAI DALL-E (Primary) ---
def generate_image_dalle(prompt: str,
                         model: str = "dall-e-3",  # or "dall-e-2"
                         size: str = "1024x1024",
                         quality: str = "standard",  # "standard" or "hd" (dall-e-3 only)
                         n: int = 1,  # dall-e-3 only supports n=1
                         response_format: str = "b64_json"  # "b64_json" returns image data directly; "url" returns a link
                         ) -> ImageGenResponse:
    # Uses the module-level client initialized in initialize_image_llms()
    if not is_dalle_ready() or not openai_client:
        return ImageGenResponse(error="OpenAI DALL-E API not configured.", success=False, provider="DALL-E", model_id_used=model)

    print(f"DEBUG: image_services.py - Calling DALL-E ({model}) with prompt: {prompt[:70]}...")
    try:
        response = openai_client.images.generate(
            model=model,
            prompt=prompt,
            size=size,
            quality=quality,
            n=n,
            response_format=response_format
        )
        if response_format == "b64_json":
            if not response.data or not response.data[0].b64_json:
                return ImageGenResponse(error="No image data in DALL-E b64_json response.", success=False, provider="DALL-E", model_id_used=model, raw_response=response)
            image_data = base64.b64decode(response.data[0].b64_json)
            image = Image.open(BytesIO(image_data))
            print(f"DEBUG: image_services.py - DALL-E image generated successfully ({model}).")
            return ImageGenResponse(image=image, provider="DALL-E", model_id_used=model)
        elif response_format == "url":
            if not response.data or not response.data[0].url:
                return ImageGenResponse(error="No image URL in DALL-E response.", success=False, provider="DALL-E", model_id_used=model, raw_response=response)
            image_url = response.data[0].url
            print(f"DEBUG: image_services.py - DALL-E image URL received ({model}): {image_url}. Attempting download...")
            img_content_response = requests.get(image_url, timeout=30)
            img_content_response.raise_for_status()
            image = Image.open(BytesIO(img_content_response.content))
            print(f"DEBUG: image_services.py - DALL-E image downloaded successfully ({model}).")
            return ImageGenResponse(image=image, image_url=image_url, provider="DALL-E", model_id_used=model)
        else:
            return ImageGenResponse(error=f"Unsupported DALL-E response_format: {response_format}", success=False, provider="DALL-E", model_id_used=model)
    except Exception as e:
        error_msg = f"DALL-E API Error ({model}): {type(e).__name__} - {str(e)}"
        # Try to pull a more specific message out of the OpenAI error structure
        if hasattr(e, 'response') and e.response is not None:
            try:
                err_data = e.response.json()
                if 'error' in err_data and 'message' in err_data['error']:
                    error_msg += f" - OpenAI Message: {err_data['error']['message']}"
                elif hasattr(e.response, 'text'):
                    error_msg += f" - API Response: {e.response.text[:200]}"
            except Exception:  # Fall back to raw text if the response body is not JSON
                if hasattr(e.response, 'text'):
                    error_msg += f" - API Response: {e.response.text[:200]}"
        elif hasattr(e, 'message'):
            error_msg += f" - Detail: {e.message}"
        print(f"ERROR: image_services.py - {error_msg}")
        return ImageGenResponse(error=error_msg, success=False, provider="DALL-E", model_id_used=model, raw_response=e)

# --- Hugging Face Image Model (Fallback) ---
def generate_image_hf_model(prompt: str,
                            model_id: str = "stabilityai/stable-diffusion-xl-base-1.0",  # Default HF fallback model
                            negative_prompt: str = None,
                            height: int = 768,
                            width: int = 768,
                            num_inference_steps: int = 25,
                            guidance_scale: float = 7.0
                            ) -> ImageGenResponse:
    if not is_hf_image_api_ready() or not hf_inference_image_client:
        return ImageGenResponse(error="Hugging Face API (for images) not configured.", success=False, provider="HF Image API", model_id_used=model_id)

    params = {
        "negative_prompt": negative_prompt,
        "height": height,
        "width": width,
        "num_inference_steps": num_inference_steps,
        "guidance_scale": guidance_scale
    }
    params = {k: v for k, v in params.items() if v is not None}  # Drop unset params

    print(f"DEBUG: image_services.py - Calling HF Image API ({model_id}) with prompt: {prompt[:70]}...")
    try:
        image_result: Image.Image = hf_inference_image_client.text_to_image(
            prompt,
            model=model_id,
            **params
        )
        print(f"DEBUG: image_services.py - HF Image API ({model_id}) image generated successfully.")
        return ImageGenResponse(image=image_result, provider="HF Image API", model_id_used=model_id)
    except Exception as e:
        error_msg = f"HF Image API Error ({model_id}): {type(e).__name__} - {str(e)}"
        # Map common failure modes to friendlier messages
        if "Rate limit reached" in str(e):
            error_msg += " You may have hit free-tier limits for the HF Inference API."
        elif "Model is currently loading" in str(e) or "estimated_time" in str(e).lower():
            error_msg += " The HF model may still be loading; please try again in a moment."
        elif "Authorization" in str(e) or "401" in str(e):
            error_msg += " Authentication issue with your STORYVERSE_HF_TOKEN."
        elif "does not seem to support task text-to-image" in str(e):
            error_msg = f"Model {model_id} on HF may not support text-to-image or is misconfigured for the Inference API."
        print(f"ERROR: image_services.py - {error_msg}")
        return ImageGenResponse(error=error_msg, success=False, provider="HF Image API", model_id_used=model_id, raw_response=e)
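
# --- Orchestration sketch (illustrative) ---
# The module is described as "DALL-E primary, HF fallback", but the routing is
# assumed to live in the calling app; this helper is a minimal sketch of that
# flow under those assumptions, not a confirmed part of the StoryVerseWeaver API.
def generate_image_with_fallback(prompt: str) -> ImageGenResponse:
    """Try DALL-E first; if it is unconfigured or fails, fall back to the HF model."""
    if is_dalle_ready():
        response = generate_image_dalle(prompt)
        if response.success:
            return response
        print(f"WARNING: image_services.py - DALL-E failed ({response.error}); falling back to HF.")
    if is_hf_image_api_ready():
        return generate_image_hf_model(prompt)
    return ImageGenResponse(error="No image generation service is configured.", success=False, provider="None")
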
print("DEBUG: core.image_services (DALL-E Primary, HF Fallback for StoryVerseWeaver) - Module defined.") |