mgbam commited on
Commit
eb29468
·
verified ·
1 Parent(s): 95e771a

Update core/image_services.py

Browse files
Files changed (1) hide show
  1. core/image_services.py +108 -60
core/image_services.py CHANGED
@@ -1,93 +1,141 @@
1
  # storyverse_weaver/core/image_services.py
2
  import os
3
- import base64 # Still useful if HF API ever returns b64
 
4
  from io import BytesIO
5
  from PIL import Image
6
- from huggingface_hub import InferenceClient # Main client for HF models
 
7
 
8
  # --- API Key Configuration ---
9
- HF_TOKEN = os.getenv("STORYVERSE_HF_TOKEN") # This is the key we'll use
 
10
 
11
- HF_IMAGE_API_CONFIGURED = False
 
12
  hf_inference_image_client = None
 
13
 
14
- class ImageGenResponse:
15
  def __init__(self, image: Image.Image = None, image_url: str = None,
16
  error: str = None, success: bool = True,
17
- provider: str = "HF Image API", model_id_used: str = None):
18
  self.image, self.image_url, self.error, self.success, self.provider, self.model_id_used = \
19
  image, image_url, error, success, provider, model_id_used
20
 
21
- def initialize_image_llms(): # Renamed for consistency, though it's not an "LLM" for images
22
- global HF_IMAGE_API_CONFIGURED, hf_inference_image_client, HF_TOKEN
23
 
24
- HF_TOKEN = os.getenv("STORYVERSE_HF_TOKEN") # Ensure it's loaded here too
25
- print("INFO: image_services.py - Initializing Image Generation services (HF Focus)...")
 
 
26
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
27
  if HF_TOKEN and HF_TOKEN.strip():
 
28
  try:
29
  hf_inference_image_client = InferenceClient(token=HF_TOKEN)
30
- # Optional: Test with a quick model ping if desired, but client init is usually enough
31
- # For instance, try to get model info for a known image model if API allows
32
- # Or assume it's ready if client initializes without error.
33
  HF_IMAGE_API_CONFIGURED = True
34
- print("SUCCESS: image_services.py - Hugging Face InferenceClient (for images) ready.")
35
  except Exception as e:
36
  HF_IMAGE_API_CONFIGURED = False
37
- print(f"ERROR: image_services.py - Failed to initialize HF InferenceClient for images: {type(e).__name__} - {e}")
38
  hf_inference_image_client = None
39
  else:
40
  HF_IMAGE_API_CONFIGURED = False
41
- print("WARNING: image_services.py - STORYVERSE_HF_TOKEN not found or empty. HF Image models disabled.")
42
 
43
- print(f"INFO: image_services.py - Image Service Init complete. HF Image API Configured: {HF_IMAGE_API_CONFIGURED}")
44
 
45
- def is_hf_image_api_ready(): # Getter function for app.py
46
- global HF_IMAGE_API_CONFIGURED
47
- return HF_IMAGE_API_CONFIGURED
48
 
49
- # --- Hugging Face Image Model via Inference API ---
50
- def generate_image_hf_model(prompt: str,
51
- model_id: str = "stabilityai/stable-diffusion-xl-base-1.0", # Default popular model
52
- negative_prompt: str = None,
53
- height: int = 768, # Common for SDXL
54
- width: int = 768, # Common for SDXL
55
- num_inference_steps: int = 25,
56
- guidance_scale: float = 7.0 # Lower can be more creative, higher more prompt-adherent
57
- ) -> ImageGenResponse:
58
- global hf_inference_image_client
59
- if not is_hf_image_api_ready() or not hf_inference_image_client:
60
- return ImageGenResponse(error="Hugging Face API (for images) not configured.", success=False, model_id_used=model_id)
61
-
62
- params = {
63
- "negative_prompt": negative_prompt,
64
- "height": height,
65
- "width": width,
66
- "num_inference_steps": num_inference_steps,
67
- "guidance_scale": guidance_scale
68
- }
69
- params = {k: v for k, v in params.items() if v is not None} # Clean out None params
70
-
71
- print(f"DEBUG: image_services.py - Calling HF Image API ({model_id}) with prompt: {prompt[:70]}...")
72
  try:
73
- # InferenceClient's text_to_image method returns a PIL Image directly
74
- image_result: Image.Image = hf_inference_image_client.text_to_image(
75
- prompt,
76
- model=model_id,
77
- **params
 
 
78
  )
79
- # Some models might be on serverless inference endpoints that take longer
80
- # The default timeout for InferenceClient is usually reasonable.
81
- print(f"DEBUG: image_services.py - HF Image API ({model_id}) image generated successfully.")
82
- return ImageGenResponse(image=image_result, provider="HF Image API", model_id_used=model_id)
83
- except Exception as e:
84
- error_msg = f"HF Image API Error ({model_id}): {type(e).__name__} - {str(e)}"
85
- if "Rate limit reached" in str(e): error_msg += " You may have hit free tier limits."
86
- elif "Model is currently loading" in str(e) or "estimated_time" in str(e).lower(): error_msg += " Model may be loading, try again in a moment."
87
- elif "Authorization" in str(e) or "401" in str(e): error_msg += " Authentication issue with your HF_TOKEN."
88
- elif "does not seem to support task text-to-image" in str(e): error_msg = f"Model {model_id} may not support text-to-image or is misconfigured."
89
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
90
  print(f"ERROR: image_services.py - {error_msg}")
91
- return ImageGenResponse(error=error_msg, success=False, provider="HF Image API", model_id_used=model_id, raw_response=e)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
92
 
93
- print("DEBUG: core.image_services (HF Focus for StoryVerseWeaver) - Module defined.")
 
1
  # storyverse_weaver/core/image_services.py
2
  import os
3
+ import requests
4
+ import base64
5
  from io import BytesIO
6
  from PIL import Image
7
+ from huggingface_hub import InferenceClient # For HF fallback
8
+ from openai import OpenAI # For DALL-E
9
 
10
# --- API Key Configuration ---
# Keys are read once at import time for defaults; initialize_image_llms()
# re-reads them so keys set after import (e.g. in a Space) are picked up.
OPENAI_API_KEY = os.getenv("STORYVERSE_OPENAI_API_KEY") # Primary for DALL-E
HF_TOKEN = os.getenv("STORYVERSE_HF_TOKEN") # For fallback

# Readiness flags and client handles; populated by initialize_image_llms().
OPENAI_DALLE_CONFIGURED = False
HF_IMAGE_API_CONFIGURED = False # For fallback image model
hf_inference_image_client = None
openai_client = None
18
 
19
class ImageGenResponse: # Keep this class
    """Uniform result wrapper returned by every image-generation provider.

    Attributes:
        image: decoded PIL image on success, else None.
        image_url: remote URL of the image when the provider returned one.
        error: human-readable failure description, None on success.
        success: False when the provider call failed.
        provider: label of the backend that produced this response.
        model_id_used: identifier of the model that was invoked.
        raw_response: optional raw provider payload or caught exception,
            kept for debugging.
    """

    # NOTE: the Image annotation is quoted so the class definition does not
    # evaluate the PIL name at import time.
    def __init__(self, image: "Image.Image" = None, image_url: str = None,
                 error: str = None, success: bool = True,
                 provider: str = "Unknown Image Gen", model_id_used: str = None,
                 raw_response=None):
        # raw_response is accepted because the providers' error paths pass the
        # caught exception (raw_response=e); previously __init__ rejected the
        # keyword, so every error path raised TypeError and masked the error.
        self.image = image
        self.image_url = image_url
        self.error = error
        self.success = success
        self.provider = provider
        self.model_id_used = model_id_used
        self.raw_response = raw_response
25
 
26
def initialize_image_llms():
    """Read API keys from the environment and configure the image backends.

    OpenAI DALL-E is the primary provider and the Hugging Face Inference API
    the fallback. Updates the module-level readiness flags and client handles
    as a side effect and logs progress to stdout.
    """
    global OPENAI_API_KEY, HF_TOKEN

    # Re-read the environment in case keys were set after module import.
    OPENAI_API_KEY = os.getenv("STORYVERSE_OPENAI_API_KEY")
    HF_TOKEN = os.getenv("STORYVERSE_HF_TOKEN")

    print("INFO: image_services.py - Initializing Image Generation services (DALL-E primary)...")
    _setup_dalle_client()
    _setup_hf_image_client()
    print(f"INFO: image_services.py - Image Service Init complete. DALL-E Ready: {OPENAI_DALLE_CONFIGURED}, HF Image (Fallback) Ready: {HF_IMAGE_API_CONFIGURED}")


def _setup_dalle_client():
    """Configure the OpenAI client (primary image provider) if a key exists."""
    global OPENAI_DALLE_CONFIGURED, openai_client
    if not (OPENAI_API_KEY and OPENAI_API_KEY.strip()):
        OPENAI_DALLE_CONFIGURED = False
        print("WARNING: image_services.py - STORYVERSE_OPENAI_API_KEY not found or empty. DALL-E disabled.")
        return
    print("INFO: image_services.py - STORYVERSE_OPENAI_API_KEY found.")
    try:
        openai_client = OpenAI(api_key=OPENAI_API_KEY)
        # Successful client construction is the readiness check; no test
        # request is issued here.
        OPENAI_DALLE_CONFIGURED = True
        print("SUCCESS: image_services.py - OpenAI DALL-E client configured.")
    except Exception as e:
        OPENAI_DALLE_CONFIGURED = False
        print(f"ERROR: image_services.py - Failed to configure OpenAI DALL-E client: {type(e).__name__} - {e}")
        openai_client = None


def _setup_hf_image_client():
    """Configure the Hugging Face InferenceClient (fallback image provider)."""
    global HF_IMAGE_API_CONFIGURED, hf_inference_image_client
    if not (HF_TOKEN and HF_TOKEN.strip()):
        HF_IMAGE_API_CONFIGURED = False
        print("WARNING: image_services.py - STORYVERSE_HF_TOKEN not found or empty (for fallback image model).")
        return
    print("INFO: image_services.py - STORYVERSE_HF_TOKEN found (for fallback image model).")
    try:
        hf_inference_image_client = InferenceClient(token=HF_TOKEN)
        HF_IMAGE_API_CONFIGURED = True
        print("SUCCESS: image_services.py - Hugging Face InferenceClient (for fallback images) ready.")
    except Exception as e:
        HF_IMAGE_API_CONFIGURED = False
        print(f"ERROR: image_services.py - Failed to initialize HF InferenceClient for fallback images: {e}")
        hf_inference_image_client = None
68
 
69
def is_dalle_ready():
    """Report whether the OpenAI DALL-E client was configured successfully."""
    return OPENAI_DALLE_CONFIGURED


def is_hf_image_api_ready():
    """Report whether the Hugging Face fallback image client is available."""
    return HF_IMAGE_API_CONFIGURED
 
71
 
72
# --- OpenAI DALL-E ---
def generate_image_dalle(prompt: str,
                         model: str = "dall-e-3", # or "dall-e-2"
                         size: str = "1024x1024",
                         quality: str = "standard", # "standard" or "hd" for dall-e-3
                         n: int = 1,
                         response_format: str = "b64_json" # Get image data directly
                         ) -> ImageGenResponse:
    """Generate an image with the OpenAI DALL-E API.

    Args:
        prompt: Text description of the desired image.
        model: DALL-E model name ("dall-e-3" or "dall-e-2").
        size: Resolution string accepted by the API, e.g. "1024x1024".
        quality: "standard" or "hd" (dall-e-3 only).
        n: Number of images to request; only the first is used.
        response_format: "b64_json" (decode inline) or "url" (download).

    Returns:
        ImageGenResponse with a PIL image on success, or an error
        description on failure. Never raises.
    """
    global openai_client
    if not is_dalle_ready() or not openai_client:
        return ImageGenResponse(error="OpenAI DALL-E API not configured.", success=False, provider="DALL-E", model_id_used=model)

    print(f"DEBUG: image_services.py - Calling DALL-E ({model}) with prompt: {prompt[:70]}...")
    try:
        response = openai_client.images.generate(
            model=model,
            prompt=prompt,
            size=size,
            quality=quality,
            n=n,
            response_format=response_format # Get base64 encoded image
        )

        if response_format == "b64_json":
            if not response.data or not response.data[0].b64_json:
                return ImageGenResponse(error="No image data in DALL-E b64_json response.", success=False, provider="DALL-E", model_id_used=model)
            image_data = base64.b64decode(response.data[0].b64_json)
            image = Image.open(BytesIO(image_data))
            print(f"DEBUG: image_services.py - DALL-E image generated successfully ({model}).")
            return ImageGenResponse(image=image, provider="DALL-E", model_id_used=model)
        elif response_format == "url":
            if not response.data or not response.data[0].url:
                return ImageGenResponse(error="No image URL in DALL-E response.", success=False, provider="DALL-E", model_id_used=model)
            image_url = response.data[0].url
            # DALL-E URLs are short-lived, so download the bytes right away.
            img_content_response = requests.get(image_url, timeout=30)
            img_content_response.raise_for_status()
            image = Image.open(BytesIO(img_content_response.content))
            print(f"DEBUG: image_services.py - DALL-E image downloaded successfully ({model}).")
            return ImageGenResponse(image=image, image_url=image_url, provider="DALL-E", model_id_used=model)
        else:
            # Previously any other response_format fell through and the
            # function implicitly returned None; report it explicitly.
            return ImageGenResponse(error=f"Unsupported response_format: {response_format}", success=False, provider="DALL-E", model_id_used=model)

    except Exception as e:
        error_msg = f"DALL-E API Error ({model}): {type(e).__name__} - {str(e)}"
        if hasattr(e, 'response') and e.response is not None and hasattr(e.response, 'text'):
            error_msg += f" - API Response: {e.response.text[:200]}"
        elif hasattr(e, 'message'): # OpenAI specific error structure
            error_msg += f" - OpenAI Message: {e.message}"

        print(f"ERROR: image_services.py - {error_msg}")
        # raw_response=e dropped: ImageGenResponse.__init__ does not accept it,
        # so passing it raised TypeError and masked the real API error.
        return ImageGenResponse(error=error_msg, success=False, provider="DALL-E", model_id_used=model)
122
+
123
# --- Hugging Face Image Model (Fallback) ---
def generate_image_hf_model(prompt: str,
                            model_id: str = "stabilityai/stable-diffusion-xl-base-1.0",
                            negative_prompt: str = None,
                            height: int = 768,   # common default for SDXL
                            width: int = 768,    # common default for SDXL
                            num_inference_steps: int = 25,
                            guidance_scale: float = 7.0) -> ImageGenResponse:
    """Generate an image via the Hugging Face Inference API (fallback path).

    The committed version had a literal ``...`` inside the parameter list
    (a SyntaxError) and guarded parameters with ``'name' in locals()`` on
    names that were never defined; this restores the real signature.

    Args:
        prompt: Text description of the desired image.
        model_id: HF model repo id to run text-to-image on.
        negative_prompt: Optional prompt of things to avoid.
        height / width: Output dimensions in pixels.
        num_inference_steps: Diffusion step count.
        guidance_scale: Prompt-adherence strength.

    Returns:
        ImageGenResponse with a PIL image on success, or an error
        description on failure. Never raises.
    """
    global hf_inference_image_client
    if not is_hf_image_api_ready() or not hf_inference_image_client:
        return ImageGenResponse(error="HF Image API not configured.", success=False, provider="HF Image API", model_id_used=model_id)

    params = {
        "negative_prompt": negative_prompt,
        "height": height,
        "width": width,
        "num_inference_steps": num_inference_steps,
        "guidance_scale": guidance_scale,
    }
    # Drop unset options so the API's own defaults apply.
    params = {k: v for k, v in params.items() if v is not None}
    try:
        # InferenceClient.text_to_image returns a PIL Image directly.
        image_result: Image.Image = hf_inference_image_client.text_to_image(prompt, model=model_id, **params)
        return ImageGenResponse(image=image_result, provider="HF Image API", model_id_used=model_id)
    except Exception as e:
        # raw_response=e dropped: ImageGenResponse.__init__ does not accept it,
        # so passing it raised TypeError and masked the real API error.
        return ImageGenResponse(error=f"HF Image API Error ({model_id}): {e}", success=False, provider="HF Image API", model_id_used=model_id)


print("DEBUG: core.image_services (DALL-E Primary for StoryVerseWeaver) - Module defined.")