# core/gemini_handler.py
import google.generativeai as genai
import json
import re # For cleaning JSON output

class GeminiHandler:
    """Thin wrapper around the Gemini API for story breakdowns and image prompts."""

    def __init__(self, api_key):
        genai.configure(api_key=api_key)
        self.model = genai.GenerativeModel('gemini-1.5-flash-latest')  # Or your preferred Gemini model

    def _clean_json_response(self, text_response):
        """Strip the ```json ... ``` fence Gemini sometimes wraps around JSON output."""
        match = re.search(r"```json\s*([\s\S]*?)\s*```", text_response)
        if match:
            return match.group(1).strip()
        return text_response.strip()

    def generate_story_breakdown(self, prompt_text):
        """Ask Gemini for a story breakdown and parse the JSON it returns."""
        response = None
        try:
            response = self.model.generate_content(prompt_text)
            cleaned_response = self._clean_json_response(response.text)
            # print(f"Debug: Cleaned Story Breakdown JSON: {cleaned_response}")  # For debugging
            story_scenes = json.loads(cleaned_response)
            return story_scenes
        except Exception as e:
            print(f"Error generating story breakdown: {e}")
            print(f"Problematic Gemini Response: {response.text if response is not None else 'No response object'}")
            return None

    def generate_image_prompt(self, prompt_text):
        """Ask Gemini for an image prompt and return it as plain text."""
        try:
            response = self.model.generate_content(prompt_text)
            # Image prompts are plain text, so no JSON cleaning is needed unless specified
            return response.text.strip()
        except Exception as e:
            print(f"Error generating image prompt: {e}")
            return None
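

# Example usage: a minimal sketch, not part of the original module. The
# GEMINI_API_KEY environment variable and the prompt strings below are
# illustrative assumptions; the breakdown prompt must ask for JSON so that
# generate_story_breakdown() can parse the response.
if __name__ == "__main__":
    import os

    handler = GeminiHandler(api_key=os.environ["GEMINI_API_KEY"])  # assumed env var

    breakdown_prompt = (
        "Break this story idea into scenes and return ONLY a JSON array of objects "
        "with 'scene_number' and 'description' keys: "
        "A lighthouse keeper befriends a lost whale."
    )
    scenes = handler.generate_story_breakdown(breakdown_prompt)

    if scenes:
        first_scene = scenes[0]
        image_prompt = handler.generate_image_prompt(
            f"Write one vivid image-generation prompt for this scene: {first_scene}"
        )
        print(f"Scenes: {scenes}")
        print(f"Image prompt: {image_prompt}")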