mgbam committed on
Commit fe60278 · verified · 1 Parent(s): ce507ec

Rename core/evolution_engine.py to core/story_engine.py

Files changed (2)
  1. core/evolution_engine.py +0 -57
  2. core/story_engine.py +117 -0
core/evolution_engine.py DELETED
@@ -1,57 +0,0 @@
- # algoforge_prime/core/evolution_engine.py
- print("DEBUG: Importing core.evolution_engine")
-
- from core.llm_clients import call_huggingface_api, call_gemini_api, LLMResponse
- from prompts.system_prompts import get_system_prompt
- # from core.evaluation_engine import EvaluationResultOutput # For type hinting if needed
-
- print("DEBUG: core.evolution_engine - Imports successful")
-
- def evolve_solution(
-     original_solution_text: str,
-     evaluation_output_obj,  # This is an EvaluationResultOutput object
-     problem_description: str,
-     problem_type: str,
-     llm_client_config: dict
- ) -> str:
-     print(f"DEBUG: evolution_engine.py - Evolving solution. Problem type: {problem_type}")
-     system_p_evolve = get_system_prompt("evolution_general")
-
-     try:
-         # Use the method from EvaluationResultOutput to get formatted critique and test results
-         critique_and_test_feedback = evaluation_output_obj.get_display_critique()
-         original_score = evaluation_output_obj.combined_score
-     except AttributeError:  # Fallback if evaluation_output_obj is not as expected
-         critique_and_test_feedback = "Detailed evaluation feedback was not available or malformed."
-         original_score = 0  # Or try to get it from evaluation_output_obj if it's just a simple dict
-         if hasattr(evaluation_output_obj, 'score'): original_score = evaluation_output_obj.score
-         elif isinstance(evaluation_output_obj, dict) and 'score' in evaluation_output_obj: original_score = evaluation_output_obj['score']
-
-     user_p_evolve = (
-         f"Original Problem Context: \"{problem_description}\"\n\n"
-         f"The solution to be evolved achieved a combined score of {original_score}/10.\n"
-         f"Here is the original solution text:\n```python\n{original_solution_text}\n```\n\n"
-         f"Here is the comprehensive evaluation it received (including LLM critique AND automated test feedback/errors if run):\n'''\n{critique_and_test_feedback}\n'''\n\n"
-         f"Your Task: Based on ALL the information above (solution, LLM critique, and crucially any test execution results/errors mentioned in the evaluation), "
-         f"evolve the provided solution to make it demonstrably superior. "
-         f"**Your HIGHEST PRIORITY is to fix any reported execution errors or failed tests.** "
-         f"Then, address other critique points like efficiency, clarity, or completeness. "
-         f"Output ONLY the *complete, raw, evolved Python code block*. Do not include explanations outside the code block unless explicitly part of the solution's comments."
-     )
-
-     llm_response_obj = None
-     if llm_client_config["type"] == "hf":
-         llm_response_obj = call_huggingface_api(user_p_evolve, llm_client_config["model_id"], llm_client_config["temp"], llm_client_config["max_tokens"], system_p_evolve)
-     elif llm_client_config["type"] == "google_gemini":
-         llm_response_obj = call_gemini_api(user_p_evolve, llm_client_config["model_id"], llm_client_config["temp"], llm_client_config["max_tokens"], system_p_evolve)
-     else:
-         return f"ERROR (Evolution): Unknown LLM client type '{llm_client_config['type']}'"
-
-     if llm_response_obj.success:
-         # Optional: basic cleanup of the LLM output if it tends to add markdown
-         from core.utils import basic_text_cleanup  # Assuming you have this
-         return basic_text_cleanup(llm_response_obj.text)
-     else:
-         return f"ERROR (Evolution with {llm_response_obj.model_id_used}): {llm_response_obj.error}"
-
- print("DEBUG: core.evolution_engine - Module fully defined.")
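For reference, the removed `evolve_solution` expected an `llm_client_config` dict with `type`, `model_id`, `temp`, and `max_tokens` keys and dispatched to either the Hugging Face or the Gemini client. The following is a minimal caller sketch, not code from this repository: the model ID, the `FakeEvaluation` stand-in, and the concrete problem values are hypothetical placeholders.

```python
# Hypothetical caller sketch for the removed evolve_solution (not part of this commit).
# The config keys mirror what the function reads; the model ID is a placeholder.
from core.evolution_engine import evolve_solution

class FakeEvaluation:
    """Stand-in exposing the two members evolve_solution reads."""
    combined_score = 4
    def get_display_critique(self):
        return "Critique: add() subtracts instead of adding; test_add failed."

llm_client_config = {
    "type": "google_gemini",          # or "hf" for the Hugging Face client path
    "model_id": "gemini-1.5-flash",   # placeholder model ID
    "temp": 0.7,
    "max_tokens": 2048,
}

evolved_code = evolve_solution(
    original_solution_text="def add(a, b):\n    return a - b",
    evaluation_output_obj=FakeEvaluation(),
    problem_description="Implement add(a, b) returning the sum of two numbers.",
    problem_type="python_function",
    llm_client_config=llm_client_config,
)
print(evolved_code)  # evolved code on success, or an "ERROR (Evolution ...)" string
```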
core/story_engine.py ADDED
@@ -0,0 +1,117 @@
+ # storyverse_weaver/core/story_engine.py
+ from PIL import Image  # Make sure Pillow is in requirements.txt
+
+ class Scene:
+     def __init__(self, scene_number: int, user_prompt: str, narrative_text: str = None,
+                  image: Image.Image = None, image_style_prompt: str = None,
+                  image_provider: str = None, error_message: str = None):  # Added error_message here
+         self.scene_number = scene_number
+         self.user_prompt = user_prompt
+         self.narrative_text = narrative_text
+         self.image_style_prompt = image_style_prompt
+         self.image = image
+         self.image_provider = image_provider
+         self.error_message = error_message  # Store errors specific to this scene
+
+     def __str__(self):
+         img_status = "Yes" if self.image else ("Error" if self.error_message and not self.image else "No")
+         narr_status = str(self.narrative_text)[:50] if self.narrative_text and "Error" not in self.narrative_text else ("Error" if "Error" in str(self.narrative_text) else "N/A")
+         return f"Scene {self.scene_number}: {self.user_prompt[:50]}... Narrative: {narr_status}... Image: {img_status}"
+
+ class Story:
+     def __init__(self, title: str = "Untitled StoryVerse"):
+         self.title = title
+         self.scenes: list[Scene] = []
+         self.current_scene_number = 0
+         self.global_style_prompt: str = None
+         self.global_negative_prompt: str = None
+
+     def add_scene_from_elements(self, user_prompt: str, narrative_text: str,
+                                 image: Image.Image, image_style_prompt: str,
+                                 image_provider: str, error_message: str = None) -> Scene:  # Added error_message
+         self.current_scene_number += 1
+         scene = Scene(
+             scene_number=self.current_scene_number,
+             user_prompt=user_prompt,
+             narrative_text=narrative_text,
+             image=image,
+             image_style_prompt=image_style_prompt,
+             image_provider=image_provider,
+             error_message=error_message  # Store any error that occurred during this scene's generation
+         )
+         self.scenes.append(scene)
+         print(f"DEBUG: story_engine.py - Added scene {self.current_scene_number}: {scene.user_prompt[:30]}")
+         return scene
+
+     # This method might be redundant if add_scene_from_elements can handle errors
+     # def add_scene_with_error(self, user_prompt: str, error_message: str) -> Scene:
+     #     self.current_scene_number += 1
+     #     scene = Scene(scene_number=self.current_scene_number, user_prompt=user_prompt, error_message=error_message)
+     #     self.scenes.append(scene)
+     #     print(f"DEBUG: story_engine.py - Added scene {self.current_scene_number} WITH ERROR: {error_message}")
+     #     return scene
+
+     def get_last_scene_narrative(self) -> str:
+         if self.scenes and self.scenes[-1].narrative_text and "Error" not in self.scenes[-1].narrative_text:
+             return self.scenes[-1].narrative_text
+         return ""  # Return empty if last scene had error or no narrative
+
+     def get_all_scenes_for_gallery_display(self) -> list[tuple[Image.Image, str]]:
+         """Prepares scenes for Gradio Gallery: list of (image, caption)"""
+         gallery_items = []
+         if not self.scenes:
+             # Return a placeholder if no scenes, so gallery doesn't error out
+             placeholder_img = Image.new('RGB', (100, 100), color='lightgrey')
+             gallery_items.append((placeholder_img, "No scenes yet. Weave your story!"))
+             return gallery_items
+
+         for scene in self.scenes:
+             caption = f"S{scene.scene_number}: {scene.user_prompt[:40]}..."
+             if scene.image_style_prompt: caption += f"\nStyle: {scene.image_style_prompt}"
+
+             if scene.error_message:
+                 # Create a placeholder image for errors
+                 # error_img = Image.new('RGB', (100,100), color='#FFCCCB')  # Light red for error
+                 # For gallery, might be better to use None for image if error occurred, or a default "error" image
+                 # Gradio gallery might handle None gracefully, or you might need a placeholder PIL image.
+                 # Let's try None and see how Gradio handles it. If it errors, use a placeholder image.
+                 gallery_items.append((None, f"{caption}\n⚠️ Error: {scene.error_message[:100]}..."))
+             elif scene.image:
+                 gallery_items.append((scene.image, caption))
+             else:  # No image, but no specific error reported for the image part of this scene
+                 placeholder_img = Image.new('RGB', (100, 100), color='whitesmoke')
+                 gallery_items.append((placeholder_img, f"{caption}\n(No image generated for this scene)"))
+         return gallery_items
+
+     def get_latest_scene_details_for_display(self) -> tuple[Image.Image, str]:
+         """Returns (PIL.Image or None, markdown_string_for_narrative) for the latest scene."""
+         if not self.scenes:
+             return None, "No scenes yet. Describe your first scene idea above!"
+
+         ls = self.scenes[-1]
+         latest_image = ls.image if ls.image else None  # Will be None if error or no image
+
+         narrative_display = f"## Scene {ls.scene_number}: {ls.user_prompt}\n\n"
+         if ls.error_message:
+             narrative_display += f"<p style='color:red;'><strong>Generation Error for this scene:</strong><br>{ls.error_message}</p>\n"
+
+         if ls.image and not ls.error_message:  # Only show style if image was successful
+             narrative_display += f"**Style:** {ls.image_style_prompt}\n\n"
+
+         if ls.narrative_text:
+             if "Error" in ls.narrative_text:  # Check if narrative itself is an error message
+                 narrative_display += f"<p style='color:orange;'>{ls.narrative_text}</p>"
+             else:
+                 narrative_display += ls.narrative_text
+         elif not ls.error_message:  # No narrative but no major scene error
+             narrative_display += "(No narrative generated for this scene)."
+
+         return latest_image, narrative_display
+
+
+     def clear_story(self):
+         self.scenes = []
+         self.current_scene_number = 0
+         print("DEBUG: story_engine.py - Story cleared.")
+
+ print("DEBUG: core.story_engine (for StoryVerseWeaver) - Module defined.")
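To illustrate how the new module fits together, here is a minimal usage sketch of the `Story`/`Scene` API as an app callback might drive it. It is not part of this commit: the story title, prompts, style strings, provider name, and error text are invented placeholders, and the solid-color `PIL` image merely stands in for a generated picture.

```python
# Hypothetical usage sketch (not part of this commit); all values are placeholders.
from PIL import Image
from core.story_engine import Story

story = Story(title="The Clockwork Harbor")

# A successful scene: image generated, narrative available.
story.add_scene_from_elements(
    user_prompt="A lighthouse made of gears at dawn",
    narrative_text="The gears turned slowly as the first light touched the harbor...",
    image=Image.new("RGB", (512, 512), color="steelblue"),  # stand-in for a generated image
    image_style_prompt="epic fantasy, warm light",
    image_provider="placeholder-image-api",
)

# A failed scene: no image, error recorded on the Scene itself.
story.add_scene_from_elements(
    user_prompt="A storm over the clock tower",
    narrative_text=None,
    image=None,
    image_style_prompt="epic fantasy, stormy",
    image_provider="placeholder-image-api",
    error_message="Image API timed out",
)

gallery_items = story.get_all_scenes_for_gallery_display()           # list of (PIL.Image or None, caption)
latest_image, latest_markdown = story.get_latest_scene_details_for_display()
print(story.get_last_scene_narrative())  # "" here, since the last scene carries an error
```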