Update app.py
app.py CHANGED
@@ -160,8 +160,6 @@ Provide a new, complete, and rigorously justified solution. Ensure that every er
             print(f"Error loading prompts: {e}")
         return prompts
 
-
-
     def extract_problem_text(self, img_str):
         try:
             response = client.models.generate_content(
@@ -239,37 +237,47 @@ def solve():
             agent_system = AgentSystem()
 
             # Étape 0: Extraction
-            yield
-
+            yield 'data: ' + json.dumps({"mode": "thinking"}) + '\n\n'
+
+            # Pre-define strings with newlines to avoid f-string backslash issue
+            extraction_msg = "# 🔍 EXTRACTION DU PROBLÈME\n\nAnalyse de l'image pour extraire l'énoncé du problème...\n\n"
+            yield 'data: ' + json.dumps({"content": extraction_msg, "type": "text"}) + '\n\n'
 
             problem_text = agent_system.extract_problem_text(img_str)
-
+            problem_msg = f"**Problème identifié:**\n{problem_text}\n\n"
+            yield 'data: ' + json.dumps({"content": problem_msg, "type": "text"}) + '\n\n'
 
             # Étape 1
-
+            step1_msg = "# 📝 ÉTAPE 1: SOLUTION INITIALE\n\n"
+            yield 'data: ' + json.dumps({"content": step1_msg, "type": "text"}) + '\n\n'
+
             step1_prompt = agent_system.prompts["step1_initial_solution"].replace(
                 "[The mathematical problem will be inserted here]", problem_text
             )
             initial_solution = agent_system.run_agent_step("step1", step1_prompt, use_calculator)
-            yield
+            yield 'data: ' + json.dumps({"content": initial_solution, "type": "text"}) + '\n\n'
 
             # Étape 2
-
+            step2_msg = "# 🔧 ÉTAPE 2: AUTO-AMÉLIORATION\n\n"
+            yield 'data: ' + json.dumps({"content": step2_msg, "type": "text"}) + '\n\n'
+
             step2_prompt = agent_system.prompts["step2_self_improvement"].replace(
                 "[The initial solution attempt will be inserted here]", initial_solution
             )
             improved_solution = agent_system.run_agent_step("step2", step2_prompt, use_calculator)
-            yield
+            yield 'data: ' + json.dumps({"content": improved_solution, "type": "text"}) + '\n\n'
 
             # Étape 3
-
+            step3_msg = "# ✅ ÉTAPE 3: VÉRIFICATION\n\n"
+            yield 'data: ' + json.dumps({"content": step3_msg, "type": "text"}) + '\n\n'
+
             step3_prompt = agent_system.prompts["step3_verification"].replace(
                 "[The mathematical problem will be inserted here]", problem_text
             ).replace(
                 "[The solution to be verified will be inserted here]", improved_solution
             )
             verification_result = agent_system.run_agent_step("step3", step3_prompt, False)
-            yield
+            yield 'data: ' + json.dumps({"content": verification_result, "type": "text"}) + '\n\n'
 
             needs_correction = (
                 "Critical Error" in verification_result
@@ -278,7 +286,9 @@ def solve():
             )
 
             if needs_correction:
-
+                step5_msg = "# 🛠️ ÉTAPE 5: CORRECTION\n\n"
+                yield 'data: ' + json.dumps({"content": step5_msg, "type": "text"}) + '\n\n'
+
                 step5_prompt = agent_system.prompts["step5_correction"].replace(
                     "[The full verification report will be inserted here]", verification_result
                 ).replace(
@@ -286,14 +296,17 @@ def solve():
                 )
                 corrected_solution = agent_system.run_agent_step("step5", step5_prompt, use_calculator)
                 final_solution = corrected_solution
-                yield
+                yield 'data: ' + json.dumps({"content": corrected_solution, "type": "text"}) + '\n\n'
             else:
                 final_solution = improved_solution
-
+                validated_msg = "✅ La solution a été validée sans correction.\n\n"
+                yield 'data: ' + json.dumps({"content": validated_msg, "type": "text"}) + '\n\n'
 
-            yield
-
-
+            yield 'data: ' + json.dumps({"mode": "answering"}) + '\n\n'
+
+            final_msg = "# 📋 SOLUTION FINALE\n\n"
+            yield 'data: ' + json.dumps({"content": final_msg, "type": "text"}) + '\n\n'
+            yield 'data: ' + json.dumps({"content": final_solution, "type": "text"}) + '\n\n'
 
         else:
             prompt = BASE_PROMPT
@@ -316,11 +329,11 @@ def solve():
             for chunk in response:
                 for part in chunk.candidates[0].content.parts:
                     if hasattr(part, 'text') and part.text:
-                        yield
+                        yield 'data: ' + json.dumps({"content": part.text, "type": "text"}) + '\n\n'
 
         except Exception as e:
             print(f"Error during generation: {e}")
-            yield
+            yield 'data: ' + json.dumps({"error": "Erreur inattendue"}) + '\n\n'
 
     return Response(
         stream_with_context(generate()),
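The added comment "Pre-define strings with newlines to avoid f-string backslash issue" refers to a pre-Python-3.12 restriction: a backslash escape may not appear inside the expression part of an f-string. A minimal sketch of the problem and the workaround the diff uses; the variable names here are illustrative, not from app.py:

```python
import json

# SyntaxError before Python 3.12: the "\n\n" inside the literal passed to
# json.dumps() sits inside the f-string's {...} replacement field.
#   yield f'data: {json.dumps({"content": "# ÉTAPE 1\n\n"})}\n\n'

# Workaround used in the diff: hoist the literal into a variable first,
# then build the SSE line by plain concatenation (no f-string needed).
step1_msg = "# ÉTAPE 1\n\n"    # backslashes in an ordinary literal are fine
line = 'data: ' + json.dumps({"content": step1_msg, "type": "text"}) + '\n\n'
print(line)
```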
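For reference, each `yield` above emits one Server-Sent-Events record of the form `data: <json>\n\n`, carrying either a `mode` switch ("thinking"/"answering"), a `content` text chunk, or an `error`. A minimal client sketch for consuming that stream, assuming a hypothetical `/solve` URL and request payload (the diff only shows the response side):

```python
import json
import requests

URL = "http://localhost:7860/solve"  # hypothetical endpoint, not in the diff

with requests.post(URL, json={"image": "<base64>"}, stream=True) as resp:
    for raw in resp.iter_lines(decode_unicode=True):
        if not raw or not raw.startswith("data: "):
            continue                      # skip blank SSE separator lines
        event = json.loads(raw[len("data: "):])
        if "mode" in event:               # "thinking" -> "answering" phase switch
            print(f"\n[mode: {event['mode']}]")
        elif "error" in event:
            print(f"\n[error: {event['error']}]")
        else:                             # streamed markdown text
            print(event.get("content", ""), end="", flush=True)
```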