# --- START OF CORRECTED_AGAIN app.py ---
from flask import Flask, render_template, request, jsonify, Response, stream_with_context
# Import and client initialization style kept from the original code
from google import genai
from google.genai import types
# Import the Google API error type when available; its location depends on
# the installed SDK, so fall back to generic exception handling otherwise.
try:
    from google.api_core.exceptions import GoogleAPIError
except ImportError:
    GoogleAPIError = None
import os
from PIL import Image
import io
import base64
import json
import traceback  # for logging full error tracebacks
app = Flask(__name__)
GOOGLE_API_KEY = os.environ.get("GEMINI_API_KEY")

# Check the API key before creating the client: client creation may still
# succeed without it, but every API call will fail.
if not GOOGLE_API_KEY:
    print("WARNING: GEMINI_API_KEY environment variable not set. API calls will likely fail.")

# Client initialization kept from the original code
client = genai.Client(
    api_key=GOOGLE_API_KEY,
)
# --- Routes for index and potentially the Pro version (kept for context) ---
@app.route('/')
def index():
# Assuming index.html is for the Pro version or another page
return render_template('index.html') # Or redirect to /free if it's the main page
@app.route('/free')
def indexx():
# This route serves the free version HTML
return render_template('maj.html')
# --- Original /solve route (Pro version, streaming) - Kept as is ---
@app.route('/solve', methods=['POST'])
def solve():
try:
if 'image' not in request.files or not request.files['image'].filename:
return jsonify({'error': 'No image file provided'}), 400
image_data = request.files['image'].read()
if not image_data:
return jsonify({'error': 'Empty image file provided'}), 400
try:
img = Image.open(io.BytesIO(image_data))
except Exception as img_err:
return jsonify({'error': f'Invalid image file: {str(img_err)}'}), 400
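        # Re-encode the upload as PNG so a single, known MIME type is sent to the API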
buffered = io.BytesIO()
img.save(buffered, format="PNG")
img_str = base64.b64encode(buffered.getvalue()).decode() # Keep base64 for this route
def generate():
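            # Track which phase (thinking / executing_code / code_result / answering)
            # was last announced so a "mode" event is emitted only on transitions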
mode = 'starting'
try:
response = client.models.generate_content_stream(
model="gemini-2.5-pro-exp-03-25", # Your original model name
contents=[
{'inline_data': {'mime_type': 'image/png', 'data': img_str}},
"""Résous cet exercice en français avec du LaTeX.
Si nécessaire, utilise du code Python pour effectuer les calculs complexes.
Présente ta solution de façon claire et espacée."""
],
config=types.GenerateContentConfig(
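                        # Cap the model's internal "thinking" at 8000 tokens before the visible answer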
thinking_config=types.ThinkingConfig(
thinking_budget=8000
),
tools=[types.Tool(
code_execution=types.ToolCodeExecution()
)]
)
)
for chunk in response:
# Process chunks as in your original streaming logic
if chunk.candidates and chunk.candidates[0].content and chunk.candidates[0].content.parts:
for part in chunk.candidates[0].content.parts:
if hasattr(part, 'thought') and part.thought:
if mode != "thinking":
yield 'data: ' + json.dumps({"mode": "thinking"}) + '\n\n'
mode = "thinking"
elif hasattr(part, 'executable_code') and part.executable_code:
if mode != "executing_code":
yield 'data: ' + json.dumps({"mode": "executing_code"}) + '\n\n'
mode = "executing_code"
code_block_open = "```python\n"
code_block_close = "\n```"
yield 'data: ' + json.dumps({"content": code_block_open + part.executable_code.code + code_block_close}) + '\n\n'
elif hasattr(part, 'code_execution_result') and part.code_execution_result:
if mode != "code_result":
yield 'data: ' + json.dumps({"mode": "code_result"}) + '\n\n'
mode = "code_result"
result_block_open = "Résultat d'exécution:\n```\n"
result_block_close = "\n```"
yield 'data: ' + json.dumps({"content": result_block_open + part.code_execution_result.output + result_block_close}) + '\n\n'
else: # Assuming it's text
if mode != "answering":
yield 'data: ' + json.dumps({"mode": "answering"}) + '\n\n'
mode = "answering"
if hasattr(part, 'text') and part.text:
yield 'data: ' + json.dumps({"content": part.text}) + '\n\n'
# Handle prompt feedback or finish reasons in streaming
elif chunk.prompt_feedback and chunk.prompt_feedback.block_reason:
error_msg = f"Prompt blocked: {chunk.prompt_feedback.block_reason.name}"
print(error_msg)
yield 'data: ' + json.dumps({"error": error_msg}) + '\n\n'
break # Stop processing on block
elif chunk.candidates and chunk.candidates[0].finish_reason:
finish_reason = chunk.candidates[0].finish_reason.name
if finish_reason != 'STOP':
error_msg = f"Generation finished early: {finish_reason}"
print(error_msg)
yield 'data: ' + json.dumps({"error": error_msg}) + '\n\n'
break # Stop processing on finish reason
except Exception as e:
print(f"Error during streaming generation: {e}")
yield 'data: ' + json.dumps({"error": str(e)}) + '\n\n'
return Response(
stream_with_context(generate()),
mimetype='text/event-stream',
headers={
'Cache-Control': 'no-cache',
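                # Disable proxy buffering (e.g. nginx) so events are flushed immediately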
'X-Accel-Buffering': 'no'
}
)
except Exception as e:
# Log the full error for debugging
print(f"Error in /solve endpoint (setup or initial request): {e}")
print(traceback.format_exc())
# Return JSON error for fetch API if streaming setup fails
return jsonify({'error': f'Failed to process request: {str(e)}'}), 500
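# A minimal sketch of consuming the /solve SSE stream from Python, assuming
# the `requests` library and a local dev server (the URL and filename are
# illustrative, not part of this app):
#
#   import json, requests
#   with requests.post("http://localhost:5000/solve",
#                      files={"image": open("exercise.png", "rb")},
#                      stream=True) as resp:
#       for raw in resp.iter_lines():
#           if raw.startswith(b"data: "):
#               event = json.loads(raw[len(b"data: "):])
#               print(event)  # each event carries one of: "mode", "content", "error"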
# --- MODIFIED /solved route (Free version, non-streaming) - Corrected Exception Handling ---
@app.route('/solved', methods=['POST'])
def solved():
try:
if 'image' not in request.files or not request.files['image'].filename:
return jsonify({'error': 'No image file provided'}), 400
image_data = request.files['image'].read()
if not image_data:
return jsonify({'error': 'Empty image file provided'}), 400
try:
img = Image.open(io.BytesIO(image_data))
except Exception as img_err:
return jsonify({'error': f'Invalid image file: {str(img_err)}'}), 400
buffered = io.BytesIO() # Keep BytesIO
img.save(buffered, format="PNG")
img_str = base64.b64encode(buffered.getvalue()).decode() # Keep base64
# Use the non-streaming generate_content method
model_name = "gemini-2.5-flash-preview-04-17" # Your original free model name
contents = [
{'inline_data': {'mime_type': 'image/png', 'data': img_str}}, # Use inline_data with base64
"""Résous cet exercice en français en utilisant le format LaTeX pour les mathématiques si nécessaire.
Si tu dois effectuer des calculs complexes, utilise l'outil d'exécution de code Python fourni.
Présente ta solution de manière claire et bien structurée. Formate le code Python dans des blocs délimités par ```python ... ``` et les résultats d'exécution dans des blocs ``` ... ```."""
]
# Call the non-streaming generation method using the original client object
response = client.models.generate_content(
model=model_name,
contents=contents,
config=types.GenerateContentConfig(
tools=[types.Tool(
code_execution=types.ToolCodeExecution()
)]
)
        # generate_content (unlike generate_content_stream) returns the full response at once
)
# Aggregate the response parts into a single string
full_solution = ""
# Check if the response has candidates and parts
if response.candidates and response.candidates[0].content and response.candidates[0].content.parts:
for part in response.candidates[0].content.parts:
if hasattr(part, 'text') and part.text:
full_solution += part.text
elif hasattr(part, 'executable_code') and part.executable_code:
full_solution += f"\n\n```python\n{part.executable_code.code}\n```\n\n"
                # Tool output lives on the code_execution_result attribute, as in the original code
elif hasattr(part, 'code_execution_result') and hasattr(part.code_execution_result, 'output'):
output_str = part.code_execution_result.output
full_solution += f"\n\n**Résultat d'exécution:**\n```\n{output_str}\n```\n\n"
# Note: 'thought' parts are ignored
# Handle cases where the response is empty or blocked
if not full_solution.strip():
# Check for prompt feedback blocking or finish reasons
if response.prompt_feedback and response.prompt_feedback.block_reason:
block_reason = response.prompt_feedback.block_reason.name
full_solution = f"Le contenu a été bloqué pour des raisons de sécurité: {block_reason}."
print(f"Generation blocked by prompt feedback: {block_reason}") # Log it
elif response.candidates and response.candidates[0].finish_reason:
finish_reason = response.candidates[0].finish_reason.name
# Provide specific messages for known non-STOP finish reasons
if finish_reason == 'SAFETY':
full_solution = "Désolé, je ne peux pas fournir de réponse en raison de restrictions de sécurité."
elif finish_reason == 'RECITATION':
full_solution = "Désolé, la réponse ne peut être fournie en raison de la politique sur les récitations."
else:
# Generic message for other finish reasons (e.g., MAX_TOKENS)
full_solution = f"La génération s'est terminée prématurément ({finish_reason}). Le problème est peut-être trop complexe ou nécessite plus de tokens."
print(f"Generation finished early: {finish_reason}") # Log it
else:
# Fallback if no specific reason is found but the response is empty
full_solution = "Désolé, je n'ai pas pu générer de solution pour cette image."
print("Generation resulted in empty content without specific block/finish reason.")
# Return the complete solution as JSON
return jsonify({'solution': full_solution.strip()})
    # --- Corrected exception handling ---
    # GoogleAPIError is imported (or set to None) at module level, so it can
    # be matched here without risking a NameError or a mid-route import.
    except Exception as e:
        if GoogleAPIError is not None and isinstance(e, GoogleAPIError):
            print(f"Google API Error caught: {e}")
            # Prefer the error's own message when it exposes one, without
            # revealing a full traceback to the client
            if getattr(e, 'message', None):
                error_message = f"Erreur API: {e.message}"
            else:
                error_message = f"Erreur API: {str(e)}"
            # Map safety blocks to a user-friendly message and a 400, since
            # the issue lies with the submitted content, not the server
            if "blocked" in str(e).lower() or "safety" in str(e).lower():
                return jsonify({'error': "Le contenu a été bloqué par l'API pour des raisons de sécurité."}), 400
            return jsonify({'error': error_message}), 500
        # Any other unexpected error during processing or the API call
        print(f"An unexpected error occurred in /solved endpoint: {e}")
        # Log the full traceback for server-side debugging
        print(traceback.format_exc())
        # Return a generic error message to the user
        return jsonify({'error': f'Une erreur interne est survenue: {str(e)}'}), 500
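# Example request against the non-streaming /solved endpoint (illustrative;
# adjust the host/port to your deployment):
#
#   curl -X POST -F "image=@exercise.png" http://localhost:5000/solved
#
# On success the response body is {"solution": "..."}; on failure, {"error": "..."}.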
if __name__ == '__main__':
# Set host='0.0.0.0' to make it accessible on your network if needed
# Remove debug=True in production
app.run(debug=True, host='0.0.0.0', port=5000) # Example port
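# For production, prefer a WSGI server over the Flask dev server, e.g.
# (assuming gunicorn is installed; the SSE route needs a worker class that
# does not buffer responses, such as gevent):
#   gunicorn -k gevent -w 2 -b 0.0.0.0:5000 app:app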
# --- END OF CORRECTED_AGAIN app.py ---