# --- START OF CORRECTED app.py ---
from flask import Flask, render_template, request, jsonify, Response, stream_with_context
# google-genai SDK: client plus the types/errors modules used below
from google import genai
from google.genai import types
from google.genai import errors
import os
from PIL import Image
import io
import base64
import json
app = Flask(__name__)
GOOGLE_API_KEY = os.environ.get("GEMINI_API_KEY")

# Warn early if the key is missing; every API call below will fail without it.
if not GOOGLE_API_KEY:
    print("WARNING: GEMINI_API_KEY environment variable not set.")

client = genai.Client(api_key=GOOGLE_API_KEY)
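
# Small helper for Server-Sent Events framing, used by the streaming /solve
# route below. Each SSE message is a "data: <json>" line followed by a blank
# line, which is what the frontend's EventSource / stream reader expects.
def sse(payload):
    return 'data: ' + json.dumps(payload) + '\n\n'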
# --- Routes for index and potentially the Pro version (kept for context) ---
@app.route('/')
def index():
    # Assuming index.html is the Pro/main page; could redirect to /free instead
    return render_template('index.html')


@app.route('/free')
def indexx():
    # This route serves the free (non-streaming) version HTML
    return render_template('maj.html')
# --- Original /solve route (Pro version, streaming) - Kept for reference ---
# If you want the Pro version (/solve) to also be non-streaming, apply similar changes as below
@app.route('/solve', methods=['POST'])
def solve():
    try:
        if 'image' not in request.files or not request.files['image'].filename:
            return jsonify({'error': 'No image file provided'}), 400

        image_data = request.files['image'].read()
        if not image_data:
            return jsonify({'error': 'Empty image file provided'}), 400

        try:
            img = Image.open(io.BytesIO(image_data))
        except Exception as img_err:
            return jsonify({'error': f'Invalid image file: {str(img_err)}'}), 400

        # Re-encode to PNG and base64 so the inline_data payload is predictable
        buffered = io.BytesIO()
        img.save(buffered, format="PNG")
        img_str = base64.b64encode(buffered.getvalue()).decode()

        def generate():
            mode = 'starting'
            try:
                response = client.models.generate_content_stream(
                    model="gemini-2.5-pro-exp-03-25",  # Pro model for this route
                    contents=[
                        {'inline_data': {'mime_type': 'image/png', 'data': img_str}},
                        """Résous cet exercice en français avec du LaTeX.
Si nécessaire, utilise du code Python pour effectuer les calculs complexes.
Présente ta solution de façon claire et espacée."""
                    ],
                    config=types.GenerateContentConfig(
                        thinking_config=types.ThinkingConfig(
                            thinking_budget=8000
                        ),
                        tools=[types.Tool(
                            code_execution=types.ToolCodeExecution()
                        )]
                    )
                )

                # Emit a "mode" event whenever the part type changes, then the content.
                for chunk in response:
                    if chunk.candidates and chunk.candidates[0].content and chunk.candidates[0].content.parts:
                        for part in chunk.candidates[0].content.parts:
                            if hasattr(part, 'thought') and part.thought:
                                if mode != "thinking":
                                    yield sse({"mode": "thinking"})
                                    mode = "thinking"
                            elif hasattr(part, 'executable_code') and part.executable_code:
                                if mode != "executing_code":
                                    yield sse({"mode": "executing_code"})
                                    mode = "executing_code"
                                yield sse({"content": "```python\n" + part.executable_code.code + "\n```"})
                            elif hasattr(part, 'code_execution_result') and part.code_execution_result:
                                if mode != "code_result":
                                    yield sse({"mode": "code_result"})
                                    mode = "code_result"
                                yield sse({"content": "Résultat d'exécution:\n```\n" + part.code_execution_result.output + "\n```"})
                            else:  # Plain text part
                                if mode != "answering":
                                    yield sse({"mode": "answering"})
                                    mode = "answering"
                                if hasattr(part, 'text') and part.text:
                                    yield sse({"content": part.text})
                    # Surface prompt blocks or early finish reasons to the client
                    elif chunk.prompt_feedback and chunk.prompt_feedback.block_reason:
                        error_msg = f"Prompt blocked: {chunk.prompt_feedback.block_reason.name}"
                        print(error_msg)
                        yield sse({"error": error_msg})
                        break
                    elif chunk.candidates and chunk.candidates[0].finish_reason:
                        finish_reason = chunk.candidates[0].finish_reason.name
                        if finish_reason != 'STOP':
                            error_msg = f"Generation finished early: {finish_reason}"
                            print(error_msg)
                            yield sse({"error": error_msg})
                            break
            except Exception as e:
                print(f"Error during streaming generation: {e}")
                yield sse({"error": str(e)})

        return Response(
            stream_with_context(generate()),
            mimetype='text/event-stream',
            headers={
                'Cache-Control': 'no-cache',
                'X-Accel-Buffering': 'no'  # Disable proxy buffering so events flush immediately
            }
        )
    except Exception as e:
        print(f"Error in /solve endpoint: {e}")
        # Return a JSON error for the fetch API if streaming setup fails
        return jsonify({'error': f'Failed to process request: {str(e)}'}), 500
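
# Example client call for the streaming route (hypothetical file name;
# assumes the `requests` package is installed):
#   import requests, json
#   with open('exercise.png', 'rb') as f:
#       r = requests.post('http://localhost:5000/solve', files={'image': f}, stream=True)
#   for line in r.iter_lines():
#       if line.startswith(b'data: '):
#           print(json.loads(line[6:]))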
# --- MODIFIED /solved route (Free version, non-streaming) using original SDK syntax ---
@app.route('/solved', methods=['POST'])
def solved():
    try:
        if 'image' not in request.files or not request.files['image'].filename:
            return jsonify({'error': 'No image file provided'}), 400

        image_data = request.files['image'].read()
        if not image_data:
            return jsonify({'error': 'Empty image file provided'}), 400

        try:
            img = Image.open(io.BytesIO(image_data))
        except Exception as img_err:
            return jsonify({'error': f'Invalid image file: {str(img_err)}'}), 400

        buffered = io.BytesIO()  # fixed typo: was io.BytesBytesIO()
        img.save(buffered, format="PNG")
        img_str = base64.b64encode(buffered.getvalue()).decode()

        # Non-streaming generation with the Flash model for the free version
        model_name = "gemini-2.5-flash-preview-04-17"
        contents = [
            {'inline_data': {'mime_type': 'image/png', 'data': img_str}},
            """Résous cet exercice en français en utilisant le format LaTeX pour les mathématiques si nécessaire.
Si tu dois effectuer des calculs complexes, utilise l'outil d'exécution de code Python fourni.
Présente ta solution de manière claire et bien structurée. Formate le code Python dans des blocs délimités par ```python ... ``` et les résultats d'exécution dans des blocs ``` ... ```."""
        ]

        response = client.models.generate_content(
            model=model_name,
            contents=contents,
            config=types.GenerateContentConfig(
                # thinking_config omitted: thought parts are not surfaced here
                tools=[types.Tool(
                    code_execution=types.ToolCodeExecution()
                )]
            )
        )

        # Aggregate the response parts into a single Markdown string
        full_solution = ""
        if response.candidates and response.candidates[0].content and response.candidates[0].content.parts:
            for part in response.candidates[0].content.parts:
                if hasattr(part, 'text') and part.text:
                    full_solution += part.text
                elif hasattr(part, 'executable_code') and part.executable_code:
                    # Format code blocks as Markdown, which the frontend expects
                    full_solution += f"\n\n```python\n{part.executable_code.code}\n```\n\n"
                elif hasattr(part, 'code_execution_result') and hasattr(part.code_execution_result, 'output'):
                    output_str = part.code_execution_result.output
                    full_solution += f"\n\n**Résultat d'exécution:**\n```\n{output_str}\n```\n\n"
                # 'thought' parts are intentionally ignored in this route

        # If nothing was produced, explain why based on finish reason / feedback
        if not full_solution.strip():
            finish_reason = response.candidates[0].finish_reason.name if response.candidates and response.candidates[0].finish_reason else "UNKNOWN"
            safety_ratings = response.candidates[0].safety_ratings if response.candidates else []
            print(f"Generation finished with reason: {finish_reason}, Safety: {safety_ratings}")
            if finish_reason == 'SAFETY':
                full_solution = "Désolé, je ne peux pas fournir de réponse en raison de restrictions de sécurité."
            elif finish_reason == 'RECITATION':
                full_solution = "Désolé, la réponse ne peut être fournie en raison de la politique sur les récitations."
            elif response.prompt_feedback and response.prompt_feedback.block_reason:
                block_reason = response.prompt_feedback.block_reason.name
                full_solution = f"Le contenu a été bloqué pour des raisons de sécurité: {block_reason}."
            else:
                full_solution = "Désolé, je n'ai pas pu générer de solution complète pour cette image."

        # Return the complete solution as JSON, trimmed of surrounding whitespace
        return jsonify({'solution': full_solution.strip()})

    # The google-genai SDK raises errors.APIError for API failures
    # (fixed: genai.core.exceptions.GoogleAPIError does not exist in this SDK)
    except errors.APIError as api_error:
        print(f"GenAI API Error: {api_error}")
        error_detail = str(api_error)
        if "safety" in error_detail.lower():
            return jsonify({'error': "Le contenu a été bloqué pour des raisons de sécurité par l'API."}), 400
        elif "blocked" in error_detail.lower():
            return jsonify({'error': "La requête a été bloquée par l'API."}), 400
        else:
            return jsonify({'error': f"Erreur de l'API GenAI: {error_detail}"}), 500
    except Exception as e:
        # Log the full traceback for debugging, return a generic message
        import traceback
        print(f"Error in /solved endpoint: {e}")
        print(traceback.format_exc())
        return jsonify({'error': f'Une erreur interne est survenue lors du traitement: {str(e)}'}), 500
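
# Example client call for this route (hypothetical file name; assumes the
# `requests` package is installed):
#   import requests
#   with open('exercise.png', 'rb') as f:
#       r = requests.post('http://localhost:5000/solved', files={'image': f})
#   print(r.json()['solution'])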
if __name__ == '__main__':
    # host='0.0.0.0' makes the app reachable on your network if needed.
    # Remove debug=True in production.
    app.run(debug=True, host='0.0.0.0', port=5000)
# --- END OF CORRECTED app.py ---