Docfile committed on
Commit 451d8eb · verified · 1 Parent(s): 949f8bc

Update app.py

Files changed (1)
  1. app.py +98 -213
app.py CHANGED
@@ -1,133 +1,105 @@
- # --- START OF CORRECTED app.py (v3 - Fixes AttributeError) ---
-
  from flask import Flask, render_template, request, jsonify, Response, stream_with_context
- # Revert to the original google.genai import and usage
  from google import genai
- # Make sure types is imported from google.genai if needed for specific model config
  from google.genai import types
- # Correct import for GoogleAPIError with the original genai client
- from google.api_core.exceptions import GoogleAPIError  # <-- CORRECTED IMPORT
  import os
  from PIL import Image
  import io
  import base64
  import json
- import re

  app = Flask(__name__)

  GOOGLE_API_KEY = os.environ.get("GEMINI_API_KEY")
-
- # Use the original client initialization
- client = genai.Client(
-     api_key=GOOGLE_API_KEY,
- )
-
- # Ensure the API key is available (good practice)
  if not GOOGLE_API_KEY:
-     print("WARNING: GEMINI_API_KEY environment variable not set.")
-     # Handle this case appropriately, e.g., exit or show an error on the page
-     # In a real application, you might want to raise an error or redirect

- # --- Routes for index and potentially the Pro version (kept for context) ---
  @app.route('/')
  def index():
-     # Assuming index.html is for the Pro version or another page
-     return render_template('index.html')  # Or redirect to /free if it's the main page

  @app.route('/free')
- def indexx():
-     # This route serves the free version HTML
      return render_template('maj.html')

- # --- Original /solve route (Pro version, streaming) - Kept as is ---
- @app.route('/solve', methods=['POST'])
- def solve():
      try:
-         if 'image' not in request.files or not request.files['image'].filename:
-             return jsonify({'error': 'No image file provided'}), 400
-
-         image_data = request.files['image'].read()
-         if not image_data:
-             return jsonify({'error': 'Empty image file provided'}), 400
-
-         try:
-             img = Image.open(io.BytesIO(image_data))
-         except Exception as img_err:
-             return jsonify({'error': f'Invalid image file: {str(img_err)}'}), 400
-
          buffered = io.BytesIO()
          img.save(buffered, format="PNG")
          img_str = base64.b64encode(buffered.getvalue()).decode()

-         def generate():
-             mode = 'starting'
-             try:
-                 response = client.models.generate_content_stream(
-                     model="gemini-2.5-pro-exp-03-25",  # Your original model name
-                     contents=[
-                         {'inline_data': {'mime_type': 'image/png', 'data': img_str}},
-                         """Résous cet exercice en français avec du LaTeX.
-                         Si nécessaire, utilise du code Python pour effectuer les calculs complexes.
-                         Présente ta solution de façon claire et espacée."""
-                     ],
-                     config=types.GenerateContentConfig(
-                         thinking_config=types.ThinkingConfig(
-                             thinking_budget=8000
-                         ),
-                         tools=[types.Tool(
-                             code_execution=types.ToolCodeExecution()
-                         )]
-                     )
-                 )
-
-                 for chunk in response:
-                     if chunk.candidates and chunk.candidates[0].content and chunk.candidates[0].content.parts:
-                         for part in chunk.candidates[0].content.parts:
-                             if hasattr(part, 'thought') and part.thought:
-                                 if mode != "thinking":
-                                     yield 'data: ' + json.dumps({"mode": "thinking"}) + '\n\n'
-                                     mode = "thinking"
-                             elif hasattr(part, 'executable_code') and part.executable_code:
-                                 if mode != "executing_code":
-                                     yield 'data: ' + json.dumps({"mode": "executing_code"}) + '\n\n'
-                                     mode = "executing_code"
-                                 code_block_open = "```python\n"
-                                 code_block_close = "\n```"
-                                 yield 'data: ' + json.dumps({"content": code_block_open + part.executable_code.code + code_block_close}) + '\n\n'
-                             elif hasattr(part, 'code_execution_result') and part.code_execution_result:
-                                 if mode != "code_result":
-                                     yield 'data: ' + json.dumps({"mode": "code_result"}) + '\n\n'
-                                     mode = "code_result"
-                                 result_block_open = "Résultat d'exécution:\n```\n"
-                                 result_block_close = "\n```"
-                                 yield 'data: ' + json.dumps({"content": result_block_open + part.code_execution_result.output + result_block_close}) + '\n\n'
-                             else:  # Assuming it's text
-                                 if mode != "answering":
-                                     yield 'data: ' + json.dumps({"mode": "answering"}) + '\n\n'
-                                     mode = "answering"
-                                 if hasattr(part, 'text') and part.text:
-                                     yield 'data: ' + json.dumps({"content": part.text}) + '\n\n'
-                     # Handle cases where a chunk might not have candidates/parts, or handle errors
-                     elif chunk.prompt_feedback and chunk.prompt_feedback.block_reason:
-                         error_msg = f"Prompt blocked: {chunk.prompt_feedback.block_reason.name}"
-                         print(error_msg)
-                         yield 'data: ' + json.dumps({"error": error_msg}) + '\n\n'
-                         break  # Stop processing on block
-                     elif chunk.candidates and chunk.candidates[0].finish_reason:
-                         finish_reason = chunk.candidates[0].finish_reason.name
-                         if finish_reason != 'STOP':
-                             error_msg = f"Generation finished early: {finish_reason}"
-                             print(error_msg)
-                             yield 'data: ' + json.dumps({"error": error_msg}) + '\n\n'
-                             break  # Stop processing on finish reason
-
-             except Exception as e:
-                 print(f"Error during streaming generation: {e}")
-                 yield 'data: ' + json.dumps({"error": str(e)}) + '\n\n'
-
          return Response(
-             stream_with_context(generate()),
              mimetype='text/event-stream',
              headers={
                  'Cache-Control': 'no-cache',
@@ -136,122 +108,35 @@ def solve():
          )

      except Exception as e:
-         print(f"Error in /solve endpoint: {e}")
-         return jsonify({'error': f'Failed to process request: {str(e)}'}), 500
-

- # --- MODIFIED /solved route (Free version, non-streaming) using original SDK syntax ---
  @app.route('/solved', methods=['POST'])
  def solved():
      try:
-         if 'image' not in request.files or not request.files['image'].filename:
-             return jsonify({'error': 'No image file provided'}), 400
-
          image_data = request.files['image'].read()
-         if not image_data:
-             return jsonify({'error': 'Empty image file provided'}), 400
-
-         try:
-             img = Image.open(io.BytesIO(image_data))
-         except Exception as img_err:
-             return jsonify({'error': f'Invalid image file: {str(img_err)}'}), 400
-
-         buffered = io.BytesIO()  # Corrected spelling BytesBytesIO -> BytesIO
-         img.save(buffered, format="PNG")
-         img_str = base64.b64encode(buffered.getvalue()).decode()
-
-         model_name = "gemini-2.5-flash-preview-04-17"  # Your original free model name
-
-         contents = [
-             {'inline_data': {'mime_type': 'image/png', 'data': img_str}},
-             """Résous cet exercice en français en utilisant le format LaTeX pour les mathématiques si nécessaire.
-             Si tu dois effectuer des calculs complexes, utilise l'outil d'exécution de code Python fourni.
-             Présente ta solution de manière claire et bien structurée. Formate le code Python dans des blocs délimités par ```python ... ``` et les résultats d'exécution dans des blocs ``` ... ```."""
-         ]
-
-         response = client.models.generate_content(
-             model=model_name,
-             contents=contents,
-             config=types.GenerateContentConfig(
-                 tools=[types.Tool(
-                     code_execution=types.ToolCodeExecution()
-                 )]
-             )
          )

-         full_solution = ""
-         # Check if the response has candidates and parts
-         if response.candidates and response.candidates[0].content and response.candidates[0].content.parts:
-             for part in response.candidates[0].content.parts:
-                 if hasattr(part, 'text') and part.text:
-                     full_solution += part.text
-                 elif hasattr(part, 'executable_code') and part.executable_code:
-                     full_solution += f"\n\n```python\n{part.executable_code.code}\n```\n\n"
-                 # Check for the result attribute name based on your SDK version's structure
-                 elif hasattr(part, 'code_execution_result') and hasattr(part.code_execution_result, 'output'):
-                     output_str = part.code_execution_result.output
-                     full_solution += f"\n\n**Résultat d'exécution:**\n```\n{output_str}\n```\n\n"
-         # Check for prompt_feedback on the response object for non-streaming
-         if response.prompt_feedback and response.prompt_feedback.block_reason:
-             block_reason = response.prompt_feedback.block_reason.name
-             # Add the block reason to the solution or handle it as an error
-             if not full_solution.strip():  # If no other content was generated
-                 full_solution = f"Le contenu a été bloqué pour des raisons de sécurité: {block_reason}."
-             else:  # If some content was generated before blocking
-                 full_solution += f"\n\n**Attention:** La réponse a pu être incomplète car le contenu a été bloqué: {block_reason}."
-
-         # Ensure we have some content, otherwise return a message or a specific error
-         if not full_solution.strip():
-             # Check for finish reasons on the candidates
-             finish_reason = response.candidates[0].finish_reason.name if response.candidates and response.candidates[0].finish_reason else "UNKNOWN"
-             # safety_ratings = response.candidates[0].safety_ratings if response.candidates else []  # You could log or use these
-             print(f"Generation finished with reason (no content): {finish_reason}")
-             if finish_reason == 'SAFETY':
-                 full_solution = "Désolé, je ne peux pas fournir de réponse en raison de restrictions de sécurité."
-             elif finish_reason == 'RECITATION':
-                 full_solution = "Désolé, la réponse ne peut être fournie en raison de la politique sur les récitations."
-             elif finish_reason == 'OTHER' or finish_reason == 'UNKNOWN':  # Catch general failures
-                 full_solution = "Désolé, je n'ai pas pu générer de solution complète pour cette image."
-             # If finish_reason is 'STOP' but there is no content, the generic message below applies
-
-             if not full_solution.strip():  # Fallback if the reason didn't give a specific message
-                 full_solution = "Désolé, je n'ai pas pu générer de solution complète pour cette image."
-
-         # Return the complete solution as JSON
-         return jsonify({'solution': full_solution.strip()})
-
-     # Catch specific API errors from google.api_core.exceptions
-     except GoogleAPIError as api_error:  # <-- CORRECTED USAGE
-         print(f"GenAI API Error: {api_error}")
-         # Provide more user-friendly error messages based on potential API errors
-         error_message = str(api_error)
-         if "RESOURCE_EXHAUSTED" in error_message:
-             user_error = "Vous avez atteint votre quota d'utilisation de l'API. Veuillez réessayer plus tard ou vérifier votre console Google Cloud."
-         elif "400 Bad Request" in error_message or "INVALID_ARGUMENT" in error_message:
-             user_error = f"La requête à l'API est invalide : {error_message}. L'image n'a peut-être pas été comprise."
-         elif "403 Forbidden" in error_message or "PERMISSION_DENIED" in error_message:
-             user_error = "Erreur d'authentification ou de permissions avec l'API. Vérifiez votre clé API."
-         elif "50" in error_message:  # Catch 5xx errors
-             user_error = f"Erreur serveur de l'API : {error_message}. Veuillez réessayer plus tard."
-         else:
-             user_error = f'Erreur de l\'API GenAI: {error_message}'
-
-         return jsonify({'error': user_error}), api_error.code if hasattr(api_error, 'code') else 500  # Return the appropriate status code if available
-
      except Exception as e:
-         # Log the full error for debugging
-         import traceback
-         print(f"Error in /solved endpoint: {e}")
-         print(traceback.format_exc())
-         # Provide a generic error message to the user
-         return jsonify({'error': f'Une erreur interne est survenue lors du traitement: {str(e)}'}), 500
-

  if __name__ == '__main__':
-     # Set host='0.0.0.0' to make it accessible on your network if needed
-     # Remove debug=True in production
-     app.run(debug=True, host='0.0.0.0', port=5000)  # Example port
-
- # --- END OF CORRECTED app.py (v3 - Fixes AttributeError) ---
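For context on what changed: the removed /solved implementation returned the whole solution as one JSON object of the form {'solution': ...}, so a client made a single blocking request. A minimal consumer sketch for that old contract, assuming the app runs at http://localhost:5000 and exercise.png is a local test image (both names are placeholders, not part of the commit):

    import requests

    # Placeholder URL and file name; adjust to your deployment.
    with open("exercise.png", "rb") as f:
        # The endpoint expects a multipart form field named 'image'.
        resp = requests.post("http://localhost:5000/solved", files={"image": f})

    resp.raise_for_status()
    print(resp.json()["solution"])  # the full solution arrives in one payload

After this commit, both routes stream server-sent events instead; see the consumer sketch after the new listing.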
 
 
 
app.py (new file side of the diff):

  from flask import Flask, render_template, request, jsonify, Response, stream_with_context
  from google import genai
  from google.genai import types
  import os
  from PIL import Image
  import io
  import base64
  import json
+ import logging
+
+ # Logging configuration
+ logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+ logger = logging.getLogger(__name__)

  app = Flask(__name__)

+ # Read the API key from the environment variables
  GOOGLE_API_KEY = os.environ.get("GEMINI_API_KEY")
  if not GOOGLE_API_KEY:
+     logger.error("La clé API Gemini n'est pas configurée dans les variables d'environnement")
+
+ # Initialize the Gemini client
+ try:
+     client = genai.Client(api_key=GOOGLE_API_KEY)
+ except Exception as e:
+     logger.error(f"Erreur lors de l'initialisation du client Gemini: {e}")

  @app.route('/')
  def index():
+     return render_template('index.html')

  @app.route('/free')
+ def maintenance():
      return render_template('maj.html')

+ def process_image(image_data):
+     """Process the image and return its base64 representation"""
      try:
+         img = Image.open(io.BytesIO(image_data))
          buffered = io.BytesIO()
          img.save(buffered, format="PNG")
          img_str = base64.b64encode(buffered.getvalue()).decode()
+         return img_str
+     except Exception as e:
+         logger.error(f"Erreur lors du traitement de l'image: {e}")
+         raise
+
+ def stream_gemini_response(model_name, image_str, thinking_budget=None):
+     """Generate and stream the Gemini model's response"""
+     mode = 'starting'
+
+     config_kwargs = {}
+     if thinking_budget:
+         config_kwargs["thinking_config"] = types.ThinkingConfig(thinking_budget=thinking_budget)
+
+     try:
+         response = client.models.generate_content_stream(
+             model=model_name,
+             contents=[
+                 {'inline_data': {'mime_type': 'image/png', 'data': image_str}},
+                 "Résous ça en français with rendering latex"
+             ],
+             config=types.GenerateContentConfig(**config_kwargs)
+         )
+
+         for chunk in response:
+             if not hasattr(chunk, 'candidates') or not chunk.candidates:
+                 continue
+             # Guard against chunks whose candidate carries no content parts
+             if not chunk.candidates[0].content or not chunk.candidates[0].content.parts:
+                 continue
+
+             for part in chunk.candidates[0].content.parts:
+                 if hasattr(part, 'thought') and part.thought:
+                     if mode != "thinking":
+                         yield f'data: {json.dumps({"mode": "thinking"})}\n\n'
+                         mode = "thinking"
+                 else:
+                     if mode != "answering":
+                         yield f'data: {json.dumps({"mode": "answering"})}\n\n'
+                         mode = "answering"
+
+                     if hasattr(part, 'text') and part.text:
+                         yield f'data: {json.dumps({"content": part.text})}\n\n'
+
+     except Exception as e:
+         logger.error(f"Erreur pendant la génération avec le modèle {model_name}: {e}")
+         yield f'data: {json.dumps({"error": str(e)})}\n\n'

+ @app.route('/solve', methods=['POST'])
+ def solve():
+     """Endpoint using the Pro model with extended thinking capability"""
+     if 'image' not in request.files:
+         return jsonify({'error': 'Aucune image fournie'}), 400
+
+     try:
+         image_data = request.files['image'].read()
+         img_str = process_image(image_data)
+
          return Response(
+             stream_with_context(stream_gemini_response(
+                 model_name="gemini-2.5-pro-exp-03-25",
+                 image_str=img_str,
+                 thinking_budget=8000
+             )),
              mimetype='text/event-stream',
              headers={
                  'Cache-Control': 'no-cache',
          )

      except Exception as e:
+         logger.error(f"Erreur dans /solve: {e}")
+         return jsonify({'error': str(e)}), 500

  @app.route('/solved', methods=['POST'])
  def solved():
+     """Endpoint using the Flash model (faster)"""
+     if 'image' not in request.files:
+         return jsonify({'error': 'Aucune image fournie'}), 400
+
      try:
          image_data = request.files['image'].read()
+         img_str = process_image(image_data)
+
+         return Response(
+             stream_with_context(stream_gemini_response(
+                 model_name="gemini-2.5-flash-preview-04-17",
+                 image_str=img_str
+             )),
+             mimetype='text/event-stream',
+             headers={
+                 'Cache-Control': 'no-cache',
+                 'X-Accel-Buffering': 'no'
+             }
          )

      except Exception as e:
+         logger.error(f"Erreur dans /solved: {e}")
+         return jsonify({'error': str(e)}), 500

  if __name__ == '__main__':
+     # In production, change these parameters
+     app.run(host='0.0.0.0', port=5000, debug=False)
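As of this commit, /solve and /solved both return a text/event-stream response whose events are single-line JSON objects carrying a "mode", "content", or "error" key, in the format yielded by stream_gemini_response above. A minimal streaming consumer sketch, again assuming http://localhost:5000 and a local exercise.png (placeholders):

    import json
    import requests

    # Placeholder URL and file name; adjust to your deployment.
    with open("exercise.png", "rb") as f:
        # stream=True keeps the connection open so events are read as they arrive.
        resp = requests.post("http://localhost:5000/solve",
                             files={"image": f}, stream=True)
    resp.raise_for_status()

    for line in resp.iter_lines(decode_unicode=True):
        if not line or not line.startswith("data: "):
            continue  # skip the blank separator lines between SSE events
        event = json.loads(line[len("data: "):])
        if "mode" in event:
            print(f"\n[{event['mode']}]")  # 'thinking' or 'answering'
        elif "content" in event:
            print(event["content"], end="", flush=True)  # incremental answer text
        elif "error" in event:
            print(f"\n[error] {event['error']}")

The same loop works against /solved; the endpoints differ only in the model used (Pro with a thinking budget of 8000 versus Flash).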