# --- START OF CORRECTED_AGAIN app.py ---

from flask import Flask, render_template, request, jsonify, Response, stream_with_context
# Using the import and client initialization style from the original code
from google import genai
from google.genai import types
# Import the broad base exception from google.api_core if it is installed;
# otherwise fall back to a local subclass so the `except GoogleAPIError`
# clause in /solved below stays valid either way.
try:
    from google.api_core.exceptions import GoogleAPIError
except ImportError:
    class GoogleAPIError(Exception):
        pass
import os
from PIL import Image
import io
import base64
import json
import traceback  # traceback gives fuller error logs

app = Flask(__name__)

GOOGLE_API_KEY = os.environ.get("GEMINI_API_KEY")

# Warn early if the key is missing (e.g. set it with: export GEMINI_API_KEY="...").
# Client creation may still succeed, but every API call will fail without it.
if not GOOGLE_API_KEY:
    print("WARNING: GEMINI_API_KEY environment variable not set. API calls will likely fail.")

# Client initialization, as in the original code
client = genai.Client(
    api_key=GOOGLE_API_KEY,
)

# --- Routes for index and potentially the Pro version (kept for context) ---
@app.route('/')
def index():
    # Assuming index.html is for the Pro version or another page
    return render_template('index.html') # Or redirect to /free if it's the main page

@app.route('/free')
def free_index():
    # This route serves the free-version page
    return render_template('maj.html')

# --- Original /solve route (Pro version, streaming) - Kept as is ---
@app.route('/solve', methods=['POST'])
def solve():
    try:
        if 'image' not in request.files or not request.files['image'].filename:
            return jsonify({'error': 'No image file provided'}), 400

        image_data = request.files['image'].read()
        if not image_data:
            return jsonify({'error': 'Empty image file provided'}), 400

        try:
            img = Image.open(io.BytesIO(image_data))
        except Exception as img_err:
            return jsonify({'error': f'Invalid image file: {str(img_err)}'}), 400

        buffered = io.BytesIO()
        img.save(buffered, format="PNG")
        img_str = base64.b64encode(buffered.getvalue()).decode() # Keep base64 for this route

        def generate():
            mode = 'starting'
            try:
                response = client.models.generate_content_stream(
                    model="gemini-2.5-pro-exp-03-25", # Your original model name
                    contents=[
                        {'inline_data': {'mime_type': 'image/png', 'data': img_str}},
                        """Résous cet exercice en français avec du LaTeX.
                        Si nécessaire, utilise du code Python pour effectuer les calculs complexes.
                        Présente ta solution de façon claire et espacée."""
                    ],
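                    # thinking_config reserves a token budget for the model's internal reasoning before it answers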
                    config=types.GenerateContentConfig(
                        thinking_config=types.ThinkingConfig(
                            thinking_budget=8000
                        ),
                        tools=[types.Tool(
                            code_execution=types.ToolCodeExecution()
                        )]
                    )
                )

                for chunk in response:
                    # Process each streamed chunk, following the original streaming logic
                    if chunk.candidates and chunk.candidates[0].content and chunk.candidates[0].content.parts:
                        for part in chunk.candidates[0].content.parts:
                            if hasattr(part, 'thought') and part.thought:
                                if mode != "thinking":
                                    yield 'data: ' + json.dumps({"mode": "thinking"}) + '\n\n'
                                    mode = "thinking"
                            elif hasattr(part, 'executable_code') and part.executable_code:
                                if mode != "executing_code":
                                    yield 'data: ' + json.dumps({"mode": "executing_code"}) + '\n\n'
                                    mode = "executing_code"
                                code_block_open = "```python\n"
                                code_block_close = "\n```"
                                yield 'data: ' + json.dumps({"content": code_block_open + part.executable_code.code + code_block_close}) + '\n\n'
                            elif hasattr(part, 'code_execution_result') and part.code_execution_result:
                                if mode != "code_result":
                                    yield 'data: ' + json.dumps({"mode": "code_result"}) + '\n\n'
                                    mode = "code_result"
                                result_block_open = "Résultat d'exécution:\n```\n"
                                result_block_close = "\n```"
                                yield 'data: ' + json.dumps({"content": result_block_open + part.code_execution_result.output + result_block_close}) + '\n\n'
                            else: # Assuming it's text
                                if mode != "answering":
                                    yield 'data: ' + json.dumps({"mode": "answering"}) + '\n\n'
                                    mode = "answering"
                                if hasattr(part, 'text') and part.text:
                                    yield 'data: ' + json.dumps({"content": part.text}) + '\n\n'
                    # Handle prompt feedback or finish reasons in streaming
                    elif chunk.prompt_feedback and chunk.prompt_feedback.block_reason:
                        error_msg = f"Prompt blocked: {chunk.prompt_feedback.block_reason.name}"
                        print(error_msg)
                        yield 'data: ' + json.dumps({"error": error_msg}) + '\n\n'
                        break # Stop processing on block
                    elif chunk.candidates and chunk.candidates[0].finish_reason:
                        finish_reason = chunk.candidates[0].finish_reason.name
                        if finish_reason != 'STOP':
                            error_msg = f"Generation finished early: {finish_reason}"
                            print(error_msg)
                            yield 'data: ' + json.dumps({"error": error_msg}) + '\n\n'
                        break # Stop processing on finish reason


            except Exception as e:
                print(f"Error during streaming generation: {e}")
                yield 'data: ' + json.dumps({"error": str(e)}) + '\n\n'

        return Response(
            stream_with_context(generate()),
            mimetype='text/event-stream',
            headers={
                'Cache-Control': 'no-cache',
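                # disables proxy buffering (e.g. nginx) so events reach the client immediately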
                'X-Accel-Buffering': 'no'
            }
        )

    except Exception as e:
        # Log the full error for debugging
        print(f"Error in /solve endpoint (setup or initial request): {e}")
        print(traceback.format_exc())
        # Return JSON error for fetch API if streaming setup fails
        return jsonify({'error': f'Failed to process request: {str(e)}'}), 500

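# Illustrative sketch (defined but never called): one way a Python client
# could consume the SSE stream emitted by /solve above. The URL, file name,
# and the `requests` dependency are assumptions for this example; each event
# is a `data: <json>` line carrying a "mode", "content", or "error" key,
# mirroring the yields in generate() above.
def example_consume_solve_stream(url="http://localhost:5000/solve", path="exercise.png"):
    import requests  # assumed available; not a dependency of the app itself
    with open(path, "rb") as f:
        resp = requests.post(url, files={"image": f}, stream=True)
    for raw in resp.iter_lines(decode_unicode=True):
        if raw and raw.startswith("data: "):
            event = json.loads(raw[len("data: "):])
            if "mode" in event:
                print(f"\n[mode -> {event['mode']}]")
            elif "content" in event:
                print(event["content"], end="")
            elif "error" in event:
                print(f"\nError: {event['error']}")
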

# --- MODIFIED /solved route (Free version, non-streaming) - Corrected Exception Handling ---
@app.route('/solved', methods=['POST'])
def solved():
    try:
        if 'image' not in request.files or not request.files['image'].filename:
            return jsonify({'error': 'No image file provided'}), 400

        image_data = request.files['image'].read()
        if not image_data:
            return jsonify({'error': 'Empty image file provided'}), 400

        try:
            img = Image.open(io.BytesIO(image_data))
        except Exception as img_err:
            return jsonify({'error': f'Invalid image file: {str(img_err)}'}), 400

        buffered = io.BytesIO() # Keep BytesIO
        img.save(buffered, format="PNG")
        img_str = base64.b64encode(buffered.getvalue()).decode() # Keep base64

        # Use the non-streaming generate_content method
        model_name = "gemini-2.5-flash-preview-04-17" # Your original free model name

        contents = [
            {'inline_data': {'mime_type': 'image/png', 'data': img_str}},  # inline_data carries the base64 image
            """Résous cet exercice en français en utilisant le format LaTeX pour les mathématiques si nécessaire.
            Si tu dois effectuer des calculs complexes, utilise l'outil d'exécution de code Python fourni.
            Présente ta solution de manière claire et bien structurée. Formate le code Python dans des blocs délimités par ```python ... ``` et les résultats d'exécution dans des blocs ``` ... ```."""
        ]

        # Call the non-streaming generation method using the original client object
        response = client.models.generate_content(
            model=model_name,
            contents=contents,
            config=types.GenerateContentConfig(
                tools=[types.Tool(
                    code_execution=types.ToolCodeExecution()
                )]
            )
            # Note: generate_content (rather than generate_content_stream) returns the full response at once
        )

        # Aggregate the response parts into a single string
        full_solution = ""
        # Check if the response has candidates and parts
        if response.candidates and response.candidates[0].content and response.candidates[0].content.parts:
            for part in response.candidates[0].content.parts:
                if hasattr(part, 'text') and part.text:
                    full_solution += part.text
                elif hasattr(part, 'executable_code') and part.executable_code:
                    full_solution += f"\n\n```python\n{part.executable_code.code}\n```\n\n"
                # Execution results are exposed via part.code_execution_result.output
                elif hasattr(part, 'code_execution_result') and hasattr(part.code_execution_result, 'output'):
                    output_str = part.code_execution_result.output
                    full_solution += f"\n\n**Résultat d'exécution:**\n```\n{output_str}\n```\n\n"
                # Note: 'thought' parts are ignored

        # Handle cases where the response is empty or blocked
        if not full_solution.strip():
             # Check for prompt feedback blocking or finish reasons
             if response.prompt_feedback and response.prompt_feedback.block_reason:
                 block_reason = response.prompt_feedback.block_reason.name
                 full_solution = f"Le contenu a été bloqué pour des raisons de sécurité: {block_reason}."
                 print(f"Generation blocked by prompt feedback: {block_reason}") # Log it

             elif response.candidates and response.candidates[0].finish_reason:
                 finish_reason = response.candidates[0].finish_reason.name
                 # Provide specific messages for known non-STOP finish reasons
                 if finish_reason == 'SAFETY':
                     full_solution = "Désolé, je ne peux pas fournir de réponse en raison de restrictions de sécurité."
                 elif finish_reason == 'RECITATION':
                      full_solution = "Désolé, la réponse ne peut être fournie en raison de la politique sur les récitations."
                 else:
                      # Generic message for other finish reasons (e.g., MAX_TOKENS)
                      full_solution = f"La génération s'est terminée prématurément ({finish_reason}). Le problème est peut-être trop complexe ou nécessite plus de tokens."
                 print(f"Generation finished early: {finish_reason}") # Log it
             else:
                 # Fallback if no specific reason is found but the response is empty
                 full_solution = "Désolé, je n'ai pas pu générer de solution pour cette image."
                 print("Generation resulted in empty content without specific block/finish reason.")


        # Return the complete solution as JSON
        return jsonify({'solution': full_solution.strip()})

    # --- Corrected Exception Handling ---
    # GoogleAPIError (imported with a fallback near the top of this file) is
    # the broad base class for errors raised by the Google client libraries,
    # so catching it here no longer depends on the exact SDK layout.
    except GoogleAPIError as api_error:
        print(f"Google API Error caught: {api_error}")
        # Surface useful detail to the client without leaking a full traceback
        if hasattr(api_error, 'message') and api_error.message:
            error_message = f"Erreur API: {api_error.message}"
        else:
            error_message = f"Erreur API: {str(api_error)}"

        # Safety blocks stem from the submitted content, so answer with a
        # user-friendly message and a 400 status
        if "blocked" in str(api_error).lower() or "safety" in str(api_error).lower():
            return jsonify({'error': "Le contenu a été bloqué par l'API pour des raisons de sécurité."}), 400

        return jsonify({'error': error_message}), 500
    except Exception as e:
        # Catch any other unexpected errors during processing or API call
        print(f"An unexpected error occurred in /solved endpoint: {e}")
        # Log the full traceback for server-side debugging
        print(traceback.format_exc())
        # Provide a generic error message to the user
        return jsonify({'error': f'Une erreur interne est survenue: {str(e)}'}), 500

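# Illustrative sketch (defined but never called): exercising the non-streaming
# /solved endpoint. The URL and file name are assumptions; the endpoint
# returns one JSON object with either a "solution" or an "error" key,
# matching the jsonify() calls above.
def example_call_solved(url="http://localhost:5000/solved", path="exercise.png"):
    import requests  # assumed available; not a dependency of the app itself
    with open(path, "rb") as f:
        resp = requests.post(url, files={"image": f})
    data = resp.json()
    print(data.get("solution") or data.get("error"))
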

if __name__ == '__main__':
    # Set host='0.0.0.0' to make it accessible on your network if needed
    # Remove debug=True in production
    app.run(debug=True, host='0.0.0.0', port=5000) # Example port

# --- END OF CORRECTED_AGAIN app.py ---