# --- START OF CORRECTED app.py ---

from flask import Flask, render_template, request, jsonify, Response, stream_with_context
# Revert to the original google.genai import and usage
from google import genai
# Make sure types is imported from google.genai if needed for specific model config
from google.genai import types
from google.genai import errors as genai_errors  # SDK error classes used in the except clauses below
import os
from PIL import Image
import io
import base64
import json
import re  # regex utilities, kept for potential text post-processing (currently unused)

app = Flask(__name__)

GOOGLE_API_KEY = os.environ.get("GEMINI_API_KEY")

# Use the original client initialization
client = genai.Client(
    api_key=GOOGLE_API_KEY,
)

# Ensure API key is available (good practice)
if not GOOGLE_API_KEY:
    print("WARNING: GEMINI_API_KEY environment variable not set.")
    # Handle this case appropriately, e.g., exit or show an error on the page
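
# A stricter alternative (a sketch; STRICT_API_KEY is a hypothetical opt-in flag,
# not part of the original app): refuse to start rather than serve a degraded app.
if os.environ.get("STRICT_API_KEY") == "1" and not GOOGLE_API_KEY:
    raise RuntimeError("GEMINI_API_KEY must be set before starting the app")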

# --- Routes for index and potentially the Pro version (kept for context) ---
@app.route('/')
def index():
    # Assuming index.html is for the Pro version or another page
    return render_template('index.html') # Or redirect to /free if it's the main page

@app.route('/free')
def indexx():
    # This route serves the free version HTML
    return render_template('maj.html')
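
# Both /solve and /solved repeat the same upload validation. A shared helper like
# this could factor it out (a sketch, not wired into the routes below;
# load_image_from_request is a name introduced here, not part of the original app):
def load_image_from_request(req):
    """Return (PIL image, None) on success, or (None, (response, status)) on error."""
    if 'image' not in req.files or not req.files['image'].filename:
        return None, (jsonify({'error': 'No image file provided'}), 400)
    data = req.files['image'].read()
    if not data:
        return None, (jsonify({'error': 'Empty image file provided'}), 400)
    try:
        return Image.open(io.BytesIO(data)), None
    except Exception as img_err:
        return None, (jsonify({'error': f'Invalid image file: {img_err}'}), 400)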

# --- Original /solve route (Pro version, streaming) - Kept for reference ---
# If you want the Pro version (/solve) to be non-streaming as well, apply the same changes as in /solved below
@app.route('/solve', methods=['POST'])
def solve():
    try:
        if 'image' not in request.files or not request.files['image'].filename:
            return jsonify({'error': 'No image file provided'}), 400

        image_data = request.files['image'].read()
        if not image_data:
            return jsonify({'error': 'Empty image file provided'}), 400

        try:
            img = Image.open(io.BytesIO(image_data))
        except Exception as img_err:
            return jsonify({'error': f'Invalid image file: {str(img_err)}'}), 400

        buffered = io.BytesIO()
        img.save(buffered, format="PNG")
        img_str = base64.b64encode(buffered.getvalue()).decode() # Keep base64 for this route as in original

        def generate():
            mode = 'starting'
            try:
                response = client.models.generate_content_stream(
                    # Use the model name for the Pro version as in your original code
                    model="gemini-2.5-pro-exp-03-25", # Your original model name
                    contents=[
                        # Pass image as inline_data with base64 as in your original code
                        {'inline_data': {'mime_type': 'image/png', 'data': img_str}},
                        """Résous cet exercice en français avec du LaTeX.
                        Si nécessaire, utilise du code Python pour effectuer les calculs complexes.
                        Présente ta solution de façon claire et espacée."""
                    ],
                    config=types.GenerateContentConfig(
                        thinking_config=types.ThinkingConfig(
                            thinking_budget=8000
                        ),
                        tools=[types.Tool(
                            code_execution=types.ToolCodeExecution()
                        )]
                    )
                )

                # Process the streaming response as you had it
                for chunk in response:
                    if chunk.candidates and chunk.candidates[0].content and chunk.candidates[0].content.parts:
                        for part in chunk.candidates[0].content.parts:
                            # Keep your original logic for emitting different modes in the stream
                            if hasattr(part, 'thought') and part.thought:
                                if mode != "thinking":
                                    yield 'data: ' + json.dumps({"mode": "thinking"}) + '\n\n'
                                    mode = "thinking"
                            elif hasattr(part, 'executable_code') and part.executable_code:
                                if mode != "executing_code":
                                    yield 'data: ' + json.dumps({"mode": "executing_code"}) + '\n\n'
                                    mode = "executing_code"
                                code_block_open = "```python\n"
                                code_block_close = "\n```"
                                yield 'data: ' + json.dumps({"content": code_block_open + part.executable_code.code + code_block_close}) + '\n\n'
                            elif hasattr(part, 'code_execution_result') and part.code_execution_result:
                                if mode != "code_result":
                                    yield 'data: ' + json.dumps({"mode": "code_result"}) + '\n\n'
                                    mode = "code_result"
                                result_block_open = "Résultat d'exécution:\n```\n"
                                result_block_close = "\n```"
                                yield 'data: ' + json.dumps({"content": result_block_open + part.code_execution_result.output + result_block_close}) + '\n\n'
                            else: # Assuming it's text
                                if mode != "answering":
                                    yield 'data: ' + json.dumps({"mode": "answering"}) + '\n\n'
                                    mode = "answering"
                                if hasattr(part, 'text') and part.text:
                                    yield 'data: ' + json.dumps({"content": part.text}) + '\n\n'
                    # Handle cases where a chunk might not have candidates/parts immediately, or handle errors
                    elif chunk.prompt_feedback and chunk.prompt_feedback.block_reason:
                        error_msg = f"Prompt blocked: {chunk.prompt_feedback.block_reason.name}"
                        print(error_msg)
                        yield 'data: ' + json.dumps({"error": error_msg}) + '\n\n'
                        break # Stop processing on block
                    elif chunk.candidates and chunk.candidates[0].finish_reason:
                        finish_reason = chunk.candidates[0].finish_reason.name
                        if finish_reason != 'STOP':
                            error_msg = f"Generation finished early: {finish_reason}"
                            print(error_msg)
                            yield 'data: ' + json.dumps({"error": error_msg}) + '\n\n'
                        break # Stop processing on finish reason


            except Exception as e:
                print(f"Error during streaming generation: {e}")
                yield 'data: ' + json.dumps({"error": str(e)}) + '\n\n'

        return Response(
            stream_with_context(generate()),
            mimetype='text/event-stream',
            headers={
                'Cache-Control': 'no-cache',
                'X-Accel-Buffering': 'no'
            }
        )

    except Exception as e:
        print(f"Error in /solve endpoint: {e}")
        # Return JSON error for fetch API if streaming setup fails
        return jsonify({'error': f'Failed to process request: {str(e)}'}), 500
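

# Illustrative consumer for the /solve event stream (a sketch; assumes the app
# runs on localhost:5000 and that the third-party `requests` package is
# installed). It is never called by the app itself -- it only documents the
# 'data: {...}' event format the generator above emits.
def example_consume_solve_stream(image_path):
    import requests
    with open(image_path, 'rb') as f:
        resp = requests.post('http://localhost:5000/solve',
                             files={'image': f}, stream=True)
    for raw_line in resp.iter_lines():
        if raw_line.startswith(b'data: '):
            event = json.loads(raw_line[len(b'data: '):])
            if 'mode' in event:        # phase switch: thinking / executing_code / ...
                print(f"[mode -> {event['mode']}]")
            elif 'content' in event:   # incremental solution text or code block
                print(event['content'], end='')
            elif 'error' in event:     # server-side failure mid-stream
                print(f"\n[error: {event['error']}]")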


# --- MODIFIED /solved route (Free version, non-streaming) using original SDK syntax ---
@app.route('/solved', methods=['POST'])
def solved():
    try:
        if 'image' not in request.files or not request.files['image'].filename:
            return jsonify({'error': 'No image file provided'}), 400

        image_data = request.files['image'].read()
        if not image_data:
            return jsonify({'error': 'Empty image file provided'}), 400

        try:
            img = Image.open(io.BytesIO(image_data))
        except Exception as img_err:
            return jsonify({'error': f'Invalid image file: {str(img_err)}'}), 400

        buffered = io.BytesIO()
        img.save(buffered, format="PNG")
        img_str = base64.b64encode(buffered.getvalue()).decode()

        # Use the non-streaming generate_content method
        # Use the model name for the Free version as in your original code
        model_name = "gemini-2.5-flash-preview-04-17" # Your original free model name

        # Prepare the content using inline_data with base64 string as in your original code
        contents = [
             {'inline_data': {'mime_type': 'image/png', 'data': img_str}},
            """Résous cet exercice en français en utilisant le format LaTeX pour les mathématiques si nécessaire.
            Si tu dois effectuer des calculs complexes, utilise l'outil d'exécution de code Python fourni.
            Présente ta solution de manière claire et bien structurée. Formate le code Python dans des blocs délimités par ```python ... ``` et les résultats d'exécution dans des blocs ``` ... ```."""
        ]

        # Call the non-streaming generation method using the original client object
        response = client.models.generate_content(
            model=model_name,
            contents=contents,
            config=types.GenerateContentConfig(
                # thinking_config from the Pro route is omitted here (it is optional and independent of streaming)
                tools=[types.Tool(
                    code_execution=types.ToolCodeExecution()
                )]
            )
            # Note: No stream=True here for non-streaming
        )

        # Aggregate the response parts into a single string
        full_solution = ""
        # Check if the response has candidates and parts
        if response.candidates and response.candidates[0].content and response.candidates[0].content.parts:
            for part in response.candidates[0].content.parts:
                # Process parts based on attribute existence
                if hasattr(part, 'text') and part.text:
                    full_solution += part.text
                elif hasattr(part, 'executable_code') and part.executable_code:
                    # Format code block using Markdown, as the frontend expects this
                    full_solution += f"\n\n```python\n{part.executable_code.code}\n```\n\n"
                # Check for the result attribute name based on your SDK version's structure
                # It might be `code_execution_result` as in your original code, or nested
                elif hasattr(part, 'code_execution_result') and hasattr(part.code_execution_result, 'output'):
                    # Format execution result block using Markdown
                    output_str = part.code_execution_result.output
                    full_solution += f"\n\n**Résultat d'exécution:**\n```\n{output_str}\n```\n\n"
                # Add other potential part types if necessary (e.g., function_call)
                # Note: 'thought' parts are ignored as requested

        # Ensure we have some content, otherwise return a message
        if not full_solution.strip():
            # Check for finish reasons or safety ratings
            finish_reason = response.candidates[0].finish_reason.name if response.candidates and response.candidates[0].finish_reason else "UNKNOWN"
            safety_ratings = response.candidates[0].safety_ratings if response.candidates else []
            print(f"Generation finished with reason: {finish_reason}, Safety: {safety_ratings}")  # Log details
            if finish_reason == 'SAFETY':
                full_solution = "Désolé, je ne peux pas fournir de réponse en raison de restrictions de sécurité."
            elif finish_reason == 'RECITATION':
                full_solution = "Désolé, la réponse ne peut être fournie en raison de la politique sur les récitations."
            # Also check prompt feedback for blocking reasons
            elif response.prompt_feedback and response.prompt_feedback.block_reason:
                block_reason = response.prompt_feedback.block_reason.name
                full_solution = f"Le contenu a été bloqué pour des raisons de sécurité: {block_reason}."
            else:
                full_solution = "Désolé, je n'ai pas pu générer de solution complète pour cette image."


        # Return the complete solution as JSON
        # Use strip() to remove leading/trailing whitespace from the full solution
        return jsonify({'solution': full_solution.strip()})

    # Catch API errors from the google-genai SDK. Its error classes live in
    # google.genai.errors (APIError is the base class); genai.core.exceptions
    # does not exist in this package and would itself raise here.
    except genai_errors.APIError as api_error:
        print(f"GenAI API Error: {api_error}")
        # Check if the error response has details, like safety block
        error_detail = str(api_error)
        if "safety" in error_detail.lower():
            return jsonify({'error': 'Le contenu a été bloqué pour des raisons de sécurité par l\'API.'}), 400
        elif "blocked" in error_detail.lower():
             return jsonify({'error': 'La requête a été bloquée par l\'API.'}), 400
        else:
             return jsonify({'error': f'Erreur de l\'API GenAI: {error_detail}'}), 500
    except Exception as e:
        # Log the full error for debugging
        import traceback
        print(f"Error in /solved endpoint: {e}")
        print(traceback.format_exc())
        # Provide a generic error message to the user
        return jsonify({'error': f'Une erreur interne est survenue lors du traitement: {str(e)}'}), 500


if __name__ == '__main__':
    # Set host='0.0.0.0' to make it accessible on your network if needed
    # Remove debug=True in production
    app.run(debug=True, host='0.0.0.0', port=5000) # Example port
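
# For production, a WSGI server is the usual choice instead of the dev server,
# e.g. (assuming gunicorn is installed): gunicorn -w 4 -b 0.0.0.0:5000 app:app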

# --- END OF CORRECTED app.py ---