import os
import json
import mimetypes

import requests
from dotenv import load_dotenv
from flask import Flask, render_template, request, session, redirect, url_for, flash
from werkzeug.utils import secure_filename
import google.generativeai as genai

load_dotenv()

app = Flask(__name__)
app.config['SECRET_KEY'] = os.getenv('FLASK_SECRET_KEY', 'une-clé-secrète-par-défaut-pour-dev')

UPLOAD_FOLDER = 'temp'
ALLOWED_EXTENSIONS = {'txt', 'pdf', 'png', 'jpg', 'jpeg'}
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024

os.makedirs(UPLOAD_FOLDER, exist_ok=True)

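# Gemini setup: if anything below fails (e.g. GOOGLE_API_KEY missing), `model`
# stays None and the /chat route reports the error instead of crashing at import.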
try:
    genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))

    safety_settings = [
        {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE"},
        {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE"},
        {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_NONE"},
        {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_NONE"},
    ]

    model = genai.GenerativeModel(
        'gemini-1.5-flash',
        safety_settings=safety_settings,
        system_instruction="Tu es un assistant intelligent. Ton but est d'assister au mieux que tu peux. Tu as été créé par Aenir et tu t'appelles Mariam."
    )
    print("Gemini model loaded.")
except Exception as e:
    print(f"Error while configuring Gemini: {e}")
    model = None


def allowed_file(filename):
    """Return True if the filename has one of the allowed extensions."""
    return '.' in filename and \
           filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS


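# Serper (google.serper.dev) provides the optional web search grounding. The
# helper returns None on any failure so the chat route can simply answer
# without web context.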
def perform_web_search(query):
    """Run a web search through the Serper API and return the parsed JSON."""
    # The key is read from the environment; SERPER_API_KEY is the variable name
    # assumed here (the original error message expects it in .env).
    serper_api_key = os.getenv("SERPER_API_KEY")
    if not serper_api_key:
        print("SERPER_API_KEY missing from .env")
        return None
    search_url = "https://google.serper.dev/search"
    headers = {
        'X-API-KEY': serper_api_key,
        'Content-Type': 'application/json'
    }
    payload = json.dumps({"q": query})
    try:
        response = requests.post(search_url, headers=headers, data=payload, timeout=10)
        response.raise_for_status()
        data = response.json()
        print("Search results received.")
        return data
    except requests.exceptions.RequestException as e:
        print(f"Error during web search: {e}")
        return None
    except json.JSONDecodeError as e:
        print(f"Error decoding the JSON response from Serper: {e}")
        print(f"Response received: {response.text}")
        return None


def format_search_results(data):
    """Format the Serper search results into text for the Gemini prompt."""
    if not data:
        return "Aucun résultat de recherche trouvé."

    result = "Résultats de recherche web :\n"

    if 'knowledgeGraph' in data:
        kg = data['knowledgeGraph']
        result += "\n## Graphe de connaissances :\n"
        result += f"### {kg.get('title', '')} ({kg.get('type', '')})\n"
        result += f"{kg.get('description', '')}\n"
        if 'attributes' in kg:
            for attr, value in kg['attributes'].items():
                result += f"- {attr}: {value}\n"

    if 'answerBox' in data:
        ab = data['answerBox']
        result += "\n## Réponse directe :\n"
        result += f"{ab.get('title', '')}\n{ab.get('snippet') or ab.get('answer', '')}\n"

    if 'organic' in data and data['organic']:
        result += "\n## Résultats principaux :\n"
        for i, item in enumerate(data['organic'][:3], 1):
            result += f"{i}. **{item.get('title', 'N/A')}**\n"
            result += f"   {item.get('snippet', 'N/A')}\n"
            result += f"   Lien : {item.get('link', '#')}\n\n"

    if 'peopleAlsoAsk' in data and data['peopleAlsoAsk']:
        result += "## Questions fréquentes :\n"
        for i, item in enumerate(data['peopleAlsoAsk'][:2], 1):
            result += f"{i}. **{item.get('question', 'N/A')}**\n"

    return result


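# The session keeps plain dicts with 'role'/'text' keys; the Gemini SDK expects
# a list of {'role': ..., 'parts': [...]} entries and calls the assistant role 'model'.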
def prepare_gemini_history(chat_history):
    """Convert the history stored in the session to the format expected by the Gemini API."""
    gemini_history = []
    for message in chat_history:
        role = 'user' if message['role'] == 'user' else 'model'
        parts = [message['text']]
        if message.get('gemini_file'):
            parts.insert(0, message['gemini_file'])
        gemini_history.append({'role': role, 'parts': parts})
    return gemini_history


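# The chat page is fully server-rendered: the history, the web-search toggle and
# any pending error live in the Flask session and are re-read on every GET.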
@app.route('/', methods=['GET'])
def index():
    """Render the main chat page."""
    if 'chat_history' not in session:
        session['chat_history'] = []
    if 'web_search' not in session:
        session['web_search'] = False

    return render_template(
        'index.html',
        chat_history=session['chat_history'],
        web_search_active=session['web_search'],
        error=session.pop('error', None),
        processing_message=session.pop('processing', False)
    )


@app.route('/chat', methods=['POST'])
def chat():
    """Handle a chat form submission."""
    if not model:
        session['error'] = "Le modèle Gemini n'a pas pu être chargé. Vérifiez la clé API et la configuration."
        return redirect(url_for('index'))

    prompt = request.form.get('prompt', '').strip()
    session['web_search'] = 'web_search' in request.form
    file = request.files.get('file')
    uploaded_gemini_file = None
    user_message_content = {'role': 'user', 'text': prompt}

    if not prompt and not file:
        session['error'] = "Veuillez entrer un message ou uploader un fichier."
        return redirect(url_for('index'))

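    # If a file was attached, save it locally, then push it to the Gemini Files
    # API so it can be passed as a prompt part alongside the text.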
    if file and file.filename != '':
        if allowed_file(file.filename):
            try:
                filename = secure_filename(file.filename)
                filepath = os.path.join(app.config['UPLOAD_FOLDER'], filename)
                file.save(filepath)
                print(f"File temporarily saved to: {filepath}")

                print("Uploading to Gemini...")
                mime_type = mimetypes.guess_type(filepath)[0]
                if not mime_type:
                    mime_type = 'application/octet-stream'
                    print(f"Could not guess the mime type for {filename}; falling back to {mime_type}")

                gemini_file_obj = genai.upload_file(path=filepath, mime_type=mime_type)
                uploaded_gemini_file = gemini_file_obj
                user_message_content['gemini_file'] = uploaded_gemini_file
                user_message_content['text'] = f"[Fichier: {filename}]\n\n{prompt}"
                print(f"File {filename} uploaded to Gemini. Mime type: {mime_type}")
            except Exception as e:
                print(f"Error while processing or uploading the file: {e}")
                session['error'] = f"Erreur lors du traitement du fichier : {e}"

        else:
            session['error'] = "Type de fichier non autorisé."
            return redirect(url_for('index'))
    elif file and file.filename == '':
        # An empty filename just means no file was selected; nothing to do.
        pass

    if prompt or uploaded_gemini_file:
        # Only the text is stored in the session history; the uploaded File
        # object cannot be kept in the cookie session.
        display_history_message = {'role': 'user', 'text': user_message_content['text']}
        session['chat_history'].append(display_history_message)
        session.modified = True

        try:
            session['processing'] = True
            session.modified = True

            final_prompt_parts = []
            if uploaded_gemini_file:
                final_prompt_parts.append(uploaded_gemini_file)

            current_prompt_text = prompt

            if session['web_search'] and prompt:
                print("Web search enabled for prompt:", prompt)

                session['processing_web_search'] = True
                session.modified = True

                web_results = perform_web_search(prompt)
                if web_results:
                    formatted_results = format_search_results(web_results)
                    current_prompt_text = f"Question originale: {prompt}\n\n{formatted_results}\n\nBasé sur ces informations et ta connaissance générale, réponds à la question originale."
                    print("Prompt augmented with the search results.")
                else:
                    print("No web search results obtained (or the search failed).")

            final_prompt_parts.append(current_prompt_text)

            # History minus the user message that was just appended for display.
            gemini_history = prepare_gemini_history(session['chat_history'][:-1])

            print("\n--- Sending to Gemini ---")
            print(f"History sent: {len(gemini_history)} messages")
            print(f"Parts in current prompt: {len(final_prompt_parts)}")

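            # The model is called statelessly: the full history plus the new user
            # turn is sent to generate_content on every request.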
            full_conversation = gemini_history + [{'role': 'user', 'parts': final_prompt_parts}]
            response = model.generate_content(full_conversation)

            response_text = response.text
            print("\n--- Gemini response ---")
            print(response_text[:500] + ('...' if len(response_text) > 500 else ''))

            session['chat_history'].append({'role': 'assistant', 'text': response_text})
            session.modified = True

        except Exception as e:
            print(f"Error while calling Gemini: {e}")
            session['error'] = f"Une erreur s'est produite lors de la communication avec l'IA : {e}"

            # Drop the user message that was just added so the failed turn is not replayed.
            if session['chat_history'] and session['chat_history'][-1]['role'] == 'user':
                session['chat_history'].pop()
                session.modified = True

        finally:
            session['processing'] = False
            session.pop('processing_web_search', None)
            session.modified = True

    return redirect(url_for('index'))


@app.route('/clear', methods=['POST'])
def clear_chat():
    """Clear the conversation history."""
    session.pop('chat_history', None)
    session.pop('web_search', None)
    print("Chat history cleared.")
    return redirect(url_for('index'))


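# Development settings: Flask's built-in server with debug=True, listening on
# all interfaces on port 5001. Not intended for production use.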
if __name__ == '__main__':
    app.run(debug=True, host='0.0.0.0', port=5001)