Spaces:
Sleeping
Sleeping
File size: 4,103 Bytes
cf07f0f 36fb175 cf07f0f |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 |
from flask import Flask, render_template, request, jsonify, Response, stream_with_context
from google import genai
import logging
from pathlib import Path
import sys
from typing import Generator
import json
import os
# Gemini API key read from the environment.
# NOTE(review): may be None if GEMINI_API_KEY is unset — GeminiClient(api_key)
# below will then fail at startup; consider failing fast with a clear message.
api_key = os.environ.get("GEMINI_API_KEY")

# Logging configuration: DEBUG level, mirrored to stdout and to app.log.
logging.basicConfig(
    level=logging.DEBUG,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.StreamHandler(sys.stdout),
        logging.FileHandler(Path('app.log'))
    ]
)
# Module-level logger, named after this module per convention.
logger = logging.getLogger(__name__)

# Flask application instance.
app = Flask(__name__)
class GeminiClient:
    """Thin wrapper around the google-genai client for streaming generation."""

    def __init__(self, api_key: str):
        # Client is created eagerly; a failure here aborts app startup.
        self.client = None
        self.init_client(api_key)

    def init_client(self, api_key: str) -> None:
        """Initialise the underlying genai client.

        Raises:
            RuntimeError: if the client cannot be constructed (bad key, etc.).
        """
        try:
            self.client = genai.Client(
                api_key=api_key,
                http_options={'api_version': 'v1alpha'}
            )
        except Exception as e:
            logger.error(f"Erreur d'initialisation du client Gemini: {e}")
            raise RuntimeError(f"Impossible d'initialiser le client Gemini: {e}")

    def get_response(self, question: str, model_name: str) -> Generator:
        """Start a streaming generation for *question* on *model_name*.

        Returns the chunk generator produced by the SDK.

        Raises:
            RuntimeError: if the client was never initialised.
            Exception: any SDK error, logged then re-raised.
        """
        if not self.client:
            raise RuntimeError("Client Gemini non initialisé")
        try:
            response = self.client.models.generate_content_stream(
                model=model_name,
                # BUG FIX: generate_content_stream takes no top-level
                # `system_instruction` kwarg — it must live inside `config`
                # (GenerateContentConfig.system_instruction).
                config={
                    'system_instruction': "Tu es un assistant intelligent. ton but est d'assister au mieux que tu peux. tu as été créé par Aenir et tu t'appelles Mariam.",
                    'thinking_config': {'include_thoughts': True}
                },
                contents=[question]
            )
            return response
        except Exception as e:
            logger.error(f"Erreur lors de la génération de la réponse: {e}")
            raise
def stream_response(response: Generator):
    """Translate Gemini stream chunks into newline-delimited JSON events.

    Each yielded line is a JSON object with a 'type' ('thinking', 'answer',
    or 'error') and the *cumulative* text accumulated so far for that type —
    the client re-renders the full text on every event.

    Chunks without candidates, contents without parts, and empty-text parts
    are skipped silently. Any exception during iteration is logged and a
    single 'error' event is emitted.
    """
    # Removed dead local `mode = 'starting'` — it was never read or written
    # again in the original implementation.
    thinking_text = ""
    answer_text = ""
    try:
        for chunk in response:
            if not (hasattr(chunk, 'candidates') and chunk.candidates):
                continue
            content = chunk.candidates[0].content
            if not hasattr(content, 'parts'):
                continue
            for part in content.parts:
                text = getattr(part, 'text', '')
                if not text:
                    # Skip parts that carry no renderable text.
                    continue
                if hasattr(part, 'thought') and part.thought:
                    thinking_text += text
                    yield json.dumps({
                        'type': 'thinking',
                        'content': thinking_text
                    }) + '\n'
                else:
                    answer_text += text
                    yield json.dumps({
                        'type': 'answer',
                        'content': answer_text
                    }) + '\n'
    except Exception as e:
        logger.error(f"Erreur dans le streaming de la réponse: {e}")
        yield json.dumps({
            'type': 'error',
            'content': "Une erreur est survenue lors de l'analyse."
        }) + '\n'
gemini_client = GeminiClient(api_key)
@app.route('/')
def home():
    """Serve the chat front-end page."""
    page = render_template('index.html')
    return page
@app.route('/ask', methods=['POST'])
def ask():
    """POST /ask — stream a Gemini answer for the submitted question.

    Expects a JSON body {"question": "..."}; returns a streamed
    text/event-stream of newline-delimited JSON events, or a JSON error
    (400 for a missing/invalid question, 500 for generation failures).
    """
    # Robustness fix: request.json raises on non-JSON bodies and a missing
    # 'question' would previously forward None to the model. Validate first.
    payload = request.get_json(silent=True) or {}
    question = payload.get('question')
    if not question:
        return jsonify({'error': "Aucune question fournie."}), 400
    model_name = "gemini-2.0-flash-thinking-exp-01-21"
    try:
        response = gemini_client.get_response(question, model_name)
        return Response(
            stream_with_context(stream_response(response)),
            mimetype='text/event-stream'
        )
    except Exception as e:
        logger.error(f"Erreur lors de la génération: {e}", exc_info=True)
        return jsonify({'error': "Une erreur est survenue. Veuillez réessayer."}), 500
if __name__ == '__main__':
    # Dropped the stray trailing '|' artifact from the original line, which
    # would be a syntax error if taken literally.
    # NOTE(review): debug=True enables the Werkzeug debugger/reloader —
    # disable it for any non-local deployment.
    app.run(debug=True)