Mariamm1 / app.py
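# Mariam M-0: Streamlit app that sends a photographed math problem to the
# Gemini API and streams back the model's reasoning and its solution.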
import streamlit as st
from google import genai
from google.genai import types
from PIL import Image
import io
import json
# Configure the Gemini API client
GOOGLE_API_KEY = "YOUR_API_KEY"  # Replace with your API key
client = genai.Client(api_key=GOOGLE_API_KEY)
# Generator that streams the model's answer to the math problem image
def solve_math_problem(image_data):
    img = Image.open(io.BytesIO(image_data))

    # Re-encode the image as PNG bytes before sending it to the Gemini API
    buffered = io.BytesIO()
    img.save(buffered, format="PNG")

    # Build the request contents: the image followed by the instruction
    contents = [
        types.Part.from_bytes(data=buffered.getvalue(), mime_type="image/png"),
        "Résous ce problème.",
    ]

    # Ask the model to include its reasoning ("thoughts") when available
    config = types.GenerateContentConfig(
        thinking_config=types.ThinkingConfig(include_thoughts=True)
    )

    response_stream = client.models.generate_content_stream(
        model="gemini-2.0-flash-thinking-exp-01-21",
        contents=contents,
        config=config,
    )

    # Yield each streamed part as a small JSON payload so the UI can tell
    # the reasoning ("thoughts") apart from the final answer
    for chunk in response_stream:
        candidate = chunk.candidates[0] if chunk.candidates else None
        if candidate is None or candidate.content is None or not candidate.content.parts:
            continue
        for part in candidate.content.parts:
            if not part.text:
                continue
            if part.thought:
                yield json.dumps({"thoughts": part.text})
            else:
                yield json.dumps({"answer": part.text})
# Streamlit interface
st.set_page_config(page_title="Mariam M-0", page_icon="🧮", layout="centered")
st.title("Mariam M-0")
st.subheader("Solution Mathématique Intelligente")
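# Upload widget for the image of the math problem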
uploaded_file = st.file_uploader("Déposez votre image ici", type=["jpg", "jpeg", "png"])
if uploaded_file is not None:
    image_data = uploaded_file.getvalue()
    st.image(image_data, caption="Image téléchargée.", use_column_width=True)

    if st.button("Résoudre le problème"):
        with st.spinner("Analyse en cours..."):
            # Placeholders that are updated as the streamed text arrives
            thoughts_container = st.empty()
            answer_container = st.empty()
            full_thoughts = ""
            full_answer = ""
            for response_text in solve_math_problem(image_data):
                try:
                    response_json = json.loads(response_text)
                    # Check whether the payload carries 'thoughts' or 'answer' text
                    if 'thoughts' in response_json:
                        thoughts_content = response_json['thoughts']
                        full_thoughts += thoughts_content + " \n"
                        thoughts_container.markdown(f"**Processus de Réflexion:**\n\n{full_thoughts}")
                    elif 'answer' in response_json:
                        answer_content = response_json['answer']
                        full_answer += answer_content + " \n"
                        answer_container.markdown(f"**Solution:**\n\n{full_answer}")
                except json.JSONDecodeError:
                    # Skip any chunk that is not a valid JSON payload
                    print(f"Could not parse as JSON: {response_text}")
                    continue

        st.success("Problème résolu!")