Update app.py

app.py CHANGED
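In brief, this commit streams Gemini's replies instead of waiting for the full response: it enables the `code_execution` tool on the model, adds an async `stream_response` generator built on `send_message_async(..., stream=True)`, replaces the blocking `send_message` call with a chunk-by-chunk placeholder update, and drops leftover debug `print` calls.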
@@ -4,6 +4,8 @@ import os
 from dotenv import load_dotenv
 import http.client
 import json
+import asyncio
+from typing import AsyncGenerator
 
 load_dotenv()
 
@@ -17,7 +19,8 @@ safety_settings = [
     {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_NONE"},
 ]
 
-model = genai.GenerativeModel('gemini-2.0-flash-exp',
+model = genai.GenerativeModel('gemini-2.0-flash-exp',
+                              tools='code_execution',
                               safety_settings=safety_settings,
                               system_instruction="Tu es un assistant intelligent. ton but est d'assister au mieux que tu peux. tu as été créé par Aenir et tu t'appelles Mariam")
 
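The functional change in this hunk is the new `tools='code_execution'` argument, which lets the model write and run Python on Google's side to answer computational questions. A minimal sketch of the feature in isolation, assuming a `google-generativeai` version recent enough to accept the `'code_execution'` tool string:

# Hedged sketch, not part of this commit: the code-execution tool in isolation.
import google.generativeai as genai

model = genai.GenerativeModel('gemini-2.0-flash-exp', tools='code_execution')
response = model.generate_content(
    "What is the sum of the first 50 prime numbers? "
    "Generate and run code for the calculation."
)
print(response.text)  # interleaves the generated code with its execution output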
@@ -45,25 +48,22 @@ def format_search_results(data):
 
     result = ""
 
-    # Knowledge Graph
     if 'knowledgeGraph' in data:
         kg = data['knowledgeGraph']
         result += f"### {kg.get('title', '')}\n"
         result += f"*{kg.get('type', '')}*\n\n"
         result += f"{kg.get('description', '')}\n\n"
 
-    # Organic Results
     if 'organic' in data:
         result += "### Résultats principaux:\n"
-        for item in data['organic'][:3]:
+        for item in data['organic'][:3]:
             result += f"- **{item['title']}**\n"
             result += f"  {item['snippet']}\n"
             result += f"  [Lien]({item['link']})\n\n"
 
-    # People Also Ask
    if 'peopleAlsoAsk' in data:
         result += "### Questions fréquentes:\n"
-        for item in data['peopleAlsoAsk'][:2]:
+        for item in data['peopleAlsoAsk'][:2]:
             result += f"- **{item['question']}**\n"
             result += f"  {item['snippet']}\n\n"
 
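For orientation, `format_search_results` flattens a Serper-style search payload into markdown. A hypothetical input/output pair (the field names come from the function above; the values are invented for illustration):

# Hypothetical payload; only the keys are taken from the code above.
data = {
    "knowledgeGraph": {"title": "Python", "type": "Programming language",
                       "description": "Interpreted, general-purpose language."},
    "organic": [{"title": "Welcome to Python.org",
                 "snippet": "The official home of Python.",
                 "link": "https://www.python.org"}],
}
print(format_search_results(data))
# ### Python
# *Programming language*
#
# Interpreted, general-purpose language.
#
# ### Résultats principaux:
# - **Welcome to Python.org**
#   The official home of Python.
#   [Lien](https://www.python.org)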
@@ -75,7 +75,7 @@ def role_to_streamlit(role):
     else:
         return role
 
-#
+# Initialize session state
 if "chat" not in st.session_state:
     st.session_state.chat = model.start_chat(history=[])
 if "web_search" not in st.session_state:
@@ -97,7 +97,6 @@ for message in st.session_state.chat.history:
     with st.chat_message(role_to_streamlit(message.role)):
         st.markdown(message.parts[0].text)
 
-# Function to handle file upload with Gemini
 def process_uploaded_file(file):
     if file is not None:
         with open(os.path.join("temp", file.name), "wb") as f:
@@ -109,6 +108,20 @@ def process_uploaded_file(file):
        st.error(f"Erreur lors du téléchargement du fichier : {e}")
    return None
 
+async def stream_response(prompt: str, uploaded_gemini_file=None) -> AsyncGenerator[str, None]:
+    try:
+        if uploaded_gemini_file:
+            response = await st.session_state.chat.send_message_async([uploaded_gemini_file, "\n\n", prompt], stream=True)
+        else:
+            response = await st.session_state.chat.send_message_async(prompt, stream=True)
+
+        async for chunk in response:
+            if chunk.text:
+                yield chunk.text
+    except Exception as e:
+        st.error(f"Erreur lors du streaming : {e}")
+        yield "Désolé, une erreur s'est produite lors de la génération de la réponse."
+
 # Chat input and processing
 if prompt := st.chat_input("Hey?"):
     uploaded_gemini_file = None
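`stream_response` is an async generator: `send_message_async(..., stream=True)` resolves to an async iterable of partial responses, and every non-empty `chunk.text` is yielded as it arrives. One subtlety is that an async generator can only be consumed with `async for`, so synchronous callers need a coroutine wrapper. A minimal sketch, assuming it runs inside the app where `st.session_state.chat` exists:

# Sketch, not in the commit: gather the streamed chunks into one string.
async def collect_response(prompt: str) -> str:
    parts = []
    async for chunk in stream_response(prompt):
        parts.append(chunk)
    return "".join(parts)

# full_text = asyncio.run(collect_response("Bonjour"))  # a coroutine, so run() works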
@@ -117,8 +130,7 @@ if prompt := st.chat_input("Hey?"):
 
     # Display user message
     st.chat_message("user").markdown(prompt)
-
-    print("------------")
+
     try:
         # Perform web search if enabled
        web_results = None
@@ -129,16 +141,18 @@
        formatted_results = format_search_results(web_results)
        prompt = f"""Question: {prompt}\n\nRésultats de recherche web:\n{formatted_results}\n\nPourrais-tu analyser ces informations et me donner une réponse complète?"""
 
-        #
-        if uploaded_gemini_file:
-            response = st.session_state.chat.send_message([uploaded_gemini_file, "\n\n", prompt])
-        else:
-            response = st.session_state.chat.send_message(prompt)
-
-        print(response.text)
-        # Display assistant response
+        # Create a placeholder for the streaming response
         with st.chat_message("assistant"):
-            st.
+            response_placeholder = st.empty()
+            full_response = ""
+
+            # Stream the response
+            for response_chunk in asyncio.run(stream_response(prompt, uploaded_gemini_file)):
+                full_response += response_chunk
+                response_placeholder.markdown(full_response + "▌")
+
+            # Update the placeholder with the complete response
+            response_placeholder.markdown(full_response)
 
     except Exception as e:
         st.error(f"Erreur lors de l'envoi du message : {e}")
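Note that the consumption loop above will not run as written: `asyncio.run()` expects a coroutine and raises `ValueError: a coroutine was expected` when handed the async generator returned by `stream_response(...)`, and a plain `for` cannot iterate an async generator in any case. One possible fix (the helper name `iter_over_async` is mine, not the author's) is to drive the generator step by step from a private event loop, keeping the incremental placeholder updates intact:

def iter_over_async(agen):
    """Drive an async generator from synchronous code, yielding each item."""
    loop = asyncio.new_event_loop()
    try:
        while True:
            try:
                yield loop.run_until_complete(agen.__anext__())
            except StopAsyncIteration:
                break
    finally:
        loop.run_until_complete(agen.aclose())
        loop.close()

# Drop-in replacement for the asyncio.run(...) line:
# for response_chunk in iter_over_async(stream_response(prompt, uploaded_gemini_file)):
#     full_response += response_chunk
#     response_placeholder.markdown(full_response + "▌")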