Docfile committed on
Commit 69977f9 · verified · 1 Parent(s): 392d67d

Update app.py

Files changed (1)
  1. app.py +95 -83
app.py CHANGED
@@ -4,8 +4,7 @@ import os
 from dotenv import load_dotenv
 import http.client
 import json
-import asyncio
-from typing import AsyncGenerator
+from typing import Iterator

 load_dotenv()

@@ -24,7 +23,7 @@ model = genai.GenerativeModel('gemini-2.0-flash-exp',
                               safety_settings=safety_settings,
                               system_instruction="Tu es un assistant intelligent. ton but est d'assister au mieux que tu peux. tu as été créé par Aenir et tu t'appelles Mariam")

-def perform_web_search(query):
+def perform_web_search(query: str) -> dict:
     conn = http.client.HTTPSConnection("google.serper.dev")
     payload = json.dumps({"q": query})
     headers = {
@@ -42,7 +41,7 @@ def perform_web_search(query):
     finally:
         conn.close()

-def format_search_results(data):
+def format_search_results(data: dict) -> str:
     if not data:
         return "Aucun résultat trouvé"

@@ -69,93 +68,106 @@ def format_search_results(data):

     return result

-def role_to_streamlit(role):
-    if role == "model":
-        return "assistant"
-    else:
-        return role
-
-# Initialize session state
-if "chat" not in st.session_state:
-    st.session_state.chat = model.start_chat(history=[])
-if "web_search" not in st.session_state:
-    st.session_state.web_search = False
-
-# Display Form Title
-st.title("Mariam AI!")
-
-# Settings section
-with st.sidebar:
-    st.title("Paramètres")
-    st.session_state.web_search = st.toggle("Activer la recherche web", value=st.session_state.web_search)
-
-# File upload section
-uploaded_file = st.file_uploader("Télécharger un fichier (image/document)", type=['jpg', 'mp4', 'mp3', 'jpeg', 'png', 'pdf', 'txt'])
+def stream_response(prompt: str, uploaded_file=None) -> Iterator[str]:
+    """Stream the response from Gemini"""
+    try:
+        if uploaded_file:
+            response = model.generate_content([uploaded_file, "\n\n", prompt], stream=True)
+        else:
+            response = model.generate_content(prompt, stream=True)
+
+        for chunk in response:
+            if chunk.text:
+                yield chunk.text
+
+    except Exception as e:
+        yield f"Erreur lors de la génération de la réponse : {str(e)}"

-# Display chat messages
-for message in st.session_state.chat.history:
-    with st.chat_message(role_to_streamlit(message.role)):
-        st.markdown(message.parts[0].text)
+def role_to_streamlit(role: str) -> str:
+    return "assistant" if role == "model" else role

-def process_uploaded_file(file):
+def process_uploaded_file(file) -> object:
     if file is not None:
-        with open(os.path.join("temp", file.name), "wb") as f:
+        file_path = os.path.join("temp", file.name)
+        with open(file_path, "wb") as f:
             f.write(file.getbuffer())
         try:
-            gemini_file = genai.upload_file(os.path.join("temp", file.name))
-            return gemini_file
+            return genai.upload_file(file_path)
         except Exception as e:
             st.error(f"Erreur lors du téléchargement du fichier : {e}")
             return None
+        finally:
+            # Clean up the temporary file
+            if os.path.exists(file_path):
+                os.remove(file_path)
+
+def main():
+    # Initialize session state
+    if "chat" not in st.session_state:
+        st.session_state.chat = model.start_chat(history=[])
+    if "web_search" not in st.session_state:
+        st.session_state.web_search = False
+
+    st.title("Mariam AI!")
+
+    # Settings sidebar
+    with st.sidebar:
+        st.title("Paramètres")
+        st.session_state.web_search = st.toggle("Activer la recherche web",
+                                                value=st.session_state.web_search)
+
+    # File upload
+    uploaded_file = st.file_uploader("Télécharger un fichier (image/document)",
+                                     type=['jpg', 'mp4', 'mp3', 'jpeg', 'png', 'pdf', 'txt'])
+
+    # Display chat history
+    for message in st.session_state.chat.history:
+        with st.chat_message(role_to_streamlit(message.role)):
+            st.markdown(message.parts[0].text)
+
+    # Chat input
+    if prompt := st.chat_input("Hey?"):
+        # Display user message
+        st.chat_message("user").markdown(prompt)
+
+        # Handle file upload
+        uploaded_gemini_file = None
+        if uploaded_file:
+            uploaded_gemini_file = process_uploaded_file(uploaded_file)

-async def stream_response(prompt: str, uploaded_gemini_file=None) -> AsyncGenerator[str, None]:
-    try:
-        if uploaded_gemini_file:
-            response = await st.session_state.chat.send_message_async([uploaded_gemini_file, "\n\n", prompt], stream=True)
-        else:
-            response = await st.session_state.chat.send_message_async(prompt, stream=True)
-
-        async for chunk in response:
-            if chunk.text:
-                yield chunk.text
-    except Exception as e:
-        st.error(f"Erreur lors du streaming : {e}")
-        yield "Désolé, une erreur s'est produite lors de la génération de la réponse."
-
-# Chat input and processing
-if prompt := st.chat_input("Hey?"):
-    uploaded_gemini_file = None
-    if uploaded_file:
-        uploaded_gemini_file = process_uploaded_file(uploaded_file)
-
-    # Display user message
-    st.chat_message("user").markdown(prompt)
-
-    try:
-        # Perform web search if enabled
-        web_results = None
-        if st.session_state.web_search:
-            with st.spinner("Recherche web en cours..."):
-                web_results = perform_web_search(prompt)
-                if web_results:
-                    formatted_results = format_search_results(web_results)
-                    prompt = f"""Question: {prompt}\n\nRésultats de recherche web:\n{formatted_results}\n\nPourrais-tu analyser ces informations et me donner une réponse complète?"""
-
-        # Create a placeholder for the streaming response
-        with st.chat_message("assistant"):
-            response_placeholder = st.empty()
-            full_response = ""
-
-            # Stream the response
-            for response_chunk in asyncio.run(stream_response(prompt, uploaded_gemini_file)):
-                full_response += response_chunk
-                response_placeholder.markdown(full_response + "▌")
+        try:
+            # Perform web search if enabled
+            if st.session_state.web_search:
+                with st.spinner("Recherche web en cours..."):
+                    web_results = perform_web_search(prompt)
+                    if web_results:
+                        formatted_results = format_search_results(web_results)
+                        prompt = f"""Question: {prompt}\n\nRésultats de recherche web:\n{formatted_results}\n\nPourrais-tu analyser ces informations et me donner une réponse complète?"""
+
+            # Display assistant message with streaming
+            with st.chat_message("assistant"):
+                message_placeholder = st.empty()
+                full_response = ""
+
+                # Stream the response
+                for chunk in stream_response(prompt, uploaded_gemini_file):
+                    full_response += chunk
+                    # Update the message placeholder with the accumulated response
+                    message_placeholder.markdown(full_response + "▌")
+
+                # Remove the cursor and update with the final response
+                message_placeholder.markdown(full_response)

-            # Update the placeholder with the complete response
-            response_placeholder.markdown(full_response)
-
-    except Exception as e:
-        st.error(f"Erreur lors de l'envoi du message : {e}")
+                # Add the interaction to chat history
+                st.session_state.chat.history.extend([
+                    {"role": "user", "parts": [prompt]},
+                    {"role": "model", "parts": [full_response]}
+                ])
+
+        except Exception as e:
+            st.error(f"Erreur lors de l'envoi du message : {e}")

-# Create temp directory
-os.makedirs("temp", exist_ok=True)
+if __name__ == "__main__":
+    # Create temp directory
+    os.makedirs("temp", exist_ok=True)
+    main()
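
The core of this change is dropping the asyncio-based streaming (chat.send_message_async driven through asyncio.run) in favor of the synchronous streaming API of the google-generativeai package, which needs no event loop inside Streamlit's rerun model. A minimal sketch of that pattern outside the app, assuming the google-generativeai package is installed and a GEMINI_API_KEY environment variable is set (the variable name is an assumption; the app itself loads its key via dotenv):

import os
import google.generativeai as genai

# GEMINI_API_KEY is an assumed variable name for this sketch.
genai.configure(api_key=os.environ["GEMINI_API_KEY"])
model = genai.GenerativeModel("gemini-2.0-flash-exp")

# With stream=True, generate_content returns an iterable of chunks,
# each carrying the partial text, so a plain for-loop suffices.
for chunk in model.generate_content("Présente-toi.", stream=True):
    if chunk.text:
        print(chunk.text, end="", flush=True)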
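For reference, perform_web_search (unchanged here apart from its new type hints) wraps a single POST to the Serper host shown in the diff. A minimal sketch under stated assumptions: the SERPER_API_KEY variable name is hypothetical, and the X-API-KEY header and /search path follow Serper's public documentation rather than anything visible in this diff:

import http.client
import json
import os

def search(query: str) -> dict:
    # Same host and payload as perform_web_search in app.py.
    conn = http.client.HTTPSConnection("google.serper.dev")
    try:
        conn.request(
            "POST", "/search",
            body=json.dumps({"q": query}),
            headers={
                "X-API-KEY": os.environ["SERPER_API_KEY"],  # assumed env var
                "Content-Type": "application/json",
            },
        )
        # The JSON response is what format_search_results() renders as text.
        return json.loads(conn.getresponse().read())
    finally:
        conn.close()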