Docfile committed on
Commit 2cd6b7a · verified · 1 Parent(s): 98e9cfd

Update app.py

Files changed (1)
  1. app.py +73 -101
app.py CHANGED
@@ -4,7 +4,6 @@ import os
  from dotenv import load_dotenv
  import http.client
  import json
- from typing import Iterator

  load_dotenv()

@@ -18,12 +17,11 @@ safety_settings = [
      {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_NONE"},
  ]

- model = genai.GenerativeModel('gemini-2.0-flash-exp',
-                               tools='code_execution',
+ model = genai.GenerativeModel('gemini-2.0-flash-exp', tools='code_execution',
                                safety_settings=safety_settings,
                                system_instruction="Tu es un assistant intelligent. ton but est d'assister au mieux que tu peux. tu as été créé par Aenir et tu t'appelles Mariam")

- def perform_web_search(query: str) -> dict:
+ def perform_web_search(query):
      conn = http.client.HTTPSConnection("google.serper.dev")
      payload = json.dumps({"q": query})
      headers = {
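Note: this hunk elides the middle of `perform_web_search` (the request itself), so only the connection setup above and the `finally`/`conn.close()` context at the top of the next hunk are visible. For orientation only, a minimal sketch of how a Serper call of this shape is typically completed, following Serper's documented REST pattern; the `SERPER_API_KEY` variable name is an assumption and does not appear in this diff:

```python
# Sketch only: plausible completion of a Serper-style search call.
# Assumption: the API key comes from an environment variable whose
# name is NOT shown in this diff (SERPER_API_KEY is hypothetical).
import http.client
import json
import os

def serper_search(query):
    conn = http.client.HTTPSConnection("google.serper.dev")
    payload = json.dumps({"q": query})
    headers = {
        "X-API-KEY": os.getenv("SERPER_API_KEY"),  # hypothetical env var name
        "Content-Type": "application/json",
    }
    try:
        conn.request("POST", "/search", payload, headers)
        res = conn.getresponse()
        return json.loads(res.read().decode("utf-8"))
    finally:
        conn.close()  # matches the finally/conn.close() context visible below
```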
@@ -41,135 +39,109 @@ def perform_web_search(query: str) -> dict:
      finally:
          conn.close()

- def format_search_results(data: dict) -> str:
+ def format_search_results(data):
      if not data:
          return "Aucun résultat trouvé"

      result = ""

+     # Knowledge Graph
      if 'knowledgeGraph' in data:
          kg = data['knowledgeGraph']
          result += f"### {kg.get('title', '')}\n"
          result += f"*{kg.get('type', '')}*\n\n"
          result += f"{kg.get('description', '')}\n\n"

+     # Organic Results
      if 'organic' in data:
          result += "### Résultats principaux:\n"
-         for item in data['organic'][:3]:
+         for item in data['organic'][:3]:  # Limit to top 3 results
              result += f"- **{item['title']}**\n"
              result += f" {item['snippet']}\n"
              result += f" [Lien]({item['link']})\n\n"

+     # People Also Ask
      if 'peopleAlsoAsk' in data:
          result += "### Questions fréquentes:\n"
-         for item in data['peopleAlsoAsk'][:2]:
+         for item in data['peopleAlsoAsk'][:2]:  # Limit to top 2 questions
              result += f"- **{item['question']}**\n"
              result += f" {item['snippet']}\n\n"

      return result

- def stream_response(prompt: str, uploaded_file=None) -> Iterator[str]:
-     """Stream the response from Gemini"""
-     try:
-         if uploaded_file:
-             response = model.generate_content([uploaded_file, "\n\n", prompt], stream=True)
-         else:
-             response = model.generate_content(prompt, stream=True)
-
-         for chunk in response:
-             if chunk.text:
-                 yield chunk.text
-
-     except Exception as e:
-         yield f"Erreur lors de la génération de la réponse : {str(e)}"
-
- def role_to_streamlit(role: str) -> str:
-     return "assistant" if role == "model" else role
-
- def process_uploaded_file(file) -> object:
+ def role_to_streamlit(role):
+     if role == "model":
+         return "assistant"
+     else:
+         return role
+
+ # Add chat and settings to session state
+ if "chat" not in st.session_state:
+     st.session_state.chat = model.start_chat(history=[])
+ if "web_search" not in st.session_state:
+     st.session_state.web_search = False
+
+ # Display Form Title
+ st.title("Mariam AI!")
+
+ # Settings section
+ with st.sidebar:
+     st.title("Paramètres")
+     st.session_state.web_search = st.toggle("Activer la recherche web", value=st.session_state.web_search)
+
+ # File upload section
+ uploaded_file = st.file_uploader("Télécharger un fichier (image/document)", type=['jpg', 'mp4', 'mp3', 'jpeg', 'png', 'pdf', 'txt'])
+
+ # Display chat messages
+ for message in st.session_state.chat.history:
+     with st.chat_message(role_to_streamlit(message.role)):
+         st.markdown(message.parts[0].text)
+
+ # Function to handle file upload with Gemini
+ def process_uploaded_file(file):
      if file is not None:
-         file_path = os.path.join("temp", file.name)
-         with open(file_path, "wb") as f:
+         with open(os.path.join("temp", file.name), "wb") as f:
              f.write(file.getbuffer())
          try:
-             return genai.upload_file(file_path)
+             gemini_file = genai.upload_file(os.path.join("temp", file.name))
+             return gemini_file
          except Exception as e:
              st.error(f"Erreur lors du téléchargement du fichier : {e}")
              return None
-         finally:
-             # Clean up the temporary file
-             if os.path.exists(file_path):
-                 os.remove(file_path)
-
- def main():
-     # Initialize session state
-     if "chat" not in st.session_state:
-         st.session_state.chat = model.start_chat(history=[])
-     if "messages" not in st.session_state:
-         st.session_state.messages = []
-     if "web_search" not in st.session_state:
-         st.session_state.web_search = False
-
-     st.title("Mariam AI!")
-
-     # Settings sidebar
-     with st.sidebar:
-         st.title("Paramètres")
-         st.session_state.web_search = st.toggle("Activer la recherche web",
-                                                 value=st.session_state.web_search)
-
-     # File upload
-     uploaded_file = st.file_uploader("Télécharger un fichier (image/document)",
-                                      type=['jpg', 'mp4', 'mp3', 'jpeg', 'png', 'pdf', 'txt'])
-
-     # Display chat history from session state
-     for message in st.session_state.messages:
-         with st.chat_message(message["role"]):
-             st.markdown(message["content"])
-
-     # Chat input
-     if prompt := st.chat_input("Hey?"):
-         # Add user message to chat history
-         st.session_state.messages.append({"role": "user", "content": prompt})
-
-         # Display user message
-         st.chat_message("user").markdown(prompt)
-
-         # Handle file upload
-         uploaded_gemini_file = None
-         if uploaded_file:
-             uploaded_gemini_file = process_uploaded_file(uploaded_file)
-
-         try:
-             # Perform web search if enabled
-             if st.session_state.web_search:
-                 with st.spinner("Recherche web en cours..."):
-                     web_results = perform_web_search(prompt)
-                     if web_results:
-                         formatted_results = format_search_results(web_results)
-                         prompt = f"""Question: {prompt}\n\nRésultats de recherche web:\n{formatted_results}\n\nPourrais-tu analyser ces informations et me donner une réponse complète?"""
-
-             # Display assistant message with streaming
-             with st.chat_message("assistant"):
-                 message_placeholder = st.empty()
-                 full_response = ""
-
-                 # Stream the response
-                 for chunk in stream_response(prompt, uploaded_gemini_file):
-                     full_response += chunk
-                     # Update the message placeholder with the accumulated response
-                     message_placeholder.markdown(full_response + "▌")
-
-                 # Remove the cursor and update with the final response
-                 message_placeholder.markdown(full_response)
-
-                 # Add assistant response to chat history
-                 st.session_state.messages.append({"role": "assistant", "content": full_response})
-
-         except Exception as e:
-             st.error(f"Erreur lors de l'envoi du message : {e}")
-
- if __name__ == "__main__":
-     # Create temp directory
-     os.makedirs("temp", exist_ok=True)
-     main()
+
+ # Chat input and processing
+ if prompt := st.chat_input("Hey?"):
+     uploaded_gemini_file = None
+     if uploaded_file:
+         uploaded_gemini_file = process_uploaded_file(uploaded_file)
+
+     # Display user message
+     st.chat_message("user").markdown(prompt)
+     print(prompt)
+     print("------------")
+     try:
+         # Perform web search if enabled
+         web_results = None
+         if st.session_state.web_search:
+             with st.spinner("Recherche web en cours..."):
+                 web_results = perform_web_search(prompt)
+                 if web_results:
+                     formatted_results = format_search_results(web_results)
+                     prompt = f"""Question: {prompt}\n\nRésultats de recherche web:\n{formatted_results}\n\nPourrais-tu analyser ces informations et me donner une réponse complète?"""
+
+         # Send message to Gemini
+         if uploaded_gemini_file:
+             response = st.session_state.chat.send_message([uploaded_gemini_file, "\n\n", prompt])
+         else:
+             response = st.session_state.chat.send_message(prompt)
+
+         print(response.text)
+         # Display assistant response
+         with st.chat_message("assistant"):
+             st.markdown(response.text)
+
+     except Exception as e:
+         st.error(f"Erreur lors de l'envoi du message : {e}")
+
+ # Create temp directory
+ os.makedirs("temp", exist_ok=True)
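The main behavioral change in this hunk is dropping the streaming `stream_response` generator in favor of a blocking `st.session_state.chat.send_message(...)`, which also moves history tracking from a manual `messages` list to the chat session itself. If the typing-cursor UI is ever wanted back, it should still be possible with the chat-session approach; a minimal sketch, assuming the google-generativeai `ChatSession.send_message(..., stream=True)` API:

```python
# Sketch only: streaming with the chat session instead of the removed
# stream_response() generator; assumes send_message accepts stream=True.
with st.chat_message("assistant"):
    placeholder = st.empty()
    full_response = ""
    for chunk in st.session_state.chat.send_message(prompt, stream=True):
        if chunk.text:
            full_response += chunk.text
            placeholder.markdown(full_response + "▌")  # cursor while streaming
    placeholder.markdown(full_response)  # final text without the cursor
```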
 
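One side effect worth flagging: the rewritten `process_uploaded_file` drops the old `finally` block, so the temporary copy written to `temp/` is no longer deleted after `genai.upload_file` runs. A minimal sketch of the new-style function with the removed cleanup restored:

```python
# Sketch only: new-style process_uploaded_file with the removed cleanup re-added.
def process_uploaded_file(file):
    if file is not None:
        file_path = os.path.join("temp", file.name)
        with open(file_path, "wb") as f:
            f.write(file.getbuffer())  # persist the upload to disk for the SDK
        try:
            return genai.upload_file(file_path)
        except Exception as e:
            st.error(f"Erreur lors du téléchargement du fichier : {e}")
            return None
        finally:
            if os.path.exists(file_path):
                os.remove(file_path)  # delete the temporary copy either way
    return None
```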