Update app.py
app.py CHANGED

@@ -1,177 +1,81 @@
-
-
+from langchain_community.chat_message_histories import StreamlitChatMessageHistory
+from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
+from langchain_core.runnables.history import RunnableWithMessageHistory
+from langchain_google_genai import ChatGoogleGenerativeAI
 import os
 from dotenv import load_dotenv
-import http.client
-import json
-import uuid
-import requests  # to send the file to Telegram

-load_dotenv()

+import streamlit as st
+
+
+load_dotenv()
+api_key = os.getenv("GOOGLE_API_KEY")
 # Configure the Gemini API
-# ... (old lines 13-23 not recoverable from this view)
-]
-# ... (old lines 25-80 not recoverable from this view: model setup, Telegram
-#      credentials, and the role_to_streamlit / perform_web_search /
-#      format_search_results helpers used below)
-# --- Session management and chat-history saving ---
-
-if "session_id" not in st.session_state:
-    st.session_state.session_id = str(uuid.uuid4())
-
-def save_chat_history():
-    history_data = []
-    for message in st.session_state.chat.history:
-        history_data.append({
-            "role": message.role,
-            "text": message.parts[0].text
-        })
-    file_name = f"chat_history_{st.session_state.session_id}.json"
-    with open(file_name, "w", encoding="utf-8") as f:
-        json.dump(history_data, f, ensure_ascii=False, indent=4)
-    return file_name  # return the file name so it can be sent afterwards
-
-def send_file_to_telegram(file_path):
-    if TELEGRAM_BOT_TOKEN is None or TELEGRAM_CHAT_ID is None:
-        st.error("Les variables d'environnement TELEGRAM_BOT_TOKEN ou TELEGRAM_CHAT_ID ne sont pas définies.")
-        return
-
-    url = f"https://api.telegram.org/bot{TELEGRAM_BOT_TOKEN}/sendDocument"
-    try:
-        with open(file_path, "rb") as file:
-            files = {"document": file}
-            data = {"chat_id": TELEGRAM_CHAT_ID, "caption": "Historique de chat"}
-            response = requests.post(url, data=data, files=files)
-            if response.status_code == 200:
-                st.success("Fichier envoyé sur Telegram avec succès!")
-            else:
-                st.error(f"Erreur lors de l'envoi sur Telegram: {response.text}")
-    except Exception as e:
-        st.error(f"Erreur lors de l'ouverture du fichier ou l'envoi: {e}")
-
-if "chat" not in st.session_state:
-    st.session_state.chat = model.start_chat(history=[])
-if "web_search" not in st.session_state:
-    st.session_state.web_search = False
-
-st.title("Mariam AI!")
-
-with st.sidebar:
-    st.title("Paramètres")
-    st.session_state.web_search = st.toggle("Activer la recherche web", value=st.session_state.web_search)
-
-uploaded_file = st.file_uploader("Télécharger un fichier (image/document)", type=['jpg', 'mp4', 'mp3', 'jpeg', 'png', 'pdf', 'txt'])
-
-for message in st.session_state.chat.history:
-    with st.chat_message(role_to_streamlit(message.role)):
-        st.markdown(message.parts[0].text)
-
-def process_uploaded_file(file):
-    if file is not None:
-        os.makedirs("temp", exist_ok=True)
-        temp_file_path = os.path.join("temp", file.name)
-        with open(temp_file_path, "wb") as f:
-            f.write(file.getbuffer())
-        try:
-            gemini_file = genai.upload_file(temp_file_path)
-            return gemini_file
-        except Exception as e:
-            st.error(f"Erreur lors du téléchargement du fichier : {e}")
-            return None
-
-if prompt := st.chat_input("Hey?"):
-    uploaded_gemini_file = None
-    if uploaded_file:
-        uploaded_gemini_file = process_uploaded_file(uploaded_file)
-
-    st.chat_message("user").markdown(prompt)
-
-    try:
-        web_results = None
-        if st.session_state.web_search:
-            with st.spinner("Recherche web en cours..."):
-                web_results = perform_web_search(prompt)
-                if web_results:
-                    formatted_results = format_search_results(web_results)
-                    prompt = f"""Question: {prompt}\n\nRésultats de recherche web:\n{formatted_results}\n\nPourrais-tu analyser ces informations et me donner une réponse complète?"""
-
-        if uploaded_gemini_file:
-            response = st.session_state.chat.send_message([uploaded_gemini_file, "\n\n", prompt])
-        else:
-            response = st.session_state.chat.send_message(prompt)
-
-        with st.chat_message("assistant"):
-            st.markdown(response.text)
-
-        # Save the history and retrieve the file name
-        file_name = save_chat_history()
-
-        # Optional: send the file to Telegram
-        send_file_to_telegram(file_name)
-
-    except Exception as e:
-        st.error(f"Erreur lors de l'envoi du message : {e}")
+#llm = GoogleGenerativeAI(model="models/text-bison-001", google_api_key=api_key)
+#genai.configure(api_key=api_key)
+
+
+st.set_page_config(page_title="StreamlitChatMessageHistory", page_icon="📖")
+st.title("📖 StreamlitChatMessageHistory")
+
+"""
+A basic example of using StreamlitChatMessageHistory to help LLMChain remember messages in a conversation.
+The messages are stored in Session State across re-runs automatically. You can view the contents of Session State
+in the expander below. View the
+[source code for this app](https://github.com/langchain-ai/streamlit-agent/blob/main/streamlit_agent/basic_memory.py).
+"""
+
+# Set up memory
+msgs = StreamlitChatMessageHistory(key="langchain_messages")
+if len(msgs.messages) == 0:
+    msgs.add_ai_message("How can I help you?")
+
+view_messages = st.expander("View the message contents in session state")
+
+
+# Set up the LangChain, passing in Message History
+
+prompt = ChatPromptTemplate.from_messages(
+    [
+        ("system", "You are an AI chatbot having a conversation with a human."),
+        MessagesPlaceholder(variable_name="history"),
+        ("human", "{question}"),
+    ]
+)
+
+chain = prompt | ChatGoogleGenerativeAI(model="models/gemini-2.0-flash-exp", google_api_key=api_key)
+
+chain_with_history = RunnableWithMessageHistory(
+    chain,
+    lambda session_id: msgs,
+    input_messages_key="question",
+    history_messages_key="history",
+)
+
+# Render current messages from StreamlitChatMessageHistory
+for msg in msgs.messages:
+    st.chat_message(msg.type).write(msg.content)
+
+# If user inputs a new prompt, generate and draw a new response
+if prompt := st.chat_input():
+    st.chat_message("human").write(prompt)
+    # Note: new messages are saved to history automatically by Langchain during run
+    config = {"configurable": {"session_id": "any"}}
+    response = chain_with_history.invoke({"question": prompt}, config)
+    st.chat_message("ai").write(response.content)
+
+# Draw the messages at the end, so newly generated ones show up immediately
+with view_messages:
+    """
+    Message History initialized with:
+    ```python
+    msgs = StreamlitChatMessageHistory(key="langchain_messages")
+    ```
+
+    Contents of `st.session_state.langchain_messages`:
+    """
+    view_messages.json(st.session_state.langchain_messages)
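The rewritten app drops the Telegram and web-search plumbing and needs only a Google API key, which load_dotenv() reads from a .env file next to app.py. A minimal .env, with a placeholder value:

GOOGLE_API_KEY=your-google-api-key

The old TELEGRAM_BOT_TOKEN and TELEGRAM_CHAT_ID variables are no longer read.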
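To see what RunnableWithMessageHistory does without the Streamlit layer, here is a minimal sketch of the same wiring against a plain in-memory history. The model class, model name, prompt, and GOOGLE_API_KEY variable are carried over from the diff above; InMemoryChatMessageHistory, the stores dict, and the get_history helper are assumptions made for this sketch.

import os

from langchain_core.chat_history import InMemoryChatMessageHistory
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_google_genai import ChatGoogleGenerativeAI

prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are an AI chatbot having a conversation with a human."),
        MessagesPlaceholder(variable_name="history"),
        ("human", "{question}"),
    ]
)
chain = prompt | ChatGoogleGenerativeAI(
    model="models/gemini-2.0-flash-exp",
    google_api_key=os.environ["GOOGLE_API_KEY"],
)

# One history object per session id; after every invoke, the wrapper appends
# the human question and the AI answer to the matching history.
stores = {}

def get_history(session_id):
    # Hypothetical helper for this sketch: create a history on first use.
    return stores.setdefault(session_id, InMemoryChatMessageHistory())

chain_with_history = RunnableWithMessageHistory(
    chain,
    get_history,
    input_messages_key="question",
    history_messages_key="history",
)

config = {"configurable": {"session_id": "demo"}}
print(chain_with_history.invoke({"question": "Hi, I'm Sam."}, config).content)
print(chain_with_history.invoke({"question": "What is my name?"}, config).content)

In the Streamlit version, the lambda session_id: msgs ignores the session id and always returns the single StreamlitChatMessageHistory stored in session state, which is why the hard-coded session_id "any" is enough there.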