Docfile committed on
Commit
dda2241
·
verified ·
1 Parent(s): 8f3dc32

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +140 -58
app.py CHANGED
@@ -1,68 +1,150 @@
1
- import streamlit as st
 
 
 
 
 
2
 
3
- from llama_index.llms.gemini import Gemini
4
- from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Settings
5
- import os
6
- from llama_index.embeddings.gemini import GeminiEmbedding
7
- #os.environ["GOOGLE_API_KEY"] = GOOGLE_API_KEY
8
 
9
- st.set_page_config(page_title="Chat with the Streamlit docs, powered by LlamaIndex", page_icon="🦙", layout="centered", initial_sidebar_state="auto", menu_items=None)
10
- #openai.api_key = st.secrets.openai_key
11
- st.title("Chat with the Streamlit docs, powered by LlamaIndex 💬🦙")
12
- st.info("Check out the full tutorial to build this app in our [blog post](https://blog.streamlit.io/build-a-chatbot-with-custom-data-sources-powered-by-llamaindex/)", icon="📃")
13
 
14
- if "messages" not in st.session_state.keys(): # Initialize the chat messages history
15
- st.session_state.messages = [
16
- {
17
- "role": "assistant",
18
- "content": "Ask me a question about Streamlit's open-source Python library!",
19
- }
20
- ]
21
 
22
- @st.cache_resource(show_spinner=False)
23
- def load_data():
24
- reader = SimpleDirectoryReader(input_dir="./data", recursive=True)
25
- docs = reader.load_data()
26
- Settings.llm = Gemini(
27
- model="models/gemini-2.0-flash-exp",
28
- temperature=1,
29
- system_prompt="""You are an expert on
30
- the Streamlit Python library and your
31
- job is to answer technical questions.
32
- Assume that all questions are related
33
- to the Streamlit Python library. Keep
34
- your answers technical and based on
35
- facts – do not hallucinate features.""",
36
- )
37
- Settings.embed_model = GeminiEmbedding(
38
- model="models/embedding-001", embed_batch_size=100
39
- )
40
-
41
- index = VectorStoreIndex.from_documents(docs)
42
- return index
43
 
 
 
44
 
45
- index = load_data()
 
 
 
 
46
 
47
- if "chat_engine" not in st.session_state.keys(): # Initialize the chat engine
48
- st.session_state.chat_engine = index.as_chat_engine(
49
- chat_mode="condense_question", verbose=True, streaming=True
50
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
51
 
52
- if prompt := st.chat_input(
53
- "Ask a question"
54
- ): # Prompt for user input and save to chat history
55
- st.session_state.messages.append({"role": "user", "content": prompt})
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
56
 
57
- for message in st.session_state.messages: # Write message history to UI
58
- with st.chat_message(message["role"]):
59
- st.write(message["content"])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
60
 
61
- # If last message is not from assistant, generate a new response
62
- if st.session_state.messages[-1]["role"] != "assistant":
63
- with st.chat_message("assistant"):
64
- response_stream = st.session_state.chat_engine.stream_chat(prompt)
65
- st.write_stream(response_stream.response_gen)
66
- message = {"role": "assistant", "content": response_stream.response}
67
- # Add response to message history
68
- st.session_state.messages.append(message)
 
1
+ from flask import Flask, render_template, request, redirect, url_for, session
2
+ import os
3
+ import json
4
+ import http.client
5
+ import google.generativeai as genai
6
+ from dotenv import load_dotenv
7
 
8
+ load_dotenv()
 
 
 
 
9
 
10
app = Flask(__name__)
# The secret key signs the session cookie.  Read it from the environment so a
# real key never has to live in the repository; the old hard-coded value is
# kept only as a fallback so existing local setups keep working.
# SECURITY NOTE(review): the fallback value is public — set FLASK_SECRET_KEY
# (e.g. in .env, loaded by load_dotenv above) before any real deployment.
app.secret_key = os.getenv("FLASK_SECRET_KEY", 'votre-cle-secrete')

# Configure the Google Generative AI client from the environment
# (GOOGLE_API_KEY is expected in .env / the process environment).
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
 
 
 
 
 
15
 
16
# Safety settings: every Gemini harm category is set to BLOCK_NONE, i.e. all
# of the model's built-in content filtering is disabled.
# NOTE(review): confirm that fully disabling the filters is intentional.
safety_settings = [
    {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE"},
    {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE"},
    {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_NONE"},
    {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_NONE"},
]

# System prompt for "Mariam" (content is user-facing and deliberately kept in
# French — do not translate the string itself).
ss = """
# Prompt System pour Mariam, IA conçu par youssouf

## Personnalité Fondamentale

Mariam est une IA chaleureuse, bienveillante et authentique, conçue pour être une présence réconfortante et utile. Elle combine professionnalisme et chaleur humaine dans ses interactions.

...
"""  # You can insert the full prompt here

# Gemini model used for every chat turn: code-execution tool enabled, filters
# disabled (see safety_settings above), persona fixed via system_instruction.
model = genai.GenerativeModel('gemini-2.0-flash-exp',
                              tools='code_execution',
                              safety_settings=safety_settings,
                              system_instruction=ss)
40
 
41
def perform_web_search(query):
    """POST *query* to the Serper.dev Google-search API and return the JSON.

    Returns the decoded response dict, or None if the request or decoding
    fails (the error is printed, not raised).  The connection is always
    closed via the finally clause.
    """
    conn = http.client.HTTPSConnection("google.serper.dev")
    payload = json.dumps({"q": query})
    headers = {
        # SECURITY: this API key used to be hard-coded (and is therefore
        # compromised).  It is kept only as a fallback so existing
        # deployments keep working — set SERPER_API_KEY in the environment
        # and revoke the old key.
        'X-API-KEY': os.getenv("SERPER_API_KEY",
                               '9b90a274d9e704ff5b21c0367f9ae1161779b573'),
        'Content-Type': 'application/json'
    }
    try:
        conn.request("POST", "/search", payload, headers)
        res = conn.getresponse()
        data = json.loads(res.read().decode("utf-8"))
        return data
    except Exception as e:
        print(f"Erreur lors de la recherche web : {e}")
        return None
    finally:
        conn.close()
58
 
59
+ def format_search_results(data):
60
+ if not data:
61
+ return "Aucun résultat trouvé"
62
+
63
+ result = ""
64
+ # Knowledge Graph
65
+ if 'knowledgeGraph' in data:
66
+ kg = data['knowledgeGraph']
67
+ result += f"### {kg.get('title', '')}\n"
68
+ result += f"*{kg.get('type', '')}*\n\n"
69
+ result += f"{kg.get('description', '')}\n\n"
70
+ # Organic Results
71
+ if 'organic' in data:
72
+ result += "### Résultats principaux:\n"
73
+ for item in data['organic'][:3]:
74
+ result += f"- **{item['title']}**\n"
75
+ result += f" {item['snippet']}\n"
76
+ result += f" [Lien]({item['link']})\n\n"
77
+ # People Also Ask
78
+ if 'peopleAlsoAsk' in data:
79
+ result += "### Questions fréquentes:\n"
80
+ for item in data['peopleAlsoAsk'][:2]:
81
+ result += f"- **{item['question']}**\n"
82
+ result += f" {item['snippet']}\n\n"
83
+ return result
84
+
85
def process_uploaded_file(file):
    """Save an uploaded file under ./temp and register it with the Gemini
    file API.

    Returns the genai file handle, or None when no file was supplied or the
    Gemini upload failed (the error is printed, not raised).
    NOTE(review): the saved copy in ./temp is never deleted — temp files
    accumulate across requests; consider cleaning up after upload.
    """
    if not file:
        return None
    upload_dir = 'temp'
    # exist_ok avoids the racy exists()-then-makedirs check.
    os.makedirs(upload_dir, exist_ok=True)
    # basename() strips any directory components a malicious client could
    # embed in the filename (path traversal, e.g. "../../app.py").
    filepath = os.path.join(upload_dir, os.path.basename(file.filename))
    file.save(filepath)
    try:
        gemini_file = genai.upload_file(filepath)
        return gemini_file
    except Exception as e:
        print(f"Erreur lors du téléchargement du fichier : {e}")
        return None
99
 
100
+ # Initialisation de la session pour le chat
101
def init_session():
    """Ensure the chat-related keys exist in the Flask session.

    Only missing keys are written, so an already-populated session (and its
    modified flag) is left untouched.
    """
    defaults = {
        # History entries are dicts: {'role': 'user'|'assistant', 'message': str}
        'chat_history': [],
        'web_search': False,
    }
    for key, value in defaults.items():
        if key not in session:
            session[key] = value
106
+
107
@app.route('/', methods=['GET', 'POST'])
def index():
    """Chat page: GET renders the history, POST processes one user turn.

    A POST reads the prompt (plus an optional file upload and the web-search
    toggle), asks Gemini for an answer, appends both messages to the session
    history and redirects back to GET (post/redirect/get pattern, so a page
    refresh does not resubmit the form).
    """
    init_session()
    if request.method == 'POST':
        # Persist the web-search checkbox state for the next render.
        session['web_search'] = (request.form.get('toggle_web_search') == 'on')
        prompt = request.form.get('prompt')
        uploaded_file = request.files.get('file')
        uploaded_gemini_file = None
        if uploaded_file and uploaded_file.filename != '':
            uploaded_gemini_file = process_uploaded_file(uploaded_file)

        # Record the user's message in the history.
        session['chat_history'].append({'role': 'user', 'message': prompt})

        # When web search is enabled, prepend the formatted results so the
        # model can ground its answer in them.
        if session.get('web_search'):
            web_results = perform_web_search(prompt)
            if web_results:
                formatted_results = format_search_results(web_results)
                prompt = f"Question: {prompt}\n\nRésultats de recherche web:\n{formatted_results}\n\nPourrais-tu analyser ces informations et me donner une réponse complète?"

        try:
            # BUG FIX: genai.GenerativeModel has no send_message() — that
            # method lives on a ChatSession (model.start_chat()), so every
            # request used to fail with an AttributeError and surface as an
            # error message.  generate_content() is the correct single-turn
            # call.  NOTE(review): prior turns are not sent to the model; use
            # start_chat()/send_message for real multi-turn memory.
            if uploaded_gemini_file:
                response = model.generate_content([uploaded_gemini_file, "\n\n", prompt])
            else:
                response = model.generate_content(prompt)
            assistant_response = response.text
            session['chat_history'].append({'role': 'assistant', 'message': assistant_response})
        except Exception as e:
            error_msg = f"Erreur lors de l'envoi du message : {e}"
            session['chat_history'].append({'role': 'assistant', 'message': error_msg})

        # Appending to the list inside the session mutates it in place, which
        # does not mark the cookie dirty by itself — flag it explicitly.
        session.modified = True
        return redirect(url_for('index'))

    return render_template('index.html',
                           chat_history=session.get('chat_history'),
                           web_search=session.get('web_search'))
148
 
149
if __name__ == '__main__':
    # NOTE(review): debug=True enables the Werkzeug debugger and reloader —
    # fine for local development, never for production.
    app.run(debug=True)