Programmes committed on
Commit
83054c0
·
verified ·
1 Parent(s): 4e367d7

Upload 7 files

Browse files
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ faiss_index/faiss_index.faiss filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,12 +1,37 @@
1
  ---
2
- title: Edu Pilot
3
- emoji: 🐠
4
- colorFrom: yellow
5
- colorTo: red
 
6
  sdk: streamlit
7
- sdk_version: 1.44.0
8
  app_file: app.py
9
- pinned: false
10
  ---
11
 
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
+ pinned: false
3
+ title: Edu P
4
+ emoji: 🎓
5
+ colorFrom: purple
6
+ colorTo: blue
7
  sdk: streamlit
8
+ sdk_version: 1.31.1
9
  app_file: app.py
10
+
11
  ---
12
 
13
+
14
+
15
+ # Chatbot d'Orientation Scolaire (RAG + Mistral)
16
+
17
+ Ce projet est un chatbot basé sur le modèle open-source Mistral 7B, utilisant un index FAISS pour récupérer les documents pertinents à partir d'une base de données, et générer des réponses contextualisées.
18
+
19
+ ## Lancer l'application
20
+
21
+ 1. Installe les dépendances :
22
+
23
+ ```
24
+ pip install -r requirements.txt
25
+ ```
26
+
27
+ 2. Lance l'application Streamlit :
28
+
29
+ ```
30
+ streamlit run app.py
31
+ ```
32
+
33
+ ## Structure
34
+
35
+ - `app.py` : interface utilisateur
36
+ - `rag_utils.py` : logique RAG
37
+ - `faiss_index/` : index FAISS + documents associés
app.py ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import streamlit as st
from rag_utils import load_faiss_index, get_embedding_model, query_index, generate_answer, nettoyer_context

st.set_page_config(page_title="🎓 EduPilot", page_icon="🧠")
st.title("🎓 EduPilot ")


# Streamlit re-runs this whole script on every user interaction. Without
# caching, the ~165 MB FAISS index and the embedding model were reloaded on
# every keystroke; cache them so they are loaded exactly once per process.
@st.cache_resource
def _load_resources():
    """Load the FAISS index, its documents, and the embedding model once."""
    index, documents = load_faiss_index()
    model_embed = get_embedding_model()
    return index, documents, model_embed


# Initialize per-session conversation memory.
if "chat_history" not in st.session_state:
    st.session_state.chat_history = []

# Load retrieval data and the embedding model (cached across reruns).
index, documents, model_embed = _load_resources()

# User input
user_input = st.text_input("Pose ta question ici :")

if user_input:
    st.session_state.chat_history.append(f"Utilisateur : {user_input}")

    # Retrieve the most relevant documents for the question.
    top_docs = query_index(user_input, index, documents, model_embed)
    context = nettoyer_context("\n".join(top_docs))

    # Include the last 6 conversation turns as additional context.
    history = "\n".join(st.session_state.chat_history[-6:])
    full_prompt = f"{history}\n\nContexte :\n{context}"

    # Generate the answer with the LLM.
    response = generate_answer(user_input, full_prompt)
    st.session_state.chat_history.append(f"Chatbot : {response}")

    # Display the answer.
    st.markdown("### ✨ Réponse du chatbot :")
    st.write(response)

    with st.expander("🧠 Historique de la conversation"):
        for msg in st.session_state.chat_history:
            st.write(msg)
st.markdown("---")
st.caption("🔹 Développé avec ❤️ par EduPilot")
faiss_index/documents.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4cc6100b51468166d2e0b5e0ca119f239e648cb5d539dd256dc886ef39f45f46
3
+ size 36366182
faiss_index/faiss_index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:43efadcf8c063cedf2414c75df1dd801404c0ba263b0e35de8f21c25436d0694
3
+ size 165167661
gitattributes ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ faiss_index/faiss_index.faiss filter=lfs diff=lfs merge=lfs -text
rag_utils.py ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import faiss
2
+ import pickle
3
+ import numpy as np
4
+ import re
5
+ from sentence_transformers import SentenceTransformer
6
+ from huggingface_hub import hf_hub_download
7
+ from llama_cpp import Llama
8
+
9
def load_faiss_index(index_path="faiss_index/faiss_index.faiss", doc_path="faiss_index/documents.pkl"):
    """Read the FAISS index and its pickled document list from disk.

    Returns:
        A ``(index, documents)`` tuple: the FAISS index object and the
        list of document texts it was built from.
    """
    with open(doc_path, "rb") as handle:
        docs = pickle.load(handle)
    idx = faiss.read_index(index_path)
    return idx, docs
14
+
15
def get_embedding_model():
    """Return the sentence-transformers encoder used to embed queries."""
    model_name = "sentence-transformers/multi-qa-MiniLM-L6-cos-v1"
    return SentenceTransformer(model_name)
17
+
18
def query_index(question, index, documents, model, k=3):
    """Embed *question* and return the ``k`` nearest documents.

    Args:
        question: the user query text.
        index: a FAISS index whose ``search`` returns (distances, ids).
        documents: list of texts aligned with the index's vector ids.
        model: an encoder exposing ``encode(list[str]) -> array``.
        k: number of neighbours to retrieve.
    """
    embedding = np.asarray(model.encode([question]), dtype="float32")
    _, hits = index.search(embedding, k)
    return [documents[doc_id] for doc_id in hits[0]]
22
+
23
def nettoyer_context(context):
    """Clean retrieved text: unwrap ``['...']`` list artefacts and drop
    literal ``"None"`` strings left over from the source data."""
    unwrapped = re.sub(r"\[\'(.*?)\'\]", r"\1", context)
    return unwrapped.replace("None", "")
27
+
28
# Module-level cache: the original code re-downloaded and re-instantiated the
# multi-GB GGUF model on EVERY question, making each answer minutes slow.
_LLM = None


def _get_llm():
    """Download (once) and memoize the Mistral-7B-Instruct GGUF model."""
    global _LLM
    if _LLM is None:
        model_file = hf_hub_download(
            repo_id="TheBloke/Mistral-7B-Instruct-v0.1-GGUF",
            filename="mistral-7b-instruct-v0.1.Q4_K_M.gguf"
        )
        _LLM = Llama(
            model_path=model_file,
            n_ctx=2048,
            n_threads=6,
            verbose=False
        )
    return _LLM


def generate_answer(question, context):
    """Generate a French counselling-style answer grounded in *context*.

    Args:
        question: the user's question.
        context: retrieved documents plus conversation history.

    Returns:
        The model's completion text, stripped of surrounding whitespace.
    """
    llm = _get_llm()

    prompt = f"""Voici des informations sur des établissements et formations :

{context}

Formule ta réponse comme un conseiller d’orientation bienveillant, de manière fluide et naturelle, sans énumérations brutes.

Question : {question}
Réponse :
"""

    # Stop at the model's end-of-sequence marker; cap the answer length.
    output = llm(prompt, max_tokens=256, stop=["</s>"])
    return output["choices"][0]["text"].strip()
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ streamlit
2
+ faiss-cpu
3
+ sentence-transformers
4
+ llama-cpp-python
5
+ huggingface_hub