import os
import csv
import streamlit as st
from langchain_community.vectorstores import FAISS
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_huggingface import HuggingFaceEndpoint
from langchain.prompts import PromptTemplate
from langchain.chains import RetrievalQA
from huggingface_hub import login

# Login to Hugging Face
login(token=st.secrets["HF_TOKEN"])
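# st.secrets reads from the app's secrets store (e.g. .streamlit/secrets.toml when run
# locally); a minimal secrets file would contain the key assumed above:
#   HF_TOKEN = "hf_..."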

# Load FAISS index
db = FAISS.load_local(
    "faiss_index", HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L12-v2'),
    allow_dangerous_deserialization=True
)
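# The "faiss_index" folder is assumed to have been built offline with the same embedding
# model, roughly (sketch, with `docs` a list of LangChain Document objects):
#   db = FAISS.from_documents(docs, HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L12-v2'))
#   db.save_local("faiss_index")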

# Create retriever
retriever = db.as_retriever(
    search_type="mmr",
    search_kwargs={'k': 1}
)
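# MMR re-ranks candidates for relevance and diversity; with k=1 only a single chunk is
# returned and injected into the prompt as {context}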

# Prompt template in the Mistral [INST] format; the assistant must always answer in French
prompt_template = """
### [INST]
Instruction: You are a Q&A assistant. Your goal is to answer questions as accurately as possible, based only on the instructions and context provided, without using prior knowledge. You answer in FRENCH.
Analyse the context carefully and provide a direct answer based on it. If the user says "Bonjour" or "Hello", your only answer will be: "Hi! comment puis-je vous aider?"
Answer in French only.

{context}
Vous devez répondre aux questions en français.
### QUESTION:
{question}
[/INST]
Answer in French only.
Vous devez répondre aux questions en français.
"""

repo_id = "mistralai/Mistral-7B-Instruct-v0.3"

# Create the LLM: instruction-tuned Mistral served through the Hugging Face Inference API,
# with a low temperature for near-deterministic answers
mistral_llm = HuggingFaceEndpoint(
    repo_id=repo_id, max_new_tokens=2048, temperature=0.05, huggingfacehub_api_token=st.secrets["HF_TOKEN"]
)

# Create the prompt; the retrieval QA chain fills in both {context} and {question}
prompt = PromptTemplate(
    input_variables=["context", "question"],
    template=prompt_template,
)

# Create QA chain
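# chain_type="stuff" concatenates the retrieved chunk(s) into {context} and makes a
# single completion call to the endpoint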
qa = RetrievalQA.from_chain_type(
    llm=mistral_llm,
    chain_type="stuff",
    retriever=retriever,
    chain_type_kwargs={"prompt": prompt},
)

# Streamlit UI setup
st.set_page_config(page_title="Alter-IA Chat", page_icon="🤖")

# Define function to handle user input and display chatbot response
def chatbot_response(user_input):
    response = qa.run(user_input)
    return response

# Define function to save feedback to CSV
def save_feedback(question, response, rating, comment):
    filename = 'feedback.csv'
    file_exists = os.path.isfile(filename)
    with open(filename, 'a', newline='', encoding='utf-8') as csvfile:
        fieldnames = ['question', 'response', 'rating', 'comment']
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        if not file_exists:
            writer.writeheader()
        writer.writerow({'question': question, 'response': response, 'rating': rating, 'comment': comment})

# Create columns for logos
col1, col2, col3 = st.columns([2, 3, 2])

with col1:
    st.image("Design 3_22.png", width=150, use_column_width=True)

with col3:
    st.image("Altereo logo 2023 original - eau et territoires durables.png", width=150, use_column_width=True)

# Add CSS for styling
st.markdown("""
    <style>
    .centered-text {
        text-align: center;
    }
    .centered-orange-text {
        text-align: center;
        color: darkorange;
    }
    </style>
    """, unsafe_allow_html=True)

# Center and color text
st.markdown('<h3 class="centered-text">🤖 AlteriaChat 🤖 </h3>', unsafe_allow_html=True)
st.markdown('<p class="centered-orange-text">"Votre Réponse à Chaque Défi Méthodologique "</p>', unsafe_allow_html=True)

# Input and button for user interaction
user_input = st.text_input("You:", "")
submit_button = st.button("Ask 📨")

# Store the question/answer in session_state so they survive the rerun triggered when
# the "Submit Feedback" button is clicked (st.button values do not persist across reruns,
# so a button nested under `if submit_button:` would otherwise never save feedback)
if submit_button:
    if user_input.strip() != "":
        st.session_state["question"] = user_input
        st.session_state["response"] = chatbot_response(user_input)
    else:
        st.warning("⚠️ Please enter a message.")

if "response" in st.session_state:
    st.markdown("### Bot:")
    st.text_area("", value=st.session_state["response"], height=600)

    # Feedback section
    st.markdown("### Évaluation de la réponse")
    rating = st.slider("Rating (1 to 5)", 1, 5, 3)
    comment = st.text_area("Your comment:", "")

    if st.button("Submit Feedback"):
        if comment.strip() != "":
            save_feedback(st.session_state["question"], st.session_state["response"], rating, comment)
            st.success("Thank you for your feedback!")
        else:
            st.warning("⚠️ Please enter a comment.")

# Motivational quote at the bottom
st.markdown("---")
st.markdown("La collaboration est la clé du succès. Chaque question trouve sa réponse, chaque défi devient une opportunité.")