File size: 5,265 Bytes
b4e5268
e241d15
8f64959
93afb85
55a8b20
dd507bb
4f4aca6
b4e5268
1fe0c9e
b321ba9
6a7d03a
1fe0c9e
 
6a7d03a
47025c6
 
 
 
 
 
 
 
 
 
d5caf79
 
b4e5268
b956157
1fe0c9e
b4e5268
 
47025c6
 
b4e5268
1fe0c9e
b4e5268
 
 
 
 
 
47025c6
1fe0c9e
b4e5268
0000cad
4f4aca6
47025c6
 
 
 
 
 
 
 
b4e5268
1fe0c9e
b4e5268
 
 
 
47025c6
b4e5268
1fe0c9e
b4e5268
47025c6
b4e5268
 
 
 
6812dc5
93afb85
ab55f29
 
647afad
ab55f29
4aa3f53
2b8b939
5d2d937
2b8b939
 
2c2fdad
2b8b939
 
2c2fdad
1fe0c9e
3e67ebc
417bcab
1fe0c9e
e575254
 
 
68be054
 
 
3e67ebc
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
68be054
3e67ebc
 
 
 
 
e575254
 
 
68be054
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
import os
import psycopg2
import streamlit as st
from datetime import datetime
from langchain_community.vectorstores import FAISS
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_huggingface import HuggingFaceEndpoint
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain, RetrievalQA
from huggingface_hub import login

# Authenticate with the Hugging Face Hub so the hosted endpoint is usable.
login(token=st.secrets["HF_TOKEN"])

# Load the FAISS index only once per browser session: Streamlit re-executes
# this whole script on every widget interaction, so st.session_state acts as
# a cache to avoid reloading the index (and the embedding model) each rerun.
if 'db' not in st.session_state:
    st.session_state.db = FAISS.load_local(
        "faiss_index",
        HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L12-v2'),
        # Required by recent langchain releases to unpickle a FAISS index.
        # Acceptable here because the index file ships with the app and is
        # not user-supplied input.
        allow_dangerous_deserialization=True
    )

# Maximal-marginal-relevance retriever over the cached index; k=1 returns a
# single context chunk per question.
retriever = st.session_state.db.as_retriever(
    search_type="mmr",
    search_kwargs={'k': 1}
)

# Prompt template in the Mistral [INST] format; answers must be in French.
prompt_template = """
### [INST]
Instruction: You are a Q&A assistant. Your goal is to answer questions as accurately as possible based on the instructions and context provided without using prior knowledge. You answer in FRENCH
Analyse carefully the context and provide a direct answer based on the context. If the user said Bonjour or Hello your only answer will be Hi! comment puis-je vous aider?
Answer in french only

{context}
Vous devez répondre aux questions en français.
### QUESTION:
{question}
[/INST]
Answer in french only
Vous devez répondre aux questions en français.
"""

repo_id = "mistralai/Mistral-7B-Instruct-v0.3"

# Instantiate the hosted LLM endpoint only once per session (same caching
# rationale as the FAISS index above).
if 'mistral_llm' not in st.session_state:
    st.session_state.mistral_llm = HuggingFaceEndpoint(
        repo_id=repo_id,
        max_length=2048,
        temperature=0.05,
        huggingfacehub_api_token=st.secrets["HF_TOKEN"]
    )

# BUG FIX: the template references BOTH {context} and {question}, but
# input_variables previously listed only "question", which breaks template
# validation / variable substitution when the "stuff" chain injects the
# retrieved context.
prompt = PromptTemplate(
    input_variables=["context", "question"],
    template=prompt_template,
)
# NOTE(review): llm_chain is never used below (RetrievalQA builds its own
# internal chain); kept to avoid breaking any external reference.
llm_chain = LLMChain(llm=st.session_state.mistral_llm, prompt=prompt)

# Retrieval-augmented QA chain: retrieve context, "stuff" it into the prompt,
# and query the LLM.
qa = RetrievalQA.from_chain_type(
    llm=st.session_state.mistral_llm,
    chain_type="stuff",
    retriever=retriever,
    chain_type_kwargs={"prompt": prompt},
)



def chatbot_response(user_input):
    """Run the RetrievalQA chain on *user_input* and return the answer text."""
    return qa.run(user_input)

# Create columns for logos
col1, col2, col3 = st.columns([2, 3, 2])

with col1:
    st.image("Design 3_22.png", width=150, use_column_width=True)

with col3:
    st.image("Altereo logo 2023 original - eau et territoires durables.png", width=150, use_column_width=True)


st.markdown('<h3 class="centered-text">🤖 AlteriaChat 🤖 </h3>', unsafe_allow_html=True)
st.markdown('<p class="centered-orange-text">"Votre Réponse à Chaque Défi Méthodologique "</p>', unsafe_allow_html=True)

# Input and button for user interaction
user_input = st.text_input("You:", "")
submit_button = st.button("Ask 📨")

# BUG FIX: the Ask button previously did nothing and `bot_response` was never
# defined, causing a NameError in the feedback section below. Compute the
# answer on click and cache it in session_state so it survives Streamlit's
# script re-runs (e.g. when the user then interacts with the rating widget).
if submit_button and user_input:
    st.session_state["bot_response"] = chatbot_response(user_input)
bot_response = st.session_state.get("bot_response", "")

# NOTE(review): the duplicate `import os / streamlit / psycopg2 / datetime`
# statements that used to sit here were redundant — all four modules are
# already imported at the top of this file.

# Function to create a connection to PostgreSQL
def create_connection():
    """Open a new psycopg2 connection configured from DB_* env variables.

    Reads DB_HOST, DB_NAME, DB_USER, DB_PASSWORD and DB_PORT from the
    environment. Raises psycopg2.OperationalError if the server is
    unreachable or the credentials are wrong; the caller owns closing the
    returned connection.
    """
    return psycopg2.connect(
        host=os.getenv("DB_HOST"),
        database=os.getenv("DB_NAME"),
        user=os.getenv("DB_USER"),
        password=os.getenv("DB_PASSWORD"),
        port=os.getenv("DB_PORT")
    )

# Function to create the feedback table if it doesn't exist
def create_feedback_table(conn):
    """Create the `feedback` table on first run; no-op if it already exists.

    Commits the DDL on *conn* and closes the cursor it opened.
    """
    ddl = """
        CREATE TABLE IF NOT EXISTS feedback (
            id SERIAL PRIMARY KEY,
            user_input TEXT NOT NULL,
            bot_response TEXT NOT NULL,
            rating INT CHECK (rating >= 1 AND rating <= 5),
            comment TEXT,
            timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP
        );
    """
    cur = conn.cursor()
    cur.execute(ddl)
    conn.commit()
    cur.close()

# Function to insert feedback into the database
def insert_feedback(conn, user_input, bot_response, rating, comment):
    """Persist one feedback row, stamped with the current local time.

    Uses a parameterized INSERT (safe against SQL injection), commits on
    *conn*, and closes the cursor it opened.
    """
    sql = (
        "INSERT INTO feedback (user_input, bot_response, rating, comment, "
        "timestamp) VALUES (%s, %s, %s, %s, %s)"
    )
    row = (user_input, bot_response, rating, comment, datetime.now())
    cur = conn.cursor()
    cur.execute(sql, row)
    conn.commit()
    cur.close()

# Initialize connection and create the table if necessary
conn = create_connection()
create_feedback_table(conn)

# Streamlit app UI and logic
st.markdown("## Rate your experience")

# Create a star-based rating system using radio buttons
rating = st.radio(
    "Rating",
    options=[1, 2, 3, 4, 5],
    format_func=lambda x: "★" * x  # Display stars based on the rating
)

# Text area for leaving a comment
comment = st.text_area("Leave a comment")

# BUG FIX: `bot_response` was referenced here without ever being defined,
# which raised a NameError on every run. Read the last answer cached by the
# chat section from session_state, defaulting to an empty string so this
# section renders even before any question has been asked.
bot_response = st.session_state.get("bot_response", "")

# Display bot response and user input for context
st.markdown("### Your Question:")
st.write(user_input)
st.markdown("### Bot's Response:")
st.write(bot_response)

# Submit feedback (rating always has a value, so the comment is the
# effective gate).
if st.button("Submit Feedback"):
    if rating and comment:
        insert_feedback(conn, user_input, bot_response, rating, comment)
        st.success("Thank you for your feedback!")
    else:
        st.warning("Please provide a rating and a comment.")

# Close the connection when done
conn.close()


# Motivational quote at the bottom (rendered on every run, after the
# feedback section).
st.markdown("---")
st.markdown("La collaboration est la clé du succès. Chaque question trouve sa réponse, chaque défi devient une opportunité.")