import os
import streamlit as st
import pandas as pd
from langchain_community.vectorstores import FAISS
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_huggingface import HuggingFaceEndpoint
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from huggingface_hub import login
from langchain_community.document_loaders import TextLoader, PyPDFLoader
from langchain_text_splitters import CharacterTextSplitter
from langchain.chains import RetrievalQA
# Authenticate with Hugging Face
login(token=st.secrets["HF_TOKEN"])
# Load FAISS index (built with the same embedding model)
db = FAISS.load_local(
    "faiss_index",
    HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L12-v2'),
    allow_dangerous_deserialization=True,
)
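# The "faiss_index" folder is assumed to have been built offline with the same
# embedding model. A minimal sketch (the source document name is hypothetical):
#
#   docs = PyPDFLoader("methodologie.pdf").load()
#   chunks = CharacterTextSplitter(chunk_size=1000, chunk_overlap=100).split_documents(docs)
#   FAISS.from_documents(
#       chunks,
#       HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L12-v2'),
#   ).save_local("faiss_index")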
# Set up retriever (MMR search returning a single chunk)
retriever = db.as_retriever(search_type="mmr", search_kwargs={'k': 1})
# Prompt template for the LLM
prompt_template = """
### [INST]
Instruction: You are a Q&A assistant. Your goal is to answer questions as accurately as possible based on the instructions and context provided, without using prior knowledge. You answer in FRENCH.
Carefully analyse the context and provide a direct answer based on it. If the user says Bonjour or Hello, your only answer will be: Hi! comment puis-je vous aider?
Answer in French only.
{context}
Vous devez répondre aux questions en français.
### QUESTION:
{question}
[/INST]
Answer in French only.
Vous devez répondre aux questions en français.
"""
# Set up the LLM from Hugging Face
repo_id = "mistralai/Mistral-7B-Instruct-v0.3"
mistral_llm = HuggingFaceEndpoint(
    repo_id=repo_id,
    max_new_tokens=2048,
    temperature=0.05,
    huggingfacehub_api_token=st.secrets["HF_TOKEN"],
)
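# Optional sanity check (not part of the original app): the endpoint can be
# exercised directly before wiring it into a chain, e.g.
#   print(mistral_llm.invoke("Bonjour"))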
# Create prompt from prompt template (it uses both {context} and {question})
prompt = PromptTemplate(
    input_variables=["context", "question"],
    template=prompt_template,
)

# Create LLM chain (not used by the RetrievalQA chain below, which builds its own)
llm_chain = LLMChain(llm=mistral_llm, prompt=prompt)
# Set up RetrievalQA chain ("stuff" packs the retrieved chunks into {context})
qa = RetrievalQA.from_chain_type(
    llm=mistral_llm,
    chain_type="stuff",
    retriever=retriever,
    chain_type_kwargs={"prompt": prompt},
)
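# A minimal usage sketch of the assembled chain (the question is hypothetical):
#   qa.run("Quelle est la méthodologie recommandée ?")
# run() retrieves the most relevant chunk, fills {context} and {question} in the
# prompt above, and returns the model's answer as a string.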
# Streamlit interface setup
st.set_page_config(page_title="Alter-IA Chat", page_icon="🤖")
# Function to handle user input and return the chatbot response
def chatbot_response(user_input):
    response = qa.run(user_input)
    return response
# Function to save user feedback
def save_feedback(user_input, bot_response, rating, comment):
    feedback = {
        "user_input": user_input,
        "bot_response": bot_response,
        "rating": rating,
        "comment": comment
    }
    # Check if the feedback file exists
    feedback_file = "feedback.csv"
    if os.path.exists(feedback_file):
        # Load existing feedback and append the new row
        # (DataFrame.append was removed in pandas 2.x, so use pd.concat)
        feedback_df = pd.read_csv(feedback_file)
        feedback_df = pd.concat([feedback_df, pd.DataFrame([feedback])], ignore_index=True)
    else:
        # Create a new dataframe for the feedback
        feedback_df = pd.DataFrame([feedback])
    # Save feedback to CSV
    feedback_df.to_csv(feedback_file, index=False)
# Create columns for logos
col1, col2, col3 = st.columns([2, 3, 2])
with col1:
    st.image("Design 3_22.png", width=150, use_column_width=True)
with col3:
    st.image("Altereo logo 2023 original - eau et territoires durables.png", width=150, use_column_width=True)
# Adding centered header and subtitle
st.markdown("""
    <style>
    .centered-text { text-align: center; }
    .centered-orange-text { text-align: center; color: darkorange; }
    </style>
    """, unsafe_allow_html=True)
st.markdown('<h3 class="centered-text">🤖 AlteriaChat 🤖</h3>', unsafe_allow_html=True)
st.markdown('<p class="centered-orange-text">"Votre Réponse à Chaque Défi Méthodologique"</p>', unsafe_allow_html=True)
# Input and button for user interaction
user_input = st.text_input("You:", "")
submit_button = st.button("Ask 📨")
# Handle user input and display the response. The answer is kept in
# st.session_state so that it (and the feedback form) survives the rerun
# triggered by the "Submit Feedback" button.
if submit_button and user_input.strip():
    st.session_state["last_question"] = user_input
    st.session_state["bot_response"] = chatbot_response(user_input)

if "bot_response" in st.session_state:
    st.markdown("### Bot:")
    st.text_area("", value=st.session_state["bot_response"], height=300)

    # Star rating system
    st.markdown("### How would you rate the response?")
    rating = st.slider("Rate from 1 star to 5 stars", min_value=1, max_value=5, value=3)

    # Comment section
    comment = st.text_area("Any comments or suggestions for improvement?", "")

    # Save feedback when the user submits a rating and comment
    if st.button("Submit Feedback"):
        save_feedback(st.session_state["last_question"], st.session_state["bot_response"], rating, comment)
        st.success("Thank you for your feedback!")
# Motivational quote at the bottom
st.markdown("---")
st.markdown("La collaboration est la clé du succès. Chaque question trouve sa réponse, chaque défi devient une opportunité.")
# Section for the developer to review feedback
if st.checkbox("Show Feedback (Developer Only)"):
    if os.path.exists("feedback.csv"):
        feedback_df = pd.read_csv("feedback.csv")
        st.dataframe(feedback_df)
    else:
        st.warning("No feedback available yet.")