import os
import csv
import streamlit as st
from langchain_community.vectorstores import FAISS
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_huggingface import HuggingFaceEndpoint
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain, RetrievalQA
from huggingface_hub import login
# Login to Hugging Face
login(token=st.secrets["HF_TOKEN"])
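# NOTE: HF_TOKEN must be defined as a secret of the Space (or in .streamlit/secrets.toml
# when running locally); it is reused below for the inference endpoint and the feedback repo.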
# Load FAISS index and ensure it only happens once
if 'db' not in st.session_state:
    st.session_state.db = FAISS.load_local(
        "faiss_index",
        HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L12-v2'),
        allow_dangerous_deserialization=True
    )
# Use session state for retriever
retriever = st.session_state.db.as_retriever(
    search_type="mmr",
    search_kwargs={'k': 1}
)
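# NOTE: "mmr" (maximal marginal relevance) re-ranks results for diversity as well as
# relevance; with k=1 only a single chunk reaches the prompt, so raising k would give
# the model more context at the cost of a longer prompt.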
# Define prompt template
prompt_template = """
### [INST]
Instruction: You are a Q&A assistant. Your goal is to answer questions as accurately as possible based on the instructions and context provided, without using prior knowledge. You answer in FRENCH.
Analyse the context carefully and provide a direct answer based on it. If the user says Bonjour or Hello, your only answer must be: Hi! comment puis-je vous aider?
Answer in French only.
{context}
Vous devez répondre aux questions en français.
### QUESTION:
{question}
[/INST]
"""
repo_id = "mistralai/Mistral-7B-Instruct-v0.3"
# Load the model only once
if 'mistral_llm' not in st.session_state:
    st.session_state.mistral_llm = HuggingFaceEndpoint(
        repo_id=repo_id,
        max_new_tokens=2048,  # the text-generation Inference API expects max_new_tokens, not max_length
        temperature=0.05,
        huggingfacehub_api_token=st.secrets["HF_TOKEN"]
    )
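# NOTE: given a repo_id, HuggingFaceEndpoint calls the hosted (serverless) Inference API;
# the very low temperature keeps answers close to the retrieved context.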
# Create prompt and LLM chain
prompt = PromptTemplate(
    input_variables=["context", "question"],  # both variables appear in the template above
    template=prompt_template,
)
llm_chain = LLMChain(llm=st.session_state.mistral_llm, prompt=prompt)  # not used by the RetrievalQA chain below
# Create QA chain
qa = RetrievalQA.from_chain_type(
    llm=st.session_state.mistral_llm,
    chain_type="stuff",
    retriever=retriever,
    chain_type_kwargs={"prompt": prompt},
)
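# NOTE: the "stuff" chain concatenates the retrieved documents into the prompt's
# {context} variable and sends a single call to the LLM.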
# Streamlit UI setup
st.set_page_config(page_title="Alter-IA Chat", page_icon="🤖")
# Define function to handle user input and display chatbot response
def chatbot_response(user_input):
    response = qa.run(user_input)
    return response
from huggingface_hub import Repository  # os, csv and streamlit are already imported above
# Initialize the feedback repository (a local clone of this Space's repo)
feedback_repo_id = "mery22/testing"  # Replace with your repo ID
repo = Repository(local_dir=".", clone_from=feedback_repo_id, token=st.secrets["HF_TOKEN"])
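# NOTE: Repository keeps a local git clone and push_to_hub() commits and pushes it.
# Recent versions of huggingface_hub deprecate Repository; HfApi().upload_file() is
# the suggested replacement if this ever stops working.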
# Define function to save feedback to CSV
def save_feedback(question, response, rating, comment):
    try:
        filename = 'feedback.csv'
        file_exists = os.path.isfile(filename)
        # Open the CSV file and append the new feedback row
        with open(filename, 'a', newline='', encoding='utf-8') as csvfile:
            fieldnames = ['question', 'response', 'rating', 'comment']
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            if not file_exists:
                writer.writeheader()
            writer.writerow({'question': question, 'response': response, 'rating': rating, 'comment': comment})
        # Push the updated file to the Hugging Face repository
        repo.push_to_hub(commit_message="Updated feedback")
        st.success("Thank you for your feedback! It has been saved.")
    except Exception as e:
        st.error(f"Error saving feedback: {e}")
# Example usage (kept for manual testing; left commented out so fake feedback is not
# saved and pushed on every app run):
# save_feedback("What is the capital of France?", "The capital of France is Paris.", 5, "Good response!")
# Optionally, display the file content to confirm it was written correctly
if st.button("View Feedback File"):
    try:
        with open('feedback.csv', 'r', encoding='utf-8') as csvfile:  # same path save_feedback writes to
            st.text(csvfile.read())
    except FileNotFoundError:
        st.error("Feedback file not found.")
# Use session state to store user input, bot response, rating, and comment
if 'user_input' not in st.session_state:
    st.session_state.user_input = ""
if 'bot_response' not in st.session_state:
    st.session_state.bot_response = ""
if 'rating' not in st.session_state:
    st.session_state.rating = 3  # Default rating
if 'comment' not in st.session_state:
    st.session_state.comment = ""
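# NOTE: Streamlit re-runs the whole script on every interaction, so these session_state
# defaults are what keep the question, answer, and feedback widgets populated across reruns.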
# Create columns for logos
col1, col2, col3 = st.columns([2, 3, 2])
with col1:
    st.image("Design 3_22.png", width=150, use_column_width=True)
with col3:
    st.image("Altereo logo 2023 original - eau et territoires durables.png", width=150, use_column_width=True)
# Add CSS for styling
st.markdown("""
<style>
.centered-text {
text-align: center;
}
.centered-orange-text {
text-align: center;
color: darkorange;
}
</style>
""", unsafe_allow_html=True)
# Center and color text
st.markdown('<h3 class="centered-text">🤖 AlteriaChat 🤖 </h3>', unsafe_allow_html=True)
st.markdown('<p class="centered-orange-text">"Votre Réponse à Chaque Défi Méthodologique "</p>', unsafe_allow_html=True)
# Input and button for user interaction
st.session_state.user_input = st.text_input("You:", st.session_state.user_input)
if st.button("Ask 📨"):
if st.session_state.user_input.strip() != "":
st.session_state.bot_response = chatbot_response(st.session_state.user_input)
if st.session_state.bot_response:
st.markdown("### Bot:")
st.text_area("", value=st.session_state.bot_response, height=600)
# Feedback Section
st.markdown("### Évaluation de la réponse")
st.session_state.rating = st.slider("Rating (1 to 5)", 1, 5, st.session_state.rating)
st.session_state.comment = st.text_area("Your comment:", st.session_state.comment)
if st.button("Submit Feedback"):
    if st.session_state.comment.strip() != "":
        save_feedback(st.session_state.user_input, st.session_state.bot_response, st.session_state.rating, st.session_state.comment)
    else:
        st.warning("⚠️ Please enter a comment.")
# Motivational quote at the bottom
st.markdown("---")
st.markdown("La collaboration est la clé du succès. Chaque question trouve sa réponse, chaque défi devient une opportunité.")