Spaces:
Sleeping
Sleeping
File size: 5,886 Bytes
b4e5268 1fe0c9e 8f64959 55a8b20 dd507bb 4f4aca6 b4e5268 1fe0c9e b321ba9 6a7d03a 1fe0c9e 6a7d03a 47025c6 d5caf79 b4e5268 b956157 1fe0c9e b4e5268 47025c6 b4e5268 1fe0c9e b4e5268 47025c6 1fe0c9e b4e5268 0000cad 4f4aca6 47025c6 b4e5268 1fe0c9e b4e5268 47025c6 b4e5268 1fe0c9e b4e5268 47025c6 b4e5268 6812dc5 68be054 0b0e73b ab55f29 647afad ab55f29 4aa3f53 2b8b939 5d2d937 2b8b939 68be054 2b8b939 68be054 1fe0c9e 68be054 598d787 ffdd294 417bcab ffdd294 417bcab 68be054 417bcab 1fe0c9e e575254 68be054 e575254 68be054 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 |
import os
import csv
import streamlit as st
from langchain_community.vectorstores import FAISS
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_huggingface import HuggingFaceEndpoint
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain, RetrievalQA
from huggingface_hub import login
# Authenticate against the Hugging Face Hub once at startup.
login(token=st.secrets["HF_TOKEN"])

# Load the FAISS index a single time per session: Streamlit reruns the whole
# script on every widget interaction, so the loaded index is cached in
# st.session_state instead of being re-read from disk each run.
if "db" not in st.session_state:
    embeddings = HuggingFaceEmbeddings(
        model_name="sentence-transformers/all-MiniLM-L12-v2"
    )
    st.session_state.db = FAISS.load_local(
        "faiss_index",
        embeddings,
        allow_dangerous_deserialization=True,
    )

# MMR retriever over the cached index, returning the single best chunk (k=1).
retriever = st.session_state.db.as_retriever(
    search_type="mmr", search_kwargs={"k": 1}
)
# Prompt template for the QA chain. Note: the template interpolates both
# {context} (retrieved documents) and {question} (user input); the repeated
# French-only instructions are intentional reinforcement for the model.
prompt_template = """
### [INST]
Instruction: You are a Q&A assistant. Your goal is to answer questions as accurately as possible based on the instructions and context provided without using prior knowledge. You answer in FRENCH
Analyse carefully the context and provide a direct answer based on the context. If the user said Bonjour or Hello your only answer will be Hi! comment puis-je vous aider?
Answer in french only
{context}
Vous devez répondre aux questions en français.
### QUESTION:
{question}
[/INST]
Answer in french only
Vous devez répondre aux questions en français.
"""
# Remote endpoint model used for generation.
repo_id = "mistralai/Mistral-7B-Instruct-v0.3"

# Instantiate the remote LLM only once per session (Streamlit reruns the
# whole script on every interaction).
if 'mistral_llm' not in st.session_state:
    st.session_state.mistral_llm = HuggingFaceEndpoint(
        repo_id=repo_id,
        # NOTE(review): HuggingFaceEndpoint's documented generation-length
        # parameter is `max_new_tokens`; confirm `max_length` is actually
        # honored by the endpoint before relying on it.
        max_length=2048,
        temperature=0.05,
        huggingfacehub_api_token=st.secrets["HF_TOKEN"]
    )

# Prompt for the retrieval QA chain. The template interpolates both
# {context} and {question}, so BOTH must be declared as input variables —
# the original declared only "question", leaving {context} undeclared.
prompt = PromptTemplate(
    input_variables=["context", "question"],
    template=prompt_template,
)

# (Removed an unused `LLMChain` that was built here but never invoked; it
# would also have failed at runtime because its prompt requires {context},
# which a bare LLMChain call does not supply.)

# "stuff" chain: retrieved documents are injected verbatim into {context}.
qa = RetrievalQA.from_chain_type(
    llm=st.session_state.mistral_llm,
    chain_type="stuff",
    retriever=retriever,
    chain_type_kwargs={"prompt": prompt},
)
from datetime import datetime

# Streamlit page configuration (tab title and icon).
st.set_page_config(page_title="Alter-IA Chat", page_icon="🤖")


def chatbot_response(user_input):
    """Run the retrieval QA chain on *user_input* and return its answer."""
    return qa.run(user_input)
# Header logos: one in each outer column, with an empty middle spacer.
left_col, _spacer, right_col = st.columns([2, 3, 2])
with left_col:
    # Adjust image path and size as needed.
    st.image("Design 3_22.png", width=150, use_column_width=True)
with right_col:
    st.image("Altereo logo 2023 original - eau et territoires durables.png", width=150, use_column_width=True)
# Page-wide CSS: centered headings, orange tagline, and a right-to-left
# flex star-rating widget (reversed row so the :checked ~ sibling selector
# highlights all stars up to the selected one in gold).
st.markdown("""
<style>
.centered-text {
text-align: center;
}
.centered-orange-text {
text-align: center;
color: darkorange;
}
.star-rating {
display: flex;
flex-direction: row-reverse;
justify-content: center;
cursor: pointer;
}
.star-rating input[type="radio"] {
display: none;
}
.star-rating label {
font-size: 2em;
color: #ddd;
padding: 0 5px;
transition: color 0.3s;
}
.star-rating input[type="radio"]:checked ~ label {
color: gold;
}
.star-rating input[type="radio"]:hover ~ label {
color: gold;
}
</style>
""", unsafe_allow_html=True)
# App title and French tagline (styled by the CSS classes defined above).
st.markdown('<h3 class="centered-text">🤖 AlteriaChat 🤖 </h3>', unsafe_allow_html=True)
st.markdown('<p class="centered-orange-text">"Votre Réponse à Chaque Défi Méthodologique "</p>', unsafe_allow_html=True)
# Question input and submit button; both names are read by the main
# interaction flow further down the script.
user_input = st.text_input("You:", "")
submit_button = st.button("Ask 📨")
# --- Main interaction -------------------------------------------------------
if submit_button:
    if user_input.strip() != "":
        # Persist the answer across Streamlit reruns. The original code
        # rendered the feedback form only inside `if submit_button:`, but
        # clicking the nested "Submit Feedback" button triggers a rerun in
        # which submit_button is False — so the feedback branch was
        # unreachable and feedback was never written to disk.
        st.session_state.bot_response = chatbot_response(user_input)
    else:
        st.warning("⚠ Please enter a message.")

# Render the answer and the feedback form whenever a response exists,
# regardless of which button caused the current rerun.
if st.session_state.get("bot_response"):
    st.markdown("### Bot:")
    st.text_area("", value=st.session_state.bot_response, height=300)

    # Add rating and comment section
    st.markdown("---")
    st.markdown("#### Rate the Response:")

    # Custom star rating HTML. Display-only: plain HTML radio inputs cannot
    # report their value back to Python, so the numeric rating actually
    # saved comes from the text input below.
    rating_html = """
<div class="star-rating">
  <input type="radio" id="5-stars" name="rating" value="5"><label for="5-stars">★</label>
  <input type="radio" id="4-stars" name="rating" value="4"><label for="4-stars">★</label>
  <input type="radio" id="3-stars" name="rating" value="3" checked><label for="3-stars">★</label>
  <input type="radio" id="2-stars" name="rating" value="2"><label for="2-stars">★</label>
  <input type="radio" id="1-star" name="rating" value="1"><label for="1-star">★</label>
</div>
"""
    st.markdown(rating_html, unsafe_allow_html=True)

    rating = st.text_input("Selected Rating:", value="3", key="rating_input", label_visibility="hidden")
    comment = st.text_area("Your Comment:")

    # Submit feedback
    if st.button("Submit Feedback"):
        if comment.strip() == "":
            st.warning("⚠ Please provide a comment.")
        else:
            st.success("Thank you for your feedback!")
            # Append feedback locally; explicit UTF-8 so French comments are
            # written consistently on any platform. (Replace with a database
            # or durable store for a real deployment.)
            with open("feedback.txt", "a", encoding="utf-8") as f:
                f.write(f"{datetime.now()} | Rating: {rating} | Comment: {comment}\n")

# Motivational quote at the bottom
st.markdown("---")
st.markdown("La collaboration est la clé du succès. Chaque question trouve sa réponse, chaque défi devient une opportunité.")