import json

import gspread
import streamlit as st
from oauth2client.service_account import ServiceAccountCredentials

from huggingface_hub import login
from langchain.chains import RetrievalQA
from langchain.prompts import PromptTemplate
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_huggingface import HuggingFaceEndpoint
# Authenticate with Hugging Face
login(token=st.secrets["HF_TOKEN"])
# Load FAISS index (the embedding model must match the one used to build the index)
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L12-v2")
db = FAISS.load_local("faiss_index", embeddings, allow_dangerous_deserialization=True)
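# Note: this app expects a prebuilt "faiss_index" directory. A minimal sketch of how
# such an index could be built offline (the source file "docs.pdf" and the chunking
# parameters below are assumptions, not part of this app):
#
#   from langchain_community.document_loaders import PyPDFLoader
#   from langchain_text_splitters import CharacterTextSplitter
#   docs = PyPDFLoader("docs.pdf").load()
#   chunks = CharacterTextSplitter(chunk_size=1000, chunk_overlap=100).split_documents(docs)
#   FAISS.from_documents(chunks, embeddings).save_local("faiss_index")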
# Set up retriever
retriever = db.as_retriever(search_type="mmr", search_kwargs={'k': 1})
# Prompt template for the LLM
prompt_template = """
### [INST]
Instruction: You are a Q&A assistant. Your goal is to answer questions as accurately as possible, based only on the context provided and without using prior knowledge.
Analyse the context carefully and give a direct answer based on it. If the user says "Bonjour" or "Hello", your only answer is "Hi! comment puis-je vous aider?".
Answer in French only. Vous devez répondre aux questions en français.

{context}

### QUESTION:
{question}

[/INST]
"""
# Set up the LLM via the Hugging Face Inference API
repo_id = "mistralai/Mistral-7B-Instruct-v0.3"
mistral_llm = HuggingFaceEndpoint(
    repo_id=repo_id,
    max_new_tokens=2048,  # cap on the number of generated tokens
    temperature=0.05,  # low temperature for near-deterministic answers
    huggingfacehub_api_token=st.secrets["HF_TOKEN"],
)
# Create prompt from prompt template (both variables appear in the template)
prompt = PromptTemplate(
    input_variables=["context", "question"],
    template=prompt_template,
)
# Set up the RetrievalQA chain
qa = RetrievalQA.from_chain_type(
llm=mistral_llm,
chain_type="stuff",
retriever=retriever,
chain_type_kwargs={"prompt": prompt},
)
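# The "stuff" chain type concatenates the retrieved document(s) into the prompt's
# {context} placeholder and sends a single request to the LLM.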
# Streamlit interface setup
st.set_page_config(page_title="Alter-IA Chat", page_icon="🤖")
# Generate the chatbot response for a user question via the RetrievalQA chain
def chatbot_response(user_input):
    response = qa.run(user_input)
    return response
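# Note: qa.run(...) returns only the final answer string; it does not expose the
# retrieved source documents.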
# Load Google service account credentials from Hugging Face secrets
GOOGLE_SERVICE_ACCOUNT_JSON = st.secrets["GOOGLE_SERVICE_ACCOUNT_JSON"]
# Google Sheets setup
scope = ["https://www.googleapis.com/auth/spreadsheets", "https://www.googleapis.com/auth/drive"]
service_account_info = json.loads(GOOGLE_SERVICE_ACCOUNT_JSON)
creds = ServiceAccountCredentials.from_json_keyfile_dict(service_account_info, scope)
client = gspread.authorize(creds)
sheet = client.open("users feedback").sheet1 # Replace with your Google Sheet name
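# Note: the spreadsheet must be shared with the service account's client_email
# (from the credentials JSON); otherwise gspread cannot open or write to it.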
# Function to save user feedback to Google Sheets
def save_feedback(user_input, bot_response, rating, comment):
feedback = [user_input, bot_response, rating, comment]
sheet.append_row(feedback)
# Streamlit app layout
st.markdown("<h3 style='text-align: center;'>🤖 Chatbot Feedback 🤖</h3>", unsafe_allow_html=True)
user_input = st.text_input("You:")
# Generate the chatbot's answer once a question has been entered
bot_response = chatbot_response(user_input) if user_input.strip() else ""
st.markdown(f"**Bot:** {bot_response}")
st.markdown("### Rate the response:")
rating = st.selectbox("Rating", [1, 2, 3, 4, 5], label_visibility="collapsed")
st.markdown("### Leave a comment:")
comment = st.text_area("Comment", label_visibility="collapsed")
if st.button("Submit"):
if user_input.strip() and comment.strip():
save_feedback(user_input, bot_response, rating, comment)
st.success("Thank you for your feedback!")
else:
st.warning("Please provide both input and comment.")
st.markdown("---")
st.markdown("Collaboration is the key to success. Each question finds its answer, each challenge becomes an opportunity.")