# temp/Capstone_v2/app4.py
import os

import streamlit as st
from dotenv import load_dotenv
from langchain.chains import RetrievalQA
from langchain.prompts import PromptTemplate
from langchain_community.embeddings.fastembed import FastEmbedEmbeddings
from langchain_community.vectorstores import Qdrant
from langchain_groq import ChatGroq
from qdrant_client import QdrantClient
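
# Assumed Python dependencies (inferred from the imports above): streamlit, python-dotenv,
# langchain, langchain-community, langchain-groq, qdrant-client, fastembed.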
# Load environment variables
load_dotenv()
groq_api_key = os.getenv("GROQ_API_KEY")
qdrant_url = os.getenv("QDRANT_URL")
qdrant_api_key = os.getenv("QDRANT_API_KEY")
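
# A sample .env for this app might look like (placeholder values, not real credentials):
#   GROQ_API_KEY=...
#   QDRANT_URL=...
#   QDRANT_API_KEY=...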
# Function to set custom prompt
def set_custom_prompt():
    custom_prompt_template = """Use the following pieces of information to answer the user's question.
If you don't know the answer, just say that you don't know, don't try to make up an answer.
Context: {context}
Question: {question}
Only return the helpful answer below and nothing else.
Helpful answer:
"""
    prompt = PromptTemplate(template=custom_prompt_template,
                            input_variables=['context', 'question'])
    return prompt
# Function to initialize QA bot
def qa_bot():
    # Initialize embeddings, the Qdrant client, the vector store, and the chat model
    embeddings = FastEmbedEmbeddings()
    client = QdrantClient(api_key=qdrant_api_key, url=qdrant_url)
    vectorstore = Qdrant(client=client, embeddings=embeddings, collection_name="rag")
    chat_model = ChatGroq(temperature=0, model_name="mixtral-8x7b-32768", groq_api_key=groq_api_key)
    qa_prompt = set_custom_prompt()

    # Build a "stuff" RetrievalQA chain over the top-2 retrieved chunks
    qa_chain = RetrievalQA.from_chain_type(
        llm=chat_model,
        chain_type="stuff",
        retriever=vectorstore.as_retriever(search_kwargs={'k': 2}),
        return_source_documents=True,
        chain_type_kwargs={'prompt': qa_prompt}
    )
    return qa_chain
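
# Note: the "rag" collection is assumed to already exist in Qdrant and to have been
# populated elsewhere (e.g. by a separate ingestion script that parses documents with
# Llamaparse and embeds them with FastEmbed); this app only queries it.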
# Main function to run Streamlit app
def main():
    st.title("Chat With Documents")
    st.write("Welcome to Chat With Documents using Llamaparse, LangChain, Qdrant, and models from Groq.")

    # Initialize QA bot
    chain = qa_bot()

    # Start chat
    user_input = st.text_input("You:", "")
    if st.button("Send") and user_input:
        # Run the chain synchronously; RetrievalQA expects its input under the "query" key
        res = chain.invoke({"query": user_input})
        answer = res["result"]
        source_documents = res["source_documents"]

        # Display answer and source documents
        st.write("Bot:", answer)
        if source_documents:
            st.write("Source Documents:")
            for source_doc in source_documents:
                st.write(source_doc.page_content)

if __name__ == "__main__":
    main()
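
# To run locally (standard Streamlit entry point):
#   streamlit run app4.py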