import streamlit as st
# from dotenv import load_dotenv
from langchain_community.document_loaders.pdf import PyPDFDirectoryLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain_community.embeddings import HuggingFaceInstructEmbeddings
from langchain_community.vectorstores import FAISS
from langchain.chains import ConversationalRetrievalChain
from langchain_community.llms import HuggingFaceHub
from langchain.memory import ConversationBufferMemory

# load_dotenv()

def make_vectorstore(embeddings):
    # load every PDF found in the "data" folder at the base directory
    loader = PyPDFDirectoryLoader("data")
    documents = loader.load()

    # split the documents into chunks of 1400 characters with no overlap
    text_splitter = CharacterTextSplitter(chunk_size=1400, chunk_overlap=0)
    texts = text_splitter.split_documents(documents)

    # build a FAISS vector store from the chunks
    docsearch = FAISS.from_documents(texts, embeddings)
    return docsearch
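
# A minimal sketch (not part of the original app) of persisting the FAISS
# index to disk so the PDFs are not re-embedded on every run. The
# "faiss_index" directory name is an assumption; save_local/load_local are
# the FAISS persistence helpers in langchain_community.
def make_vectorstore_persistent(embeddings, index_dir="faiss_index"):
    import os
    if os.path.isdir(index_dir):
        # newer langchain_community versions require opting in to pickle loading
        return FAISS.load_local(index_dir, embeddings,
                                allow_dangerous_deserialization=True)
    docsearch = make_vectorstore(embeddings)
    docsearch.save_local(index_dir)
    return docsearch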

def get_conversation(vectorstore):
    # memory object that accumulates the chat history across turns
    memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)

    # conversational retrieval chain; ConversationalRetrievalChain is
    # constructed with from_llm (it has no from_chain_type constructor)
    conversation_chain = ConversationalRetrievalChain.from_llm(
        llm=HuggingFaceHub(repo_id="google/flan-t5-xxl",
                           model_kwargs={"temperature": 0.5, "max_length": 512}),
        chain_type="stuff",
        retriever=vectorstore.as_retriever(),
        memory=memory)
    return conversation_chain
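
# A minimal sketch (assumption, not in the original) of caching the heavy
# objects across Streamlit reruns: st.cache_resource builds the chain once
# and reuses it for every rerun and session. The instructor model name
# "hkunlp/instructor-xl" is an assumption.
@st.cache_resource
def build_cached_chain():
    embeddings = HuggingFaceInstructEmbeddings(model_name="hkunlp/instructor-xl")
    return get_conversation(make_vectorstore(embeddings))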

def get_response(conversation_chain, query):
    # run() returns the chain's answer as a plain string
    response = conversation_chain.run(query)
    return response
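
# A minimal sketch (assumption) of also surfacing the retrieved chunks: the
# chain would need return_source_documents=True (and the memory configured
# with output_key="answer") at construction time for "source_documents" to
# appear in the result dict.
def get_response_with_sources(conversation_chain, query):
    result = conversation_chain({"question": query})
    return result["answer"], result.get("source_documents", [])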

def main():
    st.title("BetterZila RAG Enabled LLM")

    # note: HuggingFaceInstructEmbeddings takes model_name (not repo_id) and
    # expects an INSTRUCTOR-style sentence-transformers model
    embeddings = HuggingFaceInstructEmbeddings(model_name="google/t5-v1_1-xl")
    vectorstore = make_vectorstore(embeddings)
    conversation_chain = get_conversation(vectorstore)

    queries = [
        "Can you give me an example from history where the enemy was crushed totally from the book?",
        "What's the point of making myself less accessible?",
        "Can you tell me the story of Queen Elizabeth I from this 48 laws of power book?",
    ]
    for query in queries:
        st.subheader(f"Query: {query}")
        # run() returns a string, so it can be written directly
        response = get_response(conversation_chain, query)
        st.write(response)
    st.success("Responses generated!")


if __name__ == "__main__":
    main()