Pratik Dwivedi committed on
Commit 3cf26ac
1 Parent(s): 80effc2
Files changed (1)
  1. app.py +2 -6
app.py CHANGED
@@ -1,6 +1,5 @@
  import streamlit as st
- # from dotenv import load_dotenv
- from langchain_community.document_loaders.pdf import PyPDFDirectoryLoader, PyPDFLoader
+ from langchain_community.document_loaders.pdf import PyPDFDirectoryLoader
  from langchain.text_splitter import CharacterTextSplitter
  from langchain_community.embeddings import HuggingFaceInstructEmbeddings
  from langchain_community.vectorstores import FAISS
@@ -8,8 +7,6 @@ from langchain.chains import ConversationalRetrievalChain
  from langchain.llms import HuggingFaceHub
  from langchain.memory import ConversationBufferMemory

- # load_dotenv()
-
  def make_vectorstore(embeddings):
      # use glob to find all the pdf files in the data folder in the base directory
      loader = PyPDFDirectoryLoader("data")
@@ -33,9 +30,8 @@ def get_conversation(vectorstore):
      # create a memory object to store the conversation history
      memory = ConversationBufferMemory(memory_key="chat_history",return_messages=True,)

-     # create a conversational retrieval chain
      conversation_chain = ConversationalRetrievalChain.from_chain_type(
-         llm=HuggingFaceHub(repo_id="google/flan-t5-xxl", model_kwargs={"temperature":0.5, "max_length":512}),
+         llm=HuggingFaceHub(repo_id="google/flan-t5-xxl", model_kwargs={"temperature":0.5, "max_length":512}),
          chain_type="stuff",
          retriever=vectorstore.as_retriever(),
          memory=memory)
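
For orientation, a minimal sketch (not part of this commit) of how the two helpers touched above might be driven; the embeddings model name and the example question are assumptions, not taken from the diff.

# Sketch only: assumes make_vectorstore and get_conversation from app.py are in scope.
from langchain_community.embeddings import HuggingFaceInstructEmbeddings

# Assumed embeddings model; the hunks shown here do not pin one.
embeddings = HuggingFaceInstructEmbeddings(model_name="hkunlp/instructor-base")
vectorstore = make_vectorstore(embeddings)      # FAISS index over the PDFs in ./data
conversation = get_conversation(vectorstore)    # retrieval chain with chat-history memory

result = conversation({"question": "Summarise the PDFs in the data folder."})
print(result["answer"])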