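"""Multi-document chatbot: upload PDF, DOCX/DOC, or TXT files in the sidebar,
index them with FAISS + MiniLM embeddings, and chat over them through a
LangChain ConversationalRetrievalChain backed by a local Llama-2-7B model."""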
import streamlit as st
from streamlit_chat import message
from langchain.chains import ConversationalRetrievalChain
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.llms import CTransformers
from langchain.llms import Replicate
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import FAISS
from langchain.memory import ConversationBufferMemory
from langchain.document_loaders import PyPDFLoader
from langchain.document_loaders import TextLoader
from langchain.document_loaders import Docx2txtLoader
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
import os
from dotenv import load_dotenv
import tempfile
load_dotenv()
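# Secrets (e.g. HUGGINGFACEHUB_API_TOKEN, or REPLICATE_API_TOKEN if the
# Replicate LLM below is used instead) are expected to live in a local .env
# file rather than in the source.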
def initialize_session_state():
    """Create the chat-history containers in Streamlit session state."""
    if 'history' not in st.session_state:
        st.session_state['history'] = []
    if 'generated' not in st.session_state:
        st.session_state['generated'] = ["Hello! Ask me anything about 🤗"]
    if 'past' not in st.session_state:
        st.session_state['past'] = ["Hey! 👋"]
def conversation_chat(query, chain, history):
    """Run the retrieval chain on a query and append the turn to history."""
    result = chain({"question": query, "chat_history": history})
    history.append((query, result["answer"]))
    return result["answer"]
def display_chat_history(chain):
    """Render the input form and replay the conversation so far."""
    reply_container = st.container()
    container = st.container()

    with container:
        with st.form(key='my_form', clear_on_submit=True):
            user_input = st.text_input("Question:", placeholder="Ask about your documents", key='input')
            submit_button = st.form_submit_button(label='Send')

        if submit_button and user_input:
            with st.spinner('Generating response...'):
                output = conversation_chat(user_input, chain, st.session_state['history'])
            st.session_state['past'].append(user_input)
            st.session_state['generated'].append(output)

    if st.session_state['generated']:
        with reply_container:
            for i in range(len(st.session_state['generated'])):
                message(st.session_state["past"][i], is_user=True, key=str(i) + '_user', avatar_style="thumbs")
                message(st.session_state["generated"][i], key=str(i), avatar_style="fun-emoji")
# Hugging Face credentials, if required, should come from the .env file
# loaded above; never hard-code tokens in source.
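# NOTE: CTransformers loads a quantized GGML/GGUF checkpoint by local path or
# Hub repo id; it cannot wrap a transformers AutoModelForCausalLM object.
# The repo and file names below are assumptions; swap in whichever quantized
# Llama-2-7B build you actually use.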
def create_conversational_chain(vector_store):
    """Wire the local LLM, conversation memory, and FAISS retriever together."""
    load_dotenv()

    # Create llm: a quantized Llama-2-7B checkpoint run locally via CTransformers.
    llm = CTransformers(streaming=True,
                        model="TheBloke/Llama-2-7B-Chat-GGML",         # assumption: any GGML Llama-2-7B repo works
                        model_file="llama-2-7b-chat.ggmlv3.q4_0.bin",  # assumption: pick the quantization you have
                        callbacks=[StreamingStdOutCallbackHandler()],
                        model_type="llama",
                        config={'max_new_tokens': 500, 'temperature': 0.01})

    # Alternative: run the model remotely on Replicate instead
    # (Replicate expects an "owner/name:version" model reference).
    # llm = Replicate(
    #     streaming=True,
    #     model="NousResearch/Llama-2-7b-hf",
    #     callbacks=[StreamingStdOutCallbackHandler()],
    #     input={"temperature": 0.01, "max_length": 500, "top_p": 1})

    memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
    chain = ConversationalRetrievalChain.from_llm(llm=llm,
                                                  chain_type='stuff',
                                                  retriever=vector_store.as_retriever(search_kwargs={"k": 2}),
                                                  memory=memory)
    return chain
def main():
    """Streamlit entry point: ingest uploaded documents, then chat over them."""
    load_dotenv()
    # Initialize session state
    initialize_session_state()
    st.title("Multi-Docs ChatBot using llama-2-7b :books:")

    # Sidebar for document upload and processing
    st.sidebar.title("Document Processing")
    uploaded_files = st.sidebar.file_uploader("Upload files", accept_multiple_files=True)

    if uploaded_files:
        text = []
        for file in uploaded_files:
            file_extension = os.path.splitext(file.name)[1]
            # Persist the upload to a temporary file so the loaders can read it from disk.
            with tempfile.NamedTemporaryFile(delete=False) as temp_file:
                temp_file.write(file.read())
                temp_file_path = temp_file.name

            loader = None
            if file_extension == ".pdf":
                loader = PyPDFLoader(temp_file_path)
            elif file_extension in (".docx", ".doc"):
                loader = Docx2txtLoader(temp_file_path)
            elif file_extension == ".txt":
                loader = TextLoader(temp_file_path)

            if loader:
                text.extend(loader.load())
            os.remove(temp_file_path)

        # Split documents into overlapping chunks for retrieval.
        text_splitter = CharacterTextSplitter(separator="\n", chunk_size=1000, chunk_overlap=100, length_function=len)
        text_chunks = text_splitter.split_documents(text)

        # Create embeddings
        embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2",
                                           model_kwargs={'device': 'cpu'})

        # Create vector store
        vector_store = FAISS.from_documents(text_chunks, embeddings)

        # Create the chain object
        chain = create_conversational_chain(vector_store)

        display_chat_history(chain)

if __name__ == "__main__":
    main()
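# To run locally (assuming this file is saved as app.py):
#   streamlit run app.py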