import streamlit as st
from dotenv import load_dotenv
from langchain.text_splitter import CharacterTextSplitter, RecursiveCharacterTextSplitter
from langchain.vectorstores import FAISS
from langchain.embeddings import HuggingFaceEmbeddings # General embeddings from HuggingFace models.
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain
from htmlTemplates import css, bot_template, user_template
from langchain.llms import LlamaCpp # Runs local GGUF models through llama.cpp.
from langchain.document_loaders import PyPDFLoader, TextLoader, JSONLoader, CSVLoader
import tempfile # Used to create temporary files for uploaded documents.
import os
from huggingface_hub import hf_hub_download # Downloads model files from the Hugging Face Hub.
# Extracts text from an uploaded PDF document.
def get_pdf_text(pdf_docs):
    temp_dir = tempfile.TemporaryDirectory()  # Create a temporary directory.
    temp_filepath = os.path.join(temp_dir.name, pdf_docs.name)  # Build the temporary file path.
    with open(temp_filepath, "wb") as f:  # Open the temporary file in binary write mode.
        f.write(pdf_docs.getvalue())  # Write the uploaded PDF's bytes to the temporary file.
    pdf_loader = PyPDFLoader(temp_filepath)  # Load the PDF with PyPDFLoader.
    pdf_doc = pdf_loader.load()  # Extract the text as a list of Documents.
    return pdf_doc  # Return the extracted documents.
# Assignment: implement the text extraction functions below.
def get_text_file(text_docs):
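    # Extracts text from an uploaded plain-text (.txt) file.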
    temp_dir = tempfile.TemporaryDirectory()  # Create a temporary directory.
    temp_filepath = os.path.join(temp_dir.name, text_docs.name)  # Build the temporary file path.
    with open(temp_filepath, "wb") as f:  # Open the temporary file in binary write mode.
f.write(text_docs.getvalue())
text_loader = TextLoader(temp_filepath)
text_doc = text_loader.load()
return text_doc
def get_csv_file(csv_docs):
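    # Extracts documents from an uploaded CSV file; CSVLoader returns one Document per row.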
    temp_dir = tempfile.TemporaryDirectory()  # Create a temporary directory.
    temp_filepath = os.path.join(temp_dir.name, csv_docs.name)  # Build the temporary file path.
    with open(temp_filepath, "wb") as f:  # Open the temporary file in binary write mode.
f.write(csv_docs.getvalue())
csv_loader = CSVLoader(temp_filepath)
csv_doc = csv_loader.load()
return csv_doc
def get_json_file(json_docs):
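    # Extracts documents from an uploaded JSON file.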
    temp_dir = tempfile.TemporaryDirectory()  # Create a temporary directory.
    temp_filepath = os.path.join(temp_dir.name, json_docs.name)  # Build the temporary file path.
    with open(temp_filepath, "wb") as f:  # Open the temporary file in binary write mode.
f.write(json_docs.getvalue())
    json_loader = JSONLoader(
        file_path=temp_filepath,          # Load the uploaded file rather than a hard-coded example path.
        jq_schema='.messages[].content',  # jq expression; assumes the JSON has a top-level 'messages' list.
        text_content=False)
    json_doc = json_loader.load()
return json_doc
# Splits the loaded documents into text chunks.
def get_text_chunks(documents):
text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=1000,     # Maximum chunk size in characters.
        chunk_overlap=200,   # Overlap between adjacent chunks.
        length_function=len  # Function used to measure text length.
    )
    documents = text_splitter.split_documents(documents)  # Split the documents into chunks.
    return documents  # Return the resulting chunks.
# Builds a vector store from the text chunks.
def get_vectorstore(text_chunks):
    # Load the desired embedding model (runs on the CPU).
    embeddings = HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L12-v2',
                                       model_kwargs={'device': 'cpu'})
    vectorstore = FAISS.from_documents(text_chunks, embeddings)  # Build the FAISS vector store from the chunks.
    return vectorstore  # Return the vector store.
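# Builds the conversational retrieval chain: downloads a quantized Llama 2 GGUF model,
# wraps it with LlamaCpp, and connects it to the vector store retriever with conversation memory.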
def get_conversation_chain(vectorstore):
if vectorstore is None:
return None
model_name_or_path = 'TheBloke/Llama-2-7B-chat-GGUF'
model_basename = 'llama-2-7b-chat.Q2_K.gguf'
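    # Q2_K is a small 2-bit quantization; it keeps memory use low at some cost in answer quality.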
model_path = hf_hub_download(repo_id=model_name_or_path, filename=model_basename)
    llm = LlamaCpp(model_path=model_path,
                   n_ctx=4096,        # Context window size.
                   temperature=0.75,  # Sampling settings are passed directly to LlamaCpp.
                   max_tokens=2000,
                   top_p=1,
                   verbose=True)
    # Create memory that stores the conversation history.
memory = ConversationBufferMemory(
memory_key='chat_history', return_messages=True)
    # Build the conversational retrieval chain.
retriever = vectorstore.as_retriever() if hasattr(vectorstore, 'as_retriever') else None
if retriever is None:
return None
conversation_chain = ConversationalRetrievalChain.from_llm(
llm=llm,
        retriever=retriever,  # Reuse the retriever created above.
memory=memory
)
    return conversation_chain  # Return the conversation chain.
# Handles the user's question and renders the conversation.
def handle_userinput(user_question):
    print('user_question => ', user_question)
    # The chain only exists after documents have been processed.
    if st.session_state.conversation is None:
        st.warning("Please upload and process your documents first.")
        return
    # Use the conversation chain to generate a response to the user's question.
    response = st.session_state.conversation({'question': user_question})
    # Store the conversation history in the session state.
    st.session_state.chat_history = response['chat_history']
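    # Messages alternate user/assistant, so even indices belong to the user and odd indices to the bot.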
for i, message in enumerate(st.session_state.chat_history):
if i % 2 == 0:
st.write(user_template.replace(
"{{MSG}}", message.content), unsafe_allow_html=True)
else:
st.write(bot_template.replace(
"{{MSG}}", message.content), unsafe_allow_html=True)
def main():
load_dotenv()
st.set_page_config(page_title="Chat with multiple Files",
page_icon=":books:")
st.write(css, unsafe_allow_html=True)
if "conversation" not in st.session_state:
        st.session_state.conversation = None  # No chain until documents have been processed.
if "chat_history" not in st.session_state:
st.session_state.chat_history = None
st.header("Chat with multiple Files:")
user_question = st.text_input("Ask a question about your documents:")
if user_question:
handle_userinput(user_question)
with st.sidebar:
st.subheader("Your documents")
docs = st.file_uploader(
"Upload your PDFs here and click on 'Process'", accept_multiple_files=True)
if st.button("Process"):
with st.spinner("Processing"):
                # Extract documents from each uploaded file based on its MIME type.
doc_list = []
for file in docs:
print('file - type : ', file.type)
if file.type == 'text/plain':
# file is .txt
doc_list.extend(get_text_file(file))
elif file.type in ['application/octet-stream', 'application/pdf']:
                        # file is .pdf (some browsers report PDFs as application/octet-stream)
doc_list.extend(get_pdf_text(file))
elif file.type == 'text/csv':
# file is .csv
doc_list.extend(get_csv_file(file))
elif file.type == 'application/json':
# file is .json
doc_list.extend(get_json_file(file))
# get the text chunks
text_chunks = get_text_chunks(doc_list)
# create vector store
vectorstore = get_vectorstore(text_chunks)
# create conversation chain
st.session_state.conversation = get_conversation_chain(
vectorstore)
if __name__ == '__main__':
    main()