|
import os
import pickle
import time
from typing import Any

import streamlit as st
from huggingface_hub import login
from langchain.chains import RetrievalQAWithSourcesChain
from langchain.document_loaders import UnstructuredURLLoader
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.llms.base import LLM
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import FAISS
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
|
|
|
|
|
# Authenticate with the Hugging Face Hub (required for gated Llama weights).
# Fail fast if the token is missing: login(None) would fall back to an
# interactive prompt, which hangs a headless Streamlit server.
hf_token = os.getenv('HF_llama3chat8b')
if not hf_token:
    raise RuntimeError(
        "Environment variable 'HF_llama3chat8b' is not set; "
        "export a Hugging Face access token before starting the app."
    )
login(hf_token)
|
|
|
class CustomHuggingFaceLLM(LLM):
    """LangChain LLM wrapper around a locally loaded Hugging Face causal LM.

    Loads the model 8-bit-quantized via bitsandbytes and exposes it through
    the LangChain ``LLM`` interface so it can drive retrieval chains.
    """

    # ``LLM`` is a pydantic model: plain ``self.x = ...`` assignment in
    # ``__init__`` raises ``ValueError: object has no field "x"`` unless the
    # attributes are declared as fields here.
    model: Any = None
    tokenizer: Any = None
    temperature: float = 0.7

    def __init__(self, model_name, temperature=0.7):
        """Load tokenizer and quantized model for ``model_name``.

        Args:
            model_name: Hugging Face model id (e.g. a Llama checkpoint).
            temperature: Sampling temperature used by ``generate``.
        """
        # Initialize the pydantic base before assigning declared fields.
        super().__init__()

        quantization_config = BitsAndBytesConfig(
            load_in_8bit=True,
            llm_int8_enable_fp32_cpu_offload=True,
        )

        # NOTE(review): device_map="cpu" combined with load_in_8bit is
        # unusual — bitsandbytes int8 kernels normally require a GPU, with
        # fp32 CPU offload only for the overflow. Confirm this runs on the
        # target hardware.
        self.model = AutoModelForCausalLM.from_pretrained(
            model_name,
            device_map="cpu",
            quantization_config=quantization_config,
        )
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.temperature = temperature

    def _call(self, prompt, stop=None, run_manager=None, **kwargs):
        """Generate a completion for ``prompt``.

        ``stop`` sequences are accepted for interface compatibility but not
        enforced by this implementation.
        """
        input_ids = self.tokenizer.encode(prompt, return_tensors="pt")
        # max_new_tokens (not max_length) so long retrieval-stuffed prompts
        # still leave room for an answer: max_length counts the prompt too,
        # and RAG prompts easily exceed 512 tokens.
        output = self.model.generate(
            input_ids,
            max_new_tokens=512,
            temperature=self.temperature,
            do_sample=True,
            top_p=0.95,
            top_k=3,
        )
        generated_text = self.tokenizer.decode(output[0], skip_special_tokens=True)
        return generated_text

    @property
    def _identifying_params(self):
        """Parameters that identify this LLM instance for caching/serialization."""
        return {"model_name": self.model.config._name_or_path, "temperature": self.temperature}

    @property
    def _llm_type(self):
        """LangChain type tag for this custom LLM."""
        return "custom_huggingface"
|
|
|
|
|
|
|
main_directory = os.path.dirname(os.path.abspath(__file__))

st.title("Web Page search Bot: Research Tool π")
st.sidebar.title("Article URLs")

# Collect up to three article URLs from the sidebar (entries may be blank).
urls = []
for i in range(3):
    url = st.sidebar.text_input(f"URL {i+1}")
    urls.append(url)

process_url_clicked = st.sidebar.button("Process URLs")
file_path_faiss = "faiss_store.pkl"

main_placeholder = st.empty()


@st.cache_resource
def _load_embedding_model():
    """Load the sentence-transformer embedder once per server process."""
    return HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L6-v2')


@st.cache_resource
def _load_llm():
    """Load the 8B Llama model once instead of on every Streamlit rerun.

    Without caching, every widget interaction re-runs the script and
    re-loads the multi-gigabyte checkpoint from scratch.
    """
    return CustomHuggingFaceLLM(model_name="meta-llama/Meta-Llama-3.1-8B", temperature=0.6)


embedding_model = _load_embedding_model()
llm = _load_llm()
|
if process_url_clicked:
    # The sidebar always yields three entries; drop blanks so the loader is
    # never handed an empty URL.
    valid_urls = [u for u in urls if u and u.strip()]

    if not valid_urls:
        main_placeholder.text("Please enter at least one URL before processing.")
    else:
        loader = UnstructuredURLLoader(urls=valid_urls)
        main_placeholder.text("Data Loading...Started...✅✅✅")
        data = loader.load()

        # Split on paragraph boundaries into ~1000-char chunks with overlap
        # so retrieval context is not cut mid-thought.
        text_splitter = RecursiveCharacterTextSplitter(
            separators=['\n\n'],
            chunk_size=1000,
            chunk_overlap=100
        )
        main_placeholder.text("Text Splitter...Started...✅✅✅")
        docs = text_splitter.split_documents(data)

        vectorstore_faiss = FAISS.from_documents(documents=docs, embedding=embedding_model)
        main_placeholder.text("Embedding Vector Started Building...✅✅✅")
        time.sleep(2)  # brief pause so the status message is visible

        # NOTE(review): pickling a FAISS store is fragile across library
        # versions; FAISS.save_local()/load_local() is the supported
        # persistence API and worth migrating to.
        with open(file_path_faiss, "wb") as f:
            pickle.dump(vectorstore_faiss, f)
|
|
|
query = main_placeholder.text_input("Question: ")
if query:
    if os.path.exists(file_path_faiss):
        # SECURITY: pickle.load executes arbitrary code on load. Only load
        # index files this app itself wrote; never accept uploaded pickles.
        with open(file_path_faiss, "rb") as f:
            vectorstore = pickle.load(f)

        chain = RetrievalQAWithSourcesChain.from_llm(llm=llm, retriever=vectorstore.as_retriever(), verbose=True)
        result = chain({"question": query}, return_only_outputs=True)

        st.header("Answer")
        st.write(result["answer"])

        # The chain returns sources as a single newline-separated string.
        sources = result.get("sources", "")
        if sources:
            st.subheader("Sources:")
            sources_list = sources.split("\n")
            for source in sources_list:
                st.write(source)
    else:
        # Previously this case was a silent no-op, leaving the user with no
        # feedback at all when no index had been built yet.
        st.warning("No index found. Enter article URLs and click 'Process URLs' first.")