import os
import pickle
from typing import Any, List, Mapping, Optional

import openai
import streamlit as st
from dotenv import load_dotenv
from huggingface_hub import HfFileSystem
from langchain.llms.base import LLM
from llama_index import (
    GPTVectorStoreIndex,
    LLMPredictor,
    PromptHelper,
    ServiceContext,
    SimpleDirectoryReader,
    StorageContext,
    load_index_from_storage,
)
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# from utils.customLLM import CustomLLM

load_dotenv()
# openai.api_key = os.getenv("OPENAI_API_KEY")
fs = HfFileSystem()

# Prompt helper settings
# maximum input size (tokens) of the underlying model
CONTEXT_WINDOW = 2048
# number of output tokens to generate
NUM_OUTPUT = 525
# maximum overlap between chunks, as a ratio of chunk size
CHUNK_OVERLAP_RATIO = 0.2


@st.cache_resource
def load_model(model_name: str):
    """Load a Hugging Face causal LM and wrap it in a text-generation pipeline."""
    # e.g. model_name = "bigscience/bloom-560m"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)

    pipe = pipeline(
        task="text-generation",
        model=model,
        tokenizer=tokenizer,
        # device=0,  # GPU device number
        # max_length=512,
        do_sample=True,
        top_p=0.95,
        top_k=50,
        temperature=0.7,
    )

    return pipe


class CustomLLM(LLM):
    """LangChain LLM wrapper around a local Hugging Face text-generation pipeline."""

    llm_model_name: str
    pipeline: Any

    def __init__(self, llm_model_name: str):
        super().__init__()
        self.llm_model_name = llm_model_name
        self.pipeline = load_model(model_name=llm_model_name)

    def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
        prompt_length = len(prompt)
        response = self.pipeline(prompt, max_new_tokens=NUM_OUTPUT)[0]["generated_text"]

        # only return the newly generated tokens
        return response[prompt_length:]

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        return {"name_of_model": self.llm_model_name}

    @property
    def _llm_type(self) -> str:
        return "custom"


class LlamaCustom:
    """Builds (or loads) a LlamaIndex vector index and answers queries against it."""

    def __init__(self, model_name: str) -> None:
        self.vector_index = self.initialize_index(model_name=model_name)

    @st.cache_resource
    def initialize_index(_self, model_name: str):
        index_name = model_name.split("/")[-1]
        file_path = f"./vectorStores/{index_name}"

        if os.path.exists(file_path):
            # rebuild the storage context and load the persisted index from disk
            storage_context = StorageContext.from_defaults(persist_dir=file_path)
            index = load_index_from_storage(storage_context)

            # Hugging Face repo load access
            # with fs.open(file_path, "r") as file:
            #     index = pickle.loads(file.readlines())
            return index
        else:
            # define the LLM and prompt helper used to build the index
            prompt_helper = PromptHelper(
                context_window=CONTEXT_WINDOW,
                num_output=NUM_OUTPUT,
                chunk_overlap_ratio=CHUNK_OVERLAP_RATIO,
            )
            llm_predictor = LLMPredictor(llm=CustomLLM(llm_model_name=model_name))
            service_context = ServiceContext.from_defaults(
                llm_predictor=llm_predictor, prompt_helper=prompt_helper
            )

            # documents = prepare_data(r"./assets/regItems.json")
            documents = SimpleDirectoryReader(input_dir="./assets/pdf").load_data()

            index = GPTVectorStoreIndex.from_documents(
                documents, service_context=service_context
            )

            # local write access
            index.storage_context.persist(file_path)

            # Hugging Face repo write access
            # with fs.open(file_path, "w") as file:
            #     file.write(pickle.dumps(index))
            return index

    def get_response(self, query_str: str) -> str:
        print("query_str: ", query_str)
        query_engine = self.vector_index.as_query_engine()
        response = query_engine.query(query_str)
        return str(response)
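

# --- Illustrative usage sketch (not part of the original module) ---
# A minimal Streamlit flow wiring LlamaCustom into a simple Q&A page. The
# checkpoint name below (taken from the commented-out example above) and the
# widget labels are assumptions for illustration only; swap in whatever model
# and layout the app actually uses.
if __name__ == "__main__":
    # build or load the cached vector index for the chosen (assumed) checkpoint
    llama = LlamaCustom(model_name="bigscience/bloom-560m")

    st.title("Document Q&A")
    user_query = st.text_input("Ask a question about the indexed documents:")
    if user_query:
        # run the query through the vector index and display the answer
        st.write(llama.get_response(user_query))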