import os
import pickle
from json import dumps, loads
from typing import Any, List, Mapping, Optional

import numpy as np
import openai
import pandas as pd
import streamlit as st
from dotenv import load_dotenv
from huggingface_hub import HfFileSystem
from langchain.llms.base import LLM
from llama_index import (
    Document,
    GPTVectorStoreIndex,
    LLMPredictor,
    PromptHelper,
    ServiceContext,
    SimpleDirectoryReader,
    StorageContext,
    load_index_from_storage,
)
from llama_index.llms import (  # LLM abstractions used by OurLLM below
    CompletionResponse,
    CompletionResponseGen,
    CustomLLM,
    LLMMetadata,
)
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# from utils.customLLM import CustomLLM

load_dotenv()
# openai.api_key = os.getenv("OPENAI_API_KEY")
fs = HfFileSystem()

# define prompt helper
# maximum input size (tokens)
CONTEXT_WINDOW = 2048
# number of output tokens to generate
NUM_OUTPUT = 525
# maximum chunk overlap, as a ratio of chunk size
CHUNK_OVERLAP_RATIO = 0.2

prompt_helper = PromptHelper(
    context_window=CONTEXT_WINDOW,
    num_output=NUM_OUTPUT,
    chunk_overlap_ratio=CHUNK_OVERLAP_RATIO,
)


@st.cache_resource
def load_model(model_name: str):
    # llm_model_name = "bigscience/bloom-560m"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    # load the causal LM weights; the model's own config is resolved automatically
    model = AutoModelForCausalLM.from_pretrained(model_name)

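    # text-generation pipeline with sampling enabled; top_p / top_k /
    # temperature control the randomness of the generated continuation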
    pipe = pipeline(
        task="text-generation",
        model=model,
        tokenizer=tokenizer,
        # device=0, # GPU device number
        # max_length=512,
        do_sample=True,
        top_p=0.95,
        top_k=50,
        temperature=0.7,
    )

    return pipe


class OurLLM(CustomLLM):
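    """llama_index CustomLLM wrapper around a local transformers pipeline.

    Assumes the pre-pydantic CustomLLM interface, where instance attributes
    can be assigned directly in __init__.
    """
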
    def __init__(self, model_name: str, model_pipeline):
        self.model_name = model_name
        self.pipeline = model_pipeline

    @property
    def metadata(self) -> LLMMetadata:
        """Get LLM metadata."""
        return LLMMetadata(
            context_window=CONTEXT_WINDOW,
            num_output=NUM_OUTPUT,
            model_name=self.model_name,
        )

    def complete(self, prompt: str, **kwargs: Any) -> CompletionResponse:
        prompt_length = len(prompt)
        response = self.pipeline(prompt, max_new_tokens=NUM_OUTPUT)[0]["generated_text"]

        # only return newly generated tokens
        text = response[prompt_length:]
        return CompletionResponse(text=text)

    def stream_complete(self, prompt: str, **kwargs: Any) -> CompletionResponseGen:
        raise NotImplementedError()

    # def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
    #     prompt_length = len(prompt)
    #     response = self.pipeline(prompt, max_new_tokens=525)[0]["generated_text"]

    #     # only return newly generated tokens
    #     return response[prompt_length:]

    # @property
    # def _identifying_params(self) -> Mapping[str, Any]:
    #     return {"name_of_model": self.model_name}

    # @property
    # def _llm_type(self) -> str:
    #     return "custom"
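
# `text_qa_template` / `refine_template` are referenced in
# LlamaCustom.get_response() below. The definitions here are a minimal
# sketch assuming llama_index's `Prompt` API; substitute your own prompt
# strings as needed.
from llama_index import Prompt

text_qa_template = Prompt(
    "Context information is below.\n"
    "---------------------\n"
    "{context_str}\n"
    "---------------------\n"
    "Given the context information and not prior knowledge, "
    "answer the question: {query_str}\n"
)

refine_template = Prompt(
    "The original question is as follows: {query_str}\n"
    "We have provided an existing answer: {existing_answer}\n"
    "We have the opportunity to refine the existing answer "
    "with more context below.\n"
    "---------------------\n"
    "{context_msg}\n"
    "---------------------\n"
    "Given the new context, refine the original answer to better "
    "answer the question. If the context isn't useful, return the "
    "original answer.\n"
)
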

class LlamaCustom:
    # define llm
    # llm_predictor = LLMPredictor(llm=OurLLM())
    # service_context = ServiceContext.from_defaults(
    #     llm_predictor=llm_predictor, prompt_helper=prompt_helper
    # )
    def __init__(self, model_name: str) -> None:
        pipe = load_model(model_name=model_name)
        llm = OurLLM(model_name=model_name, model_pipeline=pipe)
        self.service_context = ServiceContext.from_defaults(
            llm=llm, prompt_helper=prompt_helper
        )
        self.vector_index = self.initialize_index(model_name=model_name)

    @st.cache_resource
    def initialize_index(_self, model_name: str):
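        # the leading underscore in `_self` tells st.cache_resource to skip
        # hashing the instance when caching this method's result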
        index_name = model_name.split("/")[-1]

        file_path = f"./vectorStores/{index_name}"

        if os.path.exists(path=file_path):
            # rebuild storage context
            storage_context = StorageContext.from_defaults(persist_dir=file_path)

            # local load index access
            index = load_index_from_storage(storage_context)

            # huggingface repo load access
            # with fs.open(file_path, "r") as file:
            #     index = pickle.loads(file.readlines())
            return index
        else:
            # documents = prepare_data(r"./assets/regItems.json")
            documents = SimpleDirectoryReader(input_dir="./assets/pdf").load_data()

            index = GPTVectorStoreIndex.from_documents(
                documents, service_context=_self.service_context
            )

            # local write access
            index.storage_context.persist(file_path)

            # huggingface repo write access
            # with fs.open(file_path, "w") as file:
            #     file.write(pickle.dumps(index))
            return index

    def get_response(self, query_str):
        print("query_str: ", query_str)
        # query_engine = self.vector_index.as_query_engine()
        query_engine = self.vector_index.as_query_engine(
            text_qa_template=text_qa_template, refine_template=refine_template
        )
        response = query_engine.query(query_str)
        print("metadata: ", response.metadata)
        return str(response)
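

# Minimal usage sketch (illustrative, assumes running this module directly;
# the model id below is the example mentioned elsewhere in this file).
if __name__ == "__main__":
    llama = LlamaCustom(model_name="bigscience/bloom-560m")
    print(llama.get_response("What do the indexed documents cover?"))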