import importlib
import os
import sys
import gc
import json
import re

from transformers import (
    AutoModelForCausalLM, AutoModel,
    AutoTokenizer, LlamaTokenizer
)

from .config import Config
from .globals import Global
from .lib.get_device import get_device


def get_torch():
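    """Import torch lazily, deferring the heavy import until first use."""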
    return importlib.import_module('torch')


def get_peft_model_class():
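    """Lazily import peft and return its PeftModel class."""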
    return importlib.import_module('peft').PeftModel


def get_new_base_model(base_model_name):
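    """Load a fresh base model, reusing one prepared by prepare_base_model().

    Retries with from_tf=True, then with AutoModel, then with
    force_download=True if loading fails, and normalizes the special token
    ids for LLaMA models.
    """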
    if Config.ui_dev_mode:
        return
    if Global.is_train_starting or Global.is_training:
        raise Exception("Cannot load new base model while training.")

    if Global.new_base_model_that_is_ready_to_be_used:
        if Global.name_of_new_base_model_that_is_ready_to_be_used == base_model_name:
            model = Global.new_base_model_that_is_ready_to_be_used
            Global.new_base_model_that_is_ready_to_be_used = None
            Global.name_of_new_base_model_that_is_ready_to_be_used = None
            return model
        else:
            Global.new_base_model_that_is_ready_to_be_used = None
            Global.name_of_new_base_model_that_is_ready_to_be_used = None
            clear_cache()

    model_class = AutoModelForCausalLM
    from_tf = False
    force_download = False
    has_tried_force_download = False
    while True:
        try:
            model = _get_model_from_pretrained(
                model_class, base_model_name, from_tf=from_tf, force_download=force_download)
            break
        except Exception as e:
            if 'from_tf' in str(e):
                print(
                    f"Got error while loading model {base_model_name} with AutoModelForCausalLM: {e}.")
                print("Retrying with from_tf=True...")
                from_tf = True
                force_download = False
            elif model_class == AutoModelForCausalLM:
                print(
                    f"Got error while loading model {base_model_name} with AutoModelForCausalLM: {e}.")
                print("Retrying with AutoModel...")
                model_class = AutoModel
                force_download = False
            else:
                if has_tried_force_download:
                    raise e
                print(
                    f"Got error while loading model {base_model_name}: {e}.")
                print("Retrying with force_download=True...")
                model_class = AutoModelForCausalLM
                from_tf = False
                force_download = True
                has_tried_force_download = True

    tokenizer = get_tokenizer(base_model_name)

    if re.match("[^/]+/llama", base_model_name):
        model.config.pad_token_id = tokenizer.pad_token_id = 0
        model.config.bos_token_id = tokenizer.bos_token_id = 1
        model.config.eos_token_id = tokenizer.eos_token_id = 2

    return model


def _get_model_from_pretrained(model_class, model_name, from_tf=False, force_download=False):
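    """Call model_class.from_pretrained() with device-appropriate settings:
    optional 8-bit loading plus fp16 on CUDA, fp16 on MPS, and
    low_cpu_mem_usage elsewhere.
    """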
    torch = get_torch()
    device = get_device()

    if device == "cuda":
        return model_class.from_pretrained(
            model_name,
            load_in_8bit=Config.load_8bit,
            torch_dtype=torch.float16,
            # device_map="auto",
            # ? https://github.com/tloen/alpaca-lora/issues/21
            device_map={'': 0},
            from_tf=from_tf,
            force_download=force_download,
            trust_remote_code=Config.trust_remote_code
        )
    elif device == "mps":
        return model_class.from_pretrained(
            model_name,
            device_map={"": device},
            torch_dtype=torch.float16,
            from_tf=from_tf,
            force_download=force_download,
            trust_remote_code=Config.trust_remote_code
        )
    else:
        return model_class.from_pretrained(
            model_name,
            device_map={"": device},
            low_cpu_mem_usage=True,
            from_tf=from_tf,
            force_download=force_download,
            trust_remote_code=Config.trust_remote_code
        )


def get_tokenizer(base_model_name):
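    """Load (and cache) the tokenizer for base_model_name.

    Falls back to LlamaTokenizer for older LLaMA checkpoints whose config
    still references the since-renamed LLaMATokenizer class.
    """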
    if Config.ui_dev_mode:
        return

    if Global.is_train_starting or Global.is_training:
        raise Exception("Cannot load tokenizer while training.")

    loaded_tokenizer = Global.loaded_tokenizers.get(base_model_name)
    if loaded_tokenizer:
        return loaded_tokenizer

    try:
        tokenizer = AutoTokenizer.from_pretrained(
            base_model_name,
            trust_remote_code=Config.trust_remote_code
        )
    except Exception as e:
        if 'LLaMATokenizer' in str(e):
            tokenizer = LlamaTokenizer.from_pretrained(
                base_model_name,
                trust_remote_code=Config.trust_remote_code
            )
        else:
            raise e

    Global.loaded_tokenizers.set(base_model_name, tokenizer)

    return tokenizer


def get_model(
        base_model_name,
        peft_model_name=None):
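    """Load (and cache) a model, optionally applying a PEFT/LoRA adapter.

    peft_model_name may name a directory under {Config.data_dir}/lora_models
    (optionally redirected to Hugging Face via its info.json) or a Hugging
    Face model name.
    """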
    if Config.ui_dev_mode:
        return

    if Global.is_train_starting or Global.is_training:
        raise Exception("Cannot load new base model while training.")

    if peft_model_name == "None":
        peft_model_name = None

    model_key = base_model_name
    if peft_model_name:
        model_key = f"{base_model_name}//{peft_model_name}"

    loaded_model = Global.loaded_models.get(model_key)
    if loaded_model:
        return loaded_model

    peft_model_name_or_path = peft_model_name

    if peft_model_name:
        lora_models_directory_path = os.path.join(
            Config.data_dir, "lora_models")
        possible_lora_model_path = os.path.join(
            lora_models_directory_path, peft_model_name)
        if os.path.isdir(possible_lora_model_path):
            peft_model_name_or_path = possible_lora_model_path

            possible_model_info_json_path = os.path.join(
                possible_lora_model_path, "info.json")
            if os.path.isfile(possible_model_info_json_path):
                try:
                    with open(possible_model_info_json_path, "r") as file:
                        json_data = json.load(file)
                        possible_hf_model_name = json_data.get("hf_model_name")
                        if possible_hf_model_name and json_data.get("load_from_hf"):
                            peft_model_name_or_path = possible_hf_model_name
                except Exception as e:
                    raise ValueError(
                        f"Error reading model info from {possible_model_info_json_path}: {e}")

    Global.loaded_models.prepare_to_set()
    clear_cache()

    model = get_new_base_model(base_model_name)

    # torch is needed below for torch.compile() even when no adapter is used.
    torch = get_torch()

    if peft_model_name:
        device = get_device()
        PeftModel = get_peft_model_class()

        if device == "cuda":
            model = PeftModel.from_pretrained(
                model,
                peft_model_name_or_path,
                torch_dtype=torch.float16,
                # ? https://github.com/tloen/alpaca-lora/issues/21
                device_map={'': 0},
            )
        elif device == "mps":
            model = PeftModel.from_pretrained(
                model,
                peft_model_name_or_path,
                device_map={"": device},
                torch_dtype=torch.float16,
            )
        else:
            model = PeftModel.from_pretrained(
                model,
                peft_model_name_or_path,
                device_map={"": device},
            )

    if re.match("[^/]+/llama", base_model_name):
        model.config.pad_token_id = get_tokenizer(
            base_model_name).pad_token_id = 0
        model.config.bos_token_id = 1
        model.config.eos_token_id = 2

    if not Config.load_8bit:
        model.half()  # seems to fix bugs for some users.

    model.eval()
    # torch.compile requires PyTorch >= 2 and is not supported on Windows.
    if torch.__version__ >= "2" and sys.platform != "win32":
        model = torch.compile(model)

    Global.loaded_models.set(model_key, model)
    clear_cache()

    return model


def prepare_base_model(base_model_name=Config.default_base_model_name):
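    """Pre-load a base model so the next get_new_base_model() call can reuse it."""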
    Global.new_base_model_that_is_ready_to_be_used = get_new_base_model(
        base_model_name)
    Global.name_of_new_base_model_that_is_ready_to_be_used = base_model_name


def clear_cache():
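    """Run garbage collection and release cached CUDA memory."""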
    gc.collect()

    torch = get_torch()
    # empty_cache() is a no-op when CUDA has not been initialized,
    # so this is safe on CPU-only and MPS machines as well.
    torch.cuda.empty_cache()


def unload_models():
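    """Clear all cached models and tokenizers, then free memory."""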
    Global.loaded_models.clear()
    Global.loaded_tokenizers.clear()
    clear_cache()