Commit 87f6199 · Update app.py
Parent(s): 1127261
app.py CHANGED
@@ -10,17 +10,18 @@ from transformers import (
     logging,
 )
 
+# Specify the local path to the downloaded model file
+model_name = "pytorch_model-00001-of-00002.bin"
+
 tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
 tokenizer.pad_token = tokenizer.eos_token
 tokenizer.padding_side = "right" # Fix weird overflow issue with fp16 training
 
-# Specify the local path to the downloaded model file
-model_path = "pytorch_model-00001-of-00002.bin"
 
 # Initialize the GPT4All model
 # model = GPT4All(model_path
 model = AutoModelForCausalLM.from_pretrained(
-
+    model_name,
     quantization_config=bnb_config,
     device_map=device_map
 )
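
For reference, below is a minimal consolidated sketch of the model-loading flow this hunk sets up. The diff references bnb_config and device_map but their definitions sit outside the hunk, so placeholder values are assumed here. Note also that from_pretrained normally takes a Hugging Face model ID or a local model directory rather than a single weight shard such as "pytorch_model-00001-of-00002.bin", so a hypothetical identifier is used in its place.

from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

# Assumed placeholders: the diff uses bnb_config and device_map,
# which are defined elsewhere in app.py.
bnb_config = BitsAndBytesConfig(load_in_4bit=True)
device_map = "auto"

# from_pretrained() resolves a model ID or a local directory containing
# config.json plus the weight shards, not a single .bin file.
model_name = "my-org/my-model"  # hypothetical identifier

tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
tokenizer.pad_token = tokenizer.eos_token
tokenizer.padding_side = "right"  # Fix weird overflow issue with fp16 training

model = AutoModelForCausalLM.from_pretrained(
    model_name,
    quantization_config=bnb_config,
    device_map=device_map,
)

Passing the shard path directly, as the commit does, is a likely source of the Space's failure to load, since the tokenizer and model loaders cannot resolve a lone .bin file into a full model checkpoint.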