Update app.py
app.py CHANGED

@@ -5,9 +5,9 @@ import torch
 from peft import PeftConfig, PeftModel
 from transformers import LlamaForCausalLM, AutoTokenizer, BitsAndBytesConfig
 
-
+config = PeftConfig.from_pretrained("GGmorello/FLAMES")
 model = LlamaForCausalLM.from_pretrained(
-    "GGmorello/FLAMES-
+    "GGmorello/FLAMES-20k",
     quantization_config=BitsAndBytesConfig(
         load_in_4bit=True,
         bnb_4bit_quant_type="nf4",
@@ -15,7 +15,7 @@ model = LlamaForCausalLM.from_pretrained(
         bnb_4bit_compute_dtype=torch.bfloat16,
     ),
 )
-
+model = PeftModel.from_pretrained(model, "GGmorello/FLAMES-20k")
 
 
 MAX_SEQ_LEN = 4096
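For context: the commit loads the base checkpoint in 4-bit NF4 precision via bitsandbytes and then attaches the LoRA adapter from the same repo with PeftModel. A minimal end-to-end sketch of how the resulting model might be used for generation follows; the tokenizer repo id, prompt, and generation settings are assumptions for illustration, not part of this commit.

import torch
from peft import PeftModel
from transformers import LlamaForCausalLM, AutoTokenizer, BitsAndBytesConfig

# Load the base weights in 4-bit NF4, as in the diff above.
model = LlamaForCausalLM.from_pretrained(
    "GGmorello/FLAMES-20k",
    quantization_config=BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_compute_dtype=torch.bfloat16,
    ),
)
# Attach the LoRA adapter on top of the quantized base model.
model = PeftModel.from_pretrained(model, "GGmorello/FLAMES-20k")

# Assumption: the tokenizer ships in the same repo as the adapter.
tokenizer = AutoTokenizer.from_pretrained("GGmorello/FLAMES-20k")

prompt = "def fibonacci(n):"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
    output = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(output[0], skip_special_tokens=True))

Loading the adapter separately rather than merging it keeps the 4-bit base weights frozen in memory, which is the usual pattern for serving a QLoRA-style fine-tune on limited hardware such as a Space.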