Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -5,17 +5,18 @@ import torch
|
|
5 |
from peft import PeftConfig, PeftModel
|
6 |
from transformers import LlamaForCausalLM, AutoTokenizer, BitsAndBytesConfig
|
7 |
|
8 |
-
config = PeftConfig.from_pretrained("GGmorello/FLAMES")
|
9 |
model = LlamaForCausalLM.from_pretrained(
|
10 |
-
"GGmorello/FLAMES-100k",
|
11 |
-
quantization_config=BitsAndBytesConfig(
|
12 |
load_in_4bit=True,
|
13 |
bnb_4bit_quant_type="nf4",
|
14 |
bnb_4bit_use_double_quant=True,
|
15 |
bnb_4bit_compute_dtype=torch.bfloat16,
|
16 |
),
|
|
|
17 |
)
|
18 |
-
model = PeftModel.from_pretrained(model, "GGmorello/FLAMES-100k")
|
19 |
|
20 |
|
21 |
MAX_SEQ_LEN = 4096
|
|
|
5 |
from peft import PeftConfig, PeftModel
|
6 |
from transformers import LlamaForCausalLM, AutoTokenizer, BitsAndBytesConfig
|
7 |
|
8 |
+
# config = PeftConfig.from_pretrained("GGmorello/FLAMES")
|
9 |
model = LlamaForCausalLM.from_pretrained(
|
10 |
+
"GGmorello/FLAMES-100k",
|
11 |
+
""" quantization_config=BitsAndBytesConfig(
|
12 |
load_in_4bit=True,
|
13 |
bnb_4bit_quant_type="nf4",
|
14 |
bnb_4bit_use_double_quant=True,
|
15 |
bnb_4bit_compute_dtype=torch.bfloat16,
|
16 |
),
|
17 |
+
"""
|
18 |
)
|
19 |
+
# model = PeftModel.from_pretrained(model, "GGmorello/FLAMES-100k")
|
20 |
|
21 |
|
22 |
MAX_SEQ_LEN = 4096
|