Update app.py
app.py CHANGED
@@ -8,13 +8,6 @@ from transformers import LlamaForCausalLM, AutoTokenizer, BitsAndBytesConfig
 # config = PeftConfig.from_pretrained("GGmorello/FLAMES")
 model = LlamaForCausalLM.from_pretrained(
     "GGmorello/FLAMES-100k",
-    """ quantization_config=BitsAndBytesConfig(
-        load_in_4bit=True,
-        bnb_4bit_quant_type="nf4",
-        bnb_4bit_use_double_quant=True,
-        bnb_4bit_compute_dtype=torch.bfloat16,
-    ),
-    """
 )
 # model = PeftModel.from_pretrained(model, "GGmorello/FLAMES-100k")
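For reference, a minimal sketch of how the deleted 4-bit quantization block could be re-enabled on GPU hardware. The repo id and the BitsAndBytesConfig values are taken verbatim from the removed lines; the availability of a CUDA device and the bitsandbytes package on the Space is an assumption, not something this commit states.

import torch
from transformers import LlamaForCausalLM, BitsAndBytesConfig

# Re-create the quantization config that was commented out and then removed.
quant_config = BitsAndBytesConfig(
    load_in_4bit=True,                      # store weights in 4-bit precision
    bnb_4bit_quant_type="nf4",              # NormalFloat4 quantization
    bnb_4bit_use_double_quant=True,         # also quantize the quantization constants
    bnb_4bit_compute_dtype=torch.bfloat16,  # run matmuls in bfloat16
)

# Load the model with quantization (requires a CUDA GPU and bitsandbytes).
model = LlamaForCausalLM.from_pretrained(
    "GGmorello/FLAMES-100k",
    quantization_config=quant_config,
)

As committed, app.py instead loads the model with no quantization_config, which avoids the bitsandbytes GPU requirement at the cost of loading the weights in full precision.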