Commit: Update app.py

File changed: app.py
--- a/app.py
+++ b/app.py
@@ -11,7 +11,8 @@ import torch
 # subfolder = "Qwen-0.5B-GRPO/checkpoint-1868"
 # filename = "simplescaling_s1-32B-Q4_K_S.gguf"
 # model_name = "simplescaling/s1.1-32B"
-model_name = "unsloth/Llama-4-Scout-17B-16E-Instruct-GGUF"
+# model_name = "unsloth/Llama-4-Scout-17B-16E-Instruct-GGUF"
+model_name = "unsloth/Llama-4-Scout-17B-16E-Instruct-unsloth-bnb-4bit"
 filename = "Llama-4-Scout-17B-16E-Instruct-UD-IQ2_XXS.gguf"
 torch_dtype = torch.bfloat16 # could be torch.float16 or torch.bfloat16 torch.float32 too
 cache_dir = "/data"
@@ -27,14 +28,14 @@ cache_dir = "/data"
 model = Llama4ForConditionalGeneration.from_pretrained(
     model_name,
     attn_implementation="flex_attention",
-    gguf_file=filename,
+    # gguf_file=filename,
+    # cache_dir = cache_dir,
     torch_dtype=torch_dtype,
     device_map="auto",
-    cache_dir = cache_dir,
 )
 
 tokenizer = AutoTokenizer.from_pretrained(model_name
-    , gguf_file=filename
+    # , gguf_file=filename
     # , subfolder=subfolder
 )
 SYSTEM_PROMPT = """