bobber committed on
Commit
85f8f28
·
verified ·
1 Parent(s): 224e47a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -6
app.py CHANGED
@@ -5,11 +5,11 @@ from transformers import AutoProcessor, Llama4ForConditionalGeneration
5
 
6
  import torch
7
 
8
- from transformers import BitsAndBytesConfig
9
- bnb_config = BitsAndBytesConfig(
10
- load_in_4bit=True,
11
- llm_int8_enable_fp32_cpu_offload=True,
12
- )
13
 
14
  #Qwen/Qwen2.5-14B-Instruct-1M
15
  #Qwen/Qwen2-0.5B
@@ -19,7 +19,7 @@ bnb_config = BitsAndBytesConfig(
19
  # model_name = "simplescaling/s1.1-32B"
20
  # model_name = "unsloth/Llama-4-Scout-17B-16E-Instruct-GGUF"
21
  model_name = "unsloth/Llama-4-Scout-17B-16E-Instruct-unsloth-bnb-4bit"
22
- # model_name = "meta-llama/Llama-4-Scout-17B-16E-Instruct"
23
  filename = "Llama-4-Scout-17B-16E-Instruct-UD-IQ2_XXS.gguf"
24
  torch_dtype = torch.bfloat16 # could be torch.float16 or torch.bfloat16 torch.float32 too
25
  cache_dir = "/data"
 
5
 
6
  import torch
7
 
8
+ # from transformers import BitsAndBytesConfig
9
+ # bnb_config = BitsAndBytesConfig(
10
+ # load_in_4bit=True,
11
+ # llm_int8_enable_fp32_cpu_offload=True,
12
+ # )
13
 
14
  #Qwen/Qwen2.5-14B-Instruct-1M
15
  #Qwen/Qwen2-0.5B
 
19
  # model_name = "simplescaling/s1.1-32B"
20
  # model_name = "unsloth/Llama-4-Scout-17B-16E-Instruct-GGUF"
21
  model_name = "unsloth/Llama-4-Scout-17B-16E-Instruct-unsloth-bnb-4bit"
22
+ model_name = "meta-llama/Llama-4-Scout-17B-16E-Instruct"
23
  filename = "Llama-4-Scout-17B-16E-Instruct-UD-IQ2_XXS.gguf"
24
  torch_dtype = torch.bfloat16 # could be torch.float16 or torch.bfloat16 torch.float32 too
25
  cache_dir = "/data"