samidh committed on
Commit
183ac41
·
verified ·
1 Parent(s): 4318079

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -0
app.py CHANGED
@@ -12,12 +12,21 @@ base_model_name = "google/gemma-7b"
12
  #adapter_model_name = "samidh/cope-g2b-2c-hs.s1.5fpc.9-sx.s1.5.9o-vl.s1.5.9-hr.s5-sh.s5.l5e5-e3-d25-r8"
13
  adapter_model_name = "samidh/cope-g7bq-2c-hs.s1.5fpc.9-sx.s1.5.9o-VL.s1.5.9-HR.s5-SH.s5-l5e5-e3-d25-r8"
14
 
 
 
 
 
 
 
 
 
15
  bnb_config = BitsAndBytesConfig(
16
  load_in_4bit=True,
17
  bnb_4bit_quant_type="nf4",
18
  bnb_4bit_compute_dtype=torch.bfloat16,
19
  bnb_4bit_use_double_quant=True
20
  )
 
21
 
22
  model = AutoModelForCausalLM.from_pretrained(base_model_name,
23
  token=os.environ['HF_TOKEN'],
 
12
  #adapter_model_name = "samidh/cope-g2b-2c-hs.s1.5fpc.9-sx.s1.5.9o-vl.s1.5.9-hr.s5-sh.s5.l5e5-e3-d25-r8"
13
  adapter_model_name = "samidh/cope-g7bq-2c-hs.s1.5fpc.9-sx.s1.5.9o-VL.s1.5.9-HR.s5-SH.s5-l5e5-e3-d25-r8"
14
 
15
+ bnb_config = BitsAndBytesConfig(
16
+ load_in_4bit=True,
17
+ bnb_4bit_quant_type="nf4",
18
+ bnb_4bit_compute_dtype=torch.bfloat16,
19
+ #bnb_4bit_use_double_quant=True
20
+ )
21
+
22
+ """
23
  bnb_config = BitsAndBytesConfig(
24
  load_in_4bit=True,
25
  bnb_4bit_quant_type="nf4",
26
  bnb_4bit_compute_dtype=torch.bfloat16,
27
  bnb_4bit_use_double_quant=True
28
  )
29
+ """
30
 
31
  model = AutoModelForCausalLM.from_pretrained(base_model_name,
32
  token=os.environ['HF_TOKEN'],