RandomNameAnd6 committed on
Commit
c743aa7
·
verified ·
1 Parent(s): b12a372

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -1
app.py CHANGED
@@ -11,7 +11,7 @@ max_seq_length = 2048 # Choose any! We auto support RoPE Scaling internally!
11
  dtype = None # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
12
  load_in_4bit = True # Use 4bit quantization to reduce memory usage. Can be False.
13
 
14
- model, tokenizer = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2-1.5B", max_seq_length = max_seq_length, dtype = dtype, load_in_4bit = load_in_4bit)
15
  model = PeftModel.from_pretrained(model, "RandomNameAnd6/Phi-3-Mini-Dhar-Mann-Adapters-BOS")
16
 
17
  def generate_text(prompt):
 
11
  dtype = None # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
12
  load_in_4bit = True # Use 4bit quantization to reduce memory usage. Can be False.
13
 
14
+ model, tokenizer = AutoModelForCausalLM.from_pretrained("unsloth/Qwen2-1.5B-bnb-4bit", max_seq_length = max_seq_length, dtype = dtype, load_in_4bit = load_in_4bit)
15
  model = PeftModel.from_pretrained(model, "RandomNameAnd6/Phi-3-Mini-Dhar-Mann-Adapters-BOS")
16
 
17
  def generate_text(prompt):