RandomNameAnd6 committed on
Commit
e486f7d
·
verified ·
1 Parent(s): c743aa7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -2
app.py CHANGED
@@ -9,9 +9,8 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
9
 
10
  max_seq_length = 2048 # Choose any! We auto support RoPE Scaling internally!
11
  dtype = None # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
12
- load_in_4bit = True # Use 4bit quantization to reduce memory usage. Can be False.
13
 
14
- model, tokenizer = AutoModelForCausalLM.from_pretrained("unsloth/Qwen2-1.5B-bnb-4bit", max_seq_length = max_seq_length, dtype = dtype, load_in_4bit = load_in_4bit)
15
  model = PeftModel.from_pretrained(model, "RandomNameAnd6/Phi-3-Mini-Dhar-Mann-Adapters-BOS")
16
 
17
  def generate_text(prompt):
 
9
 
10
  max_seq_length = 2048 # Choose any! We auto support RoPE Scaling internally!
11
  dtype = None # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
 
12
 
13
+ model, tokenizer = AutoModelForCausalLM.from_pretrained("unsloth/Qwen2-1.5B-bnb-4bit", max_seq_length = max_seq_length, dtype = dtype)
14
  model = PeftModel.from_pretrained(model, "RandomNameAnd6/Phi-3-Mini-Dhar-Mann-Adapters-BOS")
15
 
16
  def generate_text(prompt):