PhantHive committed
Commit 2d665bd · verified · 1 Parent(s): e9cf7a6

Update app.py

Files changed (1): app.py (+1 −1)
app.py CHANGED
@@ -20,7 +20,7 @@ tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
 model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path, quantization_config=bnb_config)
 
 # Load the Lora model
-model = PeftModel.from_pretrained(mode, model_id)
+model = PeftModel.from_pretrained(model, model_id)
 
 def greet(text):
     with torch.no_grad():  # Disable gradient calculation for inference
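
For context, here is a minimal sketch of the corrected loading flow around this line, assuming the variables visible in the diff context (config, bnb_config, model_id, tokenizer) are defined earlier in app.py. The adapter repo id, quantization settings, and the body of greet below are hypothetical placeholders, not taken from the actual file.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import PeftConfig, PeftModel

# Hypothetical adapter repo id; the real value is defined elsewhere in app.py.
model_id = "your-username/your-lora-adapter"

# Read the adapter config to find the base model it was trained on.
config = PeftConfig.from_pretrained(model_id)

# Assumed quantization settings; app.py builds its own bnb_config.
bnb_config = BitsAndBytesConfig(load_in_4bit=True)

tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
model = AutoModelForCausalLM.from_pretrained(
    config.base_model_name_or_path, quantization_config=bnb_config
)

# The fix in this commit: pass the base model object ("model"), not the
# typo "mode", so the LoRA adapter is attached to the quantized base model.
model = PeftModel.from_pretrained(model, model_id)

def greet(text):
    inputs = tokenizer(text, return_tensors="pt")
    with torch.no_grad():  # Disable gradient calculation for inference
        outputs = model.generate(**inputs, max_new_tokens=64)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)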