Alkhalaf committed on
Commit fc79e83 · verified · 1 Parent(s): d43a866

Update app.py

Files changed (1): app.py (+3 −3)
app.py CHANGED
@@ -11,10 +11,10 @@ model_name = "unsloth/llama-3.2-1b-instruct-bnb-4bit" # Base model
 adapter_name = "Alkhalaf/lora_model" # LoRA model adapter

 # Load tokenizer
-tokenizer = AutoTokenizer.from_pretrained(model_name, use_auth_token=hf_token)
+tokenizer = AutoTokenizer.from_pretrained(model_name, token=hf_token)

 # Load the LoRA adapter configuration
-peft_config = PeftConfig.from_pretrained(adapter_name, use_auth_token=hf_token)
+peft_config = PeftConfig.from_pretrained(adapter_name, token=hf_token)

 # Load the base model
 base_model = AutoModelForCausalLM.from_pretrained(
@@ -26,7 +26,7 @@ base_model = AutoModelForCausalLM.from_pretrained(

 )
 # Apply the LoRA adapter to the base model
-model = PeftModel.from_pretrained(base_model, adapter_name, use_auth_token=hf_token)
+model = PeftModel.from_pretrained(base_model, adapter_name, token=hf_token)

 # Define prediction function
 def predict(input_text):
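
For context, this commit swaps the deprecated use_auth_token= keyword for the newer token= keyword in the three from_pretrained calls. Below is a minimal sketch of what the loading sequence looks like after the change; the imports, the HF_TOKEN environment-variable lookup, and the device_map argument are assumptions for illustration only, not taken from the repository (the actual arguments to the base-model call are not shown in this diff).

import os
from transformers import AutoTokenizer, AutoModelForCausalLM
from peft import PeftConfig, PeftModel

# Hypothetical token lookup; app.py may obtain hf_token another way.
hf_token = os.environ.get("HF_TOKEN")

model_name = "unsloth/llama-3.2-1b-instruct-bnb-4bit"  # Base model
adapter_name = "Alkhalaf/lora_model"  # LoRA model adapter

# Load tokenizer (token= replaces the deprecated use_auth_token=)
tokenizer = AutoTokenizer.from_pretrained(model_name, token=hf_token)

# Load the LoRA adapter configuration
peft_config = PeftConfig.from_pretrained(adapter_name, token=hf_token)

# Load the base model; device_map="auto" is an assumed argument here,
# the real call in app.py passes options elided from this diff.
base_model = AutoModelForCausalLM.from_pretrained(
    model_name,
    token=hf_token,
    device_map="auto",
)

# Apply the LoRA adapter to the base model
model = PeftModel.from_pretrained(base_model, adapter_name, token=hf_token)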