Alkhalaf committed on
Commit
d43a866
·
verified ·
1 Parent(s): 9e6d181

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -4
app.py CHANGED
@@ -5,27 +5,28 @@ from huggingface_hub import login
5
  import torch
6
  import os
7
  hf_token = os.getenv("llama")
8
- login(hf_token)
9
  # Model and adapter paths
10
  model_name = "unsloth/llama-3.2-1b-instruct-bnb-4bit" # Base model
11
  adapter_name = "Alkhalaf/lora_model" # LoRA model adapter
12
 
13
  # Load tokenizer
14
- tokenizer = AutoTokenizer.from_pretrained(model_name, use_auth_token=True)
15
 
16
  # Load the LoRA adapter configuration
17
- peft_config = PeftConfig.from_pretrained(adapter_name, use_auth_token=True)
18
 
19
  # Load the base model
20
  base_model = AutoModelForCausalLM.from_pretrained(
21
  peft_config.base_model_name_or_path,
 
22
 
23
  #torch_dtype=torch.float16
24
 
25
 
26
  )
27
  # Apply the LoRA adapter to the base model
28
- model = PeftModel.from_pretrained(base_model, adapter_name, use_auth_token=True)
29
 
30
  # Define prediction function
31
  def predict(input_text):
 
5
  import torch
6
  import os
7
  hf_token = os.getenv("llama")
8
+ #login(hf_token)
9
  # Model and adapter paths
10
  model_name = "unsloth/llama-3.2-1b-instruct-bnb-4bit" # Base model
11
  adapter_name = "Alkhalaf/lora_model" # LoRA model adapter
12
 
13
  # Load tokenizer
14
+ tokenizer = AutoTokenizer.from_pretrained(model_name, use_auth_token=hf_token)
15
 
16
  # Load the LoRA adapter configuration
17
+ peft_config = PeftConfig.from_pretrained(adapter_name, use_auth_token=hf_token)
18
 
19
  # Load the base model
20
  base_model = AutoModelForCausalLM.from_pretrained(
21
  peft_config.base_model_name_or_path,
22
+ token=hf_token,
23
 
24
  #torch_dtype=torch.float16
25
 
26
 
27
  )
28
  # Apply the LoRA adapter to the base model
29
+ model = PeftModel.from_pretrained(base_model, adapter_name, use_auth_token=hf_token)
30
 
31
  # Define prediction function
32
  def predict(input_text):