Update app.py
app.py (CHANGED)
@@ -63,23 +63,29 @@ demo = gr.ChatInterface(
 if __name__ == "__main__":
     demo.launch()
 
-
+from transformers import AutoModelForCausalLM, AutoTokenizer
 from peft import PeftModel
 import torch
+import os
+
+# Set Hugging Face token
+os.environ["HF_TOKEN"] = "your_token_here"
 
 base_model_name = "unsloth/qwen2.5-math-7b-bnb-4bit"
 peft_model_name = "Hrushi02/Root_Math"
 
-# Load base model
+# Load base model with authentication
 base_model = AutoModelForCausalLM.from_pretrained(
     base_model_name,
     torch_dtype=torch.float16,
-    device_map="auto"
+    device_map="auto",
+    token=os.environ["HF_TOKEN"]
 )
 
 # Load PEFT model
-model = PeftModel.from_pretrained(base_model, peft_model_name)
+model = PeftModel.from_pretrained(base_model, peft_model_name, token=os.environ["HF_TOKEN"])
 
 # Load tokenizer
-tokenizer = AutoTokenizer.from_pretrained(base_model_name)
+tokenizer = AutoTokenizer.from_pretrained(base_model_name, token=os.environ["HF_TOKEN"])
+
 
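For context, a minimal sketch (not part of this commit) of how the loaded model and tokenizer could back the gr.ChatInterface callback referenced in the hunk header; the respond() function name, prompt handling, and generation settings are illustrative assumptions rather than the app's actual code:

import gradio as gr

def respond(message, history):
    # Tokenize the user message and move it to the same device as the base model
    inputs = tokenizer(message, return_tensors="pt").to(base_model.device)
    # Generate a continuation with the PEFT-adapted model
    output_ids = model.generate(**inputs, max_new_tokens=256)
    # Decode only the newly generated tokens, dropping the prompt
    new_tokens = output_ids[0][inputs["input_ids"].shape[-1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)

demo = gr.ChatInterface(respond)

On a Hugging Face Space, the token would normally be supplied as a repository secret and read with os.environ.get("HF_TOKEN") rather than assigned a placeholder value inside app.py.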