Update app.py
app.py
CHANGED
@@ -84,12 +84,20 @@ def load_model(model_type, selected_model):
     tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL_NAME, token=HF_TOKEN)
 
     if model_type == "Full Fine-Tuned":
+
         model = AutoModelForCausalLM.from_pretrained(
             selected_model,
             torch_dtype=torch.bfloat16,
             device_map="auto",
-            token=HF_TOKEN
+            token=HF_TOKEN  # optional if using private repo
         )
+
+        #model = AutoModelForCausalLM.from_pretrained(
+        #selected_model,
+        # torch_dtype=torch.bfloat16,
+        # device_map="auto",
+        # token=HF_TOKEN
+        # )
     else:
         base_model = AutoModelForCausalLM.from_pretrained(
             BASE_MODEL_NAME,