Kevin Fink
committed on
Commit
·
cbf3fe5
1
Parent(s):
7a18ac9
dev
Browse files
app.py
CHANGED
@@ -126,7 +126,7 @@ def fine_tune_model(model, dataset_name, hub_id, api_key, num_epochs, batch_size
|
|
126 |
args=training_args,
|
127 |
train_dataset=train_dataset,
|
128 |
eval_dataset=saved_test_dataset,
|
129 |
-
|
130 |
)
|
131 |
|
132 |
elif os.access(f'/data/{hub_id.strip()}_train_dataset3', os.R_OK):
|
@@ -227,7 +227,7 @@ def run_train(dataset_name, hub_id, api_key, num_epochs, batch_size, lr, grad):
|
|
227 |
lora_dropout=0.1, # Dropout for LoRA layers
|
228 |
bias="none" # Bias handling
|
229 |
)
|
230 |
-
model = get_peft_model(model, lora_config)
|
231 |
result = fine_tune_model(model, dataset_name, hub_id, api_key, num_epochs, batch_size, lr, grad)
|
232 |
return result
|
233 |
# Create Gradio interface
|
|
|
126 |
args=training_args,
|
127 |
train_dataset=train_dataset,
|
128 |
eval_dataset=saved_test_dataset,
|
129 |
+
compute_metrics=compute_metrics,
|
130 |
)
|
131 |
|
132 |
elif os.access(f'/data/{hub_id.strip()}_train_dataset3', os.R_OK):
|
|
|
227 |
lora_dropout=0.1, # Dropout for LoRA layers
|
228 |
bias="none" # Bias handling
|
229 |
)
|
230 |
+
#model = get_peft_model(model, lora_config)
|
231 |
result = fine_tune_model(model, dataset_name, hub_id, api_key, num_epochs, batch_size, lr, grad)
|
232 |
return result
|
233 |
# Create Gradio interface
|