Kevin Fink committed
Commit · 0958d38
Parent(s): f9e951b
init

app.py CHANGED
@@ -17,10 +17,10 @@ def fine_tune_model(model_name, dataset_name, hub_id, api_key, num_epochs, batch
         bias="none"  # Bias handling
     )
     # Load the dataset
-    dataset = load_dataset(dataset_name)
+    dataset = load_dataset(dataset_name.strip())
 
     # Load the model and tokenizer
-    model = AutoModelForSeq2SeqLM.from_pretrained(model_name, num_labels=2)
+    model = AutoModelForSeq2SeqLM.from_pretrained(model_name.strip(), num_labels=2)
     #model = get_peft_model(model, lora_config)
     tokenizer = AutoTokenizer.from_pretrained(model_name)
 
@@ -48,7 +48,7 @@ def fine_tune_model(model_name, dataset_name, hub_id, api_key, num_epochs, batch
         logging_dir='./logs',
         logging_steps=10,
         push_to_hub=True,
-        hub_model_id=hub_id,
+        hub_model_id=hub_id.strip(),
         fp16=True,
         #lr_scheduler_type='cosine',
     )
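The change normalizes the user-supplied strings before they reach the Hub APIs: dataset_name, model_name, and hub_id each get a .strip(), so stray whitespace (for example a trailing newline pasted into a text field) no longer breaks dataset or model resolution. A minimal sketch of that pattern, assuming the names arrive as raw strings from a form; the helper name and example identifier below are illustrative and not taken from app.py:

from datasets import load_dataset
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

def load_components(model_name, dataset_name):
    # Strip stray whitespace, e.g. "google/flan-t5-small\n" -> "google/flan-t5-small"
    model_name = model_name.strip()
    dataset_name = dataset_name.strip()

    dataset = load_dataset(dataset_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    return dataset, model, tokenizer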