init
--- a/training_scripts/finetune_t5.py
+++ b/training_scripts/finetune_t5.py
@@ -140,7 +140,11 @@ def train(
     batch = [64] if batch is None else batch
     for n, (lr_tmp, batch_tmp) in enumerate(product(lr, batch)):
         logging.info(f"[TRAIN {n}/{len(lr) * len(batch)}] lr: {lr_tmp}, batch: {batch_tmp}")
+        model = load_model(
+            model_name=model_name, use_auth_token=use_auth_token, low_cpu_mem_usage=model_low_cpu_mem_usage
+        )
         trainer = Seq2SeqTrainer(
+            model=model,
             args=Seq2SeqTrainingArguments(
                 num_train_epochs=epoch,
                 learning_rate=lr_tmp,
@@ -151,10 +155,7 @@ def train(
                 seed=random_seed,
                 per_device_train_batch_size=batch_tmp,
             ),
-            data_collator=transformers.DataCollatorForSeq2Seq(tokenizer, model=load_model(
-                model_name=model_name,
-                use_auth_token=use_auth_token,
-                low_cpu_mem_usage=model_low_cpu_mem_usage)),
+            data_collator=transformers.DataCollatorForSeq2Seq(tokenizer, model=model),
             train_dataset=tokenized_dataset['train_ds'],
             eval_dataset=tokenized_dataset['validation_ds'],
             compute_metrics=compute_metric,
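The diff calls a load_model helper that is defined elsewhere in finetune_t5.py and not shown in these hunks. As a minimal sketch only, a wrapper matching the call sites above could look like the following, assuming the script loads a T5-style checkpoint through AutoModelForSeq2SeqLM (the repo's actual class choice and defaults may differ):

# Hypothetical sketch of load_model, inferred from the call sites in the diff.
# AutoModelForSeq2SeqLM and the from_pretrained kwargs are standard
# transformers API; the wrapper itself is an assumption, not the repo's code.
from transformers import AutoModelForSeq2SeqLM

def load_model(model_name, use_auth_token=None, low_cpu_mem_usage=False):
    # Loading a fresh model per (lr, batch) combination keeps the
    # hyperparameter runs independent instead of continuing to fine-tune
    # already-updated weights from the previous iteration.
    return AutoModelForSeq2SeqLM.from_pretrained(
        model_name,
        use_auth_token=use_auth_token,        # auth for private/gated Hub repos
        low_cpu_mem_usage=low_cpu_mem_usage,  # lazy weight init to cut peak RAM
    )

After the change, the one instantiated model is shared by Seq2SeqTrainer and DataCollatorForSeq2Seq; the collator only uses it to build decoder_input_ids from the labels, so no second copy of the weights is loaded, which is effectively what the removed lines were doing by constructing the model inside the collator.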