init
training_scripts/finetune_t5.py CHANGED
@@ -71,7 +71,7 @@ def train(
     dataset_split_validation: str,
     dataset_split_test: str,
     lr: List,
-    epoch: int,
+    epoch: List,
     batch: List,
     down_sample_train: int,
     down_sample_validation: int,
@@ -138,7 +138,8 @@ def train(
     if not skip_train:
         lr = [1e-6, 1e-4] if lr is None else lr
         batch = [64] if batch is None else batch
-        for n, (lr_tmp, batch_tmp) in enumerate(product(lr, batch)):
+        epoch = [1, 3, 5] if epoch is None else epoch
+        for n, (lr_tmp, batch_tmp, epoch_tmp) in enumerate(product(lr, batch, epoch)):
             logging.info(f"[TRAIN {n}/{len(lr) * len(batch)}] lr: {lr_tmp}, batch: {batch_tmp}")
             model = load_model(
                 model_name=model_name, use_auth_token=use_auth_token, low_cpu_mem_usage=model_low_cpu_mem_usage
@@ -146,7 +147,7 @@ def train(
             trainer = Seq2SeqTrainer(
                 model=model,
                 args=Seq2SeqTrainingArguments(
-                    num_train_epochs=epoch,
+                    num_train_epochs=epoch_tmp,
                     learning_rate=lr_tmp,
                     output_dir=f"{output_dir}/model_{n}",
                     evaluation_strategy='steps',
@@ -166,12 +167,6 @@ def train(
             trainer.log_metrics("train", metrics)
             trainer.save_metrics("train", metrics)
             trainer.save_state()
-
-            # trainer.save_model(f'{output_dir}/model_{n}')
-            # tokenizer.save_pretrained(f'{output_dir}/model_{n}')
-            # # grid search
-            # with open(f'{output_dir}/model_{n}/hyperparameters.json', 'w') as f:
-            #     json.dump({"learning_rate": lr_tmp, "batch_size": batch_tmp}, f)
             del trainer
             gc.collect()
             torch.cuda.empty_cache()
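Taken together, the change makes the number of training epochs a third grid-search axis next to learning rate and batch size: `epoch` becomes a `List` argument with a `[1, 3, 5]` default, and each configuration feeds `num_train_epochs` through `itertools.product`. Below is a minimal standalone sketch of that search loop, not the script itself: `run_one_trial` is a hypothetical stand-in for the `load_model`/`Seq2SeqTrainer` block, and the trial total is computed as `len(lr) * len(batch) * len(epoch)`, since the logging line kept unchanged in the diff still divides by `len(lr) * len(batch)` only.

import logging
from itertools import product
from typing import List, Optional

logging.basicConfig(level=logging.INFO)


def run_one_trial(lr_tmp: float, batch_tmp: int, epoch_tmp: int) -> None:
    # Hypothetical placeholder for the per-configuration work in the script
    # (load_model, Seq2SeqTrainer with num_train_epochs=epoch_tmp, cleanup).
    pass


def grid_search(lr: Optional[List[float]] = None,
                batch: Optional[List[int]] = None,
                epoch: Optional[List[int]] = None) -> None:
    # Same defaults as in the diff above.
    lr = [1e-6, 1e-4] if lr is None else lr
    batch = [64] if batch is None else batch
    epoch = [1, 3, 5] if epoch is None else epoch
    # epoch now contributes to the trial count as well.
    total = len(lr) * len(batch) * len(epoch)
    for n, (lr_tmp, batch_tmp, epoch_tmp) in enumerate(product(lr, batch, epoch)):
        # n + 1 so the progress counter runs 1..total rather than 0..total-1.
        logging.info(f"[TRAIN {n + 1}/{total}] lr: {lr_tmp}, batch: {batch_tmp}, epoch: {epoch_tmp}")
        run_one_trial(lr_tmp, batch_tmp, epoch_tmp)


if __name__ == "__main__":
    grid_search()  # with the defaults this runs 2 * 1 * 3 = 6 configurations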