File size: 1,914 Bytes
02e480f |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 |
# Deep learning
import torch
import torch.nn as nn
from torch import optim
from trainers import TrainerRegressor
from utils import RMSELoss, get_optim_groups
# Data
import pandas as pd
import numpy as np
# Standard library
import args
import os
def main(config):
    """Finetune a SMI-TED regression model on the dataset described by *config*.

    Loads the train/valid/test CSV splits from ``config.data_root``, builds the
    requested SMI-TED variant, and runs regression training with
    ``TrainerRegressor``.

    Args:
        config: parsed argparse namespace (see ``args.get_parser()``); must
            provide data_root, smi_ted_version, model_path, ckpt_filename,
            n_output, lr_start, lr_multiplier, train_decoder, loss_fn,
            dataset_name, measure_name, n_batch, target_metric, start_seed,
            checkpoints_folder, save_ckpt, and max_epochs.

    Raises:
        ValueError: if ``config.smi_ted_version`` or ``config.loss_fn`` is not
            one of the recognized values.
    """
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    # load dataset splits (expects train.csv / valid.csv / test.csv under data_root)
    df_train = pd.read_csv(f"{config.data_root}/train.csv")
    df_valid = pd.read_csv(f"{config.data_root}/valid.csv")
    df_test = pd.read_csv(f"{config.data_root}/test.csv")

    # load model — fail fast on an unknown version instead of letting
    # load_smi_ted raise a confusing NameError below
    if config.smi_ted_version == 'v1':
        from smi_ted_light.load import load_smi_ted
    elif config.smi_ted_version == 'v2':
        from smi_ted_large.load import load_smi_ted
    else:
        raise ValueError(f"Unknown smi_ted_version: {config.smi_ted_version!r} (expected 'v1' or 'v2')")

    model = load_smi_ted(folder=config.model_path, ckpt_filename=config.ckpt_filename, n_output=config.n_output)
    # re-initialize the prediction head weights for finetuning from scratch
    model.net.apply(model._init_weights)
    print(model.net)

    lr = config.lr_start*config.lr_multiplier
    optim_groups = get_optim_groups(model, keep_decoder=bool(config.train_decoder))

    # select loss — fail fast on an unknown value instead of hitting an
    # UnboundLocalError when the trainer is compiled
    if config.loss_fn == 'rmse':
        loss_function = RMSELoss()
    elif config.loss_fn == 'mae':
        loss_function = nn.L1Loss()
    else:
        raise ValueError(f"Unknown loss_fn: {config.loss_fn!r} (expected 'rmse' or 'mae')")

    # init trainer
    trainer = TrainerRegressor(
        raw_data=(df_train, df_valid, df_test),
        dataset_name=config.dataset_name,
        target=config.measure_name,
        batch_size=config.n_batch,
        hparams=config,
        target_metric=config.target_metric,
        seed=config.start_seed,
        checkpoints_folder=config.checkpoints_folder,
        device=device,
        save_ckpt=bool(config.save_ckpt)
    )
    trainer.compile(
        model=model,
        optimizer=optim.AdamW(optim_groups, lr=lr, betas=(0.9, 0.99)),
        loss_fn=loss_function
    )
    trainer.fit(max_epochs=config.max_epochs)
if __name__ == '__main__':
    # Parse command-line arguments and launch finetuning.
    arg_parser = args.get_parser()
    parsed = arg_parser.parse_args()
    main(parsed)