# @package _global_

# specify here default training configuration
defaults:
  - _self_
  - trainer: default
  - optimizer: adamw
  - scheduler: null
  - task: sequence-model
  - model: null
  - datamodule: null
  - callbacks: default # set this to null if you don't want to use callbacks
  - metrics: null
  - logger: null # set logger here or use the command line (e.g. `python run.py logger=wandb`; see the examples after this list)
  - mode: default
  - experiment: null
  - hparams_search: null

  # enable color logging
  - override hydra/hydra_logging: colorlog
  - override hydra/job_logging: colorlog
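
# any config group above can also be swapped from the command line; the examples
# below are illustrative and assume config files with these names exist in the
# corresponding groups:
#   python run.py optimizer=adamw logger=wandb
#   python run.py experiment=my_experiment  # `my_experiment` is a hypothetical experiment config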

# path to original working directory
# hydra hijacks the working directory by changing it to the current log directory,
# so it's useful to have this path as a special variable
# https://hydra.cc/docs/next/tutorials/basic/running_your_app/working_directory
work_dir: ${hydra:runtime.cwd}

# path to folder with data
data_dir: ${work_dir}/data/
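# the default above can be overridden per run from the command line, e.g.
# `python run.py data_dir=/path/to/data` (the path shown is a placeholder)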

# pretty print config at the start of the run using the Rich library
print_config: True

# disable python warnings if they annoy you
ignore_warnings: True

# evaluate on the test set using the best model achieved during training;
# lightning chooses the best model based on the metric specified in the checkpoint callback
test_after_training: True

# resume training from an existing checkpoint of a previous run
# (exact resume behavior depends on how the training script interprets this flag)
resume: False

# seed for random number generators in pytorch, numpy and python.random
seed: null

# name of the run, accessed by loggers
name: null
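# the run name and seed are typically set per run from the command line, e.g.
# `python run.py name=baseline seed=1234` (`baseline` is just an example run name)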