import logging

import torch.nn as nn

from s3prl import Container, field
from s3prl.base import Logs
from s3prl.dataset.base import SequentialDataPipe
from s3prl.nn import S3PRLUpstreamDriver, UpstreamDownstreamModel
from s3prl.problem.base import Problem
from s3prl.problem.trainer import Trainer
from s3prl.util import workspace
from s3prl.util.configuration import default_cfg
from s3prl.util.seed import fix_random_seeds
from s3prl.util.workspace import Workspace, as_type

logger = logging.getLogger(__name__)

class SuperbProblem(Problem, Trainer):
    @default_cfg(
        workspace=field(
            "???",
            "\nWill put the following keys into this workspace:\n"
            " 'train_dataset', 'train_sampler', 'valid_dataset', 'valid_sampler', and 'task'",
            "str or Path or Workspace",
        ),
        corpus=dict(
            CLS=field(
                "???",
                "\nThe corpus class. You can add the **kwargs right below this CLS key",
                str,
            ),
            dataset_root=field("???", "The root path of the corpus", str),
        ),
        train_datapipe=dict(
            CLS=field(
                "???",
                "\nThe first datapipe class to be applied to the corpus. You can add the **kwargs right below this CLS key",
                str,
            ),
        ),
        train_sampler=dict(
            CLS=field(
                "???",
                "\nThe batch sampler class. You can add the **kwargs right below this CLS key",
                str,
            ),
        ),
        valid_datapipe=dict(
            CLS=field(
                "???",
                "\nThe first datapipe class to be applied to the corpus. You can add the **kwargs right below this CLS key",
                str,
            ),
        ),
        valid_sampler=dict(
            CLS=field(
                "???",
                "\nThe batch sampler class. You can add the **kwargs right below this CLS key",
                str,
            ),
        ),
        test_datapipe=dict(
            CLS=field(
                "???",
                "\nThe first datapipe class to be applied to the corpus. You can add the **kwargs right below this CLS key",
                str,
            ),
        ),
        test_sampler=dict(
            CLS=field(
                "???",
                "\nThe batch sampler class. You can add the **kwargs right below this CLS key",
                str,
            ),
        ),
        upstream=dict(
            CLS=field(
                S3PRLUpstreamDriver,
                "\nThe class of the upstream model following the specific interface. You can add the **kwargs right below this CLS key",
                str,
            ),
            name="???",
            feature_selection="hidden_states",
            freeze_upstream=field(
                True,
                "If True, set requires_grad to False for the entire upstream model; otherwise, leave it unchanged",
            ),
            normalize=field(
                False, "Apply layer-norm to each layer's hidden_state of the upstream model"
            ),
            weighted_sum=field(
                True,
                "If True, apply weighted-sum over the selected layers; if False, take the final layer.\n"
                "For the selected layers, see the 'layer_selections' option",
            ),
            layer_selections=field(
                None,
                "If None, select all layers; otherwise, select only the subset of layers defined by this option",
            ),
        ),
        downstream=dict(
            CLS=field(
                "???",
                "\nThe downstream model class for each task. You can add the **kwargs right below this CLS key",
                str,
            ),
        ),
        task=dict(
            CLS=field(
                "???",
                "\nThe task class defining what to do for each train/valid/test step in the train/valid/test dataloader loop"
                "\nYou can add the **kwargs right below this CLS key",
                str,
            ),
        ),
    )
    @classmethod
    def setup(cls, **cfg) -> Container:
        """Build the corpus, datasets, samplers, model, and task from the config,
        and save them into the given workspace."""
        cfg = Container(cfg)
        workspace = Workspace(cfg.workspace)
        fix_random_seeds()

        # Instantiate the upstream model first: its downsample rate is needed
        # by the datapipes below as the feature frame shift
        upstream = cfg.upstream()
        stats = Container(
            feat_frame_shift=upstream.downsample_rate,
        )

        logger.info("Preparing corpus")
        train_data, valid_data, test_data, corpus_stats = Container(cfg.corpus()).split(
            3
        )
        stats.add(corpus_stats)

        logger.info("Preparing train data")
        train_dataset = cfg.train_datapipe(**stats)(train_data, **stats)
        train_sampler = cfg.train_sampler(train_dataset)
        stats.add(train_dataset.all_tools())
        workspace.environ.update(stats)

        logger.info("Preparing valid data")
        valid_dataset = cfg.valid_datapipe(**dict(workspace.environ))(
            valid_data, **dict(workspace.environ)
        )
        valid_sampler = cfg.valid_sampler(valid_dataset)

        logger.info("Preparing test data")
        test_dataset = cfg.test_datapipe(**dict(workspace.environ))(
            test_data, **dict(workspace.environ)
        )
        test_sampler = cfg.test_sampler(test_dataset)

        logger.info("Preparing model and task")
        downstream = cfg.downstream(upstream.output_size, **dict(workspace.environ))
        model = UpstreamDownstreamModel(upstream, downstream)
        task = cfg.task(model, **stats)

        # Save everything needed by the later stages into the workspace
        workspace["train_data"] = train_data
        workspace["valid_data"] = valid_data
        workspace["test_data"] = test_data
        workspace["train_dataset"] = train_dataset
        workspace["train_sampler"] = train_sampler
        workspace["valid_dataset"] = valid_dataset
        workspace["valid_sampler"] = valid_sampler
        workspace["test_dataset"] = test_dataset
        workspace["test_sampler"] = test_sampler
        workspace["task"] = task