from __gin__ import dynamic_registration
import __main__ as train_script
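# `__main__` is the launching script (t5x's train.py), so the
# train_script.train bindings below configure its top-level train() function.
# Illustrative launch (flag values are assumptions, not from this file):
#   python -m t5x.train --gin_file=<this_config>.gin --gin.MODEL_DIR="'...'"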
import seqio
import t5.data.mixtures
from t5x import adafactor
from t5x.examples.t5 import network
from t5x import gin_utils
from t5x import models
from t5x import partitioning
from t5x import trainer
from t5x import utils
import tasks.nedd_tasks
import tasks.ul2_tasks as tasks2
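# The two local task modules register the SeqIO tasks and mixtures,
# including the 'ul2_en_nl_mc4_nedd_wiki_news_mix_1' mixture trained on below.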
# Macros:
# ==============================================================================
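# In gin, %NAME substitutes a macro defined here, and @name() binds the
# result of calling the referenced configurable.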
BATCH_SIZE = 128
DROPOUT_RATE = 0.0
LABEL_SMOOTHING = 0.0
LOSS_NORMALIZING_FACTOR = None
MIXTURE_OR_TASK_MODULE = None
MIXTURE_OR_TASK_NAME = 'ul2_en_nl_mc4_nedd_wiki_news_mix_1'
MODEL = @models.EncoderDecoderModel()
MODEL_DIR = 'ul2_small_en_nl_mc4_nedd_wiki_news_nl'
OPTIMIZER = @adafactor.Adafactor()
RANDOM_SEED = None
SHUFFLE_TRAIN_EXAMPLES = True
TASK_FEATURE_LENGTHS = {'inputs': 512, 'targets': 512}
TRAIN_STEPS = 1000000
USE_CACHED_TASKS = False
USE_HARDWARE_RNG = False
VOCABULARY = @seqio.SentencePieceVocabulary()
Z_LOSS = 0.0001
# Parameters for adafactor.Adafactor:
# ==============================================================================
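# Standard T5 Adafactor setup: second-moment decay rate 0.8 with no step
# offset; standard_logical_factor_rules maps logical parameter axes to
# Adafactor's factored row/column statistics.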
adafactor.Adafactor.decay_rate = 0.8
adafactor.Adafactor.logical_factor_rules = \
@adafactor.standard_logical_factor_rules()
adafactor.Adafactor.step_offset = 0
# Parameters for utils.CheckpointConfig:
# ==============================================================================
utils.CheckpointConfig.restore = @utils.RestoreCheckpointConfig()
utils.CheckpointConfig.save = @utils.SaveCheckpointConfig()
# Parameters for utils.create_learning_rate_scheduler:
# ==============================================================================
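# 'constant * rsqrt_decay' yields lr(step) = 1.0 / sqrt(max(step, 10000)):
# constant at 0.01 during the 10k warmup steps, then inverse-sqrt decay.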
utils.create_learning_rate_scheduler.base_learning_rate = 1.0
utils.create_learning_rate_scheduler.factors = 'constant * rsqrt_decay'
utils.create_learning_rate_scheduler.warmup_steps = 10000
# Parameters for train/utils.DatasetConfig:
# ==============================================================================
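# pack=True packs multiple short examples into each 512-token sequence to
# reduce padding; seed=None leaves the training pipeline's shuffling
# nondeterministic across runs.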
train/utils.DatasetConfig.batch_size = %BATCH_SIZE
train/utils.DatasetConfig.mixture_or_task_name = %MIXTURE_OR_TASK_NAME
train/utils.DatasetConfig.module = %MIXTURE_OR_TASK_MODULE
train/utils.DatasetConfig.pack = True
train/utils.DatasetConfig.seed = None
train/utils.DatasetConfig.shuffle = %SHUFFLE_TRAIN_EXAMPLES
train/utils.DatasetConfig.split = 'train'
train/utils.DatasetConfig.task_feature_lengths = %TASK_FEATURE_LENGTHS
train/utils.DatasetConfig.use_cached = %USE_CACHED_TASKS
# Parameters for train_eval/utils.DatasetConfig:
# ==============================================================================
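# Held-out eval on the validation split: a fixed seed and no shuffling keep
# the evaluation batches identical across runs.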
train_eval/utils.DatasetConfig.batch_size = %BATCH_SIZE
train_eval/utils.DatasetConfig.mixture_or_task_name = %MIXTURE_OR_TASK_NAME
train_eval/utils.DatasetConfig.module = %MIXTURE_OR_TASK_MODULE
train_eval/utils.DatasetConfig.pack = True
train_eval/utils.DatasetConfig.seed = 42
train_eval/utils.DatasetConfig.shuffle = False
train_eval/utils.DatasetConfig.split = 'validation'
train_eval/utils.DatasetConfig.task_feature_lengths = %TASK_FEATURE_LENGTHS
train_eval/utils.DatasetConfig.use_cached = %USE_CACHED_TASKS
# Parameters for models.EncoderDecoderModel:
# ==============================================================================
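# z_loss adds a small penalty on the softmax log-normalizer to keep logits
# from drifting; label smoothing is disabled via the macros above.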
models.EncoderDecoderModel.input_vocabulary = %VOCABULARY
models.EncoderDecoderModel.label_smoothing = %LABEL_SMOOTHING
models.EncoderDecoderModel.loss_normalizing_factor = %LOSS_NORMALIZING_FACTOR
models.EncoderDecoderModel.module = @network.Transformer()
models.EncoderDecoderModel.optimizer_def = %OPTIMIZER
models.EncoderDecoderModel.output_vocabulary = %VOCABULARY
models.EncoderDecoderModel.z_loss = %Z_LOSS
# Parameters for partitioning.PjitPartitioner:
# ==============================================================================
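# num_partitions=1 means no model parallelism: parameters are replicated
# and training is purely data-parallel across devices.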
partitioning.PjitPartitioner.logical_axis_rules = \
@partitioning.standard_logical_axis_rules()
partitioning.PjitPartitioner.model_parallel_submesh = None
partitioning.PjitPartitioner.num_partitions = 1
# Parameters for utils.RestoreCheckpointConfig:
# ==============================================================================
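# An empty restore path means no checkpoint is loaded; training starts
# from randomly initialized weights.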
utils.RestoreCheckpointConfig.path = []
# Parameters for utils.SaveCheckpointConfig:
# ==============================================================================
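# Saves float32 checkpoints every 50,000 steps, keeping the 4 most recent;
# the input-pipeline state is not checkpointed (save_dataset=False).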
utils.SaveCheckpointConfig.dtype = 'float32'
utils.SaveCheckpointConfig.keep = 4
utils.SaveCheckpointConfig.period = 50000
utils.SaveCheckpointConfig.save_dataset = False
utils.SaveCheckpointConfig.use_gda = False
# Parameters for seqio.SentencePieceVocabulary:
# ==============================================================================
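# Dutch/English SentencePiece model: 32,000 pieces plus 128 extra ids
# (sentinel tokens), matching vocab_size=32128 below.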
seqio.SentencePieceVocabulary.sentencepiece_model_file = \
'gs://t5-dutch-english/vocabs/nedd.32000.128extra/spiece.model'
# Parameters for network.T5Config:
# ==============================================================================
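# These dimensions match the t5x t5.1.1 'small' architecture: 8 encoder and
# 8 decoder layers, d_model=512, 6 heads of dim 64, and a gated-GELU MLP
# ('gelu', 'linear') of width 1024, computed in bfloat16.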
network.T5Config.dropout_rate = %DROPOUT_RATE
network.T5Config.dtype = 'bfloat16'
network.T5Config.emb_dim = 512
network.T5Config.head_dim = 64
network.T5Config.logits_via_embedding = False
network.T5Config.mlp_activations = ('gelu', 'linear')
network.T5Config.mlp_dim = 1024
network.T5Config.num_decoder_layers = 8
network.T5Config.num_encoder_layers = 8
network.T5Config.num_heads = 6
network.T5Config.vocab_size = 32128
# Parameters for train_script.train:
# ==============================================================================
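# Runs 1M training steps, evaluating on the train_eval dataset every 2,000
# steps for 20 batches; inference-mode (decoding) eval is disabled.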
train_script.train.checkpoint_cfg = @utils.CheckpointConfig()
train_script.train.eval_period = 2000
train_script.train.eval_steps = 20
train_script.train.infer_eval_dataset_cfg = None
train_script.train.model = %MODEL
train_script.train.model_dir = %MODEL_DIR
train_script.train.partitioner = @partitioning.PjitPartitioner()
train_script.train.random_seed = %RANDOM_SEED
train_script.train.stats_period = 100
train_script.train.summarize_config_fn = @gin_utils.summarize_gin_config
train_script.train.total_steps = %TRAIN_STEPS
train_script.train.train_dataset_cfg = @train/utils.DatasetConfig()
train_script.train.train_eval_dataset_cfg = @train_eval/utils.DatasetConfig()
train_script.train.trainer_cls = @trainer.Trainer
train_script.train.use_hardware_rng = %USE_HARDWARE_RNG
# Parameters for trainer.Trainer:
# ==============================================================================
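# num_microbatches=None disables gradient accumulation, so each optimizer
# step consumes the full batch of 128.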
trainer.Trainer.learning_rate_fn = @utils.create_learning_rate_scheduler()
trainer.Trainer.num_microbatches = None
# Parameters for network.Transformer:
# ==============================================================================
network.Transformer.config = @network.T5Config()