Commit 780094b: Add T5 model and tokenizer pretrained on mC4/ja and Japanese Wikipedia corpus
{
  "d_ff": 3072,
  "d_kv": 64,
  "d_model": 768,
  "dropout_rate": 0.1,
  "finetuning_task": null,
  "id2label": {
    "0": "LABEL_0",
    "1": "LABEL_1"
  },
  "initializer_factor": 1.0,
  "is_decoder": false,
  "is_encoder_decoder": true,
  "label2id": {
    "LABEL_0": 0,
    "LABEL_1": 1
  },
  "layer_norm_epsilon": 1e-06,
  "n_positions": 512,
  "num_heads": 12,
  "num_labels": 2,
  "num_layers": 12,
  "relative_attention_num_buckets": 32,
  "torchscript": false,
  "use_bfloat16": false,
  "vocab_size": 32128,
  "max_length": 512,
  "num_beams": 4,
  "decoder_start_token_id": 0,
  "pad_token_id": 0,
  "bos_token_id": 0,
  "eos_token_ids": [1]
}
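
For reference, a minimal sketch of loading a checkpoint described by this config with Hugging Face Transformers and running beam-search generation. The repo ID "sonoisa/t5-base-japanese" is an assumption inferred from the commit message, not stated in this file; the generation arguments simply mirror the max_length, num_beams, and token-ID values above.

    # Minimal sketch, assuming the checkpoint lives at the repo ID below.
    from transformers import T5ForConditionalGeneration, T5Tokenizer

    model_name = "sonoisa/t5-base-japanese"  # assumed repo ID, not in this file
    tokenizer = T5Tokenizer.from_pretrained(model_name)
    model = T5ForConditionalGeneration.from_pretrained(model_name)

    # n_positions / max_length is 512, so truncate inputs to that length.
    inputs = tokenizer(
        "こんにちは、世界",  # arbitrary example input
        return_tensors="pt",
        max_length=512,
        truncation=True,
    )

    # num_beams=4 and max_length=512 match the generation defaults above;
    # pad_token_id 0 and eos token id 1 are picked up from the config itself.
    outputs = model.generate(**inputs, max_length=512, num_beams=4)
    print(tokenizer.decode(outputs[0], skip_special_tokens=True))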