tharindu committed ffcd67c (verified) · 1 parent: e2d46e9

Upload folder using huggingface_hub
best_model/decoder/config.json ADDED
@@ -0,0 +1,33 @@
+ {
+ "_name_or_path": "bert-base-multilingual-cased",
+ "add_cross_attention": true,
+ "architectures": [
+ "BertLMHeadModel"
+ ],
+ "attention_probs_dropout_prob": 0.1,
+ "classifier_dropout": null,
+ "directionality": "bidi",
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 768,
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "is_decoder": true,
+ "layer_norm_eps": 1e-12,
+ "max_position_embeddings": 512,
+ "model_type": "bert",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 12,
+ "pad_token_id": 0,
+ "pooler_fc_size": 768,
+ "pooler_num_attention_heads": 12,
+ "pooler_num_fc_layers": 3,
+ "pooler_size_per_head": 128,
+ "pooler_type": "first_token_transform",
+ "position_embedding_type": "absolute",
+ "torch_dtype": "float32",
+ "transformers_version": "4.29.2",
+ "type_vocab_size": 2,
+ "use_cache": true,
+ "vocab_size": 119547
+ }
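
This decoder config marks the checkpoint as a causal decoder with cross-attention ("is_decoder", "add_cross_attention"), which is what lets an otherwise plain BERT stack attend over encoder states. A minimal sketch of loading just this half with transformers, assuming the local path mirrors this repo's best_model/decoder layout:

from transformers import BertConfig, BertLMHeadModel

# Load the config shipped in this commit; the two flags below are what
# distinguish it from stock bert-base-multilingual-cased.
config = BertConfig.from_pretrained("best_model/decoder")
assert config.is_decoder and config.add_cross_attention

# BertLMHeadModel matches the "architectures" field in config.json.
decoder = BertLMHeadModel.from_pretrained("best_model/decoder", config=config)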
best_model/decoder/generation_config.json ADDED
@@ -0,0 +1,5 @@
+ {
+ "_from_model_config": true,
+ "pad_token_id": 0,
+ "transformers_version": "4.29.2"
+ }
best_model/decoder/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7fed1543b11a26201299b540cb0b859c126c31825d3e1f0b7e4dc8e69d732e5e
+ size 825480985
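
The diff stores only a Git LFS pointer; the actual ~825 MB of decoder weights live in LFS storage. Since the commit message says the folder was uploaded with huggingface_hub, a hedged sketch of fetching the resolved files the same way (the repo_id below is a placeholder, not taken from this page):

from huggingface_hub import snapshot_download

# repo_id is a placeholder -- substitute the repository this commit belongs to.
local_dir = snapshot_download(
    repo_id="<user>/<repo>",
    revision="ffcd67c",               # the commit shown here; the full hash also works
    allow_patterns=["best_model/*"],  # skip the intermediate checkpoint folders
)
print(local_dir)  # local cache path containing the resolved LFS files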
best_model/decoder/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+ "cls_token": "[CLS]",
+ "mask_token": "[MASK]",
+ "pad_token": "[PAD]",
+ "sep_token": "[SEP]",
+ "unk_token": "[UNK]"
+ }
best_model/decoder/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
best_model/decoder/tokenizer_config.json ADDED
@@ -0,0 +1,13 @@
+ {
+ "clean_up_tokenization_spaces": true,
+ "cls_token": "[CLS]",
+ "do_lower_case": false,
+ "mask_token": "[MASK]",
+ "model_max_length": 512,
+ "pad_token": "[PAD]",
+ "sep_token": "[SEP]",
+ "strip_accents": null,
+ "tokenize_chinese_chars": true,
+ "tokenizer_class": "BertTokenizer",
+ "unk_token": "[UNK]"
+ }
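
These tokenizer files are the stock cased mBERT tokenizer (no lowercasing, 512-token limit). A minimal sketch of loading it from this folder; the max_length of 256 is the max_seq_length recorded in model_args.json further down:

from transformers import BertTokenizer

tok = BertTokenizer.from_pretrained("best_model/decoder")
print(tok.model_max_length)  # 512, per tokenizer_config.json
print(tok.do_lower_case)     # False -- the cased mBERT variant

batch = tok(["Uma frase de exemplo."], padding=True, truncation=True,
            max_length=256,  # max_seq_length used during training
            return_tensors="pt")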
best_model/decoder/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
best_model/encoder/config.json ADDED
@@ -0,0 +1,31 @@
+ {
+ "_name_or_path": "bert-base-multilingual-cased",
+ "architectures": [
+ "BertModel"
+ ],
+ "attention_probs_dropout_prob": 0.1,
+ "classifier_dropout": null,
+ "directionality": "bidi",
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 768,
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "layer_norm_eps": 1e-12,
+ "max_position_embeddings": 512,
+ "model_type": "bert",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 12,
+ "pad_token_id": 0,
+ "pooler_fc_size": 768,
+ "pooler_num_attention_heads": 12,
+ "pooler_num_fc_layers": 3,
+ "pooler_size_per_head": 128,
+ "pooler_type": "first_token_transform",
+ "position_embedding_type": "absolute",
+ "torch_dtype": "float32",
+ "transformers_version": "4.29.2",
+ "type_vocab_size": 2,
+ "use_cache": true,
+ "vocab_size": 119547
+ }
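
The encoder is a plain BertModel with the same mBERT geometry; only the decoder config carries the cross-attention flags. A hedged sketch of recombining the two halves with transformers' EncoderDecoderModel, assuming the saved decoder's trained cross-attention weights are picked up on load (paths mirror this repo's layout):

from transformers import BertTokenizer, EncoderDecoderModel

model = EncoderDecoderModel.from_encoder_decoder_pretrained(
    "best_model/encoder",  # BertModel
    "best_model/decoder",  # BertLMHeadModel with cross-attention
)

tok = BertTokenizer.from_pretrained("best_model/encoder")
# BERT-based encoder-decoders need these set explicitly before generate().
model.config.decoder_start_token_id = tok.cls_token_id
model.config.pad_token_id = tok.pad_token_id

inputs = tok("Uma frase de exemplo.", return_tensors="pt")
out = model.generate(**inputs, max_length=20)  # max_length matches model_args.json
print(tok.decode(out[0], skip_special_tokens=True))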
best_model/encoder/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c7e7902ebfef91c2dd83fc772dfede7bb7b9475e641df6e5c081f96deb1213af
+ size 711484973
best_model/encoder/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+ "cls_token": "[CLS]",
+ "mask_token": "[MASK]",
+ "pad_token": "[PAD]",
+ "sep_token": "[SEP]",
+ "unk_token": "[UNK]"
+ }
best_model/encoder/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
best_model/encoder/tokenizer_config.json ADDED
@@ -0,0 +1,13 @@
+ {
+ "clean_up_tokenization_spaces": true,
+ "cls_token": "[CLS]",
+ "do_lower_case": false,
+ "mask_token": "[MASK]",
+ "model_max_length": 512,
+ "pad_token": "[PAD]",
+ "sep_token": "[SEP]",
+ "strip_accents": null,
+ "tokenize_chinese_chars": true,
+ "tokenizer_class": "BertTokenizer",
+ "unk_token": "[UNK]"
+ }
best_model/encoder/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
best_model/eval_results.txt ADDED
@@ -0,0 +1 @@
+ eval_loss = 4.638601120575762e-05
best_model/model_args.json ADDED
@@ -0,0 +1 @@
+ {"adafactor_beta1": null, "adafactor_clip_threshold": 1.0, "adafactor_decay_rate": -0.8, "adafactor_eps": [1e-30, 0.001], "adafactor_relative_step": true, "adafactor_scale_parameter": true, "adafactor_warmup_init": true, "adam_betas": [0.9, 0.999], "adam_epsilon": 1e-08, "best_model_dir": "outputs/mbert/best_model", "cache_dir": "cache_dir/mbert", "config": {}, "cosine_schedule_num_cycles": 0.5, "custom_layer_parameters": [], "custom_parameter_groups": [], "dataloader_num_workers": 0, "do_lower_case": false, "dynamic_quantize": false, "early_stopping_consider_epochs": false, "early_stopping_delta": 0, "early_stopping_metric": "eval_loss", "early_stopping_metric_minimize": true, "early_stopping_patience": 25, "encoding": null, "eval_batch_size": 8, "evaluate_during_training": true, "evaluate_during_training_silent": true, "evaluate_during_training_steps": 3200, "evaluate_during_training_verbose": true, "evaluate_each_epoch": true, "fp16": false, "gradient_accumulation_steps": 1, "learning_rate": 0.0001, "local_rank": -1, "logging_steps": 3200, "loss_type": null, "loss_args": {}, "manual_seed": 777, "max_grad_norm": 1.0, "max_seq_length": 256, "model_name": "bert-base-multilingual-cased-bert-base-multilingual-cased", "model_type": "bert-bert", "multiprocessing_chunksize": -1, "n_gpu": 1, "no_cache": false, "no_save": false, "not_saved_args": [], "num_train_epochs": 10, "optimizer": "AdamW", "output_dir": "outputs/mbert", "overwrite_output_dir": true, "polynomial_decay_schedule_lr_end": 1e-07, "polynomial_decay_schedule_power": 1.0, "process_count": 78, "quantized_model": false, "reprocess_input_data": true, "save_best_model": true, "save_eval_checkpoints": true, "save_model_every_epoch": true, "save_optimizer_and_scheduler": true, "save_steps": 3200, "scheduler": "linear_schedule_with_warmup", "silent": false, "skip_special_tokens": true, "tensorboard_dir": null, "thread_count": null, "tokenizer_name": null, "tokenizer_type": null, "train_batch_size": 8, "train_custom_parameters_only": false, "use_cached_eval_features": false, "use_early_stopping": false, "use_hf_datasets": false, "use_multiprocessing": false, "use_multiprocessing_for_evaluation": false, "wandb_kwargs": {"name": "bert-base-multilingual-cased"}, "wandb_project": "DORE", "warmup_ratio": 0.06, "warmup_steps": 4946, "weight_decay": 0.0, "model_class": "Seq2SeqModel", "base_marian_model_name": null, "dataset_class": null, "dataset_cache_dir": null, "do_sample": false, "early_stopping": true, "evaluate_generated_text": true, "faiss_d": 768, "faiss_m": 128, "include_title_in_knowledge_dataset": true, "length_penalty": 2.0, "max_length": 20, "max_steps": -1, "num_beams": 1, "num_return_sequences": 1, "rag_embed_batch_size": 16, "repetition_penalty": 1.0, "save_knowledge_dataset": true, "save_knowledge_dataset_with_checkpoints": false, "save_recent_only": true, "split_text_character": " ", "split_text_n": 100, "src_lang": "en_XX", "tgt_lang": "ro_RO", "top_k": null, "top_p": null, "use_multiprocessed_decoding": false}
best_model/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3e85bb1a0ccb2f249890c8c838326d2071eb99e3a5ec8d8a875f12f5c031083c
+ size 3069268210
best_model/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e256f319386c1ba3833ac29cb14870f2e80ce6f86a6405851d9a5a16fcb97e1
+ size 627
best_model/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:724cc7d33efdcd3ae19790f6f434cb90392af95a253cba395ad28e3fc2303e0e
+ size 3579
checkpoint-74178-epoch-9/eval_results.txt ADDED
@@ -0,0 +1 @@
+ eval_loss = 18.88406741046489
eval_results.txt ADDED
@@ -0,0 +1 @@
+ eval_loss = 18.88406741046489
training_progress_scores.csv ADDED
@@ -0,0 +1,33 @@
+ global_step,eval_loss,train_loss
+ 3200,4.638601120575762e-05,9.859441342996433e-05
+ 6400,0.0004333633804356752,2.2505106244352646e-05
+ 8242,0.0006071640122126645,0.0020425980910658836
+ 9600,0.0001250970983686778,7.014982838882133e-05
+ 12800,0.004397115739013738,0.0006558408495038748
+ 16000,0.00012924883208755288,4.2052179196616635e-05
+ 16484,6.069257284924091e-05,1.1547194844752084e-06
+ 19200,7.374908047934749,6.571280002593994
+ 22400,11.5760720029495,6.235233783721924
+ 24726,13.781783858183111,6.484010219573975
+ 25600,13.465459120267806,6.241044044494629
+ 28800,14.799029918968648,6.351245880126953
+ 32000,12.572137959890721,6.461843013763428
+ 32968,15.59106514027025,5.936652660369873
+ 35200,11.594814583261519,6.3275146484375
+ 38400,16.371380089209648,6.0015058517456055
+ 41210,14.86689023171045,5.965159893035889
+ 41600,15.508951940448581,6.275737762451172
+ 44800,14.77081819253891,6.237776279449463
+ 48000,14.01954884138112,6.322638511657715
+ 49452,16.832226444597907,6.67988920211792
+ 51200,18.10885695726301,6.464913368225098
+ 54400,15.01399333033962,5.935701370239258
+ 57600,17.017078648603754,6.028385162353516
+ 57694,16.424798103861875,6.45088529586792
+ 60800,19.780675463280822,5.924292087554932
+ 64000,17.076165711515333,6.798635482788086
+ 65936,17.641285376662136,6.5727362632751465
+ 67200,14.17987766339905,5.9465556144714355
+ 70400,18.60497539729415,5.827696323394775
+ 73600,16.498589126535087,6.554838180541992
+ 74178,18.88406741046489,6.478570938110352
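
The progress log makes the training trajectory easy to read off: eval loss bottoms out at step 3200 (the saved best_model, matching best_model/eval_results.txt) and climbs sharply from step 19200 onward, ending at the 18.88 recorded in the top-level eval_results.txt. A minimal pandas sketch for inspecting and plotting it:

import pandas as pd

df = pd.read_csv("training_progress_scores.csv")

# Best checkpoint: global_step 3200, eval_loss ~4.64e-05 -- the saved best_model.
best = df.loc[df["eval_loss"].idxmin()]
print(best)

# Log scale, since eval_loss spans ~1e-5 up to ~19 across training.
ax = df.plot(x="global_step", y=["eval_loss", "train_loss"], logy=True)
ax.set_xlabel("global step")
ax.set_ylabel("loss (log scale)")
ax.figure.savefig("training_curves.png", dpi=150)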