model update
- config.json +1 -1
- eval/metric.first.answer.paragraph_answer.question.asahi417_qg_koquad.default.json +1 -0
- eval/metric.first.sentence.paragraph_answer.question.asahi417_qg_koquad.default.json +1 -0
- eval/samples.test.hyp.paragraph_answer.question.asahi417_qg_koquad.default.txt +0 -0
- eval/samples.validation.hyp.paragraph_answer.question.asahi417_qg_koquad.default.txt +0 -0
- pytorch_model.bin +2 -2
- tokenizer_config.json +1 -1
- trainer_config.json +1 -0
config.json
CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "lmqg_output/mbart_large_cc25_koquad/
+  "_name_or_path": "lmqg_output/mbart_large_cc25_koquad/model_xnwemx/epoch_5",
   "_num_labels": 3,
   "activation_dropout": 0.0,
   "activation_function": "gelu",
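
The change above only rewrites the "_name_or_path" bookkeeping field; the architecture settings are untouched. A minimal sketch for inspecting the committed config, assuming a local clone of this repo (the relative path is illustrative):

import json

# Read the committed config.json from a local clone of this repo (path is illustrative).
with open("config.json") as f:
    config = json.load(f)

print(config["_name_or_path"])        # lmqg_output/mbart_large_cc25_koquad/model_xnwemx/epoch_5
print(config["activation_function"])  # gelu
print(config["activation_dropout"])   # 0.0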
eval/metric.first.answer.paragraph_answer.question.asahi417_qg_koquad.default.json
ADDED
@@ -0,0 +1 @@
+{"validation": {"Bleu_1": 0.2516278620423099, "Bleu_2": 0.1821211343705945, "Bleu_3": 0.13494306406273335, "Bleu_4": 0.10102987411289067}, "test": {"Bleu_1": 0.26600055072577267, "Bleu_2": 0.19302824919934103, "Bleu_3": 0.1432440107754292, "Bleu_4": 0.10772478491739491}}
eval/metric.first.sentence.paragraph_answer.question.asahi417_qg_koquad.default.json
ADDED
@@ -0,0 +1 @@
+{"validation": {"Bleu_1": 0.28332822555929404, "Bleu_2": 0.20933973640483905, "Bleu_3": 0.1574442567530699, "Bleu_4": 0.11926663360827003, "METEOR": 0.30373202197460736, "ROUGE_L": 0.2853838586458619, "BERTScore": 0.8309645159424983, "MoverScore": 0.8290751252962683}, "test": {"Bleu_1": 0.2691981760219469, "Bleu_2": 0.19566200846322834, "Bleu_3": 0.14524158089407038, "Bleu_4": 0.10924223302065934, "METEOR": 0.3022853307791723, "ROUGE_L": 0.2776374700887909, "BERTScore": 0.8388571484491499, "MoverScore": 0.8294576496428497}}
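
Both metric files added above share the same layout: top-level "validation" and "test" objects keyed by metric name (the second file additionally reports METEOR, ROUGE_L, BERTScore, and MoverScore). A minimal sketch for reading the test-set scores:

import json

# Print test-set scores from the second metric file committed above.
path = "eval/metric.first.sentence.paragraph_answer.question.asahi417_qg_koquad.default.json"
with open(path) as f:
    metrics = json.load(f)

for name, value in metrics["test"].items():
    print(f"{name}: {value:.4f}")
# e.g. Bleu_4: 0.1092, ROUGE_L: 0.2776, BERTScore: 0.8389, MoverScore: 0.8295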
eval/samples.test.hyp.paragraph_answer.question.asahi417_qg_koquad.default.txt
ADDED
The diff for this file is too large to render.
eval/samples.validation.hyp.paragraph_answer.question.asahi417_qg_koquad.default.txt
ADDED
The diff for this file is too large to render.
pytorch_model.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:9d02970b37673d2cb4e06c3e4c04db08c832b5cb927b3264369431ba3ea76fae
+size 2444604857
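
The git-lfs pointer above pins the new checkpoint by SHA-256 and byte size, so a downloaded pytorch_model.bin can be verified against it. A minimal sketch (the local file path is illustrative):

import hashlib
import os

# Verify a downloaded checkpoint against the LFS pointer committed above.
path = "pytorch_model.bin"
expected_sha256 = "9d02970b37673d2cb4e06c3e4c04db08c832b5cb927b3264369431ba3ea76fae"
expected_size = 2444604857

sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch"
assert sha.hexdigest() == expected_sha256, "sha256 mismatch"
print("pytorch_model.bin matches the LFS pointer")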
tokenizer_config.json
CHANGED
@@ -1 +1 @@
-{"bos_token": "<s>", "eos_token": "</s>", "sep_token": "</s>", "cls_token": "<s>", "unk_token": "<unk>", "pad_token": "<pad>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "src_lang": null, "tgt_lang": null, "additional_special_tokens": null, "model_max_length": 1024, "special_tokens_map_file": null, "name_or_path": "lmqg_output/mbart_large_cc25_koquad/
+{"bos_token": "<s>", "eos_token": "</s>", "sep_token": "</s>", "cls_token": "<s>", "unk_token": "<unk>", "pad_token": "<pad>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "src_lang": null, "tgt_lang": null, "additional_special_tokens": null, "model_max_length": 1024, "special_tokens_map_file": null, "name_or_path": "lmqg_output/mbart_large_cc25_koquad/model_xnwemx/epoch_5", "tokenizer_class": "MBartTokenizer"}
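
With config.json, pytorch_model.bin, and the tokenizer files in place, the checkpoint loads through transformers. A minimal sketch; the repo id and the <hl> answer-highlighting convention follow lmqg's usual setup and are assumptions, not something recorded in this commit:

from transformers import MBartForConditionalGeneration, MBartTokenizer

# Repo id assumed from lmqg's naming convention; a path to a local clone works as well.
name = "lmqg/mbart-large-cc25-koquad-qg"
tokenizer = MBartTokenizer.from_pretrained(name)
model = MBartForConditionalGeneration.from_pretrained(name)

# paragraph_answer input: the paragraph with the answer span wrapped in <hl>
# markers (lmqg convention, assumed here); replace with real Korean text.
text = "... paragraph text <hl> answer span <hl> ..."
inputs = tokenizer(text, return_tensors="pt")
output = model.generate(**inputs, max_length=32)
print(tokenizer.decode(output[0], skip_special_tokens=True))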
trainer_config.json
ADDED
@@ -0,0 +1 @@
+{"dataset_path": "asahi417/qg_koquad", "dataset_name": "default", "input_types": ["paragraph_answer"], "output_types": ["question"], "prefix_types": null, "model": "facebook/mbart-large-cc25", "max_length": 512, "max_length_output": 32, "epoch": 6, "batch": 4, "lr": 0.0001, "fp16": false, "random_seed": 1, "gradient_accumulation_steps": 16, "label_smoothing": 0.15}
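
trainer_config.json records the fine-tuning setup: facebook/mbart-large-cc25 fine-tuned on asahi417/qg_koquad (paragraph_answer -> question) for 6 epochs, per-device batch 4 with 16 gradient-accumulation steps, learning rate 1e-4, and label smoothing 0.15; the "_name_or_path" updated above points at epoch_5 of this run. A minimal sketch for reading the file and deriving the effective batch size:

import json

# Read the committed training configuration and derive the effective batch size.
with open("trainer_config.json") as f:
    trainer = json.load(f)

effective_batch = trainer["batch"] * trainer["gradient_accumulation_steps"]
print(trainer["model"], "on", trainer["dataset_path"])  # facebook/mbart-large-cc25 on asahi417/qg_koquad
print("effective batch size:", effective_batch)         # 4 * 16 = 64
print("epochs:", trainer["epoch"], "lr:", trainer["lr"], "label smoothing:", trainer["label_smoothing"])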