trained with chimera_qa

- README.md +9 -9
- config.json +2 -2
- pytorch_model.bin +2 -2
- tokenizer.json +0 -0
- tokenizer_config.json +1 -1
- training_args.bin +2 -2
README.md
CHANGED
@@ -5,19 +5,19 @@ widget:
 ---
 # xlm-roberta-base-finetune-qa
 
-Finetuning `xlm-roberta-base` with the training set of `iapp_wiki_qa_squad`, `
-Trained with [thai2transformers](https://github.com/vistec-AI/thai2transformers/blob/dev/scripts/downstream/train_question_answering_lm_finetuning.py).
+Finetuning `xlm-roberta-base` on the training sets of `iapp_wiki_qa_squad`, `thaiqa_squad`, and `nsc_qa` (examples with cosine similarity above 0.8 to any validation or test example removed; contexts of the latter two trimmed to around 300 `newmm` words). Benchmarks are shared on [wandb](https://wandb.ai/cstorm125/wangchanberta-qa), using the validation and test sets of `iapp_wiki_qa_squad`.
+Trained with [thai2transformers](https://github.com/vistec-AI/thai2transformers/blob/dev/scripts/downstream/train_question_answering_lm_finetuning.py).
 
-
+Train with:
 ```
 export WANDB_PROJECT=wangchanberta-qa
 
 export MODEL_NAME=xlm-roberta-base
-python train_question_answering_lm_finetuning.py
---model_name $MODEL_NAME
---dataset_name
---output_dir $MODEL_NAME-finetune-
---log_dir $MODEL_NAME-finetune-
---pad_on_right
+python train_question_answering_lm_finetuning.py \
+--model_name $MODEL_NAME \
+--dataset_name chimera_qa \
+--output_dir $MODEL_NAME-finetune-chimera_qa-model \
+--log_dir $MODEL_NAME-finetune-chimera_qa-log \
+--pad_on_right \
 --fp16
 ```
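The README's preprocessing description is compact, so here is a minimal sketch of what the two steps it names might look like: trimming contexts to roughly 300 `newmm` words, and dropping training examples too similar to validation/test examples. It assumes `pythainlp` for `newmm` tokenization and plain TF-IDF cosine similarity; the actual thai2transformers preprocessing may differ, and all function names and the choice of which text field to compare are assumptions.

```
from pythainlp.tokenize import word_tokenize
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

def trim_context(context, max_words=300):
    # newmm is pythainlp's default dictionary-based Thai word tokenizer;
    # it keeps whitespace tokens, so joining a prefix reproduces the text verbatim
    words = word_tokenize(context, engine="newmm")
    return "".join(words[:max_words])

def keep_indices(train_texts, eval_texts, threshold=0.8):
    # keep only training examples whose TF-IDF cosine similarity to every
    # validation/test example is at most the threshold (0.8 in the README)
    vectorizer = TfidfVectorizer(analyzer=lambda t: word_tokenize(t, engine="newmm"))
    matrix = vectorizer.fit_transform(list(train_texts) + list(eval_texts))
    n = len(train_texts)
    similarities = cosine_similarity(matrix[:n], matrix[n:])
    return [i for i in range(n) if similarities[i].max() <= threshold]
```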
config.json
CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "xlm-roberta-base
+  "_name_or_path": "xlm-roberta-base",
   "architectures": [
     "XLMRobertaForQuestionAnswering"
   ],
@@ -20,7 +20,7 @@
   "output_past": true,
   "pad_token_id": 1,
   "position_embedding_type": "absolute",
-  "transformers_version": "4.
+  "transformers_version": "4.8.2",
   "type_vocab_size": 1,
   "use_cache": true,
   "vocab_size": 250002
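Since the config declares the `XLMRobertaForQuestionAnswering` architecture, the checkpoint should load with the standard extractive-QA tooling in `transformers`. A usage sketch, assuming the current directory is a local checkout of this repo (the `"."` path and the example strings are placeholders):

```
from transformers import pipeline

# "." assumes you are inside a checkout of this model repo
qa = pipeline("question-answering", model=".")
result = qa(question="Who released XLM-R?",
            context="XLM-RoBERTa was released by Facebook AI in 2019.")
print(result["answer"], result["score"])  # extracted answer span plus confidence
```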
pytorch_model.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:faf558bd7a3dc268b2128c7d041f697825f4c27c5087de97b5d1dded0ed69132
+size 1109903089
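Both `.bin` entries are git-LFS pointer files: the repo stores only an `oid sha256` and a `size`, and the hash identifies the actual payload. A small sketch for verifying a downloaded weight file against the pointer above (the path is illustrative):

```
import hashlib

def sha256_of(path, chunk_size=1 << 20):
    # stream the file in 1 MiB chunks; the weights are ~1.1 GB
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

assert sha256_of("pytorch_model.bin") == (
    "faf558bd7a3dc268b2128c7d041f697825f4c27c5087de97b5d1dded0ed69132"
)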
tokenizer.json
ADDED
The diff for this file is too large to render; see the raw diff.
tokenizer_config.json
CHANGED
@@ -1 +1 @@
-{"bos_token": "<s>", "eos_token": "</s>", "sep_token": "</s>", "cls_token": "<s>", "unk_token": "<unk>", "pad_token": "<pad>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "model_max_length": 416, "special_tokens_map_file": null, "name_or_path": "xlm-roberta-base"}
+{"bos_token": "<s>", "eos_token": "</s>", "sep_token": "</s>", "cls_token": "<s>", "unk_token": "<unk>", "pad_token": "<pad>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "model_max_length": 416, "special_tokens_map_file": null, "name_or_path": "xlm-roberta-base", "tokenizer_class": "XLMRobertaTokenizer"}
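The only change here is the added `"tokenizer_class": "XLMRobertaTokenizer"` key, which lets `AutoTokenizer` resolve the tokenizer class from `tokenizer_config.json` alone. A quick check, again assuming a local checkout:

```
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(".")  # resolved via tokenizer_class
print(type(tokenizer).__name__)    # XLMRobertaTokenizer (or its fast variant)
print(tokenizer.model_max_length)  # 416, as set in tokenizer_config.json
```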
training_args.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:bd66a6c43756705178a5ff5b0af387c063b5043883bc3d7fb0f9aab8b717042c
+size 2671
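`training_args.bin` is the pickled `TrainingArguments` object that the `transformers` `Trainer` saves alongside checkpoints, which is why it is only 2,671 bytes. A sketch for inspecting it; note that unpickling executes code, so only load files you trust:

```
import torch

# on torch < 1.13, drop the weights_only argument
args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.num_train_epochs, args.per_device_train_batch_size)
```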