jiiyy committed
Commit dd458a1 · Parent(s): 73ec5dd

Training in progress, epoch 1

added_tokens.json CHANGED
@@ -1,7 +1,7 @@
  {
- "[CLS]": 0,
+ "[CLS]": 2,
  "[MASK]": 4,
- "[PAD]": 1,
- "[SEP]": 2,
- "[UNK]": 3
+ "[PAD]": 0,
+ "[SEP]": 3,
+ "[UNK]": 1
  }
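This renumbering swaps the special-token IDs from the old base model's RoBERTa-style layout to the new base vocabulary's order ([PAD]=0, [UNK]=1, [CLS]=2, [SEP]=3, [MASK]=4). A minimal sketch for verifying the mapping, assuming the base checkpoint named in config.json below (the fine-tuned repo's own files should agree):

```python
# Minimal check of the remapped special-token IDs. Loading the base
# tokenizer (kykim/bert-kor-base, per config.json) is an assumption;
# the expected ids come from the updated added_tokens.json above.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("kykim/bert-kor-base")
for token in ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"]:
    print(token, tok.convert_tokens_to_ids(token))  # 0, 1, 2, 3, 4
```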
config.json CHANGED
@@ -1,12 +1,12 @@
  {
- "_name_or_path": "klue/roberta-base",
+ "_name_or_path": "kykim/bert-kor-base",
  "architectures": [
  "BertForSequenceClassification"
  ],
  "attention_probs_dropout_prob": 0.1,
- "bos_token_id": 0,
  "classifier_dropout": null,
- "eos_token_id": 2,
+ "directionality": "bidi",
+ "embedding_size": 768,
  "gradient_checkpointing": false,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
@@ -49,18 +49,22 @@
  "LABEL_8": 8,
  "LABEL_9": 9
  },
- "layer_norm_eps": 1e-05,
- "max_position_embeddings": 514,
+ "layer_norm_eps": 1e-12,
+ "max_position_embeddings": 512,
  "model_type": "bert",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
- "pad_token_id": 1,
+ "pad_token_id": 0,
+ "pooler_fc_size": 768,
+ "pooler_num_attention_heads": 12,
+ "pooler_num_fc_layers": 3,
+ "pooler_size_per_head": 128,
+ "pooler_type": "first_token_transform",
  "position_embedding_type": "absolute",
  "problem_type": "single_label_classification",
- "tokenizer_class": "BertTokenizer",
  "torch_dtype": "float32",
  "transformers_version": "4.34.0",
- "type_vocab_size": 1,
+ "type_vocab_size": 2,
  "use_cache": true,
- "vocab_size": 32000
+ "vocab_size": 42000
  }
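Every change here follows from swapping the base checkpoint from klue/roberta-base to kykim/bert-kor-base: BERT-style layer_norm_eps, 512 positions instead of RoBERTa's 514, pad_token_id 0, two token-type embeddings, the pooler_* fields, and a 42,000-token vocabulary. A sketch of how the new dimensions might be sanity-checked (expected values are taken from this diff):

```python
# Sketch: confirm the key dimensions this commit changed; num_labels=10
# matches LABEL_0..LABEL_9 in the (unchanged) id2label/label2id maps.
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("kykim/bert-kor-base", num_labels=10)
assert cfg.vocab_size == 42000             # was 32000
assert cfg.max_position_embeddings == 512  # was 514 (RoBERTa-style)
assert cfg.type_vocab_size == 2            # was 1
assert cfg.pad_token_id == 0               # was 1
```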
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:68dfac82fcf3e918f7d932073c0326afaa4da666d63a3a6eacf774b4412c5f92
- size 442590065
+ oid sha256:55b5489cbd6693c7e73e177cdcdb60318f7b232fd142610ddd521a544eb15bee
+ size 473306993
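The ~30.7 MB growth is consistent, to the byte, with the embedding changes in config.json for float32 weights at hidden size 768, as this arithmetic check shows:

```python
# Sketch: reconcile the LFS size change with the config diff.
# Embedding rows changed: +10000 vocab, +1 token type, -2 positions,
# each row 768 floats at 4 bytes.
old_size, new_size = 442590065, 473306993
delta = (10000 + 1 - 2) * 768 * 4
assert new_size - old_size == delta  # 30,716,928 bytes
```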
special_tokens_map.json CHANGED
@@ -1,7 +1,5 @@
  {
- "bos_token": "[CLS]",
  "cls_token": "[CLS]",
- "eos_token": "[SEP]",
  "mask_token": "[MASK]",
  "pad_token": "[PAD]",
  "sep_token": "[SEP]",
tokenizer.json CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f528deccb283323c613f41f4cd7cd1ab207d533f13acf524fc27678317e2ed5c
- size 752097
+ oid sha256:508df639d8abd9ad6ed6ebc0ed892f31fb7f1f091fe7a9cddec42dfd939e25fd
+ size 1007781
tokenizer_config.json CHANGED
@@ -1,7 +1,7 @@
  {
  "added_tokens_decoder": {
  "0": {
- "content": "[CLS]",
+ "content": "[PAD]",
  "lstrip": false,
  "normalized": false,
  "rstrip": false,
@@ -9,7 +9,7 @@
  "special": true
  },
  "1": {
- "content": "[PAD]",
+ "content": "[UNK]",
  "lstrip": false,
  "normalized": false,
  "rstrip": false,
@@ -17,7 +17,7 @@
  "special": true
  },
  "2": {
- "content": "[SEP]",
+ "content": "[CLS]",
  "lstrip": false,
  "normalized": false,
  "rstrip": false,
@@ -25,7 +25,7 @@
  "special": true
  },
  "3": {
- "content": "[UNK]",
+ "content": "[SEP]",
  "lstrip": false,
  "normalized": false,
  "rstrip": false,
@@ -42,18 +42,16 @@
  }
  },
  "additional_special_tokens": [],
- "bos_token": "[CLS]",
  "clean_up_tokenization_spaces": true,
  "cls_token": "[CLS]",
  "do_basic_tokenize": true,
- "do_lower_case": false,
- "eos_token": "[SEP]",
+ "do_lower_case": true,
  "mask_token": "[MASK]",
  "model_max_length": 512,
  "never_split": null,
  "pad_token": "[PAD]",
  "sep_token": "[SEP]",
- "strip_accents": null,
+ "strip_accents": false,
  "tokenize_chinese_chars": true,
  "tokenizer_class": "BertTokenizer",
  "unk_token": "[UNK]"
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:39a55c006bbec520686e9d3bf51be88e268c1d4382cecd5401c6f24bf63fb9c6
+ oid sha256:a82cb208c80ab4085bd749641b69b336a485f16e651b65e0a15df02234ffe084
  size 4091
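training_args.bin keeps the same size but gets a new hash, so the serialized TrainingArguments changed between runs. It is a pickled object and can be inspected locally, for example:

```python
# Sketch: training_args.bin is a pickled TrainingArguments; transformers
# must be importable to unpickle it, and recent PyTorch versions need
# weights_only=False for non-tensor pickles.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(args.num_train_epochs, args.learning_rate)
```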
vocab.txt CHANGED
The diff for this file is too large to render. See raw diff