lekhnathrijal committed
Commit 49d77a7 · verified · 1 Parent(s): 29436a5

ai-research-lab/bert-question-classifier

Files changed (4):
  1. README.md +24 -30
  2. config.json +18 -0
  3. model.safetensors +1 -1
  4. tokenizer.json +10 -1
README.md CHANGED
@@ -21,11 +21,11 @@ should probably proofread and complete it, then remove this comment. -->
 
  This model is a fine-tuned version of [google-bert/bert-base-uncased](https://huggingface.co/google-bert/bert-base-uncased) on the None dataset.
  It achieves the following results on the evaluation set:
- - Loss: 1.9349
- - Accuracy: 0.9687
- - Recall: 0.8393
- - Precision: 0.8205
- - F1: 0.8298
+ - Loss: 1.4053
+ - Accuracy: 0.9730
+ - Recall: 0.8657
+ - Precision: 0.8414
+ - F1: 0.8534
 
  ## Model description
 
@@ -44,42 +44,36 @@ More information needed
  ### Training hyperparameters
 
  The following hyperparameters were used during training:
- - learning_rate: 1e-05
+ - learning_rate: 9e-05
  - train_batch_size: 4
  - eval_batch_size: 4
  - seed: 42
  - optimizer: Use OptimizerNames.ADAMW_TORCH with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
  - lr_scheduler_type: linear
  - lr_scheduler_warmup_ratio: 0.1
- - num_epochs: 10
+ - num_epochs: 5
 
  ### Training results
 
  | Training Loss | Epoch | Step | Validation Loss | Accuracy | Recall | Precision | F1 |
  |:-------------:|:------:|:----:|:---------------:|:--------:|:------:|:---------:|:------:|
- | No log | 0.0959 | 100 | 5.3220 | 0.8786 | 0.3772 | 0.3462 | 0.3610 |
- | No log | 0.1918 | 200 | 5.1118 | 0.9118 | 0.5105 | 0.5152 | 0.5129 |
- | No log | 0.2876 | 300 | 4.7630 | 0.9237 | 0.5934 | 0.5786 | 0.5859 |
- | No log | 0.3835 | 400 | 4.5162 | 0.9247 | 0.6079 | 0.5821 | 0.5948 |
- | 4.9675 | 0.4794 | 500 | 4.2140 | 0.9288 | 0.6300 | 0.6041 | 0.6168 |
- | 4.9675 | 0.5753 | 600 | 3.9853 | 0.9357 | 0.6694 | 0.6402 | 0.6544 |
- | 4.9675 | 0.6711 | 700 | 3.7985 | 0.9364 | 0.6600 | 0.6476 | 0.6538 |
- | 4.9675 | 0.7670 | 800 | 3.5336 | 0.9426 | 0.7044 | 0.6775 | 0.6907 |
- | 4.9675 | 0.8629 | 900 | 3.3734 | 0.9438 | 0.7094 | 0.6841 | 0.6965 |
- | 3.7188 | 0.9588 | 1000 | 3.1605 | 0.9482 | 0.7326 | 0.7078 | 0.7200 |
- | 3.7188 | 1.0547 | 1100 | 3.0154 | 0.9495 | 0.7317 | 0.7181 | 0.7248 |
- | 3.7188 | 1.1505 | 1200 | 2.8699 | 0.9518 | 0.7478 | 0.7293 | 0.7384 |
- | 3.7188 | 1.2464 | 1300 | 2.7020 | 0.9540 | 0.7605 | 0.7406 | 0.7504 |
- | 3.7188 | 1.3423 | 1400 | 2.5487 | 0.9587 | 0.7863 | 0.7654 | 0.7757 |
- | 2.8304 | 1.4382 | 1500 | 2.4370 | 0.9609 | 0.8027 | 0.7749 | 0.7885 |
- | 2.8304 | 1.5340 | 1600 | 2.3423 | 0.9625 | 0.8043 | 0.7874 | 0.7958 |
- | 2.8304 | 1.6299 | 1700 | 2.2367 | 0.9637 | 0.8117 | 0.7940 | 0.8028 |
- | 2.8304 | 1.7258 | 1800 | 2.1771 | 0.9652 | 0.8154 | 0.8045 | 0.8099 |
- | 2.8304 | 1.8217 | 1900 | 2.0955 | 0.9655 | 0.8248 | 0.8019 | 0.8131 |
- | 2.2139 | 1.9175 | 2000 | 2.0031 | 0.9676 | 0.8316 | 0.8159 | 0.8237 |
- | 2.2139 | 2.0134 | 2100 | 1.9349 | 0.9687 | 0.8393 | 0.8205 | 0.8298 |
- | 2.2139 | 2.1093 | 2200 | 1.8722 | 0.9683 | 0.8344 | 0.8201 | 0.8272 |
- | 2.2139 | 2.2052 | 2300 | 1.8385 | 0.9682 | 0.8396 | 0.8161 | 0.8277 |
+ | No log | 0.0959 | 100 | 4.9052 | 0.8992 | 0.4299 | 0.4437 | 0.4367 |
+ | No log | 0.1918 | 200 | 3.9607 | 0.9324 | 0.6086 | 0.6331 | 0.6206 |
+ | No log | 0.2876 | 300 | 3.1200 | 0.9433 | 0.6929 | 0.6868 | 0.6898 |
+ | No log | 0.3835 | 400 | 2.8097 | 0.9529 | 0.7516 | 0.7363 | 0.7438 |
+ | 3.8292 | 0.4794 | 500 | 2.4593 | 0.9570 | 0.7677 | 0.7611 | 0.7644 |
+ | 3.8292 | 0.5753 | 600 | 2.1407 | 0.9621 | 0.8024 | 0.7851 | 0.7937 |
+ | 3.8292 | 0.6711 | 700 | 2.0963 | 0.9611 | 0.7882 | 0.7847 | 0.7864 |
+ | 3.8292 | 0.7670 | 800 | 1.8911 | 0.9638 | 0.8055 | 0.7983 | 0.8019 |
+ | 3.8292 | 0.8629 | 900 | 1.8706 | 0.9627 | 0.8018 | 0.7908 | 0.7962 |
+ | 2.0673 | 0.9588 | 1000 | 1.7311 | 0.9649 | 0.8282 | 0.7946 | 0.8111 |
+ | 2.0673 | 1.0547 | 1100 | 1.7874 | 0.9670 | 0.8235 | 0.8157 | 0.8196 |
+ | 2.0673 | 1.1505 | 1200 | 1.6227 | 0.9690 | 0.8362 | 0.8247 | 0.8304 |
+ | 2.0673 | 1.2464 | 1300 | 1.4544 | 0.9720 | 0.8496 | 0.8433 | 0.8464 |
+ | 2.0673 | 1.3423 | 1400 | 1.5178 | 0.9711 | 0.8533 | 0.8326 | 0.8428 |
+ | 1.4953 | 1.4382 | 1500 | 1.4053 | 0.9730 | 0.8657 | 0.8414 | 0.8534 |
+ | 1.4953 | 1.5340 | 1600 | 1.4916 | 0.9721 | 0.8626 | 0.8359 | 0.8490 |
+ | 1.4953 | 1.6299 | 1700 | 1.4541 | 0.9719 | 0.8601 | 0.8360 | 0.8479 |
 
 
  ### Framework versions
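
Note: the hyperparameter list above maps roughly onto the standard `transformers` `TrainingArguments`. The sketch below is an approximation of that configuration, not the actual training script (which is not part of this commit); `output_dir` and the eval/logging cadence are inferred from the results table, not stated on the card.

```python
# Approximate TrainingArguments for the hyperparameters listed above.
# output_dir, eval_steps, and logging_steps are inferred, not from the card.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="bert-question-classifier",  # hypothetical
    learning_rate=9e-5,                     # raised from 1e-5 in this commit
    per_device_train_batch_size=4,
    per_device_eval_batch_size=4,
    seed=42,
    optim="adamw_torch",                    # OptimizerNames.ADAMW_TORCH
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    lr_scheduler_type="linear",
    warmup_ratio=0.1,
    num_train_epochs=5,                     # cut from 10 in this commit
    eval_strategy="steps",                  # "evaluation_strategy" before transformers 4.41
    eval_steps=100,                         # the table evaluates every 100 steps
    logging_steps=500,                      # "No log" until step 500 in the table
)
```

The new results table stops at step 1700 (epoch 1.63) even though num_epochs is 5, which suggests early stopping or a truncated log; the card does not say which.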
config.json CHANGED
@@ -5,6 +5,24 @@
    ],
    "attention_probs_dropout_prob": 0.1,
    "classifier_dropout": null,
+   "custom_pipelines": {
+     "question-classifier": {
+       "default": {
+         "model": {
+           "pt": [
+             "ai-research-lab/bert-question-classifier",
+             "main"
+           ]
+         }
+       },
+       "impl": "classifier_pipeline.MultiTaskClassifierPipeline",
+       "pt": [
+         "AutoModelForSequenceClassification"
+       ],
+       "tf": [],
+       "type": "text"
+     }
+   },
    "gradient_checkpointing": false,
    "hidden_act": "gelu",
    "hidden_dropout_prob": 0.1,
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:da7de6a21c39286279104b31dea9f15dd59f21c308cbe6a8c355ab832e3be98b
+ oid sha256:9adc60526740d0f404a57222ebcc59a7f39b6ebcaccb008c8301f2bcfba094ad
  size 438057080
tokenizer.json CHANGED
@@ -6,7 +6,16 @@
      "strategy": "LongestFirst",
      "stride": 0
    },
-   "padding": null,
+   "padding": {
+     "strategy": {
+       "Fixed": 512
+     },
+     "direction": "Right",
+     "pad_to_multiple_of": null,
+     "pad_id": 0,
+     "pad_type_id": 0,
+     "pad_token": "[PAD]"
+   },
    "added_tokens": [
      {
        "id": 0,