Shotaro30678 committed
Commit 65afb6a · verified · 1 Parent(s): de4f640

End of training

Files changed (5)
  1. README.md +68 -0
  2. config.json +3 -3
  3. model.safetensors +1 -1
  4. tokenizer.json +2 -3
  5. training_args.bin +2 -2
README.md ADDED
@@ -0,0 +1,68 @@
+ ---
+ library_name: transformers
+ base_model: michellejieli/emotion_text_classifier
+ tags:
+ - generated_from_trainer
+ metrics:
+ - accuracy
+ model-index:
+ - name: sentiment_analysis_for_emotion_chat_bot
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # sentiment_analysis_for_emotion_chat_bot
+
+ This model is a fine-tuned version of [michellejieli/emotion_text_classifier](https://huggingface.co/michellejieli/emotion_text_classifier) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.4012
+ - Accuracy: 0.8653
+ - F1-score: 0.8582
+ - Num Input Tokens Seen: 130810880
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.0001
+ - train_batch_size: 8
+ - eval_batch_size: 8
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: constant
+ - lr_scheduler_warmup_ratio: 0.03
+ - num_epochs: 5
+ - mixed_precision_training: Native AMP
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1-score | Input Tokens Seen |
+ |:-------------:|:-----:|:-----:|:---------------:|:--------:|:--------:|:-----------------:|
+ | No log | 1.0 | 6388 | 0.3932 | 0.8715 | 0.8678 | 26162176 |
+ | 0.6659 | 2.0 | 12776 | 0.3770 | 0.8724 | 0.8680 | 52324352 |
+ | 0.6659 | 3.0 | 19164 | 0.3531 | 0.8776 | 0.8749 | 78486528 |
+ | 0.643 | 4.0 | 25552 | 0.3735 | 0.8726 | 0.8696 | 104648704 |
+ | 0.643 | 5.0 | 31940 | 0.4012 | 0.8653 | 0.8582 | 130810880 |
+
+
+ ### Framework versions
+
+ - Transformers 4.44.2
+ - Pytorch 2.4.0+cu121
+ - Datasets 3.0.0
+ - Tokenizers 0.19.1
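
For readers who want to reproduce the run, the hyperparameters reported in the README above map roughly onto `TrainingArguments` as sketched below. This is a minimal sketch only: the output directory is a placeholder, the Adam betas and epsilon shown in the card are the `Trainer` defaults and so are not set explicitly, and the actual training script is not part of this commit.

```python
from transformers import TrainingArguments

# Minimal sketch mirroring the hyperparameters reported in the model card above.
# "output_dir" is a placeholder; the real training script is not in this commit.
training_args = TrainingArguments(
    output_dir="sentiment_analysis_for_emotion_chat_bot",
    learning_rate=1e-4,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    seed=42,
    lr_scheduler_type="constant",
    warmup_ratio=0.03,   # recorded in the card; a purely constant schedule does not apply warmup
    num_train_epochs=5,
    fp16=True,           # "Native AMP" mixed precision
)
```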
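A usage sketch for the resulting classifier follows. The repository id `Shotaro30678/sentiment_analysis_for_emotion_chat_bot` is an assumption inferred from the committer and model name above and may differ from the actual repo.

```python
from transformers import pipeline

# Assumed repo id, inferred from the committer and model-index name above.
classifier = pipeline(
    "text-classification",
    model="Shotaro30678/sentiment_analysis_for_emotion_chat_bot",
)

print(classifier("I can't believe you remembered my birthday!"))
# Returns [{'label': ..., 'score': ...}] with the label drawn from the seven classes
# in config.json: neutral, anger, disgust, fear, happiness, sadness, surprise.
```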
config.json CHANGED
@@ -26,7 +26,7 @@
    "anger": 1,
    "disgust": 2,
    "fear": 3,
-   "joy": 4,
+   "happiness": 4,
    "neutral": 0,
    "sadness": 5,
    "surprise": 6
@@ -40,8 +40,8 @@
    "position_embedding_type": "absolute",
    "problem_type": "single_label_classification",
    "torch_dtype": "float32",
-   "transformers_version": "4.36.2",
+   "transformers_version": "4.44.2",
    "type_vocab_size": 1,
-   "use_cache": true,
+   "use_cache": false,
    "vocab_size": 50265
  }
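
The substantive change in config.json is the relabelling of class 4 from `joy` to `happiness` (alongside the `transformers_version` bump and `use_cache: false`). A quick sanity check of the mapping, assuming the same repo id as in the usage sketch above:

```python
from transformers import AutoConfig

# Assumed repo id; see the usage sketch above.
config = AutoConfig.from_pretrained("Shotaro30678/sentiment_analysis_for_emotion_chat_bot")

print(config.label2id)      # expected to contain "happiness": 4 rather than "joy": 4
print(config.id2label[4])   # expected: "happiness"
```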
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:914ac522da4333a3e7f62e426f4181f8e49224457e161779ecf53c9150a8d3f1
+ oid sha256:101e13a625e09c3b95d34aa00b14ffa59a49c3fd6b212927d089683dce3fcbca
  size 328507660
tokenizer.json CHANGED
@@ -7,9 +7,7 @@
     "stride": 0
   },
   "padding": {
-     "strategy": {
-       "Fixed": 512
-     },
+     "strategy": "BatchLongest",
     "direction": "Right",
     "pad_to_multiple_of": null,
     "pad_id": 1,
@@ -97,6 +95,7 @@
   "end_of_word_suffix": "",
   "fuse_unk": false,
   "byte_fallback": false,
+  "ignore_merges": false,
   "vocab": {
    "<s>": 0,
    "<pad>": 1,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7183a15db003666b9a6dadfbc84dbded49dc41eb8a7a5943323c44d90cb9d8b8
- size 4728
+ oid sha256:c08ef626a255b5aa9779f36463699999bd85c8257a0da01aae061ed79c7d722c
+ size 5240
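
Finally, regarding the tokenizer.json change above: the padding strategy moves from a fixed length of 512 to `BatchLongest`, i.e. each batch is padded only to its longest sequence. A sketch of the difference in behaviour, again assuming the repo id used in the earlier examples:

```python
from transformers import AutoTokenizer

# Assumed repo id; see the usage sketch above.
tokenizer = AutoTokenizer.from_pretrained("Shotaro30678/sentiment_analysis_for_emotion_chat_bot")

batch = ["short utterance", "a noticeably longer utterance that determines the batch length"]

# Old behaviour (Fixed 512): every sequence is padded out to 512 tokens.
fixed = tokenizer(batch, padding="max_length", max_length=512, truncation=True)

# New behaviour (BatchLongest): pad only up to the longest sequence in the batch.
dynamic = tokenizer(batch, padding=True, truncation=True)

print(len(fixed["input_ids"][0]), len(dynamic["input_ids"][0]))  # 512 vs. the batch-longest length
```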