supermy committed on
Commit 4af927c · 1 Parent(s): 2b10d1e

Update README.md

Files changed (1): README.md +42 -0
README.md CHANGED
@@ -65,6 +65,19 @@ model = AutoModelForCausalLM.from_pretrained("supermy/couplet")
 
 BPE tokenization: "vocab_size"=50000
 ```
+ [INFO|trainer.py:1608] 2022-12-07 02:32:58,307 >> ***** Running training *****
+ [INFO|trainer.py:1609] 2022-12-07 02:32:58,307 >> Num examples = 260926
+ [INFO|trainer.py:1610] 2022-12-07 02:32:58,307 >> Num Epochs = 160
+ [INFO|trainer.py:1611] 2022-12-07 02:32:58,307 >> Instantaneous batch size per device = 96
+ [INFO|trainer.py:1612] 2022-12-07 02:32:58,307 >> Total train batch size (w. parallel, distributed & accumulation) = 96
+ [INFO|trainer.py:1613] 2022-12-07 02:32:58,307 >> Gradient Accumulation steps = 1
+ [INFO|trainer.py:1614] 2022-12-07 02:32:58,307 >> Total optimization steps = 434880
+ [INFO|trainer.py:1616] 2022-12-07 02:32:58,308 >> Number of trainable parameters = 124439808
+ [INFO|trainer.py:1637] 2022-12-07 02:32:58,309 >> Continuing training from checkpoint, will skip to saved global_step
+ [INFO|trainer.py:1638] 2022-12-07 02:32:58,310 >> Continuing training from epoch 93
+ [INFO|trainer.py:1639] 2022-12-07 02:32:58,310 >> Continuing training from global step 253500
+
+
 [INFO|trainer.py:1608] 2022-11-30 12:51:36,357 >> ***** Running training *****
 [INFO|trainer.py:1609] 2022-11-30 12:51:36,357 >> Num examples = 260926
 [INFO|trainer.py:1610] 2022-11-30 12:51:36,357 >> Num Epochs = 81
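The new 2022-12-07 log lines are internally consistent: at a total train batch size of 96, 260926 examples give ceil(260926 / 96) = 2718 optimization steps per epoch, and 160 epochs give the logged 434880 total steps; resuming at global step 253500 likewise lands in epoch 253500 // 2718 = 93. A minimal sketch of that arithmetic (all numbers taken from the log above):

```python
import math

num_examples = 260926   # "Num examples" from the log
batch_size = 96         # "Total train batch size"
num_epochs = 160        # "Num Epochs"

# Trainer counts a trailing partial batch as one optimization step.
steps_per_epoch = math.ceil(num_examples / batch_size)
print(steps_per_epoch)                  # 2718
print(steps_per_epoch * num_epochs)     # 434880, matching "Total optimization steps"

# Resuming at global step 253500 falls inside epoch 253500 // 2718.
print(253500 // steps_per_epoch)        # 93, matching "Continuing training from epoch 93"
```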
@@ -106,4 +119,33 @@ BPE tokenization: "vocab_size"=50000
 eval_steps_per_second = 3.883
 perplexity = 26.6108
 
+
+ {'loss': 3.0967, 'learning_rate': 1.8027961736571009e-07, 'epoch': 159.49}
+ {'loss': 3.0922, 'learning_rate': 1.227924944812362e-07, 'epoch': 159.68}
+ {'loss': 3.0934, 'learning_rate': 6.530537159676233e-08, 'epoch': 159.86}
+ {'train_runtime': 120967.2394, 'train_samples_per_second': 345.12, 'train_steps_per_second': 3.595, 'train_loss': 1.3456422273861828, 'epoch': 160.0}
+ ***** train metrics *****
+ epoch = 160.0
+ train_loss = 1.3456
+ train_runtime = 1 day, 9:36:07.23
+ train_samples = 260926
+ train_samples_per_second = 345.12
+ train_steps_per_second = 3.595
+ 12/08/2022 12:09:08 - INFO - __main__ - *** Evaluate ***
+ [INFO|trainer.py:2929] 2022-12-08 12:09:08,522 >> ***** Running Evaluation *****
+ [INFO|trainer.py:2931] 2022-12-08 12:09:08,522 >> Num examples = 1350
+ [INFO|trainer.py:2934] 2022-12-08 12:09:08,522 >> Batch size = 96
+ 100%|██████████| 15/15 [00:03<00:00, 4.16it/s]
+ [INFO|modelcard.py:449] 2022-12-08 12:09:13,448 >> Dropping the following result as it does not have all the necessary fields:
+ {'task': {'name': 'Causal Language Modeling', 'type': 'text-generation'}, 'metrics': [{'name': 'Accuracy', 'type': 'accuracy', 'value': 0.433615520282187}]}
+ ***** eval metrics *****
+ epoch = 160.0
+ eval_accuracy = 0.4336
+ eval_loss = 3.3005
+ eval_runtime = 0:00:03.93
+ eval_samples = 1350
+ eval_samples_per_second = 343.164
+ eval_steps_per_second = 3.813
+ perplexity = 27.1257
+
 ```
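The metrics in the second hunk also cross-check: run_clm.py reports perplexity as exp(eval_loss), and train_runtime is the logged seconds rendered as a timedelta. A quick verification sketch (values copied from the metrics above; the small perplexity gap comes from eval_loss being rounded to four decimals):

```python
import math
from datetime import timedelta

# perplexity = exp(eval_loss): exp(3.3005) ≈ 27.13 vs. the logged 27.1257
print(math.exp(3.3005))

# train_runtime = 120967.2394 s, rendered as "1 day, 9:36:07.23" in the log
print(timedelta(seconds=120967.2394))

# train_samples_per_second = examples * epochs / runtime
print(260926 * 160 / 120967.2394)   # ≈ 345.12, as logged
```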
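Finally, the first hunk header quotes the README's model-loading line, `model = AutoModelForCausalLM.from_pretrained("supermy/couplet")`. A minimal, hypothetical sketch of exercising the checkpoint these logs describe; the tokenizer pairing, the generation settings, and the example first line are assumptions, not taken from this diff:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumption: the repo ships a matching tokenizer alongside the model weights.
tokenizer = AutoTokenizer.from_pretrained("supermy/couplet")
model = AutoModelForCausalLM.from_pretrained("supermy/couplet")

# Assumption: given the first line of a couplet, the model generates a matching second line.
inputs = tokenizer("燕子归来，春光似海", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=32, do_sample=True, top_p=0.9)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```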