ales committed on
Commit 35da9bd
1 Parent(s): cb7522c

Delete logs/train_20221214-112746.log

Files changed (1)
  1. logs/train_20221214-112746.log +0 -263
logs/train_20221214-112746.log DELETED
@@ -1,263 +0,0 @@
- 12/14/2022 11:27:46 - WARNING - __main__ - Process rank: -1, device: cuda:0, n_gpu: 1, distributed training: False, 16-bits training: True
- 12/14/2022 11:27:46 - INFO - __main__ - Training/evaluation parameters Seq2SeqTrainingArguments(
- _n_gpu=1,
- adafactor=False,
- adam_beta1=0.9,
- adam_beta2=0.999,
- adam_epsilon=1e-08,
- auto_find_batch_size=False,
- bf16=False,
- bf16_full_eval=False,
- data_seed=None,
- dataloader_drop_last=False,
- dataloader_num_workers=0,
- dataloader_pin_memory=True,
- ddp_bucket_cap_mb=None,
- ddp_find_unused_parameters=None,
- ddp_timeout=1800,
- debug=[],
- deepspeed=None,
- disable_tqdm=False,
- do_eval=True,
- do_predict=False,
- do_train=True,
- eval_accumulation_steps=None,
- eval_delay=0,
- eval_steps=1000,
- evaluation_strategy=steps,
- fp16=True,
- fp16_backend=auto,
- fp16_full_eval=False,
- fp16_opt_level=O1,
- fsdp=[],
- fsdp_min_num_params=0,
- fsdp_transformer_layer_cls_to_wrap=None,
- full_determinism=False,
- generation_max_length=225,
- generation_num_beams=None,
- gradient_accumulation_steps=1,
- gradient_checkpointing=True,
- greater_is_better=False,
- group_by_length=False,
- half_precision_backend=auto,
- hub_model_id=ales/whisper-small-belarusian,
- hub_private_repo=False,
- hub_strategy=every_save,
- hub_token=<HUB_TOKEN>,
- ignore_data_skip=True,
- include_inputs_for_metrics=False,
- jit_mode_eval=False,
- label_names=None,
- label_smoothing_factor=0.0,
- learning_rate=0.0001,
- length_column_name=length,
- load_best_model_at_end=True,
- local_rank=-1,
- log_level=passive,
- log_level_replica=passive,
- log_on_each_node=True,
- logging_dir=./runs/Dec14_11-27-46_129-213-88-66,
- logging_first_step=True,
- logging_nan_inf_filter=True,
- logging_steps=50,
- logging_strategy=steps,
- lr_scheduler_type=linear,
- max_grad_norm=1.0,
- max_steps=12000,
- metric_for_best_model=wer,
- mp_parameters=,
- no_cuda=False,
- num_train_epochs=3.0,
- optim=adamw_hf,
- optim_args=None,
- output_dir=./,
- overwrite_output_dir=False,
- past_index=-1,
- per_device_eval_batch_size=64,
- per_device_train_batch_size=64,
- predict_with_generate=True,
- prediction_loss_only=False,
- push_to_hub=True,
- push_to_hub_model_id=None,
- push_to_hub_organization=None,
- push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
- ray_scope=last,
- remove_unused_columns=True,
- report_to=['tensorboard'],
- resume_from_checkpoint=None,
- run_name=./,
- save_on_each_node=False,
- save_steps=1000,
- save_strategy=steps,
- save_total_limit=None,
- seed=42,
- sharded_ddp=[],
- skip_memory_metrics=True,
- sortish_sampler=False,
- tf32=None,
- torch_compile=False,
- torch_compile_backend=None,
- torch_compile_mode=None,
- torchdynamo=None,
- tpu_metrics_debug=False,
- tpu_num_cores=None,
- use_ipex=False,
- use_legacy_prediction_loop=False,
- use_mps_device=False,
- warmup_ratio=0.0,
- warmup_steps=500,
- weight_decay=0.0,
- xpu_backend=None,
- )
- 12/14/2022 11:27:46 - INFO - __main__ - Training/evaluation parameters Seq2SeqTrainingArguments(
- _n_gpu=1,
- adafactor=False,
- adam_beta1=0.9,
- adam_beta2=0.999,
- adam_epsilon=1e-08,
- auto_find_batch_size=False,
- bf16=False,
- bf16_full_eval=False,
- data_seed=None,
- dataloader_drop_last=False,
- dataloader_num_workers=0,
- dataloader_pin_memory=True,
- ddp_bucket_cap_mb=None,
- ddp_find_unused_parameters=None,
- ddp_timeout=1800,
- debug=[],
- deepspeed=None,
- disable_tqdm=False,
- do_eval=True,
- do_predict=False,
- do_train=True,
- eval_accumulation_steps=None,
- eval_delay=0,
- eval_steps=1000,
- evaluation_strategy=steps,
- fp16=True,
- fp16_backend=auto,
- fp16_full_eval=False,
- fp16_opt_level=O1,
- fsdp=[],
- fsdp_min_num_params=0,
- fsdp_transformer_layer_cls_to_wrap=None,
- full_determinism=False,
- generation_max_length=225,
- generation_num_beams=None,
- gradient_accumulation_steps=1,
- gradient_checkpointing=True,
- greater_is_better=False,
- group_by_length=False,
- half_precision_backend=auto,
- hub_model_id=ales/whisper-small-belarusian,
- hub_private_repo=False,
- hub_strategy=every_save,
- hub_token=<HUB_TOKEN>,
- ignore_data_skip=True,
- include_inputs_for_metrics=False,
- jit_mode_eval=False,
- label_names=None,
- label_smoothing_factor=0.0,
- learning_rate=0.0001,
- length_column_name=length,
- load_best_model_at_end=True,
- local_rank=-1,
- log_level=passive,
- log_level_replica=passive,
- log_on_each_node=True,
- logging_dir=./runs/Dec14_11-27-46_129-213-88-66,
- logging_first_step=True,
- logging_nan_inf_filter=True,
- logging_steps=50,
- logging_strategy=steps,
- lr_scheduler_type=linear,
- max_grad_norm=1.0,
- max_steps=12000,
- metric_for_best_model=wer,
- mp_parameters=,
- no_cuda=False,
- num_train_epochs=3.0,
- optim=adamw_hf,
- optim_args=None,
- output_dir=./,
- overwrite_output_dir=False,
- past_index=-1,
- per_device_eval_batch_size=64,
- per_device_train_batch_size=64,
- predict_with_generate=True,
- prediction_loss_only=False,
- push_to_hub=True,
- push_to_hub_model_id=None,
- push_to_hub_organization=None,
- push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
- ray_scope=last,
- remove_unused_columns=True,
- report_to=['tensorboard'],
- resume_from_checkpoint=None,
- run_name=./,
- save_on_each_node=False,
- save_steps=1000,
- save_strategy=steps,
- save_total_limit=None,
- seed=42,
- sharded_ddp=[],
- skip_memory_metrics=True,
- sortish_sampler=False,
- tf32=None,
- torch_compile=False,
- torch_compile_backend=None,
- torch_compile_mode=None,
- torchdynamo=None,
- tpu_metrics_debug=False,
- tpu_num_cores=None,
- use_ipex=False,
- use_legacy_prediction_loop=False,
- use_mps_device=False,
- warmup_ratio=0.0,
- warmup_steps=500,
- weight_decay=0.0,
- xpu_backend=None,
- )
- 12/14/2022 11:27:46 - INFO - __main__ - output_dir already exists. will try to load last checkpoint.
- 12/14/2022 11:27:46 - INFO - __main__ - last_checkpoint is None. will try to read from training_args.resume_from_checkpoint
- 12/14/2022 11:27:46 - INFO - __main__ - last_checkpoint is None. resume_from_checkpoint is either None or not existing dir. will try to read from the model saved in the root of output_dir.
- 12/14/2022 11:27:46 - INFO - __main__ - dir is not empty, but contains only: ['src', '.gitattributes', '.git', 'train_run_1.log', 'train_20221214-112746.log']. it is OK - will start training
- 12/14/2022 11:27:46 - INFO - datasets.info - Loading Dataset Infos from /home/ubuntu/.cache/huggingface/modules/datasets_modules/datasets/mozilla-foundation--common_voice_11_0/f8e47235d9b4e68fa24ed71d63266a02018ccf7194b2a8c9c598a5f3ab304d9f
- 12/14/2022 11:27:47 - INFO - datasets.info - Loading Dataset Infos from /home/ubuntu/.cache/huggingface/modules/datasets_modules/datasets/mozilla-foundation--common_voice_11_0/f8e47235d9b4e68fa24ed71d63266a02018ccf7194b2a8c9c598a5f3ab304d9f
- 12/14/2022 11:28:05 - WARNING - huggingface_hub.repository - /home/ubuntu/whisper-small-belarusian/./ is already a clone of https://huggingface.co/ales/whisper-small-belarusian. Make sure you pull the latest changes with `repo.git_pull()`.
- 12/15/2022 17:58:42 - WARNING - datasets.download.streaming_download_manager - Got disconnected from remote data host. Retrying in 5sec [1/20]
- 12/15/2022 18:01:33 - WARNING - datasets.download.streaming_download_manager - Got disconnected from remote data host. Retrying in 5sec [1/20]
- 12/15/2022 18:01:48 - WARNING - datasets.download.streaming_download_manager - Got disconnected from remote data host. Retrying in 5sec [2/20]
- 12/15/2022 18:02:03 - WARNING - datasets.download.streaming_download_manager - Got disconnected from remote data host. Retrying in 5sec [3/20]
- 12/15/2022 18:02:18 - WARNING - datasets.download.streaming_download_manager - Got disconnected from remote data host. Retrying in 5sec [4/20]
- 12/15/2022 18:02:33 - WARNING - datasets.download.streaming_download_manager - Got disconnected from remote data host. Retrying in 5sec [5/20]
- 12/15/2022 18:02:48 - WARNING - datasets.download.streaming_download_manager - Got disconnected from remote data host. Retrying in 5sec [6/20]
- 12/15/2022 18:03:03 - WARNING - datasets.download.streaming_download_manager - Got disconnected from remote data host. Retrying in 5sec [7/20]
- 12/15/2022 18:03:18 - WARNING - datasets.download.streaming_download_manager - Got disconnected from remote data host. Retrying in 5sec [8/20]
- 12/15/2022 18:03:33 - WARNING - datasets.download.streaming_download_manager - Got disconnected from remote data host. Retrying in 5sec [9/20]
- 12/15/2022 18:03:48 - WARNING - datasets.download.streaming_download_manager - Got disconnected from remote data host. Retrying in 5sec [10/20]
- 12/15/2022 18:04:03 - WARNING - datasets.download.streaming_download_manager - Got disconnected from remote data host. Retrying in 5sec [11/20]
- 12/15/2022 18:04:18 - WARNING - datasets.download.streaming_download_manager - Got disconnected from remote data host. Retrying in 5sec [12/20]
- 12/15/2022 18:04:33 - WARNING - datasets.download.streaming_download_manager - Got disconnected from remote data host. Retrying in 5sec [13/20]
- 12/15/2022 18:04:38 - WARNING - datasets.download.streaming_download_manager - Got disconnected from remote data host. Retrying in 5sec [14/20]
- 12/15/2022 18:04:43 - WARNING - datasets.download.streaming_download_manager - Got disconnected from remote data host. Retrying in 5sec [15/20]
- 12/16/2022 19:47:58 - WARNING - huggingface_hub.repository - Several commits (2) will be pushed upstream.
- 12/16/2022 19:47:58 - WARNING - huggingface_hub.repository - The progress bars may be unreliable.
- 12/16/2022 19:48:02 - WARNING - huggingface_hub.repository - remote: Scanning LFS files for validity, may be slow...
- remote: LFS file scan complete.
- To https://huggingface.co/ales/whisper-small-belarusian
- 72e5d42..1d54c35 main -> main
-
- 12/16/2022 19:48:36 - WARNING - huggingface_hub.repository - To https://huggingface.co/ales/whisper-small-belarusian
- 1d54c35..dc59786 main -> main
-
- 12/16/2022 19:48:39 - INFO - __main__ - *** Evaluate ***
- 12/16/2022 21:43:26 - WARNING - huggingface_hub.repository - remote: Scanning LFS files for validity, may be slow...
- remote: LFS file scan complete.
- To https://huggingface.co/ales/whisper-small-belarusian
- 2af2e4c..7dee023 main -> main
-
- 12/16/2022 21:43:59 - WARNING - huggingface_hub.repository - To https://huggingface.co/ales/whisper-small-belarusian
- 7dee023..881d121 main -> main
-
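
The deleted log is largely a dump of the run's Seq2SeqTrainingArguments. For readers who want a comparable setup, the following is a minimal sketch that rebuilds the key values from that dump using the transformers library; only the values are taken from the log, while the surrounding wiring (imports, variable names, and the fact that this would be fed to a Seq2SeqTrainer) is an assumption, not the author's actual training script.

```python
# Sketch only: key Seq2SeqTrainingArguments values copied from the deleted log.
# Everything outside the values themselves is an assumption.
from transformers import Seq2SeqTrainingArguments

training_args = Seq2SeqTrainingArguments(
    output_dir="./",
    do_train=True,
    do_eval=True,
    evaluation_strategy="steps",
    eval_steps=1000,
    save_strategy="steps",
    save_steps=1000,
    logging_steps=50,
    learning_rate=1e-4,
    warmup_steps=500,
    max_steps=12000,
    per_device_train_batch_size=64,
    per_device_eval_batch_size=64,
    gradient_checkpointing=True,
    fp16=True,
    predict_with_generate=True,
    generation_max_length=225,
    load_best_model_at_end=True,
    metric_for_best_model="wer",
    greater_is_better=False,
    push_to_hub=True,
    hub_model_id="ales/whisper-small-belarusian",
    hub_strategy="every_save",
    ignore_data_skip=True,
    report_to=["tensorboard"],
    seed=42,
)
```

With load_best_model_at_end=True, metric_for_best_model=wer and greater_is_better=False, the Trainer keeps the checkpoint with the lowest word error rate on the evaluation set as the final model.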
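The later warnings come from the datasets streaming download manager while reading Common Voice 11.0 over the network. Below is a hedged sketch of how such a streaming dataset might be opened; the "be" (Belarusian) configuration is inferred from the repository name whisper-small-belarusian and is an assumption, since the log itself does not name the language.

```python
# Hypothetical illustration of streaming Common Voice 11.0 with datasets.
# The "be" config is an inference from the repo name, not stated in the log.
from datasets import load_dataset

common_voice_train = load_dataset(
    "mozilla-foundation/common_voice_11_0",
    "be",
    split="train",
    streaming=True,       # stream samples instead of downloading the full archive
    use_auth_token=True,  # Common Voice 11.0 is a gated dataset on the Hub
)

# Samples are fetched lazily as the training loop iterates over the stream.
first_sample = next(iter(common_voice_train))
```

In streaming mode, transient network drops surface as the "Got disconnected from remote data host. Retrying in 5sec [n/20]" warnings seen above: the download manager retries up to 20 times before giving up rather than failing the run immediately.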