01/29/2024 19:52:08 - WARNING - __main__ - Process rank: 0, device: cuda:0, n_gpu: 1, distributed training: True, 16-bits training: True
01/29/2024 19:52:08 - INFO - __main__ - Training/evaluation parameters TrainingArguments(
_n_gpu=1,
adafactor=False,
adam_beta1=0.9,
adam_beta2=0.999,
adam_epsilon=1e-08,
auto_find_batch_size=False,
bf16=False,
bf16_full_eval=False,
data_seed=None,
dataloader_drop_last=False,
dataloader_num_workers=0,
dataloader_persistent_workers=False,
dataloader_pin_memory=True,
dataloader_prefetch_factor=None,
ddp_backend=None,
ddp_broadcast_buffers=None,
ddp_bucket_cap_mb=None,
ddp_find_unused_parameters=None,
ddp_timeout=1800,
debug=[],
deepspeed=None,
disable_tqdm=False,
dispatch_batches=None,
do_eval=True,
do_predict=False,
do_train=True,
eval_accumulation_steps=None,
eval_delay=0,
eval_steps=200,
evaluation_strategy=IntervalStrategy.STEPS,
fp16=True,
fp16_backend=auto,
fp16_full_eval=False,
fp16_opt_level=O1,
fsdp=[],
fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_grad_ckpt': False},
fsdp_min_num_params=0,
fsdp_transformer_layer_cls_to_wrap=None,
full_determinism=False,
gradient_accumulation_steps=1,
gradient_checkpointing=True,
gradient_checkpointing_kwargs=None,
greater_is_better=None,
group_by_length=True,
half_precision_backend=auto,
hub_always_push=False,
hub_model_id=simpragma/breeze-listen-w2v2-ml,
hub_private_repo=False,
hub_strategy=HubStrategy.EVERY_SAVE,
hub_token=<HUB_TOKEN>,
ignore_data_skip=False,
include_inputs_for_metrics=False,
include_num_input_tokens_seen=False,
include_tokens_per_second=False,
jit_mode_eval=False,
label_names=None,
label_smoothing_factor=0.0,
learning_rate=0.001,
length_column_name=input_length,
load_best_model_at_end=False,
local_rank=0,
log_level=passive,
log_level_replica=warning,
log_on_each_node=True,
logging_dir=/cosmos/home/sp-operator/ai/training/models/simpragma/breeze-listen-w2v2-ml/runs/Jan29_19-52-08_knight,
logging_first_step=False,
logging_nan_inf_filter=True,
logging_steps=500,
logging_strategy=IntervalStrategy.STEPS,
lr_scheduler_kwargs={},
lr_scheduler_type=SchedulerType.LINEAR,
max_grad_norm=1.0,
max_steps=-1,
metric_for_best_model=None,
mp_parameters=,
neftune_noise_alpha=None,
no_cuda=False,
num_train_epochs=4.0,
optim=OptimizerNames.ADAMW_BNB,
optim_args=None,
output_dir=/cosmos/home/sp-operator/ai/training/models/simpragma/breeze-listen-w2v2-ml,
overwrite_output_dir=True,
past_index=-1,
per_device_eval_batch_size=8,
per_device_train_batch_size=4,
prediction_loss_only=False,
push_to_hub=True,
push_to_hub_model_id=None,
push_to_hub_organization=None,
push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
ray_scope=last,
remove_unused_columns=True,
report_to=[],
resume_from_checkpoint=None,
run_name=/cosmos/home/sp-operator/ai/training/models/simpragma/breeze-listen-w2v2-ml,
save_on_each_node=False,
save_only_model=False,
save_safetensors=True,
save_steps=200,
save_strategy=IntervalStrategy.STEPS,
save_total_limit=3,
seed=42,
skip_memory_metrics=True,
split_batches=False,
tf32=None,
torch_compile=False,
torch_compile_backend=None,
torch_compile_mode=None,
torchdynamo=None,
tpu_metrics_debug=False,
tpu_num_cores=None,
use_cpu=False,
use_ipex=False,
use_legacy_prediction_loop=False,
use_mps_device=False,
warmup_ratio=0.0,
warmup_steps=100,
weight_decay=0.0,
)
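For reference, the configuration dumped above corresponds roughly to the following TrainingArguments construction. This is a minimal sketch, assuming a transformers version around 4.37 (matching the argument names in the dump) and bitsandbytes installed for the 8-bit AdamW optimizer; arguments left at their defaults are omitted.

from transformers import TrainingArguments

# Sketch reconstructing the logged configuration; defaults are omitted.
# optim="adamw_bnb_8bit" is OptimizerNames.ADAMW_BNB and requires bitsandbytes.
training_args = TrainingArguments(
    output_dir="/cosmos/home/sp-operator/ai/training/models/simpragma/breeze-listen-w2v2-ml",
    overwrite_output_dir=True,
    do_train=True,
    do_eval=True,
    evaluation_strategy="steps",
    eval_steps=200,
    save_strategy="steps",
    save_steps=200,
    save_total_limit=3,
    logging_steps=500,
    per_device_train_batch_size=4,
    per_device_eval_batch_size=8,
    gradient_checkpointing=True,
    learning_rate=1e-3,
    lr_scheduler_type="linear",
    warmup_steps=100,
    num_train_epochs=4.0,
    optim="adamw_bnb_8bit",
    fp16=True,
    group_by_length=True,
    length_column_name="input_length",
    seed=42,
    push_to_hub=True,
    hub_model_id="simpragma/breeze-listen-w2v2-ml",
    hub_strategy="every_save",
)

Setting group_by_length=True with length_column_name="input_length" batches audio samples of similar duration together, which reduces padding and is a common choice when fine-tuning wav2vec 2.0 with CTC.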
{'eval_loss': 5.472805500030518, 'eval_wer': 1.075673807878369, 'eval_runtime': 162.595, 'eval_samples_per_second': 4.078, 'eval_steps_per_second': 0.51, 'epoch': 0.41}
{'eval_loss': 5.127437114715576, 'eval_wer': 1.003800967519005, 'eval_runtime': 163.1607, 'eval_samples_per_second': 4.063, 'eval_steps_per_second': 0.509, 'epoch': 0.81}
{'loss': 6.5037, 'learning_rate': 0.0007890792291220557, 'epoch': 1.02}
{'eval_loss': 0.6166694760322571, 'eval_wer': 0.8130615065653075, 'eval_runtime': 161.3235, 'eval_samples_per_second': 4.11, 'eval_steps_per_second': 0.514, 'epoch': 1.22}
{'eval_loss': 0.328411728143692, 'eval_wer': 0.582930200414651, 'eval_runtime': 162.053, 'eval_samples_per_second': 4.091, 'eval_steps_per_second': 0.512, 'epoch': 1.63}
{'loss': 1.0482, 'learning_rate': 0.0005214132762312634, 'epoch': 2.03}
{'eval_loss': 0.3169207274913788, 'eval_wer': 0.5666897028334485, 'eval_runtime': 165.1028, 'eval_samples_per_second': 4.016, 'eval_steps_per_second': 0.503, 'epoch': 2.03}
{'eval_loss': 0.28758111596107483, 'eval_wer': 0.5425017277125086, 'eval_runtime': 160.9496, 'eval_samples_per_second': 4.119, 'eval_steps_per_second': 0.516, 'epoch': 2.44}
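Note that the first two evaluation points report eval_wer above 1.0. Word error rate is (substitutions + deletions + insertions) divided by the number of reference words, so a model that emits many spurious tokens early in training can score above 100%. Below is a minimal sketch of how such a WER value is typically computed with the evaluate library (requires the evaluate and jiwer packages; the strings are illustrative, not taken from this run).

import evaluate

# WER = (substitutions + deletions + insertions) / reference words,
# so insertions can push it above 1.0, as in the first two eval points above.
wer_metric = evaluate.load("wer")

references = ["thank you very much"]             # 4 reference words
predictions = ["a a a thank you very much a a"]  # 5 insertions -> WER = 5/4

wer = wer_metric.compute(predictions=predictions, references=references)
print(f"WER: {wer:.2f}")  # 1.25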