shuheng committed
Commit 6d9f5e9 (verified)
1 Parent(s): 5e08570

End of training

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+eval_nbest_predictions.json filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -4,6 +4,8 @@ license: apache-2.0
 base_model: albert/albert-xxlarge-v2
 tags:
 - generated_from_trainer
+datasets:
+- squad
 model-index:
 - name: squad_albert_xxl_finetuned
   results: []
@@ -14,7 +16,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # squad_albert_xxl_finetuned
 
-This model is a fine-tuned version of [albert/albert-xxlarge-v2](https://huggingface.co/albert/albert-xxlarge-v2) on an unknown dataset.
+This model is a fine-tuned version of [albert/albert-xxlarge-v2](https://huggingface.co/albert/albert-xxlarge-v2) on the squad dataset.
 
 ## Model description
 
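For context on the README change above: a minimal, hypothetical usage sketch for this question-answering checkpoint. The repo id and the example inputs below are assumptions for illustration, not part of this commit.

```python
# Hypothetical usage sketch; the model id is an assumed Hub repo id --
# substitute the actual repo id or a local path to this checkpoint.
from transformers import pipeline

qa = pipeline(
    "question-answering",
    model="shuheng/squad_albert_xxl_finetuned",  # assumed id, adjust as needed
)

answer = qa(
    question="What architecture is the model based on?",
    context="The model is a fine-tuned version of albert-xxlarge-v2 on the squad dataset.",
)
print(answer["answer"], answer["score"])
```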
all_results.json ADDED
@@ -0,0 +1,15 @@
+{
+    "epoch": 1.0,
+    "eval_exact_match": 89.3282876064333,
+    "eval_f1": 95.19020584030557,
+    "eval_runtime": 549.793,
+    "eval_samples": 10808,
+    "eval_samples_per_second": 19.658,
+    "eval_steps_per_second": 2.457,
+    "total_flos": 4.123581730562765e+16,
+    "train_loss": 0.7573485501836784,
+    "train_runtime": 13379.8798,
+    "train_samples": 88638,
+    "train_samples_per_second": 6.625,
+    "train_steps_per_second": 0.207
+}
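As a sanity check, the throughput figures in all_results.json follow directly from the sample counts and runtimes reported in the same file; a small sketch re-deriving them:

```python
# Re-derive the *_samples_per_second values from all_results.json.
train_samples, train_runtime = 88638, 13379.8798
eval_samples, eval_runtime = 10808, 549.793

print(round(train_samples / train_runtime, 3))  # 6.625  -> matches train_samples_per_second
print(round(eval_samples / eval_runtime, 3))    # 19.658 -> matches eval_samples_per_second
```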
eval_nbest_predictions.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:959509544b9a21fd469d787e8cd834ab0fc90c4c3ed3ac6f634ceb426e377c9a
+size 48902870
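eval_nbest_predictions.json is committed as a Git LFS pointer (hence the .gitattributes entry added above): the three lines record the pointer spec version, the SHA-256 of the real content, and its size in bytes. A small illustrative sketch that reads such a pointer; the helper below is an assumption for demonstration, not part of git-lfs or this repo:

```python
# Parse a Git LFS pointer file into its fields; after `git lfs pull`,
# the pointer on disk is replaced by the actual ~49 MB JSON file.
def read_lfs_pointer(path: str) -> dict:
    fields = {}
    with open(path) as fh:
        for line in fh:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

# Example (hypothetical local checkout):
# ptr = read_lfs_pointer("eval_nbest_predictions.json")
# ptr["oid"]  -> "sha256:9595..."
# ptr["size"] -> "48902870"
```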
eval_predictions.json ADDED
The diff for this file is too large to render. See raw diff
 
eval_results.json ADDED
@@ -0,0 +1,9 @@
+{
+    "epoch": 1.0,
+    "eval_exact_match": 89.3282876064333,
+    "eval_f1": 95.19020584030557,
+    "eval_runtime": 549.793,
+    "eval_samples": 10808,
+    "eval_samples_per_second": 19.658,
+    "eval_steps_per_second": 2.457
+}
runs/Dec22_10-39-15_xgpi4/events.out.tfevents.1734849211.xgpi4.3623.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:462859e7918ab534f3612e98ebe1abccaa745a4db796a2ebe2771860efaf72f5
+size 412
train_results.json ADDED
@@ -0,0 +1,9 @@
+{
+    "epoch": 1.0,
+    "total_flos": 4.123581730562765e+16,
+    "train_loss": 0.7573485501836784,
+    "train_runtime": 13379.8798,
+    "train_samples": 88638,
+    "train_samples_per_second": 6.625,
+    "train_steps_per_second": 0.207
+}
trainer_state.json ADDED
@@ -0,0 +1,77 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 1.0,
+  "eval_steps": 500,
+  "global_step": 2770,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.18050541516245489,
+      "grad_norm": 9.565498352050781,
+      "learning_rate": 2.4584837545126353e-05,
+      "loss": 1.0408,
+      "step": 500
+    },
+    {
+      "epoch": 0.36101083032490977,
+      "grad_norm": 3.8937153816223145,
+      "learning_rate": 1.9169675090252708e-05,
+      "loss": 0.7637,
+      "step": 1000
+    },
+    {
+      "epoch": 0.5415162454873647,
+      "grad_norm": 5.798653602600098,
+      "learning_rate": 1.3754512635379063e-05,
+      "loss": 0.716,
+      "step": 1500
+    },
+    {
+      "epoch": 0.7220216606498195,
+      "grad_norm": 3.4039201736450195,
+      "learning_rate": 8.339350180505416e-06,
+      "loss": 0.681,
+      "step": 2000
+    },
+    {
+      "epoch": 0.9025270758122743,
+      "grad_norm": 9.457993507385254,
+      "learning_rate": 2.924187725631769e-06,
+      "loss": 0.6554,
+      "step": 2500
+    },
+    {
+      "epoch": 1.0,
+      "step": 2770,
+      "total_flos": 4.123581730562765e+16,
+      "train_loss": 0.7573485501836784,
+      "train_runtime": 13379.8798,
+      "train_samples_per_second": 6.625,
+      "train_steps_per_second": 0.207
+    }
+  ],
+  "logging_steps": 500,
+  "max_steps": 2770,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 1,
+  "save_steps": 500,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": true
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 4.123581730562765e+16,
+  "train_batch_size": 32,
+  "trial_name": null,
+  "trial_params": null
+}
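The learning_rate values logged in trainer_state.json are consistent with a plain linear decay to zero over max_steps = 2770 from a 3e-5 peak; the peak value is inferred from the logged numbers rather than stated anywhere in this commit. A short sketch reproducing the schedule:

```python
# Reproduce the logged learning rates under an assumed linear-decay schedule
# with no warmup: lr(step) = peak_lr * (1 - step / max_steps).
peak_lr, max_steps = 3e-5, 2770  # peak_lr inferred from log_history

for step in (500, 1000, 1500, 2000, 2500):
    print(step, peak_lr * (1 - step / max_steps))
# 500  -> 2.4584...e-05 (logged 2.4584837545126353e-05)
# 1000 -> 1.9169...e-05 (logged 1.9169675090252708e-05)
# 2500 -> 2.9241...e-06 (logged 2.924187725631769e-06)
```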