Yannis98 committed
Commit e294e56 (verified)
1 Parent(s): b114135

End of training

.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+eval_nbest_predictions.json filter=lfs diff=lfs merge=lfs -text
+predict_nbest_predictions.json filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -4,6 +4,8 @@ license: apache-2.0
 base_model: albert/albert-base-v2
 tags:
 - generated_from_trainer
+datasets:
+- TimoImhof/TriviaQA-in-SQuAD-format
 model-index:
 - name: trivia_albert_finetuned
   results: []
@@ -14,7 +16,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # trivia_albert_finetuned
 
-This model is a fine-tuned version of [albert/albert-base-v2](https://huggingface.co/albert/albert-base-v2) on an unknown dataset.
+This model is a fine-tuned version of [albert/albert-base-v2](https://huggingface.co/albert/albert-base-v2) on the TimoImhof/TriviaQA-in-SQuAD-format dataset.
 
 ## Model description
 
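As a quick functional check of the checkpoint described in the README, the sketch below loads it with the transformers question-answering pipeline. The repo id Yannis98/trivia_albert_finetuned is an assumption inferred from the committer and model name, and the question/context pair is invented for illustration.

```python
# Minimal sketch; "Yannis98/trivia_albert_finetuned" is an assumed repo id,
# inferred from the committer and model name rather than stated in the diff.
from transformers import pipeline

qa = pipeline("question-answering", model="Yannis98/trivia_albert_finetuned")

# Toy SQuAD-style question/context pair, invented for illustration only.
result = qa(
    question="Which dataset was the model fine-tuned on?",
    context=(
        "trivia_albert_finetuned is an ALBERT base v2 checkpoint fine-tuned "
        "on the TimoImhof/TriviaQA-in-SQuAD-format dataset."
    ),
)
print(result["answer"], round(result["score"], 3))
```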
all_results.json ADDED
The diff for this file is too large to render. See raw diff
 
eval_nbest_predictions.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6e3b5808eb509e205beed38d494bac52f683f8cdcfe1404bf580f1e967b51237
+size 18637995
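The three added lines are a Git LFS pointer rather than the predictions themselves; the ~18.6 MB JSON lives in LFS storage, which is why .gitattributes now routes this file through LFS. A minimal sketch of parsing such a pointer follows (the path is illustrative, and this is only relevant when the repo is checked out without git-lfs):

```python
# Minimal sketch: read the version/oid/size fields of a Git LFS pointer file.
# With git-lfs installed, the checkout contains the full JSON instead.
def read_lfs_pointer(path: str) -> dict:
    fields = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

pointer = read_lfs_pointer("eval_nbest_predictions.json")  # illustrative path
print(pointer["oid"])                    # sha256:6e3b58...
print(int(pointer["size"]) / 1e6, "MB")  # ~18.6 MB
```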
eval_predictions.json ADDED
The diff for this file is too large to render. See raw diff
 
eval_results.json ADDED
@@ -0,0 +1,9 @@
+{
+    "epoch": 4.0,
+    "eval_exact_match": 64.28106701366298,
+    "eval_f1": 70.91440871034429,
+    "eval_runtime": 11.4515,
+    "eval_samples": 3398,
+    "eval_samples_per_second": 296.73,
+    "eval_steps_per_second": 37.113
+}
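The exact-match and F1 numbers above are SQuAD-style metrics over the 3,398 evaluation samples. The evaluation script itself is not part of this commit, but a minimal sketch of how such scores are typically computed with the evaluate library's squad metric (toy prediction/reference pair, invented for illustration) is:

```python
# Minimal sketch of SQuAD-style exact match / F1 with the `evaluate` library;
# the actual script behind the numbers above is not shown in this commit.
import evaluate

squad_metric = evaluate.load("squad")

predictions = [{"id": "q1", "prediction_text": "Paris"}]
references = [{"id": "q1", "answers": {"text": ["Paris"], "answer_start": [0]}}]

print(squad_metric.compute(predictions=predictions, references=references))
# -> {'exact_match': 100.0, 'f1': 100.0}
```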
full_predict_tr_results.json ADDED
The diff for this file is too large to render. See raw diff
 
full_predict_val_results.json ADDED
The diff for this file is too large to render. See raw diff
 
predict_nbest_predictions.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6e3b5808eb509e205beed38d494bac52f683f8cdcfe1404bf580f1e967b51237
+size 18637995
predict_predictions.json ADDED
The diff for this file is too large to render. See raw diff
 
predict_tr_results.json ADDED
@@ -0,0 +1,8 @@
+{
+    "predict_samples_tr": 13545,
+    "test_exact_match": 91.63006344558322,
+    "test_f1": 95.08068270968953,
+    "test_runtime": 46.3663,
+    "test_samples_per_second": 292.13,
+    "test_steps_per_second": 36.535
+}
predict_val_results.json ADDED
@@ -0,0 +1,8 @@
+{
+    "predict_samples_val": 3398,
+    "test_exact_match": 64.28106701366298,
+    "test_f1": 70.91440871034429,
+    "test_runtime": 11.3669,
+    "test_samples_per_second": 298.938,
+    "test_steps_per_second": 37.389
+}
runs/Feb09_16-31-11_xgpi3/events.out.tfevents.1739090333.xgpi3.6778.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2e109fa29daeeb6b59a70c99433902602683a441d7d712652e9b70eb3e13c5b5
+size 412
train_results.json ADDED
@@ -0,0 +1,9 @@
+{
+    "epoch": 4.0,
+    "total_flos": 897375340431360.0,
+    "train_loss": 1.4507015551779379,
+    "train_runtime": 419.078,
+    "train_samples": 13545,
+    "train_samples_per_second": 129.284,
+    "train_steps_per_second": 2.701
+}
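The throughput figure is consistent with 4 epochs over 13,545 samples in roughly 419 seconds; a one-line sanity check (assuming the Trainer reports samples/second as total samples processed divided by runtime):

```python
# Assumption: samples_per_second = epochs * train_samples / train_runtime.
epochs, train_samples, train_runtime = 4.0, 13545, 419.078
print(epochs * train_samples / train_runtime)  # ~129.28, matching 129.284
```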
trainer_state.json ADDED
@@ -0,0 +1,56 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 4.0,
+  "eval_steps": 500,
+  "global_step": 1132,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 1.76678445229682,
+      "grad_norm": 28.217533111572266,
+      "learning_rate": 1.674911660777385e-05,
+      "loss": 1.9708,
+      "step": 500
+    },
+    {
+      "epoch": 3.53356890459364,
+      "grad_norm": 29.53766441345215,
+      "learning_rate": 3.498233215547703e-06,
+      "loss": 1.0999,
+      "step": 1000
+    },
+    {
+      "epoch": 4.0,
+      "step": 1132,
+      "total_flos": 897375340431360.0,
+      "train_loss": 1.4507015551779379,
+      "train_runtime": 419.078,
+      "train_samples_per_second": 129.284,
+      "train_steps_per_second": 2.701
+    }
+  ],
+  "logging_steps": 500,
+  "max_steps": 1132,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 4,
+  "save_steps": 500,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": true
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 897375340431360.0,
+  "train_batch_size": 48,
+  "trial_name": null,
+  "trial_params": null
+}
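The two logged learning rates are consistent with the Trainer's default linear decay to zero over max_steps, which would put the initial learning rate at about 3e-5. That starting value is inferred from the log rather than stated anywhere in this commit; a quick check:

```python
# Inferred, not stated in the commit: linear decay to 0 over 1132 steps from
# an initial LR of 3e-5 reproduces both logged learning rates.
base_lr, max_steps = 3e-5, 1132

for step in (500, 1000):
    print(step, base_lr * (1 - step / max_steps))
# 500  -> ~1.6749e-05 (logged: 1.674911660777385e-05)
# 1000 -> ~3.4982e-06 (logged: 3.498233215547703e-06)
```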