learn3r committed (verified)
Commit d69036c · 1 Parent(s): f37cbda

End of training

Files changed (5):
  1. README.md +20 -8
  2. all_results.json +18 -0
  3. eval_results.json +13 -0
  4. train_results.json +8 -0
  5. trainer_state.json +0 -0
README.md CHANGED
@@ -3,11 +3,23 @@ license: apache-2.0
  base_model: facebook/bart-large
  tags:
  - generated_from_trainer
+ datasets:
+ - learn3r/gov_report_memsum_oracle
  metrics:
  - rouge
  model-index:
  - name: bart_large_gov
-   results: []
+   results:
+   - task:
+       name: Summarization
+       type: summarization
+     dataset:
+       name: learn3r/gov_report_memsum_oracle
+       type: learn3r/gov_report_memsum_oracle
+     metrics:
+     - name: Rouge1
+       type: rouge
+       value: 56.2783
  ---

  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -15,14 +27,14 @@ should probably proofread and complete it, then remove this comment. -->

  # bart_large_gov

- This model is a fine-tuned version of [facebook/bart-large](https://huggingface.co/facebook/bart-large) on an unknown dataset.
+ This model is a fine-tuned version of [facebook/bart-large](https://huggingface.co/facebook/bart-large) on the learn3r/gov_report_memsum_oracle dataset.
  It achieves the following results on the evaluation set:
- - Loss: 1.6328
- - Rouge1: 55.8983
- - Rouge2: 30.5018
- - Rougel: 38.7764
- - Rougelsum: 51.3611
- - Gen Len: 128.7407
+ - Loss: 1.4450
+ - Rouge1: 56.2783
+ - Rouge2: 31.1387
+ - Rougel: 39.2121
+ - Rougelsum: 51.8068
+ - Gen Len: 128.5062

  ## Model description
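The updated card describes a BART-large summarizer fine-tuned on learn3r/gov_report_memsum_oracle and evaluated with ROUGE. For orientation, here is a minimal usage sketch with the transformers summarization pipeline, assuming the checkpoint is published under the repo id learn3r/bart_large_gov (inferred from the model-index name) and using max_length=128 to mirror the reported average generation length.

```python
# Minimal usage sketch. The repo id "learn3r/bart_large_gov" is an assumption
# inferred from the model-index name; substitute the actual repo id if it differs.
from transformers import pipeline

summarizer = pipeline("summarization", model="learn3r/bart_large_gov")

report = "The Government Accountability Office reviewed ..."  # placeholder GovReport-style input
summary = summarizer(
    report,
    max_length=128,   # mirrors the reported Gen Len of ~128.5 tokens
    min_length=32,
    truncation=True,  # BART-large encodes at most 1024 input tokens
)[0]["summary_text"]
print(summary)
```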
all_results.json ADDED
@@ -0,0 +1,18 @@
+ {
+ "epoch": 19.94,
+ "eval_gen_len": 128.50617283950618,
+ "eval_loss": 1.445023536682129,
+ "eval_rouge1": 56.2783,
+ "eval_rouge2": 31.1387,
+ "eval_rougeL": 39.2121,
+ "eval_rougeLsum": 51.8068,
+ "eval_runtime": 214.343,
+ "eval_samples": 972,
+ "eval_samples_per_second": 4.535,
+ "eval_steps_per_second": 0.569,
+ "train_loss": 1.0796797680504182,
+ "train_runtime": 51892.12,
+ "train_samples": 17457,
+ "train_samples_per_second": 6.728,
+ "train_steps_per_second": 0.052
+ }
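The *_samples_per_second fields in these result files are ratios of sample counts to wall-clock runtime, so they can be re-derived from the other entries (972 eval samples over 214.343 s is about 4.535 samples/s). Below is a small sketch that re-checks the numbers, assuming all_results.json sits in the current working directory and that training was configured for 20 epochs (the logged epoch of 19.94 suggests a 20-epoch setup); eval_results.json and train_results.json added further down simply split the same fields into separate files.

```python
import json

# Re-derive the reported throughput figures from all_results.json
# (assumes the file is in the current working directory).
with open("all_results.json") as f:
    results = json.load(f)

eval_sps = results["eval_samples"] / results["eval_runtime"]
print(f"eval samples/s: {eval_sps:.3f}")    # 972 / 214.343 ≈ 4.535, matching eval_samples_per_second

# Assumption: 20 configured training epochs (the logged epoch is 19.94).
train_sps = results["train_samples"] * 20 / results["train_runtime"]
print(f"train samples/s: {train_sps:.3f}")  # 17457 * 20 / 51892.12 ≈ 6.728
```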
eval_results.json ADDED
@@ -0,0 +1,13 @@
+ {
+ "epoch": 19.94,
+ "eval_gen_len": 128.50617283950618,
+ "eval_loss": 1.445023536682129,
+ "eval_rouge1": 56.2783,
+ "eval_rouge2": 31.1387,
+ "eval_rougeL": 39.2121,
+ "eval_rougeLsum": 51.8068,
+ "eval_runtime": 214.343,
+ "eval_samples": 972,
+ "eval_samples_per_second": 4.535,
+ "eval_steps_per_second": 0.569
+ }
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "epoch": 19.94,
+ "train_loss": 1.0796797680504182,
+ "train_runtime": 51892.12,
+ "train_samples": 17457,
+ "train_samples_per_second": 6.728,
+ "train_steps_per_second": 0.052
+ }
trainer_state.json ADDED
The diff for this file is too large to render. See raw diff