Amala3 committed on
Commit
cdf838f
·
verified ·
1 Parent(s): b70faa3

End of training

Browse files
Files changed (5) hide show
  1. README.md +84 -0
  2. config.json +34 -0
  3. generation_config.json +8 -0
  4. model.safetensors +3 -0
  5. training_args.bin +3 -0
README.md ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: apache-2.0
3
+ base_model: google/flan-t5-large
4
+ tags:
5
+ - generated_from_trainer
6
+ model-index:
7
+ - name: models
8
+ results: []
9
+ ---
10
+
11
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
12
+ should probably proofread and complete it, then remove this comment. -->
13
+
14
+ # models
15
+
16
+ This model is a fine-tuned version of [google/flan-t5-large](https://huggingface.co/google/flan-t5-large) on the None dataset.
17
+ It achieves the following results on the evaluation set:
18
+ - Loss: 0.1902
19
+
20
+ ## Model description
21
+
22
+ More information needed
23
+
24
+ ## Intended uses & limitations
25
+
26
+ More information needed
27
+
28
+ ## Training and evaluation data
29
+
30
+ More information needed
31
+
32
+ ## Training procedure
33
+
34
+ ### Training hyperparameters
35
+
36
+ The following hyperparameters were used during training:
37
+ - learning_rate: 2e-05
38
+ - train_batch_size: 8
39
+ - eval_batch_size: 8
40
+ - seed: 42
41
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
42
+ - lr_scheduler_type: linear
43
+ - num_epochs: 2
44
+
45
+ ### Training results
46
+
47
+ | Training Loss | Epoch | Step | Validation Loss |
48
+ |:-------------:|:------:|:-----:|:---------------:|
49
+ | 0.3389 | 0.0699 | 500 | 0.2668 |
50
+ | 0.2719 | 0.1398 | 1000 | 0.2524 |
51
+ | 0.2612 | 0.2097 | 1500 | 0.2381 |
52
+ | 0.2634 | 0.2796 | 2000 | 0.2313 |
53
+ | 0.2403 | 0.3495 | 2500 | 0.2260 |
54
+ | 0.2433 | 0.4193 | 3000 | 0.2190 |
55
+ | 0.2351 | 0.4892 | 3500 | 0.2168 |
56
+ | 0.2424 | 0.5591 | 4000 | 0.2109 |
57
+ | 0.2198 | 0.6290 | 4500 | 0.2071 |
58
+ | 0.2313 | 0.6989 | 5000 | 0.2062 |
59
+ | 0.226 | 0.7688 | 5500 | 0.2058 |
60
+ | 0.2195 | 0.8387 | 6000 | 0.2030 |
61
+ | 0.2173 | 0.9086 | 6500 | 0.2009 |
62
+ | 0.2359 | 0.9785 | 7000 | 0.1969 |
63
+ | 0.2055 | 1.0484 | 7500 | 0.1961 |
64
+ | 0.2074 | 1.1183 | 8000 | 0.1980 |
65
+ | 0.2066 | 1.1881 | 8500 | 0.1938 |
66
+ | 0.2077 | 1.2580 | 9000 | 0.1937 |
67
+ | 0.196 | 1.3279 | 9500 | 0.1948 |
68
+ | 0.2027 | 1.3978 | 10000 | 0.1931 |
69
+ | 0.2001 | 1.4677 | 10500 | 0.1922 |
70
+ | 0.1925 | 1.5376 | 11000 | 0.1932 |
71
+ | 0.1933 | 1.6075 | 11500 | 0.1900 |
72
+ | 0.2038 | 1.6774 | 12000 | 0.1921 |
73
+ | 0.1892 | 1.7473 | 12500 | 0.1914 |
74
+ | 0.1956 | 1.8172 | 13000 | 0.1904 |
75
+ | 0.1956 | 1.8871 | 13500 | 0.1898 |
76
+ | 0.1925 | 1.9569 | 14000 | 0.1902 |
77
+
78
+
79
+ ### Framework versions
80
+
81
+ - Transformers 4.40.2
82
+ - Pytorch 2.1.2
83
+ - Datasets 2.18.0
84
+ - Tokenizers 0.19.1
config.json ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "google/flan-t5-large",
3
+ "architectures": [
4
+ "T5ForConditionalGeneration"
5
+ ],
6
+ "classifier_dropout": 0.0,
7
+ "d_ff": 2816,
8
+ "d_kv": 64,
9
+ "d_model": 1024,
10
+ "decoder_start_token_id": 0,
11
+ "dense_act_fn": "gelu_new",
12
+ "dropout_rate": 0.1,
13
+ "eos_token_id": 1,
14
+ "feed_forward_proj": "gated-gelu",
15
+ "initializer_factor": 1.0,
16
+ "is_encoder_decoder": true,
17
+ "is_gated_act": true,
18
+ "layer_norm_epsilon": 1e-06,
19
+ "max_length": 1024,
20
+ "model_type": "t5",
21
+ "n_positions": 512,
22
+ "num_decoder_layers": 24,
23
+ "num_heads": 16,
24
+ "num_layers": 24,
25
+ "output_past": true,
26
+ "pad_token_id": 0,
27
+ "relative_attention_max_distance": 128,
28
+ "relative_attention_num_buckets": 32,
29
+ "tie_word_embeddings": false,
30
+ "torch_dtype": "float32",
31
+ "transformers_version": "4.40.2",
32
+ "use_cache": true,
33
+ "vocab_size": 32128
34
+ }
generation_config.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "decoder_start_token_id": 0,
3
+ "early_stopping": true,
4
+ "eos_token_id": 1,
5
+ "num_beams": 5,
6
+ "pad_token": 0,
7
+ "transformers_version": "4.40.2"
8
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:068999492671fd651558d5976dcb4cc6e7f2fea53b6d8cc7b1193658d86ef22b
3
+ size 3132668808
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:23c95f1fceb4f180948543f6f5ce1f21afe6fa42cde40c7292ab85bc62d9324e
3
+ size 6648