shorecode committed on
Commit 7bf177e · verified · 1 Parent(s): 3ea3464

Training complete!

Files changed (4):
  1. README.md +40 -21
  2. adapter_config.json +35 -0
  3. adapter_model.safetensors +3 -0
  4. training_args.bin +2 -2
README.md CHANGED
@@ -1,36 +1,55 @@
-
  ---
  tags:
- - autotrain
- - text2text-generation
- base_model: google/t5-efficient-tiny-nh8
- widget:
- - text: "I love AutoTrain"
- datasets:
- - gizemgg/wiki-eng-summary-trial-gen0-transformed-instruction
  ---

- # Model Trained Using AutoTrain

- - Problem type: Seq2Seq

- ## Validation Metrics
- loss: 2.225323438644409

- rouge1: 26.024

- rouge2: 24.1663

- rougeL: 25.432

- rougeLsum: 25.9929

- gen_len: 18.8

- runtime: 56.5847

- samples_per_second: 7.246

- steps_per_second: 1.82

- : 3.0

  ---
+ base_model: google/t5-efficient-tiny
+ library_name: peft
+ license: apache-2.0
  tags:
+ - generated_from_trainer
+ model-index:
+ - name: t5-efficient-tiny-nh8-summarizer
+   results: []
  ---

+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # t5-efficient-tiny-nh8-summarizer
+
+ This model is a fine-tuned version of [google/t5-efficient-tiny](https://huggingface.co/google/t5-efficient-tiny) on an unknown dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 7.000000000000001e-05
+ - train_batch_size: 10
+ - eval_batch_size: 10
+ - seed: 42
+ - optimizer: adamw_torch with betas=(0.9, 0.999), epsilon=1e-08, and no additional optimizer arguments
+ - lr_scheduler_type: linear
+ - num_epochs: 3
+ - mixed_precision_training: Native AMP
+
+ ### Training results
+
+ ### Framework versions
+
+ - PEFT 0.14.0
+ - Transformers 4.47.0
+ - Pytorch 2.4.0+cu121
+ - Datasets 3.0.0
+ - Tokenizers 0.21.0
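The new card is auto-generated, but the hyperparameter list pins the training setup down fairly well. A minimal sketch of the corresponding `Seq2SeqTrainingArguments`, assuming a hypothetical output directory (the training script itself is not part of this commit; the betas/epsilon in the card are simply the `adamw_torch` defaults):

```python
from transformers import Seq2SeqTrainingArguments

# Values copied from the "Training hyperparameters" list above;
# output_dir and predict_with_generate are assumptions.
training_args = Seq2SeqTrainingArguments(
    output_dir="t5-efficient-tiny-nh8-summarizer",
    learning_rate=7e-05,
    per_device_train_batch_size=10,
    per_device_eval_batch_size=10,
    seed=42,
    optim="adamw_torch",          # betas=(0.9, 0.999), eps=1e-08 are its defaults
    lr_scheduler_type="linear",
    num_train_epochs=3,
    fp16=True,                    # "Native AMP" mixed precision
    predict_with_generate=True,   # assumption: needed for ROUGE/gen_len-style eval
)
```

These arguments would then be handed to a `Seq2SeqTrainer` together with a tokenized dataset and a `DataCollatorForSeq2Seq`; the old card pointed at gizemgg/wiki-eng-summary-trial-gen0-transformed-instruction, though the new card leaves the dataset unspecified.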
adapter_config.json ADDED
@@ -0,0 +1,35 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": {
+     "base_model_class": "T5ForConditionalGeneration",
+     "parent_library": "transformers.models.t5.modeling_t5"
+   },
+   "base_model_name_or_path": "google/t5-efficient-tiny",
+   "bias": "none",
+   "eva_config": null,
+   "exclude_modules": null,
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 32,
+   "lora_bias": false,
+   "lora_dropout": 0.1,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 16,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "q",
+     "v"
+   ],
+   "task_type": null,
+   "use_dora": false,
+   "use_rslora": false
+ }
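The config describes a rank-16 LoRA adapter on T5's attention query/value projections. A minimal sketch of the `peft` calls that would produce an equivalent adapter_config.json (an illustration, not the author's actual script):

```python
from peft import LoraConfig, get_peft_model
from transformers import AutoModelForSeq2SeqLM

base = AutoModelForSeq2SeqLM.from_pretrained("google/t5-efficient-tiny")

# Mirrors the committed config: r=16, alpha=32, dropout=0.1, q/v targets.
# task_type is deliberately left unset, matching "task_type": null above,
# which is why PEFT records `auto_mapping` with the base model class instead.
lora_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.1,
    bias="none",
    target_modules=["q", "v"],  # T5's attention query and value projections
)

model = get_peft_model(base, lora_config)
model.print_trainable_parameters()  # only the LoRA matrices are trainable
model.save_pretrained("adapter")    # writes adapter_config.json + adapter_model.safetensors
```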
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7b4ac9862e4a49d47812174682b056db803cb9a1b1c81ab83bd7e4aed852eff0
+ size 793064
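This LFS pointer is the adapter itself: roughly 793 KB of LoRA weights that get applied on top of the base model at load time. A minimal usage sketch; the repo id is only a guess from the committer and model names, so substitute wherever the adapter actually lives:

```python
from peft import PeftModel
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

base_id = "google/t5-efficient-tiny"
adapter_id = "shorecode/t5-efficient-tiny-nh8-summarizer"  # assumed repo id

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForSeq2SeqLM.from_pretrained(base_id)
model = PeftModel.from_pretrained(base, adapter_id)  # loads adapter_model.safetensors

text = "summarize: PEFT adapters store only the low-rank update matrices."
inputs = tokenizer(text, return_tensors="pt")
summary_ids = model.generate(**inputs, max_new_tokens=48)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```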
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9f2f7d0988c14243f3efb5ea5c6a2029539b2a27be4925d0a8c9f6cc199b1b2d
- size 5496
+ oid sha256:8affc13c49d388f167ee3cae1f65f0450ab9c97dad121062bdc4d788e4095945
+ size 5304
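training_args.bin is the pickled `TrainingArguments` object the `Trainer` drops next to its outputs, so the size change simply reflects the new run's settings. One way to inspect it, as a sketch (unpickling executes arbitrary code, so only load files you trust):

```python
import torch

# weights_only=False is required because this is a pickled Python object,
# not a tensor checkpoint; only do this for files from a trusted source.
args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.num_train_epochs, args.per_device_train_batch_size)
```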