Amanaccessassist committed on
Commit 716ab58
Parent: 71263f7

End of training

README.md ADDED
@@ -0,0 +1,70 @@
+ ---
+ license: apache-2.0
+ base_model: google/vit-base-patch16-224-in21k
+ tags:
+ - generated_from_trainer
+ metrics:
+ - accuracy
+ model-index:
+ - name: finetuned-blurr-nonblur
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # finetuned-blurr-nonblur
+
+ This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.2435
+ - Accuracy: 0.9241
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 2e-05
+ - train_batch_size: 64
+ - eval_batch_size: 64
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 10
+ - mixed_precision_training: Native AMP
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Accuracy |
+ |:-------------:|:-----:|:----:|:---------------:|:--------:|
+ | 0.6486        | 1.0   | 14   | 0.6255          | 0.6646   |
+ | 0.552         | 2.0   | 28   | 0.5737          | 0.6772   |
+ | 0.4207        | 3.0   | 42   | 0.5175          | 0.7975   |
+ | 0.3545        | 4.0   | 56   | 0.4484          | 0.8861   |
+ | 0.2082        | 5.0   | 70   | 0.3621          | 0.8861   |
+ | 0.167         | 6.0   | 84   | 0.2930          | 0.9051   |
+ | 0.176         | 7.0   | 98   | 0.3003          | 0.8861   |
+ | 0.1275        | 8.0   | 112  | 0.2435          | 0.9241   |
+ | 0.11          | 9.0   | 126  | 0.2581          | 0.9051   |
+ | 0.1009        | 10.0  | 140  | 0.2474          | 0.9114   |
+
+
+ ### Framework versions
+
+ - Transformers 4.38.1
+ - Pytorch 2.1.2
+ - Datasets 2.1.0
+ - Tokenizers 0.15.2
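For quick verification of the card above, here is a minimal inference sketch using the `transformers` pipeline API. The repo id `Amanaccessassist/finetuned-blurr-nonblur` is assumed from the committer and model name; the card itself does not state one, so adjust it to wherever the checkpoint actually lives:

```python
from PIL import Image
from transformers import pipeline

# Assumed repo id (committer/model name); not stated in the model card.
classifier = pipeline(
    "image-classification",
    model="Amanaccessassist/finetuned-blurr-nonblur",
)

preds = classifier(Image.open("photo.jpg"))  # any RGB image path
print(preds)
# e.g. [{'label': 'Non Blurr', 'score': 0.97}, {'label': 'Blurr', 'score': 0.03}]
```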
all_results.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "epoch": 10.0,
+   "eval_accuracy": 0.9240506329113924,
+   "eval_loss": 0.2435225546360016,
+   "eval_runtime": 8.0346,
+   "eval_samples_per_second": 19.665,
+   "eval_steps_per_second": 0.373,
+   "total_flos": 6.912285473621606e+17,
+   "train_loss": 0.2789982352937971,
+   "train_runtime": 718.4172,
+   "train_samples_per_second": 12.416,
+   "train_steps_per_second": 0.195
+ }
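A back-of-the-envelope reading of these numbers: eval_samples_per_second × eval_runtime ≈ 19.665 × 8.03 ≈ 158 evaluation images (and 0.9241 × 158 ≈ 146 classified correctly), while train_samples_per_second × train_runtime ≈ 12.416 × 718.4 ≈ 8,920 images processed, i.e. roughly 892 training images per epoch over the 10 epochs.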
config.json ADDED
@@ -0,0 +1,32 @@
+ {
+   "_name_or_path": "google/vit-base-patch16-224-in21k",
+   "architectures": [
+     "ViTForImageClassification"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "encoder_stride": 16,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "Blurr",
+     "1": "Non Blurr"
+   },
+   "image_size": 224,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "Blurr": 0,
+     "Non Blurr": 1
+   },
+   "layer_norm_eps": 1e-12,
+   "model_type": "vit",
+   "num_attention_heads": 12,
+   "num_channels": 3,
+   "num_hidden_layers": 12,
+   "patch_size": 16,
+   "problem_type": "single_label_classification",
+   "qkv_bias": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.38.1"
+ }
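The `id2label`/`label2id` maps in this config are what turn the two-way classification head into the human-readable `Blurr`/`Non Blurr` labels. A sketch of what the pipeline call above does under the hood (same assumed repo id):

```python
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForImageClassification

model_id = "Amanaccessassist/finetuned-blurr-nonblur"  # assumed repo id
processor = AutoImageProcessor.from_pretrained(model_id)
model = AutoModelForImageClassification.from_pretrained(model_id)

inputs = processor(images=Image.open("photo.jpg"), return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 2): one score per label

pred = logits.argmax(-1).item()
print(model.config.id2label[pred])  # "Blurr" or "Non Blurr"
```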
eval_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 10.0,
+   "eval_accuracy": 0.9240506329113924,
+   "eval_loss": 0.2435225546360016,
+   "eval_runtime": 8.0346,
+   "eval_samples_per_second": 19.665,
+   "eval_steps_per_second": 0.373
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c24745fe885a08f73d3732c9c092f1c76788526ba582e2b5084569defff55cae
+ size 343223968
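As a sanity check on the weight file: 343,223,968 bytes ÷ 4 bytes per float32 parameter ≈ 85.8M parameters, consistent with a standard ViT-Base backbone plus its two-class head.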
preprocessor_config.json ADDED
@@ -0,0 +1,22 @@
+ {
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "image_processor_type": "ViTFeatureExtractor",
+   "image_std": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "resample": 2,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "height": 224,
+     "width": 224
+   }
+ }
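In plain terms: images are resized to 224×224 with bilinear resampling (`"resample": 2` in PIL's encoding), rescaled from [0, 255] to [0, 1] (1/255 ≈ 0.00392…), then normalized with mean and std of 0.5 per channel, landing in [-1, 1]. A hand-rolled NumPy sketch of the same transform (a local `photo.jpg` is assumed; in practice the image processor loaded from this config does all of this for you):

```python
import numpy as np
from PIL import Image

img = Image.open("photo.jpg").convert("RGB")
img = img.resize((224, 224), resample=Image.BILINEAR)  # "resample": 2

x = np.asarray(img).astype(np.float32)
x = x / 255.0                   # "rescale_factor": 0.00392156862745098
x = (x - 0.5) / 0.5             # image_mean/image_std of 0.5 -> [-1, 1]

x = x.transpose(2, 0, 1)[None]  # HWC -> NCHW batch, as ViT expects
print(x.shape)                  # (1, 3, 224, 224)
```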
runs/May31_11-09-09_6c18d3c55927/events.out.tfevents.1717153750.6c18d3c55927.34.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1ede4dda5b342f99aac9c3e675bc1f708a33d83e0f344b7b3f84c5035691c29f
+ size 11025
runs/May31_11-09-09_6c18d3c55927/events.out.tfevents.1717154483.6c18d3c55927.34.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8c89d9acd1fffb51efe3bc4cf5d88da7e6e1930b71d9c4227eb99938ac7a9466
+ size 411
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 10.0,
+   "total_flos": 6.912285473621606e+17,
+   "train_loss": 0.2789982352937971,
+   "train_runtime": 718.4172,
+   "train_samples_per_second": 12.416,
+   "train_steps_per_second": 0.195
+ }
trainer_state.json ADDED
@@ -0,0 +1,218 @@
+ {
+   "best_metric": 0.2435225546360016,
+   "best_model_checkpoint": "finetuned-blurr-nonblur/checkpoint-112",
+   "epoch": 10.0,
+   "eval_steps": 100,
+   "global_step": 140,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.71,
+       "grad_norm": 54505.6171875,
+       "learning_rate": 1.8571428571428575e-05,
+       "loss": 0.6486,
+       "step": 10
+     },
+     {
+       "epoch": 1.0,
+       "eval_accuracy": 0.6645569620253164,
+       "eval_loss": 0.625545084476471,
+       "eval_runtime": 7.9446,
+       "eval_samples_per_second": 19.888,
+       "eval_steps_per_second": 0.378,
+       "step": 14
+     },
+     {
+       "epoch": 1.43,
+       "grad_norm": 57206.48828125,
+       "learning_rate": 1.7142857142857142e-05,
+       "loss": 0.552,
+       "step": 20
+     },
+     {
+       "epoch": 2.0,
+       "eval_accuracy": 0.6772151898734177,
+       "eval_loss": 0.5736974477767944,
+       "eval_runtime": 7.72,
+       "eval_samples_per_second": 20.466,
+       "eval_steps_per_second": 0.389,
+       "step": 28
+     },
+     {
+       "epoch": 2.14,
+       "grad_norm": 53612.984375,
+       "learning_rate": 1.5714285714285715e-05,
+       "loss": 0.5206,
+       "step": 30
+     },
+     {
+       "epoch": 2.86,
+       "grad_norm": 51806.32421875,
+       "learning_rate": 1.4285714285714287e-05,
+       "loss": 0.4207,
+       "step": 40
+     },
+     {
+       "epoch": 3.0,
+       "eval_accuracy": 0.7974683544303798,
+       "eval_loss": 0.5174736380577087,
+       "eval_runtime": 7.726,
+       "eval_samples_per_second": 20.45,
+       "eval_steps_per_second": 0.388,
+       "step": 42
+     },
+     {
+       "epoch": 3.57,
+       "grad_norm": 56256.515625,
+       "learning_rate": 1.2857142857142859e-05,
+       "loss": 0.3545,
+       "step": 50
+     },
+     {
+       "epoch": 4.0,
+       "eval_accuracy": 0.8860759493670886,
+       "eval_loss": 0.44835516810417175,
+       "eval_runtime": 7.7183,
+       "eval_samples_per_second": 20.471,
+       "eval_steps_per_second": 0.389,
+       "step": 56
+     },
+     {
+       "epoch": 4.29,
+       "grad_norm": 70625.890625,
+       "learning_rate": 1.1428571428571429e-05,
+       "loss": 0.286,
+       "step": 60
+     },
+     {
+       "epoch": 5.0,
+       "grad_norm": 57182.4140625,
+       "learning_rate": 1e-05,
+       "loss": 0.2082,
+       "step": 70
+     },
+     {
+       "epoch": 5.0,
+       "eval_accuracy": 0.8860759493670886,
+       "eval_loss": 0.3621000051498413,
+       "eval_runtime": 7.8704,
+       "eval_samples_per_second": 20.075,
+       "eval_steps_per_second": 0.381,
+       "step": 70
+     },
+     {
+       "epoch": 5.71,
+       "grad_norm": 41587.15625,
+       "learning_rate": 8.571428571428571e-06,
+       "loss": 0.167,
+       "step": 80
+     },
+     {
+       "epoch": 6.0,
+       "eval_accuracy": 0.9050632911392406,
+       "eval_loss": 0.2930063307285309,
+       "eval_runtime": 7.906,
+       "eval_samples_per_second": 19.985,
+       "eval_steps_per_second": 0.379,
+       "step": 84
+     },
+     {
+       "epoch": 6.43,
+       "grad_norm": 89662.453125,
+       "learning_rate": 7.1428571428571436e-06,
+       "loss": 0.176,
+       "step": 90
+     },
+     {
+       "epoch": 7.0,
+       "eval_accuracy": 0.8860759493670886,
+       "eval_loss": 0.3003353178501129,
+       "eval_runtime": 7.7836,
+       "eval_samples_per_second": 20.299,
+       "eval_steps_per_second": 0.385,
+       "step": 98
+     },
+     {
+       "epoch": 7.14,
+       "grad_norm": 105894.6796875,
+       "learning_rate": 5.7142857142857145e-06,
+       "loss": 0.1271,
+       "step": 100
+     },
+     {
+       "epoch": 7.86,
+       "grad_norm": 73103.46875,
+       "learning_rate": 4.2857142857142855e-06,
+       "loss": 0.1275,
+       "step": 110
+     },
+     {
+       "epoch": 8.0,
+       "eval_accuracy": 0.9240506329113924,
+       "eval_loss": 0.2435225546360016,
+       "eval_runtime": 7.7728,
+       "eval_samples_per_second": 20.327,
+       "eval_steps_per_second": 0.386,
+       "step": 112
+     },
+     {
+       "epoch": 8.57,
+       "grad_norm": 65041.953125,
+       "learning_rate": 2.8571428571428573e-06,
+       "loss": 0.11,
+       "step": 120
+     },
+     {
+       "epoch": 9.0,
+       "eval_accuracy": 0.9050632911392406,
+       "eval_loss": 0.25813814997673035,
+       "eval_runtime": 7.8334,
+       "eval_samples_per_second": 20.17,
+       "eval_steps_per_second": 0.383,
+       "step": 126
+     },
+     {
+       "epoch": 9.29,
+       "grad_norm": 42873.71484375,
+       "learning_rate": 1.4285714285714286e-06,
+       "loss": 0.107,
+       "step": 130
+     },
+     {
+       "epoch": 10.0,
+       "grad_norm": 64285.99609375,
+       "learning_rate": 0.0,
+       "loss": 0.1009,
+       "step": 140
+     },
+     {
+       "epoch": 10.0,
+       "eval_accuracy": 0.9113924050632911,
+       "eval_loss": 0.24741442501544952,
+       "eval_runtime": 7.9653,
+       "eval_samples_per_second": 19.836,
+       "eval_steps_per_second": 0.377,
+       "step": 140
+     },
+     {
+       "epoch": 10.0,
+       "step": 140,
+       "total_flos": 6.912285473621606e+17,
+       "train_loss": 0.2789982352937971,
+       "train_runtime": 718.4172,
+       "train_samples_per_second": 12.416,
+       "train_steps_per_second": 0.195
+     }
+   ],
+   "logging_steps": 10,
+   "max_steps": 140,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 10,
+   "save_steps": 100,
+   "total_flos": 6.912285473621606e+17,
+   "train_batch_size": 64,
+   "trial_name": null,
+   "trial_params": null
+ }
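The log above peaks at checkpoint-112 (epoch 8, accuracy 0.9241); the last two epochs do not improve on it. A small sketch for pulling the per-epoch eval curve out of a local copy of this file (path assumed):

```python
import json

with open("trainer_state.json") as f:   # local copy of the file above
    state = json.load(f)

for entry in state["log_history"]:
    if "eval_accuracy" in entry:        # eval entries only, one per epoch
        print(f"epoch {entry['epoch']:>5}: "
              f"acc={entry['eval_accuracy']:.4f}  loss={entry['eval_loss']:.4f}")

print("best:", state["best_metric"], "at", state["best_model_checkpoint"])
```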
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e48fb5f8242b3a0aedc2a8f87542b09847512f1fd792dc2f4c02d7b52e0826c7
+ size 4920