rohan4s committed · Commit 1d987f2 · verified · 1 Parent(s): 7d6cf28

Model save

README.md CHANGED
@@ -18,7 +18,7 @@ should probably proofread and complete it, then remove this comment. -->
 
  This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on an unknown dataset.
  It achieves the following results on the evaluation set:
- - Loss: 0.2552
+ - Loss: 0.3183
  - Accuracy: 0.9398
 
  ## Model description
@@ -44,16 +44,17 @@ The following hyperparameters were used during training:
  - seed: 42
  - optimizer: Use adamw_torch with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
  - lr_scheduler_type: linear
- - num_epochs: 3
+ - num_epochs: 4
  - mixed_precision_training: Native AMP
 
  ### Training results
 
  | Training Loss | Epoch | Step | Validation Loss | Accuracy |
  |:-------------:|:-----:|:----:|:---------------:|:--------:|
- | 0.1809 | 1.0 | 48 | 0.3544 | 0.9136 |
- | 0.0946 | 2.0 | 96 | 0.2911 | 0.9319 |
- | 0.0625 | 3.0 | 144 | 0.2552 | 0.9398 |
+ | 0.176 | 1.0 | 48 | 0.4412 | 0.8901 |
+ | 0.0726 | 2.0 | 96 | 0.3465 | 0.9267 |
+ | 0.0356 | 3.0 | 144 | 0.3606 | 0.9215 |
+ | 0.0649 | 4.0 | 192 | 0.3183 | 0.9398 |
 
 
  ### Framework versions
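
The hyperparameters listed in the updated card map roughly onto a standard `transformers` `Trainer` setup. The sketch below is an illustration only, not the author's actual training script: the learning rate, `num_labels`, and the `train_ds`/`eval_ds` dataset objects are assumptions.

```python
# Illustrative sketch of a Trainer configuration matching the card's hyperparameters.
# learning_rate, num_labels, and the dataset objects are assumptions, not taken from the repo.
from transformers import AutoModelForImageClassification, Trainer, TrainingArguments

model = AutoModelForImageClassification.from_pretrained(
    "google/vit-base-patch16-224-in21k",
    num_labels=16,  # assumption: set to the actual number of food classes
)

args = TrainingArguments(
    output_dir="finetuned-bangladeshi-traditional-food",
    num_train_epochs=4,              # num_epochs: 4
    per_device_train_batch_size=32,  # train_batch_size reported in trainer_state.json
    seed=42,                         # seed: 42
    optim="adamw_torch",             # AdamW with betas=(0.9, 0.999) and epsilon=1e-08
    lr_scheduler_type="linear",      # linear decay schedule
    learning_rate=2e-4,              # assumption: not listed in the visible diff
    fp16=True,                       # "Native AMP" mixed-precision training
)

trainer = Trainer(
    model=model,
    args=args,
    train_dataset=train_ds,  # assumption: preprocessed training split
    eval_dataset=eval_ds,    # assumption: held-out evaluation split
)
trainer.train()
```

A run configured this way, with `logging_steps=10` and per-epoch evaluation, produces the step-level log and epoch-level eval records that appear in the `trainer_state.json` diff further down.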
all_results.json CHANGED
@@ -1,8 +1,8 @@
  {
- "epoch": 4.0,
- "total_flos": 4.72760450039808e+17,
- "train_loss": 0.7488827416673303,
- "train_runtime": 205.0233,
- "train_samples_per_second": 29.753,
- "train_steps_per_second": 0.936
+ "epoch": 3.0,
+ "total_flos": 3.54570337529856e+17,
+ "train_loss": 0.14949214458465576,
+ "train_runtime": 158.5609,
+ "train_samples_per_second": 28.853,
+ "train_steps_per_second": 0.908
  }
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:57b465dc98b4e4988c3c76700d26533d136fb0a3c7de70443d947aa400cc26e0
+ oid sha256:0e2bf9cde13293ce8cf990852a0f8d89daf074816d2aee7b63569bab686692ae
  size 343267040
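
The replaced `model.safetensors` blob is the weight file that gets pulled in when the model is loaded from the hub. A minimal inference sketch follows; the repository id is inferred from the `finetuned-bangladeshi-traditional-food` checkpoint path in `trainer_state.json` and may not be the exact hub name, and the image path is a placeholder.

```python
# Minimal inference sketch. The repo id is an assumption based on the checkpoint
# directory name in trainer_state.json; the image path is a placeholder.
from transformers import pipeline

classifier = pipeline(
    "image-classification",
    model="rohan4s/finetuned-bangladeshi-traditional-food",  # assumed repo id
)
predictions = classifier("example_food_photo.jpg")  # placeholder local image file
print(predictions)  # e.g. [{"label": "...", "score": 0.97}, ...]
```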
runs/Dec09_11-26-01_85cc5ef1c5bf/events.out.tfevents.1733743575.85cc5ef1c5bf.23.2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:00b436d1a6a70b11268c97a3d15abaaac1de19434e52ebaef8a021d3538b81cc
+ size 11335
train_results.json CHANGED
@@ -1,8 +1,8 @@
  {
- "epoch": 4.0,
- "total_flos": 4.72760450039808e+17,
- "train_loss": 0.7488827416673303,
- "train_runtime": 205.0233,
- "train_samples_per_second": 29.753,
- "train_steps_per_second": 0.936
+ "epoch": 3.0,
+ "total_flos": 3.54570337529856e+17,
+ "train_loss": 0.14949214458465576,
+ "train_runtime": 158.5609,
+ "train_samples_per_second": 28.853,
+ "train_steps_per_second": 0.908
  }
trainer_state.json CHANGED
@@ -1,196 +1,152 @@
  {
- "best_metric": 0.3590115010738373,
- "best_model_checkpoint": "finetuned-bangladeshi-traditional-food/checkpoint-192",
- "epoch": 4.0,
+ "best_metric": 0.2552085518836975,
+ "best_model_checkpoint": "finetuned-bangladeshi-traditional-food/checkpoint-144",
+ "epoch": 3.0,
  "eval_steps": 500,
- "global_step": 192,
+ "global_step": 144,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
  {
  "epoch": 0.20833333333333334,
- "grad_norm": 103857.609375,
- "learning_rate": 0.00018958333333333332,
- "loss": 2.5002,
+ "grad_norm": 192996.96875,
+ "learning_rate": 0.00018611111111111112,
+ "loss": 0.3852,
  "step": 10
  },
  {
  "epoch": 0.4166666666666667,
- "grad_norm": 105647.0078125,
- "learning_rate": 0.0001791666666666667,
- "loss": 1.8826,
+ "grad_norm": 121565.640625,
+ "learning_rate": 0.00017222222222222224,
+ "loss": 0.2712,
  "step": 20
  },
  {
  "epoch": 0.625,
- "grad_norm": 95205.671875,
- "learning_rate": 0.00016875,
- "loss": 1.5298,
+ "grad_norm": 50398.125,
+ "learning_rate": 0.00015833333333333332,
+ "loss": 0.26,
  "step": 30
  },
  {
  "epoch": 0.8333333333333334,
- "grad_norm": 112156.96875,
- "learning_rate": 0.00015833333333333332,
- "loss": 1.1822,
+ "grad_norm": 54815.5703125,
+ "learning_rate": 0.00014444444444444444,
+ "loss": 0.1809,
  "step": 40
  },
  {
  "epoch": 1.0,
- "eval_accuracy": 0.8821989528795812,
- "eval_loss": 0.9452220797538757,
- "eval_runtime": 6.3292,
- "eval_samples_per_second": 60.355,
- "eval_steps_per_second": 3.792,
+ "eval_accuracy": 0.9136125654450262,
+ "eval_loss": 0.3543514907360077,
+ "eval_runtime": 6.5641,
+ "eval_samples_per_second": 58.196,
+ "eval_steps_per_second": 3.656,
  "step": 48
  },
  {
  "epoch": 1.0416666666666667,
- "grad_norm": 133726.15625,
- "learning_rate": 0.0001479166666666667,
- "loss": 1.0287,
+ "grad_norm": 68236.78125,
+ "learning_rate": 0.00013055555555555555,
+ "loss": 0.2191,
  "step": 50
  },
  {
  "epoch": 1.25,
- "grad_norm": 108899.1640625,
- "learning_rate": 0.0001375,
- "loss": 0.8519,
+ "grad_norm": 50570.51953125,
+ "learning_rate": 0.00011666666666666668,
+ "loss": 0.1273,
  "step": 60
  },
  {
  "epoch": 1.4583333333333333,
- "grad_norm": 113597.71875,
- "learning_rate": 0.00012708333333333332,
- "loss": 0.7239,
+ "grad_norm": 29818.142578125,
+ "learning_rate": 0.00010277777777777778,
+ "loss": 0.1002,
  "step": 70
  },
  {
  "epoch": 1.6666666666666665,
- "grad_norm": 125612.125,
- "learning_rate": 0.00011666666666666668,
- "loss": 0.6642,
+ "grad_norm": 125871.640625,
+ "learning_rate": 8.888888888888889e-05,
+ "loss": 0.177,
  "step": 80
  },
  {
  "epoch": 1.875,
- "grad_norm": 107212.9453125,
- "learning_rate": 0.00010625000000000001,
- "loss": 0.5747,
+ "grad_norm": 80642.8828125,
+ "learning_rate": 7.500000000000001e-05,
+ "loss": 0.0946,
  "step": 90
  },
  {
  "epoch": 2.0,
- "eval_accuracy": 0.9109947643979057,
- "eval_loss": 0.5519709587097168,
- "eval_runtime": 6.6782,
- "eval_samples_per_second": 57.201,
- "eval_steps_per_second": 3.594,
+ "eval_accuracy": 0.9319371727748691,
+ "eval_loss": 0.29109665751457214,
+ "eval_runtime": 6.6124,
+ "eval_samples_per_second": 57.77,
+ "eval_steps_per_second": 3.63,
  "step": 96
  },
  {
  "epoch": 2.0833333333333335,
- "grad_norm": 43165.94921875,
- "learning_rate": 9.583333333333334e-05,
- "loss": 0.5008,
+ "grad_norm": 7823.91357421875,
+ "learning_rate": 6.111111111111112e-05,
+ "loss": 0.0793,
  "step": 100
  },
  {
  "epoch": 2.2916666666666665,
- "grad_norm": 65004.53515625,
- "learning_rate": 8.541666666666666e-05,
- "loss": 0.4558,
+ "grad_norm": 34183.34375,
+ "learning_rate": 4.722222222222222e-05,
+ "loss": 0.0629,
  "step": 110
  },
  {
  "epoch": 2.5,
- "grad_norm": 167956.78125,
- "learning_rate": 7.500000000000001e-05,
- "loss": 0.3775,
+ "grad_norm": 6095.453125,
+ "learning_rate": 3.3333333333333335e-05,
+ "loss": 0.0408,
  "step": 120
  },
  {
  "epoch": 2.7083333333333335,
- "grad_norm": 93978.6953125,
- "learning_rate": 6.458333333333334e-05,
- "loss": 0.3374,
+ "grad_norm": 222553.59375,
+ "learning_rate": 1.9444444444444445e-05,
+ "loss": 0.076,
  "step": 130
  },
  {
  "epoch": 2.9166666666666665,
- "grad_norm": 200289.0625,
- "learning_rate": 5.4166666666666664e-05,
- "loss": 0.3112,
+ "grad_norm": 138554.953125,
+ "learning_rate": 5.555555555555556e-06,
+ "loss": 0.0625,
  "step": 140
  },
  {
  "epoch": 3.0,
- "eval_accuracy": 0.9345549738219895,
- "eval_loss": 0.39518749713897705,
- "eval_runtime": 6.583,
- "eval_samples_per_second": 58.028,
- "eval_steps_per_second": 3.646,
+ "eval_accuracy": 0.9397905759162304,
+ "eval_loss": 0.2552085518836975,
+ "eval_runtime": 7.0148,
+ "eval_samples_per_second": 54.456,
+ "eval_steps_per_second": 3.421,
  "step": 144
  },
  {
- "epoch": 3.125,
- "grad_norm": 135331.640625,
- "learning_rate": 4.375e-05,
- "loss": 0.3232,
- "step": 150
- },
- {
- "epoch": 3.3333333333333335,
- "grad_norm": 87488.328125,
- "learning_rate": 3.3333333333333335e-05,
- "loss": 0.2519,
- "step": 160
- },
- {
- "epoch": 3.5416666666666665,
- "grad_norm": 56910.69921875,
- "learning_rate": 2.2916666666666667e-05,
- "loss": 0.3035,
- "step": 170
- },
- {
- "epoch": 3.75,
- "grad_norm": 30437.94921875,
- "learning_rate": 1.25e-05,
- "loss": 0.2739,
- "step": 180
- },
- {
- "epoch": 3.9583333333333335,
- "grad_norm": 39517.99609375,
- "learning_rate": 2.0833333333333334e-06,
- "loss": 0.2416,
- "step": 190
- },
- {
- "epoch": 4.0,
- "eval_accuracy": 0.9450261780104712,
- "eval_loss": 0.3590115010738373,
- "eval_runtime": 6.6223,
- "eval_samples_per_second": 57.684,
- "eval_steps_per_second": 3.624,
- "step": 192
- },
- {
- "epoch": 4.0,
- "step": 192,
- "total_flos": 4.72760450039808e+17,
- "train_loss": 0.7488827416673303,
- "train_runtime": 205.0233,
- "train_samples_per_second": 29.753,
- "train_steps_per_second": 0.936
+ "epoch": 3.0,
+ "step": 144,
+ "total_flos": 3.54570337529856e+17,
+ "train_loss": 0.14949214458465576,
+ "train_runtime": 158.5609,
+ "train_samples_per_second": 28.853,
+ "train_steps_per_second": 0.908
  }
  ],
  "logging_steps": 10,
- "max_steps": 192,
+ "max_steps": 144,
  "num_input_tokens_seen": 0,
- "num_train_epochs": 4,
+ "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
  "TrainerControl": {
@@ -204,7 +160,7 @@
  "attributes": {}
  }
  },
- "total_flos": 4.72760450039808e+17,
+ "total_flos": 3.54570337529856e+17,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:dfc3c0fa3829aad813c478aed9bb9b1658fbd9e712fdef79df8c0e858191a457
+ oid sha256:c808b4acc8322444e72603dcec9aa89b3cdc7676daebfc6ee856a2d6b3a59458
  size 5304