robertsw committed on
Commit
1b6151e
1 Parent(s): 60fa8e9

robertsw/aesthetics_v1

Browse files
Files changed (5) hide show
  1. README.md +94 -0
  2. config.json +167 -0
  3. model.safetensors +3 -0
  4. preprocessor_config.json +27 -0
  5. training_args.bin +3 -0
README.md ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: apache-2.0
3
+ base_model: facebook/dinov2-large
4
+ tags:
5
+ - generated_from_trainer
6
+ datasets:
7
+ - imagefolder
8
+ metrics:
9
+ - accuracy
10
+ model-index:
11
+ - name: aesthetics_v2
12
+ results:
13
+ - task:
14
+ name: Image Classification
15
+ type: image-classification
16
+ dataset:
17
+ name: imagefolder
18
+ type: imagefolder
19
+ config: default
20
+ split: train
21
+ args: default
22
+ metrics:
23
+ - name: Accuracy
24
+ type: accuracy
25
+ value: 0.5580614847630554
26
+ ---
27
+
28
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
29
+ should probably proofread and complete it, then remove this comment. -->
30
+
31
+ # aesthetics_v2
32
+
33
+ This model is a fine-tuned version of [facebook/dinov2-large](https://huggingface.co/facebook/dinov2-large) on the imagefolder dataset.
34
+ It achieves the following results on the evaluation set:
35
+ - Loss: 1.6501
36
+ - Accuracy: 0.5581
37
+
38
+ ## Model description
39
+
40
+ More information needed
41
+
42
+ ## Intended uses & limitations
43
+
44
+ More information needed
45
+
46
+ ## Training and evaluation data
47
+
48
+ More information needed
49
+
50
+ ## Training procedure
51
+
52
+ ### Training hyperparameters
53
+
54
+ The following hyperparameters were used during training:
55
+ - learning_rate: 5e-05
56
+ - train_batch_size: 64
57
+ - eval_batch_size: 64
58
+ - seed: 42
59
+ - gradient_accumulation_steps: 4
60
+ - total_train_batch_size: 256
61
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
62
+ - lr_scheduler_type: linear
63
+ - lr_scheduler_warmup_ratio: 0.1
64
+ - num_epochs: 3
65
+
66
+ ### Training results
67
+
68
+ | Training Loss | Epoch | Step | Validation Loss | Accuracy |
69
+ |:-------------:|:-----:|:----:|:---------------:|:--------:|
70
+ | 1.1465 | 0.17 | 20 | 1.6860 | 0.5313 |
71
+ | 1.2703 | 0.34 | 40 | 1.8412 | 0.5014 |
72
+ | 1.3152 | 0.52 | 60 | 1.8200 | 0.5042 |
73
+ | 1.2313 | 0.69 | 80 | 1.7971 | 0.5112 |
74
+ | 1.3476 | 0.86 | 100 | 1.7649 | 0.5100 |
75
+ | 1.2597 | 1.03 | 120 | 1.7454 | 0.5175 |
76
+ | 1.0094 | 1.2 | 140 | 1.7356 | 0.5257 |
77
+ | 0.9743 | 1.37 | 160 | 1.7074 | 0.5352 |
78
+ | 1.0209 | 1.55 | 180 | 1.7331 | 0.5322 |
79
+ | 1.0692 | 1.72 | 200 | 1.7370 | 0.5331 |
80
+ | 1.0556 | 1.89 | 220 | 1.6788 | 0.5487 |
81
+ | 0.8634 | 2.06 | 240 | 1.6644 | 0.5536 |
82
+ | 0.79 | 2.23 | 260 | 1.6848 | 0.5531 |
83
+ | 0.7916 | 2.4 | 280 | 1.6761 | 0.5528 |
84
+ | 0.7454 | 2.58 | 300 | 1.6520 | 0.5534 |
85
+ | 0.7497 | 2.75 | 320 | 1.6337 | 0.5554 |
86
+ | 0.7537 | 2.92 | 340 | 1.6501 | 0.5581 |
87
+
88
+
89
+ ### Framework versions
90
+
91
+ - Transformers 4.38.2
92
+ - Pytorch 2.2.0
93
+ - Datasets 2.17.1
94
+ - Tokenizers 0.15.2
config.json ADDED
@@ -0,0 +1,167 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "facebook/dinov2-large",
3
+ "apply_layernorm": true,
4
+ "architectures": [
5
+ "Dinov2ForImageClassification"
6
+ ],
7
+ "attention_probs_dropout_prob": 0.0,
8
+ "drop_path_rate": 0.0,
9
+ "hidden_act": "gelu",
10
+ "hidden_dropout_prob": 0.0,
11
+ "hidden_size": 1024,
12
+ "id2label": {
13
+ "0": "Indie",
14
+ "1": "Kawaii",
15
+ "10": "boho",
16
+ "11": "classic",
17
+ "12": "classiccore",
18
+ "13": "clean",
19
+ "14": "coquette",
20
+ "15": "corporate",
21
+ "16": "cottagecore",
22
+ "17": "cowboycore",
23
+ "18": "cyberpunk",
24
+ "19": "dark_academia",
25
+ "2": "VSCO",
26
+ "20": "e-girl",
27
+ "21": "europecore",
28
+ "22": "fairy",
29
+ "23": "gorpcore",
30
+ "24": "goth",
31
+ "25": "grunge",
32
+ "26": "it",
33
+ "27": "it_girl",
34
+ "28": "kidcore",
35
+ "29": "mermaidcore",
36
+ "3": "Y2K",
37
+ "30": "minimalistic",
38
+ "31": "mob_wife",
39
+ "32": "monochrome",
40
+ "33": "normcore",
41
+ "34": "office",
42
+ "35": "office_siren",
43
+ "36": "old_money",
44
+ "37": "pearlcore",
45
+ "38": "preppy",
46
+ "39": "punk",
47
+ "4": "acubi",
48
+ "40": "romcom",
49
+ "41": "royalcore",
50
+ "42": "softgirl",
51
+ "43": "stealth_wealth",
52
+ "44": "streetwear",
53
+ "45": "techwear",
54
+ "46": "tomato",
55
+ "47": "twee",
56
+ "48": "vanilla",
57
+ "49": "vintage",
58
+ "5": "art",
59
+ "50": "westerncore",
60
+ "6": "athleisure",
61
+ "7": "balletcore",
62
+ "8": "barbiecore",
63
+ "9": "bohemian"
64
+ },
65
+ "image_size": 518,
66
+ "initializer_range": 0.02,
67
+ "label2id": {
68
+ "Indie": "0",
69
+ "Kawaii": "1",
70
+ "VSCO": "2",
71
+ "Y2K": "3",
72
+ "acubi": "4",
73
+ "art": "5",
74
+ "athleisure": "6",
75
+ "balletcore": "7",
76
+ "barbiecore": "8",
77
+ "bohemian": "9",
78
+ "boho": "10",
79
+ "classic": "11",
80
+ "classiccore": "12",
81
+ "clean": "13",
82
+ "coquette": "14",
83
+ "corporate": "15",
84
+ "cottagecore": "16",
85
+ "cowboycore": "17",
86
+ "cyberpunk": "18",
87
+ "dark_academia": "19",
88
+ "e-girl": "20",
89
+ "europecore": "21",
90
+ "fairy": "22",
91
+ "gorpcore": "23",
92
+ "goth": "24",
93
+ "grunge": "25",
94
+ "it": "26",
95
+ "it_girl": "27",
96
+ "kidcore": "28",
97
+ "mermaidcore": "29",
98
+ "minimalistic": "30",
99
+ "mob_wife": "31",
100
+ "monochrome": "32",
101
+ "normcore": "33",
102
+ "office": "34",
103
+ "office_siren": "35",
104
+ "old_money": "36",
105
+ "pearlcore": "37",
106
+ "preppy": "38",
107
+ "punk": "39",
108
+ "romcom": "40",
109
+ "royalcore": "41",
110
+ "softgirl": "42",
111
+ "stealth_wealth": "43",
112
+ "streetwear": "44",
113
+ "techwear": "45",
114
+ "tomato": "46",
115
+ "twee": "47",
116
+ "vanilla": "48",
117
+ "vintage": "49",
118
+ "westerncore": "50"
119
+ },
120
+ "layer_norm_eps": 1e-06,
121
+ "layerscale_value": 1.0,
122
+ "mlp_ratio": 4,
123
+ "model_type": "dinov2",
124
+ "num_attention_heads": 16,
125
+ "num_channels": 3,
126
+ "num_hidden_layers": 24,
127
+ "out_features": [
128
+ "stage24"
129
+ ],
130
+ "out_indices": [
131
+ 24
132
+ ],
133
+ "patch_size": 14,
134
+ "problem_type": "single_label_classification",
135
+ "qkv_bias": true,
136
+ "reshape_hidden_states": true,
137
+ "stage_names": [
138
+ "stem",
139
+ "stage1",
140
+ "stage2",
141
+ "stage3",
142
+ "stage4",
143
+ "stage5",
144
+ "stage6",
145
+ "stage7",
146
+ "stage8",
147
+ "stage9",
148
+ "stage10",
149
+ "stage11",
150
+ "stage12",
151
+ "stage13",
152
+ "stage14",
153
+ "stage15",
154
+ "stage16",
155
+ "stage17",
156
+ "stage18",
157
+ "stage19",
158
+ "stage20",
159
+ "stage21",
160
+ "stage22",
161
+ "stage23",
162
+ "stage24"
163
+ ],
164
+ "torch_dtype": "float32",
165
+ "transformers_version": "4.38.2",
166
+ "use_swiglu_ffn": false
167
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f82b051ece02fec589140071dfe4ba01a248afd23df02c5d6138f12940326687
3
+ size 1217944124
preprocessor_config.json ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "crop_size": {
3
+ "height": 224,
4
+ "width": 224
5
+ },
6
+ "do_center_crop": true,
7
+ "do_convert_rgb": true,
8
+ "do_normalize": true,
9
+ "do_rescale": true,
10
+ "do_resize": true,
11
+ "image_mean": [
12
+ 0.485,
13
+ 0.456,
14
+ 0.406
15
+ ],
16
+ "image_processor_type": "BitImageProcessor",
17
+ "image_std": [
18
+ 0.229,
19
+ 0.224,
20
+ 0.225
21
+ ],
22
+ "resample": 3,
23
+ "rescale_factor": 0.00392156862745098,
24
+ "size": {
25
+ "shortest_edge": 256
26
+ }
27
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:84f4a995195aadfbb28612acc417e19029378bc33f7fb2d5441ab349d3225531
3
+ size 4856