phuong-tk-nguyen committed
Commit 0f50716 · 1 Parent(s): fe0c68e

Model save

Files changed (5)
  1. README.md +112 -0
  2. config.json +77 -0
  3. model.safetensors +3 -0
  4. preprocessor_config.json +22 -0
  5. training_args.bin +3 -0
README.md ADDED
@@ -0,0 +1,112 @@
+ ---
+ license: apache-2.0
+ base_model: microsoft/swin-base-patch4-window7-224-in22k
+ tags:
+ - generated_from_trainer
+ datasets:
+ - imagefolder
+ metrics:
+ - accuracy
+ model-index:
+ - name: swin-base-patch4-window7-224-in22k-finetuned-cifar10
+   results:
+   - task:
+       name: Image Classification
+       type: image-classification
+     dataset:
+       name: imagefolder
+       type: imagefolder
+       config: default
+       split: train
+       args: default
+     metrics:
+     - name: Accuracy
+       type: accuracy
+       value: 0.9858
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # swin-base-patch4-window7-224-in22k-finetuned-cifar10
+
+ This model is a fine-tuned version of [microsoft/swin-base-patch4-window7-224-in22k](https://huggingface.co/microsoft/swin-base-patch4-window7-224-in22k) on the imagefolder dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.0414
+ - Accuracy: 0.9858
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 32
+ - eval_batch_size: 32
+ - seed: 42
+ - gradient_accumulation_steps: 4
+ - total_train_batch_size: 128
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 1
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Accuracy |
+ |:-------------:|:-----:|:----:|:---------------:|:--------:|
+ | 2.303 | 0.03 | 10 | 2.1672 | 0.2334 |
+ | 2.0158 | 0.06 | 20 | 1.6672 | 0.657 |
+ | 1.4855 | 0.09 | 30 | 0.8292 | 0.8704 |
+ | 0.7451 | 0.11 | 40 | 0.2578 | 0.93 |
+ | 0.5618 | 0.14 | 50 | 0.1476 | 0.962 |
+ | 0.4545 | 0.17 | 60 | 0.1248 | 0.9642 |
+ | 0.4587 | 0.2 | 70 | 0.0941 | 0.9748 |
+ | 0.3911 | 0.23 | 80 | 0.0944 | 0.9712 |
+ | 0.3839 | 0.26 | 90 | 0.0848 | 0.9756 |
+ | 0.3864 | 0.28 | 100 | 0.0744 | 0.978 |
+ | 0.3141 | 0.31 | 110 | 0.0673 | 0.98 |
+ | 0.3764 | 0.34 | 120 | 0.0706 | 0.9764 |
+ | 0.3003 | 0.37 | 130 | 0.0600 | 0.984 |
+ | 0.3566 | 0.4 | 140 | 0.0562 | 0.9826 |
+ | 0.2855 | 0.43 | 150 | 0.0567 | 0.9816 |
+ | 0.3351 | 0.45 | 160 | 0.0543 | 0.9828 |
+ | 0.2977 | 0.48 | 170 | 0.0568 | 0.9798 |
+ | 0.2924 | 0.51 | 180 | 0.0577 | 0.9804 |
+ | 0.2884 | 0.54 | 190 | 0.0551 | 0.983 |
+ | 0.3067 | 0.57 | 200 | 0.0487 | 0.983 |
+ | 0.3159 | 0.6 | 210 | 0.0513 | 0.984 |
+ | 0.2795 | 0.63 | 220 | 0.0460 | 0.9846 |
+ | 0.3113 | 0.65 | 230 | 0.0495 | 0.9832 |
+ | 0.2882 | 0.68 | 240 | 0.0475 | 0.9838 |
+ | 0.263 | 0.71 | 250 | 0.0449 | 0.9854 |
+ | 0.2686 | 0.74 | 260 | 0.0510 | 0.9826 |
+ | 0.2705 | 0.77 | 270 | 0.0483 | 0.9846 |
+ | 0.2807 | 0.8 | 280 | 0.0430 | 0.9854 |
+ | 0.2583 | 0.82 | 290 | 0.0452 | 0.9858 |
+ | 0.2346 | 0.85 | 300 | 0.0435 | 0.9858 |
+ | 0.2294 | 0.88 | 310 | 0.0434 | 0.986 |
+ | 0.2608 | 0.91 | 320 | 0.0433 | 0.986 |
+ | 0.2642 | 0.94 | 330 | 0.0425 | 0.9866 |
+ | 0.2781 | 0.97 | 340 | 0.0417 | 0.986 |
+ | 0.247 | 1.0 | 350 | 0.0414 | 0.9858 |
+
+
+ ### Framework versions
+
+ - Transformers 4.35.0
+ - Pytorch 2.1.1
+ - Datasets 2.14.6
+ - Tokenizers 0.14.1
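
For quick reference, here is a minimal inference sketch using the `transformers` image-classification pipeline. The repository id below is assumed from the committer name and the model-index entry, and the image path is a placeholder; neither comes from this commit.

```python
# Minimal sketch (assumed repo id and sample image, not part of this commit).
from PIL import Image
from transformers import pipeline

classifier = pipeline(
    "image-classification",
    model="phuong-tk-nguyen/swin-base-patch4-window7-224-in22k-finetuned-cifar10",  # assumed repo id
)

image = Image.open("example.png").convert("RGB")  # hypothetical sample image
for pred in classifier(image, top_k=3):
    # Labels are the ten CIFAR-10 classes defined in config.json
    print(f"{pred['label']}: {pred['score']:.4f}")
```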
config.json ADDED
@@ -0,0 +1,77 @@
+ {
+   "_name_or_path": "microsoft/swin-base-patch4-window7-224-in22k",
+   "architectures": [
+     "SwinForImageClassification"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "depths": [
+     2,
+     2,
+     18,
+     2
+   ],
+   "drop_path_rate": 0.1,
+   "embed_dim": 128,
+   "encoder_stride": 32,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 1024,
+   "id2label": {
+     "0": "Airplane",
+     "1": "Automobile",
+     "2": "Bird",
+     "3": "Cat",
+     "4": "Deer",
+     "5": "Dog",
+     "6": "Frog",
+     "7": "Horse",
+     "8": "Ship",
+     "9": "Truck"
+   },
+   "image_size": 224,
+   "initializer_range": 0.02,
+   "label2id": {
+     "Airplane": 0,
+     "Automobile": 1,
+     "Bird": 2,
+     "Cat": 3,
+     "Deer": 4,
+     "Dog": 5,
+     "Frog": 6,
+     "Horse": 7,
+     "Ship": 8,
+     "Truck": 9
+   },
+   "layer_norm_eps": 1e-05,
+   "mlp_ratio": 4.0,
+   "model_type": "swin",
+   "num_channels": 3,
+   "num_heads": [
+     4,
+     8,
+     16,
+     32
+   ],
+   "num_layers": 4,
+   "out_features": [
+     "stage4"
+   ],
+   "out_indices": [
+     4
+   ],
+   "patch_size": 4,
+   "path_norm": true,
+   "problem_type": "single_label_classification",
+   "qkv_bias": true,
+   "stage_names": [
+     "stem",
+     "stage1",
+     "stage2",
+     "stage3",
+     "stage4"
+   ],
+   "torch_dtype": "float32",
+   "transformers_version": "4.35.0",
+   "use_absolute_embeddings": false,
+   "window_size": 7
+ }
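
As a sanity check on the configuration above, a short sketch that loads `config.json` with `AutoConfig` and inspects the CIFAR-10 label mapping. It assumes the repository has been cloned locally so the file resolves from the current directory.

```python
# Sketch, assuming this repository is cloned into the current directory.
from transformers import AutoConfig, AutoModelForImageClassification

config = AutoConfig.from_pretrained(".")  # parses config.json
print(config.model_type)        # "swin"
print(config.id2label[3])       # "Cat"
print(config.label2id["Ship"])  # 8

# from_config builds the architecture with random weights; use
# from_pretrained(".") to also load model.safetensors.
model = AutoModelForImageClassification.from_config(config)
```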
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0b121fdc3383bdaf42fab0046ef3cd2b1ed02716bc2e515b8b712b7f6090841e
+ size 347531616
preprocessor_config.json ADDED
@@ -0,0 +1,22 @@
+ {
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.485,
+     0.456,
+     0.406
+   ],
+   "image_processor_type": "ViTImageProcessor",
+   "image_std": [
+     0.229,
+     0.224,
+     0.225
+   ],
+   "resample": 3,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "height": 224,
+     "width": 224
+   }
+ }
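
The preprocessing this file describes (resize to 224×224, rescale by 1/255, normalise with the ImageNet mean/std) can be reproduced with `ViTImageProcessor`. The sketch below assumes the file is available in the current directory and uses a placeholder image.

```python
# Sketch, assuming preprocessor_config.json is in the current directory.
from PIL import Image
from transformers import ViTImageProcessor

processor = ViTImageProcessor.from_pretrained(".")  # reads preprocessor_config.json

image = Image.open("example.png").convert("RGB")  # hypothetical sample image
inputs = processor(images=image, return_tensors="pt")

# 224x224 resize, 1/255 rescale, ImageNet mean/std normalisation applied
print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])
```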
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e5ca702b59ed032851ccc2fa57d8ef38a9e73e34a95916e0c8c5d21a62c91976
+ size 4600