al-css committed on
Commit
75c04f7
·
verified ·
1 Parent(s): 616e636

End of training

Browse files
Files changed (5) hide show
  1. README.md +2 -1
  2. all_results.json +10 -10
  3. eval_results.json +5 -5
  4. train_results.json +5 -5
  5. trainer_state.json +11 -11
README.md CHANGED
@@ -3,6 +3,7 @@ library_name: transformers
3
  license: apache-2.0
4
  base_model: google/vit-base-patch16-224
5
  tags:
 
6
  - generated_from_trainer
7
  metrics:
8
  - accuracy
@@ -16,7 +17,7 @@ should probably proofread and complete it, then remove this comment. -->
16
 
17
  # Screenshots_detection_to_classification
18
 
19
- This model is a fine-tuned version of [google/vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224) on the None dataset.
20
  It achieves the following results on the evaluation set:
21
  - Loss: 0.1192
22
  - Accuracy: 0.9881
 
3
  license: apache-2.0
4
  base_model: google/vit-base-patch16-224
5
  tags:
6
+ - image-classification, screenshots detection
7
  - generated_from_trainer
8
  metrics:
9
  - accuracy
 
17
 
18
  # Screenshots_detection_to_classification
19
 
20
+ This model is a fine-tuned version of [google/vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224) on the private_images_dataset dataset.
21
  It achieves the following results on the evaluation set:
22
  - Loss: 0.1192
23
  - Accuracy: 0.9881
all_results.json CHANGED
@@ -1,13 +1,13 @@
1
  {
2
  "epoch": 4.0,
3
- "eval_accuracy": 0.9523809523809523,
4
- "eval_loss": 0.25383907556533813,
5
- "eval_runtime": 8.444,
6
- "eval_samples_per_second": 9.948,
7
- "eval_steps_per_second": 1.303,
8
- "total_flos": 1.0352929812509491e+17,
9
- "train_loss": 0.13229456402006604,
10
- "train_runtime": 163.4534,
11
- "train_samples_per_second": 8.174,
12
- "train_steps_per_second": 1.028
13
  }
 
1
  {
2
  "epoch": 4.0,
3
+ "eval_accuracy": 0.9880952380952381,
4
+ "eval_loss": 0.11922617256641388,
5
+ "eval_runtime": 9.0858,
6
+ "eval_samples_per_second": 9.245,
7
+ "eval_steps_per_second": 1.211,
8
+ "total_flos": 1.0290936220817818e+17,
9
+ "train_loss": 0.11647429920378186,
10
+ "train_runtime": 188.7378,
11
+ "train_samples_per_second": 7.036,
12
+ "train_steps_per_second": 0.89
13
  }
eval_results.json CHANGED
@@ -1,8 +1,8 @@
1
  {
2
  "epoch": 4.0,
3
- "eval_accuracy": 0.9523809523809523,
4
- "eval_loss": 0.25383907556533813,
5
- "eval_runtime": 8.444,
6
- "eval_samples_per_second": 9.948,
7
- "eval_steps_per_second": 1.303
8
  }
 
1
  {
2
  "epoch": 4.0,
3
+ "eval_accuracy": 0.9880952380952381,
4
+ "eval_loss": 0.11922617256641388,
5
+ "eval_runtime": 9.0858,
6
+ "eval_samples_per_second": 9.245,
7
+ "eval_steps_per_second": 1.211
8
  }
train_results.json CHANGED
@@ -1,8 +1,8 @@
1
  {
2
  "epoch": 4.0,
3
- "total_flos": 1.0352929812509491e+17,
4
- "train_loss": 0.13229456402006604,
5
- "train_runtime": 163.4534,
6
- "train_samples_per_second": 8.174,
7
- "train_steps_per_second": 1.028
8
  }
 
1
  {
2
  "epoch": 4.0,
3
+ "total_flos": 1.0290936220817818e+17,
4
+ "train_loss": 0.11647429920378186,
5
+ "train_runtime": 188.7378,
6
+ "train_samples_per_second": 7.036,
7
+ "train_steps_per_second": 0.89
8
  }
trainer_state.json CHANGED
@@ -11,19 +11,19 @@
11
  {
12
  "epoch": 4.0,
13
  "step": 168,
14
- "total_flos": 1.0352929812509491e+17,
15
- "train_loss": 0.13229456402006604,
16
- "train_runtime": 163.4534,
17
- "train_samples_per_second": 8.174,
18
- "train_steps_per_second": 1.028
19
  },
20
  {
21
  "epoch": 4.0,
22
- "eval_accuracy": 0.9523809523809523,
23
- "eval_loss": 0.25383907556533813,
24
- "eval_runtime": 8.444,
25
- "eval_samples_per_second": 9.948,
26
- "eval_steps_per_second": 1.303,
27
  "step": 168
28
  }
29
  ],
@@ -44,7 +44,7 @@
44
  "attributes": {}
45
  }
46
  },
47
- "total_flos": 1.0352929812509491e+17,
48
  "train_batch_size": 8,
49
  "trial_name": null,
50
  "trial_params": null
 
11
  {
12
  "epoch": 4.0,
13
  "step": 168,
14
+ "total_flos": 1.0290936220817818e+17,
15
+ "train_loss": 0.11647429920378186,
16
+ "train_runtime": 188.7378,
17
+ "train_samples_per_second": 7.036,
18
+ "train_steps_per_second": 0.89
19
  },
20
  {
21
  "epoch": 4.0,
22
+ "eval_accuracy": 0.9880952380952381,
23
+ "eval_loss": 0.11922617256641388,
24
+ "eval_runtime": 9.0858,
25
+ "eval_samples_per_second": 9.245,
26
+ "eval_steps_per_second": 1.211,
27
  "step": 168
28
  }
29
  ],
 
44
  "attributes": {}
45
  }
46
  },
47
+ "total_flos": 1.0290936220817818e+17,
48
  "train_batch_size": 8,
49
  "trial_name": null,
50
  "trial_params": null