al-css committed
Commit 638841c · verified · 1 parent: 3426e55

End of training

Files changed (5)
  1. README.md +2 -1
  2. all_results.json +10 -10
  3. eval_results.json +5 -5
  4. train_results.json +5 -5
  5. trainer_state.json +15 -15
README.md CHANGED
@@ -3,6 +3,7 @@ library_name: transformers
 license: apache-2.0
 base_model: google/vit-base-patch16-224
 tags:
+- image-classification, screenshots detection
 - generated_from_trainer
 metrics:
 - accuracy
@@ -16,7 +17,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # Screenshots_detection_to_classification
 
-This model is a fine-tuned version of [google/vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224) on the None dataset.
+This model is a fine-tuned version of [google/vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224) on the private_images_dataset dataset.
 It achieves the following results on the evaluation set:
 - Loss: 0.2538
 - Accuracy: 0.9524
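The card update fills in the dataset name and task tags. For anyone who wants to try the checkpoint, a minimal sketch follows; the repository id `al-css/Screenshots_detection_to_classification` is an assumption inferred from the committer and the card title, and `screenshot.png` is a placeholder path.

```python
# Minimal sketch: query the fine-tuned ViT classifier via the standard
# transformers image-classification pipeline.
from transformers import pipeline

classifier = pipeline(
    "image-classification",
    model="al-css/Screenshots_detection_to_classification",  # assumed repo id
)

# The pipeline accepts a local file path, a URL, or a PIL.Image.
predictions = classifier("screenshot.png")  # placeholder image path
print(predictions)  # list of {"label": ..., "score": ...} dicts
```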
all_results.json CHANGED
@@ -1,13 +1,13 @@
 {
     "epoch": 4.0,
-    "eval_accuracy": 0.975609756097561,
-    "eval_loss": 0.14302679896354675,
-    "eval_runtime": 7.6666,
-    "eval_samples_per_second": 10.696,
-    "eval_steps_per_second": 1.435,
-    "total_flos": 1.0042961854051123e+17,
-    "train_loss": 0.0985631244938548,
-    "train_runtime": 148.8516,
-    "train_samples_per_second": 8.707,
-    "train_steps_per_second": 1.102
+    "eval_accuracy": 0.9523809523809523,
+    "eval_loss": 0.25383907556533813,
+    "eval_runtime": 8.444,
+    "eval_samples_per_second": 9.948,
+    "eval_steps_per_second": 1.303,
+    "total_flos": 1.0352929812509491e+17,
+    "train_loss": 0.13229456402006604,
+    "train_runtime": 163.4534,
+    "train_samples_per_second": 8.174,
+    "train_steps_per_second": 1.028
 }
eval_results.json CHANGED
@@ -1,8 +1,8 @@
 {
     "epoch": 4.0,
-    "eval_accuracy": 0.975609756097561,
-    "eval_loss": 0.14302679896354675,
-    "eval_runtime": 7.6666,
-    "eval_samples_per_second": 10.696,
-    "eval_steps_per_second": 1.435
+    "eval_accuracy": 0.9523809523809523,
+    "eval_loss": 0.25383907556533813,
+    "eval_runtime": 8.444,
+    "eval_samples_per_second": 9.948,
+    "eval_steps_per_second": 1.303
 }
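A quick sanity check on these numbers (my arithmetic, not part of the commit): the throughput fields imply an evaluation set of about 84 images, and the new accuracy is exactly 80/84, i.e. four misclassified examples; the old figures decode the same way as 80 of 82.

```python
# Back-of-the-envelope check on eval_results.json:
# runtime * samples_per_second approximates the eval set size.
n_eval_new = round(8.444 * 9.948)    # 84
n_eval_old = round(7.6666 * 10.696)  # 82

print(80 / 84)  # 0.9523809523809523 -> matches the new eval_accuracy
print(80 / 82)  # 0.975609756097561  -> matches the old eval_accuracy
```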
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
     "epoch": 4.0,
-    "total_flos": 1.0042961854051123e+17,
-    "train_loss": 0.0985631244938548,
-    "train_runtime": 148.8516,
-    "train_samples_per_second": 8.707,
-    "train_steps_per_second": 1.102
+    "total_flos": 1.0352929812509491e+17,
+    "train_loss": 0.13229456402006604,
+    "train_runtime": 163.4534,
+    "train_samples_per_second": 8.174,
+    "train_steps_per_second": 1.028
 }
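The same arithmetic works on the training side: runtime times throughput gives the total number of samples processed across all epochs (again my arithmetic, not part of the commit).

```python
# train_results.json check: runtime * samples/s ≈ total samples seen.
total_seen = 163.4534 * 8.174  # ≈ 1336 samples over the whole run
per_epoch = total_seen / 4     # ≈ 334 examples per epoch
print(round(total_seen), round(per_epoch))
```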
trainer_state.json CHANGED
@@ -3,32 +3,32 @@
   "best_model_checkpoint": null,
   "epoch": 4.0,
   "eval_steps": 500,
-  "global_step": 164,
+  "global_step": 168,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
   "log_history": [
     {
       "epoch": 4.0,
-      "step": 164,
-      "total_flos": 1.0042961854051123e+17,
-      "train_loss": 0.0985631244938548,
-      "train_runtime": 148.8516,
-      "train_samples_per_second": 8.707,
-      "train_steps_per_second": 1.102
+      "step": 168,
+      "total_flos": 1.0352929812509491e+17,
+      "train_loss": 0.13229456402006604,
+      "train_runtime": 163.4534,
+      "train_samples_per_second": 8.174,
+      "train_steps_per_second": 1.028
     },
     {
       "epoch": 4.0,
-      "eval_accuracy": 0.975609756097561,
-      "eval_loss": 0.14302679896354675,
-      "eval_runtime": 7.6666,
-      "eval_samples_per_second": 10.696,
-      "eval_steps_per_second": 1.435,
-      "step": 164
+      "eval_accuracy": 0.9523809523809523,
+      "eval_loss": 0.25383907556533813,
+      "eval_runtime": 8.444,
+      "eval_samples_per_second": 9.948,
+      "eval_steps_per_second": 1.303,
+      "step": 168
     }
   ],
   "logging_steps": 500,
-  "max_steps": 164,
+  "max_steps": 168,
   "num_input_tokens_seen": 0,
   "num_train_epochs": 4,
   "save_steps": 500,
@@ -44,7 +44,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.0042961854051123e+17,
+  "total_flos": 1.0352929812509491e+17,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null