al-css committed
Commit 4c4df3d · verified · 1 Parent(s): 4d4c645

End of training

Files changed (5)
  1. README.md +2 -1
  2. all_results.json +13 -0
  3. eval_results.json +8 -0
  4. train_results.json +8 -0
  5. trainer_state.json +51 -0
README.md CHANGED
@@ -3,6 +3,7 @@ library_name: transformers
  license: apache-2.0
  base_model: google/vit-base-patch16-224
  tags:
+ - image-classification, screenshots detection
  - generated_from_trainer
  metrics:
  - accuracy
@@ -16,7 +17,7 @@ should probably proofread and complete it, then remove this comment. -->
 
  # Screenshots_detection_to_classification
 
- This model is a fine-tuned version of [google/vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224) on the None dataset.
+ This model is a fine-tuned version of [google/vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224) on the private_images_dataset dataset.
  It achieves the following results on the evaluation set:
  - Loss: 0.1430
  - Accuracy: 0.9756
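
A minimal inference sketch for the card above, using the standard transformers pipeline API. The repo id al-css/Screenshots_detection_to_classification is a hypothetical guess assembled from the commit author and card title; this commit does not confirm the published checkpoint path.

```python
from transformers import pipeline

# Hypothetical repo id inferred from the commit author and card title;
# replace with the actual checkpoint path or a local directory.
classifier = pipeline(
    "image-classification",
    model="al-css/Screenshots_detection_to_classification",
)

# Returns a list of {"label": ..., "score": ...} dicts, best guess first.
print(classifier("example_screenshot.png"))
```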
all_results.json ADDED
@@ -0,0 +1,13 @@
+ {
+     "epoch": 4.0,
+     "eval_accuracy": 0.975609756097561,
+     "eval_loss": 0.14302679896354675,
+     "eval_runtime": 7.6666,
+     "eval_samples_per_second": 10.696,
+     "eval_steps_per_second": 1.435,
+     "total_flos": 1.0042961854051123e+17,
+     "train_loss": 0.0985631244938548,
+     "train_runtime": 148.8516,
+     "train_samples_per_second": 8.707,
+     "train_steps_per_second": 1.102
+ }
eval_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "epoch": 4.0,
+     "eval_accuracy": 0.975609756097561,
+     "eval_loss": 0.14302679896354675,
+     "eval_runtime": 7.6666,
+     "eval_samples_per_second": 10.696,
+     "eval_steps_per_second": 1.435
+ }
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "epoch": 4.0,
+     "total_flos": 1.0042961854051123e+17,
+     "train_loss": 0.0985631244938548,
+     "train_runtime": 148.8516,
+     "train_samples_per_second": 8.707,
+     "train_steps_per_second": 1.102
+ }
trainer_state.json ADDED
@@ -0,0 +1,51 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 4.0,
+   "eval_steps": 500,
+   "global_step": 164,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 4.0,
+       "step": 164,
+       "total_flos": 1.0042961854051123e+17,
+       "train_loss": 0.0985631244938548,
+       "train_runtime": 148.8516,
+       "train_samples_per_second": 8.707,
+       "train_steps_per_second": 1.102
+     },
+     {
+       "epoch": 4.0,
+       "eval_accuracy": 0.975609756097561,
+       "eval_loss": 0.14302679896354675,
+       "eval_runtime": 7.6666,
+       "eval_samples_per_second": 10.696,
+       "eval_steps_per_second": 1.435,
+       "step": 164
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 164,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 4,
+   "save_steps": 500,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": true
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 1.0042961854051123e+17,
+   "train_batch_size": 8,
+   "trial_name": null,
+   "trial_params": null
+ }
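
For context, the four JSON files added in this commit are the usual end-of-run artifacts of the transformers Trainer. A minimal sketch of how such files are typically produced, assuming `trainer` is an already-configured Trainer instance (the commit itself does not include the training script):

```python
# Assumes `trainer` is a configured transformers.Trainer with datasets attached.
train_result = trainer.train()

# Writes train_results.json and, by default, merges into all_results.json.
trainer.save_metrics("train", train_result.metrics)

# Writes eval_results.json (eval_loss, eval_accuracy, runtime stats).
eval_metrics = trainer.evaluate()
trainer.save_metrics("eval", eval_metrics)

# Writes trainer_state.json (log_history, global_step, total_flos, ...).
trainer.save_state()
```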