cyanjing committed
Commit 667f701
Parent(s): 02c6072

End of training

README.md CHANGED
@@ -3,6 +3,8 @@ library_name: transformers
 license: apache-2.0
 base_model: google/vit-base-patch16-224-in21k
 tags:
+- image-classification
+- vision
 - generated_from_trainer
 metrics:
 - accuracy
@@ -16,10 +18,10 @@ should probably proofread and complete it, then remove this comment. -->
 
 # vit-base-beans
 
-This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on an unknown dataset.
+This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the beans dataset.
 It achieves the following results on the evaluation set:
-- Accuracy: 0.9850
-- Loss: 0.0830
+- Loss: 0.0628
+- Accuracy: 0.9925
 
 ## Model description
 
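With the `image-classification` and `vision` tags added, the model is discoverable through the standard `transformers` pipeline API. A minimal inference sketch: the repo id `cyanjing/vit-base-beans` is an assumption pieced together from the committer name and the card title, and `bean_leaf.jpg` is a placeholder input.

```python
from transformers import pipeline

# "cyanjing/vit-base-beans" is an assumed repo id (committer + card title);
# substitute the real one if it differs.
classifier = pipeline("image-classification", model="cyanjing/vit-base-beans")

# "bean_leaf.jpg" is a placeholder path; the beans dataset's three labels
# are angular_leaf_spot, bean_rust, and healthy.
for pred in classifier("bean_leaf.jpg"):
    print(f"{pred['label']}: {pred['score']:.4f}")
```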
all_results.json CHANGED
@@ -2,12 +2,12 @@
     "epoch": 5.0,
     "eval_accuracy": 0.9924812030075187,
     "eval_loss": 0.06277719885110855,
-    "eval_runtime": 13.0497,
-    "eval_samples_per_second": 10.192,
-    "eval_steps_per_second": 1.303,
+    "eval_runtime": 6.6631,
+    "eval_samples_per_second": 19.961,
+    "eval_steps_per_second": 2.551,
     "total_flos": 4.006371770595533e+17,
-    "train_loss": 0.21926501512527466,
-    "train_runtime": 1727.4415,
-    "train_samples_per_second": 2.993,
-    "train_steps_per_second": 0.376
+    "train_loss": 0.0,
+    "train_runtime": 0.0789,
+    "train_samples_per_second": 65533.623,
+    "train_steps_per_second": 8239.237
 }
config.json CHANGED
@@ -28,6 +28,7 @@
   "num_channels": 3,
   "num_hidden_layers": 12,
   "patch_size": 16,
+  "problem_type": "single_label_classification",
   "qkv_bias": true,
   "torch_dtype": "float32",
   "transformers_version": "4.46.0.dev0"
eval_results.json CHANGED
@@ -2,7 +2,7 @@
     "epoch": 5.0,
     "eval_accuracy": 0.9924812030075187,
     "eval_loss": 0.06277719885110855,
-    "eval_runtime": 13.0497,
-    "eval_samples_per_second": 10.192,
-    "eval_steps_per_second": 1.303
+    "eval_runtime": 6.6631,
+    "eval_samples_per_second": 19.961,
+    "eval_steps_per_second": 2.551
 }
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
     "epoch": 5.0,
     "total_flos": 4.006371770595533e+17,
-    "train_loss": 0.21926501512527466,
-    "train_runtime": 1727.4415,
-    "train_samples_per_second": 2.993,
-    "train_steps_per_second": 0.376
+    "train_loss": 0.0,
+    "train_runtime": 0.0789,
+    "train_samples_per_second": 65533.623,
+    "train_steps_per_second": 8239.237
 }
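The rewritten training numbers deserve a second look: a `train_loss` of 0.0 over a 0.08-second runtime is characteristic of re-invoking `trainer.train()` on a run whose 650 steps were already complete, so only the final bookkeeping executes (consistent with the "End of training" commit message). A sketch of the standard `Trainer` calls that emit these JSON files, assuming `trainer` is an already-configured `transformers.Trainer`:

```python
from transformers import Trainer

def finalize_run(trainer: Trainer) -> None:
    # Resuming a finished checkpoint is a near no-op, which yields
    # train_loss 0.0 and a sub-second train_runtime as seen above.
    train_result = trainer.train(resume_from_checkpoint=True)
    trainer.save_model()

    trainer.log_metrics("train", train_result.metrics)
    trainer.save_metrics("train", train_result.metrics)  # train_results.json (+ all_results.json)

    metrics = trainer.evaluate()
    trainer.log_metrics("eval", metrics)
    trainer.save_metrics("eval", metrics)                # eval_results.json (+ all_results.json)

    trainer.save_state()                                 # trainer_state.json
```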
trainer_state.json CHANGED
@@ -512,10 +512,10 @@
       "epoch": 5.0,
       "step": 650,
       "total_flos": 4.006371770595533e+17,
-      "train_loss": 0.21926501512527466,
-      "train_runtime": 1727.4415,
-      "train_samples_per_second": 2.993,
-      "train_steps_per_second": 0.376
+      "train_loss": 0.0,
+      "train_runtime": 0.0789,
+      "train_samples_per_second": 65533.623,
+      "train_steps_per_second": 8239.237
     }
   ],
   "logging_steps": 10,