groderg committed
Commit a0f16d5
1 Parent(s): 28887f8

Evaluation on the test set completed on 2024_10_25.

README.md ADDED
@@ -0,0 +1,64 @@
+ ---
+ license: apache-2.0
+ base_model: facebook/dinov2-large
+ tags:
+ - generated_from_trainer
+ metrics:
+ - accuracy
+ model-index:
+ - name: DinoVdeau-large-2024_10_25-prova_batch-size8_freeze_monolabel
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # DinoVdeau-large-2024_10_25-prova_batch-size8_freeze_monolabel
+
+ This model is a fine-tuned version of [facebook/dinov2-large](https://huggingface.co/facebook/dinov2-large) on the None dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 2.8116
+ - F1 Micro: 0.5
+ - F1 Macro: 0.2126
+ - Accuracy: 0.5
+ - Learning Rate: 0.001
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.001
+ - train_batch_size: 8
+ - eval_batch_size: 8
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 1
+ - mixed_precision_training: Native AMP
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | F1 Micro | F1 Macro | Accuracy | Learning Rate |
+ |:-------------:|:-----:|:----:|:---------------:|:--------:|:--------:|:--------:|:-------------:|
+ | No log | 1.0 | 7 | 2.7061 | 0.5 | 0.2790 | 0.5 | 0.001 |
+
+
+ ### Framework versions
+
+ - Transformers 4.41.0
+ - Pytorch 2.5.0+cu124
+ - Datasets 3.0.2
+ - Tokenizers 0.19.1
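
For convenience, here is a minimal inference sketch using the Transformers API; it is not part of this commit. The checkpoint path and example image are placeholders: point them at a local clone of this repository (or its Hub repo id) and at any RGB image.

```python
# Hypothetical usage sketch (not part of this commit): classify one image with the
# fine-tuned checkpoint. "checkpoint" and "example.jpg" below are placeholders.
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForImageClassification

checkpoint = "path/to/DinoVdeau-large-2024_10_25-prova_batch-size8_freeze_monolabel"

processor = AutoImageProcessor.from_pretrained(checkpoint)
model = AutoModelForImageClassification.from_pretrained(checkpoint)
model.eval()

image = Image.open("example.jpg").convert("RGB")
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

predicted_id = logits.argmax(-1).item()
print(model.config.id2label[predicted_id])  # one of the 23 labels, e.g. "ALGAE"
```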
all_results.json ADDED
@@ -0,0 +1,16 @@
+ {
+     "epoch": 1.0,
+     "eval_accuracy": 0.5,
+     "eval_f1_macro": 0.21258234519104083,
+     "eval_f1_micro": 0.5,
+     "eval_loss": 2.811582088470459,
+     "eval_runtime": 6.5102,
+     "eval_samples_per_second": 7.68,
+     "eval_steps_per_second": 1.075,
+     "learning_rate": 0.001,
+     "total_flos": 7.41394877560092e+16,
+     "train_loss": 3.289393833705357,
+     "train_runtime": 16.2028,
+     "train_samples_per_second": 3.086,
+     "train_steps_per_second": 0.432
+ }
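
all_results.json merges the training and evaluation summaries. The accuracy and micro/macro F1 values are the standard single-label classification metrics; a sketch of the kind of `compute_metrics` hook that produces them is shown below (scikit-learn is an assumption here, the actual training script is not included in this commit).

```python
# Sketch of a Trainer compute_metrics hook producing accuracy and micro/macro F1.
# scikit-learn is an assumption; the real training script is not part of this commit.
import numpy as np
from sklearn.metrics import accuracy_score, f1_score

def compute_metrics(eval_pred):
    logits, labels = eval_pred
    preds = np.argmax(logits, axis=-1)
    return {
        "accuracy": accuracy_score(labels, preds),
        "f1_micro": f1_score(labels, preds, average="micro"),
        "f1_macro": f1_score(labels, preds, average="macro"),
    }
```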
config.json ADDED
@@ -0,0 +1,111 @@
+ {
+     "_name_or_path": "facebook/dinov2-large",
+     "apply_layernorm": true,
+     "architectures": [
+         "Dinov2ForImageClassification"
+     ],
+     "attention_probs_dropout_prob": 0.0,
+     "drop_path_rate": 0.0,
+     "hidden_act": "gelu",
+     "hidden_dropout_prob": 0.0,
+     "hidden_size": 1024,
+     "id2label": {
+         "0": "ALGAE",
+         "1": "Acr",
+         "2": "Acr_Br",
+         "3": "Anem",
+         "4": "CCA",
+         "5": "Ech",
+         "6": "Fts",
+         "7": "Gal",
+         "8": "Gon",
+         "9": "Mtp",
+         "10": "P",
+         "11": "Poc",
+         "12": "Por",
+         "13": "R",
+         "14": "RDC",
+         "15": "S",
+         "16": "SG",
+         "17": "Sarg",
+         "18": "Ser",
+         "19": "Slt",
+         "20": "Sp",
+         "21": "Turf",
+         "22": "UNK"
+     },
+     "image_size": 518,
+     "initializer_range": 0.02,
+     "label2id": {
+         "ALGAE": 0,
+         "Acr": 1,
+         "Acr_Br": 2,
+         "Anem": 3,
+         "CCA": 4,
+         "Ech": 5,
+         "Fts": 6,
+         "Gal": 7,
+         "Gon": 8,
+         "Mtp": 9,
+         "P": 10,
+         "Poc": 11,
+         "Por": 12,
+         "R": 13,
+         "RDC": 14,
+         "S": 15,
+         "SG": 16,
+         "Sarg": 17,
+         "Ser": 18,
+         "Slt": 19,
+         "Sp": 20,
+         "Turf": 21,
+         "UNK": 22
+     },
+     "layer_norm_eps": 1e-06,
+     "layerscale_value": 1.0,
+     "mlp_ratio": 4,
+     "model_type": "dinov2",
+     "num_attention_heads": 16,
+     "num_channels": 3,
+     "num_hidden_layers": 24,
+     "out_features": [
+         "stage24"
+     ],
+     "out_indices": [
+         24
+     ],
+     "patch_size": 14,
+     "problem_type": "single_label_classification",
+     "qkv_bias": true,
+     "reshape_hidden_states": true,
+     "stage_names": [
+         "stem",
+         "stage1",
+         "stage2",
+         "stage3",
+         "stage4",
+         "stage5",
+         "stage6",
+         "stage7",
+         "stage8",
+         "stage9",
+         "stage10",
+         "stage11",
+         "stage12",
+         "stage13",
+         "stage14",
+         "stage15",
+         "stage16",
+         "stage17",
+         "stage18",
+         "stage19",
+         "stage20",
+         "stage21",
+         "stage22",
+         "stage23",
+         "stage24"
+     ],
+     "torch_dtype": "float32",
+     "transformers_version": "4.41.0",
+     "use_swiglu_ffn": false
+ }
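
config.json defines a 23-class single-label head (`problem_type: single_label_classification`) over the benthic categories in `id2label`. The `freeze` suffix in the model name suggests the DINOv2 backbone was kept frozen and only the classification head was trained; a sketch of that common setup, assuming the standard `Dinov2ForImageClassification` attribute names, follows.

```python
# Sketch of a frozen-backbone setup. That the backbone was frozen is inferred from
# the "freeze" suffix in the model name, not stated anywhere in this commit.
from transformers import Dinov2ForImageClassification

model = Dinov2ForImageClassification.from_pretrained(
    "facebook/dinov2-large",
    num_labels=23,  # id2label/label2id as in config.json would normally be passed too
)

# Freeze the DINOv2 transformer; only the linear classifier head remains trainable.
for param in model.dinov2.parameters():
    param.requires_grad = False
```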
logs/events.out.tfevents.1729856222.bioeos-Precision-7770 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cd991d46260b11f3f8c128cc827e97633f8a85b965d7266a8e80b77a0abebddf
+ size 7171
logs/events.out.tfevents.1729856660.bioeos-Precision-7770 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:91441d013a5e6ae109406f394d5f9ed39fc99336a95466d77e7506e4c40701fa
+ size 7171
logs/events.out.tfevents.1729857208.bioeos-Precision-7770 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9674bb12b823f311c2195946767fa03445dc6ced1ee96ec852242708c715a382
+ size 40
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9dcc64b5ee1c42955a3225cdec05b3efb1da6e46068ccf51f2d903314641f651
+ size 1228091092
preprocessor_config.json ADDED
@@ -0,0 +1,45 @@
+ {
+     "_valid_processor_keys": [
+         "images",
+         "do_resize",
+         "size",
+         "resample",
+         "do_center_crop",
+         "crop_size",
+         "do_rescale",
+         "rescale_factor",
+         "do_normalize",
+         "image_mean",
+         "image_std",
+         "do_convert_rgb",
+         "return_tensors",
+         "data_format",
+         "input_data_format"
+     ],
+     "crop_size": {
+         "height": 224,
+         "width": 224
+     },
+     "do_center_crop": false,
+     "do_convert_rgb": true,
+     "do_normalize": true,
+     "do_rescale": true,
+     "do_resize": true,
+     "image_mean": [
+         0.485,
+         0.456,
+         0.406
+     ],
+     "image_processor_type": "BitImageProcessor",
+     "image_std": [
+         0.229,
+         0.224,
+         0.225
+     ],
+     "resample": 3,
+     "rescale_factor": 0.00392156862745098,
+     "size": {
+         "height": 518,
+         "width": 518
+     }
+ }
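
The image processor resizes inputs to 518×518 (center cropping is disabled), rescales pixel values by 1/255 and normalizes with the ImageNet mean/std. A quick way to reload and inspect it is sketched below; the path "." is a placeholder for a local clone of this repository.

```python
# Sketch: reload the stored preprocessor and check its settings.
# "." is a placeholder for a local clone of this repository.
from transformers import AutoImageProcessor

processor = AutoImageProcessor.from_pretrained(".")
print(processor.size)            # {'height': 518, 'width': 518}
print(processor.do_center_crop)  # False
print(processor.image_mean, processor.image_std)  # ImageNet statistics
```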
runs/events.out.tfevents.1729855678.bioeos-Precision-7770 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:04fb570cba4a9e0e0102bf302da0da5b36dd4f479bd4cb9b9b5779072b9d6bab
+ size 40
runs/events.out.tfevents.1729856650.bioeos-Precision-7770 ADDED
File without changes
test_results.json ADDED
@@ -0,0 +1,11 @@
+ {
+     "epoch": 1.0,
+     "eval_accuracy": 0.5,
+     "eval_f1_macro": 0.21258234519104083,
+     "eval_f1_micro": 0.5,
+     "eval_loss": 2.811582088470459,
+     "eval_runtime": 6.5102,
+     "eval_samples_per_second": 7.68,
+     "eval_steps_per_second": 1.075,
+     "learning_rate": 0.001
+ }
train_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+     "epoch": 1.0,
+     "learning_rate": 0.001,
+     "total_flos": 7.41394877560092e+16,
+     "train_loss": 3.289393833705357,
+     "train_runtime": 16.2028,
+     "train_samples_per_second": 3.086,
+     "train_steps_per_second": 0.432
+ }
trainer_state.json ADDED
@@ -0,0 +1,64 @@
+ {
+     "best_metric": 2.7060546875,
+     "best_model_checkpoint": "/home/bioeos/Documents/project_hub/DinoVdeau/models/DinoVdeau-large-2024_10_25-prova_batch-size8_freeze_monolabel/checkpoint-7",
+     "epoch": 1.0,
+     "eval_steps": 500,
+     "global_step": 7,
+     "is_hyper_param_search": false,
+     "is_local_process_zero": true,
+     "is_world_process_zero": true,
+     "log_history": [
+         {
+             "epoch": 1.0,
+             "eval_accuracy": 0.5,
+             "eval_f1_macro": 0.2789772727272727,
+             "eval_f1_micro": 0.5,
+             "eval_loss": 2.7060546875,
+             "eval_runtime": 6.5056,
+             "eval_samples_per_second": 7.686,
+             "eval_steps_per_second": 1.076,
+             "learning_rate": 0.001,
+             "step": 7
+         },
+         {
+             "epoch": 1.0,
+             "learning_rate": 0.001,
+             "step": 7,
+             "total_flos": 7.41394877560092e+16,
+             "train_loss": 3.289393833705357,
+             "train_runtime": 16.2028,
+             "train_samples_per_second": 3.086,
+             "train_steps_per_second": 0.432
+         }
+     ],
+     "logging_steps": 500,
+     "max_steps": 7,
+     "num_input_tokens_seen": 0,
+     "num_train_epochs": 1,
+     "save_steps": 500,
+     "stateful_callbacks": {
+         "EarlyStoppingCallback": {
+             "args": {
+                 "early_stopping_patience": 10,
+                 "early_stopping_threshold": 0.0
+             },
+             "attributes": {
+                 "early_stopping_patience_counter": 0
+             }
+         },
+         "TrainerControl": {
+             "args": {
+                 "should_epoch_stop": false,
+                 "should_evaluate": false,
+                 "should_log": false,
+                 "should_save": true,
+                 "should_training_stop": true
+             },
+             "attributes": {}
+         }
+     },
+     "total_flos": 7.41394877560092e+16,
+     "train_batch_size": 8,
+     "trial_name": null,
+     "trial_params": null
+ }
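
trainer_state.json records an `EarlyStoppingCallback` with a patience of 10 evaluations and a threshold of 0.0; with a single epoch of 7 steps it never triggered. Below is a sketch of how such a callback is typically wired into a `Trainer`. The model, datasets and metrics hook are placeholders, and only the values that also appear in the README hyperparameters come from this commit.

```python
# Sketch: attach early stopping (patience 10) to a Trainer, mirroring trainer_state.json.
# model, train_ds, val_ds and compute_metrics are placeholders (e.g. the frozen-backbone
# model and the metrics hook sketched earlier); they are not part of this commit.
from transformers import Trainer, TrainingArguments, EarlyStoppingCallback

args = TrainingArguments(
    output_dir="out",
    learning_rate=1e-3,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    num_train_epochs=1,
    eval_strategy="epoch",        # early stopping needs periodic evaluation
    save_strategy="epoch",
    load_best_model_at_end=True,  # required by EarlyStoppingCallback
    metric_for_best_model="eval_loss",
    fp16=True,                    # "Native AMP" in the README
)

trainer = Trainer(
    model=model,
    args=args,
    train_dataset=train_ds,
    eval_dataset=val_ds,
    compute_metrics=compute_metrics,
    callbacks=[EarlyStoppingCallback(early_stopping_patience=10)],
)
trainer.train()
```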
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bae2aa06aa6821d3aa45c5b980a80016ca182dc97ab306c3e0172de93fba6c31
+ size 5304
transforms.json ADDED
@@ -0,0 +1,44 @@
+ {
+     "train_transforms": [
+         {
+             "operation": "PreProcess"
+         },
+         {
+             "operation": "Resize",
+             "probability": 1.0
+         },
+         {
+             "operation": "RandomHorizontalFlip",
+             "probability": 0.25
+         },
+         {
+             "operation": "RandomVerticalFlip",
+             "probability": 0.25
+         },
+         {
+             "operation": "ColorJiggle",
+             "probability": 0.25
+         },
+         {
+             "operation": "RandomPerspective",
+             "probability": 0.25
+         },
+         {
+             "operation": "Normalize",
+             "probability": 1.0
+         }
+     ],
+     "val_transforms": [
+         {
+             "operation": "PreProcess"
+         },
+         {
+             "operation": "Resize",
+             "probability": 1.0
+         },
+         {
+             "operation": "Normalize",
+             "probability": 1.0
+         }
+     ]
+ }
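
The operation names in transforms.json (`ColorJiggle`, `RandomPerspective`, ...) match Kornia's augmentation API, so the training pipeline was presumably a Kornia-style stack applied on top of the preprocessing step. The sketch below reproduces the listed operations and probabilities; Kornia itself and the jitter/perspective magnitudes are assumptions, since the JSON records only operation names and probabilities.

```python
# Sketch of the train-time augmentation stack described in transforms.json.
# Kornia is assumed from the operation names (requires a recent Kornia release for
# ColorJiggle); the jitter/perspective magnitudes below are illustrative defaults.
import torch
import kornia.augmentation as K

train_augs = torch.nn.Sequential(
    K.Resize((518, 518)),
    K.RandomHorizontalFlip(p=0.25),
    K.RandomVerticalFlip(p=0.25),
    K.ColorJiggle(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.1, p=0.25),
    K.RandomPerspective(distortion_scale=0.5, p=0.25),
    K.Normalize(mean=torch.tensor([0.485, 0.456, 0.406]),
                std=torch.tensor([0.229, 0.224, 0.225])),
)

# Expects float image tensors of shape (B, 3, H, W) scaled to [0, 1].
batch = torch.rand(4, 3, 600, 600)
augmented = train_augs(batch)
```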