fayetitchenal committed
Commit c08686d
1 Parent(s): 153836a

Training in progress, step 1000, checkpoint
checkpoint-1000/config.json ADDED
@@ -0,0 +1,92 @@
+ {
+   "_name_or_path": "nvidia/mit-b5",
+   "architectures": [
+     "SegformerForSemanticSegmentation"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "classifier_dropout_prob": 0.1,
+   "decoder_hidden_size": 768,
+   "depths": [
+     3,
+     6,
+     40,
+     3
+   ],
+   "downsampling_rates": [
+     1,
+     4,
+     8,
+     16
+   ],
+   "drop_path_rate": 0.1,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_sizes": [
+     64,
+     128,
+     320,
+     512
+   ],
+   "id2label": {
+     "0": "background",
+     "1": "double_plant",
+     "2": "drydown",
+     "3": "endrow",
+     "4": "nutrient_deficiency",
+     "5": "planter_skip",
+     "6": "water",
+     "7": "waterway",
+     "8": "weed_cluster"
+   },
+   "image_size": 224,
+   "initializer_range": 0.02,
+   "label2id": {
+     "background": 0,
+     "double_plant": 1,
+     "drydown": 2,
+     "endrow": 3,
+     "nutrient_deficiency": 4,
+     "planter_skip": 5,
+     "water": 6,
+     "waterway": 7,
+     "weed_cluster": 8
+   },
+   "layer_norm_eps": 1e-06,
+   "mlp_ratios": [
+     4,
+     4,
+     4,
+     4
+   ],
+   "model_type": "segformer",
+   "num_attention_heads": [
+     1,
+     2,
+     5,
+     8
+   ],
+   "num_channels": 4,
+   "num_encoder_blocks": 4,
+   "patch_sizes": [
+     7,
+     3,
+     3,
+     3
+   ],
+   "reshape_last_stage": true,
+   "semantic_loss_ignore_index": 255,
+   "sr_ratios": [
+     8,
+     4,
+     2,
+     1
+   ],
+   "strides": [
+     4,
+     2,
+     2,
+     2
+   ],
+   "torch_dtype": "float32",
+   "transformers_version": "4.40.2"
+ }
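
For reference, a minimal sketch (not part of this commit) of rebuilding the model from the configuration above with the transformers library. The local path is a placeholder, and reading the four input channels as RGB plus one extra band is an assumption.

from transformers import SegformerConfig, SegformerForSemanticSegmentation

# Load the config.json shown above from a local clone of the checkpoint
# (the path is a placeholder, not a repository identifier).
config = SegformerConfig.from_pretrained("checkpoint-1000")

# num_channels is 4, so inputs need four channels (assumed: RGB + one extra
# band such as NIR); the head predicts the nine classes listed in id2label.
model = SegformerForSemanticSegmentation(config)
print(config.num_labels, config.num_channels)  # 9 4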
checkpoint-1000/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9465671e94063e34efaa3b09a485f92c02f1db5627ad7425fc2c02bfa2c39f8b
+ size 338562516
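
The entry above is a Git LFS pointer rather than the weights themselves; the actual ~338 MB safetensors file is fetched with git lfs pull. A small sketch (an illustration, not part of the commit) for verifying a downloaded copy against the oid recorded in the pointer, with a placeholder local path:

import hashlib

expected = "9465671e94063e34efaa3b09a485f92c02f1db5627ad7425fc2c02bfa2c39f8b"

# Stream the file in 1 MiB chunks and compare its SHA-256 to the LFS oid.
sha = hashlib.sha256()
with open("checkpoint-1000/model.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)

print(sha.hexdigest() == expected)  # True only if the full object was fetched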
checkpoint-1000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:35164507a0fe2a0b48e19357da3000657d86d54bca8a99be8dbab3a73507a5cd
+ size 677518143
checkpoint-1000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c5c0b128aa404a033d5d55942f7c499dacdfa5b9b4f53ecf392f269b11553450
+ size 14575
checkpoint-1000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:41b78c57b67aa6106855254359fc42bcbe7643b7a59a04559e65fedf65f2fbf5
+ size 563
checkpoint-1000/trainer_state.json ADDED
@@ -0,0 +1,93 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 0.16666666666666666,
+   "eval_steps": 500,
+   "global_step": 1000,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.08333333333333333,
+       "grad_norm": 10.64107608795166,
+       "learning_rate": 2.4999999999999998e-06,
+       "loss": 4.1627,
+       "step": 500
+     },
+     {
+       "epoch": 0.08333333333333333,
+       "eval_accuracy_background": 0.014990974836900402,
+       "eval_accuracy_double_plant": 0.09625850817128659,
+       "eval_accuracy_drydown": 0.11748866381009407,
+       "eval_accuracy_endrow": 0.019624906998737878,
+       "eval_accuracy_nutrient_deficiency": 0.39043216558572647,
+       "eval_accuracy_planter_skip": 0.03244089779472675,
+       "eval_accuracy_water": 0.4902746667077927,
+       "eval_accuracy_waterway": 0.37818186421979577,
+       "eval_accuracy_weed_cluster": 0.5157577782392276,
+       "eval_iou_background": 0.01485215844086368,
+       "eval_iou_double_plant": 0.025830652956995166,
+       "eval_iou_drydown": 0.09098524126821965,
+       "eval_iou_endrow": 0.011190399377301194,
+       "eval_iou_nutrient_deficiency": 0.16172838363740288,
+       "eval_iou_planter_skip": 0.007145844854739052,
+       "eval_iou_water": 0.052629358314533047,
+       "eval_iou_waterway": 0.008009602169192399,
+       "eval_iou_weed_cluster": 0.055193229922437775,
+       "eval_loss": 3.9555599689483643,
+       "eval_mean_accuracy": 0.22838338070714315,
+       "eval_mean_iou": 0.047507207882409426,
+       "eval_overall_accuracy": 0.09656532361323403,
+       "eval_runtime": 348.036,
+       "eval_samples_per_second": 8.166,
+       "eval_steps_per_second": 2.724,
+       "step": 500
+     },
+     {
+       "epoch": 0.16666666666666666,
+       "grad_norm": 12.677851676940918,
+       "learning_rate": 4.9999999999999996e-06,
+       "loss": 3.7698,
+       "step": 1000
+     },
+     {
+       "epoch": 0.16666666666666666,
+       "eval_accuracy_background": 0.29766305371935275,
+       "eval_accuracy_double_plant": 0.04300475016987658,
+       "eval_accuracy_drydown": 0.7567223885695242,
+       "eval_accuracy_endrow": 0.006538411852764754,
+       "eval_accuracy_nutrient_deficiency": 0.5373960102281351,
+       "eval_accuracy_planter_skip": 0.1493345947032268,
+       "eval_accuracy_water": 0.2900425128140623,
+       "eval_accuracy_waterway": 0.37062750835043584,
+       "eval_accuracy_weed_cluster": 0.242435460451953,
+       "eval_iou_background": 0.2681855463534359,
+       "eval_iou_double_plant": 0.02937987848319999,
+       "eval_iou_drydown": 0.30192234804386486,
+       "eval_iou_endrow": 0.006310524335748097,
+       "eval_iou_nutrient_deficiency": 0.19394253102070813,
+       "eval_iou_planter_skip": 0.08231306246610917,
+       "eval_iou_water": 0.10160883330705332,
+       "eval_iou_waterway": 0.050415164283578985,
+       "eval_iou_weed_cluster": 0.07122746911823011,
+       "eval_loss": 3.33693528175354,
+       "eval_mean_accuracy": 0.29930718787325905,
+       "eval_mean_iou": 0.12281170637910319,
+       "eval_overall_accuracy": 0.37769782397579943,
+       "eval_runtime": 260.2627,
+       "eval_samples_per_second": 10.92,
+       "eval_steps_per_second": 3.642,
+       "step": 1000
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 120000,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 20,
+   "save_steps": 500,
+   "total_flos": 1.596835724525568e+18,
+   "train_batch_size": 3,
+   "trial_name": null,
+   "trial_params": null
+ }
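
As a sanity check (an illustration, not part of the commit), the logged eval_mean_iou at step 1000 is the unweighted mean of the nine per-class IoU values recorded above:

# Per-class IoU at step 1000, copied from the trainer state above.
per_class_iou = {
    "background": 0.2681855463534359,
    "double_plant": 0.02937987848319999,
    "drydown": 0.30192234804386486,
    "endrow": 0.006310524335748097,
    "nutrient_deficiency": 0.19394253102070813,
    "planter_skip": 0.08231306246610917,
    "water": 0.10160883330705332,
    "waterway": 0.050415164283578985,
    "weed_cluster": 0.07122746911823011,
}

mean_iou = sum(per_class_iou.values()) / len(per_class_iou)
print(round(mean_iou, 6))  # 0.122812, matching eval_mean_iou = 0.1228117...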
checkpoint-1000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8a680a40140b6ca5fa47ac87d8120967d6766117ead261eaa84c073e1e9509b8
+ size 4539
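
training_args.bin holds the serialized TrainingArguments for this run. A hedged sketch for inspecting it from a trusted local clone (the path is a placeholder, and weights_only=False assumes a full PyTorch unpickle of this local file is acceptable):

import torch

# Load the pickled TrainingArguments saved alongside the checkpoint.
args = torch.load("checkpoint-1000/training_args.bin", weights_only=False)
print(args.per_device_train_batch_size, args.num_train_epochs, args.save_steps)

# With the original model and datasets wired into a Trainer, the run could be
# resumed from this checkpoint, e.g.:
# trainer.train(resume_from_checkpoint="checkpoint-1000")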