from clearml import Task
from clearml.automation import HyperParameterOptimizer, UniformParameterRange
from clearml.automation.optuna import OptimizerOptuna

# Connecting ClearML with the current process;
# from here on everything is logged automatically
task = Task.init(
    project_name="Hyper-Parameter Optimization",
    task_name="YOLOv5",
    task_type=Task.TaskTypes.optimizer,
    reuse_last_task_id=False,
)
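
# The template task id used below can also be looked up programmatically; a minimal
# sketch, assuming the base experiment is named "train" inside a "YOLOv5" project:
#   template_task = Task.get_task(project_name="YOLOv5", task_name="train")
#   base_task_id = template_task.id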

# Example use case:
optimizer = HyperParameterOptimizer(
    # This is the experiment we want to optimize
    base_task_id="<your_template_task_id>",
    # Here we define the hyperparameters to optimize.
    # Notice: the parameter name must exactly match what you see in the UI: <section_name>/<parameter>
    # For example, if the base experiment has a section named "General"
    # with a parameter named "batch_size" under it, the name becomes "General/batch_size".
    # If you use `argparse`, the arguments appear under the "Args" section,
    # so you would pass "Args/batch_size" instead.
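    # Categorical values can also be swept; an illustrative sketch (not part of
    # this template; "Args/batch_size" here is a hypothetical argparse argument):
    #   from clearml.automation import DiscreteParameterRange
    #   DiscreteParameterRange("Args/batch_size", values=[16, 32, 64])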
    hyper_parameters=[
        UniformParameterRange(
            "Hyperparameters/lr0", min_value=1e-5, max_value=1e-1
        ),
        UniformParameterRange(
            "Hyperparameters/lrf", min_value=0.01, max_value=1.0
        ),
        UniformParameterRange(
            "Hyperparameters/momentum", min_value=0.6, max_value=0.98
        ),
        UniformParameterRange(
            "Hyperparameters/weight_decay", min_value=0.0, max_value=0.001
        ),
        UniformParameterRange(
            "Hyperparameters/warmup_epochs", min_value=0.0, max_value=5.0
        ),
        UniformParameterRange(
            "Hyperparameters/warmup_momentum", min_value=0.0, max_value=0.95
        ),
        UniformParameterRange(
            "Hyperparameters/warmup_bias_lr", min_value=0.0, max_value=0.2
        ),
        UniformParameterRange(
            "Hyperparameters/box", min_value=0.02, max_value=0.2
        ),
        UniformParameterRange(
            "Hyperparameters/cls", min_value=0.2, max_value=4.0
        ),
        UniformParameterRange(
            "Hyperparameters/cls_pw", min_value=0.5, max_value=2.0
        ),
        UniformParameterRange(
            "Hyperparameters/obj", min_value=0.2, max_value=4.0
        ),
        UniformParameterRange(
            "Hyperparameters/obj_pw", min_value=0.5, max_value=2.0
        ),
        UniformParameterRange(
            "Hyperparameters/iou_t", min_value=0.1, max_value=0.7
        ),
        UniformParameterRange(
            "Hyperparameters/anchor_t", min_value=2.0, max_value=8.0
        ),
        UniformParameterRange(
            "Hyperparameters/fl_gamma", min_value=0.0, max_value=4.0
        ),
        UniformParameterRange(
            "Hyperparameters/hsv_h", min_value=0.0, max_value=0.1
        ),
        UniformParameterRange(
            "Hyperparameters/hsv_s", min_value=0.0, max_value=0.9
        ),
        UniformParameterRange(
            "Hyperparameters/hsv_v", min_value=0.0, max_value=0.9
        ),
        UniformParameterRange(
            "Hyperparameters/degrees", min_value=0.0, max_value=45.0
        ),
        UniformParameterRange(
            "Hyperparameters/translate", min_value=0.0, max_value=0.9
        ),
        UniformParameterRange(
            "Hyperparameters/scale", min_value=0.0, max_value=0.9
        ),
        UniformParameterRange(
            "Hyperparameters/shear", min_value=0.0, max_value=10.0
        ),
        UniformParameterRange(
            "Hyperparameters/perspective", min_value=0.0, max_value=0.001
        ),
        UniformParameterRange(
            "Hyperparameters/flipud", min_value=0.0, max_value=1.0
        ),
        UniformParameterRange(
            "Hyperparameters/fliplr", min_value=0.0, max_value=1.0
        ),
        UniformParameterRange(
            "Hyperparameters/mosaic", min_value=0.0, max_value=1.0
        ),
        UniformParameterRange(
            "Hyperparameters/mixup", min_value=0.0, max_value=1.0
        ),
        UniformParameterRange(
            "Hyperparameters/copy_paste", min_value=0.0, max_value=1.0
        ),
    ],
    # this is the objective metric we want to maximize/minimize
    objective_metric_title="metrics",
    objective_metric_series="mAP_0.5",
    # now we decide whether to maximize or minimize it (mAP we maximize)
    objective_metric_sign="max",
    # limit the number of concurrent experiments so we don't bombard the scheduler;
    # if an auto-scaler is connected, this, by proxy, also caps the number of machines spun up
    max_number_of_concurrent_tasks=1,
    # this is the optimizer class (actually doing the optimization);
    # alternatives include GridSearch, RandomSearch, and OptimizerBOHB (Bayesian Optimization Hyper-Band)
    optimizer_class=OptimizerOptuna,
    # If specified, only the top K performing tasks are kept; the rest are automatically archived
    save_top_k_tasks_only=5,
    # Optional budget controls: total compute time, total number of jobs,
    # and per-job iteration bounds (None means no limit)
    compute_time_limit=None,
    total_max_jobs=20,
    min_iteration_per_job=None,
    max_iteration_per_job=None,
)
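
# A minimal sketch of a per-job completion callback, following the signature
# used in the ClearML examples (optional; see the commented call below):
def job_complete_callback(job_id, objective_value, objective_iteration, job_parameters, top_performance_job_id):
    # Called once per finished experiment with its objective value and parameters
    print(f"Job {job_id} completed, objective={objective_value}")
    if job_id == top_performance_job_id:
        print("This is the best job so far!")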

# report every 10 seconds (the period is given in minutes);
# far too often for real runs, but fine while testing
optimizer.set_report_period(10 / 60)
# To be notified as each job finishes, pass a completion callback (e.g. the sketch above):
# optimizer.start_locally(job_complete_callback=job_complete_callback)
# set the time limit for the optimization process (2 hours)
optimizer.set_time_limit(in_minutes=120.0)
# Start the optimization process locally, without using queues or an agent
optimizer.start_locally()
# wait until process is done (notice we are controlling the optimization process in the background)
optimizer.wait()
# make sure background optimization stopped
optimizer.stop()
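
# Optionally inspect the best runs; a sketch using get_top_experiments(),
# which returns the Task objects of the top-performing jobs:
for top_task in optimizer.get_top_experiments(top_k=3):
    print(top_task.id, top_task.name)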

print("We are done, good bye")