Add `python train.py --freeze N` argument (#4238)
* Add freeze as an argument
I train on different platforms, and sometimes I want to freeze some layers. Currently I have to edit the code each time and keep track of how many layers I froze on which platform. Please add the number of layers to freeze as a command-line argument in future versions. Thanks.
* Update train.py
* Update train.py
* Cleanup
Co-authored-by: Glenn Jocher <[email protected]>
train.py CHANGED
@@ -53,9 +53,9 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
           opt,
           device,
           ):
-    save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, = \
+    save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze, = \
         Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \
-        opt.resume, opt.noval, opt.nosave, opt.workers
+        opt.resume, opt.noval, opt.nosave, opt.workers, opt.freeze
 
     # Directories
     w = save_dir / 'weights'  # weights dir
@@ -111,7 +111,7 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
     model = Model(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)  # create
 
     # Freeze
-    freeze = []  # parameter names to freeze (full or partial)
+    freeze = [f'model.{x}.' for x in range(freeze)]  # layers to freeze
     for k, v in model.named_parameters():
         v.requires_grad = True  # train all layers
         if any(x in k for x in freeze):
@@ -442,6 +442,7 @@ def parse_opt(known=False):
     parser.add_argument('--save_period', type=int, default=-1, help='Log model after every "save_period" epoch')
     parser.add_argument('--artifact_alias', type=str, default="latest", help='version of dataset artifact to be used')
     parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify')
+    parser.add_argument('--freeze', type=int, default=0, help='Number of layers to freeze. backbone=10, all=24')
     opt = parser.parse_known_args()[0] if known else parser.parse_args()
     return opt
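For reference, the freeze mechanism above works by prefix-matching parameter names: YOLOv5 submodules are registered as model.0, model.1, ..., so building the list ['model.0.', 'model.1.', ...] from the --freeze count and clearing requires_grad on any matching parameter excludes those layers from training. A minimal standalone sketch of the same idea follows; the TinyNet class and the n = 2 value are illustrative stand-ins, not part of train.py.

import torch.nn as nn

# Toy stand-in for a YOLO model: parameters are registered under 'model.<index>.'
# (illustrative only; the real model comes from models/yolo.py)
class TinyNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.Conv2d(8, 16, 3), nn.Conv2d(16, 32, 3))

model = TinyNet()
n = 2  # e.g. the value passed via --freeze
freeze = [f'model.{x}.' for x in range(n)]  # name prefixes of layers to freeze
for k, v in model.named_parameters():
    v.requires_grad = True  # train all layers by default
    if any(x in k for x in freeze):
        print(f'freezing {k}')
        v.requires_grad = False  # frozen layers receive no gradient updates

With n = 2, parameters such as model.0.weight and model.1.bias are frozen while model.2.* keeps training, which mirrors how --freeze 10 would freeze the YOLOv5 backbone per the argparse help text.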