diff --git a/train.py b/train.py
index 4fa9005..df5e1ed 100644
--- a/train.py
+++ b/train.py
@@ -148,7 +148,7 @@ def train(hyp):
     scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
     scheduler.last_epoch = start_epoch - 1  # do not move
     # https://discuss.pytorch.org/t/a-problem-occured-when-resuming-an-optimizer/28822
-    # plot_lr_scheduler(optimizer, scheduler, epochs)
+    plot_lr_scheduler(optimizer, scheduler, epochs, save_dir=log_dir)
 
     # Initialize distributed training
     if device.type != 'cpu' and torch.cuda.device_count() > 1 and torch.distributed.is_available():
diff --git a/utils/utils.py b/utils/utils.py
index 95d1198..8ac73e3 100755
--- a/utils/utils.py
+++ b/utils/utils.py
@@ -1005,7 +1005,7 @@ def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max
     return mosaic
 
 
-def plot_lr_scheduler(optimizer, scheduler, epochs=300):
+def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir='./'):
     # Plot LR simulating training for full epochs
     optimizer, scheduler = copy(optimizer), copy(scheduler)  # do not modify originals
     y = []
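
For context, a minimal sketch of how the extended plot_lr_scheduler would consume the new save_dir argument. Only the signature change and the enabled call in train() appear in the hunks above; the loop that simulates scheduler.step() per epoch and the matplotlib savefig call are assumed to follow the existing utils.py implementation and are not part of this diff.

```python
# Hedged sketch, not the exact upstream body: assumes plot_lr_scheduler steps a
# copied scheduler once per simulated epoch and saves the LR curve with matplotlib.
import os
from copy import copy

import matplotlib.pyplot as plt


def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir='./'):
    # Plot LR simulating training for full epochs
    optimizer, scheduler = copy(optimizer), copy(scheduler)  # do not modify originals
    y = []
    for _ in range(epochs):
        scheduler.step()
        y.append(optimizer.param_groups[0]['lr'])
    plt.plot(y, '.-', label='LR')
    plt.xlabel('epoch')
    plt.ylabel('LR')
    plt.tight_layout()
    # save_dir (added in this diff) controls where the figure is written instead
    # of always saving to the current working directory
    plt.savefig(os.path.join(save_dir, 'LR.png'), dpi=200)
```

With this change, train() passes save_dir=log_dir, so the LR plot lands in the run's log directory alongside the other training artifacts rather than in the working directory.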