# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Train a YOLOv5 model on a custom dataset

Usage:
    $ python path/to/train.py --data coco128.yaml --weights yolov5s.pt --img 640
"""

import argparse
import math
import os
# ...
RANK = int(os.getenv('RANK', -1))
WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1))


def train(hyp,  # path/to/hyp.yaml or a hyp dictionary
          opt,
          device,
          callbacks
          ):
    # Unpack the options used throughout the training run
    save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze = \
        Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \
        opt.resume, opt.noval, opt.nosave, opt.workers, opt.freeze

    # Hyperparameters
    if isinstance(hyp, str):
        with open(hyp, errors='ignore') as f:  # open with encoding errors ignored
            hyp = yaml.safe_load(f)  # load hyps dict
    LOGGER.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items()))  # log as one string

    # Save run settings
    with open(save_dir / 'hyp.yaml', 'w') as f:
        yaml.safe_dump(hyp, f, sort_keys=False)
    with open(save_dir / 'opt.yaml', 'w') as f:
        yaml.safe_dump(vars(opt), f, sort_keys=False)

    # Loggers
    if RANK in [-1, 0]:
        loggers = Loggers(save_dir, weights, opt, hyp, LOGGER)  # loggers instance
        if loggers.wandb:
            data_dict = loggers.wandb.data_dict
            if resume:
                weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp

        # Register callback actions
        for k in methods(loggers):
            callbacks.register_action(k, callback=getattr(loggers, k))
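        # methods(loggers) enumerates the logger's public hook methods and registers each under its own name,
        # so a later callbacks.run('<hook>') (e.g. 'on_pretrain_routine_end' below) dispatches to the logger.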

    # Config
    plots = not evolve  # create plots (disabled while evolving hyperparameters)
    cuda = device.type != 'cpu'
    init_seeds(1 + RANK)  # seed the RNGs for reproducibility
    with torch_distributed_zero_first(LOCAL_RANK):
        data_dict = data_dict or check_dataset(data)  # check the dataset if None
    train_path, val_path = data_dict['train'], data_dict['val']
    nc = 1 if single_cls else int(data_dict['nc'])  # number of classes
    names = ['item'] if single_cls and len(data_dict['names']) != 1 else data_dict['names']  # class names
    assert len(names) == nc, f'{len(names)} names found for nc={nc} dataset in {data}'  # names must match nc
    is_coco = isinstance(val_path, str) and val_path.endswith('coco/val2017.txt')  # COCO dataset

    # Model
    check_suffix(weights, '.pt')  # check the weights-file suffix
    pretrained = weights.endswith('.pt')
    if pretrained:
        with torch_distributed_zero_first(LOCAL_RANK):
            ...  # (checkpoint download and model creation elided)
    else:
        model = Model(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)  # create

    # Freeze
    freeze = [f'model.{x}.' for x in range(freeze)]  # layers to freeze
    for k, v in model.named_parameters():
        v.requires_grad = True  # train all layers
        if any(x in k for x in freeze):
            LOGGER.info(f'freezing {k}')
            v.requires_grad = False
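    # Worked example (illustrative): with --freeze 10, the prefixes 'model.0.' through 'model.9.' match the
    # backbone layers, so their parameters get requires_grad = False and only the remaining layers train.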

    # Image size
    gs = max(int(model.stride.max()), 32)  # grid size (max stride, 32 minimum)
    imgsz = check_img_size(opt.imgsz, gs, floor=gs * 2)  # verify imgsz is a multiple of gs
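    # Worked example (illustrative, assuming check_img_size rounds up to the nearest multiple of gs): with a
    # max stride of 32, gs = 32, so --img 640 passes through unchanged while --img 100 would become 128.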

    # Batch size
    if RANK == -1 and batch_size == -1:  # single-GPU only, no batch size given: estimate the best one
        batch_size = check_train_batch_size(model, imgsz)
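    # check_train_batch_size (the AutoBatch feature) profiles CUDA memory use for the model at imgsz and
    # returns a batch size that fits the available GPU memory; the exact heuristics live in that function.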

    # Optimizer
    nbs = 64  # nominal batch size
    accumulate = max(round(nbs / batch_size), 1)  # accumulate loss over this many batches before optimizing
    hyp['weight_decay'] *= batch_size * accumulate / nbs  # scale weight_decay to the effective batch size
    LOGGER.info(f"Scaled weight_decay = {hyp['weight_decay']}")
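    # Worked example (illustrative): with batch_size=16, accumulate = max(round(64 / 16), 1) = 4, so gradients
    # accumulate over 4 batches (effective batch size 64) and weight_decay scales by 16 * 4 / 64 = 1.0, i.e. is
    # unchanged; with batch_size=128, accumulate = 1 and weight_decay scales by 128 * 1 / 64 = 2.0.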

    g0, g1, g2 = [], [], []  # optimizer parameter groups
    for v in model.modules():
        if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter):  # bias
            g2.append(v.bias)
        if isinstance(v, nn.BatchNorm2d):  # weight (no decay)
            g0.append(v.weight)
        elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter):  # weight (with decay)
            g1.append(v.weight)

    if opt.adam:  # choose the optimizer
        optimizer = Adam(g0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999))  # adjust beta1 to momentum
    else:
        optimizer = SGD(g0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)

    optimizer.add_param_group({'params': g1, 'weight_decay': hyp['weight_decay']})  # add g1 with weight_decay
    optimizer.add_param_group({'params': g2})  # add g2 (biases, no weight_decay)
    LOGGER.info(f"{colorstr('optimizer:')} {type(optimizer).__name__} with parameter groups "
                f"{len(g0)} weight (no decay), {len(g1)} weight, {len(g2)} bias")
    del g0, g1, g2  # release the temporary lists

    # Scheduler
    if opt.linear_lr:
        lf = lambda x: (1 - x / (epochs - 1)) * (1.0 - hyp['lrf']) + hyp['lrf']  # linear decay to lrf
    else:
        lf = one_cycle(1, hyp['lrf'], epochs)  # cosine 1->hyp['lrf']
    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)  # plot_lr_scheduler(optimizer, scheduler, epochs)
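    # Worked example (illustrative): with epochs=100 and hyp['lrf']=0.1, the linear lambda gives lf(0) = 1.0
    # and lf(99) = 0.1, so the lr decays from lr0 to lr0 * lrf; one_cycle traces the same 1 -> lrf range along
    # a half-cosine instead of a straight line.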

    # EMA: keep an exponential moving average of the model weights
    ema = ModelEMA(model) if RANK in [-1, 0] else None
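    # Sketch of the idea (see ModelEMA for the exact decay ramp): after every optimizer update the shadow
    # weights move as ema_w = d * ema_w + (1 - d) * model_w with d close to 1, and these smoothed weights are
    # what get validated and saved.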

    # Resume
    # ... (checkpoint resume, dataloader and anchor setup elided)

    callbacks.run('on_pretrain_routine_end')

    # DDP mode
    if cuda and RANK != -1:
        model = DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK)
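    # DDP synchronizes gradients across processes on every backward pass; LOCAL_RANK pins this process to its GPU.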

    # ... (epoch and batch loops elided down to the warmup step)
                    if 'momentum' in x:
                        x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']])

            # Multi-scale: randomly rescale each batch if enabled
            if opt.multi_scale:
                sz = random.randrange(int(imgsz * 0.5), int(imgsz * 1.5 + gs)) // gs * gs  # size
                sf = sz / max(imgs.shape[2:])  # scale factor
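                # Worked example (illustrative): with imgsz=640 and gs=32, sz is drawn from [320, 992) and
                # floored to a multiple of 32 (e.g. 430 -> 416), and sf rescales the 640-pixel batch to sz.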

    # ... (forward/backward, validation and checkpointing elided)
            if RANK == -1 and stopper(epoch=epoch, fitness=fi):  # early stopping (single-GPU)
                break

            # Stop DDP TODO: known issues https://github.com/ultralytics/yolov5/pull/4576
            # stop = stopper(epoch=epoch, fitness=fi)
            # if RANK == 0:
            #    dist.broadcast_object_list([stop], 0)  # broadcast 'stop' to all ranks

            # Stop DDP
            # with torch_distributed_zero_first(RANK):
            # if stop:
            #    break  # must break all DDP ranks

        # end epoch ----------------------------------------------------------------------------------------------------
    # end training -----------------------------------------------------------------------------------------------------


def main(opt, callbacks=Callbacks()):
    # ... (checks, DDP setup and training elided)
        LOGGER.info('Destroying process group... ')
        dist.destroy_process_group()

    # Evolve hyperparameters (optional)
    else:
        # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit)
        meta = {
            'lr0': (1, 1e-5, 1e-1),  # initial learning rate (SGD=1E-2, Adam=1E-3)
            'lrf': (1, 0.01, 1.0),  # final OneCycleLR learning rate (lr0 * lrf)
            'momentum': (0.3, 0.6, 0.98),  # SGD momentum/Adam beta1
            'weight_decay': (1, 0.0, 0.001),  # optimizer weight decay
            'warmup_epochs': (1, 0.0, 5.0),  # warmup epochs (fractions ok)
            'warmup_momentum': (1, 0.0, 0.95),  # warmup initial momentum
            'warmup_bias_lr': (1, 0.0, 0.2),  # warmup initial bias lr
            'box': (1, 0.02, 0.2),  # box loss gain
            'cls': (1, 0.2, 4.0),  # cls loss gain
            'cls_pw': (1, 0.5, 2.0),  # cls BCELoss positive_weight
            'obj': (1, 0.2, 4.0),  # obj loss gain (scale with pixels)
            'obj_pw': (1, 0.5, 2.0),  # obj BCELoss positive_weight
            'iou_t': (0, 0.1, 0.7),  # IoU training threshold
            'anchor_t': (1, 2.0, 8.0),  # anchor-multiple threshold
            'anchors': (2, 2.0, 10.0),  # anchors per output grid (0 to ignore)
            'fl_gamma': (0, 0.0, 2.0),  # focal loss gamma (efficientDet default gamma=1.5)
            'hsv_h': (1, 0.0, 0.1),  # image HSV-Hue augmentation (fraction)
            'hsv_s': (1, 0.0, 0.9),  # image HSV-Saturation augmentation (fraction)
            'hsv_v': (1, 0.0, 0.9),  # image HSV-Value augmentation (fraction)
            'degrees': (1, 0.0, 45.0),  # image rotation (+/- deg)
            'translate': (1, 0.0, 0.9),  # image translation (+/- fraction)
            'scale': (1, 0.0, 0.9),  # image scale (+/- gain)
            'shear': (1, 0.0, 10.0),  # image shear (+/- deg)
            'perspective': (0, 0.0, 0.001),  # image perspective (+/- fraction), range 0-0.001
            'flipud': (1, 0.0, 1.0),  # image flip up-down (probability)
            'fliplr': (0, 0.0, 1.0),  # image flip left-right (probability)
            'mosaic': (1, 0.0, 1.0),  # image mosaic (probability)
            'mixup': (1, 0.0, 1.0),  # image mixup (probability)
            'copy_paste': (1, 0.0, 1.0)}  # segment copy-paste (probability)
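        # Sketch of how each meta entry is used per generation (illustrative, not the verbatim evolve loop):
        #
        #     scale, lower, upper = meta['lr0']
        #     if scale > 0:                          # a scale of 0 freezes the hyperparameter
        #         hyp['lr0'] *= random_gain(scale)   # mutation strength grows with scale
        #     hyp['lr0'] = min(max(hyp['lr0'], lower), upper)  # then constrain to [lower, upper]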

        with open(opt.hyp, errors='ignore') as f:
            hyp = yaml.safe_load(f)  # load hyps dict
            if 'anchors' not in hyp:  # anchors commented out in hyp.yaml: fall back to the default
                hyp['anchors'] = 3
        opt.noval, opt.nosave, save_dir = True, True, Path(opt.save_dir)  # only val/save final epoch
        # ei = [isinstance(x, (int, float)) for x in hyp.values()]  # evolvable indices
        evolve_yaml, evolve_csv = save_dir / 'hyp_evolve.yaml', save_dir / 'evolve.csv'  # evolution result paths
        if opt.bucket:
            os.system(f'gsutil cp gs://{opt.bucket}/evolve.csv {save_dir}')  # download evolve.csv if it exists

        for _ in range(opt.evolve):  # generations to evolve
            if evolve_csv.exists():  # if evolve.csv exists: select best hyps and mutate
                # ... (parent selection and mutation elided)


def run(**kwargs):
    # Usage: import train; train.run(data='coco128.yaml', imgsz=320, weights='yolov5m.pt')
    opt = parse_opt(True)
    for k, v in kwargs.items():
        setattr(opt, k, v)
    main(opt)


# python train.py --data mask_data.yaml --cfg mask_yolov5s.yaml --weights pretrained/yolov5s.pt --epoch 100 --batch-size 4 --device cpu
# python train.py --data mask_data.yaml --cfg mask_yolov5l.yaml --weights pretrained/yolov5l.pt --epoch 100 --batch-size 4
# python train.py --data mask_data.yaml --cfg mask_yolov5m.yaml --weights pretrained/yolov5m.pt --epoch 100 --batch-size 4
if __name__ == "__main__":
    opt = parse_opt()
    main(opt)