@@ -43,7 +43,9 @@ hyp = {'lr0': 0.01,  # initial learning rate (SGD=1E-2, Adam=1E-3)
       'translate': 0.0,  # image translation (+/- fraction)
       'scale': 0.5,  # image scale (+/- gain)
       'shear': 0.0}  # image shear (+/- deg)
print(hyp)

# Don't need to be printing every time
#print(hyp)

# Overwrite hyp with hyp*.txt (optional)
f = glob.glob('hyp*.txt')
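The hunk above only shows the glob call that looks for a hyp*.txt override file; the code that applies it lies outside the hunk. A minimal sketch of how such a file could overwrite the defaults, assuming (not shown in the diff) that it stores one numeric value per key in the same order as the hyp dict:

import glob
import numpy as np

hyp = {'lr0': 0.01, 'translate': 0.0, 'scale': 0.5, 'shear': 0.0}  # subset of the dict above, for illustration

f = glob.glob('hyp*.txt')  # optional override file, e.g. from an earlier hyperparameter search
if f:
    print('Overwriting hyp with %s' % f[0])
    values = np.loadtxt(f[0]).ravel()  # one value per key, in dict order (assumed file format)
    for k, v in zip(hyp.keys(), values):
        hyp[k] = float(v)  # file values replace the built-in defaults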
@@ -382,10 +384,12 @@ if __name__ == '__main__':
    parser.add_argument('--adam', action='store_true', help='use adam optimizer')
    parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%')
    parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset')
    parser.add_argument('--hyp', type=str, default='', help='path to hyp yaml file')
    opt = parser.parse_args()
    opt.weights = last if opt.resume else opt.weights
    opt.cfg = check_file(opt.cfg)  # check file
    opt.data = check_file(opt.data)  # check file
    opt.hyp = check_file(opt.hyp)  # check file
    print(opt)
    opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size)))  # extend to 2 sizes (train, test)
    device = torch_utils.select_device(opt.device, apex=mixed_precision, batch_size=opt.batch_size)
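In this hunk the new --hyp path is only validated with check_file(); the diff does not show how the file is read. A minimal sketch of one way the yaml could be merged over the default hyp dict (load_hyp and its call site are hypothetical, not part of the patch):

import yaml  # PyYAML

def load_hyp(path, defaults):
    # Hypothetical helper: merge values from a hyp yaml file over the default hyp dict.
    if not path:  # --hyp defaults to '', i.e. keep the built-in values
        return dict(defaults)
    with open(path) as f:
        overrides = yaml.safe_load(f) or {}
    merged = dict(defaults)
    merged.update(overrides)  # file values take precedence over the defaults
    return merged

# hypothetical call site, after parse_args() and check_file():
# hyp = load_hyp(opt.hyp, hyp)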