Update train.py

pull/1/head
yxNONG 5 years ago committed by GitHub
parent 53cdaf6bf5
commit 1aa2b67933

@@ -147,15 +147,6 @@ def train(hyp):
     # https://discuss.pytorch.org/t/a-problem-occured-when-resuming-an-optimizer/28822
     # plot_lr_scheduler(optimizer, scheduler, epochs)
 
-    # Initialize distributed training
-    if device.type != 'cpu' and torch.cuda.device_count() > 1 and torch.distributed.is_available():
-        dist.init_process_group(backend='nccl',  # distributed backend
-                                init_method='tcp://127.0.0.1:9999',  # init method
-                                world_size=1,  # number of nodes
-                                rank=0)  # node rank
-        model = torch.nn.parallel.DistributedDataParallel(model)
-        # pip install torch==1.4.0+cu100 torchvision==0.5.0+cu100 -f https://download.pytorch.org/whl/torch_stable.html
-
     # Trainloader
     dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt,
                                             hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect)
@@ -174,6 +165,15 @@ def train(hyp):
     model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device)  # attach class weights
     model.names = data_dict['names']
 
+    # Initialize distributed training
+    if device.type != 'cpu' and torch.cuda.device_count() > 1 and torch.distributed.is_available():
+        dist.init_process_group(backend='nccl',  # distributed backend
+                                init_method='tcp://127.0.0.1:9999',  # init method
+                                world_size=1,  # number of nodes
+                                rank=0)  # node rank
+        model = torch.nn.parallel.DistributedDataParallel(model)
+        # pip install torch==1.4.0+cu100 torchvision==0.5.0+cu100 -f https://download.pytorch.org/whl/torch_stable.html
+
     # Class frequency
     labels = np.concatenate(dataset.labels, 0)
     c = torch.tensor(labels[:, 0])  # classes
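The hunk above moves the DistributedDataParallel wrap to after the class weights and names are attached, so that metadata lands on the underlying model rather than on the wrapper. A minimal single-process sketch of why that ordering matters (not part of this diff; the CPU gloo backend, the env-var setup, and the toy nn.Linear are illustration-only assumptions):

import os
import torch.distributed as dist
import torch.nn as nn

# Single-process process group on CPU, for demonstration only.
os.environ.setdefault('MASTER_ADDR', '127.0.0.1')
os.environ.setdefault('MASTER_PORT', '29500')
dist.init_process_group(backend='gloo', world_size=1, rank=0)

model = nn.Linear(4, 2)
model.names = ['person', 'car']  # attach metadata before wrapping, as the hunk above now does
ddp = nn.parallel.DistributedDataParallel(model)

print(hasattr(ddp, 'names'))  # False - the DDP wrapper does not expose plain attributes
print(ddp.module.names)       # ['person', 'car'] - the wrapped model keeps them under .module

dist.destroy_process_group()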
@@ -289,7 +289,7 @@ def train(hyp):
                                              batch_size=batch_size,
                                              imgsz=imgsz_test,
                                              save_json=final_epoch and opt.data.endswith(os.sep + 'coco.yaml'),
-                                             model=ema.ema,
+                                             model=ema.ema.module if hasattr(model, 'module') else ema.ema,
                                              single_cls=opt.single_cls,
                                              dataloader=testloader)
@@ -315,14 +315,6 @@ def train(hyp):
         # Save model
         save = (not opt.nosave) or (final_epoch and not opt.evolve)
         if save:
-            if hasattr(model, 'module'):
-                # Duplicate Model parameters for Multi-GPU save
-                ema.ema.module.nc = model.nc  # attach number of classes to model
-                ema.ema.module.hyp = model.hyp  # attach hyperparameters to model
-                ema.ema.module.gr = model.gr = 1.0  # giou loss ratio (obj_loss = 1.0 or giou)
-                ema.ema.module.class_weights = model.class_weights  # attach class weights
-                ema.ema.module.names = data_dict['names']
-
             with open(results_file, 'r') as f:  # create checkpoint
                 ckpt = {'epoch': epoch,
                         'best_fitness': best_fitness,
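With the test call above passing the unwrapped EMA weights, the per-attribute duplication removed in the last hunk is no longer needed; evaluation and checkpoint saving can share one unwrap pattern. A hedged sketch of that pattern as a standalone helper (the unwrap_model name is hypothetical, not defined in this repository):

import torch.nn as nn

def unwrap_model(model: nn.Module) -> nn.Module:
    # Return the underlying module for DataParallel/DistributedDataParallel wrappers,
    # or the model itself when it is not wrapped.
    return model.module if hasattr(model, 'module') else model

# Usage in the spirit of the diff (illustrative only):
#   results = test.test(..., model=unwrap_model(ema.ema), ...)
#   ckpt['model'] = unwrap_model(ema.ema).state_dict()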
