|
|
|
@@ -82,18 +82,19 @@ class Model(nn.Module):
|
|
|
|
|
def forward(self, x, augment=False, profile=False):
    """Run the model on a batch of images.

    Args:
        x: input image tensor — assumed (batch, channels, height, width);
            TODO confirm against caller.
        augment: if True, run test-time augmentation — three passes
            (identity, lr-flipped at 0.83 scale, 0.67 scale), with each
            pass's boxes de-scaled/de-flipped back to the original image
            frame before concatenation.
        profile: forwarded to forward_once for per-layer profiling
            (single-scale path only).

    Returns:
        Tuple (inference_output, train_output). On the augmented path the
        train output is None; otherwise whatever forward_once returns.
    """
    if augment:
        img_size = x.shape[-2:]  # height, width
        s = [1, 0.83, 0.67]  # scales per pass
        f = [None, 3, None]  # flip dims per pass (2 = up-down, 3 = left-right)
        y = []  # per-pass detections
        for si, fi in zip(s, f):
            xi = torch_utils.scale_img(x.flip(fi) if fi else x, si)
            yi = self.forward_once(xi)[0]  # forward pass
            # cv2.imwrite('img%g.jpg' % si, 255 * xi[0].numpy().transpose((1, 2, 0))[:, :, ::-1])  # save
            yi[..., :4] /= si  # de-scale boxes back to original image size
            # Use ==, not 'is': identity comparison of int literals is
            # implementation-dependent and a SyntaxWarning on Python >= 3.8.
            if fi == 2:
                yi[..., 1] = img_size[0] - yi[..., 1]  # de-flip ud
            elif fi == 3:
                yi[..., 0] = img_size[1] - yi[..., 0]  # de-flip lr
            y.append(yi)
        return torch.cat(y, 1), None  # augmented inference, train
    else:
        return self.forward_once(x, profile)  # single-scale inference, train
|
|
|
|
|