From 685091a3fc41ebfef7020f27e2b290cf26e3ef4f Mon Sep 17 00:00:00 2001
From: Jake Poznanski
Date: Sun, 28 Jun 2020 13:17:05 -0700
Subject: [PATCH 1/2] Adding torchscript export

---
 models/torchscript_export.py | 38 ++++++++++++++++++++++++++++++++++++
 1 file changed, 38 insertions(+)
 create mode 100644 models/torchscript_export.py

diff --git a/models/torchscript_export.py b/models/torchscript_export.py
new file mode 100644
index 0000000..1b9d6c5
--- /dev/null
+++ b/models/torchscript_export.py
@@ -0,0 +1,38 @@
+"""Exports a pytorch *.pt model to *.torchscript format
+
+Usage:
+    $ export PYTHONPATH="$PWD" && python models/torchscript_export.py --weights ./weights/yolov5s.pt --img 640 --batch 1
+"""
+
+import argparse
+
+
+from models.common import *
+from utils import google_utils
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--weights', type=str, default='./yolov5s.pt', help='weights path')
+    parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='image size')
+    parser.add_argument('--batch-size', type=int, default=1, help='batch size')
+    opt = parser.parse_args()
+    print(opt)
+
+    # Parameters
+    f = opt.weights.replace('.pt', '.torchscript')  # torchscript filename
+    img = torch.zeros((opt.batch_size, 3, *opt.img_size))  # image size, (1, 3, 320, 192) iDetection
+
+    # Load pytorch model
+    google_utils.attempt_download(opt.weights)
+    model = torch.load(opt.weights, map_location=torch.device('cpu'))['model'].float()
+    model.eval()
+
+    # Don't fuse layers, it won't work with torchscript exports
+    #model.fuse()
+
+    # Export to jit/torchscript
+    model.model[-1].export = True  # set Detect() layer export=True
+    _ = model(img)  # dry run
+
+    traced_script_module = torch.jit.trace(model, img)
+    traced_script_module.save(f)
\ No newline at end of file
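As a quick sanity check of the first patch's output, the saved file can be reloaded with torch.jit.load and the dry run repeated. This is only a minimal sketch, not part of either patch: the weights path and the 640x640 input mirror the script defaults and are assumptions, and the raw Detect() outputs are printed without any post-processing.

    import torch

    f = './weights/yolov5s.torchscript'  # path assumed from the script defaults
    model = torch.jit.load(f, map_location='cpu')  # reload the traced module on CPU
    img = torch.zeros((1, 3, 640, 640))  # dummy batch matching --img 640 --batch 1
    with torch.no_grad():
        y = model(img)  # raw Detect() outputs, since export=True was set before tracing
    print([t.shape for t in y] if isinstance(y, (list, tuple)) else y.shape)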
From 615d6d0cfa44f1720df177101bf350b737887354 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Mon, 29 Jun 2020 14:00:13 -0700
Subject: [PATCH 2/2] FROM nvcr.io/nvidia/pytorch:20.06-py3

---
 models/export.py             | 55 ++++++++++++++++++++++++++++++++++++
 models/onnx_export.py        | 43 ----------------------------
 models/torchscript_export.py | 38 -------------------------
 3 files changed, 55 insertions(+), 81 deletions(-)
 create mode 100644 models/export.py
 delete mode 100644 models/onnx_export.py
 delete mode 100644 models/torchscript_export.py

diff --git a/models/export.py b/models/export.py
new file mode 100644
index 0000000..2aa6ce4
--- /dev/null
+++ b/models/export.py
@@ -0,0 +1,55 @@
+"""Exports a YOLOv5 *.pt model to *.onnx and *.torchscript formats
+
+Usage:
+    $ export PYTHONPATH="$PWD" && python models/export.py --weights ./weights/yolov5s.pt --img 640 --batch 1
+"""
+
+import argparse
+
+import onnx
+
+from models.common import *
+from utils import google_utils
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--weights', type=str, default='./yolov5s.pt', help='weights path')
+    parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='image size')
+    parser.add_argument('--batch-size', type=int, default=1, help='batch size')
+    opt = parser.parse_args()
+    opt.img_size *= 2 if len(opt.img_size) == 1 else 1  # expand
+    print(opt)
+
+    # Input
+    img = torch.zeros((opt.batch_size, 3, *opt.img_size))  # image size, (1, 3, 320, 192) iDetection
+
+    # Load PyTorch model
+    google_utils.attempt_download(opt.weights)
+    model = torch.load(opt.weights, map_location=torch.device('cpu'))['model'].float()
+    model.eval()
+    model.model[-1].export = True  # set Detect() layer export=True
+    _ = model(img)  # dry run
+
+    # Export to torchscript
+    try:
+        f = opt.weights.replace('.pt', '.torchscript')  # filename
+        ts = torch.jit.trace(model, img)
+        ts.save(f)
+        print('Torchscript export success, saved as %s' % f)
+    except:
+        print('Torchscript export failed.')
+
+    # Export to ONNX
+    try:
+        f = opt.weights.replace('.pt', '.onnx')  # filename
+        model.fuse()  # only for ONNX
+        torch.onnx.export(model, img, f, verbose=False, opset_version=11, input_names=['images'],
+                          output_names=['output'])  # output_names=['classes', 'boxes']
+
+        # Checks
+        onnx_model = onnx.load(f)  # load onnx model
+        onnx.checker.check_model(onnx_model)  # check onnx model
+        print(onnx.helper.printable_graph(onnx_model.graph))  # print a human readable representation of the graph
+        print('ONNX export success, saved as %s\nView with https://github.com/lutzroeder/netron' % f)
+    except:
+        print('ONNX export failed.')
diff --git a/models/onnx_export.py b/models/onnx_export.py
deleted file mode 100644
index ca6bd50..0000000
--- a/models/onnx_export.py
+++ /dev/null
@@ -1,43 +0,0 @@
-"""Exports a pytorch *.pt model to *.onnx format
-
-Usage:
-    $ export PYTHONPATH="$PWD" && python models/onnx_export.py --weights ./weights/yolov5s.pt --img 640 --batch 1
-"""
-
-import argparse
-
-import onnx
-
-from models.common import *
-from utils import google_utils
-
-if __name__ == '__main__':
-    parser = argparse.ArgumentParser()
-    parser.add_argument('--weights', type=str, default='./yolov5s.pt', help='weights path')
-    parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='image size')
-    parser.add_argument('--batch-size', type=int, default=1, help='batch size')
-    opt = parser.parse_args()
-    opt.img_size *= 2 if len(opt.img_size) == 1 else 1
-    print(opt)
-
-    # Parameters
-    f = opt.weights.replace('.pt', '.onnx')  # onnx filename
-    img = torch.zeros((opt.batch_size, 3, *opt.img_size))  # image size, (1, 3, 320, 192) iDetection
-
-    # Load pytorch model
-    google_utils.attempt_download(opt.weights)
-    model = torch.load(opt.weights, map_location=torch.device('cpu'))['model'].float()
-    model.eval()
-    model.fuse()
-
-    # Export to onnx
-    model.model[-1].export = True  # set Detect() layer export=True
-    _ = model(img)  # dry run
-    torch.onnx.export(model, img, f, verbose=False, opset_version=11, input_names=['images'],
-                      output_names=['output'])  # output_names=['classes', 'boxes']
-
-    # Check onnx model
-    model = onnx.load(f)  # load onnx model
-    onnx.checker.check_model(model)  # check onnx model
-    print(onnx.helper.printable_graph(model.graph))  # print a human readable representation of the graph
-    print('Export complete. ONNX model saved to %s\nView with https://github.com/lutzroeder/netron' % f)
diff --git a/models/torchscript_export.py b/models/torchscript_export.py
deleted file mode 100644
index 1b9d6c5..0000000
--- a/models/torchscript_export.py
+++ /dev/null
@@ -1,38 +0,0 @@
-"""Exports a pytorch *.pt model to *.torchscript format
-
-Usage:
-    $ export PYTHONPATH="$PWD" && python models/torchscript_export.py --weights ./weights/yolov5s.pt --img 640 --batch 1
-"""
-
-import argparse
-
-
-from models.common import *
-from utils import google_utils
-
-if __name__ == '__main__':
-    parser = argparse.ArgumentParser()
-    parser.add_argument('--weights', type=str, default='./yolov5s.pt', help='weights path')
-    parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='image size')
-    parser.add_argument('--batch-size', type=int, default=1, help='batch size')
-    opt = parser.parse_args()
-    print(opt)
-
-    # Parameters
-    f = opt.weights.replace('.pt', '.torchscript')  # torchscript filename
-    img = torch.zeros((opt.batch_size, 3, *opt.img_size))  # image size, (1, 3, 320, 192) iDetection
-
-    # Load pytorch model
-    google_utils.attempt_download(opt.weights)
-    model = torch.load(opt.weights, map_location=torch.device('cpu'))['model'].float()
-    model.eval()
-
-    # Don't fuse layers, it won't work with torchscript exports
-    #model.fuse()
-
-    # Export to jit/torchscript
-    model.model[-1].export = True  # set Detect() layer export=True
-    _ = model(img)  # dry run
-
-    traced_script_module = torch.jit.trace(model, img)
-    traced_script_module.save(f)
\ No newline at end of file
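The ONNX file written by models/export.py can be exercised in a similar way. Again only a minimal sketch with assumed details: it relies on the optional onnxruntime package, which is not used anywhere in these patches, and on the default weights path; the 'images' input name is the one passed to torch.onnx.export above.

    import numpy as np
    import onnxruntime

    f = './weights/yolov5s.onnx'  # path assumed from the script defaults
    session = onnxruntime.InferenceSession(f)  # load the exported graph
    img = np.zeros((1, 3, 640, 640), dtype=np.float32)  # dummy input matching --img 640 --batch 1
    outputs = session.run(None, {'images': img})  # 'images' is the input name set in torch.onnx.export
    print([o.shape for o in outputs])  # raw Detect() outputs, no NMS applied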