From df7988d8d0909b76bef79660b14ac9d7b548a13f Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Mon, 1 Jun 2020 13:53:47 -0700
Subject: [PATCH] onnx_export.py

---
 .github/workflows/greetings.yml |  2 +-
 README.md                       |  2 +-
 detect.py                       | 20 +++-----------------
 models/onnx_export.py           | 32 ++++++++++++++++++++++++++++++++
 4 files changed, 37 insertions(+), 19 deletions(-)
 create mode 100644 models/onnx_export.py

diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml
index 8c45e64..0f42e59 100644
--- a/.github/workflows/greetings.yml
+++ b/.github/workflows/greetings.yml
@@ -10,7 +10,7 @@ jobs:
       with:
         repo-token: ${{ secrets.GITHUB_TOKEN }}
         pr-message: 'Hello @${{ github.actor }}, thank you for submitting a PR! We will respond as soon as possible.'
-        issue-message: >
+        issue-message: |
          Hello @${{ github.actor }}, thank you for your interest in our work! Please visit our [Custom Training Tutorial](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data) to get started, and see our [Google Colab Notebook](https://github.com/ultralytics/yolov5/blob/master/tutorial.ipynb), [Docker Image](https://hub.docker.com/r/ultralytics/yolov5), and [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart) for example environments.
 
          If this is a bug report, please provide screenshots and **minimum viable code to reproduce your issue**, otherwise we can not help you.
diff --git a/README.md b/README.md
index d5b9e38..548e449 100755
--- a/README.md
+++ b/README.md
@@ -108,4 +108,4 @@ To access an up-to-date working environment (with all dependencies including CUD
 
 ## Contact
 
-**Issues should be raised directly in the repository.** For business inquiries or professional support requests please visit us at https://www.ultralytics.com.
+**Issues should be raised directly in the repository.** For business inquiries or professional support requests please visit https://www.ultralytics.com or email Glenn Jocher at glenn.jocher@ultralytics.com.
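The greetings.yml hunk above swaps the issue-message from a folded block scalar (>) to a literal one (|), so the blank line between the greeting's two paragraphs is preserved in the posted comment instead of being folded away. A minimal sketch of that difference, using PyYAML purely for illustration (an assumption; the workflow file itself is parsed by GitHub Actions, not by this snippet):

import yaml  # PyYAML; assumed installed only for this illustration

folded = "issue-message: >\n  first paragraph\n\n  second paragraph\n"
literal = "issue-message: |\n  first paragraph\n\n  second paragraph\n"

print(repr(yaml.safe_load(folded)["issue-message"]))   # 'first paragraph\nsecond paragraph\n'  (blank line folded away)
print(repr(yaml.safe_load(literal)["issue-message"]))  # 'first paragraph\n\nsecond paragraph\n' (blank line kept, markdown paragraphs render correctly)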
diff --git a/detect.py b/detect.py
index 9ff2bd4..b18ff48 100644
--- a/detect.py
+++ b/detect.py
@@ -7,12 +7,12 @@ ONNX_EXPORT = False
 
 
 def detect(save_img=False):
-    imgsz = (320, 192) if ONNX_EXPORT else opt.img_size  # (320, 192) or (416, 256) or (608, 352) for (height, width)
-    out, source, weights, half, view_img, save_txt = opt.output, opt.source, opt.weights, opt.half, opt.view_img, opt.save_txt
+    out, source, weights, half, view_img, save_txt, imgsz = \
+        opt.output, opt.source, opt.weights, opt.half, opt.view_img, opt.save_txt, opt.img_size
     webcam = source == '0' or source.startswith('rtsp') or source.startswith('http') or source.endswith('.txt')
 
     # Initialize
-    device = torch_utils.select_device(device='cpu' if ONNX_EXPORT else opt.device)
+    device = torch_utils.select_device(opt.device)
     if os.path.exists(out):
         shutil.rmtree(out)  # delete output folder
     os.makedirs(out)  # make new output folder
@@ -35,20 +35,6 @@ def detect(save_img=False):
     # Fuse Conv2d + BatchNorm2d layers
     # model.fuse()
 
-    # Export mode
-    if ONNX_EXPORT:
-        model.fuse()
-        img = torch.zeros((1, 3) + imgsz)  # (1, 3, 320, 192)
-        f = opt.weights.replace(opt.weights.split('.')[-1], 'onnx')  # *.onnx filename
-        torch.onnx.export(model, img, f, verbose=False, opset_version=11)
-
-        # Validate exported model
-        import onnx
-        model = onnx.load(f)  # Load the ONNX model
-        onnx.checker.check_model(model)  # Check that the IR is well formed
-        print(onnx.helper.printable_graph(model.graph))  # Print a human readable representation of the graph
-        return
-
     # Half precision
     half = half and device.type != 'cpu'  # half precision only supported on CUDA
     if half:
diff --git a/models/onnx_export.py b/models/onnx_export.py
new file mode 100644
index 0000000..9906693
--- /dev/null
+++ b/models/onnx_export.py
@@ -0,0 +1,32 @@
+import argparse
+
+import onnx
+
+from models.common import *
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--weights', default='../weights/yolov5s.pt', help='model path RELATIVE to ./models/')
+    parser.add_argument('--img-size', default=640, help='inference size (pixels)')
+    parser.add_argument('--batch-size', default=1, help='batch size')
+    opt = parser.parse_args()
+
+    # Parameters
+    f = opt.weights.replace('.pt', '.onnx')  # onnx filename
+    img = torch.zeros((opt.batch_size, 3, opt.img_size, opt.img_size))  # image size, (1, 3, 320, 192) iDetection
+
+    # Load pytorch model
+    google_utils.attempt_download(opt.weights)
+    model = torch.load(opt.weights)['model']
+    model.eval()
+    # model.fuse()  # optionally fuse Conv2d + BatchNorm2d layers TODO
+
+    # Export to onnx
+    model.model[-1].export = True  # set Detect() layer export=True
+    torch.onnx.export(model, img, f, verbose=False, opset_version=11)
+
+    # Check onnx model
+    model = onnx.load(f)  # load onnx model
+    onnx.checker.check_model(model)  # check onnx model
+    print(onnx.helper.printable_graph(model.graph))  # print a human readable representation of the graph
+    print('Export complete. ONNX model saved to %s\nView with https://github.com/lutzroeder/netron' % f)
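With export moved out of detect.py, models/onnx_export.py runs as its own step and detect() no longer consults ONNX_EXPORT for image size or device selection. Below is a hedged sketch of how the exported file might be smoke-tested with ONNX Runtime: onnxruntime and numpy are assumptions here (the patch itself only needs onnx), the invocation and output path simply mirror the script's defaults, and because the argparse options set no type=, non-default --img-size/--batch-size values would arrive as strings, so the defaults are used.

# Export first (assumed invocation from the repository root, using the script's default paths):
#   python models/onnx_export.py
import numpy as np       # assumption: not required by the patch itself
import onnxruntime       # assumption: pip install onnxruntime

# Default weights '../weights/yolov5s.pt' imply the .onnx lands next to them (path is an assumption)
session = onnxruntime.InferenceSession('weights/yolov5s.onnx')
input_name = session.get_inputs()[0].name

# Dummy input matching the export-time shape: (batch, channels, height, width)
img = np.zeros((1, 3, 640, 640), dtype=np.float32)

# With Detect() export=True the graph may return the raw per-scale feature maps,
# so expect a list of arrays rather than one decoded prediction tensor.
outputs = session.run(None, {input_name: img})
print([o.shape for o in outputs])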