commit f517ba81c7

data/get_voc.sh
@@ -0,0 +1,214 @@
# PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC/
# Download command: bash ./data/get_voc.sh
# Train command: python train.py --data voc.yaml
# Dataset should be placed next to yolov5 folder:
#   /parent_folder
#     /VOC
#     /yolov5

start=`date +%s`
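# The script also accepts an optional download directory as its first argument,
# handled below; a usage sketch with a hypothetical path:
#   bash ./data/get_voc.sh /path/to/datasets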

# handle optional download dir
if [ -z "$1" ]
  then
    # navigate to ~/tmp
    echo "navigating to ../tmp/ ..."
    mkdir -p ../tmp
    cd ../tmp/
  else
    # check that the argument is a valid directory
    if [ ! -d "$1" ]; then
      echo "$1 is not a valid directory"
      exit 0
    fi
    echo "navigating to $1 ..."
    cd "$1"
fi

echo "Downloading VOC2007 trainval ..."
# Download the data.
curl -LO http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtrainval_06-Nov-2007.tar
echo "Downloading VOC2007 test data ..."
curl -LO http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtest_06-Nov-2007.tar
echo "Done downloading."

# Extract data
echo "Extracting trainval ..."
tar -xf VOCtrainval_06-Nov-2007.tar
echo "Extracting test ..."
tar -xf VOCtest_06-Nov-2007.tar
echo "removing tars ..."
rm VOCtrainval_06-Nov-2007.tar
rm VOCtest_06-Nov-2007.tar

end=`date +%s`
runtime=$((end-start))

echo "Completed in" $runtime "seconds"

start=`date +%s`

# handle optional download dir
if [ -z "$1" ]
  then
    # navigate to ~/tmp
    echo "navigating to ../tmp/ ..."
    mkdir -p ../tmp
    cd ../tmp/
  else
    # check that the argument is a valid directory
    if [ ! -d "$1" ]; then
      echo "$1 is not a valid directory"
      exit 0
    fi
    echo "navigating to $1 ..."
    cd "$1"
fi

echo "Downloading VOC2012 trainval ..."
# Download the data.
curl -LO http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar
echo "Done downloading."

# Extract data
echo "Extracting trainval ..."
tar -xf VOCtrainval_11-May-2012.tar
echo "removing tar ..."
rm VOCtrainval_11-May-2012.tar

end=`date +%s`
runtime=$((end-start))

echo "Completed in" $runtime "seconds"

cd ../tmp
echo "Splitting dataset..."
python3 - "$@" <<END
import xml.etree.ElementTree as ET
import pickle
import os
from os import listdir, getcwd
from os.path import join

sets = [('2012', 'train'), ('2012', 'val'), ('2007', 'train'), ('2007', 'val'), ('2007', 'test')]

classes = ["aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog",
           "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"]


def convert(size, box):
    dw = 1. / (size[0])
    dh = 1. / (size[1])
    x = (box[0] + box[1]) / 2.0 - 1
    y = (box[2] + box[3]) / 2.0 - 1
    w = box[1] - box[0]
    h = box[3] - box[2]
    x = x * dw
    w = w * dw
    y = y * dh
    h = h * dh
    return (x, y, w, h)


def convert_annotation(year, image_id):
    in_file = open('VOCdevkit/VOC%s/Annotations/%s.xml' % (year, image_id))
    out_file = open('VOCdevkit/VOC%s/labels/%s.txt' % (year, image_id), 'w')
    tree = ET.parse(in_file)
    root = tree.getroot()
    size = root.find('size')
    w = int(size.find('width').text)
    h = int(size.find('height').text)

    for obj in root.iter('object'):
        difficult = obj.find('difficult').text
        cls = obj.find('name').text
        if cls not in classes or int(difficult) == 1:
            continue
        cls_id = classes.index(cls)
        xmlbox = obj.find('bndbox')
        b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text), float(xmlbox.find('ymin').text), float(xmlbox.find('ymax').text))
        bb = convert((w, h), b)
        out_file.write(str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n')


wd = getcwd()

for year, image_set in sets:
    if not os.path.exists('VOCdevkit/VOC%s/labels/' % (year)):
        os.makedirs('VOCdevkit/VOC%s/labels/' % (year))
    image_ids = open('VOCdevkit/VOC%s/ImageSets/Main/%s.txt' % (year, image_set)).read().strip().split()
    list_file = open('%s_%s.txt' % (year, image_set), 'w')
    for image_id in image_ids:
        list_file.write('%s/VOCdevkit/VOC%s/JPEGImages/%s.jpg\n' % (wd, year, image_id))
        convert_annotation(year, image_id)
    list_file.close()
END

cat 2007_train.txt 2007_val.txt 2012_train.txt 2012_val.txt > train.txt
cat 2007_train.txt 2007_val.txt 2007_test.txt 2012_train.txt 2012_val.txt > train.all.txt

python3 - "$@" <<END
import os
import shutil

os.system('mkdir ../VOC/')
os.system('mkdir ../VOC/images')
os.system('mkdir ../VOC/images/train')
os.system('mkdir ../VOC/images/val')

os.system('mkdir ../VOC/labels')
os.system('mkdir ../VOC/labels/train')
os.system('mkdir ../VOC/labels/val')

print(os.path.exists('../tmp/train.txt'))
f = open('../tmp/train.txt', 'r')
lines = f.readlines()

for line in lines:
    # print(line.split('/')[-1][:-1])
    line = "/".join(line.split('/')[2:])
    # print(line)
    if os.path.exists("../" + line[:-1]):
        os.system("cp ../" + line[:-1] + " ../VOC/images/train")

print(os.path.exists('../tmp/train.txt'))
f = open('../tmp/train.txt', 'r')
lines = f.readlines()

for line in lines:
    # print(line.split('/')[-1][:-1])
    line = "/".join(line.split('/')[2:])
    line = line.replace('JPEGImages', 'labels')
    line = line.replace('jpg', 'txt')
    # print(line)
    if os.path.exists("../" + line[:-1]):
        os.system("cp ../" + line[:-1] + " ../VOC/labels/train")

print(os.path.exists('../tmp/2007_test.txt'))
f = open('../tmp/2007_test.txt', 'r')
lines = f.readlines()

for line in lines:
    # print(line.split('/')[-1][:-1])
    line = "/".join(line.split('/')[2:])

    if os.path.exists("../" + line[:-1]):
        os.system("cp ../" + line[:-1] + " ../VOC/images/val")

print(os.path.exists('../tmp/2007_test.txt'))
f = open('../tmp/2007_test.txt', 'r')
lines = f.readlines()

for line in lines:
    # print(line.split('/')[-1][:-1])
    line = "/".join(line.split('/')[2:])
    line = line.replace('JPEGImages', 'labels')
    line = line.replace('jpg', 'txt')
    # print(line)
    if os.path.exists("../" + line[:-1]):
        os.system("cp ../" + line[:-1] + " ../VOC/labels/val")
END

rm -rf ../tmp  # remove temporary directory
echo "VOC download done."
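
For reference, the convert() helper in the label-conversion heredoc above maps a VOC box given in pixels as (xmin, xmax, ymin, ymax) to the normalized (x_center, y_center, width, height) values written to each labels/*.txt line. A minimal standalone sketch with made-up numbers (a 640x480 image and a hypothetical box), not part of the commit:

    def convert(size, box):  # size = (img_w, img_h), box = (xmin, xmax, ymin, ymax)
        dw, dh = 1. / size[0], 1. / size[1]
        x = ((box[0] + box[1]) / 2.0 - 1) * dw  # normalized x center
        y = ((box[2] + box[3]) / 2.0 - 1) * dh  # normalized y center
        w = (box[1] - box[0]) * dw              # normalized width
        h = (box[3] - box[2]) * dh              # normalized height
        return x, y, w, h

    print(convert((640, 480), (100., 300., 120., 360.)))
    # -> (0.3109375, 0.4979166666..., 0.3125, 0.5)

All four values fall in [0, 1] relative to the image size, which is the label format the training list files above point at.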

data/voc.yaml
@@ -0,0 +1,18 @@
# PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC/
# Download command: bash ./data/get_voc.sh
# Train command: python train.py --data voc.yaml
# Dataset should be placed next to yolov5 folder:
#   /parent_folder
#     /VOC
#     /yolov5

# train and val datasets (image directory or *.txt file with image paths)
train: ../VOC/images/train/
val: ../VOC/images/val/

# number of classes
nc: 20

# class names
names: ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog',
        'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']
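
As a quick consistency check, the config can be loaded with PyYAML and the class count compared against the names list; a minimal sketch, assuming PyYAML is installed and the file is saved as data/voc.yaml:

    import yaml  # PyYAML

    with open('data/voc.yaml') as f:
        data = yaml.safe_load(f)

    assert data['nc'] == len(data['names'])  # 20 VOC classes
    print(data['train'], data['val'])        # ../VOC/images/train/  ../VOC/images/val/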

models/export.py
@@ -0,0 +1,72 @@
"""Exports a YOLOv5 *.pt model to ONNX and TorchScript formats

Usage:
    $ export PYTHONPATH="$PWD" && python models/export.py --weights ./weights/yolov5s.pt --img 640 --batch 1
"""

import argparse

from models.common import *
from utils import google_utils

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', type=str, default='./yolov5s.pt', help='weights path')
    parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='image size')
    parser.add_argument('--batch-size', type=int, default=1, help='batch size')
    opt = parser.parse_args()
    opt.img_size *= 2 if len(opt.img_size) == 1 else 1  # expand
    print(opt)

    # Input
    img = torch.zeros((opt.batch_size, 3, *opt.img_size))  # image size(1,3,320,192) iDetection

    # Load PyTorch model
    google_utils.attempt_download(opt.weights)
    model = torch.load(opt.weights, map_location=torch.device('cpu'))['model'].float()
    model.eval()
    model.model[-1].export = True  # set Detect() layer export=True
    y = model(img)  # dry run

    # TorchScript export
    try:
        print('\nStarting TorchScript export with torch %s...' % torch.__version__)
        f = opt.weights.replace('.pt', '.torchscript')  # filename
        ts = torch.jit.trace(model, img)
        ts.save(f)
        print('TorchScript export success, saved as %s' % f)
    except Exception as e:
        print('TorchScript export failure: %s' % e)
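    # Note: the saved TorchScript module is self-contained and can be reloaded for inference
    # without the original model code; a minimal sketch, assuming the export above succeeded:
    #   reloaded = torch.jit.load(f)
    #   pred = reloaded(img)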

    # ONNX export
    try:
        import onnx

        print('\nStarting ONNX export with onnx %s...' % onnx.__version__)
        f = opt.weights.replace('.pt', '.onnx')  # filename
        model.fuse()  # only for ONNX
        torch.onnx.export(model, img, f, verbose=False, opset_version=12, input_names=['images'],
                          output_names=['classes', 'boxes'] if y is None else ['output'])

        # Checks
        onnx_model = onnx.load(f)  # load onnx model
        onnx.checker.check_model(onnx_model)  # check onnx model
        print(onnx.helper.printable_graph(onnx_model.graph))  # print a human readable model
        print('ONNX export success, saved as %s' % f)
    except Exception as e:
        print('ONNX export failure: %s' % e)
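    # Note: the exported *.onnx file can be run outside PyTorch, e.g. with onnxruntime;
    # a minimal sketch, assuming onnxruntime is installed and the export above succeeded:
    #   import numpy as np, onnxruntime as ort
    #   session = ort.InferenceSession(f)
    #   outputs = session.run(None, {'images': np.zeros(img.shape, dtype=np.float32)})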

    # CoreML export
    try:
        import coremltools as ct

        print('\nStarting CoreML export with coremltools %s...' % ct.__version__)
        model = ct.convert(ts, inputs=[ct.ImageType(name='images', shape=img.shape)])  # convert
        f = opt.weights.replace('.pt', '.mlmodel')  # filename
        model.save(f)
        print('CoreML export success, saved as %s' % f)
    except Exception as e:
        print('CoreML export failure: %s' % e)

    # Finish
    print('\nExport complete. Visualize with https://github.com/lutzroeder/netron.')

models/onnx_export.py (removed)
@@ -1,42 +0,0 @@
"""Exports a pytorch *.pt model to *.onnx format

Usage:
    $ export PYTHONPATH="$PWD" && python models/onnx_export.py --weights ./weights/yolov5s.pt --img 640 --batch 1
"""

import argparse

import onnx

from models.common import *
from utils import google_utils

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', type=str, default='./yolov5s.pt', help='weights path')
    parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='image size')
    parser.add_argument('--batch-size', type=int, default=1, help='batch size')
    opt = parser.parse_args()
    print(opt)

    # Parameters
    f = opt.weights.replace('.pt', '.onnx')  # onnx filename
    img = torch.zeros((opt.batch_size, 3, *opt.img_size))  # image size, (1, 3, 320, 192) iDetection

    # Load pytorch model
    google_utils.attempt_download(opt.weights)
    model = torch.load(opt.weights, map_location=torch.device('cpu'))['model'].float()
    model.eval()
    model.fuse()

    # Export to onnx
    model.model[-1].export = True  # set Detect() layer export=True
    _ = model(img)  # dry run
    torch.onnx.export(model, img, f, verbose=False, opset_version=11, input_names=['images'],
                      output_names=['output'])  # output_names=['classes', 'boxes']

    # Check onnx model
    model = onnx.load(f)  # load onnx model
    onnx.checker.check_model(model)  # check onnx model
    print(onnx.helper.printable_graph(model.graph))  # print a human readable representation of the graph
    print('Export complete. ONNX model saved to %s\nView with https://github.com/lutzroeder/netron' % f)