revert test module to confuse users...

pull/1/head
Jirka 5 years ago
parent ef6f5b33a8
commit e8ea772384

@@ -71,9 +71,9 @@ jobs:
 # detect custom
 python detect.py --weights runs/exp0/weights/last.pt --device $di
 # test official
-python eval.py --weights weights/${{ matrix.yolo5-model }}.pt --device $di --batch-size 1
+python test.py --weights weights/${{ matrix.yolo5-model }}.pt --device $di --batch-size 1
 # test custom
-python eval.py --weights runs/exp0/weights/last.pt --device $di --batch-size 1
+python test.py --weights runs/exp0/weights/last.pt --device $di --batch-size 1
 # inspect
 python models/yolo.py --cfg models/${{ matrix.yolo5-model }}.yaml
 # export

@@ -27,8 +27,8 @@ This repository represents Ultralytics open-source research into future object d
 ** AP<sup>test</sup> denotes COCO [test-dev2017](http://cocodataset.org/#upload) server results, all other AP results in the table denote val2017 accuracy.
-** All AP numbers are for single-model single-scale without ensemble or test-time augmentation. Reproduce by `python eval.py --data coco.yaml --img 736 --conf 0.001`
-** Speed<sub>GPU</sub> measures end-to-end time per image averaged over 5000 COCO val2017 images using a GCP [n1-standard-16](https://cloud.google.com/compute/docs/machine-types#n1_standard_machine_types) instance with one V100 GPU, and includes image preprocessing, PyTorch FP16 image inference at --batch-size 32 --img-size 640, postprocessing and NMS. Average NMS time included in this chart is 1-2ms/img. Reproduce by `python eval.py --data coco.yaml --img 640 --conf 0.1`
+** All AP numbers are for single-model single-scale without ensemble or test-time augmentation. Reproduce by `python test.py --data coco.yaml --img 736 --conf 0.001`
+** Speed<sub>GPU</sub> measures end-to-end time per image averaged over 5000 COCO val2017 images using a GCP [n1-standard-16](https://cloud.google.com/compute/docs/machine-types#n1_standard_machine_types) instance with one V100 GPU, and includes image preprocessing, PyTorch FP16 image inference at --batch-size 32 --img-size 640, postprocessing and NMS. Average NMS time included in this chart is 1-2ms/img. Reproduce by `python test.py --data coco.yaml --img 640 --conf 0.1`
 ** All checkpoints are trained to 300 epochs with default settings and hyperparameters (no autoaugmentation).

@@ -233,7 +233,7 @@ def test(data,
 if __name__ == '__main__':
-    parser = argparse.ArgumentParser(prog='eval.py')
+    parser = argparse.ArgumentParser(prog='test.py')
     parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)')
     parser.add_argument('--data', type=str, default='data/coco128.yaml', help='*.data path')
     parser.add_argument('--batch-size', type=int, default=32, help='size of each image batch')

@@ -7,7 +7,7 @@ import torch.optim.lr_scheduler as lr_scheduler
 import torch.utils.data
 from torch.utils.tensorboard import SummaryWriter
-import eval  # import eval.py to get mAP after each epoch
+import test  # import test.py to get mAP after each epoch
 from models.yolo import Model
 from utils import google_utils
 from utils.datasets import *
@@ -291,7 +291,7 @@ def train(hyp):
         ema.update_attr(model, include=['md', 'nc', 'hyp', 'gr', 'names', 'stride'])
         final_epoch = epoch + 1 == epochs
         if not opt.notest or final_epoch:  # Calculate mAP
-            results, maps, times = eval.test(opt.data,
+            results, maps, times = test.test(opt.data,
                                              batch_size=batch_size,
                                              imgsz=imgsz_test,
                                              save_json=final_epoch and opt.data.endswith(os.sep + 'coco.yaml'),
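For orientation, the only functional change in this train.py hunk is the per-epoch mAP call: at the end of each epoch the training loop invokes the renamed module's test() function (always on the final epoch, otherwise only when --notest is not set). A minimal standalone sketch of that pattern follows; the helper name and the extra keyword arguments (model, dataloader) are assumptions for illustration, not taken verbatim from this diff.

import os
import test  # renamed back from eval; test.test() is assumed to return (results, maps, times)

def evaluate_epoch(opt, batch_size, imgsz_test, ema_model, val_loader, epoch, epochs):
    # Run validation after an epoch, mirroring the call site shown in the hunk above.
    final_epoch = epoch + 1 == epochs
    if not opt.notest or final_epoch:  # Calculate mAP
        results, maps, times = test.test(opt.data,
                                         batch_size=batch_size,
                                         imgsz=imgsz_test,
                                         save_json=final_epoch and opt.data.endswith(os.sep + 'coco.yaml'),
                                         model=ema_model,      # evaluate the in-memory (EMA) weights, no checkpoint reload
                                         dataloader=val_loader)  # reuse the existing validation dataloader
        return results, maps, times
    return None

Passing the in-memory model and the existing validation dataloader keeps the per-epoch evaluation cheap, since nothing has to be re-read from disk between epochs.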

tutorial.ipynb

@@ -236,7 +236,7 @@
 },
 "source": [
 "# Run YOLOv5x on COCO val2017\n",
-"!python eval.py --weights yolov5x.pt --data coco.yaml --img 672"
+"!python test.py --weights yolov5x.pt --data coco.yaml --img 672"
 ],
 "execution_count": null,
 "outputs": [
@@ -319,7 +319,7 @@
 },
 "source": [
 "# Run YOLOv5s on COCO test-dev2017 with argument --task test\n",
-"!python eval.py --weights yolov5s.pt --data ./data/coco.yaml --task test"
+"!python test.py --weights yolov5s.pt --data ./data/coco.yaml --task test"
 ],
 "execution_count": null,
 "outputs": []
@@ -717,7 +717,7 @@
 "for x in best*\n",
 "do\n",
 " gsutil cp gs://*/*/*/$x.pt .\n",
-" python eval.py --weights $x.pt --data coco.yaml --img 672\n",
+" python test.py --weights $x.pt --data coco.yaml --img 672\n",
 "done"
 ],
 "execution_count": null,
@@ -744,8 +744,8 @@
 " do\n",
 " python detect.py --weights $x.pt --device $di # detect official\n",
 " python detect.py --weights runs/exp0/weights/last.pt --device $di # detect custom\n",
-" python eval.py --weights $x.pt --device $di # test official\n",
-" python eval.py --weights runs/exp0/weights/last.pt --device $di # test custom\n",
+" python test.py --weights $x.pt --device $di # test official\n",
+" python test.py --weights runs/exp0/weights/last.pt --device $di # test custom\n",
 " done\n",
 " python models/yolo.py --cfg $x.yaml # inspect\n",
 " python models/export.py --weights $x.pt --img 640 --batch 1 # export\n",

@@ -1087,7 +1087,7 @@ def plot_targets_txt():  # from utils.utils import *; plot_targets_txt()
 def plot_study_txt(f='study.txt', x=None):  # from utils.utils import *; plot_study_txt()
-    # Plot study.txt generated by eval.py
+    # Plot study.txt generated by test.py
     fig, ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)
     ax = ax.ravel()
