{ "nbformat": 4, "nbformat_minor": 0, "metadata": { "colab": { "name": "YOLOv5 Tutorial", "provenance": [], "collapsed_sections": [], "machine_shape": "hm", "toc_visible": true, "include_colab_link": true }, "kernelspec": { "name": "python3", "display_name": "Python 3" }, "accelerator": "GPU", "widgets": { "application/vnd.jupyter.widget-state+json": { "c31d2039ccf74c22b67841f4877d1186": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HBoxView", "box_style": "", "children": [ "IPY_MODEL_d4bba1727c714d94ad58a72bffa07c4c", "IPY_MODEL_9aeff9f1780b45f892422fdc96e56913", "IPY_MODEL_bf55a7c71d074d3fa88b10b997820825" ], "layout": "IPY_MODEL_d8b66044e2fb4f5b916696834d880c81" } }, "d4bba1727c714d94ad58a72bffa07c4c": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_102e1deda239436fa72751c58202fa0f", "placeholder": "​", "style": "IPY_MODEL_4fd4431ced6c42368e18424912b877e4", "value": "100%" } }, "9aeff9f1780b45f892422fdc96e56913": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_cdd709c4f40941bea1b2053523c9fac8", "max": 818322941, "min": 0, "orientation": "horizontal", "style": "IPY_MODEL_a1ef2d8de2b741c78ca5d938e2ddbcdf", "value": 818322941 } }, "bf55a7c71d074d3fa88b10b997820825": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_0dbce99bb6184238842cbec0587d564a", "placeholder": "​", "style": "IPY_MODEL_91ff5f93f2a24c5790ab29e347965946", "value": " 780M/780M [01:10<00:00, 10.5MB/s]" } }, "d8b66044e2fb4f5b916696834d880c81": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, 
"grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "102e1deda239436fa72751c58202fa0f": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "4fd4431ced6c42368e18424912b877e4": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "cdd709c4f40941bea1b2053523c9fac8": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "a1ef2d8de2b741c78ca5d938e2ddbcdf": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": 
"@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "0dbce99bb6184238842cbec0587d564a": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "91ff5f93f2a24c5790ab29e347965946": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } } } } }, "cells": [ { "cell_type": "markdown", "metadata": { "id": "view-in-github", "colab_type": "text" }, "source": [ "\"Open" ] }, { "cell_type": "markdown", "metadata": { "id": "t6MPjfT5NrKQ" }, "source": [ "\n", "\n", "\n", "This is the **official YOLOv5 🚀 notebook** by **Ultralytics**, and is freely available for redistribution under the [GPL-3.0 license](https://choosealicense.com/licenses/gpl-3.0/). \n", "For more information please visit https://github.com/ultralytics/yolov5 and https://ultralytics.com. Thank you!" ] }, { "cell_type": "markdown", "metadata": { "id": "7mGmQbAO5pQb" }, "source": [ "# Setup\n", "\n", "Clone repo, install dependencies and check PyTorch and GPU." ] }, { "cell_type": "code", "metadata": { "id": "wbvMlHd_QwMG", "colab": { "base_uri": "https://localhost:8080/" }, "outputId": "185d0979-edcd-4860-e6fb-b8a27dbf5096" }, "source": [ "!git clone https://github.com/ultralytics/yolov5 # clone\n", "%cd yolov5\n", "%pip install -qr requirements.txt # install\n", "\n", "import torch\n", "import utils\n", "display = utils.notebook_init() # checks" ], "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stderr", "text": [ "YOLOv5 🚀 v6.1-370-g20f1b7e Python-3.7.13 torch-1.12.0+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n" ] }, { "output_type": "stream", "name": "stdout", "text": [ "Setup complete ✅ (8 CPUs, 51.0 GB RAM, 37.4/166.8 GB disk)\n" ] } ] }, { "cell_type": "markdown", "metadata": { "id": "4JnkELT0cIJg" }, "source": [ "# 1. Inference\n", "\n", "`detect.py` runs YOLOv5 inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/detect`. 
Example inference sources are:\n", "\n", "```shell\n", "python detect.py --source 0  # webcam\n", "                          img.jpg  # image\n", "                          vid.mp4  # video\n", "                          path/  # directory\n", "                          'path/*.jpg'  # glob\n", "                          'https://youtu.be/Zgi9g1ksQHc'  # YouTube\n", "                          'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream\n", "```" ] }, { "cell_type": "code", "metadata": { "id": "zR9ZbuQCH7FX", "colab": { "base_uri": "https://localhost:8080/" }, "outputId": "4b13989f-32a4-4ef0-b403-06ff3aac255c" }, "source": [ "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images\n", "# display.Image(filename='runs/detect/exp/zidane.jpg', width=600)" ], "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "\u001b[34m\u001b[1mdetect: \u001b[0mweights=['yolov5s.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/detect, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False\n", "YOLOv5 🚀 v6.1-370-g20f1b7e Python-3.7.13 torch-1.12.0+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", "\n", "Downloading https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5s.pt to yolov5s.pt...\n", "100% 14.1M/14.1M [00:00<00:00, 53.9MB/s]\n", "\n", "Fusing layers... \n", "YOLOv5s summary: 213 layers, 7225885 parameters, 0 gradients\n", "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, Done. (0.016s)\n", "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, Done. (0.021s)\n", "Speed: 0.6ms pre-process, 18.6ms inference, 25.0ms NMS per image at shape (1, 3, 640, 640)\n", "Results saved to \u001b[1mruns/detect/exp\u001b[0m\n" ] } ] }
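, { "cell_type": "markdown", "metadata": {}, "source": [ "For example, adding `--save-txt --save-conf` (both `False` by default in the settings printout above) also writes per-image `.txt` label files with confidence scores alongside the rendered images:" ] }, { "cell_type": "code", "metadata": {}, "source": [ "# Illustrative variation: save detections as .txt label files with confidences\n", "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images --save-txt --save-conf" ], "execution_count": null, "outputs": [] }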
, { "cell_type": "markdown", "metadata": { "id": "0eq1SMWl6Sfn" }, "source": [ "# 2. Validate\n", "Validate a model's accuracy on [COCO](https://cocodataset.org/#home) val or test-dev datasets. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class, use the `--verbose` flag. Note that `pycocotools` metrics may be ~1% better than the equivalent repo metrics due to slight differences in mAP computation." ] }, { "cell_type": "markdown", "metadata": { "id": "eyTZYGgRjnMc" }, "source": [ "## COCO val\n", "Download the [COCO val 2017](https://github.com/ultralytics/yolov5/blob/74b34872fdf41941cddcf243951cdb090fbac17b/data/coco.yaml#L14) dataset (1GB, 5000 images), and test model accuracy." ] }, { "cell_type": "code", "metadata": { "id": "WQPtK1QYVaD_", "colab": { "base_uri": "https://localhost:8080/", "height": 49, "referenced_widgets": [ "c31d2039ccf74c22b67841f4877d1186", "d4bba1727c714d94ad58a72bffa07c4c", "9aeff9f1780b45f892422fdc96e56913", "bf55a7c71d074d3fa88b10b997820825", "d8b66044e2fb4f5b916696834d880c81", "102e1deda239436fa72751c58202fa0f", "4fd4431ced6c42368e18424912b877e4", "cdd709c4f40941bea1b2053523c9fac8", "a1ef2d8de2b741c78ca5d938e2ddbcdf", "0dbce99bb6184238842cbec0587d564a", "91ff5f93f2a24c5790ab29e347965946" ] }, "outputId": "a9004b06-37a6-41ed-a1f2-ac956f3963b3" }, "source": [ "# Download COCO val\n", "torch.hub.download_url_to_file('https://ultralytics.com/assets/coco2017val.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../datasets && rm tmp.zip" ], "execution_count": null, "outputs": [ { "output_type": "display_data", "data": { "text/plain": [ "  0%|          | 0.00/780M [00:00<?, ?B/s]" ] }, "metadata": {} } ] }
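, { "cell_type": "markdown", "metadata": {}, "source": [ "Then validate accuracy. For example, run YOLOv5s on COCO val at image size 640 (a representative command; `--half` enables FP16 inference on GPU, and weights download automatically):" ] }, { "cell_type": "code", "metadata": {}, "source": [ "# Validate YOLOv5s on COCO val (representative command)\n", "!python val.py --weights yolov5s.pt --data coco.yaml --img 640 --half" ], "execution_count": null, "outputs": [] }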
, { "cell_type": "markdown", "metadata": {}, "source": [ "# 3. Train\n", "\n", "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package.\n",
\n", "\n", "Train a YOLOv5s model on the [COCO128](https://www.kaggle.com/ultralytics/coco128) dataset with `--data coco128.yaml`, starting from pretrained `--weights yolov5s.pt`, or from randomly initialized `--weights '' --cfg yolov5s.yaml`.\n", "\n", "- **Pretrained [Models](https://github.com/ultralytics/yolov5/tree/master/models)** are downloaded\n", "automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)\n", "- **[Datasets](https://github.com/ultralytics/yolov5/tree/master/data)** available for autodownload include: [COCO](https://github.com/ultralytics/yolov5/blob/master/data/coco.yaml), [COCO128](https://github.com/ultralytics/yolov5/blob/master/data/coco128.yaml), [VOC](https://github.com/ultralytics/yolov5/blob/master/data/VOC.yaml), [Argoverse](https://github.com/ultralytics/yolov5/blob/master/data/Argoverse.yaml), [VisDrone](https://github.com/ultralytics/yolov5/blob/master/data/VisDrone.yaml), [GlobalWheat](https://github.com/ultralytics/yolov5/blob/master/data/GlobalWheat2020.yaml), [xView](https://github.com/ultralytics/yolov5/blob/master/data/xView.yaml), [Objects365](https://github.com/ultralytics/yolov5/blob/master/data/Objects365.yaml), [SKU-110K](https://github.com/ultralytics/yolov5/blob/master/data/SKU-110K.yaml).\n", "- **Training Results** are saved to `runs/train/` with incrementing run directories, i.e. `runs/train/exp2`, `runs/train/exp3` etc.\n", "
\n", "\n", "A **Mosaic Dataloader** is used for training which combines 4 images into 1 mosaic.\n", "\n", "## Train on Custom Data with Roboflow 🌟 NEW\n", "\n", "[Roboflow](https://roboflow.com/?ref=ultralytics) enables you to easily **organize, label, and prepare** a high quality dataset with your own custom data. Roboflow also makes it easy to establish an active learning pipeline, collaborate with your team on dataset improvement, and integrate directly into your model building workflow with the `roboflow` pip package.\n", "\n", "- Custom Training Example: [https://blog.roboflow.com/how-to-train-yolov5-on-a-custom-dataset/](https://blog.roboflow.com/how-to-train-yolov5-on-a-custom-dataset/?ref=ultralytics)\n", "- Custom Training Notebook: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/roboflow-ai/yolov5-custom-training-tutorial/blob/main/yolov5-custom-training.ipynb)\n", "
\n", "\n", "

Label images lightning fast (including with model-assisted labeling)" ] }, { "cell_type": "code", "metadata": { "id": "bOy5KI2ncnWd" }, "source": [ "# Tensorboard (optional)\n", "%load_ext tensorboard\n", "%tensorboard --logdir runs/train" ], "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "# ClearML (optional)\n", "%pip install -q clearml\n", "!clearml-init" ], "metadata": { "id": "DQhI6vvaRWjR" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "metadata": { "id": "2fLAV42oNb7M" }, "source": [ "# Weights & Biases (optional)\n", "%pip install -q wandb\n", "import wandb\n", "wandb.login()" ], "execution_count": null, "outputs": [] }, { "cell_type": "code", "metadata": { "id": "1NcFxRcFdJ_O", "colab": { "base_uri": "https://localhost:8080/" }, "outputId": "bce1b4bd-1a14-4c07-aebd-6c11e91ad24b" }, "source": [ "# Train YOLOv5s on COCO128 for 3 epochs\n", "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache" ], "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "\u001b[34m\u001b[1mtrain: \u001b[0mweights=yolov5s.pt, cfg=, data=coco128.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=-1, artifact_alias=latest\n", "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", "YOLOv5 🚀 v6.1-370-g20f1b7e Python-3.7.13 torch-1.12.0+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", "\n", "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n", "\u001b[34m\u001b[1mWeights & Biases: \u001b[0mrun 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs in Weights & Biases\n", "\u001b[34m\u001b[1mClearML: \u001b[0mrun 'pip install clearml' to automatically track, visualize and remotely train YOLOv5 🚀 runs in ClearML\n", "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n", "\n", "Dataset not found ⚠️, missing paths ['/content/datasets/coco128/images/train2017']\n", "Downloading https://ultralytics.com/assets/coco128.zip to coco128.zip...\n", "100% 6.66M/6.66M [00:00<00:00, 75.2MB/s]\n", "Dataset download success ✅ (0.7s), saved to \u001b[1m/content/datasets\u001b[0m\n", "\n", " from n params module arguments \n", " 0 -1 1 3520 models.common.Conv [3, 32, 6, 2, 2] \n", " 1 -1 1 18560 models.common.Conv [32, 64, 3, 2] \n", " 2 -1 1 18816 models.common.C3 [64, 64, 1] \n", " 3 -1 1 73984 models.common.Conv [64, 128, 3, 2] \n", " 4 -1 2 115712 models.common.C3 [128, 128, 2] \n", " 5 -1 1 295424 models.common.Conv [128, 256, 3, 2] \n", " 6 -1 3 625152 models.common.C3 [256, 256, 3] \n", " 7 -1 1 1180672 models.common.Conv [256, 512, 3, 2] \n", " 8 
-1 1 1182720 models.common.C3 [512, 512, 1] \n", " 9 -1 1 656896 models.common.SPPF [512, 512, 5] \n", " 10 -1 1 131584 models.common.Conv [512, 256, 1, 1] \n", " 11 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n", " 12 [-1, 6] 1 0 models.common.Concat [1] \n", " 13 -1 1 361984 models.common.C3 [512, 256, 1, False] \n", " 14 -1 1 33024 models.common.Conv [256, 128, 1, 1] \n", " 15 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n", " 16 [-1, 4] 1 0 models.common.Concat [1] \n", " 17 -1 1 90880 models.common.C3 [256, 128, 1, False] \n", " 18 -1 1 147712 models.common.Conv [128, 128, 3, 2] \n", " 19 [-1, 14] 1 0 models.common.Concat [1] \n", " 20 -1 1 296448 models.common.C3 [256, 256, 1, False] \n", " 21 -1 1 590336 models.common.Conv [256, 256, 3, 2] \n", " 22 [-1, 10] 1 0 models.common.Concat [1] \n", " 23 -1 1 1182720 models.common.C3 [512, 512, 1, False] \n", " 24 [17, 20, 23] 1 229245 models.yolo.Detect [80, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n", "Model summary: 270 layers, 7235389 parameters, 7235389 gradients, 16.6 GFLOPs\n", "\n", "Transferred 349/349 items from yolov5s.pt\n", "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n", "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n", "\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(always_apply=False, p=0.01, blur_limit=(3, 7)), MedianBlur(always_apply=False, p=0.01, blur_limit=(3, 7)), ToGray(always_apply=False, p=0.01), CLAHE(always_apply=False, p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n", "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '/content/datasets/coco128/labels/train2017' images and labels...128 found, 0 missing, 2 empty, 0 corrupt: 100% 128/128 [00:00<00:00, 7926.40it/s]\n", "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /content/datasets/coco128/labels/train2017.cache\n", "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 975.81it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupt: 100% 128/128\n" ] } ] }
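, { "cell_type": "markdown", "metadata": {}, "source": [ "Training metrics are logged to `runs/train/exp/results.csv` and can be plotted with the bundled `plot_results` helper (a representative snippet; the `exp` path assumes this is the first run in `runs/train`):" ] }, { "cell_type": "code", "metadata": {}, "source": [ "# Plot runs/train/exp/results.csv as results.png (path assumes the first training run)\n", "from utils.plots import plot_results\n", "plot_results('runs/train/exp/results.csv')  # saves results.png next to the CSV\n", "display.Image(filename='runs/train/exp/results.png', width=800)" ], "execution_count": null, "outputs": [] }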
\n", "\n", "\n", "\"Weights" ] }, { "cell_type": "markdown", "metadata": { "id": "-WPvRbS5Swl6" }, "source": [ "## Local Logging\n", "\n", "Training results are automatically logged with [Tensorboard](https://www.tensorflow.org/tensorboard) and [CSV](https://github.com/ultralytics/yolov5/pull/4148) loggers to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc.\n", "\n", "This directory contains train and val statistics, mosaics, labels, predictions and augmentated mosaics, as well as metrics and charts including precision-recall (PR) curves and confusion matrices. \n", "\n", "\"Local\n" ] }, { "cell_type": "markdown", "metadata": { "id": "Zelyeqbyt3GD" }, "source": [ "# Environments\n", "\n", "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n", "\n", "- **Google Colab and Kaggle** notebooks with free GPU: \"Open \"Open\n", "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)\n", "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)\n", "- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) \"Docker\n" ] }, { "cell_type": "markdown", "metadata": { "id": "6Qu7Iesl0p54" }, "source": [ "# Status\n", "\n", "![CI CPU testing](https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg)\n", "\n", "If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit.\n" ] }, { "cell_type": "markdown", "metadata": { "id": "IEijrePND_2I" }, "source": [ "# Appendix\n", "\n", "Additional content below for PyTorch Hub, CI, reproducing results, profiling speeds, VOC training, classification training and TensorRT example." ] }, { "cell_type": "code", "metadata": { "id": "GMusP4OAxFu6" }, "source": [ "import torch\n", "\n", "# PyTorch Hub Model\n", "model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # or yolov5n - yolov5x6, custom\n", "\n", "# Images\n", "img = 'https://ultralytics.com/images/zidane.jpg' # or file, Path, PIL, OpenCV, numpy, list\n", "\n", "# Inference\n", "results = model(img)\n", "\n", "# Results\n", "results.print() # or .show(), .save(), .crop(), .pandas(), etc." 
], "execution_count": null, "outputs": [] }, { "cell_type": "code", "metadata": { "id": "FGH0ZjkGjejy" }, "source": [ "# YOLOv5 CI\n", "%%shell\n", "rm -rf runs # remove runs/\n", "m=yolov5n # official weights\n", "b=runs/train/exp/weights/best # best.pt checkpoint\n", "python train.py --imgsz 64 --batch 32 --weights $m.pt --cfg $m.yaml --epochs 1 --device 0 # train\n", "for d in 0 cpu; do # devices\n", " for w in $m $b; do # weights\n", " python val.py --imgsz 64 --batch 32 --weights $w.pt --device $d # val\n", " python detect.py --imgsz 64 --weights $w.pt --device $d # detect\n", " done\n", "done\n", "python hubconf.py --model $m # hub\n", "python models/tf.py --weights $m.pt # build TF model\n", "python models/yolo.py --cfg $m.yaml # build PyTorch model\n", "python export.py --weights $m.pt --img 64 --include torchscript # export" ], "execution_count": null, "outputs": [] }, { "cell_type": "code", "metadata": { "id": "mcKoSIK2WSzj" }, "source": [ "# Reproduce\n", "for x in (f'yolov5{x}' for x in 'nsmlx'):\n", " !python val.py --weights {x}.pt --data coco.yaml --img 640 --task speed # speed\n", " !python val.py --weights {x}.pt --data coco.yaml --img 640 --conf 0.001 --iou 0.65 # mAP" ], "execution_count": null, "outputs": [] }, { "cell_type": "code", "metadata": { "id": "gogI-kwi3Tye" }, "source": [ "# Profile\n", "from utils.torch_utils import profile\n", "\n", "m1 = lambda x: x * torch.sigmoid(x)\n", "m2 = torch.nn.SiLU()\n", "results = profile(input=torch.randn(16, 3, 640, 640), ops=[m1, m2], n=100)" ], "execution_count": null, "outputs": [] }, { "cell_type": "code", "metadata": { "id": "BSgFCAcMbk1R" }, "source": [ "# VOC\n", "for b, m in zip([64, 64, 64, 32, 16], [f'yolov5{x}' for x in 'nsmlx']): # batch, model\n", " !python train.py --batch {b} --weights {m}.pt --data VOC.yaml --epochs 50 --img 512 --hyp hyp.VOC.yaml --project VOC --name {m} --cache" ], "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "# Classification\n", "for m in [*(f'yolov5{x}.pt' for x in 'nsmlx'), 'resnet50.pt', 'efficientnet_b0.pt']:\n", " for d in 'mnist', 'fashion-mnist', 'cifar10', 'cifar100', 'imagenette160', 'imagenette320', 'imagenette', 'imagewoof160', 'imagewoof320', 'imagewoof':\n", " !python classify/train.py --model {m} --data {d} --epochs 10 --project YOLOv5-cls --name {m}-{d}" ], "metadata": { "id": "UWGH7H6yakVl" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "metadata": { "id": "VTRwsvA9u7ln" }, "source": [ "# TensorRT \n", "!pip install -U nvidia-tensorrt --index-url https://pypi.ngc.nvidia.com # install\n", "!python export.py --weights yolov5s.pt --include engine --imgsz 640 --device 0 # export\n", "!python detect.py --weights yolov5s.engine --imgsz 640 --device 0 # inference" ], "execution_count": null, "outputs": [] } ] }