From 6399d078ed8f6b416d3e6c5017d7d67c61d56fe4 Mon Sep 17 00:00:00 2001 From: kun <1819123358@qq.com> Date: Tue, 27 Jun 2023 23:11:22 +0800 Subject: [PATCH] =?UTF-8?q?=E7=9B=AE=E6=A0=87=E8=AF=86=E5=88=AB=E4=BB=A3?= =?UTF-8?q?=E7=A0=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/目标识别代码/detect.py | 260 ++++ src/目标识别代码/models/__init__.py | 0 .../__pycache__/__init__.cpython-36.pyc | Bin 0 -> 150 bytes .../__pycache__/__init__.cpython-38.pyc | Bin 0 -> 158 bytes .../__pycache__/__init__.cpython-39.pyc | Bin 0 -> 158 bytes .../models/__pycache__/common.cpython-36.pyc | Bin 0 -> 34179 bytes .../models/__pycache__/common.cpython-38.pyc | Bin 0 -> 33201 bytes .../models/__pycache__/common.cpython-39.pyc | Bin 0 -> 33189 bytes .../__pycache__/experimental.cpython-36.pyc | Bin 0 -> 4887 bytes .../__pycache__/experimental.cpython-38.pyc | Bin 0 -> 4831 bytes .../__pycache__/experimental.cpython-39.pyc | Bin 0 -> 4789 bytes .../models/__pycache__/yolo.cpython-36.pyc | Bin 0 -> 14174 bytes .../models/__pycache__/yolo.cpython-38.pyc | Bin 0 -> 14150 bytes .../models/__pycache__/yolo.cpython-39.pyc | Bin 0 -> 14135 bytes src/目标识别代码/models/common.py | 771 +++++++++++ src/目标识别代码/models/experimental.py | 107 ++ .../models/hub/anchors.yaml | 59 + .../models/hub/yolov3-spp.yaml | 51 + .../models/hub/yolov3-tiny.yaml | 41 + src/目标识别代码/models/hub/yolov3.yaml | 51 + .../models/hub/yolov5-bifpn.yaml | 48 + .../models/hub/yolov5-fpn.yaml | 42 + .../models/hub/yolov5-p2.yaml | 54 + .../models/hub/yolov5-p34.yaml | 41 + .../models/hub/yolov5-p6.yaml | 56 + .../models/hub/yolov5-p7.yaml | 67 + .../models/hub/yolov5-panet.yaml | 48 + .../models/hub/yolov5l6.yaml | 60 + .../models/hub/yolov5m6.yaml | 60 + .../models/hub/yolov5n6.yaml | 60 + .../models/hub/yolov5s-ghost.yaml | 48 + .../models/hub/yolov5s-transformer.yaml | 48 + .../models/hub/yolov5s6.yaml | 60 + .../models/hub/yolov5x6.yaml | 60 + src/目标识别代码/models/tf.py | 574 ++++++++ 
src/目标识别代码/models/yolo.py | 360 +++++ src/目标识别代码/models/yolov5l.yaml | 48 + src/目标识别代码/models/yolov5m.yaml | 48 + src/目标识别代码/models/yolov5n.yaml | 48 + src/目标识别代码/models/yolov5s.yaml | 48 + src/目标识别代码/models/yolov5x.yaml | 48 + src/目标识别代码/test.py | 300 +++++ src/目标识别代码/utils/__init__.py | 36 + .../utils/__pycache__/__init__.cpython-36.pyc | Bin 0 -> 1051 bytes .../utils/__pycache__/__init__.cpython-38.pyc | Bin 0 -> 1043 bytes .../utils/__pycache__/__init__.cpython-39.pyc | Bin 0 -> 1043 bytes .../__pycache__/augmentations.cpython-36.pyc | Bin 0 -> 11192 bytes .../__pycache__/augmentations.cpython-38.pyc | Bin 0 -> 11091 bytes .../__pycache__/augmentations.cpython-39.pyc | Bin 0 -> 11122 bytes .../__pycache__/autoanchor.cpython-36.pyc | Bin 0 -> 6527 bytes .../__pycache__/autoanchor.cpython-38.pyc | Bin 0 -> 6475 bytes .../__pycache__/autoanchor.cpython-39.pyc | Bin 0 -> 6492 bytes .../__pycache__/autobatch.cpython-36.pyc | Bin 0 -> 2199 bytes .../__pycache__/autobatch.cpython-38.pyc | Bin 0 -> 2234 bytes .../__pycache__/callbacks.cpython-36.pyc | Bin 0 -> 2433 bytes .../__pycache__/callbacks.cpython-38.pyc | Bin 0 -> 2434 bytes .../__pycache__/dataloaders.cpython-36.pyc | Bin 0 -> 40414 bytes .../__pycache__/dataloaders.cpython-38.pyc | Bin 0 -> 40099 bytes .../__pycache__/dataloaders.cpython-39.pyc | Bin 0 -> 40234 bytes .../__pycache__/downloads.cpython-36.pyc | Bin 0 -> 5012 bytes .../__pycache__/downloads.cpython-38.pyc | Bin 0 -> 5057 bytes .../__pycache__/downloads.cpython-39.pyc | Bin 0 -> 5110 bytes .../utils/__pycache__/general.cpython-36.pyc | Bin 0 -> 37428 bytes .../utils/__pycache__/general.cpython-38.pyc | Bin 0 -> 37487 bytes .../utils/__pycache__/general.cpython-39.pyc | Bin 0 -> 37130 bytes .../utils/__pycache__/loss.cpython-36.pyc | Bin 0 -> 6467 bytes .../utils/__pycache__/loss.cpython-38.pyc | Bin 0 -> 6359 bytes .../utils/__pycache__/metrics.cpython-36.pyc | Bin 0 -> 11611 bytes .../utils/__pycache__/metrics.cpython-38.pyc | Bin 0 -> 11555 
bytes .../utils/__pycache__/metrics.cpython-39.pyc | Bin 0 -> 11596 bytes .../utils/__pycache__/plots.cpython-36.pyc | Bin 0 -> 20308 bytes .../utils/__pycache__/plots.cpython-38.pyc | Bin 0 -> 20135 bytes .../utils/__pycache__/plots.cpython-39.pyc | Bin 0 -> 20090 bytes .../__pycache__/torch_utils.cpython-36.pyc | Bin 0 -> 16698 bytes .../__pycache__/torch_utils.cpython-38.pyc | Bin 0 -> 17192 bytes .../__pycache__/torch_utils.cpython-39.pyc | Bin 0 -> 16748 bytes src/目标识别代码/utils/activations.py | 103 ++ src/目标识别代码/utils/augmentations.py | 348 +++++ src/目标识别代码/utils/autoanchor.py | 170 +++ src/目标识别代码/utils/autobatch.py | 66 + src/目标识别代码/utils/aws/__init__.py | 0 src/目标识别代码/utils/aws/mime.sh | 26 + src/目标识别代码/utils/aws/resume.py | 40 + src/目标识别代码/utils/aws/userdata.sh | 27 + src/目标识别代码/utils/benchmarks.py | 157 +++ src/目标识别代码/utils/callbacks.py | 71 + src/目标识别代码/utils/dataloaders.py | 1156 +++++++++++++++++ .../utils/docker/Dockerfile | 68 + .../utils/docker/Dockerfile-arm64 | 42 + .../utils/docker/Dockerfile-cpu | 39 + src/目标识别代码/utils/downloads.py | 180 +++ .../utils/flask_rest_api/README.md | 73 ++ .../utils/flask_rest_api/example_request.py | 19 + .../utils/flask_rest_api/restapi.py | 48 + src/目标识别代码/utils/general.py | 1050 +++++++++++++++ .../utils/google_app_engine/Dockerfile | 25 + .../additional_requirements.txt | 4 + .../utils/google_app_engine/app.yaml | 14 + .../utils/loggers/__init__.py | 308 +++++ .../__pycache__/__init__.cpython-36.pyc | Bin 0 -> 11519 bytes .../__pycache__/__init__.cpython-38.pyc | Bin 0 -> 11337 bytes .../utils/loggers/clearml/README.md | 222 ++++ .../utils/loggers/clearml/__init__.py | 0 .../__pycache__/__init__.cpython-36.pyc | Bin 0 -> 165 bytes .../__pycache__/__init__.cpython-38.pyc | Bin 0 -> 173 bytes .../__pycache__/clearml_utils.cpython-36.pyc | Bin 0 -> 5475 bytes .../__pycache__/clearml_utils.cpython-38.pyc | Bin 0 -> 5532 bytes .../utils/loggers/clearml/clearml_utils.py | 156 +++ .../utils/loggers/clearml/hpo.py | 84 ++ 
.../utils/loggers/wandb/README.md | 162 +++ .../utils/loggers/wandb/__init__.py | 0 .../wandb/__pycache__/__init__.cpython-36.pyc | Bin 0 -> 163 bytes .../wandb/__pycache__/__init__.cpython-38.pyc | Bin 0 -> 171 bytes .../__pycache__/wandb_utils.cpython-36.pyc | Bin 0 -> 19631 bytes .../__pycache__/wandb_utils.cpython-38.pyc | Bin 0 -> 19699 bytes .../utils/loggers/wandb/log_dataset.py | 27 + .../utils/loggers/wandb/sweep.py | 41 + .../utils/loggers/wandb/sweep.yaml | 143 ++ .../utils/loggers/wandb/wandb_utils.py | 584 +++++++++ src/目标识别代码/utils/loss.py | 234 ++++ src/目标识别代码/utils/metrics.py | 364 ++++++ src/目标识别代码/utils/plots.py | 522 ++++++++ src/目标识别代码/utils/torch_utils.py | 454 +++++++ 123 files changed, 10629 insertions(+) create mode 100644 src/目标识别代码/detect.py create mode 100644 src/目标识别代码/models/__init__.py create mode 100644 src/目标识别代码/models/__pycache__/__init__.cpython-36.pyc create mode 100644 src/目标识别代码/models/__pycache__/__init__.cpython-38.pyc create mode 100644 src/目标识别代码/models/__pycache__/__init__.cpython-39.pyc create mode 100644 src/目标识别代码/models/__pycache__/common.cpython-36.pyc create mode 100644 src/目标识别代码/models/__pycache__/common.cpython-38.pyc create mode 100644 src/目标识别代码/models/__pycache__/common.cpython-39.pyc create mode 100644 src/目标识别代码/models/__pycache__/experimental.cpython-36.pyc create mode 100644 src/目标识别代码/models/__pycache__/experimental.cpython-38.pyc create mode 100644 src/目标识别代码/models/__pycache__/experimental.cpython-39.pyc create mode 100644 src/目标识别代码/models/__pycache__/yolo.cpython-36.pyc create mode 100644 src/目标识别代码/models/__pycache__/yolo.cpython-38.pyc create mode 100644 src/目标识别代码/models/__pycache__/yolo.cpython-39.pyc create mode 100644 src/目标识别代码/models/common.py create mode 100644 src/目标识别代码/models/experimental.py create mode 100644 src/目标识别代码/models/hub/anchors.yaml create mode 100644 src/目标识别代码/models/hub/yolov3-spp.yaml create mode 100644 src/目标识别代码/models/hub/yolov3-tiny.yaml create mode 100644 
src/目标识别代码/models/hub/yolov3.yaml create mode 100644 src/目标识别代码/models/hub/yolov5-bifpn.yaml create mode 100644 src/目标识别代码/models/hub/yolov5-fpn.yaml create mode 100644 src/目标识别代码/models/hub/yolov5-p2.yaml create mode 100644 src/目标识别代码/models/hub/yolov5-p34.yaml create mode 100644 src/目标识别代码/models/hub/yolov5-p6.yaml create mode 100644 src/目标识别代码/models/hub/yolov5-p7.yaml create mode 100644 src/目标识别代码/models/hub/yolov5-panet.yaml create mode 100644 src/目标识别代码/models/hub/yolov5l6.yaml create mode 100644 src/目标识别代码/models/hub/yolov5m6.yaml create mode 100644 src/目标识别代码/models/hub/yolov5n6.yaml create mode 100644 src/目标识别代码/models/hub/yolov5s-ghost.yaml create mode 100644 src/目标识别代码/models/hub/yolov5s-transformer.yaml create mode 100644 src/目标识别代码/models/hub/yolov5s6.yaml create mode 100644 src/目标识别代码/models/hub/yolov5x6.yaml create mode 100644 src/目标识别代码/models/tf.py create mode 100644 src/目标识别代码/models/yolo.py create mode 100644 src/目标识别代码/models/yolov5l.yaml create mode 100644 src/目标识别代码/models/yolov5m.yaml create mode 100644 src/目标识别代码/models/yolov5n.yaml create mode 100644 src/目标识别代码/models/yolov5s.yaml create mode 100644 src/目标识别代码/models/yolov5x.yaml create mode 100644 src/目标识别代码/test.py create mode 100644 src/目标识别代码/utils/__init__.py create mode 100644 src/目标识别代码/utils/__pycache__/__init__.cpython-36.pyc create mode 100644 src/目标识别代码/utils/__pycache__/__init__.cpython-38.pyc create mode 100644 src/目标识别代码/utils/__pycache__/__init__.cpython-39.pyc create mode 100644 src/目标识别代码/utils/__pycache__/augmentations.cpython-36.pyc create mode 100644 src/目标识别代码/utils/__pycache__/augmentations.cpython-38.pyc create mode 100644 src/目标识别代码/utils/__pycache__/augmentations.cpython-39.pyc create mode 100644 src/目标识别代码/utils/__pycache__/autoanchor.cpython-36.pyc create mode 100644 src/目标识别代码/utils/__pycache__/autoanchor.cpython-38.pyc create mode 100644 src/目标识别代码/utils/__pycache__/autoanchor.cpython-39.pyc create mode 100644 
src/目标识别代码/utils/__pycache__/autobatch.cpython-36.pyc create mode 100644 src/目标识别代码/utils/__pycache__/autobatch.cpython-38.pyc create mode 100644 src/目标识别代码/utils/__pycache__/callbacks.cpython-36.pyc create mode 100644 src/目标识别代码/utils/__pycache__/callbacks.cpython-38.pyc create mode 100644 src/目标识别代码/utils/__pycache__/dataloaders.cpython-36.pyc create mode 100644 src/目标识别代码/utils/__pycache__/dataloaders.cpython-38.pyc create mode 100644 src/目标识别代码/utils/__pycache__/dataloaders.cpython-39.pyc create mode 100644 src/目标识别代码/utils/__pycache__/downloads.cpython-36.pyc create mode 100644 src/目标识别代码/utils/__pycache__/downloads.cpython-38.pyc create mode 100644 src/目标识别代码/utils/__pycache__/downloads.cpython-39.pyc create mode 100644 src/目标识别代码/utils/__pycache__/general.cpython-36.pyc create mode 100644 src/目标识别代码/utils/__pycache__/general.cpython-38.pyc create mode 100644 src/目标识别代码/utils/__pycache__/general.cpython-39.pyc create mode 100644 src/目标识别代码/utils/__pycache__/loss.cpython-36.pyc create mode 100644 src/目标识别代码/utils/__pycache__/loss.cpython-38.pyc create mode 100644 src/目标识别代码/utils/__pycache__/metrics.cpython-36.pyc create mode 100644 src/目标识别代码/utils/__pycache__/metrics.cpython-38.pyc create mode 100644 src/目标识别代码/utils/__pycache__/metrics.cpython-39.pyc create mode 100644 src/目标识别代码/utils/__pycache__/plots.cpython-36.pyc create mode 100644 src/目标识别代码/utils/__pycache__/plots.cpython-38.pyc create mode 100644 src/目标识别代码/utils/__pycache__/plots.cpython-39.pyc create mode 100644 src/目标识别代码/utils/__pycache__/torch_utils.cpython-36.pyc create mode 100644 src/目标识别代码/utils/__pycache__/torch_utils.cpython-38.pyc create mode 100644 src/目标识别代码/utils/__pycache__/torch_utils.cpython-39.pyc create mode 100644 src/目标识别代码/utils/activations.py create mode 100644 src/目标识别代码/utils/augmentations.py create mode 100644 src/目标识别代码/utils/autoanchor.py create mode 100644 src/目标识别代码/utils/autobatch.py create mode 100644 src/目标识别代码/utils/aws/__init__.py create mode 100644 
src/目标识别代码/utils/aws/mime.sh create mode 100644 src/目标识别代码/utils/aws/resume.py create mode 100644 src/目标识别代码/utils/aws/userdata.sh create mode 100644 src/目标识别代码/utils/benchmarks.py create mode 100644 src/目标识别代码/utils/callbacks.py create mode 100644 src/目标识别代码/utils/dataloaders.py create mode 100644 src/目标识别代码/utils/docker/Dockerfile create mode 100644 src/目标识别代码/utils/docker/Dockerfile-arm64 create mode 100644 src/目标识别代码/utils/docker/Dockerfile-cpu create mode 100644 src/目标识别代码/utils/downloads.py create mode 100644 src/目标识别代码/utils/flask_rest_api/README.md create mode 100644 src/目标识别代码/utils/flask_rest_api/example_request.py create mode 100644 src/目标识别代码/utils/flask_rest_api/restapi.py create mode 100644 src/目标识别代码/utils/general.py create mode 100644 src/目标识别代码/utils/google_app_engine/Dockerfile create mode 100644 src/目标识别代码/utils/google_app_engine/additional_requirements.txt create mode 100644 src/目标识别代码/utils/google_app_engine/app.yaml create mode 100644 src/目标识别代码/utils/loggers/__init__.py create mode 100644 src/目标识别代码/utils/loggers/__pycache__/__init__.cpython-36.pyc create mode 100644 src/目标识别代码/utils/loggers/__pycache__/__init__.cpython-38.pyc create mode 100644 src/目标识别代码/utils/loggers/clearml/README.md create mode 100644 src/目标识别代码/utils/loggers/clearml/__init__.py create mode 100644 src/目标识别代码/utils/loggers/clearml/__pycache__/__init__.cpython-36.pyc create mode 100644 src/目标识别代码/utils/loggers/clearml/__pycache__/__init__.cpython-38.pyc create mode 100644 src/目标识别代码/utils/loggers/clearml/__pycache__/clearml_utils.cpython-36.pyc create mode 100644 src/目标识别代码/utils/loggers/clearml/__pycache__/clearml_utils.cpython-38.pyc create mode 100644 src/目标识别代码/utils/loggers/clearml/clearml_utils.py create mode 100644 src/目标识别代码/utils/loggers/clearml/hpo.py create mode 100644 src/目标识别代码/utils/loggers/wandb/README.md create mode 100644 src/目标识别代码/utils/loggers/wandb/__init__.py create mode 100644 src/目标识别代码/utils/loggers/wandb/__pycache__/__init__.cpython-36.pyc create 
mode 100644 src/目标识别代码/utils/loggers/wandb/__pycache__/__init__.cpython-38.pyc create mode 100644 src/目标识别代码/utils/loggers/wandb/__pycache__/wandb_utils.cpython-36.pyc create mode 100644 src/目标识别代码/utils/loggers/wandb/__pycache__/wandb_utils.cpython-38.pyc create mode 100644 src/目标识别代码/utils/loggers/wandb/log_dataset.py create mode 100644 src/目标识别代码/utils/loggers/wandb/sweep.py create mode 100644 src/目标识别代码/utils/loggers/wandb/sweep.yaml create mode 100644 src/目标识别代码/utils/loggers/wandb/wandb_utils.py create mode 100644 src/目标识别代码/utils/loss.py create mode 100644 src/目标识别代码/utils/metrics.py create mode 100644 src/目标识别代码/utils/plots.py create mode 100644 src/目标识别代码/utils/torch_utils.py diff --git a/src/目标识别代码/detect.py b/src/目标识别代码/detect.py new file mode 100644 index 00000000..0a88608d --- /dev/null +++ b/src/目标识别代码/detect.py @@ -0,0 +1,260 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Run inference on images, videos, directories, streams, etc. + +Usage - sources: + $ python path/to/detect.py --weights yolov5s.pt --source 0 # webcam + img.jpg # image + vid.mp4 # video + path/ # directory + path/*.jpg # glob + 'https://youtu.be/Zgi9g1ksQHc' # YouTube + 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream + +Usage - formats: + $ python path/to/detect.py --weights yolov5s.pt # PyTorch + yolov5s.torchscript # TorchScript + yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn + yolov5s.xml # OpenVINO + yolov5s.engine # TensorRT + yolov5s.mlmodel # CoreML (macOS-only) + yolov5s_saved_model # TensorFlow SavedModel + yolov5s.pb # TensorFlow GraphDef + yolov5s.tflite # TensorFlow Lite + yolov5s_edgetpu.tflite # TensorFlow Edge TPU +""" + +import argparse +import os +import platform +import sys +from pathlib import Path + +import torch +import torch.backends.cudnn as cudnn + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[0] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = 
Path(os.path.relpath(ROOT, Path.cwd())) # relative + +from models.common import DetectMultiBackend +from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams +from utils.general import (LOGGER, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2, + increment_path, non_max_suppression, print_args, scale_coords, strip_optimizer, xyxy2xywh) +from utils.plots import Annotator, colors, save_one_box +from utils.torch_utils import select_device, smart_inference_mode, time_sync + + +@smart_inference_mode() +def run( + weights=ROOT / 'yolov5s.pt', # model.pt path(s) + source=ROOT / 'data/images', # file/dir/URL/glob, 0 for webcam + data=ROOT / 'data/coco128.yaml', # dataset.yaml path + imgsz=(640, 640), # inference size (height, width) + conf_thres=0.25, # confidence threshold + iou_thres=0.45, # NMS IOU threshold + max_det=1000, # maximum detections per image + device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu + view_img=False, # show results + save_txt=False, # save results to *.txt + save_conf=False, # save confidences in --save-txt labels + save_crop=False, # save cropped prediction boxes + nosave=False, # do not save images/videos + classes=None, # filter by class: --class 0, or --class 0 2 3 + agnostic_nms=False, # class-agnostic NMS + augment=False, # augmented inference + visualize=False, # visualize features + update=False, # update all models + project=ROOT / 'runs/detect', # save results to project/name + name='exp', # save results to project/name + exist_ok=False, # existing project/name ok, do not increment + line_thickness=3, # bounding box thickness (pixels) + hide_labels=False, # hide labels + hide_conf=False, # hide confidences + half=False, # use FP16 half-precision inference + dnn=False, # use OpenCV DNN for ONNX inference +): + source = str(source) + save_img = not nosave and not source.endswith('.txt') # save inference images + is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) + is_url = 
source.lower().startswith(('rtsp://'))#, 'rtmp://', 'http://', 'https://')) + webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file) + if is_url and is_file: + source = check_file(source) # download + + # Directories + save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run + (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir + + # Load model + device = select_device(device) + model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) + stride, names, pt = model.stride, model.names, model.pt + imgsz = check_img_size(imgsz, s=stride) # check image size + + # Dataloader + if webcam: + view_img = check_imshow() + cudnn.benchmark = True # set True to speed up constant image size inference + dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt) + bs = len(dataset) # batch_size + else: + dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt) + bs = 1 # batch_size + vid_path, vid_writer = [None] * bs, [None] * bs + + # Run inference + model.warmup(imgsz=(1 if pt else bs, 3, *imgsz)) # warmup + seen, windows, dt = 0, [], [0.0, 0.0, 0.0] + for path, im, im0s, vid_cap, s in dataset: + t1 = time_sync() + im = torch.from_numpy(im).to(device) + im = im.half() if model.fp16 else im.float() # uint8 to fp16/32 + im /= 255 # 0 - 255 to 0.0 - 1.0 + if len(im.shape) == 3: + im = im[None] # expand for batch dim + t2 = time_sync() + dt[0] += t2 - t1 + + # Inference + visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False + pred = model(im, augment=augment, visualize=visualize) + t3 = time_sync() + dt[1] += t3 - t2 + + # NMS + pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det) + dt[2] += time_sync() - t3 + + # Second-stage classifier (optional) + # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s) + + # Process predictions + for i, det 
in enumerate(pred): # per image + seen += 1 + if webcam: # batch_size >= 1 + p, im0, frame = path[i], im0s[i].copy(), dataset.count + s += f'{i}: ' + else: + p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0) + + p = Path(p) # to Path + save_path = str(save_dir / p.name) # im.jpg + txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt + s += '%gx%g ' % im.shape[2:] # print string + gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh + imc = im0.copy() if save_crop else im0 # for save_crop + annotator = Annotator(im0, line_width=line_thickness, example=str(names)) + if len(det): + # Rescale boxes from img_size to im0 size + det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round() + + # Print results + for c in det[:, -1].unique(): + n = (det[:, -1] == c).sum() # detections per class + s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string + + # Write results + for *xyxy, conf, cls in reversed(det): + if save_txt: # Write to file + xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh + line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format + with open(f'{txt_path}.txt', 'a') as f: + f.write(('%g ' * len(line)).rstrip() % line + '\n') + + if save_img or save_crop or view_img: # Add bbox to image + c = int(cls) # integer class + label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') + annotator.box_label(xyxy, label, color=colors(c, True)) + + if save_crop: + save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) + + # Stream results + im0 = annotator.result() + if view_img: + if platform.system() == 'Linux' and p not in windows: + windows.append(p) + cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux) + cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0]) + cv2.imshow(str(p), im0) + cv2.waitKey(1) # 1 
millisecond + + # Save results (image with detections) + if save_img: + if dataset.mode == 'image': + cv2.imwrite(save_path, im0) + else: # 'video' or 'stream' + if vid_path[i] != save_path: # new video + vid_path[i] = save_path + if isinstance(vid_writer[i], cv2.VideoWriter): + vid_writer[i].release() # release previous video writer + if vid_cap: # video + fps = vid_cap.get(cv2.CAP_PROP_FPS) + w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + else: # stream + fps, w, h = 30, im0.shape[1], im0.shape[0] + save_path = str(Path(save_path).with_suffix('.mp4')) # force *.mp4 suffix on results videos + vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) + vid_writer[i].write(im0) + + # Print time (inference-only) + LOGGER.info(f'{s}Done. ({t3 - t2:.3f}s)') + + # Print results + t = tuple(x / seen * 1E3 for x in dt) # speeds per image + LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) + if save_txt or save_img: + s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' + LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") + if update: + strip_optimizer(weights[0]) # update model (to fix SourceChangeWarning) + + +def parse_opt(): + parser = argparse.ArgumentParser() + parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model path(s)') + parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob, 0 for webcam') + parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path') + parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w') + parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold') + parser.add_argument('--iou-thres', type=float, 
default=0.45, help='NMS IoU threshold') + parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image') + parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') + parser.add_argument('--view-img', action='store_true', help='show results') + parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') + parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') + parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes') + parser.add_argument('--nosave', action='store_true', help='do not save images/videos') + parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3') + parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS') + parser.add_argument('--augment', action='store_true', help='augmented inference') + parser.add_argument('--visualize', action='store_true', help='visualize features') + parser.add_argument('--update', action='store_true', help='update all models') + parser.add_argument('--project', default=ROOT / 'runs/detect', help='save results to project/name') + parser.add_argument('--name', default='exp', help='save results to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + + parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)') + parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels') + parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences') + + parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') + parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') + opt = parser.parse_args() + opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand 
+ print_args(vars(opt)) + return opt + + +def main(opt): + check_requirements(exclude=('tensorboard', 'thop')) + run(**vars(opt)) + + +if __name__ == "__main__": + opt = parse_opt() + main(opt) diff --git a/src/目标识别代码/models/__init__.py b/src/目标识别代码/models/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/目标识别代码/models/__pycache__/__init__.cpython-36.pyc b/src/目标识别代码/models/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..721e8d870e3d688dd4da6d4d13a890f0d262710e GIT binary patch literal 150 zcmXr!<>h+1`%e-B5IhDEFu(|8H~?`m3y?@*2xib^^jpbL1QJFNUqLEdtzus6UH@eF zwx`>dK3~!Oa^cSBv)9EuoxSgAbN`F=ZBIK^$5iI$Q1n&}zEg`kf~ULxB!TG1AOaaM0yz#qT+9L_QW%06G#UL?G8BP?5yV%J3RkO`7kk$~ z*}d)Q_NC8PbiZ7<^ZD#`F;8djd)nOpVtw1wj@2=h`8oMzrn+W&MlrehDXBTdG4b)4 bd6^~g@p=W7w>WHof~7gBb|Aw)12F>tKUpw$ literal 0 HcmV?d00001 diff --git a/src/目标识别代码/models/__pycache__/__init__.cpython-39.pyc b/src/目标识别代码/models/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ff1b438f2a104a01ba9666b4cc0324fdb7d9ce77 GIT binary patch literal 158 zcmYe~<>g`kf~ULxB!TG1AOaaM0yz#qT+9L_QW%06G#UL?G8BP?5yV%J3RkO`7kk$~ z*}d)Q_NC8PbiZ7<^ZD#`F;8djd)nOpVtw1wj@2=h`8oMzrn+W&MlrehDXBTdG4b)4 bd6^~g@p=W7w>WHof~7gBb|Aw)12F>tM!_(H literal 0 HcmV?d00001 diff --git a/src/目标识别代码/models/__pycache__/common.cpython-36.pyc b/src/目标识别代码/models/__pycache__/common.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..52e8692c1e58b98524aca3c369a67a2ba1c7704b GIT binary patch literal 34179 zcmd6Q3v?XUdEUFF5L{l%4f<&8wXpwq&BYC~VEWlmv1H3aJ zf$glV7_v<}p+%?hqqgP5PU6_k!-?yrZ5+F<<8zv%PMRh?Njq_STqlaHoSqyvCvj5c z)$jZ7%%N<4DO{+mN!Y zd>nP(jpyyw4m|IW=X>zH)7pjSUGjV{o_AZf;Q1DLz7Nm0TH|;g_e;DVDSNEjkaC-! 
zasnxPt$j$@=ck-R%7nEaDf|7DQ%E^r9Yo4OKji_W9I_50<*=VJiIgMO?MS)ZPk9h2 zN3A=Ma)+Pt5K`{6?n264e#&X29J5}Bl-KzwuSd#p>u#jny<`^NfTw$`d+~Iy|Ne|+ zo{Nm%-^yi9HtY3fBVTV?^R=>_d69rWHEw!+k0`6G%GRmMwBuzOrFz+N=I3hVal?yE zH|G|4KU;Dx;vPR;FU|5rv{awNrvv4M%1qHwrG`D%w98()R(71Sy3kx0@AndC9(nM= z2Ojlek3RCqd2e|7VtM*fQI#*xS5&!PZaB7=)oJ$p%uHp$>(@_L%F3=Z8(wOONK)rOST(l}zDt6^Ut?aF7G#kZwX`yJ(&&{c_EuUxY=~As+oNhLi zWqav`#j6(&Ei5i9GPH6sH6HcS_ct0%r{px1m(W#vS-W(lTx>SV zMfM0;I2^^2gEo!W^^$UmmBtK)t}$IMVp!3Cr&2E$?Zw9QG$@Gt@gBu>5|_OffooWi zODT1$6LBMsv1F<-7j$DR(#Tq-6@3<|ry@&6+i(rcTWsn@7BFof;2$4IrOx59M-a>+ z|I=CKwjyJZ_nDVt_{vb5@#@nN%#b;8z%v%q7CcMvSN9l`s;!`ht*2LJ`_aGAFLrXH4t%mKCFj!u+(r}Pu zT=I;$PTLeq^OAlsY_gp|5HV9m-cYw7HqFQ2FYZU|2}H^fQ0WLLbmm==8Bk#>{%jg_ zI$I(2?Jh!BgHu0CtP#MKs@Ottth5=5^1TWNMoIoc9V!_Fxij0 z$wmV4z9mdtF>7HaG0Bq{jKr*J&d=Fi>_Vkvp9lWL0nTORrHX*Eic>6lW~1RH*z7}= zj0VxOamDL9QF5j)PBm5ikmZ>d8eZ&N<;-Jh2b%9C9u{o(Ql~9oT*X-g=1x1~DKBQ1 zYcrlXeGu0nwTpQTTkV$ntU8K2usmHXPS;AdT`cNJ;{2$62sjtE44RR_++>YbKif4C zG_?o%39kbE>kx4xRbCjTs_7a_@{bTZH6E2()gfe32M~;#>JG;93_|$zVPvMMu9lR= zwg~t|j8;}!;J@f;0B2JgkV7PFKv&hKuKIh`n)M4W&dl3oW)7+z>aJSej>pwFGDt8b z;ra0-r;_9qQ<*7}V1N!Io-GzH&zEZYjSR24o8J;sy?%cRrpvWjFby0G;P`Q7k}0_r zF;1bNOUfKbq@s#x)40MvP6PjJejcI=Py*-@wPHjUK@Sp;gqJw=1UsdhJ}qd7&v@sw zb42Z(2sD0#c;Vv+Fe7dm*AoyO2;U~SjCvh%Yq*Ljl0oUH7ps$?M`x$4@r0i4JNfP| z2JDeK#z2Fk?yx{Y2#9V?yp_QTH*pp8;VUm|1g__}eIv_t675`@v~6gmA7Hq;*E zAMYAmbqWdU0j4Hjc5FR!4kTdiPykQ8!%GkghIed52Rb7i>x{Hwha`CILq3U5IpM*O z^SNipIWGJY*b87-AQ*J`*%SyqAsGZ;AUO`$i+ed4a-eR#tj?4c%W66(h4(lv-t7eh zE2zB){@0_R)G>eDJ1M02oQX~f=O#&9u%g#Ze3M#{BfTOe6LJ7U-G(NB5v0rY3uO!P zP2Ed3=Ih0a<&tHO_jRyFk>NqSsx-0C8x21ma7i8p9RWEWuBs0Ax;SSW2j~C{8`hKnTMWV~;X;BZ6087D7zf zTP;i&w;CfxYjj0leA*5&v>Kfdg8T}^;xx*?k9CmBrp#^_A^2X-xtdJ)X^TI5nx?Q}5K*CL?ENONM<__c`tl)zK+U6FSgGosc$ zn{rZaY{{VT`CgWgzQ$0)XLM?X1bP^C+M@^{f9BeTgJQ2intdFi(q&VC8a&;QVZh{U zMMlpkA||Ar)6o$rUP>RIEjP*wbLyTqu~ZX5EYjNBHAm9<5Z<~Z%;3Ww{2*@@1}K=K z&fw8Y)HJ$e&OrkqZj@?X9K?9G?3s>|W`SUd#L;0vY$sGAAYqM^!}bxTryC(J5}7=} 
zn}m^qSTq3Ps0VL)`w`*|>3)be7w`aF0A;_FRwIlND=6Yuji+L5vmI^6+Q5`5=55KS ziZQ~9E=Ai3!6Js>6HCA=c`NUiqLfbYCh5i?xsu1nrbJ87^qQ<_JSB1$ zsX@~zH@qaI(E2>ox&kt(ClGi^Xj#mpht20MdA>0&sK?}f)iW-NJlSEdT38_}5nydH zI+BS6l+Q1U!VCu!)QBvX1&k_f~6n;@_y|?J1aVVz2xAG<4?HI7VwRu=8g2|cB#yMW z8*QE$#eFcPn`pO}Q{g$;t4DeF+#inwm z=N&1;Gk^=z#nrjO3cB;IW0nEy{$3Qmjy!>=GTha!0EnV`u9U^HZjQ(kcKEpuQ3FZE6kg%q%|TRur@bf7 zot4Qw-vKHD690JT#c8)ciC`H}B?wgSj%b1p#WVn_I8YxffGM;v2v{W@q>>f%q^2B* zPuC(N5ddNEeHuA4I#rb0K0IeddF@ARfDGha=1kOzTM4j`K{wjPLUIDGNv!})0?Cr{yauHna&FvbD>P+AXORnk-?V=_alVVQuZ<}~|R6Wd3mJIvsSI>knb<=gBUg4RG%E@NWe7jL}-RdjCR1CCnyp zoy29IKtM$8MM)4VG7k~k+7O3*VJ%WkILT^?MiB}TOE7=%pBCGMrKD8@1*$m)#A1!l zO~$CY$+&oN*xaU}1x}Y7=r5tt@0p8GO%@NW#z5YRk9~P=9s9Vpw~qnRYrVwBd{T&; z#QYjQz04{(R#33}6jORIKM>4%c}ZG$32}qgsHBzZQBE>m`lM>wwrJXAl-SKqzH{$n zXqQUkU5NU<(w^`WLH#z+7t}Qc?jh6R#%9crugD}>koCY>7{+0@()=aaJim;;L^Z+! z>Rz6;bj>E|^mux?=(LO75$!z$V-3~BOCO-SkdoT?J8@|iV-ko*jZH>tI2@o5F+;%p zC+0BkW%F1kj7>wGW(BYoSK$oGG^(q}IeC)+cLm=*iz@)QlwwK`z}+0HF;5y5!Yb%{{?^eFvmc{S8L13 z!y)Xg#uU0W^PTukYaShHClqjkFG8{}1aSr7S0pj7ne_iSo0>%skbY6t`??z4gD{+7 zq4c*RLQ3yIkr3KWsxI^!PojtiG`)+SCp0}*_*RkUETin-OGx`DJ_%4LgBAw3(?h2h zk?AJUW&xl680+xqbV@O0b+n;C5X$_CZkazcgC@P`$-@h;gfAgceh8&$d?7J)qZD1B zguw*bA<&e~s<99g#AM|_!0(cnw1B@%-6r)H`Di;LkdMX!`G~R+BqZ~ixs>!Jq$nmU z)}ch9tVAJ+B;>USNk4=90TOJ*K)fOrtWZGy-$DE**e*&e5ePY0d&qXT4ZWx2;q&}| zhUjazL$>hMPvVl!!}QDN?*oWpCl8;0^yQ$4!fyygl!>~bh|2nMC~7Cc=DEZKHm}*c zNZIDpiv)Ufc9|kbtyj~V)N&rZ3cV4rd3E;u85Gn#?EUOk2ot^I-quZ7&6IY6?Gb65 zK~R-QvXsg{g)10$KKG0}i5LHR*D&;wABC&#HH%Od z{Ob|1jv%3H9%L5WWQfhl5vuPLw^H~A(+idNz~;4l0=y_1p(kK8N@JIn)m`csl|B#5)-8LW(7w{2C|cZTRY!a0QdY=bp(41(H`qS(7IZKX|c; z<$Y<`w@?`VU_v1-@Q?QpFHYg1Yd)|9yAm$~>Wby8S4~;jWTC91h;$h;VJ}@~$n*xK zwH-C1hmaat+Z3o4NPZmUeGykcK6m)N4d-b!LE;BaLj7iiTH>#=MO}n5x4c!oNc=8j zf9=GRQvAOOx(?#W=hfk4H{OICSZhkl*6l;9TSh&L%*#M~I)FH0IIk!!`$+^ER=8}> z<(aWa8_PG~%Q4tLv`Q4kvn=B;0an&)i%0M-?j)*Mz~@4e8>Km1>>eq>Z^)>dQN93e zj}3#MdXClS5r|WUSTSJff(1i20vcD;chK-xCRr5RO8PF9Rij)h+Lcz>O8_M*a3x8? 
zlftSrW&vm9q+a^r(!$whvv$Y|AdfY52&p!%QbPC-xCHLtlaUAQSqH zoI&74Vc*ocx>TOgZUkrG&Y>yU3QPGee){sN-B;MA3<3(NDcB{T+GV@j)+3{mPRm?! z&4)57mMm!5f~2|>dF}{gDlvO68JJ&4t=g6~YlsX-ADR?WlbpHr5i3Tk#5q`kAB{W- z>m9_S_>`BZ%r=^;+%tm&rZu>hxz(_0J!_q@4rYL64KT1gZe6M@=is+-1#_^K{q!Z> zF0>D)K!6FxcNzE;+!`8~cH0|(Ib`}`F?`P2l{&n#W-2w9;`QA0is5KgXgiVS1WgnC zzZ)(3O}0uH9%h06T76w@Tgf^M_0RFeP0Hd#*e|m}4lHaYNkQRPK?$@h&UvKGgcPBB zK{pBama}Ikg%qq#00=a%jsRFuF?9xrFM#AieZ>W$VZ~(SH@O1w)mu>mjB)SAO}&S~ zk1!#%8YXp+Nc|{t3dp=x?D;)>*@Hc>z4vnzLevh?hl4$U>m)y+rHJUG*N=Skxo})VVbvOCgZ%#c75Tf;0eVpldRVla=rCZQAbYPuGPr^Qc2Q-5(~G!-D#L=;uX`C- zAXf3$y17N7_oI$)vK}9z;)*Ff5XCqA>xC$+#dkchWI3gq?4`W42%ud~Srj?=M}P`} z>I5D-K*e<&NI!5s1TrTHSP(|2Q3_xom3ky7EmD(ys@7wI#o=dAoW|~MwMGDs>`D;( zm7q^-zXZHjkPW_r*CAI*L8WgEhh1F>g+v_=pg>_Bk&wWd0*l&~1wy8+S6#NNn6_Dm z(4rf!Jz|81d&c5wMCiMyq=OwpowfzA)9w8v+RRN7qYt2>KMyLS8=qpz_YXSi8j9)I zghC&Z1d=I}aY$`ewE}R&#|hr92GZ#iq6Y?O?TH!aZ;9~glqU+z|%g`MvWyD}{${Xv<*61>Ya3!dcU~+5)R7VgAKoxh4 z0mV~*C)T=6-B4-cav^RR34_O9NW#`HZ$SW%W@oZ*5)=tBe~T-iVi|#tqJ!ElKH-UW(q7AS@uMbXZ*d$HM4r4i8le}NKK=WhKRGNHNTZuHfa z2CSSf5>#F?UsXilV*~&1E}t9B8@z02FxOlOzCR8HR|{#?*b@EQ zf^V===kkilH}a);<6w~-&GfPK8OBISG_MoJABW2}^h3pdda)~&@>TU_Ml;jsPi1z# zId2CG3VD9{76#`STwqXQaEyUSSQ2dGBl+SGM@VaDIU@nxY}9Dw!f^_qO7AMTNr3tg zs_A1jK3j|_ru+Z^D)GR=T&ZDw|KKDe`@NxQW6%kal?HX~-!CjB07dZqv5Ym?#6jsV zge9Shgd{AluYjd6KOqXRG!hA5X)~u5E%gD45TpU>8N^`I`7jd*O6nsF2uX^IGdB%F zTUP<00e^6QAOQ3{TDKt>doahCV#*H!fB@)FwPkohHR`75kwMc81#vG8=iPFH`;b;A zN#j9BG!W?)tOTNfA__sGR_6%g0Q-O{rdda{BmJtMK#l-_^N6p8>ibwT%Y_I$;Md!k zAJUwgMDUN{(_4aW$^0;-XMQMe@Xvk=g3$11l(8rV*+O<9Kn94SeFDu|hbxN9{u{KGhU8kLZnTqivyCmCP87R=K+h3_ zhU^+n5<7Y(sOD^lG$3L_dro6=Qf_+5c&e|RY4^3W?f&*ad$65r54DHeYuY0awDA`C z;%;WH=Jw&r;u?53(vDlv_F6ZizTu|A(yzG&N=`#U0>`l8*JEw$CUP?Fx@uoFy#(77 zRipM=C+n^Qw=vbP;Z1*apb^7;puOIWtKUIv5WA2PxaY(qx(*C==tjKp8Mp6-p`LWG z;nYH_J`%A~&qLG4o`STMsTl21XQaBex(>CET7B2e_BuC?{99I(H)^FfL~bP1dz|&v zQRKxFdbk0t++bxH$Gv|8_wXS*xbiK|7Yl`(dW7Un-O)^en$ZxvY z8zwf)b=sNp^N&tJa!^X^p@OhJAvHTFbXL 
z$(wHis@Bb#jTl0sz1iJV-G*Lnw$`JUTih+x?U?5s?X6bU-Fhh@JN8PZRhK^w71o7ZEuI+G7q_6hcnJzR`;L}ru}y3 zHvf6AwUK6LK*^5kJ~!>BPFR~T-<#3Ko$k)+{w4HjL!^2DZ9Rzb`D$0%A$Oa#g(aY# z?e0#Lw9DO9J&asm$NdOu-i?xPcXwJ_tvt%R1vMXaZ>ioPwcZ-mw+&xzb+=aUMEfwm zfbF~7t$=Mn9A+}wxabbth^b%e>iKpbf;ig8+T+ga-0|vhYX_vEo!6sijh-bF<8=|x zb+@$(HJHvl*oJ!+aJ&b(@8wm!59zl#_q%Cp?~?i4p7vg(o^WqlGM*c0?{oK6Pr7@n zrwIS;bx**Qpv}9T7+~}P^m4+PM7ZDD2i)4%KCti==Rv%Ahzssdf4+SX@zd@>lyL~9 zzaCF-;1iw?BYuW)^zQZ68=Z&oJf+_O9%Aa46>A@GC#sLQ2j$&aEXE$hj-363izO5N zJnX!&`sgK7zK6Fqa!mv!w2z?Y=kWfZd&E6#9k>qotezL!+wJZi_mF!S>9+%WZm&MZ z9xlb*{qCXTksHy=Uw zn%_1uczQEw1ZM)}Jnn+BsGsaA=V)&^V&%BQy7Spo`!0N21oXZIb95}6>k-WLv1%!p zUn9~rt9QA_th=}Y`g*key2hWm!#Cpg&%0{?o7Ke;VEFOch5B^+c-?B>&F>nAViBi& zDT5aMg#oJVQvvh_HP2M1C7i8Vz~tlYyWa#F{$xZ->mG-@{q|#9H8#TE|Iw&(v07P* zV4Ux9kArI5?cTFw%o^4S;L*K}O(q6leXo0OwJH?&-s&av`93_?aKFEi zcSq2NmQ%MLSTf#jG;)Yd3Ppa9WxU<6`kls7jb|nGbUr$J|-T(+ps6Tb)-1W$N#(!!$m#v340^Kv zUXFVJ>T^!fYF=${r?i(1Quy>@9=!5A*02&*`AP*ldy|#MT&;vijGH9Agx=-YN}WMw zENkRjoAVGi^5^OCd2U)&=A3-Ehk9aSqD6IPrZPL>EI1zYIc$zBFF;)@V%ei?i?Q8{ zbEl-;+9m}(GBx#N{*k$Izi;i80O`Uw`2{8X*er>*z|_q_WJ zI1d@MsUAJw(|}g|j)?;k2k>F+BDUDJ64Ucm>DU1;d7{$b6nZg^zZbWhMQi}!5`!1B zN)C3J&0$}>sV+=*R98s9zlIFqIvXV`k*;vJHzB~9`v|xR-HTxbszzp(ue+-+Ar}n2 zTXEC=ag=hGOSX?A?R|k{i>*TwDo50W2(wmTSEkcP-EOpeoH>JXN0Q$u`YV;9szGy#{e{zP<*VLlxG7?wwh) z@JGP9OtcC~xsAQ&*u`a3u}BR~5!@z>bA3iUkY(JpJEyWKb5)?0<|K@d_U3~^` zFjrsT4Y_BE?zuqG*+8Wat5M<}5awJNi*?%1#}xmFgF)(mkKx zOrWSp>!DXQ4_8rkaYi*;<%X`OYl{3LVPpRHC{TR`L7`uNFyEM#9gcBnTOn7n{Z^K1 za=wHYp9AWa3$aGob_)GZ^$2 z#(cViLn|8karRWy8e@|JVU&algT8f0IqX2_2^!So6iv z76`q5(p}6%tvp+D%B?-i;q6StDd!)sX3P2WXCLbUy4GN=f}N$HB6IT-c5~ujYq%@n z;Doh^$5vmE!>ZK)3~_8TonOFy@RGV(!Dg^0Kn0ptZL}&QL`RO~D6GToy|SXx>y&C? 
z;{qbxg*MW+=;mkJCBwJmCZ2 zShiY8KR-yB|E|<|%Q3;u@{sOO;UyZJ!(~&&6Az7nHl$S7_(FfrCem9Ju4?k;=F6+!`ho-7hDsOqCs> z(-@pzKwY@~GH-FgP$8*8c-0q?Dird?f5hLozgTPB(EZJcCN`0D1KhGqv1{@y|p8+g7>R&OL=0h)r znQe+i!i!cbju#`WdahMDtXo|0L2zA<@79%#{m6-ETy2DCB~S7!e`D*apN?hrFJ=dMoM>M=;1Q zNxDP`RFn&gI+xj)o;s6eKI#BQs7dBtWhD->ovM_kIr0g<5 z`y8CQ8ncBlXQqbgi$45K;24~MuB)$cROvSaaxXmh0i~L%0Q7XqUbY*dy%a=jha?W( zVv9Dq1Ljxbp*tvAx7pSMBm)a_>;RMO!Z2q+GrAznTQk*kP@U$OWlQSBUTHbNN0Cyh z>kPQ=&*MuhoDqd1OA_;Q7|-(f3GH|AE6gHL_f5t~l_3Am9XzT&#pe$FDe;YoS@<7J z|3yAUA;$vBHRM6^;C!fm(D^QS2|Zw5(q~E_1v6Ex2WYZGYE zBGpj8#I&UC9}N{FZCHl;1+xO>1z%lls!Lv4q-;5YA)}+>n>Va+fi$TYWX<{mDjUQL zQ3n`FH%pG8pq?><-O^fv@RB|Td8q(ryri`Vak?@Muxwc6g=GZZGjRw+e*pP#2vjz9 z=sN1}*{T1?;MZAKpD#SkKqTsdqf{Uyczs=f=JoeLg*VU(5#Tk)Q6_Gbf3#QOKEw`_ z-+dR6NPiC7v&OIpn!*mlm>Dy+;692ggSDe!@$TcEyaD71o_XhvM1C=VcNu)6-;Llt zfVcdf@A+>){_*UI;QOq6Gk{#%@jih?*qH2X%waJWZNQ3E79nfgj#9$!O#OY#*r`i0 zv+*o)j^f*_VdBdiN|RQaCZ4gU*&IeqYy}!@zTITx%+|(N;PK&#IuVA}WD;Nd zAxIotLB#rDyq#z#+bP%^40S8l0=W?cr!z1fhXnxJbE^q8xJ_B#`}GV|2U(Uu z8^{1H!cs;9PebZE%kskFiuB>?2yBh0t-5xWEgi0|3!Yu00u3ar@m9QD56dfddH2*g z>cT>@#BVo9o6W{QAU+0rsMI&TrVduUQ#blKYU7b+eEcak|3WX$dx}yJ$5*uHAlpo2$1t9EIK0ak1U$ao%J} zD=^ZqIpggf^!zXJ^=hbD2X{aY z@lmAS%TdDpKHkw6tg*O5*4Ty!dUXFKv%Ln^yr}vl(l&000D4BSzGAo|8zb)8^%z;^ z@<;XrEXqUHCOWP8Fm;0VWr3~})d$>RzT>D%iGPFVPKjwLX%Zv6b~NISi~yrB)|=t@ zw&fk>j2VZmuro7vfjAtG2p%7BX8>>$Q#9kV|sGh*;$@=`Eu;&4BB`TJhvw3c~UbQ*Ta7Sx1h zLvdw&LOXvNB4nYCFSGbPY{h?JEDXk9uk}K#RYtU$DBf;=!%Tm~Ak7z|ukGODUosF` zOa!-2;1T9k3eAc>A%zqK@6((aeIgwNHZ_bFUKAotp|2xP+9J4Uokph8Ryg)HqHtnu zM2#|_^M~5PU^9bF3@A=Os`STB%Z)r*h6;)LE501K--eFDaS+K=E2V$SW^6+Ng^U)+ z%Ec>()E_c`0(#y&48>U%|7PZ+b;e6m8z?DNTJ#bnTXftc%qf&+)d+LZ(HO7PEH&T1 zX3E!@0;r*HK$&2picJ0k$q3C0HV6?@wJb&-BejD8r6!(TDo0&9YB$rkF0LjR>_VUw zLM^8kV1T;Pyj0dBr_+2T=I|lNqn6)j{YFp!I*p2XmWNY(0sjcRD0(hVKM)WvcKQ5# z5bY_%D$ubC@d~AX@dEOqIMGzjkDS3Wc+J*Fh(QMBTobUU$`h016jd(+LenhHHIb%G zBR)CU#v!>I!EfOcIYKRBQkqLa^4bm=Dl2kV0(YK;#QPD*R#}nsHX+PG`r0C?ln*J5 zjT)mSPpud;b{jiQyfa6Ux*OM+xy~@Qfob;LjLU3r@NUDFi8f>ICB1 
zMhsy;b6Dm_&G*8WeqpCGfM-L!#fn0Lkn^>k9)Jxu0x3+^+Aq&=eG3#GTZ@Q)z-|%& ztxj!bOq?=u8g&I%1q}tC{()oox^U>7UiG36roL6W7|X$`t6_FXeb}?|!Gb zUnn@7FGA`&h$he`BqB524nwC4h)(odhUkicvZAvTG;o9&jwOScn=TmKno~`@JJJyo zT~oxwtUe-33*GH>#}NZ=Rs)Yj;KJAEX0s8vT=dZqm9A4FIHe&};*HxsLkE(CzX)dV zm}4=5YpbG-Wfv%|VGl48(t6>y>+k^n0n?%!z?SIYwHkMPwt5UXLH2j!rpH*2iA-V( z$CjmntsY!uOtt+%CTbPdigQL`G-7+zW1K|NMVxsn<|@_C+6edB)<3ALXFi+-C0Rf` zg-LWEO$fPwn^4sd=F|X-A7qe2P>5kr^kFSQx3;mO|B7Hedz3>Y0)>jmn=$qPrQwdb z<(2#Q(ohIE(AmW%x?(%GdSM+pb0n}7Z2yGesRYdz#Qaoz&6f@KF4F@X#U0D@7_E6+h60~*YBO25*zsWa9xZ| zO5+l;@K!{+9D+?XN>6eKW+V}yrI_lz8F(*UkC80EFAJVf@NvQ>hiWn#nY#~y9JT}& zsnx+}3b72{q*ROV)kTCE{-b|SgKrq;%6QbuxY1&tQ0IZE*2w*`Au}I#1MAg!FUIv$ z9L+rKMK3PS35aQ{d9-w4nld{MFR#sWLuYwGB0|r3F3eMyAoVnZH#49mOM_{Vj~$sV z+cDdVm0kUJ=A>XR27H-L7}M()G~)g#UxQr$WXK5^d|JbuzOOjynKQ*%lNlp0>lvfg)+SdqRRdvE?M4{mKk95U-i$-* z_t${q(4$P8r;TG{WKB55tv^WNp)oi^n{%rg;ai-kEpIVz05WV_A-C)rJbgHemnfFz z=4y+2gf4J=WrTz(kU(|wcDxvo)@W@s^lWSD}7 z8Le(`vplu^x>+5g;~!d>MGbJHbl?)`Y-S1oD%&_9<*lu*mC_#Xe(P*O87a4a2}Y#3 zvu?ls-py3EIwMFms(Fh}ciTul+-!Bbm4T_H?-E+A-ihZO);iXJ*iNgTCL#3+ls5oP zu%GHYfOY`2?LyjmeAf^22=i{o`|LaLwmsmct6SXxF)>A*-HdIFz~u<8Z$dQ(tsHhJ z4`>sLwBVNNt*GVi+-xI-lEqE84{A zdj@`UgJntz%sD87Ib9lhc37h_VuOH)+a#rD{;f3|Bke&qjbr9DfT!*cZ>&bR*;iz-OO|jKa?`iir zw_h{eoZE*H#J_i7e}}bJk1k9}NL`8=aEZM9jVIvP-qp6FAPG50X~c~?V5Zu>gwy=H z_I7Uwk1i8V`U#+w+7IN!f~DikorlWJ{q^Ni-Ai%hh@LEv=BjhE+K%!J^NY|;7CzA< zYLK?A!L?$4_=x|r3mq{cumXG#*}?r^hg+A3w;hiS5(`|=KU%vwi=XQ> zwWkfVb4weTUE1IqP2gGrA^tjqhYi6GQbcL&h?T(qWt=f4<+S;bVMKc`a4O0r14PiU@1D3&A z6MKaE3%<@ku2YW9)nqTl-$j9&ijA}I%kWHBrK@6wfXUAm6M#zblO$epB_5`VIjrSc z>d#o@7Z^On_i5OZ{GVcpVNte@(bq~PD06nnP@AIuFr6RUdc~W@>#4!OG z%()<%z;T`B2rzHs`@0dmfq+B-H_}Ig0(o=R*dE;r>4wWyIpYAX0a-wzyt5Z^eF|Y7 zyqx9o7!wR-08Bd0gl!Cd2s!t#msD_4SeWbjXAWeueKAAwOyhO< zN4VmjO<5?E{kI6G!iG#agCUL$Ncvx*9J3%h%s}$TFRB>O3^G<9&Syx#kzD^qR1&|y zL=I;_Q`iQsHH=sqiwUwpqmZHFJ@WTiF#F@4#nz26-21JK)+XGcSHvQPL5OlY)U)h3 zuE^9hM}vQCY$#)X2@l;`fq284zBvH&MMUW8P8@v#83DC@H+E6&oa#QV{5?6M8K 
zmy%&Q00I#w598i-NIN-%3)Q&CC;<>6?Ffi55e-R#Y6524kf4Y+&k?j(a>pz|1Ygau zGUQql;mPh=FlPr*I+-)@RR}keIx#duQW+pFO7R28f{(Zuf!j~$n>0|`wjBv!+rUq6 z=@6e+Y+KK05vtDN>f!EO0_ZvY1=X)*^<66?9bD|N%nt-3m579uP`VE*p|wJ$HU}x$ zcYfWtQjGB3Uh~M^B}8;)g+2e^KVe-TTBR-|NxuZt2(qw#?SwEwD-6^cN`(ZuQ2_1; zm+B%L1Mo9xnUUER-Jd5ElJWOGPuKN%L2Uki)2Dsx_(32PHxa-tDJXg<=QbK0UcBS%h6~KUbY1X zvad1>ljmirDm+V=%%LCGNnG}OQ1EP|ow{KzJmugp59;aE(y3KLjWeG<1prOs!n!-u zKlmK1=el^5Ru`IO5syK!0(02F^n}DA_aUBqJ`KgFPrrkD^WlXzyRfB*us=#Qs+w4e zKpB|CDs_;)f$1)kE-0u}q_XY+OF~`L{-L@_$xxmw^HOX?R_LFP-vRaT@>`)On6~Fe zam8`PsJ1~x%s74g8q_iaWqEMnr?FEG9ssEKLpRJfW93@bjCJ|Le)&nP1owR;_IwoU z$1G<~_NzUFqcXBsIqu3UPgrB$0HB6-KZ4zTDX7&scgP)f*FbTmsxksEj{fU0Yv7|$ za`EFB--X(e!V*(}SISx1PZtAfQe_pH2`Zp&Kn0v;i*) zX{_D(ayJ=?=G9bC^vdNaW=!z$IAhoer57LR503>rFTo#G1SyN*#~-e$5Ad~+una#D z+alLw)B;|N>$OHNf!|f++TTwyGrz#nA%+@7K?#9>X+nk++5tx#ZgWcgICGMO>(&YR z6QP| zV*;^1qr!vS#|tWjaT$|sA@GZtGXC?%jBn}YR0eH=x!-f(WNT(*wocTF6e zsoQz*oqf=Qrs18qH%JRTKBEKFRqT96CxP5e!-@*Tpqtp2HPVWb1U8dZF$xmsYN02~Ii|MaN7H89%LSWrL6; zl{0C%h(svjFYJmi88KReUG)WH)4m(;$%Q4Sy^V=AoYAr7ljrLOFTwQEl!Un2KfLd^1K?iB{&*)xo)k=mKFFQTr z%jMnTr~j!luz;nWjsY`P?}q>qR;8BrML5c`{(wfV-WLUHT||AFZH}r>FfKiLxlV*H zmv@4n{(h$uW4)aq(ytC2Tr%hd4o(CmJOPiNVv7VkObxH3h_3`A{NN8e?HGW5-329@ z?1XqpVn-E`;NhQKkKdxgN}#o-jSb*fhJO;+3H_9=cC$Fr$c zb31~-^4HM}Vcv%sm)fDfqs8p`I@B$X247Feaw&)yP1n?Ss6~rMqc}k6rwx%=$bCVH zQ|cLfjmZ;!#bqnRshp`i1+y66>u74AVLTo?Qw4J-)(K#Qf7Lg2PBYeLV=AF-hWNZ^m~(wwvt92Sa3H!|v+ z;V=>jgUin2!J*nG=qPXs>Ks~TfQUjO!jB0eXBs0;3!f0E6^U-McTyXYNN`rw!bDZ`0r>Y(t0|L1i%RV>xW}KW2~zQhY7hOyo3+(6Ovwxr2OQbRM?G$A?$g{ zViAiT#p0g_y#e>_7I^&EtJjnD>!DYyMY4x8DYg78GDXk0^;8LQvsnD!tWt{{aH#05 zR5E&aNXjiYtBTE3EbSe+C=^>YXZ$IYp}xR? 
z!>+!_;7bfnugGmD}F^%_4(z=>R9$nF?FEgVtPIpYx=%l~=mCHxp5epZfa##|0i<3ar_0ywYG(xZPu_g2PkVQ_%K z1Ojj9eycR+RIZfozcQ;2t%i98o2LFo z)rx8VS_m@{D%z0Y-uz01cR>1o!jN0}C3mtc}TvqSvoq(%z@w4=XAy_pa$} zr!;%Xen}-7u88(SOdDoEo};~iY2y_yF|SXTH&<(7Z}@s9i^fegJZOltuPFJnQ;w|F zY+_!)iX)5(lAPpMgD3H`+UJ91I{XUsB;TKAaGt?5gBpV?3>Fx)7`P1H#^9X{o@Mak z4BpS+0}QS)_z;8V8GMYv#~E;CNV7rCZ5Od{kR5(2XZO>Z#XwRJe);pj;X_$;kRL*^IHaL F{}+ta`p5tP literal 0 HcmV?d00001 diff --git a/src/目标识别代码/models/__pycache__/common.cpython-38.pyc b/src/目标识别代码/models/__pycache__/common.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e03f8dd1dd2ca5781c9d8157bc179abc2c1533cd GIT binary patch literal 33201 zcmb__378z$U1wMI-P1GEGa8LFI$NhTvNXD7Tb5;8lE+qJkA)>WPGhIj>gpMFtB>)k z8XZk_fQ%h00maU;B-zA??_ee<-=Rzh-@$UY63Ij=(M;4}`cOGmiDhDPA1=o$iA+N7&2nF+ zPx2)*{qjAK8I}NM&_qwcJO`qm?z8HF6&-udR$_#wzPF z>*P6JUSHXe*-+V-*;v_>*;Lt_*<49y(v>ZlEtRdAt(9$=ZI$hr?Ufyw9hIG#ot4`% zw^ep!c1hkudAzbav%7M8=Jv{-%pQsBEAOpLWF{*6GW#m~Gy4&DpOv)wpEWWE9t>Cm z*5I=NYp}4YbZ|bHIfR&$HH4Ug|rIpD`UfS7~UA;cW=VK^r*#)%?($>KAm*5LH)8JgV_t`tw2MtHtT8qjFVywq_T+c)8#>1$Cizb-dq;JpAZ`4?ggi z7kcc`N6&l1`HO}8rK~DEGgnlFN}=l5UQ)-|bJNqst6sl;x?E6pu~zkB`C7TAY)5$m z#cEz>$3rs@AI6O73dbo|~Oj1zWNw?R>6W$mVM`W!YZ* z>a{Bu4_>`?^%`%lUc>E`i(dR%u2RmHYdLE?=*91^R%=eqsVOg_tM-z1?s6eps}{2C z5t48?vN;EB8nG)mnV!3^>| zon#IxurBZ}^O+EG8fqh6-3;6`J#%8eXIxdA@GQaKoxt4#$DaE7XFvYc&wT2YfAit5 zee}m}E&R&Y-v0|vz4HFgy)ys8*FXNYSFV5TscV>o%SZMeojCXudsw!g$_p4x%wF>n z$IC_A!Sn2iEhx=qgCYSlP+y;$b853WYvOphhFP#r;HH%lP?NyCRc$ASVe^8;s)H!w zl4s0zTBo+6C@<<4!$#W?1OYQW`f5QHQJ%O7-09ZQ!jGcHsFb%A1g`bTB zZ%3@?vjO1km=(u&_<0OrCW3M5^FpU;)yuay!c*t-v95A?AIHV%vn>QpU_Q_^76L}V zF-t+$a7{PphMW+>@Vx0ph{SJ1-9R?%n9Yc5E*Q9vx=|~L37eOm`4gD> zC){W?g804#Okp-@VNNmSlNgl9jH=De+Fs~FF=wC0iV6e33(AXS0e3|woAu0U)r+v{ z2Q3*DF4*d2ukU2e$zPnRsmejiGcQ!V(7EEnkE`uyz885!*1s1!V_^*zooiUjdFN)# z3)zM8v}fiI;5w*wGOb~&+vIyj-GT3$h8NFf^W~gvXR~^;I8TZr&UCg6oRPpBC5?JN z+cnW@>XP*kMFv?9qquPcO*^&1&sb&`46TCd-q 
zg?yo0Zq0-=i;G&CrN(i$1DC9um^l!M1tl(zxSkIeEk8C3Xs;Wr4Xm4>72>*)bwYd+ z@gk?+z;5Zr=VeVwZZ7<;g+fTK>%l02Wf5 zJL#?$su0%)XY$r~M9=v#zPp%hszB`ms$l?D*h|TH zVs%yu>fzkAg37l_VM)B_>gTt86~Pj`E^PmeAaHWX$Ml^T;C8|Nofr&!DN10sf;UX$ zi7iPHUlNmO;rg3~B3A&EA1_oc6fE#I6)#?$t7I=0a+W>b*I5>79}2V8MXd!9tmpoEMx1Mn`ZEP*2S18F@vnVYw+LhOq|_1c1bc zKDKx$+q1m{qqfodeJG_Hqqa~xz5$>>@erfNzzD4gfN2RdL%6~k_4{#TeatC=9w2}x zQJi|5fvo)}c$Z=DMh43uW4=ZLZ$_5I0)lc^LX_avC8OL+oo>%)82DZu*_ZaTngVbHUnq1OYKf@-^ip}rmf zTY@+z)C^w_Tn_*P2eJ_-REj zK%|Ai=nxyW6mk((W0`2g)+^`DESXIgdcid9l@l>yV1Nd|2KB6*-sbnL9Il!Ou2U@L z0s=Qs0_wgLS0h*#SS1T043vzgLJhMKY=jzED@{z@g3%OpgcV!}HX^c)7=jQiU}ec$ zdA|@OFN#v5ZpaNUkeG+2L^{yune=8nCL9*H%!|3%f)|}GL((Z!y(oCj${fVH0um{< z&Wl3IVj?|aK0VpOH^v2(mG3K_aZ$L(F03!ZbrP=$vvvXj5lsR^wPq8fn~ zL8`vR(m^H2Dcy{AQG*?b5Wda}<}U-!=Pw@wKW|w}XuKD*FV>WkpL3)T&j7^dv&#{I zX%yL>o1~HA+^p)1MSK1A0I-QnZ2%@P5r84p618`6^=p6@dSiSvaWkmq*cFF?&0r;z zUct*28#3J0EFW0LgL<-5jfJcTIaR#1SdvEF15w48?-S(ocbawT+*z5x^U|Ir%v>9A zK7!g80ZxQ~^Y(y7;gBj@fDP%0LUPFriFydylWE;mBXWp zt3xivdxpLZtIN@fq2-p5hCT9Fin5%*BUjZS28Y!_#<6GWFar@j2sDbYQRd(^Y~2*kvur*PTCuUKP? 
z))@#0u!?5A8enRDhAYsFIMHT|G8#F71*j|dhgkvZB}5^vei#L+A7vm5QkRAe|2Ie)06W0!QA#dq3o5&IHS+i00b9HY&TgzEX6R&Aa*HPj# zEY)AreTp$l)^r$={Pa?TxKIN^JrzY!E7l`TB)s@3RkLl8eP@wb#zqnLCVSLHR28)s zHL=ffi@Je@3CtJ+?w3(_L(^uPD#%<|;LAW47>1!v($pbZJHLoJ1oa##_F5?f?asGXR1f@_$n(8me|3%RSPA35 zlPzKuQ0EMFTGE1$u&Blj)VA+VOzc-An zeh5XV27@;-;8N~_B4XwD;p$-JO{WgE0cSZ>+%4a>IhrnPBN#}KGN(z9twu<1<)Z+x zv|%eB3B>;rmz_k=LZ~9f_H{L53H);9o?^CL2#7x&FxY}Ii7N#w;z>l*7N!L6LO_Ca zDh+%N_lnDBF|u3hTcIpDUE zFX{zP9lH8zP)zbJP)yyIhGHTnH%Rgh$pgwgVSU>iObL1!A3jB2&GXeF z25KeB^|MIZJ;43!RC_+Zn-M0(`6x>>5nzP*ykvyKh~&raSyND|KymK#5$!b<^TMYN zojZH>X7CoX-6CM$>POH$<{abJhY8waPF}NX%w@#gXnUDOM<`*Er-0qJIE(8G$@>9xc5yO8=9S*Abod|op0gd%==M}8%2 z7Cm+7!HYH6k@W(jT)0FIYY+8lyzLrlSoW5JH)%968s_=C!e(r?V6#O^GwA1*D9yY< zX>*4s*0PYHWg$YGB2E+u=(D)#AhB(JciVHY48c`5nD?y=PB8da45T~~&tJvWJ*TO~ zC4M(c={+dy(=5=RP(H6bp*!(*5qY9LoGdwQxZMi=sCqXN3wY&Q0D%Zm>T`_+aoJBI z(9=nSEo|eku{WU32e}GCc-CBZ5YJ-3oyT;p(c*IJUD$~<5HR*Z-%#%xq*&a|px1Y)psIy()-KiyUIdU^ zglj<*_H?UQoq@fKRL6@ylDm4gRx86;v>DHfMXYUz`z>4o7-E!9W7#hU6dlWaybCdp zvrHR-7laN>>t&?ggti|)412yH5!laqMP9{8)Nm@m)d5xl*T2JMClHV`i$TPNc#-UG z*%HQ#iDfb=PLVN#3Y&TnuxAT_7w!YEAv(|n0}~8Z%?UT537IiueG+(L)OvF2)& z1>844uY3f0!^Z+2#1!%SN$7%TGn(=u#hGeN6_!jVA#4?{MZOl$ts8m@JL8WDN+YEn z9HYxnEenkE@_Q_ZCCa>PX77F0uWPNgr$yiklu;%l2K#muP+)rlkhSv{v+d`kU97-q zXS!I1rdH3_b8JS3A?>iCALI*Z&Pm+;4z9N8p6b|oUssdBLJ11z=yy7)@rD$^W%%nX zf<1zzSue~y9(q2lHQFmrX{5d48i(V;*|U@CCs5MLi~KC!zP3ef1w|LuTHJsY=+Yoh z;R0f?LSpTUE?L~_Mbw}u(qI}oyi0qDbXJG@DW)VVqZSw}vjF}O@jVLw>eYVsvb{EY zRse^s57#MXyNG}*K-&ZS74Y!l6(DaZx`gO`BFY{?W12^o_~3#u4FveAs{zV|51_to z@KYaR#D^Jtgn@Ka^g$g^d!8?3&36_)q3lm^wM8;QnLrws{$vaK0cxD-l|}cT!P`Gz zIc-p5%o0!wBa)w9Q2VDu_wZybKW9sKm+=8=a|pXoa0l8*N-Tp=MK2)o3T||s-akQM zz+OONP|HOya{#gGP6o@s$5#kf0+*q=^m@goRnU)&YhN%IO7$TkIs;50y^8?qW;$ zUFsvVGCX_~1$>1?`S1`{j9CH?e9KQSJZMgxq@C;qIreV$BId<~1MBj3iRw}$BslO2 z;u4K(2PCL$2bTf&E-bhx!2$#yfT6(xv6PH}@emvJV})C98{}jSkXh}KlzOqvQb|du z?s4SOiaycJUghI5F!l|$n$6clcoZ7Gc4wD|HTE-tOJFUHn>HFU1>&@(1Tdj>BQ4EH 
zNGowV06}EoJ0b{W^o(oEA&b99vd$tH>NKteCEZRYny+7xKHWrFf88pNo=}Q0-ya}R z??q7^r9+RAp2J1VEg(W7xdcS$AsB?`pg|?CFr!fUUNY5j=tuZR&;fkj2#bGUWFdgG zVKEvKd=S4t{U0p4lIyyZ1_c%V+Bsp?r%ww1bPlss7IMayP`XK=U_#D#q6=q{?Nb~t z88~rxQb+kHi(TOIGV{EOm)1&v_$}nK*CEh=7=m6TZKS>x2}iz@Hq7tDjL3KDYZpmE z%L6Ep3D>y*=m>6F0E%yn2(95Qf!NdyXPZ~lIm2_RjiA;nZa@nN&CblPjH3u4|A?!F zqY?r$uMzzIDfTD^^yDb`Z)!W7U{ut_5AS&i_;EnGzz-JLtSJQQKD>a1txcf3&`hpa zg^z~t^goZc%i)iHj*ehe(kMDQN+Xt^8KO<%8kw0AZs55PSO|1^GQdp`z6(^hYAPC8 z7XTdv%H;P#gig&ZF9(W&7UG(r1^NxO@<7w*@(M{F$tgY#uvMa|{t9j5RZX$T)ZGlj z?G;j%Vo$x$#C;pbz>-`5&%ps$c(%=Za?A`e`h%~h@M1$xxap*iXo7%W3+ z*0)+N+{~b^?)w2J5kLrBC@f7PfnkD@>?|zBFG84UdnTa0oyZTzE`I9JntTk9N&5ZeeO*5bl;-c`jJ<`Q%M-m{~ z9VB5`5ORqjyf{1+3sr8oSZ?;mTOHD~uUoPF?6W0p{5^mNNANaecStEM%pFu6PU2HY zvm*N5%Zgbf_{#ym+RjYKw64f6{4#Rg)@r!S1Y?%W1j#!;_L~v3byh|p*rZ@3zSAJN z;c1P+x$_{N30tdV?}3qtU{6$^mBgN^Xdz~;mR(iGypf6HY1A6BhM(mVb}xhinLhB@ zYrNsp1*edA#LM$!E`O;|wdx7IJHwusbzpm+otuQ<(#@rwMAOc7`H0H(^jHmh#%edA{@jhVOTX?KC^-(M1jyP7-v~9Z z8Nx}pqouxPd;$6tD>!d9Ry#>|)QX@rzlJycrGaV)-vf;`ZdiTU9c>O`6Ga5yDbeeV zf}Rb%7_NTA?R(KsPddZRRTf(H{(u#G8KN${pyO7eXf)P3Bc;`)QPjTH>bqe!M%^&d zZ(35`S}Q&lcrl{h=B#P1MOr*TkRC%T$E@TS+KBJ|F_72B>c$$>JK+XV&h@T(2Xwv8 zm^C<$`ki&9_00_&#m2f9L)CA)$rnxR{^_(c<) z8FtsBhA&vFs-`spZvplRGF~~|*dTAd0vH;dF{>ejMq{J9p|l0P+-R*qFE_cHnp+nF zrEQJPR?^*kDIz^fxOie;ob9+nZ{`iZKcVWI-2NAXYHL^f*0%dI=GVQhjNOEdbmg|j zmddWiRw&=n^MS@TXPnT`+>Jh%_FJ9X{pUT_`UMS5+e&-gxF0)VZNN&|h&FC_w>S4K zpig6g=625bZ3Fq6Tm39({Yr+U-1o=P5qV z8-Rw8y3-0Z_PZ0!N8JPR?kp^K2Nq20c0$F1iC!LZexUT&B~$Xl`4p)pS|v0Nqu=N7 z{(yVfJ!I{<0qoa2FRG^_?r!&>dkFDIuy&3#ALsg>54-!^gU16e2A}bqNo%h)0p0q( z<`WA6DrqrWN8NqazOg{*4TOfq&KHg9m)$|s{PnI`+21uQPd4sw?onUVE1iu6C_ zrd|xI{axt~bfy2nw_|^2<4*TZq)A^7l%7J~r`Sl=FlOtfF4(D(7Ht zIih4fWF3As*0}3>pqX6=G~bL_I@X@&5zO>r*J8O7X7sWYle>j&>OURx|El2ret9i zA8*|AM&R!!15#S|INas8AA3l-YxJd{bFo=m2wV~eK z->Y*nm37|@qj9f$Z?h!W_ul3u^!YwKm+^gnHSLa|50{;ab^pBaR->B2-ATd5r(TBH z|E-4A?^Ndlm!e4ZDK~`hLr#s}=fpzQly%yA;Msulpf#yo1c39`5E85h0e=zo6=&96 
zgR~F%Pc`Qm>&$vU4s#=Ipro2M%NM*)wf^h;O$4TwqC<0K*2!A6D^+g$^pdR@KHW-# zUY^H3&xlpHT*T(YXt6q5&fzA+&0StZ@6M{n9!6p);nVevX>b+k^K^bYmsiDEC*9r< zIx#U(rxY<=oSAU0Iv#doVpCV)DkP;Wl=KB#y@cAlFn4&_^(|7+qf=8)rXQUxR8KvT zK0P(nQ`opsAF1J0{_@`0YtF@5^~61gCyq=Uu5apOeQXXwOQq10PyGt33Dv6AtMx&K zN+;SoGdt(?bwy-WpL+cC{SRC%z(*MT+*wt-47-!cjGQ|A_>x$3CO+$@uO$FzUMUo3E;@E5GxQW+q!g=tbHTBr}o+i}8cTDV` z*pG~%i`X4lkL2g9+_C*$^klIr7BvWgoCPmzIoIIRJ|hEWH;f$60dUiC*v2d+AeFE!6VINC+xMGi0C z)nZ0NP{GwFmhypZj=IZy)Y=jOix#(R+x4|FM;!h~9_}5U+vJTNy2*0wO8OjjJzFsI z70SH{cPvi$pvui&JYAUXNtWkGv_4_nkRx4%OZ24IjS|HDrrEvsWCnXuV5SI`U{-Hn zBQ5r+7$w+UA=ZKVwC`!IfG98yN1;~nt#3*WQpGTCbQL9d$&uZ?p|hyZ;|=ES5BP-@ z&Rxqn$1^V<$fRI6gKKxnSi^d;oLuzfn}6 z{?Kchk1HrUJFRNqq~2{jCSD9@}3}obfjuAKMQJ^1ra!KW7tg2*H^u?3UQQkI?HU zHpWbp3o|*VP~W{6?j9~Wh4cf~Od);#?BhMaS05}Fv4IYFWp-}Du1y@M4|hczn6R$l zvEJ88VU^1OhA?*1&0WRb)||Rh#D14I058<8*l1Nk(2$%ukQv49q=KTj>*UJq#vGf}sefZJKN7CmfCrWmw_35IW#sLIr9%2A+ELioZ zpB~7~f0ygL<(OcHY!4o43DM_Jn>~s^91jK>;9Xtg{ehRw*0}&)Saxv>phMZ0MpDlx zF4k>qL-ZF~$VY6|`(D%JcQ*SMzXoKGiC6a@IDB~j9Y+t}`Z}KL!^Exos6wqCUf*qkRYh#MO2-IYcA5*C%H$l(R(MUJ&O%XsTdSkCcmija_$e~(O=)S1>{5$E(76`6k0Kk0U* z;N&u^V1IOKl`J1PL^#u8)(aIb=gOH>i-u14^0~a7$I8&U>k%>O?u+VKUp~o0SUh%?kC(2=Z3B)Zp z;K3%Xhp>?GOtMW}Kms#^XLHIf5VX(1|EM~XS?5fbQGM2jzX_b%(Zbj2s~pu=IBR`p zgqA*_R83{DJe`7<>|W4b3=FkH)CI?;Yc{$AqE_Z9At+k6+13Lj0}C|kU?tg^Va|dk zV68ZB)l|(vb(#_uEU6PaJmtg@MeL`3ngJDHJY>Pb@isVEAu>0M@hps=)FzJKWfFlp z8fg^0Gr;Z79yqH0EuTC1hpK*Tjd#@lVf+{P6a>c#DA$k&j)L={sKiCO3tmJIm>2bl z4N$>!Npt-g?FcsN>y`+}0eZn|!BPK@l@M#F-(+0W_D{755jQMa`<_{ZsDfNqYU+{~ z7k*lHkSFx5xRDL(wLqMdZ6!_m11j5EGeHL{^7mOA#!!}?F^!F$T3Ya;{tEJ9tu^CC zt!rSLi+O-$)hb*S)=GF|%|V;k!`|P5eE6;@8#@jiwHD)}{(?Od?xfFWil)Kpbl@lx zYY~61uM5z;{vN3C26`bvtDwKavbQprKyZlki=!FnPhmsLI@lLu*jg1bL&he2uf>&s zab{S2(YO(50BKs!{N`3H<`}@c1oG&2BlsS`Tju9`{td_iP2GlkNmm8NOn?^~f@bG&f4*JItd{a03 z5=RR68!UL*l(74raiSlh=W5J~P`d@05IS_Ef|l1!FbB13?OdA~V?}SU9+Y!jzlou| zbaRV?J^9GD)$OC+&hNipTO-MwZW1~X!~Qd8n-%9X^xxEIv2|av`k*;ZBBtL(_;Q4L 
z7-)Wt<__xQkZPx2-^S)`4o6{UbDVG5^++o)($F#E?QZn^Z;|_UI2PbUI4iXf>Wr7O zr@0q41dO*eG!ke|Si=h_M_?q{+-I$lF$XMIBW@h}M^k;#*$-$~4G*OltO>YgLNjox zf-bNh@gvX)A3zO;e}9no%?amF>F@$|*04r6hnh#+es`dG6xNdzPB<8B-C-*$o*VJ% zPHT;GhjS;+nF~YT7}vT+Ym^#J96^zS?kgoTYg9QZ%k~TXz6*NZWB5LX@7?$YR9j=# zx*KK(#syB})z9JW-AMHmzK`R&r+I=7C`Ur zzhpL6L4y}mA3@y4u>hcF1ojle9a$f6SFgdyGL=8FC!s+evNqAn%ZI6x)F%scooqhf z4)YyXpp^IzcsHiD2(-Hc#5T;GpEfk^n;z5xeLhQctF6 zM6R56YV}kaVhT^d84xp8>*HyF1v6-4FTE(#-|}%g??hnR&PO_-Xl6_}w_iaVw5jBp70thy7&z}UoCkey z7CAOGffpW*8mMOaIt-;PoQo&ok4&7ccChTj4303^$>44V#~9qjU=RV=N`HVfVHY*oSLENn_}j_V+>$~M*jkVM3tEEe@a4#UQjLwpQ@QL zoCJ!kLJT}6z`2WZI3LE1ai{0g9lh>5Iq;y8gPggyF6=t7VXJ|iV(6g;Ud|8@wM@SIE_(Gx|@d8 zyKDw=1DI6J`BbxDa_smgefp!nd5W;?@u*{dtcfx_(8NidF z-ed*AL&zyXPY*y(8vq|B*7awsmLnk-A#jDPUJTD$#K~MkzTW|m6)Iq~Zq)r4mW7jiDVCu@;tmuJkB_Nh%`d)nH zMRc-3jHdR^GB;o@wqh3RW6R8hPW{akPafUp)buE`Jd2|R z*iIMM=(--fS66m}CwxlLgYEHl1ZBO#Nv9F>hyjl_1V zs{|DyN*viK3M|#n`UwA8t3Rk4z;rn6Mv?%V3f1U9onUss=YnS2@Tcgjq;@m79YH39 z!STq>tmbbJtYME*xCubK;%ZJ9yRpXM2f69hd-u(uHV8q67p>>Aqc012Ey@R_ zCJT)5r} zW#zLKKaKnc zkWHp;c^v(vsEY)wZc#; z1T%39XWU@tCQtZ?)4>Y&a1O#5f_;VLJPx+jM_t4Ds$ZP11~A?!oDGZvZO`$8Zb z{K365zxO46{_Nh#DfK@PbH2y!#byyJ3qp7cZo0g0O|_P;+F-zl9DqkTwc?%~B0_){ z7W|&Xu^Tuz8JN8fvK%(CW#P(A4=LOwfRAIUP8_W+B24hl3d4HNH~xJC=iR!&Y@guf z=u~~=e%U{mZg&M1>NJk@4AFjylVtN=@Zz;u0V-{p4(2Z8{bf!|c)mQxZHR@dawA;l zB!HowHf%h&YCJO{AXBSX7|W(Z-+fc}35+dpM6GkrKQ8Haem1X#4dt_$rmr>o13 z*z+jwzBZ~d$s0|M7vha1J zjsPAW*ht3{8DPN$*8mAHg8NwQgc|{$5y#QTLF|VCmw|MB7@)T~v1SrG*o_x}aIeF; z8<5@4w2uf-wL;&M zEN!>?sqcY)ED0S9G&_{l0ki|CZ3p58k*^=R52oFU_o?UbwlUzwo15JM(dz`Ao$kQA zu|5DFBD1+!kRg3fW9|T15WfYtm3Bc#6LOQ)7)lU0I^UKvRPfn9?0Y%5Bza(F_eb;pXiU(*p(8==wlo&<$f` z0DFQhTX6G>KHPu^AF-V&m~WeSv`U~IHx$y?0Lw}-0@w=RlFDtM0896t5%ww(Tb9vKzgkG z-OMJ;HL_}#^@(wFLhndl!d4LRt-`fr_wtB;QhbMHXt8t@z4W2 zWY)+Y99YzdXSy2!de@$W9_Iv1vW_V*CjQxSQBGo4WwNbCtUyS_dfiY?1gC4G3NoFKKy)(qb02TT5Zt zXhvO|hvLu>`M(+wf)Ta?^BBNglo_H-QJ-KbLphsl4Ddl3^A%%oYmiyu@Y( zi?umes(3(s!A94ym*0lH+Uu+EY-HHBFBHn^MP{^^k@N2*G%pM-jjb8#Ax&6A6e^X# 
z)oGRuor_p(J;Tv*#!s-Ec@_~7Bkq3q6rcyT`YV<#;-|3W$N9L0fvhRQ8vx-RMhNJ> zz;dJ>t_ESBUr(rc5>J6XY7j`Ylg31F57-^8KiH4E0g+kB0PPW@5)DL~K;)^9fMLl@ zLc5R*(#+KU`(`AXG(|-%n?Pf12X2xKQ>-sYXlF7c1p_nGmLFoPvw)nB%fuPOrZxqsn9ghB9 z-Z1w6@GnvbSy8YC;P$OJHd}plX27E)72n8~O;O(}mkD$5G*C6zdJa6AkgcIE6bt4;effTcl^$}u!D8dNO+`=AEhA}v$2VvY0{Q;ii z7y@oY1RpwjqePCf)@%SBzc6`$5M@lGokaxftloPv5k(6Ri%7Rc1oHO&piYdoDOFzUOfF0)dXhFDiduA;HDJoD*f6f=#qf zr^eD1+RGj;z9T^gL6Y|ja4)j)4C4)zW|W~t^q)tRZWM`XyHP?^Z|h*BS8ddiQDV2w z;p!pt)c5xsQi3L|Mxqr@jd^!#gxHv#I)cd7u7es1nbj0nY2VRx{StQKW>C7V@t@#4 znU2n!u>0@n)EN1mYgE6$pk>=@Zj2cd<*>ftOk&U8Y5u1^VMg(Wpa}3#^bBHL~fz{ zKZ6c?A({lV@cuCl02pLM$`2kP#BUu1>&00w%n10iF{Y~BW2C)eOR?DUo6Jm-)jDB- zrK^9!ko#-1ZqNiha+ox)|iD10V>!_RQ=1*Le(@RYeB9X5bg$R zXcFg+d(p=mv3xafMLZyCFO# zoId7-&X^n32C*HkPXv_rzG%J)=4vq%>#~RavZFBnCf^@=IS8XMOPQ8UX(w>lL_f^M zU1{YBhV?PPF%`u8u?y$QG!ZZb$5x8y)+z45NFG2XliT-~Dp(cel5tMw{ zvhXN^DMqXgi8AG}Tp%p$QWS z%Uk0{QJTLI&W)V|JYe;YP1UuBeOl1KH;sLD^H^GQ0b2l}>I@eH)jqPS>gJv_dHupw z{D4}yP!CQVgfD7d)n@5#93lgW9qDk+gKeY4oWgjOaR(<{ONimEvDmo2sY<5enzTRw(3Lr9W0gLiEVjdB*wR?pJZ*HWYAgh z6b&@D9+|5a!S#DVd#<9F{|>%e!$8c6kP7I<2^=J7;w2U|hPRnGjCDTeO_HH$@^;>( zm}Z)R2pUiE?r8?JHhS=EhsDl|@E72Kxk5O;{)+lFz7~v?UgCQ|eclvct9uwiCAf2U#}j%fxf7Ux!;X zFY2?*{22zq?)Oq2ILQ@ln9#4iOe{Esk^*3L(=aBIU0(~Y>?am(VuBmSjo*;ho=OUh9h!nVLj4wssK5T)Y@uKsOYfLCFkP|JAU1m;59Q&NxTh7@ z_IPi{Og(Zg4VQThgy|d{A}AuK0oq^UaD9P+P#zg;p*)j}>m5Q#_uCvk@*V*UTs_rA zDrByEETTx-=`tlm-DPUoZ@}ax%QlFvnHoZ907f_2q?^7rfl-TFL1_9CTfyQ3Mt!iW z4TQ~a4#KU#-lv!0KG>{!>;XaKq1HpDVJB|Dmf^SyT3{`(`Pc!-!AA?JR0b;ylq&7S z8G~O&#piFDUQFDnv$adu9CZm$3z93#K^ptJy7jb6Nl)-nNOAk+;_fi(?{@|jkkr#5 ztc;c0FJJ__)8ak|F3~#4dsX=J}&ebJz7Y zm&+ zC;`WS15)%Mq;Z(CVs2Q3Gm_%n@~e@FDg3q^teDu)61pbgRVbKv4=<4^_Uuz+7?@JkFng`lVBTqMhC`8vWMbp8Ro z=hxqrVlF9xqVg(S_E7{PY)ptb6bOvEY>GA%n8kll9GmphSYQUMUaQn8Il%*~T`1OX zpd>B&bWqJ!#%)9JUt5zaa@hW^$>k|2IlIe1-6a~fZ>a@zYJ7Z+HSBmzH7psHwq#8^ zn+yv)<0x6n?}7x{a8A=P-GL2FXM_c^~TJZ|2AixCR{XQJ(8Dd>Y&j=w0Tt z4xjRy?7NUBAzwl?zQQ;Tvid56R~S6VydAo6f{$And<1Xb!KPAv#j!F&8*Yz<62U}> 
zf3ZX?jvwe`nmk_je57lL`R-0HjGdwuyf}SL#FC{NNTOD*CGnf+XdpqDx(+S`k@0`k zb{@Yfh99j8HzPdUhRz8v<-(tW#7lnfP<~ek5*fDW;;nW%h)fkp1f&Jj7RZNu*KN2{ zpF_~&KQPYNT?`H|*oVLyy5GvpI>pO{`!CPv(~hC(z?LU3dKxqz9)VuS!j1g8o4SA; zEw#ll7I8O&oeX3dO0}Ydy2?128<{GZAkO+faDtM!(LM(2z-+=nZ>TkvZFSrdW`hPH zf#f@JL%Jlt>O4w657H=DVKHc(-`v4H6uJxeYr+7>B>pOad!2>huA=#x!nohQrFUClAH4_l27%PB?Ba5^WRYC z-*apTi~!!4V$3*xpZ5_ieyf(p-o5|J+^=J>kxl#%?&ySCF5}oDF|VKiy}3^)3Qx3l zE@C^bUTO|sKF*9H7s}fDQ$D_j0S5w3#XM+XqFU9k5j=b5VeKND&@Z?VP(_(?2uB!{ z?Fnw2qgxYyw2uHUb~~?Mzob@8!S6J|7u+PK_jig@8EKOe!BRMCu{nyZiIhWo-HHyC zYuW45<;|9B*jz0Vu?V_jeCIbr+Lx7d+Bs%D%h|v{0R9N?1pXgluEA6IjotGt`xs96 zeuVGOGI*Lnl|h}sI~e>JgCA$`P6qE{@E!*5X7Do%KFZ)GgZDD{7=uqT_+_`w_oSe1bD+6Iwzcz4wVHdBaEra^Cu*$tnql*yHAiF%nEfhX+%U o;mGjN@T%c{Tm!=cshIv7P7MzaClQ-s>~LcEhp|QY%>(8C52BB|1ONa4 literal 0 HcmV?d00001 diff --git a/src/目标识别代码/models/__pycache__/common.cpython-39.pyc b/src/目标识别代码/models/__pycache__/common.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cf97991a10defd67d6f0349ceb57540a9685c1a1 GIT binary patch literal 33189 zcmb__33MFCd1iOdJs1E35CjRH;3Dw`QozO9TU-=R!MzQdU?zC-0mC7Ov=Vwsr9{NZxE63@gX9w{d($xKq>R=F?J zC-0>){qjAK8Iews_FAN@4N?vxWu3hqDeHrj2avMC-iVZqLCO)NjN6-#vZ-(c zbw7yv&Gr`DZ;|_>xZi4T!~Hh7KZg75_FcHYOYR@S{SJEq_Y*;h$C0wrz8fib2Pr3z zvdi9$l-)tfNu*5LdyujxNI8X+z4kt&>mNBle|sbpPI1zz9*DjPzC!`G4J}xYOYeS-MQIvVZ!vo`P%GN zem|LWFW@_JrjncCgHWzAi&qB@6&!gz<>YhaLN;HkDckWA zSFT>Zu>Z={D_0r4auw0b7yQK4T&0{X*K+nm$WI)vR%>p~ttmgMtM*e)?ouIJs}{2C z5wZ{**_?|ujX0H@a_Xs~wlQa@ z9j@USt~qb1H6EzP8l#%BEjx4#si%y2vtfEB#x6W{n+4=e1g#GfNUzS~az+r$;GJhu ze8V=@81J^84dYEyt;eHV#x2XYCinX06}18P68zIq#2!BU^w)polVAO%&%F2xAN|_L zf9UqYXTSErpMLtq4}Si|`RBg=$+x|D{U@Hjib1$^&+bE$`=4eH%g)nzL8HmptA6rG zx#+mKpFO$>rTqggqrNsb=hkL(_T-Ur4I|(jMWmGr7?Z@9RUJ16kolov)kTtd(Kly1 zEmNCOj2{b%VS}A0OSat|dUTqjv0? 
z0h%4R6Znq29pKAE0i!-Ye6m)(beqtfI+u@kmCJYn7l+QV5xB;@(KHtf({Qa)$TL05 z3wdETj4(29c~KJZ8!^wwMqH~I^{fRG@t7C0Lm2GSNQ*BR)loO$#qfR-!~R+?R*fRP zZvn%XP1zVv4EJdOB|4*ObF+>gK3~i^=P;WhK<BMV`@r))ffU0SE=7Rj4MSW8fA*op%L?*Q#!7WQ2W}L{j1Yf3W>T*umyYbB7 z>@duFN?H*-Skee0jWi;SNV^e1_3AhhCoJ6n)~!x3Xip!Fq^Y3n?DU*d*ozFUnuoe; zmZuXDbvH|3FeTx+i5N$d^cJI;%#wnDFe9GIW}ltQmGu*Wuwp~iDFl9hFberXx!f8F zY1T1jkulkc7{^=YP23uY#zTr}d0agoQV@O|7SNtIm>ZZkAv?@@BlCo`B`y^h_| zjnB)Rl(!9o@%BX7iW^Rp5@=B{L9C`smIcd;*yfEWBnl$B1?HoUBDY4cu;T2M?)u>h z>3nD=Z%;(^m_NX0M;H*>)PoF&mrA-V0BGamA*8oPVo*1_jf+64_Bfoo6;ThfjMEGr zVL)_Nk1~)VCFlV53?2|0rSQ0=c>f&&bx61GT#JQbGE4hNj`Ex*tq_t2hk^s(tGUd?#%BCXEQLjQU5xXNeeQOree|G=JHS=!fes#L zY&m#N;Q?oK3!c1R0#A0BABTw5LQ+9dP}^&9kanUfNV}kF1c(*!(*jS-&PqW&mb+R| z`Bo_`iE++;emhqXETQWn^4|!7CWiw`-$?;&7uw%R0pLq9BD)>BVd0(lk{pR8DajVC zzhx@YERg&}p>n=pL$0a#iRxS>d!dlCor%8Av`{255UYySLQeVSvjL&Lx{5>W43(PgOsdva$|@I83?5NjX}ja#XNVoDxY z&xa_+&wmzqPe6#em?yHfK8&+Ymh3!@po6?NfSv^py>46#sjV(Recgaqf;2bWj9fRa z8=$~OHtL2;v9}vX9Ty%u-lUU=T5#Q83kkhsoJBWP%9S+Vl~D_@NOLT9>Rx+wZO z#MswDGJ4h`W^4Xw3Q)kd$C1@Hvxo@^7aVNUY(H^ircf$+q&d&)yShe8fY3&} zF2S<(TF)dAZcTP4(?z|8RY%Ku&Lq!5{on-2mHh}v?o7eATqTVHgBIbU!)Vw_=tG!^ zWs(p_&zc-dZnGMZzgx2=YQ`b_3_t|xnKix5@0m56F=U>^HRn76&nSUx&zWija{@DD zL6m@!`Eim2_!pj|POhFHK$Ht~sh zUcE`aFZ<>NksP}yzJQB;B)&RH1SB#^fNIga)SwFHrFsVWx=QKr;Wy#|r(0+8F@-Jc z`EZ)>BkZ!pIqdjyYC)TKB>d!wn(LNj9=aAl z9vPtq7v(Gz#lV~gnFBisc^D9Ad8QWzyNDny;)QCb$UZL0>nq#w~1B>?j z>p@@(FSQYvla2@sZI)!cOQK%|lF*wHkce9$b%|Y>W8gA4!9Zw(1Z~u*AiX7m40kmv zK$eM+9xOG>_wR@~H9WOh5^)2faZEAgyM#IYoo1ap`=kuuIcd)lL9UHBA4Bbn5GP8+ zd559NH#Eu?;zUYO5_G1}NFb;abCF8I-jf=4ArW0SMhqZY>-~fqX(n~5Xr+C)Pl?vq zk1__x4&H7}hwO+Q1w$D0LR}0YEhrh&YRhT$6xu8#WC?4a4k)Z)91*aF)d6cTu3Oik zlS3+v1S|qJsav67VtwNn3>6f^rhw@j4wXd!%(%)pQD0qV(7}iExb>JMtYV@E>}@<< zj!z6NH-$9bQMOY17!VgZqYf}QsMsN`gWkiKC>=y0RbW1|f_d4?IDE6J8!Sy{$Mkgj z0mcNMsbx64h7phya=MM;Zv@{-vp%|fk9uYv5tx+pBrb=O2J>st90T(JMbRQx1Hx<| zXc^6@8*9d?n^6K-fSH1S7!inE!VnVb%_va4g@H^;nV$cJx6=KCi1`s%%JNXs@;Ucb 
zSc5<~fpryX#MS-F&EfCIdx1zc%6_iw?PqH$Yc1jx&FAk!iO;gsU_SRLrYxDy6q@+y z}H_g`Kdn7=X^{Dzgqt;63Ehn&0XC=g}gGJ}@-~)WP z6+wN&$pdYqS&j^M>$KbXmWb;eO7mu>2|>0QBcavAXr3`a;9g?-UZguI1TA7Js%u|Y zGnU{kXYEP8Mk^<2rwhSas3qy7P(<8`TH2zLkXhb}%+N^Rj4#Dmqu$P75urT6G=WVA zK&m zw(L#0u8JjH3LI8^+@4v&*4xc=5?x{NsH;1}C+=p<}J=|0w z7Y}zM2EN864)8V2&p5b{ByJN2p;S-eOPf_?roWN5Z{ef(VzzaSkAzPhWlRL8J8`T} zAV)WV``M}Xcz!z&ehwvloTUX2=KT@~N07*m(=(r7M1i3^kRRIfDegy39yt5tlea>* z`PywF_3eHH-7qJNTOVdK@PZ;u61|Y4m;+ z_KPeq7);*Zc`!)|7O^F+ds7VOdTqBcKc?P?%z|3^7BV0~4EUU1AzaSu5$K`h;w;v{ zv6kL|=^kt<3}snM+9BM_V(mPJa+Ow(ThAhHw25U}E+lv%T0%uhjtirL5JmY(aB7JV zMZy0pn=p<*7U4zd<>D92vC-IuCuugWEb#!c_ck%X4#Q6(&*tR6aq0&N~NOiYtSz$~D@&aC^Fa}dK zZ6FZI7L4akK)w)r=YomBg?Q#hny`Azm_mPvIB{AyIc%#8J4}W4b%q5yPZ)pmArk|% z&u}t(fg1dK1Fm3T{b+HfT2qB3!%9qB!ETHo==fH^o^I-)?2H2jY8;s~pvx_HBnV?DbHOmO@G=-BxS+4gqf>ZI=a5;pJn9rD! z$JO)UQ+|5&C{48QIM3mz@Z^)H)qC;YohSN}c>2mFx*ZbxS8IX;XJ9{r9)%0U!4Avn zUTnz(SMNp*>VpU{3?12}JyklhMEw|ZzJd*)S49b4{`NO1ejF&*~|7E?U@M# zTOTeCltUEYOwelp!AzK1JQL(8)s`^5O~l9}q)f}_k{((xr$GeoFe|2)%nBG7ejIgo zqoVpK6F$b^zcG+bi%qD5cGvl!gEN9I?8u+tYO7|%IKeuSgVU{<0JP(%-x=*bj;FuR za@uIelqG0KcEC?B+WklBd*pO2Kj%nym+}9Jqr4Ey+<~Bjd1NiNNerV05ak9hHc!u= zkO*Tpkr~``5&rB)s=A-SGPv?pq)Y#(!qx|*QOl!=g`AWp30+4aAexBwN+AWL&9>f( zX6Wzf5EojJ&XncSTls_MOY(Q61C2sNBUwb6>@Y>FE=$)s$Z!I!S`Ha*v&n2f%sTzL zi!CL1sejC!@!=K<_zH^(@FAj@vIHOal%HOF&{8{DJ;e=5@7>}?+)s!!*5&C!7T@|1 z9oi_tUUpD|#&$>@aP7k6ixDj#001#GS|F8L5~v?iV?nA&?(GFRnFADDd$gur%Cl5! z5>EUe-qM!x`xv_uWBvvnwyA#{_U8ID1UR8>BrOX` zSeuPI2ti^II3Wm!^v$cw;m1ECTW1msbsE>gl5PhRE#u#jS5Za$+g5q>f>KQR?jVW! 
zQ54lNKJ+Zm6I`U?1fBCE2$rA-y#qt=8Z>F>72y=JgjfnULMi&g^ zjmV-Q(Ff@Zv;ks8SKhiVrNK~zzxJD0(x*QP|MVMHt1P^kSi<#Ag9HWqG>RDV zuee$?Dk(Vg3eoRB5|!d0Pp*RRre22=ii*1UVVsws9|6*Zepr#sTEd`?;{jIK+6>AM z&*X|#_-BZm|I>K79FOWJbcCvsN72bq8nOJykZh7v%E*)uf#-s;V03vhz(o+g3pBfG zE*f29fRBP?@_QjlXXcid1JyzsY0dBgeTG`^U}?qU6_$78P4RKSS|yqq%+NMn)f|fg z-mM5+UZHg<_S6qwDi$s)HqlS!(Wm0fTy4&2t?O-KL1hM9+fh{p6$V0dmyvje2&E)) z7*KtpP<~dM^>iCnEhN&u*>}dDpTzrpEqH@`g%wl2OZXFg_{waqYJVr#qkam_QJ-Y6 zj5M>h)pC($26c7c4LpegL*PPTn=9bvPRXl{Cqn6nC0Np2f+y|##1`Pmh|$86_3Qy! z8z75_|Ku!MB>fa)KgVF1f!?^xKo106eWk*1?Rrx0%5WxUU97*)&Ygzh(k-Q)K-1bH$U!`z@v>}G%towY zHLyF)4b4Mkl#L8f7d_LBVKc%c^@K5_3i%Z_<^&jM+)FH&Pxm#FjlM>z(cc(o3^vk@ zp~i4yMPmd~Y(nxyyyR@z>%*18HSoC6h}faVN-wGY!i%>{zwVhRIRT*r#M+MB2sf}1 z!cBUkrM_lj0d^HTG;cLlx+!nej-oZcj3@o2fod4v1C3Q)M19E{Z4P1sMHJs@vGa|B zpAFrNRR53Hchgj_cZZuRY_#fwh8=$%YA?(@2|HOd8>`)s(#p~(YF};l-LM*?UIh6! zEGciboftE2M%8b)tD38k7k5yq$I!|#J2i$j;=6wg?6tA7u?qE8y%5T|-c|2_uGby2 z2j@}0yQZ|ZxsFh5tU>;xUh1ZWT|b?6ri1(=rS;7X3&u^e_H&+9+Gr153^mrGlwofz zYIwn3QMK$5cnq*l(DBOSjdk+mE5M=A8LJvbXg1b+>q?u@%k}mu^m2o@p}Bd%C~awM zv{T;3OS)%C4|nW~yA?6?X5I|?6R!SOum5I9ZSHE{>UMv|g1Xm~v5T-VUb(BWsj{Q7 z83wuWd84t#ogg+eccKrL^A`8+;C`3Ac0ps)meOu75u{Gq>o8N+qm5g=t<60P=+l_d z+>5sE1AJcSO55*kvNzBgi+VPDTY+!eyltfe$n|A!Tk{}i&%reiv z-2EK*{4gLg>7K@j?6D^>4<{OX-A6Ek9_8}uGe6hZhdgJzeW-0e+VLvfy_$EpKY;jS zi~|m?z0-Xh_fx!|H-R}}W!vG#UT?Dbgtt$gJ&6^+eG8U-H!*0z0z?kD-&1<(q9yOc zQ5Cr+TO~9O0!C-?e4lsFJ7Dj+0ZQ3CCnl?Vyq(^D?*P*8!92UC`5MxK`G~j2+kXW0 zJ{xsU+q>;a*yZ;$U%Oz?$cvFZbZ<^Jwc!M`1YNTrf_jZlo>l^oa z_m;Y}u=iP@qk6kxPPvhNZ_--|cyEisLOE_XE zr{sYGssGhg&i>wV#7KR>K6owOxc|D*%q|$sH(`_xw{cbkBskLfp=%YNvF**NAMYnFt1A8TF&93H}b8Q;gNelGFPAXPS+o$Y@uNm$m_G#^E z0G_dm*kL~coQ#30&3dbl_tD_4=00nmSqp69+sJE_RMTPk!Z)BaPP{7EfMEG)Iz(4y z-K<@^T;+CAKh;X%-R*Jk+jH0-8np|TirCl~D^_RAIYh$T80JUy&aQg=F=U28f4sha z98$^nIXcjt&8y<9JKo+%IypI6rxr3@oSAg5xIT7qVnbNr3N)=O4E_a2J&)S_2zQ4$ z^-WUH6H`;KAAe%DP(AtD@l#V%J%vq}^^qDLiEM~3h*(8Jou!lUBWt+%8Z?>UVCQhiJs~w%uJlkzErH%09owOQ~)IQ^3>*BKQU8qv*)=c 
zQjhGMyizIG!;|NWRqUaO0Mv!DAG=&A&RlSvOnB<#qpyR5%agA`&3$@!WXoMWGRe)q zK@-kFmaVC$&h<2*9=UgN@8n**7`}j=h4pBD&dwd)>&H$MtFl4|C6c4yM{M^heBv(% z!0eohjW@H{B5bMir*D4?A5pVlVfjO3=viqBA%T~}YaIft6pw%>(Qgyxo02iTbhTLh zPvnDCK8`PSje$tSi`0|zc<^orvuZ0M^~t3&WLp~VT83(Er@*Qhw|P7D)iOQ={o{}I zg61xHqQQ2$T)RAe7CWVFEE^Wey&1PH&iIJR&0aWFnC{7zClp#An(-;kuD~Upt=G*G zOa)D=d(X-Y_Of8PBUFO3Q3D%rvA@MEVGS25A-GXH&Sej%5fgAWY8Bu5rq&@<3?rhe zD8b8)qVf&>iuwYcVBG$IUzpw8>6~+Y>(ahV8Vhi6tIy7`H(9$dE&3r4(3-)nTUX!8 zvYQ`2QqEP*+qt73ZjZ;KuV_3jqwMUos?`fsT~F6g1x3Of{HJJ~`ZENXe*MB+H7^^$ zBGR@@I_CtfER<#2eI_!CsSMgwEjVtbzk*7z%sZEN=TrffXCFlY0jjfe*q|T7Y-2>* zY^|BXEmQqDp!B-$_tukVg3Zc@_d@&p-|X&R zun9Q0;7k?kpV;w_(C;TD#z>S4GdZ_V-?wT>pcDW2>h+r4q+!gGJ&8f>p?3H;PXhQ9>gH|Pl1j$JS znNjR&Dk!STZm!&JT#IaXO&@7pHxB@WxmZ%4eE6xSo_Ol;_#2*h?1@YFjGsF_S#mn9 zA1^xNIJ97#2Oj_(3wAvgA8C2Myi4L4rwF)^lHtd6;_^G0k&0WeB%Q>E95f)bR8+a*` zKGQl};;cT?BGXU4C*7_T++1b_)-ByyCCeLv2xnl-`r*Q*Tsf0&@y^MBZL8m6V}6^# z?=T>dg`DDuOhH-zQ6oH4uAR>$*-TxF-*>s_Ucj*$Jf9;oz}jbX3M>y3&dDTskF9a! z$PtQ1S2Ibcos>zSa2)jD7V|;QLUqQykm)BCg&Y&q5XQ7rGNJt_4ij-ebOz)^j&`py zaco8dcUG>VeF(#s4UP!G_V>@TFTfFQi>=MffDVMuOr3rLatmaKOe_#=GOO%@gL#-M z!{(bM)U#*|wgmcNb~dvb$BTeyK*|b)7#te{(gK-_vqSukGbwHagpfhb?_|>KdvGW6 zL?&7V0A>rBq3$Ol7-Z6|jDUy`R_vYN0V{q_UcLKnh!AY$mn2;xgeFRVS)I$O&T^j~ zFmm4^KM5XD$)b5UlEZRqeq3;Y5=aEVtvDi>Wcu=8X#`w_S$zjsXCKc<4%`ReKx?q zNu2f3qSflFgzAeNwZ1dLM;}(IrZSkGZoyA=PiQ|5LE0tRf`imm2i*ZfEAwO#6s_Cr zXaEUdL1JCZBquY>QP9k*mFBOQs=266bHRcwbz*0zoKm7l`Baku4QD)YthZ>%0p=QU#W4%!4$+ z@ldNlERpZLAJqW!V*#T9DVQ#4XR(w2sfPMZrp27#6r3<= z!=kzGTScfSchR^+OVDrq)FLU)>Hsc*$$ftxtNi^&)NV(nR@0l zHi&91!H)$q$d9+?j32YFLR>E9ftFRfa7Dx_k%_hBY+>Jfe+%>B1E(D9RCE>2yfV~Z zv1cNg^aTRZG{l?^9%bS!#_#uaA)4RcgBAWjFGgt7^;cN-W(IWGIe_Y%e?!FRPh&I8 z8mvpku`MfXh0P84UX3e>MV(=!a?2L?u&!y}Go9PG_{IR9CGn2VJA&^4Jmvd*&c6Zq z<7WF-c`12k0J)~{Jc@PEu#}U=`Y76frKFUkuvMd-_bmN>*xag1vQm*0a!#U!Dbtd+ z#C0nz3-{JH!=dOm!{HHY81=Cw>`~H6hf*kEotd`kYhQ+M_bhgE+HjGNB-nWra)bw2 z9ZP(XMzj%Y#9>=7)efXt+!TOa>4-NY(Dt#Bu@qI)p5+|!Lh3Z`sA 
z$O7LTlQvt`e@A=`?3N?=rcL)lgcRcIYZ?i-2fc6*h3$h{Zm(nVloBG_vexict^#D4D}d~ASfPjP z4fGBRaO!yTB<*r2{dn_~H_T_8fl}hX;J#C0LP~lV5MH_3@J2>3qX6rT@Gu*Ht2J#! zU>EF+%>5t^M+}+A`%93i{0)mVXD32n&wLz@iz3_ z!}!wLjWCDvxcv%w(N;w2yO$x5kWzR8RhO3DGN?drWrO{w=07N$wjy;J#nr*Dpka@b z>5ji-;{RwpMU@T*RpiR!Zmpgkhl;{e`(Rp;T-uG{S(b1&g4^F>(UCduz5BpgX}J+M zhG|&gc*+_CKOBKqtU`!!)E_f%_)<<~mq3sDy2Fd6BtUPJo3DV{Pz1S2c;4)6-G=$; z34-Da$jL*Ts6X*+2r+u%ejN9D7wR%*seft#5l%;b9A-_NXI7n2AK^Q+kohU}9QMdM zR0k|CKY;9$;*|@qQSh2LqoKE%+j{|oSLDEJII&+ktx6jIt3Jn5b7c`{;gz$iUk9L z$fH{5;$pZu#(*Xtp2sK$_qpmg(@rpWHG_u`_@en~fjk52(WTl&U?E|r(*kYfz zXi&xoYDgkC%NsGrQDfSyuemdgxfS?9k_g;>vxRogqPacJ{51!0{3qD{a zcx~bcE2D%HNlNfkz`7;QB$+pri+gM=hFf7sxo)Bai32Hp>ICAcY8YWZbJ*60tariL z+#SH3sorRZAS=iTLeC7q7HdG>lGXQT?UthYEs zL_nNsQoX7bu5a?4w;}K&96Cn{1&}FE#Maw{*Y)`##N+FT z(7G&+7!XJ4D2!wBu(V}|W++tPm?j*(WX)T6hU1#%VbFeu(GdwfOGLtyJ_kq(*o{QT zIRl-c4R|Tpq7Vscm0M*`suSJ}6194V3>Aq!PtE~<)V9RmS z7)?cNv${kyA(6yUo?@g@bc`3{mbTgt>dKi9C*epIP*UL#9gGtyF0@-nY#a9!&PX;C zote~51eq`Z6P#;tnTeZO*EbNXBF>}{F`z0v(i?2ETRp(b~@d;|2rh3*=sn(p;MNS;4D<(mhB`(x#0iLww z&Ihf17Vq!Fn*!gsHU0y}>g2=8xMimLEK~7m9#_vt`h*ncwFo*cTJZL_Z~$xw?pPr> zVudp?I|9Q&D3h>p{tb3`@&u3sov#pw;}Fgk>?@?^aoDv!{u+*1{o2 zJTUJs@5KZ;u*6ri zA(ji@v!<)dLhKsKd#FvSEQ&Hbt#??idb$f*x)CB%P>yJQh7jT6hXCw6LIMnaXAvx) znrfZPbyxU7(hdOc4Q#mMsSQ}6g~R|YFN%1)cGQbP#z@#9c(}#j@na%i9{}_oH{MKP zr@Q$ar1I-<{03(CGcR=a0l0(9=7xpPG8;Rucl*XjiGyw$zH*lOlsiOUMjU5_a2h%{ ze3oFBnYN%A#~bDh*1BfU8R*f8cfiu(4l~~=c;P*B8ad$e2w%lMu7NfBQEtjz;nD{W zHWPK&UD+IUS9mG-ysfe$@S1`@p}V>?=B0Sj^bM=D2HuV-v^s^-;XCQV571rD{LraW z)xA>Q%F;?HtbM zFVWoS4Tv2lyJc00KMDC3Yb3rCKb@Q7uJk^;*NN?=ZJ5IMKlE5#Bt=ti1% zOG*z$*rRKW#-JAgWPpc4Ia@gN=>UfyiF#oik(lnuwF{WI8*P5UOT(KqO$Z`o64xGF zdvWcz0@H`T#xrz12dvT0<5#A*9lK@22(aH+pJ3 z1j3Mp0LGbeuQyoQJRe<($?*3(*Nw|UrN@FJe9n>>d=8UOJ%$)oC0%#+95iU|8!zN4 zew@ogbVq>zSDKyCW|H?JKd8dpd`(1dys91DCnu~)y{~yMSdTI}5bhF2Vw}B*xAtQKHC({pdMaR`ug-avl~0HH@r&jFyCK&JlK_jVDcJ zPZVG?!NMSn2DBGw8JpsaO7gWNb3*v<}6_`ZP~P4!b~cF>8n!Mg1&u4domzHR(M& 
z+OiPQl?<`CVYW~hX_3tg6>D=?o8mF{1qWTn?tcd>)P7%uCn#ez`+T9SZt%qdUnJ`J zNi7+}80Bd3dPuX`Fx5*X_;iwG!}=m?w7%(Tt>Y%kxyB-*vTVB-UIgfYqyC0viy|rl zI@Kh#iGj=`;u;X)C=&!_-^p^M9?k-NxG^wBPQ5VtXiFdqPnm~9yCD2%OTu2n21I+M zIJ8Tak$AG(2C#fuCa{!brC?`Bg}AWP{(C7JOPM<$8q|lDOxOt%K8|8&fEqBkSmGym z)sHb)VDMoEO$Gu@!97t0D1@lBt$y5D?)Q*yCwn=Hym2hQb^TcbsZ?LsRLqmdU)_O}z*mJzX&b{}>GNAc4#`bTeKFl=MN z0?*?3aT(d70gYi3nA!-^l61gF{yuw^y&B&sn)mVDZ?Co2;TxJo*vJe*(%Y51l%V80o=Puuq7;`%(v3}AjGuN@oN?B^px9=oG|s^-2rz{&iJxK zZ;HEV8yf)hG6|jzc@X!q!@|On+SbrRUD@?Ebv=k1p{~1d<_AnlGur9_3VSVXOPf9n z8_B!n!ZBds8!e}#NKcu-77P)3BYo0lWp!b**?b_(^q(|4Dg~D89A8I+gMqFaZ`;WjxSNvKfzXQ=0h^jP9yH> zP@$J?#1e=Qlg{GmVdu2$_Z(${>Q}N-vdRvB?=<8`TF~ezA1Z)gs+3Rrgp6Vu!gJul zx^{`ka4RI;)$~qioPfOpdv^PM->urzPY|5T)bRdgYe1s(=K#L-5A_(&Y=yueJ7lNWII0;Y39KI2Ea8 zl-p6BIgLf=+hxQy&aJAHq^F zOPQ7}YDaPOL_ZdRyYk8%mdnS0!!X|)*w7b;qMi1JykTzzRA#CxBXGzVxDmDoKLiyQ zXZ!yRRF^cKL{ai3kDJocP_a?BJ;Y5>@_e|Ftj3{U4zpiSk(1nfCu;6z+|lMLD6-s+ zH%7${Dsr*`697v4lDh`V>sqRY+-T-eas4Xtu0xxpm7;8hptL>IO5qm6=6cw+SFRQ1 z59*gu+F)C&4XC}y=Wc|myn<;MgAuqm$kPDsE8R`@C^;LQ8>H-r8g)nDE`b&xC$=g= zfgJ@5Ep-Jjw6Gw;@>Y2RIy|m9{U%_jdX$ra zh8~$!b!*o+lmXby@H1-VLOnFOA0DQ8Rhy*)ahO6P_NT+;4l5ZK_@)R~CB)!bDk7=~ zk6%D}C?+DmR#qR}QLT;JvRRazYN8$$pZxK9bX*pN>tU=WkK+fWYGu1Vq@Q6O5^}F2 z56tAm7WE$1_A?ARGrq}Y%RG6-}}BxK@8Sx4_|ex&-qdQ4jd>~7>C(kR-ff#p;$CV zY4hSBWADR*3B8o)NAXi|T-tkqnfV4*1_59c1yuunqEP`PbO6E`?r%!{0&{jmbD?%3 znm^65S)ZTCwSFUR(YUD3@#SA+AR>J)$ARlx(ZS;Q)#phCr%;jsadsR_hZMlLuEJVWKnhZJVaDvxJIG%&616AO0p!V+)Twh=yoJU|SoJUq- zdqF7qev9A>3;3X+q`O#!jCIfYC$e_BOf67%nRfLXR?xK&x&|u{Tku;5&`mb!R$wV$ z(qhqpdB4b3uz16)4|cVIxcSl`oCcgddK&J*Dpk)40L7&UE*S<+9f)B#{DSMPHbi;s z<|FXYLYk2w=7OXOdBF*UzlVy?-Lm|+I7w$~7qQjpBCr-LSEhqB_P2HGxe_Hk!9!ui z?T3rI!&m=D2dKcLo(^GV+`0XNMp$WD+y`MMi~0c)+3JJ(66!?5egj{+>1?(71Dz_( zl@3YjA{^j5f7EI2n%?GedfyoZxJuBA0t7W0rjg(`*$6=bI0iBP5e8pmu&5Et`6r!5 z3_w-xLIO=)dL|#~8dV*RzB7#gKbHe0haDCWl5Ol_a-s0pp$}usL@&r*y2WpV)hAiRrx|>T z!OtS-={Khcv3&(DXETD9{Z85}${lR{hThW~>@u;Ilt5)zN;uRiVN#Yq@jmU#3tlTLFkQFJo3u4t@iEh8djc`l5@Hiqq~&8 
zc9ix#x5itoM!jt5+g?%WOTa;75^$_(fdgJ{)S2bCL5#L$Y*M~}U@%vKD1dEgv$mf& zg*_stxqU;L2WlsbP0+jD{japT4;7Y_Rtp2~$Jo-TT zX4?vaDI{~crvOwdzfUNDJAxa*#Dn^Dae@xmLaQ?9ZVGeIv?$}F~`K3 z(>P$o^TVMZtY&cc3QoB#J!Q~9A6aoRGC{oZ{N${Z7eJf!9uB zrAX7A2+(+Y`^ zW_I46ndjyAJhRV^j+X9y@b6o<#u@ty+mkHR&m*UAp%ARb1Q$lcYg~VgnyJ55&C*}H z>C_z5%&5?EYc8kRR#a@2Y9-EIVZs*9CKFEJG)H=7t&E<6aM9!Tdq&Yy6eaYO&@+ZP z2gH~ti_uM98yChkRz0vWRl2m+4rJI0;>?RGtwgLuL0Y8zUf628SAko?=j9gMu*jn%qdUA~3G)b>VI8w$&9F&!1S^+M zU1xD+?RbCY$+RD5}?Y=~ua&#JHqmNK#q4MoJae7%S)=AB|z6 z5lZ{5qo}Qetb(k=5ruu$QBuWM!!LjEe^YcMQ*y8({XzIB5bN#h@+0tfLb&#UyIl!+rnD= z2tzp{oE@`U>=e<966j-#!jAbB*6%A3RpB0DFQ7-*n8D97%pGZ#JH?JGfJHmNw;9$P z?Xhkl8*3ieVq3<7Axior<4~?b&!Dqt3qOb%6F4(j`ytdPdknS1+N#UBLN#{&_M4yn z>rcNv=aYI-B7G-zA3{z^Ga6WQbG*y9q47&b6Oi&_W7)cvD-w*;OjlY}Lm9JAE8c)$ zk7CWPl6_J-8-;1+C$09mFVdE@zr!|84P`Slwb`>#;(Jkg4wb=@T-UkcOQ2+VYhkKQ z01aFENi2rPCMlBQ>tmlqMIZaiH;gXt8W~V3cVCYH;S#5lWUbS!#Gn`%gYv(+3*FHJ%(XP5UvcrPF(SyQ9Rvc zhkP7k&ms5uaQ{C@)hX%HIsznRMnRlgwHt+T+V=cF*$YvUNI62|M%-3runshpNfasD zlhRvP4(57ML)lT9rORMWvJ#8@u%%bEN4g4@Dsv-jE8bEgFMFB4IG4!QX`zf;v9hj( zSKm;k=V#TDB;=4!YhwOseH-} zbaOO?A(=ghyT>(p4)tW8dJ_g|Jt=odFJL!&R9g$JYUCn)5i@a1vWHIz^9;HMO8xlm z>HDDh`8sXO3$)owRA>-4eJ_)mj*&}XNNhk@qb1?$3j>;rZybDp=6i;=8uT6l7J+yAMAhE_fK3Z22(qjldvhrCL~t5nQTp$}@wPYr1$9V+I_n3xhr8J{*` z&f}jxO4Yo48DYf#rH5a`wqGRzBrew|o059weQHu7cqFXRlKc>AM35e_7Vlau$czK< z;kwz{_cN=*GP`HUQymtg-hlhQDZ5VQGz%HrHDQOac8qSJ14k|F9&f+iDF_FSw}dd6 zQw~Rix+jJG{MupoTr(39xZL{Wqjw#>K<3&(;2fK_T5I@*sv4c9kJ*eh>bM9aC_tEO9!8!*K7u@rJkzl{_CjIFd7I1Qh$!9qQ3M6nlAHVk z_i>vK(T&Osqi3>X2o_2`12=w{YB=9sDa`QTan{}FxYd%egLz`&lK?-NWiU1|3FThVGa${2~yB&$SK*4Mu{;(iKc$5?eRc>g?XZH zw!F1IL}&-Gi_SiVIh#f!HRzWReXdsS$d}Ozy&lr$r)&NmU4FPW#xk>{wyvIPwFsvnr>*yZ_%yj zw({H5QB@;ad(v5YV9Gff_$C#EMXD60h}kk|5aKfmVsqDSksu_A9dTliDI-gCOy>dL z7rhiLk;=(}I7N`BCGL3bc7Oox4O(Iat6roEAWJ_Ml$EXHm968@;uXB!(%WIWsEStu zZ)yGY08yN!OPk8)B_XmZgmF7UASvI}EhxVxSxAFSS8Of#*^0hu`|g z#B3brydLd~9Z1khVoB<(4y@|(h8#K?@>IusJ3C{ fp3UWZj=Wh)fvLq^!^M3{c#4w|74Oh~qeRnzp7*L$83!G=f!`;KGP_mFurjHTBo3TKa1@ 
zovOnL$BYV1x9V!xii*urwZz%inXrYk&4d#;jiD{GTE<90xEOKwM}{#{6eWz5Ffsz1 zQ86OQVtAWZ$Aoc>RYo@_OIL2S0~t1hIQ62LW=nLUASvC)4cS74E9L8xZ?mek!z@;{aehIW*E-F4 z-_$GfhF}|&zR6`YDcfQ_-e;U;M#gGJYG&Zr6x-tHS(za(rFO$f3q4mD8SfiIOqh=_ zI%|*ci-O@KXW>4_UUgKIf4y?wz~?#$8D3m6Zh1vf{BMt+>lou~zdV zFG*^(yX@aY*p-(TKmOwffB*1z_kRD2kAC@+`~5$Bbo{^?&8AsqX4W*DV5hNe?Oc3z zA8X<`mY#h*BUQ5&Bndz|)X?!2TQf$E0#=12tT$VskVJ|s=hNdf!*40Rp@$U@8O>CAmQEPn7n&bLQLT35{0YoTx`ZUfCn6F8Tw|o5M|^aS6`G}T+&)}wA7mF~ zAC4#-vyY-Gz7gIc6Py!w4b8UuWEUE`?h`Kug~U&NZ8rY^aA-EEk(pb(>tr^hYxdw? zGDjFwFsHuRv-|8FzQbXpFwbo_buwp*UxsmY3vrYgb?8PN_*&9#T4=afYoBIlaoIhy zTg-|WMGJRi3wO`_3d_fOrHpi_c#^$@5n%&^Utqu;YLv5L<_fr_J-EDS)*J4#ZXq4% zjqb1=W7!Zx`X*!0vBH)?cj=R9=BWi~somAZDo3X zS}93#DUxo8yU%G-_-@Q-^fy z`_+f)I5cXWw!2J?_VcFirIJpn7`ZGCkNYMeX@^RV_a)SXC}Di_!~-WnAWvc{cZ2d# z0uLhtk(_Vz1fX-M1J8U?(uj|GtPsbk9P`YF9M^uCJTyO+xwu#2Bxo*0!7=8O-=S-3 zDUJX*bi>+F@dZ$RU=r{xS|jz955k(ZmRIOP0|pF^I&drVK84UhaR87L(uaN#0Q@N5 zF?P&lol26@Xpe5EP^Zi$)#=*!vgJH7AESWIETC#%^B4fFK9gE~%w4w2ca1GeAO+a> zCOnVpbbF9Ss-&DetlqU$!Ryp1wN!<+Y%PaTpo;5Z0#ge&gU?d6*RcFh)y^Hz{eLyh z(E25>(k+}Ls`VxYce*WQ__6Amd zmG&SpxJKEOG;-ai?x2v5J23FSLQ!;@&zihzH6afUm%YW_VX2j|)ZQ}Wxs1ht>xjlT zWYU3sh_Htp>d6&y6Y_gfYTU*nM9;fCnM=4I4AYev#Y=dU*p%TW2(`Yl zHoRu^fa#OQUPDdDO0?;Zkm)z{6YNOrfUSQI7|LvVx8&=9j(A{gRusZQ)I)Qg8N4Cs z{rFiyueI>x(CHTOz zs^W!u5aS6YFE(gfBG#hJ?eT|PZGgRbPKz*^S@G6`84)fo2NG^=MnC9ge5534^771Q z-d{75yu4;+DQD&pM#?3+;=?$2a7{`uD-|`QH6@v)hoPjc<#lSv+o)2QAgxNFEl8m# z+stFGMeLTuO5|}ts*JRy(=HEQdBsaWxl~RX#0eBgi`4Pj?EpDem6lk+x)-Sej-@{a zl$CDab*%Hm;y3W}N^XY9iYi_Wyw#1j0wh|NZf$B`lviknLKwFr2r&@tW-@kc+^TFVX_LEh!N#;OhBRJy|E6dzL0m4~gQ)*ODZVWKCD3 zY{|-yD&$Z0L^&c@4{`K5ZAbJR;Tb(OmF2It^Nd9CG*1Y%w)ligdaTMgRsZurY$P!y zb7+_`W;lEbN#0o`ek1%DL=Bfefyd_*JwB2Cx#kEe(m#HNpXbkV`6H~VmNbYrr3`F1;$Q6p@oemv7?iB23Q#Rs?{TPSm7U2HYi@E!BhG;Flf zs%YJ6#w}k2G~;k0>L?{=QG^Usv5Fy_ZMF<5g}GG#eU`AdSjF0A7OU7e-&N+dPGdeW z^~$`Ux{XZVmyH#XYVH)oKv? 
zNm8xeWB(??F2Az$@gF|;+lRls|GS@m^ot)q=>PtsJHJ}Gf9H?)H-Gx^2S2*M^UJ06 zR@_>9@yjpG&MiSSVVo?5hr~78UY7-IbPPGs*yBa+@DXnDjS?6;JD?#TY6pp!OZg&l zB2W-Jr&$-wsxx^hbxqW*OYHjTE?AxM%o65FHqA0Kv!>YuJBf9xXPT$? zW=$NOrAM!11T||xk^rPb1sz|uHDlx`>dH-`TB8*SNu*C^DW=d!pmL0H4-6Os8%`TVKF&9Whz)zQ%6$HwLxOEWY7lI(TFvo}dZ}7n z?f7v%qM0gB(+R_SLJPt;u2v77KY>w6myiT}A|j!RXN8W)J2i zbA&Mkb?TcvyU*U?+Z;*?_1yAOCv!IWB`8e9K+n9dMPVpo`6}}g~^*{z2QFVy6H%7 zbenA(%Z3=zHyH!Q+)ab-(kE3vh8g3yGu4lyKGA2WozUuC%F$KZ{l{;5|M}Ce&j*CH zK_c)I_m3kd1eF>VULWi79YFu4(PMq~Gh@}dmLm%`!Av@hvZ0LG=M=A%xqJ+34xz4i zE{>8kXf@jBzlf24A+wFM`-0lny4iDaEAZpwJSu|)xePx+!a4jV3d_G8C0Z@euw~F{ ziX*otfes_<+n+;4-~NRqqszNS%Ics30yba``3|yRrx*!&(Azi2o9wzdg=JHpG!N>W znrwC2iL!1)egb8rGnA8KO_@aF(;I9qp7TG(bcz==?7`K zD9M<}7xiJmoF+uRgjU6a`S;^$up)`Uk|PS@8^DRl`N(4s>i*K&hJ(NevLnfJJy#^+P=e1*EoC^nuxpkdFN zekQtsrPpcQWh%6FHv>PFbV}LC<#2G+Rhp#DD4E-rku`o8-#GTri4ew9G@ZQ1qI{gH z2mXM_%{O`ywKK>Adwg6{Pk?+RnR~4x_V|F?+8UEx=11Zc?{YH<8#m(c2;<4x5c6rz zWCX>5@zv&uFM#rWb$~_DXw+BUcV=2sUZo9aCt|?hsC|Pn9}ww-MAn|nnm+VPsK9#i zZDZS9)&V2|M%!^a1(Y(IVAHknWy`rgK0*PV=^|?@^Ep&BbS5=)%pJDFcZ^L-!13?d z5uSTM|Oev@kZP>ZX`8B6U=L!QZ46ZIPW?{(RA zQm5{w@VBJ$!rC>uZszvjN&CG0X66b9p0kL6m-mLW|16;o{xV5Zr#CFT8hK5(InYeOOIzBVgT~)6ZI3wlgKBL zr;w*JE3=o~HTEu-Q-}(^>M3}1t3&NMP~+7fBYNKD$y|CGp1!zgfY!&UhP!=E4D~6d z@aj)YuuT&u-_@tgbeO!0TY91dD((t~eLuf?H+_<9ml(Q<{p6J$CWfaG(5Fv{k&NA8 zV)T8)Hh3D1=@_+`J`Sf|OwV8*B8v@4{dAiN5Q3!1D>IM0yJjL; zcFn|M&cs6ul)>PtkK)|^70b_5hBP{oS$h7-MOyVX6=Y#lF-j0Zr9cS6C5kI^A8Qet zC9x0@TbL>%ZRrrphZSD&6HqOclZMR%pwVJ<{B}D;JoPv#j} zh_LWYyswg5QL>^6S402i`dcA_Dod9(l`qM8+QE&Q?HF;LT+%IoTOa8rVX7-OOn-}D z7%T7XcH%eMaVW22uKX%3u-lSCP8V0tuj5K6i@SDPoqN6T*9g<;S~LU2C?l5dXhO5%osBqK7|18G=jPj{wzF$ z$DhQLa*Cdm2<<#`1R0?nKgG}T=eXR$s+$#Pa3Ghs$y`XrV1|H}-hSFh> zr~f3FZ8mex4(|yWdfjL>C3#>q+DW50nH~JelH>w)07=M2zbkbxBfo*k8V>sWLvT6( gzjrQ|=6TvUp;*H*J;TGzO9)C!=J<*6>G9$J0cAKt(EtDd literal 0 HcmV?d00001 diff --git a/src/目标识别代码/models/__pycache__/yolo.cpython-36.pyc b/src/目标识别代码/models/__pycache__/yolo.cpython-36.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..a6edbdeb8aabbcf3bb5c92b7aaea6df19d3f4b59 GIT binary patch literal 14174 zcmb_@Ym8jiecye}dv^B8j-vN)r^Q(;!Hoxi2#^m(Qrr(kideC>Fwjo{ zTET7l`~A;6cGt8&br$E`^M3x{=fCGCCXyfj@%PvN@CC#8dt=vc3F*%x_}{Y(!!`1T zYr0n5%$xGH@|JwT)pPmWn?`fVF}zc^k`GOV z8veOfKKEs#X}x7!HQcm2anEokyr*lY`*waAB^h@TC6j}a6_iZ5(9^O@Q#69Xh=4Kz*gBFjw$J{5}kO$e-Aie+v1Nqx`d&>pAbR_gw7)?&rC} z1@EZ$W8QOpQ|{#{lwZX8U-X`@T|)W=H|L&mPxYPJ=RKCc=&kCWgZxX#UB)in@?P>@ zbf0!lKd?-r`4FW~qEvZLdM|n}RZNVs{J_q?+`NODS5WhJTXo!f#$7pN0CujpXZq&L z#@pBO7q-)w0V~U2#eF=B`}lF(ho%0;H1e;SMrXIcTx9IpCJV0e%mWm%hgi0(Q-HIo}Wzq zlwYiP7m`{0j%VB3!A7f@Z5M-$GePSN8hLg9%yz5ZT4`@*mzPWBN|w1hpYd0=i;a46 zhv>DIGsC#+dF@iGy^XYU9sPGq)P%7&t4+6c*GH*&3eiky!z#1BM zj9z=~>eW|X4<~f(9Z&hyRx?cMEGG&xjp8k@;8yQc{pxz%3zKbCZ3cy+s&MQBWv>`) zDzAX*&0@XUDFzrI7YnD#o4!{lwVHP@r(0NWhBMV>HK?+USGem{D;t3yCS{|AYO~x5 zV{O$cS207vFBR)vq1vc~6TVmXN;t391dR;BPleC2oKCiw+Y04J14RWW^v! z0XeZiN-U5OyKIyV*C{*i7-bWn9le*xN8Okk2U^731pbm<9Hb!WrutUd5wtV&sdo$q z1P0fb38OE20amhhW#>93xlxJ?=H!?2+X(*G5d}uysF?(Su6f@mSX~ggg9cJ|;B-M| zkcu$nT8jWf*Ir^>G>FyWE&!{7`dVa;Wr;_WB~hDlBdAGptl9+T%CLtUckRC22O^YG1zWpWa_%==*qgyiCTFwcrW;2Xl1`wQck!~Rrb5>}rtcK20;urDwQOF559W$Ml`86aZk=pio%3SZ@Ke z{7WmN<~#p{Uv4&x_?vA{h4EoithA&KW0=scdrjo(>Iid@pruM1VWi`!mLED5RdvJE z0I4K1sk}VlG3W4i z%v>@%ll#u1#53YAjSxe)jNtzRF1}*)tcT{-hXF1dfD9V>(0uz}^=#y$$e;xDn*yed&u4j9S8Au6N_LX|(5#XI$%`QN~XD z)=j&Y=p}opZsLc{+DmkktlK+!FB#0%=DMisru0bIja55WQU7#P*^GN@V^aq&%K||>{*EP!)kTkv~*e|HML+Z6d5}+0kttg^;?qC>E#byPJ zf)ykIK$DFk@Z>BDhz}Y?Vkl~C2H*{#_d&I?+1m8Oc%X_vyJjU!x0=nZg1g-;HmW6c zn&Tu3!c~YdYMBKwzglUukRzOjQ9nS9hw0k&YM?RU6f5i!P$*h#x4ottMuG4x739(y z5gua$vlE)t&|3FHvjD8!M%W5Xe6|qoZiEh2SXPgtsg8&}VXWLzcZmG_PvR2 zx;r7@6I&C=5qJSfj}AC=#0|FFUT9ToAXagRAl?>0pn^T)_jfPkQbN(x>&U4$7zq@~ z-A0Q7ZDG9O`5Qt@BYsdU-I9BT4540RdxBmrqi_5tSnvf#R}tltLYoU`3unPz+|Ufv z1=d|-B*0;9-vJ=p5fBux+Xn~=dW-QI!hnpX&9pfM^68lK))a{7X|ppuB%>qv%}tPN z0JgnC0dQRaXM%i(bh=Qu4UtIK=*3^+QeI(1f~_b?s01T9fK;I{5WorrSxPuDp!OxN zUPsvoY(e`!%WgvZ4-0JP?{uFzDIFsiXeoP 
z1IibJXhg9hfp65Ec2jN|H8Bw_Cfv-ucs?%TQ3AANCQQ6k^gZx1uYRROI>H~n3a+KOay4_&XwJQ3 zyaU=Prxj-zMXPs>uBCnkV>sQ&eWUC2p&>EFn)@aYHUi`{YOz~U)wvHn0iqIUcGp%j zK^*NPKsmMmub}otT7I7bT2!Og9zl#}Im;bnWd{(&7JA^qu6fH+a~L~{m13R$!n*BX z1TBOKC6|*!!BO-J%^Fff2aOm4Q-q^WlNaldL`tnj`%)QsIo7Drd3t=Td#-AiFg5B& zocD`&s4ghYLy}=cM77W?he>DplNf$*l`CdXS7>~PU$9l@VRM9CyGA~!i|ecW4k`*rYkyB1~pE{L@opVRQ8=f}9X`#%sx0V>P~4zYCb zose(lYf+BF7J#t>SjQw=bt8kXSxdN6D4Fh>wIp;~tOn5OKz>NFPXK8QY(o0Lrl4og z-&sAfs1x%VTCb21K#JXjW*5WVt+#wnIHJ&h@2~#qMPU~^NgU-4FQy|3UG`+d&vl$j zXIK4C&1@LGdga<{*ZnZs21980op^SjGj}53s^`k`SuJ<0Z1z)gxf9D~Z?uA9T|@eD zj0%CEicjc3rEY5~0=D1YR>Gm5LzRXy-e~BE?rAjviOkymW+NBZfOeiO1)SQfGE1S^ zQa{bYR~e0{u_i|ilyQgwpk>T+5W+J6uQ|lW%oXH1hj#(m4}kt2T&L0{(8gn;y5)+k zWa}E>PT&hLa|w!=(=~%g5G_N>Q}Y32Z_0Z}#c`hr-f16D2~{$sYp9e$b~XzWx93G_ z)QT!8j=k2IzOo4ntz-og{gsuKEF`HWxGhgg{WC^#urbZ!e02*&p+o%+`th6cjcpYD zVh|`rf-mCM9rg1}#=sj`5vGR(pB4h)fgng(G#R6@K3n2%=&)56DSrwP5qJt`Iwgo4 zH#?c}iL&9&&s$)5B=h|7>mbNMvU>m)1SGXcqy=!vf)s0VwqAll8gfgcQt)A?*nh`7 zg0CVBS)8d%Ge^Nb(Im2iHf^vOZ?o6&q1OkynLWeXvv>V0t zSQ;9fL1>kmauq`RsI|Tt~EcDEU0}TKMFOhscP4BkQ zDQXjIBOQP!G>IS}@gZ2q*iHmtLyH=RaqULIXkk!eGXIX-E!k7&LWfbO+pCUVj&SQ0B!GS(Z5b8a58se1!H8@t73+8JF<<4ec z5~pInfRaUbstX$w#4XIvu`znAuoN7kb^~G;bBx;IZmhOcJK`RIcstd%k3jG%NEzl` z!Z=51s|z0MYQ_rFg5-HRvm9f{;BaNoe~W&pS_PAG_vIe zFnnSM7DTWw!L|aECux@Y0{YveI}rWNE<|?h8>$^17hz|4V2d#_){P?B&Aq&TwZL1j+K&+}GSw~XK1CRu^jlU5CCS~vd+W>C^H--9i z5$04VQfP&HS>QK*(YXB`)J9SJ9n|7+8KsFqEqGq@7iCP;XoGUsKExJ$S+e();0X8;uF@$gE8w<;O{fJy1K741$k>~6>#3Dcx03^qsnPl$M{ysH{a zEP*!17=cZbi*_(M;LWvSKJxx6X4sK9$>X=&Ki5( z(|EUQa?HEA;r?@f0KyT^5|->zM$=9?9bW_xk=}yuLm2kZX{C z`7~1D9Oiir_aPe)BNre#49>%%s;Xg~U4HOT!1;2h&}Spy$YUWj7#z!3L>Q;QZm8Mn zo;{|-OhA4)p$&p4h z?9Y)AXit$NZG97*$AX!O1^{xOuY>!5E5Se{?*z4|o9APz$h zbce13GZdb&=mPOMO$CQ7qZ(p`LgMUi#nnUU6~lZD{Ja0>pZ@0y zH^9Xvly`fxsyx3?QAKxrUy{mA>P!0_bl0|n7m*5`b;wQQCn`Qo;28l=KMgoX;5oNd z89yi~xoe7~Vd6T?O?c#214AR8UCT0TXf>+B)h@6QTHV_z_zh z=|QF8!BLQB`ZD|eG9qeUfeDk%6mUiOG|WDbJ@lhC{X&ojhl2SS(xjN}Ac+u@v1z>M 
zF$L})!+RfOKj`}ogTTBs2mFP;miCjzzkV1Z0p#F>zS%Fq`evYYq0>-yc zMh(+nV8Wui11*A7W0nx8tialud*sDE3SdUm7bFRXJGjLp61XuuGL3D8T;|YRDIm3P zW&Z{BlUx}`F|5wfAHD{GPG@aR{b#fkEWyCzbZoz^OvnItt~kk$!uyX<|0#lKLvbtP zBqtTiM%$zzoI$YgBgERmO!E@n0wC5R#*;H*xJV%eId@a|!W;1e=W9@5U|NAEA_ObE zCrCUc@${lmcHjvjYze>%JeD`Kwm3Y^h{MzDlDv%op!w=GcMuEd#rPg~-*k^HVoCa8 zyO(@uZ_UF1X1Mb*_Q6Htpon-6t+&5_(|Xs!JgHv#U7JGSeOs)fe;vf`LnLb=|E|4t zC6ISGFv<&HloP!S*;xR;(f5zR;A3?&>;p3s?XB<|h1v#Js_w)^`1TFWX}A9yRI#*s z;1*v*o()n#8V}$O-(XyrsL&$~BU`#RiTNjJ2n(iy>0l<94d#La!F+HqSO^van4^Nj zwIjjN;IZ<|ttdwNHeTw$(jng9Ot9anUJ`yDv0FCkem%${O@ELDpnbAC#V32TJV5rm zX#9$WcSUO(kUJ?_qSAppQ+b)c7S{#Ud*Tsw9vjxoQd=c_ z&hm8V$c8&l^-a36xd)&!{WwO=$*A{s@7|rAB?Kg!CsBV&>c76feu?#QU5XRY_hG4T zV}DQMP1eSn>=EuEIEnq6+{b%-4TVbZp3$2QkivZ}xKHr>YNz4JN`g8Q9EV5i(mkU) zU0d!OgWb>Gx8Ni()14(8cc%+02xkyh5zZ1z?hB3tQSOa+(w!upbZ6Zx@uWLLJi&~F z5aNn9LndnH-m!4fkIO1CgPfbKey*FU5bg$N53GvUPIRYMo0sS>>Bq z7zNpfheF4`_{?gUfFI{3tu8+D=U25wiDVew&|W*(C^Sg)+jTFWxm$I2<+bWa`l~3u ziG~@**TEgu@t7Mr8y@7fZ=z<8luv>qQvO)mp$%|xDBV+1G#@F`i&<5O?J-XG1fnxT zItx|j=q{7oBlMa*iy)hxMes=ihsIL9j9tc3;W@jtEyxP{7EwX}v%^KeSo+wb7cnxB z?pej7jsgbJ_kP4tyo&i?b9U%^t2yM}1NpJxT5uq66l}o%3QmuEW+hwl8O9d?p#d(d z1NBN@03`276JSx_z5o48m{;)pt{M3{yIS5e+h~ttr49k zt$ABPy}Azfh0QkXUcMhD)}Rf%LYuz&T@LV@jDA5zxw_Hv19|OCzSIf=IEccvI-Hc^ zT?O_V*RO{TqARK|{k~Y;VlVi@My|9<@R3Vgh8L+XBTbAuUVh_xXk7*pUp{~1^)Pbz zJjaP$J}={I*U?G#xdDGEzl}F`3^=WyUw0STm(Oph6eq@;JYHIZF!fR~C~d5@R0BQe zmJ4BBYd{CMxtAH$8Ho`)jt21IgULMl%2pc=SYh%FXk(xiaN-VQ$`fx@_=6AiCSTvt zSkmLOExotk0gL(I1RHp6ZcgPng`koczDaFhu5|NOhtW*)m_FJ4XUKI%K6J%aSZ?+Sy{g zp1pegr{E~DysW+OG#_}fwCNVJy5w}Wy5g;5SF>opdiw0?bEnTUS!!H*k*&ZWC%mY3_*rZ?>0IrfM?+OZ(J zzO56T=<;&l`9WuvvFw8nsHfyWL@O2>0qg+c#F4YZh&Y#ocB}0xLW1xzXdIp7xML?F zNSm!TvmNU~Hg_$I6$);v1dqHp2&a%}^=)>x;K3aS5C=HL242RtebwPnSgefkUok)t z^h*&gk`F`}190!R>UY55uDp8f6*g1ko}u&lYp>lHP!)F7hVv)}4LtaEE8M1-A4+%K zaAJ?*70wQ_aA)-t{=uP%OAe^ozw G*#8g7R>6b- literal 0 HcmV?d00001 diff --git a/src/目标识别代码/models/__pycache__/yolo.cpython-38.pyc 
b/src/目标识别代码/models/__pycache__/yolo.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1daabacbabf90fa150130591aca470d224da531e GIT binary patch literal 14150 zcmbVTYmi%4eZMbBSC7>`z204KY{ju1%ki$)kB|^HPGUQDk~(XXI1f~(LaTG7m8_S~ zy=!|lSDm(NhyyqYZJ{%SP>IqG0ScWCg|-yhX8NJSK+A{Dz}(>jElJ6CI@3?|Lxw_s z|NoIzyKAQ(s-1K0dB6Ve|2g~2z(7pH@9l5>V)2^~Y1&UIv+);4=3xZ)9YfQEmeGVR zjEbJo`DlmwgCW#p$ zm2DSjrbuO+zjkGNbs{stvqvkF)v3%>bw_4LbviR$y(4o+b!TQL*TpKks;Nwh)A7ok z)!muh)w?oxRrh4}RQG1~R`+H0RqxK+UEQDAuWQW&W-u!0O!_&kHf3qfflIMhb%`4K z6GkTeA+2USubt6ELJVBj#DH^8`QWOVnL$ZX45DPPTQZB1Au)`SVP_UC4mpQ1b0`}T zqbM8YvcqCbY`d;yjyOlf_$A|2L%!wc?PEB*d!2jB?-SddK`|jF#nep;r$i+?oclyb zOm8T;L+lj0MCzv5ZE>gAE$$L~*7JLj-?t%uH}d;?`S$&o>;2BO^ZxP!fY1B04>&uW z4><2%)mI5FccXkB=RfZ}*glT*Ln18>hyxv~{jf9N(KC-YbE;=Ie*(Fa*v0eC3Fi@U zk2rYK(6!npP`VeT(%I`g;+!bz7-i&lz~H0}p99s@KC`7gSbc^t5saGnsTe@zpIoyT9(Ubkq?&uRA(oF4!6$Dh{} ztX?%Pf1+kPC)&T(VHiHGrH?e9jeY3c*>f{)!^xKlrF^Pd7t0mLjm4gEb4BNYSPH*A zsm6-8RIjBPIdAEZS3iVCPQ^X6Qm@o!8!M@qnS7y`qTH2x-Px5~wGvw++FD5Kfh`=T zk*_yakhadF|C)}PAo5(PChAvRl6YrXAf~c3QIM?WE;(6Ix>9mWixnq`HDsygWplDfV{a=sId54ySyV6QDy3%5 z!vN_>FjQD}oov2dyMj4IcCi+WlxiifL~WexRi{*3^4uWC8_kw#g?bQa$a|BRh_K6Qp@LQ`x}4ZZpJ-}grkA2Xv9*$P7JUT18ijQ zMXjKVu!vlbX2QZ2QIJDK#PAn)Y$sZdiNvZ=u*85!Ue`167c~ov1t1&>!lxV$t5`U_ zb{>PjpNP$#;7cMo3hvqtsNI3)FgbPqqhe-T02QSursq`c0dN+V0(!78WHw& z+c!^PmJZE2>|2OOgoa&37uIOul8|K$99feVq9CFW#9jLpetD|w{iqEc@=*kL44jH^XQxgF#Fd>+xG4_tib+h2R@TiOeL3 z`0RRz^k@)rmm7`@>|QIZG%t5zjKHioHRLLC7v(};UFMg9P}7lhH?WGbB!YOCzBn@| zonp!L9GP8QE)+0i*mG)bT?U~-rJnNw>q^PFDyM0z*pti^&Z2psSA$5Y)~Mw2PC6v% zq-C0-DQalsm*o49z==h(*?cAEy4kF}i|Y4Mv>(wu2<{jnO^@kegqS{}kKu2(KBYGY zw;V#A0NeW|kRV(>iQxVO;4f+|!vmddTecSRa^avq@{jeYDgxV1jeZmx$2q9;8xGzS~%279oIOcv1ahygkn-D_Q zM?fiRw0<5U{BL{>R1m?a!2FAb>cis<^jFkQcC?m_x!HTMbd5^MQ&ez}D$G2{C!A|EoSF#2p!B-*(g{T?yQzWR4D?c9 zEV_Z71;wr)Tnlu3t|44q3M{NpaT{hiOgllOP?uM8Qrw3Tn8Rthr4Q-jpq@D+tRK{O z8VTLjk0G~HpXTxjqnYf|Q@8UP5gs3DGkkdV@Z9X&CSj38Z3KY{>`f#9CXEqYZxO<% 
zw9x{RB4_#}Jd8r974a;Pkl}}Xu=$%1B*^K#oB;#}@>o)!JnY**F_6|xkOU!IZOF6Q zTnZ%Q667O(+>aF)E!sAS5`u9;YbAOsz&EOpyB$+20nzYEAab~UYrs$V1B^nE1;!r6 znY0mvyTUpoyp@I%7^O1w1RL6ja}8)v#Gdi{$wTQlb2Rx3a`IVpnLK`syjy!BZ#qb z1;}uP(U4J3;Q`4X#Yj&eMa{d6G@&Pe3cG<2F3fJuIo^P#H*nZnJ_Yd zR!CW_MQPBflok~h!y+ycsEM%NK^l~uu~~zPg2#*m(Gxk>fsAu1r}M;LdOu2&8p7m2 zv;mGzoF4L}V?GTjM;h)xwo6;tdJQts>eIO#MwcMk(>%pMJ_KT<7LX%ko?RZgK^k0Wf%)?-N*bN%DD5D{JAj%t(SnQ?+9gAdVe~N8ij{uS zxNKn*od@BAgR*ufx&(Up4TGsf(aSnb1hM%F6qJ0u+BjaI323G;DOUIN&vg^5HYix@ zJ&75(xhrH+$aE|SvF;Y=g&+n4qXseui~|!UMlh1?S_FW*lda@d94XIWN+xkAj2J`? z(1I;uH0kgbypScTGla)7OQFUk>dvUKgw!H}OB73r68#+nfh57=r|=!uo7>-GTbq!E z)FmQQM&Xza6ei9M|G^{o!2SxG4^a_&%_8#b zeh2uQd_7DkMlFC=SgRPMRzA!m{_5qZ7(!~;*UK?jypRw;NDB%D8!8@@Gng*i7Jddb zgZhrDk=e)?Qz&|xqykjhWmtR>0By1EIxHet7&zbm{v#|q*2sahCJ;5*&`a&Fy6L8M z{P3LnuAU0QXHK6zcis)c4M>No+q6?%Te=y7m_1T>mp*faca2o)U45n*Nu@5-y<7!) zK@CF!NtWsj#V{cK8!M6pHQ5>psR)jN#g?d1Yern_8C+X}Bp@ljNRuNg1ug zT+D^$@abg`WHH5r=g!W~rl3&OAaLoV;8R zgZL;yPsVIm10mBU^Qt#JeccZuQmz6YkZds|i4tLOLGJ@)lgPl*$|0Co23!W9JkHyz za3Y|@rgr717p9i$wb5>E)F(I^iUQLyc{ipD!dWQIl@$gY!;2LG);|J^Sae`kbB(@M z)IEa0DAW`@ShrZ02=aF^=t~F+98ix55D6&E6q-YQaHx6H#%b^Z4)RrW!?JStNMgh? 
zPZ4W9#G55ikbH$AMm3sAIu!L$qOa(XIC=p=DLO{Xj1AZWb}VK}s-clL{Rjojz-0x7($D_qB9nBtpWJJSlf9wvC(NK zAHbXoo9qFiCGw0jMtlX*qwhR|dZCnyX+T}eTD1qDfom0#5YYWsby69!5f7A2k_4)O z`N~Ku?1$P>AH1D066r(=;Q8%;>)~(!MXglTY@F7#d^CFg3p7tZw>J_ zFqD?Mbme^jGF!0C$#W2auxp98vhu?Y-Bd0AU^AM!TJn}SdC%=zb=`BST{rU}8&YrR zJjX3^gK(XwiVc|}cJe}kymo{^N(E$sf>vh2+U}}=tYK{+G!|(U4Dux!h(=UupR#~@ z$bLJ&+Pd1u7X1ZQHrH^l+AZrO`NnE1`MG$VBQ5w9A)j#8liH4Tg>~(ha7}_mvqhIe z<2T^qcGYUv7y9QYUtcJjr-7DEF%wkgRRawIOO*2P!;zm`5(Nep8Dv3}3bO2rw(9UP zz$2Y2Km)DkL|_&juP*`N8mTYC2N8!(0(UoqQ$pw{(I{O?k%|$7XkeEQL9tvi>f~XH znB~n->J&v0is%g3zJ(IJ!EL@+f*Iw?LDY8D61YMN#XfSWj$feOq zNd$Kq5iBA2smRLfu$PM5$6!IQiF(r42!__Jh}?3q>eM_*BBAFE-HkLH)!iC00a?}G zim!C8-$ro5h{%-_)0^X4*4(oK31qz=X`uuJUh(IVU=#%FB`;))EjwWq&6X*kxC;#_ z3trNY$GccHpol#SHw*lq#IodLzTP&;!2+dU_o1+ZafRC94%mta-!#cH31^HA2TTN` zLuf-GkFrnG=vc@TJC72>xjcuum>+_w5F*wKRUOP+Xgx39A7Z79=*3sw{Q3Tiul&bD z#mf)Ie)va?w;y7;0}3~aF00+?0VbZnY?c~w1pS~_Rgk<)Ll6>@7I2Nv9LBeB)O}1t zI)YW@U=WVyQX|`i07@B|PopCQ`W_mDKqNm&QHLVN#dAmnh6}e`0pS|LN@*+1zJ@k$ zBCM;;q)%AD$$fefeiza&n!C2Za4W8CLO+tP>(<#j$l&-jnSsm%K!rhW;1IZr5B_gz zkZX{nu$<=MScMIRR=881;P1S4`7P9jQTrCTUAflo*4l9UevHRNjdCsf<__uyunKca z39h92B_`xf*IlmW{Gp~B~GTndpZG@F;2mF7%>^y2|WcHXe%%`V%sL&xyMLYQ~FB`N{}Rd zxeOt)nQnhed!7zk=L+~`Fs3kn8*U^$JSk-b&gM)|L0Vi;l0{Tt7 zd!|$lyudkQrVXQXFWAiMq=j!CiR@_}2~g{gqtiHm3|#KySieOA^3G7f$PYQ4Yyn4o zeN^k;r@=8pFkMi`okDW!!S6%aTL>)t_}1(2)+63byc>Kpy<0PdC4?CNpEb(wtS>9I zo?`T1%%BDSJHq+`Z<)hF?NMq$mu@(RaXN<^fHhI#1eApCdC>KRsc@XSY&aC?LVQ`V zc~&C-^;scI$a+=nu2YKSH1oxtoMDEg7X#hdpbz~mwc?F@1F3&SShsz~!KwD^J0T~h z_2$n1`_wm~b{M0qGbv(!J=BINNCqI>MP+yyAz8iv$pU%fn^2RX%$IRZ&@tK(+;G8% z*hSthOY`h@l*|I(f^!M?RXgAyx=%5=UdymTPG*2O;!C!CnR`Vra!lA0+y~bOamXMr z6;grwqj>T1KsgEdgaEy(99Kxr;8`Os%m$xgMia;ukMi%Q4;9zq zcmC{ue)V4uU4Vodkj~}hl62f`QRYN?P{D|lIi<>+LMpHpp$7FYpXkB?mjk%esp9nm zE@|t<{#B7=(30K}M9-7o2sin%>ng-$YJrChjB1GmRiH0!rKdkd*Z)Ld%Tj|bL@t~I z`f0s+=T->cqNu@DO^TK>`xesXX;{ASXy0P=?x#kq29o@f#5*Nnm!!$x+vxo?YCnb0 zHF}f#Vj(+e58H7`HF<#s4!D-Qh7mW3-A&AT7PWn|76MJX6qPAxGr()5`PE1Z0;}#BR>})w_0{!Y 
zTecwURp=u05CaB>QnhiV!o{So_o()(m`$Ng8mS5)9c*~3OywY7(hmG^)<%F;pN-+Fo5vza0=B@9OK?%HW#eQq zW$`2g@c(rFIrpkToY|{ z|IuQ5en>{BE$jPd)DP17XdKOG?tJf+Fm7543-bGD$&`YD`{~G54;V88+P|bp-kbWr zhuU`$*u1fgq!k;KRJLiW_QAKF5>v&$>D3( z>#$15^&yf#V_Xn$3}+0|!;@OU!b=a9MnGLqGT(zW_+8ToziS$u;)@QTGu>Q`ZO1}d z5xP;kp^I&kSdzN7#`^|be2Z}&cl)HaofWuOjTip=qVbY}IpVFvOD3s!H%xYQe$R{C zfWlQn{w34nTQo?<`ym+zT1k>$UaX_Pl7jQb@RQu*mmn=X3WWuRms0+~JU`u2j z@)Kg>5?yS}dT}p-OKoe6Y_o|~1ML%VmL*z)n0bI4THcU1?2UM%-k7(|8~3(*6W*i; zw~{wqzQfz;?JA613S*@2;)X3gNh%9Y0=pb)#qfF~a>+#9t6mCedgU<*QjhsVbPbP| zkeb9UK4suW(Cm)910#koq9H$zS6_F+sgvD)X*VlTJIVAOrwW?J0QYeO){S!0$-&AS^8H_Yyt z-hNdgM(K1`@k$Mlj<4$UPEAa}{CNzcrg_xn#@&n2QG$D{wjcEexc)O+>!+yRR;4%* z1rI}h7yG*hx2Yy>Q+LuHy#0Pcsq8P)Ehbd`m$lZghZNv7DR$BMwGWCE*~P69Z;!YW zy)=KgJ=4*;yC1z_;NioFKT6o^4`*i)4k64T946Yh!Ia^JX>UXw{vc6@KPq+;b@(Gh z9hi|YgeXK^i$&WZh!%gG(Q5fqGg-!GL;rj!N?F zuXVOA0lIgn22`oY6k2Z;#{`WBH_h8Z$!#ZNu=fo&7|wL902SzlHr|_*j9`OtYJ<%N zkM0xeQ?FZ!57Vq_A5xDpqCn5# z#U5Vn zX;ymZrKqqO;`jP&`!&@IcfR6*vx4+vy8ABVz<7y`RvOL3%*-NzZia4`n|FBhXd%TV zDJn_9N1e)5DycK)pTVPtnHlxGPKkrP`DKwysgi@K(yTL^noFVm+`+>Kj~qNo$$VoO z#1_VxNc;m0n8nOY@1cFn#WOi4>&a!OIod5$S5hhby8^(8XB$8fm|LoAa%QGdsyV&> z&E2=@qn-<-7FSfF8J?N(9M@}(Qq23H8_@%NAZ!@3UjVj0Kg~#+L5LrU1ZKVANF71F7`cAd=0BdL9q!Z17Fy)AA?O*}&6m8@TM@6sx%6T5;uX zqnz((@E-?o7=SUTyC1-hu6!^C(BG|Bu0YP6e*EmC)J&2L4Xmfmox9LwF4R?d!^0TV zbMRbj~x4auz#gis7IL8b=dJ=-rq@{z8lah@*1@YG$wAV}ndp#K$N zErj_G^Ay~bp%8{-z2QkRu<#fj9o1vdgPaHd15;u;;CB~+;o+YKN>w%_GYLY(-uo~H z7zhdwA}qtDT(MGLN=h+(9fBlnHmKeT9>LsRxKGF3@XR*+|?3;heD)&m~2gFQ0oQ zTg2sFauX$;6L;&-VQB&~ZHHzp+G4}nZe6z`+h4=Fb-;iH7afqI$r=p<_Qz0Q2)cE@ z?>nR@$5g>&wCzrOc(jwk!Kc&p(UksUt5S;f7O%qyL6S^?U zdRphNkv8~ircM4@X^X#sbO2wo9IS-WA)WeI6M=Cp zov{1u#HwxwN(0NfyiVWNpek$Y=wr2p+L{PnG36hizlqvmJ7Gs!BWB-9O^zO=@^d7E@miJbY=_IFP<-02T z()%iRr|+)pPw%fBNFS&iOdqV=lfI{ND1AuR8ga~ElvC-{Gg@`b((J>x^p%6UMt{Oc zr=HcS#*B7e6LHaZLlb@Wy`>|oW_k)G3DJ*|{!Ym>N(RIrN(Sv|v^Z)XP0yfgNDQNF zn9F9xh}dyMOCPh3i_t5_EknL*>#Y+wxsTW%DScGzwEM-F7#9;aEu0aR?6N;50%CGg 
z$!@Vn>=ntIW~aqnVxPEM?BB>AK>pyS{5{AY>gHSbVXph_N&EiNNkHfR%t?EX{c-#L zRehD9au3SqaQbuh1Fcg?KPXb-usGbdS`XQCZ9V<4J)?Sd@~4qIgIzprpSB+s_lhGo z4PC3gjM4)rt=k9ehwak^9ivR$G}Dh%UDTXK&5r@|gqYr?0dLNUqiy{W?Ya5%$wnOG zV_oUW{Valo7xnjPUOIB)PscbvZ=%<`)!9E#H;})nq;EE*^DjN72YGJ4aV)<=S+8B{?;f%NLTATmFbMy^^hzqw7Rl^C{gA3)`;e zYV{SQt&8ZtuA{~eJyWcT+BFBI`e8&vxkWp7C6le@7Hcw7lfss%fFFGP!uj)OpY;1w z?XoSMVy)^&RhB056P4^0J0ptAMW?t>w*6>b7OQS1D+@ICj=Y_9m!zFR^-{K6Y-C*w zkP7(&`6b8BdOG%kW`VH)gadx?5!=No=FhEP z#NcmgmvVtkQ&T#OU?MnQM&xR%T1zJs@bon;V|ZY3J2j+C*Ydz*kP1*r7~?=hVNOt8 z&<(Z1!fNXU)RzJyR2I2IS#(Wn#Y6x#anES$t-iL_N>C5%%xs%&5Q5v^8sNQ#MEFM7 zGatb$ZJKq^vk(sn4ZDiWuhYUMA4Qul($*pIvYN+0S16#^v{3{mb`OUwQwV&%d|! zwabberan4-?6M*kE&;jtk-36hwXfIZsS_CLKM}OXJ_W<+43MqpW-`-rh;IW@h} zI5q4CoTa)g{cyJxR+*D~(BC)9b``m@+)KHDTa&p(KhUsc&GD^*EDAr?p)JnzOS@2X zTw7)qmhyQF8FcNcQlHs#tk%ofoSh0tI%Rp7 zq6unfo}Q=yqoF|P?SP+FM>0INYkTw5Fx4$=_B~tr%&jO{%wbl zBcOJFaU=+j&mcJO0Q3c|X}AD$2~7GC?Uw%B7n>$>hG&igvngH$=sk0;lPj5*t)|rs zG=t5Ms&84Ig&qNfpcnK)?LaF;Vo#XD5&`7GZSIQ^v{J-LAdWh(BaYGNB@-gZ`V6Q< zjn>OUeE)@~fdWDp6C!2(1rVZgR$cJItwFS>9S;d(rhT^4(%#skY?!VOM5BZPkJwuT`tpGh(HhtrTOy9^o<3_ z*E68l6@=@)j?Z<3Ym2^x6)H}{?1pK_59Mp}T2>00o4K2&Tl#=L3hJ3Lg8C7Cj}h0y z`U&Lr=#yMNW;7BVdg^puC&J?+Z3JhhXJ@8owg`#@YC{N2U~eG-FlmhFdXo@FrHv+# z6gkr)!C~YB&5&z>gbXj>fyLj15J67w<_sV>kjIh&(F#ic+( zEq(V`UwQ9>w=Y0Y?d1^7l3a<`&tB_JAJ3B(PzZ}xd{ua8kEG|$+>IFmB` zU`J2~gu7C=eWO@{ju3_>VqXUu6tHLf-gz(;V~!@DMovCM5ph+?@C{}eNb{3hJ z1{^n=yTY)BDk0BOQ$oK~LP7lk70grgNkr)=^XANKW)`AF__`}kQ5}hH$p~VsECU&q z84Vfr6dvrP2FyqaKs0EFk;e5nP+=bsV$>J_8$F^o2D^-O1BX+V9Hjwx6pAjT`bdrd z_lT?#K*zz+wIp-5dZ0Jw@#H*nVnJ_VcR!CW_ zL21yZlm-Ha7kK%fmdL>u7f z#OWbl+U8S`a-`k%WjeH#sZ}8ptsb4rL3HsWUA@!ZbO^kEI>QVNZ6MMBvW;a-cZS@u z8d7bDIO!amFCr@7&(L1fn&7hmh^G0I`V?{ z0yrz5S(wU5^LkP940#!Qv%J8X=2>kRP?VyYHN8b+1~EfuS(De-VDv$O0zdamIpBuT zJ^*5*7LX%koLwDQBke6P&;0o|1&z*hn0Aol9Y94JYeGf~?TR5sFnSPc#Y#VCT(vNY z&V%s5K~cL4U3|UtromLA=w*W@{ODX63QDe4sh`TDp}YrC&#qB=vkEUy$=l&L$TMG>h51c#`VlqC8)0^&%3y-(mfsyB9i$hNj145>;)qKv*# 
zs7pXyAd0~8UNM@MtIIi8hq9!DH}8kgHroKM2SH5eaTDMq6=WBD5hDb2s7&CQmf6Vx!IZ^5vx zD`o-7Utf_dq#r<)LMnozZ?OexRO@nyvZk|CNre?&kaku% zF9&5bZ3n#)`Y~wXaUj(Q5Nw}5ja*}L6MEf->9>(|0HZOEeHsa}wgikhzyyjo;S@0C z6s$1I)7^j@%o|`xBQ7vUs8kqmj1VkWfr&6EW2y%I^->%qeOE`onv}6hjK!R944zv8 zK^Brsc+T|nbP~!`6~dNIO0H7G2OClX&5>78*Ob3fd>|}p9bdx-+HXRMXfy4Is=G^Y!mtzgfbCtSZ^eHCrS<1e*-)} zN1VL-qcrhGK1dpZ>>83atKi0jvT_lHV2D(gGSxz^BMs%HQph;)S!{3 zxIt>UQ5)&hMm&O}p(rpFm4`5$AIv~uF0U})7+$OduIu*^|Jgs(In;?0sM zNZz7|PMBwsEOB}$(Nl7WkvxZ>lpG^!M*G5j;b_#9R71mW`4I}xZ>y3s$chW8HxVd0 zLxS8Ur09Ty!bKEd^+tosh*TSL84ZhmF@P35C1=F%SOV~Uv8MAue6zDoejIboZ?Of4 zmdGQ{81Vy0kDk*As)bT4CINLVV^!y&eQRZs5YYQqby62HAs3WQk_2jj`O0K7=mlC4 z54@dvJOh-r@JGm99he2m&A=l=f6pR-b6C*qVNtXV3s^^a5rC;{!gwSDA1WT?2y$+$ z6=x%pOW*;8%peaa^Pf&j^t0rTxh1R+)WRtR`O@dXPs(FO=QKCr_EVa$7hc)eP1qt> z=K#W>@^8CC-FZp&QP0dUm0+FlcDJcD_*7{?41i8TIH8ccW7U9EdP59C!O}`2UMMr- zj<$9(EQjDThuPm6L&>-p_TZaZ)7n_Ka)qJoguBawoW*H`EUh%@g-R2x-B5Ce#c%IN%Ban94}jL>db=Iv0%v>(VixndD=uTZT?8xk;VS93K% zR&WrCOieoSJ^-FA+Q#?=h(*}B#BW(4V&`tMntPxTNnR_ui=4do&aFDGI@PX|d4LV8 zH+7!lHd#XW{}cinHU;eDxj1?72&w#q0~TS4Rw+K zPCm8`HIS|QORRXV<50ER)=2VhnAS)>FJ9wN6aGcWE1dJVwrfLqUH>1rD8aJXx=W%l z{9?MJR?4!N7Wu~ryht`veJzz_hN#ReCAdT}2Y(#-x+PJgZ;@fG}@Agle` z@s!T>`v?x1cjVHE>W$HDYwlWu1hVdrG*SWrulRK&82!M8$s5@;3b_1vLvNb+n)q7q zlm=Yh!Kwj;>}j}K-~%N#B_HwhmPt+)DF3<##U0El&nwA*A3IcDj~Ls#0#&y_47lQU-`EO z3s)bA{_~gW?>z{;P6;7cVMf8}x&2aoOh3NaDAwf=`a;zzBPo$o>H!pk>wNUE#f7b| zX6n)wtUdev@Ix2tnGPUO$~b+V))L)hm;DY6_Xb6bk{6Nk4F_JjJi>K^mEv~LeG6^g zLfFux3E?cD=0QCHHw>v9jlJ7IxgBM;U?GXz2m-_OeIx)0O{O760asy=S2zF$bs~7qV`?#!g6i6QyYdC_$3|_HOkZMnY*YTE|i72 ztpvAIy%G~*r{g_WbAPKdH@WA6)sfDef76*WfKp&xiyec&7@7h2pvJ>JR-|o^?GI%z zIzKJvuwk~g81^Y+TKVlrGvO4KfE{bf?`tY9)dQw7hJ3wF7Z}C51HUX?(n$s^5u@@K zJV~jZqQ<=D+bH`H0yA_C&eSmga+HGc5Mr{s<9ZTi(RN^N#*Jj=z#eDlnWMcD3fLd=HoyGxV;DIMU`)xvycSgG#<=s7(XaYw) zeOT*Vt-()2Fr8P&okVi`!5>7~yHw6XdUpHqZYA7JE}QP$pDY=pA zSrD|if2DF-+->6%7dwvN(A^u z|32+4=uEO{gE0}y>!LF(Lb3zlH7dc;2*L7Y2o@6vYC{H>$VIkgk+|N<6Y9^%iJr3k)t9!#(i*u5Q7K;BO$G~ 
zH;Nl8^_3D3Pk5gL;RRdiCLFgJ>*^S5!!d=}44QS~ylnR=1~i6j;W+<(`e0!_cGu7T z`O*IpBT(_c|3ke!x9# ztK3_3?ju~}%Z|qo_o;av);B6e7EZptu$_j!fv*3Kzy_uUHHbVphxK!M zP=2mPEGL>mtN=oZ^I#?F1ReU)=Rz;65?h%D|8pP>t9dcwj_ zJlM>gUw8p|w1oqsurIa(q>S^l! zj!a(GD$qaZ9R}%?%=heN)PO3XV5gqw25F9=g zQY9wuu)PgKCf7YYbYZ=a@@J_3=Mj+sc0a;I)0loh~o*Xcu|;!MMRU#V56J6m0WPmfKYA%)6MES25jXp>G zZ=xa9eDDGINrsOiV1=8a z&0auSY}Xyh2(@i}KSF&!t&hghjK-c1UkT%;H9s%^9xa(tFmNv&+U^KrWxVEn?+ACchDU{P6l9h6;ojJ}<=J;v$AG-W65NP}dYhh*$)CP;p{(YF4|VfbqdFTp+j2c(5Jp)kPkP|E9@!|T1Cf9&ep zvv}I#Om6m&U47e&i?J(o>+y&ibK|(}wnnfnF}7-8lsLR*@n%2P(?^~ycfcKVhumRz z#NFYJx;x!5cie@4$(=0icK5h@^Fzy+LB4~lx7au-F*qOWeV`e|vyRXe6Lo*+CXuG+ zALC#EQE$MD7T~kOn@p&Tt^NSzccavh6YhRoQR-qs?1G0Z)=`h6)z{JD0Jo@P)Pvk+Qta-wAxr=S zFw3~+-UH7YW_L~ZkSY4%D#eK?co^~>?C)M&vzoYO-9vkD4|#E=yuVD>n^5{+)|!JZQh?XE*h}Zv zIwF$v2&OsY?iY8Vm*x$&rrKI(_rq%jUPKIe!-Uh`U}hTOD8dZFEK$rF(~BFVy%EiL z{X{d~u-Hd5;|&qbU`E0aq8oK*7HJ)O!N5u1&8x%=d~T+E$%_>T7y0a&!osbC-XNtg z&k$bcn)2%fTs-4)_@D<|hUx!R+O-#cS#ytjkyZ70XaBsfN9C75-3{Z^ahUB8kPY+H z?0vpol}#!}W=8%xQhpHpA8!vD=G=)HKZ2*5OXN^-kUu`7+(pEh@%Y)UyNj6$$$@&= zPA9Gv#iqQn5696`5?^M^#Sbq)U@YLG*0&bztWY+?U!(dqot~Hw>-0UFhTMANU0uF= z3$w|fd5xy&i3PG5j)LtfgJ#brf7>0bm)qtinaz$x;)q@6r&_@#=jVsdUO~6njoJ6! 
zdG-xI=WWy9Y5VYNzI?~+cYUu_;`0cvW1onJPWMg#T8yR-f?;d zZJgfeL#5Z++oy*X9;gC6$})-8+XXX0;DL=fe+{`iPQ+mDnQkzd={^C9(3&>dosi65 zgK=n+)dqJiUGi^(`9s|qQzv?ES9Oi`d1`$xB0pl&vSk(u{_B*t?B#4(Jx8;zyXE2n zo*yjL;kj}gKQa$X;4C@g<)6|3U!&+H9_9RE&2jk-GkUt_x_I@8XVZQ^7jG&spSpO_ zw-B9^pXGi}6t7b+JZ=q~tL5X49NA&@k>wofiuTxoY0x$ zJidAl+E0Bh;SrQm$9*{l98{O8n~Tgd$FIv6O^o|>+@HFB>~z-6EzZ|u1wH7UE!1>n z{p_I4JxUQf^~lbJ@`hJ>G7HCd@a*+EURC+gr(k-4Dd2&&ACfjdLB(6|uFd3Z*Ya-g zgjd*6Y}PO>6}YN4)qgT-)?oFsSa9rOVa1W6CS|Hub$iMS2#G z_V9QwHw)V=qi)-2LdFZ2RsTY1p5>F2Vr7!``mZB90&H>8NNBix-w%U4Zj8WbI0_Xq zuFo0!ksA7C*cgH$nLy15O6iK89ExKc2GENPJp?0c%%F4>s^@($`{%pN&*Ty4m$%tAGuj#_zjm(SXPpGqj_t;@zuZvTx;2vwmk-vpCuDPhPlisl!gFtMYjVF{o?frM3h# z+2@eECj7oFHkLo!$>M>kE%9FqRRB3Xu;doY&U6+wsk*P@n;ySU6_ejhX3L7jkwZw5 zb;4vW_x;f{O}wg1?A`z*>X-9krg5XQ~iN2 z@au*Ia}V$Xl*hph;`)BmK%CH{=FrE6?i*OJ%z=sU2{KNH@Hf!;qt@UoxKkUzjadTy OhY&_}>N}v%nEwk`&C$sK literal 0 HcmV?d00001 diff --git a/src/目标识别代码/models/common.py b/src/目标识别代码/models/common.py new file mode 100644 index 00000000..17e40e60 --- /dev/null +++ b/src/目标识别代码/models/common.py @@ -0,0 +1,771 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Common modules +""" + +import json +import math +import platform +import warnings +from collections import OrderedDict, namedtuple +from copy import copy +from pathlib import Path + +import cv2 +import numpy as np +import pandas as pd +import requests +import torch +import torch.nn as nn +from PIL import Image +from torch.cuda import amp + +from utils.dataloaders import exif_transpose, letterbox +from utils.general import (LOGGER, ROOT, check_requirements, check_suffix, check_version, colorstr, increment_path, + make_divisible, non_max_suppression, scale_coords, xywh2xyxy, xyxy2xywh, yaml_load) +from utils.plots import Annotator, colors, save_one_box +from utils.torch_utils import copy_attr, smart_inference_mode, time_sync + + +def autopad(k, p=None): # kernel, padding + # Pad to 'same' + if p is None: + p = k // 2 if isinstance(k, int) else [x // 2 for x in k] 
# auto-pad + return p + + +class Conv(nn.Module): + # Standard convolution + def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups + super().__init__() + self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False) + self.bn = nn.BatchNorm2d(c2) + self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity()) + + def forward(self, x): + return self.act(self.bn(self.conv(x))) + + def forward_fuse(self, x): + return self.act(self.conv(x)) + + +class DWConv(Conv): + # Depth-wise convolution class + def __init__(self, c1, c2, k=1, s=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups + super().__init__(c1, c2, k, s, g=math.gcd(c1, c2), act=act) + + +class DWConvTranspose2d(nn.ConvTranspose2d): + # Depth-wise transpose convolution class + def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0): # ch_in, ch_out, kernel, stride, padding, padding_out + super().__init__(c1, c2, k, s, p1, p2, groups=math.gcd(c1, c2)) + + +class TransformerLayer(nn.Module): + # Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance) + def __init__(self, c, num_heads): + super().__init__() + self.q = nn.Linear(c, c, bias=False) + self.k = nn.Linear(c, c, bias=False) + self.v = nn.Linear(c, c, bias=False) + self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads) + self.fc1 = nn.Linear(c, c, bias=False) + self.fc2 = nn.Linear(c, c, bias=False) + + def forward(self, x): + x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x + x = self.fc2(self.fc1(x)) + x + return x + + +class TransformerBlock(nn.Module): + # Vision Transformer https://arxiv.org/abs/2010.11929 + def __init__(self, c1, c2, num_heads, num_layers): + super().__init__() + self.conv = None + if c1 != c2: + self.conv = Conv(c1, c2) + self.linear = nn.Linear(c2, c2) # learnable position embedding + self.tr = nn.Sequential(*(TransformerLayer(c2, num_heads) for _ in 
range(num_layers))) + self.c2 = c2 + + def forward(self, x): + if self.conv is not None: + x = self.conv(x) + b, _, w, h = x.shape + p = x.flatten(2).permute(2, 0, 1) + return self.tr(p + self.linear(p)).permute(1, 2, 0).reshape(b, self.c2, w, h) + + +class Bottleneck(nn.Module): + # Standard bottleneck + def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion + super().__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c_, c2, 3, 1, g=g) + self.add = shortcut and c1 == c2 + + def forward(self, x): + return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x)) + + +class BottleneckCSP(nn.Module): + # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False) + self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False) + self.cv4 = Conv(2 * c_, c2, 1, 1) + self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3) + self.act = nn.SiLU() + self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n))) + + def forward(self, x): + y1 = self.cv3(self.m(self.cv1(x))) + y2 = self.cv2(x) + return self.cv4(self.act(self.bn(torch.cat((y1, y2), 1)))) + + +class CrossConv(nn.Module): + # Cross Convolution Downsample + def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False): + # ch_in, ch_out, kernel, stride, groups, expansion, shortcut + super().__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, c_, (1, k), (1, s)) + self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g) + self.add = shortcut and c1 == c2 + + def forward(self, x): + return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x)) + + +class C3(nn.Module): + # CSP Bottleneck with 3 convolutions + def __init__(self, 
c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c1, c_, 1, 1) + self.cv3 = Conv(2 * c_, c2, 1) # optional act=FReLU(c2) + self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n))) + + def forward(self, x): + return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), 1)) + + +class C3x(C3): + # C3 module with cross-convolutions + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2 * e) + self.m = nn.Sequential(*(CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n))) + + +class C3TR(C3): + # C3 module with TransformerBlock() + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2 * e) + self.m = TransformerBlock(c_, c_, 4, n) + + +class C3SPP(C3): + # C3 module with SPP() + def __init__(self, c1, c2, k=(5, 9, 13), n=1, shortcut=True, g=1, e=0.5): + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2 * e) + self.m = SPP(c_, c_, k) + + +class C3Ghost(C3): + # C3 module with GhostBottleneck() + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2 * e) # hidden channels + self.m = nn.Sequential(*(GhostBottleneck(c_, c_) for _ in range(n))) + + +class SPP(nn.Module): + # Spatial Pyramid Pooling (SPP) layer https://arxiv.org/abs/1406.4729 + def __init__(self, c1, c2, k=(5, 9, 13)): + super().__init__() + c_ = c1 // 2 # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1) + self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k]) + + def forward(self, x): + x = self.cv1(x) + with warnings.catch_warnings(): + warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning + return 
self.cv2(torch.cat([x] + [m(x) for m in self.m], 1)) + + +class SPPF(nn.Module): + # Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher + def __init__(self, c1, c2, k=5): # equivalent to SPP(k=(5, 9, 13)) + super().__init__() + c_ = c1 // 2 # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c_ * 4, c2, 1, 1) + self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2) + + def forward(self, x): + x = self.cv1(x) + with warnings.catch_warnings(): + warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning + y1 = self.m(x) + y2 = self.m(y1) + return self.cv2(torch.cat((x, y1, y2, self.m(y2)), 1)) + + +class Focus(nn.Module): + # Focus wh information into c-space + def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups + super().__init__() + self.conv = Conv(c1 * 4, c2, k, s, p, g, act) + # self.contract = Contract(gain=2) + + def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2) + return self.conv(torch.cat((x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]), 1)) + # return self.conv(self.contract(x)) + + +class GhostConv(nn.Module): + # Ghost Convolution https://github.com/huawei-noah/ghostnet + def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups + super().__init__() + c_ = c2 // 2 # hidden channels + self.cv1 = Conv(c1, c_, k, s, None, g, act) + self.cv2 = Conv(c_, c_, 5, 1, None, c_, act) + + def forward(self, x): + y = self.cv1(x) + return torch.cat((y, self.cv2(y)), 1) + + +class GhostBottleneck(nn.Module): + # Ghost Bottleneck https://github.com/huawei-noah/ghostnet + def __init__(self, c1, c2, k=3, s=1): # ch_in, ch_out, kernel, stride + super().__init__() + c_ = c2 // 2 + self.conv = nn.Sequential( + GhostConv(c1, c_, 1, 1), # pw + DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw + GhostConv(c_, c2, 1, 1, act=False)) # pw-linear + self.shortcut = 
nn.Sequential(DWConv(c1, c1, k, s, act=False), Conv(c1, c2, 1, 1, + act=False)) if s == 2 else nn.Identity() + + def forward(self, x): + return self.conv(x) + self.shortcut(x) + + +class Contract(nn.Module): + # Contract width-height into channels, i.e. x(1,64,80,80) to x(1,256,40,40) + def __init__(self, gain=2): + super().__init__() + self.gain = gain + + def forward(self, x): + b, c, h, w = x.size() # assert (h / s == 0) and (W / s == 0), 'Indivisible gain' + s = self.gain + x = x.view(b, c, h // s, s, w // s, s) # x(1,64,40,2,40,2) + x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # x(1,2,2,64,40,40) + return x.view(b, c * s * s, h // s, w // s) # x(1,256,40,40) + + +class Expand(nn.Module): + # Expand channels into width-height, i.e. x(1,64,80,80) to x(1,16,160,160) + def __init__(self, gain=2): + super().__init__() + self.gain = gain + + def forward(self, x): + b, c, h, w = x.size() # assert C / s ** 2 == 0, 'Indivisible gain' + s = self.gain + x = x.view(b, s, s, c // s ** 2, h, w) # x(1,2,2,16,80,80) + x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # x(1,16,80,2,80,2) + return x.view(b, c // s ** 2, h * s, w * s) # x(1,16,160,160) + + +class Concat(nn.Module): + # Concatenate a list of tensors along dimension + def __init__(self, dimension=1): + super().__init__() + self.d = dimension + + def forward(self, x): + return torch.cat(x, self.d) + + +class DetectMultiBackend(nn.Module): + # YOLOv5 MultiBackend class for python inference on various backends + def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, data=None, fp16=False, fuse=True): + # Usage: + # PyTorch: weights = *.pt + # TorchScript: *.torchscript + # ONNX Runtime: *.onnx + # ONNX OpenCV DNN: *.onnx with --dnn + # OpenVINO: *.xml + # CoreML: *.mlmodel + # TensorRT: *.engine + # TensorFlow SavedModel: *_saved_model + # TensorFlow GraphDef: *.pb + # TensorFlow Lite: *.tflite + # TensorFlow Edge TPU: *_edgetpu.tflite + from models.experimental import attempt_download, 
attempt_load # scoped to avoid circular import + + super().__init__() + w = str(weights[0] if isinstance(weights, list) else weights) + pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs = self._model_type(w) # get backend + w = attempt_download(w) # download if not local + fp16 &= pt or jit or onnx or engine # FP16 + stride = 32 # default stride + + if pt: # PyTorch + model = attempt_load(weights if isinstance(weights, list) else w, device=device, inplace=True, fuse=fuse) + stride = max(int(model.stride.max()), 32) # model stride + names = model.module.names if hasattr(model, 'module') else model.names # get class names + model.half() if fp16 else model.float() + self.model = model # explicitly assign for to(), cpu(), cuda(), half() + elif jit: # TorchScript + LOGGER.info(f'Loading {w} for TorchScript inference...') + extra_files = {'config.txt': ''} # model metadata + model = torch.jit.load(w, _extra_files=extra_files) + model.half() if fp16 else model.float() + if extra_files['config.txt']: + d = json.loads(extra_files['config.txt']) # extra_files dict + stride, names = int(d['stride']), d['names'] + elif dnn: # ONNX OpenCV DNN + LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...') + check_requirements(('opencv-python>=4.5.4',)) + net = cv2.dnn.readNetFromONNX(w) + elif onnx: # ONNX Runtime + LOGGER.info(f'Loading {w} for ONNX Runtime inference...') + cuda = torch.cuda.is_available() and device.type != 'cpu' + check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime')) + import onnxruntime + providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider'] + session = onnxruntime.InferenceSession(w, providers=providers) + meta = session.get_modelmeta().custom_metadata_map # metadata + if 'stride' in meta: + stride, names = int(meta['stride']), eval(meta['names']) + elif xml: # OpenVINO + LOGGER.info(f'Loading {w} for OpenVINO inference...') + check_requirements(('openvino',)) # 
requires openvino-dev: https://pypi.org/project/openvino-dev/ + from openvino.runtime import Core, Layout, get_batch + ie = Core() + if not Path(w).is_file(): # if not *.xml + w = next(Path(w).glob('*.xml')) # get *.xml file from *_openvino_model dir + network = ie.read_model(model=w, weights=Path(w).with_suffix('.bin')) + if network.get_parameters()[0].get_layout().empty: + network.get_parameters()[0].set_layout(Layout("NCHW")) + batch_dim = get_batch(network) + if batch_dim.is_static: + batch_size = batch_dim.get_length() + executable_network = ie.compile_model(network, device_name="CPU") # device_name="MYRIAD" for Intel NCS2 + output_layer = next(iter(executable_network.outputs)) + meta = Path(w).with_suffix('.yaml') + if meta.exists(): + stride, names = self._load_metadata(meta) # load metadata + elif engine: # TensorRT + LOGGER.info(f'Loading {w} for TensorRT inference...') + import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download + check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=7.0.0 + if device.type == 'cpu': + device = torch.device('cuda:0') + Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr')) + logger = trt.Logger(trt.Logger.INFO) + with open(w, 'rb') as f, trt.Runtime(logger) as runtime: + model = runtime.deserialize_cuda_engine(f.read()) + context = model.create_execution_context() + bindings = OrderedDict() + fp16 = False # default updated below + dynamic = False + for index in range(model.num_bindings): + name = model.get_binding_name(index) + dtype = trt.nptype(model.get_binding_dtype(index)) + if model.binding_is_input(index): + if -1 in tuple(model.get_binding_shape(index)): # dynamic + dynamic = True + context.set_binding_shape(index, tuple(model.get_profile_shape(0, index)[2])) + if dtype == np.float16: + fp16 = True + shape = tuple(context.get_binding_shape(index)) + im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device) + bindings[name] = Binding(name, dtype, shape, 
im, int(im.data_ptr())) + binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items()) + batch_size = bindings['images'].shape[0] # if dynamic, this is instead max batch size + elif coreml: # CoreML + LOGGER.info(f'Loading {w} for CoreML inference...') + import coremltools as ct + model = ct.models.MLModel(w) + else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU) + if saved_model: # SavedModel + LOGGER.info(f'Loading {w} for TensorFlow SavedModel inference...') + import tensorflow as tf + keras = False # assume TF1 saved_model + model = tf.keras.models.load_model(w) if keras else tf.saved_model.load(w) + elif pb: # GraphDef https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt + LOGGER.info(f'Loading {w} for TensorFlow GraphDef inference...') + import tensorflow as tf + + def wrap_frozen_graph(gd, inputs, outputs): + x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=""), []) # wrapped + ge = x.graph.as_graph_element + return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs)) + + gd = tf.Graph().as_graph_def() # graph_def + with open(w, 'rb') as f: + gd.ParseFromString(f.read()) + frozen_func = wrap_frozen_graph(gd, inputs="x:0", outputs="Identity:0") + elif tflite or edgetpu: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python + try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu + from tflite_runtime.interpreter import Interpreter, load_delegate + except ImportError: + import tensorflow as tf + Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate, + if edgetpu: # Edge TPU https://coral.ai/software/#edgetpu-runtime + LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...') + delegate = { + 'Linux': 'libedgetpu.so.1', + 'Darwin': 'libedgetpu.1.dylib', + 'Windows': 'edgetpu.dll'}[platform.system()] + interpreter = Interpreter(model_path=w, 
experimental_delegates=[load_delegate(delegate)]) + else: # Lite + LOGGER.info(f'Loading {w} for TensorFlow Lite inference...') + interpreter = Interpreter(model_path=w) # load TFLite model + interpreter.allocate_tensors() # allocate + input_details = interpreter.get_input_details() # inputs + output_details = interpreter.get_output_details() # outputs + elif tfjs: + raise NotImplementedError('ERROR: YOLOv5 TF.js inference is not supported') + else: + raise NotImplementedError(f'ERROR: {w} is not a supported format') + + # class names + if 'names' not in locals(): + names = yaml_load(data)['names'] if data else [f'class{i}' for i in range(999)] + if names[0] == 'n01440764' and len(names) == 1000: # ImageNet + names = yaml_load(ROOT / 'data/ImageNet.yaml')['names'] # human-readable names + + self.__dict__.update(locals()) # assign all variables to self + + def forward(self, im, augment=False, visualize=False, val=False): + # YOLOv5 MultiBackend inference + b, ch, h, w = im.shape # batch, channel, height, width + if self.fp16 and im.dtype != torch.float16: + im = im.half() # to FP16 + + if self.pt: # PyTorch + y = self.model(im, augment=augment, visualize=visualize) if augment or visualize else self.model(im) + if isinstance(y, tuple): + y = y[0] + elif self.jit: # TorchScript + y = self.model(im)[0] + elif self.dnn: # ONNX OpenCV DNN + im = im.cpu().numpy() # torch to numpy + self.net.setInput(im) + y = self.net.forward() + elif self.onnx: # ONNX Runtime + im = im.cpu().numpy() # torch to numpy + y = self.session.run([self.session.get_outputs()[0].name], {self.session.get_inputs()[0].name: im})[0] + elif self.xml: # OpenVINO + im = im.cpu().numpy() # FP32 + y = self.executable_network([im])[self.output_layer] + elif self.engine: # TensorRT + if self.dynamic and im.shape != self.bindings['images'].shape: + i_in, i_out = (self.model.get_binding_index(x) for x in ('images', 'output')) + self.context.set_binding_shape(i_in, im.shape) # reshape if dynamic + 
self.bindings['images'] = self.bindings['images']._replace(shape=im.shape) + self.bindings['output'].data.resize_(tuple(self.context.get_binding_shape(i_out))) + s = self.bindings['images'].shape + assert im.shape == s, f"input size {im.shape} {'>' if self.dynamic else 'not equal to'} max model size {s}" + self.binding_addrs['images'] = int(im.data_ptr()) + self.context.execute_v2(list(self.binding_addrs.values())) + y = self.bindings['output'].data + elif self.coreml: # CoreML + im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3) + im = Image.fromarray((im[0] * 255).astype('uint8')) + # im = im.resize((192, 320), Image.ANTIALIAS) + y = self.model.predict({'image': im}) # coordinates are xywh normalized + if 'confidence' in y: + box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]]) # xyxy pixels + conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float) + y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1) + else: + k = 'var_' + str(sorted(int(k.replace('var_', '')) for k in y)[-1]) # output key + y = y[k] # output + else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU) + im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3) + if self.saved_model: # SavedModel + y = (self.model(im, training=False) if self.keras else self.model(im)).numpy() + elif self.pb: # GraphDef + y = self.frozen_func(x=self.tf.constant(im)).numpy() + else: # Lite or Edge TPU + input, output = self.input_details[0], self.output_details[0] + int8 = input['dtype'] == np.uint8 # is TFLite quantized uint8 model + if int8: + scale, zero_point = input['quantization'] + im = (im / scale + zero_point).astype(np.uint8) # de-scale + self.interpreter.set_tensor(input['index'], im) + self.interpreter.invoke() + y = self.interpreter.get_tensor(output['index']) + if int8: + scale, zero_point = output['quantization'] + y = (y.astype(np.float32) - zero_point) * scale # re-scale + y[..., :4] *= [w, h, 
w, h] # xywh normalized to pixels + + if isinstance(y, np.ndarray): + y = torch.tensor(y, device=self.device) + return (y, []) if val else y + + def warmup(self, imgsz=(1, 3, 640, 640)): + # Warmup model by running inference once + warmup_types = self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb + if any(warmup_types) and self.device.type != 'cpu': + im = torch.zeros(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device) # input + for _ in range(2 if self.jit else 1): # + self.forward(im) # warmup + + @staticmethod + def _model_type(p='path/to/model.pt'): + # Return model type from model path, i.e. path='path/to/model.onnx' -> type=onnx + from export import export_formats + suffixes = list(export_formats().Suffix) + ['.xml'] # export suffixes + check_suffix(p, suffixes) # checks + p = Path(p).name # eliminate trailing separators + pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, xml2 = (s in p for s in suffixes) + xml |= xml2 # *_openvino_model or *.xml + tflite &= not edgetpu # *.tflite + return pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs + + @staticmethod + def _load_metadata(f='path/to/meta.yaml'): + # Load metadata from meta.yaml if it exists + d = yaml_load(f) + return d['stride'], d['names'] # assign stride, names + + +class AutoShape(nn.Module): + # YOLOv5 input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS + conf = 0.25 # NMS confidence threshold + iou = 0.45 # NMS IoU threshold + agnostic = False # NMS class-agnostic + multi_label = False # NMS multiple labels per box + classes = None # (optional list) filter by class, i.e. = [0, 15, 16] for COCO persons, cats and dogs + max_det = 1000 # maximum number of detections per image + amp = False # Automatic Mixed Precision (AMP) inference + + def __init__(self, model, verbose=True): + super().__init__() + if verbose: + LOGGER.info('Adding AutoShape... 
') + copy_attr(self, model, include=('yaml', 'nc', 'hyp', 'names', 'stride', 'abc'), exclude=()) # copy attributes + self.dmb = isinstance(model, DetectMultiBackend) # DetectMultiBackend() instance + self.pt = not self.dmb or model.pt # PyTorch model + self.model = model.eval() + if self.pt: + m = self.model.model.model[-1] if self.dmb else self.model.model[-1] # Detect() + m.inplace = False # Detect.inplace=False for safe multithread inference + + def _apply(self, fn): + # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers + self = super()._apply(fn) + if self.pt: + m = self.model.model.model[-1] if self.dmb else self.model.model[-1] # Detect() + m.stride = fn(m.stride) + m.grid = list(map(fn, m.grid)) + if isinstance(m.anchor_grid, list): + m.anchor_grid = list(map(fn, m.anchor_grid)) + return self + + @smart_inference_mode() + def forward(self, imgs, size=640, augment=False, profile=False): + # Inference from various sources. For height=640, width=1280, RGB images example inputs are: + # file: imgs = 'data/images/zidane.jpg' # str or PosixPath + # URI: = 'https://ultralytics.com/images/zidane.jpg' + # OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(640,1280,3) + # PIL: = Image.open('image.jpg') or ImageGrab.grab() # HWC x(640,1280,3) + # numpy: = np.zeros((640,1280,3)) # HWC + # torch: = torch.zeros(16,3,320,640) # BCHW (scaled to size=640, 0-1 values) + # multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] 
# list of images + + t = [time_sync()] + p = next(self.model.parameters()) if self.pt else torch.zeros(1, device=self.model.device) # for device, type + autocast = self.amp and (p.device.type != 'cpu') # Automatic Mixed Precision (AMP) inference + if isinstance(imgs, torch.Tensor): # torch + with amp.autocast(autocast): + return self.model(imgs.to(p.device).type_as(p), augment, profile) # inference + + # Pre-process + n, imgs = (len(imgs), list(imgs)) if isinstance(imgs, (list, tuple)) else (1, [imgs]) # number, list of images + shape0, shape1, files = [], [], [] # image and inference shapes, filenames + for i, im in enumerate(imgs): + f = f'image{i}' # filename + if isinstance(im, (str, Path)): # filename or uri + im, f = Image.open(requests.get(im, stream=True).raw if str(im).startswith('http') else im), im + im = np.asarray(exif_transpose(im)) + elif isinstance(im, Image.Image): # PIL Image + im, f = np.asarray(exif_transpose(im)), getattr(im, 'filename', f) or f + files.append(Path(f).with_suffix('.jpg').name) + if im.shape[0] < 5: # image in CHW + im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1) + im = im[..., :3] if im.ndim == 3 else np.tile(im[..., None], 3) # enforce 3ch input + s = im.shape[:2] # HWC + shape0.append(s) # image shape + g = (size / max(s)) # gain + shape1.append([y * g for y in s]) + imgs[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update + shape1 = [make_divisible(x, self.stride) if self.pt else size for x in np.array(shape1).max(0)] # inf shape + x = [letterbox(im, shape1, auto=False)[0] for im in imgs] # pad + x = np.ascontiguousarray(np.array(x).transpose((0, 3, 1, 2))) # stack and BHWC to BCHW + x = torch.from_numpy(x).to(p.device).type_as(p) / 255 # uint8 to fp16/32 + t.append(time_sync()) + + with amp.autocast(autocast): + # Inference + y = self.model(x, augment, profile) # forward + t.append(time_sync()) + + # Post-process + y = non_max_suppression(y if self.dmb else y[0], + self.conf, + 
self.iou, + self.classes, + self.agnostic, + self.multi_label, + max_det=self.max_det) # NMS + for i in range(n): + scale_coords(shape1, y[i][:, :4], shape0[i]) + + t.append(time_sync()) + return Detections(imgs, y, files, t, self.names, x.shape) + + +class Detections: + # YOLOv5 detections class for inference results + def __init__(self, imgs, pred, files, times=(0, 0, 0, 0), names=None, shape=None): + super().__init__() + d = pred[0].device # device + gn = [torch.tensor([*(im.shape[i] for i in [1, 0, 1, 0]), 1, 1], device=d) for im in imgs] # normalizations + self.imgs = imgs # list of images as numpy arrays + self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls) + self.names = names # class names + self.files = files # image filenames + self.times = times # profiling times + self.xyxy = pred # xyxy pixels + self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels + self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized + self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized + self.n = len(self.pred) # number of images (batch size) + self.t = tuple((times[i + 1] - times[i]) * 1000 / self.n for i in range(3)) # timestamps (ms) + self.s = shape # inference BCHW shape + + def display(self, pprint=False, show=False, save=False, crop=False, render=False, labels=True, save_dir=Path('')): + crops = [] + for i, (im, pred) in enumerate(zip(self.imgs, self.pred)): + s = f'image {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} ' # string + if pred.shape[0]: + for c in pred[:, -1].unique(): + n = (pred[:, -1] == c).sum() # detections per class + s += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string + if show or save or render or crop: + annotator = Annotator(im, example=str(self.names)) + for *box, conf, cls in reversed(pred): # xyxy, confidence, class + label = f'{self.names[int(cls)]} {conf:.2f}' + if crop: + file = save_dir / 'crops' / self.names[int(cls)] / self.files[i] if save else None + crops.append({ + 'box': 
box, + 'conf': conf, + 'cls': cls, + 'label': label, + 'im': save_one_box(box, im, file=file, save=save)}) + else: # all others + annotator.box_label(box, label if labels else '', color=colors(cls)) + im = annotator.im + else: + s += '(no detections)' + + im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np + if pprint: + print(s.rstrip(', ')) + if show: + im.show(self.files[i]) # show + if save: + f = self.files[i] + im.save(save_dir / f) # save + if i == self.n - 1: + LOGGER.info(f"Saved {self.n} image{'s' * (self.n > 1)} to {colorstr('bold', save_dir)}") + if render: + self.imgs[i] = np.asarray(im) + if crop: + if save: + LOGGER.info(f'Saved results to {save_dir}\n') + return crops + + def print(self): + self.display(pprint=True) # print results + print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}' % self.t) + + def show(self, labels=True): + self.display(show=True, labels=labels) # show results + + def save(self, labels=True, save_dir='runs/detect/exp'): + save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) # increment save_dir + self.display(save=True, labels=labels, save_dir=save_dir) # save results + + def crop(self, save=True, save_dir='runs/detect/exp'): + save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) if save else None + return self.display(crop=True, save=save, save_dir=save_dir) # crop results + + def render(self, labels=True): + self.display(render=True, labels=labels) # render results + return self.imgs + + def pandas(self): + # return detections as pandas DataFrames, i.e. 
print(results.pandas().xyxy[0]) + new = copy(self) # return copy + ca = 'xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class', 'name' # xyxy columns + cb = 'xcenter', 'ycenter', 'width', 'height', 'confidence', 'class', 'name' # xywh columns + for k, c in zip(['xyxy', 'xyxyn', 'xywh', 'xywhn'], [ca, ca, cb, cb]): + a = [[x[:5] + [int(x[5]), self.names[int(x[5])]] for x in x.tolist()] for x in getattr(self, k)] # update + setattr(new, k, [pd.DataFrame(x, columns=c) for x in a]) + return new + + def tolist(self): + # return a list of Detections objects, i.e. 'for result in results.tolist():' + r = range(self.n) # iterable + x = [Detections([self.imgs[i]], [self.pred[i]], [self.files[i]], self.times, self.names, self.s) for i in r] + # for d in x: + # for k in ['imgs', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']: + # setattr(d, k, getattr(d, k)[0]) # pop out of list + return x + + def __len__(self): + return self.n # override len(results) + + def __str__(self): + self.print() # override print(results) + return '' + + +class Classify(nn.Module): + # Classification head, i.e. 
x(b,c1,20,20) to x(b,c2) + def __init__(self, c1, c2, k=1, s=1, p=None, g=1): # ch_in, ch_out, kernel, stride, padding, groups + super().__init__() + c_ = 1280 # efficientnet_b0 size + self.conv = Conv(c1, c_, k, s, autopad(k, p), g) + self.pool = nn.AdaptiveAvgPool2d(1) # to x(b,c_,1,1) + self.drop = nn.Dropout(p=0.0, inplace=True) + self.linear = nn.Linear(c_, c2) # to x(b,c2) + + def forward(self, x): + if isinstance(x, list): + x = torch.cat(x, 1) + return self.linear(self.drop(self.pool(self.conv(x)).flatten(1))) diff --git a/src/目标识别代码/models/experimental.py b/src/目标识别代码/models/experimental.py new file mode 100644 index 00000000..cb32d01b --- /dev/null +++ b/src/目标识别代码/models/experimental.py @@ -0,0 +1,107 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Experimental modules +""" +import math + +import numpy as np +import torch +import torch.nn as nn + +from models.common import Conv +from utils.downloads import attempt_download + + +class Sum(nn.Module): + # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070 + def __init__(self, n, weight=False): # n: number of inputs + super().__init__() + self.weight = weight # apply weights boolean + self.iter = range(n - 1) # iter object + if weight: + self.w = nn.Parameter(-torch.arange(1.0, n) / 2, requires_grad=True) # layer weights + + def forward(self, x): + y = x[0] # no weight + if self.weight: + w = torch.sigmoid(self.w) * 2 + for i in self.iter: + y = y + x[i + 1] * w[i] + else: + for i in self.iter: + y = y + x[i + 1] + return y + + +class MixConv2d(nn.Module): + # Mixed Depth-wise Conv https://arxiv.org/abs/1907.09595 + def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True): # ch_in, ch_out, kernel, stride, ch_strategy + super().__init__() + n = len(k) # number of convolutions + if equal_ch: # equal c_ per group + i = torch.linspace(0, n - 1E-6, c2).floor() # c2 indices + c_ = [(i == g).sum() for g in range(n)] # intermediate channels + else: # equal weight.numel() per group + b = [c2] + 
[0] * n + a = np.eye(n + 1, n, k=-1) + a -= np.roll(a, 1, axis=1) + a *= np.array(k) ** 2 + a[0] = 1 + c_ = np.linalg.lstsq(a, b, rcond=None)[0].round() # solve for equal weight indices, ax = b + + self.m = nn.ModuleList([ + nn.Conv2d(c1, int(c_), k, s, k // 2, groups=math.gcd(c1, int(c_)), bias=False) for k, c_ in zip(k, c_)]) + self.bn = nn.BatchNorm2d(c2) + self.act = nn.SiLU() + + def forward(self, x): + return self.act(self.bn(torch.cat([m(x) for m in self.m], 1))) + + +class Ensemble(nn.ModuleList): + # Ensemble of models + def __init__(self): + super().__init__() + + def forward(self, x, augment=False, profile=False, visualize=False): + y = [module(x, augment, profile, visualize)[0] for module in self] + # y = torch.stack(y).max(0)[0] # max ensemble + # y = torch.stack(y).mean(0) # mean ensemble + y = torch.cat(y, 1) # nms ensemble + return y, None # inference, train output + + +def attempt_load(weights, device=None, inplace=True, fuse=True): + # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a + from models.yolo import Detect, Model + + model = Ensemble() + for w in weights if isinstance(weights, list) else [weights]: + ckpt = torch.load(attempt_download(w), map_location='cpu') # load + ckpt = (ckpt.get('ema') or ckpt['model']).to(device).float() # FP32 model + if not hasattr(ckpt, 'stride'): + ckpt.stride = torch.tensor([32.]) # compatibility update for ResNet etc. 
+ model.append(ckpt.fuse().eval() if fuse and hasattr(ckpt, 'fuse') else ckpt.eval()) # model in eval mode + + # Compatibility updates + for m in model.modules(): + t = type(m) + if t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model): + m.inplace = inplace # torch 1.7.0 compatibility + if t is Detect and not isinstance(m.anchor_grid, list): + delattr(m, 'anchor_grid') + setattr(m, 'anchor_grid', [torch.zeros(1)] * m.nl) + elif t is nn.Upsample and not hasattr(m, 'recompute_scale_factor'): + m.recompute_scale_factor = None # torch 1.11.0 compatibility + + # Return model + if len(model) == 1: + return model[-1] + + # Return detection ensemble + print(f'Ensemble created with {weights}\n') + for k in 'names', 'nc', 'yaml': + setattr(model, k, getattr(model[0], k)) + model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride # max stride + assert all(model[0].nc == m.nc for m in model), f'Models have different class counts: {[m.nc for m in model]}' + return model diff --git a/src/目标识别代码/models/hub/anchors.yaml b/src/目标识别代码/models/hub/anchors.yaml new file mode 100644 index 00000000..e4d7beb0 --- /dev/null +++ b/src/目标识别代码/models/hub/anchors.yaml @@ -0,0 +1,59 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# Default anchors for COCO data + + +# P5 ------------------------------------------------------------------------------------------------------------------- +# P5-640: +anchors_p5_640: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + + +# P6 ------------------------------------------------------------------------------------------------------------------- +# P6-640: thr=0.25: 0.9964 BPR, 5.54 anchors past thr, n=12, img_size=640, metric_all=0.281/0.716-mean/best, past_thr=0.469-mean: 9,11, 21,19, 17,41, 43,32, 39,70, 86,64, 65,131, 134,130, 120,265, 282,180, 247,354, 512,387 +anchors_p6_640: + - [9,11, 21,19, 17,41] # P3/8 + - [43,32, 39,70, 86,64] 
# P4/16 + - [65,131, 134,130, 120,265] # P5/32 + - [282,180, 247,354, 512,387] # P6/64 + +# P6-1280: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1280, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 19,27, 44,40, 38,94, 96,68, 86,152, 180,137, 140,301, 303,264, 238,542, 436,615, 739,380, 925,792 +anchors_p6_1280: + - [19,27, 44,40, 38,94] # P3/8 + - [96,68, 86,152, 180,137] # P4/16 + - [140,301, 303,264, 238,542] # P5/32 + - [436,615, 739,380, 925,792] # P6/64 + +# P6-1920: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1920, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 28,41, 67,59, 57,141, 144,103, 129,227, 270,205, 209,452, 455,396, 358,812, 653,922, 1109,570, 1387,1187 +anchors_p6_1920: + - [28,41, 67,59, 57,141] # P3/8 + - [144,103, 129,227, 270,205] # P4/16 + - [209,452, 455,396, 358,812] # P5/32 + - [653,922, 1109,570, 1387,1187] # P6/64 + + +# P7 ------------------------------------------------------------------------------------------------------------------- +# P7-640: thr=0.25: 0.9962 BPR, 6.76 anchors past thr, n=15, img_size=640, metric_all=0.275/0.733-mean/best, past_thr=0.466-mean: 11,11, 13,30, 29,20, 30,46, 61,38, 39,92, 78,80, 146,66, 79,163, 149,150, 321,143, 157,303, 257,402, 359,290, 524,372 +anchors_p7_640: + - [11,11, 13,30, 29,20] # P3/8 + - [30,46, 61,38, 39,92] # P4/16 + - [78,80, 146,66, 79,163] # P5/32 + - [149,150, 321,143, 157,303] # P6/64 + - [257,402, 359,290, 524,372] # P7/128 + +# P7-1280: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1280, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 19,22, 54,36, 32,77, 70,83, 138,71, 75,173, 165,159, 148,334, 375,151, 334,317, 251,626, 499,474, 750,326, 534,814, 1079,818 +anchors_p7_1280: + - [19,22, 54,36, 32,77] # P3/8 + - [70,83, 138,71, 75,173] # P4/16 + - [165,159, 148,334, 375,151] # P5/32 + - [334,317, 251,626, 499,474] # P6/64 + - [750,326, 534,814, 1079,818] # P7/128 + +# P7-1920: thr=0.25: 0.9968 BPR, 6.71 anchors 
past thr, n=15, img_size=1920, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 29,34, 81,55, 47,115, 105,124, 207,107, 113,259, 247,238, 222,500, 563,227, 501,476, 376,939, 749,711, 1126,489, 801,1222, 1618,1227 +anchors_p7_1920: + - [29,34, 81,55, 47,115] # P3/8 + - [105,124, 207,107, 113,259] # P4/16 + - [247,238, 222,500, 563,227] # P5/32 + - [501,476, 376,939, 749,711] # P6/64 + - [1126,489, 801,1222, 1618,1227] # P7/128 diff --git a/src/目标识别代码/models/hub/yolov3-spp.yaml b/src/目标识别代码/models/hub/yolov3-spp.yaml new file mode 100644 index 00000000..c6698215 --- /dev/null +++ b/src/目标识别代码/models/hub/yolov3-spp.yaml @@ -0,0 +1,51 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# darknet53 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [32, 3, 1]], # 0 + [-1, 1, Conv, [64, 3, 2]], # 1-P1/2 + [-1, 1, Bottleneck, [64]], + [-1, 1, Conv, [128, 3, 2]], # 3-P2/4 + [-1, 2, Bottleneck, [128]], + [-1, 1, Conv, [256, 3, 2]], # 5-P3/8 + [-1, 8, Bottleneck, [256]], + [-1, 1, Conv, [512, 3, 2]], # 7-P4/16 + [-1, 8, Bottleneck, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32 + [-1, 4, Bottleneck, [1024]], # 10 + ] + +# YOLOv3-SPP head +head: + [[-1, 1, Bottleneck, [1024, False]], + [-1, 1, SPP, [512, [5, 9, 13]]], + [-1, 1, Conv, [1024, 3, 1]], + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large) + + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P4 + [-1, 1, Bottleneck, [512, False]], + [-1, 1, Bottleneck, [512, False]], + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium) + + [-2, 1, Conv, [128, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], 
# cat backbone P3 + [-1, 1, Bottleneck, [256, False]], + [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small) + + [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/src/目标识别代码/models/hub/yolov3-tiny.yaml b/src/目标识别代码/models/hub/yolov3-tiny.yaml new file mode 100644 index 00000000..b28b4431 --- /dev/null +++ b/src/目标识别代码/models/hub/yolov3-tiny.yaml @@ -0,0 +1,41 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple +anchors: + - [10,14, 23,27, 37,58] # P4/16 + - [81,82, 135,169, 344,319] # P5/32 + +# YOLOv3-tiny backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [16, 3, 1]], # 0 + [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 1-P1/2 + [-1, 1, Conv, [32, 3, 1]], + [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 3-P2/4 + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 5-P3/8 + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 7-P4/16 + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 9-P5/32 + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, nn.ZeroPad2d, [[0, 1, 0, 1]]], # 11 + [-1, 1, nn.MaxPool2d, [2, 1, 0]], # 12 + ] + +# YOLOv3-tiny head +head: + [[-1, 1, Conv, [1024, 3, 1]], + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [512, 3, 1]], # 15 (P5/32-large) + + [-2, 1, Conv, [128, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P4 + [-1, 1, Conv, [256, 3, 1]], # 19 (P4/16-medium) + + [[19, 15], 1, Detect, [nc, anchors]], # Detect(P4, P5) + ] diff --git a/src/目标识别代码/models/hub/yolov3.yaml b/src/目标识别代码/models/hub/yolov3.yaml new file mode 100644 index 00000000..d1ef9129 --- /dev/null +++ b/src/目标识别代码/models/hub/yolov3.yaml @@ -0,0 +1,51 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple +anchors: + - 
[10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# darknet53 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [32, 3, 1]], # 0 + [-1, 1, Conv, [64, 3, 2]], # 1-P1/2 + [-1, 1, Bottleneck, [64]], + [-1, 1, Conv, [128, 3, 2]], # 3-P2/4 + [-1, 2, Bottleneck, [128]], + [-1, 1, Conv, [256, 3, 2]], # 5-P3/8 + [-1, 8, Bottleneck, [256]], + [-1, 1, Conv, [512, 3, 2]], # 7-P4/16 + [-1, 8, Bottleneck, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32 + [-1, 4, Bottleneck, [1024]], # 10 + ] + +# YOLOv3 head +head: + [[-1, 1, Bottleneck, [1024, False]], + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, Conv, [1024, 3, 1]], + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large) + + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P4 + [-1, 1, Bottleneck, [512, False]], + [-1, 1, Bottleneck, [512, False]], + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium) + + [-2, 1, Conv, [128, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P3 + [-1, 1, Bottleneck, [256, False]], + [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small) + + [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/src/目标识别代码/models/hub/yolov5-bifpn.yaml b/src/目标识别代码/models/hub/yolov5-bifpn.yaml new file mode 100644 index 00000000..504815f5 --- /dev/null +++ b/src/目标识别代码/models/hub/yolov5-bifpn.yaml @@ -0,0 +1,48 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + 
[-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 + ] + +# YOLOv5 v6.0 BiFPN head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14, 6], 1, Concat, [1]], # cat P4 <--- BiFPN change + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/src/目标识别代码/models/hub/yolov5-fpn.yaml b/src/目标识别代码/models/hub/yolov5-fpn.yaml new file mode 100644 index 00000000..a23e9c6f --- /dev/null +++ b/src/目标识别代码/models/hub/yolov5-fpn.yaml @@ -0,0 +1,42 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 + ] + +# YOLOv5 v6.0 FPN head +head: + [[-1, 3, C3, [1024, False]], # 10 (P5/32-large) + + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, 
Concat, [1]], # cat backbone P4 + [-1, 1, Conv, [512, 1, 1]], + [-1, 3, C3, [512, False]], # 14 (P4/16-medium) + + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 1, Conv, [256, 1, 1]], + [-1, 3, C3, [256, False]], # 18 (P3/8-small) + + [[18, 14, 10], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/src/目标识别代码/models/hub/yolov5-p2.yaml b/src/目标识别代码/models/hub/yolov5-p2.yaml new file mode 100644 index 00000000..554117dd --- /dev/null +++ b/src/目标识别代码/models/hub/yolov5-p2.yaml @@ -0,0 +1,54 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple +anchors: 3 # AutoAnchor evolves 3 anchors per P output layer + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 + ] + +# YOLOv5 v6.0 head with (P2, P3, P4, P5) outputs +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [128, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 2], 1, Concat, [1]], # cat backbone P2 + [-1, 1, C3, [128, False]], # 21 (P2/4-xsmall) + + [-1, 1, Conv, [128, 3, 2]], + [[-1, 18], 1, Concat, [1]], # cat head P3 + [-1, 3, C3, [256, False]], # 24 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 27 
(P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 30 (P5/32-large) + + [[21, 24, 27, 30], 1, Detect, [nc, anchors]], # Detect(P2, P3, P4, P5) + ] diff --git a/src/目标识别代码/models/hub/yolov5-p34.yaml b/src/目标识别代码/models/hub/yolov5-p34.yaml new file mode 100644 index 00000000..dbf0f850 --- /dev/null +++ b/src/目标识别代码/models/hub/yolov5-p34.yaml @@ -0,0 +1,41 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 0.33 # model depth multiple +width_multiple: 0.50 # layer channel multiple +anchors: 3 # AutoAnchor evolves 3 anchors per P output layer + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [ [ -1, 1, Conv, [ 64, 6, 2, 2 ] ], # 0-P1/2 + [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 + [ -1, 3, C3, [ 128 ] ], + [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 + [ -1, 6, C3, [ 256 ] ], + [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 + [ -1, 9, C3, [ 512 ] ], + [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 7-P5/32 + [ -1, 3, C3, [ 1024 ] ], + [ -1, 1, SPPF, [ 1024, 5 ] ], # 9 + ] + +# YOLOv5 v6.0 head with (P3, P4) outputs +head: + [ [ -1, 1, Conv, [ 512, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 + [ -1, 3, C3, [ 512, False ] ], # 13 + + [ -1, 1, Conv, [ 256, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 + [ -1, 3, C3, [ 256, False ] ], # 17 (P3/8-small) + + [ -1, 1, Conv, [ 256, 3, 2 ] ], + [ [ -1, 14 ], 1, Concat, [ 1 ] ], # cat head P4 + [ -1, 3, C3, [ 512, False ] ], # 20 (P4/16-medium) + + [ [ 17, 20 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4) + ] diff --git a/src/目标识别代码/models/hub/yolov5-p6.yaml b/src/目标识别代码/models/hub/yolov5-p6.yaml new file mode 100644 index 00000000..a17202f2 --- /dev/null +++ b/src/目标识别代码/models/hub/yolov5-p6.yaml @@ -0,0 +1,56 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# 
Parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple +anchors: 3 # AutoAnchor evolves 3 anchors per P output layer + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 + [-1, 3, C3, [768]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 11 + ] + +# YOLOv5 v6.0 head with (P3, P4, P5, P6) outputs +head: + [[-1, 1, Conv, [768, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P5 + [-1, 3, C3, [768, False]], # 15 + + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 19 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 23 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 20], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 26 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 16], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [768, False]], # 29 (P5/32-large) + + [-1, 1, Conv, [768, 3, 2]], + [[-1, 12], 1, Concat, [1]], # cat head P6 + [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) + + [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) + ] diff --git a/src/目标识别代码/models/hub/yolov5-p7.yaml b/src/目标识别代码/models/hub/yolov5-p7.yaml new file mode 100644 index 00000000..edd7d13a --- /dev/null +++ b/src/目标识别代码/models/hub/yolov5-p7.yaml @@ -0,0 +1,67 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple 
+width_multiple: 1.0 # layer channel multiple +anchors: 3 # AutoAnchor evolves 3 anchors per P output layer + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 + [-1, 3, C3, [768]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 + [-1, 3, C3, [1024]], + [-1, 1, Conv, [1280, 3, 2]], # 11-P7/128 + [-1, 3, C3, [1280]], + [-1, 1, SPPF, [1280, 5]], # 13 + ] + +# YOLOv5 v6.0 head with (P3, P4, P5, P6, P7) outputs +head: + [[-1, 1, Conv, [1024, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 10], 1, Concat, [1]], # cat backbone P6 + [-1, 3, C3, [1024, False]], # 17 + + [-1, 1, Conv, [768, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P5 + [-1, 3, C3, [768, False]], # 21 + + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 25 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 29 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 26], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 32 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 22], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [768, False]], # 35 (P5/32-large) + + [-1, 1, Conv, [768, 3, 2]], + [[-1, 18], 1, Concat, [1]], # cat head P6 + [-1, 3, C3, [1024, False]], # 38 (P6/64-xlarge) + + [-1, 1, Conv, [1024, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P7 + [-1, 3, C3, [1280, False]], # 41 (P7/128-xxlarge) + + [[29, 32, 35, 38, 41], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6, P7) + ] diff --git a/src/目标识别代码/models/hub/yolov5-panet.yaml 
b/src/目标识别代码/models/hub/yolov5-panet.yaml new file mode 100644 index 00000000..ccfbf900 --- /dev/null +++ b/src/目标识别代码/models/hub/yolov5-panet.yaml @@ -0,0 +1,48 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 + ] + +# YOLOv5 v6.0 PANet head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/src/目标识别代码/models/hub/yolov5l6.yaml b/src/目标识别代码/models/hub/yolov5l6.yaml new file mode 100644 index 00000000..632c2cb6 --- /dev/null +++ b/src/目标识别代码/models/hub/yolov5l6.yaml @@ -0,0 +1,60 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple +anchors: + - [19,27, 44,40, 38,94] # P3/8 + - [96,68, 86,152, 180,137] # 
P4/16 + - [140,301, 303,264, 238,542] # P5/32 + - [436,615, 739,380, 925,792] # P6/64 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 + [-1, 3, C3, [768]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 11 + ] + +# YOLOv5 v6.0 head +head: + [[-1, 1, Conv, [768, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P5 + [-1, 3, C3, [768, False]], # 15 + + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 19 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 23 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 20], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 26 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 16], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [768, False]], # 29 (P5/32-large) + + [-1, 1, Conv, [768, 3, 2]], + [[-1, 12], 1, Concat, [1]], # cat head P6 + [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) + + [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) + ] diff --git a/src/目标识别代码/models/hub/yolov5m6.yaml b/src/目标识别代码/models/hub/yolov5m6.yaml new file mode 100644 index 00000000..ecc53fd6 --- /dev/null +++ b/src/目标识别代码/models/hub/yolov5m6.yaml @@ -0,0 +1,60 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 0.67 # model depth multiple +width_multiple: 0.75 # layer channel multiple +anchors: + - [19,27, 44,40, 38,94] # P3/8 + - [96,68, 86,152, 180,137] # P4/16 + - [140,301, 
303,264, 238,542] # P5/32 + - [436,615, 739,380, 925,792] # P6/64 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 + [-1, 3, C3, [768]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 11 + ] + +# YOLOv5 v6.0 head +head: + [[-1, 1, Conv, [768, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P5 + [-1, 3, C3, [768, False]], # 15 + + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 19 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 23 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 20], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 26 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 16], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [768, False]], # 29 (P5/32-large) + + [-1, 1, Conv, [768, 3, 2]], + [[-1, 12], 1, Concat, [1]], # cat head P6 + [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) + + [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) + ] diff --git a/src/目标识别代码/models/hub/yolov5n6.yaml b/src/目标识别代码/models/hub/yolov5n6.yaml new file mode 100644 index 00000000..0c0c71d3 --- /dev/null +++ b/src/目标识别代码/models/hub/yolov5n6.yaml @@ -0,0 +1,60 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 0.33 # model depth multiple +width_multiple: 0.25 # layer channel multiple +anchors: + - [19,27, 44,40, 38,94] # P3/8 + - [96,68, 86,152, 180,137] # P4/16 + - [140,301, 303,264, 238,542] # 
P5/32 + - [436,615, 739,380, 925,792] # P6/64 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 + [-1, 3, C3, [768]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 11 + ] + +# YOLOv5 v6.0 head +head: + [[-1, 1, Conv, [768, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P5 + [-1, 3, C3, [768, False]], # 15 + + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 19 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 23 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 20], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 26 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 16], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [768, False]], # 29 (P5/32-large) + + [-1, 1, Conv, [768, 3, 2]], + [[-1, 12], 1, Concat, [1]], # cat head P6 + [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) + + [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) + ] diff --git a/src/目标识别代码/models/hub/yolov5s-ghost.yaml b/src/目标识别代码/models/hub/yolov5s-ghost.yaml new file mode 100644 index 00000000..ff9519c3 --- /dev/null +++ b/src/目标识别代码/models/hub/yolov5s-ghost.yaml @@ -0,0 +1,48 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 0.33 # model depth multiple +width_multiple: 0.50 # layer channel multiple +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# 
YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, GhostConv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3Ghost, [128]], + [-1, 1, GhostConv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3Ghost, [256]], + [-1, 1, GhostConv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3Ghost, [512]], + [-1, 1, GhostConv, [1024, 3, 2]], # 7-P5/32 + [-1, 3, C3Ghost, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 + ] + +# YOLOv5 v6.0 head +head: + [[-1, 1, GhostConv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3Ghost, [512, False]], # 13 + + [-1, 1, GhostConv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3Ghost, [256, False]], # 17 (P3/8-small) + + [-1, 1, GhostConv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3Ghost, [512, False]], # 20 (P4/16-medium) + + [-1, 1, GhostConv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3Ghost, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/src/目标识别代码/models/hub/yolov5s-transformer.yaml b/src/目标识别代码/models/hub/yolov5s-transformer.yaml new file mode 100644 index 00000000..100d7c44 --- /dev/null +++ b/src/目标识别代码/models/hub/yolov5s-transformer.yaml @@ -0,0 +1,48 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 0.33 # model depth multiple +width_multiple: 0.50 # layer channel multiple +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, 
[1024, 3, 2]], # 7-P5/32 + [-1, 3, C3TR, [1024]], # 9 <--- C3TR() Transformer module + [-1, 1, SPPF, [1024, 5]], # 9 + ] + +# YOLOv5 v6.0 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/src/目标识别代码/models/hub/yolov5s6.yaml b/src/目标识别代码/models/hub/yolov5s6.yaml new file mode 100644 index 00000000..a28fb559 --- /dev/null +++ b/src/目标识别代码/models/hub/yolov5s6.yaml @@ -0,0 +1,60 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 0.33 # model depth multiple +width_multiple: 0.50 # layer channel multiple +anchors: + - [19,27, 44,40, 38,94] # P3/8 + - [96,68, 86,152, 180,137] # P4/16 + - [140,301, 303,264, 238,542] # P5/32 + - [436,615, 739,380, 925,792] # P6/64 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 + [-1, 3, C3, [768]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 11 + ] + +# YOLOv5 v6.0 head +head: + [[-1, 1, Conv, [768, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P5 + [-1, 3, C3, [768, False]], # 15 + + 
[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 19 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 23 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 20], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 26 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 16], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [768, False]], # 29 (P5/32-large) + + [-1, 1, Conv, [768, 3, 2]], + [[-1, 12], 1, Concat, [1]], # cat head P6 + [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) + + [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) + ] diff --git a/src/目标识别代码/models/hub/yolov5x6.yaml b/src/目标识别代码/models/hub/yolov5x6.yaml new file mode 100644 index 00000000..ba795c4a --- /dev/null +++ b/src/目标识别代码/models/hub/yolov5x6.yaml @@ -0,0 +1,60 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 1.33 # model depth multiple +width_multiple: 1.25 # layer channel multiple +anchors: + - [19,27, 44,40, 38,94] # P3/8 + - [96,68, 86,152, 180,137] # P4/16 + - [140,301, 303,264, 238,542] # P5/32 + - [436,615, 739,380, 925,792] # P6/64 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 + [-1, 3, C3, [768]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 11 + ] + +# YOLOv5 v6.0 head +head: + [[-1, 1, Conv, [768, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P5 + [-1, 3, C3, [768, False]], # 15 + + [-1, 1, Conv, [512, 1, 
1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 19 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 23 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 20], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 26 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 16], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [768, False]], # 29 (P5/32-large) + + [-1, 1, Conv, [768, 3, 2]], + [[-1, 12], 1, Concat, [1]], # cat head P6 + [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) + + [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) + ] diff --git a/src/目标识别代码/models/tf.py b/src/目标识别代码/models/tf.py new file mode 100644 index 00000000..b0d98cc2 --- /dev/null +++ b/src/目标识别代码/models/tf.py @@ -0,0 +1,574 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +TensorFlow, Keras and TFLite versions of YOLOv5 +Authored by https://github.com/zldrobit in PR https://github.com/ultralytics/yolov5/pull/1127 + +Usage: + $ python models/tf.py --weights yolov5s.pt + +Export: + $ python path/to/export.py --weights yolov5s.pt --include saved_model pb tflite tfjs +""" + +import argparse +import sys +from copy import deepcopy +from pathlib import Path + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[1] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +# ROOT = ROOT.relative_to(Path.cwd()) # relative + +import numpy as np +import tensorflow as tf +import torch +import torch.nn as nn +from tensorflow import keras + +from models.common import (C3, SPP, SPPF, Bottleneck, BottleneckCSP, C3x, Concat, Conv, CrossConv, DWConv, + DWConvTranspose2d, Focus, autopad) +from models.experimental import MixConv2d, attempt_load +from models.yolo import Detect +from utils.activations import SiLU +from utils.general import LOGGER, 
make_divisible, print_args + + +class TFBN(keras.layers.Layer): + # TensorFlow BatchNormalization wrapper + def __init__(self, w=None): + super().__init__() + self.bn = keras.layers.BatchNormalization( + beta_initializer=keras.initializers.Constant(w.bias.numpy()), + gamma_initializer=keras.initializers.Constant(w.weight.numpy()), + moving_mean_initializer=keras.initializers.Constant(w.running_mean.numpy()), + moving_variance_initializer=keras.initializers.Constant(w.running_var.numpy()), + epsilon=w.eps) + + def call(self, inputs): + return self.bn(inputs) + + +class TFPad(keras.layers.Layer): + # Pad inputs in spatial dimensions 1 and 2 + def __init__(self, pad): + super().__init__() + if isinstance(pad, int): + self.pad = tf.constant([[0, 0], [pad, pad], [pad, pad], [0, 0]]) + else: # tuple/list + self.pad = tf.constant([[0, 0], [pad[0], pad[0]], [pad[1], pad[1]], [0, 0]]) + + def call(self, inputs): + return tf.pad(inputs, self.pad, mode='constant', constant_values=0) + + +class TFConv(keras.layers.Layer): + # Standard convolution + def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None): + # ch_in, ch_out, weights, kernel, stride, padding, groups + super().__init__() + assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument" + # TensorFlow convolution padding is inconsistent with PyTorch (e.g. 
k=3 s=2 'SAME' padding) + # see https://stackoverflow.com/questions/52975843/comparing-conv2d-with-padding-between-tensorflow-and-pytorch + conv = keras.layers.Conv2D( + filters=c2, + kernel_size=k, + strides=s, + padding='SAME' if s == 1 else 'VALID', + use_bias=not hasattr(w, 'bn'), + kernel_initializer=keras.initializers.Constant(w.conv.weight.permute(2, 3, 1, 0).numpy()), + bias_initializer='zeros' if hasattr(w, 'bn') else keras.initializers.Constant(w.conv.bias.numpy())) + self.conv = conv if s == 1 else keras.Sequential([TFPad(autopad(k, p)), conv]) + self.bn = TFBN(w.bn) if hasattr(w, 'bn') else tf.identity + self.act = activations(w.act) if act else tf.identity + + def call(self, inputs): + return self.act(self.bn(self.conv(inputs))) + + +class TFDWConv(keras.layers.Layer): + # Depthwise convolution + def __init__(self, c1, c2, k=1, s=1, p=None, act=True, w=None): + # ch_in, ch_out, weights, kernel, stride, padding, groups + super().__init__() + assert c2 % c1 == 0, f'TFDWConv() output={c2} must be a multiple of input={c1} channels' + conv = keras.layers.DepthwiseConv2D( + kernel_size=k, + depth_multiplier=c2 // c1, + strides=s, + padding='SAME' if s == 1 else 'VALID', + use_bias=not hasattr(w, 'bn'), + depthwise_initializer=keras.initializers.Constant(w.conv.weight.permute(2, 3, 1, 0).numpy()), + bias_initializer='zeros' if hasattr(w, 'bn') else keras.initializers.Constant(w.conv.bias.numpy())) + self.conv = conv if s == 1 else keras.Sequential([TFPad(autopad(k, p)), conv]) + self.bn = TFBN(w.bn) if hasattr(w, 'bn') else tf.identity + self.act = activations(w.act) if act else tf.identity + + def call(self, inputs): + return self.act(self.bn(self.conv(inputs))) + + +class TFDWConvTranspose2d(keras.layers.Layer): + # Depthwise ConvTranspose2d + def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0, w=None): + # ch_in, ch_out, weights, kernel, stride, padding, groups + super().__init__() + assert c1 == c2, f'TFDWConv() output={c2} must be equal to input={c1} 
channels' + assert k == 4 and p1 == 1, 'TFDWConv() only valid for k=4 and p1=1' + weight, bias = w.weight.permute(2, 3, 1, 0).numpy(), w.bias.numpy() + self.c1 = c1 + self.conv = [ + keras.layers.Conv2DTranspose(filters=1, + kernel_size=k, + strides=s, + padding='VALID', + output_padding=p2, + use_bias=True, + kernel_initializer=keras.initializers.Constant(weight[..., i:i + 1]), + bias_initializer=keras.initializers.Constant(bias[i])) for i in range(c1)] + + def call(self, inputs): + return tf.concat([m(x) for m, x in zip(self.conv, tf.split(inputs, self.c1, 3))], 3)[:, 1:-1, 1:-1] + + +class TFFocus(keras.layers.Layer): + # Focus wh information into c-space + def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None): + # ch_in, ch_out, kernel, stride, padding, groups + super().__init__() + self.conv = TFConv(c1 * 4, c2, k, s, p, g, act, w.conv) + + def call(self, inputs): # x(b,w,h,c) -> y(b,w/2,h/2,4c) + # inputs = inputs / 255 # normalize 0-255 to 0-1 + inputs = [inputs[:, ::2, ::2, :], inputs[:, 1::2, ::2, :], inputs[:, ::2, 1::2, :], inputs[:, 1::2, 1::2, :]] + return self.conv(tf.concat(inputs, 3)) + + +class TFBottleneck(keras.layers.Layer): + # Standard bottleneck + def __init__(self, c1, c2, shortcut=True, g=1, e=0.5, w=None): # ch_in, ch_out, shortcut, groups, expansion + super().__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) + self.cv2 = TFConv(c_, c2, 3, 1, g=g, w=w.cv2) + self.add = shortcut and c1 == c2 + + def call(self, inputs): + return inputs + self.cv2(self.cv1(inputs)) if self.add else self.cv2(self.cv1(inputs)) + + +class TFCrossConv(keras.layers.Layer): + # Cross Convolution + def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False, w=None): + super().__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = TFConv(c1, c_, (1, k), (1, s), w=w.cv1) + self.cv2 = TFConv(c_, c2, (k, 1), (s, 1), g=g, w=w.cv2) + self.add = shortcut and c1 == c2 + + def call(self, inputs): + return 
class TFConv2d(keras.layers.Layer):
    # Substitution for PyTorch nn.Conv2D
    def __init__(self, c1, c2, k, s=1, g=1, bias=True, w=None):
        """1:1 stand-in for torch.nn.Conv2d, weights copied from PyTorch module `w`.

        Uses 'VALID' padding (no implicit padding). Grouped convolution is
        unsupported here, hence the g == 1 assertion.
        """
        super().__init__()
        assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument"
        self.conv = keras.layers.Conv2D(filters=c2,
                                        kernel_size=k,
                                        strides=s,
                                        padding='VALID',
                                        use_bias=bias,
                                        kernel_initializer=keras.initializers.Constant(
                                            w.weight.permute(2, 3, 1, 0).numpy()),  # PyTorch (O,I,H,W) -> TF (H,W,I,O)
                                        bias_initializer=keras.initializers.Constant(w.bias.numpy()) if bias else None)

    def call(self, inputs):
        return self.conv(inputs)
class TFSPPF(keras.layers.Layer):
    # Spatial pyramid pooling-Fast layer
    def __init__(self, c1, c2, k=5, w=None):
        """SPPF: three chained k x k max-pools approximate SPP's parallel pools at lower cost."""
        super().__init__()
        c_ = c1 // 2  # hidden channels
        self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
        self.cv2 = TFConv(c_ * 4, c2, 1, 1, w=w.cv2)  # 4 = input + 3 pooled feature maps
        self.m = keras.layers.MaxPool2D(pool_size=k, strides=1, padding='SAME')

    def call(self, inputs):
        x = self.cv1(inputs)
        y1 = self.m(x)   # first pooling pass
        y2 = self.m(y1)  # second pass widens the effective receptive field
        return self.cv2(tf.concat([x, y1, y2, self.m(y2)], 3))  # concat on channel axis (NHWC)
self.no = nc + 5 # number of outputs per anchor + self.nl = len(anchors) # number of detection layers + self.na = len(anchors[0]) // 2 # number of anchors + self.grid = [tf.zeros(1)] * self.nl # init grid + self.anchors = tf.convert_to_tensor(w.anchors.numpy(), dtype=tf.float32) + self.anchor_grid = tf.reshape(self.anchors * tf.reshape(self.stride, [self.nl, 1, 1]), [self.nl, 1, -1, 1, 2]) + self.m = [TFConv2d(x, self.no * self.na, 1, w=w.m[i]) for i, x in enumerate(ch)] + self.training = False # set to False after building model + self.imgsz = imgsz + for i in range(self.nl): + ny, nx = self.imgsz[0] // self.stride[i], self.imgsz[1] // self.stride[i] + self.grid[i] = self._make_grid(nx, ny) + + def call(self, inputs): + z = [] # inference output + x = [] + for i in range(self.nl): + x.append(self.m[i](inputs[i])) + # x(bs,20,20,255) to x(bs,3,20,20,85) + ny, nx = self.imgsz[0] // self.stride[i], self.imgsz[1] // self.stride[i] + x[i] = tf.reshape(x[i], [-1, ny * nx, self.na, self.no]) + + if not self.training: # inference + y = tf.sigmoid(x[i]) + grid = tf.transpose(self.grid[i], [0, 2, 1, 3]) - 0.5 + anchor_grid = tf.transpose(self.anchor_grid[i], [0, 2, 1, 3]) * 4 + xy = (y[..., 0:2] * 2 + grid) * self.stride[i] # xy + wh = y[..., 2:4] ** 2 * anchor_grid + # Normalize xywh to 0-1 to reduce calibration error + xy /= tf.constant([[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32) + wh /= tf.constant([[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32) + y = tf.concat([xy, wh, y[..., 4:]], -1) + z.append(tf.reshape(y, [-1, self.na * ny * nx, self.no])) + + return tf.transpose(x, [0, 2, 1, 3]) if self.training else (tf.concat(z, 1), x) + + @staticmethod + def _make_grid(nx=20, ny=20): + # yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)]) + # return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float() + xv, yv = tf.meshgrid(tf.range(nx), tf.range(ny)) + return tf.cast(tf.reshape(tf.stack([xv, yv], 2), [1, 1, ny * nx, 2]), dtype=tf.float32) + + +class 
class TFConcat(keras.layers.Layer):
    # TF version of torch.concat()
    def __init__(self, dimension=1, w=None):
        """Concatenate inputs along the channel axis (NCHW dim 1 maps to NHWC axis 3)."""
        super().__init__()
        assert dimension == 1, "convert only NCHW to NHWC concat"
        # TF tensors are NHWC, so PyTorch channel dim 1 becomes axis 3 here
        self.d = 3

    def call(self, inputs):
        return tf.concat(inputs, axis=self.d)
+ + args = [c1, c2, *args[1:]] + if m in [BottleneckCSP, C3, C3x]: + args.insert(2, n) + n = 1 + elif m is nn.BatchNorm2d: + args = [ch[f]] + elif m is Concat: + c2 = sum(ch[-1 if x == -1 else x + 1] for x in f) + elif m is Detect: + args.append([ch[x + 1] for x in f]) + if isinstance(args[1], int): # number of anchors + args[1] = [list(range(args[1] * 2))] * len(f) + args.append(imgsz) + else: + c2 = ch[f] + + tf_m = eval('TF' + m_str.replace('nn.', '')) + m_ = keras.Sequential([tf_m(*args, w=model.model[i][j]) for j in range(n)]) if n > 1 \ + else tf_m(*args, w=model.model[i]) # module + + torch_m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args) # module + t = str(m)[8:-2].replace('__main__.', '') # module type + np = sum(x.numel() for x in torch_m_.parameters()) # number params + m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params + LOGGER.info(f'{i:>3}{str(f):>18}{str(n):>3}{np:>10} {t:<40}{str(args):<30}') # print + save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist + layers.append(m_) + ch.append(c2) + return keras.Sequential(layers), sorted(save) + + +class TFModel: + # TF YOLOv5 model + def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, model=None, imgsz=(640, 640)): # model, channels, classes + super().__init__() + if isinstance(cfg, dict): + self.yaml = cfg # model dict + else: # is *.yaml + import yaml # for torch hub + self.yaml_file = Path(cfg).name + with open(cfg) as f: + self.yaml = yaml.load(f, Loader=yaml.FullLoader) # model dict + + # Define model + if nc and nc != self.yaml['nc']: + LOGGER.info(f"Overriding {cfg} nc={self.yaml['nc']} with nc={nc}") + self.yaml['nc'] = nc # override yaml value + self.model, self.savelist = parse_model(deepcopy(self.yaml), ch=[ch], model=model, imgsz=imgsz) + + def predict(self, + inputs, + tf_nms=False, + agnostic_nms=False, + topk_per_class=100, + topk_all=100, + iou_thres=0.45, + conf_thres=0.25): + y = 
[] # outputs + x = inputs + for m in self.model.layers: + if m.f != -1: # if not from previous layer + x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers + + x = m(x) # run + y.append(x if m.i in self.savelist else None) # save output + + # Add TensorFlow NMS + if tf_nms: + boxes = self._xywh2xyxy(x[0][..., :4]) + probs = x[0][:, :, 4:5] + classes = x[0][:, :, 5:] + scores = probs * classes + if agnostic_nms: + nms = AgnosticNMS()((boxes, classes, scores), topk_all, iou_thres, conf_thres) + else: + boxes = tf.expand_dims(boxes, 2) + nms = tf.image.combined_non_max_suppression(boxes, + scores, + topk_per_class, + topk_all, + iou_thres, + conf_thres, + clip_boxes=False) + return nms, x[1] + return x[0] # output only first tensor [1,6300,85] = [xywh, conf, class0, class1, ...] + # x = x[0][0] # [x(1,6300,85), ...] to x(6300,85) + # xywh = x[..., :4] # x(6300,4) boxes + # conf = x[..., 4:5] # x(6300,1) confidences + # cls = tf.reshape(tf.cast(tf.argmax(x[..., 5:], axis=1), tf.float32), (-1, 1)) # x(6300,1) classes + # return tf.concat([conf, cls, xywh], 1) + + @staticmethod + def _xywh2xyxy(xywh): + # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right + x, y, w, h = tf.split(xywh, num_or_size_splits=4, axis=-1) + return tf.concat([x - w / 2, y - h / 2, x + w / 2, y + h / 2], axis=-1) + + +class AgnosticNMS(keras.layers.Layer): + # TF Agnostic NMS + def call(self, input, topk_all, iou_thres, conf_thres): + # wrap map_fn to avoid TypeSpec related error https://stackoverflow.com/a/65809989/3036450 + return tf.map_fn(lambda x: self._nms(x, topk_all, iou_thres, conf_thres), + input, + fn_output_signature=(tf.float32, tf.float32, tf.float32, tf.int32), + name='agnostic_nms') + + @staticmethod + def _nms(x, topk_all=100, iou_thres=0.45, conf_thres=0.25): # agnostic NMS + boxes, classes, scores = x + class_inds = tf.cast(tf.argmax(classes, axis=-1), tf.float32) + scores_inp = 
def representative_dataset_gen(dataset, ncalib=100):
    """Generate calibration batches for TFLite converter.representative_dataset.

    Args:
        dataset: iterable yielding (path, img, im0s, vid_cap, string) tuples,
            where img is a CHW uint8-style image array (YOLOv5 dataloader format).
        ncalib: maximum number of calibration samples to yield.

    Yields:
        [im]: single-element list holding a 1xHxWxC float32 array scaled to 0-1.
    """
    for n, (path, img, im0s, vid_cap, string) in enumerate(dataset):
        if n >= ncalib:  # stop after exactly ncalib samples (original yielded ncalib+1: break was after yield)
            break
        im = np.transpose(img, [1, 2, 0])  # CHW -> HWC
        im = np.expand_dims(im, axis=0).astype(np.float32)  # add batch dimension
        im /= 255  # normalize 0-255 -> 0-1
        yield [im]
def parse_opt():
    """Parse command-line options for the TF/Keras export verification script.

    Returns:
        argparse.Namespace with weights, imgsz (expanded to [h, w]),
        batch_size and dynamic.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='weights path')
    parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w')
    parser.add_argument('--batch-size', type=int, default=1, help='batch size')
    parser.add_argument('--dynamic', action='store_true', help='dynamic batch size')
    opt = parser.parse_args()
    opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1  # expand a single size to square [h, w]
    print_args(vars(opt))
    return opt
models.common import * +from models.experimental import * +from utils.autoanchor import check_anchor_order +from utils.general import LOGGER, check_version, check_yaml, make_divisible, print_args +from utils.plots import feature_visualization +from utils.torch_utils import (fuse_conv_and_bn, initialize_weights, model_info, profile, scale_img, select_device, + time_sync) + +try: + import thop # for FLOPs computation +except ImportError: + thop = None + + +class Detect(nn.Module): + stride = None # strides computed during build + onnx_dynamic = False # ONNX export parameter + export = False # export mode + + def __init__(self, nc=80, anchors=(), ch=(), inplace=True): # detection layer + super().__init__() + self.nc = nc # number of classes + self.no = nc + 5 # number of outputs per anchor + self.nl = len(anchors) # number of detection layers + self.na = len(anchors[0]) // 2 # number of anchors + self.grid = [torch.zeros(1)] * self.nl # init grid + self.anchor_grid = [torch.zeros(1)] * self.nl # init anchor grid + self.register_buffer('anchors', torch.tensor(anchors).float().view(self.nl, -1, 2)) # shape(nl,na,2) + self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv + self.inplace = inplace # use inplace ops (e.g. 
    def forward(self, x):
        """Run the detection head on feature maps `x` (list of self.nl tensors).

        Training: returns the reshaped raw predictions per layer.
        Inference: additionally decodes xy/wh against grid and anchors;
        returns (cat(z, 1),) in export mode, else (cat(z, 1), x).
        """
        z = []  # inference output
        for i in range(self.nl):
            x[i] = self.m[i](x[i])  # conv
            bs, _, ny, nx = x[i].shape  # x(bs,255,20,20) to x(bs,3,20,20,85)
            x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()

            if not self.training:  # inference
                # (re)build the grid when the feature-map size changed or under dynamic ONNX export
                if self.onnx_dynamic or self.grid[i].shape[2:4] != x[i].shape[2:4]:
                    self.grid[i], self.anchor_grid[i] = self._make_grid(nx, ny, i)

                y = x[i].sigmoid()
                if self.inplace:
                    y[..., 0:2] = (y[..., 0:2] * 2 + self.grid[i]) * self.stride[i]  # xy
                    y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i]  # wh
                else:  # for YOLOv5 on AWS Inferentia https://github.com/ultralytics/yolov5/pull/2953
                    xy, wh, conf = y.split((2, 2, self.nc + 1), 4)  # y.tensor_split((2, 4, 5), 4)  # torch 1.8.0
                    xy = (xy * 2 + self.grid[i]) * self.stride[i]  # xy
                    wh = (wh * 2) ** 2 * self.anchor_grid[i]  # wh
                    y = torch.cat((xy, wh, conf), 4)
                z.append(y.view(bs, -1, self.no))

        return x if self.training else (torch.cat(z, 1),) if self.export else (torch.cat(z, 1), x)
    def _forward_once(self, x, profile=False, visualize=False):
        """Run the layer list once, routing multi-input layers via their 'from' (m.f) indices."""
        y, dt = [], []  # outputs
        for m in self.model:
            if m.f != -1:  # if not from previous layer
                x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f]  # from earlier layers
            if profile:
                self._profile_one_layer(m, x, dt)
            x = m(x)  # run
            y.append(x if m.i in self.save else None)  # save output only for layers listed in self.save
            if visualize:
                feature_visualization(x, m.type, m.i, save_dir=visualize)
        return x
    def _apply(self, fn):
        # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers
        """Extend nn.Module._apply so Detect's stride/grid tensors follow device/dtype moves."""
        self = super()._apply(fn)
        m = self.model[-1]  # Detect()
        if isinstance(m, Detect):
            # stride/grid/anchor_grid are plain tensors, so fn must be applied to them by hand
            m.stride = fn(m.stride)
            m.grid = list(map(fn, m.grid))
            if isinstance(m.anchor_grid, list):
                m.anchor_grid = list(map(fn, m.anchor_grid))
        return self
self.forward(torch.zeros(1, ch, s, s))]) # forward + check_anchor_order(m) # must be in pixel-space (not grid-space) + m.anchors /= m.stride.view(-1, 1, 1) + self.stride = m.stride + self._initialize_biases() # only run once + + # Init weights, biases + initialize_weights(self) + self.info() + LOGGER.info('') + + def forward(self, x, augment=False, profile=False, visualize=False): + if augment: + return self._forward_augment(x) # augmented inference, None + return self._forward_once(x, profile, visualize) # single-scale inference, train + + def _forward_augment(self, x): + img_size = x.shape[-2:] # height, width + s = [1, 0.83, 0.67] # scales + f = [None, 3, None] # flips (2-ud, 3-lr) + y = [] # outputs + for si, fi in zip(s, f): + xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max())) + yi = self._forward_once(xi)[0] # forward + # cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1]) # save + yi = self._descale_pred(yi, fi, si, img_size) + y.append(yi) + y = self._clip_augmented(y) # clip augmented tails + return torch.cat(y, 1), None # augmented inference, train + + def _descale_pred(self, p, flips, scale, img_size): + # de-scale predictions following augmented inference (inverse operation) + if self.inplace: + p[..., :4] /= scale # de-scale + if flips == 2: + p[..., 1] = img_size[0] - p[..., 1] # de-flip ud + elif flips == 3: + p[..., 0] = img_size[1] - p[..., 0] # de-flip lr + else: + x, y, wh = p[..., 0:1] / scale, p[..., 1:2] / scale, p[..., 2:4] / scale # de-scale + if flips == 2: + y = img_size[0] - y # de-flip ud + elif flips == 3: + x = img_size[1] - x # de-flip lr + p = torch.cat((x, y, wh, p[..., 4:]), -1) + return p + + def _clip_augmented(self, y): + # Clip YOLOv5 augmented inference tails + nl = self.model[-1].nl # number of detection layers (P3-P5) + g = sum(4 ** x for x in range(nl)) # grid points + e = 1 # exclude layer count + i = (y[0].shape[1] // g) * sum(4 ** x for x in range(e)) # indices + 
y[0] = y[0][:, :-i] # large + i = (y[-1].shape[1] // g) * sum(4 ** (nl - 1 - x) for x in range(e)) # indices + y[-1] = y[-1][:, i:] # small + return y + + def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency + # https://arxiv.org/abs/1708.02002 section 3.3 + # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1. + m = self.model[-1] # Detect() module + for mi, s in zip(m.m, m.stride): # from + b = mi.bias.view(m.na, -1).detach() # conv.bias(255) to (3,85) + b[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) + b[:, 5:] += math.log(0.6 / (m.nc - 0.999999)) if cf is None else torch.log(cf / cf.sum()) # cls + mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) + + +Model = DetectionModel # retain YOLOv5 'Model' class for backwards compatibility + + +class ClassificationModel(BaseModel): + # YOLOv5 classification model + def __init__(self, cfg=None, model=None, nc=1000, cutoff=10): # yaml, model, number of classes, cutoff index + super().__init__() + self._from_detection_model(model, nc, cutoff) if model is not None else self._from_yaml(cfg) + + def _from_detection_model(self, model, nc=1000, cutoff=10): + # Create a YOLOv5 classification model from a YOLOv5 detection model + if isinstance(model, DetectMultiBackend): + model = model.model # unwrap DetectMultiBackend + model.model = model.model[:cutoff] # backbone + m = model.model[-1] # last layer + ch = m.conv.in_channels if hasattr(m, 'conv') else m.cv1.conv.in_channels # ch into module + c = Classify(ch, nc) # Classify() + c.i, c.f, c.type = m.i, m.f, 'models.common.Classify' # index, from, type + model.model[-1] = c # replace + self.model = model.model + self.stride = model.stride + self.save = [] + self.nc = nc + + def _from_yaml(self, cfg): + # Create a YOLOv5 classification model from a *.yaml file + self.model = None + + +def parse_model(d, ch): # model_dict, input_channels(3) + 
LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}") + anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'] + na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors + no = na * (nc + 5) # number of outputs = anchors * (classes + 5) + + layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out + for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args + m = eval(m) if isinstance(m, str) else m # eval strings + for j, a in enumerate(args): + with contextlib.suppress(NameError): + args[j] = eval(a) if isinstance(a, str) else a # eval strings + + n = n_ = max(round(n * gd), 1) if n > 1 else n # depth gain + if m in (Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, SPPF, DWConv, MixConv2d, Focus, CrossConv, + BottleneckCSP, C3, C3TR, C3SPP, C3Ghost, nn.ConvTranspose2d, DWConvTranspose2d, C3x): + c1, c2 = ch[f], args[0] + if c2 != no: # if not output + c2 = make_divisible(c2 * gw, 8) + + args = [c1, c2, *args[1:]] + if m in [BottleneckCSP, C3, C3TR, C3Ghost, C3x]: + args.insert(2, n) # number of repeats + n = 1 + elif m is nn.BatchNorm2d: + args = [ch[f]] + elif m is Concat: + c2 = sum(ch[x] for x in f) + elif m is Detect: + args.append([ch[x] for x in f]) + if isinstance(args[1], int): # number of anchors + args[1] = [list(range(args[1] * 2))] * len(f) + elif m is Contract: + c2 = ch[f] * args[0] ** 2 + elif m is Expand: + c2 = ch[f] // args[0] ** 2 + else: + c2 = ch[f] + + m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args) # module + t = str(m)[8:-2].replace('__main__.', '') # module type + np = sum(x.numel() for x in m_.parameters()) # number params + m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params + LOGGER.info(f'{i:>3}{str(f):>18}{n_:>3}{np:10.0f} {t:<40}{str(args):<30}') # print + save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # 
if __name__ == '__main__':
    # CLI entry: build a model from a yaml cfg and optionally profile or smoke-test it
    parser = argparse.ArgumentParser()
    parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml')
    parser.add_argument('--batch-size', type=int, default=1, help='total batch size for all GPUs')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--profile', action='store_true', help='profile model speed')
    parser.add_argument('--line-profile', action='store_true', help='profile model speed layer by layer')
    parser.add_argument('--test', action='store_true', help='test all yolo*.yaml')
    opt = parser.parse_args()
    opt.cfg = check_yaml(opt.cfg)  # check YAML
    print_args(vars(opt))
    device = select_device(opt.device)

    # Create model
    im = torch.rand(opt.batch_size, 3, 640, 640).to(device)
    model = Model(opt.cfg).to(device)

    # Options
    if opt.line_profile:  # profile layer by layer
        model(im, profile=True)

    elif opt.profile:  # profile forward-backward
        results = profile(input=im, ops=[model], n=3)

    elif opt.test:  # test all models
        for cfg in Path(ROOT / 'models').rglob('yolo*.yaml'):
            try:
                _ = Model(cfg)
            except Exception as e:
                print(f'Error in {cfg}: {e}')  # report but continue with remaining cfgs

    else:  # report fused model summary
        model.fuse()
1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 + ] + +# YOLOv5 v6.0 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/src/目标识别代码/models/yolov5m.yaml b/src/目标识别代码/models/yolov5m.yaml new file mode 100644 index 00000000..ad13ab37 --- /dev/null +++ b/src/目标识别代码/models/yolov5m.yaml @@ -0,0 +1,48 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 0.67 # model depth multiple +width_multiple: 0.75 # layer channel multiple +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 + ] + +# YOLOv5 v6.0 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + 
[-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/src/目标识别代码/models/yolov5n.yaml b/src/目标识别代码/models/yolov5n.yaml new file mode 100644 index 00000000..8a28a40d --- /dev/null +++ b/src/目标识别代码/models/yolov5n.yaml @@ -0,0 +1,48 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 0.33 # model depth multiple +width_multiple: 0.25 # layer channel multiple +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 + ] + +# YOLOv5 v6.0 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], 
# 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/src/目标识别代码/models/yolov5s.yaml b/src/目标识别代码/models/yolov5s.yaml new file mode 100644 index 00000000..f35beabb --- /dev/null +++ b/src/目标识别代码/models/yolov5s.yaml @@ -0,0 +1,48 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 0.33 # model depth multiple +width_multiple: 0.50 # layer channel multiple +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 + ] + +# YOLOv5 v6.0 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/src/目标识别代码/models/yolov5x.yaml b/src/目标识别代码/models/yolov5x.yaml new file mode 100644 index 00000000..f617a027 --- /dev/null +++ b/src/目标识别代码/models/yolov5x.yaml @@ -0,0 +1,48 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 1.33 # model depth multiple +width_multiple: 1.25 
# layer channel multiple +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 + ] + +# YOLOv5 v6.0 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/src/目标识别代码/test.py b/src/目标识别代码/test.py new file mode 100644 index 00000000..d0be671a --- /dev/null +++ b/src/目标识别代码/test.py @@ -0,0 +1,300 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Run inference on images, videos, directories, streams, etc. 
+ +Usage - sources: + $ python path/to/detect.py --weights yolov5s.pt --source 0 # webcam + img.jpg # image + vid.mp4 # video + path/ # directory + path/*.jpg # glob + 'https://youtu.be/Zgi9g1ksQHc' # YouTube + 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream + +Usage - formats: + $ python path/to/detect.py --weights yolov5s.pt # PyTorch + yolov5s.torchscript # TorchScript + yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn + yolov5s.xml # OpenVINO + yolov5s.engine # TensorRT + yolov5s.mlmodel # CoreML (macOS-only) + yolov5s_saved_model # TensorFlow SavedModel + yolov5s.pb # TensorFlow GraphDef + yolov5s.tflite # TensorFlow Lite + yolov5s_edgetpu.tflite # TensorFlow Edge TPU +""" + +import argparse +import os +import platform +import sys +from pathlib import Path + +import torch +import torch.backends.cudnn as cudnn + +import json +import socket + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[0] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative + +from models.common import DetectMultiBackend +from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams +from utils.general import (LOGGER, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2, + increment_path, non_max_suppression, print_args, scale_coords, strip_optimizer, xyxy2xywh) +from utils.plots import Annotator, colors, save_one_box +from utils.torch_utils import select_device, smart_inference_mode, time_sync + +UDP_IP = '192.168.43.58' # change to desired IP address +UDP_PORT = 1900 # change to desired port number +sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) +sock.bind((UDP_IP, UDP_PORT)) +sock.listen(2) +clientsocket, addr = sock.accept() + +test = 0 +x_pos = 0.0 +y_pos = 0.0 + +@smart_inference_mode() +def run( + weights=ROOT / 'yolov5s.pt', # model.pt path(s) + source=ROOT / 'data/images', # file/dir/URL/glob, 0 for webcam 
+ data=ROOT / 'data/coco128.yaml', # dataset.yaml path + imgsz=(640, 640), # inference size (height, width) + conf_thres=0.25, # confidence threshold + iou_thres=0.45, # NMS IOU threshold + max_det=1000, # maximum detections per image + device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu + view_img=False, # show results + save_txt=False, # save results to *.txt + save_conf=False, # save confidences in --save-txt labels + save_crop=False, # save cropped prediction boxes + nosave=False, # do not save images/videos + classes=None, # filter by class: --class 0, or --class 0 2 3 + agnostic_nms=False, # class-agnostic NMS + augment=False, # augmented inference + visualize=False, # visualize features + update=False, # update all models + project=ROOT / 'runs/detect', # save results to project/name + name='exp', # save results to project/name + exist_ok=False, # existing project/name ok, do not increment + line_thickness=3, # bounding box thickness (pixels) + hide_labels=False, # hide labels + hide_conf=False, # hide confidences + half=False, # use FP16 half-precision inference + dnn=False, # use OpenCV DNN for ONNX inference +): + source = str(source) + save_img = not nosave and not source.endswith('.txt') # save inference images + is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) + is_url = source.lower().startswith(('rtsp://'))#, 'rtmp://', 'http://', 'https://')) + webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file) + if is_url and is_file: + source = check_file(source) # download + + # Directories + save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run + (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir + + # Load model + device = select_device(device) + model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) + stride, names, pt = model.stride, model.names, model.pt + imgsz = check_img_size(imgsz, s=stride) # check image size 
+ + # Dataloader + if webcam: + view_img = check_imshow() + cudnn.benchmark = True # set True to speed up constant image size inference + dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt) + bs = len(dataset) # batch_size + else: + dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt) + bs = 1 # batch_size + vid_path, vid_writer = [None] * bs, [None] * bs + + # Run inference + model.warmup(imgsz=(1 if pt else bs, 3, *imgsz)) # warmup + seen, windows, dt = 0, [], [0.0, 0.0, 0.0] + for path, im, im0s, vid_cap, s in dataset: + t1 = time_sync() + im = torch.from_numpy(im).to(device) + im = im.half() if model.fp16 else im.float() # uint8 to fp16/32 + im /= 255 # 0 - 255 to 0.0 - 1.0 + if len(im.shape) == 3: + im = im[None] # expand for batch dim + t2 = time_sync() + dt[0] += t2 - t1 + + # Inference + visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False + pred = model(im, augment=augment, visualize=visualize) + t3 = time_sync() + dt[1] += t3 - t2 + + # NMS + pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det) + dt[2] += time_sync() - t3 + + # Second-stage classifier (optional) + # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s) + + # Process predictions + for i, det in enumerate(pred): # per image + seen += 1 + if webcam: # batch_size >= 1 + p, im0, frame = path[i], im0s[i].copy(), dataset.count + s += f'{i}: ' + else: + p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0) + + p = Path(p) # to Path + save_path = str(save_dir / p.name) # im.jpg + + img_location = os.path.abspath(p) + + txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt + s += '%gx%g ' % im.shape[2:] # print string + gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh + imc = im0.copy() if save_crop else im0 # for save_crop + annotator = Annotator(im0, line_width=line_thickness, 
example=str(names)) + if len(det): + # Rescale boxes from img_size to im0 size + det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round() + + # Print results + for c in det[:, -1].unique(): + n = (det[:, -1] == c).sum() # detections per class + s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string + + # Write results + for *xyxy, conf, cls in reversed(det): + if save_txt: # Write to file + xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh + line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format + with open(f'{txt_path}.txt', 'a') as f: + f.write(('%g ' * len(line)).rstrip() % line + '\n') + + if save_img or save_crop or view_img: # Add bbox to image + c = int(cls) # integer class + label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') + annotator.box_label(xyxy, label, color=colors(c, True)) + + if save_crop: + save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) + + # Stream results + im0 = annotator.result() + if view_img: + if platform.system() == 'Linux' and p not in windows: + windows.append(p) + cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux) + cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0]) + cv2.imshow(str(p), im0) + cv2.waitKey(1) # 1 millisecond + + # Save results (image with detections) + if save_img: + if dataset.mode == 'image': + cv2.imwrite(save_path, im0) + else: # 'video' or 'stream' + if vid_path[i] != save_path: # new video + vid_path[i] = save_path + if isinstance(vid_writer[i], cv2.VideoWriter): + vid_writer[i].release() # release previous video writer + if vid_cap: # video + fps = vid_cap.get(cv2.CAP_PROP_FPS) + w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + else: # stream + fps, w, h = 30, im0.shape[1], im0.shape[0] + save_path = str(Path(save_path).with_suffix('.mp4')) # force *.mp4 suffix on 
results videos + vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) + vid_writer[i].write(im0) + + # Print time (inference-only) + LOGGER.info(f'{s}Done. ({t3 - t2:.3f}s)') + + #Encode targets as JSON string and send via UDP socket + targets = [] + for *xyxy, conf, cls in reversed(det): + xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh + targets.append({'class': names[int(cls)], 'conf': float(conf), 'bbox': [float(val) for val in xywh]}) + json_str = json.dumps({'location': img_location, 'targets': targets, 'x_pos':x_pos, 'y_pos':y_pos}) + + clientsocket.send(json_str.encode('utf-8')) # change DEST_IP and DEST_PORT to desired values + print(json_str) + client() + + # Print results + t = tuple(x / seen * 1E3 for x in dt) # speeds per image + LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) + if save_txt or save_img: + s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' + LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") + if update: + strip_optimizer(weights[0]) # update model (to fix SourceChangeWarning) + +def client(): + global test + global x_pos + global y_pos + if test == 0: + msg = clientsocket.recv(4096) + msg = msg.decode('utf-8') + recvmsg = json.loads(msg) + print(recvmsg) + x_pos = recvmsg['x_pos'] + y_pos = recvmsg['y_pos'] + if (x_pos != 0.0 and y_pos != 0.0): + test = 1 + +def parse_opt(): + parser = argparse.ArgumentParser() + parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model path(s)') + parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob, 0 for webcam') + parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path') + parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, 
default=[640], help='inference size h,w') + parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold') + parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold') + parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image') + parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') + parser.add_argument('--view-img', action='store_true', help='show results') + parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') + parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') + parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes') + parser.add_argument('--nosave', action='store_true', help='do not save images/videos') + parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3') + parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS') + parser.add_argument('--augment', action='store_true', help='augmented inference') + parser.add_argument('--visualize', action='store_true', help='visualize features') + parser.add_argument('--update', action='store_true', help='update all models') + parser.add_argument('--project', default=ROOT / 'runs/detect', help='save results to project/name') + parser.add_argument('--name', default='exp', help='save results to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + + parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)') + parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels') + parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences') + + parser.add_argument('--half', action='store_true', help='use FP16 half-precision 
inference') + parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') + opt = parser.parse_args() + opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand + print_args(vars(opt)) + return opt + + +def main(opt): + check_requirements(exclude=('tensorboard', 'thop')) + run(**vars(opt)) + + +if __name__ == "__main__": + opt = parse_opt() + main(opt) diff --git a/src/目标识别代码/utils/__init__.py b/src/目标识别代码/utils/__init__.py new file mode 100644 index 00000000..da53a4d2 --- /dev/null +++ b/src/目标识别代码/utils/__init__.py @@ -0,0 +1,36 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +utils/initialization +""" + + +def notebook_init(verbose=True): + # Check system software and hardware + print('Checking setup...') + + import os + import shutil + + from utils.general import check_requirements, emojis, is_colab + from utils.torch_utils import select_device # imports + + check_requirements(('psutil', 'IPython')) + import psutil + from IPython import display # to display images and clear console output + + if is_colab(): + shutil.rmtree('/content/sample_data', ignore_errors=True) # remove colab /sample_data directory + + # System info + if verbose: + gb = 1 << 30 # bytes to GiB (1024 ** 3) + ram = psutil.virtual_memory().total + total, used, free = shutil.disk_usage("/") + display.clear_output() + s = f'({os.cpu_count()} CPUs, {ram / gb:.1f} GB RAM, {(total - free) / gb:.1f}/{total / gb:.1f} GB disk)' + else: + s = '' + + select_device(newline=False) + print(emojis(f'Setup complete ✅ {s}')) + return display diff --git a/src/目标识别代码/utils/__pycache__/__init__.cpython-36.pyc b/src/目标识别代码/utils/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d6799b0b8506a8afa87100ed1d1f1df73cb6b977 GIT binary patch literal 1051 zcmZ8gPm3Hy6tC*;>F(*>p52+ONkov=QyP+4RP+!;qe~PIhG5pqGSKvNRc3m&x@)Ye zUYVi$G%A7wKfsuS2SE|cO_89Vz)w&|bx8K+#Y@P+S1m4!4ewX={{N}huY_T+dH8gE 
z(?RGDbndg^egjAj00=rlq(dxXe}a!NaqghlU0w_-l{2}Ym$}MQmM>G4^D;QhFnE}6 z2dE2l03`ncP>A3a#s~r-h8x_TR%4mK%r2b9E;@}LI6Q?(pxD-f%xDrpKdN#;3tB3vU0U#2 zF14S_B;za{$5@AwGMcG`(33o)+MP=?ckR7*d!eSh1PhPka?aAla&JH5rGiNNGA-te zCL~qqp^1@CN-k(Zh2TP-nio2K9^eVye}bwVzc zD?~qzHG0QnD(S~8FKKlFobQ;1qKunNiblVG^=TAa!Uc`+0CJ!k@j!RxA}^J;IJ76H zZS7&ZIl~DpsYscyjEh+~0b7W4|6i@*BAX`dU384{EVA&8nJWqj`snd55B_}kDNC$TK_QG ztbg6CzI^=Pv(4wKrqmvnc2xNQ_xx$iWcI=CTYFJzbTV-tF!htExB5 z(0y8!KnQ+-F|cz8Lhf!ry_jO#W z_QBuF!n|DvU~`X=m{!(7hx#<2t$P;ekobd!b*u7Eq7^#m)?sCxY|!=!kLM58LKT5loRT{?85YFE*l=(XGi`IT++eyi>l4&AD|l}CH@%spVF zQ036=s=Gw@t>cDc6~Lh0xVG5ZowDRGEhc@*R5=?Chc93fXkhDJVk|_kugX-goE1uH zkLCOzl{!czNq82I2UtgvWh_yIvZFL%150}|X{N9JtJmjh$_w!EX)0$~JYQ_@B)m|N zYe&ZUEMtVmD&8|$(n-MuBTNV`WC|@@e2I5n;&ThXzkB_re6PQ-hacQpwE9;r^}qb= z3y_;9Bt9Hy^tmZgu-jQ$u<{H<-!N?Y2{%O)>;L)vH~m3RyEBm%N?RPL$Z6AM)NIOd z!U`thOjr;r%z_EnJfye&vnno z-#vfy?fTA-qdCv`(MRupJp6Ff&>RtBejY-Gv$>86uGpCK!$xOl%X0w83akJJ);8{# zFQA9Jwu2oU;!TjxTaFdt9v1I{@3jHmgXQoEp*$g^(KQ{Of_K1A2cP7emKnQXxJv_$ F`!5kb9*zJ2 literal 0 HcmV?d00001 diff --git a/src/目标识别代码/utils/__pycache__/__init__.cpython-39.pyc b/src/目标识别代码/utils/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1298c56c3b46884e01dc5a1a0f5d5904ca4b8bfa GIT binary patch literal 1043 zcmY*YO^X~w7_RE?>6xD0ovb0?2Xo9ooJBzoK{UD~!Gj@~^)d`JJzbTV-mUH$tEyLK z=svBAAR#|s64*HiiePSv1pNj6f;y^0lA9MV!GmvgNZ1tA)AfG8&&%6M6om-R=HY*n zs}4ec8R6`)LAU}}_5cVPA>tB?*k9riCeAH1@RsMpQe{l;Wksg4gk{S_WxNOvQVbE# z&lWsdaQETLe*hFB_yl7F$q>UeZoXD+ndHpQo!ZXb+R44zZG0T&=RxgZRN2IR5!b4H z@b_4lx9b3G?g2p9G|J#{wM^e~_?lRsLzTMu*)xtgO>5(q7}SUAOB_9aX{U z_8LKzPK8f*)@X%DRE1URRYbZghip~tD!PDP$Ze3H+qUnw>TcnXow{3jq({!(0Y(Z{ z4%w}`D|FvFX*gB^4C<8|%boohO^>o-+Lu(7^Wkv#1SWw7w(h0I5)1lynF*THLP_n> zoF8UV2bqjh&XUOh>qt^YQxy|>oTYSNX>Tsg^tJ!-&4rrr0(^Xu$vI0F%iX<{7YcIi z$t0gM8k0mN2PR84Ex4dD6@m+yK?_%&;=QN%;?nQ$-~3Eo>M!l#Tep|3{`HUgpMLlW z$juWH9}P6RW{MQ_3zikMJO|OY44Zz+O%X-=f8P75Kj>+9F0w*tivtxoYr2e@O&LyU 
zK}EuZ1+l^`n1Ibgdgqx{agomA<}F-f^bGh00^{b1TnS1?-Q!HCGGTEJgAxnvDy|Zy zL(^qk%4AAMos`i;#Jp7VQt4JYFJY2pp$1#pKc-^BB^72f#kPT$+c{IaDQit9BU>c7 z*fmW!rKChVx1kY@n&YSNJ-ySzK$8l_ao;V)P7dk4qq7%-K8l9mnF9IMdumT)dySQV% zfFADJ4t8*ew?V#aIaY{!SiAwgHwO3smcx%@!qYfzbWMk6;C1lR!FxF;C8O^f?$Utc F{twMr9uoin literal 0 HcmV?d00001 diff --git a/src/目标识别代码/utils/__pycache__/augmentations.cpython-36.pyc b/src/目标识别代码/utils/__pycache__/augmentations.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..96aab0b2ebef6d37dd0ec4cbd8b5e706be20addf GIT binary patch literal 11192 zcmcIqYmgjQb?$q+XQrp;v9q(QeMy$r!WfM)i?uce99g^4>R}OSWu!&6rv&5PxjnNp zJ1^bt)jrZgrD|kC3PhA2BqUA<;ZejP1*wEec!i`${v-t{ic}Hk6on~pEn6Week3Fz zRLFO3_sp(VIK`jLRG+?mUwt3v@tt$~kxa&V`SU+nzVv2I`>wX$b#*t2F?EmE6Gc;{lJ!L0DyERu8|h}Im}zE<+2&AjsF^F~n#09m zMm-vhd~>8Yg7h1UqvB1)1Lri666u?oNcVVg%o%kKh)hKnSuu2z702nh!bMID<7q;? zS&WELq$b7VVoZ!9H6g;sG?Ogaj%%h|6XyQI8iah5%DUN+#}}1 z{o>U~9TxNA0r487?iEKx0qv*7N%1-{gPi-s0r7fqO61XYHX2V-JR%-N*%7fI-XI=B z>Q#bY(F&)x4qNA&cGa1-*Q!mY<=I}X-I}hfwaS!mtsPq6#X>BIU$}Jc+?mTkro7^m zS4%e>>7qhlmfMZCbUhgiOUJdForY6#9o3Z^q@vX9a(kn|0&^MnQmt)oTmSfx&J)(R z=eGa$#utBY>h<56+y0G(_cXTu`bp-4HtUdrM~;o?;>{61Nyq@j)svNST9FQkYRaYmiC_ z%C&CsYNAU$r+uOsVx%e)nOj_BZ*et}Y<M$NdK5W%RCA-RU$o zpEzqbT<4+b0Bp_G;DUs7It{z*U_^Z8;ec(tsL2U*{_m$j&OCnY`+xDN?|uHyUjFls zZU4c0c7Fe}+b?|b+RHC|@#WsHeg9MMdimX-xJGlk@rKtuHZyx|&8s!sYkP&so#|}; zIC;G4w49BOJaLpp9!_KH!8_SG)FnoU}KC*usUAPo61GT((=H-8?00)fKPh zxb8x`g}1m~F%_1avztx3m{TfZq2262#+BG7=%MX8Me7t~`YE+hl4cbcwN|Aa81;6o z6{OCilJw3<*_J`_%tqPikXC^@UbTp4|IE zG!|zjt+Zum`W!z@9HH0~-^>`?| zTVe15x@9*V$bnTVHQQnhrl}O9OC?fIVTpVYRV9GL zo~7F5Kbb|r0bDMvx5>?Mm2-B$K9n8DrPzmd%7gy#6}$LA8Mm+n1&$E~iCSx*V3y7X z$rh=-Ms3R}7?OB9Fq)3t3Ut>K@(DceTL?Layf@%dTcdIHTqZ`KAB}<-S&iZp5*x=o z(t+%>_V$NiUck7-w7wp)t*=jNqcld`$H6+grN_1F*3-yQqu!zJG4vSrvDzP}oJXcS z2@WII6g`G#bychnYNsR3v^c?TR0*?_mADj|ZQ*X`kcL<@abU>s(k6 zIo{JrTztK%k78!yFkGHqrz_@bpz$8e+BLl!>&CkY2yCKjqP0op+BfP6GTGgvpM)5y 
zxr_LiR!{mSOuU79mMUSiDrk}D=}}o~PzK@MSC+<_(2!UR{F&o6(4$Lkf*$r=jP+5>!xOCyt<m?}%h_8evFp@a$j1ZhX1 zvn}UI==^>Iw57e?`-ncEt7+?tF`%{;r>HMiWl`T;yCGjs?^i9(wd*NW}$IM^E()wfO0- zv1$z7fbuxH?0TJ4v(FS}h%ZXQ+DeB+ob=LM$SHv6_D0Puq#&v*cE^!Rs2J!q;JTxf zO4zdM0+d0qx7S(%NQ{__w253KLdpfoq0@0%qA;!)K)#Xk*h;|GgP4LP0pB_CF(L992K3_O7z1P_4MV(wF@1-?*Erf;HM5}aX#wS1HK zOz{fkP+w6F>S32ve-`C~+roEQl&823ya?0xB`*dolk#EKR0^i0$H8eRZJqFq+f2fg zKtiZ?*q`cWPyf+)qyD40&^UdBH|{}Cep=nr7$M1=^*qheTGi3_W5na&Gz<5nh{JRO z(PDOKkpR#4XND*8L3<42g7l~wsXJ=mX;6ct3BeWgnt_}R`Sg?mf;Mj@y2RBOm!Gb{ zWI{K>G%DW4Zr#D)Kb(sIgtCVjFtj5(?S=|iRDb)o==WK2NMo=tYjEY{yQu?3E+SP( z%T?S0tLD~Pu4lK(j%-kd-mJA`i|Q)Y*MSt3f(3$gNH&7>`NgF(mrEDUFP=GhImnjV zoz08(s&lF>g(HKZQ#VboE7ncgdp}kB3&z6(~1~C;Qfn2V6?PAgeP<77M z8jT>Kl53qHj`&FpozVa#aXg_ecuGap$y%#pi+~GAkXQ+HuZ;%FZO;R^HX2TaB$?c| zyJV>0cvvFHYZE=XGav-UEN1FQppq@!WJejL4k@21$ELZ-Og;+PI)eWclOIHj9SdZ; zzr1~`rIksl=to$eswfHT0E#Lt4lo4?0Tp}{8X0^?x}A6oplpz9!O!B5BL;qq%2Q|o zsY{8`UFE9 zLBe116QHDJu;sIbE?IJ zKkiSEPJ*wkc7W`oKjG)=dCYVIBj@tH`)go~{WjpI(8?avTKPe4)YBw-@J7%ZW(1c4 z7WTMyn_qw6X{<`?9W)jiIrg+a778ZONOZ&j4Uxt@jXSSG=TB<&(Wo{o$o@C&8?!Fw z_t*IO$E@Cy0gp}$`hdR2NU~Lh7P^yze2PyFiCmOFg&7_2C+lMZu82Ps-6!kg(X+Z& zAb<5yk@iP=yq?F79f;P|s%>9P*GlG9RV1Ur3@;WR-~2l8Kvpg_cBzP3j6h` zQ9m8!tG0Xd@9I;Rw641Z(WiYGQ=zR3Ev~b<7l4v;6O2?H?{YgljksvTI&RFKMv%nz z$N(Zz!aFtjQK~q!Zp+S7yQ>#ilyS0BskPviHXL}YfVXW|(wYTHr-fqx*jD(xZNxhj zB$l+M6ax;eb*kKM5%O%|lJBJ=y=iaAw^1sKrA0`pC29htZsA~P>k}S8wd1*gIXL=A z2&XC?$Bm-4fGLjr2z3Ee?$|O&?b&fM=L$>2R~)kBL2SJyFjKY=u%`lcIbgQjptkHv zh~e4AfVGP0HFx){MZO6YLJXdW1_7|W8Ag}b<~}|Q@^|yy`_O^=9Eg@4V`I7nq&*6n zXL-;pALSFc0~vtA3)e>x)`_zTemH63o(D3g$Si83oyAV+Q+l2qN9ry51e;)w>WJFu z52MBemw%6Ul!>M3N2dcOE(IW-16z48CNLH-*X^tVyH^1Y2GJb~uF^B^3V`3%Hhx5= z+k?3T=r-YQ8w9`+eS=AQ`!@hRn4g3K;Y0yeEr5+U9IX(5E9kAB5x4~)w?u4Gs~Fw1 zmxSBaS2{RG3KbTVa0bvIv5KfsPm>ogij;EO5FtX~HT`~Gg;El5@}VGbtfR^S>E6i1+J48?$GuQl~P+cP6OaMDWii)jvS5?UXyO05c72swxG4@L=L{<#=Zl*%e zDLm;wR^;{ZZW{O`gA}f5eD`i3H`?^78pN-MaW)7PhXvj 
z5`Q(P_&s|1<2hwqqo)te?I^w>6L$v+AQN&0mx@QIQ^~PmDCjDP1{{9twi_rz}SF09T=O6ZoqI-8s;;g*Zva`3g(1F#SP=(Oye~9 zX%vxFYkAH2T5W`S}nh5hE1hd&2IUCw4|C&_CmQz?pkZx8btWT^K zB)|-{=329mk}p!(AA%I)>nns&i#VI0yZi*z)SZqS@O8u(S8%N(&bWdLCy945ko+7f z7k1Z?Vi$*)%H4ShMcd)G3ZL#rkA658VdZbm4T2b?kcr9Fq6+loVNt;&$=hrbNCBUK z;G_t5y}_G$Hw6og;4wY`L1>l&WcVCJlV3ni9Q*{kkIy~yDnoG2l-DRE;3r?ugc*K% zasBH!KEP7S8^9M96rK*Z8rJ=d8tOyQqvWH1Npq z2l@N7u#eUh@=rUX!Gb36P#9QRZi~>$0)M+$OO(KocVO< z%tMg_ji8rdoM%C`8tTImm?%W+qmNA{x( z5tnmRgyZ<2`jVzr*(}}p704co(f{HE6GlS~_>45AJOol4+t;eI>9u8fMgAAn(23jw z5)5BfhxnIOAXF^Kc1L~WA@8F;vQM^UZ3__1Zkz@BDyFYG(nHW*Jq0mkPJ)y=iF`|q zLXpLxD>(Ir>F_Sv8&@c_YAq<=oH`DYi~U1!d4vWqP2^QX?guFh;p8IxL`}9SleQj* zl7#yZ@}*lbURST#-MXSltWiQc`2e+f4G}fPqevCgDlS`!S{119PTdlWzrrdXqn&sd z4K&L{1l3gF&w$-BcphLduRI|WKG7JTg55gIES^KXJd;osyP$%$g{<=uwG%f!3^EkX*3MXzQ6?WaJS_DAY0NAa*#QH@#MKPi)WTf z7tfqrL_E9ycxCA{m_se+zKM?vCO-J+$bFV3P3(sc5KN>YiDbHDl86O@3^GqLO(YX$ zoTc=D6!_BF7rA_%GKsu+S70D6ljvGnAWLVL)(tp>hmwu--n10 zCP)4Mp!9!In#mv0?fxK9Dv5TvRFa>hG6jz=Ar-_B5$J3x<5V0|LU02o1Z^Dk0<@{G zt=z!cO{*m@-uJUB&h)>Vv>So7o0cc&MH%&J*bGUf#6dYNWGHD$D#@mYfrzMH(?Qgu z>?$-S2_4A`+o2z|SK+HTI zej{^&#^Wx680tU(hXUl?@I$x^X6V`JTr8Wga#qS5wvKRojJ5kuKVwZ;2Tjv5ck{wj Hdis9>xcN~P literal 0 HcmV?d00001 diff --git a/src/目标识别代码/utils/__pycache__/augmentations.cpython-38.pyc b/src/目标识别代码/utils/__pycache__/augmentations.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..50fe6e1316997e3497fd15c6ef8361425275db64 GIT binary patch literal 11091 zcma)CdyE`OdGD(3nd#|y?(FQ-KC@?z;|7k!-s289mRrBpK40whv0e-}G8x)ay|c4B zFIV?I(}N=IT@)TBjw159gzS(IVjz$ZL_iQi5dVaL6cOnHEk%&P$*dAZ$sZ6xB9Y%$ zJv008U6L8~*Y)_Sx*p%V>g}nNrQsLcZi@LgYufiIv;P-I<`f?H7}GSN6*a*GudyOi zZ(ii;trvCmHi{ATj@DyElZCkPdW>sKt6Rl{D$#4nda9VJr;F)&rkJT`i`n`>aX{4> zwOoC$IEZwlm>1FF&;?Dzg!!x{%xzvAcJj`Uh%f5G5{YM7afI@VTqH#bxl!>3kr7#> z#>Ar{CkBxk7kM%CtX4cC9us3?93@A^8^wgUAF2Dqo5XSPN~Dg7H;V_vtB`tycu>3= z_!HuB@mldPO70f};&ozDq{T7u`tJAR;)FPfy5r(4VoE%M)GGzQsuiZUj#(G$cEy>n 
zS1Wa=;n`lb*_c>dZImhDTD!EebA?C{y>#Wmh1vNaRbFz+E2T9@x9}^iRdY(Nqq=f~L^m~cqq$LFfq4V(QnhIp^osTOzu$VR_1&4uU$1@n_r_oU4>Oft zKl}dL-T(T|3@Yy{ino0C$~R{!o4XI5G(Y)`naVf6p?))E8qWToj?5`M?lAyI!zOFt z24hKdY=j{qn5-e9n5-G3PuFg&_NVEVK_LBK1Suk{vv}P91`wLBl{61zv(4IU>mz~- zt;XBj(|x{d2%Y-Ar85n=h=_{VErwLIpNjQUWJ}I2TU?DKy_4~o$n0cARt!9=`T99+n|0WT)?rM0ht@c6Xx_kzCBMc%yi9vW zYrFv^Ih6dEX>EgPTf?5}<9W?Dp4WbthwUE4OxwnLjCWEC{!ZkuR=K&KX6*xu_4tIO z<@#ylTVTb6qMDX1?YjF@{_w%VT3&@IRHIGVSz%_(9 zrfu_(leyMrZIG>xmf*qo+%9pPV!MJKM5-jHgDCNX-m{>f z&)NY>?Y9g^JB6l&h^70@SyvEmRk%npf*VG?Wb?PwEY4{qWF0 z3*AqmiA&q1u^f9C|CiY95Oc1;Fc0+-B#rcb$OZ0TB}c`+7*izCNbqX_R=6fQ0sHr?s1=Pbz}X?Nav`dIWn}>5WsK zL!q1n2w3&dAcKT(`ty1mC`$5aJjo`Ryoh|48NYXs8PllgGvg1D@tK?SIUZg9fYx(| z*pCanqZ9M^dPUD`f{#G0dHN1L5nuE4g@4DWOw4?19*f-WT(b^-bU-TVC%UPa zpX~5O9sQ>;x`CA=ssVA>q zlXKK03i*vq#oMKi1V*`51*g^>i9-F-+ZTGR&|(TL@jcqmAhJ}kA?71)_*zphCIfwG z742Nx3-mR+roN%bx)`gidL>kb2_S?CAQhw4dTGgByN8In>Xhn;6Wlxi5z#R=&Z0cW zhS^belt09zgYty~Oh61k+Ey{a4ieZrVn;5sAO+qq0QC+BLoiuH+6W2j7AF}FGSa)U zT``L=7Qr4JFv;Q4X{|Ado}>5Hyd1t72P{ANoA_!>jl=Aa7KqGMFiiTLD)3!m1GxwQ z+bT|5a;aLc!f1k-;areaaTNk1R2Xr&K)BZcdRs*s{LI_0^|JF9&ZumnEFIf(l=_D( zCZi}wI?8a{a-M|n?=?VM+UosT^Z}7gJ70{zpj&c^`i+V#>YJ-;@)7#}p4?RGJGF%V zO1;kC1`Ia|pz%>?=`=gRAqX1Cmz5Nw8vIDr_xb-p&_1bNEIE4)+z8dCwyV1ZP`;rW z-eDfNowjXRzixy)-!}c29~sqr(~pj#TfvTK#PK^u2mN^wKMn&PvSVBI49WP}w&feX zg|8UxL_66|wbOpuPojsEpBRP3*3PuE9`|B9kRp&gnNh97xAEm}iSB2^5?~IrbAHYr zz>|eU9q=WdurWm`0Dfbs$ZxCzuMGS$;n;)&urWocDf=)X#mPpm zmO;1I(W^TOpn>(rK}2~7f-DSy*FX$Bz#ioCy{NmxcYhHyCErhJeV0r+d8-F@Cy*+~ z0D8XxB;t6KocPEgl5592Y=XzPxtyTwScG*2uMjE-TM{F91X~~=8;}D$0t*kM7jd5k z76?VZo$IvTTBz%;b4+ z*>U{GnfxePs9tv|h$3$j$O6EpR{KGqOdQqwkxko=x71*qDxuDZ>IIMV6y}TEzVf8E$?GT*03sVcq*8Xm-a`{Y23Z@h12KMr7g$ z*$!Ee1n&Qik=obWfEq*EH!uc38iQ}aR3D(3zeF9rggUGMR8BSvKEjj4lgMgNNa0#D z;$Qagm)g4G%ZFOoI>T$H^b1z!`LlGdxW(*)E3`HnKF%z z%!OiU&psmWs?ZfyREqQoyp#onv_d2y)=R6(k5K_aGjNq^@RiDlphL_@l&qFlLTn}e z2)oy#Dp2Ow2UICeAV_y0(y6q>HPS!_f;waildDaN#~lVhNGDBE&K<3zbt9Yb$nP+U 
zBj|QxQ;q@Ykc^iUA^$q{K!E@Qw(Hib!Wc{mCZOAL5bcJNkJev%L~>$Yq?y*JLlqJqZg+bxSk5d;gtsPqiiG6EJnpl|u(ItnJa1dFq3e1$94cOBGn-7>R z+tij_4GnR2Ibe-qa@E~CQ<2j^2o332cM!0cH^T@K+dQ=ALGIoi{vbMV-wmK8huJV9 zrg@eJ%&{C`n&y&t2YfGOoY9JoM~ znWSpHm~X&HHgQa1kim=?8dS);z711>`G~;43j>64X~8&%!cz(jW@Rd?JOZ&Wpe+#@ z(-w_((u>3I>In)Q6ooODzv{A~>^n3aK2# zmMIblLZs1R5XJ_r20T$=t{CkM8OE5m=4V0yhww3(!%30`Y$XgE?a8@6#wN&515_SS zzrULaC808nb@?<^_W_~|hAYb_6`U&MW0-%CNFslQ$9I#r*S31}les0rN=K5;QQ;c+&XDl+> zOD)8`oPR|}>R3sGxtqj43)eCSr*a(WqwI0^0F2}$4B;e~F9P>KsEI~KlUFX}lZx6& zUi*B7OsR83*PxbO3*QG^nMI*I-^P0~<*@In2PwCJTSmFvE{Qh>PoA}C% zBr@;j{$xgJuWoK7BZmXR>i1qLRk=7tTk;gn2aG;$n0Y!#YKGvf{#gl zr>nT87_HmxN@=P4UJr~7IKhFjso(~5CZ(Y{|A{{PeFBGKe$jBAQJVY&>dCXru_-7C zQa$4;W|P*B>scre#BaF#ciV;XvV2a7=!iz*h(%40eX` zF1=cx3S!|Y)s*tZ4^KT(uWqci?x&ek0F!2Z6hcJ)2x(vyKh zM1Q?s0tw2iN#8~SCj;aj!&y)oEOd(@ExI8fI@7>{D6==j=!JNC7$*3f!rdk~5Y8*y zW$<;F!qQlziB^c$0Z&2W_~bf9{TN_~ZK!gKjz3S)2~cEzz=bGQEhJb#*?Tb%MYXU|?*5xsd>Rv01i+iWYT?s(w1hYpi! zyso!#0SFN+3pYFvUZn#f-1pGrmrxP~RYCProU6hRPc!9p^5XsY^SJsG-mSR#0^F__$zv!RT(MC0ZrmtIs~aWVIs55DH%ij)NgujVA}Rc%5MoJT_|c$e*ss}$?B&MJ48Iu7D6Ai9?i5O_6#hX4v094dqt%%n$! 
zw6!=YB$EyyS9%qr%jz?Gn^QDt>q}@SUqfwPOF&JLPAQ5>6(}urTfL4d4~mOmRk}XAPs(d3ZLEs1>8M=5_}&OQgmYB z{DLHp5d+=p{tu~^G*SpuR@qb^BTS}x>%ELHa`NLo_J4#pbY5L}Ac&PpqFFALgIIxsb-!>cJz(8%0*0K=frJ08$&jvobe8qCns n6WK^QW@W8}IbaROK>!JSvn({kP literal 0 HcmV?d00001 diff --git a/src/目标识别代码/utils/__pycache__/augmentations.cpython-39.pyc b/src/目标识别代码/utils/__pycache__/augmentations.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a61b7d71dae29c3041ecf22ed4f6c4dc439bd70d GIT binary patch literal 11122 zcma)Cd5k1id9PPh-CbRMOixeGx$9-T7~?kfcy?@zF^gxf+4Y*;*=2TE;>uc0O~0C+ zp6P2}RnL`b6lv@ziCI`nQ7-40b}m*BK*Zt@M}njLk;F(*_yZ^rK?G)*B#NR$5JHg1 z?|aqVbF7`DTlLjDzW3@K-~FDQN|_3NuYKXC3zy!hDBq>b-d`M<6L{PsOi{REDV%Y& z!Yn4=s-?=eW@+-RTM_w=R%4dILfm*YrYcOSnpQ%VXq99&Wu>ZVD_!lg`l=Z#Q|-6< zWu0EhRtKyBq$5_2N3Fqg3XgH)S%n*0sx{=~oIxI6)VRqL&oXP6@{1}@@)U9-{1M*A zGf0i{xAQC?Kx&NV_~5gOb%;O8NBI~^4)b^Laeg;ack##gQT}G6j_`N#d-z+BdK15w zzZLl7{Biy^{s2nu=KcIZKEc!c2!E*a{WzcG$53~azl%@thmm?SSFb4f_yPI0BM>IfGIfl;hi>caIz z&@UXoUX zYL6nH-S`nxS~^oUk9e~Ei;AzmsQj)Pws{(}Z0R4=Yi9VKR3=e^waU)^BxCPetjEU% zE!R&X$0jBx9-hC&0)5FA{Pr*8K5_0JH=mtG0c+d_hWzck2d}(prLH&aO4-{eRO)Mv z5Pc}M^bOEVkStbg*DX}+1*hWX)gZc97ggJ{lCq?*SavGh%`+<@sit8IyXyW#ePDdw zrm5(UHqNEzD1fg#GuC20TULbXvlo?Z5Cms!^(vO+x=J*EOWVxZl{I_AE!d4lW#ipv z?TYI>Fditgw^TX6ASRqf#V$Ix)Ign>4A}ZDMI1)wKRg34^Z2zNeCdaJs%r@-NtOQVs|tdZ zu;&u`eu=%HUe~XL#4J}fuL(j8STSG=2N&EtUMag?5qtZ@F(8RI0c^hQj>QCRR`yY8 zuT?nGhtVt_5#x9TQ4lRiEKqBWAYuz5K5etyK=u7pt(L1nG?WM-Cu%ik?clIL2c1u% ziA!6huq=B3|JT{+5OX%q#3b-QYiM1ws}A_VEEKAB-h{#`1j#~y1XWlg9s;U(f&g*- z9zb*`=!=7RTv}~IHHKx$*?mMrRZ%>Wh-kk&WtsVsXnfcmH?RqL6{8Ab<(d>V5P<6Z z*&tpcAy_GII(c0jLc2h(I(9A4T#t)W$R8Zc8I-*pk4x;PsDkoEB8BdcMnWVlM?!P* zAP=|L8ReNts2dP45wEX>0G3(N+ws8*3S8ZIJr*^<}7OuxZ?n zhm49IVnfPy+&9{+WqQiCDQm#or~%8iwN724R~NsdE{Sg^@Fe|+Z?<7DAQkl!om9+E zw$()q{iiUxh)*zuwKmC^kjghPyYn;rgygz&n7I@LJ5XT?v}}ZX8x3dYWt8vyi~wyt z_Cw>Ymrskw2>%@bL9AITlY$7e;_6foaT`#?FxXbT(`2&+sneG(T$(GKIyX1<E-~4h>ox^7FDxs 
zh#h8!)%%!OMEU$aCLjjrvEw!}*gRrKRb)U4YE1{!+A0`=2@7dGB&-`M$#0O6){*Oy zS-8Fk_Gp7i4wfEQY7^)=dS}fW;mdKr-V?uxug2s!j5cY2$ZQG2q~D3WxiG*`tV^!;7ADOGoB3H=qi zoxc|tZW2IIN2G?HR3QlJ$QPv)qZ<53&G-2KUeG?NT`W0!4%`T}Cbz4z1yH=93f^WO zxSh6bS-Yl(Jl`_>m>(HYe8Z29pj*xkDa7&HdK>+D5kC&I9kOFf_6*7Ra?A8}-^5q+ zR-%<`rCMn}?I+Pg%1?|aP}Y5|jHi0BZAcMFp1u*Kt#0AVof6H@geAc2Z)N?g-;XB) ziQ4b?4J#!b{SLHpt-;n1h%o7=A%PQqs$)bbe$F34$$&pdCC3#nzHCCmG2aB~k(n_7 zj6r{>6oG>vosbs0zC_wZ8V}J~F^8oKr3abeA!Wc`x7W*VJ^?OWvKx-DP!(upn0OCT zDrSq43;PUmz22;G7-d9MBuKjkhJw<3xT#lV10^G0P4b$r7o-iuL%4i;n;)&us%hpDf=KH#mO=+7eTkz z(5pKFpn&zqVCLl@2r@7P?uQt-huy1+Uq#(5b>|mBQ{qFE)^-G5;=^6AGmca-2GITW zBSGNRut$n`LYy<#AGVz{*5=Tj~L z>pFfT)`A#-O2&v5p+q4m%?`O^w>mU0Ej@^KDv;m;Qc#d7(7GcU^@)Ev;JXR{us^yySs5{978q7v1W?{}c#3e!U{OtV9T;am` z*_o4bLAqFPY+STgoKtnd9TD`Mx-@qN)u%7bUYS2RJD=|lm1Z2qZ9(#45RqXNaA?!3 zTX7dwt8=zosRS{ZY&L=@LM&x;MgtTJG$@!~aGxyMNwe0ld7yIe5?=~5uZ{)_bxmEP&P5>xs8jh5q9fe>vHG|#9D0NsmS{XL38q830;IgCm zk1_GbXd!#uq0otVfdDNzJZZTf1d7B_-5=SsJ)!<{FixdVXGHZJkgGWeXwV&rdZICd zV5iiC52kV*;yux=PoNH5m*A1(y`FkyQHaiN3o^;TUkUJLTLo>#P`AIt*ng5 z$tp-wj%%bs2qNC1w)(uta@0@rxZg*Ho1X$#sLzqG?tT$8yKTsRqUeWs6Z`=qGI5A( zhYU{w_kYJo?dh#wjv?*q7y}@U!8c*5_tVT@rw(629aaD;CzAmm;Ys32WE3c*aIG2f zult9cHSXrJ-5IQC+h$t_SSnT zpW9Z$;0>TR%x6m@g?n7Nsb0V5CaXO~BcV}~Q_vX+1)4C9lEQN@K{t#l+qn*gioNQ7 z@%2ZkiOGW)|Nl|S?+k(_N4kAr3`5}x;+HfiTPul_vrR;CokWN z;Ir)<$G)}Iaag zn5Y!#V|Ynx32BK)0;`*rl^>-7gkaznRp1vD5hI64k4U*JE{E7X)zj@!7D&_UhQz=b z1L+CGGNneiO4?>$@P_POa;sf>h5!({NmCqiTWKqu*d@I0Ta01`nqAuvqd+1#d~@$3oQ>Dp+S@ujNS|O)yY$ zyt#UKK5@~8GF+cJgW!nmk-kHOL~WGCpAurb-P!u>+&WU2ZMkZ$skN7pJ(yI2l_<2gDF&TuTYP`%* z>*f!Isy*hxnsz)lFnWg}G47OvAcV$utTPB$*&AV0iESL9ogjNB zwR{*IxE}yel0$3=5!M{b0cKeiFsW2!iah#Zs47TNlwvOGRbo= zDNbo4S{4{4dkUC2_K1dvoi+)K5mkI0-zX9xc7J3;kVPR4@oQiw4=MvH0ZJLFXigfn zb1(vWaUBYe-9=It*(A2P!%l*@(MA?L6}hm>0}P)CQg#APiCy21OL! 
zRcK^O^IGN+xP{Se^2n&NsJD_{9KKpt;NVCp6jm_nQ&9i06~v0#ig*emNl0G};avn> zlP|Fn1QH@ga2I@aJ1Og7u6KkU9D}eQe5qRoVJIscTYP=NGq!EsIyAQL>w9zk) zj!ZAL+~1DibmJ+cvJig;PatrSMvDO$D6|^zUb(TNxBAGK#=I53FBFsrHPJnn?d!OOkqOR!dNqmjK8i92Jj{wMk6HM`dEeEU;@|6a*hHqTA zrHS9Hc#@G0@{CD_exZR_ne(scNFG2bFp-n^XW))z;fRhQeV9GY?tx*Qgz=nI#rJ`` zFFZvfqsdEZe^OFA$$ej4A@l1D(KV>08~4DW0#q;@IWj)*W@IvRiUmYvDuxtXNR-8q zf{_S=I^st_`Tx?2BH)CGDI8MG2lf_D#^Ba=%k*BE;pvl&8s{+oIr`=_e88fGbC|4nrVDhN8IY zM|t0hF0OjUGDT%ga`j3&awZH;@S2B6NskLP-rwWGG`KF!vnvXj2iLFq43s_4aSo*} zChb(hgWumxcM^XwEmd77_o-&X;?eOb&r{U4c0FGqCf1+wZ;1vR-$U^3moGO_V*C{Z-#Y)`s`z){>{A+~&`F6%Hadj_VyfXG0g<^5 zl7kvR6hS2~ir8XXqgXHe4qzh6i-;}+E_rA%rkE8pg-8a&-#PZF$Z;C4KjUQ;$^3>&?d9G;{KeY37F^K*SHJKa$5g z9N_jBf8`s$@#(++o#REKz3wl81m)$VZz6${1ahb0Xh`i9y3vpnosbcofk0+R)43Z| zB1ao8RR_ZWk5hQu0QbS!g?kO24pUefleEwZ@j2iLXd9ng&8Qy(46$`tZqiZd2|6)~ z>Q`Ab_b2BIieo7H?me46V148V%6Es;@Z;W&cc;$;*Dn$vItbq%q_B|K}0 zFD~K1A>w{Q5`PKg{O(#(q~b7>r90oDDzy||S$KI`o(Y!1IM6qz_r)oQ)O!X<1-*G# zRu~}hn`|>HZ-U@>hz_4BYE^6Dnh+vhChmbC97_jExD^66LrD}=1+`Ccu@Xc4%@Ehf z>-Xa?;(}0k3*-8?a9)7L6ss^VOb9&AJPUxBvA_(DWb$H{%H{k;(+wA?=WF0q6ZnBNZ-rbYOP%?m$Mch6~%iAZN zS^Mb&w@=a^Ov~FRc=v9f^v?{;4+LU=gRFAXkza^ws349D- zbArNW`))G1<8Uy>i%lWmw^cUAYjtnjZ8qp0g2P3cS`tO#8wTjGKu#J6v)3)sGlX-N zKn~yzH9#^)I&DXh_`ffoh(YRUn7|u3aF;=Bq2#|`-jFFTZWQ+4?bxFVy0=JRZ;|8^ zcLP{M2hbZEUR@NI1f3tsdm(}b8}!e~v-mj~!sMq#y&>;xhlN zYZEQ^Q3&$8=Lz>CLxL5L+NKm7mNw;}ndCd-!DVTyC1@nv(fByf;g{27=W&RBAKFCEqF#Unv^m{jn+^5l3_a;Ff z(qOUo(D!>Fk4p;Be$P|k4S@Nx^Wr0TW1zd;{|VK;&;v3n4JjE*VX`+2iaJHifb^UA zQ4jk+sgwyi*v{V*#0mvoFBS^ovxGqUHuMN0@Y)+2QiE7SlDbzBw6Ej%71ou!73Buz zZd$IQb@%HdPIPZO+LgfEO$*YfK}ufWnjk(CIKWnE1R4R!Zp8gESYUPVstzI(MOT_m zB1PK+;TK*RkykV1bq9GsEzdjU8C4iW|2lQ`eF9|HgsPu3nItMfo3UqwqSmfNrzuVNj>ebS*ueiKJs@#!MLf=25W8-v5uy511q7A;T~Y71ycu J{eNuU_dhr`MX~?@ literal 0 HcmV?d00001 diff --git a/src/目标识别代码/utils/__pycache__/autoanchor.cpython-36.pyc b/src/目标识别代码/utils/__pycache__/autoanchor.cpython-36.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..27ecdf948770437157c79640e54d9fa450de2c4a GIT binary patch literal 6527 zcmb7ITZ|k@R*i_vdiJaO{jl3k&)73AZ@cYr&kjScpEG`FVC0$Y^{%~5k7}|jvZ|}P zDzhA!)$XpzLWr_)SfT3Trrp_R}G0p~_m zb-T^Z2icubk&zKMZbaOB&$+oYG?ab*qyJj}kE4wJD|_KeqJ9;(zkx!sRVKLgA zR|n;|%*j#IhNSow%TKh%ve%k{cg-zsc*<%9m71U3g9hw&o=0YIU)J*?O1}TWty?!% zqIA)#dCCt|Yw%#uvQ^|X0GHXj-^H)rLJ_hmt3tO9uL{zrYRF`Vw|U5AYL=lCvuuX7 z1^SsXjTZWi+l)}Ctx!}G9Tpl?ZW?MUNLEup`YDsfGtuEZmYqsGB;Ed>Fg1a$Kf8wF#`4-1 zfBl1h`_W%K|I0u6{EvTg?+<_Y`P1*OJ%9Q)&pV%f@q^!Z-u=_H9c=a1*U!GScwtSS z%9@R1igj9S>_lV54X3zSh#LxeHDeI$529dVhUfSM{&HMR;rVlFWGr`e@7DxlMLH(B za=2*_t-$|JY*au;!aG7ug}h2fHb2YUH0nYgn`Lb?G_cLa1Z}%P2PF+@?usL2Ree37OtC+Q}eO&Eoxl%*gCR77T`oFsWq>b;Nbn;q+G>?yx8PF?-B0 zYCsM?Ot({^Sskh7P>xo|&|_R4$Bd$5w9~q60)~;EVC_tp3NzKo4nq%1LS~1riFUR< zAcupgrz{+hBTr443$t<*zhlpm?LpMXH_gwuGK1;rOgIRGnura`{po(oWY5HC{@D(b zLvyS>v@;XTg)AHjjV_Z@a$3&pCfk|2%zfKr!I8}@WWwUw!<@C|ND^rvXT#yA?3qCZ zdzXDzn`wLaSBLL0SKMQyeMh@Pdn6nQvs$lzracQY@!I9wuBaZRI$S4%wPNMf`S_X2 zzS#%ILO2?n!ZcZRG)&Jk!3v{VgCoyGd(h45`cbOKtEArASx(%Jl66luYmQ3581BIL z)0>Xdkd?Y${)fM(>&I8h-}^nf{{4#fDGCpMrRD59KUopE(znQ(PNOKpPCl}5790*P zVl63nJ%VMB^G~qf5%*2Fs&RXd2Jg|-+DGz6Bo@y{+>dxoH^jvx-hY|BSgZI!(W^Hu zpTM&ZgT}P26EFIMzj(3c744dT8I^wTeML!0a)EIgzJ;R8d&)7b7`9}V!KitF*hLNz z@*}Jx5XWFz(tsmiIdBP>t$sqJlDvx%n`YI((I7&@ToJLzVGg~awgk-IEK%$=Z|lw8 zqZPzknv4>Gr-~bqQLnft?CmH~s(E%08Gf^_GU%93Mtn2kD&pIbx$gLQWz+lhhC-k= zf(;dk2O9_bg>!Wft%|SGVs&ufGklH@LUH4pv)Ui%`knXv5ZP{uzpr>QOWt^IIW^u=^*J#qF?^wZx zgR%!6R>h?TtJDmd%E>qH?aR|jL!_#dD-gLqVBNto6b9L@ zHmsLN=3jy^{g^)VR$S5|N zYM5Ga9!w4CS8lWJ)S{tHLf0Oc`BCP^cG1y$9i@VuhEuS8oDz^16^&>4T$Jh=ih7%b z>C;T$aQ&7dR8zmGrTef%N1dkzQZ81E>`*OGaU2C!Z%5o&O<}VOaH2T=2rW^aqh1ts zqM<{M0`A8AH&HNQ!I6&gGh#SRPDQSj&Ie~k~KWe$I7v<-?ZS07;9m*b1) zXa(nrUeSBw!rR(}_Q=8a$XxFfDM-;ZgPUjr?V10N2o;+Cu{v*q_64G9v{TaP8b{a@ zV6jX*BoyV3jmKsQR7<9SqiMN7xcXS2HBm}v6wb6Wff=$O0j&SsAQ>{k++Fc$uAL21 z)pVEz(hS@M=6#c`uzNJ;a4l2Mwg-VOgM_HS_cZ=;z<1UqJ}{sWeV{rBPLP(PniUK| 
zzadc1aiIP%Fq2hB`iTD|(M`ySO;*iG;3AL>m>CWOJy|eT9SyrfLG+Y7!~F^d{balNTY!BH-Ymr8t4CxGy22avs&L{F<-!% z=fYI=2;?1~(Cnsqlzds}Npqkz@x*+R0OIH5EArJ{(awcw znmf!@=jBOxs${O2;54J{vGz!CEI3X`FJF_dqv!bcpNHeu*++lYp1|BzNGxG;`+7Jj zjZINK(P5tnRfR?dy$Q!QSzr)M;k;7SSHjWXrW)2OjcHbWRi2g;9nqfrOx!2O+e6`^KOo1%V#LBpKhi!)$YUwdjkGs5x0sW1TzW@v38 z?2SX8`!kS@=MD!9p0(eBSzGTa2VupxbSUu8I$K_C%aQ%~XO)y!w*qBXT#A4_=g(%X z-gQlt5lP&@U*2z`H@$w#R>MZf?7@rnwFLxF%eDZn60`K>^~KSzyKEskts@wFrM?=6 zg!i#z8DL|V9gA>VJRE1f{3pif5WRuYXc!qHEd-A zg)uuGf93Ta&ktVz=>N{JU`eNBH*3MAHy6(znhx-`Y@u-U6s}J@5N~XML6AoKNqvG! z(Xwl0;7YJjKP09Hkl004A@ByeLsv}b9~{7n1B7!A@BZd;?=*Xp?w{ttuy6a=xx=IM zN$Zqn@gU_PJoi*!o%x1Uur9@7`*?9WueI&7(qwEwnp+iNS^DZ{rkb2yM-zA*c-|w> z(nFk2XwC;d7b2}W#Q6*)KgI#;dy&Cuo)k}SXv9<}F@pxzys3x^$4HW=+8HV+q91TH zjZmv-@^#cF6cMfYLzs9#Xk%)&Pi6@hh&O98OEB0)76;l&C6=)(x}qcc96S_V9+Omf zwArCFSx}yzkOZ^&M6CQ}NQqLKcP?Fsp!;HuyyJy7It{do&BRDHbGML3V(`OIx(ho4QfliLd+LtH+IbRj86fU-j7W`PL02CX~Db*I=buy*qP_Yn`d z`mx5|Bn)%x5i%s5%h4H)$B9DAM2xr;lX0<3?EB49YuchDz-si_x6xy`Sf{%A=4QK5&PK4@{`;IDQlhl3L88m zAWGu%jtAO~e85o(AHK*}#@`2Iyhj1wzvjyKHUCB0M#zK&EM+&#ghP>ug9!3m&0+pS z&Ppz>qtO2|MOH*Q)jSpCXmGz{R3{FrEGk}#DU=-V;NDM=zhcP;333^|aiy=ePT#$@ za{Im8x0bCN+ktXG(RVDrPI2E`uVP(35Dte3OE3u>TgR+8571fKMQh1wRJM^8Q1(Nh z*b~gIw#HYQt_E|lz7CpfS9}M=?mof`1xOs? 
z9dx^m9e)+Sio&D%Hj2E6kKpG-^e;0P2~xKk>hf=)r#=IdwGQLfK}_H8d=rzABfR|0 z<%d^V)+e>Xx~b65M&9Zn|Fu5yFH?R^Q5!I;Lrk9E$-lOm^NW?n&SJ6F^l=!ATlWT z_YHSPy-tsWbvkd1BZ*Oi{NZc9?W7uH4T1mQcZ_1 z+LFj{kwxC6wvXtMm!b?Uwje9T;9#i{uf)DfVIUIzMkMN(tbr3$-_hf?BC%PHM5!G6 zd-CT+z!-SfXtGHvZlbu3+b4@7)TcCM=K`6W3V{*(H1+m>vNmZD_I-Z)(SSh6L>yN2x0+E~k_Gj2}x42PWQ z9#!{H(zquGmQplA!q~|zs|Xk#0<5r$>@CS+ksN{?a!5|O^(jaq*aQ~X1i=D<5hVFu z^$e-;t`BLlYpScOUcFcK>V4nqZ;y;*8Lp>4{qNQPJjB?)(!;Y?5)W_S_SaBIw#+1# zqQ;lG{uRrH{xz3P{hL@$;Mb@n>&fLLr@m$_RZlOc>zU;YXHG^Yw)k>ZCS~d#TON|* zGAoDf@#SGTA@gz!wGk=aXNAeuc=l2=@GiNfHBVX1pjz{@yU>7LFYw3=ZpnH{)&Qj{)vHBb40Y7OrXTC_@>2J|iyzjVKe+i#%=**fdOiXGk+x`ZV=yv;+j%`uc> zj?J>RKtI@q7W$3bj8Le}P;?VJEHtRxFw|y{?52YBBPNZsd&bt{$~Tk9$5s6sYM#m|0ZHk2nLv+me#WNucS+ffq6R;3e} zezoE%19Bp>>XyAI`QFl%t8d?o%yO;TD3}qiN8FBtq}zYO)C9Wz;u4B0iz}c1^^gDU zr+@MEFaPYbAN>CA&QCsj^ye#2AN|eKtxrDx@$WtD{BY$iR(12WlV4vrv!XX(#m0`r zIxRHrM&qS5r?gRw8;V-#aR~N@Q7|#f^L!G2d9J4L{24VemVaaScLif5+KK*^M}>Tl ztnmK}YZB1v^DUvKL%z<}1+BW9MqSt1W@unVjY(Qz1AAgfQzo{>LDo(LiS^`qio7dK ztfysq4!cnnGP7;8lR>7N4HDfU?Qtv^4ijNg%NW@bZ?P@xJv;cE9VL5Uy z-A;w(`e-+ga%_DZJtm~UjK+@9PE%WV5D)XGZp)e`|o=#(>-sZx#o75%pG9u zk=AT*AY|c4XmpsIk+X7cJK4^Bz}(kO798BjLMEK6o#PBWA)7Ri2g2MV_Shib{D8d% zPi*IYCwG&5hq>Y=Bki|1typ_B91XKtuYV3Q>)9|9+b$1oi|!$+pdc$!VvRd~)`^SHv>t`1$-~SW3{^O!PLlhGHYRefo@Z?Suyxzy{A4gGv!+dCA zQ#cN_h}ET_bzhX>U?HDme0Z5gEanip1@;XI3p)cM!lzFJf#QFZe7!z;n1KdF$|3tXw=sy^KK-&wgPn zoGG`)dO_1a71*lc1oIZ!&z4(T1ogmK>~p%{qShy`{o}RayDuZ+~%o z<>wchCn4<3rc#a@^d(ugaxB}bc%HP%)xZioOWJ|$JAw5te|o=p86z&~K<)Q>HatMo zmwSG{`}4p0WZtraz^OL^itdn8ts|0eItvR6yPw0wt`-szccXN*UMZ6Qs#nQ0qvE(x z3Yf6!`A#c!WohZ%r9}%qo|MjJwd6FvgRn%!`Gb%NT2+lZ3NcBNO3Qcbs7 z_EbGe=pKqrZj=lh*Y{L3RPx*sY;m`RZ%vrU*qM0cWMdq%KmCp{7Ks zhUJQkQlqIxsFjYlBAkj6ZnN&xqLB>(*B+MnQRd2>lB3r(N(FZtPSN(UBLH4hG@KRk zQL5(@iqJIPxdb-SZ>dpgAUA;x@H$6*of=5FSg|uydxZ+}e^_rv+*wXxVT(90@hLe= zuXTVK88{7~Xv`-!VZy>`ImAzhT$)Y}ou4cp7kShLp2qo0d>AbU@Rvs0u*h;nJN?DP z!YNYyRLLuOubg>Z`^he?lUhV^qeP3>zi7+o6)l+m_lOCa<*_<%gWUyU-Dszz(J>CP 
zhZ=?N5qk0m#sjl#XpWa|r{&iOPag=hCdvt!k=c8hb|x@G79@c2e;g!3CWqwkw)iC9 z&IYM&I?MuLh6tm6lP$5EH0MYyQ_r@Cfhxm5(j0Id=q^Wr>#RdOU`QkR(E2d2IW6;= z4U7b$J0i#A_&pZnfR${0w2$-;6P<*d*kIi}keqkN5CL;x4yeh3@$N)03ACIfw3Ji9 zR0oU|wdu!6!l@38oY_dkBj?ydvsK$0Gusj9IfpSoK-3QO#)!COgMZ5XtHJd5c*pF{ zXtp^TCOc+0dXRO5JSY!sBQgVuM!_fY9gGU|TYN)cR7j(M?^l8Evl`$3jx+kh+;duA z;3??`n-7Gk?m@^qGRZWjIYjjx(P1PtMtk|3wC=Hv*-4Z&_gN7S&4&pfeO^81Xk$=&IKIIGgRlsDm0Eux9Q#A6!B%O^uck`Su@O!bkA(@$KTTGJ z@H7r>Zt15j4h}wVUxqtdZz>0|#JBYM_fI;TUTxEn{Qzf`l~=a{WmjDaalI3t&04+d zlBytlxPia8*FWUjFcxXPCF7Q?{G6;QXr#rw673el1!k96g2W(+b23+cyZ(Xg#SM}z&CWog#P{pEIEKMcX0JL7JIwdn{;nC_lJGk$I1%Q58_vKw2;r@fr0R6$@0HqJk*pKKIgS zw2UTS2YLby(KT@r*Yi}c zFxT<@kI+RkaR&5j9s3I72FKhO*tHZSP~J*Y_&-BI_JMe_cpr8UrU5;8MyMxiMH_q6 zF=AWEQds`XQ%6ivm@+9v1u)>9G>`_GAyQZTCc}h&PU*L44I`NjL(eQoJ*3GY^csd2 z^y?!b=swLbr`J`#&pn=O156Q)k0KdJBJoTj3CPbOxz#o3yoJPen*ADUCvW{Ip(RiE$v`mDD^Xhe&-x7<&tY0m+W-KE3R-@0Xm1frSR%?9kbT@2t zz|d?5Tl%!^{trH$6f){f48}(iRRJ{&6z$l)?`WPEC6w26<09ks+UfmM6piSpRP040i5@sQKdRtS6|6I&3d1DO1m z4k{gS9eDmf39veXQL(9%(tbI997Ee(+|Q)CUt!^Xs#`&STreBZVFIt6!cbscv2fHCYD_S_@{Y#p&;hOcw2bJkg_QN07gPdN-BUr#W5qcySA zbTvwAWk`&&TElxj>A6-b3$F!~_>_Nc$brU& zo>*X6oT)&AYM_(RfmNr_PvOz0nY8wlc#k(RUW_D0HtFM2$2x){pt+Q8%HqoN>GfbTf zUhBd8r9QkbQkG2d8Ca@pj4bdQg_oA|eyQ5HyHKh%eQd_U<}LMI(vgn0BEXEYRnP@L zfN&&n*q6xk8px4N%5bBMZ`=)(7P}ayG6`MYw=c_^j3chAe7gMv7Y~#U`Nm&r{L*x@mI+UB+jSR UFiqGF?bFDs^CE4ev)SYS2N!LUdH?_b literal 0 HcmV?d00001 diff --git a/src/目标识别代码/utils/__pycache__/autoanchor.cpython-39.pyc b/src/目标识别代码/utils/__pycache__/autoanchor.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c4424ee78aae185c511a9277ef57f13a4d5ecd30 GIT binary patch literal 6492 zcmb7IU2q)7ah{po`^DekhakW|Eh$mL(I5ztvh2Y86-hC&RkQ*zkp)9_Ufk>*4!GMr z&hCN4Y0s4k$naPRBIYq>SaotC^C=wEZm)W3L`x|UhWaOPxWVv8?jWm2Z@v!ww!CbM$j zK3^J?<1!~l(HfHCJ(iznj%BYl0`IC@T=kUI2r5-Sy9)`}tvrv+;Et^2MU;H!n>TOX zSd7v|uj(m3P|d;pNefny(}eD2;+O7gxcw$b$ktc~TI}$S&=i{N@D>lzH_brBG@D{A 
zfpO3cJ@gy*8KF=cq39%bSZEMkH`GRu?4*M96DEzPVu$mXe@3EBD@pu45Enq9oq22b zx40C^*PL8*HXP+w+_GiI+ImVlDxZx~@v|S9b>+#(tU0zDnHv@7Zj^+w73oB#Un#rF z0H4ULxFs)2zPotit+&4wnWbu_o;M?2i?|&LNw@!usc{Vb^;M7?3(KGW^hf{olYe~v zPk;Bx_y1yd=f|Hs`G@7_Pk#1%>*G&<^ykmpKUm(xu5Nt!^c!>Mmh}NF+c=R}rn&lN zG*(=7itB~Aqo9Qz17m*>go!Dh;}iJHadiOCpHL@bxhuPWDi|x$N%XE98f1f{h5s4$ zB%s~rTS85Se2uRO+I2ULwr;h|(7=uw6STty&cu+WOl*t8td$57Yss|~Syz}?OUv{$ zPNO7bX4_~bgG?tIBsv4y;#e>kCc>oVF|;MFvn`xGEMa(uJ>n19LylR4a_D}#l?u(Z zkxmYDbZrbH#-+fD#*WcSQ(tESHjtiRtxT8-Go1rFOph4e5nFs&w6d)MITB1hVc~$x zJu&5In3ZGr9e;1f!KE3|Jm!A>{=78HNC^+@FCV3YEA`* zLKY5%Mw`ila!O8bCtI2KnY&=J;P7L<8F1Db=B#NEA6j1?3WuMtrv_Q(`|JbQVQcui z!)`YIooDlmtSpJbpdbnhIEpNaNtJUAAb zTjT+z$<{{0^id{QVN^?T?;mO6^!rl1h8Vv0G8H=E+9YQ3;sCE_ul9XFN); zJ5F6zYW^qx_OEpP;&SD9&UdIqzse!(895BxV4DI zrLgrtlwe{ZpJ0D2?wIefXU45vn!HOgXqS0rL}G3};(o-dx+5kNcpqgiR4aZ^^lJ4> z$MO6t+^l*0Md$G6E>yjuUG*=a(Hq+*Dv3#pk$d_ZAZ^|ik7?1cJJSpn&I80O@{W)n zW;+7$3-Lu7I5Q{|$1(kc2qk$NGuF+Hfm1>RgY6~fnxoy2>7NN~ zRd#||3;pLy&7;G?0(gndGp=H{G3%@{`g;hJ+u7F%Z<}ucCDe5 z;|4uWmaQDiw#uF-tx_eh0?(3mVEayB{rqnqG~UFFt2#{k-H~+<0QLEi-|PJHpFf_p z>>zMz^?+hK_*80$;~UQ0+}!S`Ft4}riHN&Vx>73_$adA2Nj0PFxKRp-u;TenGj(He z@tcba7VJAIosCM-X?z2tu6wRS;#e~txO=W$U9cK;$mv+{9;a%Zn%lH%Ra&&+J65pj zfOgTus<_l)l^Q`qIr+x7_V{U`!BSPq6|mf!u;M@sg-Nz+xz62Q|EvXdijE^my}OlQ z)%yI*eEy{K-yoCshS4Gm`bT$Jh>g(8E9PcDIj z^qXpgI<9H%KCI4BU!e{XE*9)OwayWF83fwf5qFkS*w_LbCiaro>9r0nLw(+W1daLh zk_ijma)h4}!)fvy@}Ddp6FIa6o^t#YA4Ja~{H4)1D6(9AfO(&d&z&LB&lJ6)clO*@ zwVmwJKB=#P+%D4Q^)5OxdPN)NM~DeJ$740#LT(p`HKUc1M%y^d9%&4|PuR&H8V}8q zp)Vn_od6oMP7c`4JEMq-!{IQnc_J9=j0Y2h&4i$G zGC0si4vW^or%A%CHqD$`PsB5)*(0-A-J3Jr7KFW+14KmYaCeS~d)E15?k|0v{TXkY zok^X4j)cj!8IBxgZ6S}yquYqqfTj^-7`ZlPg}E)hE->pc%>v?I2jU+D;{QjyJ6L&2 zOM5KN8=&!YnCct?x0fcE&T$S?dq=bx@%(_fVxCD$9>34pW;;>RxzMtBWIjp&_jB@u ze0f{6a$%ZQ4s&ZqH81Ln)@->Vc567>w55M1z{lJNjvhBPTy?zX=1kt|>lvlF?WmjAZdfgb1 z&05{-swyL(xPia0*F|r-;})#CjVReg6zzGlh?$mc0Z%1X>GA8aqhEKyLXcWP{Pjvb zG5VPI;9~*!VwW9@poF46;0wHavi3~@A}@4#oz@Z5y1QyA8%Tm6uN;5Xt4eDXkx$ia 
zWdnGzIv&6MS~u4ZUi66xF&)k05LlEw%z&iCct6*J>+4f*#Ca4-xm%*Ek95OI-;Z_EJ|`u8jh24Mi^~q~8{8u_byrcPQF? zU8kfINJ@987#6pX-zOviZ9Wkz@Q2_MrF52g{#*?1I?pt;IuS3Zvlsz&HMCON6sxEe z)37<*wN&I~uH*aP!4O~9oWR58$uCf@f6nc`Q%ivZ)v>gM{|gY(52-kd&tV5a8o+~R z1bWg|v~WgkBi5BPh2}3jwZ#O5DU(W8zyjV$17)EZqLjsNGEC^_lzyAmAd+b_jLag< z2RAu@QG>98UVA7+0YEEE>wVSlho4Tg0Ivwgub@0gq7Y4@EXYlx*wrn_yoEw{l6{4> zl6QU>a`mYO;3VvE^Z_cO_%jC1Xe=%ZnkK@-MfC?zzbO_d*M7a!JZMqMZ`FIbw$i!w ztkoRb^X|Hh0hpR?3io1jU>0r^r@R-NL$g+FQw8|*MYRCdpYb!z1E(n?J42k2W@37Q zR&J(r_kz_-ovh5_-_qW;`x|^r$!8Qf6F!=#GLpqW(TVNbj?Vm|gz_4$+)n`Wsn|g- z(^%~rWLWAvk-ZcRR(X*+w6R^FR-CBcU7g?eqG(7*r9w9%NeuJJ4;f0DVg4#V#>H>V zy*rIIZjt*<+DP*g+~i}t`N|iVWgp`xi4Q&=@^%yij#BvQMIXe*-vNNUP2u0a>dJR? zQjEHda0;KvQg)+Es1%tvn?N0cRKMfFKJCkdq9=%=L5@0^leiAt02we zU!dN8MO?-(TMiJjzS%VHfunh+ZLof*zo5!rU z5YQ#r1?#+3uiQgjK&1}BVV5zx(i~rGxEi{(FvLb#&A~lG^<1l&g((AHd@9pOKWwjA z0RlF^x+j#6YNxnLh=tE^NEgZ#WDFWGx@-Y(aX|wKDuJ$9`*xi0fMTXkH!Xti5|dQi zBW@A}7Ho?@L85yH5tL%2`VfO|^#VM_M^ym9ixbrTJz+h{Tp&2TS67$*l7{Fr=tr#m zu)a6+tvpxS8K~0eeq#CY<)-yfwXkBV)<+-(XXjU1*rY7p6{vQzgYtQO z{rhEbBPVE{zd?BvZCI}e)%D`dF5X}5;r#*?(iE@(u4)A{^Za)H|6P0!QCdE#ehjY{u{@{MAU*uvi@@roN-+ZA4r7J3Dk_$1h?gCLNL5*79BUPt^&7q0o?C{-v( zuUII=HbJ$0eKR(aJWZg_YVj!y#bywvq}BLnO?s<84-m}jT3N6ELQo^@kE`@o ZB|21}G7i&(?vOr>QadLQ)w9`|{{w$Wl}7*o literal 0 HcmV?d00001 diff --git a/src/目标识别代码/utils/__pycache__/autobatch.cpython-36.pyc b/src/目标识别代码/utils/__pycache__/autobatch.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f88985e8ab128a11993ee76bc0027c67834b87b8 GIT binary patch literal 2199 zcmY*aTZm_!2-qR(p{d=M1DyeSeCe}MjhdQ_KWz*k>_51y*oOYEY~sav1(om6ajnJRynNNrDIq2~1iJI@c!n zehnjLFq5MR?xiM(H z@zfDC&ZyPATvc;h%?tbUEYEP8XXjBJjdOdNcp8wUK}Hd{#WN(cZ4jH=if4IN@f^=7 zo_~;+=!pp#S0p7;?wI5J=#@`V8wQBnsC73mpX0$U+@FuH->gES$q|552C3N=WYLm(INqLN!?)n1L~31V}+ zJi&Lg1;opMX8xW&uCPNq$F&;b6=E)cjlk;*5E8&sB_LrV1-gIb#oj!TYc*cwd0tyU z8>U*%vpTO4vyMo80&>Sy(6zuTygJc%2E427d5kRZD){QK^4t;dDd<$*IlAk;V_YN8 
z!P@iSN#{XM+QqAo`Y&Eiva%ZAlErw&Ng69re*l>ncARLyx_virW_2p_&S^)~SzJ`f z?g-r*43m>9*DsuR!iYJ5zL@^|95-C@9-m3OYsL@X*1Qw+H{eGW*XImZ*N z;UxOfsdkcCby&pQ;DnRpRvqfcKHZeUlKiUU1_9KelAWYhoi_FTsjS1P%=4)QgjRug zQndW!UK+YwjE@58AdcRzjj_BHI40;Ww2ASB_JJX*e=5tPp;7sZFcY6fG2A}d(n*2W z8<-YgS=BU^we{3q4!SY(z)fdQ!914AQYblgP`JwyZ4jSbIygxW!zmDoLy;o>X4muo z2Ugl~RSK&EQD94B07?xBb^+}HrSq_@pa5EFm%v*yWb#H(@Tm0c8_;)g!Ih~KzeA;K zlJa#G85Pi3G!j$A^eJ`{ZS~beUv-X8i9D(=ooo+NFnYP+{PE?TE{x7K4pRw~N_PpP z8@3~1g#H#2_Q0iXAL37A>ZnaN8u&toD_fYu!N8}=w^T+-%yoR$n)#pwmpN>*t{;n> zihir__aizI#Tjqy<$);eF6^fQs0NkPlp-@1w>%%RH3~)gLzjk-enJZe@K{8_sNH2k z5ByN*-H^de9TI;_=zVueL9=lQS`IJk1+3u;>{ReeFg^;+hE~88codegk_2fYV|cBhD^4pgQKizz zi*&0+BCpkg7Z2Xs-FGb<_F*jZdR7g2^(ZZM{LrUvpdznwK$K@2>dkDau?mHHjFjqU g@^U{pg|!UtC;zU_!RQY;d9mp*xd2~u0|-G4L@?0;+|?QymSa888;0U$!vt;wiEgry>{<;= z$;}|uO*hgo&Jl~G?%+m-)(N8Y1gxPN78ddS zz8ChluGTb>xbk+re)*cPyf6r9#3*b?_G#Ga1pee>4WS#JguB<|SRQT(zUU1g46Wfc zZKQK;j2}1{F@u>LZR1g5j5yxbZXykxLGPk#=qq$x@mp|YfQYZwlEP#m^==Ep8xU9M zZnrNi7hrpC#6(Jt6n+XY5bULHr&rU3*$s&w2(!~|M=?!->=B}QV1Hi#a{1iKv!5RS z`Q-cEAHIJ2&1aMGk56~LTiMPK{Vg%; z-S)lprn1&l_M1^B_N9Ty0D`mvrbXaSfBRLozuue_VCE^AltY_=PyQg?6%_zJgSc+v zp~3J-YXuaHe676?l## zQsE|t8v~H?(o6YeGvF8w(+XlVEGAoa&yu{06jiMW+`shxp9*m`_eRgGl~P{FXZin6vcG z@>IpVQqh4_ybx>7iCC*SvA%G+6{qSB3z-|7bmDB?p?>7k4XG!#>y8@)P=-ovoTxi3 z>iZK6DkicPCo_5}hG$U7h%CDjgLUmsOlpc&D+6O%>sPuoG+b*JHizoR|=~RbM#O z8b~#-)|}tJyjwd=B~B`RCX7z66$+#0Z!(eYyVUJMBG8CBO31eQzR=-57v`Yf_o;F) zm4Oj6ZJ#x#ZfL?y4?@=QBav0P(Cqr%kZy_mlsEUXLKL1a>?a1829wm3DpQv?Js;9G z?1|)uF6}`c3a!_Nmn00fS{)|z!0!pY(_?UL_lUnK^sc)(6>qpfZHC&QMnzj#E$Vto zRr=ue6^TCeTb<46>9qE$zywZxfF3h}slcwlEs;6+F9M_8wn)M;pe7=LeSx#SKKn>l%S?Fn8fS$#RdJb#21d59`5OjO0xY>{j>N$543@c6+~{QQaQ4Z1LvsXZ+STfI*UZNKMJ zH&Ag`86b*NfqFihYODgG-X&%Fg{0iiOkpj;(>VzI^Rh@ISzCsY)huiQ9 literal 0 HcmV?d00001 diff --git a/src/目标识别代码/utils/__pycache__/callbacks.cpython-36.pyc b/src/目标识别代码/utils/__pycache__/callbacks.cpython-36.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..39102f6c622716376e88affc56a16b4006f52d52 GIT binary patch literal 2433 zcmbVO-)kI29G}_U`;j#lV^Z6kf(|0YgBtKhrGz3av9(}Hp)H6`g^| zTn?1KJ{F&>c6eN@wrA8J4*EeNHL%G+dmdyv5{@iASW{k4bCJ%K3Ug63I$6t&;Mz3s zZSiny5IJym$Xq%*UJLrXCq0xYRwkO=F;B7E4PtL%qXD~pB_vqkTQW15pS@roaLxP1 z8x91IWxF0ru7%o;uC=Vx^~$WK4HK(D9=0IrwP@-X8Fs%X`gzFV-%%`D z{dDm2w~xR3;?Hk>dA$GQ+QI&B2fJTAdHC7E=Rd6Ns8DTPc>SHFH`g+}u9fVSsd3iJ$gw_q@$BeFp@tA@o?&=K9w zfVDyP$pdGX-lMTKvUjPu2)p*khF#|Y%0)N(Q}ZcbmodNQe*J0YOQq6fih{84+el> zV8qKLbqP7QyoJK)ni!rV4~iF}gsL=1OsUEgj?ZE#!iRU+fu# z(B_Eh83Yl+;Woh$fPf}n=eJ~zf_SK-{HDYt6{_L3%-t}ovs1?DKLFxzHnZqMQv7x4 zZ=PpDcKI-j0oMSwFf23LqFF>+Ru(V)5**-okk0mG>`iEn$;5oy=T@ z3#$$LO_aU{*pjTPRe$_EmZw0G z&Fx(11G%%Uv^c%+DGD&FiB-rw7L|SxNUJizLF%C8w3>U6Rzub6Nqrn0wjM^yn9Qtf zTxQMEq%na5?1KIUU|4O@)m9xZa}1v6!7G9f&@jaDwCCL&@^HLU^E{z^&(r77ua}W9 zBv_t@>}SP{b*%BvLk+rdfz)lssxP<=cPd-Q^WmG3O-_z8sr-o@u2gS2D+3wQRZ AH2?qr literal 0 HcmV?d00001 diff --git a/src/目标识别代码/utils/__pycache__/callbacks.cpython-38.pyc b/src/目标识别代码/utils/__pycache__/callbacks.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d45be5ab45a5e13751b429a75471f21034429bb3 GIT binary patch literal 2434 zcmbVO-)kI29G}_U`;j$w)+Dw$f52G8a76=(l@f}y#%jTmK&uZPkzsFV?y}9^&UI!l zA;mRcgbeT0T7x8qVin zEW)P&f<(Xs)_$$_VMS9>CvDGX++=ybx% zoFH!+<}ezb--P6YRn6IURQF|$vG3t?pP+@?F2EO_^82dPYDTLyiJ+9%+8*R z_dV_P3?KA`7t2;PmYx=BD|)ACB`z;2o77CK`dQk7sN1B8V`SL*mguLUhJRfGa%E}l z(a-mPd+@{lk6%Ch=Cj}L{q%6}`?dYOU-oytcy#}h{ZGGJ+g734JpalYi?6MvxLqsk zFZQ>SD(682!SiXbX3ha2wnb0y`=dHHoGHM%n43w+itnJ6@rUygj1+p;VKSm4a+7S7 z4U4IuBf6Ku>n7PFcby%2o5t42-l66K?Ajw6cAdMhzMX1~XoGxZk3d3{?pXIIC87+V ztPKB5F^}DHecU0;W0Blqo*xG)VzGjCer~o7UE&rRmvz@H6)UuX`%CQob$M8YMP5&$ zsk{qW3?hYxLncr(Gm$E(&Ouph*boY@!BPjBIr8Np=@djZG*apCbVNum4O|#mx(wu1q2RNFj(_Id$*pt3fdrSlX?RPD zV_j3K55R^epu3&|d=h_=V|cw%*rxAS4l{GI_WK{1#>#ux(G2=}(34SC3@v>bY)aPA zsyBWti&LP=##W~Ep4{G2TAW;2Hz2e686*h*6(}+GkA5CVvotAp;-G`1oHb`EmU9_{MS^aj#Hg?YwlFa$MfMMkuHwYPO2BPINn6i JrpR?l{{S&Vpeq0X 
literal 0 HcmV?d00001 diff --git a/src/目标识别代码/utils/__pycache__/dataloaders.cpython-36.pyc b/src/目标识别代码/utils/__pycache__/dataloaders.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7965a6084e8c0530689be17e76feccb9f43e8e81 GIT binary patch literal 40414 zcmbuo349#aedpO-eWGy^1i_nZQ4~ec6iCsuEYUhh@e*YTqDe@yJF?piR0Bk_(EzI( zB-k$aq8w34XfgIAlf+iyWOFBLdoqs4&i%>c@FekUHj{Od-kHRkamMB(Gn2_AyR(`c z`~Cf2Uw|lCI{`lZ>eZ`PuU@_5fB*mW=KlW7jo+#Hzx|t$$iI(t{q>PQ%CGu4FB0)1 zxriV2y>c`cwWF8w>=?_%>=@6*?U=|V?3m0Y?U>4??3m7_?bws+;TS7t=6iF!QMDN_ z_s#d``sW9719qM$56%zehAf{fubUsv4O>1{UOzvQ8?k)4ykUMcH)?sWyfL@Q+TNVo zY{xC7t)*?b?Uvt>+hNC@xt$z)%Gvola(B${%I%uJGk2%mpDFL2zbkjw{GQyN`MYy> z&+pCcwesHb*!);-%<_HZee-Y1y=i`bZvXr}xqIfvbK~;|atG!Q<_^x^o4a@ZQ0|b` z>o31~{&4Q_{9AHwnZGZ0-~3y1Z=JtCcmMnYxd-Nta+zIl7xs(38a;N-U?zF!<_gL{vFeJ?(NHaig%QrC_Q;Caw78XO}Po36(=@F{JSrDH>1I)&PHxVFMaCSh`-k#dp?qT z%HQX|iQ`#+zkd(MclhJ}0gmVVcl(F^w>%%soj)D%@AKdKe8hii@$S-H*J8OzO78a` zpyYvTUhZkmj`(lm>}{Mq!`XxWL!3Rt*|VHI>_5WUBb>d{Kk6T&?RQc7sDGT&{4y6T+Q6ldY>4JHj+Vcu~?{$d5!qlg@tk>GkGB>7JTKo8b4d8T~IAoN-n(= zloty@wMagdn_E0JS1yjl8tIeEb2F2L*=i$kX1*|6Y^3e*)M9lk-mtzrVSQ<&)Gzwc z$f!fEgN){2duaI0Y#YPwG$>wLnhT2Rs-A3Bt`_FYjh^Xpu~5nLu8r9A-m-d#9u}*OTEg`Fb%}$j{6L)mr_*3$@x}^~iw(vvaizOH5nH4% zR-2n)>gHxhuN0@KTBy#DT%K#hX1NrbKYW?wqLKwAi-#NWd248%hCIe9c4hu@J^Q|L zeyR|xypQC_y$4s`7j#bVx%c4IN+a{cQ^$`zkw15A;_bJhtm8)SQ^8zonM|uhf3fE8 zILGQyl9~bX=OZCt<687ubR}Agmb_XlBwvbOqin?sy;8!Dm69Q6t}Gg%EOjlywu(=* z8wWGk1FC9#4@nq#_YNA2lxR{72A<`u@n<5@$R*EL6VVCwgSSv^#249Fjbw3|m0xY7 zW{b7zT)oIP^u`Z1qKiKf2~N@K-#kik^2l@F_{GnB?dL!LXaC|yzW(Fie(MLn`1MbG z`nf;*#4r8XwU@r}nQ#5G>p%6Jo49-K8$a}%?eFJ0o2xp$c=eT@2WN|w;__nf@L_dW zy^lxgTX|~!LZNzL{K4|VbfH{*c)VS&5n%(ys!Q{YlxmdcrkFAREfleMlf}yPf?sST zmlk4M~9;$ z(co$F?Qzy5X8748K0zjolp?K(yB2*3>{E+VmbexvB{hY$)aHn$(NA1V1`mf|+|3cv z_~yurw-N;>MBWiO7kMXT%tmtJR+2Vv>G{FKO0;;!yx0)fPNfkVIej_)`r$_f;zf)EMpHm0yZnmyAB`8PF^TB`(9hfw(GjC z9SMrHrJ!;od#vK>xZ*jeG`D)jQn$3TZfVip(&XGqkl13nl~PVCYN8QWN5=Yd+Vr+o 
zVq^lX^2Xq)C(fMBKXLNZWd7Wl(~nIyGUuL}JT`eUfA9SVn-?daI-7s=smaNwo^13? z0s_yUed_$lcD+MyJqQX{@iQmSQQp(M|AB*zp7!fBQniJtt2J;-e6BLH07OdIdf5NMhBw75;fYZwgozE)e3Y9 zP+p7pu~BVoR%t0N0LsR0=L~elCxR(z-^!AdHN^UhN5&G3gdx2~-&|#`HdiR;tHq+< z@G6TCH-^`NLX-eod#)@57mGoD&d>EWv%0eOEqhg%cNN~lL#hH25wDL|3|7dU`nt~b zXEPd{AvZl_1(C=o#|`}K_5M4VuqPM|2ZK?)rf)Cidn98*x@MAanR)#uotCk48S~460fw8%3sZzSs!kPhp;rMKhP|`e%)?>E@44s&-{(gveYO75KsXc*`pFyK%KES`_-t*k;?;&$ zMrd~(t!@Z6l!pC;CLfHN3!?a6F^vH=c?H`aK}8@VQ#8i%wB;I$%?vgDD&(&e=VmW}bdvz_nYraIzz<2=?7$@0FEdrBK}!i32iKX%MsHF^iyBQ`DFhWd z9PHKEdX0ktt?IFg|3qP`$VFR9+Cc5+(R)=k5{Faf&JRVz|leUv0JB!Cp> z=e_DbX(hwn09W^dXaQzc*K5}vphocHv@)%xT7NA#R6R&ZF@lUBBKbfr_JVjl&yPKy zMm`Yt6Yy~fKk279CSL$-_T`1JDL2;>CtY({I;}F%;Dq+zT zv(>AxM9W!F>E%KN+*jz@L59=XuvXm@4$>m1iqxt>AAsorHy;&*1=@yC#Z3|_dM!qo zYCuYbk$lSFt~b%>Z|(%Qa@ZOPNFbJeI-l3#$>$r%g{hKuVs9Sk3u#jeC>+S2Gv>0j z;(me-*garqE~i;`wjUsjfY4?5hC%=bnY_rBn61Li^yJyZaC!N>n`X^$@KYo|z|TQt z(91})zV+41c)ZoO_F#afBSJpMb+cA~P2>#p`ZdvlQ0B4VyC@CROwiLBm!mqT4Q8$!IUzW6&4rs zlF8@i7yKn4Xui>x&$A`V%`3dQzc9^gXbys(RifuK`kV7KT`ZSdbEU2@fS*$(>yeiF z_wqApKOK*IeTj6;Hh>1C{ijhn%CDLw>E2Pcn*)aHDVeHH*YX z=$~_6Yk_QBy%%14b=v9>W>_9^>+@RlB+q@!c*xF*KwhKm^E&I)rsnhNf?EP!w0@gT z*z`|~C4(ji3=^esgVyczxNuWch8sBQm5-$L)_XGUt|Qs89gWN!q;$1bs7xc{(I~)bEde$mtF4XTNj+J+u#rL_ zvQWOvC2&&_YArqsJ8mRti$1LSb3t%cm6LPT8RW)|WMOepb=OIXJh4zarJkM)f`z~# zx->MUqracmZ3*2}g$|r)xL)RG$_1p6U@Q7pSgIk3EL4z`R*F~h_8N#;3ya8+tJPv4 zF^y~cH`MEYMKadw5WEQ3;Qvsf=L8KUUr}NP>dg?UmmTiC6~wB(QzLy1tfo8psbz#zuWGr+ z2?JKQcL&Gs_B^~U!$Mu1Z4O;j+JW2SlD%P|?MH1%@tO1$#K>i*RFO8t70 z3BV#3dw_jjZNN|K4hO0I9$M>eJ=1J0wGyul3Lck+_^snN?DyV?`+bZ}0?5DK9{{QW z+x)?gAdOr})J94hLbz(KM&bE&XEKb>B!ucF8VOrw1|#Zj$z@-O9;kcSy>)LaTld(c zL&hOy*)dt$GTFLYqg2cS{5p$R3&*4;(6&>`TY*VB4T_Eif2z_yAZhfUuPrQ|VHudV z#jXE8)YU&y^6yjyZW}h5t&SRl9i{cerbK+kR<=Vm$f}IqsAllToCKpv)@-gtm0eZU zb%m-#t8=qU3rp2P5EQOLix|~?OqY39Eq}RChT55*Js9M5pF}ciHlI7TXdViDtYt`#~f&RILz%{&W;6WBXwr8Mgza zK+vjTH^z*KiXmJTJ>kk)43yiU>EISOx)JJoMzHyKVG)U9(FQm8xGr8GX^b2{b~bAe8%@EjZB$(eOUnHS%eWVMqwSi)LlWQ^PXD_84 
z&q8c698p^nuK`O$_-FJa8=YuUuTaeqYB0KM$V&lBgGs@EQBEt#@e>_f zXF1~>ewnKxN~;2WX)m)g9Z%ntP6R5S=GXqy3?1cH9VclMoBiLM*ktDoI3n}e;?#6u zz7ajxp);oqWYVIJdW4PF2{J~CMT`^^`QlxRw8>1C1&aa#Q@SKHNjgiBLtsZ4rU2JG z6x|wa0JvUaEYsN}QU@YyDh6}YNQ-UxFV4YmXQ>zbr4rkYF-b<&w-?K!PoFx4(DzL4 zOAz-*ho}{4g43#G+}qt;S%jaNmgqyADS9Ga-W7=Iw8avM%{Bv8D@zK z=YW+^^>L*K8L7!pLs7a8hkz%V@IJm0GYle_pL{t6l<(7ogC`=T0Y7yk=BJwwe$Ypj zYY@J-3E_vp0}jH+fbi)4n()2P4N)U{sosAaxEU0(Mw4XgnJc#P#!;%)Q!a17o)LVW zH+GPFiri~}@?i(cv(%7Gajy3E;?+hB^^1cp-%$BFALU%IaP?TZ{OlYW?<>{dKTsZg zT^Hjs<+()%vTfb0f!l469sC8gTR{3h>GuCjk~3OOO25|x(zeyNYSy=LLqxO?5I7x; z#|EN(z~FdvAU4_si`%QA35&VY{&dvOR)ZFhFZoTA4l=0t8mmrl>#&~R4Y%4)F=371 z)_)ON~X*7(Hq88T<5^f77&^K^=DZ7$zG8l2b2npFq(mfd0+ND{o zP{CtJcGUSGCB1gVu7{&6LEjX%Rs|u#N7=qe?V~f($73=?f5%&t#+_ zG|iPm8g2iM8(!(n(%bvVi&~e5sdf0KSN%M_d5gcx=21xJt_f4}u-w~pjkdRT)p;%N zEQyI2HnAjGQqncDs1YnE=Y$ImXn2n4Oh{$4*pwl>CIfz~1xFAc?CsinZH#$4`GEoP z+jn!M)y+QF&1+4$M+=_qSD3};z>_&3fUm33QsAKHXsPBD#Ey+ z<~V2IRAKreHdvLpOH0K-l!hr*gYTr~*pTA@HmI~S=#p$TqBD(Xr4gMoLapI3e>P3V zQbex=lX}{E-6u-B1%HesZ?g?YxLkdl%Z_bGVjGc(?(vddI(itX7mS3r#iU7M95ti85H?^9`P3o^2FlUH?1vf75CZQiSgXNg=fqVJPr#DF6>V*B`B z4E}s2zLHo;E`JbfJ>Yuq-H5sq7wsq!(Mp_h_%p;!@h}NW6r1pEl*No4Fft-|FD4ff zWkny1RQ8E;^Fh>N9Z&>Vxy0nH8r<6>GYzhz5&*@LyR@mPph-BPoD<@_y}jX#>2GeB z>{11h=^}Qm$jtX=k*Xq6$(pff7RVc9&z#M|iHbu&KvI!a>Oy6FmN&P*u|C@d!4_r$ zS!;j|X&PAx)@CTYhBbS(YP1&Ia>>XJw3;u^VE`kpPo|p4Z4%XTvA8(aEXW2sblXIF3r1eac!mC+JttqjVI##u=u0uK?rfjp8j^q`{ zb|gw4gt~}hV>lC0`Ab$7D+#U&jr1d(lCe>c`I&D>tk!s^6nQ@ZSGkV}n5`-%>7;%># zR&GkAgT{ZOfGC#(d^MVw^gH> zGYAvc-BbtE3MLSy!RV}%COaJu#H$AY$Jdty8lw9Q4LO0}YckdUK$ZLT)TS(OJiQLK zdVQes_VVL8Yqz0R-)I@V~V-B9`Pg62b(D_Iq5b1*v2Ny!78$~^Dnv6 z)>Az$Zk=&IO9_^XM*pl@+}MU!3;d?sp0!mVBJZDSI9zw?cS0o{vYz7XfB|;bPHiu3 z8a=9KAU<4#lJAj{%_=)w?*(tsWZkDk&u9#;Y4e}xc?Mp+J?M}*o2FX(Pimv1{HotU z(iXxZ$tWUF;Fsi=;+N*vGn>d|{2eCSh~|2WeSR;@ZJJ4t{y&;S#^DblyFITZ(zsJzx=sc zFLn%l*f6|5jJE9;K8aMIU%dXxKsZnufaLi?*k2ng4Pq;R)#z5Z{;dpy#7H+p0QFyb zo_bhyA{!V$GQyoNJ-9L$4wlv<#~I*0-5stAnKd7~4Ym6& 
z>nG@LufO41^tr^!`jwHD4J)H78^ht7Uhsw5n128{6*+%nxQ_ST6s{-T9FCA~2{({# z4M$10p}*Wz+lNfKFJz9wjp4@HoBZuJV7%z(j?EEkZGY(=e`jgjm1qVaeb@U#Sao>S zKMh%vIyPq_YtOsG#uusehaKUv3&CUBKdAYG$UQ*M4_=FWH1b?*W%J4w=JQ_U;hSy5 z4>6W+4!2nTa1AM5=`9knXwI3J&RMp0pK7gar2bp^-5+j*pk}QyGY@cnglk*Ft^U10 z%F&IHm2DwwCjYi@Bk6;+hg6%~!|aOf_QdVn_Xr^15aiU3!QX~it(%W{0F=>>U>SsW$c~F%>6p)&U)dFA!(BG_hr*pm$?mW_ z?r4_XYh^u@{iHvn=Y%`yXS1~#uUX$+qweZ!H)EH6@>#8l_{~I+<9K-G&hXCW-J{lz z)*05*n*dsSK1%I;cKb|}0vyi0GCKOF8> zZe=&)H99NXplu_Ol`YFtjO#WVt7lerQfF(p^CHh**%R(59mfoGPwB)pu~&Dm><#z& zqZcFee@MAmFWjOLscZ@FraXQ#9(+Y@hj(v{w65KZU-}BLU{~p+%@=bYdLxlAvlR)X z_E2lqWv4_RPx+DwZ?ybre-Ce>5!>2)=f}eI&1B&FLn~vgdaa|UyU*}@9ND3~Koh}F zbLZPDg2GR%?5jOlo8W0rh5OtpV!gbnwy$> zxu@A)GUyA(XzMY}Yw0Y|@*TjzvC97Np66nepVmllJkvZrZfiw%aD_Y0skTA1v2e`a zw>9Fw2}$XGlU3jIax&bXKYufE>1$Xu>@Q7BH$A!5HM5AnkriN#t+~TkuiZnRc83owVUtFDxw45PaI^B z%&j3hepN(3lju0u(f_uM*4rHX?I@6MZXIR}%^}_ghGt)vZ5Z-kvG-;h#@ir6^J>!c zc}->~CW@`g$m(k}G2zJeWXIGh_zjAKhj;~}go3w{>wfFoF-5=8dSRHN18?w!Tg*{? zuw$2H)>!pTPV0x@NA6MPoQsTcVcywyBr@lV#jGO>ljGBJZa{q32Wzo#CBHZ~eG%KK zRMSJM5to0S** z&UTMykK^$rLqHsmutL*XLk8XINmj4AYCVgzwDl~8*48sDEm#DqdW}>Zw~mFDq`nco z$maNzCiaw)HhKKj*2E=OZ=2V7%;r^k0ddarc+g^d)>vi<|8+@SmkA7hA=C0-c+HVGv^qOyWo)ND~;AGP^+)`l(@U*6EEDDEHS-i>+3Sc#Q zwGQ%C+0?tJBq2ufnEqZ6Lm)$$d zgCfN*fB6`~`g>>UvD1%YAvI>;;9;6IZbg2bFxQ zk`IyKCbv*6V5HISj`=C{?Zv<;?2oC|I@KCj?V%q0mahD^5*rDuA#vCag5On%EGpSx zRaXS;$ z?kXaaTc(I%NR1=_>18&Qu%5CJ}jP83#4;fO^nMaSEYg(#ygwu|3jn(JyyTfE< z2}{j%%az@hSR-NfU8w6!;n0X*EM9F2_ay>fz^@orOuQU8R;5}pL3<-%&L>V5`FTAZ z@Po50&CBLgO$DRs-v4RW) z%0W3XacqpZ0ddPHUt5qr{l!K`L*|^!0s)U;t2Iw8{LsLoH3XzJGgoaSE9PGr9H(R0 zWtQe$& zcvKb4kk6~kW1m{&H*KTVi264E!F$ykZ%WpyQ@MWp@XAZR+~n}E$*o^?nzxw>1Pm~+ zmf(YTb72V@(Na@!PzZ7f!W`iC94iEKAOO%p4lTwq515_G#b%}}!J}#+z6hS^Xj0j0 zpHgRSA)A=XV71JVet_!LUn3LgmpPLD2hbQeOW7gMLzXOEK{}d6qKxG%|GQ$N(Gj!_ z!_kAWzUXeev^Jr0knQazY)OVFk>q(Kx-Ta0a@hdN3O9}Rq0idS#NynWCBK7mXDg`Z zZHuL`9L!R4#Qw2V!A=EB73nNEBGLBt@x)>3s#o-aC%ba#Id(_u+g^`Ms1J-EaJ$PP 
z_-Tn3^e3#hncs1K)xTmu<{}s(;X11S*~m-+*3onwX8hp65Q-2ok<(Tw=3_6%F@3;* z0|x)|D;dlZGZ-Z%YqCC*9-{}t449*9v1?w~gJ?m99EcbEIK*!+`V&;~?_PecXQgkY ze`R20aAjy^-O8|-RWv6Cc6TB*MX2yvV~o7^0Ej zOBhu2hZrfqeqsO-Cg@=zjFTqKTq6~x$fsZK348Mcm++_Uh!!wX7&MJd8o|J54U;Or zr(`A_)*9h<%p~IoSJsCb2WODmTZ3VK88#NCb%WklZzyIXjQQ#lPp9Xp)|<~@fa~{D zp5{2fF@jM@%pZI(a`mu;AlUISQuIDr+U(5Cj}W5U|xm z8GnH&16gi;SG#}1nkVj9FJ|8Lur0{g@cs=>kP(W69dY`jdN&<6dV4yK-6SZ}!G~|0 zt_vRH#`?Clda6UX8DVJTTN0N6(W1xn4DDuz9fc?B8MktU)$8jgidWEAL8)e)2d0mm z^5%Ba$L$JO4sd~^Y_`5$uD~J+uLNjhgud}O*k&+6z_d6YXg?T;HJlS%;)#u6aFjy6 z7-Y`d_6eR-rwsQQiZi_aUAp&Mbm8A9X9KjPQ_(1J$pM?W%KEVxjM}w?N3JyvcV@w;cu%r z(1fg_kUO21Z2#@*yjnqVu=RG2ZrTyI4X{ObNC#|B{Tg?&PuY_QDx{HBz{J_~Vz z03`&5BhJG0E9|#244)A~rEYNl>bnfe4bWn(wq9?}cwx#)+GAcC`>+(*ozTu&uNmLHlN$3!{k9_0&&OwnHOCNQG6i#M{P8JAK4o zD)+aVk#L+RX5AC*K6)PxG8%Vi-oxaKoZhxg1n*!Y1shrUCJr$6{uKODgOXgTO4Od> z5-hN6w)`uWA`9?~vq2F8*@gU2Kmzpqsw%^wVgYYCh~{>rt3)6*UhthtkiSQQk1LUu zCn)li$=SdCiU0b0PfneE1fA7hmC8Z{vKn*YMy$A4%_WG(mA~K)&9lp6G0tb<_j8G= zJ6zTQKM`(Q@=dJePRfBrNCA&ct$ItxeC-Nm^vDnDk_|;&5AQWn+%a7?IPd~UH;9wq z_=H8>^C8_+Qm&}PXl@Z#j^-BA8yr$1<~7)(M79fNZ4}(CoSD>|SCwI%3NZvywgum# zXf>pkA1YSsxymR~s7@U9$m);BiF zdb-@cvHLI<0II#5%Q%uTj{s#2PLS(8MrEMsqGyropw=9vhxxGrZhF_V5B~pwh z{Isg?sS#f>j6qPb5wY=CI3q~nkFenPoB#^ac1J=v2%@#J3B`7I?**2+T`P^JFn&{B&#fe4yaU2XTzJR z1mCYSacGSwth(#znnf6B<9)vt-xgYpc)N@capph65F2ub+|BZ<@3`Iau}y8HU3>M2 z2$Yb-R*#bhH_^UE9!W4Z?h0aoUInSJbGAX7DqWw5_E!9abg*XluIn|@g=(HqP=tWV z@(4GOwr88x7h-=M&uzquB}RTNaN#q0OD*gOtSIJdW(0i3JViS~@M+`E?Ukx6IYRs0 z1dF6B*E?24?#}X$riDGARrF%)g&2At{GnxyP>Ms{y$OlPoGxPjLrL=Azx zoR9fUZ#l$fA1^fQ)vx6XM8j-mcq1U z{G?W`pW<3CQGH^tl6`2qA&Q6yTdK@;ecN15yqZbbQEJtmt z(_|=bMk_mbI_JZEFGFR-brTK5y3R?+P?pLGpmsqXeGze4L+j??;2 zclTSQ26wr)q)#x0-tGI&bK=sltN+rTtZIGA;07AtU_fubw#Tpc1T@a-LA&3t^$Zx& zp>X&k#9Rv3HHoW?B0^Ep8}_RNvZ6N}+!T4yqkM>?NZ|pkelgDEhgx}7l^FQ-kdYoy zo)o)@y@3`KSn1ln&RAUcu1BvW)?RxsBD+kUpGi+PgK;2QIMq#CJJqF>r!S54+chkG{m$b7H 
zve=V*U1^iw5Aio(#B)YF1gnG2wLycko228ny6c)p5s$i8b{^b?P9`b7Z}ei0N)hh7sbI$Ebonby)RnH?G}KQ3U!&zYm$3dn&_621vi zgq-mUo*N0mN-L4VQU^kUly-VDYDlh4=A5~&RO9w`Ni5vc) z^=eq&z4?v5TE|i;iU%8%&IUa^YTo#-Nvqi}z7ajR;)X@J%(jaD5Wg7asB7^kr zQZ2oUBLhWkwgd_wv=ljMHo7go&8ST$qmXnW9UbuM zx1}VF6mV<)_V{u%{wjP4lMi0hW|eORc+>D=oRYYxN2q~qjv@pfB8$X(# z8|=BmkI@&Q`sDW&{ESS%uM@=!{Aelov8Rn6Q{YO&kJqEuu^|!`1e)qU0S=2_2Vl&^ zU&QXJ@?M8Eg5R^NQSeH$PV@TFCeFfS6#Rf!o9bG6TEy5p*6 zR{k^f9FCoA)eEhL-Q#jfn|-YO67@6I?(@z1341=)8twa)Z`J=O^?R-UFSzSrlKN}z zu`)y;UH3i1eSLP{*E9z0XSgx3x_Z;fIQ7iZDj4pr7wV3#x;j^Vtep@nJKFz6??nW= za+16rG1@8K5@@qNOhi-aGc1XX85om1bBCxqTvrk^|D(ioSl7f{u{M@WFvs5Y7+7-6 zHL#>`WbB0mSQ4QJG~2LY$zibMP>9VLBIFn%V>|@OrNe>iF{mf7Z-{-G%b68W28v@y zrQx8%@y%5QCv)kjdkJs z_NcAG_bF(D4fyw2Psyz}d>^~?S@8X68>erSu!lF?=x=Bq5%t*l#^w>hjYh!V{3@Kj zeF8OtAq>Nb!hH@gPAfO1e%OVNquY=le;rwrg7$3L|(qGlV*T?0o*K3J4gv(8Ov~20s z+hOyKtxki@4}TME-reaoI$zamadk#&?Qp|7s$muMcD71u2rM_aT?AI}Q<}o2BZIIZ zBYzv>D%^9U&@XvHr#7gipO1?RP5*RJ?&s90E z#7Gn^bR$u2N1Z&-p-vvKjx@Xri-@ikOGcF!#4;iu?bTbyx@6CjC9 zYU2t>A<@_JZURCAT49bhIbua(rY0J~IpM*`r7ZwTi98Suz=k9E09=!UO@cuaH_{9U zm2jFdvyG6f()>&m5UWN=1Cxjz(2&a6Cpf0-l0&*l{0`-el<&~mP9Ep>sL2!kbc z`t9X7oj9@W7nk}*Hy-C#{Vkeb`{KMd^q3#rg1SR;5LB>;?1jlgR2?p79C%Vn7b&=9 z`-k)c`G?ZkZY~zePcg$escJOH?xbe)IwC+mR#@ zJp>91mmQNhVh0b6kd{NBVjd`BpF$SmwuF)crH{Lwk<~Ny_+W4F?O^3PPt}v5HdLb2 zN3Nk*^uofeD?X9?npdHbT34x)0aFiHEo)Kjy8A8GpU&K!+-2A7Zo5VfIz%e|KCFC+ zIEa5f7XMHlDRP6#UHaIwk>x!|A5!-ATUc)W)P(M6Q9W92d`ab|EjZ#Asy*vX%34l` zJ+|P8X(Ajt%g_ECvK88G7Jc=R*0rOJ^wgZ3dJDlR9IyLKDUqKkNGWU%!i|1B)MhIL zFb)WoTy@xbnx^otGDYuqk$0l^BwQ7yKEdZzV%%KxRHL^eK;=Up_@Uz(TSP{`gC&Q@ zql}7vMT^@wG}{`xIK^AmHiWTOYc|APT+d-IbU5ccfDY&E(*}o8zL+i1;hcA2A?FdQ z*MO6;XtuF#w~4-=zCNl=w1Zt?!_8lN+ z;&)XiE$2enj}j^v7h(icy1$WD2rwCo5$lJGU4wmEm)EH;Y^r=al;?Z2RQtH5k8{Yy zTWB-*edeM)m;+oCa6itkx*rtm286FhFqm5rx}m_?rQvkv3aH{UdoJR%!Zq1&8{@qK zJG=Z1?3BQO3c6}RUcMq7K7iNs-MHZN5ES$YdC?-&1sI!y6T|6`I3HS+NAXfW=S?P1 z3LvI;z||It@Cj4GGgI^vz$ZZ|?OBl3m-eIjSqDpDv1noOyBwp$+;DBw&k!oVr(>f; 
zj}IZH?9r4Hl4dtRG=s)}6Z(GloIc*0iI?d=DdARGDD=E}RB*!nmy@Wa6nJKfMhlmt zbC%yK-?tkA*?5p=h=~$ur^Eizc7Fo_X|(iq z;IOch{%_*V`#EN*iFkZg_YyYeTC8=>o$hMfU)Q{vu&Z{?)lBIQ#uAG$1nFj9K#I6k z46ELl6t#g9DHC)iQhlYis~Pl$etbB3Gji#bXCs%MXl{YgiN-+lb4_y)Cg{A(%>g?4 z1dGg+-Qx}iaN1dPEHl%ZoY$gTO~LB}C|wjhE&UpmT8zSPl3%mzEu8AI7P?x6wYfvV zwZTXj^YZznF6OJv$Iz%a$6Th>bxeLm@?zjj^e#oiO*x;5YSY*7zdd47T0@idLE6pE z5R`<-e=@P^BJ}Tc+kR5-{$(ZqT8S)$Q3P8&RYQN;Z@>XFop!DaSxTJ#%0Py30CLQ9 z3d14gOnYrK{?}DGrm}S;jg*C-c=I7A`TV#peyc7%tc!{Hi!x2VOQo|`5MUTp4t&)R zvmlcuGvP87KZw1c>o7v#}Mv1%L!aA-#w-tyTbyDW4cv(r4rM z=t-KR(&hIHPvzcmyT*JYY_GZnma@IZS`p_(-*akD_goUI=Zr z{K_z5^j_Mt(i8TS1~Fbtmxgqey7-3LQ-|FX*`n*4KAs;{US^2}EzRsPBJG-ca<*We zD}*6rA-fOe%vBCx{C$9r%ha+~r)poHlQ~^hsONyf^xB2|J~=#R&CMZ`>y6MF4U$PNNEc0nH9FCZIt;#6pFM5>=tGxY2TE^8mTIOw)xazi^Q^HsD2Q z9C!_#p3q;px*R4DDGvo3< zaVy)jIYgdn*3rUfBDO}fApbkFznvHxc2NJz>enA=>I}%6wC8;)`z&RAPc2}JIU3Ix zAp`FQ?H^SogYgbz8#r$C5~UZ6dbx<*LGY6_L4Z+v^*<$_8)&aXTd*$LuK{eymiU$* zIM7&`_o#7$(;BkY@SP^#=xLACw`u$&HPUyWoN?(W%A#R_@HP}bqtQU$>1)rg=5;&2 zqn`_-$#G@s3-YW6TI#OyG_$YWACPKgySI(CkD%M?auep`BSx*q-*-8F8H2 zvy=@7-5s0gsecDyB-51?`d22*Y7w&kSg;P&FVhBU&Loa z17II=f~;`A-`@N!1ZTASHXhw=eeimL8Q%~g|Aj6{zUS4>0=EO{t9r)mPT8}C45+v5 z%80FDd=;{Nh6+;m2v~Pxn6>PMQ0y05y#mGF^}4-k;o4Q55Q)5w^1a({V2QZjmHU!Nu(d#v@*@uL!K@Ewi z)uz;;ny%Yv|AU8H0ycSV&bJm9f6w_L4c{}sP-vw5BJ$odJ2$K(gLJ72Xg>6jd?}eX> z*V%Zt{+@5awNySfiA9Z>=O)zhJMG($E@)fQt)8Hu(fwl*Y^ZH7cj!I)+Tlg#H7fH$ z`}$%agU8^gPLC=1JtZ-fi3c>_$+fd&N+I!l!JVpXM2Rt$_ga~fPbkT86-(8hQKfHJ z!M{`Tf=;h0_Xo<=m3yC(8%o}z#JEChvCRiIQpQqakn@A8A*R}qAy;&2M4Kg|KY|x1 z`ynk8ZAB@4wqfQhrB52hPfVeL&-Ibpg5#c?VC4F;h4O8ra);T2E$Sp?Vjq+Ivd9*P z`N`pMm^=6wt)4Nh=39R}FLIQhML+D0ZUoMT(s(|aix*+`{M7T{YrbP-vLwD^wBDFJ z;--SXWt2eNB{LI1ir}w%NR29e5abuRVF*deiav`gjtVki8z@oz$9pSS*#&Df| z*W_bQO`JV8`Pf6dcAtIrMDrujyX<~r+3tM|~*{QaWLg;^( z-*K(Ve@%jv5dnp$WHa`aw^8EieewvEqD!>(4ezCGP&6{D$4FR|2%k!pF>vg`F8@os z8TUiEBrj%y%g`-p=$4_HK61sUNk8nhJr~pOPd7s_VOkG83w_MfX$y)D(;{j|2yG?h z8FEhY^2g*eTF4(!q1_NQjfoPHLky3&qxX}TZ{-O|P1KGTdJ*wP 
z&`ry0BMt}~Lh7Z+tMc+gRl8};9&ATwS%v?_cMHZp64dG1Wc5}4kj zf_0)Q$##+HQ&W6b@Lm&Z2_Cj0U#k9lsh?DRKEZ>Stm_W1`a!PQ$9DW4YmrrkGY8z) zW(E1~ov0`G*#@j{!b{a+>@1H z(gNWm7FI`QVxv2PRbag`{Jnh8^rL}wZ>DaoZOcJ(>@DQ)-5c}u3X`k)n`7|>ODpTha`Sw&(#BEqIT#E=$!n(}C~ zls7L*>F)UEyqR?XJ7gFL4*C>0sMkE|`@lj;u+WEzR=~syjwpt}%fJe74)PQ&7t;XB zgdr_iX-hy(pWww;_2)0RgOH2#BHiv!vu86N*M>0f0C$pGhrD>FqFaYJ70&rjoHWuFP#uP&zP-&fGxEe)uV(p7eOnzQ*22kEqrv~e zjnLJ9$H6U&|6Mt$B%H8MK3%Ewj3=zWpq!JnxttK6R}F@jrK z{ovm!(fVs)CV{+MiU1y)CeP0a?{OHdo3pP!awRhvL$%=5%mT>dS8Bbtz3 zU5P2~Dr_nfW!{71*b&MW9FpeJK+Ud(kR)0lB77{WrO0%={cK6HIL)HR>{zLlkhY~{ z7Ck!Z3qSh4x)CV0kZA>fC|OJUM0sr0A{N^@t=#P+=pqv-{a-T9~bvoT2Q( z@2?F>ET!ub8Tz9)?0%gC?>2S(UKmXC=Qjz1K)M^+&?JsjjdiMO^hF?8)xBg)K#+Y8 z786)I7!*gFMFWT;(NA5AepgF4;JpelSGnC}#;;>i>r)4T0?JSE<}J+po0P8Q1s|hq zyH*W>9ra$#37hNA*EY2FG=2E zJwN+YxXmrG_ngR`S(UIBK#c;*g3CZe#a&F}8?9N6H^q`7rJ1%KA_5TxdbV}}lEb`d zu>PvJZ`J!$aga)+ZEI>jU^hB^)%a<$nfAiPvOlB@4ZW^5(JhL97=N?zW z&z{S%(F23*0wEa{srjZjmqCG~;75EvBp6|3wFj%U5p1yb(U1++F0hSl1jl{p!{WFq zA}5ns;M^)&Gv4`hY9P4IH%JylCAL=D<0^|@W(f@N+%*K~fzDXqW&x{vW^6U zVBnE$lu6n=!Q)C4!Ywe#Q72x@->LyaPhwl0fGe(E!Z6`V zkMkO7m5&D+eItx!Q4gnxrsZNe{;+NnILNtIG~=c&Z57^`dTcw!R3qI21DiF32e}gi zg17=nDIAKE5^ajL^@HftUC41!_JY2-cAYI}|KonvAlAKuX*IOT9b3 z62J5e#xzOnGx64?R`c6+u+-!M-RX%n^H)zbwUOOBp}ytBLZvvKJytHabd@Hu7rFZC z70}nU0zSseIh7>C*$mcmNs}tkm2+FSWngnGLIyZslksb+ZnI}w(d2w9>|9wjF8vDgVnbxnEl`I@Kfw2{Il5fr%pk6e^Gt-IVC@@ zrH1e!^^-(2PrLv+%Oy3Ld{;)H+U91Orcx)I%%lGMCLVg% zQ%^i~`7PPiRkF=G<5uO>h7!TTLJfgbvz9R@RudVOqIFiP5-w8#WzO`4%X|%*uQ|2L zk-THz;Af|9}e-}wZ=(@184UWRRJ zk`naTUOVJ}%y5nigDE==Qt-T#l z1CjredXz13DUupPHmjXkss=)5jiEJ@M;MuD3uC}ov?2~#Jj9f#1d*LJH+5Fq;{$re z4=DK*2|Mm{%6GPk9)DhMVrHebi${zZt?zjKcWR3dQna8wWZ?DFWSBt%pmw_-{oHgq=V8k~Jb98SI8u+hKrIk&I(DC$~tk zhFS*}K@q24Rsn``*eTnG8-OM8AQuX{#6k@ESPvQ2R|;9;q93XB$rc1@LP{YyumXlb zq$OF75Rc24w*jv5v519FVWYyT#NCm&U^tp0&$y8`gpIb;_K|HN+f1M0MT{zS6l*lN zPQfH&`l2rAB=|NZ->&34lzddlOG<8%;M}lRd5xM2i)yMoa|!xxUHGR;jw<;cC6X%y zgG#daDAw#7oWk`U 
zosKIxphWava4!kefW$Syo0S|^@)jlcDS4}s`;|PPk@ob3;q96qB`!LRIpEzVyn38uE{aqkt6R^&EHk>b|srtd8e8LgNN!}_M9q$hg5)2!FoeRQ7QI$B%C!sSxhWGa5h~0+r$;^e z@LZ#hui|lW9K9gawqR}}g*4Or2GTl9xM){m>&zb~x8Bgnc%?Evvs7`a$ucpkDg?)B zp9jCLp7wT;6T$ybNem%aATgtpZ}sW{4n2)`EI|XIj6j<_7*JwsbckGTy{lAMn$CDdGZFlla=))u{zQok@l302glWSx zPAJZ*x!yC1U36-ptN_*qE3}b1P`QJ{8|eq<7yPAi@nQC4q^bwRF~eT_FSV1?!1vkJ zKC7YNlYQP^?}XPEi?en0KNM3S-*haUPG{0v(!KbH+~z-X*YNs*!GZGw`v#s%Cx<=| X(f{e8%+RipP3gk}wUIl!@B04$tEN>l literal 0 HcmV?d00001 diff --git a/src/目标识别代码/utils/__pycache__/dataloaders.cpython-38.pyc b/src/目标识别代码/utils/__pycache__/dataloaders.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8accc4986f589a5ec5065be607e00be62bd86af2 GIT binary patch literal 40099 zcmb`w349#aeILFvJ9}brgCGck`z$Lz7%ulS zW+dW7vJuB{%#x8cM|;_!Pw7PYWcDQDz1dSvUv|>z&z^P$vX2+e6yl91oIz)3 z#k_80-&EMXaJKQ}O5|kZZDZLf-W8@cMVt+b=F5iqm=Sr|xby{FA9IGCjqi(T5qQg%CcBjxVv zX7*{^?QzC&H;%h!aJSca1Mc2{yJvB?&$$P8_u%d=&V;icWxo}v2b_aQJy_U{^0I}) z&Y{hbIm5ZvIgA>=t95^$b3g8%!~FxBBhG`)*14$jkaGlSZ*$)097S1gM_CU$kD#na zP(u!Pk2=S2cdW2=;T;Qkls@B}c1}8{@XoAbUWkl8UfY&FnfLOgYThX**UncQ+rhnC z@a$!;SaQ?1IPIs#jUcWHONG2QZU)x5YPA%kr!T5P-eDY1t#f(rB6I0f#FERZw3JtF z0r5n(xHMTT6~?1M^3>Jh+;o244Ps}?`T0VSl%J=T+;JDBeOV@HsdDklhsZzo73Tmc$HHejqSC^N90c3Ql<=hfFYpF2n z6)zXYdxOl$C>mq2VGN7Kbf0;{QRloCocruvlk1qi@C6G zg8_Z33YV6Ps=%hQX326bUoHiy*-{~2$>Cdr=r_sIVJ^F{CC^323+wY-11gl;Nb^NO?X{%h4z_40lDuM`)H`=?dDSh?rK zvg=jL_i)h0Q`zWDc?rO;G9OqAONIF$y0A2#jh2_HL3F8tm{*+3S_lwcDaEfIqbS@j%?GCEW}{cimuvQSmvS?CRsU`TM-CpS zf46Gi-gEH4Og%`Sed@&Vv$^xfr`~kyTlfpQo>Il;RGBq6y~DpKeq*>!;Bt>4@C2;C zH{t_;TsN*8b;C0nrWf@QZ&)jl6{BwYW+UcA8*v|ZI?ae6EwK{8^0KBnDgdsCSS*~^ zZUlbhZCg=jWC6wc#)_d#iKBuQ;~7BdOQwTjUNxq$Hq5FUSW8%CLA-DktH2Es^99c> z)(TjX=EQ-(Sb8<09z>~s`xt^#N1pro&wui(KliDx{o5b?lOOvhw^n}kPd@yK=f3vg z&wlOtOJD!w`@VMLUp=P>@t*r94m~Gp{kir+b0?NCkB-h4Dut^{>XE~2a2$aZF(a8s zZ8K`lUCg@|Cyth?v-y(y$V4k|5W#|sy36Gt!5pRH3`Wnn4@sEJ@j_*`>J)((-7%u@Md6pk(#8x7W_zH&DOKgg8NF8G_t{(A$(>FyBMmI&~ 
z%)0Sx}*vz#mao?nq8c;i=K^{1l(kwddrzfd)4x_z5M)y z6m+JtRLaj5?8^Ym=v%uwXRiaomacghtCf3}iltKZ%KmfuXQou0*szbaD}t zTSPs9?jKLwB4oJ5>3WNUbBj<&Jv!0*eh9@0MjR^CgOjqe!O)y_09poXed( zH94I-f9CY#(?Ri5sf`nI{x#j`WSjEa*6)-6#PJWZOGvFiWt;tBKbEIdgYaihmjMda(4zw9jbmCRp)Q7_UkRsgGqxwtW3 z8x{d57I!N~Fgk8dmG~O}tjKsQhzSY|GQ~>KE9Of%w@`2bv$6zIN3c9#5K;hLQ&*~L zv7mBAC)*W9`DE=^b}=!hGrS#@2*iw-8GNEz!u{l*uxkFvh^jXtHrpo&IvB=JFE078 zZzAHS)Ue;HhWUYxe3E0`!PS|CwvR=EPCDWlYYkUaiTXf z^}aUL@?yT}#r-~@GAwW}v5;(}uIQ#9W59`V82lmR zH2vP{Z~KEzWHIRv;5X@at3URl3xgF4;gE;?PU>dL@0%lCnx3jnJ$wB8)S0Q%N9=p< zQ6RPKqH7m{J<7lj#hFsUz5>$3CcU8vM%%r(JU3S=?6W~MdH^2Hgoh`ApbC@YsUQ)O zg+cVspDYJb;Z^&Y$}3j*J9FG~U#nRjcd&h#q` zE;AsNq!fZ6CS#(He307 znCF{T()_0SFiM+ck*oijaLGM@)ItPuga~B_+31Vp1WhOU-XvrQmJAMW z0miZktd^wuAYQATlhvSGZhIH=o~;VYZo##~e$t5RX{KQA6>Rq!7^bT>Q0wJ<1-LN3 zYBdRt>wvm!BB>ot3YXKxi(CicO7QbR!dkNdVB{Dis1{<^jg=_Ukb{JkAIT*`2pn{W zi$c#NtdbZ=BdmgCF2~7~%LVc3%mP}PWZegqAWcYw9=IEiVf zmqwIOyEGSAc?BLbzKA6N1twgG!%70nlIeA2mGx#}zfvgU$JE{COB z3ZLNHo$4$`g=3*U&Vcm<-Qjr67D}b&m`PLE0wwL?r2k=D!UZG^E0&2Rqp}p(86AJ@ zrDM2U8^P-JC2K3PdTn7nCDaPC)lS>tY-=ti%0FBia)JWLOud?pV|E>p#R6)U%Zeb~ z-V?q!u9p#OZ~igPOJ)s;H!Pq?4l=~%xEL~1!cma;n&~1v^>6TIbE<6YTpO=HTVx^W71KTckARU07V;eyh&zlzLjoA6PjeX7+2v$jqXWD-n`$JHnJ{wEpyB!hp= zKn9zItSY!Y(b)Sxj!Tv$1~!giW!i1S8b#tUX1pIkYgl^)Py%>{fYNf3PD%i0+UX+T zbTUpieiKd)2D=Xuf)?Vu^=^#KEgPjx&yoml{&gY3!IDvPu=3|4^%!~(0s+sQGq0O! 
zREq^FAU)>|th^ibxMzXlieEQ0Z&`=T*N=m*e8!J85?<6xE+qXp=ig004AM!LY%jF{ zO3aT#67YO8S0gE@8RWHAz-o2m?UuYBLf#(6q>Nra{^5uddj+LFZ(Oo z;JeV;Zf^`>kGJK+kyp@T&jbHqXnLmtWA8M9H%PT*Fm{&v z1Dgok)OF^`)49o~&Odp4`ogQG;$#bqtE;B2kPqpjB{l^F>Lb6*s)@sbL?^BH{%?GT`hQU`FwbZ4qkaKF(Dlrjlg;L<&oj9b|EMn_?N@NEYVxF-XGYBQ zNNvkhRWq)xT~+pF(8N_j!DYybixs=JeZsx~=|)vSCQIO0CXmdl!jZHls*l*UR9e4v z#2()oq>CW3T`ymmh5Uz~4n}MlU=swll%XDF%^blX0g*|ybQzC;oeH4ita&i&Laa6^ zN7pGT^%yh9i|!m`%t1WAw8XsqlsHdSy-Bw8lu}itQC$+$rY6csYX&jC zKxo?EV(dRLcmM%fOHqfLjq9F4{Y6}t*A5K<<;c^} z0D(^&)MnZk#xG-iNU5!230m{6V~%lr0rA?{2rvrRh+>*o%_$jvEWeoM5$=Qtevj?b zG{Nn43L5CBlk0-;RAA{l&;mdNUIuu}KF@p}Em#2jS%^Ru$2bW}#MQ4$+hVwjQ@r{d zcvy&?O!tF6=->rFqzCXRY&)JOSAfy@9xj%9@%C~XDJ>-EEbr1$DJ>nAmUg5ij=3=c z_{5eNgsu2Zhxc9hjr&YZywX*-Do=E_#>+IiS&sau zFfBF*xf)in{J6?y8XCf!>UVLYNW83FR}Rxk%-WxKDR(W-FISh{yi)mVAVEIH$6SxB z%gbHPmq6f@=MSj!eDG!logh(2SHPTH?kjj0F1`$wU&hQ}Vf7$JW=~RNvGo{-ET#+_ zW6{y*kdZdmEI!iLAZfYSntzOQ*~miJi-=x))G=_%U?E0@n~FAKp0NP*4#7^yNvZ#e z?BM(U6hFF)GS3h$pU5vk)>x3{#BM9lYeLxYjWdR}r_I8fraA@^L&!vo`4> zqc*UrrjS3H0$TeSzE9E<5|*{3Wt*z}3+Cc<(i}y5a3v<(!%ySs&*5^@2$E)cThdDI zO2!n^XK{7>MR9iwmpg%=MQC<^dqNYduSXM@&lYB8^X0%e(Efy30h%b0e~haCzk%{?%nE>*VdZz_T=1Kyw1c=2L-$4L<4P+;St%>v4)02}B4WG%L8n^U%`X9{N#&7LD zmhRrJAM;=xANDm{x@*VeZ_IwG+l^Yzn+ zgaF*o=!C10qE|2KfS?8{xt~4Wh!MfySzLY-g0w869iSy>1@Raz@0t?$ z9vGKu-6sG;Ro)iT#IB{U$mE-V7Pgkqaa;W%zIEKtaGeWQ{V@Xp;{zIu+eo00vFN?2 za4m>JC!+z&EvEN7(ATNzwd1AIvqdPyuegfxZS{3NwB}01B@M1+nyi7@4l@#5tN$NC z6F~nN6aE}QR!B7J0q+Q)6<7FXj`SEJ6x|E}`hr)C_81w!U%=<+@G3amnsFf<<_981|DZ#==Gl7;YL1BPcD|*tCMu z)rdFBw=1TfWGeJ4v_YEmHect{`@J!zhkUQMMeZkY-z)c98{1fBz02=v*euB{d1HPG zZ35R0a01C5-rA0*JG`w6cacb_XIl3=<-R+7x(ns>c)MFq@8(l#d|oz|KIQFk`q?u8 zL5ZUg|JF+Yr4hhH|` zA4e(oIonu@ph|m6R6dyK9rO$rcuSmM<3_{@BbbbsOw>DGHz-74ObZfZ0fVlh=of+n zD3O-#$aNp*nkJD%K>-Xa_N-cVE%dmaq(BJo+uF&se*u>}gh1Mv(e1n*3ORJ!47Cl< z@7So*Y!r)PqqK_a519WC5!9@Si3xi=TI;=X<;ujh>aw>yQ=oZJt!wp7Qf?+@Xqy6~ zmAPxR4dLUQQ+oK}LkA8Wm^gT7;*i>hGRKWlsX8uQUAHkJI#vru*I0uus264H25XjitdTcSBxok1D7otuC 
zEF#2P3szI8rM`yjGF!%D>#RL+!v9?Io6sC<1o3)L*Q0>wxyia zI0Cv>6yn+rOu7nc0t}==|9Fsblx!3(QX$BDYz`!hI^1HpSjwx?wR_|}$8I8+@%uG* z*d{9vv0e8v_%#H9#aGkoPIpm&?2rUg)4jfl-O=$+R5iGs1LBib8`0CWeT;qL~BZikf6?r0~^GFebE$%%2wde*_P;_EgCKI4mL3#%?oi zCXK_8V#Oh~8Wm|)5_-2`JnO@M+8C4HjIrC;3(KxC<9?<_M5L(9;<(DF#x`lLbS`Z#tejMnI z6#osRMTN2#@<6>E<`oo68Al_Pz2w0h*gF9gf&0?w6nHmt;N2AMq3|4{eaB+#(y$<$ zm=>^Uq0F0FD@qvoa7Eb56^KBWVABfO_dXj^QpibcF%-1{P?de=oDHs$q&}n`6CImJzBP5U+aU3sQL{4`Ek%&2rQB7hd z&^m@<6^c&MA=lp=Uu`7Vf#pyDhAUhp-vf@3BET*Y@?|vl*U?+- zDQP=?UGiX1g!E`d$V#MbV5$|uFxiG}alV9^wk~OK1nswkG!JeUN$dVh8sc6dRY1T?huJK-BnxQNxl~g}!(m3Jo`TQRk{38Qlk-7R{g#$x# z%>-{_ZikP_4l4p<@Qxp2ob2!_0c9J&lK-tEqmX*!e|HP_n>Gfbndk`Rk87s)9T?w^ z#njXZH+9jPh?NGdq7B+>n*8d^Yy?Yz(Sqn3+7lvVgVAUFz@I^u*OlBIXNHe5;K$ur zaz|$ULDtIk56Rs`vLEd5pkjK*EW^E^0$Bo_zO=3ek0BP<$TU*DR|ln}~n-d#6t7!6?(Zh*s}?pDle5u;tP z6Wtgx3=pV2ftS?JGhmxRWP{TN3-K6kV2vm=j?8K(DNQ$zYes|D7Mb?X*dK&!GPUml z5p+lzik%Fwre3usTeBL)leVIqZwVB5idr$rtV=$G0>#WC=v`CT_cyDGn7hHb;%*k@ zbo^1ha}1YD>ZK(vh4i6dEyQreaV2miai!*C*|f7&TgV5Fxa$)W7pACGMvYCVT^7(zn!%m%1JSzs)z&wXed`pxf($ zr9lQ(2EK2!EDSyZUcTFh)S%wu_cVGy%zVo4_Iek3q0fiy=VtKgn}L)l__;~aru)+K z$k#8@hzw*Ic=M&B^g8Y6)i$b4`R35V;-j>BQ zBn=-lB6ZM|wH@9T$phUPa>CLSVXwCpYZUeh&x>TpLC$yfy(RKi^wnD;-ZqvF`GuLY zTVMHc$Su%Ecw#$!o6z!w;l_5SALz5cHs$SDxC{2KF=ybW+1Ls5;TTtceE@59a1;7| zVHYJd?w@&M&x5^khBiSa!jf3eMwEr8rkmkYe8HaZ3m_gg0EQs3FdKLK{eA{(9hMa= zSGV&Kp3A^^N|(%clBiZ`>J%TnRDAL;CT(BmN-5P5uzVQGWx% z&HgaLG3XgbyuA?nW_*m0ztP|5y}{XX6RH}tbL*yvl(w&NkF%{Yp;J~M5dld%==8zv z!gPPnN2q=;90yx!+b+Ee!@cJID;kWY-I@+QHtAt&3*kxI?+v4&U~7$haT! zw?V44UEbIprX7^D6w*HG^syd)8(JBbHfM(U^)uw{eD-qm(ns-*^>R#|#qZ(zUH)C+ zySte#pF?3$#dwsXRyK!PAg7D-DsHFPc^*#O` zXV+o`t+yDPH~mreMrG8$8|l`|milFu?BBgP(tP%^b?KKeMyF()Ff)8}DB`C#LsqzP z)o9ax2<@8`Y3@depLWLa6-Zs!9DeWPe)8qGI`8z=$D8?@zb4;4gX;;%`SxNx#?+7C z%{NsDchA=MdQW;&*Nw(g{$4$MFi+m#?e&iNZ-62dYX&P0>%n=$O{>1o--j9FamLi| zX_fZ({y0i{oa5Ozw-Q-+Ga%o1WuJe~b5W$9W-s9PO!)hR%n`nUClJA$XKsOF`}}d| zp3MO5ERoD;g|4Ggs^04V6C

VdsFLe8%T*+RBc4--5{^wE+*Mg3z=WFtd(xHSo%VtU~5t@J~7J;2DYW z+){CN5mroza3@n>aTlFfLSX}@`v_@Gc~dlXuca@{<}2I-VBcNacz4rGixN>yr&3z` z8U&8Cf!b%kmbS}9?1in&Bc8MiBnH&G(P^c7JMo^)O${@F-B1zlD)55RD)W1ptDm`gI`@^Uf5#_Z zVjv#}yF_fWR_a%nLfb|xD_16vh}qi0Y}Lb$cq0SCL?uA$KQi`944&kJ-<24GAo=*I zv*&V8pFb-sn0ks&&oe0TsYTc$R@`OmJtu_8g#bmi$u_q=>E)~5#P`VKD6&F9`my8Z zLxa(_P+2ph(OMDv<6tzh44cVha|60enIIs#9nftRqX<7dk$n1vWA*zRcI_4BE3w#8Wc0migc^gDVWKGN>_- zDgOXsf#C&40lUFu;KUH(Ff;rHpP7|1t^%%E>9xRc^pc%pYs?wik~Jd^ zcD-Z=J8p9_QUbSN<4l1DzD+w52#keL;Gb977@QMei*hutiU9jS2Gl-WMFsOS+34JC zMLo(YttDUw(Phbc#cKRMHdZF`#~2eS1x+3PIkLOIh)ATHR;v8(fgD)dtoE5EB*7yP zVj^Lk$BsAq4GWVj!0pz!!dl*KZ5j=&Q z+N6eJhfy1QJvyP*GqGQ(Ld{PeFKq zR50`z#{#JfLjsVxSD#DOGxhFzPrbL^SMRS6kR652qYp$YG$MXKezAGMAHZ)Ms{BFx zCa}%((>_EFe(yZ?d;1x%JwxhCFmdSihlIg|S%M!!`(nO@FfIlZ2|s~&@|Bd|mFu~* z)D|4TaG;ljF9>`?4S29>a#9VLScJ?fT;+&~#Qk_O=(7*BX>6AVhWHiPSc*-f_*Q-$ z8FD}ButlgF?M^hmdJd+rPB+q%`0c@OWMLSw-lLJ)VQ*t&1eSE@CA7k5Y;yX*QjU`F z0Lx1&E7l4F5sC(x21frF82!y6s`xrY6&oO`uv_gL;27bJK{D&Qh5T9wDmJ`xLsL-U zlXe{nH9yBN^tIvj$u=w_f|oXCQ^$Gx7=sfGPBM4`L2az1J8Dy8Lh%V*7KeR4$xR`J zxL`FYM53sc){~RCxHdRdxB_JoP=&2GLOQVA-_$DlgggNv14O`9Rl7FG9l50Tt|+KZ zh;m`xKPJnb@Ul==obm!i1+Azns4*A-8lo>20lqm|A!>$=67(im3H{n*5fXoaS(?$xR0qjfs{)!3;hconzl->6ZVvls-FZbh z{aRAy9Z1_nQf3mBMoa=eNbLc(On@ZwAL16(o%)})#@u>30d-j%peGG*L|~Le%mhdR zCHz4T?HwP&5GiP6&i3O232jUk0X4-L~oNkK>&;@e^1Ir2m`Ot3EW{ z?J#^MkwG=BfWNQeZ7fPGKu8~`HidEvp;_Gt#aR@R2$Kd4ivW;*R9(iNA}C&03~=fI zkXTyrRxU^{pvd*G=L!I94rMg3vz*FVl$LO;5s|?EhzS{%yj%H4|MrAgu=_dfp(v9Q_5;QG_W8(-LA27E-Wx0Cs5!{r%pM4{0A|$9VU10Hy76 zd&1UO3&YY1{-Rw@FGcVjEs&gH3rzKH^yo&mVa4pM1AD@tSa$}b6_BnX4tXl&zchPLyMoP#=0OZ3yz&&irn)75NEaBZKVe_gI-o2^> zYfTBbM+sw%Es&XSnxks9S8DwP(zepf7CsI-MhRbQHMTjBYBAl}EJnE&YP5BY@*Z06 z_hQ_)bKL#-9QK!uk4>p(2?#b~=4%d~jOZkIAevib6D}O-6qdo^(qhWFA|V)mr!cPy zpoX;{4mt;bN}gG1xl_nvcN!?^mNTlDq9#cF5Q86P@F4~tX7Co&GClwIum0t4JUMgj zQOHkrF_qv4G&Ib0gJ@yN&Bov|D|b=|Nw)ufy>Q7v5R zem*$GwA~Cw7~EiRHv_SCc`LIF@Rn#n{RnSKhiYF4F)?}w(v;5RU=jqCtix=gh$Z9; 
zEo8ttRGh-qRp0SuW|Kx}y8GgRz`Sxmo5e`avUcvVhDnCd_#n$m*|Y}4ImkuUUtM^Rl9S#$IbAItF+@hWn)P1TcaH9MB}D6`%u4+E^D)5d)UGyUV`XsiQA&y?!g739|hec+g5#g7XsD$ z0+u^K3GHJq1zpiZuJCG49_z_;<%=KJ4ZAPl`sRGHr&U>cdmAaiDF`qcT{ z*)vn8j-L{fPs1Ljw- zD5u*9vtg}7JQz~pIrN}3pCU<4iMTdRNvbuS_!2)RnRjz=$pkknHpB0PD5ix+OyMD5mGH$8r;MVL(npCCa3pt!#*DSpkbG8zO8~c z5~PfDKxputsXe*M}Ozg`W{7_$YfcHgNEFr0nX`$O{vi_rgt7Vt%&cK}ZGwHljVS2U4Nivi+{@;ruX`Io1Do8swlLy!gVO5}N;%DS z0c(Nwvt9wMBh=1I-v02Hlp^0Ht%!Bw4BfMm0GN+H4+;#lzo-}3YW$ww+Q=zPhyTpNQqrKm^RcJX_*>v>yrrC zheE?SY_rglV3ooGRqRAQ`4kX+lZ^i$>-+%*LaDS_x-a7Ce`$W1X#4K{&cCc>T7>pT zN#JZiY2$|McfTW9`fB`+kAk?Cw~vo%+V3rFupXb;i&ct z39cR(5#E!q-YE(-1tVHmz7~l_Pyw227tw!^?q15R^G00o%(foY5=2z zdBW$9Xl$kaJMaQ_kMUjRk+_r<<{L&nU~FKut$3P|ocgz;#>$@|Ut01_HS_tBL*CIb zhG7dUpGE#I$^XtUe@yC!y+!MN#+&&+i2NDJ|0(_4k0bw@cO(tcj#ckHgZH}Sz29dK zwCd13B6<0xl?mh%3n|4OZRPVBTa}mh%nvIih>JC`|9#>^0D zOd})M5w>A8j20^u2(bPB1xgDV@S4zn8+MFggv}dKV8=DjfE|e&qc6sQ9cf?)?}UOK z2Y?;>eAtQ!cI@{DB$o7hZbU&f0qgp(E7LKt;Yowi=wqtit8sfcr@*>gItJWpz-+Ns zs6Z(nGnW`W?E?b?e_HgUgSUZ&+jlG%R9k}LZwJ1ANHw>7ifv* zv$&IPJsZ@|PzFl&2j?N*ct7X?v}RDgH7ef<3cOWrNKbU&`NRUfGg`W3hz$vzk6!v0 z-f#<-Z*(>YuHNWCj)Rff=xhvsA(fH)P4YY7jNXRJH&1DYn4bdJ0zMWcy#%s12Hc4$(jbI3NhyGyqi2tO;JuHJ121 zZr1V)#QFUo@Y^{5+hN9s)Lg~1sr6vleQ#Y1i4;vs2B zU|w8;z-dVvpL7xz53S<(&8r7NjNrO@2)~C`krQ8J&A-T?O<2U$FEK%=i!U+u?-_Jb z7eQtfj(_OSNoJ5})no>VjAV<%4H*cE3Vz)H4)zLgq&)470Ho~?gQB|V(bLz zF@}G-HX;c^Dc5ZT$h!KD)s5?zNMsLakZ}T+JB0v%fEKM59X3#41;2^fa&$yf8WatM za1r|fjz%txh_nLy{ECSf1zd=M%K>KRI2U*_8iAy*UL0|Pj)3d$|rtM+10Od8zDvnD zJFzf-21;w_AHYfe1+*vKj`0xf++&RxEwQ1;`6|SBkazgt9vew+u%ll9`m%H2%n!SP zppSr~as50kuK2kf3c2gOl^dv&^@3Vp3PT6-2zqfR9&J8x;L|RA3L2;R6nWCXwLOwc zN@ThEeer_FJN+i!l4tU^JVOk01l8^t*x14oUjrP!P!0*idKtU)G9(5G`SMZBvu@-9 z%@EErZ1y?|dh^ovpmuolV@cAEgp`@|Q=&=0RxiPQmoToHbtFEV$A!PIJQ_wK$AaWc zky~T)Y7+aBGom%f&E=`Tm2I{`H+Fl?SK#tj!LzWd1)9&G4D4l+X)L02PGf9pE>vK6 zqiCU{c`#!#=xPf+aaabnX=8(F0Rr{Sm}WSegC;&-=A;(BN|ppy10?FQ5CmqeSqMAu zJPYfc!&$x0=5SVy12_!rh*c0G0blPu;5C6%QDmEN#i8I5`3z`_G5kSroG4x!_C{zurnS&8z!eF@wa9J 
z$@s>#-ac|{u8o(1ZY|EGX!q2GY4ziH055|g@ICSLV`omNpI}N)C;~n{bt3!Jg~QoF z>PX-wmy%|9jHah`SG&?ATvP90@gHaGRmOxFrl?aj82m>Bm;*dRU^ev9{vPIKFRcPQ zZP@by9#DJaQg%%r?ITu1Z*J%e|0eyeNMc}jGL?J+8?oZaBgwOHd>l6o*=mzb%vNWs z2M;~i0hKT+ZOJgPN7HL9%qI-0065(S6Xl=F)P*hIM6!ht4!L6RH$XsQXPy4acvI}QT+ zAW3&wH06S90uNVpIa&6gpDn>6%e7oxzMWGygJ(S8f`&(FNZ{31F(MsZ*@Fx%06zw7 ztUCnfFQ6+oi=#`m1KkE;9Pd}KImy8|(z6&&E&)Rzoh|NI=oB8n5}kMxB0ji8`Bf_2 z7wAL-YymdFq~I{~aqd%vNEWt7P=Sgf7Ob1>QiaaPTIdESOSJVi7}y@2dmc4WHI4cb zXd^%%#hfTloRGe>4;q_(IJpzQG_WZeY;Q^;lY(c%aDWE|ecOTvV#bt+0*trW0HtZD z=SQHN*L7s@y%{LuvDFA+WMLB~NH@Ml|I&S5&nt0glHiPGVU)@UZcSo*VKWuoH!;Of zR`})6A%Ph1WAk9t1YN~}Y|08tH6&e+(Buh@pRj{rq> zd;JX?IpKnJp6|l_>q@kFk2m#GxLFCG#^kBo<7v9F9sLM9E{MRz7yu!}q+~fo;g1&Z zj!ff<>Hd|s!x<6>F=z$c!2Z>x$7RxQmg;N}#KfZ3Yk$PGVvSU!`}tYGi=4 z&e?|f1&QZ6UPfR)73uVVVrX;x&m;Gm$u15@uX1{`C}B;u)BQ9s6HIJzl+h~Asl|r~ z;|G&Snll#XiZH{aFb!KJ;nZpx=JN@?b-@ky_WkvvnsFqeto5Q)dk&sE;2V!NQR8>% zC7$Is|5paT&)`eQ3yrP#7!}k<;t6YB?2^^za3j!LYnlW+2+@B(-z9j^_PmroYO?<8 zj79mBmc2nj95~#2NXshU$A>rh@Qr*JD=*SWco$R4lAvI}R8kAfLH>Z2M`CFuD#P&} z4VC^A^Al^T?`0skP^kA^@=9q&8yautL&1?RGA0B+DgD3*H% z&9tBdaSxJw4BL!{1t=X5;Is$v9&-fgV<6YJ8tN}mc&#S{{rj~BZ9F>vYro3BZ#+8x zp&#bo&ps;qEjw%@3Cnm9MY!KY;8A@8v4y7pAm5>126;!+ zfE}h-xll~NP7&*c{?uSB1i}%bk!T%`^C1AC-z%&@mV!4R*17@PJs=Q}krc*ADo_m8 zeUvIJr9HOow=RM>AP;ew7Z`2 zQ;l9&6DAvddb zw%Ge|CW~iF9(dlx-Vd6-*~B&b>AzMU=dG_8n+N~0B@(tgE$8$Mq&6PW*u%aW3H2=;afgW=7u)`@#XfxP)BRoc<{n9iy zjO@cX1f_kpd`y_b-oy2eG&%^@^|KOedEsG5IBlg`DP2P`PvOVLSqIH3OUHDl-V?i5|pTG-!#o*a$L>CyKAaMp~UAI zlO@$!SBg@O@8fEUBNFa38f$g#Du~xo5PgQFpx{v4MG+W`gW$hU&L9!RQEjkQLQXc_ zaFA>L?L_pbBn`Ba*uPA>trJM~4B~Pr*b%)FL_3(PV4yZr35U+O+1yNK66B`C%|yBC zBKktW7~IT&<5*Pe0U#ApKDr}BFY!g({xJKX1^2hK+O?_&S}?yhdb~M*CMZ+Ts#*Yna#030>o?zAwVfX7@ovsY(P6D(aZUp^aG3ESev;!)A zP|TQ3Fb_@19sD7+Mrpg>tKS$wOPw8XvzV+5VRYgsdy8{dyN4vzTAK>&4pSAB3tmPA8r4Zd>PW`Rl4Xh{5ojR@JSk2>Rw#!|fk<�@PY)- zPbqrWYN?iWer@{^cz}{koHpOX(LXqkQvn_}=>ew$CNz=W1ynCCR9D*(yq-tH*C*;f zP3S}fN1{F%sqMbq+J8Mbe_PW~j8@x>A_9Ee+C>OsL8}7;KaLDk+z1%|JZ{#*yFbfj 
z5$A}p#;(_GR1^K))+W9$-8F=WbQhc}QkAr+>Bq&;*jjM@dQJHvUi?h6DWZm;GV8XA z*nd*dzGg+@7>Rb4n5jaLz;O+n$iQ)hh+$Vutp$PfSqKuXBNGjp5Dk_#eJ!9e_1_qf z!4MR^wzJs6pX+F$l=2oN*G9FO17?;ST5I2j30GP!SKNJet-l$!9UNvk3(BlETq(>8 zO4R#*N!wx1pk3Qn!3m5NE`k!)io+u%xJeiXHqLO9vWt@B@JEn$u04%ql0jpGj6OsW z!Y^9k{vq45$#y^$l8Y@gw;|&w-Rfl?*{26Wnr$DkA>+iEnxGV#D@4|+-kya$E8FA{ zMeXw}uIGjvnkkGH?3!i4wy8Tg7I0B-+vFxLIiOI{BC&lig*Yp0vwpI9aqS>{p*0eB zu@srWT}+!`U^DnAgD3G62BkmBOyA3dKW9+m?YkKJ4aVNh*n1d!KLdw>FoRN}kQ8M9 zf`stTu$=iZ<{$^HDUbEGsC*DvKFY~JkucReV=zOedM63H5%PlEEhiQt*g3}yD%_Sb ziu5srG(+AEfz>q9$Th}s*$^TQ;L;mbEIgsZm)qS3q0Y!LT;e=%wfi6-YLFLj3S8-m`)pMLD5J{Rr+7zj7t z1waT_qCB9p#axMu)a5zHU|qzcULO0Mgrx|HOTyIV877>tI6=>)pG(byNlf}#n2qVG zO;4o|J7@_-n;$xY(`$syI``J;$Df)ycYONshj;8e_w32=eB~YTzA$PBzXMKf&A08j zJ7m8nc%<~*`-GE&pY94ZWDifP<&dG0@(=y*lrJ2nSKS}hlapQCtZV|igzx@N*~KQe zNcK~jN{}EoD8=3*{oa0tZpT3ft>>$FLU0PruLDPDABQv*{td{7Hj`T;j6-F)aZT@$ z6R)wQYtq-RWmfmuWQSmaxIT^R1ZO5y8W5mDC_(?xD+UN^?gWa}?&r2lYPLvJ-!xwu z<2g{;s+R-^P8FurZuICXb|2dc_km!EFQzH8uO~sS^mq_pfQ(8$Z-!e4lkZG6$q)!> z6Odto#S+>S@+ z`2+u4g-{$BwUIUJu;m-&HbhT` zo3PF$?5CIuJ`ymOt{Y3Ea!36#MO>I;u#|_^Rg0)7(1rLOPsntscZfKPaIxu1lKJ0( z{Bh=Y5}}Xq<{PH_ZakASXq=Rkh?#~13=GZ-K*_GDT70i8#M%hKAUm^`{3#fw)kNI1+(RKUdPs0`}JNX zxECS>G})KX8`}GW2poQww;XY-g&|Hik>Sdh5As$R{xsya;ivr-dx2V>NEBjM%rb6t zw{b$`oqMCh@{%)*op=J5`#b_DUody{TysaD%f~%8w&0{lrbn?a<1iRPh)ywktZ??g zZ$C6>P!nMxAVYK4(z#GX}tsJgg29~01DzHx0Ket0a`*Aw<0z1d|!)5 zVzSnEihu@wZn-o`qt*KM>B-iD&`RVW*iwQ6xCAPbSVcpf?;_NZF-h`iIuw7?izg*K+NXvn3g~$Rvc8WYn-=cK z=*hsf=WPa={-HHu*ri3XiI8HeQ5(K(#N@MYy&I+}cSbb>Xazudh-wzN`zcy`4x1ja zoY4I+L!n754|`fj$lfS!5e4tB-h!Q1lwc#>hXE{fXpBRP#kg#Bq`nTPwTn3i00<>0 zKt>H9qb}Lw4$`|02DJ8W_zu9(6PR#M|0@6(KtA0tQP3^07HGhb47GkDA4i+Ojd!%? 
z8<4@k!690aY_%tB*_?@IeK72x8BTYjA6pzC(}&?N&{l{58a4p;V&Ll z-$EDNqN-snf0iE58*Y(W!0}eki zzSiq}fYT#>QKYXy>`KwW4EhAKypLIg{wJxf{s+@+#=gNo2=Jd^+MhAt6l`Ls?MTQb z;86pps0{z!IjD#=)&7&rN}8O0Idi(sEd37YYCGo~4yz(k#HS?yfkk>tYX_b_JOHYD z2T+vujWHmU`0n*kR9HaAEaZA1(L(^@9zkPB(!3YW44o)6Az!FRZveDIJ_ucm?DX3Q z13(&ZgSfer@#Luw;sgjxbTi^cQqUAOH*rqF|0jv~n<$MW6eM!cwlvZ!xZ|$_PY^to z+)05{L0&HfLJC`$AS)CTon}tlTdQ(+eMBFcoFPJlsf`6qjI0dS;&gw6?F!rI^Kgq1 z-1j;?SXFVa&!NdqKlL(B9|0d``2fst`XQ4Xg4TMlF(hxpNAqfZjZcH_!(JbyReVl) zB8+6^ecqGx=-jS?&g3-JY(8PT?pj~Oj>sPE%+3)Yv2X#`D*yj)2WbGEy?}FQv``3S zk|w|N2bvNAb6q6uvYZhFce04ZGNR%Xf)(gP$(F0n<94mIfGUkGoN`3`B3hYT3*oSv zG`HZwCwU7$=cK&$ef!kRiK9)~3rGVlDgawC8Wl}te5neVyt}pyUtHaWI1pX!=>JI@ zbn)jfK5HrhsVWWM%e2`5rVL;L3{J6g zak1V>{01p-&e~0pkd+v9CTG)7CDAnzj@wYgW-!@{jf%CR};&N9uojG4F{b+74>{p=BfM{K-?Y9;+G?2zwi z3IsH7l+^-ZeIr)4H9B7O+zBp+TH-?fvQRrwc<9rX+Z5Ho8vr-xI`F|5r0D1>M<-e^ z2o}ENIpk1%Y|aaeaZt6P0qqpDW$z9&TO*{SOxw%M6HJ?7@FasCrU{bz56nWYE3nuo zuus?kkF942U7W7>F+L_<6K8}oaB`)EzyFNsUqF!6UnEvdntdW-!mvu0$?HPQCYw-T z6Z?&MU`=2p!7kB(3H&vgegBw@!Se1OElU`xf3Pgvearf0Pljii4uR+$fdpxI9n94% zx?^?_R&sWnz~wF>a13CqX>l(S>W&0sjb5_kcyvHe!BZfB0r_D@MymoI-wbkCZ%Q^7 zk}>E^$yxQDUsOS#|5udAy&NV?o1{-N0mHlL`Ph_K5F5Ki3kWK2q)B-RQ|`r7&S1 zFO`~VN^0umG#OZ(wZ~Y7R|XXw7P`>6!m-=Rv6E4hftFDu!Ga|&-Thf+m+_HRCi1;C z#6r)Ks`4Nj?SaQV${`uRYc%j6(LjoIC-_<%@4j{IQty0q3L~9B<&#(>BW#2@ISC^B z6KumL8T=%Jf6aj0v-&-L0f}fuB3XaSA;#`y&~B#01p37B;J2~VUG@cB9i-9&plZ^| zbb26tm*Ue|Jm~mihw;xPQ0R~yCWKF>SNBvFWQY@oTrl<`@K!dC0ZZaIme90F+eMw! 
zl8)MMieTuiPn~`0@_lw^7CX!{Az5xWj8N682T@d*OSloqY2y#*pyE=rj0{lJ%wD{V zW07&XQ7au{br>`__RKZN)aUZxWLxiwzd`EznYyx`cf~YBwYgyoYC5@Qo4pGzT79OfDV4Z*-9| zB@lPqdxsb}MfNgfQY1={0(BeuIzhU^V(&}`QJnUwrZF=*7BEIu7O)UJZSbx)?|_jJ zoG1o2A|rwN+H85#j|y^8Z$_Ck&G-_E()9d!&cDy|wmsKcNbap%2b?@&j&`VXpqt~2 zLqaM=9OJqk^#Y>)6_zbiohqWBPsX)9POc()5Bk;&A6&c4ik|`WMAPM;I3!Gn!VfaD zaB<(qWujQe_cHiC2LF;ld%>{s3;dD{B0Lp>z?0l(ZR_iQQHR6;>}VT&aygUzB#6Xb z3E3$DFGiRj>IV_xfD?M-fuoOO!jgi+x9H_4e8Vx|Df$TRVrmKOXdH3^$4kKPl0}(8 z^9_>INKK1-kXR+%OxN|`4vs%?dLX^2n>Qi#f=W4F>4QW8$7O;EMV(+D;4#6687k%% zC?T<8H}oOqH)s?A(oxc!1pe`vz>t$*?p3+eJmob2XgJSqm}k|aj-^ENCd+~ z?}VeGDS>Sr^)2AyYTNHLaKL=pSXO;9U&f&ER%M3oqU;hymxM-m`fI?;?egI?l*P*&Q;2Rz05{cQi@xCgjpm(T5qEVCFGSRq3ZP)5J1& z9BDYR7<(Q(IN@^@@|? z#qY3oYBk8*L^s+)!7yVR84#!h-TGzn)hb_7oA`hx71}G~7-L%)Y-O;Gfz4n$gB=X+ zVz86JE(W_9+>IdU*ClHdCvm+7p5qXHj@f>ZL7uno;|p=XD;&)zZiWME`uOP!YMu|z zG23Yd(+r9XxCGRP8OVxQVeF?F{569JUlWTwp_9MLTiP3o<(p`fg)|IA%L8SVo9#M7 z$3BzQ61_LeD&+)TwL%0k$}&kFEmxi8Qb8Q8yFZBfEO07o|EcQH+V2d=X4*tE6c4fH z9`mG`iCS1R{O^NqoafgilgV^)G}(p5ulN6#>B9qqJ-s~_diM4_myGwlE5iTNed)d( RLnFzJk=9q;|!{{r+=4}Smv literal 0 HcmV?d00001 diff --git a/src/目标识别代码/utils/__pycache__/dataloaders.cpython-39.pyc b/src/目标识别代码/utils/__pycache__/dataloaders.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..57e635bbc06d97e9e0d5f6f6af359c2958e6379b GIT binary patch literal 40234 zcmb`w3w#{cc^|qnJNv}q4T2yDlEbG6iUc2`WQn3AQY1l950EAy$=XO>FE9&m!F_-; z1Cm(J+KwniRBBpm;^z9LbfMU(Wv6!PG>#oNi7h*En}^#bt#doS+tf|$ICPyRvC~{f z*RlKme`odufUQMrlay3OUL9lo{r0JBAvi*xR5M% zq&qaWX%te$&U9z7E8QjMkwSN|C*33YXrZ^*m+q5%tk7Q^NDoLpURYBcOb<$4FRV=u zN!#nv>*RO1xLyxwq2fq-Lr`O5dZS#~l-`8jM8Pa>PH!%5NpC4`O>dPt$-=hc_Vo7R zj`WV=ZRy*JJJUO*yrVE$98Hf(K2_LNyghw;ad&!maZh?raV$Mn+?(E8+?U>0+@IcG zJdi#h^*Rf86c45k7Vk{oS-dNKSMg2hHx=(r-(9>XeNXXF`cU!S^u5Ky>BGhQ()Shb zPv2jBApJn`Ncu?eX!>aJ!SsXDMpxlj@p$?;^4;kZR!@4|>P?@t`qB^OPUWJthpm2V zU`fBKr60*{o_nq6F=3;GSs{;(Fhp2oJT);4SVb0KSc?zXw@SHtN^lL&&)SdD{ka`zFP%GR 
z9atZl(X2bHgLvXkG|um|?!x)gIDgankaf4UaVBitV;w@-GuFMXx8^utfGL#l&* zhn06+J3qDH<}5zdy=WCj<9^rC!qh@BS8}s%zFczrj_krLPc!G8Oa9vFLe_EeGmDvQ zd#(P)1=r4&oSCv+bo}IWd44f7pLN`vpD5&9H)l_kulSK-{>s9<--n7;xtN*9V9n>I z-TbB8Xt$p_esuEaxf7G;GRIGy^`noTK6&!QS-)fA@y9Zg51l=6^!PcybNYO4`a&ic zTfa}8+PRAhc{|6h^2t(VFF{qXdq1AaX3WTwl7?3ABC?m~8Uwvh9? zoSeL+a{xWeIezrYV(Gw@ML%(6@$z|OkWDhnLY2R=h%5>)bR%xro<5&x8P7~5XO}ZG zdE0R-htIq2ymM&p-r2l+eqm~Cx?J45P{1%17Tx@`vv;vvC||mB@8$f3{N71Bn=kD- zw&1ws;vP=gXd)e+D$WD=m1ccoZaz2bhv(*J)8XQL*$>Z`kaP1hX#)w;%ekrf(T;RD z>&zgzluw6eaWPyxcnQe`Cgn>$ZatVbif9Vw^RvG0I_dD`;-!lD_CjVVYggZn?q5O;!Emqo_Fm$p4o^-Lvv`>)0Q+_mpmS@q&)#BeNng2%&Xc2_J&?|d}AKF%#Y@-U>7)kY&PdQ z`AQC3QXkvrYxA#$?7PwG|9TL~i9=8S#czD_>!17dH-7CS-~8x5xw-V&Z+`F-Pk-Zs zzxj=;Fa5m#v&1U6{vXa#$DT ze92Fo&t0+dvpL5FXo&Fb@REEdUpp`FqLO>?Xg^+m^Z}eXadt=V)B3al&3*{^wvpyk zvW86M+mP`>bD{e5EomqR|7=`o2SzmMfxLsDj zZuuxBcyoqBcnFVmNT`JLsFuPo#kDcbejMj*L&XKz7^)IZFf5@Z43?+Wgq&H@WnlCR zhCS!%b733?jtGt@ju?(Oj)WEdP^_kTI(9|a3tPs5X3wFmq!&gB2UEKbX{}?4laaC_ zhw+Pni)Jx|p`?jFK$4O(Uv_e4);x3SQ8P~jVP>bN%eIv-%@!8T{EV4*O{^|pD)Yo! 
zPK}!_o7r@;vt!cGsnUEQJDoEx0Ze0T&GL-73P77*bkCPdd*<_nLizIEGwNrmP@dXb zB>1=YI`;Hl^kZ)wLt#^xGQ5H>*wLM%NqOKQV7hEkb@-4uc|I5P-7TAqx~gl3?3}w` zmkyanORA14&V!B%*^glOM`Je$CvI{H-{j=nBuuhTF&AU*VI&j2!H$e}3J{R>62`)@ zNwxaj*S_+;knsgn;)`pL7WCy!2^$n3v+UvP2q^qI_qrza;*KjtSU0UFPp zIeqR#quzlx?E`ve@e?P`qC64Yf6qQY(Rdv{=9Z@xUEm!fUz#ZcLPcc0?4MvK-^`9g zi#d$2wS0+owU6O(RCTr?3B}`Br*YlTTK@k&8q@#Yz?GyH)w-~xP_J$MaZm?Z;BEXfLZcBsA_&n>HYZ+Jd zVW2q606=)3r%(u{>$G2Y!*l&51L=T^`c~q4!t0qK z<(izROgwq??8K>wlZVVbdu$M6X5KOLz%oVPk^EF4XI=*JVv-_JB&F$`UznLG!@wJ`BLLMEohuL22a@H=P z!}d;|^>Z8qm^nvF)}z^}99|-vFS)?eK*r%hu4G?eMPquw%K9lQcPT%OS1m7;T$@-I zbm{zpI>#;9D>m1s-*>!8l%G>wwuyKAzEhr80j5y7jtn3* zNFmooVOQp?Fsb!b_%b>n_W+yYavV!hZI3Xad11ykvNkx;=ml((SqGalk-^3Wx0uN^ zXWjniXoZJ@;6ia$)~{UJWA(nZ4GL*Mq9_H$k#R}>XmgNHoD13eQ7{^|U&N2i&dL&e zRkJ_B{IbOt%Ve-YGno(Ln!_eTT4i|EZb-GZUeu8!mzA>xi5qe?Pna6WMtX=S(JR1k z;W|>B2GuuUb?l_soOIDQzz(=JJ20wCAAKztDCsNri2Vyl{sj)lKoZh=v|Ejp%n7Cz zb6Ub;!~7&OnPS;m0CdgxsZ0jjwGdpvyIbXH%nIkh{x}mp$L|d0YdTjb)aOjP@=;ci z{&3;HABS)Zam|RNBJr?n2M$KtA4lm34#z~&y2oUHgFZHbAEVp>_ozPJ5_ zadrfUz@)~sb_=is01bepVa2V40MDe=LEventWNyKtS(G;59A09_ zabExHK!%fb42NT3=g)?!5sVv!L#*RwYZd~P}f)j3)WV*Q|f*I zb-S38HoCp&2SZlmWwiRNcF|OW;6*6tsKzi9F^E0XS={aMV&IT_y!cg34^1rEg2bWix& zu1NxKkZTKI_%7vM5APb=^?J8ym4V4hWtUu=|{-I!--yGBbYq>|;kK&%LVKccD#R zyVA0Bg@i~RF0d;gQt$s=)UfYGQn{_Uwi^4gMZnUy}6x%7%%uVqzV$Y@3%r3zrG`79d5=m(0rMG4mWG z9c3HRS^~r(0b|zA9ZD)<`jA;kB-LGq%+U>gG7sX~akHgq$cOmSV9FK%DnV#V8}<=C znX~4{AU-J0Bh}1g8=vaB%=2rBhd1nUFXFr;qpJ&}(%AzOA?l^mT z!nVt{LT_v*^Jb zV9i_zGCJl-JR)y@Mfw)OS(F0Tr@^yA^rSoQ@j&;^0s6cNNR)lY_4pDcb1#uPMeK=2IEvq7aNdF6sK?US%Nzb>ry4(Q9>FH>?y!B?8#qXLSN<0d`qk z&qHnr%+p^R@W5Z=ss@Pw?~HoJOhm!Vh%7hZPb+HanXhYmE4sO}qK}%d>xazOdxTds zu~SUhDrQBkRnyl@K)B{&=AwBuY(Ie?#p^!8++HN3YwZ8V{2w9lJI}f0`BRvGQPZd` z@Sk|~KQs9+tOAxByfk(<^dC;n{=ZoknSpE#yH|c3+hiF!!kYFVPL^-29LtKV)thxF zcFoT&loy<=ZD$ujX?%p2xh44+H*+al03B1D-DjWUg|ke!#Z)3Nn-nU*OeXU+lnVnE z!j3br=Tg{PUC5EIlM>l)UD^SIC4<^XcsM+uCG}-{K(#gvDx&enJa>%jfnCU`eMf}@ 
zR}1!FSeT}8E#hi(z~YeW1Pqh?Ur`+_-na0hMkn`_sPK4qL3B^SUMPCZpBLOXm^?{WDDw0a5IYZV;47nI+ z!U)9j+L8`UIVmN;?~t4LycQvD!L_LT#sps( z#5ll6P!Zxvf|?~!q-c%>cu!aid=Wr=qZ$?rLZ}~oISe@8!TFv`;pl|?G;GBK7~f?j z0vMkRV0?G2CxG!`z=2^0esq7Qz#v?lDsU7ZwxBgN=)TV`%m$% zqnZNg+_3haGZ6^ht01_E0?HHf?jyNHKMa+O0xUET@Ov$&@a*#9(L&+LJT&B&9h>59 z`!9IWm?`AvZCN49z;y#s+kc6Qb=dqrdFNjtNee+nt>7(Tv(3%Do|8O+Oo#*!057<~ zaF><>=mlsF54M2d#$pRVFyFQQ$2ING`19t*-j zn16&VFnQo^4J%|5N@MTlVVV0_S&`IE3!9RlMjDq(xmi-3hGse>fdeMO+g`*Qk7X~E^eplo3 zaNg~)MvvRe7LUV9=$VgUC&#LBFJ?uB?u!XO8u#MZv5A+X)r8wees-?kN?t;J@cH0g zuY#WiQV12+fMZV92EF)_Hn)~yh3nef5L%1Z)-AF2Zg-e>mvk@AQfO9apOkjjU**+@ z0WZ49?Ybj!p2m5%oNuUYWSi9vucNk!EjjOc7+NCq3A{R>3*>pY%f!{q?uNN7q#3HI z#`#t`?+mVPLpxpW_Qus6yh>fq4Q>8+-P^2Qj?7%A7dsq6t(~<|uWO0?``5iLXn+i> zZxHV)wg1-YN6i##qHRT&ccY|3?%jpl?T{ICc=6e=HDIl|jx_;I5!>Be+atXnL zj_`1;mqI$^bs}AdaG_+IU@`J75`T5qc}k`5mZr#|_>2JX$$uZDK2eIn4=C_F(b1kvCZ0 zVli@#i`_&bLWChKMl2?3A+Kr_9WbZF2ePR_W>Me^@d4CF^S9)#>ULiaHae5 z<;!D>mYK!Mn&5KAD%^M9fqe(|jqN`$cEH|^Hb=E| zocc3&aXwdhY|kF2ykJk~%zb8nt` zcK#P}?nkXGGmz6RaS&e_fx;&o+3HTbT zHF2I0a$iCj*O#ozQ5fK5Ay}(C*-*qPOaZ+r8gNVt23|RP49ugAv2h{iD41wiBtekz zn4ClybvXHAzL2#Gi+kig%dDfB(K{7q*rY2Duupd|`3e%>;D`&6VTaKxC}$~hV6z}bi`1>kl5#{C<0o=L14FfE$JlQCeA#iSdayU}Sq9q9B8iU~}O2=xGy|N!cbbKF6o8$deHloe=TFKY)LWKD`W`_f!7z6FHeUfdEy0<^U zoDeIrY#OBhAzV~SQX&1LFoH;GhxDi(*A7C66@}1hScF+|SSbyXg~oqU8%gv7_)I!Y(s2l08<#uZW5WMoYH4I$x-$R{~wq7(n|6X>KIE zVgz&%SI-fYMC%+PYAV|rO(+x+<&Xgiy{mJ5fRl>_hIet1=_Q^I&%h?Jma;(gfTv7C zibzvC0{Jd*if$f^}RZ>U`aSq54kLrz4Fq6ncU_^`uBM!SVkhEAYRuzUFwlb62F zOsDA@$8WPGcr=#Z)-r$O^x}gfDk%X&t%ryZKb^dr0fX;=SO5`)!uDtuUMm3XrAGWlQsFFcM;N zO$I4A7qe@g>!BAzFJeo+pv^$#Y8lv+!i0hs29WmA0_=mtZmR7$eEY^qrc_W?@{X!@ zO{)nfa1Cq()v{tyiyY01t?=4_D}WH~5!__|CKL7%7+_G{vKJdG8cmawf@wA&x|U8*@gL=HJ)E`lS9BZec6BQYCEC#{VlgwWC* zxfHo(D~`dZ@?A?q7-4lo9MNU>q6pRNvPiq(sdwae?Ob%B8SwAbl&&viph4^z+e zV8+)25mDN6o#agC#b;3uW|v^uw?J5dJ6{^Fc6;5meu!EIaIAqzrrbR@2xb4;xuLb# z30^05f+n(}xYtee#J1XuNc;9v@G#galDYV-yN+zT*M+^7P+|y_4O?Pa;dLq$i8ox2 
z(OIul{KDp^ltA>!Q0cf4az}1xi@&Ias-P_^yWA0}0o57WgkdStZg&IrEKC!gg^1`W z?67LjJ9$FXV1y`w(2{nN5wie+>c)j2*6QvU=9xjphbxrqv5n+FO}$H_vT> ziEG5_yRO%^EPN@2rBg24zJfsVfVv~f~`J_i1-DPdK4uuVR zxpAGe;oe@`ZEdRUQ6)+W0#e?Mxwr?jvff*7Z3DF2zBW`H@vvethrG2&?{yD* zYo*QmuuC?`6F1<#`>pNPHjqvm-3QP|eQrOFeODo_hq=wWA;#N;!>n%hHrz1mq-T=2 zdR_;3+5S9Cbl~){@q%7EVr|Ehw|J(vMP_}!w+X_m&2q=)plqL%C2oZ6&saTtj<*TD z3|gDfrGEVy>b76I5x)2t++*B`*i-nuqq^1G8r-{GdQm@vvGcmn`cQS7x2<**J9PUC zI`-oZ&V;v}Gr=*HvfJ9qHc8pewzB=+R&P7sCv%6ljkzjZ8LjQJG}0Lv2vvtMq9Za+ zPgFOd#(Hnl1w6gF!`o4Na4CfJ*b>>M+p0Ugo!0gXA@qKjxmn#C=4h0Lz1vW3V4S|j zmc857hw9gE7#F{WIXW)$gq7jx10gTD9wNfEEwfEyA@uKr2yxd+e%uOOW=t4(;j)at=H zx!oOgAMkF6Iu&~cI}ZE7y8XIQ-RXoz!0m>jo~W_1|2q=e6#f=ZKF{O}Oels> z7D9J2_ZrKV@us8^e}@-^wb}b)zFM8dsdP8(p~MQRGNg2Fkk`MIwO!mScX zp3CJ-Me;?*rs-#Z5l?}EC|}OZ=cg~gXekyrG5H3a>csE~o|w-4q&ekI(bB!1JU5*! z(bV6(t+MvEx>Xhhp^9FmkoI-R8ENvg+k8D~7V~h5EzKeyH*>{#cTsY1wXzrHU3l{t zQ0Yz6D+Z?Sek|~cp~r%qQKnoFKsgYuX~{|_URymy%)YjbnaALsMY}tA3&9YK>k4w= zdQVKg^osS%1t9Bp5E)s&U{HZ8KtC_F%cDTJB z2uA9D=I>*2iixZd65K2LK#9OAeNaN9RtY{?WICSO$&S0-Vxw>wVeJj zPwPj$A-noEz9cN!3LgL3zl7-7prRNwn5qE(y zsxx;|3>wP)!5^|K zH2nwW{vDIYcwvI~#~(WJ=$XvpXCD==%RbEuXPFdv)gXuwd+Z{7%?U^{0o)Mh*t!#w z+NJg*ypL>+P434}K6vzOU;x^b9V^~5T*<=&4t606u!M})9nNKy_z|(Ff?`ah{l0M_ zw-|`sDWNXI=NC?q@EyRfLrspz!TpH1b|``2FY)PsEpUBB^R(#{%|k!C_s4RLei_j7 z*w@%uiohW@$>j@vDsT`{kA_z)d}*<+)Rv@mSmylYeRLbbuRov$L@kMoTc+3h1wY9l zQ*K>0H@>}Innx{oegPr6P!hOe4(dls;^}CgK*wPFSb*o}=$Mkv$chkw`8(J~b3utD zN>AFE9w5W(~QiNfGAiiQ+|PWQ^5m#Z7z`OXYF(Z!3f}l9M0N# zKzG1=DjBZe0kc!-@XU0{et?fO=79u6QzbAL8&Tk|tl^Ka+ai>pNy9%!b%%R5)Ja=Z z{&zv#t1MG{bR7cUAxN#_nhCM568ql`V~qh(2OJEiv~BQ;8iG<_3+@}jl^&E(JUpQ7 z4Ab+QR(G^|jYB<7}NK9AO~QEfOJhpnE8nrL6{jMGE}|1=n(wcdcf z3s3ArT>~nHLDU(-RfwZ?Dk8RMm610h2P$1-d)4kzczl}Dg8Cm(8YKu6=Qsx7SUv=k zADEc)e>OA|0q-Zu2r)9y!GVFyA=a7G)>8={e%XMD0SqU=wf~!H5{7_Dm;pvznt4&x zkbv<6IGn5DCEZIvUVu{Y@F~jx9Sh?C(6LvZPE=FX&T3b+yV_Ijt@e@EgleM)6e$!R zUN3&(hT!$#HwyK9KYn9y+Wd?MDTCKN3y*Fu1)gTW{vDVtbb15AJ;Kz$i=clI&p;X# 
zqllOnLq7g;!t2O%U7T-|70`A52d$C<7zlKe(gj7Y**n~xTCbI;!IUE4P7y9gOd%e| zm41)oqKsiXTyVfIi|Z)dLh=55WAfi#*kUiyAG}MfJ`yu9eziJL9>;GNenWGE$aNnM zRSvpqYeO)g!^oi*T5X-x10HgiGzWNG^hcfwLl8;_Sq8rTLGbmES}gtmvWYd2P3&y+ zu8*^ZUiOnWSN#hl6KmeFMkEvXrDVb*$vP0IeU4z4R0db4*|50?begEnKFZSvnH*zs zoXNvTDkBZWQIi}Ka!-h|C~WabYYHvIZLDY^Qbm=dTCzmUmHvs`WoVLsKuqQAXu(Q< zU8Ct^as`+Ta02eAW~HBAwN@4y|8@+K10yDE@@!S0X8AsIn$)|A#bwMghC~ei ziguQQE4jP$1Jr@;4XwfQ5@pcsBBhrT0E_*LNST8Wb{%{UAb^DpQndrZSHtTNtIaNm zSPkAw084ME8317~2GK^+izAYg?sd*Uv;?q9%YC#`>wzE#7W;F(0D4CtjKv#>5DF@; zKnRs$QX!Awq=>m;qp0pXWi&AcT=e+;srN1_W`2*Tr*qTh#lumSV9X0Qz7I za1Q&idJ98A3lKxs*0;3qY)cEqvKDZT7Dj3tAUghDlgq;Y3>0=FC{@H3h0y!>p!YNR z0Fw_gc?wUNoc;c*fAfcrO`UlFf|Knmr4~;oQ8Fjv#ba=Dhj|&Mu9HaXt%< zJ{@t?&n5oAGXu9RSR$rUCk0yqmb6$AZ?Ow}jEl#p*Dvvb9YTXEMoR_+OaNpS2uYwA zgiE|fDCv3TE-(>dn-rBIwuNEc%L{~|_6{aPOlV_g-^N5tU7liaPl(3J=$(#C_Dm) zKZ5hP_AqdQ0W44_vZJpj5iupI-LJ=WSXYEcKrAzUidp}Sk}M8fQ#P#*up5171KI@G zQ5*uaUq=RuTYxrx;h2C$4xr6nF|dI<7B;WWkRJA*V7ma2+xNRP{^hpTOQs9QEr}1r z&`WfNdMk`uL5+cW630{zv4ya?^yjhN0ZM4Dqo`fF%<7PfgW9yh7s8-Q0ivCslQYBq zkQ<##A#6x=t`o+dQ5s0NaCr4%1E3L+i>MqxGmJp5hZ)}pWl@~BJZ}ax8d|NtT~F|K zpal^Wiu4;a!hePlP;O7$!*GlP8wTrb!C)L?e1e7B!BSAx6kh*NiMbc)Qvzw)xk@)&d z$^Hn>evxMy_;S_BWed;dX8UEqw_(XuHmtqmz z32|0TJVcG8?AE|~LWFW0gyrwLfU-~WUAdS;;67neCiWnw#2K?G$yS7uyqHrL9kjh+ zowwtgI&(;8@eeHtT@kT*UJSnwhQSyiPsG9eH}x%dJS4!FC{tDmO23k zc|$90#M2XsL-Qi7?SVYPbJhlZxvICfZbh1{LK7Q2>nco{TH7yu9<9SH8)V5xTl-DI z0(YsmM4!O->8tl0&oQVaXaBF&YX!Y8O$n@kHWykzy#4YXztIz*A7&34{l3*Rz;E_= zea|DXk=GlLQb~qlLP>|$$r2C|9bWfP=tUjnJ@_T*+r{-yz8HCdI?Q85kz?-%(Wu+M z1fmgvl7whX)QQGx7>jHAHSKC-`L)9#TC}{NzkoRw9emsy;5tNmI{4~-^0i(V&Z5HA zUp(W+Yij_HLB2`kw`I?P#w|<&8d8Ok>Kd%AK?G)kox!y*h}1P!ip1Ov{o=RWwIG0X zx~$C&S)CyBx`a$la%aFkpn0uZu*MLz@lv-p_$8Uhdr2r_=U4;RvF59|pZ7`-s5L9& zD-zh;!L7Ix3G_%}60N)_VRnIKfKF;qIRd=|+9M61To)id@oHGv>I!WVgP|-n9m?)T zQ!YqNzUL6y|12PgQWYJzB}`PrO^YI|t}PN9GDzHE9uUxhPoT-0HWDC`h$SLZqLns& z4NnZkarEO5pC=Kv4+KVY@TE{opha1MM{Gl7rOz`IVe)f)=09U1BuewGD6ISA##`O- zFtlA_O$+5T4j3Qw8J>HH`RHbRq*CpD6e~7jCE{fzM 
zWmy*EI~F-6E7OegBX@q@m;IPL3Krd0<81qqS(3EZ2etMS2f_{#olI=sl_$^qt> zT-gsP>mrjY2beoBrFttJ{UW=-d4kR3N#>hVc#fwsk0N-jpf4C!!yA$ALwyKVAq+LOVPixnOUfha|Hk){$;zr!qYwdV z!QUE3rXXxzh9kku0~aDF681UeqPpNh1KZXhr3gA8tf)V-^#M-&Xagt00Zf>8g%hY8KvyHf#lA7wb?TMxTiF!$? zH&L(WNe#J2<(LLNEd3_xcS!wr1ob2Gd{|aA?q|MU|2?RmlKP)k*S#p}FS|#|AmV7b z?+M)3Dfj&eN1*WxH6l`%Z(15dJ+XwcIiihv9&;^qdCvNUAru9;_+w zD;JLd>uNAt>=yb@+Q-TzE>C*Ey}+9W1LWXt;NRvM+XcNAqukpJB!r_*!d?VCl6M#5b6$wbt6;7s41hUnjqP*6<1}zJ5YQ!2C1-7cjA? z=Ow{8gau598SP)gx&0X?#ELe-n+P+MsDWJa3QP%5qpV5@w?Pds(i!l-1!Ay?{h@f~ zlMv)w;fdF9vYcNaw(kXTudw}(!i)E;g6>!J?TxWGmh9C?3l_J3oiEO=L1IG$AabEA zk}j@>#n(1^d+3KC4`U8t@+ zgMO8zL}5xn$RRcBom}xa&YaLV1IP%C1E)op$$}+m9TjO#Mn&_r>6X?>$n31QZhwxC zzNJL$H`vo5MQ4!2uz!oyMP9WEd0~H^SA}2y0&_o_qOgT{d5x8GOoXQ3iWHh+HFDyf zCOL7Bbi~)s&qJ;>ugpx^$%%VfxO@G|evltHt{lMcfmQ;7PyP~uzrh(rg4Su046RTUepooib}WCUo6aSVqufdm_$LVtrH87P-R07PXK zI;3a|N`L}L2y;q$5W;7INTcz(oad0YHn1)=KEl3wVZjaxe;%3m^Xr z983|zJkK0Ak4+h&n!}%EPL{7ELb^SRD)(a12GPyw2G@5Bc}p`~YTH{=sY0&^dFzki z5v#m4C@4!Ch6;jI81$eJqZ9jjP(P@gaMMvLLa24Fg=Zmwdj%94WOm8m92}sWcLe7~ zaGrv08tMe_hJOuZNT;RULkspuEkavrC}w^i(m2R4Jn)0HxP>q_bn~ZuIb8tY-vfyS zB8IE$Nr|ve=O)yyc9*W9TI=2f~B0E>3X{n8@a zRrgESI-aRJ(WYFJyX6{6Kp{ji`zhGTLXubm7cVqH47qORE?$8YASQ1H_&F>ONe5iYeH}ub|p8MQVn1R19T@jCT-UR6E-f%J-9BV zOumT&e_wmR{*S1%JmGz_QXv#N;>V}*^qkGw<8WC{iE1G;lck#1{?Dl7cfuoVwuI=v zHi8U0EzVEG&~Sc|oghMY?gThJ=Snc$v1#L@7&mR)?`Vog@p%onal;ce2hsa_bOm4Y zppnpjU`K@+lU=n5r;xqNUJ|gkY%guW^)&2%4yKi3&cU>NM&KayC}JCQFs+|2r*#Cl z6|lCIt;?QVZU6lY`uZUE-#%nQfK{+SLCSwd>`ei-sW@s-#QxCGllpP(I3OEhufcj` zb(W~PPY)s-+^0DhZ$cl&C&ux&%+>GNsWQ3IOLwowS$#;fgeLLcb z_`~qripCGcA4MSKsIG~BPC7PSo-Xe{aCaME!>lv~$=o0H2(~b{(5Nf`InoIBZtoOP zG5{9ziuf1@8ARk5BQt?moInKh(Zrql%i`w6*qQ)Y!|wqhQj?H~G z@Y7947z5C)#P;smwHGlal`S?rY-Ko)pj*ulgd*#wbsRsg0f~JP!Bgal`Z%^ac6~g$UulA zV1TBE2NT<+YR}?HRAS?KG4v78m6A}jCjm)cx(m9U-nl*%!3I`I{Y_6MayGa&hyZy| z<*%?o`rB3;B*-ZfB2&> z=eU|gD3}z3%|MDSrUXbKXC?0{<^t#e&&Yi)K~h0aCu=~0$)F$4BH+u#=*5R*Gpw2L zyMhlW#EV4Cz0k0ypJBfl_jh_A33`Xn6JyexHBeeqisLntvTd`LVUrGch 
z3>A%y#})t>A+Eayvp?Vx`0Z1eBc;GL*zRE}zn~NWczmV;@WRLu6is3)!aNZhQGgt5 zda%tQ&a$sTu|yRRl+|?nvO2HoFYT))yhN=VZjbR=53ho3?p;z(?56t^%@VC11N-SJ z7RtqIFthw#D~P*z&FX=cqc=N=;V_RPa;|P5eFXAdKxH<=!ISlNxJ&CIKJjz{kIA*W7il z*=PYD&*6OtjxOJBbL5OAe9$3g>VIcYzyLdH86d7qV1C%RBlzk@^QK8Up6oX9F1t;6 znV^Qblb`@)j4Q03&xgQuhTl`d2V2Ue!XlbEjUN-AMyNkhJ|zODmPSRh{_@g?HvC$L zYz`OTP9#bVMcYtdI*kyYm_hD4mpnS7LG*JlT)Zb_-BP)XQTEg4@wi_1M$-v^lYw7SrhEazAwUfgYrvRX5aAcl z%N^{01KbD(cz~CW1d@XC=&DnVv$pkDsg!KD&?9{hdEFizc(=ro^lJI3S8yiefe8MIPus_1RaO$NTOb+Z%p$Ww7l=uEF^69R|DwO4_0_h42FYsw$Ohk*EXfw*xc>Nu7 zLOUiJ!}QY_I%fzy;n$axHR%8#bsyrg3sJeIR(%k z_`DG4ECj|R1>z;=B;cltAoVkq}Og%t9}$@Z)W?r1E`-54YlP_%S#) zHP;CqA5=2*5ns1po~qzGF zK<~feb|))Mlf0zBDG=AzPhsobv8iq!7@l;8uLOLA9SUn zgh^dOLkBy1x#tq@*@!!q)mT}=Pfj)4#9N^^n5j*cd7b(}_%(oXN1#HlTMS756#alL z0Vr*(J^K?d%H+S(j9DK(atQUV40jRY-H zh_GS$3tr0-qF!~fzN51%=4(vD*1kLu|TXJJadSpwkb$M{v2 zyt`OQG<*Uon-3xAl=zLBybm80!nZ(ez|K19V70-II(|Hd$FDG9c%z7kZ_EaKrhx=@ ztn@Be4I8ZJ6dF+4qG4o!v>PsrR_KD!hNe%U0`)}#>~DpfKilZg_BZTM9ZRq1&n?&^ z3mZ$ZN9X~jD1h#(b%X5Duh+XFyw8eb zwt2{efE3^B1PxK&53)0rV;KBmHp&MYRZz94S`+d?P8dzg&TLYA^0mPWiudb4ALP4- zHf<;@$6J?kAbNmx;wh7)EJP=nS%meoWYFSm=C|H9o;xw=%by2h4kL;T5a_hiivQ zr4iUTQ>he(eac1=ywXW;qqYz3dh~yx*UK>Y$Pw77(8hTOWL%RdBWD@KVM6ZMheP?p z7`S5d8PjaO^$*Vs9l;@i30uPy0Za2UYFgUJ!K%%QJqHYpuK}sA1I3^_XQX5VC}(>n5;E^_d}0AcwBv2?i`p3C)=TiLWoYv?|wEIE)lCb{FpM>8Hv`H zg*%n783Dyf;93713fC$=!sjr2rsB&aC*v#($Yj}M@KZ=7lmMg2BOSuWZ-n(a^VH-+ zrzg%FoqXuNE!)mKc|7=t^A@>Zn792u2KTn?-FDs2L+8j;R&GQ zV2Ok~LWo`kA506fP~_>4iH}r4nhDL`b<{>(x*>u7#vL6Wf}K|e;h>Sqr{+de2(K)l zli@F-5XzMyAHM@{U)d}td$H>Fa(W=Dz(>)sr}lCMWVpyG#vXj3Tw=2khTp_q_v7-t zC$NwLt^5e3>JBan>`Gxb;={So%t8rsuh_Rvf+w=5zQKl}eb3J1qihNYBSr1;x`fo%1DC=9XAOyL5F&%g4 z3{VbXVD89#@F_NtuBc8S9GFgQxEGPYt&5pUPZ2n1?=1HCC?*z_2} zv*aONYVCgOSFuKRn-nIP`&%oR67vPb%!W6(9iF)68{JIsFZ2j#FiN3S^ofR40vkYL zXo$#HUUz^?P6Pr;hd{0iv+40 zEG4zToFPO?#W@{l@XbkeJ6mU<47F$ zlp6N{B;n4u3RPw$l1vbh~O|w1ovB%F%s>X-|>bMG5_fA4&a|CSw7=hL#f>EI6C%Ew9 zypRk8<%}0f;g1UEA=-pN!l=N9g}_8joXlBsopz$qKQtx_j$kC72pKkNmBAI0C2xH5 
zT``bCoT2((8^RetF95m&gfmA6t8nFM_*IIxEnT^2fXlB)&Bet1ie9K-^zFBJ_}&TR z-6;29ZVTlb^H6m$FW!_?^x;$RjFU_9ilP;upI3l>I^aqV`n3vfvvLhV3!qpe=wL8` zmjNDtYA8?vI1*(cO)BDnvQT8{=n)w2w%&XT-=0P=Mf4%w=uObC86DSp@EvIy0Cm=S z;Yb0o5{!(2qym%|eMeMK9)25)cXDD@4cK2lV8az7bn^gCiEbF($B#>JbMP3I4GpFk zmB% z72yL&vfJNdnaSL@nFt~NFIo0iOpc(ej+029r(+1`0pwFe3~_wY>awEVf0508&ml~r6^7>IwQBepfH{PR(A$VR;cl26(qJ4!&c&1~S3Qs;Kv|;4k+7A5 znBYdmRf%|`B<8Q9HIh;g%RxI+OD^GzzZODH@C)Zw0>laGx(N_eaF(*-7j#5i*6THK zZnV_y_>lVka*8Mr20#WhH?lYQ6sz+?>{rl7k6+6e!#VUg*jG`v$D$!oFZDQ951}4b zdLN8~dLgdd2!%Jk-X?b=4z%cZnuMGZJ8o+g+<7!BN0~LKW1F;IX833>tuZo^CI$wqe-&swB(R}{lf6Vh$(_H$?9s*KTI=!A7j#C!WD?5MO zC%uZ#l!_L|4?`{j8!O1>R!j)dbw3UXz9-ZZ!tXIQvdDlaw*?S~)(Tt)VtUeFkrG_k zd<2ppBalmyzQ%kd5IP8s8RF+z`Z3g0uTGLg z6g%7a$~Zz%9UrgU{i9#HIi2YwnQtijz+%6uE2D%Hd<+ggmsSdX0{pa!m?UH+W}L<8 zBs5Npl7#Pi*t^ipwgKZ#Xbc$Ts4`$%Ab^&gpqMp&-y2Sj5SbCeO`!6D-7=FI%C`hmDsuL zB_VpkhzF>w#wm9}@BumnTZr@MCm3%l!&qK0OctKx8Pu?Q*_EGSt|?)nVS}Cx3lsX%cht-#-y8Ng06m-RXS1T8|)NVC-~dL;SH&S@A!R$mx=3hJmvm8sFITN#9p4a#>OCzb}>HA%-^!Ck;Rg1d$< z8uGn$Kuy6|po4(}V2egu1bz=1R52ahLD$a3VIZm_z!5~R#9hxARpa7W+@F+hk3um< z+cz-SaE}4dhD-WZjB_Oxs4eYLo(&%_mvUp~(L$lF&7?kGK3N0PwdNel^va}i%c+<> zmpONvICnCOGSM=NBwet*Ww^yWN9IR%*e7^5$Gf?9{jPbt%&*<3kU!30PDvlC&@6=H z1F6?-;A!!__>IfAdHbysnCUnkK8_8tj-AlQ$6;FeNj~qFnfxmzpJGDJ+5WG50}|9W z31;;!cQAL5Nwb@h6UY<8g|A|pI~)r{R!AiKK;I8vwKTFv-d`AORETL^vv~j8Aim)c=?#Fy-OA6^R?Vs&Ec$ z0zeFba1zN9Jd_X{49o|OX=sZGxe?e9OJ4}hAHpx>3Mc`YN3TERVrrDJCE_GRPy$UG z#yUn?!rUOr zFv%F6-M~w{3j`vP9BviVjM2dt;! 
z(u`{3D?eh{1i4~O{wsXF=8~6XBkTT7yv4)p1ScHeHQp7sS$6Ylyo1*9lhb4e>wlfd zByk+WA<-nkZU9BT1nm`eBvuK1a$rZa+&Y8N2FF^NBtR6(&0%zR75Rd`BvSW)<=roe>83Hc`=rIY<7Tg$=Asr^M0Fdn?_};>On#EdRVL3Nft$om<~f!sV5+G5F0kyUnfwzblw8>FW^$cLH>~a^PjQO&oPlsy^p!~Gx-3McQIk7T2=rj-w&b+Dxw<} zxC-lKETBC~^K*8AD`N|)h7w7TU!)6mVu>=$9lgRxx&{A#Zy&{*j;82KA!eZCYQ!GK zy(pmokE`?uheK9X%k{}oOOXhVqLz}A-p|}#Rvy6;RO3`)nV9yDq6{BPh9iZGD1eTQ zdZF|{QDnV}vH@K^CJ_RO&m^KOt%=?9h5VEt0z~eTkI$i=e5=h*;Tv+eI0lU!Xk0=- zKL(ki*!pw*LvJC0s02iC7SsI#-p5L%v6+REQW_QzPO5~sRgH7|tL$k<3v#!+&{;ox z=G3D$B`iwH)WHWRbTQVn*0c%4ZEiTbi;1ibQbFl{RVlkL%g-n(j4zRpRZOjZ)+P#4 zu@VNETg!xi#_v=&lfSmJ1$!MY(8fZMm?O+>V6u_PCMG76%}lm1*~(-alkH4)Fu4th z->X_yXixHL4?N8&`~s`}Hj^w*U*HWTtd^4*(#N=@%b4<7e z><=-K9q|ftzslshOhUZvE+#@Ze}lR2F!?@{_b?%;=!?b&iYq7Gaf-o(#>)l9la^h| z1>CYj1X{{Ai61VOt%X8P!n`}&W(GKy<^NRsP&vY53m* z?K!`m7mvr2@!@y}I=|Zg-y{$A^>=l5o$K1w^>jSi^Ue_ePxd5xwhRo#4|cf&TifpY G`~M9jCn$>m literal 0 HcmV?d00001 diff --git a/src/目标识别代码/utils/__pycache__/downloads.cpython-36.pyc b/src/目标识别代码/utils/__pycache__/downloads.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e85b1169b3b341baf0fae597701e138c0c92655f GIT binary patch literal 5012 zcma)A&2JmW72nxi?vg9&!;<_V`6C-Sjm>L1~37G|Z=Yj(ea+_+Fmp z!=Og_K0d~GfEwfb`6S;7Y6m~Sck?}<#<_XLC_EL9dFP@#tuQKczoTlQ^d5p|W3j*z z`(jzGqI65O_ChTTmMSK7;Ya7oBfW_v>&VvMRz}z8v!0lq{m$*o#_ zCW|WjA?q@+fN{CHN8{>4eVnlYZipk>P+8rVUojh=;LM;ux2_o1AxUs-FFyPWc3fI0 z3@4UUB5^t*3~M)&OuH;(kYrI!DXScYi6bkkK{H64R-nSDlGtq=Dj9iQ%0Q@E)S4F} z5{WB5T{X{`{c-(bu2-_`y$pdt*Hc zquVdd9xoofp{0IHH@S`5Van>VJ`*qEW>`JLGkwOLDck{M??AThV(e8V zG34`(zl6Kb$kiYW<4i}b%)YY0d_kKQoWzrzn{5$Q0x7rT(NXP=N*o?&xmh8kXa~2; zAr@(eHB~UhI3_1dYDuNpE@~M`_gsd^Wi1X4piR<&40D=^8I)g9#W4PPrjki1Ba3nx zNuT*Nn9>$uG+8gh3}vRe$f1Y#3~qNVNW$(p@t5n@yP3MhGhK#uNB6q-AqC8_>lx@N zQ!dTLsH}p1HW<&|wHS1pv{WdR`Myyf?ph67cGXDN>9Y^nHKP?mZ%3i8|6oSXWk!4v zBge=h?_zDe=TO(hp82lX{`4EWyL$H>^>IFozAts(h~{iGT=7n`Mb}{=!>t&0mgp3}7MP z{tB$$KNKH6c_UPB0si@ 
z#UeB+3$>}jW+bVX*Q`m2G0RaScIPi&zHs@3ACG#MgW0lDL9?y&{9Fab;Ln~13(g^b zHbOaua#jRNtoz6O**E>!gn42Q6rwPZ3~P%jR)ba`%At4$t%7VMv1_fBD9NZuR8|vf zwR1C>9GD%M+p>z5>7;^`EXjny?I29tFsfF;MPh$%ap6MZ=yS`&mZ}W2kZPIEFNSGG zZhoy2faRzKOd@b-BxWRmK2#(qbW+?L3aTtGmy+>}+#aLFrcgrbIo z1&>?Iuq>|x%LC@=gp#brF!N@XjWHWYae&Q$$|FB&?n4ROIEfJ(;{Yp&WnI-OaxR6T z4g2=-;*koWI{iE(A4%KRm)mU%mcR?#=9wR{o~=xv3E{U5i^z~4>YL;NpnXoC^zsXg zaO>H6E*;kg^3=UvW^>jMe-23WyUkjLdwi&G_8e$tp67u}Y|4N)(Ux2vhV6aQwdy0V z5z=IBAE_p{M$~B6=sJ9qkENChEZf1y`2=}9zKgsayySG%>$&UWUEp5V?3*{tUbg4L zZo9DCv4#h$`F`qY)?p=tky|5{wnkjs95JKFTKDpOC@1*-yHZ0^-TfcpC8z{*0RJ2%dl7V4aZK}e+5Y{AnW2H z8kRt?Kug~@;X=r{41kr|04Q~{Z}iNr$um=cRRkmk=p^m))V)3AS3|> z#>#foLQq0cM4D;eIGl=SbH`J}CHX!BcJ8rh79V;-UN_hwjgK1+$^OLLhP!zOv3X)H zws034>`d$|M4cA*YUF^ne$D{WKXc;b=b4zL+wor|9u43@a{a&@pVv|sAY;+ zChxZ*qNYEXzkRc9Rss(TPh7Cmo1qX;UynEes=&1AK}P4P{as`a|BGK?w>V4(UpfMn2@Euytl2s#zOOULJcTa=bU+^!KF@j7tbzK>X)JK9Gu#IG)*Qi|fNM>NH+ z@IbkSDN5XHnS2Ny+JRq2bchv(AH{xXg;%D1#LxMN%Jr!~^&evz zuGPU?=u8@oR{Y{z%7;HIDL9>Z;+<#w)Ba$Y^EV%@^F%9DqSi`HG;siZjLZxL#PLYV zlxBqLg(qfbgU#sBKOP-O)7POW$p*R`4|iH9+7K~`3eus6f9#x#T7iEULH&`H*p!9B zNMfrR{H+eOGi_08De)r7F$)|~E0Ji5ycHeUSy`#A2?|3K zFIYpAT2Xkyiyxq&;3j5`r})5>sL&WI>L~FvQA|9J0HalX8(SW{iD9ycj2tKZr(yVc zYZ75`o=uzMF!H=bPwXtSE%X2gvh?~Cn6Z%^-1JrqLOfEKtPPn^HvRnZ(cvCj^SkVv z@&2A3KJs+Uh9k!C@q@uM?J0y+x5lpN=i4RYGCjTUS)-6yOtJ&b6c@3UR!6Y}3W1AF zUn4ZQ!u#rDvz5fAFAt9$Su`-=VW5NG*4n znV~H$$R47gC{P$luXP9rO{E(!0*nd|GDtR9?U%e}RG{SK!&0 zEwj{qv!)jDcB{?K*=88b)lKT!_|X{)cwWXMBNWMN%IF!ZtZyjRG6z=ASYv%t**&wB z!{gv_8`dh56_tON^_Vz|afN|L;|3#ZIOmcXncTc=Tr_M$<*Bz-TrihBvB(Vi|G?nZ zMdJ!23eN4>ul|f(=gyW#Q%fq5I$aTl&1-3{Qxh^s^QdN&RSUz^k@dx(9i&bas4%Xl zb_a(`OD{?p2-S?E86jekx*~X|8%UM9m>qY5D0Lfws>eJ?3$M=2y{Si)t<*wyTEvR= zrTI7tn-LZkf$WCrBSY*0TmO6k#mw=mU;govzkT|*x{{i}RK8M4w@Nu8#g$4)!2mMoz#Oq1nVk?my< zu-#1TK}%}X8TD-ZNJBOrt(9j%D5C|d>lwF<+mJ1{Zkhv=+t3VCR*$VR@f0+~205Nv zW89g5{w^BafqdQB#A`@m%I95w4t9`}i$NGBxvpB6dUl=pf;KKYsVBSFIwGzIQf|tm zt2$kkIy}&_vqDJH4zAZiEYb;^s%(mJOir29(#ne6)Kbd!oQK$DGYK9*o1_C7<~}AS 
z@&1A;hVhTn^;|~jGLN!|N1yuy7}FkNv{^sL3}t3^$zhYX3~u)+(HUT)xW zugB0nqI-+CAqmXM!}`#!(7MWcxmHn)wn|HkkKVKx?0jIY8A9Q0uNkm!Ysr>9wXNr@ zvUk~KBMM=`W3b%6Go$Y^Bbn_PEr;j0a}#UpJ>8y*Jxe{a^YPbqclGYu2jhGj`aaiv zW16$=8E2P`r{Mv8_ebs(ctm7gF{mZu=mRF;Pn$d$s6YNKJ3H7p-?K>yw43IqK-7kB zjn8V`e#BDe^>eShGIJr>J`558_ZJWg{6op%Lq3lK=|{2hgXN}Fek^>_nctND3BTQp z=C7@&K%Sh#I$paeWix8b$4iMjbK%0d3&;JGd15bgmK2AJV}PI7zKZ?Cs#GelxjYQ3 z0nd`jHt#|(RZ}Wxca$DisKYhJ6;YB!^WIL)b8)E850v3o7uIrL5P$$1se zlOP0HVVVnr>p_^hVcckdxzzsN?Adc-l)4Ht%k=;($5Cp=5^-on;tb_{YU}CR4QwoU zG%ak3EG+~Sj^^94p&71vH65p@A!Wh6J~Jw73&H%5yGO9AB)>7tqM2u7%+~Q@5>$~* zvZHXMB1Ml=7_mN%kOi@M9O2oT z*>_-_MLvS)#3l@2i}vNgD7^2(o;4`(VvnqOR7W^%%~1({X!IOj;@dJ`MMMJ*@^QX{ zP>}B?1mqJ9ukR+~Jw(5rxoTcD`}w{L&+TQN3(xs}hC0a@jag-zBjz?oyty%AQf+UI z1A+JBy@MaPX$^Kl0!3ijLGYZ%s{1zQyq3-BVZY3Kx0dkLU9H_f@csOOng93j$zBdV?C^(m+vL~UN;+mdqI(|IJ&*MY813>yHz_7+O(D*g zGhY{EHmMWV;IZl(K<}XQA|81bMGvZ2vIG(cTKu*NC@}^|SoDSvRTfqT=9S@9xPOLD5#kkiS*MWs6O`1EwG)2uX_3LaOf$L(nmbYeNZ z?tut{nhfN0HIEtdQLP<(EA(kTcVguR4DYRCAE^d6CXAn%@7UNCX${TPE>t#Cv+_jB zmhW{a??i}rhTS`-UZPVI06B(fB+qXIpiO71oIo;B3+1T|mgmV)mAu=0ccs+H`j9^4 zH_{B5TEMx~Zpk<*TUAe=wTY0klDgLeAt`8Jtn9=Q(iF-ovO*)}XeOkM9ZwRUgn|Zq z-edb%a_ByJU1x`M7%rQMLpnGCsur8hlm3Ub=i7Fj6q>0OV7P=O0o!mgVc>#@+x~F=mdylNEr1k&T(&cn94F_5H#?LJcfwjd5Z|RS6as{c0=aSYPB&J8AUHtwr)omU z8}L}+>uw~i8rr5&(S29hcx%Luoin6E8D7DEIhy(5O z8G~=o0<>p#qE~3P1szujKA*hTJ;v@ee=<*b1qI=v=k8^mxto=MiY3aZ%@VV16F$EA zXPWx2ZJBHwqf9(h5{~XR$KYc&Y9`x>zsdIg+i}I)SUY{_$g#QRn+w&}GmEM{x3U)fT~?%c8Vx~tlqO496nEzFW=NyZvuj{;*6R1FHD3IPl#$`ryb-j+fh zuR|^JcVK{h8@W63N)r`(#js>fJKERQ&hQ!JJtxW+5tir^*W<>GePCF z+(Di*;PB6RNflNpUk2)+9Mgf_>F1(xzFqG;uG@Fq&H=%(S`T?M(0a?hz~G$#ioa~- z3*7HorJNIY477WE_mT;G+llr)!*d|#rp%y-{!zYHpN3JsO_@BO$mG&9_ZocPEt~JZ zjXPzwUirXm>eS&)lb?j=)Fcv7q@aEl z8d+)u5uX!}k%ryN%?QI0yk{@Ho)o5eaD6)J;%0O@%Sh{fWjf=+pOTcIPTu#vlm01x zxXkOrFaITJ^Le;5h;k%h#R<#H*tRTGG-lkQwcG$SP{B*#3Wi&}_9ThK*S zJVOO7q2octPyPM|$ zP}5(_M2JsHnYAlZ#*}>+dH*TOMa~#+9q!`;P|sX)1g?Jr2G=wv5O)|Zawe{F=Zy=- 
zFOhD&Q=*F<)o?AthYx)6Nb^HG73Z<;25zS|eX7vW7ci_Pz!p-QK11?N+^v~Yk4D{@;nM@R4q**GUKg(lkliIQ>v@+Sn!SDwVa_J6_Lv zc4l+$UB@1+dB6xEp{V7RkZO&D1OfpM2!twlM*Io$2t=hX2nivf0D|ASvzs_+k+Q3~ z-yi3mdw#!jW*Xz;o`K(;5B`1a{lkXwC#vlJS*V=HBfpP?8+C&-Zbq!mbZ*v7om+KF z=XTxJd7)m=xl?zLTanu-){Bhh+EJ6z_f!~3?<2@; zEL2%$zur)*$lZF_y%~W?P10ZF6AtjUBdSD!XsC z3wRtnZp+$cvi7R+4c2GkBE}U5B^o#Ic5u!$Gd8(-#aJ|KLyb{yyS!p$=^klSAP==a!j>0)&}v!oK4(-Tn?UdsyIhLAy4L^Us2jVQ_-*<1}eLFUAPijrn# zcX71L6~XmhAXVmKbkYst%xwj#neZU1JTGM+RG7r`LL?$9y}Y#ax-M3&%);=jj1`+} z%SjxCF%}kq>_zIwhS(3W{&613{OPNI`R&jD{K2p8{Q8F<{^&a&?fmA$TR*#c=hpA; zY`^=LpMUGlw|{bVGl`P*FV3E-ow%xXf0YlfLe{#QnG?!o?6ePS$qEv~G+B|A*))5C zO)+s0Et%0|)HD3i{n&W)oqQPuWwc>)edCsK8=B?TO>U_s-~F4K@3C|;eXCy> zSiI0@=y7zvdmFmIEW2HR%`)ZE?0%tLRHe4J#(43j#bD=yu^mIKBQNh5?eV^~X3MX? zYV@6L_6>H$h+|lAW%IAh*m9YXeywk`_YgjBVr9MKSl`8t<-Xbd*#mpKdhdzBUOtY# zKj^-SmTONg*A>D6R{gpgdwG5gn7A^O=shONyJRv@uYJPa4)&ht+oT8DPfN6yO7YA2 z$uMu}p>=T9kDcNFt}{gGsn9{t7t7Kbk&u)aOYc{Wwv6 zun|h-C&H)O_e1HQ@jGF>d~H(&^0~A;njFS0Kec_8_^DN^)nJ`Pm|zPSluOvm>x5Eb z%yP1py7QMWU%GtSPxpA2gV}~sL8q(q{8AI(@@HRz2+mP|HbFjtd{zWXZ2DjDXJ7PZ zGvr^&n+`sss`9ds%1KVR+`^v29P$1L2tw}YnhoyklCih zF{;JP)(dMa+1TntR=TIHtP~J&Eq7!~%Ubsun$f5sW5L6~8<&lhV0k3>XCSKl91_DU zn?*Lk98Ee$SOxTRgq;K$%j^Iv|0mDIS&Y0p3I2m-HdWY^b2+o!wbxJ8jyK6wjDF-e ziOe)qf4Li~fE9m|+r02iwq+|51O?y?>>UI21Shv+l52rcIy*3H`BRK=$$jMGc0gur zZ>unzHO7lOO#EV4D-iT{%q<6oUgl$0}?&**CY%tL9d5%LVAy0s4ve*yj1%VWof=k?g(x*N6S5 zhy6#?-u5Kl%O{cVL=-*ecNp_(4r1)BPgOH`6Zw_zpj$+h!hU%cLjp3owNhidRYt;(4r=7F44p=hDYlmFmjVb90SuSZjp}=&Lo8 z&YTFkiJZIZp%4WP8OXVM5i^$KMkn}0kE!LH!pbd}*gNGd#3$UCF}`bFx3Mc?A)2W{ zQ#CWQb}VBX4@1a16M?+2d-v=MSm^I~jP%(dHqXx>JINAHBqr}A9vZg)VY}SV(zD9UmrQz^c%`= z(T+y@pXaNV_zFro5)jX#RP|CXr;WJQRiiQ@p?l@du5z@iyg}u$;oMWZDsp$Fv%3=Q zu59Rv3-$Ogk|l&EGmr;7t2jvXqyOTETXzvp2CiUMtb zx?yf5iLip4J3fE$jivcFgSW0MUc7PRSk#ljiS`R$`RegE7V2-jbYme(_l;KCWyI;e z`{p}hW+-+peR4Ed;^rHz(Z;}l)SIU#;5D@ee8wkVHkwRsZ$%^>e>8vBW`b-MJ{7*V zYUh`7ny!a#Mw~%XWg2;8pm#8tvb(C(i)erhRveWl+1(yHn#ZLY@Z 
zMzg*-KmUBtzVzlsZ|PL$uB$rTS{imAAXHK#=~Mg76Y!g$PlL!(fvVscnF7Hgw-f?> zO{xg+!S?oT1ozz0eu1?u?jnGtkk7hbR`#F(pHo^hMV)(i1sj9^b>MXoN@{<2FHlrY z+Z_}+gKzyFuc*>C^~1j)ozS%IY!za7G`;hrZr^vi0I!VI`UtDxfp_f-zd8k%@W=ix zxgh=iZITZ!A!zsbfi)AJYzplMN9RyubmDdy{o{OEpN0*r9lRo9)J(3IzBz61L$_@H z*loOW=Id1tFJw+LN!G$3efGleO^Yxwg2Ts9+-sf-HoA#W&y67TxsX2xlr$vbPeh)6 zPJ}G8f|xIfqjbaWl`zI|Q1!x_ucf6q9<0yBJ-j-d%LC4)Uz^LN@Mk5(pGO{f-y{B6 zf3(bN*Y26Es$l}~(M|5Gh>oB) zS@9)GXbH_NSqXj+q4gSGEgjigSqV3UMYFtM15s#G;jLd>MMKri%#h~{!Ia=M#!7lh ze2Pj;Jj?-mTKYEb`Rfjb$r=)Jlk}&Mw`@&O_{$ENlK^_zqPOxQ$N)VcgCf1GO3b)h z9NqK-F$c*>j#opOWX!*tynCLajtj;+PjBJ-P~TiPM5S*I7@%p+7$(g73Q~?2eB0PY zu)JtouUy$xk0*{Ba^V=p(f@5c^t}XMO0wd}dc|ezHuT+@OGy1mNqaUfsPS`eSUzrHv}XK9D;PVp@FzTEtz) p(GigjhKgr83GYS0S+YCnBeCuLg7XwtNf&N;W1i#f_fC4B{vY!QIEerN literal 0 HcmV?d00001 diff --git a/src/目标识别代码/utils/__pycache__/general.cpython-36.pyc b/src/目标识别代码/utils/__pycache__/general.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f5e0f87193eec44abb09142c15a4e5de377b87b7 GIT binary patch literal 37428 zcmb`w349#aeIGtEdt$M;ArL&ZA}NZXNbt}_(zGOsha`&xX_6A96{YoJ-vZdh9^ji< z5?C*6$Dl0AmMqzE9QmMQ=XT=QPVD%sWB=XMO`4>2)BN*yrfm{Cc49PX>n2T_vi+p} zet&QF07OY?KYx(>=FOWo@BF>r{odTs)06nYSO4eacRp!Ze`a<3#Sp(AKX)=>S;|UV z%2rOvPTO*I(vDn1>5yE*>9AZQ>4;pT>8M;|>6l#O>9}0G(p_>*q!V)OPIt>SnNG^J zC*330-gK{A`_g^5hD!bA{&c_1HiSz9<-zoz#3QAl@^E@s;?dGg<&pG=#ABs3<SEbIC65TC&pP`SIdzm0WA3_vH8Hqs4uyM}2F_ zS+di2Z{HqAOO+4}F*&6Q}!^VA_r{d2WhZMkBnA5_1kZdKb5 zdq~};wqLQ*534b?1J^g@A5ph|)K)vyt}Eg63H94*x7vezCzX?bf<@xRT)5SOAcV@~tYCScYKAS(6Kb=2Yd{o_~?!ICzp@%Oy z7aQj-^)J)`br2;!mVXT6e*QsAy+IwiVyQ!UyZHE0DE$Od-l*UY(f)FY@dJ7rDT z>V!Ip_@w$hbxNH^ET{gZdb2u%n9A?O^Rwz4;(3WbsvbjpN}X4aqx5O@gi7N&li!1U zPpS)eR>)t-KbhaNWUGIro>rMFm?Ml%QGHuw)g+$Js(-EWY6`Iua_&*n>#ZqE4WK1q zHKPj5pDwH4SF@^wJQY<|71U5wf1oZZjg&d{hpMJ7A$C#yf9kTDM@;AMRpXeM1y#qL ztG=V&sv3xSYEiunZ`afxsduO!L&_!f_JW0+?^H{;yR5FLXYhPp`RZA96)6kqIrVO& z)YW^`^SHiM{af{tdLN!N)ce(sBV|$jJM|OlgGhOs`T$_`?dk(dHcI?S^)k}lp+2NO 
zj9fpaKB7K~>pRso^)XzR)W26BSD!$RE9#HcPph9n>>2eZ>Sxu@A@(lyr|Rd`FCg}; z`bG6gND!Ic=jIktLoR3qsGx^OMPxCrarHJ9nYUv zf2O{uzJ%Bd`9<|*w8~arQD4RVd(}TtU&EU(s_(1cP+v#NOISbeOTQoCk0aMN)c=FD z52$ade~Mf`p?*{SGhE-7|3LmH^6v{-j+Nh-`2c=Df#3V`Czh<2BKn-NEAf1y_`xNs z_><|Em2=h_`$4@g@nF7^*V$64<`qhAVwu=@V$AmAD(mIFLOG9kROjdNSr4(qxfz|$ zs?*hK33uVsS#Ji{WNs#(o6S^f-dv5j;-}`kLbZ}*o>;mtcf3$yF}>3+E6GgfJ-1NL zk2!v9vO1qBRBL{0s^C@fuDfibdOv*T@Wdn1f~%2w9?C{aEepF@k5!@T`WlWG`Bt-LQ%dCP|1K(J@n&>{(7&Xu^+V-CUvIM+?)Hs?Lvv{gCTv zKUB(B{Aj+CtExO&kmnUcZo8kf^cIx%y}q zzyGCo>{|Z%*PlYS!jk|p-_hufo6qXpjNXJCW47Lmi}c9^96LXOrsZZZv2Ok_?p+6g z<>(yIK2HA3qti(};K&HgE{`lBN5JLl(AqjzS+^ z90uryR9Ho>0FaMzYB8~Iw$50OTj#CHaM<#~vkCns8;}paAa)k%ibEf--+6@SBUPA6 zc{BMG7Ge&VAf;;+H&v*(1(i=_Q>vPq&FfUITJf@&W<6f--aT#-F-_;{1H1X&ZZ}_= z+MS!$)!JMO0y*a){9OJ<5Hzf(dKYbFH8A&&p)nURDTD#Ak5bEo3>%?fd=JnDtJ{Lt zOu*T{-gSt}Vt!8F`xpyl4PmRk0RwAF7(Z01=CURC-tkt(iQ{7tEr=+ZpD(zc>xZjz zShJyQWx`(5m`;PMIm0Q{eU@u$-hFwjDcRZ?Jo9eE=!Pa{~5=vAccv z9l_5{BWT#}AmOb+C6?Y z>U(elbR1Fz4K$ty65EfI)jdz5=a+TC%V#G``6rRVB@VHiFgl$?_mdcaI0gbka6i&p zBS1-l7)nOqgNS(6lI12n!tIXv4W3gB6~o~3`WGFEM;bsx0ELnB=%*JeQgVn8IW2}7 zwm#7asn~2(pFrNON*G~ckzu!>>_{V0Og6%NA89>H)?%~~E%qYbr;y5Y+y^<3x4#i( zn&RliCv*>5r-?r`7hApi*6n4CTXs4><}Uv){ApsedpWg|L2kzeG#9KU#_?mN>hv`5 zgdf(`s^`aNvK0jwa{V5iFIO++A2RoTs8I2OxdCp#vhl+YPaHqxM~&dE{_w5-p zEnAuOBPA&`ga-SupqZ;@yc<eLM}^AEp@gwH&@jj z2*~5?UgSifQk(aqM}?Eo>ycIu@5U-Whf#_P{>oHm$GQx;c@U2cY75U)%fQVnB0g8j zdSsVCz873CUzYXhN6NEU@IZy+M})*?E@i>K2sF-to%BPwsp-|z&{uQw8uXSO$l(3A z<7KxWf#oEy_!EGRQT$8Vx`?zKA2k2CZtlm=-GCr(L6u+`H3Ass5JwzW5G{;a)Q_Du zOyCL#VgUj{#JZS1;E{%vK`~7j3X@CrBd(Xl00M!N2CeQuS0Y?$w zru1gK4^$hO)5HKJ-03ot!n%SBuAIFC;|)E zF>2TQgZ=@)Ookjsjeoqhnr3-O~?_9Igo6TP}U z{jC8DaQsi<)h5J@w%XL}dXQopApL2i2Vxh+ArYeQku4eaq3+GA!cz;VBl)Vpf54F%;0Sd zIAX7}Xy=jTW&EUuxYHkrhvE^2}1XxB9X>h%Xj7b?H_ zKmxOR@HhmY#m`8+Tv}^ax2V0w$anCM*H+U=t%UfK|8C-J@E}HH^CLOv(zG=1HDK}c zD5SZL*4>~10gbsaK)!Yh^m|aYehxts=RCuNcQJUD!IoA)oFl0Fi-W8`LK7{xV@CG>X8c^T z9t*u!u?`#ddodLdPj^7Pu^`^;w;>MMR=pRDqD<3y>j|t*h19n>k6RV|ny}>fUB~8g 
zdC_#~ITQ&B8_w!!*N@I#ma9z-On9nR$(g~EaihTaHMYVmLr2)!`g%_O!dsfJOKE@-NnoLVqW$_;S#}r!SmvlExAA{JVjOxIF+du+&ol zpBc^6IVf$SV1lT3cmZ$f_adM+4E%M4iZ@SmhZIzzMvZ8=kWgoClc(7^fjQ}6?6S_z z&E@rPq09gq4?+s4uQe~^ReRWioMx1QVf#t8hov=3mO|srm%qhQgF%aE;;`1BHH8R5 zkcc8}F!QXd5TU8+Fu>Ma_k>Ii9aV-dRZ!CawWl^OLfzzX2u10o!wqVK@N&{({C&?9!n3|u~jOLMR!Ze}2O_uI|#pkMpAd4aRt z;w-4g>%mV3>^q1U>RogozyKkWA-WJ*3@=6&qxeOxVnKlnPT*Nso`s*Yn7$Zm4|F3& zJp>2(Ics5q7p1ISi~+sJy)MKO)ITkTv5Yri8FynDUo~2aAZKzh{DdrBNb)Z?!XL3z z_?lCJxCIgH7aP&`{TgdA?)5-&?`y=N8-b*5_{XbOv;HM}4O-EX-CIz?&JuoP%DGw! zobr(i&LSl}OgfYYP_Z=ZAD3yC(KSOH3TATFw=ZGrW#JG|buf>rkTdi6Z;{xX$4$b? zXNYAwrm~gQ2y716e`jav6>StfSPG?~g`>?DVi2&7KVn@Ab!x<}+iwh%VWye-6KvT} zGmtj68mjL=+IR7j7D5-c@rEsIYR$$5{1~8i4zwJ`gPPVLT*68E(Wwk&4?hX0my|+f zsw#KrtVk%XAAvfk>>8EQyHTg#t@98z3zza4uL|SAsZ;0tZZ4CITbr6H%=?MQDB1`Y z3-Jsu`_U7p9(?fF89yw=$DzpuUyn5wE>;T_$hDk?beB6*y_}&wE*&?~Ttz>LC!(qH z?TjCq%2m>#DKL?nnats6&Y`TCM0*teUpxy`83}tBsB6$3f&PlhyMBACt-pZhEv6=f z8~%>)rS~HM%C$b?!=ky09J0JF>N(^tio1l-RDoMg z7TfV~v4^?Hkb<{%bWz2zD!MKJv1fYm>qE)?%{&qxF!_Z9CMJx&D!O-k@4oRp=jyS8 zckVfG_rd+?HAfzO;Kbpxne(U4JaYE*;UmWa%}BcU^b_YEIyLdo;j<5&J$CHq@iFH( zgf6HI2mN6%b-9^w&V3$o(Ri+=DiuH8ya#_I2|dQr1jUkAZ6flP-<`<>DG)O^Kv$4a z&~ItzJ%9uYxlFb+U4;&D20+u&kBc<{>Tc-9s$~l8dF4BmIlsG{tw2i6NJ7^nEGjeQ zte*9IRut)n(Vnc|*O3{ul)%k>9kZqXDT6OF*wP9NK!_N|vfc0Fi2;aE(xEmhVfR9t zL4~GhGDwqJNR`0Q{JG?EpS7N)5mVS5$P-XXybc{Yj+X=}-Fv|Sh6PHx3iY=RdngKg z6CmJ9{Hnu3niL7-9=X;6>Hr&{P**L3yvCdsN+DTcv$*=DFGjU)5MF>tjGn}-4MVnM z4Pl)KGLZq|*a!;wdltk}fkJjcS%G;5jsQoV^KOXg+WK{n#Jlkx^e{8oIr8%)kb-6}Ae$0Pf4^ zuO{lr-8**dE@fSB$M~E_U0oN%r5e@>Bpllh&E^+`G(bfYo-S1<{ZKhOZAlpY$z- zh+zu^@oWGD8;d7RN$5|5K+}|mskqY{5kN4>F|HV-7~FyY(?No}s%4s151_WzSo6Ns z7z1HJV~Dy#vU2p*sGPsU<#puOJTQbn}!|Ku;w+*)bqt#7q*-yuVrb2JbMnbwpLv3kd zgiyk!2G*K(cUg; zFC7Dt?FM6_{{-dScx)%|M8(MYvcZc8#d5g0IpGc1pE%tdZ++a07=44=x6lugEV4oeZ&pQJnABH@TcQNQJ=6Vdop8uA=id#K^Bv5Zh{{Kw+t1NkCIho$w0|Aq~_n{*1F0kcSRO(BX%I4jZd9 zkq=BL;qmE$SC1h`?K+aGr&22K=5%2)pW3y{%TBwv(4_>IZ0Rx+MhA@mPh+Zw8=&PTkjru$aFQuZ 
zy8chNyFLh=f72+n+Ze@K9x)Rt>q_rUa4=||qJl5*>{5K|~WQjQH|#BCGx zp{BD2CUK@VNk@&{^}(Gf7{^kzIbu^c1&%OPkDCk-+YIR4>dqGC=AbW03D=w2Ua5Mi zY>EI0hpehwpdxgvKDs(vb!v*MXsqsBaO*uLd%5ZXZZ82w^!(unt95Ft$zx^Awn#R1&6iO zaPVkVhSajw$=0=W7P5fS-5u~;@9XFXY|W|bcB-CW+7x(}nUvrnG=-h>V}WI%ezSS$ z(yqBI7)h?w$w{5Rv`gowQV+pdt2sKCcCl)R%j4tlzA1y-fv?G~UAbxv_A;UxKMDZ? zHg=h#)LgAps&}z$#!~fI%DhrbA?x=({PF+%r597iB`sBOQ!po`s#7UWbZT;exd8#? zf+sQ~?6Rpx(X_YJ``?lRK)i*e=73iK56QQrylMa<_0LfzSP?h04u@L>*+SCF`P2Y<@tFUE_Go}%u@QZeC1aWj3xA6^UY9A zm%t^u)6!Ug3JZd36rA8Nk8T?7LWC?o3X?JiCAQu z%c%6HXat5i9IQiFM`NcKp!}+&Aad%{c)<_PWVM3G_7&U+XX*FONtUKG32=^4h%AF9 zZ($}0I_Z_kf0A7xe1oTtNHh103#Yz@ODIqgl@{m_=0VWTbKV5X39^0+3ik?d z7%WAxAg{*ETQbk-C~#;cuQU|*D zKCSl$tt~@s<9{ebc$l(4RN}<`FfVf1z4zML4Szt)0zL}!(<`NvQgeZa( zLdEGmM3p6H7WYtaA{My@Qp{N;NquqCVpIq!&Dv0Y787(O60+LiwpAIFJn#tt(G|VG zqNTbPV=vkBVNYC^;P~|SfVB8x*FrB!Nj5-)X$QIZG8m$s*#yYrdksqzw1$K)aV@-< zXgJkB^m_5U?)AZuDS`CvYho!`IOh$((b#sw9?a`PDtqXj0G0tcy+PG0X?=|l-cB__ zjrdw9WmP{2dRPspK`;$NOBf44YfKG+l*XXY-QQ}@C0J&m$jwJj@6 z_uIOSm8R0)YTvMF-xKKFoJb(mL2tzW`QRBNv#7Zawo7TMo0;fDatbYd;3I{dH%x&>Yi>c0Vi)Yqm+ z&8I+N6m+!W06CQfU$4ejtWpA&5Mp9VA;g=ESOb-EOl$}M5{ftEi#kYi6UTK?$y%5y z#%(1=k+uS-h%QK4+R-{$bFbf^ zLc2jih3i|}gO-Bgj;P_4h@-xKXNs&%8Kyb#PT87Q-DUW!agg_(+vW;$O*E3K58aj$ z_6eA0dy8-C>CCdZ?rcs44kCtm67HnFeLQu#l!t?Qt1z}m=b;8kEmUjtRV_n3sK!NG zT#u*bpxcD3FG#1}6VP>)f>A5w+ld-ULn>e%>Tfthy@Cd#2Mz>E2C@dG=%CaU+#}cm zFFTu0<)@|~X^o8p>;OiOj?pdikf=#S_-r!???$cSm#O2-K4_49=d}A>d3btYivY+x z%;f^eBCGh_9j^Fo*1_-{ByvWqNwD0g@rQ-WO|r9WfkH6_5UHobxiVa0qVTza)?S89 ziwyn`D9pDDzO4<$re_f4%$g32UQKuw(meP|(0!VKM5f0IC1Jd$l$-)K-|7s4(-?+` zX?zP4P{ljI_IrTs2kq_7Iwy*|yB)m|?=PqDdP^b8bs1xM__+!JrB5JuN2ff|A;HoI zO1k90L@T$Y=9t# z6e6KoitpgpQyRw{Acs#Vm1pb+FbZ z-eIbFE)dhZvJ*>q8faA#;7P%cQmbO?tw_(I9{EGO?HGR*Hw)mvmaL=&M{Bqt2;GAK zjr+~G2Zf|E0{YsJOfu-LI4^~Y)V_e`;Th<=T!R!Uyg>cBpq_XqXdj-{hiS-BF#?F8 zX(~>o*rN3|=xJKM35kjN?%bt)4V)3LX+xuz19m4 zR;$>JkUGNc4yn%9^nK{J(Z+EdoU1?9n(1R^?*vPS?>;14QagIJ!OJwYqB3B%(+{6W 
zJzNHE9B;OmH*7SVNEh71aeyt=K_<8rLqC=jk9WfY?!qIujb7RZEt>RMe}gFz%s01j zT+SA}NAe5)02C;mt}Yxdl>$#;L1P`DV{2lTRSaQscatglMZ(4ia#8t0f8;^nkvM)~ zxzpH_3G2I2zPM1rUk@Q@VGSbR7W=Pp)39^}3jGSO2fRChJw_Y$G!(k?=LDYslL(u* zvtd=hC{Babdul&)3}PuSigPg7N{9f^r|g411>(88&x?zI0L(#!FhE5Z$cy{v*Dng; z`bdht`EwpY0+T`;DS-rbPg!6>CvHJU{D@b>x)i|@_|_OxBcax`+=Hqw4Ou|!<%e{yt3w>Ig+BFqk%XnAk`w#=Tu(1aL za|VIW%$aRf&IAjd>0sp%c3p$+3~fjjsb>PiYJ4E2{646?;bpzXjGC zY`Z}j5G0yy>_!{^N+rFa;xM4*hsf0suJiX)!wt!aeU zLoc@;y=2&D}-(G(D$q^p~#ZF8T-vk~db8li4R zyn%L>`dER`braX4)R`UJeLWWF&aT_{84fUdoE$TBBPro1AQ5IEBqonmFITW{PmxsA z!=x7QdDJtZjz)DC)<)99=3ECgCMcU6~)NRW8<`+R=Z9hayLJuy|kP z3n3^o{o&@=X7D=PFN`@w_{)}YL(l{)8w*q%i8nZyy!0GEMTfhiaD|}_>-%Xqyah2}Ry>8jP~e9_NTxp#!f>!cvFeNQMhMz& ztdNEc+e0YOMMWp-H#c>{9V$*y3};~-@Z8<63emuFaqM)Ph62Ndl8$reC)yiN%tXoX z3r9pVC2&Uw4uNveQLPgGh!LcS0rcH((EOb-gcD^>w zr1FEm5Pt3Egb zUZnXHmYukQXBPdy;FHy*A`a$!?j7-dc-X*a z<|TWMtr}3n#DR0@J!*v%P#hGs$U1N6Ds5B4OV)Fs@9G}3;KU;8FAk7?Qz^oJG=K-m z3E=)TWeAf(Y+D(I%ZCl6#1K3+B%i6j5k3U9%f-}r>37_P+Cem=WkyzFAIdPH9e z>2+$Iv>)m~s6JT3&o`prE288p0P|6AM6Ji$c+PvYDdet0@7JgeY9rq?=_7asB~3g@ zWzSzV;J=8H{;M~tHVKovwzw8nm@YNUk;TahNsK@j6g@XXc@!_MQz?||=;sj^C}13; zhgmJI$852-Tg3LVS=Q8+U^Q(hZY;tkgP9bc|3(7&Z-zxLthVx;2@eV-q}Gg)dd$laCwQuj8g`wLRbZBom2qd@Yu6t}Vs$l2@N3Y*W^RjYtmSnO6iyltNJ zsnLa9H&Idf+zu;IveB&$NDu!(?Gn|{xObb{jh2zthG9|KBdL3lH!d~qV~vJ!_qKbe zLG=@gFf-%o4vf&9=*?a1%~epsv4a!FjW0Z2FU(!Hcb_{h1fjWgDpIQ;om;(2@O7d; z5;XNO0}2?tJ}vZ6<)^a1aw*B5x-AD^5_b=X zmx=8~3S16Ls5{Zz}7P&Uqugl>x(k$>`M2ZrW)f8QE;>v0qCKBrpiM+_>!2orO zVG^6PyS-DlrPMMeSjsjlT18wd(srkMOO`B_^-|!#Yno$RQkty_G;T9^-ym2Dozku! zwcE9#Dq_$(@N-#}(ai%-kk?(Mo3~}N-KvU&<_pg&6|tbdX;#!$$G>s_nr;g?c_Cn! 
zRy2yFQf6gk|Jf)S+&i@lFoPhorgEvXZL7$O48h+vTNYh8+MF$yKf*xMt&DzdD~%H% z=dV)o#qRsd_i~%AFrpIx266$qcN zb`B=!$C%BL#!igw=#W^27mCw+l;%;%=O>RIK6m)+v2$m+D_4I8?_+N=SWiEq#Q`n~ zW*1{+G@wtQWWUSJPUW%xIIG(BB~dKlm23s-+i@1s4R@XFB(@IKz#$9!DvH!wSV9P@ zOfjMw^+Jh_CWmc!vsKJko)6+IGJ$4c>m?PT`i)E_pqlM#;rx6)hx#e{7~L-UE;)E( ztVdXUKS?!rbMXD{pe(u+8FBm*ENs6&zUY4AnLCR(uN-g6vKvbA;0o$u5mhfkh9n1UW~u6Eq431|bM zMs(}3NqCtQ=ceo1UQOyU8FvTymfwN;HPwvuK$BO260bIZ-bo`|h}sa-6!GJ^`rc-t z@Z4y+_jE{gfg{##@Kd$CyZfF4d-m53wM#M@39?x`10^lFE>1lJ{X#BvvM`T|PB+gf z8c&^trx4KQJcmyLQHKutJw~%YArh{fW_V|rNnAsoLd;J9*@2S-#)AB<@|Oy*o?PUV z2?7&RB|anHKpIXb!C^WbC{QHH=b1Z!vfxlU$HJwC8e?>d)Nf@V>c5MKrQ>jho1

&$FR1D}{cu$i8=(jK-P#;QPhz>hI zwTm=UD8x=SojidBtfk+M_gc%6SSpF1v}r#grcF3uWh<}~cYeVs01}lr@lx8t`788K z40WErA}x(h=!1B_)mxU#1uv}Fc0|OhKV>%Zpca%4J@O_>q>vc}1$1mWLQeYS086qw68tRIN|};koIUGE)+q=c@J*dXMA9cX?TG{4fGE{ZQLhLsh0ZKrY06h z4(Hc62Y1&)Pz7@Pr#WN=@GV+!4il}OutQ+6ypfOZWpE#Z(`-J~V?bv&(w@Wt`TMB9 z+TU(J!1xN^<*Lome#buoFO^W7TKB^UxVz)o(6ixZBU5s;<(2qzP^1uV3%iNKDd{LJ zq9TB1Hmr+KkwA$6Z=hH)^sEClf$(i0ZVNK{N>s1#HYPZ0(>wE53_v8xWfPpNBp>Sw7f(I2}C+Kw)3 z6@|6w?Z*{vl-PHiIzad6UBV#*Y<+$G(N2v!M1`I3UO-;bHvI_U|8D>{^`Yb9 z@gi=G%@<(vp}r$L4)+Hb&!+43#G}-7h#xG#o$>4V?zez$(sEx%AgMzRT3gUcBre2IR8VCy+D`V?fp#BCkEx&cRj)IA*Z)z6TM%>L5RzSbV$Ju}^ z8$jaKqB%&FlMG~~v*%#h_-Mca<=Kv4%`r3lg`to3vYSZ zQAG4jEI_LzRYnba*o|g5-d|lxc=}&Z8IJ|*|>kpE#LgaH@~>` zd++?zt=qPbz45+pe*NhkZ#cN~o4@$Y&)<9K_NVmckqs;DN4SXJcS-*jrZ~py7+YD$ z=(DU|VVPIp z4#86&P>%fZN>CE8C97mkd<4CN@n#r6%t_!;O6#qw@Ru32kcJK3Qiv>oWws1Gd_PKp zWC3piXRd%giAE0xD#1^D4rvQ}@!Qv4xB~SJ_6)CE_4Qa>ncM>U{7SGl5E;@xg&Oq7 z8PG1?OqF>15dqWhYL!;+7`sp4tsHCP9|vd{KN4v75&}bdvDYrFLBc9YGM|`lm>Q@}Bt=5mL*+-WX%nakbga*tdN znD-8t_YRo%4w&~2nD-8t_YRo%4&c4;`X})kLcE3W&hprhADIM8H>-OvZhi#jG#;P2 zfW14_%lfC81{ZheLt(NF=N7URKXQ2nyUx?eNo)g{@iOvqIuYE#)3fzNic0qcPfUsF z?$&EOtv0v`Drh}qJ*{G-Iv$IrWZe8NsDARA+d70Ow?Lc#%IUs?Q{7gT0zcbpSE@=4 zOm(Os!Kp{h#`g}$WjP;g!WERo>ziBSJ3zOoC)V_yz-qr4?+9AP?ypFpf-`{4F$P2i zrdC`3GO`*^Q_7=nXAQ)Xma`RpG_U^;ej$8sndG~LbGfwnJA$995m4&G$=N(lVwh-c z2|&ngvP`EOfGhqz1G!)dyT5q69xyUblY(9Rl#1f84mgh$uR z;1;}XIEOVRHx2TVH526f5C?t`*+?4V_>I`TQN&0lzKE8#@N9Ez30p2zo*YkE@))P4 zTRBD#HgCvTA$mDBLc=_?7+kE3%L))BH4-em6A@9IZ>8Cm+YMGs2^4o?QpHpRb{+`o zW`H%V{SYX>ffDqua#TOd;5cppgL|1OXxWw_>Huk~*FVz)X$Y{lx5q~GKFzV|fbg~{ zNT$+Miu1y>&f>6E*xsp@)0`|c=<{p_mDkXaa$1_@rIY4XX4cFQNCz{%Vvbg{<5O%a z$KMLU!v0m{&+(@D$0Z;T|FejYa8nfg$0Bcq)A?cJr9F3(!tOS83s2WqTY0iRPu1s% z`s#L`q>t0eaf&|A&R2K9GKn+r)jeuIPrm1A_dMU8C)%sS>H(f$&(rJGgFLUEC)M+W zdNsk*>8DQv7T#>gfc^-(PQiDXy^|eV%LI&N(kb1?7{y6F${1C>%k(B#rjOXNJnd&} zfB`{xnE<8#lrdUNmWlnA*D#O*)-tx90gW}wf}}RF@>_VjnE}_!@>T}7GT6r8HU{Js zmd6+nNi9BB}1LDo)I~h>nv3xg!dl-;oUp~NqTAbxKFrfZ# 
znG(Y?MTKRWHJ0yZaF_uncbQHVu$SgwhrNb#RBJy z3RL=tklhG)UwDDhX9jxoK>q>-#tJOlx`T%Z*#0q9@;8=ooy$p5HaUFu$;mYV@yjlv$_+`Cza;#-^TjM7q&-~aS+(hsz$NN>v@5ujuJmxx#fimO)enBJ(9B>_F>MAJ5 zkYAp`kk2ptHlD5+at^zWBUP*bIf_dBBkZ@tCg{Und)%RNYmLRLj0}a1MhN|lvofE5y7kF>o%NY{;n5QR$hEL~UZQ4Rhz>RJ6{Pd5 zx-8+$4QDGMCMKD#f73Z#`+>iQ~HvQs&%Oo3{Y5RsKEajpUoks}bl>?!-S? zYBc|>n7bAwDI)i4Vz*bUve#KrGQP;u3C(6i3C#hcX_)ub`26*TLMPA--Ob=e_$E5P zPyzf>onX#=1b*L&e|mtu4{==Zw*tq^AptOH!LEa?qau++V4AgOBliQM;puPV3WF}M zfYneV0vN@%(X~+2@eRN>SZQnmru-wCb{;5>ozWp~G({ZYG<3)!Aw{_frDH`4Vf!ao zJ3ylCc+9uYX>v9O$$kmXu{XR>ku61<$5QYNZ8ty7T_2E`SJx~ca4Gh|PlmmqtrEu82j@&M#pp&;asP>@KJ|UZtt`87QB%o zKIGpD>_9FXIDwju3xoygJzzmb2xN)l`KGS?Zyiwkz z?;Pyapvl5z!{&{F4fcDr#6}%pBw3C<9Q0^9fxUivGa`>un?))nX?sjsx(oYO3)Kub z3>#OoE3YRx6~i2HIJ$4P_lcq;1uV1y>r>yxDZx+vcx^F--@^^3 zA`pDuwvv!TF+yDj217a-z`PQY6gSG(5LPcdPw zR+tHE<(HPMetHPGSCfd=)WD*(N&t#N`2m+6-HSoO(p`^>@eks<=vc3u#|<2Eiuu$^ zZIFMP9I)YhHqB(q`u2U(zxTWR`*-&ha6FZjBTnI83O~k=;OE|r#X_PffD1H*7)D%$ zaEu#F!)iSM7CP`nd%hXHuAcJ*#JgxM8{4mL;$c524tjE%V227r7{knib^d>-;) zTL;X(&3p+sigzz2sWorJhavVXLVLd0+lVx}@Lu>NmJg0L>H{&RvAJJ%g7Vb6h#Gjo znYQ`P;=oIxc@O752x5OJ^wbnk`{3eGW2lj646fw?Nkffj^*>;{UzGQ79Jlq`Onh;e zCE^LT9-u~u6;n=Q$dtZ#Q)3t!{6UZfY%b=)!zkPoqzsy+h`AkVr9~(R^4^P^5>aNe z1i{gClQVT3;=a`cT6hx)4NwhQYepYi1D?P5t;SGLPl)vgD5IH%!`@754j2^B-O3%b zihW!hX+*F^U|;K5P?DKRxaRfYASk=ogRxq(2tylMGtyXtr$dc3jZwBz4Nk%P4RJFD zgY18?cL|mcI`c0MRgS;1Y;5?5!uoO33l7R%+bVZ$W3>HdV=ayc-bf?fEbJ)gIa)T` zg|>bcbvOGMQvIfnXc^r8nsYMi6bDII<-K6U7S}b_z2vyRdGTA7@y5DF5{Fbd`ma06 z!n~m6LueNq1mHfHcP5$eL<%ET-}rC^+dK*i`?m4f z93{=>_7H5b#{Q1_ww)>S7$F=JQ}BbvUVQH|3}GppT_neA>>b}zPd1BrxcX?Sz{70{ zdul+-cgWwrxo`SQo1Xd0H-G74_hEm^*MOLz;(jYG69L}-5q(ADS$sl;JBbmG&F1rS zs!&EW!4vJkUt^?9?zvyMzdqX9LXJ^S$=MgB0#3`~hR!>t>IoOS9dkH87bgHYrn{gn zsL+zy^$vcB`g6?j zZRQAHD&#NYgnMJ1ryS(Rcmg+^-SFKQ9$(DU6!0y{P!&euPz9$ACz#18m&H`jvC%x0 z#>vomI?XYEf|Ht&xxts-sx=eUzmA$Qbok684>88U#PBLD;ILJgfyCxO6XiTlI`*B( zz%Jy>=h9K8UY<$IQ#2x-;2`4kRtzH8p}YcYW+|V+_XY0kf6OLE&8IuTN*6?S@txA7 
zoSo~3Ag2gF>)S;tGAmQU;`(+@NIG_f=kOIubQkT`U*K4+r0@O^Ml-`EH9u*C9&abT z=fqgOIP@5PW2F@U%6w89O|=B|m9zcIfWCh z_=n%Zjaa1^lA?iQGk(0v7j>H9xF|A#o;QG^BDV1d(XZc}v}drwQOF zc=nR;F`%y0Ut;SPAC+d;zw|{UvB2Otk!p2 z89!Vf<(FQi1Ua<>;<8H7dWqxtcGeTY#*r*e=~<7FHBMil@x4>NbMgVOjBmmu=tmfw zVDK=5lMD_cH+D8~+48%s;Mj>%%+?E>+J^6I)!F{$xk_$kT9d)2uV}|{#{6=Eoci%u zR{pCjGum-DfoO+6!7A)I{R3uM1-gAah-5C)8l?9UY-wAvoM8Z*+1NnSDbIi)km>Vi zd_A7O#81i`OtsWnRD6x#cLYC|UD(}!qJ@A_MnDQTQ^P(izsXveQ_JMJtpBq z*^Mpy%olIw1D}PQ)Tm$u1fP$@TT!(JZ~gINQjW!>xxJ^+1MX^6t-WHYbuZxF>n{#? zgGde37SIN)H#`%abr*ZlQyfgN*vEQlxNP*Iufx~K*==g{shdZ+D$IQfX}5qw+HA%S zC2b+6V#d(4P&M;xl{~1m(QERNyK3dU)%1nj5L!0U=!F!qg{}iw(QCk)Bxx>P>8$OI z7I8LMqYtgwehpj0sb-YcjCIbWX+`VV4tW+|T`$f^bMynjY|*jR9Ef^5=h++wBrAN^ zL_J}EZ9L~*s`t$-%w>Xm+1E7yXMc?^6yn>c!FP_Y_coJ|g6$U9`2QQ#_zg5 zyei?~p78^BT~C5b*bPzuEaJN6t~y!lxC2KbcPsQhfON~{kHG(0d-(ny&zt%kS(YnT zqMRuBGhBTv5nd^`nC~i-YdAp^D6K=I)AUWLN2d9GTznaOhv+=g+Ru?X29$#%CBQzO zF`Q|lKC~tE0C0MWBO*$W3eLSi;t6=3tVlHMqO)b?*FrebXsMaMb%Pxj7sJ=;e1|*6>Jgxw{LAf`)W3m&Tmdt!Iy+(J74=0?A10>xrQWvt$BpXy zO1r+glfY?0wtfd^_qL8WC%s1X~C@0WOO+=-cO zA86iop@yTi&{5e50;>Q&<_kAU<;VFY`3w*jKi6Xgx?V8UU=}sKKa8iZY{~gCRvZEc zgB|WwY--E}2Yv}n?-!38Fvs9RfdQeC2OA)VsYfSkJc?ZQCSiZ2kY!eg{gEb4EUDju z&Vjkd=bDZ7R(!ZOvjMUzq>v-*%-K6bVdo(nIj85)qFht&OCOjPgZd-11B?Jp=msm`=)p!v zwrhkI;l?@t`yiDd!AEf(!&AQr#RX1(prvuPt9lS@0f;XSl@-PS3}uv0#1{_AvL?Be z-4Z4h#Mq>^_sW>_i$Ky>-H4(~Yf(Aiv3_5VXY2*2hFCg7Ow)FapKrKir6bC4WB|BRNT=f_D%m z_zqRBy%R>P7crkk0wH99KJ>rdw+xxwcvBhYsz1doC`Uxm!EpJ1@y^j2AZ3EXOjv8w zwckJ-@-9AO0>?IE_YkpIJSoK1C6X_kPu+{Pd>(=VJtIV57EQR3vyY<`y$z5l=juY7 zkdMg1X2Nej4`d-B;tb2_TD9f|4s_zvc*j1ymW7eY*YuL~9SG6F=T9LW?7wgVBB+F* zB*XFF4Y)bZP^28 zsKZhPXth3jG0($N^gP%F@bThu0ILAFbx4U*a?&#$Q?YPibeX>sAH#sGx#R$xw_zcJ z%O%IoZB9I>250d`JsvS+b3wVsp~*TMw}QvJucRyP&9 zhe@Dfs8ZNY%hvB&-5ioBX%3xP7E-1`oO0OQ(HNQ`u%#OJ*mZ@b;W$TrZ30JE@gG0@ z8hl;@Uc_A2_?AYr)p988*m9W?4k|QHRW@Eu6$qLxAilccu|iphY&Tm@4U`23>kR;3 z>`{Ggbq_EBrU#U339j4>g7yPI*@IxWm@|>?t(nssGSJ!a155mPiX66>(J8#DZ$a=H ztQg*T}t*i 
zEDGKi3}Oh5A7?>y_!MmUNn7ti90plhkMw&m4pPb|VOwCr9eil(=aG&JSIL;oquJiZ z^r+N(E;xb5c>IO*%Y5%c3_fgLrW+>wfxB3?1h@~jlXj#Ymlt0ppVNLDm2Z^R&=CG$ zv%O=X(TU^@aR?q+xf zo%y!#1I+aSKBHF7>~lqp&=7%H(;hZYxn>X6usN$$S98>jSEk3gvC3@M;`bzG*aAW< zK2}+q#5rZmCf^+Z^e#|0u8AZ47&dhYER(S@5FZhL_katWgZe4!YK1o7-1*t*3{;GA z+OI3e6dRhcmt#Wq(C?Ge^v0W;%Un%{)bBB=fjld^Sm7)AxolRR$;bgUe)#yqCytq& zZ=Ygwgq%Id7+Ei~z3&1CNbJsrAmWe#n2T{Xv)?`O=*i5vhtA+@5@*ex;XTZI6WbMg z?AVzHPMtlL4j(@?anA3@(QX;x6RA>yz8_yZtZMA-@?*^eZ0nLUPx%?TdDI}XpWlNQ zCu-$$^8Hj-?_{$SgIgJpV(DEB=$I@Y5I2XT-p^Zl|LQj}_$&i*%(8=D(;1lFI`VN* z^MT$XA3nsE_a8A3brk3m!Xx&X&GOkigR2bQ$KYiKA4h=G4at*#f%o5H@H-5CpFx_z zA2Ik727k`rFB$wN29%@alL_V{3>$gNV|L{nRdZJ1UAzU#L@yZdOb|H%$Qv+at!3Pj;ufEw&+2TsGO0?vZ%3xO}oV-7E1}@utbXbf3iI#TAqN z>3)ePiYq5qrB_M3t+;w}O?r*Q?c&<>TB&1Q`ewO0>09KwK7FfPH>5Yn^|thFa^0BT zh-xOVvy{RKI2- zu+#g3^xh@uuTeLp@5yf%zgP9C6{=sYd_Ih}+${I2)avp33YJ>)yp8s*&EGP}({yO|lj@So7PxhxzIi!CpnC1aX!8dnp{pPp2It|nCpc}l9Rrcgs!{e^Oshm!NsJ~X1 z)N@FgQNOC*ij<0an|dDCHz;4dL%kDEs_I?p-AI{Lf1}>3-iMSos`un?o_~{i&w_;# z->*J^v^T2{st+O8uc(*QWn7zw*qwV*zN92eEM)UT_LBKC~>JM}U3am3!D z{$71TeG;)})fM$AS1e(%BW9r>ekp%`Q-e27eeFj zOTS;)r$PhYt8^tF$d~duTXd$qLeWhw67>!ZSbjofy}VbL%p)Gt`Kf%?Lo9iEOy{%e zM7dnVUF1a88^bk~8_VY=GUaJ+YMQwc$EUnPxs+v|c)BokxKLy+AB?xKb2{m9AvLk}(f9DmY`tcw>G6L^yDdHnd%FIpBGkEmKp_BI?KXvG8+kRch76(0Vq;MZfT?F+&$;EPMbRg`v z6$_>O`GWGs{I)5b&lOy>*N;!>@(9}MCr66qte2T6lvE}0hP^v?4&FIBJ7DXZSVr4O zSx;s?FRvF#xzfvr9zJ&Hu@fgV$M!##Iep~hq5TI>`7xE%=L;p@+L`V;e(XdBdFszX zIrg&l$2i+|06%vF0xwhz%~`V{&#H#z?S;^Mm{^}Um??AC1ZOThQJR`%=4z)z1z2+KYxDEWFE|w zCkMiM5Ebd|2>dWs-gTSbhUS}h9zwoa`*$IZWsBACP3uxiPYAReGwI1wh6`r$RSPwuBtB8tT1Xv%$B+pro0zuswdR^#b=+{G!hE7TBF~mxJk&qXe zNb0l&qHKj84=GH>V<8pClq4#593Yl)3L}m;mUpmJb3ho5o-Vmgq2v}+-pM+uoSVpN zCs!_cS%82Zth8?*G>MqL)0Ji0`QCOnUmV$<8`b6MsTTMoJcRLc`RhYa4L#X8YpGBb zaPbJ5bQY6E_yAWaH4Vvd5<&)d0J*SA0g47>wC&@gd4z>>u|&}S${Gy0DPwS7 zv7E~m-FpV>Sq~2lM71ESSpGu6^;|zvp2E5gXG=4F9N10kvc}Xzh^?-vv0m~C*4T+4 zWT&i%Wm`#Wi`8lAeMmW+lZC;D_+zKJ=43u!k4V*Wd%Vy*5Ds7x$mbMhkxNurt3$JP 
zWrG)~+6rvq3o#2bXJ+%UP}QD~E?}4*4XN-c^wW!BHXp0cW<+KaGd>UHp?sX_08 z=UeAbA0CJcnEJ77jKMr-`obP0nyq4(kj=sTZ zXf-;Ysz&%W(mEDGv$1Myz7z2-g;b{FKFEQ*-PIV=lo>tUftG1b?LC}E&Gjg^mG;{< zPhteKqxk`MV43C`)`W>e{umzK~B2sv8xJD%Xwnj9W zRVv6d5ceac>}1}LpUdmvvYYp3Zh;<@3yRa4j-3*fIlf1=@P-6^yrE;#05S=vN~BF zFSsP97nU|Cv9Fk&99G$Ts(AgM@C)gu5cus67d^07GhQL*`pd|pJaT8|!11BO4?d7N z_~6MF*dyH4VNc;kRv=(u)~?#R-$O{)D(xdfpnz3h3JXMq7edv@e8h{+#~?OE7i_H2 zuotfa{}KvIG2r6Z96H4GYIr=M;wtexAq=Z@plztqwSQ{rVAjiSKUvOB0!Auv(2mkb zVYIUPXkl1q^~`qUC>C;A@^#Ls>8Yu*_CQ|*q2p)`HLj~x`2N3TU%C9)mM`!rBg zLJ(M{JTcH_DA9v>Y;ap-tUQ_5J6S|xs+jf2SAo9IxL$rz7N#GaoWM#4E+i!v3Y_oz_f> zXb%^3VrB}RIF~I>=lwQ7dM-bl%}p%rM*=OqQ1CLDcjKj6H@cg;ArC?IeB>lr$YlIv zCNo)9(?!NpnatVgY|%W?(`-eBL4(-VDXyNvgBMs0$01=S!jf{$G>7r@K4!ZELGv`L z@JqHZEHu>}1A83cbQneABn8L_a)`}U9}1V z)UUVjaiUg#Fj@LnkSeP|znQVd;29n^lPQ!qM<2vPH-;bt>>0Bv-9aw_ZYD#HV^{lQ zyxsu=?SL|L;prA=PX&bt>!P2=OF$n2LNON-%=Dxm9nqNhrLF3&4_-hCehshIAgI6I zvf2OynXXxn9OeUjb1MR(RUk@hBCI!q;0h^6n5xtlop#LceAybAWHu z^e_ZN;&C)yZ?0NibRwZQllKxnA!3>?+`;LASfc~_IX3UD4CWX-gWyWAqJgk;8ClhF z#;~$M_n0)iv_D*dpTN&eAP5Op)!70qX2}m>=-z+~nv_w`GPuZqWA+P{@*9!m{rE`@ z3A;O*2q&VNsX6>w{s@Bi;U^+=^K!;=ZNl0rCd*hP3cveNBTXGFKn38H^-O(bvS^mb zpNxG!B2}(J5m+gd3qrRWYce84%22j&?!5BjSo-DSIp{eA&eL&d97v9q4kHKSv-lZ7 znhS2_(sngg9yyiTUklel`eqn*uz=a^^KtO-{4_wZLPXaBLZ4< zV-WpCt%PT`!NW+C4>!dvi-J07jk(~r0H`g6ht_Z)uXN-n>a65i+Y{N z$aE=ZhLK2IKZf8-_yudy7H+t*+S4;2z$~sIZ8me(fYA@sUN@z2*%{qNvhng4Su#;6 zIPI7wo%mlLIZlO)oFRVWmKi$^`127mzSMyM#Tl*FX()qY;Fze%cn{vx??pgu8(8xa z6?>j2C@JT}jIPqKE8*tcCXbdb3Iq!Mi=Wrosj0mFLzE~060&+ALR40o?0MA~x*(^F zRBh1`>i8kY*>=LuK`3 z-Ww|`w=ryu+yjif>vvlJFF z`A|Gyx~ulPWWvoD6FRSigE^h>t#kS}kz$?&^Qj6sGoOEhC&7HiFvQSUtrHO-L)_Ce zrS%t#+-L7~uG(9j>$a6t$8`{(rTw**VnDFAKNPwgZq~0|wO(UXYx=j4ztMWZNct|M z{b&4SMMVH{m9^JxTTSyeu*{DGdZ$3-VXK(d8gxvUO+Pl0p}69w0R56uD2bL7Wq}9T?e8wxoR&o6JX}_JzCF4$yj1(^T$ww#+38M?C4lnz$ zqsJe3;Lu4wBE=`5DTWw;wH6sK7fKL}iOJG!?pXPJh8n$e!bEc=oxu~)arsup50B(Z z>F@}6OiE>DRAdIt6dQ=oF&+LS7q{CUO_UUDD{fz*MKk 
z8}Z0|2Q^?Qku12ar&Jv4BXI_(KGTU`7wYJ)<&pR@lV5n+wjp6>E3ut}J9iE4I9-YF zy<^9oyY}u*uQ>4V{YUqo$~=1f_2cQP_y`*Cmuh2tl!m?8MPE4Nc{;bAPN@hwPd}(Btp54WxL-+9s?QS9-G>+q}2(n z2sNUj5h3Kr>n&)u1|Gb+{Ro~7Jxi;pFh#IZLA&uwP^g1=QP9-BGdA!pkklop)pf)} zap1534HpxaY-1xJtpY26JS%CGv2}#>%pkJ?y9K?}__f>FxIP3%X>X8VfK+N~7wK9+ z)?{sgsE{EvBgSPU$mM4&is)5T_1mENz|;eKfPK$-*TsGf-97k$J$Ms}ow4i`nSYW{ z{TT#TphZ-}P-9)l6{l5R{}mrdou5Ta>P#UL!@oqRFVsz>d+RSzH;C!y@E*vy4XdNi zQi;7ypZ=OL!4F+aiux&@)adKC5cjRYYsQshAUI(wBCrHtY=N+?%h3I+@be%xzzUne z&RRw@t|HHaO*6W&p-O7|mMzfLMdH0iA-&kDuL7~@n5==BV4MF5y` z7?+0CTc@8y8n9k{%z59^AOdm}KN)bAW*UT$N+2NC`3P+H(FLm?iwDaC#xWz|#Wi2` z!=>_hzhgv~Cpl_h)+VO{MJwXa_+wP$i62qf85fW#!)&W-#xX%6lz|BW#+}*-uCeq# zqLLdom^h9NhL)O5g?tQ!iX{z=5@y)o;2H}< zjfuw{MqVHGK>AdehD?w?bk3kElZQYr1S&QJXz0GSPQjh1uFe5*XoxvA_%+r#ZZAPr zSkQ4;vc$&6F@e4hSR9@c`s27;3SMN2R^qrr;-t!My->N?6;%?)0SoHd1O+U{5Yp2*ERgaKq~07FV-8M! zd8~ILV9=y|u_%T@gL-3T&i^}F>38mTVK?E37#%I)Nbs3{H)23)L>Jc#5b6itV`_#o zV#uW?BxKuQJJ(~@br@+^dtgzh8Ns$2LtM*{@g4qTjf~(81;P&`0Rn?TvLt51iUCD9 z1UooK0Kq^j;O?e!Tt!zvf>;5hgF%hJSUV)Cv8L4MIP~})j9#iuB{9$m40JolS40?( zl#7O7f3tFszjGqeOE9{n+J+Ce(WxX%hP{sdpO~r2 zZSF*2Y6^N5N0?)0bE)h(S%=^VZ?Uplpgwh=(!VrYd1QoZX`o`CaVs4r`()Vz)Sf%7 ze~yx2MTx_-2aXE{SI~G=(E=+ZbtKzd*<#lhrALbFB)v3@f};VZH3sZ9WXB4KuyN+8 za9*E6@;-tsi=^tJvcWL>Xjz72bGn(`t7|(n(LlPr39Kt!O?`ox+qr5vl_b+fz{ZR@ zf{D-=$WuV%fw`h`vw7*`8VVys>P3K3PBXA_EjnKJmtQw;A;2?Z~Ccyx~ z%VpcPTzMKsIg|)I4Os)`eVHa_YPwjgw6Sc)oJ!m=uS`40`t1+S|KFc`!7=V~PQi6x zuX4&G4(Hk#o?&hP!DPV`DHEn(=V3JMsY>@#4q)ObmYM@*0Z62ta=dZ?C4-gj=_%ey zF%*ZLX^yG$?U$Yz=+a+7)Zm)C83O=@G%=@s6$0QT7?(ZWz&M73hNV7#ffiC-m=cSZ zewnX)8NonOCpePfX4=#ENb0Wd5G&RTHL!#qM+O6LX-Zey@wL#Xntk{{ui7!ZiF z8NxbtFP-(Bc;s$GO`(W!C0Jwi*{eh&*b7IJe&DxWYaPUJqc16C#J4O zwZgrk|6esO>HvD)MO)QewZ-nD4I+3#w(BOl2G1n;MJ#2QY(G3TG3s~7YN`#9{svxN zGB_gTo#vPjMn%dqc&zr)_4uWgz-(uyVJlKxCT9I?b%wEeDl52-0?kr!hW=&KzDF9DqmB>Z@gS7Q%ANTxa+12Qe;mBtQ%kUquyfL)s6o0L5=!-BU65GUFG zZbU+MAAqqRz6~j`S@3w_(@J;H+DT};TzXpN*w@-$2O^#L3CaBtN}`hh;c<>WCGd6v 
z`2v4Jjn4@6J?G%@B?J;=F$&Uqg!)cSF7BZnMJ#$5WSJ97^7_KM*_aSl8qlFRolnw# zNk}Yso0`8^sYmj_9|Z(hR0y*n)dnI7A1NxmpS%h{OU$-aI#EKZ3SvxF@a((62zE>) zK_=g3c%z^tB!J1wk=bOG8wW0Uof3AzcPfb|?U%(cRypl0gG03C+8)gKLNq(*3IXN; zIlUg$DQR8RFy3BQ4ObH@VVY6hAng%_%@AN8mM<9nRa`9xQH?_vyu02a4@RDL7&dQJNtNB@}IUrixxr95ASIkna5IV?oA@^Wf6Y3izz|20E?7|=}b+M=q! z4@{t2;AO9F158v_Iwb2OATtU|UGcvh$%4sO*H;w?O(u{EORKv!w(V*?-mN%{3I=ODa$TgfI zjU`5w#saH|#z>ml)I5VY1chuukL#!bC&fLd%a`aP=t#T2Borj z&^b}e!+*YB6x*ZoP>wh=|KEn}^6wzb5Zd-Ct!%{xs zolM)!z(MwOkHPNEbMCk0;X#6J450Kdy9*eL)Z(`{?G$Km4~F$192o&;J)03TZn$vk zNi~-(U?}zjqI9TWC*g1tgI^C+{l@$B6D$WCzMw%X_?9;K`h7^G&^fK^7Bl29%tp_( zQ2Tm3bSeLa!bYx4SPn4x2D=B$MmMBQdNppSuOqJ!64 z^tMF!oHIf43K0sTK<>77Jkcq^ObAb%IU6=cn9NFR0wljgzbj}Pl2$bYkC~^uC>8&P zvR|UK5v_)?rp04M913IC((pLymi6W!BI~W+SShJFY&4;1-GOp`2YLO@Nk9!W5a$3S zuu~c~P!NO-kD+3@9E^8^G;&=&$Khn}g{5C_#=9^%D$0rLVTelaqx(+|Jvj8hUa-~3 zxQ$v2O4XCo;JYDIPl6H`riy^_5irvi95SVFIj?kXHLHY+7%&bk$^hs22g5Ab^(6*4EGAflJCwZ^k}oB52=%cJ4Ju z&oR07hkFsK58&#)9YN&;eVKJP1ZZ*3@@(9#!yT-|)JM$2DNrUi0{>$uU62xoHz{Z? 
zVxZ~GDNx9OT?5r?GZxTUJguyv#Ye>n8itlV3b%%jL{x&RvDwfYp}lChIneKPd+yw> z!NMf1U7H7pCso0vH5xdxv{VdCcjCdL&V!S{m4mg`^M)OOE9;D# zJUoyRKYPRf?Ld9-RXw*0`ZVdT{*O$FLYK#_CFipR@1gvRzYLlbPnT!*7mI=Ws8;lt*~LEYPw_2!XX!7MO!Z z>AmQRAN8iOE`?|S;TjWcv{*h5M&cG`A_EQ=;#V_%1aZBLfgo)v1#U2wrl1caYp~eN z2-1X#txm5+`W3qp9h}Kd7Auj#naN^{J(WBLVNAv^i3kxd{V{4Pdu*jM%bF@vut1I0 z$?C6TvsyORbA^pz$&leF&$ZOZ86%dJ8i(<;RwGs|{MMjc5`KP8H1jURHg_X!8dPm_ z|B?PWxJo!edQg?kV+F(0Awl0l6<&#V$J?6U)}E|$>;Mtu&(#i$VTDR3C0#^7g9TLL zvcVszaVnm`xe8vv747JP>ictnwV)g7?`7C65pll;4!n&eUly!XZ(|Z|{4c8ATRz?k z==nZ!r9|35A4Z|rQXPP~&9i{G`J24H@jlP;R#YRaL#lH%`nVePsV>$1yhs3Nf8_NO zG9d%pfpTD7uGMDJRSy+!?jv}%T*zFn5I7^J%uw2^#|{F)GmZZm+}xMcP&C zUeU-@Vu#5#LrdZaV*#-+3k^={VEKFry9*Tw#?=UE#?>%czP`hN1mGR>)G=%}ys(ow zaBzCEh^<`^8%@0O!~sT*9p#E3mZ}S!TOGIT&M`JoiD-xLN)+a>@8bP>%RwxRl23Ab;$rKL1NZQ-?18vBv88` zlT>_ds;t_z%3t=cP{odN}W$-$@GmPy-G){GkhTsF3GiInI6900rbNLI{ z>7vBGp})XJ+=8G5X>_AvFdXcdZ>c3ngF5=qr}1;Q;yn^(Zau+P1N_#2RqMC=fj72F zN{s;7zq|&Y#c5c)iAW%SXNaF=7w;fc8U#EZm#KooAjBY2+qPH;JMrNadb_e zVDLHwP@^WWjcyd`2^V@eVje%)*wSLAM+UvFuG5?&{Wwd&<~}g;xaGFa5m@$gmMMY& zImks<&j;N3* zPLFvJJfo2^3}zJRG*lX+jEd4J>-iXvXxxj|p2CciP>BUFQO}{&@irB;F)L(fo9cMK zmxQ~S*c)Ex#>})sfdM7Jb4}_G)efY!21pC_1F5~_bwD9O!9Gw(oP9~blN8eP_q|SR z3+p11Ro!aY#gNyHvU`Y{$(tkI33nSf&`kXd?qkc;@&#+^XJqo81Ajl?vp{u6kGK31 z4N$!c;Eq)4XKJoS z_M!B%eWJ*LD&k^7_4COY<8&8?O(XVVSSQs=X*V{tL7_qQ%m=G6Fbgp<3xMvJ*QZwD zZ9L~akaO5gqSq_bYPE)Mn)E)ScuC;>zpWC1f7k0*YlVGXIlmHY25c`JOFaJ{mq50l zxLF5vP-1?Sx*5H+bro@$b;dDbaJ?H}joD#Mu%=+_Z;{orK3F$v#@Ein+Jc!QLu^-* z$bU1;brE$d&$93wVR3G$Ce;RRs6q}9mihH$mZ|6|?UuT4le#}HwQQ7HHW`JGckBFy zDj*3tJH6Xr&DnejIt9pL?P|c==t-ZdZP>9B6D7_T*nd*hcJ(Us(1vnot0;T6d7IQA zT1KK7fvsq}r0zi8gw(i`H5!K8+w2VxS_nCqm33+tM(B3*<_`Ae5(wMCokOBVfPJx2 zn7V3hKYduRdu zHK0_%>!U*6RDL82jOIvwXHyP-Bkm5+A`{z*7%Ve`*VPT3BI+OyEwXB`1(<_qBvfeK zqf(q8t1E$z&#r5+mneyTNVHWKMsG2EUyUF)HviVORl3ZAXl;eeGRb&}0fm@r8cXCW zf_1YZOI9-0`dr}dYf$R}*Q($x+0ZBT zz%S`kYfV&e{8O!_X07|RqfzrxfQwD*5a39NfJeG+GiByl2ZQ^mXm@YcF4%_Q5Y-a3 
z2}`Js%-c_DEu|9Sn#zQ_a7+j$HJ3B`b1XGVSXyY|^=jdG{IpgJ$77o2u;f9*tTm|- zjm~XreQ}V1a-G{+cd=45;7rL7tU*M8x3}hJ;zF&@p=ccjX`6%0*EN%T3J?A}8%SZ0 z!;lC~--PIOT6p>J{hp1m52jI7*0Qs zb*@^QE4DiT$p|?^L(^n1-DwDfu1kxeQ)vfJnLP(h0T^h#k;Ns*GEeJ#&F*;lZIxxU zcGf_~(LE@rvdo+f@^GoZ<1u(fh<*x13~XtVErqQ~&@Gh4Ozq;ipE|hz^!`(aPM_k| zUHv5#g-y)h9sQ^l54IS%RdmuQE6?+dHa9zx$42F>Y8Zm_dOnH5APG&YY=3QslTCIQ zyM?B~;tKyMfGOf_7+Oa$c^XxK?7;jOYBp@v625Aj52*3;lV}!pV^U|S?_=s9g90B# z@)z7fW&<_%!%Qei4Ddcz5rSSft33ilzTg)3#< z6J7;%h9g!lrcevUJ>O)rd%ze{he{1;%I>6r7Afm2{VeKeS;ibF{$weC6A@a#==J;@ zeV#$ig{k3=-vk&)IGTV_p;kjY^vNcYbsLYX82nT3<0%y$*uvFd`tVDFQRg22Hb}W= zs?k!9mxQ)$M75U^WCc|UL_b!!gEDay#NB-nF-vtoM`D|MuLC+cuzkXgbk_7~s!OzP z-O#%InX!GTZrIB@!N!s4Uq;6g!&>05XHxB3=a=BIpX2i1n7_9 z=A>fJbJKZz=rG9x3Y7(Zdh~-I;NR!(1!;|BJsd9-^`jSN&X4iX3-~c|c0`Eq6M$&= zAGmqu5DC?{3zNQu|5@J}hTZ5AA&)?3wCyRpPd90KsTW8xa8cq(0rcRi+-xW^vNbfr z&F@sA`;VR2>p*`uHGSBu4`}hA?Q<*fVfcrPPmNYK-bjQp8Mg=d2EvPPMho=&*u;Af zR02g@2^zTRWoWas#f5|n(Mu84ov!Sxl?~U6nu|`8q!u_-Z3o{pop-n2y=TYn>3xlo zj3R;T)Yd?~O74nK0QtO-bB+}*prRACLy!iYQ}FhIZ+wT5{wW8-|I!R9)#?Ugya6#k z33LYr4LA!Tw920=z|?V`<1Q#nD2w=bh^{jNml3xK3DS$hpfsXGc!9p1g-Z?ZW{eIP z`hyHaRY$F6Isx~!Da!U{df&yTf=t@#Q`pT3c2IW6?zU42dLB{=?*_84fdsl`$|FD* z`X#)5wT<{k_|O*2KvJ5-?*M)-F(-v^Vu$DL7ihVl(*l^E=96q{=h%p5C_E%Au%192 zVB&V81WkIN-^heO-6#DLIBfuH8;PILi>>Sk$$1DkOj8lrvN(yeDC9~DS-xo@PGs2t zTxE@mhc?6}(f3iL!g($9G7Ot-hdh0!;X3&myx-D)mL9`TSP2pa@!NN5n|4qa>M&*` zDimOoZr*5w9*d5a$U{2}ev3hCSM_I+b|*W@isM@QruVHVsscHMVkw4 zdM0HLB^SC{KnBB zJh=%91h~RLu>b_ejpI}bP=m^yF$%)8xU^Jk=m*14;OQY0CkPMl?w~NEJb}p)MguIK zNBHEACdpMpA zKO1>AIud0^E+(FXmV_8w?DkOo;vXFPAr}I0CL&P&+n1mOg)dKhKK!f=ZGds^073Mk zd~Xhefo(nU`55J2T;sf!Hbd19Ly?D*`v5X;q=A4=4A4My&SJv{q-(s1Lv)^&rGt%b z=uQ2PP3B5mj-UG}bc)Ix*dRnHHVz>N6j|bQ2-Sl@Tz3AU>Tg0H91-`B~ho@8w;&B{Kp^5o6m8eohgdI)@lTt$aWjHSJ z+#Z#h07)#Z^OEE3<0(+|u#{k{4z4$}0c|76)L*lxEQy^x1C@_G5z;?Gt8gSI3F8Wu z+jWUbyKbo?!qPD;XC#kksnKTfBRPBK5}iapE`BZ_2chkR&YDV(k4rC?2mO=0^0Zf6 
zNr$l4kQSM4a2hvJT~T_iJnfUGp7e5sybJyK_%f;!*n-jq$+c2)y>1O^*=n+YnTi9~LJSaYwAZ0F;o&Gw3zov;dX+Xr+>Tn7Ryc zVwGnimgtYMgg7!e7e<^)2O0ym7km=@SsndxK1`U06=z_%5hG6mIQ5Sa&}WP41|#w{ z)Sqv{YYh&3sWx;ypNeUU+^Bwg&G??wft7$ z+7Dt{SywBrfw5~Ttl+esX7!ZcuEDEZ=d!NZZ8Dc{2fmaoEvC8DegpM!)i>I&pF-*g zUe#GdDywRhHL94vNF@#!N&WkXHzYq;80!OW#*QOu z##YAj3U1*=$`c8#X=xbF)~FA?X)g0Ljs7Oq;3AF3b&1z<$nITlwnZc`&X8|x*zlR7 z1^aP?>eOcoZit{s-D!b^o4afYBEsgnr-gAfC{Wevt&L;N9pUSE6&vF7B~Z_XZ-O5I z3y4#2W^RK!!?siqA$d<8xbC8NZJDCDJ*<{ zWN3Ol(x0f_ck3-*UHIznZTR-nzj@on%>%Ew_p4ui{gzkn-TKu}e)Zqnv+q?;>aQXj z)@&5@ir02g#}Gv`#bX%TQHJ#fWLeTqzm0aR5?r_SG+T=wTC0B-uj-#7`UBpFLjUdF zMTJX?&6^NW`vvDdfI!+KVM822$#L!j%BlY`UYC!^B=H2v+L3v^0BBAIbhohCT!NEJ ze+X&V(k%taf>>nh(8u3KNsun!31CkZG@tMtgI-^|0OTc<|1x|3Wd>hjAT#^#7-M;UTfKxz(};Zq&vSTO|Krf~;upuyJ%_-M zT5Ok#Xb`MYiq4~$XD~e@aGiWT{DWsfSji)peHW@-VW)-aq#>4$l-wl&dxD%6{nGO1 z;G#HhiNKB1qTvoIb^!o02;vHirCPHRSB%}-AsIV3En<_kXnuq~RHCb&xP8Z@Ty~nv zE_1ovT<$QJJI&=TbGchCd(3-#%zJyxdwa}#d(3-#%zJyxdwcL+Wc3qx4I$n_c*o?x zaz8o@Hf}=ShOzUbu#fR5)*0-$DWBKB&osEDL$3+bY9u$4E&0*&W7u1sP7T9#f6U9s z%jslr2gl6%6Dca)5j-&^rrYbU@$}o^Ca9qPkoDAyk?MGQ8tj*BP8$?JdClz@LXKO2 zo1n=lzn#R9q>&sTT}Sz zKSa~8%x^(v!OXNC6iPT*ShmRv7BGc>9T^3L^;eMhO{~F~UpK(<<(K&PkFj2PbMXXT zZ$Z1Jp#dJ#cuL_9XK?fIh+eMo1wbNBiXyzjRce;c@$VT31S8nI#iRJZ81h^xI75Kh zg4hTh2|eh5OeXAo!h)K^_`ooMOI|B%>^$vE7F9-ob8I$6|2U#;aPY^ezN64Wl8l0^ z5uNMLvil-hw_`IKj^+dB!JQ7+9D+011j}P9d!b}DiTn(&;9O&D4$Z(-5{LNGc@Aef z<2V(#OTo-9RtI_*Q1d{aq&$JrHACIt9#)v#G_Fh5Opxyb9R40uLfVkPug~g?8D*XR zCR*BpYB|2doCL5yybjjB$RU&s<{)0vqQaNpHc}1u@+4%8kBrSVuu8HdyaYi& z6yFx{}Y2FyfvV0>@xMC5F5c~ zYH%&$>#G`rBYL0O;531G!^$IbY3jq#VOnQ#v@1;NRLyD57TWTcYyqu7(3WyuYUQO< z=2oUQn4-UCUP0(hQ`FLm->kJY6o%RSTgcyn;8A6WE8zfs?ne=j?okB%Wsx{cyg=PB z*RECTxH(PTqSmWh)dqDN59#Lt{XCpsZRMf-YC8|)=P~^14s|CF)K|OJ9v-C6!}EDu zJ`c!O_p1Xu5TA$Pt0O%Ao(JFKz<9{3eVcYefP9%)VSXSKhrt za0g<0b{Z7&JA})R;LT9nh7#ys`42~x{Hy-`|9S6U^hUJx8r(yOlU3s*LbfR2(0h%e z4mAFOZU~B%7D7?KgIfyO8?uVc_-bE+u)xD|W#bNea<0jb98LM9#CrKcmiilI<7rb~ 
zDXpGYYHG@hr^pMmdnFoRN?B_}U$Wq?#Ek4zCrTn#fu|rQ1*cPIwkny0KX0Ce z-In>Msb;?M1Vk_a*iDHITU8EOgkZ^#8dG=`VK8{W3WWYapp?i$2q;Aae(*?8IyVNq z2}KA9)=&!2O6#=L-nq`C1n&?P)!s2vQhP^AZmpH=ZOY#Y>eojv&E>aF0geW5&lL+( zIHRGgmAeGH=m8Etaf{x97*sDyr;6`F%9(O+G*bb_DSeAGMY>PrS0B_cci^9_nY*qx zTXi%)jyGNr?Od~5e!{1Iq$ z1ODl|*_SYT8T@hSlPi*?KZOXqM?)~}VB@FACsC-Qt%>M;fJnINTe!ly%PU|lT#W+8 zu<3Ips4d?BY(k~1M?@MDWqTDzkz;!^4#!6b!nY6_g&JApB`Dy9e-C;DDNp+JcYuCO zx^-GkPBL@rBMlq73njCQW%KaG-o#o>#u`!WS39Q2@7HZj6N_&J(Nqb3c$uP_?U+hTX3S`C5)wg$&| zG`*4cU^aucXu*c3GO*x244xdjlnB@k!iu;D;FB0FvxOdsEmY(`E(?+Wu>F*xI-FS1 zaMNIJUtbwo2nQ}uPq0AS@F{>B*s-THWx*@q#xVOfzTF{9jr@wpd3od=-8cm|2h;BT0fzydYieYnUz zL%oZs(x}5s6HaQ`HSMt3PQy`k!_^!DIoW~N;*A_pS?!PLpw0=9g9Q?xqT>uffNBp| zju8UAgFlr!gm5mz@hQzIRUAVCWoKCO)!%BQj@Yx&5_VFEA_dO{*`OpKl^TZxljA7R zUKGczfb<~GZfFR=L)4zbg5C5S?=E?ct%%Rm-jA%6cj@v53pI$SoZK566J*d%U1ZdP zfN*Xb2lI9W1F>`x`}1~YL>i|`i-bthc9^tu8#bF3${DZ)IK50A991z)dUQo z3iauMVQYW@-w-fjP=COwN8f=#!UEopi}4VWHPKkDoXa&Fb}l?Brq;;6bvD)kpZ$Qz zq0pb+`-4CJ5B&Svdl$E&rW{d!?ax7^P6~Em;gDJqIN{mO7)D)%aUvTG!_<9)V&VV- zj!XmqIAQ7CFeO{=YfHk%shFIN5jO`8AeSBFdcEYD3UbNTI91El25<1>Y>N8xYN8h+ z#cYS_nC+}atJrxk5jln>q&jE2c#=KL%-wWFP+e$SlpCH#Ext3m497JHK7TR%WDyv> zXLfmYc{N$Z9s=}aIeheaU_!haV+yxM=?f-4+spFsq!%tEZ3+zjh+SQ7%AUQc+6zto zGSGvl3Ule9|JWfQDbVmU1v9(6p4L;l_Y$T=lvyi5P&0L(rjC8wbecpPZz6?(juq`S z;|{O?x14>sx;&^S%-RB^QA_KowPue&0`2wOL95uu*}iHNUi4e*&w`T79FC`8spx&) z8t=ept(b+e4Xx>`uE5jf)fLr#wo)w{f%hBYL66!0`OXE{Kd@~UEiN6tw(RjPj8zBB z?>5R^Sub~GwZHLZbtMi1?xP`Z0uY6sqh%9qXzQm?cdd_MwZe=z<_FGywKcC4*C3GrQL79lvS5uezqX79ULt&#tJhF!NcB zqdj4q2D5_hi>w*Xx{&fy>?~g;>m!7_r*SeCuRkq8SSn3nd*2$FjWuwXw6W&aRo7M5 zs%0x77S7&`+3K&}TyI6aZ3{L>xq9>X%GxY|Twy-Z%K%!dovZ^hylx(O;KYL!CM`lP z%=NNr4he<VRBA7+GISkWn z*F?bKC6f;RPRbYB+`V$c1cz#7In+s9JW-Lmm6dERZM|#2fu#U;U^&%66vWOG)E?k~ z4}3BQVbOh33NmVOLBr&%2JAs0ltMf!Yac9OFGoRPyEVSEqom>71%lns*u+uUxYaR_ z5yC6cfnPLs(R=4%5_53wkerLLb8tr`RV(Jf^21Icv<{wTJEp^ zaAWjF1-(yw;=W3Meg8LR!jXe4iUpj0#qZzTK2k}#*t3|!(YZLxXN@5;7OFE}!V!wJ zpSR+uD16;5C|L{dMp_TEZWf=@0ER6&w 
zXyk;NV#Yspq%|1letGWT+z?)mujZM< zH`d#9f7y#CJzyrf;YFIj`PG=*tMOs%+~+QO+|>WbS<~on06+ID2*e(Ru=dSI<46rw z3pOde1$;Ki3AkJk%_wBe6pV&c2ZbRD4T5!mFB%vtJSQ_HE_Nmk5gWPjbvgK`MKbU% zLk)4fXH9v*j_brYPhhg9F49Ah{^{^(qZfKoJk8p%v;UJ|E_j$n`{giv-Z~HigQ2>B z(7KHunlNa~hV*+d8CYN`c+$X670@l9na6R}bl`=^+E&JqHj_N2)K76#unAYb?dm7a z~6GE0bS)1y3$Zlg1RDriM>!eSq*^JM75KjB2v*^aM2y zTHK)^Uqg_yI9_JqUq%2u(=+bNph=UZ+Jw9pp&BbtsN>oF%E>4#1*2B)Cl+5EbDLM{HXFNNJ60 z3>)+1cy`daw~Omvn;ce!BerVpfkEyMVrxS$fT*n(_4b8uc4h}atrP8O$8lZ&CAcPU z#O}ou_QAHp#gX|EwS3^Ekdq3N#^GRiE2fs=t-qU1$+?xF#vT97-@(+>2si}ljiq}oZ)zD(||zuKi%_S5as+^<5~>iOj` zV9MB`q_yNmjBdiTP}TCRlRT)k+G+BUgR1Ae+4O}R4_ek&#ePc|Md2TSg}Va$M~cSL zR#$AVe;$W+RlCrd^_Q`io4P=0&8>}@nNZrB6y$}%|jYkas6Uri0ZkzDDlB_RcyDk@#I z`^o($GCKzEtn@BP*t=tJ&z+SN_<-%e`9K-2YwpU!<4t$qI^^ye#ST_Z$mNeF*jLzR zhm9Xyq!Zsjwk69^(_;GTT!PelhzH6o=3@zy(>UT22&_qEQ}aWqL`V5KU3@^cF-F3G z;8aifj@C!kzl_h4UBdrat}svNK5Z;G?Slio=N==7|0t}*bFqSmGS5f zREEh3zBF>9>b}riUByl&59?p!)NX1T0T-K>@!5y5JdepU9~cSP_`S&+H|~{3Lti8a zyy8fFxw&aKuICj8A^OXVL8Bte{&^+l$kG^Cr60gR_z~{L@MDJ4H-1~37~P*JTF$^ALdkzpc5fbd>$X3{CNc2BmqY zgVhI?o^^)agMp0kiFmz1A6BEevK_*tf+Cw#==H(i3fr1s40OFE2bkI=DWI3pUYvRW zTDRm0r0+Itki>_WKu4Fi^vy{Iqp7{g(pbLgwYQBsf!y3SOV10IT-HhFS5PH_OIQyU?A2O_SL z&xENpf`$joJ4Pc6{>Xv`hhdMLO3t@Z@CaoH+*U z#eZPJ`-pp}H3j{MDmoM{{+xF!jVe}LJK_-d=(c9;8Mp8?vCWDR>pQrmgQGHH2xW|dh;_bWiy=+}W z9c)8N7B2s-NC)#T24PAg5FaN;(+GALlSu|?`ke^UZIqMYe&p_C9#@Dp^!(Tr2|v>p zT5<%w*TUg%Vz0WPi7Cz)-z7P)^lMz(Tkt4^Ey`w1_rsFB)mkH~l%}`c*owSSxcyF> zCrRs%E?u8-yk6J$|0-^>w@whP$rvbrmi4z;$`(dIip1mJ+VNIc~uzm~5OaL+= zL2&Pvz!1zPB4rVV3SAfkv#SH9EVkHecKI=5Uu1BEqbwQ_LHQJ@WDf>5BS@TB5C<^j zr_9a_$==6z-ppxTB1v_^ffZ+X!{!THi#NuT`-Bsu5hVrT96Q8ujai8{eBIsFALp>* zSN~%?hTj4F#ur&q3f|Nd9{qdh+_`AmA&^l?@$Ptlk4?S1~D2Kns9+o=2ihHU^}s*mdv9KhEh z7&?5|4CF~Zv-Md%kmiOD%k}WU2mE*@qslpcZkyN^2R-1_HFFNy@32J&8T>X|DQkI* zw|~swRS2M^8{^q<=5xYNFxO>1JImk^-l9fO0q}R)!*(gx?7HeTXQ}EkPxF8-c92`C zG@(eo{4mBA5Nz=&%IRSoG{$W5Z2*Aq40Yg|xYCbf88<=Bw~&-PZ#Edr~Pi6$d(b-lFB9M_3;hEvc^6xKVD0~7A`r2 
zlwXy*fEq;j^E>e3(DdYK`97*^zD7rGT+aQ_+ZfxyfF8%1#LOID%7v}z+pCWAC4E~Y9KQXw<;9nSkjSI=A7v!S`<}(R*GVL`C4l;NxgJTSiGEfYj zW$+S%4>R}(gO4(B8Qjg_cNu&|0#?K=4EjS1=r=EJz~Y-~_?lgWBKnMpvz53LiJy%) zTNs9i=4Me9i>6Q5bKwDmVhMuNpG6iw_24mr0lr;$^!Q1k9GXbiU&e2nVI9FEir)#O zdUX!z>F5x0o@zpvLO+F1N$fM{8`8T8v-H8s+#m2vXPL3qIyEi9pO|FiGJ0poiBGMQB{{V*9`NjYM literal 0 HcmV?d00001 diff --git a/src/目标识别代码/utils/__pycache__/general.cpython-39.pyc b/src/目标识别代码/utils/__pycache__/general.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..68f5d11add0a89188ed9a105ef36c3c3777c9b83 GIT binary patch literal 37130 zcmbuo349#Kc_%vEb7C+!2!JGbszp&0L6aaz9TsWopm>OqNRTEW$r@1}4WZ*`_6*@gXLFH&#r?vAs!leZ{iZdFwcb-)3VcaXQI+v17-^=Dpu@E^nfj zx8DE%t2sf+&igIMsqU_>yT1DFs!n%x#RK^J{ulps{M*+8fxqTM>t7TP_v7b|h64c= zNCuRp?7Wq<8SoV-b{PV#2CZb{xE*R9E|a=kTqt6aAw zx8WMk-&WY3+%9Ew=7$P9k~<2oNxsGkWY=VOE?A4ZmIBG)?C|t%l~@-@j%4>_Ba3@g zm+D@!7p&y%*^%ixRL_P$^3LpCsyF*umB9PEmjWs=7+A0u?Bu@ee#x;)^)bid>+r6B z#!eon#Sg5AzelZ3-kaSzeVn1i*LC)ES$5ZZlK9acwheM%iw590bp_50cN+0?p#I)>gnq#nK$PCl*vKs}<4 zqsFxQh8k5TaW}61clCO88h07>U(^{jhC7wrIbo?s)nmBN%KhW&4Y;3BPpBlyo>Wh& zv$#%WN09C*^+vqQWzS}x%#JKr>JQb^Dt*aXz`x|Q`mZXZ6yDFMKT?xw3U_&=98tLq z0d+$4sS^_+HLYftK3P!TR0UN;nxZPHGHNKPZz)%Kh$*W-R_E0P+?`W@qAsdA-0AGS zYB%PjqTYljSN*A~s(IXb>dorskb72rTm8Ix1~KQ=TXF%UTu_(rbU{6<-ir4Z)t{-$ z>N&*Bskf_lAf}?;sjlGqCgrPltM}kdRlQfe4>9xV&(#OiFCga4>iyZ97Jp8?f5}3L zA5w@|VwWL0Z6qnRrs(+_`8F$aB|3iIDy@b2B zs{d2{iuyS2-lndpPayqe^;hbX>emqSocfOXl=?L8-mX5Q{ypBkLw#5M2lXEj^G@~G z>T~MzxVw^_S6^7LFbiK)zlG;_sV}LQk@k7@J@sYv6~w%d-8B8~@E6w`RiJVu=y6&=t>iy8^gQJfu{{(;1jI@gk%Q3u3_C0a(_{sBkrVc(j zcJiUoGh+vjA0P8Wo}SGnyKAo=J97GglV^^+(s5Ad()nS}o5KF8%~{0ojCZ0)Y!4pM-Co7<406lU&s}GYfrN0nI*|}1_bpFm=cMb17E0A$^(!j}ZdCrgJ z+>})QI<`54AYjKVy&czoN+9^&7ruak|K>wq_~F8ruIp{WuzznZoqC z*@Jj?Z3F>Z??Ra2ZR;Q3Nn1FPXhmS!IvxCVL(^_~3)bkE9+weLVHR!81oHYoS0u*cO&qFSa~;Weg~Rw@;ric_4e<@J(ew2zc;IkZ9O5-azRQ@rg7t8_`oDs zO99q`M-c;7XqRjwxIC{3|I=P~oQ&}hNs8VJo ztDQ`#=%oP+dbrZLd)P!`BF8GLb~E2@H=Cc>otf07*>W4C5;B7Lx%{m`Pz^lQJ#VQ% 
z6%g_mns^S=Mo0mFDK(ACz!Fx5M}THnrGQ5hMmi2~30*Afdmlr2|A1ehvJnGs@(&-# zmon+Rd+%@~+33-suokox$zII4p6iE7WvuRCx;W=Yf#b9;X-rUvIP1FF8YQ1XMQ%5% zvlCXxvaPsvmzA*eJ%~A)k>$ac_+y6~vs%VO)pGm1z#`BNAQLF)4CayxRaqMX^LFKK zFI2S^n8oM851LtKr2&HN@PA>skuTN2-yF(Ta7SInbFf-Xqo0@-^+>BT&MC_ z>AZD&0V9~6%nrFjt2EcPCW7*#`O@ShP>LVYrIP2zrqV?PaB}@Foh_8kXOEd@KbR|e zwaEY)!5Z>I4~-r@>4%S;K7I1E-+{>EC@HJ`80hu!?D=fo?>ggwT`1IxNZ`fMI`Q;7 znz!+^Qnf@wQ9o2n7qWizd{&Q_+^la+p=s&jj33TRp+Pj*kJg&Ga*8`qMXJ`L9J(iK zmw=zZ9QRs-0RA}UxEG;L;(an=2%R5&9K^nK!EM6=96m0JLoA7ujr58OfOP7bHOl2a zL2_FtP3K&a*^4V16g!Yl7sgfk-YT-|*D^>W@H-#Pdtkffyj;fhSCMyl?9SAolcPr; zdN6hPq0?=!N4RUip2CByNx;IqUA6U~hmf#UJU~W40o%S96o?8g1*)ONkQZKzK%5FM z*;utfFIoj&CKMJUz|oNfbcpfQ;B-tyRqP5O3~O|#W3_YTqaF^&p9(&E|;_in#0BAhmYrqvlsoyVPS>zCd5@jyRp2- zfV5(Q(o&_Fp$!~f?Lt$fLRRl#5wUVU?UCOC-Jf&4Y(W;LA1=&br2{)Yz(l0X zsq<;@J_40xFr|JlGcmbxDtcz!tOl882U5uYbI9!WA_&-VAjUYLWDx(b`b&t*@I~t% z7u5auxf>B=15iK&jPe1jH&(O?;R-s3k&F1zQ-+hg4w_hqKoi#}rZIpi3ZzifnSefv zC_q$*T(=)~y|nfKQX!J4m0jsbrLskEpsCcmk;xEjtFo$A>u}?3Mwl}s;UDk$llmRp zkZ6Pd0@1Dly$=SI{gIFgUIlp65K3sYQKqA_?rh$3_dAu~@SPL-R^$i94VN|X!Z|!? 
z&8U#}a6u>L%IL)TbbdDLcL35e+3|E{W@SHOXz9hAmrA`4nd;r>ZRv)*932Y#NJ+Gi zO8N0rs!&q1dEO^dsdKaGym_N%*@_B-Ch2X^UrmDcJgeat#Ozp5Vy>I!Al}~3WOpEF zon{q$(H2I=#&kziRHi$oI&clExa!0;q7stQFd6RiSKS~Eet9RycMIocdrg4PPpVca)~tvxDO4jjr9vT70= zASpCColi#3JoMnfFwXi{M&qMFU}H zQnIR}yu-={-DA}F%KmT#{tA9BdC36qX&kh-68i+bjMQ#XGKbbtyJ)RVfSxN87-UiR1X)gr9pDL8b39mShe9^UKz| zFc%PWcR~~j%-w52oU#Iy0dSXK<@n|aEY*4Y&2}+vt|JtGTeyUdBNsDSQM2juC>o?V zl-853ADOuzSBqMo(8O#pV+NC`Tt9)}OZe4Rrfv0cWp42F9LO+>Yf7Q5{52qzt4*?- z(td%DLLK7G%wJ^5M5y4mBl<(Q{;!W8Cq%~2kicn+BUqJTzY-$*OAQ$iozaPnK~)q1 z-$XqJIST!L1k}ZWK`&D6=ZX4~(oV!^G7ZxbY)x&_Xz8NBq4YC)L8r^*to}Mm6fg;3 z@M2h#CV5shhpv`VMyft^q)sb`E>_R_HI`W$xUgQtQ`^AR*E8WsAlBGc`JGK;u9jS_>$l%J=>e5{mFs z*-*%OQzhj#hpU-t$TDc?SD0FcUhd5J*Yf=#-WgP6>rdeRnk94B#(H!hp0q~>>`!oG z2>Y}R=?56P3jNyI@O)@KJRiX?d>IQ1^l=pLLh>&3T!8WO(dIx`qtst;pr3=*#*0)# z3)W&3_&(-!ELn?jg)-+uSkN1>pgZT@YIGYW-E?9;^hBU&k6_I82dbfu1XSp%U4*0t z>FuRzr1`uyFdy@}Ano^5W6-idx;KpGYtjM%&)euS}w6o<@y3Q27s>mtToVcZ3Iu&Yi+gkZ{vL%Z$l8`?+`N*=Yzfl z@_~Y71IZR_5-Gs&VK3-~ro+_y;Xbq&A@l<#LtYd*gz$lY*FhzTJVo#nH@Yn_(TVBi zeQ2?Z8ZVScmfO~46~&r}odr@)b>r8AI(qAA|J}*8haP?4_`x%&$4{Pq;963B>ABAWJ z9b=zA046UpHB1o5LT(z)%&KD1kJX>SBS}P;v2#H&CFY{AG!>bZ9}y ztBVHYoAvB%h_wzLyt-t4-xhcqt*62S!CD1v#=n6U9Y#h$O$W}}z_dV2m!VYGArHlY zqXINsie0u@(1K7U5PhUs2jLM=5+da?2yDo1LoIdw>~=P84FaRGH;Au>Q0fenj7*Jy z$=U)L`88xQBgSPUh~=MoBZy@ev>cdrU=FbA8SjQzuc^5Q)!&b-P~uFb%VhLPC^b3E zYtR^~aj2{=X7aNttGPlABKtJ%n)TiCZ>SIC^0UYULT$s^_-&T-XAFdL{wePSAM~s! 
z>c@Cfr=Y~>zBPQ^IFgJA4luIiG{zFh*}4jCzY1OfK>-%n4rbLdnr{`l0tU?JzD6sF z-8**d&Zk{($8gyj19Q*;F=`fT1~QN32WPT#LLZ>i2~Flp<9@J^zG#+4fz62l2WKd^ z!?+SR);@|y$qp8oB1^FnuDg*V1yl^Ze+pKh|Cb$P4+8dprT-e&w*HYvYV^;+gW(PY z2W@PaT+vV3JF3*H*X3o>R84LaijyhcQ|Q-Q7~FyYFy!!F#$N&fq&FfCSgtYVd~RzH z0XK}F3^+?O4MIpI50=4V2sZohk~N6MQnjXG1Tzv`R5Pm|ES4_#T@$)g;HZHOE0k-B zRNOTyh<&jZ$m z$AtbEo>qbv8K8COy1`Q$WjL6)g)gh~Ka~&`<_;Dqf6%`W?DXF=kiq{4+`*v$Hc8mg zg6?cV{}p2DgKrmBs(RoHL5N{>aD=N{8AD(+Vh}82)<##zIT~3zDuF%!Vfmt%}*r>{0@$=-oi;pjZZpUD^PvtwX2qwsMyN zD_Ti8!S0%e1J7W6D69~y1Sr_4M!sTmt$^A^p0(GZyVBXv+{2B|8jCmaPi*v+(C}o= zt3(kvyAC-O$5B}~qjTd~XV)$-J?Y{?*A`sT`3qR(M7y9q*?eBie>H?_=KG(}Mjc^5 ztfk+J0H}*d;kw~Kec)zHO>i{~xYTC^Y#Yqv77U06R~UGu4|ajNDQmYer?m|b^YAAt zVFEc6$T?5}$OwkSk~~S9q94qY7F^rQV!C0&x8gDG&ff} z5N=_s)Gb##UJHX=u@`B#O3bZSgN*SKV3!RmPO#ZFTIsKQg_B;AtF;kUW-I-ju3_Xb>EU&Y@Sn`~-L-2zySjo*%D>_seT$!vi zF~OBHRI$&xl`fOKQ1Sq!=g0K_hmv6niNZh!UJ7PYP;yk!1}h|JBs^TlVx1SIMS@Hs zJui%cq6wyT*6LOy2gHZiIP>;4#OP}X4glZ`Zrp0vceE-+!Z_Q?#5FV(nhQSJ*#gp) zo|b;VQ0@4ZQ;9Qf0*uO(BRB^Q`5(yWM{5R$%1tKI`Ca8S*ha3)@o}9!ze{H)oMUhi ztB=n4U91{{^6)UcdnD(7yqBnJ?|JtI49>iusAuT35WCSjL$JO0HBcbL~evJ*Lf68d%Dv5v;%l}nx$rd zMF0+oryZ|UgOK4$?`)ZmQVa!LXO>gseD}p?hr0Ebacl6(-Mk}u)UQDR^aIPV*Meva zOAIr6_9D%jI#(8Rm8M2gzl>lguEXfEADq>BaE0!qv=eZ`g5ZG#uRBa3be!}|@Zbvu z6Eq|w-zxec4u_ff5GS0|@As~p@I83tk~0g0j32;8YmL2L6nO*i0vQBa8-Nc;+*)t# zgjA$gBmFDk+lOB}JkkcGh!)k_AzVccGJb4JpzoB0z#^j&!WF=R8a*`!jaSiuu&K`= zgCCkoYXxELckm#5q~BeZBy~9wTym7+V*vzM_(%z)OEp(Zll}y|!xaPnHaM1}sUySq z_e!iLk2)D*`T8Fgd=){8iovJkZe#DvQ{x7S7^zolo3P&jTP>Q zkV01@^U-QZpRPvS(?H2lBIs&ZD?BUmgVm_W_Vm~ax9h3uYurT}Wc0ul^^zAuCO9>$ zZ)hB5j(UF$n4xKj68~~!z`Pf&NvoL zWi6LaO+}QSqg$B=2|KGWhXhwY3N3pP$P3n@Xf2r*E{rpri~w~Ovr4aJW5$>sAWQRk zi^8TcrveA3IhDOg8L-y?1_$AfkN^V(FBL*brMK450yI`Gy`wV1oijcDy7249PY7%T z!%M?H!Ek|oAn;HEu>$Wwt<3~=HgAXDmXJdb!7vEzG0dN6CGi|t;_Qa6f(UayNlc&L zJ|7WMN~3qoi!FB0=ST=D*)-tmka+T)dl6n=Z*KMaD)vb}_?3VJi)vs#pgKS%OAFpy-z&pJz3A-12s&TyO zyej6c%9z&+cW28Tco>+q$E30iTNU{yNdP|I& 
zacgs+94PIG9j?b6l?^)`@-_w7<-j_nXT8!c!(|PFx_8}H&Xwz!#HsY(<_Px$lO zHg&Zo*;28$IL3uUcqd^^D%*#hQ~4}>;u}S=Jvs};hcj22rORspia|9j>f%bwDMPCX zncmR+t{PES4s2QAWa6_V3r>x5sJ!kp^#>YU9@q~k6UY~sqHCqLagN{#y!1@g$xci_ zv>IA%cmYx(dSN##LJn$Z;Si`h7w=8I2`Q6(2CuhHwBM11;|F#nfX2gGE>J6Cir?Ad z_}}Ck4AVi{W+a-M*n)`hr-e68qP1*+Ixzzfp(jI`0{mnmaL0kh-Z+iE#B%ysgq8Cx zZBX_H5J{PGR@Y5j0y9FB`>qGoHzK=xFY*L}6dNfEJ3!`l+kId&dLc}9<6kdOJnsjs z5xLuG584qtIhOqs$k0ED*_sT?vV&sU)Ud?;oHoJt3OAHDf#+@Qc%m|beGj_R1slde z*u07xYxsVJx>hhatYOsv9A2LGfcs&Q654*5Vn(*q-kot>0Y$;6TlA9&iE!qhSOEFW7MS$(KsCac^Ig&tW(Wao=Lb zJXe?$?Zb^wLuKIcgQrIy8hvm-_-Q2EMKuLQ=fW%)Zb;4r5ae7r4lg6CB>|@+;us}B$@vIVe_ZTmF~-kD{u4O>vh7r{?}rihzrKu<$|KTVH9!5V{AYqF*cCH@c|aYr>!1eZ8T zF70lp7GN1)u%?AS1WyUhA8M2ls8Jx4yCdKqDHi}=P~S^lir{x{M7R)pw6eyb|9;sc zC}09;9VKu;1uQGeL8J6t=!zfqX0afJSOD7^Gio?rx&Wr(W+Z~g?*XPgjI_F&!ApFk z^#3ZuXv+9SB&{tlGk6;@eAuyc6!F*WN_coKUC38L!*hjvn>myW1tCj@jw&o7S~^bD zHSpLGXO=Scpx|_xty?jxwPdX48XLosA+BhtSI<`I62UU)jPh;<9sgcw}`Rd5~hPIt8C)*cPBU^2-*!ncvr zM$Aw=E%74snYFKCR}IccOv5>W?xzNqh{C@In(WWjl>fcZX0KAgnUMQ6;Ghncd{r<} zqm6O2@xQ4~uWz~^fb%zG(nZifA10x}Qe6PI?ehS(#Q|^i^lH!Y)>K0q0;+oh`nUmo zP(7;mim>|U{@zG*dX79AMNp@WudEe)+c0$Cu=jXrKLT&*Y##m#vX8G~@7OV>{}Lcp>6NB71OVv% zh)*kPPSoilf!YOWq?YTp%G%q|D&gVhob1JP1}Z3c+V6LU24x*-2bm3Z^1tKmRf-?^fA2s6n^ebn?{gZ3a$#!iXpvqIqFV%9eBYFnVpMXt6rO!lm?hH35N8kUud6+CTo>lD^&5Q29S z{%djt+Qb{U{}^g*3n~?53luG8?M0^HTu}|ewE`;&9&q6+!J3#C8Liw@*Yi@xP}@}^ z%HFgKaB+9*N6<)poB@?aP?g58{cRFT371MYw9$_@H=mf9k@0Tm=EU($i+1Hv7ULif zH)c-2MyJm*MsObodBxB+P$W|R9$q(0DshZ3-3@dKv27zrg+_zHJq!#CTOBL}mw^L4 zTgcNCUD${PiQV*+2e$x^=EflSPGUOh>cz~W!gQg!5&`~Gbb^Bs13rghVVJ$DhSrIQfaf<-JTNiD zsKo*Ja9FTqtb*Hxq%*ZwLkCd$xnWV^KnZavrUv=utZ|%+!k!U%0m?$PPTG!bX;5WQ zHS_D$2v~;*SqDIU#9OV_BRAgj8TdNrZb8r2s10f(bDH=yM(Gkm{(r0zr~j+BR&5e4 zc5rbJwiVc2IG#|N3|s~!gVJU*R6()Db?PSc5~nrbUZ8>Z7&SQ1O>@>*6D%o!jhkgv zZKU1~2z z=yvqx4)*3U?Ll{riZTJV#7eIGinV*}s383MHmdM!5%JtJo`?SueT<-mj~X9Uw-VVQ zOp>@i0DVNjk->KjL?#S`gN^P%ZV0f8nMx;#j-+*Nn;pPH-hZ0|txsM#g`5kez@+ot 
ztMBx~+_K}&K7scw=_T^|^kUY4Y6aORh2p8~L>idRk@U`O8F+@cBOp)aZV&EYnHj#J zZs-hZ6pmWN)!LR`4x$lKp?VKXae~CI)jW4#fnJH#L|yem#*2T@&4vxE)92=9-iD@1 zPct#CtPotLcrPj@GJ4lFlnA+>4Q*9o$tuP=U#Pk18Z`SoBs0qJpYX0BM@ajARBzvP zHIPQ#Ua!IE-+?i}+g`7Kw`41x)B%s9Q5M;(h297MP_Ly@Q@LM13Uv5+P|&J7jnPA6r6t1B zLKAOP1IOXV^%`br|B4?ryjqLm&}i9qHWmW;C)c^Pea~rKi78#*h=K%A2kKrUE)@D4 zg7)r9yBs9GpUkK+-f}Cplw&*sbfe3>l7H_mHdKOZRsxS>Iv3DSgVA`)9w^Z z$ERz2XeH!;1BUYsgq&Bb?G@V{f=Gli1EaI#EZtcMf!L@IyP;F*1T&dF55)jDXMGD2 zRbu3oXLYu2UA+3v%Bp%h>pvs@jxyNIGXERzxQ;M~vgnDrjQXnP2U}a_-$H6PiI;&8dF8F(=n_HdDz*n>Fr-r8 zULRbKkOr-UbuaEC&~ehp#TaV4xGkIfbssoF<7U%mCG2jRVG*<0($Aodwsp&aYH)~u zf(IJB=&1Z0-IPJEg=^ua+!#1Vc#(h;p%OzJ>B%3GKbwxK2wY0<=iw3__(8~-9r(q; zp>rF42PE6G)o`)Ti$ljYp*o8(@_;Gbz>g~ROJ4-M7!2A_lL%;rFvlq>jIa?RI6y}F`NT-{xn^G`s#j8e#b$~>lyl!`hAB%pfjGx zX%85m-2px+=N590xBoeU71V!%!keN2N@hT97U2%gs5QQC}y@I7r~aCjogF%Rb& zKtl|FB^9~CUExujN@0mjQ&#ra zssC-wQwbkDacaMV&EVzPqh@tLWe2UETZxXt{bRa3S=sh#l9NfeyOwS!xcnBhK);7g z+>fAA)4CO*Zkt_&HcMMv$i$Gi6mi^GWlz0qcvjTCZ(0Pjny=Juutl?3clT@ejodv; zA*xoAQ5}$5+F4Vhl6fK&KqAj&oD;c=sOVJv^rB(s3>%IT$rAaNGrf2{92rj&C3ihmPPZofbL$y-X-7@0b>aycAAN zrQXHh{S01WqPOEN8G{pAnS#8T*>~}+D8V}#v)9YHb&w-q_u7dVeFrIb_tFxA)!&QR z=E)`S>ih-dex-wmM>xzjlt98slpv}z`kGMeCR(^+Lww^#cR=})Ien&KvzD`GcD9Lc zWR-)X!szM=)CYELA4=1NcRk05nx0SkDB#+J*$xsvp%**ZDY97sATs?SEb&BT@8GaKcF|^coC`YhlJN3;ss2}wj8xmCvutT?Q6v8NM;{Xt^XotZk8MJp_(~12~t`lL3H_2d-%s1EP zfyO4GqYxuHT&Utx@CbLo88$|_Ms?LJ@W*FSBI@S4Fl6YZ>#Oz6sI#KI%~6MY;#H11 zevLogb4~?)KF#2a1WXaf9q{Lk)Fd&ik-bz;-QDa1K>nAQml!8Qol^a; z12=r;q`DtKz?~Rx3%)J%w(vxl9k~>H4jL1pb+OVz1B`!g(}yewFq#QL{cm4}E)-5Y z(Z%4~Y-j+Cdk1Kw7iPW%3D8Jd2x;MTHFpFN^J1)a1I}U<;^q^&}9Kyhwgc7 z+kixkoH+XC8PTm`iyC^<_+yi)NAJha{RBEi#SW|xq9B`wa04PO_jCZI;zCr+vSSZb ze;aDykf=N&9<7zA7Niqz5!TgY(aSbnM#a12<5Yv_7cYbSqr+ttfk+&s6DV9i;RuRc z*r$XrE+uqcg~t+4+fj)b(8=OvFFxHlodA&!N(r{=;_5>i&^FRh{WXgUlE}HUQ1#d| zaHK}7aKI+1h_^`ZPBX2$FB^2an!64#SQH8)36t_yMb&~3X zl4||+sV-A-gtpKdrlw}xFgmtEED-fU(ss{?UZE19#=zNC2_AR0R3clPT_s$5afQ<( 
zb_qNC=(@a17L2g#l?{hm)#PCPcf#2KDM{yans5)bU7*r`R9sTT1+kt1DB)?mgDwN^ z3edbyRx0sFslO1XRe2{uie6+1Q6zFMPB@h=GzOe7m?!wJI(msOW9DVW8R|FKjR;bl z;_0;2<7hOqR9(0Pf+}ZzLi^Er*%r zR%Rjp4Xun~1ko25eh!PyGGd%Tli(!ktbPh)`hH>#lJEMNRz|@}MK;;!IDE@uxF^%S z)gFLK8*H}x*5f(|GF#bPFRqEP>oKfgOqW?brMc_yD%ZKJYj&F)=evL}WuuB|ZVJ&r zeIILUzZ`}%fvkpNtglzrtiFDJQcX{-MNT3P$1ci3y6gur>6&PeiW_j3`YX6^%7NHb z*%)v$c5JvATN%$^#v|NEdA^`ED=o!|7>%Jf&1IUF(I@*N0woXG5-(-!G}>aeJj5{0 z5P58v?+FEr}Te9GOSq;1`IX{qz>X1%@kK*Y&99zTajc%JN*utvx;!n*0XFa2M|`%e~+yC zA94EwJ_iCnx^G$G(q{E0MAUz^(;7e^?TMfvj>6;Ju(txjWRG|rWc}#70RS{7gJ&dd z2waA5%U}R;*oNc5z!$(S=QN@I8nmSF>nP5{QP2-pzh z?B^HzpMyiHV-w%(2jhoS1Fm`?zwlW^9I)@!Kr3?DE7zclJ=rv(s4Q1xiML&FD3OmG;iE+5= zPkAZHoQ&6=;D*_FBSj^aNOe5(Ov$+U9ncA7HMd^~ zIc@`Pf+nZ@c24&i1V+sXEN!h@spd2g^(`c#p5N?IZvoi5@ZtkafUWOgZ-LjkvRD5$ zMhQ#&X5`RcK|pIku81>&&2byV0H)yo%sW{Qlu>_2IQ?ZD^KbOBzAoa;c}}n98~r8H94d0)%cHFNl*`RPZu9N24*JqMjiXs%R;Qj$bEeP^==&Li4AwE&z+fYTn-J)|to&v^Zeef>gRKn6|1NW4mWd9QNr#t*7?53BCbO_iuvjLj zE$?P9!e9@By$p!qmhWJ2Cj(-uWhx_<$rdkD#js4>(DLgTP^++f4+CmTmhWS5KZAn| z4ly{&;1~nK&+@|z9$|2t!3hSV3{E1@UuU;o&&SgY#uz-x;4ub|Gk61oCm2v=-m)eI z8YN)Q{WY0VrgZm+tWMnR+hZ2#Z*cZGzk!H$aJ_Eza7Nj1*GE92UgIwZjLQjB*fq@t z)BLvUU5342PeOOF&S>7~R*`GQynn`Pr|OIj(dkydJI zNsG5g3uwL;uB~-}WiCPi=xY(f`8Uw22nC>sf?Y$IQhDxLjWxFBS_WT+yyN2B#ksxI zU4NOQ%t8b9W;kb$zzng|(zpH8@&4}r(s*;UTl$GHY8jIojQ5K4?c*&Y+!#X{eWu5j z**XSoAM;l!za@QR-1W^E1igj9tqfK`Er#&I6o&BP-0$LT+YoXX^>*fY4^~NrM5F!} z!A(QZpos8O;TFoNM&Dkgr)18nL9{>Iq!<`a3ND6%yh@KHU#KOY8P}g#-`bV+neqLZ z^+~?g`ph_En|pJd0n$%nd_E%Q%PHT~)n z5W)mt6CgGhRXJoKf+a)h`-~SV5B-41I!v$Ar2tThh^wITP?DjPnCZT7gJoa9WtQ zTR8l}XKckC6bCD(ih04Kmywf6S1cFIR1M=4{}Qij8vsM#xHemN;Ge9S5xlu>wi;-j z>X83`%jIV*4H;3SX$4C&hCI5X&SDA16*Mq(gMLLW&J_WFl!(nygFvHO@lWqzUxH|I z{f|qZT!)aXGH&1p8GuOw8yAJc4nvu0&4ljgXB}shOUAwU`pMrl zhbRtnS07wVI7FzS@kh)^Moxx^oCqAHfsvqaAE6eyFp*=gvW zLHlK?f+6RDf%QXVXfuJZfzu5kO5TnK(@m1QX8&Qd#kua%ZnzSo<**5oS!;H8WjDL) z$=T1W`!6W2X27KeGEQ(ffVkx!2GL>MH2(11v~9c{4}@<~=vM(5I5LVX4GxV0W2kJJ+`(+cpSQs 
zSYY&lu4BoD&n>Xv0SulT1r)2X9fTFJc;mwiZL`IWka-pfhRazj+)> zZJKTo2zNLf^wAnq#eo@6(*-46BUdwa!k!Nou@ykn0C+Em1|9hr_DhQ~Y6k9vKW`#JW1^L)!4esJ# zN0M=DD%+D1d6LQ)5+R8jF>%Qb?2^otQeX*inwB_X;&$ayOt?776N*L2p5Sy0aMZEW zY>Ty5q-E4}BM!ysWm#Bx}FClsaj4ujeD zPH6tCu@m+FR~_@E0uG(k;Be~N43gE`M24|I^eq>Fgg(U!~1^ljX&Vu zU);C66ALawv|s;oaMK_Id$DjxDG8kL!{!}ET?KJM7)+Yf+JahQt2{?01OS`?U4aqQ zavv@N1mTe$k>ebqW@|iB*|k(}lvL$fs+jBzs;BCJ?{a)TLCtS9HUM#AzDsq@cUQwz z_|MOTPGAYC?)e_zRcO`bd&woL9<(jQ9Xpd2^USY;ZCP;n3&E%Iz~+7P{nh?zyb7;( z^rRmi<(9sPV^brH;ocs7(cI4uuspmOfGb3Y0&_oMSNl!b^Q)@^&_S;PEeNY1mmc~L ze|Cw1ZkjQe+5SdcU;P<-0;EKgSua7*@?vZWqsclBP&?RxHm)Xhfrb$6HRBE+^|zjT zx!PZ=C&=1rNTVLtS8vTeg9PG@)U{TzkMnD)Vfd`?Y`m+LWaePNxZGUxbb&#!8t0v&gh5K5M*?)dI26PP{{h04B9=*P77@5P+-><%6quh0k za@SP{n>nlN@X?tyG(pV(qR?}+Y?i+EpFrL9J_glVGvb&Z__EdKWX4`xh1v<9jS-t) zUtRx#?Y?~O<>GL4eKmozkZk>bwUmWs>}tDEqVp5Xl2>RL0O)hOB%!~rI2 z>5a#l@va9kKgRC3^|C(boBs^XaN_mHMF>j8GWMr!l-bw_?>`%BZgX{Wb(31P4kF?F zO_;5<)tefvXtZrfKJ8PzX?n0e3m{jRPxP_|t<`SUff?Ssh%|6N!3vWWAr307D6e+v$E--B6c3+6m|*YyDmx^&OHm*#EUHlm2Ep6^BN(X>>T*_ zV(Yhe0md8$N9D*_4ts`2Dv5eA50xHua)Hh8jvAQ-^LD%Zee=G_&uxD8Z(skpU%U^8 z%h{$ue+CQxMI(}N4ik<9#V~;p$|^u) zngSVIHsGKABQ`N&zDfz!IVYluZ|BGLr}!oau|zmn-@y5En{OtDGz@2Q~2+ezi~C_d25uU;gA+pKMHi{@!Q@&n7Y#_9_uf@X{V zR`_LXw|a3%gdN-&91KRVsqij)i?uZvw}zwK=FKgLTZ3Dy9Y{&%=q)yVHumF9ANYt~ zI7Q-^h-vT;uSCbO;hdYl@zD4qQ%!N?5Pt4g5QrfKVg0j%#w8eB7K|x+2Uu)U5-_-+ zmtlyQ30S+R1qwm}8V1(@2P*JXcuzh`{KCvVBx|I`hs@wu6-vP+3N=KL&nkPh-NLbH zoF#zgOLVKFpEVq@=uM5IfZ(Fki4Ex=2VcR1CpxbN;i%PxJMb2&2e_=)I8O<0wrW(r zfXTqxO2GXCu9kpq0nIEkRx z`%%pqc(OYEFbWAI0|;0IKpffz}a8;n$!F6%e9`=aqGDljLH z+?TT>8h%GmFTKQynqv2_F`-q~|DDk*ptFa^S(HL zHQWX%PK%o%1qV6UQBuUyY!5a1K=6r>&Oy!*J0b01$Nypx!kB99_``Q0qo}I=xPU61;0iu+1)kjhvnE z%VWA&JssF6q@n+{96cu!1GInKWg=_MM)903MKt;KH<^HAH;dStJ*c)Lnnw! 
z*k*nO&tC81Dz6W*HT49#C!IuGN%J=|pak8NWxxQG*|sy%4qmaEvdOl6_8@s{SC^GgLB(Z;*2@-DWr zH8_dO*7@4J(kC-6hhg4Ih%l$m$g;;_mT?1`!x{H{rDtlcoT@$3%XlbSSp|o7jj!q9 z!=AMd`c}H@QHa4tf=W;Qxp44QYGnA%%D{?<{UgKs?yMxhEbIn#0CI6%^Hdt2Zg~Q0 zB2U+8ez3*@E`PLv{*s#4~cOs}n5sR-J@4Vh` z9B;*L!%$~Oe?fnsT=g8_I*S$OK>KcEfihh zrYbvbsh?zdj9w6pEep;OU?`kr05}fae{Ki6}&hIC24z$KgWJ7HOD9 z`xZxk2F-i*`CE^-n7_XIwmTL0NT{|Qkk&`L61o?oII5-huQrRHulHX27{phvtQA<= zE8@Wvs0b5N`vk_TRrQ5>RTVoPKdL{>DZQbWK{0iZe-3UMZOhS)KBVcfW}RT{f}J-5od*0!%Qdds_;yyDMTq zHNyi?p@Fuocmt6fXJanldqAM3E9%F)PFQQ`SSb{D<$JK**k=dzM}mAu<84%wIVf*F z3cC+>Er88_54k2e(4mW}Fe;He-r+3E1@u+!QBo}y)Sa;`r+z{t4!*`g1mJ<{BSeQc z+^hJrL8_8))CAaij7AWglQq4n%CJuk)n={)oZLEb{6T-Ull&6)Kx~8`GnFbh@L&<@ ztm(D1FhOyF@dUpG9ss?ILg9lp@Cm}iW-+I9)g)&T?nT>*xerVlE$Bbqw+uPl_$P^X z$B@1JSB$tqk|Q<@?ABNGkz4)=;)m85{VeUV5OwiQ40vN1Bgf6mb_=tKRqH!U0e?yN z+-x=rDM8#42AGEGZzEmP-ABI;&5^TPAxp?-Okp75H;GrJAspg_#L3yxtXuQ?5-+^l z_v(Etg3Q18_4zjBXW{=B5D&57X}+PH0>QE{$za6TQId$$R}dsSC_%&D$KAu%t`KPG znz8c_u9I(XOHp$r7T*6hleZ2+32{^dL;jJFSh7I0qnK z#^4r^u($CIz5&A%k@ZK=)J*I8j3Rr(2+%ai(Or_tUhP#a7lNMb+S!RK3;|e4Q2kEG zDZ+H7#CnCT#e7O(!3Gp>$La-_OD>*!ad<=uPItu-?9)+LGI8#p`v7c~`Ew5d7hp}H zuAqJ^%8UUrAzN@OlfV#cDI$LnhH^a^1hYv2_AR#9Ao{=O-4_@f<|vD9WSEZ>wPZ^H zeCy?&SP-8+$d=7U14-V`JQq2wGS1Dx?uJh$&Z3497zP+`il@*C=SRy*0%ChN#47G* zj&xaVa2cU$NLC=hwzh(G!lyS8d_(5cNBI@yKblr8)%H>3JRZhg{!Z?d5i)n zca7nECooudLp#NO&?yw31z6c&Y=0#gi(+n#7n!9$dZR`)aPa|@RoP;iMcm6zV=O`T zqxerjUZf>75yu9G=dkIW|DJ&Ayz!w1IKOgTLv0pmv>!@3wqK@)0}RbmiCvIw^4ClU z1kP7BKbk8DLGEPxseuZBR&B%pW3S&!H|hsrSO%AZFW?BnOUM-axNK7s*#h4q>7F%b z38bKz<0pIg2@W~jFQvC2t2tNnW2j<9lk@?!zrSTT`3$WfArhA%xlm;|&Qdab430K* zh}=p|Xu{<$%%zqOz^k1Gj7EG}P1Y3LuadsRhXfc_ba1Rikqgq@OQ~FWtNfLnOIx=7%7GLce z+gOGcD(&i@M;mai`OIVrdPX@~)|E4H4fS}GW5Ov;_Q=U?!}YyUt|kZTcbV9l1S<+z zVMO_31_-=;%YokLYt8g3q%#Lf9VWT^ECkd@;+Q;rfAXH|1Gai*W|9-#Pl| ziPYG!(?<>-KBFCGIKs3;2>j?{M@~O*^30KB=;+DOF~1i_jirRKq|ynhetcFBpKiw| zh@$lf>_?L0H2I;ki>N`wJ--VXM`sIT@^Mbrd>}2ysZyph4*GWa_N|I9%4&I1t!!;~L 
zCcanVT_kQd;(uY-9a^A8g)ACCVa|mE5PBsDPJbLp{KP{i2nP6^-tm*Cg>DGa^;hxp zTUbYMkm9H7sCIo5(kH{CNO`6OVKxb8?0`AAj~nPfiO}v)h?2*v|4RVJH~xXVw)~?W z96v}V(3%g*zYxCOKrJVA@J=uuiHBqO*WDTK?(T_5Y%gS*cvmFY*U`78uPfFWi^swP zJ@LMHn5h$yL?o7o#SH#3pc1^VWDG))Vi<)5h+2{6K7DJklMBMdBl|Xe^96 y*2nt?qRbbI<1c|rEY^X$-dLAVMnfO1z`w*N;=R4wW4FXNM1tL+SS%J=6a1fqfWnXf literal 0 HcmV?d00001 diff --git a/src/目标识别代码/utils/__pycache__/loss.cpython-36.pyc b/src/目标识别代码/utils/__pycache__/loss.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d8a7ca6c701aba02124cc593a07fb32f64ab92dd GIT binary patch literal 6467 zcmcgw-H#hr6`wm_9)GUa>#Wzin{8SG)TLcjMYI$`H#Gf(Hfl+slG$qH^^Cptjz4l| zoNTst5W)#mf{1{4fj;qoph8N;3$&_${s|Rt+^15Al2s+>3sQv;<#+CQy_d0g`xFMUz-(!SA6kBxc;oQ#(RCp+O}!O3}faPkvQ z4xEBF1J2A?ujf7ej8>c7DQ2D;gyFH)sNak_gMOIV#lE!DH71>njls6t8H`ZQdcHes z@J0~$!S$c~n0~LXH5qlqkKP8JQ+UEM2%SY}Wozwzp~uf6ok z^$UYwaPBkr+`oF?`Y7rI;d+2`tPU^8OxPO?qD}YVQ;+5`Rj7k#I(I;uRNVWKCg>MJ zMVhZc16p18?C)qT?A|jjrlASbvo30N^E;ZU)h(PMC68d*hl3G+z7uUeHE4IDkmNU! 
z$t!D3wHhX*H_3#a@D7kj8*5!QhOi=NnIv?X3C%SHQOnbmgrwOY^nJ)A&3$jAG*Yvr z6^@2JmyYXp`klyirQYw$S#0FyO-P4C;I^85X*Gi3W}~J{GxUQNFJXO})ODLd16|xi z+$pem0fclG+8`RsumamzIrNaLd+RspQ1qiAq_|Uf!Y_dct*hZc`dE*Rh;?iclWlqV(Z8e&hW#8sF zNkXPt1Ae~2y(%hU4n$))$ivZ>nZtID9xju~m>T0541ltrrGBI5LrWRg?G3z9K;@k4 zo*gxUq(#Y`--iA&KT%234}v|lkVG*bDMf9Jr%@smhGepiUa%CWiOTeMxG|@cF~f|N zF zs=NBcuJzMAi*Zm5UC8A0i7wlXUaz6F$4THdQz=g=ntzh|CYGDXLP5mTWtBK>7Wk49&EU3AF>ro*K#WB0affm#zZ8l)eN^E*g zi&GGVEmCDo*|XNQJY!4e=U~}3SvF>|JQJLkz)2~NaR#mc0AQWwN3gFp9{k>0<7A4` z2uHn|lL%!V3|=8Zl1XGll2bkTohYFvT>!pFWQoW!h)mOcaEE-bA&sFM@fBjuD*J3i zoqqf9JE4s99&`<9VcZ8R!xq>wFe0bpW~ke-GrKSIz1yI3QV%C&h1QQz`F0`%g!|Ud zKSouhRw6gq{Bba-dC%q@PMrUN&1asSZg*nq|6ilux9k7d%oA|tb> z2EE}Z@(H~kn|fq&6{K@zD#zhKBlu$qJI3(F@UHN`u^E|NE3&&O^2AlG&4htq1R>95 z7F!3}%v;**$Ps2Y?O`Wl-Ai3&>jeB6JXy@-R0;V~UV6*q--~jvkJMPl8%Q|Tb>L@g zM|p||x&?0zeH|}L0jgJM8y(h73#U7y>N5y)XWQ18<}M1I{~?-1TM6UlJkY-GrO{F* z`}6Vwmf5n_$>kiuGmoc&hg{JDMpf~y;=PFXBCUY;4Bkt4FDZwmE8H!gmUc4;-Qjse zNw(*o>oh3{%M1shd*1J~HzR2eHo9n+GeKj+58Nci1a3~STA>$(L(*vlZsOJ`d{KD5 zOIn_EES{Vc5CtcJ^dI-a;kVY>udQ5q_u1F3uC-q*Uw!;fJ3m_6En?{I2l&;N5ghv4 z5q5<@yyr(g5BYtxqOo~l$Y*FV>dXnLr5*YG5FrlvJZZH3Nb1cYp1y)nP8*UI(BBUb zOa?Nw(Fhv-CWT6!5QhyT-tl~Cb^4ya4F?ocCoGE#5ILh5ZUvyD+S*c+<%8&m> zmqP4Z5_8kKJ*SB@?V_y3b67tk(Av^zd^{gz#k@#SE2WQ@jc#tN#TDG$jHoPV-TYX4 z08zTAbPGsTW;_dbt|*F#T3B4czG;^nttE_Fz+M+{qP(I7MQ0SP5T!f0Cj~Wj&SP7U z6#t=@!Ax@t8c|VEkyS-h1f;Qd_7l|Rpub5{!5PZp2sreR#*Da%(!8if6)_8sWYc+C z`Uy=K;4g?HqB_=b4ysi}mas+?ar#9d#v*u&iFD9|G`qx~+2eUR$TKe%)#;bV$V@7< z6ZAi!oqgtcDlc8p`PWoTO^LKat&ad%Um|odQ9J4ts}%K_g62Nz&8ix58&xZ*ngNL> zdS3#1SrzOX&r+G}GD93;vntz2fSiz6oN#8BBJ>He zyuJ1uz{@=KvKgW->ZU^Dvay^?KXhbr(?&X;c7KQbo>O`7mi550oA%EWDL=gyn& zD>4wIKC34J%0`G|L=9w_7V_oZ z5NRZYEpq_W$R$63k0O9M-X;d8yjP}MBb*}WZ29~o&AvuVi#Pghe;@7=p%D~HvlIC} zegehXvJ#tQLz$XvzitLNE+`!z$t%upMufd)M4S{*84fDsr)ehEk@~PB*@k4m-!=hr zr)~n4yOQ-0>u+DE8^iOPlDTzrsHPn%snQu3`b2dTeK83Fu1OTG)JPhnWHwRH1=LiA zud$-~myLP6R}f0s`dvm5SwWG311_QR-N>BFs1XI%F0v9bv^#a+H}IVP8U0q=P8OO* 
zGPh$zie|{r!Z)EK6}jq!-BskHG2)muES3H(6PSzmg0NYAT%kcmHjAu?W`Fooir7?Dn#MUM-kzd3k`e*tqf;B4Y2{u<{|%L~j7j{@3dI0<O_03wZY z`Vt|VF4CBfPbt8=XqOPjP;Cj`1alR{EY3EIZ!0L{6iQ}TdB)3D zwJq`(MFBLzjK#@l+sh>L#dB(1r%iJMr{l7i^UQgiXbW}Zt>#IsTZF$W!4H+EEhtS} z=Hm+XK>H{OL~QUtN8ooVE%*~!8FDWYm)1w0IWdR*X;>|H@Fc)Q>aozz2}qmzdJJBt z*e&Di&>A#eg*RM`m&8){2x-+LZ%|d`BBZ*6Jub&b#hh4xZ$poGMXacu-xePeDR_`& zu_TUlE6DL@JR3d{nLeV<+a?^uvk9kVjv3R0-JQp7)0l4snf@ZI1G`Wy4(2L|r7AoD z`dO$QrPA3qpGS@2UjYveBrly#pqg&h1kP1>xXPacr;P`{x7YXscz8WM%aToYg#GoK5fl3o!6(>k(BYFfXNvjnM8j&>5b^P!bqBBI>a z)Q9mG!21=RkXF?662Te*&=uWerk;FuvYBI)bOO2+gs8TDj2+hQGguj2_R`lppM zi_T{`abb2UfGEh6>w1GGJ~^s%!BHPoSNnaHN65VTqP~hGgLj%?;vi=ep6ckTvdSWo x48wj}edtu*C6f$;GF`RgVLO!0iR8=ZL$sT)1Y$x&89m2xM#d`5mR3rO{|2;enbrUR literal 0 HcmV?d00001 diff --git a/src/目标识别代码/utils/__pycache__/loss.cpython-38.pyc b/src/目标识别代码/utils/__pycache__/loss.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d22df59c868c1c19049c9cddf6d1927ec7e4ce02 GIT binary patch literal 6359 zcmb_gTaP43b&iOlW*v7&^DfiqkiY~RrgJ#?r4x-!K)>&9H92*i{itl=xrQgIg3yi>tFk4QzFV}Ob^Sh?@cZS~ ztnlw!ciCO8t#A~{3>pmipAVz-^oO?J{NXo${{3&?`0_JJ?4?U%=EK?TzJM zbmHCThV4!qksfEdxx3XgE1G$+Fy5padZM?0#O#nw_>^%Li>^hQy2BOoZAH`xn9-EB z2g5-K)9B8M}8|^kWgtnuw z*OC{oJ`|qy{bsL$E`F+ZvRko;jr$3GfI&ow;JCqNXN zFl~USwH?cGsBO}X z7QwzQ7~S}-Mia9m47BxfV6F8nGlvQpOXJ;d)}u zF`OnUXMX3Kb;g)A4BD8r6S!y0+SA!*hRsILm~uMXua7V4zil;-r=%7tAKP{GZd4xF zw6S8LaQl56UuxG~$5=M(yDL%)zbmfGIgGpO$`W3w4$>2CH~RgCIYT*5J#2G?=D6j% zs4v;DG#W9wIkDZhfj>K`9CythCsjV#)A?5GVH9o0Snca5oNKi6fIcU+?FvgWP=>2ARc7p(b)CT46Y^zPxJwp}S)9NI=QcPQ!?9j~ zlK>bv*CiRDbcn2jXe%1`H@s9OtHfO+V)SA(M0ztl<)5I0o^%Q1BSbC{c@#uv=}!0~ zIo#3K$dBb)iMe2Gv=Mg(?ep)2G1SM=HKI9jC!8E#;%mT)g23%CH{@X9fzFR_gphxX z?(&^P=%7wrw+F`k4m6M$$HwhR;{1V(JNNvFR1akN!&TkZLmp~a8|h|HVKP`1qLS;$hz){m3n7v z(w^BQnPX~X+4Ia%TaBG4y|oY7Wad1ZluyufWC8L$L{3726#FiN_osMFgaOlo!L9M? 
zX~Qd@(&O)CdGE!vAEBv9$$(W)66gO}o(1MrJQ3ODNwmNbBV zc(}qjoxe8hkH&FG;QGuNx0%IM1VY9#F2l>kaGDe!9P%z4r{Y~Jv17aI#BMi3?sA#6 zxv~&7AU?Un6X#T${ZN}5d&-_<14uI!LFNwMCS1@n8WaN9+EBLnNm9G^ zT0KAYITT?W{T9C?I_Fup(p3Yz?KJNF$(Dhz_IK7_{l)X&d1I^n*VQ+k{f~pc-#Q{U zwy}n=E!>YG2Eyfj7>6>FAEHI9-CH9`ep^>C8@}*BCS@fayfoT@=es5NfA02*Q21gs7ukL?uv)d1Lchw585B#_{lY zs2DUbS5s#&(e6-28`<<4ew>7Qfryco@5)b6<#`f;w>9rSd=)8H^zoR0mI3`;YmKj2 zE0zZzuqfvEWxm4Sf^zylsGc(a^AV0*7~OZ;cJvJd7cuK1F}jVAflM6{LOy-h#nnmT|ds$$;DwY|_ zu36K*V_6_~f2RFZ0PL6s-j zDGPwDK?V_RSyfKzZiT5VNl|4mstQL2iqqL}&T)CILX71#&WmQ_B;L&_3%zXFI@!N`?VJLrK^?XJq_j(I@=`Yfsy zv-`Cv@`V~ng8o<74eGV}x{#kTQ7$FB9<@CMAiYgsVxx93C@oV2W-FTcVz6Lp$Tv)_ zY-$$tnV$M8z{;sXYCNk|SR~F09A&jE=p83u(%Q<*7k&_ydqn;N*_IQ;w~(pPZIT97 zC|Z`39lZ#Yvafx7_>zH@4Y4Wf;I`Ml{^rLvfBRosas{ z5-A8x+lNP3WnUFH>=#m(xgjLEN%?0WkVS01Tl4+9JiY41r&7$Ub5{sRn(#|c-bLNG zJ{MvA!Q0PXR-^>TMmDeXC^?9pMB#Xtw(yZaTLd*0aA#vZ1vcj(l*1YD6?ev+Uf{mOLPiHI>jF?T_6hCry#m-K+r*H6PQ$aUF{JNx_ConFn*CW~I$`$$jCv?HTU+5< z3oMgrGhQcmHjrE&zv087L(2_l*#k6Ci~hQiwdJ&5wa z1_d~ovey=E#2OFQP zHTTF#loTKcGnQtf-Jp=pmsHHU**47$XiloC64;AyS9_=owctW%W$1j4xU@d{R8$4>F|1ZNy%PW-^;jAd6f{kJ0}e-1 znp6=9;>2*;%W!}z$*NkNT%Z#TJj}XG#V7D+4e6jYm@_ z5YxnCA|xn6!dTv<+CC8imAXy4({{Vj8EDby$iJk1*09x*2h@jzO855~HE=0Ek9XZ_ z6RNfM0egFz_vBwuV+Nxl0)Mg7MB~j}nb2ru1`?;!m48bd%4;7EG+W?(3r|EVGEpY{ zLU6e*Y;KG6BM<0rglGTC%ZgvwdBVGOgrTmu#4iiaqW&x9kDwGtXM?j&pYp6Hz@^U? 
z2g|4Z-5Gy{FX7K@WaML5TK*A{w-Xr>p$J%hfe49sN^E0EP#cH)`OnVW-w$rW_beAnJYA|X4?DD{U6DbIBDP1$4$X%^vr u(tKhx-vrYvg3?&CF5#=Yg?Lj34Ua)dbd7->sUim)-|C|^A literal 0 HcmV?d00001 diff --git a/src/目标识别代码/utils/__pycache__/metrics.cpython-36.pyc b/src/目标识别代码/utils/__pycache__/metrics.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..180077af501a90abb5ed1aadbfc701fde34eab69 GIT binary patch literal 11611 zcmb_iTZ|l8d9GXEYI=HlW_o6NF80>iUgKWIvz}phA&yzI>%|KM55^dFY3wvLeX4KM zQ{6sQ?dj=M^U_{elvo%-62ihm6i8S_VkM*`B3zOvk)k9-LL>rGAtYQ@-jVXKijrH*jC>p9Pi#6iKgeXlGQ{rtFO=hszEu}cdPO=0`-cpNcmSSc_VPh48 zrP(+z<1E7_P$%<@!m@1gmck|nT5*ECf#ui?%Cl^i&7pjfonm=556l!Rum!e=)HHjT zEwLj=n+#Xa(rGdd*&`ytgIK*(70^-wX#ZC{4_iUo(p)qhXJrg>@E=4%Z-05L28+t-mZ0v(&7&nr14P((UT>59(u zfz~%rBNh-uEA*%>V#8SMwvJ6otnbnkI8Be!c7!o;I&>0SIIP|O!LJYxW4zUIS*SN1 zH`LsAXtc1haID;N%eL>hc)}zyH(Sk{P9YYmJE2-Z#Y(8Qi`q_G?5PAP8YJ#*wOW2{ z25FDBThW%(jCxGv3rJrK)s^bkKmPsS`O0@bcdjgofjxqU(mEdRHB8!98rYpN+Jhuk zouuOzP$Q^=cKS+R#cnALEzpTkO{&X5o=V>$zU+=l!wa zC_!E@TGU$R2HG_}7!R_PJE!#L`wM|_N9DC3PwNw@bc6O4^9~k*`2kj_VxMl*o>gvq z=2?X$gA6El0#7EGm{X#aE`Cg@s3f{>C4+cle4so{XC3!5emsZ|R7ztUjko;?^qUUG z{cHf%XJd;B%44eHPXfOfVbg?V5PY|XC7GyXLX>3pl}tn>lcHoQDwzrv8#pHX-0rH? 
z;`BOGkbO`a$d3wI$-zi)lA=Hl72evi&YWHyb+E4u7S2lLriXTyk*t?}hgmH#U8~%* zJ#XK*`iv$^vOR2(ID2N*U8}uwx?+32!%y?F>&k7s3b{Sb zMQ^1tB)0Dfp)SOG{Z)ugp<x2Q+8a)@H+Ffi zC`^GEBt!JZ_H_`(uLzzP*#m_%bhJ)4`ICS0@a3|wB4!`n$ba(>~o(9u4ga;UYtcy>a~Zu5B>M&+Sau7_&5sJo>mfp$@^ zw2}4OI3Q>H_^WYLixTDt`}p8W~k&F3>3n zqw24bfC_>cRVB+&tEv@Jv}7K@GyqPN2%HWa;q`_=sQ~qax`ENqHv$+8P}&?NF)|!j ze4zJZ$i?v_f*6#(KG30vw0_)u2XIN4YB830p6pYNEOJV1k{W?|(NCa83T+ZBHm6XH zATbYB03$BKkm(0S7Y9bGkja+nz5WZ)0g>#vS9Z&zg}{OYhai$!sW_}t#F~YXbQ_)i zSy6SAy|?zgLhi|Xb=RTx%Lm35gYKF1I8OXtJI_z!wI}6v9c0%z+1v$FFgMR39U7Hp zy&WdNMfEN4t7(_Pxt%64a)HWYKDXOF(ODSVt~p#Xcp=H(fnxqn0`Dg9D^xoUGr!hi zoJ@B9G=cL3h~W059)ASc599G@28x+eb9!DkK-d{IuN_lQYNAG&3Wq-u#BakRc*0TO z`YCYxU?(yIj+9R12Dc2>8XP;N7!%f!QjD`PmWCUqlYN7F2{ys9C{MCU5X^KqaRKt7 zL-xTX8~#@Jt{m>$=kH2?=m+P^B>m|}Z+ZmJtRMmU6ubem3>-2i4B|j3K`Q`)CIN-} zQHn7IgK6tR*%ab(@5yJwZi!L(lgRM1gh|y~oswVUj#n^3!*iOIy+ZyWs&;TI!(^#c zck6zs^br(FmjTQl$G?|9w78XF)iP}+{YY$%oU0cgYiT8$N&ZxzH58a!dUqMl7B~{d zt_7WE^kXppu`)PJF#2QQOQyXUF2y_%NY}#@7xy74qMCRQ{nrQsKVM=XMG}4-awUNglA8u(K{7~^@8S^k5%jKcf$^TL=4aO^htuEXgbiBo9@ z!nlJ}@*-2a1u=OVRtbV7r``prWplFrnA0s-t%|kL>JF`WlGjhcskS`F_eN!w%^l0> zw$U3bW4~o>IAlDY>aH$ZJ9xTlc&`<#N{fR__l)t8S~xqJ9Sy*SE&$B=C&`%q(E76V z`1JxhI?&;=u;u~D6g<#_u+~PcFD+Y-QEO4}a3`a7k$rq{D(eVAEL)v6B1tgv$pR7* z&XU5$!dA67tGm^vw6baTQZ{jt3qoV;l64>~VKSW4k>M zHxLj~GX;$V;mXX^}})le<*cZtTVd{_{Vw|T1!->kGj&2FKVM-p8z$zYpO zmsCSrfFYJsAJ(!k)y^SxO#Dx1rj|i&3iWcTr5)AcNN3dEk;C%)zz8E31$?5=BNP+O z9E?Q@M}{?7q?ge+bnYVsxE77mP$4X#@ETAH48~xf z<`IX3SchVW3_C<#C{>|Qrx6j_9{K2E9$}Kz?P|-lo8mMJx|}iMzzSa{a7gRL{C26* z{$rGQv|>dy)vVT=IlKZ<>uRWsl=v9x=JANV3b1Q;)Sagw(v_wH16C+>zD#eq8brD@ zW(^QQnp0eea73A6zJVwh{t&oOd!&NcgHk-h7=s ztdZL)g{Xt10kyfQv|eE2n~G~_IKtMagJh6^U{3jCq=-K80YqxX8n9d_Duli=7#9}I zrU*^fRaigM7&#ZD_;(P;o2kMG!5Fj1{}}nXVV>o0ry3LN81(}2s=QaZqCAI~Pb14z zsLM$OCh+`}JzgFE#C*R>$8ZI*d%;*Um@Y>+bo2S~M#y*DrbS zo1NVdG+jU!hF(qE5&26t}l(*p6>Xj`x%-)R-R7}yhxm9%BKT`{b4NcdkEk77ow>gVrJ^ z*Q(g{W``r7H5A#~Xv@3Qfi5H5#Msh4oi8Rl$KGghSBh{UT@x@!$?umz0o{e`t+$(4 
zBu)h#OF2J@K5%8=z^UH0xeE>Bg&D~8a;-GVr@i`C8)24u6Pmo3s5!Qe9V?EbRjGn- zLK)Wv9QzIB9h!vfianEQ8PI?RsryYQOpxb}@nwu4R&GKegsHc8%TAj@abc{@DG)Fw zbwUbGu#GlKNsxd*tG1{BzhiGTg~2P=_f^xn_{e?x*@DQo?kGiJW3O zV%?^I;{s*~Rt(gcpt}s*UL=AE1SSCC3MPXr(tvOXsm^pi*PrRnBHlg;JRs_%gDIo| zqdIf_Jlzer@7|l2XgPznvwJfHZVvVGxJAeX)97I~n90-K3}zhUgE?SuhafPxd-0*e zP%l5wkkW(cU~Zrxl5Q}#N}_BwScuAIaPu%K%PEM&rvejq4&!wDfcY+>=Q!@ymJoS| z2IwCNVs}*U19X=GsKafzDaII%rVU5y9~JoFNE&Wk#7O-IL=T@5w-m+TDAT8v-~oad zjYhJ9xgd`;tumK~1%dMUU;!AcIGB}~Ih-M8^C0@t_&QU4T>4VYMcfcJ5_pod0_txO z%p>wx*Dz0g3^c6v7)nymwhPd)$M8Rb5{+t!`=R6eH0F`u!N3AF91D)qzU0AbL4*_O zqrn5%HJbkk;7d73fMvZZ;96*OceZOq){6S}YN@E#2yCxWYONH;Hdae$CSGgeg$^i(cCn2$S%#O) ziLy0OCS0{JCj2wr6vLOd5je$4%4mdIknqAQx#xL2j`iH6ph}L~bxXL|HsA zqcS5%?v+8tjzOakp_xIF*og?PpQgKML3Ud2udL{l#CGow^H>z-p}kzAfQ=p9;v$cz z3Sn0e+QB}HXij%^XSKVwBR86=h)_L_%5AR*E02Od!j3)lsH7X};b&KJyHPb6|De0n z`}O6@zN9dZ@xR9=N;^3LQBFa1eg!DpMIq2$7(aB1{Hs)*j7t7z1int-8wCEGz&8o} z1whdhyCURCF-7zKKjo!;j1HXymXb{4o`@u5PUqi2esuICc}CKQ!Wxi1_|tbo6y3xn zJRD<++=@sya)^%#C%;d!X5{co93+XpPm& zL)iFj+~Kl>k9(_u8ad-I1Q0b57SuhI9p^hY*x7EN+ZMYfduNr8WRWy;A^!ApCz|l z_z6o-Rl$)F>na`J0sOjEG7a8Mp2+#Ld+1VA(4s@At; zYNv@*yy;YNz0E&GwSJSprvVBn{uHH&OyvdHIZBZ`%0CMLiQbiWX0wDkB+lOi+y&x_ zqev*)ES%3JSU6|2DQ!i#pJeM~kjki>>h{vZTXW)-qDkIoojGpVDaPfeDrGu$`VsGp z_y@M|9{RU!CS&lrQ0I7Qb7Wr;3;(D0f~W+&%iH9 z!5!zo9l5Afg{>k!XQC2mnc(gipDW>xXRinF4}&{K+`%SZG{80Jmkf8@<4l6@3^pY= zCV`L}I2v@Hz|9XndlIQMJ~@&clK_`QX>d#i(g9y~Mw~Gb#4y8I8MFfL5ogXJd`96e z1j!ci{etiX3oKB9U;$yQJWkmW zGw^?>?2icin7|?WH}}#1i1fp_jPvm0)Pnec|Ac@DNZgMRC;;=peT)D$m{N~wIVg%V z`xqfhj6ik4{uBi$#I9bvc#(fWV(#;r3lpUhYn4kS{t_zj-v9^`ZFpeK`i6`&O~M}| z5dj1~W~EdZmv6ai6>2v{K&U?ADe?8D_yj{-Y>QZh@JEDkCk!&7nuUH7j4LL+kNZa! 
tU6?#4gDFqYNqHv#42`ZB(wYfK!4NY!bs@QwDVv34%$zZ2&9SN3{{^Xw2cZA} literal 0 HcmV?d00001 diff --git a/src/目标识别代码/utils/__pycache__/metrics.cpython-38.pyc b/src/目标识别代码/utils/__pycache__/metrics.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6cac34b3ef41920df7bd1c518e7bed6a21e40c49 GIT binary patch literal 11555 zcmb_idu$x}U7z2)c6Yp9KeDzrv6IZ@a@XwD@x|_4DIv|{a?SNB;-+_Ld)+48j`uga zUVA;e`OP@Cx3dK)caRE&9#B9uJ+u}@5Fy<`5s#{ce_Egvkf1zTjS8rbYtB$aDMFP_ zNageW&FuP-@Q~=P<~zUl^ZWkJy9Wm?1;1B6^PdYheppeyL7AOD1DUgU{4WAx>n%luFX+QbxA3N*33-@w`$R;BVk5o_?N{viukys2;a{Spk$Q4kx%j|r1tS?evltR zD(~w2@D0WXsw%hn11K3o$q_2y5Avf`oj=4MM(H?LpH+&-+Xt+RP3|`A>rSK2ouJ=ic<^@?w8VXw-3kwy9?Cs?d7$|gS*ZynEhJpLm9*dI=WYc;*YLYxp+UQhw`7u2e{ zUkO=W=_=gVWSG;Ozl#p?clYDJM88j;sr|*D)9>47wrJEXnhNu3{_09mjo5m`*6t{R zuIN8b1HA3zmDm2@m2ZCLcVGRzU%mV5Kee^_>ASaIzVhnrKYaDZM_+s8Ctkhz8&@`( zjpp^okH2|l?#gOVZ}?Y|P0h46qExjWc&_gcVJM&GQ`9UIS-g|=+&NRB4fcL?`6)bd zvsEMlCBPLU+GvFL+2V%&ZbtQ9s`{$ z%8bwosSS>^ild85xUn}a=NRndIzxeZ6Ib=5@*=x>?26hM?u>+LII^r_WtngoGmVDn zZfaBM>`6M?Mj)kLPYC@S*I^)<&CL9<@ zc8d2nH@OLV(1;eWlhiFe914d|Dd;s8=4G#mdwNam>ZQwGIqDVk_f5OGTIv*nf!-Wp zLDE~&H_qKquV~?5I83=yN@suPK&ZdS#6no0-N{t8ODBp|hX=y_o7f-2DP3E@zSCh2 zh&zNQ7YNTgDJrX$|sZ3ER_-$yw$78B{f5`W_U-mc_Kr*a+5oA5<`F=ok`qECa-kI(4UAk7wWu8N9#(q0ULmVffmM{D(9adly)(p-5|I911Ov>aYYrHI4@TkKKflkvR(;XZa3v}?MlP( z{T=h}=oUDl<_1Yexy=8)Ppi=kXd1k>&+~Y4$nsciC$Lw}T{?SW=JEbP__(4nuZvz! 
z%G=1}Ae&-CHha*1#tl}5w|#n-?{~Gfe9FFwZ0;Mjvg zj#jRKVF$kC>5{wGUjxIGlD6Si1Cayw+$C;_osyv1M{IQppa%53ngkQdVW!V&Z zsQ!$4O@B6)lafalwMd;g5wYcnUBAOhxmB-zZPmrm%ERBm@~u;ix*yOEPQQz$Acd#Egw-Moh_Xnn&f-ag|3es2Na3H|dEK-{{{nzAlx0)eD6_OY zdyI{;wEPb<6=gXs%N~)>C_BN1*#WJ<##xq`tiTS?U!^`-mS^HJM%txWk?PRbtUiwn zgb;M88mW+Kja5m|LN!1ca-pn|Wg5yzWzf1hr9!l$e+LRfM-QPaKxhlp7^F0Cb5rXW z$eDOjp#h<29d0hYgPs*nK z?7gk;Wp(H7m0)}NPw$#rPAZ63B?|s`&WJhWw)xzai|o80#k*)p(iY=LM|!nUZ$&B4 zP<;i|YB?3q?P`O_c!uhYKsc?o94s={7F`jOcQGw4qFT%oAjTywQEL;r{$i60!Y=VN zfhP%&`LNCN#AC>Q5RXqBMzQiNuN5>Mh@E2v^$2@dl`Se%x${TcJB!CB;3}{H6iMNOrGaCK7s~!5mp_ck z*lGazoA|d2dp0=JYg(bB?EQw3=yQS){FZjJoEE1;wW~n&(qM6EQr)#7_4JMb)gOCX z0v%UX19ZvNAA&h?O1W0xFfy@w^yjRnbfcxdOwUCrF2B;u}jDf>5qMiFymq0Ls-(aF4j2a%8w*>P`$%cQoy3}c1d3A8s;t~a+Co8PGyU@l>gs1JsghWo9g3C`0z za|@&QTJ&rGZ)3>Q+C7G61w-8tZeh=uG2So7DR->tSbG@g@6PJDh_O$K0f#0vQ9pT#UdJCgLD-N3Wf=i0yvGv($ zdjrq<9Nu$9yV?|>&TU1!FAPrhS4R`@p85|}{v0Xt@1LKx-+Z-*fp!fzEj4*SQUQ03 zAT_jp?~Bv+P=K1+ROxMyz4e z>SLZHh_A(d4w)h?Sc(eLoU@7M93Qb7HzNj892ak;h9?Mokia$@OW_*IU}caWO;K4C z@26_XzK$amX>gA~lMI~7GSV$HgYwwg5O1VrBo)Lo0m&mXkU&s1o4zYfQ6cfFNOQc6 z;;>ktw6%Q&kr^Lm$*PeZ6D!*!f+Dq6f%~p^SG_u@im>)bT9F70DvQwn7+g6kC-BTh ztSsIsJM+q3wmaAo%?hls@&a{x9rSodISqNCMJ--1`548s1ontIBWRVYt=~b7G?5f$v0=46ws!-v)#WJezDsX?yJQ499ESLFV;SNu zFqcyBWkB1w1$7oZ(?Ny=CA?%Qh$4N-f|{CHR)ohbj4;5nw-|Y)!?X@@xvVsA^1)@r z(^Xt$^J8H;Oo1t9f&mgiAAJw}Gy`4uWyl|dv2vI<#fO#^(qZb1k3#QB@qCyOUxM#$ ztOoT1bBrVZb>t^{d0x1c=??MzG|GFG$CY;}&nVBq~2+12P?GZHHEu% zh%!12vqe;2GM<1h#eZF!nX5|mxjnc5o>Y@=UHihNNOx9)=I+AzV!C8Fp4SXY8Tl$N zufce&tiYnxsw=RR{cEd`F(v)_O5H0BtOxbVa>AAdHnuZBc_Q6y5b5M|DUo#^yKLIS zee||3N=31quk^TjIi^*z{I_1mICtQ*=dF7C4eveo%>2{yPo0Dzv`f9U$=tQ8PQ9@z z;NB|kFQt6fS!fC`X5Nyorl5F|eIFbDng$z4hFV|i3Amldr!0|!Q4IEMw~_QXYlyMj}!*mM17d$z_*8?Tif707HQb3s;J*rRh7km z@cHkV8TETPi#Wa|P4Q9I&hNR08M=pEES9)2VPl`g2E=5cXHH0``PD-m7JvnHwLph% ztSp%!i1!8@??hnNa+07D7W+E0iz)FY3>IHi7pX{JYSwX&5<&ey#1cY{87>LOy}Os4 zP##BU;B>f`&&ng8FRJ1L)V~Z+)GmvVGQ_xx8L~^rpHePIR=JFD$!der*>V}g(THmV 
zk*3%n@Ie9}B5;;Km%z3JkSSu|;wk{m;uC8~tCpFbNo#4-IxK50-+lWlpZ)lEzuZ1u zA#Cpb9zue+h&P~Cug_@m$xP1poBSs zRUK`HD42opMJgPEX95t`U^*N|8W1)iwb|Fncg8y7aJ!G791v}?;V9C8Nt=mIfx-jc zk8ZC^_8dds@$D6&Yy#~H2qfggeHdXp94k;@1}hE=;RH$$Mvx^4yaW(tXjj-&kmiB4_FxmLvSE3|5rFLtNXZgog=YHk!!}C&B{K zw99+}!5h@?4-cROI}XR=k_p@)R`USHQpG%Hfw5##%SpruyD235!VPv9Dg9 z#@JM`+asvSK)^)CHL%S9j5*~s%QfNf;zN-Rb$eFAxkz8%SX(UVPd(jycS(Oz!t;QFus>4KOx?h< zhG!Aa`W*6j*6=LOm9(|la!Fexur^1jxpHJI%$CtjzUJf$15ghAjD@+l4zD;T>*i#g zwAvy=nrcGG=__mSonj}Ixsomxn{(nQ6&p^Ywdj=0N*Qv~*+KZyF`8aTjER3l+gQui zb@m85ivPoKapu9HMztw6^?hA0XldjM_>S4bAoEMIsShJTE0N9y>7W3Uf)Gg9mDEkauG9|Pu7Cz2fJD%GXeI~x zS>2Ca-S)nsZmwSko4QwbkKqghu-^`2T)bL5DdU9m>lb^5Gjy)Nfs@Uv$(oi^7i)Fj z?Hl7ZexR^8Fz1uwz;@xI*s$G(O94P~FJL{cKP$tIGC3!E&&z;Qj6aeOq{S<^c*zCC zZ&K>F2$0Q&aD{+pm9Qpam*Ic?eX991fta!#P_qLnUch@{PW&0wY>QASJ!>c%Borj> zqvU4TQB6?UE;JlO0o6-1B&rQbDgw<2$$%(3WTZqnBh2(sE|tg-n1r&<46US257XOq z;J5>jFNADXm?p9!r0ZlUU@cKkZGX;AZb}~cPXCWh{vSHeIr8KzWhSvQ*a!=<7T|6-^C7ng+DCU=WO?(w4yAXi5VV@7Z0~zok{OPMAa(W^@5389R zy3zrT9NebT3((Q%Bb%N_Abx|VXghccoZz$s*ghoaVif-yD$pTt7H0{}NqoK^{zm57 zA~}`bLOUUDslc~F{9gP$((P<7Xh=3ug?7;Uk@82RAW`}ddLw`(IUw2iT))-hpemf& zRBphcXWR@B(7M55Yh9e;DcJHq?%^vl?<~}CwiLTL=fHi*-Zz)vh=$Xcfd?Z@bz8yr z8x*>dPF|k5t-qvk>lS)ndxtlgw5GYpFM!fs3BGCp@z56;dC7C#0EcOXo*dK^FBqVJT%uK~b*ep3}DdImvZcdgE$s~E94 zr`LqLfpNZxR#0Zh=#wH7x(W>Mc7Z;{Nc?p6Ll~s@rNA9cb`h`96t5C^gg_DCj#~R< z>#f$8e+4lvTG{K?D#FOZruh9GwUm?ELeMNbUS$y`riQqAdmJ`(9E?5wO#J=Fc^O^) zX&UsS0L2mUb9f;dyt+X4nIC&0m#BUpffEEIA)lob`TE4i3ET!iH)+AvSK`!011YoN z))3tmpP*JR5%^_*Vn#eg>9-Sjn!q^%WTlEv0)U6F$3e3K6%p3PKr-;RAbXy${}2+2 zIu4t33iv;xj;b@#DkUu^hg6OUYTM5Cg62dMC5ueaI(NN_TQcLXRVs9Zy&ug+^GG@w zzdxOr9Ryzk1akY7F~dzz#s?&2Ob4K#ey|9JRG3<#@3#;(0dK%}XP^swg9R!$>E)9; zwq$?~C`f{G(nY$M0W@GC#IxNWbixM_-T)Qw+-)7yfl*0mM;K=q)TQ$gNg1iG6~ep$ z4WtnG!8kINMRYKxj1;_TNg9-qlb*DELIFdef%T2O1YL<}2!R~<)c8bFJJP3*LgLT- z2b@(*7_m3&Ysial5cn2>y97v_##-VQQtj+cj2AblNT)Ak#Mi0aKN0w60^cU^F97Y4 zrkOJEPCT01bckED?q zJ8Hx+>OmMQ{++;+1n!5j_@-BR-wu=o!Va@3H4oN(Vh75S3tUiJxUT8QiuwgXNFF2q 
zVJa~$U$}5Vyd1Z=&ubz|mCL+YDVN1>Q_o)jh*B+BPL29P>^vHQ9Yy4g0(_22smP4q z@i{iApNu~&Qi_}m^4m-K{e+B4OFx6OBBUxN)h8)Jr3{ipC%3VKawQEDSw-nnu_NR( md3^k%0J=``f-Js*#hV-%Y3qD?Dp#?JX~P<`#;t+T@&5uRDG)IL literal 0 HcmV?d00001 diff --git a/src/目标识别代码/utils/__pycache__/metrics.cpython-39.pyc b/src/目标识别代码/utils/__pycache__/metrics.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8a30d52e36b96308e205098dd831e30316c83c62 GIT binary patch literal 11596 zcmb_idu$x}U7z2)c6Yp9KeDzrv6IZ@a@XuN@x|_4fsiJb<}TNsh#RhH%PmQ_P{OA0QAJMe0Q)cJSLgooP;THh7R?;}* zdV`gieCs70Z=+!>nQU@*Glw ze25RDF88#?NBHRT8Xvu(mxg$ekMVKT5Az9LK>Y}Rgzx7EP%_G2&nNj5Qv3LEevltR zD({>8@D0WXsycW111K3o$q_2y5Avf`lRv~CM(H@$->wx;v=2BJo7``>*S$uadr`d^ zxXXSd>Xp#h!d|txB1_CmUbIkQlyiGjk@q`Zp)UfMcV;vUbbh!3@9Z-M2Ue)(& zG0STk8aFl><}>Gt=3O+GKVjf?_anbdzfYg7{pA-^VYpE1WKfsfuk-6S%T=$Ds!qyX3MZ;>sT8X~ilINmt_|*@8>PsIzU6F059e&g#gNH6R{3cc(X&X4E z0XmE{wx1@HkWeRRXu8(XIt<6AE$XpBgmP2AqJL29WM0q~aYXyIj)O5wZV>To8a3)M z(8;3Aik+C+;5e%|I#t50y=gheU?G^zg-al-}a=*IwkNT75y=NE14D@+#lWjm2Z}Xq=kRCbiBu_L7MQ z2D&@Ndz{2X$8jJI?*Tg-&CU*5QWv?9diu(Je-CQko3eiAs zj<_JnuG?4TZs=ExcrYHO+-a?|zjGiqZ!s|+7if1fmEE8d#j4{2@%~NhkKvTA&12u` zI0wWX!jp@KCbaGzCx1q(GM?ei^Xb^$7~IsJ#L0H-C>PnWeS=XNbLgTP4Pne|JQxkf z7O|m84fPh&q7jr&c1yEVN{sMkuO`>68Im=_J8FiyH6yZSv|BS8Pj2AOHW}Y{L*K~b z4zX@^esNl!$k49bLhyo{R@y&^E@S ziA`2bfmHL}K_I((zPa35iF`L&@Llh^FT9%XwuE1)hhVM8-CF&+AIM6Sd5wna1$?{S zt*nUaemL#8_`SWkA}anVcOi;e;i*R-t<|H2mHFvPbNNx>RXxAa@_4<`@E(2J{Gwlp zPCV&Hcwv?k7Zn3n!#wDo6Sb9PKZrs(EDasCPPy)7v4W*F!+L}@h3>JSDqi#pJ%>TVltI>>T8oaj8^F;TM<*~Y6O$NL8n;)=?= zE_yvF?;?+bY>IW+>_PWgKUxvN_UT=|-_^Qah$E7B&)s_9w$@Kny)g2{2~i0Gb&Qw6 zu?K}5ty}@ajzY=PC3mmC3Wh0#ZNsldA_wldOX3ncB|){1*yzXiQ{TK9W=G&E=lsvjMEpge@iP`W;rvtpxRJD?W}^9{vuN@0@Pb!-#fp=EpJm zckyU#*K3tqzEI7lPdA!C+3*Y!y{7H#rDx0Xbf;TEt)0Cvvz<%y3p3cTz25NV{YHD> z(vGSO&_R(zdtk=^iT$jk3PC$97DOJeBn4~;n<7eVwfMve7rYh(TB9C>EwADyR@)cN zFv<7<@LWLH36j-`U0GR%`jFW507WebNiV4RiPOENve!y%uhsG)IIVi*FNcXqD4Gls 
zE|pu@Hc_J>0o*NHu5`;t$U*q}UVtG)ZP{BBqjE}I`8w8SV9MgbM2{efiEv8k9x0TZ zZR}1=nkh;5urRR#Zy9pi@LOTR>JZQoo=T#(*6^$+y4MnuGz}ApUa2RnQZj;agFvff zR9nbKtx94mtWPqSQyp@`7O`4n0a2Fd)fqfpvHuW86jJPGcV0JbF}whv4Q1JsG0Gex z&mLo=EG_@TOh;MH$g)S|Gs;e~VRpbMuyK}UHY>0L^w+6RmgSjv2S(bZQ<2ip*Qq{- z48#w#s2b^zdW}^{#6l@R8FI0#k!1$TNLes8OiIOQNB<5~g^n3RO@O!-sIf?0;P$4` zv5>Rzq+$!=-qx?C_UaBwgpl~sbnJw}9C>~pvN{hyLe z``LS2-^=9A-7CBH^gq69ZaJya=y8?!efz9<176!4Zp%k@PLRr7bR=nuaikNo+NigZ z6lkcv3~F_}3g~vFL1g?W)mxG9T5UO4Vy!OtLXmedE#^=y-bUbA0>rlx8(RKClMBKw z@eF~d2$12h&F#eF$i4?pNE}9U@+@x@3=@c*V+H*Pdsvq(Dpa}iM@;w$JRt#JgZZby z^n=#HO_)>$p&JY|=xH$Rw2~#YBduifJkRkVv`Fz`K7yJw9|fw6B}3=I8&*g&xah&= zTI-rde|7e*GKT*4Y(*L;n2mmvCQIZzo^Tof*eNLkN*U;5LMp+rR)#zP1V#c%+oNo8 z4HN2fQq&Y}b^0eU0?td$ES^S2JV7OydUK^5EeJm>nu!_ujp}x#n5SmD4>L)Z%k`ih zmCNr%rLq@5{x<&Y!k!II_nKDdD0{zQB>J2n2EV15{fA&koYt-tIIK+U9{oA%soQ92E)g}uw(6Ffq!6mH`)x5+x6F>f zC%P^1?%A>35?K1GhrYPdPGK zuyVCdMrl~=XtR;s)JUD~WOk0l^(9i+KM6C1d&U7<#=nD4F(9z zqLXsAp=b=FVZIyemL6V^{f7WyitWUmTl%#NggwOpuMSJQ&qS5>2bB)|DC{cOO)$q3 z&_>`aIra?rl_yB&<6`?_SCr{0_(6l@>fnQ0+_4y@8oa$D>r{x~VETI*l%x zN$tas%Sh6^?$w%s*N_)lG}MC6VghlQz#cJYMXhqR^}DDEX~PT;sG z?-JjnW0Q+f?&C$A-YwSt2{3c5p+Rxg7O@9$oZf0j8wQ8JXjA9rgcg8#!>?pTCR~O1 zb889WF0vO>@M1vQxCwO@J=0N!1SR}qDTpF-(Se$pS<*zn9gHx*vo{&Jq~o*+ak->5 zU*LmFT43t9%I1r4I!=KpXQBZTK_7Y#JTwCv@W_xS2xH|iZ;B5sX{5u{86Sn-lj8X} zBfbQ$-B=Ck2j&<@{%goj^zyuLGqW+o_tPlv(l{>C)NU9|3r1(K|Ij{(x#|BLp4eU- z<_to3gddQz_VS~AlJXFw`}kD1CJ(j_md+2tU3!QgzNyRl@lA~LE-iS>)S`)7Z2jjM zyoXF{ABYOvjh)-tOXx!_p2J!?&U>A}QSX!5Nt6In9+=x!StSRm{nD2oesy4ahJF z+*{|KJ_Q45m)h%+8SGZQdSgYv;Z@vUN`<~R-xPsj>XP54ppcT?ubcu#0MpxOHLxcf zKDsH+Pu>y4415^XtDXoTX~HB2PG4Cl_w(7XzTAS7rQU!~?Q)?B!MUJ?{Y6)@#3@9m) z!96b{%mQ!4o3Tzod?3+7G?R`uToFbh0S2Q#5Z5Td{5@PqoI~+v@PwxUGzObRf0*UK z&xfINyWmLs7<62bFo>+ zElTtZBWNxtEb594lgQk=ci9Q$Dq;gy#J${C9{3yvDO3OV02GbOqC**C=fwUZ{f6KU3@gw5G{i?3~Pb)Qo44OF-SoP ztalP4+labN!682d5qU7SAXeT<;SE3o7RArWIeN3{z1cd4WO;8U9bp(bQ|GW8;TL7_ zq7@(F#y%}ROc1ltOl~|87m%i1<_iespniXR043OQJg!P6aEDmU0~kveoU_PUw5jDJ 
z;)WY3JZahijW-E9N#?PyUY^F-)Un$msL4PWABAu{g8xC(=+sIEj2^s4pu)b{B~TB> z55z8Tp~Qr0J|p35Vy>;P zE|kot-`adf$$V16^MK+>8d*m(eI3s#o&`K>v&iFF#j`M5GFE5GC1ZiW>MW&Z%ZW8V zQ${!Wnw2jMKsofY=4VwMUMeT+W@VkU@DfX!bt070S61O?#ZD@-B~vUkXT?z}w!B7b z!7JI7GGwW@gB?gGYI?qFc>Dv}DvjJU*(2;I{tv?)ng`z+HKy3q_f4~4q>(G&PcG8q z;M}LcpC3ayi~leaU&0u>STY^IiAS;w7YR5^!F0)GDjkzD%#0k|4+Vsn)fM(HPSnst z$#%&NA(*k)dv^fKiME!;Y)Vnr#b2Z6E~gC%1`Cf|33+~KCDJh=of%+V5Di@>CiPe_ zF}(v9Eue)sA(6Qm+g&o3F4-Onfa?}QqyxtaVu{*fih}FgBcqH_83--~bVbO0Qts2c zisef4^#5hD|6t7Z`yvEL43T)bL5 zC8LRRYZrS)HuSK_gF`N;$(oi|7pwKq?;GweexNWFz6;68;QH`qth;{0r!XP8BCsAm zoRPsvnVglq=VVw);g944Y4Mx5c*zCCZ&8X&JMr592yF;>VF_yzb{W3fPgBhw5Kxru zfto!~@jTx1v*L?Xv)kYj3n&{T6qM#xZkio6WZ5n>97H3kmuM)Eh)Grg%?QbWC_5yl zL^&(Y^ieL=l`JqHWt|;6-8wT)Z`Xkj4?yY=vN>^@$cm7zlclh>L_NLz$8PtgU(M0)vgT|sMFs)*!4l1lKg z#}w|+2>*SfV6EFs`A;Yed%v*)HI_7cikA2?4pAxDDKK{O$&2Su1UmwL`r_c8n-rg; z=6_1yPYC=OfzK280)am#@D~6jN1lr0E2Rvr`+ww~-VwTZVq97}i!hVomw7|{9ZGf~ z0CB@UA9@BE@FD!^6C`qwB8m?So1DMWS&kfBtkPr9G3ZmAp7%h#!BdPKJOvJOS_14L z5_Ag1|Aq=o2%Lo(0<#jI?}xvMy}CdSs2`@C5VzFeharA1{vPRewijR|JE=lD=>13~ zBvOzleF(h~?2;Ui`~lp&+2f!(9N<)L!TM+1ju8C1!IWh$PV*G({`Up=94&YP$~jw9 zF40+VU-Avui*Q=Q!OXye5z@M?;VTacb4iCV&)ha&GPrXSy{|nJjCNboT;xMQX|Dv| zzJPe>yA3`Bm$^E3B+0KN;{hOE!_(%9=DJPUa~ zCM|kxBge;Oduwrs{0i_VkfpC}43niVO^p4@w{u4qKaEq{B5rV5yblnrLzr=O6=mNV zLy7nV0Bq_vRAIJf5J2`<>m0g@rA$1tCj51b^9ou)nIS_^icIJ#Fvi;j`e38$({mrf zAieJi?ijL**rF-EN#OMa9s#(c*FM&Ilk=5dM?8#H_L{STkh5?p%6~^M<)pR{HOpR5 zS%BGTAjaMvhn=kgwTMenm!IYbqe@Dt&i%{(o!WYCx=vy$+qomFThSTQF6#Y zt#d!9_$6C?(^8=$?EPpqnn%*f`2Fd`?jTSbA?VwujA?FzGTtpI<9GxL>Iar!Ud5?J z`pOF-7Vrjqod&wVms+5LQ$fC4#}+Nn0R>c0PP)hjW&jO12nTKV2c7UighD_CJa^j! 
zbzoFd(lN3@#7|O%-HRQ!N2;3#`DghFwN=y7Fq}th?7%x6Z zMJ9bmBfd`U{+YnH2>c6ye+6idoY=lKcQ4wGO0*Tp`lJ$SM^Ha&c^< zt@t;}euuz!3GBgCX9uQkyH{-Gyb(p}K^QCkoxoEB?uW9cQ1RRjlm)^Lvnf3f)_rmZ z%96WWP+PdU>Bx%a1wlxzkpD21SeGwcxFB9uZSM1$NK)l8Z&u1>@jKM>!vIOD1X zpH~j05!g{g-YCFFtCUJ?^;YM&O#Si%>IBI3AivO*-&)8Bw)8$oD?+MbQhkylRLUSp zbaERzC|A-jkyVmDt=uDL$PW}A1u#vDEoAj9pmcAfo%89bT*WD-EoaObcLqks{|jLn B7Zd;h literal 0 HcmV?d00001 diff --git a/src/目标识别代码/utils/__pycache__/plots.cpython-36.pyc b/src/目标识别代码/utils/__pycache__/plots.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e95bdd529e339d2bae17f839345a1abd79a4594d GIT binary patch literal 20308 zcmbV!36LDud0ux<&$+X+C-%ZM1VIoBk&Bx+2;$~Kl&B>PLLxPxIG&w1JDAzI@VXb+ z9rq}Vma-|^kdSgB$D-&^DqFUENR^5s%eL&WV#h}!rxZDH;*OIjwiRn_mCKbXr(B4S zeBb|OW)~o<; z%2rOz&e`(o+9;)xj?WsSUd$_(gx3~UC?h)ymsO_sintRl?O8ZMrbyg+S zm33#`&K)QvtM5|pR%ta@u|IC3K3RQCW!90qVyPiD{Hmpf*PYzsrN`H;++1l!jVw88 zRE@oA=MJL9xY~vm+tA{WnkYrpBMXJJeB(Z{a=% zf-;-)VJ*mzhcSb#@&Z=|BJ%ts7uRidNsXuz_`8h$Ppf&HC#NIlJOlYF=uC1hX(}hie6!y^Uqw#Uw-CV z=*~a4a6ODY``nc)PhSh;*DLi>bHxu+#T%vKa()RN!cmjgrJE}iU8TVdF_dSEk^4m3C=kfCZ5Y<#LgW(ECJ?IghV*07=Qh zak1p6IM~PtNX}JJEU1g6j;Z*o7Pc*>5?I_+7`xQ0HMLjdjrhT_v&Qk|h~9oAYO-U0 zpS5JGZEE7xL||V=@7JunC$1eJ9_!@#gk8jvBMWm9X)z1j2$|3`PjvaVeH7I zQ>QLqyhDdB9ht{CM~@vle(Go#d*axMqr5(J=FNLv_$Bv%l_3cH-G-e|M(|=VB<5t@W-!z z;iYfB{wv>H|HQYx_`TnJ?WbQ_!&=`u{`eDfM_v+2eyIh*_U2k^_fltS70)j=>#eho za8lfcnALvR?9N<1UujhQe17gstywJ8ytByk+Ru*booKChG1<)|%j6*@dzd`TWG@pA zt@kl`lnJ+2&oUv|=yx%BjLG9n=9nC0atH}dRJ>KFm3+SxMrx%-Xg6jZeH=A87YpO* zC)miA5`6;sFwvVdXW}K0fVgbe9=F>=4?NatjY+x=d`uXhAV03!4407!$JGht0z zok-y5xgetKX$z?%X=K`3iad{N?3cT+!R6DFOi0(W5q%0jVWiUV^^+`cWuBp32_uVT zt&g!KAHXEN>0_m9`8-$2O{f@;&AN@*#TWWr z%qCmbQMC5B@F2py9ox5F1M3Uy$zF=wNAZ}-S$95Q+15=*$zz%adABAL?#Lm%z6WS+BJ$qn2B7C9t4;Tq)2` zoBKrll~%2UIdJQG)F62-r{-Sr5#^qi)6U#~PfUJ(Hcpyp6zZjXK1}8F^`=^>F`v%o zZ>|(-rbhFu=?h50wC5N6O0iz@Z#0$MVaq1UcFL`Jzy{o5tVY6ja245Zb*!~rzFUoU>yG&`wr#kq1+9W_xt1Sa 
zPU%nE*f85#oAMKEVO#8FTKC%}(e4$~&QB>&iEVw*YD{5-0n~i5TjN&K>wwu8S?9b6 z(f}1w43~r!I84aGH`8c#&Qy`D-nlVb9lL z@@Zj4m1aYyS)II&Dq)~{5Z9V}9j0r|g37DXQema$gSe(xGtK0I6O%DKvQ?=G&*Y)< z*w}Is_Jr-&yX*|ogguPkG`{+I)D_vF_u=_DkFUo)?z6Em1-1qD6;Tr^r3T2z)TA0z z8RVjBT4W_Mv=KE5b{AJXWiw&&LZi_HeKhsLz2pU5Db(hCe`!996c&p`E(SkbA5w*E z`j?Oi>?*d~8}=b+uG=56{YVg7x6@W99zm;U}vpv`P@P@&kT4i0hY?({KVeh_qhu2mOpc#QN$m1Rs!-a>qd&D!T5d*d35q zexwl#*f$Y4tuJ)j+ttx->$tRzth42q5JF<1{aqsHWbw?|A|#edd8N7H&1EmNTD7%o zqcsO%q+eXny0EstQlHKG&1`?2_RnT3jn;~vtx$6E=Gr4>+1}r%6q~BV<&zU%{`Pl$ zdDln(^fTu+NHkk1qI~be)vx$JbmQFYc<8EvUw{nNziF7c;CUtOgW3#<5wyYx$R$iw zyh;Oyxlt@>(mIcpmz++t15yz#7@M+GWH|~_?eT(wjevyMZsd6Nw#^-l=G^Q>9setOT4#x$HBchE{ME4?pJ#4rmWt$OHM!{$NW_E zj%!ajasY^wi!7^HvSSTkcUM~4OB%$kUp-014(!>J78q3$YK+xwN_Xx)xv0zN(W}U>O+5G*1j!rL&#_L zDhfxpbr|0xp##u)fX^f`QeFlk0}(0Fa)u$Cxj2o(IEx4DteuEF;T*xKjG-ip?-6?n ztml9*AO~rCZ1a@%$CH)rpHH4se&u{lAcK7dbHI6YK$Y$~=v(&8#-7OvYK=y-QS-dc z>YCZ4Itq1-$^FkEt=Xd|5^b4LeR}t>gsJSQxyAa`R9=Tf_{CuCtT{I$+Xa!sU7J(b(h`zD91w z=Cm`L%S}S+%fOT)DSF8wlb`rpVmylo%UIe-=rmekyx<8t5VGs38TYSe7}F&WBFgvh zz8n%#=J4iSP33HKVL`tib@TcINIxIZRs3uwC|G7eKY^%QVZrw?c@4>I1S4?O-rm&K zA7g`rzQV@uX7UUZN;CQ+Ot{@<{l1sE%?FU2NUPUTaT#BatRdl~22;CJZYru-zL7=J z9?~lf@1QFAIIagF%=Za4!yfp`TAsnmDiG^30q!KJgXZ1AG&Ch(P;IDw1C_62d zvbQQ8bQ2(+lGa_Ng)+AEd!HyVSzb#$q@1#;F(Y>Cr8s-L0{m`dF-EB-ts4*A`+bGg zN`0lCZLHK6OFG+J$}XANWb1{N%!n2RbECqoQeIVbZ-mRRTNNP0lv$h`VoFU ze-8X8cH3(eils1i!~8~?OH1we^MzY9=Cz}9b934K_w0lB?ActRx2~1PK)GR5hQtsN z)vAO8i^OP{*lv`(wma8otu5%AxcTgiR`|h$TeXUhPEB3B0rqlZrLhb))mnj&SZP4W ztodQWTUiu)OX${Vp>vBRh@Mf6rKp8Ny~=z+RaZO=rNId?@k-sh(YzhTy_+kg64@Yj zz|%aNp<8JzH91kg26o3KU8KgW)n0+Nr6qM-33;e!QX^E>g>YxFs}74pw$Lf zyKk)F_V*d&o4Dv>H@Q;;IN_{Eq|64MQ@84tLClMET+~2R1Um&YT|BETn89z`8g zYG4EOFmkaVS_Njo27;^gNGFDR)5ZoP-r5F6p>CTv+9ZN#5DVfvEKwyIN7JId=`|gPRg#gCmOf6}Y#YIfyBH@uzquZe-opb z^%Hu|nAzqrc&p%fKZRPc?^qz!gxx-Pe|5h1Oq;f?`y1c}zl|XvZKPqeZ>`OFl``gO z?)qD-N$3|jgMZKDt4!Elvr7L5B%!?++6D7e-O0*4C-1*ce6esI9+_ zlD=3(0|mEPSa}thvemIybH*kGcDaD91k+u;97JfH;_*W1vAfR8*bbu;f?+#B44Rhf 
z!{P+J%>`42{dC>_I5fW{`=5gG!kD=C1?y%8OqDH+g%UznB21QmQ6*gfMSL2!*Iz&q z#tN-g3EH+xWHZ?BV!(gM{bAfMXetf@MWGF@!|rg`G?oC%HDfK_vYW%E zU*2!#P0xRc_T3}l0+wd%L$>~Xl-)~pjT6;YdE*tVlD>%2zCC9rF1U1ZGJXXq7tAO6 zIC_V{B+4( zm4r&d50na$OO7#WV+CmA!kCk=T#sAI^cwNU(PvvQq6R1r;x`S97`zkhjNWxt$Nh;Q zP3?rwf7kgCp8xOg%%jywJlT+WI`gz;7tc6@b;wY+fqB5u`-9QyRFF|aFl`J;idDfu zn8txfb62Y~=rb%f_3dg@WW((wer1a^{5c>djtW-}&Pwo(Aia#VGv@CIU~iN1xEjN{ z3SX)2?2PxzrL4M3jl*=jEf}kcVL^>8JDrJO0(1MMqDi1P${0uJOa|ko9qRuI`S>~< zyURAN{v;R&w#Um(6bCm*odRK>u~6oyXtykB${M5Hos+Tzo&JOI&L&0>gBiOE{x$<`-H;=sl3KaSrt?sEl zOpTzcrS90GrHW40XDhmiV-C?Xs>oHuF4+f9Dt|7%NAdM$kif4pO20|~2OrN<*NZ}f z3&8Gvw+T5q#&sH9kP@ZrG7A3-M>9jWcTtecccEz0C)abAl)q>E3s?SE`_*&$z36lu zI`tBMH79S3E;o?FeZ?y98^~|$XF`C3E>7iH^y>{WT`{?oXJ7inzxj*(e}1m}{9ikF z&(5|ZbBC8O3Kd?cseP4Iawzx%_UbcD>Z_np&Vej4r{#?+d~UoQnt~ zQjU>`KEodF(n_sn%sP2nQ6;f61G+|e-RH_2rnAc%w7=+87d>7PL|yOW|h&D8q8vSXA^cSsOR z^mRbKM(TwL7>LEfn@dZsC;vN(p8i3OMD8j^j4)PjdWA~Sh|Y2s(t&;p36u-5oRR5= zNyy;Fs{qSX1+sZ_6*r0(Ymh-G`H5M|(;?g1RbhMjzu`hQNw=A!s*hd!2J<2!3~V8$ zub|XZNUTU2Vs{#Uqaut$Jdf;`ykqZyXudBp3c;L0`ku%*_~Qgh_rTh91SL~)ov~*e zx>?3im%^pCR0M?)Ns>rPM-c3 zZ2lG#AV+|f9` zBzTVp2D%Zr9a04E+;Jca(Yj+e8)TgxXX|Y8L1<{;Ks~M)Ac%0iov z3*JF%q&O&O3xXBMg&hbXTB5_HWuIN_I2?FtNOsVSFbwzJeIqCr)}SQJ`bqF;>~w&; zNC`)k65KnX#$e%*am21UxzLW^D!|tV%d@TjCmvNC)SJdm{f8*K4t)oaHf4dSyC|r< z2oeyqh6>;=LL-ENIe8Xi_+Fu}}; z>b$Ka$g`=*r;Gp{r;im=#-xbFM~@icnlz{XIg?*Q5~jS852sWMb{s=tzlu_rW(BH5 z!gh+Qp;KJbKhHj2X7T_nrG=KTKzZD405kI@(2C~sDSMAI0bl^A;CB>%dU`7f5fBpi z3JLujGWU~EHPQj$@NlbG*#)!ZrpEC%MiLOR0I@(@i9z&^tSUdQ21M9+u>=27z^#Ud zoWBqg<+lnbP=l|#3PQ5f!+^-^HzL?>2oBv|13diQd>VoqMi9uE2jsXAwTI~!Ciw_? 
zj4!kYy1L005@{z_v)$5m5(YA(ytD_!57WQQ7`sdF+U)u#uo9pqJ%qK=q$ERbzla?8 z^U-oUntj*7$Fpcb!VoChoB@;=E#%P!B1dyLNoF&K7mu^oHYR_{s!fXcEH8znk_uqc zDivVp@_Yp*RDvL)05l*Mu}%rKz%fBW32ko+d7R@Y575Z4&`5U?NGCgRdv6pi2{&ng z3#~&>NKWrYUEfeJh$aL03XM$QL#GbqQB#p{qG`PU9H2CWh zpaQ68ov73#0Pz=14L}8ki@Ww}f(GYmOr>bJ1>^a@orE6;NekbqFHXazej}Rs%5nrgX3@AZ=il zEjF*z3=}9CHAqxPt7DAgTzBxhQ|*GGYd3wMH@_R>VjPv_{MAE@>{NTzBWhn63>B;q z(Vc2m9bjZP&%znb#pW7w0cs;il{SCDHHy7^;D36;D7cF84r881CEAs>U{qcURszp zr{`%9i=eKc;r-`ml65=}?Ud)QsI>z;um;DQDBU1G+ejjTs7ss=<11bX!Xl)=h9YR& zS3~<+J66AN_59rNcFO#oo;!8Cz0J?JG}wHz2v2Etf89G+sh4Nl;}Gc^OYnXo4q>x2 zf%eN2vBt``M-4z zZ0#UsxiQW;oMCc`$uyG_NZO-UuD(C}e5+Jao9i@i>A6eKWiMRKg4?tQV5ft_Lep>! z?Fu%*Vmm3TLms*55{SShu$8p}H{XjJ^cbhWBZ+2qXE)FZG8!J@0bC4YP;Pug+`8pj zbMc*k6l#VT_>G$LyS#;9DZ~^@??l1|Wf1@%5Z1#u$fX8PhK8fQ%ZS$ydIqa|f+#DZ zpX4b18Aja|5`}3SK~WP(p{i)cAyPy{95Vg+C3;h|VdE49b5SDj5nM2#GkTQHr&tt@mT^R5_o15TgQojzY|b`oMZ}1PcE!{G=ed#(-Z5I3(#-9Q1+v zz z6!=Tl#PXR4zV5gaYB$>{7cNQgG#5)qgIx37D!Xho@EQc8XmrqRJ>`#^cI1CVVwu}C z1asj6l#5_qGNm(&#>bPztJ|?YST&SSf@fv0Hjl`R4lYW8idQ8WFWimH9&Q)es~ zzuvZAP{ke{2bb@v4&8l3+nv>2Sgqaa7}BiTf5%a?G`w^YtH-4E0UF=HzxTduiz)S? 
z>Yf1Maxng+RMg)aBr)>4_%wH&oBPP^WIrBZ8+mH(&OY@RuU3z!;~41x461w76Ufbw z%L*@kl;*zbes!4bFj5WHo%rUJoM_Cz=7YA!vsW-gqa3R9wp((X(9FaN^4GpY+_1gej zsF^^X*=P>74~_UAyunf_l=I6Kn%)fOqZjW8cM>&v&j_Jo-dxQ*5&dRPi|PH(G# z3Hk6=c~frPDz656a z5bl6LFk`e2$m(?WYST{FkDcEdMlS*G@U+5=Ba*Y&o%Ek=xg!A=tne-T7|12>d)qyC ze}{X9$zH6Zr>_u5a~y?S4q`<@)4BxDEZlI2PZXJg+J0AfH}@9BPSlC_qQ6)|H@+5^ zxls_3rK)@jP>iJ#60EcIhmeF3zuAIsn`{_foYE4E8H=FqW_@07=WVuXAQAn34lfBM zE{F-Z?#9(W#ay@a18nvJlNXtM5XtPU_{cS_M|y+FStbP}2JDD-(W%O$AS^9Evtln_+?A$E0}h^5ot;&>!^oo6B@Owxt^aV9^31Oi73 z@*v&`;`nf}R~8WqKdAp78~h(8V&XZ*+}GIN!P`QD6WmL@m{2bpGaKDT$kVe>>Qc5YbG?otnJ<78pDo)RNP6r+8`pN98o8zYwZic#;*sCi9lCe&ngyND8e zm5-sbFn$pBTIyJG)Tn)}v=_aXn;;#eZlKzg!Xce{=yzJY+Z#EyCk zZ(AYo55WUjDB&MG!9UttBiCy0e8)BFxt7|gtGYytEUD9fzj^6p-0bfGDdBrB%3EgV5(tvUQWfl{OAy;AT zWc~3{43)b$Xk(GiD^aav;$LFxC)wJrKWHcuwTnB~+eN`-gd{LVj70r?n2X3nmXn5x zwL>gr3|}Dp?g!*10KYWVCnPXA1=!|S`{;LXls>G3e)C}IR;Xho6^>C z316><*2by^!#&(C1Xr9<5x5x0Kq!Vq%dWaG&%<7ZD_4RV;ADX&g&<8>i(seiYC;3B z96<*#Zz20>l?tNJ*aiaXZRs}v?U1qm9ULzX>}qg{)3ybu{gScC!Nd>BhogKjkmzK^8rVEfpc4fYK$uSH`r4S_92wvL{#9+s(5`h04D`P5M)0JFX9ww zi7_ZlG04q0XWP#5yEZ5-;9NY4QQDF0Q&};8>xXa&jYgzd5$$k|`m=|Rx1H0N(58#T z*udK{M2EtRghdm>4<8ky^7&?`j7u%Yc_#Qybsk4Vznhma7@1Zes%=9%BMzQMvA{|g zg9QZvyZU7|baYW)WYzDWC89QsPZ*hEfzfZ`XwhN?DsZUJvsKKPdi2k-q%W1?<>{2! zD4E8KZ0RnS);uHF38IK~hKw<^mqYtj=+sw?=Fq2ru0&@B`Y}wb3m*vp1En|qW{{fz zh>YWE6v3Gj@+uvrJk}vJ^ZU{7-dGPlx^caNA0#V&GWd?-EBJ8>&EL^7?}*(UI09=k z@qx}f1Z3hw!V&`vi3SdA?+CLN9KkDb>$dl@4?8`|;jO_LP14Tu5qtI9K@=GAN1fQ( zkBP1gyL2ZmZ|~81oEC&a7&@%i20hXe2$NjZw6+7ufEt*E_-^1+8Y{!zkMXWyJ}Fq? 
z`~89e_Jiey;Wh{_3R1X25EDX+AYvg*+{PFa#FlQ|C~${%`-qz4nZNmSFTg@`Q^7(r zCKei4wmO5h)!yNc10|*;uzWfg1R98g`UUHQn6E;c4-dYGe*6Ma5Y~kIP8d%Vgg-~k zY&Vu6+yiSW;-4VNt_8zC<t)-c zkMW&9gpHSJn4l6f_xA!TO{(}#Q1x3R&@NQTz4AR`dnc?y<1Vx`nG<6_;YmQj9B;uP z5eV*}V%9|uAbgZG7(8c3nkw+N-%A|TfifuFR|b<6VVNSo-MzO1VJp^Vk43eRg5b5Mdn1t7EpPM zE$z~SXkz@k-8J~v7~I>3hBW*duiXwnh9l523Ken6*$gxBOo%!*_rpT|5|34;$|oe0O-6miK~UNbenFZT(4bcbg%M2tz{( zMCkT2c)&ht3@V(YwDeBV^Tt+VXROsvV;(roz>~X}#UK>9K}a`HASDnoM@eVh)_(~f zAMV;n!w;7s!tj5sAnY;&d||L7PQ02xcp+@6kW+{-8AvZtmWzs(f!aAOveSOG7x$V# zuw@34%rKB*oC+!Jx;M`PeZG$JZN!e7Ul%lb571~r&xc;#1odjJyqt zMm{_O9C{DGf<=5uM-T@)Hf{Dn?FJs<|G*IQ1cx5!;*gxlcg7(R3c}bk5@ppMnb)OK z_IVgC5pYB>VUwytkutY16C*D1r)rWZ?t}(g71Yf+;ZJHPtnkG<))B>602id00f<9EV69}}y z482)REl})I^WOV63} z9PKVvesleP56!$o=w>-Hz?D&8j|(Fd1s1-hNg*Z!5oQKy3dX7_aXuXY2I(I`&o%=4 zXt*w4<6Gw6_2=iV7a4cL&j7x0d_{^mfVRXWYCi~LLNGDTrvx%V7Jvq#ufgjKYKLwD z9mSvt-Wi7MB0iHSa}TySV?9^`6p6sY;ZDA-240Ss_$*-H!hIRNVnYhT2}4*pUjL^| zfY5E`GR%F{k^E4<6iC2Q&M}DTuPGabrIC+Dmhcua5R9+w?SoteV;FoljE}&`{8s=( zC*TnbXn4*PUxov&3I3^3^cMXL;o7Ng;8g_kNx~j6)_wIwPP|xev3|ycE74HKkqUpN z{OgE(a~ZNZc42OVHcy62F#@XMuffE*vgq+GdGKNL5|*=R7siR`58KPLpqG~$-{g&% z%#L-#QGE$V4c%Ln(rpv^R6uo@#EXvRy(<$Q>eiaLCuLc4a?UbGE*?hf%XFbW$ATdy zBCAK4`w)|#K{7jl7(M)}C1VgE`-OkFAg_lN5H*84!IzwiT)c8knj1r{Thus3BFsNP z_-T1V3%XGrL7;z$(ZnLe5SawX{g^P-w0&6={ZR+6^n4y|%wu=;8C(A%rxnrvmQ(z7 zCX@^D_Kg1T_+8NdgGCovja23tyu5;8QBO0QsXrga^Lf=Q=JWbZvvPBTOim7+d`~a$$Eo{394$DRz}W(nc7=ODLtrEj_{Z|A=$SGugq*KVw2Q z-h?Rs0&~B{@s`2Nhp##fl|-zRRA9WJY6=%y8z+=+0C!HJC^`cKWH*NMa;0 Vn0nj+ZA~1U*_qm&ni<{w{{be>m&yPD literal 0 HcmV?d00001 diff --git a/src/目标识别代码/utils/__pycache__/plots.cpython-38.pyc b/src/目标识别代码/utils/__pycache__/plots.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..344ded49cba00c8b8df67ba5cdd0f0473217e5e5 GIT binary patch literal 20135 zcmbV!3vgW5dER~P6N@EY1Of1&6-h}1C4z5>WGImoDbR8xh^FXR-pJl8_AHhQ>;pXa zf&{jA9o3){ISFmWNs~B=+(qKVk=-U~9*&c`shcL9rga*pk7;x3wrLXALF!IC&15D+ 
zH+H}8Kf3@yDU+Ej=KlBG^SbAr^Pm5H@b3Qpw1wZ7=bGyGU$U$}=EJ?e1Rl=g^1o?Y zma_7evXxV_^S0cbyo0-2bL(#2wOQuXym~AjlXR>WuP5>eNjtS^4!$LkaMiTcj`&ibzW zuKMo$?)skmp85m%2kNBQ$wxh;&)HtF>%Q3ZM)kgW48b|q2^{m>db|H04y;J4XgGe1$d)2<% zR{lhJuljcNh&piF#wbpz=hUO>F_b^94yh@WKcNn*Be|tezD>=jv+7Bt&Z-yGIdvYXC)E{oK|PJs zQ>c9reO^+7>JMO05|L)kY<^6jW<|dY!m3GigU&skykq z`;}sF9rxtBuRiyT)=fR>MzLq>#Y#C!$?cLZ-s&c&n~h+yKT2PGVP^W-%Y{qNUX8rz z7iO+S@#kK+eEFHHQQ}&)UT!W0QMz=!Tv{m1p+Pic(z<+OsjAEMawG7gzEZ8aSSU4{ zTKQ47+9;WV!XgF}C98g+=$ES1C>dPW<)SL9C>`jPLV3AVUJRnt^2)92$Cg)?SD0L0 zLGl(qAu(4C8fD*)ysH;xzJ2{$_(h%!!Sk!F@_HXWJ#sHpDkVFO zizOSy!6LdqH&4YdsUD^~t`fH`ELmJ7F~R95ez93=YQKcdw()b2VvoH10|3fy+uv)= zVIwN{b~3atp@&zkf)xPHo3x`W^yke?j^yLh)5lL8KgIj$OQ)aU{fQ$-r+I(+*wIV* z_{Hf<$BrM($4{J^K7HguK7RVx#fwK#@A#!lPoH``A3u8G^zq{-^6?`_E}eM#>3say z#nY!RpuZzWE*_gkKgUm;IC=VbKK}TLCyw*^$dRL`p1GKhpE_~;6rWEXJ96aINxbK2 z^NewvyeQ*2{=~&6-twt&eBm%Zbr(Rz3a$5zwDDmbP@m9#-Fm}WvwzUJ;o{ovEr@#8M-@m^0 zYhQi+=U)EG>%Z}p)sOz^&wtNXUj59=E1310Cl5V7b?jxq>z5aS=Kj>;N|ZiZtNKBy zSzmnWAr8s|h+B@;+Gkd3s!*slszISJb+*I)eNc36&_Y;f7S~&>Hky|S_BD*o^ z=)LG7?_tos-iM@qkYGPRl8603CtuxHad#h-X2Sb>i7N5l<6Ly82<X za7#0emNRsm%^m{)qU>+mf%U2&%8nkicUT|7=PGB_`3}prZeY)_GyN!s;?&5UO@E%0y)yG`QyvpnN zt-#t?u(Vh!YYyMMLsEQ^-UGnF`20r8X=S&*0OWRhGC^c%6zk;OW&EHRR7>@8aJ{MI4fm|w#sX^YIZl9|1V}o`MA~y~$z7+MP?8ZL z!5`D4fr56wh(v%jShd@L9ed5vBcatu1`bfr-D$P0Rxa>5v2NLkAkMlCkENl#=DY{= z%CmyRLR$YL8|!3Stvx}Kb!?06Onp-`ZoSvkPNDXzrgo54Koc7!jU4*vL&?u{OT12I z4XE;7xn8thVjbrt;0N%)nQ8T3sn%-ETTeZCYU;$)k;#5dM2zC)W$a>Ka~>j()2S&6 zfraR$aHRoQ+>RG<`JV*{?Rjh7_6Gt;h^zJo?Z6G=t9Hg}C&JibE=+`0fs60y z?+ap~9mMD3tByVs+CZ6jCxL|q?J6XLR40vENqOoEGGRi$UpWmY^g?&>m7qV!2HRLZ zKLEJ}Xr{aopp1&GIx2n#s8<+l#6#&RbQXWETi>1^>efz3ZFiM5hXo;$Gp%=toRY(L z=SmPbs+FbYl0TKZu(()T$u$~9H`bOb3rq=F;@pBbJfP;Qjn`s@bRZw zgJ$a9-KdtDs>~@A(o^}`yFRz)L;rj2+&Ym*UjnGy{lNU^gCDwnZgM2@R52(*_Stw_ zl)d2lWgP(B433d>MK16qN>}}A1ADwtDr+J@l$z7cx(qLJs&&n^i98X$JOvRtR%`sG5>lISnkwrO(uo#>nqGo*Q$+j;Z{{auu4%FDo_?ODE&Mu zB+ARhIw)H{)o3=1+;tzC3sss%U7_}tJz%Ht7q@o_R_DHzcH?$yXk#)rIBaCS>8`(m 
zcKUrifQ)Z3UFd-lzrFY4q>^e0PJRO93YcQ~S#U;3`~H8#lSATzJFrRFcU;>7m;4f* zW1V;iIpbxg9cu%BgkQ$TEafi5fWO^#3j7oJIJ;R+T$8%MMU=(Bgq$6$^F1b?PmOG^ zzA2sKyS+OO-pzi2`Q3hhy;pC{>h(M4gv5M|PsQ$dw)L9b_*Wp3(qh$?lrs+epK~M~ zXIfGTlj~URWINSsfp(~YQN(xl>Y+7qQM;Er=d{y73bSO*r^95J3Yqed)2EVm;+(J7 z9R1_im&jEPW(aJy-G}o25R6uG6FaT2Po?g7{I2edCO2j@Au|d-Z{{mZyk_gqh4+-> z>$AOQj4H9i!kAK^3-^pkag2Ry7!U4Q8LNXfvOXA}XWA>Q;FiAP?CYLUdLc*7FH3Ir zr20T%((G#$qJ+xqu;f|h8PEM#UwMWOLxOjx{n+mR1OAFLk@<}!&E1GojvP)lt;8oUh zY_|j8u6^LH{n%Z5a-I8SoigL(fadIuc6k9te*fq+33#c}{mj!FBLCJT%>7k*WN7%d*fbu5aPp!Jn?}Fo^Lsh^VjkC@o0G$2uT9)&cbH#uKy1@xIj> z-lRtxZ;-XwLUA~o8(fgGELI!B5NM2po#U-x&vK>Tv}d3PaFTTmU@~TA!&zQ2OFPSw zBEiv|S*K^2gZPQB)|F9|t*q?`?kZeK0ENY~1$>f~D zR4Jg}0?F#*T>F=?=IwYp(N2a5ky+ty8|(5O;`k;6K}*I!>0l= zOiLxlt&X>5g|J$Hu)AOpF`zdDxV8&HE)gUx^&XTnS;y=JvD zsMQDA6VfLcAJ~VxYG1P6cc9%LX2O2--XEsro?Sy9U^6Cno7%>w8=npaI)iE;Okre$ zp$j+^#sP=lNU$6N&6VO>Q>`DAZ;*B=A=LQ!U?;Eb+XtVVsaB)_omT`&7BZ>q7lGBY`$ zzXRemJT?ae4zSLL%Z(3Y}m|JZ?Os)k{(qEbt+fL-wX&dxPWeCqPjmfA* z+j@nCqMBdw(Uk`K!>Fru|9bOQl<;pXmCNLPSOZ^ke@0%lG1uf6gBo}oCv~n`!OLq+ z2)`9Jb-8YE2y=u0Ja#QwkUrclp2EEaX=%ijtl6lxsqo~ zp54oXKV)iD&J=`^{rJ;gK;HV7Ay}+m`Ii{gJRY}jc+Ne=4i}eCZX=Ept8RbTsyl7)Z84@n z#kidWw}+}tt`7Bq6d8N~VkqVXb{1O2;)c6(!_F?jXsM$lB+h`>mi6w5Im3KsPTm>X zBQ!E%rNAe=SFz$D6aq!nhNjxs%vVtjzdt+&OK9`fG~N-LB}vT3i;W`YP^M%u2H9>Y z0Ck9*IX{ZQo?q5^x3O{~-`{BJda+h8#W2@lHtrStvS!2ubiP>h{eO&7V?hPlChgYY zt;L1jcWqkhwpIWy|2ODj(#>b!I#^tp@~ah$)4cV!Sd!H5yX`yaT%T5~tR zoQ?gZCrc%xR2j6Ytc$>iAH$3FrvajPadEK>yMae@w;o~sE6k6H$OQvs983U0f0PJ{ znp%U9q{xPQgVo?LYRrK=*Sc2lee6KyXuz~91kHkJ`B@Zow*{s6G5axF{{iyurn~l) z`NhimE0`aB1i4$xQoE7CiId&&%YdA)z@CSis&^UML`kL-gH*yYyIXbw@(<{ilr+`r_lluRT2^vq^Wj_iZXT*rJw{_q#<84Tt$hgAI2&dYBN+y32&OK}0_+%J+)!GTGEBZ>WI@q}b z;Q+o6@6*>IW2HmLSR?obNPgxCe+C#x;n4!+WuYb*c-!~y4{_S5oG`|Diw9R~% z`Ob=mughWvwozjd~D85AD@T)848?xgEP z*&-AFBq$~}3g=A0umUqhW&yB09PA2VGL-y;8o`Vr7rpInk8I>iUT2Tmjyc;N4$q5O zLyasr?a^=)rh<aL>n5DoxSN4B)hKJnJJ+e{vCaAbaW=Ycg@i}x)86UW-);r4J0 
zW19$a;RG!FJ45d^S6>6~m>7pGJ)CGy;Co!Z7Vcm@wF5Kf!rg>Cy&O)wZec`#YOGs6 z{<`;u+unthc`zIgcZECE1n2!V_r?@3{U?#Xx3dpC-|lLiJMYyx#n$)R*weZU01-5% z$VbGWx`kjmDE4q2$K_7|z(X`7BoppL?xC(f2nx9_mwr-!(w8dY8viuVJc`t!jTk=ea5@{QSe;yg@ z2LLAB^)IvVk9q(WgAGl3ojSc3Un-L?fAly1^1#>6b@}U;&fT?hEqChZ95@(F1(W;r z3@iT#Koq}LZum_tR6L5!!7mXAF&D}$5|0QqMgV$)Exfs{<7OXo9^>WTc$tgk=Bo=y`r1Aak z5G3&|{`R^f_5^C}#a|qzt1Q^!ggAyK9J+=^kWQnI5lK-58lq&6TKbzD!)pYzW0bf> zdOxBSg}aNm{7(WH`GSmb6?U*TBo0U&p0S`q!pK3S6pa_TU8IRUL2Nz_ zA`dZ;A}18@SSO+4EJG@(ylyHbN_ngUCmvJ zlu8@&0AGc^>r5Sroae3r`2!3;ij^0erR%Nub62Ufu2Wcp==ejpO>Wa95Q3cu7YNFW z`uEu)vsi?*U#P$-vgqp{XO_pMisH-0P8nY@cdw$WkbahH)q3Qu0P3G%e}dkhLTWM| zrJ9RDwazUOrF>|t^wDa#7C9zgQ$!U^Ge{};AwckBQ1GLs3mXmp8uf*aSI1JIaDsJHRsic$c&h|4EN2Jp%$$c^)e5xV0*=%GQ!urUZVJvP?fWO`7^ z#4BLx#P|eEpsB~BAA)X0Fx=!F53GU)MXoqi3`A97)%)%s7A)kdXJj=!FgKWez@WK` zriTGI9k=wLym4zj1rI_>4T)z2Z;>4KDmiGGRKsv(Nm~Pdg!>-*B_!XIgz-)nfxGTB+q+H?Ng~9n5GZt$uO3Y&>R-ai#Yxq6L=t z!kD@&g4%Q-*AzKu&-jVssMhfCqty5!wDd=x~S~+=mkbf)m}( zCSE*%*ZV`LNuoky4@t@6xJGRKFv_;*{r#xchf8oGjV%*eI%`P z${L$bY64o2^cg1ShdRRy1zmG+->vq*YV`nJsW)C`FDl1zs|Ohxs`jgg)Wa39OE5l! 
zhpI_+kfEWKI08dCz9|NV-a&vZT8=O@^bYnynCNWhT&Uyfgc@HHDH?gBn8A}|tw`@s zkEKyE{a{J09F(mIL^I<2I(d~;T2HZotN$Br zQDVt2Lmq^n*H9$L_La!K+KSgNTsc2=vXwUXGgGHewzda_MGaowEWr<&J5cuzSL>C@ z)(C|3##~vK5g)Ocn+*yx&%e0wtU+7EkxK8Wal@t&La|WR!YC-3S8(C?-t+lB89qFf zrDj0{DF^|OSo7X{UwG~XjQbG!(o+N!L`=K$A3eWLA!Gdq2#N$}2)Ietxd|)3`$hbo zyKuf+|57*c$}`=>7cQSScJ)7aM=$5u^DTE_>X7~uw9jXrfo-`|g?kjne5e~6qkvha zws^g024gJqzjF>!(&HH6`Uv081zV@jYJHsG34qqn^{_YrpD!n|+fO(A}}y8%{^voOs2s9qF@dJ-T& z*Q?Z;vu{Txp(uz|-Y6>n0WUC=61xDBH2^DAgm*)zt49f7Lk+$Moo1P^)fEK7!Bn0k z4HfZCQWW7#c~{Ugbw$|I``9?Ps2L1NF%MzObjFwI|ImghQ=H4kNMDZ$JIk+VX zGH|-99ed;($;82K8qeQ|CrHStPcX!W661iapGU9jdwOqw3xVgpb8-2<4gwF;Iiy99 zSQv+}{oOYVIl-0}!-tU#drvVjEI~e`{jqD>r$rJZUm+F-tRZIf0hSx7QKewANka~e z3#pGA0w1JALbP!QOTS}-se?cU>G?j4WEjK{k@g}#rWx9YlKxIsjgokw&NdL*9f-Q` zQ{!MckQ+M#U^xkz<3Qr!E(}P3J&=4zl{?@>P!n)0fFR#-KWw!o#H0nXd^-tqRg4UR z1n)goV-#V9+ZWO>jpb~-Q$h_9Mris4d3}KIS03DoU|$}lP|Rr0shr%y29Uq;Shx0A zFkizGBfe;M!!Oq?gk$Us{5HAx8_J=9-`xw9FHRr~DjE!sY5NnX#$kW5>kj&Z? zdU?3rrw-#jL3S$a_z@cDItSDd)Ih~d%RVh~e^BfNRZD>NEY%zSH8}p@oDiW*FR;QGwvwUQ zU;G-VDKte89&utcHDY&g(xqCg6c(y9lM~_yjAYzGh>mRagr$MC&p=A;mfe zw<#||^t)Lx%$4Gr;itci_kYz8@^!daMd(C0gRrd}l;>cem<6&o z>(hECYfOrNT7Qy_|BT=<0U zC2rQN+vFmg75*yvu2$_%9A2!%nuVZG zJfSr`O$e*Ta}qun58f&SQ>sJ&|BXrxqYiS?L0<<(w@#Km97gekI3NcdjS)O)m0Ixh zYl!68wq_xIKU1;ct%2Soy3_|ZB6tQ6F3LzA=tzjhNw3+RVWdXL;Mbs)fd!bD9>k!_ z6MQTSDg6m}$!xZR$%%Sh3hXwnvf@vcTpj>wK=*^zl{G!gkHWi?5J3T~=Ef(2KKO;= zJeH?M$b9fEV`4Rkp|8&c<6x-U87;(vL>Ps<7bUl)1aU&0ogxD8*d9Y`VT~|sjwBJ{ z0^9UPW_p|0XtPt=p!99p#Nh?s)%CC2QJ*Rse>RMRZ=u#67M&Ya>@`AKvuzGUPCxaC zD`QMxd^SXt6z}cG>qm&A>`ut*9uvn>fX{W)?Y((gNf(HN-^;E(Oz_(TqPDsBkRQ|m zx$pkEiLaj%TD1hqW&P1BKWyUTz6&+Mm)?h4G>8Zo?D=91VN#U_&+Hk6;0Mt_e~dsh zh4eKXt}xL``YXtN%gpcv$8p``&LP4;=CNM)y6zl=cf5&nsmJ}9&A9COE2Mtn}gCfHy$^J^i%f^X>wOIEXUEN zic1wvn(!N6lOq^H`y>369|e$zBS;+Lp8qOyA0hZS!8L+S*dRZ}$27qw0U~d{S#7|MeuX)tw2*%=Nix+qGeaFLfrX!? 
zmq;`q8Tc=>NR+9Oh+l#pT>u6z2 zTd7|h9vx1V-|xg9rz9Rb0(CCbvR-{yhBLSAl*sG zzA%V^;DIErx#%ScQ&cka0uo>k=apR=MTHnrhe|>NO2Jn%1P#oDMM5t@?r@mENXBTy z5~FKEO;l~QQ>^bGz@wJhwcvtG;!sv+q#|SxF8YMp4U_GjUMqgZjSqnJY^OGa`AC8| zdpzwdvyklU5N$1^_J$q_Fzbt~zJF%RLQrJ&a0wCIDqnW2wXS?A8woC2L@Mg)qW+sC z%+C;K*qM+(Bl30E|7|49p5f|QH0WA+$B;DVVn`KRqfeLppcgoq;~|@zZr;kZJceT4 zwa@5B@IvEim}g58Q2hk*uotTE7$I=*-F)(JJf;lkE{Ae)HY|!QCQ378%=kWA&cdVX zlmK9-p(Uy1=AO(^`^5eMfW9H3rwD9#M)^}mPu_LTU@#kA2QfIe;s~{b4F~=3YbM8x zmr9`#qCu@2OF85BEW?#YuIzerJR75owBAlak?gZ6QyM=Z*Mu-z?v1i6;LbAW` z?;yH6wUy~E*g>vXYtKxxw@vw+qq)w{&GD=oPUq!XLyz%@AA+qbGz(DqnEE>sA^rC; zhQE;jeG!G51V~;~=wa5|P@sF%J!*Q!$wbN{dw$cbZs?4#`tL#Mn;v2r#8xdP-c7@= zI3}_2DTm$N^`4?`;ET`XuHbvcGJ>tz)R-0+amij=eZ8?U$Q%5m>#`a*B;!eDY|0WM zLHv6T?i>q5=AgZjR#%@qR+Gl)?yilP4xfzXGinI}TV1?G_p8?|*G3pU9_ zR=<-O-%cRf+%K`F=x;GuTq9(Vei-HLGfa_a7+dS-k&5i{{dB{)NxNVDo9Nts7!_%8 zr86uGDFcUsgox1cI_B)McOeSnAgrlyoBl)8FOj?SenUv~;S$>L8z?ny(@iSCGk3Iq zw&4yl&QfFZgi9(Gz^6`0q7&nP9RXd4XQ9kt?|0joozFIE2uhbgT2;sB)?%$8 z*Z~p4A#~#hFou38Uj2|&pzw)h^9tpbRa^gGIM3jEYh>W!%7T(W{la??o(DR@U_hvE zpe5U2!Q^2K$Rc63GYBgz@rYtr51Kk3TD+NUgzXT`uQdglQ0wNx1`;pPp=G zuU>dQ_in%UoKHM0Y;mR4r-A~`-_oCgT6pm5O^~7PGnjGh(5UfF>W-PePn%qTOB?p|Xc)Q*1@-?#MWiP+NNAYHVjj8V-01N2qA3_ymlV4<> z?0MPlm-$#HAQRdr=CbArB%YF*`nfmi(VVB4X! zXFw#10T7Elc!EUXA~Z)7lLH_V`cr7xLf9Iux|ORu@%^r^pSxCKbVct+l99kAWP{60 z#|u@Cjzg6L8%Y9nKoTJM>#Oj+noSSYz-;=Cr#wU_)5IQQ>i!yMc}(@bHlYinzb6M+ zx0J|+hjoZ{BfOW98uol@6-{UJxvI2G2-O_^N2FO2IiDGwGK z`$O&=4$*?p@GRQHp)=nHUws;XeZt^zl%He-gad&>_|7!Xc!B(gSR<{W7QwkN1PfFa z@dPF+Bn7L=z>FMFGMw@tCeg^;m_*tic$17?nq0M)zPAorUV~v5|FMORN%#llUqBR^ z3lOKVa#QOx&$0r8fvUj@Y>i8^J`cNtqfQQYBP;`nu#CK$)$%P9*+VTRO5wI?KKo@WMyey&jf}{IQf_Y}dW^{!6jQ+38I>IbxGwcxczmxd{_=hI?753=p zAHz-ZdUkI-cQ+?%PM(hvg@S693I&Nfjhv=0VSHS*;5y9H#<`qX(ltCxiEtzTghL`! 
zjpj1AY0Sod_@Uqz#m(K&x=-_!uK*ys*ic1ZgcxWx<>D;dI(m*{{1KMBSDt;&7^~xK z!wnqu8;cF?MjGcuxXE>jAWgu1F8?XS{Cf~te_7-K77P+lf-q+A5u|$k;UBl?YN@N2 z5m}-*&Qz2eH}xp%)6l1@1iSe7=LF9XOcVSH!50XAkKhjozDdAvK?(hmSRe5W8%s3p zC87<9gd-G87X2PNxk)UOXU*}or>G?PR{%VZrfnikMvUC_Je$7}=d6=)c@#VCW{2W# ob|8ChXm>i5?oXy1JM(0EFgcj)PakrCY@>%KcBglyC(;l6e;eC+1poj5 literal 0 HcmV?d00001 diff --git a/src/目标识别代码/utils/__pycache__/plots.cpython-39.pyc b/src/目标识别代码/utils/__pycache__/plots.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..310f24b79e51bb888b95bbad29d00b0166a87a68 GIT binary patch literal 20090 zcmbV!3vgW5dER~P6N@EY1VQkj6-h~4iG)ayl1PRUNs$6ATY_kkj;)RC&0^02TyP)Y zxfdj`z3V6joybY(Fi9tgqsUz-jvd-5F`~I^F5R^Qb$ztw*&poeu?m7SY-v_1s{F-ga2@p__=khD`v7Lw8?RY=J_UGFPo zx@G-^tmJJe49I=3FeLZk!Z7af+DLtCVQYP~Fk0VM*d}cfwXyp4!uI-jVZ6Shu%o`S zu(Q6au&chiu)F?1;emRtkgGpfc(A^wu&2Jau(!Uiu&@45;UQ_8tUX+Rr0|GsRrXh$ zPF|%{devF63y)S(^N*=_RV|fK{Z;!@_Q!41Np%jW>?%^{Ewx1r+_codic>gPIk;jK zCMx4nYWSvIcpNoG)K=8kiW-O1XeFk$t=K3ZQ`=E~SUs{0JgkEs1OZH(jz^@4g#9YFb$>Y$oH`BUm~bqM!k>P2-}O`_y@Wp8Dt zIIq9x(}ssbzGf5yYEud>ZE!asgvp@bxNH^>S=XB zol(ys^$cpCMW5%?pgM-X=g@9Sokz(j zm75F8yk95MPf( z3&nD?sg)mPtBtZLC@x?yQL^e6OMbapjgr9?T`8%Giqe5zE>@Pxm4zTmEiK=;GP$(8 zw9MqvGLkp=35nTi(5U!+7{hqaN;}>}{1kbOwDr`y6_B%_>>};JisY%0hqMbbh1|pPD*y?8q_RkDojK6z@+RIy}Yu zvy+F<72;>7&P^UUT!JaJaWbL6SBPrdC^mD=nb>#t6tkIIQxzVXj~=}-RYTVMP2pa1$V z{P5c9Z+!jrUwq|jumAI}t^DXWe(49k_S)xOS;nkif8yYi6O*q9Uca&cH1{VKmZS8^ zTGbEA&HBPK523HW#AUVinpK)87ORbFP%KWItToG}n*R(Ez1qP0#6qc734%)G)+&w2 zZsZ-k2h9r}2JGv-Na_a(_5no6UT4hmISH2IIU{y!%Y84KDiaZUKMx6BXL0$H03lEy zutU%Rq?}P}yVZ6>M^A*VvbS4+jzD+2HS1o&v!KZuacqr9rwQ_|egwD3tu_LkXND&u zkL+sX&dh0@L!S8n`HJ2LU=C`WIQ1=VuB5?pC2dBZ*Hw!V0pdyOCOCjpX9GLiNiTXNSZBa~%C zNbturY2R5~{yzi=F#jud8?a-qT6!e38p*%``nfx-w$;i7UMJQqI~v4Ux8bogv{#+? zgHCx?kXZh-jTN%3*6tw58n(q&rnadUw_Y}NQ_D9^-5{-i9yanDyU|DAa;KZ`buz0! 
zkeB7UWF2A+=Mb<0tM5#=`Y%*#wdRdyo<25lbmCCHUlaYJcx4HD*VmkZ$m2w63P4~Q zda)8LFJK${fsRto3D>AL8#=?{)MXS2r_=p-)?DT&Q)`w~QB`J3i?slFHO7+d1Ty98 z^B!dlGh3+Egt-wHd{(xcq&;docFxWKGM$k>J%zF|E8P2w;o&qcU&vjL-wZ487w{Pu zf=OESk>jYXs$XT1im6c%Xvld6)e!hnLUE&Qi&AGAjV5rWsi*Iz&gg2XHW37~Q;}Pm zDVI4Ry&va^oA4|y|1E&fp10<0f8gCODhg^AEAx?FbsLxC6C zL2N#@;^-5h4P=RR;#g!*tYRWac2cO7kf(Ie7bf(Fl+$oRFLW1P4KhK0kY)M&7Dy^U zFy)N^T@?CK@moN;;y?p^v8T{k_{DC0dw#H6J0Z2*71kUQgh)=e-Yb$x4&R+CL&T`g zEjAbZiQJimh1zniu`mJYVLiK~b!mBjwVuxf&D{E2?a$|`jfKS^SEZ=qPqYTj)V;4! zEjLw#Qz!&y?wjxZ{O*tb@6}UlL>~Pdz}(#r&wnBKi7Th_!;z;-K?%~%`r9-KROA9b zqIA`-Hn5u;<%;HlN2yuetjlmBr&`xsi^vo4%2N=s1XUupURsJ`y1Cd;z~9BX!9t=~ z6!Wi?fYnazMm{OzU0+~!x>jveiZ`kXqEm|EPLa}%LE#r!AyHW>)j`7wsYbJ5B&++- zTIdfkRcN|p57=q^#qFJT7FeA7cG`{Gt)cbFTj!&Zb;n(M747r~djJ{VX1dq|Wqy0_ z$4Ml85*+*_2oo^F^0VN7km>#Zh$n|+26teOvTwPz1zz|SJQL#}Nxb5;V{PDzaK-qT zrQEAA;A^*?0+$5-&1{qtx1=s`4`ng18fP2p{D8^lQzP4}Z%SwRZts?ZceCFFN=?7N z*{e5Z_4=K4LSj0`r((A}+q!Kx{tXDCv{NaVXks8Ltv)uK9u)| zV5gFs*kOfzDs{`_cXekpxjv%_nNe_aGhboiwyi%O-cydR&-R`%s>C)6V@d_7&OKvN z9An=q#)CWee;V^-`b=Pao@uYJf}8q^v#)zb>4h9Qzbv`Ylj;k$glUd%1pJG+?5zY88*VitjfmIGukQ99psVJo@We}7(G<-ixge)ZHDgk$Vv;T0XVN|dsuM?k4%ehT244E8a8w7-yBkl;*zs?-hBs)@dhfC>o2U(Ea-HK*TE$93 z{qVx9eWy*J||e$0P-<26VB4IS=Jerln6F|nbclHeoy|pXH@G8Ri=E= z`*ASq19}Dz-2oZG=CvVp83HJxzad;PZeeZ<88kjXxg<1#p5C;PR}Y{otpFe^i|A5k zhI~P9VOYWeXDcfOCuw zS%rY2Ka32hR!(CfN|b!zeu9`rliEC?QCwGi2w-f}o6~`fH=DwIYa+?YH2vZxQkrc64p9eUH%O~4PI_duO zp0t;aN&Z?6d95vavEe_iDgpN9<4`#^p;cb-H)4>~QvsQ!rIOpNj<;%sut|WZyI>D7 zpm!W(-wopPMnH2_dWVJfkSkT@mJ6ca*Up5QPI}d9XHcsTawa5AGCi;kcf~$ueQV*$8Nb~=WbL%!5caB zQK?Do#(nR8v9wgJFV=I7#rjM|=bE#*Su>hky|f@BqGiIosC2zjR8`&U;T)`CWr*#| z9Ji0coI!A3Y8T??1fv9%A^LHYkzI`J31Jtl{$lqMHZd`g12cMDSJ6=+ z*_+!|W7X(Nzo8sBebk7xud=6~E<8G!~Yp^W*xv(GEjis8s{hZ|d?Du#zi_ zjjP~G3yTnOs||?AwIE9Ri!)-miM%>3e_pu)!8xWe8MSCjudrBB^NT*Z(qMfUb+zta zY2Jtu{~|C1tvjH{RwdkD~xI$kDE9==bmDRi_0g=5!Z-Sw?Agpoi_Nkn9HDE+)RSo$BZZf)qxZld;lUS z<^}c?+P&h2yK}?NEW#|Qqa>uwfY+w=?uj|Wd}mJH8QLQ>BVwh%C%o6N;vtj)MYV>8 
z*x2;fQ4PQUc?!19=8Y-5BQ`^lScsPzCCs5rNj?U-ZZQCLh@4qJiosf6(FM1$e67&m zXzF^YRy4&h$YD0_75cJf#6@(zQ1bnMgi>Q81==R<*5jLti@opKu*q$%0ABv@&_&)Y zWZ)QBSf22!a~P+2>%V46Qon~I__qYVO~Cq^Mf%GCkv$XHC9}}~o|&=(w{3pQ`Y`UG zhFI313cH!ukgb0YIh&*&T2@GM&ax^c#5s!wO`5{MI^T`u1p5tL8rZnWkazU9^C2vT z(O;n}!P+wC1YQ86SqBRb<_z2Kiv4lxO*@SJ7_2K`t+{Jp&M3uGPnJqXsS0RSMVEjP zKaCgb&jLj8(!xRoHUp0aZ9T&LSD7CZkqc(XIG6wg{wNWYG&KexNs$fb1*^dw)R+Z% zu66C)d)a}^(ST`J44OsL@+}l~w*@8mG5dh6{}6e1(_LH2{KDMYtC$~s7`dBFP`i-9 ziId&&tAL!az@CSSs&^XNL`kL-gH*yYyIXb?@(<{iX`2fGT0L-nwBJ$HYTZAS5ac>hhfTv$_%wt z7V`=gGb6H4{HmjWlS=-zETp7#h+f7m(+d=FJdMf|=xDaRCG78P!JdPi4wSJ!><|0G zE%-~(#vNwymu5;qlK-eP>z=f(eJYH-X6=A&6@8^w9PHeIZ~$M3_vtH;vC<)AtPy+z zBtP?napNf+40eW~C=Uy5feZ*AQ#wq|I>sEoO7%a(KDWW%K4K~P+V)^Df;L;jLDffN z1MV5ni2hr#_E0byW~g)U>u=j1{2NG!fBV-C&2PguZ86_vzH`pQ*JUvSTd1*!VPao6 zG(Q$*)c{m;IA{SeA25z=$9BjN{pZKgW>CBUaMX&FxP#6SWs6MwCMYI03g=A0r~(s3 z<|<%&IM^8uck>f!1T%_U^tP)#vYsz_o!x3H=4@*?JTE2nl>gswq?1^+)aX?kPk$PYfVL;*oEWxkm_81qK4~G(QL%1b%H%bMx~nKX zgag3Tkxgy0PrbeE7Lx}Y9GTzMd02P;ZQ=JUJA!ww=g0=HP$WP{<`;u+un(lc`)1_?hJRR zanAc~_u2$7{Y~WW>Ffp12SF08bH}|}r`Y*^3wv5u03d?K6#0l4Pd5=v2gM$)Be?u= z0Qi80gk-{5$UW5c_&^~SrBP3!PS3?devZDKMwdL1GU+D)A|@UNXzivTS?rRa4gX%x zwKMnkMt<@9|7zVlrJqLa%TSa*z@{$~h?S+llqit4QTFGNv9=!|@2-87g@4oouokRq z(QDM_#pp7Zf8|HN^jG`;;#8Nne&tkT=UVQ>;aPAmnhEmz^fW8~BtR6uTxs}CEfhS8 z&BFf>2q71$Ez*t%G)DM&gDt$-#ahjncVdfh^c~Y*VRi~qCj=!5xIfF@o69n58EM;xwQiC1K4Lb95mish+s+%=r4F>`BZIG(<%4debje%SKX`w{X$* z7o=}rjAi8OQ3@ir@pr)3RE5~mT*8YI5c3VC8_^7VgaU3*@*oBl8CiA$gZ0%30d zgDK%~efMC_zd)|904z5H=0+#ZkVxK;#ohfPb;s>Jki_@7Ly*F=_}k--*yE_R2Y+#x ztg>K<sbK1L)(@n?uqJ!ZuoEFAn*_YDQ-gPj&%|$&N8Hu z%Il_5qL9ZraMeLgpgADc=>rRsdh_t+&^SPAsTdnfd6sR2G^7n_fR94obtVo*&I=cT z`~ij^#VQNU@|9Nng^N^J*C?n#Z2Sq_@>?{CgJ35D0fNec{(ZK{ER-PW7w6y%S@8AG zFw5goMe!wLpNub?yEmt+kbIVE)q3PD1L{w+KSA%$AeE0tspdjZt#eC6DIeM@y|5ZC zM2^YV6i)@y3{nby2oU@j6#S^^;(EisMtz}SmNR5~z!5t9jMF5Er!=1Q0JP;Y>aG8{ 
zq7;74;_``+0laStGUGg=e{MMtcxcSQp9lqRkA<~27#`Fy@dlVWF+KqkXy)V3Bm=@oL-GqRc<7#mDJV9s1c)58Fqi<^2--gav~14KDmiGGRKsvtNm~RhVOti;`4ql(!%_+I`Nzy4xC~1I;&$eQ zTKy0*sviI@2#to-(3^n{2-%cAF+i9NVu&It)mmwzp^523_7CI>6CnVO@$O#dz^`HC zo7#t!)CT4wZbztWl%UragTx3(e~(NWxtW-zUnlq?K$P|?0la1lFxD8nB8`AaRmhVF z&rW$Ma>~p4SJ>uP3HGi7!Jv#_5T>Mvw`DN`<7b0qb$1P4&5JWI)NE{Ni8(?WgqWwS{F0PPE3*SBO z3NBpUJJ|jz{W_4P`waUapWkq4H3a%dt4<#`d^eqeM240`scWb4ez7CogA4=Ht-h|x zv5pXVua#QLb#q%OSip=<(&`si%lc#1m{oeuB2-|3KZc1Aa9Iyxvb3PrTgbfYP8^wQ z#d425elQ0tIMP&Q;Aj1SZxDO)+1S7hiO{{nTulI*glO%=YgoNZtTqCxA z7-gF@{(e;J!zCE;rzj0Aoiz+%-fqAOLnhIN0~PD+>0`7L8||NX0k&F)2xK z%bOAq4p<>*Kkmd;inds=man#xK?0aUm;$Dy--O-;a^lUW+awLvqNRU4NObzb)O?10 zuQEg+0jn`w0Bu+v^q-(r2Kbo{vZ`PDoNV{O`q2jr+yWAnVhW_lVT^+zQ_SfMK###) zaO;<0IUYpbATSaxM_8{BQh>Z6l?XG2ShB{(lNyGuBYlR+`JoO;)Yer8cUYfcn;N6D z^x7-zMQ!J3F~;#X!RU5yH|@YT?v(NDQoGdyw-6YMnj;JZy#-7U6P>O6Vg`h&p;eKM zkvFRLsfWlkk=~{rR*!VkV`@LY0lC{%-lU-CJgOd(zFc(xx?VEap$>*g$mJ7nfW?9& zJM%l?xI`!^39k^=!_!l;2L$7R;1p#I88sW>0G^R8`-7XJmG*;0wfrbIUOurwflVhA z*w@IBq`_K>ja~iUaElU)eg#q=#JPq7*@gMQz7W|LTk-mt3#TWZXr;~l#KiF@T3dtS zf(FlSmf`2j?XUZfSL<{6)(C|0#%x7b5dW}|n+=N7FJ4-I)(GsiG>C~A1I$n<`bmw$ zHH~12s}(J*fnybG+Z+=3y?naRC*KCgWVu-s`3bxqf@;Bg`K1?L!iOAWuM-56KTNyR zKYBVJU!!!f_9Fx(f)fPXpKIKI`S{%Ld>OwN&YbSHIoD0R`dl~h#q+0)IsFgc)ysMQ zbj!UuaZvvWdMIR`gGsqug+~-Nd}tc$BY`!hws56n24oEKzkLb<(vukF+6YH*5%Z*Xld_(%F}CXD;NxSz3KCwZQ|SX@`dKA7;IBD!kO}}pl9leu&4L3acn^| z)RAHy0+Q*3uh93Q4O6B#SBR0qP6|6Npu*fFBBBs>`j0t6Dis>>Kxfxk zw#*@YAN~udXMs(7_#1adL8!+e^Ls+7ciJOh=Vah?R6F*_w~~qRZW_9F)Bem zr0ub5)~7WRL|-8W2FxL5^Z}NG+{Hoe5|BaTLg>Yj$OsN`7TUPeHE4sWgJcHj`96$d z02%-S(L{DkGkgyv{hh2DCV4}hEg-ibxL_=!U^tK&I|E=i37X+R^x-B9NRmA;0Z5eF z;5<;<;7~y9{Vn%nR%={LS>tNQ%_PiJF|rJj#P?f`5d;oyy=^apUGvWb3YuO)es}Wi z%7ZfztjojXiScYo?UMWT2GG87pj&$^7%}x$M7!yZY&*EF?Z()+5)ZKk*^xXwtR`gE9!D<^xBJu~+{ejYg*`t)vs`DtI?OugqlQa$ zaR8XOjssl#NtvTAHVDHyGTj=!fPE+@5R7I{Z)YoQ(i0HJ;5LN6af3I3Ual+t#t`$% z*aDzv2{5F^dc(gAhaa30B9iH=+&p90MuupA`I{i8&=Ns(#Cg%wh}XdxmTGCPc(qFN 
zI3W(VNX89><;XU_dc$vxnjwLoXkDQPq*UkdHl#%ee%I#==cKS^IPP!Z{a-hfd=0Ku z5jhdYAPlUGTI)Y*xa76NxO_^45-I;MBjsuLpNGhXdBzS`ICOwWOcA|^ncdiVSKj=# zEFNQ z=HUm0)kVZ$F%#)O=R1x7K>HFpD{rP6blAk#GEoQK%TFz_CeccamTO}>n5tY`y z0V1rwm(_0p06|gF636_8AeHwbH)t-vNlgX}H%w&~hKd=WezQKMcd%VveAN0=Z2V^g z2MM@(2%&;1aSpMhQ$RaJRrw8IW~r(1BQiaVj##J2P_T`sa{qd83FL8?Z;+Jz&U5Hw{MpFqrW; z%yDsCIp|lTh(Q~-$6>rXfcpp>>@embiaTlV1Y=Lz`WoMH0${4ljobTeL1LXTe*wik z4{sZC&ui8~jwUv4VT3r_WLxk!xd>;4SC{^)6}uCMCo8dPA?OoNXiaZZGGGuWwDZt| z#|pueDiOekqmsj@gB%1_cVK+$Wa-CYBu|KAaM0Em#gkU4tDe4%Xr3*r79#aCb2dCS z(5gh!`p9||&j7+j8O;L?3K2Q!Rl75c)CigUDl|1P1QXYTm~(l8pJmb3pMXk zsOvFyDXy~OQZ+!m6sN=gDcMFX#$M zVnvx=O1f0kLlLGW_~RH%&o@iTl( z6MPCF^5&b+(6^YVN>W$IKbRnyVw{Vi{*}Oj&(K38+K&wUpRj$Nwe9--wyF&}EAzrB z!D$AGx+1pz5Juu3LZ;LV435#=P|Rq2=8n zI*J|-nvYkmH*41`6J`JUre)-pk-(nCCfIo*CE@&vO9BfMlRfXj zya}@vo;(Q<@K7%nB6Fa}#Xa2xjdOem4+aKeIUhmfg3vXhX2p(laRffTN~=p_jQRWkGf5?&7{ zkzGp5LIkNtC7}hS;Hw#ePG&+Pp`9RiI80z9V<7(Gn8j$vtG3!9)^`xzQA_Q->ViD( z!5c^Bg#5uhpHRDCw%y%p#jm*b5wM)C>H*Azhki#f2AZ!p6Uojt(bzI-Pw0{Gvc5>{ z`=&Q72E|qn*Cj5-W*M_%t#)Nh*+_Tb`bFePbwPh8Df4s0VA#9ikE2D9#Q5OZ@Yju1=OZ_p3FCO>98R0@qC&*GeO5nDmO#K$-+I*Smq zwxXU9<<1~m$VU{1F$3YQ`U)%J*#QI-BWLQLXRWxg zy68JB`!oS53Jx=;&5Fn{-ou*S)ylGOgfgKVVkn{V64_TH`+DTm7Yz{=N-A{1$YZ9X z&)~hH>+p0OLl)y82qvg;6rPU}JPkqa8Wq_qjU1LCtnsJN?(T4pOmw4Sg-#G1d%r9a zeYk{9kiEU55!?|IHx1Ut^GV+rf*G;zJlG=;eHA(|b;oJy2ED+^H=INI5KhG5G+HOQ zN}749zEYDbO=G*Fn}8h(I?>}seoo>n7)OI zT*SN6Fnq7q3Z)k526{vIO_&ygI6{d?N9Ote6=)C#wdj@&f|Afw-nG13>%8B(HUo>u z5G*2-w$<*(fy_O@Foe2c3Beu-`?0%03u!xf86#`=f4KkJjJY4eLCb41pbA?d6mAuP z%mtNj)aVWbjle)oO5qqvA+%xpw9$8DN8w*PboXW}(_NxRxkjzs)6L$-qv=$TZhT&IylGyhYzwYjQe@Qp+`6qH0 z@bO|A!A@;z%n|IjC8%>s%SVsE{NN@VrBS@S&vqQLzMD~ZY$la)3y z2FZuf*FMJ-X@)VgegUb-uG~*HjEA)Q!Cyk>K39&$R(is+5Hga`-$+9kJTd0%w0F7# z_5=*6@R}dLId)S@{c&1_joahuEIxz*~7>Gd} zXPI#vRwCdoVB;dH@V4Db8&4QEX6lv$;R0I<;s8!n7!MTuVX65vdxyqGj~K*Z{TkRa z^tBZpiiOq&0?=(|wm;h_7}*S@DRqqAE!N7w{D+_oAs#=1G4w;j>W6TWTCt5`lY+{M zt-k>u8vJgJ3_M*~L>)p5!Fv#kH$MQ4EJf{2A@&eHVd`WgC)v&*Os~in?I{a=aEK{2 
z3?es*xWg>OkO9mQjJV849N2+tUqUM(6W6{3vU3t-XB1=y$kZML*~talpvBRDH5v|* z=uo8S5}m;h4_-S7qJz5-os%FsTmjJnu{PVy+^cOMJTw7=@Cd;f?-HD+u%3E@;K=sK zCkpZBCH|;2IHfC<+$Goq5ssAGfBxBgD|_+Gi@Eptz2`#WS>cDvtv(eLaoCnl6x70l zUvGjMb)Uh7YX@&Mbxq}_E?jE$&C(p>8~>TDA0^U%4?y*g@V`ZbXcQ8rit#{cp#(xP zK7nejBw|i5AU_X*h8#+l0H*ZoEOWjq*BZqUIvyK>9Sc6RCil-~kpYJ(-f4@woDbRO zvWs8kqd2j@$<%ifoaXC3!IY>Vvc1nTPjYwGPeE< zTDB0YMsx1mMIPsV?_Zp{Tux#Ly&s810++}VTxNP(sB3f_>KfQg5}gCG0BK)egj?2Z zeCPva<985rfpBD6*JDiGU*jatq~6yibYbuJe{Qy?H3 zdJ4A=0+*S>reCo?>PQ;Lz`D69lgsyoas3tL!8~Js)SbnNSr8kZ=Y2SI=KCP4q)EDj z&-0W%NC}7sqJ!|AX&&kV3F>Dr(i*xD{0c)bH)Rnlu!V-4Fa;CJz_gr5GTibpOrl|< zm_!;Jcr*L8o2G5Gmw#^^w!93>F8&D%y^(MV%KvvLHm^dC#>!2s(H6@Ji~y^>`*Q=EqCY*<=OO(PHJ#$>hggJV(CYUNkNV8oIGDVge#p+k- zNIb&~O8rJmjxqHT!7l*h`w(r0|2$-LZZbr8NlS97sf5@Nyb0eBxzC=zDAkR9(<^Ii z9g(B&!}g3EEP^6ZL@?vG(A#=!SrRyYRIoT>pA%hr$iXR;&!bLHi?H=u9FeR4B}exq z0umsc@z4?O)B3+M>oBvNjaWm}{~qQO;Qy59SJ|VZe;PN*>lwN6+})h4nLiyRibd5d z7mE^X8aYj0V)wXe!4=q|jYm1NsB3tb5V1!7!-j;V8kJ>m!*Y#(*hBd*ikrKkb)V%c zUj;yRsi8`~h%ZoSDy13tbM!37_>(MmFFgN(u}sI=h8sBQH>MgIi!=_1aFgp4fk-sm z=kjkr%>Vw7^_N8+V8I{(Wd~#P9zm+tAO0VUu9mwB8IdK5;}Auqab1tHzF76D9N11i z{yD)E!FhsTBlsf0?-BeV!QT)tMo{9tB*aJD!N%-NLy4$CBHIWBlSRLWPHqs(25ZEBhE=D1IcLwL^3-Rce4Z8Q$xGbsdRrb<=C00(}T&u WWPkdg17sV0e0*1WM|wQ{!2bsl3U22B literal 0 HcmV?d00001 diff --git a/src/目标识别代码/utils/__pycache__/torch_utils.cpython-36.pyc b/src/目标识别代码/utils/__pycache__/torch_utils.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4cc6132e331ff28f6c8bdf3d31b89027a9a4fa77 GIT binary patch literal 16698 zcma)j3y>VgdEU(I?DO{a4u``%@Hmb@5V$3AhdTfSMFG)g~)+IJ9eyuqMfATXI!$PB(_{i6i0TsDv1@l@~CnqE|n6AML}^z zaw@SLmMhBl{j-~W4#4Gw1B{PJHdwl5jRH;rw-Eb>p`@{X-isCrRNs7aJO=BC_-)b7ujYLD9cs*Q0SR{PWxU}n?}^&Yh!Fth4mbpRzt z)Jy81I)surHLda}nOBdh$8bNY4yzg5kGUhY<9P3^Is&N2)ts6~=?V2Abxa*c$rI{v zbpj>tRR#5=I*F1e)rZw-bq2YUO9uXyI;)<-)BDuR>Vmq6+$r^ndQSZqa;Mct)Md4R z+?i($^}M?Bs-doIT7|QC`k=asr>l5+N?F&8e6cf>xw3w}rOV6Cs$Z>pnR~>ag}fQ& 
z%B`mFuKA5pvs7_4N>a*o+vQe!9eMjo$zR5O{Cw5(b#-yoch&ilU%FD#rFz}1=i_1G z((}(gd*Nz0SYCF^E5%!`_NuLBm@2pGEsaKDdZ}7>i>lKZx^=6nT;(YDR<-OF z{q?r%w3eJhn?IZ$Bi6mVVPrmKVE1sAUQKP7fr)jDE%brehz)k)^2U(dF?7Z^YG%_$ zYHb>UvB@UpLOvG8eJ*{Nt2UQh?KUy=MoYP2ve_zDbV*@M)>0E`THuQF#V3Ew_&-mh z(+ekG{`SB8&2N4Ax8D5iUwZ3Tf9l>p_?@?2|NP5uzW$XrH~-eRfAc5byz`G=UdKA$ zdVJ=@?9rEHYhRA~ko;_Wo!eP%X{`Psj+8?-%+5itQPO_#{_)J7s<+Cex_4%_*FzSlO7K#QiNk@OR$=666_N-Wepm;hQzks!dBz z__3ND;8fqm65<|PvGq|uUP~yHzHX@a8?jB(Gzhh6Tm#L+LMIkFiTT<2x!E}^p8N1XHIwfO6nw<>K#bJ zfnpE6i^b4tHpAg(bj$NDG<}WZatT?TAYAfxNjC-8^d2_mqD6@;*)CbK2b<0S$jczX znweQEWsaEI!P9*U#_naozytaVrYse^8_OErZa=0#v_4^Wmoko}e6Nos^WzoMj{`1o z*S=6Z?)ZSY0wHst-5ICl$@1j>vj|M-YQPxmAYH*y7!K}_t=w<3o^_D z8De3kOD?L)3kO@PetVTfh!iBudVUEV@D#R}f$00OtomUj`NY<~>%AxlbLTwJARkk| zpmj@!F^o+Q5Y!CgJkIq8_c^RAAKXriC=V2_mCXlg7)|Mr_0O6M-FAfG7lZEm=zi=1NSz z1P8Yh;`uwdNXDk%`bGJ2RE(PO#NOJSBY1R zz&?+6Z^Q*UzLL=22;#Mz^l5Hz9EnMz+wa$-9;Ku^h9GguXmVsj0ot6iEg2=9TOQju2MM{AueZ4osKog*F*Z-U_YRxzJpDV7uSWoT`IK0yEoZF0(gw zoiI8xpef6xwp%Pmqo}&N7$M;nz1?bw;o6JUl6UV1_~mzngMwKe5x&uC>POLMX!AW` za=GM{d|%6?!d$b|a8aPo()-P7pLJAj1L@_?B!v*swS zj5%TE@aJ<5c~DYw*woMR?Piz}{#Mkb=1Ly{$QW=4jWRCp1QHyw_)1cbk(UQX1)}j? 
zuo`8&2r(0N)`|sIggRwy)d~9%#OmT)L`de)9f%5VQMnb`O7?ECq%{yBZ|e86@&N|1 zH)7!g?aExmj;mY6r78rfm(U=hC9xFdm@reO{s7820r_!8gSdo@EFr_uZW=1HlJG`- z@CtH>_4js5P0Z(@0y$Cf4KP&SuEjCSg#43t?OLLi+{8gf`+-T3Xl%nzuVnN`yMlN6 zn((59n)P*Oi#fEKjx-z?m6GzFM2R&DxW##>#4+*yXm7{>H!lNhaVkirCo zBnWYFuU)VDVfw;a*=>_O0MTOQhI(*Qv)k8V5&e54M20nLT0q?c*h*dCTaD{w;v;|D zMRFL~$!`cl@QqEQo%OMw5Q8k=s>LKH`-T$xjpRlQMF1Km;LtsB?C|c-o z(_D#rzu^z7K@rY!6wW>YGTr`S5YHN|uk>1jlz#&K{xMqaP}xlrI5)JBKu-w}{bZ0- z!z*^{JAtWotk{4B`45s~2A;rnykYb|5}26d&LH`^@rKAb%0_FvO%0D5qsB&Rt>BNa zxAVp;3t)-qjf}i|XOPAl27*|Sj(XDzDgg$XLT{r%2I;O~pf-y881B2&LwgK0@w%-h z-$*@SY=8ybeDsE)c5h^xr-C>}_C zUv4#LI>|@?n|a%GI&tbeC-sK`9oo%S(*@GCSHoDN?RBh~`EZnqQ1Qj*u08kkr3=OL z7hZhsnG4t6HV=2kpKIPK)vGGfln%|zM0n|RP8|{%*u?=*hei6@szld^cuLz~q|UbH zQQK)RADVMpXRI@@Sn|uuGvokHXOHM&4Tx*ic3t$fXRo~A2uZ*+ILwnB%bD(2hn;)m zz@6Ca+)^igx%zZI(@DrQ=L+O@Ma=DD$G&{!TBc(^bLE9h)i8#Q{60;oS*Mv?;d12K zvR*w|U1ax2hKo@jjgzXj!+5=Q+tp!GyEy&el2*$LlP<`NhW2XnwPCjE75h45bUI^o zw<**?-{7m#ZgaKaQo9OwRotFfSp>moQ&t3<<=G9he#304 z+f<$|5-LG;x!R6&2phUti$sQUYJO0!5+Wn(8gbTS)4ff`9-5y12r8pKl2HRslR=U; zVQTs!^QH>Tb{H$^%7c_?&~xiMtOwRSd|_lzurh%Yz2W!--=m)-n)-n6i-CDvmTEX;RXZN~0vh^o=Bs^J~`HKSEDX zx*%P@92iXy+6qY0Yt}2D2C;<}D?P{6KoBn`0j()(3iPJ~m=6N83;hy^V=731R9gDi zgGAI0WGm4vhdi$T1a0FOGpuC#PtbP;`GJ)b@`+k@6BaQ-t6cA2iN#>G*hp@qHqz_+ z{lQu;NCa_MIfm5m-I&Tvg4}}mjewYDB0HNu1Us9lXS?h^w~>kLY_FN?x7NPu4@2J| zx&5t;0e=UK9%+?3Ww1}IUA9}Y^OQkxBgk@&=<8$th#FP9phIM-Ly!>!+1d!ijGZ8i zy?#bc85qHSh#S!ez7&nXA5~+UCf?VU0A*Buw z_tm99*3aCa&0$w<45ZuABa$D7sDm{CbINTD!Y;^h#)IsV#j%2^rVv*tY7ZRb@hjaF4n zuWf@p3fN|I_FDDQ3)5T0-LXu|dk;GwZp@-i$&&}l>=i%OPofeFc4ho&B zPudKb%#)FX3EC|iUdKK&f7A;T&&n)2S@0v$W@q}@r}L~k?sc*d2Vuog&UC}ecLqtQ z`;XLxFn2&6J&s-?W$t=t`K{1iF4dPr*1rfnF(GhZD;UpW%X2FrIEMSRubsOGg7Yql zC`&zFq-jt?b2K~FAn{T6xM_)*3-l54rMKFEVsRK?p|$>?+Vpn%RK;z&Yi)hzx7i!J zwT;g1?h0;GJ9}VZtXXPS#TY?K&0o~0J0999FdipTi-AfsGVBG3G+}_F9M>BmWfejD^XC@wSGRAT!pVLPz0X z&yFaTU40B?n(QcyD{8r6a;e@b`SU0A9TwUU?Al?nch0EWg>jrESmMdE^xtHGhzZ2L 
zFfNVt7H1raj%yKmva4^os;kvsZmCr)bEZvncX0?j?XsstsojRW5e~SGw!dC1m!KTw zNAxFI7i+a6sUZ?IAQyQM*}Q0X&J)7&mvAkHrY{3&Lt`(7<~n3fw^6V?Sd$BJk9wC5 z2ySlY(z2<47sWr1%lk1TM#_fxkTQ?Prof^T<`mTRS#u295JiiOWs4e5al$emMSj>C zG54Zw3NX{=N#rSrFxBk0k0C_h7%t)Lv{{2%gK`sTYr@@Y_L8-d0HTVh@faYYRZDTT z=+p?a+g%(w4hSgfhX{B-mcs7g6??7zE$RhZ4NHF>`TGV$l$8Nd?uZ7YbI^mtyd4cC zY8576@WI~wuoo(~|H41o_QEf0e<5d|#3e6O$gF(?uL2nXpT3GC459+=Yz3k|LRQ#+ zGb4E$`I8E@MVN|_Cc5QGsF^8TX9ybuYaD_Yl)K(-Wah_7EmPVJW@mW6}N)PxUV|d0?kTV4tQP3le1U zwp*<%``&*7n8*DvfL#%u5f6z|`pYQo+hw_VTvJ{iF>1>%#I1yJ1rm^`Z`=Gs1SaO} zWTPo0Vptr50D+}PGwY548VX_WJ`)UlC@vcliyJ^zi#D$oKKv z#se=ww=>8nE1Kon;Xc|q8$cFlzOY+BEHeWOdNC|d)B^Bq-5h~90+icBM^3D3xV40j zMuxMP{zY`G|27lS#Jt@#C6+>SQOM1M8=ivd15c#C{wBb?ws+dgXeZSF8f9-ClizgYwZ-K10@BgXbuyky&a|#&=r_K^h99Ql7Y>0LuU+R zF`OAbZ^Cwea{_WEtnDyY43HK@Iz;5dlCzVHrW__B)WoiE3?SDH8qPYym+4-Bc(V$h z3|%8ok&jh6!%n5%S}fI_-c{cR!HstL}l0Dl@q;j zn08sV>n}e^7L|Beeyp14iCA(A7hE572+}qQHOQzX=^D8U7YD7GwKPg#sS={b92B1# zY*DqCO2Xb1^AWB9NhMPoMh*^y8th25o#WUD{$U3LS*wkrZWr7KaIl~)OQ2?mP;J!4 zl>yTD>wuV$H%%(2H_bgRA^;zb4Ot-q#|AvobU}5e)@ues-js87Wivc2DuG7?H->SN|Bgqa1 z?NTL-sp_rJ_HJrl|5J7|034%3uy_l8hd6@?;4p18^v|#-jya6eqV1s#b@~{rLt(bp zrs0;Fw97<7ViLv}_#8rOu~|qj!6~TtSV-J1%H6(QL`7n;`?QFs2k>HivF3PC=n4FBDp|U z_XEPgPxp>ekb3QUZf9@G)Q#r zi=aYrTnXs8iCSEZsGUIf1l}247IEu}ur2{@6y!N7RoRXP;^ux^*f@EkUoX z?Nk$YVRZFJYrFh0(zzfF%fh7E4a>qFSQhrevapYq1%Dj!MoMe}7Xzz_0})siYr|4g z(0DDvj-uB+I0EcH%BzR48qOQBje)LRVnXZ^uXXJbJ2wU*Yk-XYJ<|6V{YlJfHxA++ ztj>P*u#EozR%mZ+Uyy*g_Yr>zYh%}L!*5DsGM@-j?UrC zud-+k8Xpw>QYX7j_Lvppb?3k%k5`;Wj?a0{BTwR*KZ>WLd_pM4G34bY)bF0d*YgZ^ z$sBZS{WnL6Xl#yqn7gV@9LmbzlJ~whKrqk=V&ij^rx7JhZ9}{VS4L@*X6w znJsi3BHA1ToB=!W7zpbbbGO)zXDthNDsTIt<~=G3-T_m87m#le+%Ud(R{uBL${ap_ zlm=3`I6Ln(BzF*s1z#ly6+?n*kTR9=NlQW>Y=j}e-*m?u#W6-M9_5yV7*ckC5fza! 
zDt_07_sjB@KV^K%L~va~C0~V*1_Nl%xW}F>2|Y@YgsQX{4GEKAn8k!qp{aL(Q8W4M zf?%5ro!ACFy-I_8NneBn|DRc6b7{gss2vqJ$XjrWYwy1E%h?{oC9Z6o-@UZ3!0&8w z26v1-n9m&$zz5DT`bAhC_@Z0A&$_|Tgo;N40&HH2 zO&b;ceE<|P9*hq~;o|x)q{VV*-qvf#%HVK%SuDU>6pnO%VACA#Flr+fJp!y50X{)B z`vJ=OSVH8c*iLly7LcsHAC#}lj9)nk<|sQ#Begz&U9?t`UKVCjXdjy&chz?M0z^3&}L0s7g=Sj1S?2xtavs&6mO}tx^5pm+M^ufBr z(gz(9?Bg&XllMbH=R@!k&h+p51N1LPZxP)vjnkRA8{0@V1HcRb<~stDdjJN3zBtpV z=8J&J(NC#GE-YZ2T7rjL)^ur0D{pqC@3r?5E*h*msE%$wqVn|vJf4%u2CTl zw9pAA_ghHB;`$WIdqiS?6us6$OhW@Qx_&oOK}lXXZKQ$x9v{(#p%F-KR37`(*nj!= z-~aAelBGJ-fJL}HL7j-rH@c(~Cnly5U2f<>Bw;qve-WKmF0B`mS1(_?0#7RFgn;}r z^c=>nJ$oME9@|P%O}eZR8r;b|-y5vc*&$x%Y6IqG8kJl%OM|>{V^5P$bu#^_ofN^Q zV^7!h>CQyn;c?z#_u%wYwVckV6Af=#hKKmaxjZC+-2J7U7z^XhB7(T`llrH*8lrr` zgpb8xLKYkuhJ|a3I+VWBlP>h%XSFOPS$-IoVakf?92bep0)uf|5A$i~Ld$<{YnD-L z;t;e%u@g4H&oTIILIBB&nBqv$D54aLLdx>#NG5oO^;^pwm$`;!MTe{);)Ln`!aaaP z;m`v(a~j~@VKk&4mXTQ#+*S`k!K%V&_45MP4R=Z|MDgSE*D>vtY8$F9qAJAOB(bN6mn~#qMe%%C=i4n9 z(+~shA`kn&dS`fP3}LN^>_eTV7>9r+o6RCKSAx!`$~z>lT(sP1S!BZxbG0L$Ny9oX z3u}$QqDQ!*neOV%w*7mDQBWrHyxY#(os0b`V-dIWNqhznq zvydnR%T0<&Lob4n;M)%{+C>;REP-MSD40#8v4Wr(WC4Lygcn@@GFVM&#rE!iO;G1b z3n9!%e&7xCgD_?qco0H^TKGYc<^*Oa^0R|kCszy|5=vl(qcH<+hro@n7rYgWopXVq z5n>z|m%T*sZD&t>;~B$BBfx+{kKTi&3A2l!DNqx9&=$mfxs6hoTy66k22Tha)Pa7? 
zGwpFLM|$uW01cxl4A+dI!8ZsDKBvWw-js$ro4H1L!6`Wi-q`Aa^agGdXNIw&06Xkd zXAvcRD2ii=;1J4>`mX8PdD}eH8NW(k>Ax3#y`) zTJ)n>KK(f?q{v3yD*YHsAK~)$)scJ(0_;lLmj#z*h?|cr^I;-dO`0CL9*C1fe*+?G z6{D@XpLrpUTfS$Az$<_j{%F97#0XHD_epy)kRbP4Q+RSvsviej8Dr}INKT%@C5>GJ zA5a{Lm-4>JO|Gs6JgG#o`86iO1f6(<6cP>5Sbsq1hf_-}*%{vri|M@Br}& zir7kAkHNUliE(<6L`Vr>=tKk!a+X4V1%}&V_jx6bb7<>DguT(v1PGj1LQ?ReDV5Lh z-WkVICl=&ncHY-p>#dUmWeX9dK;>r30k4M;Al%+*Ek^!OL@7Xp_k5=_-NRhxX&h}i zkMM480has-!09}N4~YmOYOUjy;8x`uSO(HLo4Jlp6ND5v_pR4-Z#PbNbUq4{i;j7( zoN;rjy|ew%cb@G&+_Aa2k22VfA3%I`W~N_`Qcw~^1@n`p`(Hi_hC(FKPco5xe;;#O zr!B7eMS-SX1s`;muz|bR{SplY5GUS=D9{q?#o{KKcq4)+^p7QQjL5`o^ML(Ml!=-k zMnOM;W^i2PK4pu=o2#XISM;ESpwBVcdcEXCaXo>GAIBx%5~Zw6LbK%E z&+G||uzzNGT66NDf?2R#d?tZYoO%_~bs@22s3B+R=fo9S#K4){0MFblvdQ*tGHG+Y-Jk z^P*^(i@+U>4qwUn`2Njvylvml+`fH#)|Xfegc97IZRyI9QoCAIEqp{1q2v*N>4@;Q zBim*lO};yKiKTgK>Q}z~{@EXV_w2nthN(u#RbLM!){&O-5??>XGCG~170H)zAO%A8 zx^@`(#0~@^zP1;~KL&>yooqP?mZLrjCVumH-_6`NpkSxAT&xp#xC1gP<`W2oci>Fi z1mzi}r_#e6C<$9;jXv0a?_h#?C9U-Z8vBGvapijU;pd2at7PKr^2Jj{?00<;8{7f@2zn zV{8&+wvE`@(UE2whcrhDAES>c@CGNgJE?> z9~hzl;CoVift_4rf{RG9!2I(_fPkd9`e`Q5APJK&T(!#3@`Tj#TTFeKl~ilxTW&?1p51)2sq?(W#FQ_CxsK6%0%c?YUuB(jQU7TK^F7=y zij^13LJNUTGr@=Eh8{qImiw5WV%}cR2l;Ra$@Tm>{XstP^y(^;D@?92`8bj=1^tMk zsuyP973BBK_3EPLGSYk4UYAoz@2S7c3T|cOzT6S{%eQ@M6Ao;9(9cQlJ*Rr)NZ;9e zoLyRNmKk1%@1rtXdHsLUsL<^ho_Nw~#uZ5jEIgAtU z{Ouk|To)aWz}QS$HcFE=naFg2G^wk^ zI>{)RaX+8m-r+@gT43(CzsL9Y+u!5+`@Z+3?(UR^zjt1%i!aS++V@%5{FgxCS^T_< zhNcNErwLsc6+Ne`r;#(%GnR{~re0lgVmwju*%0PBh_8GUDe&W z-PO_DXmw9+Pjzo@udcZdxyGs^5+b=~tmwIi+vQZdysusE5^2@`iB?+8VSjG_D_U)r zskx6Vrryyd=FWd%H!5pZEk|Cd7X6 z2x<7Z=2H$Q>7Ni5J90QgJI;E3_|Ox^Us#m9V?G;1(D2H(cqJ z>a{RYtXJw1ox{FPHPFWmnz{z8-4F!jo% zD;Lk^uRfnUcVEXWL%TFzt4p`_{7arIorbL6C<#|M!o5){x_N)8;X3s>=RmP33I`54 zn4@#%rL(7;lIPUwzEikSC{+rx6?ekHB9JN7=CPu9)%85AxMfXEJUua48(e-YC-z>t@D{UUa1!g74O7Er;8i`bk>l=c*xyMSRsd) z>_QR_eB3yXpv5zhXhu3FNAdiC1^zv-g%&E$;OCt{;%mNM2FY8&8~s?>3_ud@ViWO< zEt+!Lw^nU|+IKZ!-HWa1y5`4L%{A>RNGDFno@v=r6H}8Dli120Ab?xRd)0c~U%>fG 
z9$0JSl~Y${o}amJ%u&tGdb86Sa-@5`S(2{js2x1rfX(OTc>2g2n z9@sLzKyBHlkm+ohFhuNbjN2wa@!rq%PBaEwffB(+Px<3<>iW7@LZ?aApYYrOGltJDyTQIPZJOre8Rcvy3T^{ zH@stq4uPI5G-oG@_3EK!1+T3v`K6+FD7prR)bQS+rw%{;ghGa?iAmhQRGV;=gco+# zn|`B7nnYR>raixa0eBA^3qbWpv8j?*C2Mb-yL3vKfCwH~!Qi9|ztl7NAx2 zWiUhE@MF9WIM}^`RyJ!!psiYKAU47r(Ox%}W6P#*2WDUZA{UtDcsUX1i!u4CpIl9; za#x_Q8G)IP*Z!1{ezKeb>>ad4ke~C@ffmHt@Cte*rfap|ZbNsk7+~5@2}{_wwZJ@! zcQ0EUC9oE4`Mtne&9JnlFLNIDh}ItWPooi4O?wW3eO#+yX5}9C<*4o6J)3&>a{g>n zaM6f60=+$UU;EkLeh!HEU5okS5miXcsH+ben>bS`cwT94Ngl-RN}k4+%f&Mw*}96K zH-Kawl;@4~GA>h~ujoH#T*uXkNFvxInb3PiXiQ9o`mOB;pE_Ov_XLhMPQ1v`IIInR z(HaMRStvBzd@-6!$(8vCAvdV+Mqf2yi)gIkLIK30;)U$KQCM^(YV%&Hg?Ce! z5UzXzZv#o?350rQ9h`HhSHUN-sa~j;w(sZ2Puw;HK$|aXCoF9%%EB2kwIF2<>Hv ztBNZb;7Vu{v5huxB69|mg!t7)y@;xC^sEt@LY>-dK}wvyBz|mP?r6lr3D}mv@-0`^ z^K&JLSFfT*73tj*RFy8JZiIO+92zKhC*q9J8ud~H=<<7#Ygz&8ZM^|$rbv36~2 z5rj{GmI!MZJk>YL7FK7g|M*?AY_G=GwABRqCq{7FU%?eIH9xtSk|Gk_U(-bLs`hGh z<5F0CXaS*`x5D@Levj)+x zR2cWFC1?Di6F4`E&UmG@XM6P=-s2o9I)`pJ`}SR<%;hb(l?sI2LW6kTzGb1MWY$fG zlL`}!O2MD2%PNp`$@AT+%;L>qtW@*El-Hbv_*sPH7{+9?Mm!d{KA}w+2{N4JH7X@P zOrE<{bQ|OixU;chRct@f^yc-?V*EBY7|;zM?4vkHS>Ri>YeiyU=dT+X3I*yMzlT#5 zphhcNBkk)el!FZ4SdFbQr%nw|^Im*821<={>YUnYlC*lwz%$P0GI2MtXe>i85J@~i zFTFPdOO{2-PnEkw7pU^-K$BmF;Lt7uC(^6kYnuFm%4OCxzXzNrDbgHqMPIbMFZ;cs zTS;e`dokiyV7{Y!ZwD6BPJhtxw=wc}(RT;bLLE5QvutB58(btF#6|BSddYzRs$M4- z#0NE$z;%GTy`nGBvCjS={;qaU2|L0>f4ouj4rv40a>DQ9XlJ!IX21`V%PIBl{vZjS z(iOylWHgq%ENpPk1jZT&Qb>0OUF88h2l3n~hIVUW_+3+s+)F&EErb7DKPYxBr)$T# z9{E*&&^N_yF}gy|^i{lJs65R5VJV1-J?tZrfKQ{eSCtaW>40naW)NE)Swp+OYjt-J z$+70@Xn@@S?RaRWNy_osrHhwtJmLK4SAO}kPAlPDUh)^}weeOwlEB74(w&w?_2-x* zGKFTXUUPwXjb<3DHoTTGJ{1m7C(6J4{MF}AUp$vTd+z1u&z!sZk$$i>^nC3`p;8i& z(sW>aJi<+glJk;6LJ?cB=WkY>Z_FyiNq}?@J@QVxM zr2S57w^GZhkk?9$ic;M!Tz<(>qyWp{G>^3mXY3>6pi|PcUab|In4D`_FP2VcQ!QI9 zb5ilSJl1x;Wxja%YN}^`E^Y)x}33!TT{eAzR6c5-CDEiQnw0s&bu9v zG7oyupqvQa`W(U1e%&ur@>RE5mrG$Bkcz1$W(&ndw;pj7icZc$UiwJFE^nz;T)_EIoG|?o zc78*#+R(0&Y{?V$9rW^SB$|=ptbQ7~`^1(a#j(yGU&afkruOHM0p~SH(3F3ni>??R 
zR4URI+*W0L14W1l1F0!2q_#@qDuo#gH86g?3mUva?G$v@rwaCJpw&QY=RuR+G2Zwr z=q)r^-uz`tbOlyEP6<;%z8%1T5a?~lYoLsYz!qtY@vDIy^#RSY+x3vd<#*86!d#Qg z2Ua;&TQvZm0_lP#EiJ<{4Fm`L8x$z)DNYw_2Eo})<^ds1-ovVY&m}aZ|5M&&zA6zN3 z+%1M-V#MwS={bWl1vkko_h2QqKU^NcS`2v>YZ*~c_Uf)Rt+O8V?uC-wD|WqW1}U){ zX}1_f8W($zCYbuWgA8~Uum!oEC>P;PKyJ_*q=Ig&EfHjbp7}Vf9oUbf?fo$1cXe%f zhpx52hk_l%Hue&GBe*9tv3GgL-{0|SdymxG_VAKjzjOV~0D4UJq1InGZ6@P2Tvr^5 z=+mL|7cX7*XmXAbZXB`O-^PV=M$ajI(6709aK6cpL7}Gn5SkE+B9EQy_R`mYT66!3u=|EG}sX7 zvNLw!be3&LyjB{LAgnjS8LN8PRyT?CdXdTx)(*(&BN!!8+^&U&Uk}ZNLS;^|zVpxu zZ3PFGg89tWJ@*4r8~0n^I&~ieMpJBZi{ioB7=EO7%9;qpGeg7Us&@iR}9U4myXiLfC^KQ+()sQECm*bJznN&V^wRdups@VYyW3@s}P5|iw%(rad!^C08 zA$S~_pULjsSc+oEip_;qLqbtdtCc^Cp~CKt9Z}hKWwhoxo>#|h&u(tJo%OUr>sz-0vqz${Vg|tMf0`MH5?` zY7p&RTn0~8Ig(vyG$3 zbNJ1My00eEfU2Gk^(Ba$ZZ&6muqWp%kD8XeO>liPhgRqMYpDK3{Jc*g(Gn&Eh=e{J z+XeP)>${+*Pw0bCh$vj747R3}Kw;>QK|$(;>1-52$zH&W>Bo?#C^AabiQ{fT5$eB0 z5yjPMz6R9>1t-#$6=yG-bH<|Gxl)##1WdGd3GNv^9btO2-$T+=EgxsH6AAL;itD@usUuQSOK)B{MMO;m!o>H$3Va+OYZH6hHo@!d-vqw>Q1?>)T(- zU0Am!zJ&rbFAx>*>?^p)ATi+7MgTps9D~&0Tu-RHiTqOnt$7%Yk=pJ0IN%caCGkt) z*EL6*UqXNgMQLR^DZfaJ;!OZSNSiQY&)$!*r{pAf)ts*>6YdfeDA*i%;R}z3LjIzK;k2DOa38h zw^(O8q?-Hc5wkWtMLfY)5jzlv`q`0w0cBnsgiwJ4Kv(0A26_tNZ;PQ( z(Hty_v|fqw>L!A!*;QpijFzV^*AamH83(7#YvVtqwH(?-=xqWXd7J3|x_J_W`Ub$m zt_UX22Gkuyo%|0>)b{;*=J=8@y?Mazp{9fF{|Wgm*iOE|pThQI$OsBhM79nBI?F&C zD8n#JL0HoR1FEvIXizUe*|-kjGSGM9R$wbJC*J9e1bgxutoR}mQpv2@wkj4veO3{m z?MI)0LIhW(zw|!9+rAcRH?YO)@{dsW!D03H$0xU(I1i0msuog12KIQZ0(-o2m>VjM z(Ke5Tnd)XuVxhFd!>$5~W8+wzz$nK9lXr=Z8o(N>36|H;@47?FIII|5q*0Mx5&5^& zeTqjG?9h(T6+5BSO1n^{Ev?o2B3%yXA2dlv*>I(H=yi_ z6A-c5?m_m6sGg;2$jwj{c1Kp^&V}Xask@r%sZ8aYuqVrAkybSUwH0bDq#SOL447~u z!S)pf{byGEF%wQ%q3*UrQLTT8MvqFEW(+H>vX?}i{k9wg$ygFUbr9G$rA6Y(0i()r ziqK(5!(|!9uX3DDle_U%kft?#HHjJ+vY>DgHbuo~6{e~(yk#(l#r(`_52TpXYAOH%V?#8@2@9RgoFM8jw2Dgb(9$J5&1+m@~@im|da~RhqBvA-A zBQbLgn|%(uBF`g%BvB9oj=0dCm-QwXIt}jUH|(A7>5=S0OAU-C53?hwusq9zJG(KM z{I4wjHzZ+Zi%^qSl8*cio0Vz8DpqKJGH;aJn~|&sD|cZ&jET~X(DbfLUs9Y4yMSGE 
z6XtKg4Pi0j0B+Q3RepgZai*a~L$`-M)be968HMRipQ>A^(PE>-#%vsB8n_@rW44w{ z&cRhESj^cs^Xh5d%%j1cZI@yFMFx>JaiZU{0%1HJ&Hu5F=PGT_{g@6k4)SC z$V?7U{)jyW`QuST-UsCQ`=13z*kZFKB6E8cntrIzl*kB8QrX}m9T7+eo9zR&Api2; z!9L%yF2qGbSs9WdrJe|-QBMTNs3){`_1wYdy>4Z7z*Qg}QhND$P$COI8@jN)YKcD4 z4}`bzh5_icb`Zy9iPKIDJ3|~c1RbT+nufG%O`C(=l_OqnwSJpEK@w_OX?|8iGkFHp08SdIM^e+0ANwSvoty+LTu8t1k zW__FwNj~*T8@IMnSaM~{IPrumvNNfDD|X_@WSF2&zXV|pjZZ6%jryr+b>oqnp2Yoc zvT73gAWQ;0 zWR3(eG!?~z*;>H9kAN+uOM)mCZpqgOM1mh#N&k#FQeyefm{1*(F(!(0Q>l~`#pI8W zfVR@d#nX!zc9Xf^N0LoPT>IwA0 zU|@wDAgsG49Ak#J@Oka?`Wzx^lOkbkI!1@Jd76K_ z7*Uf=&&a>SycEJge=pHkUy$b^wSFJfI9fUnyP<~6!((2DpImwmTwDsChw)SXZCu<@ zT3%LhdAWi++HObzcR>6exWE`4F?`^UZYeT*Fiv6K3oT_5i_VF{yfU*)ZXqqB9wNZp zJkI`OC`)vYc)8}k{nuB&|LgDmkCX6rT5tw6T?iqHi4!qH{X8Q9V0CEJ;aUYBhfo5B z9mBBF#QWe@g;gRS*?%AmqsKz1-;|%_q?E!z=`K<@lIt@X<4k)260zq#;7cEH2})Ww zq1kT>k`TkWg=g?n1_x3-QatFs$G{WSangP*_u&YQ#khy~nAMnoP{&*lgJ%2G_0)Cv z4}l~=gcVHndP-4xV@Ccf42xjML_RM{@So&yV{orL$?Jp#`UtRt;_5HRlLWNoH}R0) zVk30bVx`FdK=)RZ(HBGg#`X)8RPqshMMt=D;ya^<_QB605u^(wIwcBK`EOAD!MDE2 zzyEdegTGq%{+)mQH$OO0Pp# z3Xbs@Amdvhp&`LHEx+M+mDAL>zzXQHza{hs~S39xV2RO_c`rFPg z7)lh6>ab?`!`pR!KScPQ^j4tn5agW%!ORY83=jDfNAmW6{^^Mzx zLt)Q0e0c`o9xViP%`!TRYJUpk93&mO*C=dndr=<7ZAc|h8S@dd3XoJgTbdU1id2Q4{eg-TsD?7*-f2IXwyYC|9}LRiFO@5^X&K;ZhFK& zX?Ew2u}849OmkyX?M<9$`1cQ@qDcPP`Qt{en_hl^f?#wf#4J#}*bqIfh&)1GTTIGn zx~-zJqF9L59#KXOIZGW<*lRj+2KWu35FbjuLvGWCCS^79@R7@Z4!)CEG`&0E8PvLx ziY}&9zUv-hd=ThN6Axl+&k-kk$kiP+g2(V^7*Ju(W`GU8RbXg3ZFcmvRNaZxRf-Kx!9jS(#t0-kaH=@tj2;CT zqGTozLwz8Mbcx^)>W}ueLEHIAKhPSwLSQw1Cpddf)~ik{?Wm`kk<&7j$q@5pkV<=+ ztw9GtrOvGD)}qm2SOj`Ht&ujsgmWJ9?6}`lG2g1G+d+7X+iA}x!ye*Xp5fK_6a*4d z7(?WXd=lFyzXSwQB2v3aKE>KBYBvZ{Hjy)niw$3L!&Ns#)kpUF(2jO92je66LmmZ4 zu1kqr`RJ&&@-ResLv!OrBltX^l>uH;E+z(uGLVwQ#?<#2dr@+5JLE~g6)~qRf21nU z;-?01HGHY1;VZ2;a*mah8s2Eys*?~^xgp;1)N`8&fA z_alMJMau&fkwVUI48l-@;1iTEGE!3spH_sH3ey;-ICmb>H}?P`Ld45x3)CjVnT0?k z2LE<9@O457AI0EE$U310y6{+^MYrwL8VrC=l6-!dJH@N7!5v~_k6MFU7BtZT++zQt z%8D`fTJiMN9)!de3)S zV;u~3PGc+8y@U^R1F+yn08Z;!e0W3 
zxff3r_TvT4OX_@ubM96H=5G}r@4Sd_)j-ov!6$;K8D|W@*@)nN%oi;3CLNIY5bqGq zncs+wm#XMBu5c;}U3tNUM-(UmFAK*7nyGQ|`J0O@VjWov^z|3q4l1sbTnEV3F-|$K zapOz5IedZUMX@#KfjgKTzN+)_y`1NGo4&HSdGqFkuc9>&aBy>?F6R#w8l}9b<5Qam zB@g*?hZK)Hv}yIx;@fLikvAXg{nCfeP5k(WC+~j;MjJ)4wy37WJX&^Q_<84%P)egy zdPT(tDsrGGU)w4}WfcZlhzIcAtlCPM&!~VpsHkM+*QXVYG`FfqI4$MFqvWi-ws$~k zl`%!@g7Ju^lwp`s@WCE5d-8ec78*K%x8G_2OAn*!Jp{?CD!y*mI+wx2JwSAm@F&kGA)O*JR^V-6Vcoyph6BCi1}JV> z*N^bLa6Rxh?WIm!w1nr68@LF5H1|3l#+e#LqYCjM7J-tdB^RE!(sL*j%jY z3E9s%QxWhj zd~`E`3kvg8R0B;-!SK_Cjap@YHSlSuP;j6I^u}(qQ@<4{S*y$Zn# zUpj`}ZjFpGPg4fI6HaW4JC=6m zebW?nZ7Rr193s&PP>>I!nCtHpC+3>9BEvcH0a|J!FKtXS*B%*B4dix=+=^ng9^;7J zQKSQ%V`z&69WWwIQml5U{=P(GEg3&s?D(HhxNGVm0^ zwA9%P=@3j^mi^3)Gr7p*(@Y9XKF8#znfw})uQ5?H?hlze&4eK|DtsaenYhp5-!f75 zZJK3+ztp+Jv9$w-`*Us$zsr z@Fcwo64Q)=|2v-mL?0zb!7^!M)L{63uR-Sxq`X)rHjqfA5?!f5L+?!@yFU?69Zp#I gPiL$|5^Y1NkyJXBOzlNFkQvH6jFCZX5$9+8Ut=?WV*mgE literal 0 HcmV?d00001 diff --git a/src/目标识别代码/utils/__pycache__/torch_utils.cpython-39.pyc b/src/目标识别代码/utils/__pycache__/torch_utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..174fdc872898692d70d3b0a649870bc09b765531 GIT binary patch literal 16748 zcma)jd5|2}d0%%=&$+X+i^XCOTx>xgu|oliT>!iw5F|((q691<03@XmrQz(n-rbp< z3$J?s?4;M_Xekqs3l8myoj4TaU6qxHvC4KSPJG3RlE0jkE-S7|+?8^vEYTF?lr2>% zWf#k3=J)%0W-pLco~eHQ`n~VHe#iH|yG#B383Vt!{_yW-TNe%E2YlH4OX1;ZT;7;v z7|JLZ%2Za(ESPe)3YOesg_zv!f-U!WAujhsA%T0WmaL}=DU)sOTDqPoWF#N2_0_Y5 ztmLg)f1zL6P2#e1y6!A}|uYszpF)rVBS%Bg`n7T&SP?XMnEgDcjYsdlKLSIxqNdO;1V zUC8Cth#Gy>C_JoQRO4zlNfavY7+P3?ojmv-g{I{0qPMot&XAeQT0i6LOp_# z$JC?hF_b*63hJc#FiM_KpHipQQ^=iEFRL@^8RR~!KCLdOk05tSeMWsuT}19lRaDQb z%gCL+V5pC)E3X>r%8FHZ3Qt$nH9TFz)6>ejYUE$(3}h}ZU2E#{g46aZH7|3INHmi- z!(6%9@ZDQ}z0@epyBZ}a<+`nMv$cf0eYxZ>;68e`;`zEV+xA^`w&a&CmvpICb8Go{ zn7H`-g$w7dg#G0Ox4c-q;cBnaY=o(Dv)0sT6sG4YHMgirz8hxeD}GVAUKw4vVdmoV z&z`whymDsdWB1<0FSMBrGcP@V<>J}m)#nQ5?wJ^6n5fJ*n%eC=_k!nYr=^=WD#}%k za&J`1ZqZ+AxlVJ=IaF?|(xJl+#^^lz!r3!U#d8`>-znWFRcfW#nmg%W63A2<^O#Yx 
z?s^_(+=)+5J~25}=9_vy93>;xy}UtWUNf+TI9RWymd(J#g2raHG+<*k+KI~>L2}#B z8Q-Xy4I8PoVg$wtn!Rbxy59!wert@^jkmo z*6rVTc?pYtp!1dGfl&ft=enGRggjKP-3Q& zn4X-Tnw-K?4gdk%TG6XFoBjg!UvtAc<1d}LGV|Qbg_Dj{JL}a>XVlT|^>#(Oo+B%G z`pKxTd{QUTRCA-lzG4sCi^b4tG{V7Wb<^|CHGGZJa}imcAYAfhNjLb`{cI|WCW$QC zE?Kng4UYiQ6UuL8&8(F&hfIACW%n%^Um#2NQDk~crYse^6VqcKH5BOAXU%ST983AA z9}5iMo;Q6PP>DPCRV>?jN%BstyL_pcces4VCa10eArq}iqv5K~@5^F5$;CiTv@|y= zJnv6FIW;+b9A#(EUUqz4s&K-+Bj~kTt)>R)a8%nxE2rcvHkvmZsQ219mzQg9$#b1{ zi};VjtR02g=t17dHf=zl^GWBb>pBa*-||i#JqmiV(4L(vH|s~+HN3XAH5kZFO$tj$_OqXy}lo$3l+kUG}nnYR>W<9@z4tNe*3qW7dp`o5Da$!zHGV`d zUb3mDfTzOnjJmlDR_0sWEEU5>?hlNr-LL{?wWotRMZ2O784lFB2(KPRN0u4Vvx8+8%n&-vNF2x47$fnJH3M&q};(ETgc z>!yBO#Z}@}Be2in-OF+I62xy9`ujnAHOHqlbD85vj2qp4|2*nZO1fhR5~qv?MphkQ zTlU&*J+P_uAji)-fs1ykR8spfsP(F5~b7=8E}?)^!|}hy;QylIgr@h1TR$Xx`es@tIRK za7*B1>+~gb^nY;~og<(n3#FD@EJx$0xVji2-v*`KXomsYvz3yU-xc-?a(Tq@db6RA zqO;KE3&P|=$t(H3mKlY)Myc+qVtJubQ(!A$2IsN?d1kkQCjpW9lV=@gL3twcpJzkS3fj+^Wd06vksn!1@%I+vVASb-*?%_5k1mGFc$Nm znK85GFs_U_X6Eqca}IfsPjk@J+z6c1j4-vLE;San>^}JcPNGr9<#D7qW#$c2kC2H6 z#yoic9dHMJPYpx1wV7nK5`9}CF%#;Uo-*#ND< z5FloM*6f_@mc|w>{g473QSoJ=F2ZLMivQ#ty9yq=W~?UBJ~fV${t}LdZTP9B*+}I6 zq@mJRjj8CYWiaeY3X?AZS(-P!&UBa9mYeN{KaqEujoOm4!4H}ZM;egkcbjOQ zz|O7)guF`Wh*z&TM=m;nbF=Imsde^kFP_7DoTFvu=ndz2tTrZp#Pn!(ok;>uUR6W;aiL7L)3-fML@DralC`)GQ5+Yh|Kd@7IqEIlpY+ z_pzM{BxuEGWqosn!j9!ztFblaVm>wqcl&N~IR^5K4dPa?W~Y0h zjC)@NLiLLXmb)7xVg>f=ruW$(&a~Ga#QU@8`Fm(P2tCdOvJEUJ&{qPiBN-&s;38V- z;Q-oMFBc?73_O9=2+|G(Cg!;_NWNj*6^TaKXpc9l!BJz_SWfvn*xOm-(=%X#>E(>P zdnibQk@N+zARYCkFRKJtW(s``2N|Thg1+ib+(&R9R-=0iHTH(B#_y&cGnT=0t{+yr zm$QvioR9vRzY8+c9<_Ic?C5KF!$@_M>%*rYruMOoN&`NNr~UGjTFwTX!#9K2>ew3U z{qfb^K_sl&t9t^h28hN3Gi{QLPdC!7n+SDon$0t9SKdR z6Q_=IQuDxvcB9#Ffq1QU7^}Cuj&)=@9H!z^eDS%f&pmVTT=DF=7oU6f+|_r?!=2IR z8aGO{ii$L&Lr0E8xao9G9TG{`1^%nUB4ce-qU%FEm@P0)XH)a2?Sz*P%_%*Au|(o@ z-)zY*FB~D!cRG7S_o_oEtF&sOpIx~8f+JJ_)8H^qb}VP&9qX`DF^oZ@6Puix>%=cr zp2=rA37O`UFt{S-cD`d@x_mX$v7f#CLMFdg^LXeq6OzO**OH~`3F9K1cZD?SpK;RC 
zRv51}Z@M~6Y8U4oJke@;VbTRX(a=_HzA?;JyyCjL7#+@7&28u+-;#D4?Yc|dDcm*h z_C&@aNJY!leg*vWIf7;VreCTR>u$ZNm%=0ho-0JP#N`+j2d|QBZLQ4!mRW~=1movtuR*7^V?}s zzvtH6a&Pc80DGs97}i6k19rlH*0jMqA2KtLQ^%lpq(H0=fQ1gC<`kID0hA=5k=Pa) zP{!1+p*=Bj%a7ueP%vtIWR$BGD9NzoK)oQ!+L5xRhX4TadM9q$XVOLz_IXQ<@&dMo zB82T%u<{$MazlGcv!+Pc*U`$OHe^^Cj_UKs-6NuuMfqd}eta3vnhfnPFvxiW5;Nsp z=$b2*2ThY?1qW3OWgrGIWg)dy9BD$*q@*xep#+AiH$Z+@sC|OO`c$z#5f}}S*?Ewo z*R4+nAhqD4JoL+P)fdEzNlKIg`Pl#lg23!TUItN21qqc!A72U*Q5%q}M7JE0xBf2L z#xd43^Fh3tLB6kTAfH&xuEF?4Xc)e7^M)Guk{N?lVmY~-T26Nk`2E#fkO<s;bJ7usx ztX8&LGJMMDJdP1&IXjTWmOrF+sS)T2S?UQZApJphbqMTfCx~IMpPi=+yxoCZ!MA@U zdbYz&sNESalpTvGi%oWVb*iR(G!%z1g7k zpl=6*YR?;XkWqV)_N#qJlWITG6jOgskOQLvo**|6Fa^z9Y3?9k%{h8ki=wm`*^^v-@g8t z06I(F>HOH~@)obLTFZv ziRniVJI4<@kDi#4B)<+^p9W;DRG(EP=hjK*mh*78omZK!H!Eu5)+X3vfNeA;uU0O; zFtJhG9m|Bg_ptM+`t)I^e(ckSoyL>XQ&V}z`CxCjP7`)U2Zc_>CrySF=E+FH1Wl85 zuVbH{KIVmq3o^@27Tkw4*_pWTOrCWoyiOLfA1pJ9Z9IcG2#+qmw*Dcz!A~Fd<(`(nEVjKftXf55RE~QV+yAAhNOP~H-Kb^0NYRZ^s{=k@=ND~Hr(YCM`bkeLs6vGMXjgq2_r5^|Q4CwiX zagWSoiED1dYl08^ZHNLC(4hwFY1VI$blSr7uOj)+%y8WIjp!e{N>KCSWYx9zeaAO? 
z?dacR7XEe=Fy zw+OA+)i+$#)!{ESRU1p2X%Y8boCr@h*i)j^YC-4-``miVUn-VM(1P+q`WILiYqlcM zA(A&B7kQA@Jbj$w_$VPPe-77dX!te$fK5Oq-;nKDaaZ2yB4lgY&ZCH!rTodeG)Q;1<3+xyG5vsNEVj)FiLW;ne9cZ z{pN@{VV*>OP zcC&*+(LY8qrT+l=`v$cx4p9J!2BmYb;Ni>!>%)oKh6xya zu$%AwQsu6{^gEkg`k%JHl!qmWOIG3)WT1V4pn!p2!9fOj0o%3$=#teK#0TekO7b@H zk1Ny`vm@DdqyZQuSsXwx)JHOn(H}@7=6;3t{@A zTUuPY;$D0K?jfderjLd;c}JK8wWTzlk40uZ;;sG~lWn^-1cNlKSP&iNGh$L_=I<_^D)k{+Ia6ZtK8PL9D#Q0^o$ zijox3tcQBeG7tu;Fziu~+RVU$qHHZ%R1NTKU5AVrn0s&@u!tBFPxD5C)%Xn-{2CKd zmAu_GCYC~TR!GqHO;155f;ZA%dJEuPcM268nBPs!xYu&FxItmlK zQG*Fy{M(kKF`DBsF=*Rc@6EZJ2ZyF zhS5jr6sZ=G3rkK?GBR3+i3m}#5~5iqN_861I)j(!ZGa@yhD(N?5$Mjx=R1SWe62ZK zsyV$$=d*=jF(*z!(&~Bu*(RcQaVa4`Ls8ft8IOBMRhT#Tu`p1ZF1BGt*6lLQX$qPv zG+W3vTp+#7gbN9VFBtSa7Vu!nkOgVCZHmk2of-=`GQec zH&G;U*nm-0_(5o^tj2M{^i@sLPjV-@3Zk@TuBK4}GZu6%!e*!&t-?@MjVT+(u$Z4) z9f16jSsg?U-h?U)Ox2yE#+u1LjAS5ltHY?<1z$o8X0(2mR7Y6LZLf|h1Elib`MK(t z^gpiRYp@gZop7pI)c>lZ(EQBmsJv%4(mfbg?|nP*JQTfe%z&rFf+?x@zBnX&TP4?k zD$JqZ1R;q+I2oy#Ygp{_SQUK%31o?qQt-8fiFw^@gQe4YeSX8x`KB33FSOI-_sQN1 z{nh8##aSlno$3F{$Nz;S%xw{CiX!XiKVh|4C*tKAtxxuiihDDX@?hL9&4)2nxe?ml zb?xhKu$w+$7d?T+8}L5F8B_pIX}zxRvL}u-jMKjDp$!%N7z{>Xw%4ZamKwCzh`^Xn z!bk(pLuk!53h6m`0~H?&iJL{a+c%4-NX&MhX7RKQ^TI*4lKLW4836np2H-t}#GpN6 z{cpXKNLcUKiNrg0dYF<&>|sbA4_W%hfINTiXTb}$SZs;NTwX!bzZ5hjGD4*k(|e>W z0_k9}y{{C6U;kI%S~oBxRZ0vEX_b*XB46Z=XcoB-s6n~!;QemD7#?sGNQXo{KMzV2 z$CZFmoLG&kA+-|-pTHZ2q2MNhBo0fGR$`bMk}xBfc*?99Nc+}|IapoU2x(oAhWP<;HZVWzf%#!C%n$o$e(*;jf271{@JwJea3BI}-deWQeyGP5VTaM{ z9vlJoALZ2pSPAEDY`L#%ycmP|;PYMM#m?ov$ZQ~E|A6%TReu~~-@SsPh?PMk&l*R6 z5UaCybzhKx9r!_iKi0)qeSouDq0RZBAcpi%U?ZIf`e1*CO_`1$9L4SZa>|iAHia|a zX3-S1KUf1wo$My*V^Zwdor4cPGVeTiV#;$Kd;-_>F+3gP6Jj`yBQH0hdG{Q?o+nQK z8Itq*mzc#k4gJeZeuc>*5-`T;IT+9*BS4s5D%ERnr9o3lL=ggoL}VBVfY>2%UCW?;VFVW-m%6L?Rt!ehqi&DYC2B8o(k$tvF~!qs5JPB&WV>#W;Y?Z^2@ zA{g32~iEYToA2urtgfj}hkk#Y0~%#r%)UuW`VCNU<$tzTh|;+Os@63|mP zv^Fv68?5;{l6*Ge)c=A|@k)K(>%a<7Y!#5qN1`S-Hj<}=!&sHN{`Z8o=Dh7BHM4=A z1K0qM#sb3{fvRSkr^Wkfn+KulS&+F7K!rPiGAeZ$>!7Ls0kChgfnogQDg6`N%A7#& 
zmqaFoi*xj-Yu`o;7W|bUOw^jHaD^%3bC$$C*oZ-Z^XaxZjDw3@Jj%@p>7%RwyDE6U zir=x}3bVY0*NoT9IRwC{%FkuVARK*x-ET7CN%D3j05 z=yx$LK{#mb724=a`aC4nAEFprOLt*El#Y2g$D448Ywx}zOJS2Lx45!#WcSj}vWjEN z8QeDZfb-u5!MpG1qIZPmfhW3^$k4%Xgn2KFi#06TC(#dn6`fD@4smC%hoCOEh<$$# zdJ>%>9<2HA{O#2reEE(4{SqQydJ-qimTjiaH(IUO z5QHKqmrwXEH9yii(m80kC&2?{D`~dY2e5tC4bwvi%xX+Qh+{5@L5F?(dgeO3g+LG> z!3yN`>lvZ#){Oog^oaPzRI#WkaG4ZwT5zU3&7*_~`iPl>is~=urwN$Q-@;9Qo0ZT? z%e6LR`rKPl##{`|8`}?0TI30hz7B0kosrg z0tHJ$x9h*v0 z4LYn52;9j$-y5jY*&#mXN*z{a+L2r}i6JC}347YX%Q~6$X`K`qrejak^pl;jyu*XM z!R5i=sY^MXVJ8~igbWWcj#GIE0J*KDofr$_&MX7A#`PDt5+7h9a-0ZISeP{z zFic$56 zgWtsaNm#`CMp{P6q*xRpmQP2bfbjc`<&Mi-Lvvo=V?&}$m|kDFZP*hIY{Qo)0p9f~ zGZZU}YY=udA?DDs_QJllAJ1dBc3XqiFf6|gea&(qHWK#?Vk6PZJ45$}bcV@DaPzj< z33xU*$&cakG?HE*EC|prOfZ&;o$8g@D4US7IJ}i(AX5@s96%bVfOP!2`3t=$r}$E> z>xn1U!@L(ND>lD3W~o8kS_S(t^f|cnk4N=?5{SFasNk=fiT}YQm_7 z5O5cH_y!bM2SuGo$a4r9Lrg1T{7|P~!84+oY&MhK3KR5F&EF@9<)VE+%OV?oirX-V z#*B`m%_{}-CbvUB#-+`4mvOS?-#d(gGI?q5$E74k5pF_BMlqNZ;uff1Y?SU+#2g`S zv`u}Cj;iQcs275<$HkzbXQ@A`#2TWUVBW9<3gMvSLF7DLXi{0L2p75jH{e03Mccdm zQMltlKhr`ZGm`JSi|8K2F(+1&3eh#Fh07FaPGAP^3U>krPc9ldB$U7mMq>ut4uKoG zi_yT00&dT_z~l&_4jj#%qcFE~Mu<}w$4VpeKqwoRCd|%)*g$RYL39uj=GIGLvfbkM z44zOus0Zr-&$Pxh9qGYi^fQd7Fk&;b2A?7@@|^ZMdRpr4WacVG2&d#Ax?`gU5+b-% zoFfbv1sLLECJ`ciD2j23;1J1=`mPzAx3yJ*S&>r;~N$E+cX}wwMbs zUdE_&huIl%5J&3Fx^5%t9Y#f0 zNLHT4B`nu9@J*G0@2QfEuE$q5qE6t;9fXAdds1Qlxst9eUGCCwM~7NNtfr;}KR0qA@;k?mT^~7%c*Xh?dbZ$Qr|g zg)k(B`}Vi+?Lh^fykJMjOQ8+A@L7Kp&9;+oa0Gfrip6EF7EitbA4mf0)EU__p~)WL z7Sk7n&~;KVhX)8>(99O&dIXk!PK?uojDk`VmQF<1AZID$7h$^Nb)1!RHtSSuD+(8Qzm zX5cUpT{6PtZJ>?)u5H=>kg@MapQtImInx)=9B$Bui`w%+w?8X&6_tTeF@b-)WOZk zrk+1qYE_D=i4STb)I93X9Tir0bkppk$#>^2F*tAU|F=K-$mF|!^VGfXLa-6iwMAPc z2HLn{xV-a7D6Y{Xy&_S9LKcJqc8xUDUtyJnlmG_~1Ja@2=OnBSS}QsF^89dG!gFF|| zj)1uIVM<-`>nFHh`Xvk(I>^T4xh(n&rzDbtXii^3S-1nS77_{r#4om*4AV!rtdDS( zEz9&OU<lq3wRGrVoQ0)UT9^$YCeMJBk2*+MyA!2@uRgjZi-aurFKgoUeFhEgaL zm#za{WM!Gjd(qx&sQ4vZf@|=c#WNY`ZGD>MI5?u?n-W%rk&q4m<#X$W@IVI!2<&0NpCH*LonI( 
zgUlUaa*4?mCbLXlWAb?>UuN|CH`kPF?!{kqy{Fn(1{E_XJ=1fuQMg0_U zTllr0%zNQjm^xLLAePhI9`6+-wjBki_rlbp4`ZWfj;yuUVu<>nMNb3#3$a{mIF-qy n`Z6PyIhaQFU@Dn8o{Hn2&Baq`)Qx7wGucc!vmfbjZuI{HP6H^r literal 0 HcmV?d00001 diff --git a/src/目标识别代码/utils/activations.py b/src/目标识别代码/utils/activations.py new file mode 100644 index 00000000..084ce8c4 --- /dev/null +++ b/src/目标识别代码/utils/activations.py @@ -0,0 +1,103 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Activation functions +""" + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class SiLU(nn.Module): + # SiLU activation https://arxiv.org/pdf/1606.08415.pdf + @staticmethod + def forward(x): + return x * torch.sigmoid(x) + + +class Hardswish(nn.Module): + # Hard-SiLU activation + @staticmethod + def forward(x): + # return x * F.hardsigmoid(x) # for TorchScript and CoreML + return x * F.hardtanh(x + 3, 0.0, 6.0) / 6.0 # for TorchScript, CoreML and ONNX + + +class Mish(nn.Module): + # Mish activation https://github.com/digantamisra98/Mish + @staticmethod + def forward(x): + return x * F.softplus(x).tanh() + + +class MemoryEfficientMish(nn.Module): + # Mish activation memory-efficient + class F(torch.autograd.Function): + + @staticmethod + def forward(ctx, x): + ctx.save_for_backward(x) + return x.mul(torch.tanh(F.softplus(x))) # x * tanh(ln(1 + exp(x))) + + @staticmethod + def backward(ctx, grad_output): + x = ctx.saved_tensors[0] + sx = torch.sigmoid(x) + fx = F.softplus(x).tanh() + return grad_output * (fx + x * sx * (1 - fx * fx)) + + def forward(self, x): + return self.F.apply(x) + + +class FReLU(nn.Module): + # FReLU activation https://arxiv.org/abs/2007.11824 + def __init__(self, c1, k=3): # ch_in, kernel + super().__init__() + self.conv = nn.Conv2d(c1, c1, k, 1, 1, groups=c1, bias=False) + self.bn = nn.BatchNorm2d(c1) + + def forward(self, x): + return torch.max(x, self.bn(self.conv(x))) + + +class AconC(nn.Module): + r""" ACON activation (activate or not) + AconC: (p1*x-p2*x) * 
sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is a learnable parameter + according to "Activate or Not: Learning Customized Activation" . + """ + + def __init__(self, c1): + super().__init__() + self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1)) + self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1)) + self.beta = nn.Parameter(torch.ones(1, c1, 1, 1)) + + def forward(self, x): + dpx = (self.p1 - self.p2) * x + return dpx * torch.sigmoid(self.beta * dpx) + self.p2 * x + + +class MetaAconC(nn.Module): + r""" ACON activation (activate or not) + MetaAconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is generated by a small network + according to "Activate or Not: Learning Customized Activation" . + """ + + def __init__(self, c1, k=1, s=1, r=16): # ch_in, kernel, stride, r + super().__init__() + c2 = max(r, c1 // r) + self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1)) + self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1)) + self.fc1 = nn.Conv2d(c1, c2, k, s, bias=True) + self.fc2 = nn.Conv2d(c2, c1, k, s, bias=True) + # self.bn1 = nn.BatchNorm2d(c2) + # self.bn2 = nn.BatchNorm2d(c1) + + def forward(self, x): + y = x.mean(dim=2, keepdims=True).mean(dim=3, keepdims=True) + # batch-size 1 bug/instabilities https://github.com/ultralytics/yolov5/issues/2891 + # beta = torch.sigmoid(self.bn2(self.fc2(self.bn1(self.fc1(y))))) # bug/unstable + beta = torch.sigmoid(self.fc2(self.fc1(y))) # bug patch BN layers removed + dpx = (self.p1 - self.p2) * x + return dpx * torch.sigmoid(beta * dpx) + self.p2 * x diff --git a/src/目标识别代码/utils/augmentations.py b/src/目标识别代码/utils/augmentations.py new file mode 100644 index 00000000..498776a6 --- /dev/null +++ b/src/目标识别代码/utils/augmentations.py @@ -0,0 +1,348 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Image augmentation functions +""" + +import math +import random + +import cv2 +import numpy as np +import torchvision.transforms as T +import torchvision.transforms.functional as TF + +from utils.general import LOGGER, check_version, colorstr, 
resample_segments, segment2box +from utils.metrics import bbox_ioa + +IMAGENET_MEAN = 0.485, 0.456, 0.406 # RGB mean +IMAGENET_STD = 0.229, 0.224, 0.225 # RGB standard deviation + + +class Albumentations: + # YOLOv5 Albumentations class (optional, only used if package is installed) + def __init__(self): + self.transform = None + prefix = colorstr('albumentations: ') + try: + import albumentations as A + check_version(A.__version__, '1.0.3', hard=True) # version requirement + + T = [ + A.Blur(p=0.01), + A.MedianBlur(p=0.01), + A.ToGray(p=0.01), + A.CLAHE(p=0.01), + A.RandomBrightnessContrast(p=0.0), + A.RandomGamma(p=0.0), + A.ImageCompression(quality_lower=75, p=0.0)] # transforms + self.transform = A.Compose(T, bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels'])) + + LOGGER.info(prefix + ', '.join(f'{x}'.replace('always_apply=False, ', '') for x in T if x.p)) + except ImportError: # package not installed, skip + pass + except Exception as e: + LOGGER.info(f'{prefix}{e}') + + def __call__(self, im, labels, p=1.0): + if self.transform and random.random() < p: + new = self.transform(image=im, bboxes=labels[:, 1:], class_labels=labels[:, 0]) # transformed + im, labels = new['image'], np.array([[c, *b] for c, b in zip(new['class_labels'], new['bboxes'])]) + return im, labels + + +def normalize(x, mean=IMAGENET_MEAN, std=IMAGENET_STD, inplace=False): + # Denormalize RGB images x per ImageNet stats in BCHW format, i.e. = (x - mean) / std + return TF.normalize(x, mean, std, inplace=inplace) + + +def denormalize(x, mean=IMAGENET_MEAN, std=IMAGENET_STD): + # Denormalize RGB images x per ImageNet stats in BCHW format, i.e. 
= x * std + mean + for i in range(3): + x[:, i] = x[:, i] * std[i] + mean[i] + return x + + +def augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5): + # HSV color-space augmentation + if hgain or sgain or vgain: + r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains + hue, sat, val = cv2.split(cv2.cvtColor(im, cv2.COLOR_BGR2HSV)) + dtype = im.dtype # uint8 + + x = np.arange(0, 256, dtype=r.dtype) + lut_hue = ((x * r[0]) % 180).astype(dtype) + lut_sat = np.clip(x * r[1], 0, 255).astype(dtype) + lut_val = np.clip(x * r[2], 0, 255).astype(dtype) + + im_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))) + cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR, dst=im) # no return needed + + +def hist_equalize(im, clahe=True, bgr=False): + # Equalize histogram on BGR image 'im' with im.shape(n,m,3) and range 0-255 + yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV) + if clahe: + c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8)) + yuv[:, :, 0] = c.apply(yuv[:, :, 0]) + else: + yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0]) # equalize Y channel histogram + return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB) # convert YUV image to RGB + + +def replicate(im, labels): + # Replicate labels + h, w = im.shape[:2] + boxes = labels[:, 1:].astype(int) + x1, y1, x2, y2 = boxes.T + s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels) + for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices + x1b, y1b, x2b, y2b = boxes[i] + bh, bw = y2b - y1b, x2b - x1b + yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y + x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh] + im[y1a:y2a, x1a:x2a] = im[y1b:y2b, x1b:x2b] # im4[ymin:ymax, xmin:xmax] + labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0) + + return im, labels + + +def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32): + # Resize 
and pad image while meeting stride-multiple constraints + shape = im.shape[:2] # current shape [height, width] + if isinstance(new_shape, int): + new_shape = (new_shape, new_shape) + + # Scale ratio (new / old) + r = min(new_shape[0] / shape[0], new_shape[1] / shape[1]) + if not scaleup: # only scale down, do not scale up (for better val mAP) + r = min(r, 1.0) + + # Compute padding + ratio = r, r # width, height ratios + new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r)) + dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding + if auto: # minimum rectangle + dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding + elif scaleFill: # stretch + dw, dh = 0.0, 0.0 + new_unpad = (new_shape[1], new_shape[0]) + ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios + + dw /= 2 # divide padding into 2 sides + dh /= 2 + + if shape[::-1] != new_unpad: # resize + im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR) + top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1)) + left, right = int(round(dw - 0.1)), int(round(dw + 0.1)) + im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border + return (im, ratio, (dw, dh)) + + +def random_perspective(im, + targets=(), + segments=(), + degrees=10, + translate=.1, + scale=.1, + shear=10, + perspective=0.0, + border=(0, 0)): + # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(0.1, 0.1), scale=(0.9, 1.1), shear=(-10, 10)) + # targets = [cls, xyxy] + + height = im.shape[0] + border[0] * 2 # shape(h,w,c) + width = im.shape[1] + border[1] * 2 + + # Center + C = np.eye(3) + C[0, 2] = -im.shape[1] / 2 # x translation (pixels) + C[1, 2] = -im.shape[0] / 2 # y translation (pixels) + + # Perspective + P = np.eye(3) + P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y) + P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x) + + # Rotation and Scale + R = 
np.eye(3) + a = random.uniform(-degrees, degrees) + # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations + s = random.uniform(1 - scale, 1 + scale) + # s = 2 ** random.uniform(-scale, scale) + R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s) + + # Shear + S = np.eye(3) + S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg) + S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg) + + # Translation + T = np.eye(3) + T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels) + T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels) + + # Combined rotation matrix + M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT + if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed + if perspective: + im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114)) + else: # affine + im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114)) + + # Visualize + # import matplotlib.pyplot as plt + # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel() + # ax[0].imshow(im[:, :, ::-1]) # base + # ax[1].imshow(im2[:, :, ::-1]) # warped + + # Transform label coordinates + n = len(targets) + if n: + use_segments = any(x.any() for x in segments) + new = np.zeros((n, 4)) + if use_segments: # warp segments + segments = resample_segments(segments) # upsample + for i, segment in enumerate(segments): + xy = np.ones((len(segment), 3)) + xy[:, :2] = segment + xy = xy @ M.T # transform + xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2] # perspective rescale or affine + + # clip + new[i] = segment2box(xy, width, height) + + else: # warp boxes + xy = np.ones((n * 4, 3)) + xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1 + xy = xy @ M.T # transform + xy = (xy[:, :2] / xy[:, 2:3] if 
perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine + + # create new boxes + x = xy[:, [0, 2, 4, 6]] + y = xy[:, [1, 3, 5, 7]] + new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T + + # clip + new[:, [0, 2]] = new[:, [0, 2]].clip(0, width) + new[:, [1, 3]] = new[:, [1, 3]].clip(0, height) + + # filter candidates + i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10) + targets = targets[i] + targets[:, 1:5] = new[i] + + return im, targets + + +def copy_paste(im, labels, segments, p=0.5): + # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy) + n = len(segments) + if p and n: + h, w, c = im.shape # height, width, channels + im_new = np.zeros(im.shape, np.uint8) + for j in random.sample(range(n), k=round(p * n)): + l, s = labels[j], segments[j] + box = w - l[3], l[2], w - l[1], l[4] + ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area + if (ioa < 0.30).all(): # allow 30% obscuration of existing labels + labels = np.concatenate((labels, [[l[0], *box]]), 0) + segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1)) + cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED) + + result = cv2.bitwise_and(src1=im, src2=im_new) + result = cv2.flip(result, 1) # augment segments (flip left-right) + i = result > 0 # pixels to replace + # i[:, :] = result.max(2).reshape(h, w, 1) # act over ch + im[i] = result[i] # cv2.imwrite('debug.jpg', im) # debug + + return im, labels, segments + + +def cutout(im, labels, p=0.5): + # Applies image cutout augmentation https://arxiv.org/abs/1708.04552 + if random.random() < p: + h, w = im.shape[:2] + scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction + for s in scales: + mask_h = random.randint(1, int(h * s)) # create random masks + mask_w = random.randint(1, int(w * s)) + + # box + xmin = max(0, 
random.randint(0, w) - mask_w // 2) + ymin = max(0, random.randint(0, h) - mask_h // 2) + xmax = min(w, xmin + mask_w) + ymax = min(h, ymin + mask_h) + + # apply random color mask + im[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)] + + # return unobscured labels + if len(labels) and s > 0.03: + box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32) + ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area + labels = labels[ioa < 0.60] # remove >60% obscured labels + + return labels + + +def mixup(im, labels, im2, labels2): + # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf + r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0 + im = (im * r + im2 * (1 - r)).astype(np.uint8) + labels = np.concatenate((labels, labels2), 0) + return im, labels + + +def box_candidates(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n) + # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio + w1, h1 = box1[2] - box1[0], box1[3] - box1[1] + w2, h2 = box2[2] - box2[0], box2[3] - box2[1] + ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio + return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates + + +def classify_albumentations(augment=True, + size=224, + scale=(0.08, 1.0), + hflip=0.5, + vflip=0.0, + jitter=0.4, + mean=IMAGENET_MEAN, + std=IMAGENET_STD, + auto_aug=False): + # YOLOv5 classification Albumentations (optional, only used if package is installed) + prefix = colorstr('albumentations: ') + try: + import albumentations as A + from albumentations.pytorch import ToTensorV2 + check_version(A.__version__, '1.0.3', hard=True) # version requirement + if augment: # Resize and crop + T = [A.RandomResizedCrop(height=size, width=size, scale=scale)] + if auto_aug: + # TODO: implement AugMix, AutoAug & RandAug in albumentation + LOGGER.info(f'{prefix}auto augmentations are 
currently not supported') + else: + if hflip > 0: + T += [A.HorizontalFlip(p=hflip)] + if vflip > 0: + T += [A.VerticalFlip(p=vflip)] + if jitter > 0: + color_jitter = (float(jitter),) * 3 # repeat value for brightness, contrast, satuaration, 0 hue + T += [A.ColorJitter(*color_jitter, 0)] + else: # Use fixed crop for eval set (reproducibility) + T = [A.SmallestMaxSize(max_size=size), A.CenterCrop(height=size, width=size)] + T += [A.Normalize(mean=mean, std=std), ToTensorV2()] # Normalize and convert to Tensor + LOGGER.info(prefix + ', '.join(f'{x}'.replace('always_apply=False, ', '') for x in T if x.p)) + return A.Compose(T) + + except ImportError: # package not installed, skip + pass + except Exception as e: + LOGGER.info(f'{prefix}{e}') + + +def classify_transforms(size=224): + # Transforms to apply if albumentations not installed + return T.Compose([T.ToTensor(), T.Resize(size), T.CenterCrop(size), T.Normalize(IMAGENET_MEAN, IMAGENET_STD)]) + diff --git a/src/目标识别代码/utils/autoanchor.py b/src/目标识别代码/utils/autoanchor.py new file mode 100644 index 00000000..f2222203 --- /dev/null +++ b/src/目标识别代码/utils/autoanchor.py @@ -0,0 +1,170 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +AutoAnchor utils +""" + +import random + +import numpy as np +import torch +import yaml +from tqdm import tqdm + +from utils.general import LOGGER, colorstr + +PREFIX = colorstr('AutoAnchor: ') + + +def check_anchor_order(m): + # Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary + a = m.anchors.prod(-1).mean(-1).view(-1) # mean anchor area per output layer + da = a[-1] - a[0] # delta a + ds = m.stride[-1] - m.stride[0] # delta s + if da and (da.sign() != ds.sign()): # same order + LOGGER.info(f'{PREFIX}Reversing anchor order') + m.anchors[:] = m.anchors.flip(0) + + +def check_anchors(dataset, model, thr=4.0, imgsz=640): + # Check anchor fit to data, recompute if necessary + m = model.module.model[-1] if hasattr(model, 'module') else 
model.model[-1] # Detect() + shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True) + scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1)) # augment scale + wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float() # wh + + def metric(k): # compute metric + r = wh[:, None] / k[None] + x = torch.min(r, 1 / r).min(2)[0] # ratio metric + best = x.max(1)[0] # best_x + aat = (x > 1 / thr).float().sum(1).mean() # anchors above threshold + bpr = (best > 1 / thr).float().mean() # best possible recall + return bpr, aat + + stride = m.stride.to(m.anchors.device).view(-1, 1, 1) # model strides + anchors = m.anchors.clone() * stride # current anchors + bpr, aat = metric(anchors.cpu().view(-1, 2)) + s = f'\n{PREFIX}{aat:.2f} anchors/target, {bpr:.3f} Best Possible Recall (BPR). ' + if bpr > 0.98: # threshold to recompute + LOGGER.info(f'{s}Current anchors are a good fit to dataset ✅') + else: + LOGGER.info(f'{s}Anchors are a poor fit to dataset ⚠️, attempting to improve...') + na = m.anchors.numel() // 2 # number of anchors + try: + anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False) + except Exception as e: + LOGGER.info(f'{PREFIX}ERROR: {e}') + new_bpr = metric(anchors)[0] + if new_bpr > bpr: # replace anchors + anchors = torch.tensor(anchors, device=m.anchors.device).type_as(m.anchors) + m.anchors[:] = anchors.clone().view_as(m.anchors) + check_anchor_order(m) # must be in pixel-space (not grid-space) + m.anchors /= stride + s = f'{PREFIX}Done ✅ (optional: update model *.yaml to use these anchors in the future)' + else: + s = f'{PREFIX}Done ⚠️ (original anchors better than new anchors, proceeding with original anchors)' + LOGGER.info(s) + + +def kmean_anchors(dataset='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True): + """ Creates kmeans-evolved anchors from training dataset + + Arguments: + dataset: path to data.yaml, or a loaded dataset + 
n: number of anchors + img_size: image size used for training + thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0 + gen: generations to evolve anchors using genetic algorithm + verbose: print all results + + Return: + k: kmeans evolved anchors + + Usage: + from utils.autoanchor import *; _ = kmean_anchors() + """ + from scipy.cluster.vq import kmeans + + npr = np.random + thr = 1 / thr + + def metric(k, wh): # compute metrics + r = wh[:, None] / k[None] + x = torch.min(r, 1 / r).min(2)[0] # ratio metric + # x = wh_iou(wh, torch.tensor(k)) # iou metric + return x, x.max(1)[0] # x, best_x + + def anchor_fitness(k): # mutation fitness + _, best = metric(torch.tensor(k, dtype=torch.float32), wh) + return (best * (best > thr).float()).mean() # fitness + + def print_results(k, verbose=True): + k = k[np.argsort(k.prod(1))] # sort small to large + x, best = metric(k, wh0) + bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n # best possible recall, anch > thr + s = f'{PREFIX}thr={thr:.2f}: {bpr:.4f} best possible recall, {aat:.2f} anchors past thr\n' \ + f'{PREFIX}n={n}, img_size={img_size}, metric_all={x.mean():.3f}/{best.mean():.3f}-mean/best, ' \ + f'past_thr={x[x > thr].mean():.3f}-mean: ' + for x in k: + s += '%i,%i, ' % (round(x[0]), round(x[1])) + if verbose: + LOGGER.info(s[:-2]) + return k + + if isinstance(dataset, str): # *.yaml file + with open(dataset, errors='ignore') as f: + data_dict = yaml.safe_load(f) # model dict + from utils.dataloaders import LoadImagesAndLabels + dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True) + + # Get label wh + shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True) + wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)]) # wh + + # Filter + i = (wh0 < 3.0).any(1).sum() + if i: + LOGGER.info(f'{PREFIX}WARNING: Extremely small objects found: {i} of {len(wh0)} labels are < 3 pixels in size') + wh = 
wh0[(wh0 >= 2.0).any(1)] # filter > 2 pixels + # wh = wh * (npr.rand(wh.shape[0], 1) * 0.9 + 0.1) # multiply by random scale 0-1 + + # Kmeans init + try: + LOGGER.info(f'{PREFIX}Running kmeans for {n} anchors on {len(wh)} points...') + assert n <= len(wh) # apply overdetermined constraint + s = wh.std(0) # sigmas for whitening + k = kmeans(wh / s, n, iter=30)[0] * s # points + assert n == len(k) # kmeans may return fewer points than requested if wh is insufficient or too similar + except Exception: + LOGGER.warning(f'{PREFIX}WARNING: switching strategies from kmeans to random init') + k = np.sort(npr.rand(n * 2)).reshape(n, 2) * img_size # random init + wh, wh0 = (torch.tensor(x, dtype=torch.float32) for x in (wh, wh0)) + k = print_results(k, verbose=False) + + # Plot + # k, d = [None] * 20, [None] * 20 + # for i in tqdm(range(1, 21)): + # k[i-1], d[i-1] = kmeans(wh / s, i) # points, mean distance + # fig, ax = plt.subplots(1, 2, figsize=(14, 7), tight_layout=True) + # ax = ax.ravel() + # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.') + # fig, ax = plt.subplots(1, 2, figsize=(14, 7)) # plot wh + # ax[0].hist(wh[wh[:, 0]<100, 0],400) + # ax[1].hist(wh[wh[:, 1]<100, 1],400) + # fig.savefig('wh.png', dpi=200) + + # Evolve + f, sh, mp, s = anchor_fitness(k), k.shape, 0.9, 0.1 # fitness, generations, mutation prob, sigma + pbar = tqdm(range(gen), bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar + for _ in pbar: + v = np.ones(sh) + while (v == 1).all(): # mutate until a change occurs (prevent duplicates) + v = ((npr.random(sh) < mp) * random.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0) + kg = (k.copy() * v).clip(min=2.0) + fg = anchor_fitness(kg) + if fg > f: + f, k = fg, kg.copy() + pbar.desc = f'{PREFIX}Evolving anchors with Genetic Algorithm: fitness = {f:.4f}' + if verbose: + print_results(k, verbose) + + return print_results(k) diff --git a/src/目标识别代码/utils/autobatch.py b/src/目标识别代码/utils/autobatch.py new file mode 100644 index 
00000000..c231d24c --- /dev/null +++ b/src/目标识别代码/utils/autobatch.py @@ -0,0 +1,66 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Auto-batch utils +""" + +from copy import deepcopy + +import numpy as np +import torch + +from utils.general import LOGGER, colorstr +from utils.torch_utils import profile + + +def check_train_batch_size(model, imgsz=640, amp=True): + # Check YOLOv5 training batch size + with torch.cuda.amp.autocast(amp): + return autobatch(deepcopy(model).train(), imgsz) # compute optimal batch size + + +def autobatch(model, imgsz=640, fraction=0.9, batch_size=16): + # Automatically estimate best batch size to use `fraction` of available CUDA memory + # Usage: + # import torch + # from utils.autobatch import autobatch + # model = torch.hub.load('ultralytics/yolov5', 'yolov5s', autoshape=False) + # print(autobatch(model)) + + # Check device + prefix = colorstr('AutoBatch: ') + LOGGER.info(f'{prefix}Computing optimal batch size for --imgsz {imgsz}') + device = next(model.parameters()).device # get model device + if device.type == 'cpu': + LOGGER.info(f'{prefix}CUDA not detected, using default CPU batch-size {batch_size}') + return batch_size + + # Inspect CUDA memory + gb = 1 << 30 # bytes to GiB (1024 ** 3) + d = str(device).upper() # 'CUDA:0' + properties = torch.cuda.get_device_properties(device) # device properties + t = properties.total_memory / gb # GiB total + r = torch.cuda.memory_reserved(device) / gb # GiB reserved + a = torch.cuda.memory_allocated(device) / gb # GiB allocated + f = t - (r + a) # GiB free + LOGGER.info(f'{prefix}{d} ({properties.name}) {t:.2f}G total, {r:.2f}G reserved, {a:.2f}G allocated, {f:.2f}G free') + + # Profile batch sizes + batch_sizes = [1, 2, 4, 8, 16] + try: + img = [torch.zeros(b, 3, imgsz, imgsz) for b in batch_sizes] + results = profile(img, model, n=3, device=device) + except Exception as e: + LOGGER.warning(f'{prefix}{e}') + + # Fit a solution + y = [x[2] for x in results if x] # memory [2] + p = 
np.polyfit(batch_sizes[:len(y)], y, deg=1) # first degree polynomial fit + b = int((f * fraction - p[1]) / p[0]) # y intercept (optimal batch size) + if None in results: # some sizes failed + i = results.index(None) # first fail index + if b >= batch_sizes[i]: # y intercept above failure point + b = batch_sizes[max(i - 1, 0)] # select prior safe point + + fraction = np.polyval(p, b) / t # actual fraction predicted + LOGGER.info(f'{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%) ✅') + return b diff --git a/src/目标识别代码/utils/aws/__init__.py b/src/目标识别代码/utils/aws/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/目标识别代码/utils/aws/mime.sh b/src/目标识别代码/utils/aws/mime.sh new file mode 100644 index 00000000..c319a83c --- /dev/null +++ b/src/目标识别代码/utils/aws/mime.sh @@ -0,0 +1,26 @@ +# AWS EC2 instance startup 'MIME' script https://aws.amazon.com/premiumsupport/knowledge-center/execute-user-data-ec2/ +# This script will run on every instance restart, not only on first start +# --- DO NOT COPY ABOVE COMMENTS WHEN PASTING INTO USERDATA --- + +Content-Type: multipart/mixed; boundary="//" +MIME-Version: 1.0 + +--// +Content-Type: text/cloud-config; charset="us-ascii" +MIME-Version: 1.0 +Content-Transfer-Encoding: 7bit +Content-Disposition: attachment; filename="cloud-config.txt" + +#cloud-config +cloud_final_modules: +- [scripts-user, always] + +--// +Content-Type: text/x-shellscript; charset="us-ascii" +MIME-Version: 1.0 +Content-Transfer-Encoding: 7bit +Content-Disposition: attachment; filename="userdata.txt" + +#!/bin/bash +# --- paste contents of userdata.sh here --- +--// diff --git a/src/目标识别代码/utils/aws/resume.py b/src/目标识别代码/utils/aws/resume.py new file mode 100644 index 00000000..b21731c9 --- /dev/null +++ b/src/目标识别代码/utils/aws/resume.py @@ -0,0 +1,40 @@ +# Resume all interrupted trainings in yolov5/ dir including DDP trainings +# Usage: $ python utils/aws/resume.py + +import os +import sys +from 
pathlib import Path + +import torch +import yaml + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[2] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH + +port = 0 # --master_port +path = Path('').resolve() +for last in path.rglob('*/**/last.pt'): + ckpt = torch.load(last) + if ckpt['optimizer'] is None: + continue + + # Load opt.yaml + with open(last.parent.parent / 'opt.yaml', errors='ignore') as f: + opt = yaml.safe_load(f) + + # Get device count + d = opt['device'].split(',') # devices + nd = len(d) # number of devices + ddp = nd > 1 or (nd == 0 and torch.cuda.device_count() > 1) # distributed data parallel + + if ddp: # multi-GPU + port += 1 + cmd = f'python -m torch.distributed.run --nproc_per_node {nd} --master_port {port} train.py --resume {last}' + else: # single-GPU + cmd = f'python train.py --resume {last}' + + cmd += ' > /dev/null 2>&1 &' # redirect output to dev/null and run in daemon thread + print(cmd) + os.system(cmd) diff --git a/src/目标识别代码/utils/aws/userdata.sh b/src/目标识别代码/utils/aws/userdata.sh new file mode 100644 index 00000000..5fc1332a --- /dev/null +++ b/src/目标识别代码/utils/aws/userdata.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# AWS EC2 instance startup script https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html +# This script will run only once on first instance start (for a re-start script see mime.sh) +# /home/ubuntu (ubuntu) or /home/ec2-user (amazon-linux) is working dir +# Use >300 GB SSD + +cd home/ubuntu +if [ ! -d yolov5 ]; then + echo "Running first-time script." # install dependencies, download COCO, pull Docker + git clone https://github.com/ultralytics/yolov5 -b master && sudo chmod -R 777 yolov5 + cd yolov5 + bash data/scripts/get_coco.sh && echo "COCO done." & + sudo docker pull ultralytics/yolov5:latest && echo "Docker done." & + python -m pip install --upgrade pip && pip install -r requirements.txt && python detect.py && echo "Requirements done." 
& + wait && echo "All tasks done." # finish background tasks +else + echo "Running re-start script." # resume interrupted runs + i=0 + list=$(sudo docker ps -qa) # container list i.e. $'one\ntwo\nthree\nfour' + while IFS= read -r id; do + ((i++)) + echo "restarting container $i: $id" + sudo docker start $id + # sudo docker exec -it $id python train.py --resume # single-GPU + sudo docker exec -d $id python utils/aws/resume.py # multi-scenario + done <<<"$list" +fi diff --git a/src/目标识别代码/utils/benchmarks.py b/src/目标识别代码/utils/benchmarks.py new file mode 100644 index 00000000..d412653c --- /dev/null +++ b/src/目标识别代码/utils/benchmarks.py @@ -0,0 +1,157 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Run YOLOv5 benchmarks on all supported export formats + +Format | `export.py --include` | Model +--- | --- | --- +PyTorch | - | yolov5s.pt +TorchScript | `torchscript` | yolov5s.torchscript +ONNX | `onnx` | yolov5s.onnx +OpenVINO | `openvino` | yolov5s_openvino_model/ +TensorRT | `engine` | yolov5s.engine +CoreML | `coreml` | yolov5s.mlmodel +TensorFlow SavedModel | `saved_model` | yolov5s_saved_model/ +TensorFlow GraphDef | `pb` | yolov5s.pb +TensorFlow Lite | `tflite` | yolov5s.tflite +TensorFlow Edge TPU | `edgetpu` | yolov5s_edgetpu.tflite +TensorFlow.js | `tfjs` | yolov5s_web_model/ + +Requirements: + $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu # CPU + $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow # GPU + $ pip install -U nvidia-tensorrt --index-url https://pypi.ngc.nvidia.com # TensorRT + +Usage: + $ python utils/benchmarks.py --weights yolov5s.pt --img 640 +""" + +import argparse +import platform +import sys +import time +from pathlib import Path + +import pandas as pd + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[1] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +# ROOT = 
ROOT.relative_to(Path.cwd()) # relative + +import export +import val +from utils import notebook_init +from utils.general import LOGGER, check_yaml, file_size, print_args +from utils.torch_utils import select_device + + +def run( + weights=ROOT / 'yolov5s.pt', # weights path + imgsz=640, # inference size (pixels) + batch_size=1, # batch size + data=ROOT / 'data/coco128.yaml', # dataset.yaml path + device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu + half=False, # use FP16 half-precision inference + test=False, # test exports only + pt_only=False, # test PyTorch only + hard_fail=False, # throw error on benchmark failure +): + y, t = [], time.time() + device = select_device(device) + for i, (name, f, suffix, cpu, gpu) in export.export_formats().iterrows(): # index, (name, file, suffix, CPU, GPU) + try: + assert i not in (9, 10), 'inference not supported' # Edge TPU and TF.js are unsupported + assert i != 5 or platform.system() == 'Darwin', 'inference only supported on macOS>=10.13' # CoreML + if 'cpu' in device.type: + assert cpu, 'inference not supported on CPU' + if 'cuda' in device.type: + assert gpu, 'inference not supported on GPU' + + # Export + if f == '-': + w = weights # PyTorch format + else: + w = export.run(weights=weights, imgsz=[imgsz], include=[f], device=device, half=half)[-1] # all others + assert suffix in str(w), 'export failed' + + # Validate + result = val.run(data, w, batch_size, imgsz, plots=False, device=device, task='benchmark', half=half) + metrics = result[0] # metrics (mp, mr, map50, map, *losses(box, obj, cls)) + speeds = result[2] # times (preprocess, inference, postprocess) + y.append([name, round(file_size(w), 1), round(metrics[3], 4), round(speeds[1], 2)]) # MB, mAP, t_inference + except Exception as e: + if hard_fail: + assert type(e) is AssertionError, f'Benchmark --hard-fail for {name}: {e}' + LOGGER.warning(f'WARNING: Benchmark failure for {name}: {e}') + y.append([name, None, None, None]) # mAP, t_inference + if pt_only and i == 
0: + break # break after PyTorch + + # Print results + LOGGER.info('\n') + parse_opt() + notebook_init() # print system info + c = ['Format', 'Size (MB)', 'mAP@0.5:0.95', 'Inference time (ms)'] if map else ['Format', 'Export', '', ''] + py = pd.DataFrame(y, columns=c) + LOGGER.info(f'\nBenchmarks complete ({time.time() - t:.2f}s)') + LOGGER.info(str(py if map else py.iloc[:, :2])) + return py + + +def test( + weights=ROOT / 'yolov5s.pt', # weights path + imgsz=640, # inference size (pixels) + batch_size=1, # batch size + data=ROOT / 'data/coco128.yaml', # dataset.yaml path + device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu + half=False, # use FP16 half-precision inference + test=False, # test exports only + pt_only=False, # test PyTorch only + hard_fail=False, # throw error on benchmark failure +): + y, t = [], time.time() + device = select_device(device) + for i, (name, f, suffix, gpu) in export.export_formats().iterrows(): # index, (name, file, suffix, gpu-capable) + try: + w = weights if f == '-' else \ + export.run(weights=weights, imgsz=[imgsz], include=[f], device=device, half=half)[-1] # weights + assert suffix in str(w), 'export failed' + y.append([name, True]) + except Exception: + y.append([name, False]) # mAP, t_inference + + # Print results + LOGGER.info('\n') + parse_opt() + notebook_init() # print system info + py = pd.DataFrame(y, columns=['Format', 'Export']) + LOGGER.info(f'\nExports complete ({time.time() - t:.2f}s)') + LOGGER.info(str(py)) + return py + + +def parse_opt(): + parser = argparse.ArgumentParser() + parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='weights path') + parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)') + parser.add_argument('--batch-size', type=int, default=1, help='batch size') + parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') + parser.add_argument('--device', default='', 
help='cuda device, i.e. 0 or 0,1,2,3 or cpu') + parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') + parser.add_argument('--test', action='store_true', help='test exports only') + parser.add_argument('--pt-only', action='store_true', help='test PyTorch only') + parser.add_argument('--hard-fail', action='store_true', help='throw error on benchmark failure') + opt = parser.parse_args() + opt.data = check_yaml(opt.data) # check YAML + print_args(vars(opt)) + return opt + + +def main(opt): + test(**vars(opt)) if opt.test else run(**vars(opt)) + + +if __name__ == "__main__": + opt = parse_opt() + main(opt) diff --git a/src/目标识别代码/utils/callbacks.py b/src/目标识别代码/utils/callbacks.py new file mode 100644 index 00000000..2b32df0b --- /dev/null +++ b/src/目标识别代码/utils/callbacks.py @@ -0,0 +1,71 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Callback utils +""" + + +class Callbacks: + """" + Handles all registered callbacks for YOLOv5 Hooks + """ + + def __init__(self): + # Define the available callbacks + self._callbacks = { + 'on_pretrain_routine_start': [], + 'on_pretrain_routine_end': [], + 'on_train_start': [], + 'on_train_epoch_start': [], + 'on_train_batch_start': [], + 'optimizer_step': [], + 'on_before_zero_grad': [], + 'on_train_batch_end': [], + 'on_train_epoch_end': [], + 'on_val_start': [], + 'on_val_batch_start': [], + 'on_val_image_end': [], + 'on_val_batch_end': [], + 'on_val_end': [], + 'on_fit_epoch_end': [], # fit = train + val + 'on_model_save': [], + 'on_train_end': [], + 'on_params_update': [], + 'teardown': [],} + self.stop_training = False # set True to interrupt training + + def register_action(self, hook, name='', callback=None): + """ + Register a new action to a callback hook + + Args: + hook: The callback hook name to register the action to + name: The name of the action for later reference + callback: The callback to fire + """ + assert hook in self._callbacks, f"hook '{hook}' not found in callbacks 
{self._callbacks}" + assert callable(callback), f"callback '{callback}' is not callable" + self._callbacks[hook].append({'name': name, 'callback': callback}) + + def get_registered_actions(self, hook=None): + """" + Returns all the registered actions by callback hook + + Args: + hook: The name of the hook to check, defaults to all + """ + return self._callbacks[hook] if hook else self._callbacks + + def run(self, hook, *args, **kwargs): + """ + Loop through the registered actions and fire all callbacks + + Args: + hook: The name of the hook to check, defaults to all + args: Arguments to receive from YOLOv5 + kwargs: Keyword Arguments to receive from YOLOv5 + """ + + assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}" + + for logger in self._callbacks[hook]: + logger['callback'](*args, **kwargs) diff --git a/src/目标识别代码/utils/dataloaders.py b/src/目标识别代码/utils/dataloaders.py new file mode 100644 index 00000000..2c04040b --- /dev/null +++ b/src/目标识别代码/utils/dataloaders.py @@ -0,0 +1,1156 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Dataloaders and dataset utils +""" + +import contextlib +import glob +import hashlib +import json +import math +import os +import random +import shutil +import time +from itertools import repeat +from multiprocessing.pool import Pool, ThreadPool +from pathlib import Path +from threading import Thread +from urllib.parse import urlparse +from zipfile import ZipFile + +import numpy as np +import torch +import torch.nn.functional as F +import torchvision +import yaml +from PIL import ExifTags, Image, ImageOps +from torch.utils.data import DataLoader, Dataset, dataloader, distributed +from tqdm import tqdm + +from utils.augmentations import (Albumentations, augment_hsv, classify_albumentations, classify_transforms, copy_paste, + letterbox, mixup, random_perspective) +from utils.general import (DATASETS_DIR, LOGGER, NUM_THREADS, check_dataset, check_requirements, check_yaml, clean_str, + cv2, is_colab, 
is_kaggle, segments2boxes, xyn2xy, xywh2xyxy, xywhn2xyxy, xyxy2xywhn) +from utils.torch_utils import torch_distributed_zero_first + +# Parameters +HELP_URL = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' +IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp' # include image suffixes +VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes +BAR_FORMAT = '{l_bar}{bar:10}{r_bar}{bar:-10b}' # tqdm bar format +LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html + +# Get orientation exif tag +for orientation in ExifTags.TAGS.keys(): + if ExifTags.TAGS[orientation] == 'Orientation': + break + + +def get_hash(paths): + # Returns a single hash value of a list of paths (files or dirs) + size = sum(os.path.getsize(p) for p in paths if os.path.exists(p)) # sizes + h = hashlib.md5(str(size).encode()) # hash sizes + h.update(''.join(paths).encode()) # hash paths + return h.hexdigest() # return hash + + +def exif_size(img): + # Returns exif-corrected PIL size + s = img.size # (width, height) + with contextlib.suppress(Exception): + rotation = dict(img._getexif().items())[orientation] + if rotation in [6, 8]: # rotation 270 or 90 + s = (s[1], s[0]) + return s + + +def exif_transpose(image): + """ + Transpose a PIL image accordingly if it has an EXIF Orientation tag. + Inplace version of https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py exif_transpose() + + :param image: The image to transpose. + :return: An image. 
+ """ + exif = image.getexif() + orientation = exif.get(0x0112, 1) # default 1 + if orientation > 1: + method = { + 2: Image.FLIP_LEFT_RIGHT, + 3: Image.ROTATE_180, + 4: Image.FLIP_TOP_BOTTOM, + 5: Image.TRANSPOSE, + 6: Image.ROTATE_270, + 7: Image.TRANSVERSE, + 8: Image.ROTATE_90,}.get(orientation) + if method is not None: + image = image.transpose(method) + del exif[0x0112] + image.info["exif"] = exif.tobytes() + return image + + +def seed_worker(worker_id): + # Set dataloader worker seed https://pytorch.org/docs/stable/notes/randomness.html#dataloader + worker_seed = torch.initial_seed() % 2 ** 32 + np.random.seed(worker_seed) + random.seed(worker_seed) + + +def create_dataloader(path, + imgsz, + batch_size, + stride, + single_cls=False, + hyp=None, + augment=False, + cache=False, + pad=0.0, + rect=False, + rank=-1, + workers=8, + image_weights=False, + quad=False, + prefix='', + shuffle=False): + if rect and shuffle: + LOGGER.warning('WARNING: --rect is incompatible with DataLoader shuffle, setting shuffle=False') + shuffle = False + with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP + dataset = LoadImagesAndLabels( + path, + imgsz, + batch_size, + augment=augment, # augmentation + hyp=hyp, # hyperparameters + rect=rect, # rectangular batches + cache_images=cache, + single_cls=single_cls, + stride=int(stride), + pad=pad, + image_weights=image_weights, + prefix=prefix) + + batch_size = min(batch_size, len(dataset)) + nd = torch.cuda.device_count() # number of CUDA devices + nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) # number of workers + sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) + loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates + generator = torch.Generator() + generator.manual_seed(0) + return loader(dataset, + batch_size=batch_size, + shuffle=shuffle and sampler is None, + 
num_workers=nw, + sampler=sampler, + pin_memory=True, + collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn, + worker_init_fn=seed_worker, + generator=generator), dataset + + +class InfiniteDataLoader(dataloader.DataLoader): + """ Dataloader that reuses workers + + Uses same syntax as vanilla DataLoader + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler)) + self.iterator = super().__iter__() + + def __len__(self): + return len(self.batch_sampler.sampler) + + def __iter__(self): + for _ in range(len(self)): + yield next(self.iterator) + + +class _RepeatSampler: + """ Sampler that repeats forever + + Args: + sampler (Sampler) + """ + + def __init__(self, sampler): + self.sampler = sampler + + def __iter__(self): + while True: + yield from iter(self.sampler) + + +class LoadImages: + # YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4` + def __init__(self, path, img_size=640, stride=32, auto=True): + files = [] + for p in sorted(path) if isinstance(path, (list, tuple)) else [path]: + p = str(Path(p).resolve()) + if '*' in p: + files.extend(sorted(glob.glob(p, recursive=True))) # glob + elif os.path.isdir(p): + files.extend(sorted(glob.glob(os.path.join(p, '*.*')))) # dir + elif os.path.isfile(p): + files.append(p) # files + else: + raise FileNotFoundError(f'{p} does not exist') + + images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS] + videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS] + ni, nv = len(images), len(videos) + + self.img_size = img_size + self.stride = stride + self.files = images + videos + self.nf = ni + nv # number of files + self.video_flag = [False] * ni + [True] * nv + self.mode = 'image' + self.auto = auto + if any(videos): + self.new_video(videos[0]) # new video + else: + self.cap = None + assert self.nf > 0, f'No images or videos found 
in {p}. ' \ + f'Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}' + + def __iter__(self): + self.count = 0 + return self + + def __next__(self): + if self.count == self.nf: + raise StopIteration + path = self.files[self.count] + + if self.video_flag[self.count]: + # Read video + self.mode = 'video' + ret_val, img0 = self.cap.read() + while not ret_val: + self.count += 1 + self.cap.release() + if self.count == self.nf: # last video + raise StopIteration + path = self.files[self.count] + self.new_video(path) + ret_val, img0 = self.cap.read() + + self.frame += 1 + s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: ' + + else: + # Read image + self.count += 1 + img0 = cv2.imread(path) # BGR + assert img0 is not None, f'Image Not Found {path}' + s = f'image {self.count}/{self.nf} {path}: ' + + # Padded resize + img = letterbox(img0, self.img_size, stride=self.stride, auto=self.auto)[0] + + # Convert + img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB + img = np.ascontiguousarray(img) + + return path, img, img0, self.cap, s + + def new_video(self, path): + self.frame = 0 + self.cap = cv2.VideoCapture(path) + self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT)) + + def __len__(self): + return self.nf # number of files + + +class LoadWebcam: # for inference + # YOLOv5 local webcam dataloader, i.e. 
`python detect.py --source 0` + def __init__(self, pipe='0', img_size=640, stride=32): + self.img_size = img_size + self.stride = stride + self.pipe = eval(pipe) if pipe.isnumeric() else pipe + self.cap = cv2.VideoCapture(self.pipe) # video capture object + self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size + + def __iter__(self): + self.count = -1 + return self + + def __next__(self): + self.count += 1 + if cv2.waitKey(1) == ord('q'): # q to quit + self.cap.release() + cv2.destroyAllWindows() + raise StopIteration + + # Read frame + ret_val, img0 = self.cap.read() + img0 = cv2.flip(img0, 1) # flip left-right + + # Print + assert ret_val, f'Camera Error {self.pipe}' + img_path = 'webcam.jpg' + s = f'webcam {self.count}: ' + + # Padded resize + img = letterbox(img0, self.img_size, stride=self.stride)[0] + + # Convert + img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB + img = np.ascontiguousarray(img) + + return img_path, img, img0, None, s + + def __len__(self): + return 0 + + +class LoadStreams: + # YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams` + def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True): + self.mode = 'stream' + self.img_size = img_size + self.stride = stride + + if os.path.isfile(sources): + with open(sources) as f: + sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())] + else: + sources = [sources] + + n = len(sources) + self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n + self.sources = [clean_str(x) for x in sources] # clean source names for later + self.auto = auto + for i, s in enumerate(sources): # index, source + # Start thread to read frames from video stream + st = f'{i + 1}/{n}: {s}... 
' + if urlparse(s).hostname in ('www.youtube.com', 'youtube.com', 'youtu.be'): # if source is YouTube video + check_requirements(('pafy', 'youtube_dl==2020.12.2')) + import pafy + s = pafy.new(s).getbest(preftype="mp4").url # YouTube URL + s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam + if s == 0: + assert not is_colab(), '--source 0 webcam unsupported on Colab. Rerun command in a local environment.' + assert not is_kaggle(), '--source 0 webcam unsupported on Kaggle. Rerun command in a local environment.' + cap = cv2.VideoCapture(s) + assert cap.isOpened(), f'{st}Failed to open {s}' + w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + fps = cap.get(cv2.CAP_PROP_FPS) # warning: may return 0 or nan + self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf') # infinite stream fallback + self.fps[i] = max((fps if math.isfinite(fps) else 0) % 100, 0) or 30 # 30 FPS fallback + + _, self.imgs[i] = cap.read() # guarantee first frame + self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True) + LOGGER.info(f"{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)") + self.threads[i].start() + LOGGER.info('') # newline + + # check for common shapes + s = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0].shape for x in self.imgs]) + self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal + if not self.rect: + LOGGER.warning('WARNING: Stream shapes differ. 
For optimal performance supply similarly-shaped streams.') + + def update(self, i, cap, stream): + # Read stream `i` frames in daemon thread + n, f, read = 0, self.frames[i], 1 # frame number, frame array, inference every 'read' frame + while cap.isOpened() and n < f: + n += 1 + # _, self.imgs[index] = cap.read() + cap.grab() + if n % read == 0: + success, im = cap.retrieve() + if success: + self.imgs[i] = im + else: + LOGGER.warning('WARNING: Video stream unresponsive, please check your IP camera connection.') + self.imgs[i] = np.zeros_like(self.imgs[i]) + cap.open(stream) # re-open stream if signal was lost + time.sleep(0.0) # wait time + + def __iter__(self): + self.count = -1 + return self + + def __next__(self): + self.count += 1 + if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'): # q to quit + cv2.destroyAllWindows() + raise StopIteration + + # Letterbox + img0 = self.imgs.copy() + img = [letterbox(x, self.img_size, stride=self.stride, auto=self.rect and self.auto)[0] for x in img0] + + # Stack + img = np.stack(img, 0) + + # Convert + img = img[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW + img = np.ascontiguousarray(img) + + return self.sources, img, img0, None, '' + + def __len__(self): + return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years + + +def img2label_paths(img_paths): + # Define label paths as a function of image paths + sa, sb = f'{os.sep}images{os.sep}', f'{os.sep}labels{os.sep}' # /images/, /labels/ substrings + return [sb.join(x.rsplit(sa, 1)).rsplit('.', 1)[0] + '.txt' for x in img_paths] + + +class LoadImagesAndLabels(Dataset): + # YOLOv5 train_loader/val_loader, loads images and labels for training and validation + cache_version = 0.6 # dataset labels *.cache version + rand_interp_methods = [cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_LANCZOS4] + + def __init__(self, + path, + img_size=640, + batch_size=16, + augment=False, + hyp=None, + 
rect=False, + image_weights=False, + cache_images=False, + single_cls=False, + stride=32, + pad=0.0, + prefix=''): + self.img_size = img_size + self.augment = augment + self.hyp = hyp + self.image_weights = image_weights + self.rect = False if image_weights else rect + self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training) + self.mosaic_border = [-img_size // 2, -img_size // 2] + self.stride = stride + self.path = path + self.albumentations = Albumentations() if augment else None + + try: + f = [] # image files + for p in path if isinstance(path, list) else [path]: + p = Path(p) # os-agnostic + if p.is_dir(): # dir + f += glob.glob(str(p / '**' / '*.*'), recursive=True) + # f = list(p.rglob('*.*')) # pathlib + elif p.is_file(): # file + with open(p) as t: + t = t.read().strip().splitlines() + parent = str(p.parent) + os.sep + f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path + # f += [p.parent / x.lstrip(os.sep) for x in t] # local to global path (pathlib) + else: + raise FileNotFoundError(f'{prefix}{p} does not exist') + self.im_files = sorted(x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS) + # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS]) # pathlib + assert self.im_files, f'{prefix}No images found' + except Exception as e: + raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {HELP_URL}') + + # Check cache + self.label_files = img2label_paths(self.im_files) # labels + cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') + try: + cache, exists = np.load(cache_path, allow_pickle=True).item(), True # load dict + assert cache['version'] == self.cache_version # matches current version + assert cache['hash'] == get_hash(self.label_files + self.im_files) # identical hash + except Exception: + cache, exists = self.cache_labels(cache_path, prefix), False # run 
cache ops + + # Display cache + nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupt, total + if exists and LOCAL_RANK in {-1, 0}: + d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupt" + tqdm(None, desc=prefix + d, total=n, initial=n, bar_format=BAR_FORMAT) # display cache results + if cache['msgs']: + LOGGER.info('\n'.join(cache['msgs'])) # display warnings + assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {HELP_URL}' + + # Read cache + [cache.pop(k) for k in ('hash', 'version', 'msgs')] # remove items + labels, shapes, self.segments = zip(*cache.values()) + self.labels = list(labels) + self.shapes = np.array(shapes) + self.im_files = list(cache.keys()) # update + self.label_files = img2label_paths(cache.keys()) # update + n = len(shapes) # number of images + bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index + nb = bi[-1] + 1 # number of batches + self.batch = bi # batch index of image + self.n = n + self.indices = range(n) + + # Update labels + include_class = [] # filter labels to include only these classes (optional) + include_class_array = np.array(include_class).reshape(1, -1) + for i, (label, segment) in enumerate(zip(self.labels, self.segments)): + if include_class: + j = (label[:, 0:1] == include_class_array).any(1) + self.labels[i] = label[j] + if segment: + self.segments[i] = segment[j] + if single_cls: # single-class training, merge all classes into 0 + self.labels[i][:, 0] = 0 + if segment: + self.segments[i][:, 0] = 0 + + # Rectangular Training + if self.rect: + # Sort by aspect ratio + s = self.shapes # wh + ar = s[:, 1] / s[:, 0] # aspect ratio + irect = ar.argsort() + self.im_files = [self.im_files[i] for i in irect] + self.label_files = [self.label_files[i] for i in irect] + self.labels = [self.labels[i] for i in irect] + self.shapes = s[irect] # wh + ar = ar[irect] + + # Set training image shapes + 
shapes = [[1, 1]] * nb + for i in range(nb): + ari = ar[bi == i] + mini, maxi = ari.min(), ari.max() + if maxi < 1: + shapes[i] = [maxi, 1] + elif mini > 1: + shapes[i] = [1, 1 / mini] + + self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride + + # Cache images into RAM/disk for faster training (WARNING: large datasets may exceed system resources) + self.ims = [None] * n + self.npy_files = [Path(f).with_suffix('.npy') for f in self.im_files] + if cache_images: + gb = 0 # Gigabytes of cached images + self.im_hw0, self.im_hw = [None] * n, [None] * n + fcn = self.cache_images_to_disk if cache_images == 'disk' else self.load_image + results = ThreadPool(NUM_THREADS).imap(fcn, range(n)) + pbar = tqdm(enumerate(results), total=n, bar_format=BAR_FORMAT, disable=LOCAL_RANK > 0) + for i, x in pbar: + if cache_images == 'disk': + gb += self.npy_files[i].stat().st_size + else: # 'ram' + self.ims[i], self.im_hw0[i], self.im_hw[i] = x # im, hw_orig, hw_resized = load_image(self, i) + gb += self.ims[i].nbytes + pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB {cache_images})' + pbar.close() + + def cache_labels(self, path=Path('./labels.cache'), prefix=''): + # Cache dataset labels, check images and read shapes + x = {} # dict + nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number missing, found, empty, corrupt, messages + desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels..." 
+ with Pool(NUM_THREADS) as pool: + pbar = tqdm(pool.imap(verify_image_label, zip(self.im_files, self.label_files, repeat(prefix))), + desc=desc, + total=len(self.im_files), + bar_format=BAR_FORMAT) + for im_file, lb, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar: + nm += nm_f + nf += nf_f + ne += ne_f + nc += nc_f + if im_file: + x[im_file] = [lb, shape, segments] + if msg: + msgs.append(msg) + pbar.desc = f"{desc}{nf} found, {nm} missing, {ne} empty, {nc} corrupt" + + pbar.close() + if msgs: + LOGGER.info('\n'.join(msgs)) + if nf == 0: + LOGGER.warning(f'{prefix}WARNING: No labels found in {path}. See {HELP_URL}') + x['hash'] = get_hash(self.label_files + self.im_files) + x['results'] = nf, nm, ne, nc, len(self.im_files) + x['msgs'] = msgs # warnings + x['version'] = self.cache_version # cache version + try: + np.save(path, x) # save cache for next time + path.with_suffix('.cache.npy').rename(path) # remove .npy suffix + LOGGER.info(f'{prefix}New cache created: {path}') + except Exception as e: + LOGGER.warning(f'{prefix}WARNING: Cache directory {path.parent} is not writeable: {e}') # not writeable + return x + + def __len__(self): + return len(self.im_files) + + # def __iter__(self): + # self.count = -1 + # print('ran dataset iter') + # #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF) + # return self + + def __getitem__(self, index): + index = self.indices[index] # linear, shuffled, or image_weights + + hyp = self.hyp + mosaic = self.mosaic and random.random() < hyp['mosaic'] + if mosaic: + # Load mosaic + img, labels = self.load_mosaic(index) + shapes = None + + # MixUp augmentation + if random.random() < hyp['mixup']: + img, labels = mixup(img, labels, *self.load_mosaic(random.randint(0, self.n - 1))) + + else: + # Load image + img, (h0, w0), (h, w) = self.load_image(index) + + # Letterbox + shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape + img, ratio, 
pad = letterbox(img, shape, auto=False, scaleup=self.augment) + shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling + + labels = self.labels[index].copy() + if labels.size: # normalized xywh to pixel xyxy format + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1]) + + if self.augment: + img, labels = random_perspective(img, + labels, + degrees=hyp['degrees'], + translate=hyp['translate'], + scale=hyp['scale'], + shear=hyp['shear'], + perspective=hyp['perspective']) + + nl = len(labels) # number of labels + if nl: + labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1E-3) + + if self.augment: + # Albumentations + img, labels = self.albumentations(img, labels) + nl = len(labels) # update after albumentations + + # HSV color-space + augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v']) + + # Flip up-down + if random.random() < hyp['flipud']: + img = np.flipud(img) + if nl: + labels[:, 2] = 1 - labels[:, 2] + + # Flip left-right + if random.random() < hyp['fliplr']: + img = np.fliplr(img) + if nl: + labels[:, 1] = 1 - labels[:, 1] + + # Cutouts + # labels = cutout(img, labels, p=0.5) + # nl = len(labels) # update after cutout + + labels_out = torch.zeros((nl, 6)) + if nl: + labels_out[:, 1:] = torch.from_numpy(labels) + + # Convert + img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB + img = np.ascontiguousarray(img) + + return torch.from_numpy(img), labels_out, self.im_files[index], shapes + + def load_image(self, i): + # Loads 1 image from dataset index 'i', returns (im, original hw, resized hw) + im, f, fn = self.ims[i], self.im_files[i], self.npy_files[i], + if im is None: # not cached in RAM + if fn.exists(): # load npy + im = np.load(fn) + else: # read image + im = cv2.imread(f) # BGR + assert im is not None, f'Image Not Found {f}' + h0, w0 = im.shape[:2] # orig hw + r = self.img_size / max(h0, w0) # ratio + if r != 1: # if sizes 
are not equal + interp = cv2.INTER_LINEAR if (self.augment or r > 1) else cv2.INTER_AREA + im = cv2.resize(im, (int(w0 * r), int(h0 * r)), interpolation=interp) + return im, (h0, w0), im.shape[:2] # im, hw_original, hw_resized + return self.ims[i], self.im_hw0[i], self.im_hw[i] # im, hw_original, hw_resized + + def cache_images_to_disk(self, i): + # Saves an image as an *.npy file for faster loading + f = self.npy_files[i] + if not f.exists(): + np.save(f.as_posix(), cv2.imread(self.im_files[i])) + + def load_mosaic(self, index): + # YOLOv5 4-mosaic loader. Loads 1 image + 3 random images into a 4-image mosaic + labels4, segments4 = [], [] + s = self.img_size + yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border) # mosaic center x, y + indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices + random.shuffle(indices) + for i, index in enumerate(indices): + # Load image + img, _, (h, w) = self.load_image(index) + + # place img in img4 + if i == 0: # top left + img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles + x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image) + x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image) + elif i == 1: # top right + x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc + x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h + elif i == 2: # bottom left + x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h) + x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h) + elif i == 3: # bottom right + x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h) + x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h) + + img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] + padw = x1a - x1b + padh = y1a - y1b + + # Labels + labels, segments = self.labels[index].copy(), 
self.segments[index].copy() + if labels.size: + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format + segments = [xyn2xy(x, w, h, padw, padh) for x in segments] + labels4.append(labels) + segments4.extend(segments) + + # Concat/clip labels + labels4 = np.concatenate(labels4, 0) + for x in (labels4[:, 1:], *segments4): + np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() + # img4, labels4 = replicate(img4, labels4) # replicate + + # Augment + img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp['copy_paste']) + img4, labels4 = random_perspective(img4, + labels4, + segments4, + degrees=self.hyp['degrees'], + translate=self.hyp['translate'], + scale=self.hyp['scale'], + shear=self.hyp['shear'], + perspective=self.hyp['perspective'], + border=self.mosaic_border) # border to remove + + return img4, labels4 + + def load_mosaic9(self, index): + # YOLOv5 9-mosaic loader. Loads 1 image + 8 random images into a 9-image mosaic + labels9, segments9 = [], [] + s = self.img_size + indices = [index] + random.choices(self.indices, k=8) # 8 additional image indices + random.shuffle(indices) + hp, wp = -1, -1 # height, width previous + for i, index in enumerate(indices): + # Load image + img, _, (h, w) = self.load_image(index) + + # place img in img9 + if i == 0: # center + img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles + h0, w0 = h, w + c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates + elif i == 1: # top + c = s, s - h, s + w, s + elif i == 2: # top right + c = s + wp, s - h, s + wp + w, s + elif i == 3: # right + c = s + w0, s, s + w0 + w, s + h + elif i == 4: # bottom right + c = s + w0, s + hp, s + w0 + w, s + hp + h + elif i == 5: # bottom + c = s + w0 - w, s + h0, s + w0, s + h0 + h + elif i == 6: # bottom left + c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h + elif i == 7: # left + c = s - w, s + h0 - h, s, s + h0 + elif i 
== 8: # top left + c = s - w, s + h0 - hp - h, s, s + h0 - hp + + padx, pady = c[:2] + x1, y1, x2, y2 = (max(x, 0) for x in c) # allocate coords + + # Labels + labels, segments = self.labels[index].copy(), self.segments[index].copy() + if labels.size: + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady) # normalized xywh to pixel xyxy format + segments = [xyn2xy(x, w, h, padx, pady) for x in segments] + labels9.append(labels) + segments9.extend(segments) + + # Image + img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax] + hp, wp = h, w # height, width previous + + # Offset + yc, xc = (int(random.uniform(0, s)) for _ in self.mosaic_border) # mosaic center x, y + img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s] + + # Concat/clip labels + labels9 = np.concatenate(labels9, 0) + labels9[:, [1, 3]] -= xc + labels9[:, [2, 4]] -= yc + c = np.array([xc, yc]) # centers + segments9 = [x - c for x in segments9] + + for x in (labels9[:, 1:], *segments9): + np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() + # img9, labels9 = replicate(img9, labels9) # replicate + + # Augment + img9, labels9 = random_perspective(img9, + labels9, + segments9, + degrees=self.hyp['degrees'], + translate=self.hyp['translate'], + scale=self.hyp['scale'], + shear=self.hyp['shear'], + perspective=self.hyp['perspective'], + border=self.mosaic_border) # border to remove + + return img9, labels9 + + @staticmethod + def collate_fn(batch): + im, label, path, shapes = zip(*batch) # transposed + for i, lb in enumerate(label): + lb[:, 0] = i # add target image index for build_targets() + return torch.stack(im, 0), torch.cat(label, 0), path, shapes + + @staticmethod + def collate_fn4(batch): + img, label, path, shapes = zip(*batch) # transposed + n = len(shapes) // 4 + im4, label4, path4, shapes4 = [], [], path[:n], shapes[:n] + + ho = torch.tensor([[0.0, 0, 0, 1, 0, 0]]) + wo = torch.tensor([[0.0, 0, 1, 0, 0, 0]]) + s = torch.tensor([[1, 1, 0.5, 0.5, 0.5, 0.5]]) # 
scale + for i in range(n): # zidane torch.zeros(16,3,720,1280) # BCHW + i *= 4 + if random.random() < 0.5: + im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2.0, mode='bilinear', + align_corners=False)[0].type(img[i].type()) + lb = label[i] + else: + im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2) + lb = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s + im4.append(im) + label4.append(lb) + + for i, lb in enumerate(label4): + lb[:, 0] = i # add target image index for build_targets() + + return torch.stack(im4, 0), torch.cat(label4, 0), path4, shapes4 + + +# Ancillary functions -------------------------------------------------------------------------------------------------- +def flatten_recursive(path=DATASETS_DIR / 'coco128'): + # Flatten a recursive directory by bringing all files to top level + new_path = Path(f'{str(path)}_flat') + if os.path.exists(new_path): + shutil.rmtree(new_path) # delete output folder + os.makedirs(new_path) # make new output folder + for file in tqdm(glob.glob(f'{str(Path(path))}/**/*.*', recursive=True)): + shutil.copyfile(file, new_path / Path(file).name) + + +def extract_boxes(path=DATASETS_DIR / 'coco128'): # from utils.dataloaders import *; extract_boxes() + # Convert detection dataset into classification dataset, with one directory per class + path = Path(path) # images dir + shutil.rmtree(path / 'classification') if (path / 'classification').is_dir() else None # remove existing + files = list(path.rglob('*.*')) + n = len(files) # number of files + for im_file in tqdm(files, total=n): + if im_file.suffix[1:] in IMG_FORMATS: + # image + im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB + h, w = im.shape[:2] + + # labels + lb_file = Path(img2label_paths([str(im_file)])[0]) + if Path(lb_file).exists(): + with open(lb_file) as f: + lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels + + 
for j, x in enumerate(lb): + c = int(x[0]) # class + f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg' # new filename + if not f.parent.is_dir(): + f.parent.mkdir(parents=True) + + b = x[1:] * [w, h, w, h] # box + # b[2:] = b[2:].max() # rectangle to square + b[2:] = b[2:] * 1.2 + 3 # pad + b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int) + + b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image + b[[1, 3]] = np.clip(b[[1, 3]], 0, h) + assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}' + + +def autosplit(path=DATASETS_DIR / 'coco128/images', weights=(0.9, 0.1, 0.0), annotated_only=False): + """ Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files + Usage: from utils.dataloaders import *; autosplit() + Arguments + path: Path to images directory + weights: Train, val, test weights (list, tuple) + annotated_only: Only use images with an annotated txt file + """ + path = Path(path) # images dir + files = sorted(x for x in path.rglob('*.*') if x.suffix[1:].lower() in IMG_FORMATS) # image files only + n = len(files) # number of files + random.seed(0) # for reproducibility + indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split + + txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files + [(path.parent / x).unlink(missing_ok=True) for x in txt] # remove existing + + print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only) + for i, img in tqdm(zip(indices, files), total=n): + if not annotated_only or Path(img2label_paths([str(img)])[0]).exists(): # check label + with open(path.parent / txt[i], 'a') as f: + f.write(f'./{img.relative_to(path.parent).as_posix()}' + '\n') # add image to txt file + + +def verify_image_label(args): + # Verify one image-label pair + im_file, lb_file, prefix = args + nm, nf, ne, nc, msg, segments = 0, 0, 0, 0, '', [] # number (missing, found, 
empty, corrupt), message, segments + try: + # verify images + im = Image.open(im_file) + im.verify() # PIL verify + shape = exif_size(im) # image size + assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels' + assert im.format.lower() in IMG_FORMATS, f'invalid image format {im.format}' + if im.format.lower() in ('jpg', 'jpeg'): + with open(im_file, 'rb') as f: + f.seek(-2, 2) + if f.read() != b'\xff\xd9': # corrupt JPEG + ImageOps.exif_transpose(Image.open(im_file)).save(im_file, 'JPEG', subsampling=0, quality=100) + msg = f'{prefix}WARNING: {im_file}: corrupt JPEG restored and saved' + + # verify labels + if os.path.isfile(lb_file): + nf = 1 # label found + with open(lb_file) as f: + lb = [x.split() for x in f.read().strip().splitlines() if len(x)] + if any(len(x) > 6 for x in lb): # is segment + classes = np.array([x[0] for x in lb], dtype=np.float32) + segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in lb] # (cls, xy1...) + lb = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh) + lb = np.array(lb, dtype=np.float32) + nl = len(lb) + if nl: + assert lb.shape[1] == 5, f'labels require 5 columns, {lb.shape[1]} columns detected' + assert (lb >= 0).all(), f'negative label values {lb[lb < 0]}' + assert (lb[:, 1:] <= 1).all(), f'non-normalized or out of bounds coordinates {lb[:, 1:][lb[:, 1:] > 1]}' + _, i = np.unique(lb, axis=0, return_index=True) + if len(i) < nl: # duplicate row check + lb = lb[i] # remove duplicates + if segments: + segments = segments[i] + msg = f'{prefix}WARNING: {im_file}: {nl - len(i)} duplicate labels removed' + else: + ne = 1 # label empty + lb = np.zeros((0, 5), dtype=np.float32) + else: + nm = 1 # label missing + lb = np.zeros((0, 5), dtype=np.float32) + return im_file, lb, shape, segments, nm, nf, ne, nc, msg + except Exception as e: + nc = 1 + msg = f'{prefix}WARNING: {im_file}: ignoring corrupt image/label: {e}' + return [None, None, None, None, nm, nf, ne, nc, msg] + 
+ +class HUBDatasetStats(): + """ Return dataset statistics dictionary with images and instances counts per split per class + To run in parent directory: export PYTHONPATH="$PWD/yolov5" + Usage1: from utils.dataloaders import *; HUBDatasetStats('coco128.yaml', autodownload=True) + Usage2: from utils.dataloaders import *; HUBDatasetStats('path/to/coco128_with_yaml.zip') + Arguments + path: Path to data.yaml or data.zip (with data.yaml inside data.zip) + autodownload: Attempt to download dataset if not found locally + """ + + def __init__(self, path='coco128.yaml', autodownload=False): + # Initialize class + zipped, data_dir, yaml_path = self._unzip(Path(path)) + try: + with open(check_yaml(yaml_path), errors='ignore') as f: + data = yaml.safe_load(f) # data dict + if zipped: + data['path'] = data_dir + except Exception as e: + raise Exception("error/HUB/dataset_stats/yaml_load") from e + + check_dataset(data, autodownload) # download dataset if missing + self.hub_dir = Path(data['path'] + '-hub') + self.im_dir = self.hub_dir / 'images' + self.im_dir.mkdir(parents=True, exist_ok=True) # makes /images + self.stats = {'nc': data['nc'], 'names': data['names']} # statistics dictionary + self.data = data + + @staticmethod + def _find_yaml(dir): + # Return data.yaml file + files = list(dir.glob('*.yaml')) or list(dir.rglob('*.yaml')) # try root level first and then recursive + assert files, f'No *.yaml file found in {dir}' + if len(files) > 1: + files = [f for f in files if f.stem == dir.stem] # prefer *.yaml files that match dir name + assert files, f'Multiple *.yaml files found in {dir}, only 1 *.yaml file allowed' + assert len(files) == 1, f'Multiple *.yaml files found: {files}, only 1 *.yaml file allowed in {dir}' + return files[0] + + def _unzip(self, path): + # Unzip data.zip + if not str(path).endswith('.zip'): # path is data.yaml + return False, None, path + assert Path(path).is_file(), f'Error unzipping {path}, file not found' + 
ZipFile(path).extractall(path=path.parent) # unzip + dir = path.with_suffix('') # dataset directory == zip name + assert dir.is_dir(), f'Error unzipping {path}, {dir} not found. path/to/abc.zip MUST unzip to path/to/abc/' + return True, str(dir), self._find_yaml(dir) # zipped, data_dir, yaml_path + + def _hub_ops(self, f, max_dim=1920): + # HUB ops for 1 image 'f': resize and save at reduced quality in /dataset-hub for web/app viewing + f_new = self.im_dir / Path(f).name # dataset-hub image filename + try: # use PIL + im = Image.open(f) + r = max_dim / max(im.height, im.width) # ratio + if r < 1.0: # image too large + im = im.resize((int(im.width * r), int(im.height * r))) + im.save(f_new, 'JPEG', quality=50, optimize=True) # save + except Exception as e: # use OpenCV + print(f'WARNING: HUB ops PIL failure {f}: {e}') + im = cv2.imread(f) + im_height, im_width = im.shape[:2] + r = max_dim / max(im_height, im_width) # ratio + if r < 1.0: # image too large + im = cv2.resize(im, (int(im_width * r), int(im_height * r)), interpolation=cv2.INTER_AREA) + cv2.imwrite(str(f_new), im) + + def get_json(self, save=False, verbose=False): + # Return dataset JSON for Ultralytics HUB + def _round(labels): + # Update labels to integer class and 6 decimal place floats + return [[int(c), *(round(x, 4) for x in points)] for c, *points in labels] + + for split in 'train', 'val', 'test': + if self.data.get(split) is None: + self.stats[split] = None # i.e. 
no test set + continue + dataset = LoadImagesAndLabels(self.data[split]) # load dataset + x = np.array([ + np.bincount(label[:, 0].astype(int), minlength=self.data['nc']) + for label in tqdm(dataset.labels, total=dataset.n, desc='Statistics')]) # shape(128x80) + self.stats[split] = { + 'instance_stats': { + 'total': int(x.sum()), + 'per_class': x.sum(0).tolist()}, + 'image_stats': { + 'total': dataset.n, + 'unlabelled': int(np.all(x == 0, 1).sum()), + 'per_class': (x > 0).sum(0).tolist()}, + 'labels': [{ + str(Path(k).name): _round(v.tolist())} for k, v in zip(dataset.im_files, dataset.labels)]} + + # Save, print and return + if save: + stats_path = self.hub_dir / 'stats.json' + print(f'Saving {stats_path.resolve()}...') + with open(stats_path, 'w') as f: + json.dump(self.stats, f) # save stats.json + if verbose: + print(json.dumps(self.stats, indent=2, sort_keys=False)) + return self.stats + + def process_images(self): + # Compress images for Ultralytics HUB + for split in 'train', 'val', 'test': + if self.data.get(split) is None: + continue + dataset = LoadImagesAndLabels(self.data[split]) # load dataset + desc = f'{split} images' + for _ in tqdm(ThreadPool(NUM_THREADS).imap(self._hub_ops, dataset.im_files), total=dataset.n, desc=desc): + pass + print(f'Done. All images saved to {self.im_dir}') + return self.im_dir + + +# Classification dataloaders ------------------------------------------------------------------------------------------- +class ClassificationDataset(torchvision.datasets.ImageFolder): + """ + YOLOv5 Classification Dataset. 
+ Arguments + root: Dataset path + transform: torchvision transforms, used by default + album_transform: Albumentations transforms, used if installed + """ + + def __init__(self, root, augment, imgsz, cache=False): + super().__init__(root=root) + self.torch_transforms = classify_transforms(imgsz) + self.album_transforms = classify_albumentations(augment, imgsz) if augment else None + self.cache_ram = cache is True or cache == 'ram' + self.cache_disk = cache == 'disk' + self.samples = [list(x) + [Path(x[0]).with_suffix('.npy'), None] for x in self.samples] # file, index, npy, im + + def __getitem__(self, i): + f, j, fn, im = self.samples[i] # filename, index, filename.with_suffix('.npy'), image + if self.album_transforms: + if self.cache_ram and im is None: + im = self.samples[i][3] = cv2.imread(f) + elif self.cache_disk: + if not fn.exists(): # load npy + np.save(fn.as_posix(), cv2.imread(f)) + im = np.load(fn) + else: # read image + im = cv2.imread(f) # BGR + sample = self.album_transforms(image=cv2.cvtColor(im, cv2.COLOR_BGR2RGB))["image"] + else: + sample = self.torch_transforms(self.loader(f)) + return sample, j + + +def create_classification_dataloader(path, + imgsz=224, + batch_size=16, + augment=True, + cache=False, + rank=-1, + workers=8, + shuffle=True): + # Returns Dataloader object to be used with YOLOv5 Classifier + with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP + dataset = ClassificationDataset(root=path, imgsz=imgsz, augment=augment, cache=cache) + batch_size = min(batch_size, len(dataset)) + nd = torch.cuda.device_count() + nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) + sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) + generator = torch.Generator() + generator.manual_seed(0) + return InfiniteDataLoader(dataset, + batch_size=batch_size, + shuffle=shuffle and sampler is None, + num_workers=nw, + sampler=sampler, + pin_memory=True, + 
worker_init_fn=seed_worker, + generator=generator) # or DataLoader(persistent_workers=True) diff --git a/src/目标识别代码/utils/docker/Dockerfile b/src/目标识别代码/utils/docker/Dockerfile new file mode 100644 index 00000000..2280f209 --- /dev/null +++ b/src/目标识别代码/utils/docker/Dockerfile @@ -0,0 +1,68 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# Builds ultralytics/yolov5:latest image on DockerHub https://hub.docker.com/r/ultralytics/yolov5 +# Image is CUDA-optimized for YOLOv5 single/multi-GPU training and inference + +# Start FROM NVIDIA PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch +FROM nvcr.io/nvidia/pytorch:22.07-py3 +RUN rm -rf /opt/pytorch # remove 1.2GB dir + +# Downloads to user config dir +ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ + +# Install linux packages +RUN apt update && apt install --no-install-recommends -y zip htop screen libgl1-mesa-glx + +# Install pip packages +COPY requirements.txt . +RUN python -m pip install --upgrade pip wheel +RUN pip uninstall -y Pillow torchtext # torch torchvision +RUN pip install --no-cache -r requirements.txt albumentations wandb gsutil notebook Pillow>=9.1.0 \ + 'opencv-python<4.6.0.66' \ + --extra-index-url https://download.pytorch.org/whl/cu113 + +# Create working directory +RUN mkdir -p /usr/src/app +WORKDIR /usr/src/app + +# Copy contents +# COPY . /usr/src/app (issues as not a .git directory) +RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app + +# Set environment variables +ENV OMP_NUM_THREADS=8 + + +# Usage Examples ------------------------------------------------------------------------------------------------------- + +# Build and Push +# t=ultralytics/yolov5:latest && sudo docker build -f utils/docker/Dockerfile -t $t . 
&& sudo docker push $t + +# Pull and Run +# t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all $t + +# Pull and Run with local directory access +# t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all -v "$(pwd)"/datasets:/usr/src/datasets $t + +# Kill all +# sudo docker kill $(sudo docker ps -q) + +# Kill all image-based +# sudo docker kill $(sudo docker ps -qa --filter ancestor=ultralytics/yolov5:latest) + +# Bash into running container +# sudo docker exec -it 5a9b5863d93d bash + +# Bash into stopped container +# id=$(sudo docker ps -qa) && sudo docker start $id && sudo docker exec -it $id bash + +# Clean up +# docker system prune -a --volumes + +# Update Ubuntu drivers +# https://www.maketecheasier.com/install-nvidia-drivers-ubuntu/ + +# DDP test +# python -m torch.distributed.run --nproc_per_node 2 --master_port 1 train.py --epochs 3 + +# GCP VM from Image +# docker.io/ultralytics/yolov5:latest diff --git a/src/目标识别代码/utils/docker/Dockerfile-arm64 b/src/目标识别代码/utils/docker/Dockerfile-arm64 new file mode 100644 index 00000000..fe92c8d5 --- /dev/null +++ b/src/目标识别代码/utils/docker/Dockerfile-arm64 @@ -0,0 +1,42 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# Builds ultralytics/yolov5:latest-arm64 image on DockerHub https://hub.docker.com/r/ultralytics/yolov5 +# Image is aarch64-compatible for Apple M1 and other ARM architectures i.e. 
Jetson Nano and Raspberry Pi + +# Start FROM Ubuntu image https://hub.docker.com/_/ubuntu +FROM arm64v8/ubuntu:20.04 + +# Downloads to user config dir +ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ + +# Install linux packages +RUN apt update +RUN DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt install -y tzdata +RUN apt install --no-install-recommends -y python3-pip git zip curl htop gcc \ + libgl1-mesa-glx libglib2.0-0 libpython3.8-dev +# RUN alias python=python3 + +# Install pip packages +COPY requirements.txt . +RUN python3 -m pip install --upgrade pip wheel +RUN pip install --no-cache -r requirements.txt gsutil notebook \ + tensorflow-aarch64 + # tensorflowjs \ + # onnx onnx-simplifier onnxruntime \ + # coremltools openvino-dev \ + +# Create working directory +RUN mkdir -p /usr/src/app +WORKDIR /usr/src/app + +# Copy contents +# COPY . /usr/src/app (issues as not a .git directory) +RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app + + +# Usage Examples ------------------------------------------------------------------------------------------------------- + +# Build and Push +# t=ultralytics/yolov5:latest-M1 && sudo docker build --platform linux/arm64 -f utils/docker/Dockerfile-arm64 -t $t . 
&& sudo docker push $t + +# Pull and Run +# t=ultralytics/yolov5:latest-M1 && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/datasets:/usr/src/datasets $t diff --git a/src/目标识别代码/utils/docker/Dockerfile-cpu b/src/目标识别代码/utils/docker/Dockerfile-cpu new file mode 100644 index 00000000..d61dfeff --- /dev/null +++ b/src/目标识别代码/utils/docker/Dockerfile-cpu @@ -0,0 +1,39 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# Builds ultralytics/yolov5:latest-cpu image on DockerHub https://hub.docker.com/r/ultralytics/yolov5 +# Image is CPU-optimized for ONNX, OpenVINO and PyTorch YOLOv5 deployments + +# Start FROM Ubuntu image https://hub.docker.com/_/ubuntu +FROM ubuntu:20.04 + +# Downloads to user config dir +ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ + +# Install linux packages +RUN apt update +RUN DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt install -y tzdata +RUN apt install --no-install-recommends -y python3-pip git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3.8-dev +# RUN alias python=python3 + +# Install pip packages +COPY requirements.txt . +RUN python3 -m pip install --upgrade pip wheel +RUN pip install --no-cache -r requirements.txt albumentations gsutil notebook \ + coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu tensorflowjs \ + --extra-index-url https://download.pytorch.org/whl/cpu + +# Create working directory +RUN mkdir -p /usr/src/app +WORKDIR /usr/src/app + +# Copy contents +# COPY . /usr/src/app (issues as not a .git directory) +RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app + + +# Usage Examples ------------------------------------------------------------------------------------------------------- + +# Build and Push +# t=ultralytics/yolov5:latest-cpu && sudo docker build -f utils/docker/Dockerfile-cpu -t $t . 
&& sudo docker push $t + +# Pull and Run +# t=ultralytics/yolov5:latest-cpu && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/datasets:/usr/src/datasets $t diff --git a/src/目标识别代码/utils/downloads.py b/src/目标识别代码/utils/downloads.py new file mode 100644 index 00000000..9d4780ad --- /dev/null +++ b/src/目标识别代码/utils/downloads.py @@ -0,0 +1,180 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Download utils +""" + +import logging +import os +import platform +import subprocess +import time +import urllib +from pathlib import Path +from zipfile import ZipFile + +import requests +import torch + + +def is_url(url, check_online=True): + # Check if online file exists + try: + url = str(url) + result = urllib.parse.urlparse(url) + assert all([result.scheme, result.netloc, result.path]) # check if is url + return (urllib.request.urlopen(url).getcode() == 200) if check_online else True # check if exists online + except (AssertionError, urllib.request.HTTPError): + return False + + +def gsutil_getsize(url=''): + # gs://bucket/file size https://cloud.google.com/storage/docs/gsutil/commands/du + s = subprocess.check_output(f'gsutil du {url}', shell=True).decode('utf-8') + return eval(s.split(' ')[0]) if len(s) else 0 # bytes + + +def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''): + # Attempts to download file from url or url2, checks and removes incomplete downloads < min_bytes + from utils.general import LOGGER + + file = Path(file) + assert_msg = f"Downloaded file '{file}' does not exist or size is < min_bytes={min_bytes}" + try: # url1 + LOGGER.info(f'Downloading {url} to {file}...') + torch.hub.download_url_to_file(url, str(file), progress=LOGGER.level <= logging.INFO) + assert file.exists() and file.stat().st_size > min_bytes, assert_msg # check + except Exception as e: # url2 + file.unlink(missing_ok=True) # remove partial downloads + LOGGER.info(f'ERROR: {e}\nRe-attempting {url2 or url} to {file}...') + os.system(f"curl -L '{url2 or 
url}' -o '{file}' --retry 3 -C -") # curl download, retry and resume on fail + finally: + if not file.exists() or file.stat().st_size < min_bytes: # check + file.unlink(missing_ok=True) # remove partial downloads + LOGGER.info(f"ERROR: {assert_msg}\n{error_msg}") + LOGGER.info('') + + +def attempt_download(file, repo='ultralytics/yolov5', release='v6.1'): + # Attempt file download from GitHub release assets if not found locally. release = 'latest', 'v6.1', etc. + from utils.general import LOGGER + + def github_assets(repository, version='latest'): + # Return GitHub repo tag (i.e. 'v6.1') and assets (i.e. ['yolov5s.pt', 'yolov5m.pt', ...]) + if version != 'latest': + version = f'tags/{version}' # i.e. tags/v6.1 + response = requests.get(f'https://api.github.com/repos/{repository}/releases/{version}').json() # github api + return response['tag_name'], [x['name'] for x in response['assets']] # tag, assets + + file = Path(str(file).strip().replace("'", '')) + if not file.exists(): + # URL specified + name = Path(urllib.parse.unquote(str(file))).name # decode '%2F' to '/' etc. + if str(file).startswith(('http:/', 'https:/')): # download + url = str(file).replace(':/', '://') # Pathlib turns :// -> :/ + file = name.split('?')[0] # parse authentication https://url.com/file.txt?auth... 
+ if Path(file).is_file(): + LOGGER.info(f'Found {url} locally at {file}') # file already exists + else: + safe_download(file=file, url=url, min_bytes=1E5) + return file + + # GitHub assets + assets = [ + 'yolov5n.pt', 'yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt', 'yolov5n6.pt', 'yolov5s6.pt', + 'yolov5m6.pt', 'yolov5l6.pt', 'yolov5x6.pt'] + try: + tag, assets = github_assets(repo, release) + except Exception: + try: + tag, assets = github_assets(repo) # latest release + except Exception: + try: + tag = subprocess.check_output('git tag', shell=True, stderr=subprocess.STDOUT).decode().split()[-1] + except Exception: + tag = release + + file.parent.mkdir(parents=True, exist_ok=True) # make parent dir (if required) + if name in assets: + url3 = 'https://drive.google.com/drive/folders/1EFQTEUeXWSFww0luse2jB9M1QNZQGwNl' # backup gdrive mirror + safe_download( + file, + url=f'https://github.com/{repo}/releases/download/{tag}/{name}', + url2=f'https://storage.googleapis.com/{repo}/{tag}/{name}', # backup url (optional) + min_bytes=1E5, + error_msg=f'{file} missing, try downloading from https://github.com/{repo}/releases/{tag} or {url3}') + + return str(file) + + +def gdrive_download(id='16TiPfZj7htmTyhntwcZyEEAejOUxuT6m', file='tmp.zip'): + # Downloads a file from Google Drive. from yolov5.utils.downloads import *; gdrive_download() + t = time.time() + file = Path(file) + cookie = Path('cookie') # gdrive cookie + print(f'Downloading https://drive.google.com/uc?export=download&id={id} as {file}... 
', end='') + file.unlink(missing_ok=True) # remove existing file + cookie.unlink(missing_ok=True) # remove existing cookie + + # Attempt file download + out = "NUL" if platform.system() == "Windows" else "/dev/null" + os.system(f'curl -c ./cookie -s -L "drive.google.com/uc?export=download&id={id}" > {out}') + if os.path.exists('cookie'): # large file + s = f'curl -Lb ./cookie "drive.google.com/uc?export=download&confirm={get_token()}&id={id}" -o {file}' + else: # small file + s = f'curl -s -L -o {file} "drive.google.com/uc?export=download&id={id}"' + r = os.system(s) # execute, capture return + cookie.unlink(missing_ok=True) # remove existing cookie + + # Error check + if r != 0: + file.unlink(missing_ok=True) # remove partial + print('Download error ') # raise Exception('Download error') + return r + + # Unzip if archive + if file.suffix == '.zip': + print('unzipping... ', end='') + ZipFile(file).extractall(path=file.parent) # unzip + file.unlink() # remove zip + + print(f'Done ({time.time() - t:.1f}s)') + return r + + +def get_token(cookie="./cookie"): + with open(cookie) as f: + for line in f: + if "download" in line: + return line.split()[-1] + return "" + + +# Google utils: https://cloud.google.com/storage/docs/reference/libraries ---------------------------------------------- +# +# +# def upload_blob(bucket_name, source_file_name, destination_blob_name): +# # Uploads a file to a bucket +# # https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python +# +# storage_client = storage.Client() +# bucket = storage_client.get_bucket(bucket_name) +# blob = bucket.blob(destination_blob_name) +# +# blob.upload_from_filename(source_file_name) +# +# print('File {} uploaded to {}.'.format( +# source_file_name, +# destination_blob_name)) +# +# +# def download_blob(bucket_name, source_blob_name, destination_file_name): +# # Uploads a blob from a bucket +# storage_client = storage.Client() +# bucket = storage_client.get_bucket(bucket_name) +# blob = 
bucket.blob(source_blob_name) +# +# blob.download_to_filename(destination_file_name) +# +# print('Blob {} downloaded to {}.'.format( +# source_blob_name, +# destination_file_name)) diff --git a/src/目标识别代码/utils/flask_rest_api/README.md b/src/目标识别代码/utils/flask_rest_api/README.md new file mode 100644 index 00000000..a726acbd --- /dev/null +++ b/src/目标识别代码/utils/flask_rest_api/README.md @@ -0,0 +1,73 @@ +# Flask REST API + +[REST](https://en.wikipedia.org/wiki/Representational_state_transfer) [API](https://en.wikipedia.org/wiki/API)s are +commonly used to expose Machine Learning (ML) models to other services. This folder contains an example REST API +created using Flask to expose the YOLOv5s model from [PyTorch Hub](https://pytorch.org/hub/ultralytics_yolov5/). + +## Requirements + +[Flask](https://palletsprojects.com/p/flask/) is required. Install with: + +```shell +$ pip install Flask +``` + +## Run + +After Flask installation run: + +```shell +$ python3 restapi.py --port 5000 +``` + +Then use [curl](https://curl.se/) to perform a request: + +```shell +$ curl -X POST -F image=@zidane.jpg 'http://localhost:5000/v1/object-detection/yolov5s' +``` + +The model inference results are returned as a JSON response: + +```json +[ + { + "class": 0, + "confidence": 0.8900438547, + "height": 0.9318675399, + "name": "person", + "width": 0.3264600933, + "xcenter": 0.7438579798, + "ycenter": 0.5207948685 + }, + { + "class": 0, + "confidence": 0.8440024257, + "height": 0.7155083418, + "name": "person", + "width": 0.6546785235, + "xcenter": 0.427829951, + "ycenter": 0.6334488392 + }, + { + "class": 27, + "confidence": 0.3771208823, + "height": 0.3902671337, + "name": "tie", + "width": 0.0696444362, + "xcenter": 0.3675483763, + "ycenter": 0.7991207838 + }, + { + "class": 27, + "confidence": 0.3527112305, + "height": 0.1540903747, + "name": "tie", + "width": 0.0336618312, + "xcenter": 0.7814827561, + "ycenter": 0.5065554976 + } +] +``` + +An example python script to perform inference 
using [requests](https://docs.python-requests.org/en/master/) is given +in `example_request.py` diff --git a/src/目标识别代码/utils/flask_rest_api/example_request.py b/src/目标识别代码/utils/flask_rest_api/example_request.py new file mode 100644 index 00000000..773ad893 --- /dev/null +++ b/src/目标识别代码/utils/flask_rest_api/example_request.py @@ -0,0 +1,19 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Perform test request +""" + +import pprint + +import requests + +DETECTION_URL = "http://localhost:5000/v1/object-detection/yolov5s" +IMAGE = "zidane.jpg" + +# Read image +with open(IMAGE, "rb") as f: + image_data = f.read() + +response = requests.post(DETECTION_URL, files={"image": image_data}).json() + +pprint.pprint(response) diff --git a/src/目标识别代码/utils/flask_rest_api/restapi.py b/src/目标识别代码/utils/flask_rest_api/restapi.py new file mode 100644 index 00000000..8482435c --- /dev/null +++ b/src/目标识别代码/utils/flask_rest_api/restapi.py @@ -0,0 +1,48 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Run a Flask REST API exposing one or more YOLOv5s models +""" + +import argparse +import io + +import torch +from flask import Flask, request +from PIL import Image + +app = Flask(__name__) +models = {} + +DETECTION_URL = "/v1/object-detection/" + + +@app.route(DETECTION_URL, methods=["POST"]) +def predict(model): + if request.method != "POST": + return + + if request.files.get("image"): + # Method 1 + # with request.files["image"] as f: + # im = Image.open(io.BytesIO(f.read())) + + # Method 2 + im_file = request.files["image"] + im_bytes = im_file.read() + im = Image.open(io.BytesIO(im_bytes)) + + if model in models: + results = models[model](im, size=640) # reduce size=320 for faster inference + return results.pandas().xyxy[0].to_json(orient="records") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Flask API exposing YOLOv5 model") + parser.add_argument("--port", default=5000, type=int, help="port number") + parser.add_argument('--model', 
nargs='+', default=['yolov5s'], help='model(s) to run, i.e. --model yolov5n yolov5s') + opt = parser.parse_args() + + for m in opt.model: + models[m] = torch.hub.load("ultralytics/yolov5", m, force_reload=True, skip_validation=True) + + app.run(host="0.0.0.0", port=opt.port) # debug=True causes Restarting with stat diff --git a/src/目标识别代码/utils/general.py b/src/目标识别代码/utils/general.py new file mode 100644 index 00000000..1c525c45 --- /dev/null +++ b/src/目标识别代码/utils/general.py @@ -0,0 +1,1050 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +General utils +""" + +import contextlib +import glob +import inspect +import logging +import math +import os +import platform +import random +import re +import shutil +import signal +import sys +import threading +import time +import urllib +from datetime import datetime +from itertools import repeat +from multiprocessing.pool import ThreadPool +from pathlib import Path +from subprocess import check_output +from typing import Optional +from zipfile import ZipFile + +import cv2 +import numpy as np +import pandas as pd +import pkg_resources as pkg +import torch +import torchvision +import yaml + +from utils.downloads import gsutil_getsize +from utils.metrics import box_iou, fitness + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[1] # YOLOv5 root directory +RANK = int(os.getenv('RANK', -1)) + +# Settings +DATASETS_DIR = ROOT.parent / 'datasets' # YOLOv5 datasets directory +NUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of YOLOv5 multiprocessing threads +AUTOINSTALL = str(os.getenv('YOLOv5_AUTOINSTALL', True)).lower() == 'true' # global auto-install mode +VERBOSE = str(os.getenv('YOLOv5_VERBOSE', True)).lower() == 'true' # global verbose mode +FONT = 'Arial.ttf' # https://ultralytics.com/assets/Arial.ttf + +torch.set_printoptions(linewidth=320, precision=5, profile='long') +np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5 
+pd.options.display.max_columns = 10 +cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader) +os.environ['NUMEXPR_MAX_THREADS'] = str(NUM_THREADS) # NumExpr max threads +os.environ['OMP_NUM_THREADS'] = '1' if platform.system() == 'darwin' else str(NUM_THREADS) # OpenMP (PyTorch and SciPy) + + +def is_ascii(s=''): + # Is string composed of all ASCII (no UTF) characters? (note str().isascii() introduced in python 3.7) + s = str(s) # convert list, tuple, None, etc. to str + return len(s.encode().decode('ascii', 'ignore')) == len(s) + + +def is_chinese(s='人工智能'): + # Is string composed of any Chinese characters? + return bool(re.search('[\u4e00-\u9fff]', str(s))) + + +def is_colab(): + # Is environment a Google Colab instance? + return 'COLAB_GPU' in os.environ + + +def is_kaggle(): + # Is environment a Kaggle Notebook? + return os.environ.get('PWD') == '/kaggle/working' and os.environ.get('KAGGLE_URL_BASE') == 'https://www.kaggle.com' + + +def is_docker() -> bool: + """Check if the process runs inside a docker container.""" + if Path("/.dockerenv").exists(): + return True + try: # check if docker is in control groups + with open("/proc/self/cgroup") as file: + return any("docker" in line for line in file) + except OSError: + return False + + +def is_writeable(dir, test=False): + # Return True if directory has write permissions, test opening a file with write permissions if test=True + if not test: + return os.access(dir, os.W_OK) # possible issues on Windows + file = Path(dir) / 'tmp.txt' + try: + with open(file, 'w'): # open file with write permissions + pass + file.unlink() # remove file + return True + except OSError: + return False + + +def set_logging(name=None, verbose=VERBOSE): + # Sets level and returns logger + if is_kaggle() or is_colab(): + for h in logging.root.handlers: + logging.root.removeHandler(h) # remove all handlers associated with the root logger object + rank = int(os.getenv('RANK', -1)) # rank in world 
for Multi-GPU trainings + level = logging.INFO if verbose and rank in {-1, 0} else logging.ERROR + log = logging.getLogger(name) + log.setLevel(level) + handler = logging.StreamHandler() + handler.setFormatter(logging.Formatter("%(message)s")) + handler.setLevel(level) + log.addHandler(handler) + + +set_logging() # run before defining LOGGER +LOGGER = logging.getLogger("yolov5") # define globally (used in train.py, val.py, detect.py, etc.) +if platform.system() == 'Windows': + for fn in LOGGER.info, LOGGER.warning: + setattr(LOGGER, fn.__name__, lambda x: fn(emojis(x))) # emoji safe logging + + +def user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'): + # Return path of user configuration directory. Prefer environment variable if exists. Make dir if required. + env = os.getenv(env_var) + if env: + path = Path(env) # use environment variable + else: + cfg = {'Windows': 'AppData/Roaming', 'Linux': '.config', 'Darwin': 'Library/Application Support'} # 3 OS dirs + path = Path.home() / cfg.get(platform.system(), '') # OS-specific config dir + path = (path if is_writeable(path) else Path('/tmp')) / dir # GCP and AWS lambda fix, only /tmp is writeable + path.mkdir(exist_ok=True) # make if required + return path + + +CONFIG_DIR = user_config_dir() # Ultralytics settings dir + + +class Profile(contextlib.ContextDecorator): + # Usage: @Profile() decorator or 'with Profile():' context manager + def __enter__(self): + self.start = time.time() + + def __exit__(self, type, value, traceback): + print(f'Profile results: {time.time() - self.start:.5f}s') + + +class Timeout(contextlib.ContextDecorator): + # Usage: @Timeout(seconds) decorator or 'with Timeout(seconds):' context manager + def __init__(self, seconds, *, timeout_msg='', suppress_timeout_errors=True): + self.seconds = int(seconds) + self.timeout_message = timeout_msg + self.suppress = bool(suppress_timeout_errors) + + def _timeout_handler(self, signum, frame): + raise TimeoutError(self.timeout_message) + + 
def __enter__(self): + if platform.system() != 'Windows': # not supported on Windows + signal.signal(signal.SIGALRM, self._timeout_handler) # Set handler for SIGALRM + signal.alarm(self.seconds) # start countdown for SIGALRM to be raised + + def __exit__(self, exc_type, exc_val, exc_tb): + if platform.system() != 'Windows': + signal.alarm(0) # Cancel SIGALRM if it's scheduled + if self.suppress and exc_type is TimeoutError: # Suppress TimeoutError + return True + + +class WorkingDirectory(contextlib.ContextDecorator): + # Usage: @WorkingDirectory(dir) decorator or 'with WorkingDirectory(dir):' context manager + def __init__(self, new_dir): + self.dir = new_dir # new dir + self.cwd = Path.cwd().resolve() # current dir + + def __enter__(self): + os.chdir(self.dir) + + def __exit__(self, exc_type, exc_val, exc_tb): + os.chdir(self.cwd) + + +def try_except(func): + # try-except function. Usage: @try_except decorator + def handler(*args, **kwargs): + try: + func(*args, **kwargs) + except Exception as e: + print(e) + + return handler + + +def threaded(func): + # Multi-threads a target function and returns thread. 
Usage: @threaded decorator + def wrapper(*args, **kwargs): + thread = threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True) + thread.start() + return thread + + return wrapper + + +def methods(instance): + # Get class/instance methods + return [f for f in dir(instance) if callable(getattr(instance, f)) and not f.startswith("__")] + + +def print_args(args: Optional[dict] = None, show_file=True, show_fcn=False): + # Print function arguments (optional args dict) + x = inspect.currentframe().f_back # previous frame + file, _, fcn, _, _ = inspect.getframeinfo(x) + if args is None: # get args automatically + args, _, _, frm = inspect.getargvalues(x) + args = {k: v for k, v in frm.items() if k in args} + try: + file = Path(file).resolve().relative_to(ROOT).with_suffix('') + except ValueError: + file = Path(file).stem + s = (f'{file}: ' if show_file else '') + (f'{fcn}: ' if show_fcn else '') + LOGGER.info(colorstr(s) + ', '.join(f'{k}={v}' for k, v in args.items())) + + +def init_seeds(seed=0, deterministic=False): + # Initialize random number generator (RNG) seeds https://pytorch.org/docs/stable/notes/randomness.html + # cudnn seed 0 settings are slower and more reproducible, else faster and less reproducible + import torch.backends.cudnn as cudnn + + if deterministic and check_version(torch.__version__, '1.12.0'): # https://github.com/ultralytics/yolov5/pull/8213 + torch.use_deterministic_algorithms(True) + os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8' + os.environ['PYTHONHASHSEED'] = str(seed) + + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + cudnn.benchmark, cudnn.deterministic = (False, True) if seed == 0 else (True, False) + torch.cuda.manual_seed(seed) + torch.cuda.manual_seed_all(seed) # for Multi-GPU, exception safe + + +def intersect_dicts(da, db, exclude=()): + # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values + return {k: v for k, v in da.items() if k in db and not any(x in 
k for x in exclude) and v.shape == db[k].shape} + + +def get_latest_run(search_dir='.'): + # Return path to most recent 'last.pt' in /runs (i.e. to --resume from) + last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True) + return max(last_list, key=os.path.getctime) if last_list else '' + + +def emojis(str=''): + # Return platform-dependent emoji-safe version of string + return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str + + +def file_age(path=__file__): + # Return days since last file update + dt = (datetime.now() - datetime.fromtimestamp(Path(path).stat().st_mtime)) # delta + return dt.days # + dt.seconds / 86400 # fractional days + + +def file_date(path=__file__): + # Return human-readable file modification date, i.e. '2021-3-26' + t = datetime.fromtimestamp(Path(path).stat().st_mtime) + return f'{t.year}-{t.month}-{t.day}' + + +def file_size(path): + # Return file/dir size (MB) + mb = 1 << 20 # bytes to MiB (1024 ** 2) + path = Path(path) + if path.is_file(): + return path.stat().st_size / mb + elif path.is_dir(): + return sum(f.stat().st_size for f in path.glob('**/*') if f.is_file()) / mb + else: + return 0.0 + + +def check_online(): + # Check internet connectivity + import socket + try: + socket.create_connection(("1.1.1.1", 443), 5) # check host accessibility + return True + except OSError: + return False + + +def git_describe(path=ROOT): # path must be a directory + # Return human-readable git description, i.e. 
v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe + try: + assert (Path(path) / '.git').is_dir() + return check_output(f'git -C {path} describe --tags --long --always', shell=True).decode()[:-1] + except Exception: + return '' + + +@try_except +@WorkingDirectory(ROOT) +def check_git_status(repo='ultralytics/yolov5'): + # YOLOv5 status check, recommend 'git pull' if code is out of date + url = f'https://github.com/{repo}' + msg = f', for updates see {url}' + s = colorstr('github: ') # string + assert Path('.git').exists(), s + 'skipping check (not a git repository)' + msg + assert check_online(), s + 'skipping check (offline)' + msg + + splits = re.split(pattern=r'\s', string=check_output('git remote -v', shell=True).decode()) + matches = [repo in s for s in splits] + if any(matches): + remote = splits[matches.index(True) - 1] + else: + remote = 'ultralytics' + check_output(f'git remote add {remote} {url}', shell=True) + check_output(f'git fetch {remote}', shell=True, timeout=5) # git fetch + branch = check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip() # checked out + n = int(check_output(f'git rev-list {branch}..{remote}/master --count', shell=True)) # commits behind + if n > 0: + pull = 'git pull' if remote == 'origin' else f'git pull {remote} master' + s += f"⚠️ YOLOv5 is out of date by {n} commit{'s' * (n > 1)}. Use `{pull}` or `git clone {url}` to update." + else: + s += f'up to date with {url} ✅' + LOGGER.info(s) + + +def check_python(minimum='3.7.0'): + # Check current python version vs. required python version + check_version(platform.python_version(), minimum, name='Python ', hard=True) + + +def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False, hard=False, verbose=False): + # Check version vs. 
required version + current, minimum = (pkg.parse_version(x) for x in (current, minimum)) + result = (current == minimum) if pinned else (current >= minimum) # bool + s = f'{name}{minimum} required by YOLOv5, but {name}{current} is currently installed' # string + if hard: + assert result, s # assert min requirements met + if verbose and not result: + LOGGER.warning(s) + return result + + +@try_except +def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True, cmds=()): + # Check installed dependencies meet YOLOv5 requirements (pass *.txt file or list of packages) + prefix = colorstr('red', 'bold', 'requirements:') + check_python() # check python version + if isinstance(requirements, (str, Path)): # requirements.txt file + file = Path(requirements) + assert file.exists(), f"{prefix} {file.resolve()} not found, check failed." + with file.open() as f: + requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(f) if x.name not in exclude] + else: # list or tuple of packages + requirements = [x for x in requirements if x not in exclude] + + n = 0 # number of packages updates + for i, r in enumerate(requirements): + try: + pkg.require(r) + except Exception: # DistributionNotFound or VersionConflict if requirements not met + s = f"{prefix} {r} not found and is required by YOLOv5" + if install and AUTOINSTALL: # check environment variable + LOGGER.info(f"{s}, attempting auto-update...") + try: + assert check_online(), f"'pip install {r}' skipped (offline)" + LOGGER.info(check_output(f'pip install "{r}" {cmds[i] if cmds else ""}', shell=True).decode()) + n += 1 + except Exception as e: + LOGGER.warning(f'{prefix} {e}') + else: + LOGGER.info(f'{s}. 
Please install and rerun your command.') + + if n: # if packages updated + source = file.resolve() if 'file' in locals() else requirements + s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \ + f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n" + LOGGER.info(s) + + +def check_img_size(imgsz, s=32, floor=0): + # Verify image size is a multiple of stride s in each dimension + if isinstance(imgsz, int): # integer i.e. img_size=640 + new_size = max(make_divisible(imgsz, int(s)), floor) + else: # list i.e. img_size=[640, 480] + imgsz = list(imgsz) # convert to list if tuple + new_size = [max(make_divisible(x, int(s)), floor) for x in imgsz] + if new_size != imgsz: + LOGGER.warning(f'WARNING: --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}') + return new_size + + +def check_imshow(): + # Check if environment supports image displays + try: + assert not is_docker(), 'cv2.imshow() is disabled in Docker environments' + assert not is_colab(), 'cv2.imshow() is disabled in Google Colab environments' + cv2.imshow('test', np.zeros((1, 1, 3))) + cv2.waitKey(1) + cv2.destroyAllWindows() + cv2.waitKey(1) + return True + except Exception as e: + LOGGER.warning(f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image displays\n{e}') + return False + + +def check_suffix(file='yolov5s.pt', suffix=('.pt',), msg=''): + # Check file(s) for acceptable suffix + if file and suffix: + if isinstance(suffix, str): + suffix = [suffix] + for f in file if isinstance(file, (list, tuple)) else [file]: + s = Path(f).suffix.lower() # file suffix + if len(s): + assert s in suffix, f"{msg}{f} acceptable suffix is {suffix}" + + +def check_yaml(file, suffix=('.yaml', '.yml')): + # Search/download YAML file (if necessary) and return path, checking suffix + return check_file(file, suffix) + + +def check_file(file, suffix=''): + # Search/download file (if necessary) and return path + 
check_suffix(file, suffix) # optional + file = str(file) # convert to str() + if Path(file).is_file() or not file: # exists + return file + elif file.startswith(('http:/', 'https:/')): # download + url = file # warning: Pathlib turns :// -> :/ + file = Path(urllib.parse.unquote(file).split('?')[0]).name # '%2F' to '/', split https://url.com/file.txt?auth + if Path(file).is_file(): + LOGGER.info(f'Found {url} locally at {file}') # file already exists + else: + LOGGER.info(f'Downloading {url} to {file}...') + torch.hub.download_url_to_file(url, file) + assert Path(file).exists() and Path(file).stat().st_size > 0, f'File download failed: {url}' # check + return file + elif file.startswith('clearml://'): # ClearML Dataset ID + assert 'clearml' in sys.modules, "ClearML is not installed, so cannot use ClearML dataset. Try running 'pip install clearml'." + return file + else: # search + files = [] + for d in 'data', 'models', 'utils': # search directories + files.extend(glob.glob(str(ROOT / d / '**' / file), recursive=True)) # find file + assert len(files), f'File not found: {file}' # assert file was found + assert len(files) == 1, f"Multiple files match '{file}', specify exact path: {files}" # assert unique + return files[0] # return file + + +def check_font(font=FONT, progress=False): + # Download font to CONFIG_DIR if necessary + font = Path(font) + file = CONFIG_DIR / font.name + if not font.exists() and not file.exists(): + url = "https://ultralytics.com/assets/" + font.name + LOGGER.info(f'Downloading {url} to {file}...') + torch.hub.download_url_to_file(url, str(file), progress=progress) + + +def check_dataset(data, autodownload=True): + # Download, check and/or unzip dataset if not found locally + + # Download (optional) + extract_dir = '' + if isinstance(data, (str, Path)) and str(data).endswith('.zip'): # i.e. 
gs://bucket/dir/coco128.zip + download(data, dir=f'{DATASETS_DIR}/{Path(data).stem}', unzip=True, delete=False, curl=False, threads=1) + data = next((DATASETS_DIR / Path(data).stem).rglob('*.yaml')) + extract_dir, autodownload = data.parent, False + + # Read yaml (optional) + if isinstance(data, (str, Path)): + with open(data, errors='ignore') as f: + data = yaml.safe_load(f) # dictionary + + # Checks + for k in 'train', 'val', 'nc': + assert k in data, f"data.yaml '{k}:' field missing ❌" + if 'names' not in data: + LOGGER.warning("data.yaml 'names:' field missing ⚠️, assigning default names 'class0', 'class1', etc.") + data['names'] = [f'class{i}' for i in range(data['nc'])] # default names + + # Resolve paths + path = Path(extract_dir or data.get('path') or '') # optional 'path' default to '.' + if not path.is_absolute(): + path = (ROOT / path).resolve() + for k in 'train', 'val', 'test': + if data.get(k): # prepend path + data[k] = str(path / data[k]) if isinstance(data[k], str) else [str(path / x) for x in data[k]] + + # Parse yaml + train, val, test, s = (data.get(x) for x in ('train', 'val', 'test', 'download')) + if val: + val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path + if not all(x.exists() for x in val): + LOGGER.info('\nDataset not found ⚠️, missing paths %s' % [str(x) for x in val if not x.exists()]) + if not s or not autodownload: + raise Exception('Dataset not found ❌') + t = time.time() + root = path.parent if 'path' in data else '..' # unzip directory i.e. 
'../' + if s.startswith('http') and s.endswith('.zip'): # URL + f = Path(s).name # filename + LOGGER.info(f'Downloading {s} to {f}...') + torch.hub.download_url_to_file(s, f) + Path(root).mkdir(parents=True, exist_ok=True) # create root + ZipFile(f).extractall(path=root) # unzip + Path(f).unlink() # remove zip + r = None # success + elif s.startswith('bash '): # bash script + LOGGER.info(f'Running {s} ...') + r = os.system(s) + else: # python script + r = exec(s, {'yaml': data}) # return None + dt = f'({round(time.time() - t, 1)}s)' + s = f"success ✅ {dt}, saved to {colorstr('bold', root)}" if r in (0, None) else f"failure {dt} ❌" + LOGGER.info(f"Dataset download {s}") + check_font('Arial.ttf' if is_ascii(data['names']) else 'Arial.Unicode.ttf', progress=True) # download fonts + return data # dictionary + + +def check_amp(model): + # Check PyTorch Automatic Mixed Precision (AMP) functionality. Return True on correct operation + from models.common import AutoShape, DetectMultiBackend + + def amp_allclose(model, im): + # All close FP32 vs AMP results + m = AutoShape(model, verbose=False) # model + a = m(im).xywhn[0] # FP32 inference + m.amp = True + b = m(im).xywhn[0] # AMP inference + return a.shape == b.shape and torch.allclose(a, b, atol=0.1) # close to 10% absolute tolerance + + prefix = colorstr('AMP: ') + device = next(model.parameters()).device # get model device + if device.type == 'cpu': + return False # AMP disabled on CPU + f = ROOT / 'data' / 'images' / 'bus.jpg' # image to check + im = f if f.exists() else 'https://ultralytics.com/images/bus.jpg' if check_online() else np.ones((640, 640, 3)) + try: + assert amp_allclose(model, im) or amp_allclose(DetectMultiBackend('yolov5n.pt', device), im) + LOGGER.info(f'{prefix}checks passed ✅') + return True + except Exception: + help_url = 'https://github.com/ultralytics/yolov5/issues/7908' + LOGGER.warning(f'{prefix}checks failed ❌, disabling Automatic Mixed Precision. 
See {help_url}') + return False + + +def yaml_load(file='data.yaml'): + # Single-line safe yaml loading + with open(file, errors='ignore') as f: + return yaml.safe_load(f) + + +def yaml_save(file='data.yaml', data={}): + # Single-line safe yaml saving + with open(file, 'w') as f: + yaml.safe_dump({k: str(v) if isinstance(v, Path) else v for k, v in data.items()}, f, sort_keys=False) + + +def url2file(url): + # Convert URL to filename, i.e. https://url.com/file.txt?auth -> file.txt + url = str(Path(url)).replace(':/', '://') # Pathlib turns :// -> :/ + return Path(urllib.parse.unquote(url)).name.split('?')[0] # '%2F' to '/', split https://url.com/file.txt?auth + + +def download(url, dir='.', unzip=True, delete=True, curl=False, threads=1, retry=3): + # Multi-threaded file download and unzip function, used in data.yaml for autodownload + def download_one(url, dir): + # Download 1 file + success = True + f = dir / Path(url).name # filename + if Path(url).is_file(): # exists in current path + Path(url).rename(f) # move to dir + elif not f.exists(): + LOGGER.info(f'Downloading {url} to {f}...') + for i in range(retry + 1): + if curl: + s = 'sS' if threads > 1 else '' # silent + r = os.system(f'curl -{s}L "{url}" -o "{f}" --retry 9 -C -') # curl download with retry, continue + success = r == 0 + else: + torch.hub.download_url_to_file(url, f, progress=threads == 1) # torch download + success = f.is_file() + if success: + break + elif i < retry: + LOGGER.warning(f'Download failure, retrying {i + 1}/{retry} {url}...') + else: + LOGGER.warning(f'Failed to download {url}...') + + if unzip and success and f.suffix in ('.zip', '.tar', '.gz'): + LOGGER.info(f'Unzipping {f}...') + if f.suffix == '.zip': + ZipFile(f).extractall(path=dir) # unzip + elif f.suffix == '.tar': + os.system(f'tar xf {f} --directory {f.parent}') # unzip + elif f.suffix == '.gz': + os.system(f'tar xfz {f} --directory {f.parent}') # unzip + if delete: + f.unlink() # remove zip + + dir = Path(dir) + 
dir.mkdir(parents=True, exist_ok=True) # make directory + if threads > 1: + pool = ThreadPool(threads) + pool.imap(lambda x: download_one(*x), zip(url, repeat(dir))) # multi-threaded + pool.close() + pool.join() + else: + for u in [url] if isinstance(url, (str, Path)) else url: + download_one(u, dir) + + +def make_divisible(x, divisor): + # Returns nearest x divisible by divisor + if isinstance(divisor, torch.Tensor): + divisor = int(divisor.max()) # to int + return math.ceil(x / divisor) * divisor + + +def clean_str(s): + # Cleans a string by replacing special characters with underscore _ + return re.sub(pattern="[|@#!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s) + + +def one_cycle(y1=0.0, y2=1.0, steps=100): + # lambda function for sinusoidal ramp from y1 to y2 https://arxiv.org/pdf/1812.01187.pdf + return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1 + + +def colorstr(*input): + # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. colorstr('blue', 'hello world') + *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string + colors = { + 'black': '\033[30m', # basic colors + 'red': '\033[31m', + 'green': '\033[32m', + 'yellow': '\033[33m', + 'blue': '\033[34m', + 'magenta': '\033[35m', + 'cyan': '\033[36m', + 'white': '\033[37m', + 'bright_black': '\033[90m', # bright colors + 'bright_red': '\033[91m', + 'bright_green': '\033[92m', + 'bright_yellow': '\033[93m', + 'bright_blue': '\033[94m', + 'bright_magenta': '\033[95m', + 'bright_cyan': '\033[96m', + 'bright_white': '\033[97m', + 'end': '\033[0m', # misc + 'bold': '\033[1m', + 'underline': '\033[4m'} + return ''.join(colors[x] for x in args) + f'{string}' + colors['end'] + + +def labels_to_class_weights(labels, nc=80): + # Get class weights (inverse frequency) from training labels + if labels[0] is None: # no labels loaded + return torch.Tensor() + + labels = np.concatenate(labels, 0) # labels.shape = (866643, 5) for COCO + classes = 
labels[:, 0].astype(int) # labels = [class xywh] + weights = np.bincount(classes, minlength=nc) # occurrences per class + + # Prepend gridpoint count (for uCE training) + # gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum() # gridpoints per image + # weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5 # prepend gridpoints to start + + weights[weights == 0] = 1 # replace empty bins with 1 + weights = 1 / weights # number of targets per class + weights /= weights.sum() # normalize + return torch.from_numpy(weights).float() + + +def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)): + # Produces image weights based on class_weights and image contents + # Usage: index = random.choices(range(n), weights=image_weights, k=1) # weighted image sample + class_counts = np.array([np.bincount(x[:, 0].astype(int), minlength=nc) for x in labels]) + return (class_weights.reshape(1, nc) * class_counts).sum(1) + + +def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper) + # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/ + # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n') + # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n') + # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco + # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet + return [ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, + 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90] + + +def xyxy2xywh(x): + # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = (x[:, 0] + x[:, 2]) 
/ 2 # x center + y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center + y[:, 2] = x[:, 2] - x[:, 0] # width + y[:, 3] = x[:, 3] - x[:, 1] # height + return y + + +def xywh2xyxy(x): + # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x + y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y + y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x + y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y + return y + + +def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0): + # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw # top left x + y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh # top left y + y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw # bottom right x + y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh # bottom right y + return y + + +def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0): + # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] normalized where xy1=top-left, xy2=bottom-right + if clip: + clip_coords(x, (h - eps, w - eps)) # warning: inplace clip + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = ((x[:, 0] + x[:, 2]) / 2) / w # x center + y[:, 1] = ((x[:, 1] + x[:, 3]) / 2) / h # y center + y[:, 2] = (x[:, 2] - x[:, 0]) / w # width + y[:, 3] = (x[:, 3] - x[:, 1]) / h # height + return y + + +def xyn2xy(x, w=640, h=640, padw=0, padh=0): + # Convert normalized segments into pixel segments, shape (n,2) + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = w * x[:, 0] + padw # top left x + y[:, 1] = h * x[:, 1] + padh # top left y + return y + + +def segment2box(segment, width=640, height=640): + # Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) 
to (xyxy) + x, y = segment.T # segment xy + inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height) + x, y, = x[inside], y[inside] + return np.array([x.min(), y.min(), x.max(), y.max()]) if any(x) else np.zeros((1, 4)) # xyxy + + +def segments2boxes(segments): + # Convert segment labels to box labels, i.e. (cls, xy1, xy2, ...) to (cls, xywh) + boxes = [] + for s in segments: + x, y = s.T # segment xy + boxes.append([x.min(), y.min(), x.max(), y.max()]) # cls, xyxy + return xyxy2xywh(np.array(boxes)) # cls, xywh + + +def resample_segments(segments, n=1000): + # Up-sample an (n,2) segment + for i, s in enumerate(segments): + s = np.concatenate((s, s[0:1, :]), axis=0) + x = np.linspace(0, len(s) - 1, n) + xp = np.arange(len(s)) + segments[i] = np.concatenate([np.interp(x, xp, s[:, i]) for i in range(2)]).reshape(2, -1).T # segment xy + return segments + + +def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None): + # Rescale coords (xyxy) from img1_shape to img0_shape + if ratio_pad is None: # calculate from img0_shape + gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new + pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding + else: + gain = ratio_pad[0][0] + pad = ratio_pad[1] + + coords[:, [0, 2]] -= pad[0] # x padding + coords[:, [1, 3]] -= pad[1] # y padding + coords[:, :4] /= gain + clip_coords(coords, img0_shape) + return coords + + +def clip_coords(boxes, shape): + # Clip bounding xyxy bounding boxes to image shape (height, width) + if isinstance(boxes, torch.Tensor): # faster individually + boxes[:, 0].clamp_(0, shape[1]) # x1 + boxes[:, 1].clamp_(0, shape[0]) # y1 + boxes[:, 2].clamp_(0, shape[1]) # x2 + boxes[:, 3].clamp_(0, shape[0]) # y2 + else: # np.array (faster grouped) + boxes[:, [0, 2]] = boxes[:, [0, 2]].clip(0, shape[1]) # x1, x2 + boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, shape[0]) # y1, y2 + + +def non_max_suppression(prediction, + 
conf_thres=0.25, + iou_thres=0.45, + classes=None, + agnostic=False, + multi_label=False, + labels=(), + max_det=300): + """Non-Maximum Suppression (NMS) on inference results to reject overlapping bounding boxes + + Returns: + list of detections, on (n,6) tensor per image [xyxy, conf, cls] + """ + + bs = prediction.shape[0] # batch size + nc = prediction.shape[2] - 5 # number of classes + xc = prediction[..., 4] > conf_thres # candidates + + # Checks + assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0' + assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0' + + # Settings + # min_wh = 2 # (pixels) minimum box width and height + max_wh = 7680 # (pixels) maximum box width and height + max_nms = 30000 # maximum number of boxes into torchvision.ops.nms() + time_limit = 0.3 + 0.03 * bs # seconds to quit after + redundant = True # require redundant detections + multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img) + merge = False # use merge-NMS + + t = time.time() + output = [torch.zeros((0, 6), device=prediction.device)] * bs + for xi, x in enumerate(prediction): # image index, image inference + # Apply constraints + # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height + x = x[xc[xi]] # confidence + + # Cat apriori labels if autolabelling + if labels and len(labels[xi]): + lb = labels[xi] + v = torch.zeros((len(lb), nc + 5), device=x.device) + v[:, :4] = lb[:, 1:5] # box + v[:, 4] = 1.0 # conf + v[range(len(lb)), lb[:, 0].long() + 5] = 1.0 # cls + x = torch.cat((x, v), 0) + + # If none remain process next image + if not x.shape[0]: + continue + + # Compute conf + x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf + + # Box (center x, center y, width, height) to (x1, y1, x2, y2) + box = xywh2xyxy(x[:, :4]) + + # Detections matrix nx6 (xyxy, conf, cls) + if multi_label: + i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T + x 
= torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1) + else: # best class only + conf, j = x[:, 5:].max(1, keepdim=True) + x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres] + + # Filter by class + if classes is not None: + x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)] + + # Apply finite constraint + # if not torch.isfinite(x).all(): + # x = x[torch.isfinite(x).all(1)] + + # Check shape + n = x.shape[0] # number of boxes + if not n: # no boxes + continue + elif n > max_nms: # excess boxes + x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence + + # Batched NMS + c = x[:, 5:6] * (0 if agnostic else max_wh) # classes + boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores + i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS + if i.shape[0] > max_det: # limit detections + i = i[:max_det] + if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean) + # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4) + iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix + weights = iou * scores[None] # box weights + x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes + if redundant: + i = i[iou.sum(1) > 1] # require redundancy + + output[xi] = x[i] + if (time.time() - t) > time_limit: + LOGGER.warning(f'WARNING: NMS time limit {time_limit:.3f}s exceeded') + break # time limit exceeded + + return output + + +def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer() + # Strip optimizer from 'f' to finalize training, optionally save as 's' + x = torch.load(f, map_location=torch.device('cpu')) + if x.get('ema'): + x['model'] = x['ema'] # replace model with ema + for k in 'optimizer', 'best_fitness', 'wandb_id', 'ema', 'updates': # keys + x[k] = None + x['epoch'] = -1 + x['model'].half() # to FP16 + for p in x['model'].parameters(): + p.requires_grad = False + torch.save(x, s or f) + mb = 
os.path.getsize(s or f) / 1E6 # filesize + LOGGER.info(f"Optimizer stripped from {f},{f' saved as {s},' if s else ''} {mb:.1f}MB") + + +def print_mutation(results, hyp, save_dir, bucket, prefix=colorstr('evolve: ')): + evolve_csv = save_dir / 'evolve.csv' + evolve_yaml = save_dir / 'hyp_evolve.yaml' + keys = ('metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', 'val/box_loss', + 'val/obj_loss', 'val/cls_loss') + tuple(hyp.keys()) # [results + hyps] + keys = tuple(x.strip() for x in keys) + vals = results + tuple(hyp.values()) + n = len(keys) + + # Download (optional) + if bucket: + url = f'gs://{bucket}/evolve.csv' + if gsutil_getsize(url) > (evolve_csv.stat().st_size if evolve_csv.exists() else 0): + os.system(f'gsutil cp {url} {save_dir}') # download evolve.csv if larger than local + + # Log to evolve.csv + s = '' if evolve_csv.exists() else (('%20s,' * n % keys).rstrip(',') + '\n') # add header + with open(evolve_csv, 'a') as f: + f.write(s + ('%20.5g,' * n % vals).rstrip(',') + '\n') + + # Save yaml + with open(evolve_yaml, 'w') as f: + data = pd.read_csv(evolve_csv) + data = data.rename(columns=lambda x: x.strip()) # strip keys + i = np.argmax(fitness(data.values[:, :4])) # + generations = len(data) + f.write('# YOLOv5 Hyperparameter Evolution Results\n' + f'# Best generation: {i}\n' + + f'# Last generation: {generations - 1}\n' + '# ' + ', '.join(f'{x.strip():>20s}' for x in keys[:7]) + + '\n' + '# ' + ', '.join(f'{x:>20.5g}' for x in data.values[i, :7]) + '\n\n') + yaml.safe_dump(data.loc[i][7:].to_dict(), f, sort_keys=False) + + # Print to screen + LOGGER.info(prefix + f'{generations} generations finished, current result:\n' + prefix + + ', '.join(f'{x.strip():>20s}' for x in keys) + '\n' + prefix + ', '.join(f'{x:20.5g}' + for x in vals) + '\n\n') + + if bucket: + os.system(f'gsutil cp {evolve_csv} {evolve_yaml} gs://{bucket}') # upload + + +def apply_classifier(x, model, img, im0): + # Apply a second stage classifier to YOLO 
outputs + # Example model = torchvision.models.__dict__['efficientnet_b0'](pretrained=True).to(device).eval() + im0 = [im0] if isinstance(im0, np.ndarray) else im0 + for i, d in enumerate(x): # per image + if d is not None and len(d): + d = d.clone() + + # Reshape and pad cutouts + b = xyxy2xywh(d[:, :4]) # boxes + b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # rectangle to square + b[:, 2:] = b[:, 2:] * 1.3 + 30 # pad + d[:, :4] = xywh2xyxy(b).long() + + # Rescale boxes from img_size to im0 size + scale_coords(img.shape[2:], d[:, :4], im0[i].shape) + + # Classes + pred_cls1 = d[:, 5].long() + ims = [] + for a in d: + cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])] + im = cv2.resize(cutout, (224, 224)) # BGR + + im = im[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 + im = np.ascontiguousarray(im, dtype=np.float32) # uint8 to float32 + im /= 255 # 0 - 255 to 0.0 - 1.0 + ims.append(im) + + pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1) # classifier prediction + x[i] = x[i][pred_cls1 == pred_cls2] # retain matching class detections + + return x + + +def increment_path(path, exist_ok=False, sep='', mkdir=False): + # Increment file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc. 
+ path = Path(path) # os-agnostic + if path.exists() and not exist_ok: + path, suffix = (path.with_suffix(''), path.suffix) if path.is_file() else (path, '') + + # Method 1 + for n in range(2, 9999): + p = f'{path}{sep}{n}{suffix}' # increment path + if not os.path.exists(p): # + break + path = Path(p) + + # Method 2 (deprecated) + # dirs = glob.glob(f"{path}{sep}*") # similar paths + # matches = [re.search(rf"{path.stem}{sep}(\d+)", d) for d in dirs] + # i = [int(m.groups()[0]) for m in matches if m] # indices + # n = max(i) + 1 if i else 2 # increment number + # path = Path(f"{path}{sep}{n}{suffix}") # increment path + + if mkdir: + path.mkdir(parents=True, exist_ok=True) # make directory + + return path + + +# OpenCV Chinese-friendly functions ------------------------------------------------------------------------------------ +imshow_ = cv2.imshow # copy to avoid recursion errors + + +def imread(path, flags=cv2.IMREAD_COLOR): + return cv2.imdecode(np.fromfile(path, np.uint8), flags) + + +def imwrite(path, im): + try: + cv2.imencode(Path(path).suffix, im)[1].tofile(path) + return True + except Exception: + return False + + +def imshow(path, im): + imshow_(path.encode('unicode_escape').decode(), im) + + +cv2.imread, cv2.imwrite, cv2.imshow = imread, imwrite, imshow # redefine + +# Variables ------------------------------------------------------------------------------------------------------------ +NCOLS = 0 if is_docker() else shutil.get_terminal_size().columns # terminal window size for tqdm diff --git a/src/目标识别代码/utils/google_app_engine/Dockerfile b/src/目标识别代码/utils/google_app_engine/Dockerfile new file mode 100644 index 00000000..0155618f --- /dev/null +++ b/src/目标识别代码/utils/google_app_engine/Dockerfile @@ -0,0 +1,25 @@ +FROM gcr.io/google-appengine/python + +# Create a virtualenv for dependencies. This isolates these packages from +# system-level packages. +# Use -p python3 or -p python3.7 to select python version. Default is version 2. 
+RUN virtualenv /env -p python3 + +# Setting these environment variables are the same as running +# source /env/bin/activate. +ENV VIRTUAL_ENV /env +ENV PATH /env/bin:$PATH + +RUN apt-get update && apt-get install -y python-opencv + +# Copy the application's requirements.txt and run pip to install all +# dependencies into the virtualenv. +ADD requirements.txt /app/requirements.txt +RUN pip install -r /app/requirements.txt + +# Add the application source code. +ADD . /app + +# Run a WSGI server to serve the application. gunicorn must be declared as +# a dependency in requirements.txt. +CMD gunicorn -b :$PORT main:app diff --git a/src/目标识别代码/utils/google_app_engine/additional_requirements.txt b/src/目标识别代码/utils/google_app_engine/additional_requirements.txt new file mode 100644 index 00000000..42d7ffc0 --- /dev/null +++ b/src/目标识别代码/utils/google_app_engine/additional_requirements.txt @@ -0,0 +1,4 @@ +# add these requirements in your app on top of the existing ones +pip==21.1 +Flask==1.0.2 +gunicorn==19.9.0 diff --git a/src/目标识别代码/utils/google_app_engine/app.yaml b/src/目标识别代码/utils/google_app_engine/app.yaml new file mode 100644 index 00000000..5056b7c1 --- /dev/null +++ b/src/目标识别代码/utils/google_app_engine/app.yaml @@ -0,0 +1,14 @@ +runtime: custom +env: flex + +service: yolov5app + +liveness_check: + initial_delay_sec: 600 + +manual_scaling: + instances: 1 +resources: + cpu: 1 + memory_gb: 4 + disk_size_gb: 20 diff --git a/src/目标识别代码/utils/loggers/__init__.py b/src/目标识别代码/utils/loggers/__init__.py new file mode 100644 index 00000000..8ec846f8 --- /dev/null +++ b/src/目标识别代码/utils/loggers/__init__.py @@ -0,0 +1,308 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Logging utils +""" + +import os +import warnings +from pathlib import Path + +import pkg_resources as pkg +import torch +from torch.utils.tensorboard import SummaryWriter + +from utils.general import colorstr, cv2 +from utils.loggers.clearml.clearml_utils import ClearmlLogger +from 
utils.loggers.wandb.wandb_utils import WandbLogger +from utils.plots import plot_images, plot_results +from utils.torch_utils import de_parallel + +LOGGERS = ('csv', 'tb', 'wandb', 'clearml') # *.csv, TensorBoard, Weights & Biases, ClearML +RANK = int(os.getenv('RANK', -1)) + +try: + import wandb + + assert hasattr(wandb, '__version__') # verify package import not local dir + if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.2') and RANK in {0, -1}: + try: + wandb_login_success = wandb.login(timeout=30) + except wandb.errors.UsageError: # known non-TTY terminal issue + wandb_login_success = False + if not wandb_login_success: + wandb = None +except (ImportError, AssertionError): + wandb = None + +try: + import clearml + + assert hasattr(clearml, '__version__') # verify package import not local dir +except (ImportError, AssertionError): + clearml = None + + +class Loggers(): + # YOLOv5 Loggers class + def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, include=LOGGERS): + self.save_dir = save_dir + self.weights = weights + self.opt = opt + self.hyp = hyp + self.logger = logger # for printing results to console + self.include = include + self.keys = [ + 'train/box_loss', + 'train/obj_loss', + 'train/cls_loss', # train loss + 'metrics/precision', + 'metrics/recall', + 'metrics/mAP_0.5', + 'metrics/mAP_0.5:0.95', # metrics + 'val/box_loss', + 'val/obj_loss', + 'val/cls_loss', # val loss + 'x/lr0', + 'x/lr1', + 'x/lr2'] # params + self.best_keys = ['best/epoch', 'best/precision', 'best/recall', 'best/mAP_0.5', 'best/mAP_0.5:0.95'] + for k in LOGGERS: + setattr(self, k, None) # init empty logger dictionary + self.csv = True # always log to csv + + # Messages + if not wandb: + prefix = colorstr('Weights & Biases: ') + s = f"{prefix}run 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs in Weights & Biases" + self.logger.info(s) + if not clearml: + prefix = colorstr('ClearML: ') + s = f"{prefix}run 'pip 
install clearml' to automatically track, visualize and remotely train YOLOv5 🚀 in ClearML" + self.logger.info(s) + + # TensorBoard + s = self.save_dir + if 'tb' in self.include and not self.opt.evolve: + prefix = colorstr('TensorBoard: ') + self.logger.info(f"{prefix}Start with 'tensorboard --logdir {s.parent}', view at http://localhost:6006/") + self.tb = SummaryWriter(str(s)) + + # W&B + if wandb and 'wandb' in self.include: + wandb_artifact_resume = isinstance(self.opt.resume, str) and self.opt.resume.startswith('wandb-artifact://') + run_id = torch.load(self.weights).get('wandb_id') if self.opt.resume and not wandb_artifact_resume else None + self.opt.hyp = self.hyp # add hyperparameters + self.wandb = WandbLogger(self.opt, run_id) + # temp warn. because nested artifacts not supported after 0.12.10 + if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.11'): + s = "YOLOv5 temporarily requires wandb version 0.12.10 or below. Some features may not work as expected." + self.logger.warning(s) + else: + self.wandb = None + + # ClearML + if clearml and 'clearml' in self.include: + self.clearml = ClearmlLogger(self.opt, self.hyp) + else: + self.clearml = None + + def on_train_start(self): + # Callback runs on train start + pass + + def on_pretrain_routine_end(self): + # Callback runs on pre-train routine end + paths = self.save_dir.glob('*labels*.jpg') # training labels + if self.wandb: + self.wandb.log({"Labels": [wandb.Image(str(x), caption=x.name) for x in paths]}) + if self.clearml: + pass # ClearML saves these images automatically using hooks + + def on_train_batch_end(self, ni, model, imgs, targets, paths, plots): + # Callback runs on train batch end + # ni: number integrated batches (since train start) + if plots: + if ni == 0 and not self.opt.sync_bn and self.tb: + log_tensorboard_graph(self.tb, model, imgsz=list(imgs.shape[2:4])) + if ni < 3: + f = self.save_dir / f'train_batch{ni}.jpg' # filename + plot_images(imgs, targets, paths, f) + if 
(self.wandb or self.clearml) and ni == 10: + files = sorted(self.save_dir.glob('train*.jpg')) + if self.wandb: + self.wandb.log({'Mosaics': [wandb.Image(str(f), caption=f.name) for f in files if f.exists()]}) + if self.clearml: + self.clearml.log_debug_samples(files, title='Mosaics') + + def on_train_epoch_end(self, epoch): + # Callback runs on train epoch end + if self.wandb: + self.wandb.current_epoch = epoch + 1 + + def on_val_image_end(self, pred, predn, path, names, im): + # Callback runs on val image end + if self.wandb: + self.wandb.val_one_image(pred, predn, path, names, im) + if self.clearml: + self.clearml.log_image_with_boxes(path, pred, names, im) + + def on_val_end(self): + # Callback runs on val end + if self.wandb or self.clearml: + files = sorted(self.save_dir.glob('val*.jpg')) + if self.wandb: + self.wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in files]}) + if self.clearml: + self.clearml.log_debug_samples(files, title='Validation') + + def on_fit_epoch_end(self, vals, epoch, best_fitness, fi): + # Callback runs at the end of each fit (train+val) epoch + x = dict(zip(self.keys, vals)) + if self.csv: + file = self.save_dir / 'results.csv' + n = len(x) + 1 # number of cols + s = '' if file.exists() else (('%20s,' * n % tuple(['epoch'] + self.keys)).rstrip(',') + '\n') # add header + with open(file, 'a') as f: + f.write(s + ('%20.5g,' * n % tuple([epoch] + vals)).rstrip(',') + '\n') + + if self.tb: + for k, v in x.items(): + self.tb.add_scalar(k, v, epoch) + elif self.clearml: # log to ClearML if TensorBoard not used + for k, v in x.items(): + title, series = k.split('/') + self.clearml.task.get_logger().report_scalar(title, series, v, epoch) + + if self.wandb: + if best_fitness == fi: + best_results = [epoch] + vals[3:7] + for i, name in enumerate(self.best_keys): + self.wandb.wandb_run.summary[name] = best_results[i] # log best results in the summary + self.wandb.log(x) + self.wandb.end_epoch(best_result=best_fitness == fi) + 
+ if self.clearml: + self.clearml.current_epoch_logged_images = set() # reset epoch image limit + self.clearml.current_epoch += 1 + + def on_model_save(self, last, epoch, final_epoch, best_fitness, fi): + # Callback runs on model save event + if self.wandb: + if ((epoch + 1) % self.opt.save_period == 0 and not final_epoch) and self.opt.save_period != -1: + self.wandb.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi) + + if self.clearml: + if ((epoch + 1) % self.opt.save_period == 0 and not final_epoch) and self.opt.save_period != -1: + self.clearml.task.update_output_model(model_path=str(last), + model_name='Latest Model', + auto_delete_file=False) + + def on_train_end(self, last, best, plots, epoch, results): + # Callback runs on training end + if plots: + plot_results(file=self.save_dir / 'results.csv') # save results.png + files = ['results.png', 'confusion_matrix.png', *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))] + files = [(self.save_dir / f) for f in files if (self.save_dir / f).exists()] # filter + self.logger.info(f"Results saved to {colorstr('bold', self.save_dir)}") + + if self.tb and not self.clearml: # These images are already captured by ClearML by now, we don't want doubles + for f in files: + self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats='HWC') + + if self.wandb: + self.wandb.log(dict(zip(self.keys[3:10], results))) + self.wandb.log({"Results": [wandb.Image(str(f), caption=f.name) for f in files]}) + # Calling wandb.log. 
TODO: Refactor this into WandbLogger.log_model + if not self.opt.evolve: + wandb.log_artifact(str(best if best.exists() else last), + type='model', + name=f'run_{self.wandb.wandb_run.id}_model', + aliases=['latest', 'best', 'stripped']) + self.wandb.finish_run() + + if self.clearml: + # Save the best model here + if not self.opt.evolve: + self.clearml.task.update_output_model(model_path=str(best if best.exists() else last), + name='Best Model') + + def on_params_update(self, params): + # Update hyperparams or configs of the experiment + # params: A dict containing {param: value} pairs + if self.wandb: + self.wandb.wandb_run.config.update(params, allow_val_change=True) + + +class GenericLogger: + """ + YOLOv5 General purpose logger for non-task specific logging + Usage: from utils.loggers import GenericLogger; logger = GenericLogger(...) + Arguments + opt: Run arguments + console_logger: Console logger + include: loggers to include + """ + + def __init__(self, opt, console_logger, include=('tb', 'wandb')): + # init default loggers + self.save_dir = opt.save_dir + self.include = include + self.console_logger = console_logger + if 'tb' in self.include: + prefix = colorstr('TensorBoard: ') + self.console_logger.info( + f"{prefix}Start with 'tensorboard --logdir {self.save_dir.parent}', view at http://localhost:6006/") + self.tb = SummaryWriter(str(self.save_dir)) + + if wandb and 'wandb' in self.include: + self.wandb = wandb.init(project="YOLOv5-Classifier" if opt.project == "runs/train" else opt.project, + name=None if opt.name == "exp" else opt.name, + config=opt) + else: + self.wandb = None + + def log_metrics(self, metrics_dict, epoch): + # Log metrics dictionary to all loggers + if self.tb: + for k, v in metrics_dict.items(): + self.tb.add_scalar(k, v, epoch) + + if self.wandb: + self.wandb.log(metrics_dict, step=epoch) + + def log_images(self, files, name='Images', epoch=0): + # Log images to all loggers + files = [Path(f) for f in (files if isinstance(files, 
(tuple, list)) else [files])] # to Path + files = [f for f in files if f.exists()] # filter by exists + + if self.tb: + for f in files: + self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats='HWC') + + if self.wandb: + self.wandb.log({name: [wandb.Image(str(f), caption=f.name) for f in files]}, step=epoch) + + def log_graph(self, model, imgsz=(640, 640)): + # Log model graph to all loggers + if self.tb: + log_tensorboard_graph(self.tb, model, imgsz) + + def log_model(self, model_path, epoch=0, metadata={}): + # Log model to all loggers + if self.wandb: + art = wandb.Artifact(name=f"run_{wandb.run.id}_model", type="model", metadata=metadata) + art.add_file(str(model_path)) + wandb.log_artifact(art) + + +def log_tensorboard_graph(tb, model, imgsz=(640, 640)): + # Log model graph to TensorBoard + try: + p = next(model.parameters()) # for device, type + imgsz = (imgsz, imgsz) if isinstance(imgsz, int) else imgsz # expand + im = torch.zeros((1, 3, *imgsz)).to(p.device).type_as(p) # input image + with warnings.catch_warnings(): + warnings.simplefilter('ignore') # suppress jit trace warning + tb.add_graph(torch.jit.trace(de_parallel(model), im, strict=False), []) + except Exception: + print('WARNING: TensorBoard graph visualization failure') diff --git a/src/目标识别代码/utils/loggers/__pycache__/__init__.cpython-36.pyc b/src/目标识别代码/utils/loggers/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..175ef21000c8edceef3c72eb4e222a6a4a20818f GIT binary patch literal 11519 zcmcgyTaX;rS?=5PTz9m)TJ5e@E2(WOmRCq4S&p4#BZO`Ff{na3vST?@GM(Nzy|djr zGd()pyV{+a4X%uGfgsz2B!DkTD3U-zNPsGmP)R6?Cl>`2#RCuh3a&&|Jn#Y!s#L-E z|7T`)XC+slD441K&;6Xg&-w4C9~c`ey!(TnF8}=ln)c7yuHPK;595hn)iq6MRZZx^ zXy{d)Q=@7iH5+Ers#>~g%T#S{%T}{UtwyeyujaWe(Ots|C( zn!Ccg*9xy|@|LdAUrbeNXMREZ#F%-by)_9FBgn7SBn~vj1!rf zwT-jX<3hvtWV1mNp@*IpFMBPq+%FnwvE7IgH)wiwKTbG`krg^?CpWx+IC*dBF(JX(LW$H!hIzTL~UW@K_01Qq!s46`EVTRp|>rb zezLd;_gbC&g 
z4+=1D%MjXj5jEJ+&~VLUj?!+iNBdk%4u$hcF~nSy6Gw@`G22&#)8B<7Y8P6z(71mY zt?!7tFbCe1^2t%25A*Yk<$lky(1^K~I^{%qLF@E#wCOgYI3ARv<#422Ys9KN(ex7; z)Z)`^>DK}x@=keBg&J^4XWyXQeB?RzOy$AO)SimPGnEfL*ePvzjh%J5G+dQS!!K&b`i~ zffxJnqO=oRp)rZP0Q5eoI2Hr>LQTna-5d) zuQ^`gtR_i&@$~6Ng!f;K;$-omGiM$;ElFN-Uc(-)$S8gj;oq_gZdqH7iB`KakTE54VkQ}3p?<+LIo zA4wcS?D(5)zn1u-BFn&M!AWzmx8b{Zi8OmnopfqO?Ibl1&~@nYbD~vQrq8%8XJCULu2@@ zM4HrYLKX3@rv-2f(Ne31E~H|bep*0TCNXV&nn@yATTQJ-PT3#HVG{lWIR}ZM8 z8Jw1@kgD1sS)A85Gt3hU}CYH#B!t{b$1#C0p}tu!|%oW`W_#{e|l z-uWAbe&>HqAJ+?bY}3+n$d&YdYTEiVYUcE^IjJAf??YSJm_=*Zuu(HS6;xGdk0Qg+{v!m!od9 z3NQB%G7!#C1_J6}8ww^gz|yrvK1%BY-4_02tda;tEz5yVL?YPQd>(~|U}w$^!I*M5NJ)UHX`KS9fP z{g$YlV-XUcM`kd7A;ScmTJ=V>tne^FM~ayZK9MxjhUpjYK_Z>OuHR{utAtSjKr>1p zv^hMTqdx<)p&gXRFk(VJvcE#6Ys(*Yab#?gV4Mt_wG`WmXwy?W8tc>j2<{=H%PZk!H1xqU1gNuRoT-*Z8^GZ3a^uvT~2|Fwb zwnbnKp^yAJ!LX!kED#H9sa?cEqcI08r5t*U-7sM4lh!XUNh0~1bVfIc8gcTOpjnSQ zH%*kPMRiQeUINu>5qaID&Fd7np6RWceI|-Mm;rYtxcktZ3C;mk>!H?zpiK18!1gG@ zvqlL|5-TiZAU!uh1+fyXA3zpzCJ7Bh(@ebTwS6UTb3j2MPFg3mp3ppLS9j`(F)Q`GZOGGEIJ2x37&-TkL7t9m?4KGmQQeg zCBVQvTaZo;#SI@Cz3LWv$0YtWh|5`s#hgB=m-Q(Upo#FEY2{NQhJ#9Nv(jO zG&xD+Kf`$!t_n!XSd!yt#UYVaOvh^LlGLBe<{4^9jHVeby*KXMOt}9VddEbVngLF| z9nWFM8i_diQA!WvIgqQ8OlgQaI4*`_uU>|k%aF=UH&Lq-+Y!_C1%F@kRmWhRx)G!l zdxb1~s#A1=Ty{xYyD*w@h_-q=3zA3RtN>f7Me-@lkjRWvBWUh@>2VwwEubdDE58cl zVjcgUpj8S13yRnqJ1Sw1cF4-U(G0nIVl(xg>}Ts3(+ zKD7*W!jDWg;t!_MuRsPTTEbc%QW0^UZMejtDc=CZu|X}^Nd~lu!3i zwj5z~vr}4<6`4AZpMlt)hAI#?dS}r)2X`8wfu8Z&890jhumBa1S7W*c#(;ZwM$AAB zQCZjEc5A<^UD946QYy-KHp6ZvEcG z#bN(SzkeazCkj0srP4M+0AV>C4<~wBIN2?A$9NX--qrLnt+*d^bcj>e;7ZFY3R4On zkP^jPl{ZhYyM3SstLWu<%(-uSyj$+>JIJHM>2Rhyeh!ZP)pKe-xZ2o1=K1~MK{YcJ zp{gN_-%93sG`8#>B4}xFvW*WwGRoN+$MT|&L1(6>Xa;etIyzlIp_@pGByQQB@ z^+Kw9oh&LV57tlK(oah|;hSVNEa+8sM%k`zBnyXAO9VBz9-W{q=g~p=5+qtvv*EYo z(CGI5a2ij?bM3eehU#Nj&BjgL0!XdiqnE8gd4*L^Q3Y=GI6^RB&n5n@mgAd z3W3v3-c5B(LTJil(3z!l-fzMGkRF0&hiTHhas^=dLkFsm8f&g9ItnGq;mY_)_0WiT 
zS4)U~_)LBTQ!PyNWg^5z%-fW*7w)bENz0ER5?2DbNNZZ8&r-c5&!D6jzC!i-2JdvBVl1n4fcVx;Dl{2&pKZhkiK$?8Ii(oOUMwmO(nlNCyJ>4~7AwG?E)QRkx$$5(S z=piC#P*4UeKd7NK*o!xmipt1W?=aJ=JkzWFnR1wk@)_8L*(67%m5RKPt;yt@S8AEf zJy0TtOk6fJI;D#^_c(E$A+zI&1v4#hJtCb@T>U($uj)93I$*an{KUuZHvF6aiJyE0 zZw&i|2uYbuMRu}b4F!=%^B_C5ki46EkVzyXfOP+Q8`{Hn;RCeSlYXCj&@$Ac4G&aL ztd$m5f)^G5*xyy$Ry?h`OHZ+Elk~V+1*H zV~HioG`eTJsl5&HuG3r@`4*rJdUy-M5+hzdyPc)_Fx$_cu`>=#ocDk1c+UVlZjQ?MNv!gJa2572)i zoJ7dpNlGE&nq4ED>S?$X_-WT@v42QGd7l43I1Nwb;M*nz=EZ#76o&~9Zky@BeMXLA zjUOA%NjW>54G#|I)#VGrd5?&rL)+q>r4a!Ok9!mQe`_do+pYSosan)pS?72%FEvV7n(YrBdDs6>v+$pG-dR3N7+Dm= zN^K<_5a?rxhc|-ULrjoVYugtPeO9ssxkX-PnuT>s5#UbY(V=w2ZhstY+yVJdQ~omm za20-?@}DK}8w73;cm-f#PEy=JofwDS94gYuvsL7_`B6jK}Sg6|3C z`m%f__fh%;RtT`!K$I@;0!T|FYX|W=CJ!m8-Uvjs00|nzt1dQz#jot4Wg*AfSzf0p z$cmMh2)qc8X8S5u8NW|ZJ&D@^k+HxnLEsKa{~ff(Cjd0Vf)$K#n?7fhp-|64kvhm1 zpjHjEpw7slG_QB2?nICyJ|}I|1)70-qxAWdgrX zU`O|Hj`@8G4fHxO$%mXVeLiR9a=GzbCYR0Sb4PNe++j(5YIue}6S;@+#E${&Tsc-v zC=t41q^m}C#hAgBAzd!&xNMvjW1@^)R^$V1#h}YPlyYJM;&mn+dlCW@mqY4!y(3Vf z1qZ)=sGnQChSOe`?I`vg&@D_;2a*Kg`_?J4S)CZ6`(P!gaU%lTJpLmwqL7Quij11- z4!EL*;!cGw4-#i&-472Y{`t}R`zn>n0?+q|ti$kaB{7!>L)-=|Qab!y!qpFAFnhXz zisA?-s+Pxy3#y{O;&8hDRl?4c1AHM-`)%A;$pYf>8dDcCBm(GR>i-fl6hFh&9?TIy zG8J+0D#+{8*KD}?h!n%6jKmmpf_obiDjG68+m=y?Yaitpd<7e~Fs8&WUwV{Bk4{Nq2fdViO5C6XF1c4K zh=p67Ad%moGN}fHrHRSZmC83MM-xlB#Ms4^%#g&}|A8ka$*(~U()C6G5jeW27>9J6 zhLkMfwqS&AcP=Lg8~TmV0?A@JMR{`b7)}RfLf%VWc)W5vN{TX(bl!PP?O?C+Eh$GevMbNq1 z@1Fzr_k-@_ZB4-t?;IR`;a$B``zikVf?bsnW+0oKnc)p&Nbp}r*rcl`xW!m8dWM|s z>O`P*ovm+ts6#!}wFzITm{P*hMk_3BViVd9E6gOyXTlvndrcZF?LyzuHc9ZYrLDpU zdHF?*BQ`PcMZ<9R0AE3XB?eLT;?juWL;Oquq?jZx8+j$Vd?6=ow^64{vMYDnb9?rb ztvP%mG_>YU-oA%yLd18vOBq3l$cj-S+ksJHJ6VP2yL-@j{J(+L)DBw8LX@x3C|$me zWMpX@6}+^A3@`mNuA=@I(ZNGISOJx9VL|x{z(P?Tqx>p?rwK6S|1RZ-wAlEPZ&L0a z0(%)jSwtiW6cY?>BXwX*{{^CGbs7_d1-ZSNaW`E=vPDG3!+Ray%~wDPwE^o>N+fgv zPfX{#=Nq&y-`z->5 zE>P#){cYsa9HfYcK!Mkf*j96o^uH%aO)_TRMyG0y>}Lu|s1tjOCh1G*k0YCb37 
z6ZY|*osgeE!`-w8Bw;98Fb^B`PW!;Yf~9ZtcbVt{boJ+y81malSZ1O5Pbg0S&EKUY zFTYAvUnB5!fHdp6BC5Hrvi|;<>c2zaPYDn{yKvmG@BRP{|B8o$&AFUq6ee@|LaC6E z)G|spbVB>_Fx`9wnXQkL&>?jJGr`2I050JmH60idke~4Mjcwdk;;u5=HGT)C1kT6| z5q}7CU6Tw8G7oy%YZaVd9!3J{IGkF-n6VIX2RdEECSWq&FzZ?m(r_n;jL`7Br6uEQ z1^I^h1TdMP5VfwU&j6VYa{U+;VGQ4=-ioCE1iVa}9JE@}Idl1uOG{5JJ-O)Y`eMal z+8loU!q*YbiWfBSK?qKTyjAv07bbCU?QbS&fem$puw=|)AT@@T5Ju6opaPxQqJ$T=$GyJVeg+KJD?7AUU-@J^e`Z{?8UPIYiM4Z)oR72k)uJpff z8l*0_^5-n+(b1k9ovx@Dwd$26Y>TRwx+5i4X3smFP}PiMe^NRP<$V2!2HFUZ`>hSR zkE&_s((I}i)Av1T`H>iLXS(BhT*^qwpQA1l)FlJFgI0CFLW2u#yk5i3#JIht&WOsh zzrrKa;#2IQsA>3Q$R1$E<>T9RY1;;@Mbmdecmm6e} cgGuSIlRX8}u+bAK*H2N-EdQ$ZaQXay0UH?-`2YX_ literal 0 HcmV?d00001 diff --git a/src/目标识别代码/utils/loggers/__pycache__/__init__.cpython-38.pyc b/src/目标识别代码/utils/loggers/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a1e49ea5ed86aa87431b12e5a02ce80c3c2cb94b GIT binary patch literal 11337 zcmb_iS&$q_d9JIg&uPt!Ml;grsue=oSxc=0w1B*prM)-M&I)NkONZ7?PiIX}_4K8z znvvAeV+fDd;3dQ^7zlgUYkI<9BOhQpunEAdAM6JnjGr8Ss8@Rf6yXO)_;^rY9`}?d|ucYX{a;O61qf#l-8Tbp1VI$D+$rXB71Z#i$s&V-~%uMsuHI zpyZ+2p~Z~6ZyNlI@uK%}qiMZl_}=Sg=nAVijg|+{@;&~(d22BPOh@`{QK#E>_9>bU z7Y~D`!=wp)A177Y`pyme9@c%IF~m+WaoZ3Rt5)&7$n6r7$W0=5L`;j_V$W@}cvM(d zjG4Wi9hr-*YBgw9y`?Cqhnaiay4kyC>|82G3o~Xsa%HK}D9c+{Wf1vt#)?ywR=p*| zNXB+$`7~QRQTNNTQ70m_Fyrjia#PIpvsRpK*IQ93Xq2md80S<*`r%SN3fWfprFL1C z>vg|A;{av299z*`oVY<8ak8Ru$Ij&ovmd_qQ~cs=skH3NFlaSPrH*^DaO!m7^f?P; zOE8!r#{ljf008!66g2$SQuK5M`At6;x$}5J3E&&xMniETxe|Zhp2toJ8!P1qMlj;$cQZ7DUlN+c&9~P?7%xie!x2`#>Gy&b7Dg5!h1waiYdJF!oGT^BGxUJ*9wB#oqx zw2`jr2Ph3}(UoMB?i$^sFjj5Txc-vZiMmXb4Zje9=2deg)lIz# z?s>~vLA#pM8|kLkOw)++;kVFYa@b;OYYVhCR?=cO-ibw5{seEUwj=xrdhJoYG&W+N z?xt&_c<+77QMkvV@osw6T;;5HMp>MVFN%E{57W_v!nG?J=ZR=NqI11Tv4726$>JDTRZn_cDVW~05meNnasOiYP`{qz2Y z@cQ#Vpy%Iog`wK#G1A-OA>hC)nT`%Be88_T=KC%4!oZFnn(gExSq_>f=2|yP^;Q@T z@~yerrhKIy>ik&4k7Q5@Pqd|93CQuC{GbRWI5eFdgM8z{rP9g5na;$Pg0m+JA3D>? 
zEtl)-CvU>y&<-?quftFgoG4Ta|@QUZvM5%Bp;yoSe!GXc^m> zQ-r+O8NccW)rBbZj(Cp;<S6+%*jdB!l z!nZuEVP(;SbnupgaH(7mI==Vu7cRcAe8&6tue|Xo4~Rk_@wSfBNh>+^{6!4tzuSO) zQS+c7k8K!_qf5WhihNBFhTBL2vi-3n&+`3|7yV|~l8?8_Qk?bfKXfH3%gDPCL<`>0 zNL9_T$~%6%-l~c~qRCMX=il(kk+%>)xqbFn@m^))?m5u{l zFkh}jXcnhbQ7I6eq>_!NPIW4M;v&D%ZppF?K(+L*F9ndpdY!$#MD=u+lU_@DbAG*b zqu^a>HGFU0FGovk-6-Gknk^9DlGudM^KZ8OO5}@z+yQ!Kyf_t>m;DkZ5hrixosR8R zJBsavTkY7z{GcP^WYDbCmxLcXi~g-JPLoecDw(|a!ZXi2b@@u14E?AaMN(2Cs>#X( zQn3>>=UZy9*!7oN^<_VHp+Nj*Bpue<&`fI8;|xv=d8t`J6Kb4>A7_x4QOKo_6H!Z6 z7GkH~DvQ{z;;5w*g7#uH9s$bGALto5&WhxXvTQb z3{Y>fts_q>5uTXsTw!vs!T)`8109BxPdH0GLK@&w*m%7cY z*}}MEecrn6aPG8JD&=+r0e{y~ydEc>qrD`LqJQi(%MJgo6E*ht~rfrW!UiA6(x0&QL4c1>6v$+zsi2JE0D zEGq{VRdN+R2RT`=`Bk>;<+QB@rh!$pfoa$qIBFx^ z6k3ejvEU2P4#>|cJM|u!@*Xz}yPpUe)v$BVMy{CE8#Pyspllto**z{_C!@+0-xZV3 zx56?Eg9kI-!|0+I&jW{>J!8#~PGr_BERkkFXPp7%W27Phd3qCk5Ncujt0+_KC(k2w z*OHIp6`S*0_FX=S;#cs5i zhcju-(^fNx6OESe>rgP*3wRD?36&5^N@K7*T$oT^Qt9~s9k(n)JU$dZYU4Q7TWB5b z1~4E(Cm||R5S)2)!Wk4cE*osKI#ZeA_2xS?|vM+cHfVAlYfu|s(kCn!Y2*a{k3r#_0U;{X~NCjL1L9nu!y#3v}Qc5m3I zO-;8Qz@eJ?7`k)PVGhugmm&G#7xWw|9Y_$W9Ya1w*i({9INQ_?da-W}vI?>+F_a+g zY$pXOv?$1Y3?{IEb=+!hBZhnN3#%a|1>`w&kuMO?7SVvasLuHtNUZCZBU={b0V+j) z1ifaG>jzU_B|c?04UeXLiH$Ue%3ovs=HcT^f0HoCmrxzjDm0)`hG%JfJ2e$<#^5)G z1O5n||9u zzip$r0o3xf(caja(HrmPx+4k$&e`>ULd{O}Z}Lno!l9OzG=&TvP<3K9 zdF68NT7^aQ@;>0(u`=4tcX#X|l{;&bwW;ptW5(;|b)Ms$aJLhEn`vilkH!ksr>!Vb z`g%0I%AWbsUP8yH?Gx4qAlyp(`pWTsmR2!>Lj{C_o|n4ClIzIiM* zGqG9jBvDv6Q$2RyJRvWl<{rh)j9F9;sj{u>(aiqX5kUp+M<-}Y4-K?GLGFv~y5E$u zOeLZvXajf&a2|qo?6lf`6Co%S@}dU^1r6URj5B2+N+JBPveYg}B5c=#D0ZT9xEN=k zFyOe;!zqqPh({R>8q-;e(|!{^h%6(H_K?bJcL3HuG@g#KDLqw0L!pH>+!;SA?%gEF z^$?;TQIln$ni=cMJ&1)+EmN#K@zQ({HT@7`Z$6OZ{Me!Pip{2cKXOW;$Ic?FO zfTsYZn1jE-+$IUT3FU*83&k{;$dJ!=5kppr5C+bqAuL#I&vxzbS=6FL*o!vuXo>Yf8){XQkgvWAo>zhA)eU%(8V)Mhud!ClY(=&FQQRRq$MhnG$ntG}@%9saCmAW7KSYnt)X)QqMWX4(L zW5etTS|Uz^<1~7C1eJ;}WtYu%F14W;{1Qz5_EOYua}+g0HCZo%;d!d!?0nFK4%D{6 zD)HPV*dm2Tu;H3wQt&ru5z;lLo;++GP%uxA@_#$KHi+Z(9-B6k8kUcZC%guU+_mLI 
zWPAqOLkTP_$_=rfON{V!lE(FytQGgu#=NOQu&J)gvK1FS=(}YiM`YQ1)q2x-%c2}Y zE+M}R=!Rcr>z7ZjBw1EVc9S*8=^nzJF#4gA5iG?Piji0BdC0x4!;tA6%E&pi&p~n> z;=4Bz<)a-)jWV1{2?-qm?a?0#^$hwlJgjNc9wge2)y5H!+#Tg=2x@k%-UPJCe|D{= zat9e8CidE{+9aHk$+v7V#V~!<7Sp6bjXbpmuNf%@8ebY>WXi4WuI(9MMeW5Q-o0Yq z&}cX?D;KfQGp2k6uXQQgZdUJ4R9emXB^6OeoJ|Hd3n<-YO{jh1UeTt7OFK%>0`y|7 zKL*;a;}FsY?PN+2SIa(eXs2FYK!>fYlzy!0HxWIO=l+2>=?FQ{1D&HA7)SdKo9&80 zFI*N_PoFAUmoCTVrP#c@ja*{fNz{h{lqWgi5u^=UJQVciuMD|8G0*pE*CSALl2F9NS@xDt7hhM;uSdG0+ZahL9h% zX4@AKXijtsa_Ky~Bw?5`z}v|@K9qS_-p`^=H9-0cO#e2(%(SGFtJnSjQ`|Kb@l#Hq z*0yP?UuC8v_y&b6Iq}$qN$;11c2=oql@gVbsgS@Upx~761&DJLn1dLc(*N?4EVlwt z%s>nV;X(=>|mIZYOhp^GbOUvQl05ssdOCyeqAFE0gwCu!C8W5 z2}%SNf`Fh#@G8OQ3GNd78Ns?5TR+IVP;nejI0=xlCeNjuR4O%^N~Dshbm~AVm)bAc zhUho_CRD4%O5Tj3Ns4d1zCj&+l)e$ zBZY|VpXGb_yNrt*#7?#}11E(o?5Z9fJ)Y17{Sk-g`WFN1CNdXEwbw2+a2_=CH$TVIyWFX&gY4B6W8nyC}Z{si!Gfro66Ys5G7Vvxxew%jl zag=f&vV_WT>(bJJ7?HlLO&X(s0Ven(ug*lXbmioK_`lF4BsYx?ZCp!e3 zzEU6kITv&N>r~umVTCF3mA@oE&D7?}$64@dsoy?@6WkAb)At#NZQR+jdBPi7$L@3L z^9bcQY-48{$%>u7j0A=FZ3Hj6W`x^>d27{@ySpY?tZFKA7+=${g}%a2aV8rMKI&ln zlFhjwPAw6+J^CY9APJ!sk2dmPqH=_61bdK=^J~84sq12TakUELa^I z?{1_CM)K^>qH2OvQOM|cw>IA7@3PAq0NX}pQ?(Kv5^Ch1aUJvv#NqaCTow6kAf3s| zhw+k21Ro_(jQs~peU*SD$gdI50o=yt+I%_BD#hdLhRpD6#~CuLUhgpyz_Vglhj{O& z3>fP82W`+@>)xlZ(enG~QrURGnI@ws{??8K4)}x$&4E?A7Qand4+!_A{)^}_V{55I z^HhGAV9Rk-jp*G3b=ZecOy^zJ zLV4@6I0Ao(b};$AiCAX4oylbqszwd9iBq^6!vJm%na;0Mm{1Wwf!m0$<8lkq(1VJH zT!d?Ft>7jQ(e-54`W=J=upf3UiFc}NZ_hx|-Lea#TUK;TtlX_(K2unp_@ zW}yz8Mdfqp8#lX#P-g<>NX3^~;!i(yv*N33Yu>eB*nvVFp2^s3%LS6W`D{ua;77=f zI(%dy5I6~r%;0|ve5vXO{PwW?I}T>GLis}`l8lzsgE>jrfiE={s|@zGmShFjas`yB z$rq>8=N$$0ZA4+i^`ZVuWJH%&X?B7a;M$%`H_h zx1dA1x)o1aoYk%L1tR%ZIlA$fE><-5k#b43m-7R5bP-MVo6B;P#gw&ia-kgZlbASv zAw-y#S3XZk*^=@En{XxL1n3T$#hscAl_`Zw6|78%>s>Yuiy7qM`x2sgEv|7vwl8sDK>oJ;G7!CU0hP^l|ZtNeWiqA?4MMV}GeE SCd`Z~pJ2+)f6_RgKlgt(Q1N{L literal 0 HcmV?d00001 diff --git a/src/目标识别代码/utils/loggers/clearml/README.md b/src/目标识别代码/utils/loggers/clearml/README.md new file mode 100644 
index 00000000..64eef6be --- /dev/null +++ b/src/目标识别代码/utils/loggers/clearml/README.md @@ -0,0 +1,222 @@ +# ClearML Integration + +Clear|MLClear|ML + +## About ClearML + +[ClearML](https://cutt.ly/yolov5-tutorial-clearml) is an [open-source](https://github.com/allegroai/clearml) toolbox designed to save you time ⏱️. + +🔨 Track every YOLOv5 training run in the experiment manager + +🔧 Version and easily access your custom training data with the integrated ClearML Data Versioning Tool + +🔦 Remotely train and monitor your YOLOv5 training runs using ClearML Agent + +🔬 Get the very best mAP using ClearML Hyperparameter Optimization + +🔭 Turn your newly trained YOLOv5 model into an API with just a few commands using ClearML Serving + +
+And so much more. It's up to you how many of these tools you want to use — you can stick to the experiment manager, or chain them all together into an impressive pipeline! +
+
+ +![ClearML scalars dashboard](https://github.com/thepycoder/clearml_screenshots/raw/main/experiment_manager_with_compare.gif) + + +
+
+ +## 🦾 Setting Things Up + +To keep track of your experiments and/or data, ClearML needs to communicate to a server. You have 2 options to get one: + +Either sign up for free to the [ClearML Hosted Service](https://cutt.ly/yolov5-tutorial-clearml) or you can set up your own server, see [here](https://clear.ml/docs/latest/docs/deploying_clearml/clearml_server). Even the server is open-source, so even if you're dealing with sensitive data, you should be good to go! + +1. Install the `clearml` python package: + + ```bash + pip install clearml + ``` + +1. Connect the ClearML SDK to the server by [creating credentials](https://app.clear.ml/settings/workspace-configuration) (go right top to Settings -> Workspace -> Create new credentials), then execute the command below and follow the instructions: + + ```bash + clearml-init + ``` + +That's it! You're done 😎 + +
+ +## 🚀 Training YOLOv5 With ClearML + +To enable ClearML experiment tracking, simply install the ClearML pip package. + +```bash +pip install clearml +``` + +This will enable integration with the YOLOv5 training script. Every training run from now on will be captured and stored by the ClearML experiment manager. If you want to change the `project_name` or `task_name`, head over to our custom logger, where you can change it: `utils/loggers/clearml/clearml_utils.py` + +```bash +python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache +``` + +This will capture: +- Source code + uncommitted changes +- Installed packages +- (Hyper)parameters +- Model files (use `--save-period n` to save a checkpoint every n epochs) +- Console output +- Scalars (mAP_0.5, mAP_0.5:0.95, precision, recall, losses, learning rates, ...) +- General info such as machine details, runtime, creation date etc. +- All produced plots such as label correlogram and confusion matrix +- Images with bounding boxes per epoch +- Mosaic per epoch +- Validation images per epoch +- ... + +That's a lot, right? 🤯 +Now, we can visualize all of this information in the ClearML UI to get an overview of our training progress. Add custom columns to the table view (such as e.g. mAP_0.5) so you can easily sort on the best performing model. Or select multiple experiments and directly compare them! + +There is even more we can do with all of this information, like hyperparameter optimization and remote execution, so keep reading if you want to see how that works! + +
+ +## 🔗 Dataset Version Management + +Versioning your data separately from your code is generally a good idea and makes it easy to acquire the latest version too. This repository supports supplying a dataset version ID and it will make sure to get the data if it's not there yet. Next to that, this workflow also saves the used dataset ID as part of the task parameters, so you will always know for sure which data was used in which experiment! + +![ClearML Dataset Interface](https://github.com/thepycoder/clearml_screenshots/raw/main/clearml_data.gif) + +### Prepare Your Dataset + +The YOLOv5 repository supports a number of different datasets by using yaml files containing their information. By default datasets are downloaded to the `../datasets` folder in relation to the repository root folder. So if you downloaded the `coco128` dataset using the link in the yaml or with the scripts provided by yolov5, you get this folder structure: + +``` +.. +|_ yolov5 +|_ datasets + |_ coco128 + |_ images + |_ labels + |_ LICENSE + |_ README.txt +``` +But this can be any dataset you wish. Feel free to use your own, as long as you keep to this folder structure. + +Next, ⚠️**copy the corresponding yaml file to the root of the dataset folder**⚠️. This yaml file contains the information ClearML will need to properly use the dataset. You can make this yourself too, of course, just follow the structure of the example yamls. + +Basically we need the following keys: `path`, `train`, `test`, `val`, `nc`, `names`. + +``` +.. +|_ yolov5 +|_ datasets + |_ coco128 + |_ images + |_ labels + |_ coco128.yaml # <---- HERE! + |_ LICENSE + |_ README.txt +``` + +### Upload Your Dataset + +To get this dataset into ClearML as a versioned dataset, go to the dataset root folder and run the following command: +```bash +cd coco128 +clearml-data sync --project YOLOv5 --name coco128 --folder . +``` + +The command `clearml-data sync` is actually a shorthand command. 
You could also run these commands one after the other: +```bash +# Optionally add --parent if you want to base +# this version on another dataset version, so no duplicate files are uploaded! +clearml-data create --name coco128 --project YOLOv5 +clearml-data add --files . +clearml-data close +``` + +### Run Training Using A ClearML Dataset + +Now that you have a ClearML dataset, you can very simply use it to train custom YOLOv5 🚀 models! + +```bash +python train.py --img 640 --batch 16 --epochs 3 --data clearml:// --weights yolov5s.pt --cache +``` + +
+ +## 👀 Hyperparameter Optimization + +Now that we have our experiments and data versioned, it's time to take a look at what we can build on top! + +Using the code information, installed packages and environment details, the experiment itself is now **completely reproducible**. In fact, ClearML allows you to clone an experiment and even change its parameters. We can then just rerun it with these new parameters automatically, this is basically what HPO does! + +To **run hyperparameter optimization locally**, we've included a pre-made script for you. Just make sure a training task has been run at least once, so it is in the ClearML experiment manager, we will essentially clone it and change its hyperparameters. + +You'll need to fill in the ID of this `template task` in the script found at `utils/loggers/clearml/hpo.py` and then just run it :) You can change `task.execute_locally()` to `task.execute()` to put it in a ClearML queue and have a remote agent work on it instead. + +```bash +# To use optuna, install it first, otherwise you can change the optimizer to just be RandomSearch +pip install optuna +python utils/loggers/clearml/hpo.py +``` + +![HPO](https://github.com/thepycoder/clearml_screenshots/raw/main/hpo.png) + +## 🤯 Remote Execution (advanced) + +Running HPO locally is really handy, but what if we want to run our experiments on a remote machine instead? Maybe you have access to a very powerful GPU machine on-site or you have some budget to use cloud GPUs. +This is where the ClearML Agent comes into play. Check out what the agent can do here: + +- [YouTube video](https://youtu.be/MX3BrXnaULs) +- [Documentation](https://clear.ml/docs/latest/docs/clearml_agent) + +In short: every experiment tracked by the experiment manager contains enough information to reproduce it on a different machine (installed packages, uncommitted changes etc.). 
So a ClearML agent does just that: it listens to a queue for incoming tasks and when it finds one, it recreates the environment and runs it while still reporting scalars, plots etc. to the experiment manager. + +You can turn any machine (a cloud VM, a local GPU machine, your own laptop ... ) into a ClearML agent by simply running: +```bash +clearml-agent daemon --queue [--docker] +``` + +### Cloning, Editing And Enqueuing + +With our agent running, we can give it some work. Remember from the HPO section that we can clone a task and edit the hyperparameters? We can do that from the interface too! + +🪄 Clone the experiment by right clicking it + +🎯 Edit the hyperparameters to what you wish them to be + +⏳ Enqueue the task to any of the queues by right clicking it + +![Enqueue a task from the UI](https://github.com/thepycoder/clearml_screenshots/raw/main/enqueue.gif) + +### Executing A Task Remotely + +Now you can clone a task like we explained above, or simply mark your current script by adding `task.execute_remotely()` and on execution it will be put into a queue, for the agent to start working on! + +To run the YOLOv5 training script remotely, all you have to do is add this line to the training.py script after the clearml logger has been instantiated: +```python +# ... +# Loggers +data_dict = None +if RANK in {-1, 0}: + loggers = Loggers(save_dir, weights, opt, hyp, LOGGER) # loggers instance + if loggers.clearml: + loggers.clearml.task.execute_remotely(queue='my_queue') # <------ ADD THIS LINE + # Data_dict is either None if user did not choose for ClearML dataset or is filled in by ClearML + data_dict = loggers.clearml.data_dict +# ... +``` +When running the training script after this change, python will run the script up until that line, after which it will package the code and send it to the queue instead! + +### Autoscaling workers + +ClearML comes with autoscalers too! 
This tool will automatically spin up new remote machines in the cloud of your choice (AWS, GCP, Azure) and turn them into ClearML agents for you whenever there are experiments detected in the queue. Once the tasks are processed, the autoscaler will automatically shut down the remote machines and you stop paying! + +Check out the autoscalers getting started video below. + +[![Watch the video](https://img.youtube.com/vi/j4XVMAaUt3E/0.jpg)](https://youtu.be/j4XVMAaUt3E) diff --git a/src/目标识别代码/utils/loggers/clearml/__init__.py b/src/目标识别代码/utils/loggers/clearml/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/目标识别代码/utils/loggers/clearml/__pycache__/__init__.cpython-36.pyc b/src/目标识别代码/utils/loggers/clearml/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8cb98ffc2b2ecdbfdbe80612d96acb41ec227e68 GIT binary patch literal 165 zcmXr!<>h+1`%e-B5IhDEFu(|8H~?`m3y?@*2xib^^jpbL1QJFNU;TkxSF4y8d)GhN zz3u7trO#J%zg)QU`RsKuPiOCY+T8zQecRKH)iIU%Ir(L#x@LMtF{LG$ImIzK`RVDY qMa416IjM<7xj8ZM@tJvg`kf~ULxB!TG1AOaaM0yz#qT+9L_QW%06G#UL?G8BP?5yV%2AlKC@=EdIi zPj+v6x_#;M72Piv?tDIbUCh(j`<^!UzgXY)v}1KlWqwY6nW?Uso>5F`NoG!QOiq4! 
sdTLQ|Oma?YVo`2ROniK1US>&ryk0@&Ee@O9{FKt1R6CGepMjVG0D+q}z5oCK literal 0 HcmV?d00001 diff --git a/src/目标识别代码/utils/loggers/clearml/__pycache__/clearml_utils.cpython-36.pyc b/src/目标识别代码/utils/loggers/clearml/__pycache__/clearml_utils.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9eb003e1d6e910fbdf780db79adc055b15e4f480 GIT binary patch literal 5475 zcmaJ_-H#+y6|b+Z{_dICU6x&Ti6sl{G|V>2SImka?1G@n05Y)94pwUFR!>(?byaik z?V0JeIx+M@Ow0z~i4U5XXe9E6F{p`&e}K>R;EQfx-}Qx<=?(JFB?$oVYx6VEH z+;h+Qopb7udcAVvlYi~peYd9lOYYUDvgs5mcEyHAGjgyrW;$ z;3}tqg{Pdi;XulR%E4XT6^tV8Y{EPHH}rKM*A2uUe%h_@xw2A zVeDKm`s(@D5?z@vfDJW z^3!pgNKYm_v;8DWxWHiXvL`(e$jsXE#9mf)-Ti=zFo|8)*NFpuC0un}mvD(qjHX&o zsby+RbgB1@6zaOJPYh|MM$ejRQ+;B}g0y?KET$&2&R}G!FP;ne^PNOz$x>=fb!P8b zd`Fg9ajL(g$0uf^r9Ff1W3)`8vXWY?bQ-hXU!4?WwO30E;9y^cs$S8)tG%JgdTRF? zw0>`eRauSIS%a-~K_z}KSJ|rKvNqKw#nfii(;ELP$(Q%fXuV@JGd5TyEwbtp8pdm< zwV&wRno;5DHmaJUsw=7oW>k2(jjFDw8mBezZK(c=>aU<*#PP0;U%8Ta%)tqJI9wcQ zKXO=pE{+#7rxQkzBf9}8WWZxDf9tZ)m(D>byG|&D<0p6-OVO%0_>C(wMi(D?DAQmC zl^H`fWMlh**2wEe;{xve=q8=>_y;d0JisDe>?CpE(0f`pa1K1OwIRfSd?dtNGM*%I z!|^?`Du)G-tRG_LK{xcfiU&yqvlT~Xv7i&ip@fBDI22$%YH=lj+)VOimq->1FE8p0 zV9;2(8~A%9F^s+&KzMM2{u0gu=;hW1AVJ7zYez*_Tw0r10Ui^^nI!`uGjrdIG9&i0 zLhSVeF@ARovv|1MmCjx;g0!>_?ROyR3TgRW3X-wJquQYxRO#ius=GlwepZ(#Zzkl-ajo<#_(_jAh z=Ffij=?A~rzVX2yZ%p6+?4$Q?{OH%)BjDTqV;3H8J-j`XVI;OA1)XAhChh$0s<$mD zZVlE7>4&~_XM!BZQeqvAX4Z84&2_XDqo$XPhJFI~W4dkF#=XY<#{Kl(;P-*nrwQ|) ziOx51i48P?h5(~caA}*&U?#xc0!kH_eNAr{fL0||M$cvyU_w2sU&@gn2Yv5P@DX$g z-{C*7AU=%6#@rdv>nALTa3{bR@PN(!;@8wRp zASd(n27{JTSqpH0fCFZuWXPT5Aa=TmkO2b=j*!uEo*4lII^Hmn8)_Nl-OHTA*pG%R z5Keal;~IDz#tqPb$YKOeeL}EKxI5&zAhZt<@X()c?$)D^y&jPo#A?wUPlUcw%A8o~ z!vkiHL;!45R1kH*ll_p99zDhs9067Rq~E7yBf>F{?+z)VDh(>F!%UVGt~ljN|ANi| zijEhF#MudQO|u-8eKH98(Y=W45-xEK4bxN2)ucW}aCHr4pfyvyXW?$i0@?yI(b}pl zA^;co4`r!WW)=*{+C>1R{2|m?`8rYCps;(D6v2#DuN%4sUI>`1Hq|k&PES)>kvJ7o z-K{c{jLvJq%GAavhFox7Af)!9a2(8S$e}c017HRlN_KjLv>0E}dJ7LF32C_9&)N@ZHWZ&Y$AU{14ZE-jk7fWE8v#!u&0Gw<( 
z7cMxL^7FjyI4N;b^237`vtg6YI{A#6`20}|1#T}*S*gv>Z!@#seCf(d`;TR% zxqohD#lZ-YI(M?hg?GW9bHi#P9OEAYOLn$QS@0c_eegzPB5Q>~!Bd zM1NPfSQoEccxOI2tMBY2ct=?W1;*kGJxOY@$L|4aZWDitLONdOLT45sXjT!@<5Ez9 zqL8s^W|jz|PG;h$G7}nCke)a95}T5XwLgees_JFJ=j!k!>OofTFAa=oy8}rI=5()g|3l*s3US`M-+iT5MQlS(Mx(mKZRUi6~;RL(owVg z!pu);lBWITDW5Eoervc$FDfd0A5bi*)I#Zm*4WdMQ@EQ<-z%UfX;UAUbrP{-&qy9k ziAvuq(mPo|=~V!-TfGu9P!5%;Mx7dDoj`uASA&(JXq&=DCpMK$6i2Qblj7mWvLROx zH*99VYp`Mm_P%&39eF~;dDoy3;sXB}ZyS@+wmB(JDwC>QO-tZiOHH|!R;K!CZBm!V zB22!5CrdneNBT+$SAOU1Ba?~bL>6^z%f zoA9m=ucDCZokU@`3LV_pJB1ydW(`ESyOfqz*$R0f>8E!lg+5Vt+wjI!@6XHe?lSid zQ~=wda))vQpdZ{5rHbbWJi{TBwX1la8})v`oYil+qa(V1?+^vshBH#_A))~F zsa1&>3t~S&pFTFcwOC9&s?Y`4Qt|SlLpPd3#U5#`kF1gI0Gs?%1w51jef~fpYr!tx z&iYmmi-bdZAPt2~2bC-amYEW?c)lDibU-m?!6y&J|Az0}G|+6}%Z3s5Gjk9|n>Q^Q zrn>1SLo+rmI-l$JA$k@ObtyLhaUt?{f+#Df?;_*IGjjuYh76!-<;d_PW-C<43QF$g zS$+o%Jw(k{sA*C2RWzs}2ick@NGnKblcDf9_eNQ%pX>)7kSsI%Ls~Ba&}Y`pNCuhd z4~D2ahyB5bo5Z6W;DI0z@5bSP(>^$1LskF=$1E#!kWZmkf){_C=McO-^C-)xBwIzUa8f^uwuUUh0wj;``63YC&Jf?p_!D4?+jrmtn`M%1sl?%OcI*_)#4vcz{?{&mdZ-=^kmYR(;PD01-o(L9bz5WttpmQi}JvZh9x z$N7*bsNUq1`5<$I-$&t&D)G5X<8M%Nie6S{C2N#Oq3k5Li-(DTj6BcCiumNpds1tLf>F z>gww5s$W&jD~*Pu;dkw$e{bALmu_QM7omu!FH5vu3g98w2Li0w_fyR zw`Jtjvq_RlU#2{_gEUULKxg^9FMScp+`8n8?Y!oBJ0TZQns{EI6NkyKf;S!8IXvPb zMlx-zj^@A^=~CY^G91}eePl>8Gsf0|wy%#&S(5hHmgUT3)(P|+=(BdIXx~nBmaJsf zzRv7zi*Lv(EAQ)X>&dZ6Z)MxyJLs)auXHktRhBX9y~R;U*2eX$bfAyy*R)5pm$dI{ zuWPcA+2bayKVD!CtFbz3ux1xj;`efqEhsKa``W0S+00qi_+LrBqJBafAEB9v!4|VJ za~{*sUt89Gs&i{Xh1M-pHAPidR1Z$5(7J`HuBaNz8u&I;y{YO=)XTWyrQu5#QlGiF zWgi!eYwg7@EAGbi6XtfJICf<>1ceNF;uoVHivsEHMzZThQn*2i!9r z!w=^gG{KoL^ddI2A6(n>d-1S@cQ3w9cRl>!^C=Iph@ZG=61p@_>xS;GPx>~17?6*I zm`cXeRIa*#PnzYj5R&yG%-ropL09o0iJ-dT&@2{qq9l^gGIU1*?Dy8Vl0aTei)H6X zW(&_R>h_`3Sh*Vp+axg*zZ*h$aKrhfoQF7dg7F}^!%S$ktD9J4u3YnWb zew-Ugke3p_7mDFKmoSM(n_cN{hkFo~mZ9y=aZza6?_K*C5Dg5ffSbh|i0 zBJ8AbobJLv6}L0)%1O2O$yN6xEl+O=>m9&dq4D0$>(8syr}X?+8owV*|9J_;(`VK{`NM~Q|L8Z@ ze*24$fBBQ^2fzFH{SVf!z5mB+`|o}7;k(y<{Ok2SVBF3lrygB9y*`jpEY@QMpJII? 
z?ey)b(KQ_060{Z44+7~;1UZVO#J5psW?jeMTtVp=b-iLV^<#L~Z`g+MAIq{0`-Y`A z@fbI3)4pMUQJv@w{$+3rh~@mxMB_<3ViiTG!4+xbhT0}GmL3x(U=g9`S24jTZpKxDB8PCdf8PP94Tg&?#K5b!9N zZti9?z+MkY4Pv$EhA*N(9mGD+EgKWs z`FwLgzE_=~I&~=1+<_}j#i4)h%mJOQAB)u82#b?u1$GCd5Yr#k%XrS=5m!(!J<~i* z>iclRp1};1W~Oghcw4fBvcyc3wkpe+wpHRkl9jD0v!F!QCR{B=8N@13gA67J*4ZU<%$ZpicZynZB zK>Xa4*&OBIt}o*DF6QcEP`bF^ov8i}iE0Ugf z7A-+=syVh%G4Sa2GhtpZ%&SN@-hzq3xa{FSW|~7_PHs}TE5%*3Tm%zL$t(()r*WCu zAQA#P_z8sl4+EGmyns+)<;7G)msOSk&cHN;D`}^Q{K!)i?7K4>Qg3*GnRGIeh!9uc zH(Tymw`i$=Y?G&hK4Bul6U1h2A$t+5lHo)2(1pGXLNQ}Dta%&047w}p?4g#j7*1J7 z%R-Ds>*O|^NEE^big<^_%c~I-Pq~7Nt$J>Um$3nn8(dNQuqnO<9=Qnx&Fw-e{&nhc ziol7Is=SWiD&?}!cD_WzB}L0`BLQsw4b`QNjX#b$zn40UNb)DCMUlyzjxLdfs4Ma& zR}_Ol96_N~Dtbk4>USWNScJM7pAElsSTR30_hXu-X@7YlAbp(vNJz3~D#4g9c}A4+{d<|dWcfrE>m5Wkg1i%W$0yv83+2fN@?wZL0USl z;i*Ggk>eehly6a%Mb6}^F)GWZT!8Danf;Ez${lF^tW`PGg421&pq_o`JFIB9Zj37H z=BPSyMm4#ZRgeqTGgB^Q&c42^jT-WZJenczW7T7@lzn5=%o^__G0vJOYd^C_3t2NO zA#3~~D{mdkYEwBgxGv?!%3HV1^)8^lan*#4fqs1aKpQP$|F@6tfL$)LCfwYebY2;< zM6yIWoAHSZSu4_X8#cM<|9Rfwo#+0p@?aa38Bu@$6oio?tMOfdHq1ilymI=5dMSDY z&WL@;hW(kNoVVuxZ@7x3ilE&&!&QvQ8dN1kz7kQ6SgcPtX29rXHp+$0{=5DfxCj18kcr+x7AG*SW* zCoC^@5LC&$O1ZI5>X}!`Zx?&93$0YZ$e*F9&r(q!CnDe0Qh|WZP5sMDN|*B5+%5Ch zF{|AqmGI!uL(xrR#$O;Qs_H9&`u?OxxkV)H%48~XRPf@j5goo-K!=Ke#~~_Y?$9m% zDsf(zFqy_Bx8m+G!p=fOiYPoK;RrJ)al$>uJ^C7e_Zehxb^VCGh?wEHekaO0q6NeY zpV`*%-p|AFtt|0jjeiqt@wz~-GDRh%tDcuTo`-0E5L3PGd2c}pCnIIgV`<=d{4vbo z-=Tt2asT0hq8xq_#iMuxL3~BGoXSJalIm?89M<{cljQ zOoO#aejBG!C`~Ce<7-4fYF@- 1: + raise ValueError('More than one yaml file was found in the dataset root, cannot determine which one contains ' + 'the dataset definition this way.') + elif len(yaml_filenames) == 0: + raise ValueError('No yaml definition found in dataset root path, check that there is a correct yaml file ' + 'inside the dataset root path.') + with open(yaml_filenames[0]) as f: + dataset_definition = yaml.safe_load(f) + + assert set(dataset_definition.keys()).issuperset( + {'train', 'test', 'val', 'nc', 
'names'} + ), "The right keys were not found in the yaml file, make sure it at least has the following keys: ('train', 'test', 'val', 'nc', 'names')" + + data_dict = dict() + data_dict['train'] = str( + (dataset_root_path / dataset_definition['train']).resolve()) if dataset_definition['train'] else None + data_dict['test'] = str( + (dataset_root_path / dataset_definition['test']).resolve()) if dataset_definition['test'] else None + data_dict['val'] = str( + (dataset_root_path / dataset_definition['val']).resolve()) if dataset_definition['val'] else None + data_dict['nc'] = dataset_definition['nc'] + data_dict['names'] = dataset_definition['names'] + + return data_dict + + +class ClearmlLogger: + """Log training runs, datasets, models, and predictions to ClearML. + + This logger sends information to ClearML at app.clear.ml or to your own hosted server. By default, + this information includes hyperparameters, system configuration and metrics, model metrics, code information and + basic data metrics and analyses. + + By providing additional command line arguments to train.py, datasets, + models and predictions can also be logged. 
+ """ + + def __init__(self, opt, hyp): + """ + - Initialize ClearML Task, this object will capture the experiment + - Upload dataset version to ClearML Data if opt.upload_dataset is True + + arguments: + opt (namespace) -- Commandline arguments for this run + hyp (dict) -- Hyperparameters for this run + + """ + self.current_epoch = 0 + # Keep tracked of amount of logged images to enforce a limit + self.current_epoch_logged_images = set() + # Maximum number of images to log to clearML per epoch + self.max_imgs_to_log_per_epoch = 16 + # Get the interval of epochs when bounding box images should be logged + self.bbox_interval = opt.bbox_interval + self.clearml = clearml + self.task = None + self.data_dict = None + if self.clearml: + self.task = Task.init( + project_name='YOLOv5', + task_name='training', + tags=['YOLOv5'], + output_uri=True, + auto_connect_frameworks={'pytorch': False} + # We disconnect pytorch auto-detection, because we added manual model save points in the code + ) + # ClearML's hooks will already grab all general parameters + # Only the hyperparameters coming from the yaml config file + # will have to be added manually! + self.task.connect(hyp, name='Hyperparameters') + + # Get ClearML Dataset Version if requested + if opt.data.startswith('clearml://'): + # data_dict should have the following keys: + # names, nc (number of classes), test, train, val (all three relative paths to ../datasets) + self.data_dict = construct_dataset(opt.data) + # Set data to data_dict because wandb will crash without this information and opt is the best way + # to give it to them + opt.data = self.data_dict + + def log_debug_samples(self, files, title='Debug Samples'): + """ + Log files (images) as debug samples in the ClearML task. 
+ + arguments: + files (List(PosixPath)) a list of file paths in PosixPath format + title (str) A title that groups together images with the same values + """ + for f in files: + if f.exists(): + it = re.search(r'_batch(\d+)', f.name) + iteration = int(it.groups()[0]) if it else 0 + self.task.get_logger().report_image(title=title, + series=f.name.replace(it.group(), ''), + local_path=str(f), + iteration=iteration) + + def log_image_with_boxes(self, image_path, boxes, class_names, image, conf_threshold=0.25): + """ + Draw the bounding boxes on a single image and report the result as a ClearML debug sample. + + arguments: + image_path (PosixPath) the path the original image file + boxes (list): list of scaled predictions in the format - [xmin, ymin, xmax, ymax, confidence, class] + class_names (dict): dict containing mapping of class int to class name + image (Tensor): A torch tensor containing the actual image data + """ + if len(self.current_epoch_logged_images) < self.max_imgs_to_log_per_epoch and self.current_epoch >= 0: + # Log every bbox_interval times and deduplicate for any intermittend extra eval runs + if self.current_epoch % self.bbox_interval == 0 and image_path not in self.current_epoch_logged_images: + im = np.ascontiguousarray(np.moveaxis(image.mul(255).clamp(0, 255).byte().cpu().numpy(), 0, 2)) + annotator = Annotator(im=im, pil=True) + for i, (conf, class_nr, box) in enumerate(zip(boxes[:, 4], boxes[:, 5], boxes[:, :4])): + color = colors(i) + + class_name = class_names[int(class_nr)] + confidence = round(float(conf) * 100, 2) + label = f"{class_name}: {confidence}%" + + if confidence > conf_threshold: + annotator.rectangle(box.cpu().numpy(), outline=color) + annotator.box_label(box.cpu().numpy(), label=label, color=color) + + annotated_image = annotator.result() + self.task.get_logger().report_image(title='Bounding Boxes', + series=image_path.name, + iteration=self.current_epoch, + image=annotated_image) + 
self.current_epoch_logged_images.add(image_path) diff --git a/src/目标识别代码/utils/loggers/clearml/hpo.py b/src/目标识别代码/utils/loggers/clearml/hpo.py new file mode 100644 index 00000000..96c2c544 --- /dev/null +++ b/src/目标识别代码/utils/loggers/clearml/hpo.py @@ -0,0 +1,84 @@ +from clearml import Task +# Connecting ClearML with the current process, +# from here on everything is logged automatically +from clearml.automation import HyperParameterOptimizer, UniformParameterRange +from clearml.automation.optuna import OptimizerOptuna + +task = Task.init(project_name='Hyper-Parameter Optimization', + task_name='YOLOv5', + task_type=Task.TaskTypes.optimizer, + reuse_last_task_id=False) + +# Example use case: +optimizer = HyperParameterOptimizer( + # This is the experiment we want to optimize + base_task_id='', + # here we define the hyper-parameters to optimize + # Notice: The parameter name should exactly match what you see in the UI: / + # For Example, here we see in the base experiment a section Named: "General" + # under it a parameter named "batch_size", this becomes "General/batch_size" + # If you have `argparse` for example, then arguments will appear under the "Args" section, + # and you should instead pass "Args/batch_size" + hyper_parameters=[ + UniformParameterRange('Hyperparameters/lr0', min_value=1e-5, max_value=1e-1), + UniformParameterRange('Hyperparameters/lrf', min_value=0.01, max_value=1.0), + UniformParameterRange('Hyperparameters/momentum', min_value=0.6, max_value=0.98), + UniformParameterRange('Hyperparameters/weight_decay', min_value=0.0, max_value=0.001), + UniformParameterRange('Hyperparameters/warmup_epochs', min_value=0.0, max_value=5.0), + UniformParameterRange('Hyperparameters/warmup_momentum', min_value=0.0, max_value=0.95), + UniformParameterRange('Hyperparameters/warmup_bias_lr', min_value=0.0, max_value=0.2), + UniformParameterRange('Hyperparameters/box', min_value=0.02, max_value=0.2), + UniformParameterRange('Hyperparameters/cls', min_value=0.2, 
max_value=4.0), + UniformParameterRange('Hyperparameters/cls_pw', min_value=0.5, max_value=2.0), + UniformParameterRange('Hyperparameters/obj', min_value=0.2, max_value=4.0), + UniformParameterRange('Hyperparameters/obj_pw', min_value=0.5, max_value=2.0), + UniformParameterRange('Hyperparameters/iou_t', min_value=0.1, max_value=0.7), + UniformParameterRange('Hyperparameters/anchor_t', min_value=2.0, max_value=8.0), + UniformParameterRange('Hyperparameters/fl_gamma', min_value=0.0, max_value=4.0), + UniformParameterRange('Hyperparameters/hsv_h', min_value=0.0, max_value=0.1), + UniformParameterRange('Hyperparameters/hsv_s', min_value=0.0, max_value=0.9), + UniformParameterRange('Hyperparameters/hsv_v', min_value=0.0, max_value=0.9), + UniformParameterRange('Hyperparameters/degrees', min_value=0.0, max_value=45.0), + UniformParameterRange('Hyperparameters/translate', min_value=0.0, max_value=0.9), + UniformParameterRange('Hyperparameters/scale', min_value=0.0, max_value=0.9), + UniformParameterRange('Hyperparameters/shear', min_value=0.0, max_value=10.0), + UniformParameterRange('Hyperparameters/perspective', min_value=0.0, max_value=0.001), + UniformParameterRange('Hyperparameters/flipud', min_value=0.0, max_value=1.0), + UniformParameterRange('Hyperparameters/fliplr', min_value=0.0, max_value=1.0), + UniformParameterRange('Hyperparameters/mosaic', min_value=0.0, max_value=1.0), + UniformParameterRange('Hyperparameters/mixup', min_value=0.0, max_value=1.0), + UniformParameterRange('Hyperparameters/copy_paste', min_value=0.0, max_value=1.0)], + # this is the objective metric we want to maximize/minimize + objective_metric_title='metrics', + objective_metric_series='mAP_0.5', + # now we decide if we want to maximize it or minimize it (accuracy we maximize) + objective_metric_sign='max', + # let us limit the number of concurrent experiments, + # this in turn will make sure we do dont bombard the scheduler with experiments. 
+ # if we have an auto-scaler connected, this, by proxy, will limit the number of machine + max_number_of_concurrent_tasks=1, + # this is the optimizer class (actually doing the optimization) + # Currently, we can choose from GridSearch, RandomSearch or OptimizerBOHB (Bayesian optimization Hyper-Band) + optimizer_class=OptimizerOptuna, + # If specified only the top K performing Tasks will be kept, the others will be automatically archived + save_top_k_tasks_only=5, # 5, + compute_time_limit=None, + total_max_jobs=20, + min_iteration_per_job=None, + max_iteration_per_job=None, +) + +# report every 10 seconds, this is way too often, but we are testing here +optimizer.set_report_period(10) +# You can also use the line below instead to run all the optimizer tasks locally, without using queues or agent +# an_optimizer.start_locally(job_complete_callback=job_complete_callback) +# set the time limit for the optimization process (2 hours) +optimizer.set_time_limit(in_minutes=120.0) +# Start the optimization process in the local environment +optimizer.start_locally() +# wait until process is done (notice we are controlling the optimization process in the background) +optimizer.wait() +# make sure background optimization stopped +optimizer.stop() + +print('We are done, good bye') diff --git a/src/目标识别代码/utils/loggers/wandb/README.md b/src/目标识别代码/utils/loggers/wandb/README.md new file mode 100644 index 00000000..d78324b4 --- /dev/null +++ b/src/目标识别代码/utils/loggers/wandb/README.md @@ -0,0 +1,162 @@ +📚 This guide explains how to use **Weights & Biases** (W&B) with YOLOv5 🚀. UPDATED 29 September 2021. 
+ +- [About Weights & Biases](#about-weights-&-biases) +- [First-Time Setup](#first-time-setup) +- [Viewing runs](#viewing-runs) +- [Disabling wandb](#disabling-wandb) +- [Advanced Usage: Dataset Versioning and Evaluation](#advanced-usage) +- [Reports: Share your work with the world!](#reports) + +## About Weights & Biases + +Think of [W&B](https://wandb.ai/site?utm_campaign=repo_yolo_wandbtutorial) like GitHub for machine learning models. With a few lines of code, save everything you need to debug, compare and reproduce your models — architecture, hyperparameters, git commits, model weights, GPU usage, and even datasets and predictions. + +Used by top researchers including teams at OpenAI, Lyft, Github, and MILA, W&B is part of the new standard of best practices for machine learning. How W&B can help you optimize your machine learning workflows: + +- [Debug](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#Free-2) model performance in real time +- [GPU usage](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#System-4) visualized automatically +- [Custom charts](https://wandb.ai/wandb/customizable-charts/reports/Powerful-Custom-Charts-To-Debug-Model-Peformance--VmlldzoyNzY4ODI) for powerful, extensible visualization +- [Share insights](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#Share-8) interactively with collaborators +- [Optimize hyperparameters](https://docs.wandb.com/sweeps) efficiently +- [Track](https://docs.wandb.com/artifacts) datasets, pipelines, and production models + +## First-Time Setup + +
+ Toggle Details +When you first train, W&B will prompt you to create a new account and will generate an **API key** for you. If you are an existing user you can retrieve your key from https://wandb.ai/authorize. This key is used to tell W&B where to log your data. You only need to supply your key once, and then it is remembered on the same device. + +W&B will create a cloud **project** (default is 'YOLOv5') for your training runs, and each new training run will be provided a unique run **name** within that project as project/name. You can also manually set your project and run name as: + +```shell +$ python train.py --project ... --name ... +``` + +YOLOv5 notebook example: Open In Colab Open In Kaggle +Screen Shot 2021-09-29 at 10 23 13 PM + +
+ +## Viewing Runs + +
+ Toggle Details +Run information streams from your environment to the W&B cloud console as you train. This allows you to monitor and even cancel runs in realtime . All important information is logged: + +- Training & Validation losses +- Metrics: Precision, Recall, mAP@0.5, mAP@0.5:0.95 +- Learning Rate over time +- A bounding box debugging panel, showing the training progress over time +- GPU: Type, **GPU Utilization**, power, temperature, **CUDA memory usage** +- System: Disk I/0, CPU utilization, RAM memory usage +- Your trained model as W&B Artifact +- Environment: OS and Python types, Git repository and state, **training command** + +

Weights & Biases dashboard

+
+ +## Disabling wandb + +- training after running `wandb disabled` inside that directory creates no wandb run + ![Screenshot (84)](https://user-images.githubusercontent.com/15766192/143441777-c780bdd7-7cb4-4404-9559-b4316030a985.png) + +- To enable wandb again, run `wandb online` + ![Screenshot (85)](https://user-images.githubusercontent.com/15766192/143441866-7191b2cb-22f0-4e0f-ae64-2dc47dc13078.png) + +## Advanced Usage + +You can leverage W&B artifacts and Tables integration to easily visualize and manage your datasets, models and training evaluations. Here are some quick examples to get you started. + +
+

1: Train and Log Evaluation simultaneously

+ This is an extension of the previous section, but it will also start training after uploading the dataset. This also logs an Evaluation Table. + The Evaluation Table compares your predictions and ground truths across the validation set for each epoch. It uses the references to the already uploaded datasets, + so no images will be uploaded from your system more than once. +
+ Usage + Code $ python train.py --upload_data val + +![Screenshot from 2021-11-21 17-40-06](https://user-images.githubusercontent.com/15766192/142761183-c1696d8c-3f38-45ab-991a-bb0dfd98ae7d.png) + +
+ +

2. Visualize and Version Datasets

+ Log, visualize, dynamically query, and understand your data with W&B Tables. You can use the following command to log your dataset as a W&B Table. This will generate a {dataset}_wandb.yaml file which can be used to train from dataset artifact. +
+ Usage + Code $ python utils/logger/wandb/log_dataset.py --project ... --name ... --data .. + +![Screenshot (64)](https://user-images.githubusercontent.com/15766192/128486078-d8433890-98a3-4d12-8986-b6c0e3fc64b9.png) + +
+ +

3: Train using dataset artifact

+ When you upload a dataset as described in the first section, you get a new config file with an added `_wandb` to its name. This file contains the information that
+ can be used to train a model directly from the dataset artifact. This also logs the evaluation table.
+
+ Usage + Code $ python train.py --data {data}_wandb.yaml + +![Screenshot (72)](https://user-images.githubusercontent.com/15766192/128979739-4cf63aeb-a76f-483f-8861-1c0100b938a5.png) + +
+ +

4: Save model checkpoints as artifacts

+ To enable saving and versioning checkpoints of your experiment, pass `--save_period n` with the base command, where `n` represents the checkpoint interval.
+ You can also log both the dataset and model checkpoints simultaneously. If not passed, only the final model will be logged.
+
+ Usage + Code $ python train.py --save_period 1 + +![Screenshot (68)](https://user-images.githubusercontent.com/15766192/128726138-ec6c1f60-639d-437d-b4ee-3acd9de47ef3.png) + +
+ +
+ +

5: Resume runs from checkpoint artifacts.

+Any run can be resumed using artifacts if the --resume argument starts with wandb-artifact:// prefix followed by the run path, i.e, wandb-artifact://username/project/runid . This doesn't require the model checkpoint to be present on the local system. + +
+ Usage + Code $ python train.py --resume wandb-artifact://{run_path} + +![Screenshot (70)](https://user-images.githubusercontent.com/15766192/128728988-4e84b355-6c87-41ae-a591-14aecf45343e.png) + +
+ +

6: Resume runs from dataset artifact & checkpoint artifacts.

+ Local dataset or model checkpoints are not required. This can be used to resume runs directly on a different device.
+ The syntax is the same as in the previous section, but you'll need to log both the dataset and model checkpoints as artifacts, i.e., set both --upload_dataset or
+ train from a _wandb.yaml file and set --save_period
+
+ Usage + Code $ python train.py --resume wandb-artifact://{run_path} + +![Screenshot (70)](https://user-images.githubusercontent.com/15766192/128728988-4e84b355-6c87-41ae-a591-14aecf45343e.png) + +
+ + + +

Reports

+W&B Reports can be created from your saved runs for sharing online. Once a report is created you will receive a link you can use to publicly share your results. Here is an example report created from the COCO128 tutorial trainings of all four YOLOv5 models ([link](https://wandb.ai/glenn-jocher/yolov5_tutorial/reports/YOLOv5-COCO128-Tutorial-Results--VmlldzozMDI5OTY)).
+
+Weights & Biases Reports
+
+## Environments
+
+YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):
+
+- **Google Colab and Kaggle** notebooks with free GPU: Open In Colab Open In Kaggle
+- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)
+- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)
+- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) Docker Pulls
+
+## Status
+
+![CI CPU testing](https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg)
+
+If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), validation ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit.
diff --git a/src/目标识别代码/utils/loggers/wandb/__init__.py b/src/目标识别代码/utils/loggers/wandb/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/目标识别代码/utils/loggers/wandb/__pycache__/__init__.cpython-36.pyc b/src/目标识别代码/utils/loggers/wandb/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fcf7867f1ee251ab02ffa17c2615350556d4826d GIT binary patch literal 163 zcmXr!<>h+1`%e-B5IhDEFu(|8H~?`m3y?@*2xib^^jpbL1QJFNUwwgGSF4y8d)GhN zz3u7trO#J%zg)QU`RsKuPiOCY+T8zQecRKH)iIU%Ir(L#x@LMtF{LG$ImIzK`RVDY oMa41YiFqkWG4b)4d6^~g@p=W7w>WHa^HWN5Qtd!C6$3E?0O83sMgRZ+ literal 0 HcmV?d00001 diff --git a/src/目标识别代码/utils/loggers/wandb/__pycache__/__init__.cpython-38.pyc b/src/目标识别代码/utils/loggers/wandb/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2572bdd4dc66a5cb3faaf8fc99d09269dc45e7c5 GIT binary patch literal 171 zcmWIL<>g`kf~ULxB!TG1AOaaM0yz#qT+9L_QW%06G#UL?G8BP?5yV$tAlKC@=EdIi zPj+v6x_#;M72Piv?tDIbUCh(j`<^!UzgXY)v}1KlWqwY6nW?Uso>5F`NoG!QOiq4! qdTLQ|OnG8nN>WUGd}dx|NqoFsLFFwDo80`A(wtN~kWHU~m;nF{QZ?uR literal 0 HcmV?d00001 diff --git a/src/目标识别代码/utils/loggers/wandb/__pycache__/wandb_utils.cpython-36.pyc b/src/目标识别代码/utils/loggers/wandb/__pycache__/wandb_utils.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6e9ccb17e0e0ab51da0dccf4dbac330f2216fdde GIT binary patch literal 19631 zcmc(HdypK*dEdU* z^p8_1mMh8c_w~%|-T`8wIF%f*y*)GCJ^lFl`+nb}_oL(Eg?GRDr^~OkE$cs6BY!32 zpT@_3-nJ}dRV`&Jr)5`d`F5&~e5a}@e5YFJcBY!KSti}ewsX~-$z9t^HK#Xn`VD?JXUx4_TiVID zX4?m<2ikMhIk_Hd9c<56=i7&>huRC(1>4$Qke(f`9`=rShgD(M@qaJ4?=}0H)j5*3 zyrbTItIkG2f77=3PdU}YFIyewCCj_tbDFLys-wHku3ddVjaQF($5w4sRuiw=)#GYX zO}%baA5_z72Hy{YcyXPBq-}4vW<3cS}?6ADj?F8Od(5`ptYo5lP^z-##o!=K5D*?ZQ8>;YwYV%k^ci<%biE_S#2U%-1$i*RQ0)?B(Y^{_$t7gySpg-pWQz)dRdX z2n*(NwbAmzVy$-5(|)7dsnx>tm2*pE&lk; zj$FXUpF}dS0&9RtQPz(+hb&Bs%`v4~UZ>(zi>tcZuK9t+T!h(8?X5PpzHI3!)ccd? 
zkvwzu`geZo*T41Eue|%4fBoCP{Q393^!LC0&aYj6_nqHW=OVUDgOD4l4)2w;Wtj}l>sr4Wp`gtUl zJ#Cllw5{inPezo%MFAff(a#~HFa~@atd14fyN;gY2rw#Z*VeCLtSNeqtwz)~rd ze#?1#awipJn%O}L<4<1$!}v?q73(FgZ(3yzSwU{s=_-?N=7TYrB|ghW&;GK>N6#>S z{+k0we_@cudkcK;3uqIwm|J?bpIO|lw_9PhvDWEo4-}L2wC-v@%wX?eHd8n2tx7R0 zH2g-#59*y2FH9@!23^G1!o1f}KDSkv?rwUWFfEOZ`Sn$=*24A(Q=qx9fQmXWTc6p39x*fpb>0`GtyKtWwlPd z?d>P=(i)~#8tANcIS`+M%EIuo7=8iNtv`zEWZ zKUmxhuDm7~Y2oUGaQBBE5)hD77hTr0-*ytoHmKru8^m zBVUl!k~s@A!Z^azxy?qHj|n%-tM081H%7#rZW&=z$%m=#W}rXJHa^7U43m!_2~+jW zhCa(&UY1lf^!|069;Iv3r;nq8KZnEu8JF-c53uEem)|FeCTSa)*6PP_i z&`t0qihtoqc93eO$%%KIAxBbU*UbuM2H@A=K3S~#ZIf^2f;`s!b}-f~M5|lG%1Jr2 zi&X_3j3;YbMwwJFL7vBz#X4uX$}In+6|jABH@5#>laJc}?p^Jp+`jhngM5?ir_p{o zYJUvvgLAU2C#^xse}9m^ZR?K&*{Dp8)U{Ip&n>9@uJiUZBvGn4$!)vq*w%0k2H>bQ zP(C(tVM!mtG*)cAfG>R*$@})%eu=E+Ohb^u8BJc(KVZa4tGiNf2_i6D*uGV9mX<10 z+GTy=uxVD-5AgaJ6B6Ejg8Kp9`*Tc4C&D+va!hlzdJ8y5|9L(Uk!ZNH>a{m3Mg13e z`vjAdNUCGPaciKlY7PvR8BN9zmd%5>(Xe2yg$I>bGI^5c74KKaxm$#x^Ku{2>L<|b zi}?6_F{zX+_ek^s#X2K-@S;i3@SH8RR#+kaqr80@AO8*#&jO^j2&Vz1k1I$^fKA9n zl>?Z}sJt4(cUBct5#KpgQsel}s|VGDnncMl^^lrU)5sOnjGD!>qI%dXdE;K$n^*;^ z@+Q>Wos>GL=3mbMPaIMUC^MxFt0VZHR`;o+_?}TG)cwjuE3@hWbqse72uwX07OxS` zT^8I_KLYH7EFo&66GPNfZX}lcQ?BfGd=t?EMp2*vAo@VJcVpN>!NuRz^@i^nSmyea z%x(jWUAOD3DD@sb@74oXh;OmpSai>CyUJUw_gcXzH(>3N3esewv(oA*;Oh15O;1Ar zL5u`IeWzT1n?lJojIpQ9JJ?qg(2bRN0Li5^$LGs+zp*0YjGxI60lHh;#N%cx=ovWh z&4%LC)Rh9d@1g|W*9JaUZmZGp+`3)^|Lp+HV^U-yfjvj&P~K_g@vdoHsdrpJh_1Wr znK@I@o1cZ=&;}NAF=y-DTV`IOS;Y+Xd{1HYo`(nCT`S?L?uv9t=Hsk%U_A&nePDMy z@)ynAV-#oBdJRnMnFe%@H8Xww83VGV)>4?i8j;2dg)aYc_$1(R4g>@oVby>>0^6Td zb|)R!O~(Oatx+(h)Z}#t;~IcvEu*F+pG7_gBx2R_YFh4$AzxTyoub??Db^X6d|A?o zU~*tLr-ErP->);*oC#*Z+J84Vz$+2u>75LOc{Z3GWPxw8=+9pZ<^bXC*U-1-L9opX zKq)q)%D$CB`>8>8VArNP*C7OKFy5o=`EA>ulhP0mXn9`cHXQvOmG2aSL%fC>uz>Hw z_#9DV8!6PwGY{Zl6H%e{!drPW3-S&CjoXf%#yc!k+(7&KJ1Bo2zL)Slh4(Gt*-?r$ zlrPIT?+@HvmRv=N`*C-ovt`Qazd4)-Jio)cW+r$QxM(TWe4sfYZB2Aa!7;WFT|G8> zg_7ml_U6~|J;C=zHIJ(jpEMu<)%aTwGPrt>eUqG}${2we4c~JdOe@ofpT=XDPyqLp 
znR*LST*!!g;k0|HgZ%?R&|U~q&354}ywg2N$m(y_SGt)hoS@d+DOvU3IU40pcmy1-89`N-C+BbTp}X zwTqn|$WBil?s;Wgcj-Oq;QeDVqpg-L$Rv2THRY$0pp#9GH6e5-^b8E zI4KXQ+|*Xq>zy^PUtpy(!ra2_%gsP3d?(!TJQ)l2|o|%t89R9BTS)N;h1nUsu21w^2xJ& zG9!IW0D;j{rbaP>xi!J5X$48aaExVw`Z5e6rRbTtI=H;t-Kw=A0_&*Sfo;Wrsc<@} zAx+lW_04diUEjjLF*s04kZmd%^hfIp6nSe(83ep;{cK zA*%y3*924g6cbs#7nmcU3Fi$Q$3Cm=olJd!WsWkr!h~97nC)#s=6hiogx1?MDlcSS zB?qm~YpoiDChCAp{!m4rcCZgrh51^I9jw)U7!Q3`u*S3Yyl4Pfq}ljiiaG%`f|8T9 z9r;(lRX%@}_n5A*$CZ;JuvN3!(TEB`UfMOXy zm7p{LPXmN*rtl$;&nO2x{$Z5@kB3BPX29cfyU?41Joq`}MPv?v5|Nlgvc?=jb4Dds ze{_`7U7(4LJcqJ5f>JaD0s@iyK4`F?oQ2ePJ>o~Ft! z+$m-iUH0gy{mcxe?y0>uvYex(7HS%0e>OsDJSZJh3#AiyrwLVfkZu<7DdFSbGmcLgpNZAn^&GGekW8(J zPYECFklHvtWqg1Tnv*b&Sj{Pw+Q{oaKyDhQk{Jb67CUBk*AX0d0PlopF?xo5H1g~q zo=xG|0kklt|Eri_4xp!5DB4-{4+fe+Mx~($Tg`(k14MH~N*#wU*wY0)8Lbo>XNj=P(k@19fputQs&Dag<_@ zt(uFg2RKf)jr_4)n3OOQM@mqr4@8IAAOtkCq|s%puMZ zR1m9j6w{IZHr(aJVdnzqOu#k(2C*f`lyikbH(|d9GlF^(^RAd936LTZi|_=tFti@y z(72aDHQDcctKl!wrfeeUdR;1R0W?q1_y~=lV%}_YdxE6MLCuSl^oHEjFdP)z-!&GF z2jdOF6klwnOt=L0?kT)0fuSPOsrzWi@b8D;5(`F3;0IyBWCFy@mskLo#42NqFnjGX zk6`zQV{lk>*D!QHEO_Wx&w!$gx7l4;7a-Lyt?6zLD-`G+?5~BPW*%k)u!;%tJ@OM{ zTYMYMZZpOi>Di16&9B3 z!OD8g2i%7}m|ge76cqR{LtNAs1So(lJe0g6TGiw&VG71!eUUGCi3y>dewm5Dzx=t# zC#65ddjc^j>hZp<0H%j9bCp0&U+2?LGx-dkP6MF{xD{Y$aA4NE*=^nQ!d#~dEhF$N zg`q^yKge2k&EO@$YqV_YOT3+1>9%_9j$e%sr}((y4YOZRDAf4Z`1qedVin5*bIP#z zGv~mxbP~|0fa`gFoC5Asm>>n@SwM~kDBdGHI)`13cGd&cj5I?a1+o14*O#&$To9>6vPLFN#YYrQUO+SF4IaucM^dWhgV;UH#ulRx9r3|K8S$B)E{;pKXWz*{mdqdu*vcN?wLF#A^%5K0;h! zDd<9XFyhPaKOvev4YZ_h>|lSLwozJEyPei{1R)^xsdu$tHRg2>HH$B=f3Val}pY%r! 
z!phOF0O|vq&xD;QUh6Ru49dllTFVq=7RpRBzJ zcNX0HAUqIS=1vwi(Udqi^DL*t^`vqtPa7cG85?BP7|}FC1mH}gc*y9MDgpNbU8}N` zD>h3C?lZpO9qZ?@0T7Qf2{ABL|I(-!xJ;J=*y!C7;QLU4g?3Lq4`xt-N(m0lEEzO* zJ2+q$$T)(RBo4lA2ygvl+k9{G-EZCBV{*eA)8 zzH>VmQ5Cl|96UD)r-QAz+g>o#%Qf+*W;8;Wk+m^FE!k?Rs~c+#xISt<-9mJiz&T?k z@>E}NpKEZRQM5_^;rO2MB3LO+1FU`%&k`sM8z5~~Vb(aw!!(`beS;U>FwHK8FcgM4 zVfUWjzyF2$O^(ixPQoGJCNWdKR2etM0UDo1aN%)Y228}YeUmTE!=)qPG+~M8IBrbe zU@;nxXlYyRRv?Rb=W|TNSJ`6jEfy#YmqdS_clVkG!Z9{1ef=;u`#g$R#cA6CYRdxS z@gKK-7HF=2V06nT^;T%f;vd5zi0B2t{V7_*K7^QQIy)Iy1TqYE=q^Avwm0YlfyfbE zHY*x5bn6}11@u#dZ)^6d)0u{CE2q-bgz=4lo>ge@JNcb4o5qEh^{7^N5ratJ7y*@Z?sMv<=xJad(>uI zrgi}w^+5geFc2l2A?BzFF5AmUMS(V8Pqu!?yCr}UoCjt|qrt-~A!fP{)Pvc+Y?+~B zgRDd%2Syqj@!-1kY9N7shdjXW->8Qr3DWs-jw(T_2fnOMWTOWhyWb#0XTCwF(;5NCQTufP~Xyx z00u>g-y7)|`jtF2%#~vunH4Z&z*4>mZ-G}AZ!rjbg+334jKZLED|yv$Sxo4U3s4*& zTyoKUX3ahQlnglvJ!G)kaLMk{GB_Nr_DEl3xsxFp(=6@JgiNtQgpLX<9D37?R0W`- zhMUaLP>_l_L*xaxCL_h1&Wi4UUnLPfME=x3$?zpCr~wxCN*QcN5b?nNN#_R4=|dKZ zxK(HIL!rG9+Bb)HH4)SMg(qo=C8m4o6>?T=2B^6G$M*Bkh~^UJzWAh=u>GeNhYg51 z<|+jgfrIX$sYos@RkLQAqYy>oJRu@9(%?LD{UhSH8d1?A@{mOocUR^9p_N43Gb5T2 zS-LTX&?gh7042gP56HoTKi(6#8O|CiNmLLqTxfzmV-k6YIZm(ffYBd>2;S$OyBel8 zyPMkOlLweQ!-SGe{|pmx=KM5s#JpkpRvmt&PxJamkW}CRWBy?#)N4d^RwolmkAqg} zaH@(3r7Mi6+sxC?u?|VVOx6kJ_R2@ShI^mj`f(<$asd{N0{%U0Pp8tUMdap@mS79X zLt@eb68~EORHh6iT&A6*;FOW}M?K4nl%f0Ae-AZMw2C;rhzzNbVGJ%wbXJIo;yrD` z&WEPMVb%TwIYHtOdJ&baYeR!}umTI@lu`{~M?V4-91jU8nF(lUpRQCkJrBre)Fo8(sv>1W^|z zKoDLI+TBgq=VPLD!>EPDv-D#?L3px;Wdue^QK|Nym3zQ69mIv~WVU{Gkiin)j(rhQ z?~5>MQS%WWAat25gF_KvQKV)hk+#sDP$CB;1TUd&-FP0?CDbV3{U93#7n&zQCMXZG z?13s0b%{S>0)6{q)&QZQCqPg6xCZ;l_O5Y^lWLr&7Stp(t*N(w$#4%rVm!A14Q_hZ zB2pvz+4{p^hU1C|_8TDBB7$5?JL5a$oryINxnu22?o4fM4#pAao%%5c$PFn2%izU9 zJFi^*jJ-3xGlRIAX|y;!m<0ZrVV{u#^Q}4fo|UTuKx_y_8%!(4)=VF=<}5XT2XhWI zCD((g!Ss{XZTrR*Y>z_&1TLzDJ5IyinH`KF9;b*=%n!;4UYu2j-%26AW~#Zci;-aN z%AC91w9VQ_uOYA;=xGezS0XiTv?FzrZnD=53m%0z+&J-u7*zz54Het+qMZ;0UQw&N zhaEBAxDfPlyb9}IzWelr0W`*}2q_9tNIov`KsGd660>2i(Nbn>C!-jldqSP{y1(vX 
zzn~sm$H>6w$DQFe5LJh^<%q{n0t+WWwV0!jeZ({H0SF-;T3l2*7l)&{&_!v~Ou8sL z6tT+OmYSA^rGAkuM4_c4`kWj^$(kSu!39x4)9ZZ!saQ)Zx@Df95p#aEXyQ1el`wa~ zU`2U3-t-zO-NnxZT`b@U!^vJjc$uIJ#@JeSoSzVcQL!;4rnEVtIp$D>NI$tnTzTV;OU*Vk$ z@u082Bu|4b=RyCXTtFDnPa}%B^g45i5y~(FpiG#h^9wyr;hmKO28>WZ=wD_cZ#;`! zbqpRQ(>-=HI#*LPBRB4yf?OC~KO^WV95eF+u@gFe1GuZbroYTiWvG|~D&Bha$LOem3-rByku78sq*(U>k3HQE05@Fx0@v{AhfEqe(X6;F59+503k();x zd;w~I-cAF$P*OT5$73Hyk(TyBnR}5|A{EPc4?&$s#VTG+sYsa!>5xJIeJmZ)YdGLQ z{U7!MoCq=6|17-k4TQRH{Q@LgMkK%&Z#$o{XbXUb47s+E(N7^R1~xxjXJ9Xc!~p-# zGZ-EzPWd4fYrvf}czJ42I0WmTV%_39>7P$Qs*NF@5EASTOxWPxXe(o(*4W2zdJMJz z=;0e#{STpK%YBtK_isF<5Pm->0A4+H&Dwep8aiB1kWlkGMYKVQ1ik!_1#KQzDE03k zWhUS|N6e{68AdXW;jj_xkWG}^8OL=AF%K4E9+*xblztpCj6wa7r+e=V5F-Iun_5i| zvcKSb0b045hTNH{Z(xEGc6eU6_z-FthAw&~oY)aI^Nh}P4;(0@_`#BgzTJ*{k{hCO z7LXA(I|!bk$Cu&KB3L?rm^VFFf)(R?QPk0C_tRVLM(32fE$J4b)0k%}x{3mTg!@Mb z{FP*E+y_R@$9s85V2>9qwkwW|UAS~ooni>eJJmGWM-i@!NJtFf`;=<`j~KlERcxSt zz-=3G_Qyv-D>aGO&yaZM@5R~;p6;M6YBu=${T~$Br~el6hUMdkAXeceNnb`<-(tSL z_450++fOZibT!7mVhsj6YHZ1ItiR0`{tlDhKmz_mD>C?$>b7}Ct~f#x5N~8#`tKn@ zUbxg(kYB1y8?H~btiQ@+g2_oHUt{u%OnwVVnBMTVeWMXj9vF5jp7h`5BePXA-n ztxAA{DPjgAVtOy%4G-LNk-o+yeFBYGV|1#|;xh-HORj6;5QXm|kJB9v&L=sDIE1=c zhBs!xK4SMD_+OGi;)J2}qfqB``JY7shGJ=&?Qj+cDZGAz)Em|=V7+0$G|)CP8t$1zu;6xv)R5VJIYn>>E%lgw63R8- z=*%IjRXMP%eEl~@Sr$!&`&g0alJNbyvV#|06Afw%S@aaP0>jtnQ`lFPRxS7wO!yvb zAaX1MZ(vjjj#eOA7F+|-vO@rR4|GimxQoU4;5fKFLe~FxxZ3{_KmGs0L4}zl{EwzH zlGqH%tBOPb?iG>Ma)=RLJ4#hZeo1Mfz(GdHxAdJff;~VGZ$QrRTjaAGXA}7h&j92z z4kq2nL;$&9_!xo3DCg4ugg5>vlYhqK_nG{2B*M4!AMo;DAVECkUT&rTB_I7OKC*?! 
zRMG}-{~O*9nQ)`yp)d^D*@WG~mH#B+%7y;v(d`IQgtf^tf!O!t#(&60?`DPo!v;Uo zTO>?O7~8+)8-*dh!d#8X4JMk2@JF&n!$QgNgeg|?hV7+Esrrw2Tc9*Yg6R6TVZ{kj zjK|2~h2wjM@NEv1A^g@D(4w;d9#lXyDa=}jzUQ!$CBx0K}T!JT}BUECuh9(K_3J)L*N08^xJP>&Jfm@o) zm_^-_7mgZ6XK4p^(wchbhRY zFoQYl-F2)ug_ctgAqE-*Gp)9I zFmsI7{9r&V9@j`JC0_8e?g#n6qtd|5a=vgBviimi4l_rm8zjT2&YWkY6zKhYck80J z+~coI^j|p#FEdYIxZM>YS0UV6Jj-MVSueU*F#|DDgkudM?h>908mHTZo-m0w)jiSQ z7u_FXC;;7Mg(Y*8yA|lSL5lh|lGqWhe}Z?uhy>i@Cz;=1B5vp<NXz=BPAcu zO(c~$*+=0lg6?@Bg(`S67(!%bzqYP{NCUW8UdHh1iGg~X3baZGH4zni6MpFdo zuQL(m`P0lDXCf?fz?|U99p-+R$)}jGztJX!DT4tGW_3c_6NAn^b(+2qWnz8G+|G-r zZsiEzc$CBei=3nn9kG)00C5c9Vi*k~Bk?iul6(E?No$E*#daTR&kO*i$tA z`BK^`d@?_ke=I+npDr9KoXr=GEao50AITRB$E9qt;+Pn#h|dZue+UWfC5)89@p>?H z{)_||IN}pNg_Y%M%UxOfm!g%fKMk^R{ccaM(3KsZu3dwAzPN^s-dkP-OFZK@+TG6S zTOOi-&V1|#9{=cvAA3yJ`J7m!5v2^PuKdmqD1v9A4ZrYi^0*F3Cfcc>B!v3oH-5lc z8e~OaC#uKa(Gf~o?yATwyX^6va2z<*ahTY}-&r{^?XKe|hq&EZXufq;DnjdQw0rI3 zHS&nU7$)D?X(Z>lZ*_YxR!clx$HU8w@HU47!306ykl%g)$FOMv(d%?xa#S7n!j(%` zFI_lynR)^adgIJ=)UhS}Tuv?;B7CpiZV=|8`3Q3nbHZ=tjPL1rwG5mkhXl+>Mo5D@ zKy$-9i4``apQAU?fp<{un_&1;`aiSae?byDU6>bfiX6X$WPW$4)mYY~5B!)3%pbf; z@hXeZB}}arqJ4nXja{@RHbv|R#0dEkL7sm0(&cAZn`f?k{a0D`I1>Sd#E;>W`Gq1H zJbBI)C)v%8Ai597&BHaX1IIzj@Lld=`6UM71Yr&)lzI?o{VFRIFSR$jI*{O8*-v2> zQS)BsrXFXthnP@jke>*W<9_nvAo^+Eqh>6dBh1IkWM=9f8!pU0X&As$WSQczO+#ho uKk>>U&p35@V<*9VXB`Kem?q5vuF|PeI-kqW)F}e1H?pjs&c^g_RMtm^y~Y+-}mT!eqy3v;qR^A{@%(fTbA{oc`^Ez z$Hn9L1kc!(rL3x@Y~{4(>BYvt;|-onw5OImD|c!^OAR3 zg=zuMGp*uQv0Ah(KkJvS*!_v?R<-Pxo0BTLXjP~DX@Ba9?WdYEn_2x?e)nh1z3aBL zlWWaw?W^wFny=2wb-uNKYoWTZwOC!;I#4}eTipZFvxC)x{vrRMDqL}buZDNLV!vRu z54o0qr+>$qvsuu8WLx~Foa(`kS#9S<%fHKa`kpGP(sk#GUAfjZpTJh7q-FyhG z98!0@YNOqUmGhicxwCuX`LNMygbhFN>TTtPolYz8);ih?b$xZS(O&oTPCM`}Ho^_> z1;4Sr5eD7^-l;}C@Pnndxl+p&J1Vbs+M$0b+^V%PVv_nCUQ!S5#;t0BL`m(|wW zDU`1`(d^kyU7guN(colTovp9t~FYIRIJr5_&R8G+O=BbK6`TcBX8ftUw8T%aBQH-7E>TW|dCTUTEH)~|o&toZ*?yCHS;PDuLz{~ww^_P z6{L{-@qPv$8Oa04D7%l*^g$*nb=|pc@7kfmk@Zv8EqzbVehyDDK6kmBt;x%mB75m= 
zHd%2ax7lg5ql^x=u_-iXD9Sd18mAibFvS(9VXfC1>vFGJk{04yG9f0JVa@$!okojD zt^4pW$RV-p8M|b=wq8VjY*cC7AnD7fehwLhap2=%m8{Ud;^=vf0wc4o*!mTWH>EIE z{YvQeER|C3YtHLayQwhU&-7B51NQ~%QR^>R&sr~Xon4h)w8HEar=v{1p9}LcQ+$?* zpMA>Y<7b$|;B!4kf1&5%y#>DaIWQ#5WOn(f8|kIX^{rNvX{@(9+JDoIGQQRw9Ykr+ z5oR=Xq28(#qe3HSw1cqTUiBkafiiRvV~cWrTLmPs$n9+V?Z}nJ@3eG=JgaU0S{?(>mlogH1)m?j@ak>D^A3(M)+1;@qGmhCw^XFE4tyL8iW zMsu#CkD%-=OYY*PEcs)|gji@SwsQr1a2LzoOzq;j>Eav9j&J6`_j~q!@X!6$uG>rX zuuMI74NFu>Ek`bRS1roawmQmhRnoGbg@QD@8o5E}Z-Jic+HZ%IbT!A~e;BIfGLKnOLbIkSAVFr8tb8*?vgjwFbVxM<+K`Fbb zT{q12^VhB2bT8d6^uQ7JTRnW_+r@WkzlB|&>1Fywl*)!~Sn6fL1KopoXV0olsC2x? zGs_i6m(edxMsdSFb|ZVdRS*3jype6ygK%j(JXa~}2|SGMz@)AEK@e}Zm|ZBfE$g5? zmdR*8*um71Vd)2%a7AQZqO>rKD0OnX5#4% zGIu;@(o_a{W-RF zB;kZty&v;qHf|H!mJmQVksYS`ZqLSSJ3~&SQs>PorF)KY!F4iN*{des&xSdy?bR^f zFT^WY#Og>nw426CTFr@M{mLkl3MYG5eU&-LwajoOS^k6-vVFiBY5&_MAGiPQTiQpt zJ?-auxjx%>(Y_nEpGW(7X+L|y>ZO7kJ@=}u-y3G)GCA&`-2!-RLFKPFug^dVrTSAO z&+CqD4dB^o%&~^xg=6|I-0ki&LZ#JN zt+xah82)QttW4{B*@*B}mlZX|jlQ3W$7Iiz`wP5xf(dtuaEGWo*mbpf3rI(kh|RV$ zJX!5*ZC8r=ao&E23E5vYFMPI!T~^J4tumue;z3k44+f1!1#>N&ro@uTlN_&jzdAvB z5jM`tJ=<3QFq(ZHpP1#&38OuP9m)^(tTU1a$C<*td0T3&qSn|SCGF$*1lN%G7J#)y zPz^wRuY#NeH)vxPxxhjat*l3pH%bLQfj|icr^_)v8WE9%(ObF4&i%7-J$Np_pEwQ-K9LVGN| z@bGtTqY-!pn0WzZvey6$*IRW~lzI=G^6H@{n{TP!Sn^I?_LRR?-)V(MypXj=DoB%! 
z_G)WK0bOrg-u5*_4FpCA+;_waE>rw?hCS|R^A7eE1$1L|Fo5Jzn&b18deB&vaSooz z5COegmx;>FSkN=@;R_ALsi`XkjNd^Cyl)FAU3slW+xO~v9bC8#Opi&Ci3Iu_nL~M} zna5kEakbv|03JHtif`sj#czHJ+QSx5kcTa615k$D?4$%sobNY<;JJi9ObU=CN+3FF0mkw$xgVa_8c$u}aZ8_{$+mRv)LyKr~1eaV#7zdW1=Jio@fW+r$QdIT`dyPJ~& zKPKCy@E*1hUp+E7K|wJe0>v9ZKT`7fZ32}6EAkp^ z9w5rnj@%t+u^vd_oZj(Asy*k2JKIBk7Q-m;4m8m0tkgoV5LSC|u&*C9q|WXolx#?b zxA0EyFac_?U0?Mp-qEApX+!*bXkXNQGB1GEq&o%ydxxQUNY!U|+TNKbyv~~U0+=A4 zl4XGO4QNtHy`-Z_&2t@4dnnYNJRE!FpzgAM0(AK{)$#5f6!eR{av5uS032$q7@SJ7(^wrwg*;G z!->i#gTRLcrqh0s+HmATP={uIQ>=ZAi7eU2nEN>EEEq%%0;`Qrp#D*oxtqz0OsGIc znVoG2d_O8Ml7({Tx7G|q6OBM7U*08J0u_Wks4B|UYV2UG_TzXMw2)X6 z8GAvLfDF>i;9rVr0hNN1ld&E1?|Zp)&d%lB?-p>q03CSF-2ZMS>weeG5tiXOYLrt2 zI|qO^`2wS#2Y(Lo5Ex)$f3ii^Es5^;{ z11rZS8+@D%7GvF8R0t|6)k#Cn>=E=6*q_F2J9-0!{s5nVy1rH5K3}-$I<(|;?;G8p z-;YYi_A6l)EgJ&0fT^qotQrEX2$f*CX%bpFq3qc(ZR{Gb*g+seGlp&qd)wz=)u1Nr z9Aupwgm}JLfG!FE)XV`GxXmIyC43xwCh#faGr5*Mp9MGqw5b*GDd7X^s7>Hg#s`q0 zIR(3j)tpwT&7A%>pq`mtx<9L2z!NKstF)A2Us&oV!iD~#%7zQg1I>f5)8z2U zU*}u_ve7l5Cf40u>g9KE9ICtsI}Yqa3xF`N8lW6N7lD;xkFA=AtGhW)wvEx;a~;+s z&c47QTYnw-2{pAz9sbu${s5d2YTD(TbKb~V?giBQLww(dQl`fAei*Hi67fu2wL9M) zjCeQS<9OZyaJ6bvDh0kWQ~&ZPUx_gfm`dBH(TG$z1U>>22KpUEcC6G5DV)IUWC%qH zU@91@D^!PNC^sH zhuT~SbyUpKcm*i<2;P<8 zQW5o3h%{vT55j8+4r9&m-N0co0b&N!9|Ub1GmThf^i#OjN0CH%xGp;D7;O+0eDr0< zV4}3Y-C5lb9Cf3#t~)zelu+-$;#wGL=uuigtC$<#CJWK~kW}uEa)U9$A!OKpP1=hl z#>Tw|ltlXn^JwhAL(VD~f!1}Edb<)1(;nAq7)SQ zC{2je6#yvMEm}<85iex&mM8^tus*{Ve1Zv4o~|(=stkBpw*JLYUN+bRf|0~(GRA=7g&u$?lD=nLI`77E5|X=( zh9^yca^)LLWE7i$89_Uvv1Q=S1)pfI51XT9)2Mf7LF}AlSZrdj^s$CF7}l{{8%tWd z1$j~XI}#hp23uV8$g#fCq+eI)V#5Bw5E|iw1eCyc!FH!@>{o-i>#WcnBh1n0TS*T9 zLf3?!bT9;cu@Mlz2^{neFL&Dh5szslDX*WzF1cZ!*YC&oa`)i}MMXI-Xf^50;I*pb z2ki$#&%e|N!Usi7t&|L(c>vW%JQ%`LW_eG@yM#F#vVvSasBL$G#-%75V@m;kVqdOg zgjyq)6GYV(%Tt{gEQL_G@xOu!k``fu#WdXN*U+Cr4~p2E2FI1=VE&y4RZ#=!-akrH zgT=iCca8x$k`|Q@A+I%L00ZD+4(W1Tp=*PL)f8VHNsDd{C>ah22rdByrvwPd>1XMr 
z=s8U|r3ka&uX9l*NB5MjqMRW#lo%3N_xF+o8}#)5j=ExrF1zi|M`V9aGs*`rTg%j5%ZIT@y{?^XQN9fpQ&X>%l$dV;5m3>A#A5@lG@BWSXp`>u=&91jT+p&_qJPb{8dk_a@P(1#;mr+LE41k8(K}L2{$HP znhLH_20HZM{wI5H!tK?};FG1*^jbz`UQ5y0+03z=%F;!pvMNU_A==6JGAd7a%@BfS z;R;1WTDMdQKp1e_SQEoyzog)1;~U@ExbM|3eM6o0akje6!-!3VcEQg zp8-FpK)(c+W|mAE1P?CQ0aT9hCU*#5H^jR^LO0)k|L*kxj2Y$dM)?!i;~fU5L?_UP z)3M@h?zeZgR(zejYpoHs>G=)7zJ2fT8qA=mhFb9h&phCZ1`52ilQggbkFtSj-r*I* zBKh^U)IVkNRLMcvbjFh=jJO)KG#ossh10>-yvu$#RMG{V>Lu)|C@pJauw|xN>gdLL z0}hhfj&30mOu(M8C;6%?sL(U0(CFVJfoNiEya;S^X`i0bc$%6ESY> z@})UAeI(o_DiJpi8q=3pjQ(nxrsl7_sGWcV#MB+~W9@+&p!oSgU zcGEBvq#6FuIRKT}-lVStat9P|)iR=3L*?Fuxj;WgAh&L>IqeykyRvkyqg0k*KWk9z zcXPXWuBc>fu^l@53CheasgXs_^Es+WfgwD?*I9HE4F zP9&wj6qW%ljr(0(-{2jn38kLx!JMJLk+f%zwP%mCH#H~?mAO39qD@XEE!uGAPjbY& zgLa6=BPn$eEO$7yy z)a&=`7Q=o0`ChhHWSMY(e?iUm3Ih%|;;y%vi)v21Kwp<0{3=(ce*ozOd7)e#L^>5; zP4y3nu>p6?Okmz%wCLZ-yX`}_sm-=b?E?7fzWV23H%j=!fUhP9ZJeEoUTt8XFo3py zQJ^LG4=j^Lv4{6UEO+m!9D+MYX$<$ZYr6Z$;3e27G#`wT?RL(OD}oP zQx7oq2>|mQKxhtUz!Rj4NmB?s)E9Llgtbv3{YE;5ekD&0i{)5HW(6!6_>^zLTi^l4 zTMQOorCWqSrm*u|OkOoy785Mw0Ud`3u3Yk-T=$MXCPR*c6d4jXT(VpA4GxE^J<=Ch z?qrC@T1%5O(bGURLPrH14!v$hwgOvG3r=QeC{B%~gBA(!PsWcq?Nz;l%dwasssS!O z)aNCbr~%S=sSMUbXbdp?)6UDVvJV+5qFtS(ABybF$i6VVtBIeY!U>vc3Gp6#iF}s0 zD1(2F)qP|yKaKZZ!tR$&h;=)7Y-!kt1lF9RxFU?uJv66(7RhooV%2+agfuJtGJ=^Yn|X!)>5tvL0e?Tw3Za z-1{)sk9HudT!4+EfPW9zGbuNfduN{hqIWWm^NtJq$U-W2GhY-kuoZY)GZrQDgdiOXw$KA@1h(0VOWaJ^Cwbe{F zAz&#kUpf%)WGH!y!t|l;r53Fd*2|uv_bA=HmJsu_p=JTYMnbc3NZac?}aZGjzw zPm)g&+saM?>5CPh0c!vp&8SSLvy>h(Pw=$0ot9WPjsh62!IjfqC3+grya{9R!KKz!I8~@2iQ^DD zPlf@d2th4UgOV_FXjv$c1y+J<(Y9V*#&rob3V1tOVX&fk5~jm)FT?k%B5|2GGA7Zp zh&8a$&=26N+@J<~$@Vb5DK)`!5NZng*z{|_Y@yXhz!^_Mz`ihZ-6E2MWuXwxazycF z{R%c~5kaw~-HF}u?&LbQw`1*2?N0Y5R7#~j?EuvwWq?|Lj%~bj?i2Rz%JkSa1!OM&c%z~r{uYt%s zv@M54hLTx02&xS@3E9SA2HpiZ42Cu+DxDjIw0Y1}Y4%LID3pm*NEh zIryV2By$p|LBY*9w3lXvP(xYQoSzi9@urQQvz+NZnbw;r{Y$(te%FYu7<{x~^Azy$ zCSDs5)ewA)Lyy}D?GfmaH1RgTyrs!!dAw(ee4W$$o zQO;}&gyR<($+BAC=Iv_iKZ1@9|IwRHl;cA#MN~2s%$x$|WaZjNzz~Y~LFLeh4(+i& 
zBC@>$^`unnaCR%u3XK- z5oNl^j>d;_ie}`-D^!pRV+cqKn2PddejsTgCujhMZLRCyV5iblRe~rJY}B{?K>t49 zP-L@%2}~jWAaXBVzPO>k%8u+2d%uc%U*IMo_Od3*_CqF028d+No^lot^OCt)hL1?j zf;e*!iiaQ?J(vXh?KApDZzBIIn|qh7~{DoL=BAJ=^?TL zqBgbW_AehCRriq0^X=*CJoqK$sVNPhuVi_u@FEqu$3aZ8h3QyvveaLaZC}Oht`R zz>n|~$!32k85?QBSOM@}o;MhCw+)PsBV!l7om8hZf^JU@jz&~OJ|mzKL-+xO+n<%? z>o0;D44wXFk`uBech~#DJo>le>jpnl|5?WefB*VNMC$3^ zLSFd&n{L&`w7)3n$B@=9F<-y*u^YB`BenFtwE^B08!>oMyRXF_JI{}4&!Zu*x4 z1N}w46ehdE$Nz$fm`&@<5&!Fdi3A6pHt~h7a&sCf!YR=ImdWoUsm|0dfP2D>!jJ;P zMypdSZt{4v105I~_0r|YWjXydR+DH5Q^X8L#Pc|}jrQGkb-u*aJcUNAyoo|&BqIHO zu2^NkW54I79dKf5mX70mCs&-p*}NHtQHb;4!c)*N5Aex5-MjyfdtuB+Lm5a#ozoY5 z1_@Y+rD@NznIb&V>7_nvT?d*sESHwsW_r`&0nTOy7G2lmaFqp)hJ{zZ^4Bb!T80Mj zDnhvo+XL%`9@5Kv7W#xRI39a*`(SYh1w%kpFV!s3*F*NIvS3!Z`fp%T5{je^aSsy` zB@*soPiS|^Gf}0+!bMMFD>NL9E{8o;Y3_o%!G!w3Fd}Cw@CJsY;Cu$6Zow}Qbvs0$ zw}IE(5w|e60K5jbNA~ys8-Dg@;-vpyc&BicG0sOe#cejrEk=`$fZb7?*TBvSsd^Kr~9)B zGhn!v;D-3rm6)!fID)sr9SHjnE*we#0WPtzFn9y7z?urV2AX@EfsB9zX^k#v0V**e5Jb&R@pwT$o?G5JO{1PEL&+ zj77Zy`@-*j5ornWBJ@i!z!&$-87Nqh0A9s2X6vu29PC7S*o6w>&qq&{)HJ7i7&1%C z5yED%A8-(rfxlC6*sP22UW^Gng!PHuF}!E4xexZL8GPUo!;^vpug-SAmMj*-CE-n= zm0_^@@tMQK>R^6TL(G<9un?OJBAnJ*JFskw)_iyHD{k0W4kd2!9rO4-B1R5|OHRL~md6KFZhtdd>>V<{Wq{)PECOQU5I_f1AnA zF?oZ@H6|}J(M$vf1p5Omj72wgk_5VfSkrK$}7x0%;Z%juP`~!gx!ou7e))l zG??{>Y+npGdlY9nL6nL4DU*mli0W3BppC~)91!yqT5&d_>?x^9o4fEmS)M;ZhlXopCiK7I9)zp# zLXn@6h*&9@7w{a_dpLOo6t!H+(Hz2#y7Qx4a9FhBEE{)~{Zxhjja?79c^(6z(uUuY zDbyI9!ZbP=xbV_03TxZIWXe=%O|`s3I- zUeMXmtMqIS4(6^y9bZ}pvG1%bfh`^j8e5(A(ThGpgN}XRgO9%NJ&!yhYkg7-(FkmY zF;{*l2)lyEsSVHYFYrJQw@pmD*iASPfFB70dufmzfu*P&{s@okrj?G0y|gPn?+Mp| zj~(ZbVZ`>1%VBG81HV~B0&JoA)^Vu_rL(cMvz5F?9#JU6(mU`G$x-l&ogLVyCDN|# zr3Cu`Fwg;&|{|S;4$@b6Eu?Rz&Bljyu^#5d; z?;(kt4y=kedyZdGGQZi>YOH8(4gA;%EFHW`@hXE*Cd{gqJ$oOk8*^w)Oo<=}gs=t@ zZ|**I=IoQK%>!A1{`)K|`&)1!A!Iade%Xk2PM(a#`FBGQg!0jZdARPk;VNhuCQKrh z-*6B{5M^O4sfVF9=cI~fwzfMulz`pXnu-{EzkNYZu-aiJvrHD59AWY(lhaIom^a8)Q=Xw;7guho 10 else 1 + if opt.evolve or opt.noplots: + self.bbox_interval = 
opt.bbox_interval = opt.epochs + 1  # disable bbox_interval (evolve/noplots: never log bbox panels)
        train_from_artifact = self.train_artifact_path is not None and self.val_artifact_path is not None
        # Update the data_dict to point to the local artifacts dir
        if train_from_artifact:
            self.data_dict = data_dict

    def download_dataset_artifact(self, path, alias):
        """
        Download the dataset artifact if the path starts with WANDB_ARTIFACT_PREFIX.

        arguments:
        path -- path of the dataset to be used for training
        alias (str) -- alias of the artifact to be downloaded/used for training

        returns:
        (str, wandb.Artifact) -- path of the downloaded dataset and its corresponding artifact object if the
        dataset is found, otherwise (None, None)
        """
        if isinstance(path, str) and path.startswith(WANDB_ARTIFACT_PREFIX):
            # Artifact references are "<name>:<alias>"; normalize to forward slashes for the W&B API
            artifact_path = Path(remove_prefix(path, WANDB_ARTIFACT_PREFIX) + ":" + alias)
            dataset_artifact = wandb.use_artifact(artifact_path.as_posix().replace("\\", "/"))
            assert dataset_artifact is not None, "'Error: W&B dataset artifact doesn\'t exist'"
            datadir = dataset_artifact.download()
            return datadir, dataset_artifact
        return None, None

    def download_model_artifact(self, opt):
        """
        Download the model checkpoint artifact if the resume path starts with WANDB_ARTIFACT_PREFIX.

        arguments:
        opt (namespace) -- Commandline arguments for this run

        returns:
        (str, wandb.Artifact) -- local checkpoint dir and artifact, or (None, None) when not a W&B resume path
        """
        if opt.resume.startswith(WANDB_ARTIFACT_PREFIX):
            model_artifact = wandb.use_artifact(remove_prefix(opt.resume, WANDB_ARTIFACT_PREFIX) + ":latest")
            assert model_artifact is not None, 'Error: W&B model artifact doesn\'t exist'
            modeldir = model_artifact.download()
            # epochs_trained = model_artifact.metadata.get('epochs_trained')
            # NOTE(review): a missing 'total_epochs' key is treated as "run finished" — presumably the
            # metadata is only stripped once training completes; confirm against log_model() producers.
            total_epochs = model_artifact.metadata.get('total_epochs')
            is_finished = total_epochs is None
            assert not is_finished, 'training is finished, can only resume incomplete runs.'
            return modeldir, model_artifact
        return None, None

    def log_model(self, path, opt, epoch, fitness_score, best_model=False):
        """
        Log the model checkpoint as W&B artifact.

        arguments:
        path (Path) -- Path of directory containing the checkpoints
        opt (namespace) -- Command line arguments for this run
        epoch (int) -- Current epoch number
        fitness_score (float) -- fitness score for current epoch
        best_model (boolean) -- Boolean representing if the current checkpoint is the best yet.
        """
        model_artifact = wandb.Artifact('run_' + wandb.run.id + '_model',
                                        type='model',
                                        metadata={
                                            'original_url': str(path),
                                            'epochs_trained': epoch + 1,
                                            'save period': opt.save_period,
                                            'project': opt.project,
                                            'total_epochs': opt.epochs,
                                            'fitness_score': fitness_score})
        # Only last.pt is uploaded; 'best' is expressed as an alias on the same artifact
        model_artifact.add_file(str(path / 'last.pt'), name='last.pt')
        wandb.log_artifact(model_artifact,
                           aliases=['latest', 'last', 'epoch ' + str(self.current_epoch), 'best' if best_model else ''])
        LOGGER.info(f"Saving model artifact on epoch {epoch + 1}")

    def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=False):
        """
        Log the dataset as W&B artifact and return the new data file with W&B links.

        arguments:
        data_file (str) -- the .yaml file with information about the dataset like - path, classes etc.
        single_cls (boolean) -- train multi-class data as single-class
        project (str) -- project name. Used to construct the artifact path
        overwrite_config (boolean) -- overwrites the data.yaml file if set to true otherwise creates a new
        file with _wandb postfix. Eg -> data_wandb.yaml

        returns:
        the new .yaml file with artifact links. it can be used to start training directly from artifacts
        """
        upload_dataset = self.wandb_run.config.upload_dataset
        log_val_only = isinstance(upload_dataset, str) and upload_dataset == 'val'
        self.data_dict = check_dataset(data_file)  # parse and check
        data = dict(self.data_dict)
        nc, names = (1, ['item']) if single_cls else (int(data['nc']), data['names'])
        names = {k: v for k, v in enumerate(names)}  # to index dictionary

        # log train set
        if not log_val_only:
            self.train_artifact = self.create_dataset_table(LoadImagesAndLabels(data['train'], rect=True, batch_size=1),
                                                            names,
                                                            name='train') if data.get('train') else None
            if data.get('train'):
                data['train'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'train')

        self.val_artifact = self.create_dataset_table(
            LoadImagesAndLabels(data['val'], rect=True, batch_size=1), names, name='val') if data.get('val') else None
        if data.get('val'):
            data['val'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'val')

        path = Path(data_file)
        # create a _wandb.yaml file with artifacts links if both train and test set are logged
        if not log_val_only:
            path = (path.stem if overwrite_config else path.stem + '_wandb') + '.yaml'  # updated data.yaml path
            path = ROOT / 'data' / path
            data.pop('download', None)  # local-only keys make no sense in the artifact-backed config
            data.pop('path', None)
            with open(path, 'w') as f:
                yaml.safe_dump(data, f)
            LOGGER.info(f"Created dataset config file {path}")

        if self.job_type == 'Training':  # builds correct artifact pipeline graph
            if not log_val_only:
                self.wandb_run.log_artifact(
                    self.train_artifact)  # calling use_artifact downloads the dataset. NOT NEEDED!
            self.wandb_run.use_artifact(self.val_artifact)
            self.val_artifact.wait()  # block until upload completes before reading the table back
            self.val_table = self.val_artifact.get('val')
            self.map_val_table_path()
        else:
            self.wandb_run.log_artifact(self.train_artifact)
            self.wandb_run.log_artifact(self.val_artifact)
        return path

    def map_val_table_path(self):
        """
        Map the validation dataset Table like name of file -> it's id in the W&B Table.
        Useful for - referencing artifacts for evaluation.
        """
        self.val_table_path_map = {}
        LOGGER.info("Mapping dataset")
        # Table columns are ["id", "train_image", "Classes", "name"]; map name (col 3) -> id (col 0)
        for i, data in enumerate(tqdm(self.val_table.data)):
            self.val_table_path_map[data[3]] = data[0]

    def create_dataset_table(self, dataset: LoadImagesAndLabels, class_to_id: Dict[int, str], name: str = 'dataset'):
        """
        Create and return W&B artifact containing W&B Table of the dataset.

        arguments:
        dataset -- instance of LoadImagesAndLabels class used to iterate over the data to build Table
        class_to_id -- hash map that maps class ids to labels
        name -- name of the artifact

        returns:
        dataset artifact to be logged or used
        """
        # TODO: Explore multiprocessing to split this loop in parallel | This is essential for speeding up the logging
        artifact = wandb.Artifact(name=name, type="dataset")
        # If the dataset path is a directory, add it wholesale; otherwise add files one by one
        img_files = tqdm([dataset.path]) if isinstance(dataset.path, str) and Path(dataset.path).is_dir() else None
        img_files = tqdm(dataset.im_files) if not img_files else img_files
        for img_file in img_files:
            if Path(img_file).is_dir():
                artifact.add_dir(img_file, name='data/images')
                labels_path = 'labels'.join(dataset.path.rsplit('images', 1))  # mirror of images dir
                artifact.add_dir(labels_path, name='data/labels')
            else:
                artifact.add_file(img_file, name='data/images/' + Path(img_file).name)
                label_file = Path(img2label_paths([img_file])[0])
                artifact.add_file(str(label_file), name='data/labels/' +
                                  label_file.name) if label_file.exists() else None
        table = wandb.Table(columns=["id", "train_image", "Classes", "name"])
        class_set = wandb.Classes([{'id': id, 'name': name} for id, name in class_to_id.items()])
        for si, (img, labels, paths, shapes) in enumerate(tqdm(dataset)):
            box_data, img_classes = [], {}
            # labels rows are [class, x, y, w, h] (normalized xywh); column 0 dropped by [:, 1:]
            for cls, *xywh in labels[:, 1:].tolist():
                cls = int(cls)
                box_data.append({
                    "position": {
                        "middle": [xywh[0], xywh[1]],
                        "width": xywh[2],
                        "height": xywh[3]},
                    "class_id": cls,
                    "box_caption": "%s" % (class_to_id[cls])})
                img_classes[cls] = class_to_id[cls]
            boxes = {"ground_truth": {"box_data": box_data, "class_labels": class_to_id}}  # inference-space
            table.add_data(si, wandb.Image(paths, classes=class_set, boxes=boxes), list(img_classes.values()),
                           Path(paths).name)
        artifact.add(table, name)
        return artifact

    def log_training_progress(self, predn, path, names):
        """
        Build evaluation Table. Uses reference from validation dataset table.

        arguments:
        predn (list): list of predictions in the native space in the format - [xmin, ymin, xmax, ymax, confidence, class]
        path (str): local path of the current evaluation image
        names (dict(int, str)): hash map that maps class ids to labels
        """
        class_set = wandb.Classes([{'id': id, 'name': name} for id, name in names.items()])
        box_data = []
        avg_conf_per_class = [0] * len(self.data_dict['names'])
        pred_class_count = {}
        for *xyxy, conf, cls in predn.tolist():
            if conf >= 0.25:  # fixed display threshold for the debug panel
                cls = int(cls)
                box_data.append({
                    "position": {
                        "minX": xyxy[0],
                        "minY": xyxy[1],
                        "maxX": xyxy[2],
                        "maxY": xyxy[3]},
                    "class_id": cls,
                    "box_caption": f"{names[cls]} {conf:.3f}",
                    "scores": {
                        "class_score": conf},
                    "domain": "pixel"})
                avg_conf_per_class[cls] += conf

                if cls in pred_class_count:
                    pred_class_count[cls] += 1
                else:
                    pred_class_count[cls] = 1

        # Convert summed confidences to per-class means
        for pred_class in pred_class_count.keys():
            avg_conf_per_class[pred_class] = avg_conf_per_class[pred_class] / pred_class_count[pred_class]

        boxes = {"predictions": {"box_data": box_data, "class_labels": names}}  # inference-space
        id = self.val_table_path_map[Path(path).name]
        self.result_table.add_data(self.current_epoch, id, self.val_table.data[id][1],
                                   wandb.Image(self.val_table.data[id][1], boxes=boxes, classes=class_set),
                                   *avg_conf_per_class)

    def val_one_image(self, pred, predn, path, names, im):
        """
        Log validation data for one image. updates the result Table if validation dataset is uploaded and log bbox media panel

        arguments:
        pred (list): list of scaled predictions in the format - [xmin, ymin, xmax, ymax, confidence, class]
        predn (list): list of predictions in the native space - [xmin, ymin, xmax, ymax, confidence, class]
        path (str): local path of the current evaluation image
        names (dict(int, str)): hash map that maps class ids to labels
        im: the image being evaluated (passed to wandb.Image for the media panel)
        """
        if self.val_table and self.result_table:  # Log Table if Val dataset is uploaded as artifact
            self.log_training_progress(predn, path, names)

        if len(self.bbox_media_panel_images) < self.max_imgs_to_log and self.current_epoch > 0:
            if self.current_epoch % self.bbox_interval == 0:
                box_data = [{
                    "position": {
                        "minX": xyxy[0],
                        "minY": xyxy[1],
                        "maxX": xyxy[2],
                        "maxY": xyxy[3]},
                    "class_id": int(cls),
                    "box_caption": f"{names[int(cls)]} {conf:.3f}",
                    "scores": {
                        "class_score": conf},
                    "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()]
                boxes = {"predictions": {"box_data": box_data, "class_labels": names}}  # inference-space
                self.bbox_media_panel_images.append(wandb.Image(im, boxes=boxes, caption=path.name))

    def log(self, log_dict):
        """
        save the metrics to the logging dictionary

        arguments:
        log_dict (Dict) -- metrics/media to be logged in current step
        """
        if self.wandb_run:
            for key, value in log_dict.items():
                self.log_dict[key] = value

    def end_epoch(self, best_result=False):
        """
        commit the log_dict, model artifacts and Tables to W&B and flush the log_dict.

        arguments:
        best_result (boolean): Boolean representing if the result of this evaluation is best or not
        """
        if self.wandb_run:
            with all_logging_disabled():
                if self.bbox_media_panel_images:
                    self.log_dict["BoundingBoxDebugger"] = self.bbox_media_panel_images
                try:
                    wandb.log(self.log_dict)
                except BaseException as e:
                    # A wandb failure must not kill training: finish the run and disable further logging
                    LOGGER.info(
                        f"An error occurred in wandb logger. The training will proceed without interruption. More info\n{e}"
                    )
                    self.wandb_run.finish()
                    self.wandb_run = None

                self.log_dict = {}
                self.bbox_media_panel_images = []
            if self.result_artifact:
                self.result_artifact.add(self.result_table, 'result')
                wandb.log_artifact(self.result_artifact,
                                   aliases=[
                                       'latest', 'last', 'epoch ' + str(self.current_epoch),
                                       ('best' if best_result else '')])

                wandb.log({"evaluation": self.result_table})
                # Start a fresh table/artifact for the next epoch
                columns = ["epoch", "id", "ground truth", "prediction"]
                columns.extend(self.data_dict['names'])
                self.result_table = wandb.Table(columns)
                self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation")

    def finish_run(self):
        """
        Log metrics if any and finish the current W&B run
        """
        if self.wandb_run:
            if self.log_dict:
                with all_logging_disabled():
                    wandb.log(self.log_dict)
            wandb.run.finish()


@contextmanager
def all_logging_disabled(highest_level=logging.CRITICAL):
    """ source - https://gist.github.com/simon-weber/7853144
    A context manager that will prevent any logging messages triggered during the body from being processed.
    :param highest_level: the maximum logging level in use.
      This would only need to be changed if a custom level greater than CRITICAL is defined.
+ """ + previous_level = logging.root.manager.disable + logging.disable(highest_level) + try: + yield + finally: + logging.disable(previous_level) diff --git a/src/目标识别代码/utils/loss.py b/src/目标识别代码/utils/loss.py new file mode 100644 index 00000000..9b9c3d9f --- /dev/null +++ b/src/目标识别代码/utils/loss.py @@ -0,0 +1,234 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Loss functions +""" + +import torch +import torch.nn as nn + +from utils.metrics import bbox_iou +from utils.torch_utils import de_parallel + + +def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441 + # return positive, negative label smoothing BCE targets + return 1.0 - 0.5 * eps, 0.5 * eps + + +class BCEBlurWithLogitsLoss(nn.Module): + # BCEwithLogitLoss() with reduced missing label effects. + def __init__(self, alpha=0.05): + super().__init__() + self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none') # must be nn.BCEWithLogitsLoss() + self.alpha = alpha + + def forward(self, pred, true): + loss = self.loss_fcn(pred, true) + pred = torch.sigmoid(pred) # prob from logits + dx = pred - true # reduce only missing label effects + # dx = (pred - true).abs() # reduce missing label and false label effects + alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4)) + loss *= alpha_factor + return loss.mean() + + +class FocalLoss(nn.Module): + # Wraps focal loss around existing loss_fcn(), i.e. 
criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) + def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): + super().__init__() + self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() + self.gamma = gamma + self.alpha = alpha + self.reduction = loss_fcn.reduction + self.loss_fcn.reduction = 'none' # required to apply FL to each element + + def forward(self, pred, true): + loss = self.loss_fcn(pred, true) + # p_t = torch.exp(-loss) + # loss *= self.alpha * (1.000001 - p_t) ** self.gamma # non-zero power for gradient stability + + # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py + pred_prob = torch.sigmoid(pred) # prob from logits + p_t = true * pred_prob + (1 - true) * (1 - pred_prob) + alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha) + modulating_factor = (1.0 - p_t) ** self.gamma + loss *= alpha_factor * modulating_factor + + if self.reduction == 'mean': + return loss.mean() + elif self.reduction == 'sum': + return loss.sum() + else: # 'none' + return loss + + +class QFocalLoss(nn.Module): + # Wraps Quality focal loss around existing loss_fcn(), i.e. 
criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) + def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): + super().__init__() + self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() + self.gamma = gamma + self.alpha = alpha + self.reduction = loss_fcn.reduction + self.loss_fcn.reduction = 'none' # required to apply FL to each element + + def forward(self, pred, true): + loss = self.loss_fcn(pred, true) + + pred_prob = torch.sigmoid(pred) # prob from logits + alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha) + modulating_factor = torch.abs(true - pred_prob) ** self.gamma + loss *= alpha_factor * modulating_factor + + if self.reduction == 'mean': + return loss.mean() + elif self.reduction == 'sum': + return loss.sum() + else: # 'none' + return loss + + +class ComputeLoss: + sort_obj_iou = False + + # Compute losses + def __init__(self, model, autobalance=False): + device = next(model.parameters()).device # get model device + h = model.hyp # hyperparameters + + # Define criteria + BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device)) + BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device)) + + # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 + self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets + + # Focal loss + g = h['fl_gamma'] # focal loss gamma + if g > 0: + BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g) + + m = de_parallel(model).model[-1] # Detect() module + self.balance = {3: [4.0, 1.0, 0.4]}.get(m.nl, [4.0, 1.0, 0.25, 0.06, 0.02]) # P3-P7 + self.ssi = list(m.stride).index(16) if autobalance else 0 # stride 16 index + self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 1.0, h, autobalance + self.na = m.na # number of anchors + self.nc = m.nc # number of classes + self.nl = m.nl # number of layers + self.anchors = m.anchors + self.device = device + + def __call__(self, p, 
targets): # predictions, targets + lcls = torch.zeros(1, device=self.device) # class loss + lbox = torch.zeros(1, device=self.device) # box loss + lobj = torch.zeros(1, device=self.device) # object loss + tcls, tbox, indices, anchors = self.build_targets(p, targets) # targets + + # Losses + for i, pi in enumerate(p): # layer index, layer predictions + b, a, gj, gi = indices[i] # image, anchor, gridy, gridx + tobj = torch.zeros(pi.shape[:4], dtype=pi.dtype, device=self.device) # target obj + + n = b.shape[0] # number of targets + if n: + # pxy, pwh, _, pcls = pi[b, a, gj, gi].tensor_split((2, 4, 5), dim=1) # faster, requires torch 1.8.0 + pxy, pwh, _, pcls = pi[b, a, gj, gi].split((2, 2, 1, self.nc), 1) # target-subset of predictions + + # Regression + pxy = pxy.sigmoid() * 2 - 0.5 + pwh = (pwh.sigmoid() * 2) ** 2 * anchors[i] + pbox = torch.cat((pxy, pwh), 1) # predicted box + iou = bbox_iou(pbox, tbox[i], CIoU=True).squeeze() # iou(prediction, target) + lbox += (1.0 - iou).mean() # iou loss + + # Objectness + iou = iou.detach().clamp(0).type(tobj.dtype) + if self.sort_obj_iou: + j = iou.argsort() + b, a, gj, gi, iou = b[j], a[j], gj[j], gi[j], iou[j] + if self.gr < 1: + iou = (1.0 - self.gr) + self.gr * iou + tobj[b, a, gj, gi] = iou # iou ratio + + # Classification + if self.nc > 1: # cls loss (only if multiple classes) + t = torch.full_like(pcls, self.cn, device=self.device) # targets + t[range(n), tcls[i]] = self.cp + lcls += self.BCEcls(pcls, t) # BCE + + # Append targets to text file + # with open('targets.txt', 'a') as file: + # [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)] + + obji = self.BCEobj(pi[..., 4], tobj) + lobj += obji * self.balance[i] # obj loss + if self.autobalance: + self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item() + + if self.autobalance: + self.balance = [x / self.balance[self.ssi] for x in self.balance] + lbox *= self.hyp['box'] + lobj *= self.hyp['obj'] + lcls *= 
self.hyp['cls'] + bs = tobj.shape[0] # batch size + + return (lbox + lobj + lcls) * bs, torch.cat((lbox, lobj, lcls)).detach() + + def build_targets(self, p, targets): + # Build targets for compute_loss(), input targets(image,class,x,y,w,h) + na, nt = self.na, targets.shape[0] # number of anchors, targets + tcls, tbox, indices, anch = [], [], [], [] + gain = torch.ones(7, device=self.device) # normalized to gridspace gain + ai = torch.arange(na, device=self.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt) + targets = torch.cat((targets.repeat(na, 1, 1), ai[..., None]), 2) # append anchor indices + + g = 0.5 # bias + off = torch.tensor( + [ + [0, 0], + [1, 0], + [0, 1], + [-1, 0], + [0, -1], # j,k,l,m + # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm + ], + device=self.device).float() * g # offsets + + for i in range(self.nl): + anchors, shape = self.anchors[i], p[i].shape + gain[2:6] = torch.tensor(shape)[[3, 2, 3, 2]] # xyxy gain + + # Match targets to anchors + t = targets * gain # shape(3,n,7) + if nt: + # Matches + r = t[..., 4:6] / anchors[:, None] # wh ratio + j = torch.max(r, 1 / r).max(2)[0] < self.hyp['anchor_t'] # compare + # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2)) + t = t[j] # filter + + # Offsets + gxy = t[:, 2:4] # grid xy + gxi = gain[[2, 3]] - gxy # inverse + j, k = ((gxy % 1 < g) & (gxy > 1)).T + l, m = ((gxi % 1 < g) & (gxi > 1)).T + j = torch.stack((torch.ones_like(j), j, k, l, m)) + t = t.repeat((5, 1, 1))[j] + offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j] + else: + t = targets[0] + offsets = 0 + + # Define + bc, gxy, gwh, a = t.chunk(4, 1) # (image, class), grid xy, grid wh, anchors + a, (b, c) = a.long().view(-1), bc.long().T # anchors, image, class + gij = (gxy - offsets).long() + gi, gj = gij.T # grid indices + + # Append + indices.append((b, a, gj.clamp_(0, shape[2] - 1), gi.clamp_(0, shape[3] - 1))) # image, anchor, grid + 
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Model validation metrics
"""

import math
import warnings
from pathlib import Path

import matplotlib.pyplot as plt
import numpy as np
import torch


def fitness(x):
    # Model fitness as a weighted combination of metrics
    w = [0.0, 0.0, 0.1, 0.9]  # weights for [P, R, mAP@0.5, mAP@0.5:0.95]
    return (x[:, :4] * w).sum(1)


def smooth(y, f=0.05):
    # Box filter of fraction f
    nf = round(len(y) * f * 2) // 2 + 1  # number of filter elements (must be odd)
    p = np.ones(nf // 2)  # ones padding
    yp = np.concatenate((p * y[0], y, p * y[-1]), 0)  # y padded
    return np.convolve(yp, np.ones(nf) / nf, mode='valid')  # y-smoothed


def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=(), eps=1e-16):
    """ Compute the average precision, given the recall and precision curves.
    Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.
    # Arguments
        tp:  True positives (nparray, nx1 or nx10).
        conf:  Objectness value from 0-1 (nparray).
        pred_cls:  Predicted object classes (nparray).
        target_cls:  True object classes (nparray).
        plot:  Plot precision-recall curve at mAP@0.5
        save_dir:  Plot save directory
    # Returns
        The average precision as computed in py-faster-rcnn.
    """

    # Sort by objectness
    i = np.argsort(-conf)
    tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]

    # Find unique classes
    unique_classes, nt = np.unique(target_cls, return_counts=True)
    nc = unique_classes.shape[0]  # number of classes, number of detections

    # Create Precision-Recall curve and compute AP for each class
    px, py = np.linspace(0, 1, 1000), []  # for plotting
    ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000))
    for ci, c in enumerate(unique_classes):
        i = pred_cls == c
        n_l = nt[ci]  # number of labels
        n_p = i.sum()  # number of predictions
        if n_p == 0 or n_l == 0:
            continue

        # Accumulate FPs and TPs
        fpc = (1 - tp[i]).cumsum(0)
        tpc = tp[i].cumsum(0)

        # Recall
        recall = tpc / (n_l + eps)  # recall curve
        r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0)  # negative x, xp because xp decreases

        # Precision
        precision = tpc / (tpc + fpc)  # precision curve
        p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1)  # p at pr_score

        # AP from recall-precision curve
        for j in range(tp.shape[1]):
            ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j])
            if plot and j == 0:
                py.append(np.interp(px, mrec, mpre))  # precision at mAP@0.5

    # Compute F1 (harmonic mean of precision and recall)
    f1 = 2 * p * r / (p + r + eps)
    # NOTE(review): names is declared with a tuple default but .items() is called unconditionally below —
    # callers must always pass a dict; confirm against call sites.
    names = [v for k, v in names.items() if k in unique_classes]  # list: only classes that have data
    names = dict(enumerate(names))  # to dict
    if plot:
        plot_pr_curve(px, py, ap, Path(save_dir) / 'PR_curve.png', names)
        plot_mc_curve(px, f1, Path(save_dir) / 'F1_curve.png', names, ylabel='F1')
        plot_mc_curve(px, p, Path(save_dir) / 'P_curve.png', names, ylabel='Precision')
        plot_mc_curve(px, r, Path(save_dir) / 'R_curve.png', names, ylabel='Recall')

    i = smooth(f1.mean(0), 0.1).argmax()  # max F1 index
    p, r, f1 = p[:, i], r[:, i], f1[:, i]
    tp = (r * nt).round()  # true positives
    fp = (tp / (p + eps) - tp).round()  # false positives
    return tp, fp, p, r, f1, ap, unique_classes.astype(int)


def compute_ap(recall, precision):
    """ Compute the average precision, given the recall and precision curves
    # Arguments
        recall:    The recall curve (list)
        precision: The precision curve (list)
    # Returns
        Average precision, precision curve, recall curve
    """

    # Append sentinel values to beginning and end
    mrec = np.concatenate(([0.0], recall, [1.0]))
    mpre = np.concatenate(([1.0], precision, [0.0]))

    # Compute the precision envelope
    mpre = np.flip(np.maximum.accumulate(np.flip(mpre)))

    # Integrate area under curve
    method = 'interp'  # methods: 'continuous', 'interp'
    if method == 'interp':
        x = np.linspace(0, 1, 101)  # 101-point interp (COCO)
        ap = np.trapz(np.interp(x, mrec, mpre), x)  # integrate
    else:  # 'continuous'
        i = np.where(mrec[1:] != mrec[:-1])[0]  # points where x axis (recall) changes
        ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])  # area under curve

    return ap, mpre, mrec


class ConfusionMatrix:
    # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix
    def __init__(self, nc, conf=0.25, iou_thres=0.45):
        self.matrix = np.zeros((nc + 1, nc + 1))  # extra row/col for the background pseudo-class
        self.nc = nc  # number of classes
        self.conf = conf
        self.iou_thres = iou_thres

    def process_batch(self, detections, labels):
        """
        Return intersection-over-union (Jaccard index) of boxes.
        Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
        Arguments:
            detections (Array[N, 6]), x1, y1, x2, y2, conf, class
            labels (Array[M, 5]), class, x1, y1, x2, y2
        Returns:
            None, updates confusion matrix accordingly
        """
        if detections is None:
            # No detections at all: every ground-truth label is a miss.
            gt_classes = labels.int()
            for i, gc in enumerate(gt_classes):
                self.matrix[self.nc, gc] += 1  # background FN
            return

        detections = detections[detections[:, 4] > self.conf]
        gt_classes = labels[:, 0].int()
        detection_classes = detections[:, 5].int()
        iou = box_iou(labels[:, 1:], detections[:, :4])

        x = torch.where(iou > self.iou_thres)
        if x[0].shape[0]:
            matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()
            if x[0].shape[0] > 1:
                # Greedy 1-to-1 matching: keep the highest-IoU match per detection, then per label.
                matches = matches[matches[:, 2].argsort()[::-1]]
                matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
                matches = matches[matches[:, 2].argsort()[::-1]]
                matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
        else:
            matches = np.zeros((0, 3))

        n = matches.shape[0] > 0
        m0, m1, _ = matches.transpose().astype(int)
        for i, gc in enumerate(gt_classes):
            j = m0 == i
            if n and sum(j) == 1:
                self.matrix[detection_classes[m1[j]], gc] += 1  # correct
            else:
                self.matrix[self.nc, gc] += 1  # background FP

        if n:
            for i, dc in enumerate(detection_classes):
                if not any(m1 == i):
                    self.matrix[dc, self.nc] += 1  # background FN

    def matrix(self):
        # NOTE(review): this method is shadowed by the self.matrix ndarray assigned in __init__,
        # so it is unreachable on instances — confirm whether it should be renamed or removed.
        return self.matrix

    def tp_fp(self):
        # Per-class true/false positives derived from the matrix diagonal and row sums.
        tp = self.matrix.diagonal()  # true positives
        fp = self.matrix.sum(1) - tp  # false positives
        # fn = self.matrix.sum(0) - tp  # false negatives (missed detections)
        return tp[:-1], fp[:-1]  # remove background class

    def plot(self, normalize=True, save_dir='', names=()):
        try:
            import seaborn as sn

            array = self.matrix / ((self.matrix.sum(0).reshape(1, -1) + 1E-9) if normalize else 1)  # normalize columns
            array[array < 0.005] = np.nan  # don't annotate (would appear as 0.00)

            fig = plt.figure(figsize=(12, 9), tight_layout=True)
            nc, nn = self.nc, len(names)  # number of classes, names
            sn.set(font_scale=1.0 if nc < 50 else 0.8)  # for label size
            labels = (0 < nn < 99) and (nn == nc)  # apply names to ticklabels
            with warnings.catch_warnings():
                warnings.simplefilter('ignore')  # suppress empty matrix RuntimeWarning: All-NaN slice encountered
                sn.heatmap(array,
                           annot=nc < 30,
                           annot_kws={
                               "size": 8},
                           cmap='Blues',
                           fmt='.2f',
                           square=True,
                           vmin=0.0,
                           xticklabels=names + ['background FP'] if labels else "auto",
                           yticklabels=names + ['background FN'] if labels else "auto").set_facecolor((1, 1, 1))
            fig.axes[0].set_xlabel('True')
            fig.axes[0].set_ylabel('Predicted')
            plt.title('Confusion Matrix')
            fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250)
            plt.close()
        except Exception as e:
            # Plotting is best-effort: a missing seaborn or bad data must not abort validation.
            print(f'WARNING: ConfusionMatrix plot failure: {e}')

    def print(self):
        for i in range(self.nc + 1):
            print(' '.join(map(str, self.matrix[i])))


def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7):
    # Returns Intersection over Union (IoU) of box1(1,4) to box2(n,4)

    # Get the coordinates of bounding boxes
    if xywh:  # transform from xywh to xyxy
        (x1, y1, w1, h1), (x2, y2, w2, h2) = box1.chunk(4, 1), box2.chunk(4, 1)
        w1_, h1_, w2_, h2_ = w1 / 2, h1 / 2, w2 / 2, h2 / 2
        b1_x1, b1_x2, b1_y1, b1_y2 = x1 - w1_, x1 + w1_, y1 - h1_, y1 + h1_
        b2_x1, b2_x2, b2_y1, b2_y2 = x2 - w2_, x2 + w2_, y2 - h2_, y2 + h2_
    else:  # x1, y1, x2, y2 = box1
        b1_x1, b1_y1, b1_x2, b1_y2 = box1.chunk(4, 1)
        b2_x1, b2_y1, b2_x2, b2_y2 = box2.chunk(4, 1)
        w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1
        w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1

    # Intersection area
    inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \
            (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)

    # Union Area
    union = w1 * h1 + w2 * h2 - inter + eps

    # IoU
    iou = inter / union
    if CIoU or DIoU or GIoU:
        cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1)  # convex (smallest enclosing box) width
        ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1)  # convex height
        if CIoU or DIoU:  # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
            c2 = cw ** 2 + ch ** 2 + eps  # convex diagonal squared
            rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4  # center dist ** 2
            if CIoU:  # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
                v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / (h2 + eps)) - torch.atan(w1 / (h1 + eps)), 2)
                with torch.no_grad():
                    alpha = v / (v - iou + (1 + eps))
                return iou - (rho2 / c2 + v * alpha)  # CIoU
            return iou - rho2 / c2  # DIoU
        c_area = cw * ch + eps  # convex area
        return iou - (c_area - union) / c_area  # GIoU https://arxiv.org/pdf/1902.09630.pdf
    return iou  # IoU


def box_area(box):
    # box = xyxy(4,n)
    return (box[2] - box[0]) * (box[3] - box[1])


def box_iou(box1, box2, eps=1e-7):
    # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
    """
    Return intersection-over-union (Jaccard index) of boxes.
    Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
    Arguments:
        box1 (Tensor[N, 4])
        box2 (Tensor[M, 4])
    Returns:
        iou (Tensor[N, M]): the NxM matrix containing the pairwise
            IoU values for every element in boxes1 and boxes2
    """

    # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
    (a1, a2), (b1, b2) = box1[:, None].chunk(2, 2), box2.chunk(2, 1)
    inter = (torch.min(a2, b2) - torch.max(a1, b1)).clamp(0).prod(2)

    # IoU = inter / (area1 + area2 - inter)
    return inter / (box_area(box1.T)[:, None] + box_area(box2.T) - inter + eps)


def bbox_ioa(box1, box2, eps=1e-7):
    """ Returns the intersection over box2 area given box1, box2. Boxes are x1y1x2y2
    box1:       np.array of shape(4)
    box2:       np.array of shape(nx4)
    returns:    np.array of shape(n)
    """

    # Get the coordinates of bounding boxes
    b1_x1, b1_y1, b1_x2, b1_y2 = box1
    b2_x1, b2_y1, b2_x2, b2_y2 = box2.T

    # Intersection area
    inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
                 (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)

    # box2 area
    box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + eps

    # Intersection over box2 area
    return inter_area / box2_area


def wh_iou(wh1, wh2, eps=1e-7):
    # Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2
    wh1 = wh1[:, None]  # [N,1,2]
    wh2 = wh2[None]  # [1,M,2]
    inter = torch.min(wh1, wh2).prod(2)  # [N,M]
    return inter / (wh1.prod(2) + wh2.prod(2) - inter + eps)  # iou = inter / (area1 + area2 - inter)


# Plots ----------------------------------------------------------------------------------------------------------------


def plot_pr_curve(px, py, ap, save_dir=Path('pr_curve.png'), names=()):
    # Precision-recall curve
    fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True)
    py = np.stack(py, axis=1)

    if 0 < len(names) < 21:  # display per-class legend if < 21 classes
        for i, y in enumerate(py.T):
            ax.plot(px, y, linewidth=1, label=f'{names[i]} {ap[i, 0]:.3f}')  # plot(recall, precision)
    else:
        ax.plot(px, py, linewidth=1, color='grey')  # plot(recall, precision)

    ax.plot(px, py.mean(1), linewidth=3, color='blue', label='all classes %.3f mAP@0.5' % ap[:, 0].mean())
    ax.set_xlabel('Recall')
    ax.set_ylabel('Precision')
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
    plt.title('Precision-Recall Curve')
    fig.savefig(save_dir, dpi=250)
    plt.close()


def plot_mc_curve(px, py, save_dir=Path('mc_curve.png'), names=(), xlabel='Confidence', ylabel='Metric'):
    # Metric-confidence curve
    fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True)

    if 0 < len(names) < 21:  # display per-class legend if < 21 classes
        for i, y in enumerate(py):
            ax.plot(px, y, linewidth=1, label=f'{names[i]}')  # plot(confidence, metric)
    else:
        ax.plot(px, py.T, linewidth=1, color='grey')  # plot(confidence, metric)

    y = smooth(py.mean(0), 0.05)
    ax.plot(px, y, linewidth=3, color='blue', label=f'all classes {y.max():.2f} at {px[y.argmax()]:.3f}')
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
    plt.title(f'{ylabel}-Confidence Curve')
    fig.savefig(save_dir, dpi=250)
    plt.close()
len(names) < 21: # display per-class legend if < 21 classes + for i, y in enumerate(py): + ax.plot(px, y, linewidth=1, label=f'{names[i]}') # plot(confidence, metric) + else: + ax.plot(px, py.T, linewidth=1, color='grey') # plot(confidence, metric) + + y = smooth(py.mean(0), 0.05) + ax.plot(px, y, linewidth=3, color='blue', label=f'all classes {y.max():.2f} at {px[y.argmax()]:.3f}') + ax.set_xlabel(xlabel) + ax.set_ylabel(ylabel) + ax.set_xlim(0, 1) + ax.set_ylim(0, 1) + plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left") + plt.title(f'{ylabel}-Confidence Curve') + fig.savefig(save_dir, dpi=250) + plt.close() diff --git a/src/目标识别代码/utils/plots.py b/src/目标识别代码/utils/plots.py new file mode 100644 index 00000000..5df27a34 --- /dev/null +++ b/src/目标识别代码/utils/plots.py @@ -0,0 +1,522 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Plotting utils +""" + +import math +import os +from copy import copy +from pathlib import Path +from urllib.error import URLError + +import cv2 +import matplotlib +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd +import seaborn as sn +import torch +from PIL import Image, ImageDraw, ImageFont + +from utils.general import (CONFIG_DIR, FONT, LOGGER, Timeout, check_font, check_requirements, clip_coords, + increment_path, is_ascii, threaded, try_except, xywh2xyxy, xyxy2xywh) +from utils.metrics import fitness + +# Settings +RANK = int(os.getenv('RANK', -1)) +matplotlib.rc('font', **{'size': 11}) +matplotlib.use('Agg') # for writing to files only + + +class Colors: + # Ultralytics color palette https://ultralytics.com/ + def __init__(self): + # hex = matplotlib.colors.TABLEAU_COLORS.values() + hexs = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB', + '2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7') + self.palette = [self.hex2rgb(f'#{c}') for c in hexs] + self.n = len(self.palette) + + def __call__(self, i, 
bgr=False): + c = self.palette[int(i) % self.n] + return (c[2], c[1], c[0]) if bgr else c + + @staticmethod + def hex2rgb(h): # rgb order (PIL) + return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4)) + + +colors = Colors() # create instance for 'from utils.plots import colors' + + +def check_pil_font(font=FONT, size=10): + # Return a PIL TrueType Font, downloading to CONFIG_DIR if necessary + font = Path(font) + font = font if font.exists() else (CONFIG_DIR / font.name) + try: + return ImageFont.truetype(str(font) if font.exists() else font.name, size) + except Exception: # download if missing + try: + check_font(font) + return ImageFont.truetype(str(font), size) + except TypeError: + check_requirements('Pillow>=8.4.0') # known issue https://github.com/ultralytics/yolov5/issues/5374 + except URLError: # not online + return ImageFont.load_default() + + +class Annotator: + # YOLOv5 Annotator for train/val mosaics and jpgs and detect/hub inference annotations + def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'): + assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.' + non_ascii = not is_ascii(example) # non-latin labels, i.e. 
asian, arabic, cyrillic + self.pil = pil or non_ascii + if self.pil: # use PIL + self.im = im if isinstance(im, Image.Image) else Image.fromarray(im) + self.draw = ImageDraw.Draw(self.im) + self.font = check_pil_font(font='Arial.Unicode.ttf' if non_ascii else font, + size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12)) + else: # use cv2 + self.im = im + self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2) # line width + + def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)): + # Add one xyxy box to image with label + if self.pil or not is_ascii(label): + self.draw.rectangle(box, width=self.lw, outline=color) # box + if label: + w, h = self.font.getsize(label) # text width, height + outside = box[1] - h >= 0 # label fits outside box + self.draw.rectangle( + (box[0], box[1] - h if outside else box[1], box[0] + w + 1, + box[1] + 1 if outside else box[1] + h + 1), + fill=color, + ) + # self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls') # for PIL>8.0 + self.draw.text((box[0], box[1] - h if outside else box[1]), label, fill=txt_color, font=self.font) + else: # cv2 + p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3])) + center = (int((box[0]+box[2])/2),int((box[1]+box[3])/2)) + cv2.rectangle(self.im, p1, p2, color, thickness=self.lw, lineType=cv2.LINE_AA) + if label: + tf = max(self.lw - 1, 1) # font thickness + w, h = cv2.getTextSize(label, 0, fontScale=self.lw / 3, thickness=tf)[0] # text width, height + outside = p1[1] - h >= 3 + p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3 + cv2.rectangle(self.im, p1, p2, color, -1, cv2.LINE_AA) # filled + cv2.putText(self.im, + label, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2), + 0, + self.lw / 3, + txt_color, + thickness=tf, + lineType=cv2.LINE_AA) + cv2.putText(self.im, "("+str(center[0])+","+str(center[1])+")", (p1[0]+5, p1[1] -50 if outside else p1[1] + h + 2), 2, self.lw / 3, txt_color, + thickness=tf, 
lineType=cv2.LINE_AA) + + def rectangle(self, xy, fill=None, outline=None, width=1): + # Add rectangle to image (PIL-only) + self.draw.rectangle(xy, fill, outline, width) + + def text(self, xy, text, txt_color=(255, 255, 255)): + # Add text to image (PIL-only) + w, h = self.font.getsize(text) # text width, height + self.draw.text((xy[0], xy[1] - h + 1), text, fill=txt_color, font=self.font) + + def result(self): + # Return annotated image as array + return np.asarray(self.im) + + +def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')): + """ + x: Features to be visualized + module_type: Module type + stage: Module stage within model + n: Maximum number of feature maps to plot + save_dir: Directory to save results + """ + if 'Detect' not in module_type: + batch, channels, height, width = x.shape # batch, channels, height, width + if height > 1 and width > 1: + f = save_dir / f"stage{stage}_{module_type.split('.')[-1]}_features.png" # filename + + blocks = torch.chunk(x[0].cpu(), channels, dim=0) # select batch index 0, block by channels + n = min(n, channels) # number of plots + fig, ax = plt.subplots(math.ceil(n / 8), 8, tight_layout=True) # 8 rows x n/8 cols + ax = ax.ravel() + plt.subplots_adjust(wspace=0.05, hspace=0.05) + for i in range(n): + ax[i].imshow(blocks[i].squeeze()) # cmap='gray' + ax[i].axis('off') + + LOGGER.info(f'Saving {f}... 
({n}/{channels})') + plt.title('Features') + plt.savefig(f, dpi=300, bbox_inches='tight') + plt.close() + np.save(str(f.with_suffix('.npy')), x[0].cpu().numpy()) # npy save + + +def hist2d(x, y, n=100): + # 2d histogram used in labels.png and evolve.png + xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n) + hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges)) + xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1) + yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1) + return np.log(hist[xidx, yidx]) + + +def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5): + from scipy.signal import butter, filtfilt + + # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy + def butter_lowpass(cutoff, fs, order): + nyq = 0.5 * fs + normal_cutoff = cutoff / nyq + return butter(order, normal_cutoff, btype='low', analog=False) + + b, a = butter_lowpass(cutoff, fs, order=order) + return filtfilt(b, a, data) # forward-backward filter + + +def output_to_target(output): + # Convert model output to target format [batch_id, class_id, x, y, w, h, conf] + targets = [] + for i, o in enumerate(output): + for *box, conf, cls in o.cpu().numpy(): + targets.append([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf]) + return np.array(targets) + + +@threaded +def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=1920, max_subplots=16): + # Plot image grid with labels + if isinstance(images, torch.Tensor): + images = images.cpu().float().numpy() + if isinstance(targets, torch.Tensor): + targets = targets.cpu().numpy() + if np.max(images[0]) <= 1: + images *= 255 # de-normalise (optional) + bs, _, h, w = images.shape # batch size, _, height, width + bs = min(bs, max_subplots) # limit plot images + ns = np.ceil(bs ** 0.5) # number of subplots (square) + + # Build Image + mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init + for i, im 
def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''):
    """Simulate a full training run and plot the learning-rate schedule to save_dir/LR.png."""
    optimizer, scheduler = copy(optimizer), copy(scheduler)  # work on copies, keep originals intact
    lrs = []
    for _ in range(epochs):
        scheduler.step()
        lrs.append(optimizer.param_groups[0]['lr'])
    plt.plot(lrs, '.-', label='LR')
    plt.xlabel('epoch')
    plt.ylabel('LR')
    plt.grid()
    plt.xlim(0, epochs)
    plt.ylim(0)
    plt.savefig(Path(save_dir) / 'LR.png', dpi=200)
    plt.close()


def plot_val_txt():  # from utils.plots import *; plot_val()
    """Plot 2d and 1d histograms of box centers loaded from val.txt."""
    boxes = np.loadtxt('val.txt', dtype=np.float32)
    xywh = xyxy2xywh(boxes[:, :4])
    cx, cy = xywh[:, 0], xywh[:, 1]  # box centers

    fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True)
    ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0)
    ax.set_aspect('equal')
    plt.savefig('hist2d.png', dpi=300)

    fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True)
    ax[0].hist(cx, bins=600)
    ax[1].hist(cy, bins=600)
    plt.savefig('hist1d.png', dpi=200)


def plot_targets_txt():  # from utils.plots import *; plot_targets_txt()
    """Plot per-component histograms (x, y, width, height) of targets.txt."""
    data = np.loadtxt('targets.txt', dtype=np.float32).T
    titles = ['x targets', 'y targets', 'width targets', 'height targets']
    fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
    ax = ax.ravel()
    for i, title in enumerate(titles):
        ax[i].hist(data[i], bins=100, label=f'{data[i].mean():.3g} +/- {data[i].std():.3g}')
        ax[i].legend()
        ax[i].set_title(title)
    plt.savefig('targets.jpg', dpi=200)
@try_except  # known issue https://github.com/ultralytics/yolov5/issues/5395
@Timeout(30)  # known issue https://github.com/ultralytics/yolov5/issues/5611
def plot_labels(labels, names=(), save_dir=Path('')):
    """Plot dataset label statistics: class histogram, box correlogram, and box rectangles.

    NOTE(review): mutates `labels` in place when drawing the rectangle panel — callers
    should not reuse the array afterwards (matches original behavior).
    """
    LOGGER.info(f"Plotting labels to {save_dir / 'labels.jpg'}... ")
    c, b = labels[:, 0], labels[:, 1:].transpose()  # classes, boxes
    nc = int(c.max() + 1)  # number of classes
    df = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height'])

    # seaborn correlogram
    sn.pairplot(df, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9))
    plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200)
    plt.close()

    # matplotlib labels
    matplotlib.use('svg')  # faster
    ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel()
    hist = ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8)
    try:  # color histogram bars by class
        [hist[2].patches[i].set_color([v / 255 for v in colors(i)]) for i in range(nc)]  # known issue #3195
    except Exception:
        pass
    ax[0].set_ylabel('instances')
    if 0 < len(names) < 30:
        ax[0].set_xticks(range(len(names)))
        ax[0].set_xticklabels(names, rotation=90, fontsize=10)
    else:
        ax[0].set_xlabel('classes')
    sn.histplot(df, x='x', y='y', ax=ax[2], bins=50, pmax=0.9)
    sn.histplot(df, x='width', y='height', ax=ax[3], bins=50, pmax=0.9)

    # rectangles: draw up to 1000 boxes centered on a white 2000x2000 canvas
    labels[:, 1:3] = 0.5  # center
    labels[:, 1:] = xywh2xyxy(labels[:, 1:]) * 2000
    img = Image.fromarray(np.ones((2000, 2000, 3), dtype=np.uint8) * 255)
    for cls, *box in labels[:1000]:
        ImageDraw.Draw(img).rectangle(box, width=1, outline=colors(cls))  # plot
    ax[1].imshow(img)
    ax[1].axis('off')

    for a in [0, 1, 2, 3]:
        for s in ['top', 'right', 'left', 'bottom']:
            ax[a].spines[s].set_visible(False)

    plt.savefig(save_dir / 'labels.jpg', dpi=200)
    matplotlib.use('Agg')
    plt.close()
def plot_evolve(evolve_csv='path/to/evolve.csv'):  # from utils.plots import *; plot_evolve()
    """Plot hyperparameter-evolution results stored in evolve.csv."""
    evolve_csv = Path(evolve_csv)
    data = pd.read_csv(evolve_csv)
    keys = [k.strip() for k in data.columns]
    vals = data.values
    fit = fitness(vals)
    j = np.argmax(fit)  # row with best fitness
    plt.figure(figsize=(10, 12), tight_layout=True)
    matplotlib.rc('font', **{'size': 8})
    print(f'Best results from row {j} of {evolve_csv}:')
    for i, k in enumerate(keys[7:]):  # first 7 columns are metrics, rest are hyps
        v = vals[:, 7 + i]
        mu = v[j]  # best single result
        plt.subplot(6, 5, i + 1)
        plt.scatter(v, fit, c=hist2d(v, fit, 20), cmap='viridis', alpha=.8, edgecolors='none')
        plt.plot(mu, fit.max(), 'k+', markersize=15)
        plt.title(f'{k} = {mu:.3g}', fontdict={'size': 9})  # limit to 40 characters
        if i % 5 != 0:
            plt.yticks([])
        print(f'{k:>15}: {mu:.3g}')
    out = evolve_csv.with_suffix('.png')  # filename
    plt.savefig(out, dpi=200)
    plt.close()
    print(f'Saved {out}')
def plot_results(file='path/to/results.csv', dir=''):
    """Plot training results.csv. Usage: from utils.plots import *; plot_results('path/to/results.csv')"""
    save_dir = Path(file).parent if file else Path(dir)
    fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True)
    ax = ax.ravel()
    files = list(save_dir.glob('results*.csv'))
    assert len(files), f'No results.csv files found in {save_dir.resolve()}, nothing to plot.'
    for f in files:
        try:
            data = pd.read_csv(f)
            headers = [h.strip() for h in data.columns]
            epochs_x = data.values[:, 0]
            # column order groups train losses, metrics, then val losses onto the 2x5 grid
            for i, col in enumerate([1, 2, 3, 4, 5, 8, 9, 10, 6, 7]):
                y = data.values[:, col].astype('float')
                ax[i].plot(epochs_x, y, marker='.', label=f.stem, linewidth=2, markersize=8)
                ax[i].set_title(headers[col], fontsize=12)
        except Exception as e:
            LOGGER.info(f'Warning: Plotting error for {f}: {e}')
    ax[1].legend()
    fig.savefig(save_dir / 'results.png', dpi=200)
    plt.close()
def profile_idetection(start=0, stop=0, labels=(), save_dir=''):
    """Plot iDetection '*.txt' per-image logs. from utils.plots import *; profile_idetection()"""
    ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel()
    s = ['Images', 'Free Storage (GB)', 'RAM Usage (GB)', 'Battery', 'dt_raw (ms)', 'dt_smooth (ms)', 'real-world FPS']
    files = list(Path(save_dir).glob('frames*.txt'))
    for fi, f in enumerate(files):
        try:
            results = np.loadtxt(f, ndmin=2).T[:, 90:-30]  # clip first and last rows
            n = results.shape[1]  # number of rows
            x = np.arange(start, min(stop, n) if stop else n)
            results = results[:, x]
            t = results[0] - results[0].min()  # shift so t0 = 0 s
            results[0] = x
            for i, a in enumerate(ax):
                if i < len(results):
                    label = labels[fi] if len(labels) else f.stem.replace('frames_', '')
                    a.plot(t, results[i], marker='.', label=label, linewidth=1, markersize=5)
                    a.set_title(s[i])
                    a.set_xlabel('time (s)')
                    for side in ['top', 'right']:
                        a.spines[side].set_visible(False)
                else:
                    a.remove()
        except Exception as e:
            print(f'Warning: Plotting error for {f}; {e}')
    ax[1].legend()
    plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200)
def save_one_box(xyxy, im, file=Path('im.jpg'), gain=1.02, pad=10, square=False, BGR=False, save=True):
    """Crop box `xyxy` from image `im` (expanded by `gain` and `pad`); optionally save, always return the crop."""
    xyxy = torch.tensor(xyxy).view(-1, 4)
    b = xyxy2xywh(xyxy)  # to center/width/height
    if square:
        b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1)  # attempt rectangle to square
    b[:, 2:] = b[:, 2:] * gain + pad  # grow box: wh * gain + pad
    xyxy = xywh2xyxy(b).long()
    clip_coords(xyxy, im.shape)
    crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2]), ::(1 if BGR else -1)]
    if save:
        file.parent.mkdir(parents=True, exist_ok=True)  # make directory
        f = str(increment_path(file).with_suffix('.jpg'))
        # PIL save avoids cv2.imwrite chroma subsampling, https://github.com/ultralytics/yolov5/issues/7007
        Image.fromarray(crop[..., ::-1]).save(f, quality=95, subsampling=0)  # save RGB
    return crop
def smart_inference_mode(torch_1_9=check_version(torch.__version__, '1.9.0')):
    """Decorator factory: torch.inference_mode() on torch>=1.9.0, torch.no_grad() otherwise."""
    def decorate(fn):
        ctx = torch.inference_mode if torch_1_9 else torch.no_grad
        return ctx()(fn)

    return decorate


def smartCrossEntropyLoss(label_smoothing=0.0):
    """Return nn.CrossEntropyLoss, with label smoothing when torch>=1.10.0 supports it."""
    if check_version(torch.__version__, '1.10.0'):
        return nn.CrossEntropyLoss(label_smoothing=label_smoothing)  # loss function
    if label_smoothing > 0:
        LOGGER.warning(f'WARNING: label smoothing {label_smoothing} requires torch>=1.10.0')
    return nn.CrossEntropyLoss()  # loss function


def smart_DDP(model):
    """Wrap `model` in DistributedDataParallel, guarding known-broken torch versions."""
    assert not check_version(torch.__version__, '1.12.0', pinned=True), \
        'torch==1.12.0 torchvision==0.13.0 DDP training is not supported due to a known issue. ' \
        'Please upgrade or downgrade torch to use DDP. See https://github.com/ultralytics/yolov5/issues/8395'
    if check_version(torch.__version__, '1.11.0'):
        # static_graph is available from torch 1.11
        return DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK, static_graph=True)
    return DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK)
@contextmanager
def torch_distributed_zero_first(local_rank: int):
    """Make all distributed processes wait while the local master (rank 0) does something first."""
    if local_rank not in (-1, 0):
        dist.barrier(device_ids=[local_rank])
    yield
    if local_rank == 0:
        dist.barrier(device_ids=[0])


def device_count():
    """Safe replacement for torch.cuda.device_count() via nvidia-smi; Linux and Windows only."""
    assert platform.system() in ('Linux', 'Windows'), 'device_count() only supported on Linux or Windows'
    cmd = 'nvidia-smi -L | wc -l' if platform.system() == 'Linux' else 'nvidia-smi -L | find /c /v ""'  # Windows
    try:
        out = subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode()
        return int(out.split()[-1])
    except Exception:  # nvidia-smi missing or failed -> assume no CUDA devices
        return 0
def time_sync():
    """Accurate wall-clock time: synchronize CUDA (if present) before reading the clock."""
    if torch.cuda.is_available():
        torch.cuda.synchronize()  # wait for queued kernels so the timestamp is meaningful
    return time.time()
def is_parallel(model):
    """Return True if `model` is wrapped in DataParallel or DistributedDataParallel."""
    return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)


def de_parallel(model):
    """Unwrap a DP/DDP model, returning the underlying single-GPU module."""
    return model.module if is_parallel(model) else model


def initialize_weights(model):
    """Apply preferred eps/momentum/inplace settings to common layer types."""
    for m in model.modules():
        t = type(m)
        if t is nn.Conv2d:
            pass  # default init kept; nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        elif t is nn.BatchNorm2d:
            m.eps = 1e-3
            m.momentum = 0.03
        elif t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU):
            m.inplace = True


def find_modules(model, mclass=nn.Conv2d):
    """Return indices of layers in model.module_list that are instances of `mclass`."""
    return [i for i, m in enumerate(model.module_list) if isinstance(m, mclass)]


def sparsity(model):
    """Return the global fraction of model parameters that are exactly zero."""
    total, zeros = 0, 0
    for p in model.parameters():
        total += p.numel()
        zeros += (p == 0).sum()
    return zeros / total
def fuse_conv_and_bn(conv, bn):
    """Fuse a Conv2d and its trailing BatchNorm2d into one Conv2d.

    https://tehnokv.com/posts/fusing-batchnorm-and-conv/
    """
    fused = nn.Conv2d(conv.in_channels,
                      conv.out_channels,
                      kernel_size=conv.kernel_size,
                      stride=conv.stride,
                      padding=conv.padding,
                      groups=conv.groups,
                      bias=True).requires_grad_(False).to(conv.weight.device)

    # Fold the BN scale (gamma / sqrt(var + eps)) into the conv weights
    w = conv.weight.clone().view(conv.out_channels, -1)
    scale = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
    fused.weight.copy_(torch.mm(scale, w).view(fused.weight.shape))

    # Fold the BN shift into the conv bias (zero bias if the conv had none)
    b = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias
    shift = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))
    fused.bias.copy_(torch.mm(scale, b.reshape(-1, 1)).reshape(-1) + shift)

    return fused
def scale_img(img, ratio=1.0, same_shape=False, gs=32):  # img(16,3,256,416)
    """Scale a BCHW image batch by `ratio`, padding to a gs-multiple canvas unless same_shape."""
    if ratio == 1.0:
        return img
    h, w = img.shape[2:]
    new_size = (int(h * ratio), int(w * ratio))
    img = F.interpolate(img, size=new_size, mode='bilinear', align_corners=False)  # resize
    if not same_shape:  # pad/crop to the next gs-multiple canvas
        h, w = (math.ceil(v * ratio / gs) * gs for v in (h, w))
    return F.pad(img, [0, w - new_size[1], 0, h - new_size[0]], value=0.447)  # value = imagenet mean
def copy_attr(a, b, include=(), exclude=()):
    """Copy public attributes from `b` to `a`; restrict to `include` and skip `exclude`/underscored names."""
    for k, v in b.__dict__.items():
        if (len(include) and k not in include) or k.startswith('_') or k in exclude:
            continue
        setattr(a, k, v)


def smart_optimizer(model, name='Adam', lr=0.001, momentum=0.9, decay=1e-5):
    """Build a YOLOv5 3-group optimizer: 0) weights with decay, 1) norm weights no decay, 2) biases no decay."""
    g = [], [], []  # optimizer parameter groups
    bn = tuple(v for k, v in nn.__dict__.items() if 'Norm' in k)  # normalization layers, i.e. BatchNorm2d()
    for m in model.modules():
        if hasattr(m, 'bias') and isinstance(m.bias, nn.Parameter):  # bias (no decay)
            g[2].append(m.bias)
        if isinstance(m, bn):  # norm weight (no decay)
            g[1].append(m.weight)
        elif hasattr(m, 'weight') and isinstance(m.weight, nn.Parameter):  # weight (with decay)
            g[0].append(m.weight)

    if name == 'Adam':
        optimizer = torch.optim.Adam(g[2], lr=lr, betas=(momentum, 0.999))  # adjust beta1 to momentum
    elif name == 'AdamW':
        optimizer = torch.optim.AdamW(g[2], lr=lr, betas=(momentum, 0.999), weight_decay=0.0)
    elif name == 'RMSProp':
        optimizer = torch.optim.RMSprop(g[2], lr=lr, momentum=momentum)
    elif name == 'SGD':
        optimizer = torch.optim.SGD(g[2], lr=lr, momentum=momentum, nesterov=True)
    else:
        raise NotImplementedError(f'Optimizer {name} not implemented.')

    optimizer.add_param_group({'params': g[0], 'weight_decay': decay})  # add g0 with weight_decay
    optimizer.add_param_group({'params': g[1], 'weight_decay': 0.0})  # add g1 (BatchNorm2d weights)
    LOGGER.info(f"{colorstr('optimizer:')} {type(optimizer).__name__}(lr={lr}) with parameter groups "
                f"{len(g[1])} weight(decay=0.0), {len(g[0])} weight(decay={decay}), {len(g[2])} bias")
    return optimizer
def smart_resume(ckpt, optimizer, ema=None, weights='yolov5s.pt', epochs=300, resume=True):
    """Restore optimizer/EMA state from checkpoint `ckpt` and compute the epoch range to run."""
    best = 0.0
    start = ckpt['epoch'] + 1
    if ckpt['optimizer'] is not None:
        optimizer.load_state_dict(ckpt['optimizer'])  # optimizer
        best = ckpt['best_fitness']
    if ema and ckpt.get('ema'):
        ema.ema.load_state_dict(ckpt['ema'].float().state_dict())  # EMA
        ema.updates = ckpt['updates']
    if resume:
        assert start > 0, f'{weights} training to {epochs} epochs is finished, nothing to resume.\n' \
                          f"Start a new training without --resume, i.e. 'python train.py --weights {weights}'"
        LOGGER.info(f'Resuming training from {weights} from epoch {start} to {epochs} total epochs')
    if epochs < start:  # requested total already reached -> treat request as additional fine-tune epochs
        LOGGER.info(f"{weights} has been trained for {ckpt['epoch']} epochs. Fine-tuning for {epochs} more epochs.")
        epochs += ckpt['epoch']  # finetune additional epochs
    return best, start, epochs
class EarlyStopping:
    """YOLOv5 simple early stopper: stop when fitness has not improved for `patience` epochs."""

    def __init__(self, patience=30):
        self.best_fitness = 0.0  # best fitness seen so far, i.e. mAP
        self.best_epoch = 0
        self.patience = patience or float('inf')  # 0/None disables early stopping
        self.possible_stop = False  # True when a stop may occur next epoch

    def __call__(self, epoch, fitness):
        if fitness >= self.best_fitness:  # >= 0 to allow for early zero-fitness stage of training
            self.best_epoch = epoch
            self.best_fitness = fitness
        delta = epoch - self.best_epoch  # epochs without improvement
        self.possible_stop = delta >= (self.patience - 1)  # possible stop may occur next epoch
        stop = delta >= self.patience  # stop training if patience exceeded
        if stop:
            LOGGER.info(f'Stopping training early as no improvement observed in last {self.patience} epochs. '
                        f'Best results observed at epoch {self.best_epoch}, best model saved as best.pt.\n'
                        f'To update EarlyStopping(patience={self.patience}) pass a new patience value, '
                        f'i.e. `python train.py --patience 300` or use `--patience 0` to disable EarlyStopping.')
        return stop
# --- appended CUDA timing helpers ---
import time

import torch

if torch.cuda.is_available():
    torch.backends.cudnn.benchmark = True  # autotune convolutions for fixed input sizes

# Per-device timestamp of the last torch.cuda.empty_cache() call
_EPOCHS = {}


def time_synchronized():
    """Return an accurate wall-clock timestamp (float seconds), synchronizing CUDA first.

    Also empties the CUDA cache at most once every 600 s per device as a lightweight
    guard against allocator growth.

    Returns:
        float: `time.time()` after all queued CUDA kernels have completed.
    """
    if not torch.cuda.is_available():
        return time.time()
    device = torch.cuda.current_device()
    now = time.time()
    if now - _EPOCHS.setdefault(device, 0) > 600:  # periodic cache flush, at most every 10 min
        torch.cuda.empty_cache()
        _EPOCHS[device] = now
    # BUG FIX: the original returned torch.cuda.Event(enable_timing=True) — an Event
    # object, not a timestamp — so callers comparing/subtracting times would fail.
    torch.cuda.synchronize()  # make the timestamp reflect completed GPU work
    return time.time()