commit 22320987dcf07c98fea8fa6dc4f1efe99f0bf9c5 Author: ppplusss <1175214815@qq.com> Date: Wed Jul 3 16:54:31 2024 +0800 提交 diff --git a/.idea/.gitignore b/.idea/.gitignore new file mode 100644 index 0000000..359bb53 --- /dev/null +++ b/.idea/.gitignore @@ -0,0 +1,3 @@ +# 默认忽略的文件 +/shelf/ +/workspace.xml diff --git a/.idea/Imag_Enhanc_License_recog-main.iml b/.idea/Imag_Enhanc_License_recog-main.iml new file mode 100644 index 0000000..8b8c395 --- /dev/null +++ b/.idea/Imag_Enhanc_License_recog-main.iml @@ -0,0 +1,12 @@ + + + + + + + + + + \ No newline at end of file diff --git a/.idea/inspectionProfiles/Project_Default.xml b/.idea/inspectionProfiles/Project_Default.xml new file mode 100644 index 0000000..f0c79c5 --- /dev/null +++ b/.idea/inspectionProfiles/Project_Default.xml @@ -0,0 +1,41 @@ + + + + \ No newline at end of file diff --git a/.idea/inspectionProfiles/profiles_settings.xml b/.idea/inspectionProfiles/profiles_settings.xml new file mode 100644 index 0000000..105ce2d --- /dev/null +++ b/.idea/inspectionProfiles/profiles_settings.xml @@ -0,0 +1,6 @@ + + + + \ No newline at end of file diff --git a/.idea/misc.xml b/.idea/misc.xml new file mode 100644 index 0000000..a971a2c --- /dev/null +++ b/.idea/misc.xml @@ -0,0 +1,4 @@ + + + + \ No newline at end of file diff --git a/.idea/modules.xml b/.idea/modules.xml new file mode 100644 index 0000000..1e65cb7 --- /dev/null +++ b/.idea/modules.xml @@ -0,0 +1,8 @@ + + + + + + + + \ No newline at end of file diff --git a/Font/platech.ttf b/Font/platech.ttf new file mode 100644 index 0000000..d66a970 Binary files /dev/null and b/Font/platech.ttf differ diff --git a/PlateDetect.py b/PlateDetect.py new file mode 100644 index 0000000..e92c1d0 --- /dev/null +++ b/PlateDetect.py @@ -0,0 +1,37 @@ +#coding:utf-8 +from ultralytics import YOLO +import cv2 + +# 所需加载的模型目录 +path = 'models/best.pt' +# 需要检测的图片地址 +img_path = "TestFiles/aa.jpg" + +# 加载预训练模型 +model = YOLO(path, task='detect') + +# 检测图片 +results = model(img_path) + +# 
读取原始图片 +img = cv2.imread(img_path) + +# 遍历所有检测结果 +for result in results: + # 获取检测框的坐标 + for i, box in enumerate(result.boxes): + x1, y1, x2, y2 = map(int, box.xyxy[0]) + # 裁剪检测框内的图像 + cropped_img = img[y1:y2, x1:x2] + # 显示裁剪后的图像 + cv2.imshow("Cropped Image", cropped_img) + cv2.waitKey(0) + # 构建保存路径 + save_path = f"TestFiles/cropped_license_plate_{i}.jpg" + # 保存裁剪后的图像 + cv2.imwrite(save_path, cropped_img) + +# 显示原始图像上的检测结果 +res = results[0].plot() +cv2.imshow("YOLOv5 Detection", res) +cv2.waitKey(0) diff --git a/README.md b/README.md new file mode 100644 index 0000000..8b55e27 --- /dev/null +++ b/README.md @@ -0,0 +1,37 @@ +# ImagEnhanc-Licenserecog_cxy_zhw + +#### 介绍 +数字图像期末作业《图像基本处理和车牌定位识别系统》——曹芯依、张昊玮 + +#### 软件架构 +软件架构说明 + + +#### 安装教程 + +1. xxxx +2. xxxx +3. xxxx + +#### 使用说明 + +1. xxxx +2. xxxx +3. xxxx + +#### 参与贡献 + +1. Fork 本仓库 +2. 新建 Feat_xxx 分支 +3. 提交代码 +4. 新建 Pull Request + + +#### 特技 + +1. 使用 Readme\_XXX.md 来支持不同的语言,例如 Readme\_en.md, Readme\_zh.md +2. Gitee 官方博客 [blog.gitee.com](https://blog.gitee.com) +3. 你可以 [https://gitee.com/explore](https://gitee.com/explore) 这个地址来了解 Gitee 上的优秀开源项目 +4. [GVP](https://gitee.com/gvp) 全称是 Gitee 最有价值开源项目,是综合评定出的优秀开源项目 +5. Gitee 官方提供的使用手册 [https://gitee.com/help](https://gitee.com/help) +6. 
Gitee 封面人物是一档用来展示 Gitee 会员风采的栏目 [https://gitee.com/gitee-stars/](https://gitee.com/gitee-stars/) diff --git a/Readme.txt b/Readme.txt new file mode 100644 index 0000000..25ba28e --- /dev/null +++ b/Readme.txt @@ -0,0 +1,20 @@ +1.创建虚拟环境并安装相关依赖 +#创建虚拟环境 +conda create -n pytorch_1.8 python=3.8 +#进入虚拟环境 +conda activate pytorch_1.8 + +# 安装项目依赖包 +python -m pip install opencv-python -i https://pypi.tuna.tsinghua.edu.cn/simple/ +python -m pip install matplotlib -i https://pypi.tuna.tsinghua.edu.cn/simple/ +pip install gradio==3.47.1 +(缺少哪个包就类似方法安装哪个包即可) + +2.运行项目脚本 +# 2.1进入到项目虚拟环境 +conda activate pytorch_1.8 +# 2.2 cd 到项目根目录 +D: +cd Imag_Enhanc_License_recog-main/ +# 2.3 运行项目 +python demo_ui_main.py diff --git a/TestFiles/013671875-90_268-248&480_423&559-419&546_248&559_250&482_423&480-0_0_3_24_25_32_28_30-153-86.jpg b/TestFiles/013671875-90_268-248&480_423&559-419&546_248&559_250&482_423&480-0_0_3_24_25_32_28_30-153-86.jpg new file mode 100644 index 0000000..0df6eaa Binary files /dev/null and b/TestFiles/013671875-90_268-248&480_423&559-419&546_248&559_250&482_423&480-0_0_3_24_25_32_28_30-153-86.jpg differ diff --git a/TestFiles/013671875-93_102-226&489_426&558-426&558_234&546_226&489_417&494-0_0_5_25_33_24_24_33-86-80.jpg b/TestFiles/013671875-93_102-226&489_426&558-426&558_234&546_226&489_417&494-0_0_5_25_33_24_24_33-86-80.jpg new file mode 100644 index 0000000..080ec1f Binary files /dev/null and b/TestFiles/013671875-93_102-226&489_426&558-426&558_234&546_226&489_417&494-0_0_5_25_33_24_24_33-86-80.jpg differ diff --git a/TestFiles/014453125-90_269-261&439_483&505-481&505_262&504_261&439_483&442-1_0_5_29_32_30_32_30-72-59.jpg b/TestFiles/014453125-90_269-261&439_483&505-481&505_262&504_261&439_483&442-1_0_5_29_32_30_32_30-72-59.jpg new file mode 100644 index 0000000..f9ebf2b Binary files /dev/null and b/TestFiles/014453125-90_269-261&439_483&505-481&505_262&504_261&439_483&442-1_0_5_29_32_30_32_30-72-59.jpg differ diff --git 
a/TestFiles/014609375-87_108-288&462_492&535-478&523_288&535_303&470_492&462-0_0_3_24_31_28_27_26-97-158.jpg b/TestFiles/014609375-87_108-288&462_492&535-478&523_288&535_303&470_492&462-0_0_3_24_31_28_27_26-97-158.jpg new file mode 100644 index 0000000..6bc4b3b Binary files /dev/null and b/TestFiles/014609375-87_108-288&462_492&535-478&523_288&535_303&470_492&462-0_0_3_24_31_28_27_26-97-158.jpg differ diff --git a/TestFiles/014609375-91_260-252&418_456&490-456&490_260&478_252&418_447&422-0_0_3_26_29_26_29_26-152-59.jpg b/TestFiles/014609375-91_260-252&418_456&490-456&490_260&478_252&418_447&422-0_0_3_26_29_26_29_26-152-59.jpg new file mode 100644 index 0000000..3cd305d Binary files /dev/null and b/TestFiles/014609375-91_260-252&418_456&490-456&490_260&478_252&418_447&422-0_0_3_26_29_26_29_26-152-59.jpg differ diff --git a/TestFiles/015703125-88_266-247&416_463&490-463&485_252&490_247&427_456&416-0_0_3_24_31_25_32_24-138-190.jpg b/TestFiles/015703125-88_266-247&416_463&490-463&485_252&490_247&427_456&416-0_0_3_24_31_25_32_24-138-190.jpg new file mode 100644 index 0000000..63beab4 Binary files /dev/null and b/TestFiles/015703125-88_266-247&416_463&490-463&485_252&490_247&427_456&416-0_0_3_24_31_25_32_24-138-190.jpg differ diff --git a/TestFiles/015859375-90_261-217&496_449&565-449&565_226&559_217&496_448&497-0_0_3_24_33_33_33_25-99-69.jpg b/TestFiles/015859375-90_261-217&496_449&565-449&565_226&559_217&496_448&497-0_0_3_24_33_33_33_25-99-69.jpg new file mode 100644 index 0000000..98982cb Binary files /dev/null and b/TestFiles/015859375-90_261-217&496_449&565-449&565_226&559_217&496_448&497-0_0_3_24_33_33_33_25-99-69.jpg differ diff --git a/TestFiles/015859375-90_269-263&454_495&523-495&523_264&522_263&458_493&454-0_0_3_26_33_26_33_32-78-23.jpg b/TestFiles/015859375-90_269-263&454_495&523-495&523_264&522_263&458_493&454-0_0_3_26_33_26_33_32-78-23.jpg new file mode 100644 index 0000000..2fe7b3b Binary files /dev/null and 
b/TestFiles/015859375-90_269-263&454_495&523-495&523_264&522_263&458_493&454-0_0_3_26_33_26_33_32-78-23.jpg differ diff --git a/TestFiles/015859375-90_90-266&399_498&468-496&467_266&468_266&400_498&399-0_0_3_24_28_24_31_30-159-95.jpg b/TestFiles/015859375-90_90-266&399_498&468-496&467_266&468_266&400_498&399-0_0_3_24_28_24_31_30-159-95.jpg new file mode 100644 index 0000000..b573f03 Binary files /dev/null and b/TestFiles/015859375-90_90-266&399_498&468-496&467_266&468_266&400_498&399-0_0_3_24_28_24_31_30-159-95.jpg differ diff --git a/TestFiles/016015625-88_90-298&486_503&565-499&550_298&565_298&495_503&486-0_0_3_27_27_24_30_24-108-194.jpg b/TestFiles/016015625-88_90-298&486_503&565-499&550_298&565_298&495_503&486-0_0_3_27_27_24_30_24-108-194.jpg new file mode 100644 index 0000000..975555a Binary files /dev/null and b/TestFiles/016015625-88_90-298&486_503&565-499&550_298&565_298&495_503&486-0_0_3_27_27_24_30_24-108-194.jpg differ diff --git a/TestFiles/1.mp4 b/TestFiles/1.mp4 new file mode 100644 index 0000000..5ba643f Binary files /dev/null and b/TestFiles/1.mp4 differ diff --git a/TestFiles/aa.jpg b/TestFiles/aa.jpg new file mode 100644 index 0000000..275b0ce Binary files /dev/null and b/TestFiles/aa.jpg differ diff --git a/TestFiles/cropped_license_plate_0.jpg b/TestFiles/cropped_license_plate_0.jpg new file mode 100644 index 0000000..5907f65 Binary files /dev/null and b/TestFiles/cropped_license_plate_0.jpg differ diff --git a/TestFiles/cropped_license_plate_0_0.jpg b/TestFiles/cropped_license_plate_0_0.jpg new file mode 100644 index 0000000..03de335 Binary files /dev/null and b/TestFiles/cropped_license_plate_0_0.jpg differ diff --git a/TestFiles/down.jpeg b/TestFiles/down.jpeg new file mode 100644 index 0000000..69da069 Binary files /dev/null and b/TestFiles/down.jpeg differ diff --git a/__pycache__/carPlate_recognize.cpython-311.pyc b/__pycache__/carPlate_recognize.cpython-311.pyc new file mode 100644 index 0000000..5703314 Binary files /dev/null and 
b/__pycache__/carPlate_recognize.cpython-311.pyc differ diff --git a/__pycache__/carPlate_recognize.cpython-38.pyc b/__pycache__/carPlate_recognize.cpython-38.pyc new file mode 100644 index 0000000..ec61b1e Binary files /dev/null and b/__pycache__/carPlate_recognize.cpython-38.pyc differ diff --git a/__pycache__/detect_tools.cpython-311.pyc b/__pycache__/detect_tools.cpython-311.pyc new file mode 100644 index 0000000..fc0f17b Binary files /dev/null and b/__pycache__/detect_tools.cpython-311.pyc differ diff --git a/__pycache__/imgTest.cpython-311.pyc b/__pycache__/imgTest.cpython-311.pyc new file mode 100644 index 0000000..8b554f4 Binary files /dev/null and b/__pycache__/imgTest.cpython-311.pyc differ diff --git a/__pycache__/img_enhancement.cpython-311.pyc b/__pycache__/img_enhancement.cpython-311.pyc new file mode 100644 index 0000000..f06d576 Binary files /dev/null and b/__pycache__/img_enhancement.cpython-311.pyc differ diff --git a/__pycache__/img_enhancement.cpython-38.pyc b/__pycache__/img_enhancement.cpython-38.pyc new file mode 100644 index 0000000..c597f6a Binary files /dev/null and b/__pycache__/img_enhancement.cpython-38.pyc differ diff --git a/__pycache__/tab1_1.cpython-311.pyc b/__pycache__/tab1_1.cpython-311.pyc new file mode 100644 index 0000000..aa14fa7 Binary files /dev/null and b/__pycache__/tab1_1.cpython-311.pyc differ diff --git a/__pycache__/tab1_1.cpython-38.pyc b/__pycache__/tab1_1.cpython-38.pyc new file mode 100644 index 0000000..00de4a9 Binary files /dev/null and b/__pycache__/tab1_1.cpython-38.pyc differ diff --git a/__pycache__/tab1_2.cpython-311.pyc b/__pycache__/tab1_2.cpython-311.pyc new file mode 100644 index 0000000..8cfe61e Binary files /dev/null and b/__pycache__/tab1_2.cpython-311.pyc differ diff --git a/__pycache__/tab1_2.cpython-38.pyc b/__pycache__/tab1_2.cpython-38.pyc new file mode 100644 index 0000000..cdba955 Binary files /dev/null and b/__pycache__/tab1_2.cpython-38.pyc differ diff --git 
a/__pycache__/tab1_3.cpython-311.pyc b/__pycache__/tab1_3.cpython-311.pyc new file mode 100644 index 0000000..cdd3bd7 Binary files /dev/null and b/__pycache__/tab1_3.cpython-311.pyc differ diff --git a/__pycache__/tab1_3.cpython-38.pyc b/__pycache__/tab1_3.cpython-38.pyc new file mode 100644 index 0000000..52bd719 Binary files /dev/null and b/__pycache__/tab1_3.cpython-38.pyc differ diff --git a/__pycache__/tab2.cpython-311.pyc b/__pycache__/tab2.cpython-311.pyc new file mode 100644 index 0000000..66da81f Binary files /dev/null and b/__pycache__/tab2.cpython-311.pyc differ diff --git a/__pycache__/tab2.cpython-38.pyc b/__pycache__/tab2.cpython-38.pyc new file mode 100644 index 0000000..3d96317 Binary files /dev/null and b/__pycache__/tab2.cpython-38.pyc differ diff --git a/__pycache__/tab3.cpython-311.pyc b/__pycache__/tab3.cpython-311.pyc new file mode 100644 index 0000000..bf967db Binary files /dev/null and b/__pycache__/tab3.cpython-311.pyc differ diff --git a/carPlate_recognize.py b/carPlate_recognize.py new file mode 100644 index 0000000..6b97145 --- /dev/null +++ b/carPlate_recognize.py @@ -0,0 +1,85 @@ +import cv2 +import numpy as np + +''' +基于Opencv图像处理的车牌定位和分割 +''' +def car_plate_recognize(car): + """=========================== 1. 
定位车牌(车牌检测)===========================""" + # 蓝色、黄色和绿色所对应的色彩空间 + lower_blue = np.array([100, 110, 110]) + upper_blue = np.array([130, 255, 255]) + lower_yellow = np.array([15, 55, 55]) + upper_yellow = np.array([50, 255, 255]) + lower_green = np.array([35, 100, 100]) + upper_green = np.array([85, 255, 255]) + + hsv = cv2.cvtColor(car, cv2.COLOR_BGR2HSV) # 将BGR图像转化到HSV的颜色空间 + mask_blue = cv2.inRange(hsv, lower_blue, upper_blue) + mask_yellow = cv2.inRange(hsv, lower_yellow, upper_yellow) + mask_green = cv2.inRange(hsv, lower_green, upper_green) + mask_plate = cv2.bitwise_or(mask_blue, mask_yellow) + mask_plate = cv2.bitwise_or(mask_plate, mask_green) + + # 根据阈值找到对应颜色 + mask = cv2.cvtColor(mask_plate, cv2.COLOR_GRAY2BGR) + mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY) + Matrix = np.ones((20, 20), np.uint8) + mask1 = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, Matrix) + mask = cv2.morphologyEx(mask1, cv2.MORPH_OPEN, Matrix) # 形态学开运算 + ret, mask = cv2.threshold(mask, 0, 255, cv2.THRESH_BINARY) # 二值化进而获取轮廓 + contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) # 获取轮廓 contours + + # 初始化 box + box = None + + # 寻找轮廓最大的 定位车牌 + for i in range(len(contours)): + cnt = contours[i] + area = cv2.contourArea(cnt) + if area > 3000: + rect = cv2.minAreaRect(cnt) + box = cv2.boxPoints(rect) + box = np.int_(box) + break # 找到一个符合条件的就跳出循环 + + if box is None: + raise ValueError("No contours found that meet the size requirement.") + + plate = cv2.drawContours(car.copy(), [box], -1, (0, 255, 0), 3) + + """=========================== 2. 
分割车牌中的每个字符 ===========================""" + ys = [box[0, 1], box[1, 1], box[2, 1], box[3, 1]] + xs = [box[0, 0], box[1, 0], box[2, 0], box[3, 0]] + ys_sorted_index = np.argsort(ys) + xs_sorted_index = np.argsort(xs) + x1 = box[xs_sorted_index[0], 0] + x2 = box[xs_sorted_index[3], 0] + y1 = box[ys_sorted_index[0], 1] + y2 = box[ys_sorted_index[3], 1] + ROI_plate = plate[y1:y2, x1:x2] + ROI_plate_gray = cv2.cvtColor(ROI_plate, cv2.COLOR_BGR2GRAY) # 灰度化 + ROI_plate_blur = cv2.GaussianBlur(ROI_plate_gray, (5, 5), 0) # 高斯滤波 + ret, ROI_plate_Binary = cv2.threshold(ROI_plate_blur, 127, 255, cv2.THRESH_BINARY) # 二值化 + # 形态学腐蚀 去除边框 + kernel = np.ones((5, 5), dtype=np.uint8) + ROI_erode = cv2.erode(ROI_plate_Binary, kernel, iterations=1) + # 根据宽度 裁剪7个字符 + width = ROI_erode.shape[1] + height = ROI_erode.shape[0] + word_0 = ROI_erode[0:height, 0:np.uint8(height / 2)] + word_1 = ROI_erode[0:height, np.uint8(height / 2):height] + size = np.uint8((width - height) / 5) + word_2 = ROI_erode[0:height, height + 0 * size:height + 1 * size] + word_3 = ROI_erode[0:height, height + 1 * size:height + 2 * size] + word_4 = ROI_erode[0:height, height + 2 * size:height + 3 * size] + word_5 = ROI_erode[0:height, height + 3 * size:height + 4 * size] + word_6 = ROI_erode[0:height, height + 4 * size:height + 5 * size] + word_all = [word_0, word_1, word_2, word_3, word_4, word_5, word_6] + return plate, word_all + + +if __name__ == "__main__": + car = cv2.imread(r'/mnt/data/cropped_license_plate_0.jpg', 1) + plate, _ = car_plate_recognize(car) + cv2.imwrite("plate.jpg", plate) diff --git a/cropped_license_plate.jpg b/cropped_license_plate.jpg new file mode 100644 index 0000000..5907f65 Binary files /dev/null and b/cropped_license_plate.jpg differ diff --git a/cropped_license_plate_with_margin.jpg b/cropped_license_plate_with_margin.jpg new file mode 100644 index 0000000..03de335 Binary files /dev/null and b/cropped_license_plate_with_margin.jpg differ diff --git a/demo_ui_main.py b/demo_ui_main.py 
new file mode 100644 index 0000000..ccc4ce4 --- /dev/null +++ b/demo_ui_main.py @@ -0,0 +1,15 @@ +import gradio as gr +from tab1_1 import img_handle_1 +from tab1_2 import img_handle_2 +from tab1_3 import img_handle_3 +from tab2 import Car_segmentation +from tab3 import Car_detection + + +if __name__ == "__main__": + gr.close_all() + with gr.TabbedInterface( + [img_handle_1(), img_handle_2(), img_handle_3(), Car_segmentation(), Car_detection()], + ["图像处理1:几何处理", "图像处理2:颜色空间变化", "图像处理3:频率像素点操作", "进阶功能:车牌定位与分割", "YOLO车牌检测与OCR识别"], + ) as demo: + demo.launch(share=True) diff --git a/detect_tools.py b/detect_tools.py new file mode 100644 index 0000000..5376b76 --- /dev/null +++ b/detect_tools.py @@ -0,0 +1,91 @@ +# encoding:utf-8 +import cv2 +from PyQt5.QtGui import QPixmap, QImage +import numpy as np +from PIL import Image,ImageDraw,ImageFont +import csv +import os + +# fontC = ImageFont.truetype("Font/platech.ttf", 20, 0) + +# 绘图展示 +def cv_show(name,img): + cv2.imshow(name, img) + cv2.waitKey(0) + cv2.destroyAllWindows() + + +def drawRectBox(image, rect, addText, fontC, color=(0,0,255)): + """ + 绘制矩形框与结果 + :param image: 原始图像 + :param rect: 矩形框坐标, int类型 + :param addText: 类别名称 + :param fontC: 字体 + :return: + """ + # 绘制位置方框 + cv2.rectangle(image, (rect[0], rect[1]), + (rect[2], rect[3]), + color, 2) + + # 绘制字体背景框 + # cv2.rectangle(image, (rect[0] - 1, rect[1] - 50), (rect[2], rect[1]), color, -1, cv2.LINE_AA) + # 图片 添加的文字 位置 字体 字体大小 字体颜色 字体粗细.无法正常显示中文 + # cv2.putText(image, addText, (int(rect[0])+2, int(rect[1])-3), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 2) + + # 可以显示中文 + # 字体自适应大小 + font_size = int((rect[3]-rect[1])/1.5) + fontC = ImageFont.truetype("Font/platech.ttf", font_size, 0) + img = Image.fromarray(image) + draw = ImageDraw.Draw(img) + draw.text((rect[0]+2, rect[1]-font_size), addText, (0, 0, 255), font=fontC) + imagex = np.array(img) + return imagex + + +def img_cvread(path): + # 读取含中文名的图片文件 + # img = cv2.imread(path) + img = 
cv2.imdecode(np.fromfile(path, dtype=np.uint8), cv2.IMREAD_COLOR) + return img + + +def draw_boxes(img, boxes): + for each in boxes: + x1 = each[0] + y1 = each[1] + x2 = each[2] + y2 = each[3] + cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2) + return img + + + +def cvimg_to_qpiximg(cvimg): + height, width, depth = cvimg.shape + cvimg = cv2.cvtColor(cvimg, cv2.COLOR_BGR2RGB) + qimg = QImage(cvimg.data, width, height, width * depth, QImage.Format_RGB888) + qpix_img = QPixmap(qimg) + return qpix_img + + + + +# 封装函数:图片上显示中文 +def cv2AddChineseText(img, text, position, textColor=(0, 255, 0), textSize=50): + if (isinstance(img, np.ndarray)): # 判断是否OpenCV图片类型 + img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB)) + # 创建一个可以在给定图像上绘图的对象 + draw = ImageDraw.Draw(img) + # 字体的格式 + fontStyle = ImageFont.truetype( + "simsun.ttc", textSize, encoding="utf-8") + # 绘制文本 + draw.text(position, text, textColor, font=fontStyle) + # 转换回OpenCV格式 + return cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR) + + + diff --git a/flagged/log.csv b/flagged/log.csv new file mode 100644 index 0000000..7fe72b5 --- /dev/null +++ b/flagged/log.csv @@ -0,0 +1,3 @@ +query_image,output,flag,username,timestamp +flagged\query_image\84d31d63af14e21377a9\down.jpeg,flagged\output\c822449a03f2e35edc0f\image.webp,,,2024-07-02 03:56:38.879751 +flagged\query_image\1b33b1e3b74157eda3d3\013671875-90_268-248480_423559-419546_248559_250482_423480-0_0_3_24_25_32_28_30-153-86.jpg,,,,2024-07-02 04:01:23.109896 diff --git a/flagged/output/c822449a03f2e35edc0f/image.webp b/flagged/output/c822449a03f2e35edc0f/image.webp new file mode 100644 index 0000000..41bfabb Binary files /dev/null and b/flagged/output/c822449a03f2e35edc0f/image.webp differ diff --git a/flagged/query_image/1b33b1e3b74157eda3d3/013671875-90_268-248480_423559-419546_248559_250482_423480-0_0_3_24_25_32_28_30-153-86.jpg 
b/flagged/query_image/1b33b1e3b74157eda3d3/013671875-90_268-248480_423559-419546_248559_250482_423480-0_0_3_24_25_32_28_30-153-86.jpg new file mode 100644 index 0000000..bdc0b1e Binary files /dev/null and b/flagged/query_image/1b33b1e3b74157eda3d3/013671875-90_268-248480_423559-419546_248559_250482_423480-0_0_3_24_25_32_28_30-153-86.jpg differ diff --git a/flagged/query_image/84d31d63af14e21377a9/down.jpeg b/flagged/query_image/84d31d63af14e21377a9/down.jpeg new file mode 100644 index 0000000..69da069 Binary files /dev/null and b/flagged/query_image/84d31d63af14e21377a9/down.jpeg differ diff --git a/images/car.jpg b/images/car.jpg new file mode 100644 index 0000000..8c51400 Binary files /dev/null and b/images/car.jpg differ diff --git a/images/car.png b/images/car.png new file mode 100644 index 0000000..9926e4b Binary files /dev/null and b/images/car.png differ diff --git a/images/car_test.jpg b/images/car_test.jpg new file mode 100644 index 0000000..46a3c74 Binary files /dev/null and b/images/car_test.jpg differ diff --git a/images/imagexx.webp b/images/imagexx.webp new file mode 100644 index 0000000..5d0379b Binary files /dev/null and b/images/imagexx.webp differ diff --git a/imgTest.py b/imgTest.py new file mode 100644 index 0000000..b1e74be --- /dev/null +++ b/imgTest.py @@ -0,0 +1,71 @@ +#coding:utf-8 +from ultralytics import YOLO +import cv2 +import detect_tools as tools +from PIL import ImageFont +from paddleocr import PaddleOCR +import os +os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"#程序中链接了多个 OpenMP 运行时库的副本 + +def get_license_result(ocr,image): + """ + image:输入的车牌截取照片 + 输出,车牌号与置信度 + """ + result = ocr.ocr(image, cls=True)[0] + if result: + license_name, conf = result[0][1] + if '·' in license_name: + license_name = license_name.replace('·', '') + return license_name, conf + else: + return None, None + +# 需要检测的图片地址 +img_path = "TestFiles/down.jpeg" +now_img = tools.img_cvread(img_path) + +fontC = ImageFont.truetype("Font/platech.ttf", 50, 0) +# 加载ocr模型 
+cls_model_dir = 'paddleModels/whl/cls/ch_ppocr_mobile_v2.0_cls_infer' +rec_model_dir = 'paddleModels/whl/rec/ch/ch_PP-OCRv4_rec_infer' +ocr = PaddleOCR(use_angle_cls=False, lang="ch", det=False, cls_model_dir=cls_model_dir,rec_model_dir=rec_model_dir) + +# 所需加载的模型目录 +path = 'models/best.pt' +# 加载预训练模型 +# conf 0.25 object confidence threshold for detection +# iou 0.7 int.ersection over union (IoU) threshold for NMS +model = YOLO(path, task='detect') +# model = YOLO(path, task='detect',conf=0.5) +# 检测图片 +results = model(img_path)[0] + +location_list = results.boxes.xyxy.tolist() +if len(location_list) >= 1: + location_list = [list(map(int, e)) for e in location_list] + # 截取每个车牌区域的照片 + license_imgs = [] + for each in location_list: + x1, y1, x2, y2 = each + cropImg = now_img[y1:y2, x1:x2] + license_imgs.append(cropImg) + # cv2.imshow('111',cropImg) # cv2.waitKey(0) + # 车牌识别结果 + lisence_res = [] + conf_list = [] + for each in license_imgs: + license_num, conf = get_license_result(ocr, each) + print(license_num, conf) + if license_num: + lisence_res.append(license_num) + conf_list.append(conf) + else: + lisence_res.append('无法识别') + conf_list.append(0) + for text, box in zip(lisence_res, location_list): + now_img = tools.drawRectBox(now_img, box, text, fontC) + +# now_img = cv2.resize(now_img,dsize=None,fx=0.5,fy=0.5,interpolation=cv2.INTER_LINEAR) +# cv2.imshow("YOLOv8 Detection", now_img) +# cv2.waitKey(0) \ No newline at end of file diff --git a/img_enhancement.py b/img_enhancement.py new file mode 100644 index 0000000..9b19038 --- /dev/null +++ b/img_enhancement.py @@ -0,0 +1,218 @@ +import cv2 +import numpy as np +import albumentations as A + +#图像处理类 +class Image_enhancement: + def __init__(self,): + pass + """===============================1.几何处理:==================================""" + # 1.1 图片旋转(可选择旋转角度) + def roate(self, frame=None,angle=30): + h, w = frame.shape[:2] + M = cv2.getRotationMatrix2D((w / 2, h / 2), angle, 1) + img_roate = cv2.warpAffine(frame, M, 
(w, h)) + return img_roate + + # 1.2 图片缩放(可选择缩放大小) + def resize(self, frame=None,w=224,h=224): + img_resized = cv2.resize(frame, (w, h)) # w,h + return img_resized + + """===============================2.颜色空间变化:==================================""" + # 2.1 图片的对比度、饱和度、亮度、色调变化 + def ColorJitter(self, frame=None,brightness=0,hue=0,saturation=0,contrast=0): + transform = A.ColorJitter(brightness=float(brightness),hue=float(hue),saturation=float(saturation),contrast=float(contrast),p=1) + img_result = transform(image=np.array(frame)) + img_color = img_result['image'] + return img_color + + # 2.2 灰度图转换 + def ToGray(self, frame=None ): + # cvtColor的第一个参数是处理的图像,第二个是RGB2GRAY + gray_img = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY) + return gray_img + + # 2.3 直方图均衡化 + def equalhist(self, frame=None): + # 直方图均衡化增强对比度:通过调整图像的灰度分布,增加图像的对比度,从而使图像更加清晰。 + frame_lab = cv2.cvtColor(frame, cv2.COLOR_BGR2LAB) + frame_lab[:, :, 0] = cv2.equalizeHist(frame_lab[:, :, 0]) + enhanced_frame = cv2.cvtColor(frame_lab, cv2.COLOR_LAB2BGR) + return enhanced_frame + + """===============================3.频率像素点操作:==================================""" + # 3.1 模糊 + # 3.1.1 高斯滤波 + def Gaussblur(self, frame=None,count=3): + transform = A.GaussianBlur(blur_limit=count, p=1) + img_result = transform(image=np.array(frame))['image'] + return img_result + + # 3.1.3 随机模糊 + def Blur(self, frame=None, count=3): + transform = A.Blur(blur_limit=count, p=1) + img_result = transform(image=np.array(frame))['image'] + return img_result + + # 3.1.2 中值滤波 + def Medianblur(self, frame=None, count=3): + transform = A.MedianBlur(blur_limit=count, p=1) + img_result = transform(image=np.array(frame))['image'] + return img_result + + # 3.2 锐化 + # 3.2.1 使用sobel算子进行锐化 + def sobel(self, frame=None): + img_gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY) + kernelx1 = np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]], dtype=int) + kernely1 = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]], dtype=int) + x1 = cv2.filter2D(img_gray, cv2.CV_16S, 
kernelx1) + y1 = cv2.filter2D(img_gray, cv2.CV_16S, kernely1) + absX1 = cv2.convertScaleAbs(x1) + absY1 = cv2.convertScaleAbs(y1) + img_result = cv2.addWeighted(absX1, 0.5, absY1, 0.5, 0) + return img_result + + # 3.2.2 使用Prewitt算子进行锐化 + def Prewitt(self, frame=None): + img_gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY) + kernelx1 = np.array([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]], dtype=int) + kernely1 = np.array([[-1, -1, 1], [0, 0, 0], [1, 1, 1]], dtype=int) + x1 = cv2.filter2D(img_gray, cv2.CV_16S, kernelx1) + y1 = cv2.filter2D(img_gray, cv2.CV_16S, kernely1) + absX1 = cv2.convertScaleAbs(x1) + absY1 = cv2.convertScaleAbs(y1) + img_result = cv2.addWeighted(absX1, 0.5, absY1, 0.5, 0) + return img_result + + # 3.2.3 使用robert算子进行锐化 + def robert(self, frame=None): + img_gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY) + kernelx1 = np.array([[-1, 0], [0, 1]], dtype=int) + kernely1 = np.array([[0, -1], [1, 0]], dtype=int) + x1 = cv2.filter2D(img_gray, cv2.CV_16S, kernelx1) + y1 = cv2.filter2D(img_gray, cv2.CV_16S, kernely1) + absX1 = cv2.convertScaleAbs(x1) + absY1 = cv2.convertScaleAbs(y1) + img_result = cv2.addWeighted(absX1, 0.5, absY1, 0.5, 0) + return img_result + + # 3.3 添加噪声 + # 3.3.1 添加高斯噪声 + def add_gaussian_noise(self, frame=None, mean=0,sigma=30): + # 生成高斯噪声矩阵 + row, col, ch = frame.shape + gaussian = np.random.randn(row, col, ch) * sigma + mean + gaussian = gaussian.reshape(row, col, ch) + img_result = frame + gaussian + # 转换数据类型为8位无符号整数类型 + img_result = cv2.convertScaleAbs(img_result) + return img_result + # 3.3.2 添加椒盐噪声 + def add_salt_and_pepper_noise(self, frame=None, percentage=0): # percentage [0,100] + # 确保百分比在 0 到 100 之间 + if percentage < 0 or percentage > 100: + percentage=0 + + # 生成椒盐噪声矩阵 + row, col, ch = frame.shape + noise = np.zeros((row, col, ch), np.uint8) + for i in range(row): + for j in range(col): + rand = np.random.randint(0, 100) + if rand < percentage: + noise[i][j] = [0, 0, 0] + elif rand > 100 - percentage: + noise[i][j] = [255, 255, 
255] + else: + noise[i][j] = frame[i][j] + + # 将椒盐噪声矩阵添加到原始图像中 + img_result = cv2.add(frame, noise) + return img_result + # 3.3.3 添加均值噪声 + def add_mean_noise(self, frame=None, mean=0,std_dev=30): + # 生成均值噪声矩阵 + row, col, ch = frame.shape + noise = np.random.normal(mean, std_dev, (row, col, ch)).astype(np.uint8) + # 将均值噪声矩阵添加到原始图像中 + img_result = cv2.add(frame, noise) + return img_result + + # 3.4 边缘检测 + def Canny(self, frame=None): + # 转为灰度图 + gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) + # 高斯滤波 + gray = cv2.GaussianBlur(gray, (5, 5), 0) + # 边缘检测 + canny_edged = cv2.Canny(gray, 30, 250) + return canny_edged + + +if __name__ == "__main__": + # 类实例化 + img_enhance = Image_enhancement() + + img_path = r'images/car_test.jpg' + img = cv2.imread(img_path) + + # 测试1.1 图片旋转(可选择旋转角度) + img_roate = img_enhance.roate(img,45) + cv2.imwrite("out_imgs/out_1.jpg",img_roate) + # 测试1.2 图片缩放(可选择缩放大小) + img_resized = img_enhance.resize(img, w=224, h=224) + cv2.imwrite("out_imgs/out_2.jpg", img_resized) + + # 测试2.1 图片的对比度、饱和度、亮度、色调变化 + img_resized = img_enhance.ColorJitter(img, brightness=10,hue=10,saturation=10,contrast=10) + cv2.imwrite("out_imgs/out_3.jpg", img_resized) + # 测试2.2 灰度图转化 + gray_img = img_enhance.ToGray(img) + cv2.imwrite("out_imgs/out_4.jpg", gray_img) + # 测试2.3 直方图均衡化 + enhanced_frame = img_enhance.equalhist(img) + cv2.imwrite("out_imgs/out_5.jpg", enhanced_frame) + + # 3.1 模糊 + # 3.1.1 高斯滤波 + img_gauss = img_enhance.Gaussblur(img,count=5) + cv2.imwrite("out_imgs/out_6.jpg", img_gauss) + + # 3.1.2 随机模糊 + img_Blur = img_enhance.Blur(img, count=5) + cv2.imwrite("out_imgs/out_7.jpg", img_Blur) + + # 3.1.3 中值滤波 + img_Medianblur = img_enhance.Medianblur(img, count=5) + cv2.imwrite("out_imgs/out_8.jpg", img_Medianblur) + + # 3.2 锐化 + # 3.2.1 使用sobel算子进行锐化 + img_sobel = img_enhance.sobel(img) + cv2.imwrite("out_imgs/out_9.jpg", img_sobel) + # 3.2.2 使用Prewitt算子进行锐化 + img_Prewitt = img_enhance.Prewitt(img) + cv2.imwrite("out_imgs/out_10.jpg", img_Prewitt) + # 3.2.3 
使用robert算子进行锐化 + img_robert = img_enhance.robert(img) + cv2.imwrite("out_imgs/out_11.jpg", img_robert) + + # 3.3 添加噪声 + # 3.3.1 添加高斯噪声 + img_gaussian_noise = img_enhance.add_gaussian_noise(img,mean=0,sigma=30) + cv2.imwrite("out_imgs/out_12.jpg", img_gaussian_noise) + # 3.3.2 添加椒盐噪声 + img_salt_and_pepper_noise = img_enhance.add_salt_and_pepper_noise(img, percentage=10) + cv2.imwrite("out_imgs/out_13.jpg", img_salt_and_pepper_noise) + # 3.3.3 添加均值噪声 + img_mean_noise = img_enhance.add_mean_noise(img, mean=0, std_dev=30) + cv2.imwrite("out_imgs/out_14.jpg", img_mean_noise) + + # 3.4 边缘检测 + img_Canny = img_enhance.Canny(img) + cv2.imwrite("out_imgs/out_15.jpg", img_Canny) + + + diff --git a/models/best.pt b/models/best.pt new file mode 100644 index 0000000..bbef2e2 Binary files /dev/null and b/models/best.pt differ diff --git a/models/best_others.pt b/models/best_others.pt new file mode 100644 index 0000000..a5829f0 Binary files /dev/null and b/models/best_others.pt differ diff --git a/out_imgs/out_1.jpg b/out_imgs/out_1.jpg new file mode 100644 index 0000000..8c6f2f5 Binary files /dev/null and b/out_imgs/out_1.jpg differ diff --git a/out_imgs/out_10.jpg b/out_imgs/out_10.jpg new file mode 100644 index 0000000..57f6a9a Binary files /dev/null and b/out_imgs/out_10.jpg differ diff --git a/out_imgs/out_11.jpg b/out_imgs/out_11.jpg new file mode 100644 index 0000000..69ce8c1 Binary files /dev/null and b/out_imgs/out_11.jpg differ diff --git a/out_imgs/out_12.jpg b/out_imgs/out_12.jpg new file mode 100644 index 0000000..a57fc89 Binary files /dev/null and b/out_imgs/out_12.jpg differ diff --git a/out_imgs/out_13.jpg b/out_imgs/out_13.jpg new file mode 100644 index 0000000..de28d46 Binary files /dev/null and b/out_imgs/out_13.jpg differ diff --git a/out_imgs/out_14.jpg b/out_imgs/out_14.jpg new file mode 100644 index 0000000..2b0d564 Binary files /dev/null and b/out_imgs/out_14.jpg differ diff --git a/out_imgs/out_15.jpg b/out_imgs/out_15.jpg new file mode 100644 index 
0000000..8af49eb Binary files /dev/null and b/out_imgs/out_15.jpg differ diff --git a/out_imgs/out_2.jpg b/out_imgs/out_2.jpg new file mode 100644 index 0000000..8b141cc Binary files /dev/null and b/out_imgs/out_2.jpg differ diff --git a/out_imgs/out_3.jpg b/out_imgs/out_3.jpg new file mode 100644 index 0000000..6beb022 Binary files /dev/null and b/out_imgs/out_3.jpg differ diff --git a/out_imgs/out_4.jpg b/out_imgs/out_4.jpg new file mode 100644 index 0000000..dd6700a Binary files /dev/null and b/out_imgs/out_4.jpg differ diff --git a/out_imgs/out_5.jpg b/out_imgs/out_5.jpg new file mode 100644 index 0000000..f39e774 Binary files /dev/null and b/out_imgs/out_5.jpg differ diff --git a/out_imgs/out_6.jpg b/out_imgs/out_6.jpg new file mode 100644 index 0000000..4538ccc Binary files /dev/null and b/out_imgs/out_6.jpg differ diff --git a/out_imgs/out_7.jpg b/out_imgs/out_7.jpg new file mode 100644 index 0000000..ee077bc Binary files /dev/null and b/out_imgs/out_7.jpg differ diff --git a/out_imgs/out_8.jpg b/out_imgs/out_8.jpg new file mode 100644 index 0000000..1d19390 Binary files /dev/null and b/out_imgs/out_8.jpg differ diff --git a/out_imgs/out_9.jpg b/out_imgs/out_9.jpg new file mode 100644 index 0000000..519a516 Binary files /dev/null and b/out_imgs/out_9.jpg differ diff --git a/paddleModels/whl/cls/ch_ppocr_mobile_v2.0_cls_infer/inference.pdiparams b/paddleModels/whl/cls/ch_ppocr_mobile_v2.0_cls_infer/inference.pdiparams new file mode 100644 index 0000000..3449efb Binary files /dev/null and b/paddleModels/whl/cls/ch_ppocr_mobile_v2.0_cls_infer/inference.pdiparams differ diff --git a/paddleModels/whl/cls/ch_ppocr_mobile_v2.0_cls_infer/inference.pdiparams.info b/paddleModels/whl/cls/ch_ppocr_mobile_v2.0_cls_infer/inference.pdiparams.info new file mode 100644 index 0000000..f31a157 Binary files /dev/null and b/paddleModels/whl/cls/ch_ppocr_mobile_v2.0_cls_infer/inference.pdiparams.info differ diff --git 
a/paddleModels/whl/cls/ch_ppocr_mobile_v2.0_cls_infer/inference.pdmodel b/paddleModels/whl/cls/ch_ppocr_mobile_v2.0_cls_infer/inference.pdmodel new file mode 100644 index 0000000..b90c155 Binary files /dev/null and b/paddleModels/whl/cls/ch_ppocr_mobile_v2.0_cls_infer/inference.pdmodel differ diff --git a/paddleModels/whl/det/ch/ch_PP-OCRv4_det_infer/inference.pdiparams b/paddleModels/whl/det/ch/ch_PP-OCRv4_det_infer/inference.pdiparams new file mode 100644 index 0000000..089594a Binary files /dev/null and b/paddleModels/whl/det/ch/ch_PP-OCRv4_det_infer/inference.pdiparams differ diff --git a/paddleModels/whl/det/ch/ch_PP-OCRv4_det_infer/inference.pdiparams.info b/paddleModels/whl/det/ch/ch_PP-OCRv4_det_infer/inference.pdiparams.info new file mode 100644 index 0000000..082c148 Binary files /dev/null and b/paddleModels/whl/det/ch/ch_PP-OCRv4_det_infer/inference.pdiparams.info differ diff --git a/paddleModels/whl/det/ch/ch_PP-OCRv4_det_infer/inference.pdmodel b/paddleModels/whl/det/ch/ch_PP-OCRv4_det_infer/inference.pdmodel new file mode 100644 index 0000000..223b861 Binary files /dev/null and b/paddleModels/whl/det/ch/ch_PP-OCRv4_det_infer/inference.pdmodel differ diff --git a/paddleModels/whl/rec/ch/ch_PP-OCRv4_rec_infer/inference.pdiparams b/paddleModels/whl/rec/ch/ch_PP-OCRv4_rec_infer/inference.pdiparams new file mode 100644 index 0000000..4c3d9e9 Binary files /dev/null and b/paddleModels/whl/rec/ch/ch_PP-OCRv4_rec_infer/inference.pdiparams differ diff --git a/paddleModels/whl/rec/ch/ch_PP-OCRv4_rec_infer/inference.pdiparams.info b/paddleModels/whl/rec/ch/ch_PP-OCRv4_rec_infer/inference.pdiparams.info new file mode 100644 index 0000000..923329f Binary files /dev/null and b/paddleModels/whl/rec/ch/ch_PP-OCRv4_rec_infer/inference.pdiparams.info differ diff --git a/paddleModels/whl/rec/ch/ch_PP-OCRv4_rec_infer/inference.pdmodel b/paddleModels/whl/rec/ch/ch_PP-OCRv4_rec_infer/inference.pdmodel new file mode 100644 index 0000000..dccddcc Binary files /dev/null and 
# tab1_1.py -- geometric-transform tab (rotate / resize), reconstructed from a
# git-diff dump. Fixes: helper names corrected (pridict -> predict), inner
# default angle aligned with the slider default (was 30 vs slider 45).
def img_handle_1():
    """Build the Gradio Blocks demo for geometric image processing.

    Returns:
        gr.Blocks: a demo wiring an angle slider to image rotation and two
        number inputs to image resizing, both backed by the project-level
        ``img_enhance`` (Image_enhancement) instance.
    """

    def predict_rotate(query_image=None, angle=45):
        # NOTE(review): ``roate`` is the project API's (misspelled) method
        # name -- kept as-is so the call still resolves against the class.
        return img_enhance.roate(query_image, angle)

    def predict_resize(query_image=None, w=224, h=224):
        # gr.Number delivers floats; cast before resizing.
        return img_enhance.resize(query_image, int(w), int(h))

    # NOTE(review): the original title literal had markup stripped in the
    # dump; reconstructed as the visible text with surrounding blank lines.
    title = "\n图像处理操作1:几何处理\n"
    description = "1.对图像进行几何处理:图片旋转、图片缩放"

    with gr.Blocks() as demo:
        gr.Markdown(title)
        gr.Markdown(description)
        with gr.Row():
            with gr.Column(scale=1):
                img = gr.components.Image(label="图片")
                angle_num = gr.components.Slider(
                    minimum=0, maximum=360, step=5, value=45,
                    label="选择要旋转的角度")
                btn_1 = gr.Button("图片旋转")

                w = gr.Number(label="图片缩放宽为:", value=224)
                h = gr.Number(label="图片缩放高为:", value=224)
                btn_2 = gr.Button("图片缩放")

            with gr.Column(scale=1):
                out = gr.components.Image(label="处理后的图片为", height="auto")

        btn_1.click(fn=predict_rotate, inputs=[img, angle_num], outputs=out)
        btn_2.click(fn=predict_resize, inputs=[img, w, h], outputs=out)

    return demo


if __name__ == "__main__":
    with gr.TabbedInterface(
        [img_handle_1()],
        ["图像处理1:几何处理"],
    ) as demo:
        demo.launch()

# tab1_2.py -- colour-space tab (contrast jitter / grayscale / histogram
# equalisation), reconstructed from a git-diff dump. Fix: the conversion
# dispatcher had no fallback branch and raised UnboundLocalError for an
# unexpected method value.
def img_handle_2():
    """Build the Gradio Blocks demo for colour-space operations.

    Returns:
        gr.Blocks: demo wiring brightness/hue/contrast/saturation sliders to
        ``img_enhance.ColorJitter`` and a radio choice to grayscale or
        histogram-equalisation conversion.
    """

    def predict_jitter(query_image=None, brightness=10, hue=10,
                       contrast=10, saturation=10):
        # NOTE(review): the positional order passed below (brightness, hue,
        # saturation, contrast) differs from this closure's parameter order
        # (brightness, hue, contrast, saturation) -- contrast/saturation may
        # be swapped; confirm against Image_enhancement.ColorJitter's
        # signature. Call kept byte-identical to the original.
        return img_enhance.ColorJitter(query_image, brightness, hue,
                                       saturation, contrast)

    def predict_convert(query_image=None, method="灰度化"):
        if method == "灰度化":
            return img_enhance.ToGray(query_image)
        if method == "直方图均衡化":
            return img_enhance.equalhist(query_image)
        # Fix: original had no else and raised UnboundLocalError here;
        # echo the input unchanged for an unexpected method value.
        return query_image

    # NOTE(review): markup stripped in the dump; reconstructed visible text.
    title = "\n图像处理操作2:颜色空间变化\n"
    description = "2.对图像进行颜色空间变化:图片的对比度调整、灰度图转换、直方图均衡化"

    with gr.Blocks() as demo:
        gr.Markdown(title)
        gr.Markdown(description)
        with gr.Row():
            with gr.Column(scale=1):
                img = gr.components.Image(label="图片")
                brightness = gr.components.Slider(
                    minimum=0, maximum=100, step=5, value=10, label="选择亮度")
                hue = gr.components.Slider(
                    minimum=0, maximum=100, step=5, value=10, label="选择色调")
                contrast = gr.components.Slider(
                    minimum=0, maximum=100, step=5, value=10, label="选择对比度")
                saturation = gr.components.Slider(
                    minimum=0, maximum=100, step=5, value=10, label="选择饱和度")
                btn_1 = gr.Button("对比度调整")

                method = gr.components.Radio(
                    label="算法选择", choices=["灰度化", "直方图均衡化"],
                    value="灰度化")
                btn_2 = gr.Button("点击转化")

            with gr.Column(scale=1):
                out = gr.components.Image(label="处理后的图片为", height="auto")

        btn_1.click(fn=predict_jitter,
                    inputs=[img, brightness, hue, contrast, saturation],
                    outputs=out)
        btn_2.click(fn=predict_convert, inputs=[img, method], outputs=out)

    return demo


if __name__ == "__main__":
    with gr.TabbedInterface(
        [img_handle_2()],
        ["图像处理2:颜色空间变化"],
    ) as demo:
        demo.launch()
# tab1_3.py -- frequency/pixel-level tab (blur, sharpen, noise, edge
# detection), reconstructed from a git-diff dump. Fix: the median-blur branch
# hard-coded count=5, silently ignoring the "模糊次数" slider.
def img_handle_3():
    """Build the Gradio Blocks demo for blur/sharpen/noise/edge operations.

    Returns:
        gr.Blocks: demo dispatching each button to the matching
        ``img_enhance`` (Image_enhancement) method.
    """

    def predict_blur(query_image=None, method_1="高斯滤波", count=3):
        if method_1 == "高斯滤波":
            return img_enhance.Gaussblur(query_image, count)
        if method_1 == "随机模糊":
            return img_enhance.Blur(query_image, count)
        # 中值滤波 -- fix: honour the slider instead of a hard-coded 5.
        return img_enhance.Medianblur(query_image, count=count)

    def predict_sharpen(query_image=None, method_2="robert"):
        if method_2 == "sobel":
            return img_enhance.sobel(query_image)
        if method_2 == "Prewitt":
            return img_enhance.Prewitt(query_image)
        return img_enhance.robert(query_image)  # default: robert operator

    def predict_noise(query_image=None, method_3="高斯噪声",
                      mean=0, sigma=30, percentage=10):
        if method_3 == "高斯噪声":
            return img_enhance.add_gaussian_noise(query_image, mean, sigma)
        if method_3 == "椒盐噪声":
            return img_enhance.add_salt_and_pepper_noise(query_image, percentage)
        # 均值噪声 -- sigma is reused as the std-dev argument, as in the original.
        return img_enhance.add_mean_noise(query_image, mean, sigma)

    def predict_edge(query_image=None, method_4="yes"):
        # "no" returns the image untouched so the output panel still updates.
        return img_enhance.Canny(query_image) if method_4 == "yes" else query_image

    # NOTE(review): markup stripped in the dump; reconstructed visible text.
    title = "\n图像处理操作3:频率像素点操作\n"
    description = "3.对图像进行频率像素点操作:模糊、锐化、添加噪声、边缘检测等操作"

    with gr.Blocks() as demo:
        gr.Markdown(title)
        gr.Markdown(description)
        with gr.Row():
            with gr.Column(scale=1):
                img = gr.components.Image(label="图片")
                method_1 = gr.components.Radio(
                    label="模糊算法选择", choices=["高斯滤波", "随机模糊", "中值滤波"],
                    value="高斯滤波")
                count = gr.components.Slider(
                    minimum=0, maximum=8, step=1, value=3, label="模糊次数")
                btn_1 = gr.Button("模糊处理")

                method_2 = gr.components.Radio(
                    label="算子选择", choices=["sobel", "Prewitt", "robert"],
                    value="robert")
                btn_2 = gr.Button("锐化处理")

                method_3 = gr.components.Radio(
                    label="添加噪声类型选择", choices=["高斯噪声", "椒盐噪声", "均值噪声"],
                    value="高斯噪声")
                mean = gr.components.Slider(
                    minimum=0, maximum=100, step=2, value=0, label="均值")
                sigma = gr.components.Slider(
                    minimum=0, maximum=100, step=2, value=30, label="标准差")
                percentage = gr.components.Slider(
                    minimum=0, maximum=100, step=5, value=30, label="百分比")
                btn_3 = gr.Button("添加噪声")

                method_4 = gr.components.Radio(
                    label="是否边缘检测", choices=["yes", "no"], value="yes")
                btn_4 = gr.Button("边缘检测")

            with gr.Column(scale=1):
                out = gr.components.Image(label="处理后的图片为", height="auto")

        btn_1.click(fn=predict_blur, inputs=[img, method_1, count], outputs=out)
        btn_2.click(fn=predict_sharpen, inputs=[img, method_2], outputs=out)
        btn_3.click(fn=predict_noise,
                    inputs=[img, method_3, mean, sigma, percentage], outputs=out)
        btn_4.click(fn=predict_edge, inputs=[img, method_4], outputs=out)

    return demo


if __name__ == "__main__":
    with gr.TabbedInterface(
        [img_handle_3()],
        ["图像处理3:频率像素点操作"],
    ) as demo:
        demo.launch()
# tab2.py -- OpenCV licence-plate locate & segment tab, reconstructed from a
# git-diff dump. Fix: misspelled helper renamed (pridict -> predict); docs added.
def Car_segmentation():
    """Build the Gradio Blocks demo for plate location and segmentation.

    Returns:
        gr.Blocks: demo feeding the uploaded image through the project's
        ``car_plate_recognize`` pipeline and showing the located plate plus
        the per-character crops in a gallery.
    """

    def predict(query_image=None):
        # Gradio supplies RGB; swap channels so the OpenCV-based pipeline
        # sees its expected ordering (same conversion as the original).
        img_cvt = cv2.cvtColor(query_image, cv2.COLOR_BGR2RGB)
        plate, word_all = car_plate_recognize(img_cvt)
        return plate, word_all

    # NOTE(review): markup stripped in the dump; reconstructed visible text.
    title = "\n基于Opencv图像处理的车牌定位和分割\n"
    description = "对输入的车牌进行车牌的定位与分割操作"
    examples = [['images/car.jpg'], ['images/car.png'], ['images/car_test.jpg']]

    with gr.Blocks() as demo:
        gr.Markdown(title)
        gr.Markdown(description)
        with gr.Row():
            with gr.Column(scale=1):
                img = gr.components.Image(label="图片")
                btn = gr.Button("点击定位与分割")
            with gr.Column(scale=1):
                out_1 = gr.components.Image(label="车牌定位:", height="auto")
                out_2 = gr.Gallery(label="车牌分割:", columns=[4],
                                   height="auto", object_fit="contain")

        inputs = [img]
        outputs = [out_1, out_2]
        btn.click(fn=predict, inputs=inputs, outputs=outputs)
        gr.Examples(examples, inputs=inputs)

    return demo


if __name__ == "__main__":
    with gr.TabbedInterface(
        [Car_segmentation()],
        ["Opencv车牌定位与分割"],
    ) as demo:
        demo.launch(show_api=False, inbrowser=False)  # auth=("admin", '1234')
# tab3.py -- YOLOv8 plate detection + PaddleOCR recognition tab,
# reconstructed from a git-diff dump; behaviour identical to the original.
import gradio as gr
import cv2
import time
from ultralytics import YOLO
from paddleocr import PaddleOCR
import numpy as np
import detect_tools as tools
from imgTest import get_license_result
import os

os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"

# Detection model (YOLOv8) -- loaded once at module import.
model_path = 'models/best.pt'
yolo_model = YOLO(model_path, task='detect')

# Plate-recognition model (PaddleOCR, recognition only -- det/cls disabled).
cls_model_dir = 'paddleModels/whl/cls/ch_ppocr_mobile_v2.0_cls_infer'
rec_model_dir = 'paddleModels/whl/rec/ch/ch_PP-OCRv4_rec_infer'
ocr = PaddleOCR(use_angle_cls=False, lang="ch", det=False,
                cls_model_dir=cls_model_dir, rec_model_dir=rec_model_dir)


def Car_detection():
    """Build the Gradio Blocks demo for plate detection and OCR.

    Returns:
        gr.Blocks: demo that runs YOLO on the upload, crops each detected
        box, OCRs it, and returns the annotated image plus one plate number
        per line.
    """

    def predict_image(query_image):
        t0 = time.time()

        # Channel swap, as in the rest of the project (gradio gives RGB).
        frame = cv2.cvtColor(query_image, cv2.COLOR_BGR2RGB)
        print(f"Image preprocessing time: {time.time() - t0:.2f}s")

        # Detect plate regions with YOLOv8.
        t_yolo = time.time()
        detection = yolo_model(frame)[0]
        annotated = frame.copy()  # draw boxes on a copy, keep the source intact
        boxes = detection.boxes.xyxy.tolist()
        print(f"YOLO detection time: {time.time() - t_yolo:.2f}s")

        # OCR every detected region and draw its box.
        plate_texts = []
        for box in boxes:
            x1, y1, x2, y2 = (int(v) for v in box)
            region = frame[y1:y2, x1:x2]

            text, _conf = get_license_result(ocr, region)
            plate_texts.append(text if text else "无法识别")

            cv2.rectangle(annotated, (x1, y1), (x2, y2), (0, 255, 0), 2)

        return annotated, "\n".join(plate_texts)

    # NOTE(review): markup stripped in the dump; reconstructed visible text.
    title = "\n基于Opencv图像处理的车牌检测与识别\n"
    description = "上传一张包含车辆的图像,系统将检测车辆并识别车牌号码"
    # examples = [['images/car.jpg'], ['images/car.png'], ['images/car_test.jpg']]

    with gr.Blocks() as demo:
        gr.Markdown(title)
        gr.Markdown(description)
        with gr.Row():
            with gr.Column(scale=1):
                img = gr.components.Image(label="上传图片")
                btn = gr.Button("点击检测与识别")
            with gr.Column(scale=1):
                out_1 = gr.components.Image(label="YOLO定位结果:", height="auto")
                out_2 = gr.components.Textbox(label="车牌识别结果:", type="text", lines=6)

        btn.click(fn=predict_image, inputs=[img], outputs=[out_1, out_2])
        # gr.Examples(examples, inputs=[img])

    return demo


if __name__ == "__main__":
    with gr.TabbedInterface(
        [Car_detection()],
        ["Opencv车牌检测与识别"],
    ) as demo:
        demo.launch(show_api=False, inbrowser=True)