();
+ newSortedRect.add(chineseRect);
+ RebuildRect(sortedRect, newSortedRect, specIndex, color);
+
+ if (newSortedRect.size() == 0) {
+ return -3;
+ }
+
+ for (int i = 0; i < newSortedRect.size(); i++) {
+ Rect mr = newSortedRect.get(i);
+ Mat auxRoi = new Mat(img_threshold, mr);
+
+ auxRoi = preprocessChar(auxRoi);
+ if (this.isDebug) {
+ String str = tempPath + "debug_char_auxRoi_" + Integer.valueOf(i).toString() + ".jpg";
+ opencv_imgcodecs.imwrite(str, auxRoi);
+ }
+ resultVec.add(auxRoi);
+ }
+ return 0;
+ }
+
+ /**
+ * Character size verification: drops blocks whose size does not match a plate character
+ * (see the illustrative sketch after this method).
+ * @param r candidate character block (binary Mat)
+ * @return true if the block passes the size and aspect-ratio checks
+ */
+ public static Boolean verifySizes(Mat r) {
+ float aspect = 45.0f / 90.0f;
+ float charAspect = (float) r.cols() / (float) r.rows();
+ float error = 0.7f;
+ float minHeight = 10f;
+ float maxHeight = 35f;
+ // We have a different aspect ratio for number 1, and it can be ~0.2
+ float minAspect = 0.05f;
+ float maxAspect = aspect + aspect * error;
+ // area of pixels
+ float area = countNonZero(r);
+ // bb area
+ float bbArea = r.cols() * r.rows();
+ // % of pixel in area
+ float percPixels = area / bbArea;
+
+ return percPixels <= 1 && charAspect > minAspect && charAspect < maxAspect && r.rows() >= minHeight && r.rows() < maxHeight;
+ }
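+
+ // Illustrative sketch (added for documentation, not part of the original pipeline): verifySizes
+ // is meant to be applied to each candidate block cut from the thresholded plate image, for
+ // example to the bounding boxes returned by findContours. The helper name filterCharCandidates
+ // is hypothetical; OpenCV calls are fully qualified so the sketch does not depend on this
+ // file's static imports.
+ private static Vector<Rect> filterCharCandidates(Mat img_threshold) {
+ org.bytedeco.javacpp.opencv_core.MatVector contours = new org.bytedeco.javacpp.opencv_core.MatVector();
+ org.bytedeco.javacpp.opencv_imgproc.findContours(img_threshold.clone(), contours,
+ org.bytedeco.javacpp.opencv_imgproc.CV_RETR_EXTERNAL, org.bytedeco.javacpp.opencv_imgproc.CV_CHAIN_APPROX_NONE);
+ Vector<Rect> candidates = new Vector<Rect>();
+ for (int i = 0; i < contours.size(); i++) {
+ Rect box = org.bytedeco.javacpp.opencv_imgproc.boundingRect(contours.get(i));
+ // keep only blocks whose size and aspect ratio look like a single plate character
+ if (verifySizes(new Mat(img_threshold, box))) {
+ candidates.add(box);
+ }
+ }
+ return candidates;
+ }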
+
+ /**
+ * Character preprocessing: pads each character onto a square canvas and resizes it to
+ * CHAR_SIZE x CHAR_SIZE so that every character has the same dimensions
+ * (a worked example follows the method).
+ *
+ * @param in input character block
+ * @return normalized character block
+ */
+ private Mat preprocessChar(Mat in) {
+ int h = in.rows();
+ int w = in.cols();
+ int charSize = CHAR_SIZE;
+ Mat transformMat = Mat.eye(2, 3, CV_32F).asMat();
+ int m = Math.max(w, h);
+ transformMat.ptr(0, 2).put(Convert.getBytes(((m - w) / 2f)));
+ transformMat.ptr(1, 2).put(Convert.getBytes((m - h) / 2f));
+
+ Mat warpImage = new Mat(m, m, in.type());
+ warpAffine(in, warpImage, transformMat, warpImage.size(), INTER_LINEAR, opencv_core.BORDER_CONSTANT, new Scalar(0));
+
+ Mat out = new Mat();
+ resize(warpImage, out, new Size(charSize, charSize));
+
+ return out;
+ }
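+
+ // Worked example (added): a 14x30 character block first gets m = max(14, 30) = 30, is shifted by
+ // ((30 - 14) / 2, 0) = (8, 0) onto a 30x30 black canvas via warpAffine, and is then resized to
+ // CHAR_SIZE x CHAR_SIZE, so every character enters the classifier with identical dimensions.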
+
+ /**
+ * Removes the rivets along the top edge of the plate.
+ *
+ * Counts the number of 0/255 transitions in each row; a row whose count is below the threshold X
+ * is treated as a rivet row and is filled with 0 (blacked out). X can be tuned as needed
+ * (see the worked illustration after this method).
+ *
+ * @param img binary plate image
+ * @return the image with rivet rows cleared
+ */
+ private Mat clearLiuDing(Mat img) {
+ final int x = this.liuDingSize;
+
+ Mat jump = Mat.zeros(1, img.rows(), CV_32F).asMat();
+ for (int i = 0; i < img.rows(); i++) {
+ int jumpCount = 0;
+ for (int j = 0; j < img.cols() - 1; j++) {
+ if (img.ptr(i, j).get() != img.ptr(i, j + 1).get())
+ jumpCount++;
+ }
+ jump.ptr(i).put(Convert.getBytes((float) jumpCount));
+ }
+ for (int i = 0; i < img.rows(); i++) {
+ if (Convert.toFloat(jump.ptr(i)) <= x) {
+ for (int j = 0; j < img.cols(); j++) {
+ img.ptr(i, j).put((byte) 0);
+ }
+ }
+ }
+ return img;
+ }
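+
+ // Worked illustration (added; the helper below is hypothetical and not called by the pipeline):
+ // the "jump" value of a row is simply its number of 0<->255 transitions. A row crossing several
+ // characters produces many transitions, while a rivet row is nearly uniform, so its count falls
+ // below the liuDingSize threshold and the row is blacked out.
+ private static int countJumps(byte[] row) {
+ int jumps = 0;
+ for (int j = 0; j < row.length - 1; j++) {
+ if (row[j] != row[j + 1]) {
+ jumps++; // e.g. {0, 0, 255, 255, 0, 255} -> 3 transitions
+ }
+ }
+ return jumps;
+ }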
+
+ /**
+ * Guesses the position and size of the Chinese character from the special (city-letter) rect
+ * (a worked example follows the method).
+ *
+ * @param rectSpe rect of the special character
+ * @return guessed rect of the Chinese character
+ */
+ private Rect GetChineseRect(final Rect rectSpe) {
+ int height = rectSpe.height();
+ float newwidth = rectSpe.width() * 1.15f;
+ int x = rectSpe.x();
+ int y = rectSpe.y();
+
+ int newx = x - (int) (newwidth * 1.15);
+ newx = Math.max(newx, 0);
+ Rect a = new Rect(newx, y, (int) newwidth, height);
+ return a;
+ }
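+
+ // Worked example (added): for a city character at x = 40 with width = 20, the guess is a rect of
+ // width ~20 * 1.15 = 23 whose left edge is shifted a further ~23 * 1.15 = 26 px to the left
+ // (newx = 40 - 26 = 14, clamped at 0), i.e. a slightly wider box immediately left of the city
+ // character, where the Chinese character must sit.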
+
+ /**
+ * Finds the Rect of the city-indicator character, e.g. the "A" in 苏A7003X.
+ * This position is used because, whatever the letter is, its width and height are roughly
+ * the same, which makes the position estimate much more reliable
+ * (a worked example follows the method).
+ * @param vecRect
+ * @return index of the special (city) character rect
+ */
+ private int GetSpecificRect(final Vector<Rect> vecRect, PlateColor color) {
+ Vector<Integer> xpositions = new Vector<Integer>();
+ int maxHeight = 0;
+ int maxWidth = 0;
+ for (int i = 0; i < vecRect.size(); i++) {
+ xpositions.add(vecRect.get(i).x());
+
+ if (vecRect.get(i).height() > maxHeight) {
+ maxHeight = vecRect.get(i).height();
+ }
+ if (vecRect.get(i).width() > maxWidth) {
+ maxWidth = vecRect.get(i).width();
+ }
+ }
+
+ int specIndex = 0;
+ for (int i = 0; i < vecRect.size(); i++) {
+ Rect mr = vecRect.get(i);
+ int midx = mr.x() + mr.width() / 2;
+
+ if(PlateColor.GREEN.equals(color)) {
+ if ((mr.width() > maxWidth * 0.8 || mr.height() > maxHeight * 0.8)
+ && (midx < this.theMatWidth * 2 / 8 && midx > this.theMatWidth / 8)) {
+ specIndex = i;
+ }
+ } else {
+ // If a character is reasonably large and its center lies between 1/7 and 2/7 of the plate width, it is the special (city) character we are looking for
+ if ((mr.width() > maxWidth * 0.8 || mr.height() > maxHeight * 0.8)
+ && (midx < this.theMatWidth * 2 / 7 && midx > this.theMatWidth / 7)) {
+ specIndex = i;
+ }
+ }
+ }
+
+ return specIndex;
+ }
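+
+ // Worked example (added, assuming the plate crop keeps the 136 px width produced by PlateLocate):
+ // a blue/yellow plate expects the city character's center between 136/7 = 19 px and 2*136/7 = 38 px,
+ // while the longer green plates use the narrower 1/8..2/8 band (17..34 px).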
+
+ /**
+ * This function does two things:
+ *
+ * - drops every Rect to the left of the special-character Rect (the Chinese character's
+ * position is rebuilt later);
+ *
+ * - starting from the special-character Rect, keeps the next 6 Rects in order and discards
+ * the rest.
+ *
+ * @param vecRect
+ * @param outRect
+ * @param specIndex
+ * @return
+ */
+ private int RebuildRect(final Vector<Rect> vecRect, Vector<Rect> outRect, int specIndex, PlateColor color) {
+ // A plate has at most 7 characters; excluding the Chinese character leaves 6 Rects
+ int count = 6;
+ if(PlateColor.GREEN.equals(color)) {
+ count = 7; // green (new-energy) plates carry one extra character
+ }
+ for (int i = 0; i < vecRect.size(); i++) {
+ // Drop the Rects to the left of the special character; this may drop the Chinese Rect too, which is fine because it is rebuilt later.
+ if (i < specIndex)
+ continue;
+
+ outRect.add(vecRect.get(i));
+ if (--count == 0)
+ break;
+ }
+
+ return 0;
+ }
+
+ /**
+ * Sorts the Rects from left to right by their x position
+ * (a simpler comparator-based alternative is sketched after this method).
+ *
+ * @param vecRect
+ * @param out
+ * @return
+ */
+ public static void SortRect(final Vector<Rect> vecRect, Vector<Rect> out) {
+ Vector<Integer> orderIndex = new Vector<Integer>();
+ Vector<Integer> xpositions = new Vector<Integer>();
+ for (int i = 0; i < vecRect.size(); ++i) {
+ orderIndex.add(i);
+ xpositions.add(vecRect.get(i).x());
+ }
+
+ float min = xpositions.get(0);
+ int minIdx;
+ for (int i = 0; i < xpositions.size(); ++i) {
+ min = xpositions.get(i);
+ minIdx = i;
+ for (int j = i; j < xpositions.size(); ++j) {
+ if (xpositions.get(j) < min) {
+ min = xpositions.get(j);
+ minIdx = j;
+ }
+ }
+ int aux_i = orderIndex.get(i);
+ int aux_min = orderIndex.get(minIdx);
+ orderIndex.remove(i);
+ orderIndex.insertElementAt(aux_min, i);
+ orderIndex.remove(minIdx);
+ orderIndex.insertElementAt(aux_i, minIdx);
+
+ float aux_xi = xpositions.get(i);
+ float aux_xmin = xpositions.get(minIdx);
+ xpositions.remove(i);
+ xpositions.insertElementAt((int) aux_xmin, i);
+ xpositions.remove(minIdx);
+ xpositions.insertElementAt((int) aux_xi, minIdx);
+ }
+
+ for (int i = 0; i < orderIndex.size(); i++)
+ out.add(vecRect.get(orderIndex.get(i)));
+
+ return;
+ }
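+
+ // Equivalent, simpler alternative (added as a sketch; not wired into the pipeline): the same
+ // left-to-right ordering can be obtained by sorting a copy of the rects on their x() value.
+ private static Vector<Rect> sortRectsByX(Vector<Rect> vecRect) {
+ Vector<Rect> sorted = new Vector<Rect>(vecRect);
+ java.util.Collections.sort(sorted, new java.util.Comparator<Rect>() {
+ @Override
+ public int compare(Rect a, Rect b) {
+ return Integer.compare(a.x(), b.x());
+ }
+ });
+ return sorted;
+ }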
+
+ public void setLiuDingSize(int param) {
+ this.liuDingSize = param;
+ }
+
+ public void setColorThreshold(int param) {
+ this.colorThreshold = param;
+ }
+
+ public void setBluePercent(float param) {
+ this.bluePercent = param;
+ }
+
+ public final float getBluePercent() {
+ return this.bluePercent;
+ }
+
+ public void setWhitePercent(float param) {
+ this.whitePercent = param;
+ }
+
+ public final float getWhitePercent() {
+ return this.whitePercent;
+ }
+
+ public boolean getDebug() {
+ return this.isDebug;
+ }
+
+ public void setDebug(boolean isDebug) {
+ this.isDebug = isDebug;
+ }
+
+
+}
diff --git a/原yx-image-recognition/src/main/java/com/yuxue/easypr/core/CoreFunc.java b/原yx-image-recognition/src/main/java/com/yuxue/easypr/core/CoreFunc.java
new file mode 100644
index 00000000..91e8a4ce
--- /dev/null
+++ b/原yx-image-recognition/src/main/java/com/yuxue/easypr/core/CoreFunc.java
@@ -0,0 +1,269 @@
+package com.yuxue.easypr.core;
+
+import org.bytedeco.javacpp.BytePointer;
+import org.bytedeco.javacpp.opencv_core;
+import org.bytedeco.javacpp.opencv_core.Mat;
+import org.bytedeco.javacpp.opencv_core.MatVector;
+import org.bytedeco.javacpp.opencv_core.Size;
+import org.bytedeco.javacpp.opencv_highgui;
+import org.bytedeco.javacpp.opencv_imgproc;
+import org.bytedeco.javacpp.indexer.FloatIndexer;
+
+import com.yuxue.enumtype.Direction;
+import com.yuxue.enumtype.PlateColor;
+
+
+/**
+ *
+ * @author yuxue
+ * @date 2020-05-16 21:09
+ */
+public class CoreFunc {
+
+ /**
+ * Builds the binary match image for an input image against a color template.
+ *
+ * @param src
+ * input color (BGR) image
+ * @param r
+ * color template (blue or yellow)
+ * @param adaptive_minsv
+ * controls the minimum S and V thresholds:
+ *
+ * - if true, the minimum depends on H and is scaled accordingly
+ * (see the worked example after this method);
+ *
+ * - if false, no adaptation is done and the fixed minimum minabs_sv is used.
+ *
+ * @return output grey image containing only 0 and 255, where 255 means the pixel matched the color
+ */
+ public static Mat colorMatch(final Mat src, final PlateColor r, final boolean adaptive_minsv) {
+ final float max_sv = 255;
+ final float minref_sv = 64;
+ final float minabs_sv = 95;
+
+ // Convert to HSV; the color search mainly uses the H channel to match blue and yellow
+ Mat src_hsv = new Mat();
+ opencv_imgproc.cvtColor(src, src_hsv, opencv_imgproc.CV_BGR2HSV);
+ MatVector hsvSplit = new MatVector();
+ opencv_core.split(src_hsv, hsvSplit);
+ opencv_imgproc.equalizeHist(hsvSplit.get(2), hsvSplit.get(2));
+ opencv_core.merge(hsvSplit, src_hsv);
+
+ // Base color of the template to match; switch it to search for a different base color
+ int min_h = r.minH;
+ int max_h = r.maxH;
+
+ float diff_h = (float) ((max_h - min_h) / 2);
+ int avg_h = (int) (min_h + diff_h);
+
+ int channels = src_hsv.channels();
+ int nRows = src_hsv.rows();
+ // The number of data columns must account for the channel count
+ int nCols = src_hsv.cols() * channels;
+
+ // If the data is stored continuously, process it as a single row
+ if (src_hsv.isContinuous()) {
+ nCols *= nRows;
+ nRows = 1;
+ }
+
+ for (int i = 0; i < nRows; ++i) {
+ BytePointer p = src_hsv.ptr(i);
+ for (int j = 0; j < nCols; j += 3) {
+ int H = p.get(j) & 0xFF;
+ int S = p.get(j + 1) & 0xFF;
+ int V = p.get(j + 2) & 0xFF;
+
+ boolean colorMatched = false;
+
+ if (H > min_h && H < max_h) {
+ int Hdiff = 0;
+ if (H > avg_h)
+ Hdiff = H - avg_h;
+ else
+ Hdiff = avg_h - H;
+
+ float Hdiff_p = Hdiff / diff_h;
+
+ float min_sv = 0;
+ if (true == adaptive_minsv)
+ min_sv = minref_sv - minref_sv / 2 * (1 - Hdiff_p);
+ else
+ min_sv = minabs_sv;
+
+ if ((S > min_sv && S <= max_sv) && (V > min_sv && V <= max_sv))
+ colorMatched = true;
+ }
+
+ if (colorMatched == true) {
+ p.put(j, (byte) 0);
+ p.put(j + 1, (byte) 0);
+ p.put(j + 2, (byte) 255);
+ } else {
+ p.put(j, (byte) 0);
+ p.put(j + 1, (byte) 0);
+ p.put(j + 2, (byte) 0);
+ }
+ }
+ }
+
+ // Extract the binary grey image (the V channel) after color matching
+ MatVector hsvSplit_done = new MatVector();
+ opencv_core.split(src_hsv, hsvSplit_done);
+ Mat src_grey = hsvSplit_done.get(2);
+
+ return src_grey;
+ }
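+
+ // Worked example (added): with adaptive_minsv = true the S/V lower bound relaxes towards the
+ // center of the hue band: at H = avg_h (Hdiff_p = 0) min_sv = 64 - 32 * 1 = 32, while at the band
+ // edge (Hdiff_p = 1) min_sv = 64 - 32 * 0 = 64; with adaptive_minsv = false it is fixed at 95.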
+
+ /**
+ * Checks whether a plate crop matches the given color.
+ *
+ * @param src
+ * plate Mat
+ * @param color
+ * color template
+ * @param adaptive_minsv
+ * controls the minimum S and V thresholds:
+ *
+ * - if true, the minimum depends on H and is scaled accordingly;
+ *
+ * - if false, no adaptation is done and the fixed minimum minabs_sv is used.
+ *
+ * @return true if more than 49% of the pixels match the color
+ */
+ public static boolean plateColorJudge(final Mat src, final PlateColor color, final boolean adaptive_minsv) {
+ // Decision threshold: fraction of matching pixels required
+ final float thresh = 0.49f;
+
+ Mat gray = colorMatch(src, color, adaptive_minsv);
+
+ float percent = (float) opencv_core.countNonZero(gray) / (gray.rows() * gray.cols());
+
+ return percent > thresh;
+ }
+
+ /**
+ * getPlateType: determines the color type of a plate crop
+ * (see the usage sketch after this method).
+ *
+ * @param src
+ * @param adaptive_minsv
+ * controls the minimum S and V thresholds:
+ *
+ * - if true, the minimum depends on H and is scaled accordingly;
+ *
+ * - if false, no adaptation is done and the fixed minimum minabs_sv is used.
+ *
+ * @return the matched PlateColor, or UNKNOWN if no template matches
+ */
+ public static PlateColor getPlateType(final Mat src, final boolean adaptive_minsv) {
+ if (plateColorJudge(src, PlateColor.BLUE, adaptive_minsv) == true) {
+ return PlateColor.BLUE;
+ } else if (plateColorJudge(src, PlateColor.YELLOW, adaptive_minsv) == true) {
+ return PlateColor.YELLOW;
+ } else if (plateColorJudge(src, PlateColor.GREEN, adaptive_minsv) == true) {
+ return PlateColor.GREEN;
+ } else {
+ return PlateColor.UNKNOWN;
+ }
+ }
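+
+ // Usage sketch (added; the helper and the image path are illustrative only): classify the color
+ // of a plate crop loaded from disk. imread is fully qualified since opencv_imgcodecs is not
+ // imported in this file.
+ public static PlateColor classifyDemo(String plateImagePath) {
+ Mat plate = org.bytedeco.javacpp.opencv_imgcodecs.imread(plateImagePath);
+ // adaptive_minsv = true lets the S/V threshold follow the hue, as described above
+ return getPlateType(plate, true);
+ }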
+
+ /**
+ * Computes the projected histogram of the image along the vertical or horizontal direction,
+ * i.e. the non-zero pixel count per column or per row, normalized by its maximum
+ * (a worked example follows the method).
+ *
+ * @param img
+ * @param direction
+ * @return
+ */
+ public static float[] projectedHistogram(final Mat img, Direction direction) {
+ int sz = 0;
+ switch (direction) {
+ case HORIZONTAL:
+ sz = img.rows();
+ break;
+
+ case VERTICAL:
+ sz = img.cols();
+ break;
+
+ default:
+ break;
+ }
+
+ // Count the non-zero elements of each row or column and store the counts in nonZeroMat
+ float[] nonZeroMat = new float[sz];
+ opencv_core.extractChannel(img, img, 0);
+ for (int j = 0; j < sz; j++) {
+ Mat data = (direction == Direction.HORIZONTAL) ? img.row(j) : img.col(j);
+ int count = opencv_core.countNonZero(data);
+ nonZeroMat[j] = count;
+ }
+
+ // Normalize histogram
+ float max = 0;
+ for (int j = 0; j < nonZeroMat.length; ++j) {
+ max = Math.max(max, nonZeroMat[j]);
+ }
+
+ if (max > 0) {
+ for (int j = 0; j < nonZeroMat.length; ++j) {
+ nonZeroMat[j] /= max;
+ }
+ }
+
+ return nonZeroMat;
+ }
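+
+ // Worked example (added): for a 3-column binary image whose columns contain 4, 2 and 0 white
+ // pixels, the VERTICAL histogram is {4, 2, 0} and, after dividing by the maximum, {1.0, 0.5, 0.0}.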
+
+ /**
+ * Assign values to the feature vector.
+ *
+ * The sample feature is the concatenation of the vertical histogram, the horizontal histogram
+ * and a low-resolution copy of the image (see the worked example after this method).
+ *
+ * @param in
+ * @param sizeData
+ * size of the low-resolution image (sizeData x sizeData); may be 0 to skip it
+ * @return
+ */
+ public static Mat features(final Mat in, final int sizeData) {
+
+ float[] vhist = projectedHistogram(in, Direction.VERTICAL);
+ float[] hhist = projectedHistogram(in, Direction.HORIZONTAL);
+
+ Mat lowData = new Mat();
+ if (sizeData > 0) {
+ // guard against an empty target size: resize would otherwise fail with "(-215:Assertion failed) !ssize.empty() in function 'cv::resize'"
+ opencv_imgproc.resize(in, lowData, new Size(sizeData, sizeData));
+ }
+
+ int numCols = vhist.length + hhist.length + lowData.cols() * lowData.rows();
+ Mat out = Mat.zeros(1, numCols, opencv_core.CV_32F).asMat();
+ FloatIndexer idx = out.createIndexer();
+
+ int j = 0;
+ for (int i = 0; i < vhist.length; ++i, ++j) {
+ idx.put(0, j, vhist[i]);
+ }
+ for (int i = 0; i < hhist.length; ++i, ++j) {
+ idx.put(0, j, hhist[i]);
+ }
+ for (int x = 0; x < lowData.cols(); x++) {
+ for (int y = 0; y < lowData.rows(); y++, ++j) {
+ float val = lowData.ptr(x, y).get(0) & 0xFF;
+ idx.put(0, j, val);
+ }
+ }
+
+ return out;
+ }
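+
+ // Worked example (added): for a 20x20 input with sizeData = 10 the feature vector has
+ // cols + rows + sizeData * sizeData = 20 + 20 + 100 = 140 elements, laid out as
+ // [vertical histogram | horizontal histogram | flattened low-resolution image].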
+
+
+
+ /**
+ * Displays an image in a window and waits for a key press.
+ * @param title
+ * @param src
+ */
+ public static void showImage(final String title, final Mat src) {
+ if (src != null) {
+ opencv_highgui.imshow(title, src);
+ opencv_highgui.cvWaitKey(0);
+ }
+ }
+
+}
diff --git a/原yx-image-recognition/src/main/java/com/yuxue/easypr/core/Features.java b/原yx-image-recognition/src/main/java/com/yuxue/easypr/core/Features.java
new file mode 100644
index 00000000..6384828b
--- /dev/null
+++ b/原yx-image-recognition/src/main/java/com/yuxue/easypr/core/Features.java
@@ -0,0 +1,90 @@
+package com.yuxue.easypr.core;
+
+import static com.yuxue.easypr.core.CoreFunc.features;
+import static org.bytedeco.javacpp.opencv_core.merge;
+import static org.bytedeco.javacpp.opencv_core.split;
+
+import org.bytedeco.javacpp.opencv_core.Mat;
+import org.bytedeco.javacpp.opencv_core.MatVector;
+import org.bytedeco.javacpp.opencv_imgproc;
+
+/**
+ *
+ * @author yuxue
+ * @date 2020-05-05 08:26
+ */
+public class Features implements SVMCallback {
+
+ /**
+ * EasyPR getFeatures callback.
+ * Generates the histogram-equalization feature.
+ * @param image
+ * @return
+ */
+ @Override
+ public Mat getHisteqFeatures(final Mat image) {
+ return histeq(image);
+ }
+
+ private Mat histeq(Mat in) {
+ Mat out = new Mat(in.size(), in.type());
+ if (in.channels() == 3) {
+ Mat hsv = new Mat();
+ MatVector hsvSplit = new MatVector();
+ opencv_imgproc.cvtColor(in, hsv, opencv_imgproc.CV_BGR2HSV);
+ split(hsv, hsvSplit);
+ opencv_imgproc.equalizeHist(hsvSplit.get(2), hsvSplit.get(2));
+ merge(hsvSplit, hsv);
+ opencv_imgproc.cvtColor(hsv, out, opencv_imgproc.CV_HSV2BGR);
+ hsv = null;
+ hsvSplit = null;
+ System.gc();
+ } else if (in.channels() == 1) {
+ opencv_imgproc.equalizeHist(in, out);
+ }
+ return out;
+ }
+
+ /**
+ * EasyPR getFeatures callback.
+ * Returns the vertical and horizontal histogram features of the Otsu-binarized image.
+ * @param image
+ * @return
+ */
+ @Override
+ public Mat getHistogramFeatures(Mat image) {
+ Mat grayImage = new Mat();
+ opencv_imgproc.cvtColor(image, grayImage, opencv_imgproc.CV_RGB2GRAY);
+
+ Mat img_threshold = new Mat();
+ opencv_imgproc.threshold(grayImage, img_threshold, 0, 255, opencv_imgproc.CV_THRESH_OTSU + opencv_imgproc.CV_THRESH_BINARY);
+
+ return features(img_threshold, 0);
+ }
+
+ /**
+ * Callback for extracting SIFT feature descriptors.
+ *
+ * @param image
+ * @return
+ */
+ @Override
+ public Mat getSIFTFeatures(final Mat image) {
+ // TODO: not implemented yet
+ return null;
+ }
+
+ /**
+ * Callback for extracting HOG feature descriptors.
+ *
+ * @param image
+ * @return
+ */
+ @Override
+ public Mat getHOGFeatures(final Mat image) {
+ // TODO: not implemented yet
+ return null;
+ }
+
+
+}
diff --git a/原yx-image-recognition/src/main/java/com/yuxue/easypr/core/PlateDetect.java b/原yx-image-recognition/src/main/java/com/yuxue/easypr/core/PlateDetect.java
new file mode 100644
index 00000000..d3041770
--- /dev/null
+++ b/原yx-image-recognition/src/main/java/com/yuxue/easypr/core/PlateDetect.java
@@ -0,0 +1,111 @@
+package com.yuxue.easypr.core;
+
+import java.util.Vector;
+
+import org.bytedeco.javacpp.opencv_core.Mat;
+
+
+/**
+ * 车牌检测识别
+ * 分两个步骤: 1、车牌定位 2、车牌判断
+ * @author yuxue
+ * @date 2020-04-24 15:33
+ */
+public class PlateDetect {
+
+ // Plate locator / image processing object
+ private PlateLocate plateLocate = new PlateLocate();
+
+ // SVM judge for the candidate crops
+ private PlateJudge plateJudge = new PlateJudge();
+
+ /**
+ * @param src source image Mat (the source file path must not contain Chinese or special characters)
+ * @param resultVec output collection of plate crops
+ * @return the error number
+ *
+ * - 0: plate detected successfully;
+ *
+ * - -1: source Mat is empty;
+ *
+ * - -2: plate not detected.
+ *
+ * (a usage sketch follows this method)
+ */
+ public int plateDetect(final Mat src, Vector<Mat> resultVec) {
+ Vector<Mat> matVec = plateLocate.plateLocate(src); // locate candidate plates
+
+ if (0 == matVec.size()) {
+ return -1;
+ }
+
+ if (0 != plateJudge.plateJudge(matVec, resultVec)) { // run the SVM judgement on the candidates
+ return -2;
+ }
+ return 0;
+ }
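+
+ // Usage sketch (added; the helper and the image path are illustrative only): read an image from
+ // disk, then run locate + SVM judge in one call. A return value of 0 means plates were found and
+ // their crops are in the plates vector.
+ public int plateDetectDemo(String imagePath, Vector<Mat> plates) {
+ Mat src = org.bytedeco.javacpp.opencv_imgcodecs.imread(imagePath);
+ return plateDetect(src, plates);
+ }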
+
+
+ /**
+ * Switches between "life" mode (everyday photos) and industrial mode.
+ * @param pdLifemode
+ */
+ public void setPDLifemode(boolean pdLifemode) {
+ plateLocate.setLifemode(pdLifemode);
+ }
+
+ public void setGaussianBlurSize(int gaussianBlurSize) {
+ plateLocate.setGaussianBlurSize(gaussianBlurSize);
+ }
+
+ public final int getGaussianBlurSize() {
+ return plateLocate.getGaussianBlurSize();
+ }
+
+ public void setMorphSizeWidth(int morphSizeWidth) {
+ plateLocate.setMorphSizeWidth(morphSizeWidth);
+ }
+
+ public final int getMorphSizeWidth() {
+ return plateLocate.getMorphSizeWidth();
+ }
+
+ public void setMorphSizeHeight(int morphSizeHeight) {
+ plateLocate.setMorphSizeHeight(morphSizeHeight);
+ }
+
+ public final int getMorphSizeHeight() {
+ return plateLocate.getMorphSizeHeight();
+ }
+
+ public void setVerifyError(float verifyError) {
+ plateLocate.setVerifyError(verifyError);
+ }
+
+ public final float getVerifyError() {
+ return plateLocate.getVerifyError();
+ }
+
+ public void setVerifyAspect(float verifyAspect) {
+ plateLocate.setVerifyAspect(verifyAspect);
+ }
+
+ public final float getVerifyAspect() {
+ return plateLocate.getVerifyAspect();
+ }
+
+ public void setVerifyMin(int verifyMin) {
+ plateLocate.setVerifyMin(verifyMin);
+ }
+
+ public void setVerifyMax(int verifyMax) {
+ plateLocate.setVerifyMax(verifyMax);
+ }
+
+ public void setJudgeAngle(int judgeAngle) {
+ plateLocate.setJudgeAngle(judgeAngle);
+ }
+
+ public void setDebug(boolean debug, String tempPath) {
+ plateLocate.setDebug(debug);
+ plateLocate.setTempPath(tempPath);
+ }
+
+}
diff --git a/原yx-image-recognition/src/main/java/com/yuxue/easypr/core/PlateJudge.java b/原yx-image-recognition/src/main/java/com/yuxue/easypr/core/PlateJudge.java
new file mode 100644
index 00000000..a743ea45
--- /dev/null
+++ b/原yx-image-recognition/src/main/java/com/yuxue/easypr/core/PlateJudge.java
@@ -0,0 +1,107 @@
+package com.yuxue.easypr.core;
+
+import org.bytedeco.javacpp.opencv_core;
+import org.bytedeco.javacpp.opencv_imgproc;
+
+import java.util.Vector;
+
+import org.bytedeco.javacpp.opencv_core.Mat;
+import org.bytedeco.javacpp.opencv_core.Rect;
+import org.bytedeco.javacpp.opencv_core.Size;
+import org.bytedeco.javacpp.opencv_ml.SVM;
+
+import com.yuxue.constant.Constant;
+
+
+/**
+ * 车牌判断
+ * @author yuxue
+ * @date 2020-04-26 15:21
+ */
+public class PlateJudge {
+
+ private SVM svm = SVM.create();
+
+ public PlateJudge() {
+ loadSVM(Constant.DEFAULT_SVM_PATH);
+ }
+
+ public void loadSVM(String path) {
+ svm.clear();
+ // svm=SVM.loadSVM(path, "svm");
+ svm=SVM.load(path);
+ }
+
+ /**
+ * EasyPR getFeatures callback used to build the SVM feature vector from a plate image
+ */
+ private SVMCallback features = new Features();
+
+
+ /**
+ * Runs the SVM judgement on a single image.
+ * @param inMat
+ * @return
+ */
+ public int plateJudge(final Mat inMat) {
+ int ret = 1;
+ // Uses the model file produced by com.yuxue.train.SVMTrain
+ Mat features = this.features.getHistogramFeatures(inMat);
+ /*Mat samples = features.reshape(1, 1);
+ samples.convertTo(samples, opencv_core.CV_32F);*/
+
+ Mat p = features.reshape(1, 1);
+ p.convertTo(p, opencv_core.CV_32FC1);
+ ret = (int) svm.predict(features);
+ return ret;
+
+ // Alternative: use the model file produced by com.yuxue.train.PlateRecoTrain
+ // The sample crops passed in must be preprocessed the same way as during training
+ /*Mat grayImage = new Mat();
+ opencv_imgproc.cvtColor(inMat, grayImage, opencv_imgproc.CV_RGB2GRAY);
+ Mat dst = new Mat();
+ opencv_imgproc.Canny(grayImage, dst, 130, 250);
+ Mat samples = dst.reshape(1, 1);
+ samples.convertTo(samples, opencv_core.CV_32F);*/
+
+ // positive samples are labelled 0, negative samples 1
+ /*if(svm.predict(samples) <= 0) {
+ ret = 1;
+ }*/
+ /*ret = (int)svm.predict(samples);
+ System.err.println(ret);
+ return ret ;*/
+
+ }
+
+ /**
+ * Runs the SVM judgement on a batch of images.
+ * @param inVec
+ * @param resultVec
+ * @return
+ */
+ public int plateJudge(Vector<Mat> inVec, Vector<Mat> resultVec) {
+
+ for (int j = 0; j < inVec.size(); j++) {
+ Mat inMat = inVec.get(j);
+
+ if (1 == plateJudge(inMat)) {
+ resultVec.add(inMat);
+ } else { // otherwise retry once on the central part of the crop
+ int w = inMat.cols();
+ int h = inMat.rows();
+
+ Mat tmpDes = inMat.clone();
+ Mat tmpMat = new Mat(inMat, new Rect((int) (w * 0.05), (int) (h * 0.1), (int) (w * 0.9), (int) (h * 0.8)));
+ opencv_imgproc.resize(tmpMat, tmpDes, new Size(inMat.size()));
+
+ if (plateJudge(tmpDes) == 1) {
+ resultVec.add(inMat);
+ }
+ }
+ }
+ return 0;
+ }
+
+
+}
diff --git a/原yx-image-recognition/src/main/java/com/yuxue/easypr/core/PlateLocate.java b/原yx-image-recognition/src/main/java/com/yuxue/easypr/core/PlateLocate.java
new file mode 100644
index 00000000..c889d80d
--- /dev/null
+++ b/原yx-image-recognition/src/main/java/com/yuxue/easypr/core/PlateLocate.java
@@ -0,0 +1,354 @@
+package com.yuxue.easypr.core;
+
+import java.util.Vector;
+
+import static org.bytedeco.javacpp.opencv_core.*;
+import static org.bytedeco.javacpp.opencv_imgproc.*;
+
+import com.yuxue.constant.Constant;
+import org.bytedeco.javacpp.opencv_imgcodecs;
+
+import org.bytedeco.javacpp.opencv_core.CvPoint2D32f;
+import org.bytedeco.javacpp.opencv_core.Mat;
+import org.bytedeco.javacpp.opencv_core.MatVector;
+import org.bytedeco.javacpp.opencv_core.Point;
+import org.bytedeco.javacpp.opencv_core.Point2f;
+import org.bytedeco.javacpp.opencv_core.RotatedRect;
+import org.bytedeco.javacpp.opencv_core.Scalar;
+import org.bytedeco.javacpp.opencv_core.Size;
+
+
+/**
+ * 车牌定位
+ * @author yuxue
+ * @date 2020-04-24 15:33
+ */
+public class PlateLocate {
+
+ // Constants used by PlateLocate
+ public static final int DEFAULT_GAUSSIANBLUR_SIZE = 5;
+ public static final int SOBEL_SCALE = 1;
+ public static final int SOBEL_DELTA = 0;
+ public static final int SOBEL_DDEPTH = CV_16S;
+ public static final int SOBEL_X_WEIGHT = 1;
+ public static final int SOBEL_Y_WEIGHT = 0;
+ public static final int DEFAULT_MORPH_SIZE_WIDTH = 17;
+ public static final int DEFAULT_MORPH_SIZE_HEIGHT = 3;
+
+ // Constants used by showResultMat
+ public static final int WIDTH = 136;
+ public static final int HEIGHT = 36;
+ public static final int TYPE = CV_8UC3;
+
+ // Constants used by verifySizes
+ public static final int DEFAULT_VERIFY_MIN = 3;
+ public static final int DEFAULT_VERIFY_MAX = 20;
+
+ final float DEFAULT_ERROR = 0.6f;
+ final float DEFAULT_ASPECT = 3.75f;
+
+ // Constant used by the angle check
+ public static final int DEFAULT_ANGLE = 30;
+
+ // Gaussian blur parameter
+ protected int gaussianBlurSize = DEFAULT_GAUSSIANBLUR_SIZE;
+
+ // Morphological close (connect) parameters
+ protected int morphSizeWidth = DEFAULT_MORPH_SIZE_WIDTH;
+ protected int morphSizeHeight = DEFAULT_MORPH_SIZE_HEIGHT;
+
+ // Parameters used by verifySizes
+ protected float error = DEFAULT_ERROR;
+ protected float aspect = DEFAULT_ASPECT;
+ protected int verifyMin = DEFAULT_VERIFY_MIN;
+ protected int verifyMax = DEFAULT_VERIFY_MAX;
+
+ // Parameter used by the angle check
+ protected int angle = DEFAULT_ANGLE;
+
+ // Debug switch: when true, intermediate images are written to tempPath
+ protected boolean debug = true;
+
+ // Directory where debug crops are saved when debug mode is on
+ protected String tempPath = Constant.DEFAULT_TEMP_DIR + System.currentTimeMillis() + "/";
+
+ /**
+ * Switches between "life" mode and industrial mode.
+ * @param islifemode
+ * if true, tunes the parameters for locating plates in everyday photos (e.g. images from
+ * a web search); otherwise restores the defaults.
+ */
+ public void setLifemode(boolean islifemode) {
+ if (islifemode) {
+ setGaussianBlurSize(5);
+ setMorphSizeWidth(9);
+ setMorphSizeHeight(3);
+ setVerifyError(0.9f);
+ setVerifyAspect(4);
+ setVerifyMin(1);
+ setVerifyMax(30);
+ } else {
+ setGaussianBlurSize(DEFAULT_GAUSSIANBLUR_SIZE);
+ setMorphSizeWidth(DEFAULT_MORPH_SIZE_WIDTH);
+ setMorphSizeHeight(DEFAULT_MORPH_SIZE_HEIGHT);
+ setVerifyError(DEFAULT_ERROR);
+ setVerifyAspect(DEFAULT_ASPECT);
+ setVerifyMin(DEFAULT_VERIFY_MIN);
+ setVerifyMax(DEFAULT_VERIFY_MAX);
+ }
+ }
+
+ /**
+ * Locates candidate plate regions in an image.
+ * @param src source image
+ * @return a vector of Mats holding every captured candidate region
+ */
+ public Vector<Mat> plateLocate(Mat src) {
+ Vector<Mat> resultVec = new Vector<Mat>();
+
+ Mat src_blur = new Mat();
+ Mat src_gray = new Mat();
+ Mat grad = new Mat();
+
+ int scale = SOBEL_SCALE;
+ int delta = SOBEL_DELTA;
+ int ddepth = SOBEL_DDEPTH;
+
+ // Gaussian blur. The kernel Size directly affects how well plates are located.
+ GaussianBlur(src, src_blur, new Size(gaussianBlurSize, gaussianBlurSize), 0, 0, BORDER_DEFAULT);
+ if (debug) {
+ opencv_imgcodecs.imwrite(tempPath + "debug_GaussianBlur.jpg", src_blur);
+ }
+
+ // Convert the image to grayscale
+ cvtColor(src_blur, src_gray, CV_RGB2GRAY);
+ if (debug) {
+ opencv_imgcodecs.imwrite(tempPath + "debug_gray.jpg", src_gray);
+ }
+
+ // Apply the Sobel operator to obtain the first-order horizontal derivative of the image.
+
+ // Generate grad_x and grad_y
+ Mat grad_x = new Mat();
+ Mat grad_y = new Mat();
+ Mat abs_grad_x = new Mat();
+ Mat abs_grad_y = new Mat();
+
+ Sobel(src_gray, grad_x, ddepth, 1, 0, 3, scale, delta, BORDER_DEFAULT);
+ convertScaleAbs(grad_x, abs_grad_x);
+
+ Sobel(src_gray, grad_y, ddepth, 0, 1, 3, scale, delta, BORDER_DEFAULT);
+ convertScaleAbs(grad_y, abs_grad_y);
+
+ // Total Gradient (approximate)
+ addWeighted(abs_grad_x, SOBEL_X_WEIGHT, abs_grad_y, SOBEL_Y_WEIGHT, 0, grad);
+
+ if (debug) {
+ opencv_imgcodecs.imwrite(tempPath + "debug_Sobel.jpg", grad);
+ }
+
+ // Binarize the image: turn the grey image (256 possible values per pixel) into a binary image (only 0 and 255).
+
+ Mat img_threshold = new Mat();
+ threshold(grad, img_threshold, 0, 255, CV_THRESH_OTSU + CV_THRESH_BINARY);
+
+ if (debug) {
+ opencv_imgcodecs.imwrite(tempPath + "debug_threshold.jpg", img_threshold);
+ }
+
+ // Morphological close: after this operation the plate area is connected into a roughly rectangular blob.
+
+ Mat element = getStructuringElement(MORPH_RECT, new Size(morphSizeWidth, morphSizeHeight));
+ morphologyEx(img_threshold, img_threshold, MORPH_CLOSE, element);
+
+ if (debug) {
+ opencv_imgcodecs.imwrite(tempPath + "debug_morphology.jpg", img_threshold);
+ }
+
+ // Find the contours of possible plates. All contours in the image are extracted, so they must be filtered afterwards.
+
+ MatVector contours = new MatVector();
+ findContours(img_threshold, contours, // a vector of contours
+ CV_RETR_EXTERNAL, // extract external contours only
+ CV_CHAIN_APPROX_NONE); // all pixels of each contours
+
+ Mat result = new Mat();
+ if (debug) {
+ src.copyTo(result);
+ // Draw the contours onto the image for debugging output
+ drawContours(result, contours, -1, new Scalar(0, 0, 255, 255));
+ opencv_imgcodecs.imwrite(tempPath + "debug_Contours.jpg", result);
+ }
+
+ // Start to iterate over each contour found.
+ // Filtering: compute the minimum-area bounding rectangle of each contour, verify it, and drop those that fail.
+ Vector<RotatedRect> rects = new Vector<RotatedRect>();
+
+ for (int i = 0; i < contours.size(); ++i) {
+ RotatedRect mr = minAreaRect(contours.get(i));
+ if (verifySizes(mr))
+ rects.add(mr);
+ }
+
+ int k = 1;
+ for (int i = 0; i < rects.size(); i++) {
+ RotatedRect minRect = rects.get(i);
+ /*if (debug) {
+ Point2f rect_points = new Point2f(4);
+ minRect.points(rect_points);
+
+ for (int j = 0; j < 4; j++) {
+ Point pt1 = new Point(new CvPoint2D32f(rect_points.position(j)));
+ Point pt2 = new Point(new CvPoint2D32f(rect_points.position((j + 1) % 4)));
+
+ line(result, pt1, pt2, new Scalar(0, 255, 255, 255), 1, 8, 0);
+ }
+ }*/
+
+ // rotated rectangle drawing
+ // The rotation code below does straighten some skewed plates, but it also mistakenly skews even more plates that were already straight, so on balance it was once disabled.
+ // 2014-08-14: a new batch of images contained many skewed plates, so this code is being tried again.
+
+ float r = minRect.size().width() / minRect.size().height();
+ float angle = minRect.angle();
+ Size rect_size = new Size((int) minRect.size().width(), (int) minRect.size().height());
+ if (r < 1) {
+ angle = 90 + angle;
+ rect_size = new Size(rect_size.height(), rect_size.width());
+ }
+ // If the captured block is rotated by more than the configured angle it is not a plate; skip it
+ if (angle - this.angle < 0 && angle + this.angle > 0) {
+ Mat img_rotated = new Mat();
+ Mat rotmat = getRotationMatrix2D(minRect.center(), angle, 1);
+ warpAffine(src, img_rotated, rotmat, src.size()); // CV_INTER_CUBIC
+
+ Mat resultMat = showResultMat(img_rotated, rect_size, minRect.center(), k++);
+ resultVec.add(resultMat);
+ }
+ }
+
+ return resultVec;
+ }
+
+
+ /**
+ * Validates the minimum-area rectangle returned by minAreaRect using its area and aspect ratio
+ * (a worked numeric example follows the method).
+ *
+ * @param mr
+ * @return
+ */
+ private boolean verifySizes(RotatedRect mr) {
+ float error = this.error;
+
+ // China car plate size: 440mm*140mm,aspect 3.142857
+ float aspect = this.aspect;
+ int min = 44 * 14 * verifyMin; // minimum area
+ int max = 44 * 14 * verifyMax; // maximum area
+
+ // Get only patchs that match to a respect ratio.
+ float rmin = aspect - aspect * error;
+ float rmax = aspect + aspect * error;
+
+ int area = (int) (mr.size().height() * mr.size().width());
+ float r = mr.size().width() / mr.size().height();
+ if (r < 1)
+ r = mr.size().height() / mr.size().width();
+
+ return area >= min && area <= max && r >= rmin && r <= rmax;
+ }
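+
+ // Worked example (added): with the defaults aspect = 3.75 and error = 0.6 the accepted aspect
+ // ratio range is 3.75 * 0.4 = 1.5 to 3.75 * 1.6 = 6.0, and with verifyMin = 3 / verifyMax = 20
+ // the accepted area range is 44 * 14 * 3 = 1848 to 44 * 14 * 20 = 12320 pixels.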
+
+ /**
+ * Crops and resizes the final plate image, which makes it easy to check whether the rotation succeeded.
+ * @param src
+ * @param rect_size
+ * @param center
+ * @param index
+ * @return
+ */
+ private Mat showResultMat(Mat src, Size rect_size, Point2f center, int index) {
+ Mat img_crop = new Mat();
+ getRectSubPix(src, rect_size, center, img_crop);
+
+ if (debug) {
+ opencv_imgcodecs.imwrite(tempPath + "debug_crop_" + index + ".jpg", img_crop);
+ }
+
+ Mat resultResized = new Mat();
+ resultResized.create(HEIGHT, WIDTH, TYPE);
+ resize(img_crop, resultResized, resultResized.size(), 0, 0, INTER_CUBIC);
+ if (debug) {
+ opencv_imgcodecs.imwrite(tempPath + "debug_resize_" + index + ".jpg", resultResized);
+ }
+ return resultResized;
+ }
+
+
+
+ public String getTempPath() {
+ return tempPath;
+ }
+
+ public void setTempPath(String tempPath) {
+ this.tempPath = tempPath;
+ }
+
+ public void setGaussianBlurSize(int gaussianBlurSize) {
+ this.gaussianBlurSize = gaussianBlurSize;
+ }
+
+ public final int getGaussianBlurSize() {
+ return this.gaussianBlurSize;
+ }
+
+ public void setMorphSizeWidth(int morphSizeWidth) {
+ this.morphSizeWidth = morphSizeWidth;
+ }
+
+ public final int getMorphSizeWidth() {
+ return this.morphSizeWidth;
+ }
+
+ public void setMorphSizeHeight(int morphSizeHeight) {
+ this.morphSizeHeight = morphSizeHeight;
+ }
+
+ public final int getMorphSizeHeight() {
+ return this.morphSizeHeight;
+ }
+
+ public void setVerifyError(float error) {
+ this.error = error;
+ }
+
+ public final float getVerifyError() {
+ return this.error;
+ }
+
+ public void setVerifyAspect(float aspect) {
+ this.aspect = aspect;
+ }
+
+ public final float getVerifyAspect() {
+ return this.aspect;
+ }
+
+ public void setVerifyMin(int verifyMin) {
+ this.verifyMin = verifyMin;
+ }
+
+ public void setVerifyMax(int verifyMax) {
+ this.verifyMax = verifyMax;
+ }
+
+ public void setJudgeAngle(int angle) {
+ this.angle = angle;
+ }
+
+ public void setDebug(boolean debug) {
+ this.debug = debug;
+ }
+ public boolean getDebug() {
+ return debug;
+ }
+
+}
diff --git a/原yx-image-recognition/src/main/java/com/yuxue/easypr/core/SVMCallback.java b/原yx-image-recognition/src/main/java/com/yuxue/easypr/core/SVMCallback.java
new file mode 100644
index 00000000..037c198e
--- /dev/null
+++ b/原yx-image-recognition/src/main/java/com/yuxue/easypr/core/SVMCallback.java
@@ -0,0 +1,44 @@
+package com.yuxue.easypr.core;
+
+import org.bytedeco.javacpp.opencv_core.Mat;
+
+
+/**
+ * @author Created by fanwenjie
+ * @author lin.yao
+ *
+ */
+public interface SVMCallback {
+
+ /**
+ * EasyPR getFeatures callback: generates the histogram-equalization feature.
+ *
+ * @param image
+ * @return
+ */
+ public abstract Mat getHisteqFeatures(final Mat image);
+
+ /**
+ * EasyPR getFeatures callback: returns the vertical and horizontal histogram features.
+ *
+ * @param image
+ * @return
+ */
+ public abstract Mat getHistogramFeatures(final Mat image);
+
+ /**
+ * Callback for extracting SIFT feature descriptors.
+ *
+ * @param image
+ * @return
+ */
+ public abstract Mat getSIFTFeatures(final Mat image);
+
+ /**
+ * Callback for extracting HOG feature descriptors.
+ *
+ * @param image
+ * @return
+ */
+ public abstract Mat getHOGFeatures(final Mat image);
+}
diff --git a/原yx-image-recognition/src/main/java/com/yuxue/entity/PlateFileEntity.java b/原yx-image-recognition/src/main/java/com/yuxue/entity/PlateFileEntity.java
new file mode 100644
index 00000000..8b115a08
--- /dev/null
+++ b/原yx-image-recognition/src/main/java/com/yuxue/entity/PlateFileEntity.java
@@ -0,0 +1,81 @@
+package com.yuxue.entity;
+
+import java.io.Serializable;
+import java.util.List;
+
+import lombok.Data;
+import lombok.NoArgsConstructor;
+
+/**
+ * t_plate_file
+ * @author yuxue
+ * 2020-04-30 11:04:47.169
+ */
+@Data
+@NoArgsConstructor
+public class PlateFileEntity implements Serializable {
+ /**
+ * id
+ */
+ private Integer id;
+
+ /**
+ * fileName
+ */
+ private String fileName;
+
+ /**
+ * filePath
+ */
+ private String filePath;
+
+ /**
+ * fileType
+ */
+ private String fileType;
+
+ /**
+ * fileLength
+ */
+ private Integer fileLength;
+
+ /**
+ * plate
+ */
+ private String plate;
+
+ /**
+ * plateColor
+ */
+ private String plateColor;
+
+ /**
+ * lastRecoTime
+ */
+ private String lastRecoTime;
+
+ /**
+ * tempPath
+ */
+ private String tempPath;
+
+ /**
+ * recoPlate
+ */
+ private String recoPlate;
+
+ /**
+ * recoColor
+ */
+ private String recoColor;
+
+ /**
+ * recoCorrect
+ * 0 = not recognized, 1 = correct, 2 = wrong, 3 = no plate detected
+ */
+ private Integer recoCorrect;
+
+ private List<PlateRecoDebugEntity> debug;
+
+ private static final long serialVersionUID = 1L;
+}
\ No newline at end of file
diff --git a/原yx-image-recognition/src/main/java/com/yuxue/entity/PlateRecoDebugEntity.java b/原yx-image-recognition/src/main/java/com/yuxue/entity/PlateRecoDebugEntity.java
new file mode 100644
index 00000000..6f8935fd
--- /dev/null
+++ b/原yx-image-recognition/src/main/java/com/yuxue/entity/PlateRecoDebugEntity.java
@@ -0,0 +1,66 @@
+package com.yuxue.entity;
+
+import java.io.Serializable;
+import lombok.Data;
+import lombok.NoArgsConstructor;
+
+/**
+ * t_plate_reco_debug
+ * @author yuxue
+ * 2020-04-30 16:17:58.795
+ */
+@Data
+@NoArgsConstructor
+public class PlateRecoDebugEntity implements Serializable {
+ /**
+ * id
+ */
+ private Integer id;
+
+ /**
+ * parentId
+ */
+ private Integer parentId;
+
+ /**
+ * fileName
+ */
+ private String fileName;
+
+ /**
+ * filePath
+ */
+ private String filePath;
+
+ /**
+ * debugType
+ */
+ private String debugType;
+
+ /**
+ * fileLength
+ */
+ private Integer fileLength;
+
+ /**
+ * lastRecoTime
+ */
+ private String lastRecoTime;
+
+ /**
+ * recoPlate
+ */
+ private String recoPlate;
+
+ /**
+ * plateColor
+ */
+ private String plateColor;
+
+ /**
+ * sort
+ */
+ private Integer sort;
+
+ private static final long serialVersionUID = 1L;
+}
\ No newline at end of file
diff --git a/原yx-image-recognition/src/main/java/com/yuxue/entity/Result.java b/原yx-image-recognition/src/main/java/com/yuxue/entity/Result.java
new file mode 100644
index 00000000..d04272b3
--- /dev/null
+++ b/原yx-image-recognition/src/main/java/com/yuxue/entity/Result.java
@@ -0,0 +1,93 @@
+package com.yuxue.entity;
+
+import java.util.HashMap;
+
+import com.yuxue.exception.ErrorEnum;
+
+
+/**
+ * 返回值封装模型类
+ * @author yuxue
+ * @date 2018-09-07
+ */
+ public class Result extends HashMap<String, Object> {
+
+ private static final long serialVersionUID = 1L;
+
+ private static final Integer SUCCESS_CODE = 200;
+ private static final String SUCCESS_INFO = "Success!";
+
+ public Result() {
+ put("code", SUCCESS_CODE);
+ put("msg", SUCCESS_INFO);
+ put("success", true);
+ }
+
+ public Result(Object obj) {
+ put("code", SUCCESS_CODE);
+ put("msg", SUCCESS_INFO);
+ put("obj", obj);
+ put("success", true);
+ }
+
+ public static Result ok() {
+ return new Result();
+ }
+
+ public static Result ok(Object obj) {
+ return new Result(obj);
+ }
+
+ /**
+ * Data returned for the to-do task aspect.
+ * Unrelated to the front-end business logic.
+ *
+ * @param todo
+ * @return
+ */
+ public static Result ok(Object obj, Object todo) {
+ Result result = new Result(obj);
+ result.put("todo", todo);
+ return result;
+ }
+
+ public static Result error() {
+ return error(ErrorEnum.COMMON_ERROR);
+ }
+
+ public static Result error(String msg) {
+ Result result = error(ErrorEnum.COMMON_ERROR);
+ result.put("msg", msg);
+ return result;
+ }
+
+ public static Result error(String msg, int code) {
+ Result result = error(ErrorEnum.COMMON_ERROR);
+ result.put("msg", msg);
+ result.put("code", code);
+ return result;
+ }
+
+ public static Result error(ErrorEnum fwWebError) {
+ Result result = new Result();
+ result.put("code", fwWebError.code);
+ result.put("msg", fwWebError.msg);
+ result.put("success", false);
+ return result;
+ }
+
+ public static Result error(int code, String msg) {
+ Result result = new Result();
+ result.put("code", code);
+ result.put("msg", msg);
+ result.put("success", false);
+ return result;
+ }
+
+
+ @Override
+ public Result put(String key, Object value) {
+ super.put(key, value);
+ return this;
+ }
+}
diff --git a/原yx-image-recognition/src/main/java/com/yuxue/entity/SystemMenuEntity.java b/原yx-image-recognition/src/main/java/com/yuxue/entity/SystemMenuEntity.java
new file mode 100644
index 00000000..b3044865
--- /dev/null
+++ b/原yx-image-recognition/src/main/java/com/yuxue/entity/SystemMenuEntity.java
@@ -0,0 +1,53 @@
+package com.yuxue.entity;
+
+import java.io.Serializable;
+import java.util.Date;
+
+import lombok.Data;
+import lombok.NoArgsConstructor;
+
+/**
+ * t_system_menu
+ * @author
+ */
+@Data
+@NoArgsConstructor
+public class SystemMenuEntity implements Serializable {
+ private Integer id;
+
+ private String menuName;
+
+ private String menuUrl;
+
+ private Integer parentId;
+
+ private Integer sort;
+
+ private Integer menuLevel;
+
+ private String menuIcon;
+
+ private Integer showFlag;
+
+ private Integer platform;
+
+ private Integer menuType;
+
+ private String permission;
+
+ private Date updateTime;
+
+ private Integer editorId;
+
+ private String createTime;
+
+ private Integer creatorId;
+
+ private Integer version;
+
+ private Integer delFlag;
+
+ private static final long serialVersionUID = 1L;
+
+
+}
\ No newline at end of file
diff --git a/原yx-image-recognition/src/main/java/com/yuxue/entity/TempPlateFileEntity.java b/原yx-image-recognition/src/main/java/com/yuxue/entity/TempPlateFileEntity.java
new file mode 100644
index 00000000..09f21875
--- /dev/null
+++ b/原yx-image-recognition/src/main/java/com/yuxue/entity/TempPlateFileEntity.java
@@ -0,0 +1,51 @@
+package com.yuxue.entity;
+
+import java.io.Serializable;
+import lombok.Data;
+import lombok.NoArgsConstructor;
+
+/**
+ * temp_plate_file
+ * @author yuxue
+ * 2020-04-30 09:39:59.928
+ */
+@Data
+@NoArgsConstructor
+public class TempPlateFileEntity implements Serializable {
+ /**
+ * id
+ */
+ private Integer id;
+
+ /**
+ * fileName
+ */
+ private String fileName;
+
+ /**
+ * filePath
+ */
+ private String filePath;
+
+ /**
+ * fileType
+ */
+ private String fileType;
+
+ /**
+ * fileLength
+ */
+ private Long fileLength;
+
+ /**
+ * parentId
+ */
+ private Integer parentId;
+
+ /**
+ * level
+ */
+ private Integer level;
+
+ private static final long serialVersionUID = 1L;
+}
\ No newline at end of file
diff --git a/原yx-image-recognition/src/main/java/com/yuxue/enumtype/Direction.java b/原yx-image-recognition/src/main/java/com/yuxue/enumtype/Direction.java
new file mode 100644
index 00000000..e17914f9
--- /dev/null
+++ b/原yx-image-recognition/src/main/java/com/yuxue/enumtype/Direction.java
@@ -0,0 +1,46 @@
+package com.yuxue.enumtype;
+
+public enum Direction {
+
+ VERTICAL("VERTICAL","垂直"),
+ HORIZONTAL("HORIZONTAL","水平"),
+ UNKNOWN("UNKNOWN","未知");
+
+ public final String code;
+ public final String desc;
+
+ Direction(String code, String desc) {
+ this.code = code;
+ this.desc = desc;
+ }
+
+ public static String getDesc(String code) {
+ Direction[] enums = values();
+ for (Direction type : enums) {
+ if (type.code().equals(code)) {
+ return type.desc();
+ }
+ }
+ return null;
+ }
+
+ public static String getCode(String desc) {
+ Direction[] enums = values();
+ for (Direction type : enums) {
+ if (type.desc().equals(desc)) {
+ return type.code();
+ }
+ }
+ return null;
+ }
+
+
+ public String code() {
+ return this.code;
+ }
+
+ public String desc() {
+ return this.desc;
+ }
+
+}
diff --git a/原yx-image-recognition/src/main/java/com/yuxue/enumtype/PlateColor.java b/原yx-image-recognition/src/main/java/com/yuxue/enumtype/PlateColor.java
new file mode 100644
index 00000000..970efbcb
--- /dev/null
+++ b/原yx-image-recognition/src/main/java/com/yuxue/enumtype/PlateColor.java
@@ -0,0 +1,59 @@
+package com.yuxue.enumtype;
+
+
+/**
+ * 车牌颜色
+ * @author yuxue
+ * @date 2020-05-08 12:38
+ */
+public enum PlateColor {
+
+ BLUE("BLUE","蓝牌", 100, 130),
+ GREEN("GREEN","绿牌", 38, 100),
+ YELLOW("YELLOW","黄牌", 15, 40),
+ UNKNOWN("UNKNOWN","未知", 0, 0);
+
+ public final String code;
+ public final String desc;
+
+ // H ranges of each color in OpenCV's HSV space: Orange 0-22, Yellow 22-38, Green 38-75, Blue 75-130
+ public final int minH;
+ public final int maxH;
+
+ PlateColor(String code, String desc, int minH, int maxH) {
+ this.code = code;
+ this.desc = desc;
+ this.minH = minH;
+ this.maxH = maxH;
+ }
+
+ public static String getDesc(String code) {
+ PlateColor[] enums = values();
+ for (PlateColor type : enums) {
+ if (type.code().equals(code)) {
+ return type.desc();
+ }
+ }
+ return null;
+ }
+
+ public static String getCode(String desc) {
+ PlateColor[] enums = values();
+ for (PlateColor type : enums) {
+ if (type.desc().equals(desc)) {
+ return type.code();
+ }
+ }
+ return null;
+ }
+
+
+ public String code() {
+ return this.code;
+ }
+
+ public String desc() {
+ return this.desc;
+ }
+
+}
diff --git a/原yx-image-recognition/src/main/java/com/yuxue/exception/ErrorEnum.java b/原yx-image-recognition/src/main/java/com/yuxue/exception/ErrorEnum.java
new file mode 100644
index 00000000..581135b9
--- /dev/null
+++ b/原yx-image-recognition/src/main/java/com/yuxue/exception/ErrorEnum.java
@@ -0,0 +1,69 @@
+package com.yuxue.exception;
+
+/**
+ * 系统错误提示
+ * @author yuxue
+ * @date 2018-09-07
+ */
+public enum ErrorEnum {
+
+ // 200-->Success!
+ // 6000-->Fail!
+
+ // common
+ COMMON_ERROR("Fail!", 6000),
+ COMMON_PARAMS_ERR("提交参数不合法", 6001),
+ COMMON_PARAMS_ID_ERR("提交参数ID不合法", 6002),
+ COMMON_EMPTY_CONDITION_RESULT("没有找到符合条件的数据", 6003),
+ COMMON_PARAMS_NOT_EXIST("提交的字段不存在,或者参数格式错误", 6004),
+
+ // sql
+ SQL_ERROR("mysql通用错误", 6100),
+ SQL_INSERT_FAIL("增加失败", 6101),
+ SQL_DELETE_FAIL("删除失败", 6102),
+ SQL_UPDATE_FAIL("修改失败", 6103),
+ SQL_RECORD_EXIST("添加重复记录", 6104),
+ SQL_ID_NOT_EXIST("主键ID不能为空", 6105),
+ SQL_VERSION_NOT_EXIST("数据版本version不能为空", 6106),
+
+ // io
+ FILE_IO_ERROR("io通用错误", 6200),
+ FILE_NOT_EXIST("文件没找到,请联系管理员", 6201),
+ FILE_DATA_NULL("文档中不不存在有效的数据", 6202),
+ FILE_DATA_ERR("文档中的数据格式错误", 6203),
+
+ // form
+ INVALID_PASSWORD("密码格式错误", 6300),
+ INVALID_EMAIL("邮件格式错误", 6301),
+ INVALID_NAME("账号格式错误", 6302),
+ INVALID_PARAMS("填写字段不合法", 6303),
+
+
+ // shiro-login
+ NO_LOGIN("用户未登录", 401),
+ UNAUTHORIZED("权限不足", 7001),
+ ADMIN_ONLY("只有管理员账号可以调用这个接口", 6402),
+ NO_PERSSIOM("没有权限请求", 6403),
+ WRONG_ACCOUNT_OR_PSW("账号或密码错误", 6404),
+ WRONG_ACCOUNT_PSW("账号密码错误", 6405),
+ WRONG_ACCOUNT_WRONG("用户没有权限(令牌、用户名、密码错误)", 401),
+
+ // uploading
+ UPLOAD_FILE_TYPE_ERROR("上传文件格式错误", 6500),
+ UPLOAD_FILE_UPLOADING("uploading", 6501),
+ UPLOAD_FILE_NOT_EXIST("文件不存在", 6502),
+ UPLOAD_FILE_SIZE_MAX("上传的文件大小超出限制", 6503),
+
+ // es
+ ES_BIG_PAGE_SEARCH("单页查询数据不能超过10000!", 9000);
+
+ // NoSQL
+
+ public final String msg;
+ public final int code;
+
+ ErrorEnum(String msg, int code) {
+ this.msg = msg;
+ this.code = code;
+ }
+}
diff --git a/原yx-image-recognition/src/main/java/com/yuxue/exception/ResultReturnException.java b/原yx-image-recognition/src/main/java/com/yuxue/exception/ResultReturnException.java
new file mode 100644
index 00000000..db6697d1
--- /dev/null
+++ b/原yx-image-recognition/src/main/java/com/yuxue/exception/ResultReturnException.java
@@ -0,0 +1,53 @@
+package com.yuxue.exception;
+
+
+/**
+ * 自定义runtime异常
+ * @author yuxue
+ * @date 2018-09-07
+ */
+public class ResultReturnException extends RuntimeException {
+ private static final long serialVersionUID = 1L;
+
+ private String msg = ErrorEnum.COMMON_ERROR.msg;
+ private int code = ErrorEnum.COMMON_ERROR.code;
+
+ public ResultReturnException(ErrorEnum error) {
+ super(error.msg);
+ this.msg = error.msg;
+ this.code = error.code;
+ }
+
+ public ResultReturnException(String msg) {
+ super(msg);
+ this.msg = msg;
+ }
+
+ public ResultReturnException(String msg, Throwable e) {
+ super(msg, e);
+ this.msg = msg;
+ }
+
+ @Deprecated
+ public ResultReturnException(String msg, int code) {
+ super(msg);
+ this.msg = msg;
+ this.code = code;
+ }
+
+ @Deprecated
+ public ResultReturnException(String msg, int code, Throwable e) {
+ super(msg, e);
+ this.msg = msg;
+ this.code = code;
+ }
+
+ public String getMsg() {
+ return msg;
+ }
+
+ public int getCode() {
+ return code;
+ }
+
+}
diff --git a/原yx-image-recognition/src/main/java/com/yuxue/exception/ResultReturnExceptionHandler.java b/原yx-image-recognition/src/main/java/com/yuxue/exception/ResultReturnExceptionHandler.java
new file mode 100644
index 00000000..a5aca635
--- /dev/null
+++ b/原yx-image-recognition/src/main/java/com/yuxue/exception/ResultReturnExceptionHandler.java
@@ -0,0 +1,84 @@
+package com.yuxue.exception;
+
+
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.http.converter.HttpMessageNotReadableException;
+import org.springframework.web.bind.annotation.ExceptionHandler;
+import org.springframework.web.bind.annotation.RestControllerAdvice;
+import org.springframework.web.multipart.MultipartException;
+
+import com.yuxue.entity.Result;
+
+
+/**
+ * 捕获RestController抛出的异常
+ * @author yuxue
+ * @date 2018-09-06
+ */
+@RestControllerAdvice
+public class ResultReturnExceptionHandler {
+
+ protected static Logger log=LoggerFactory.getLogger(ResultReturnExceptionHandler.class);
+
+ /** 捕捉shiro的异常 *//*
+ @ResponseStatus(HttpStatus.UNAUTHORIZED)
+ @ExceptionHandler(ShiroException.class)
+ public Result handle401(ShiroException e) {
+ log.error(e.getMessage(), e);
+ return Result.error(ErrorEnum.UNAUTHORIZED);
+ }
+
+ *//** 捕捉UnauthorizedException *//*
+ @ResponseStatus(HttpStatus.UNAUTHORIZED)
+ @ExceptionHandler(UnauthorizedException.class)
+ public Result handle401() {
+ return Result.error(ErrorEnum.UNAUTHORIZED);
+ }*/
+
+ /** File upload size exceeded */
+ @ExceptionHandler(MultipartException.class)
+ public Result handleMultipart(Throwable t) {
+ log.error(t.getMessage(), t);
+ return Result.error(ErrorEnum.UPLOAD_FILE_SIZE_MAX);
+ }
+
+ /** Jackson-to-bean conversion failure */
+ @ExceptionHandler(HttpMessageNotReadableException.class)
+ public Result handleJsonConv(Throwable t) {
+ log.error(t.getMessage(), t);
+ return Result.error(ErrorEnum.COMMON_PARAMS_NOT_EXIST);
+ }
+
+ /** Illegal argument handler */
+ @ExceptionHandler(IllegalArgumentException.class)
+ public Result handleRRException(Throwable e) {
+ //log.error(e.getMessage(), e);
+ return Result.error(ErrorEnum.COMMON_PARAMS_ERR.code, e.getMessage());
+ }
+
+ /** Custom exceptions */
+ @ExceptionHandler(ResultReturnException.class)
+ public Result handleRRException(ResultReturnException e) {
+ log.error(exTraceBack(e), e);
+ return Result.error(e.getCode(), e.getMsg());
+ }
+
+ @ExceptionHandler(Exception.class)
+ public Result handleException(Exception e) {
+ log.error(exTraceBack(e), e);
+ return Result.error("系统发生错误,请联系管理员");
+ }
+
+ public static String exTraceBack(Exception e) {
+ StringBuilder sb = new StringBuilder();
+ StackTraceElement[] stackTrace = e.getStackTrace();
+ for (int i = 0; i < stackTrace.length; i++) {
+ sb.append("<---");
+ sb.append(String.format("[%s * %s] ", stackTrace[i].getClassName(), stackTrace[i].getMethodName()));
+ }
+ sb.append(e.getMessage());
+ return sb.toString();
+ }
+}
diff --git a/原yx-image-recognition/src/main/java/com/yuxue/mapper/PlateFileMapper.java b/原yx-image-recognition/src/main/java/com/yuxue/mapper/PlateFileMapper.java
new file mode 100644
index 00000000..2762cba7
--- /dev/null
+++ b/原yx-image-recognition/src/main/java/com/yuxue/mapper/PlateFileMapper.java
@@ -0,0 +1,25 @@
+package com.yuxue.mapper;
+
+import com.yuxue.entity.PlateFileEntity;
+import java.util.List;
+import java.util.Map;
+import org.apache.ibatis.annotations.Mapper;
+
+@Mapper
+public interface PlateFileMapper {
+ int deleteByPrimaryKey(Integer id);
+
+ int insert(PlateFileEntity record);
+
+ int insertSelective(PlateFileEntity record);
+
+ PlateFileEntity selectByPrimaryKey(Integer id);
+
+ List selectByCondition(Map map);
+
+ int updateByPrimaryKeySelective(PlateFileEntity record);
+
+ int updateByPrimaryKey(PlateFileEntity record);
+
+ List getUnRecogniseList();
+}
\ No newline at end of file
diff --git a/原yx-image-recognition/src/main/java/com/yuxue/mapper/PlateRecoDebugMapper.java b/原yx-image-recognition/src/main/java/com/yuxue/mapper/PlateRecoDebugMapper.java
new file mode 100644
index 00000000..180028e2
--- /dev/null
+++ b/原yx-image-recognition/src/main/java/com/yuxue/mapper/PlateRecoDebugMapper.java
@@ -0,0 +1,30 @@
+package com.yuxue.mapper;
+
+import java.util.List;
+import java.util.Map;
+
+import org.apache.ibatis.annotations.Mapper;
+import org.apache.ibatis.annotations.Param;
+
+import com.yuxue.entity.PlateRecoDebugEntity;
+
+@Mapper
+public interface PlateRecoDebugMapper {
+ int deleteByPrimaryKey(Integer id);
+
+ int insert(PlateRecoDebugEntity record);
+
+ int insertSelective(PlateRecoDebugEntity record);
+
+ PlateRecoDebugEntity selectByPrimaryKey(Integer id);
+
+ List selectByCondition(Map map);
+
+ int updateByPrimaryKeySelective(PlateRecoDebugEntity record);
+
+ int updateByPrimaryKey(PlateRecoDebugEntity record);
+
+ int deleteByParentId(@Param("parentId")Integer parentId);
+
+ int batchInsert(@Param("list")List list);
+}
\ No newline at end of file
diff --git a/原yx-image-recognition/src/main/java/com/yuxue/mapper/SystemMenuMapper.java b/原yx-image-recognition/src/main/java/com/yuxue/mapper/SystemMenuMapper.java
new file mode 100644
index 00000000..b6843ce0
--- /dev/null
+++ b/原yx-image-recognition/src/main/java/com/yuxue/mapper/SystemMenuMapper.java
@@ -0,0 +1,25 @@
+package com.yuxue.mapper;
+
+import java.util.List;
+import java.util.Map;
+
+import org.apache.ibatis.annotations.Mapper;
+
+import com.yuxue.entity.SystemMenuEntity;
+
+@Mapper
+public interface SystemMenuMapper {
+ int deleteByPrimaryKey(Integer id);
+
+ int insert(SystemMenuEntity record);
+
+ int insertSelective(SystemMenuEntity record);
+
+ SystemMenuEntity selectByPrimaryKey(Integer id);
+
+ List selectByCondition(Map map);
+
+ int updateByPrimaryKeySelective(SystemMenuEntity record);
+
+ int updateByPrimaryKey(SystemMenuEntity record);
+}
\ No newline at end of file
diff --git a/原yx-image-recognition/src/main/java/com/yuxue/mapper/TempPlateFileMapper.java b/原yx-image-recognition/src/main/java/com/yuxue/mapper/TempPlateFileMapper.java
new file mode 100644
index 00000000..9f6c5a9a
--- /dev/null
+++ b/原yx-image-recognition/src/main/java/com/yuxue/mapper/TempPlateFileMapper.java
@@ -0,0 +1,34 @@
+package com.yuxue.mapper;
+
+import java.util.List;
+import java.util.Map;
+
+import org.apache.ibatis.annotations.Mapper;
+import org.apache.ibatis.annotations.Param;
+
+import com.yuxue.entity.TempPlateFileEntity;
+
+@Mapper
+public interface TempPlateFileMapper {
+ int deleteByPrimaryKey(Integer id);
+
+ int insert(TempPlateFileEntity record);
+
+ int insertSelective(TempPlateFileEntity record);
+
+ TempPlateFileEntity selectByPrimaryKey(Integer id);
+
+ List selectByCondition(Map map);
+
+ int updateByPrimaryKeySelective(TempPlateFileEntity record);
+
+ int updateByPrimaryKey(TempPlateFileEntity record);
+
+ int turncateTable();
+
+ int batchInsert(@Param("list")List list);
+
+ int updateFileInfo();
+
+
+}
\ No newline at end of file
diff --git a/原yx-image-recognition/src/main/java/com/yuxue/service/FileService.java b/原yx-image-recognition/src/main/java/com/yuxue/service/FileService.java
new file mode 100644
index 00000000..dedea66c
--- /dev/null
+++ b/原yx-image-recognition/src/main/java/com/yuxue/service/FileService.java
@@ -0,0 +1,16 @@
+package com.yuxue.service;
+
+import java.io.File;
+import java.util.List;
+
+import com.alibaba.fastjson.JSONObject;
+
+
+public interface FileService {
+
+ List<JSONObject> getFileTreeByDir(String dir, String typeFilter);
+
+ File readFile(String filePath);
+
+
+}
\ No newline at end of file
diff --git a/原yx-image-recognition/src/main/java/com/yuxue/service/PlateService.java b/原yx-image-recognition/src/main/java/com/yuxue/service/PlateService.java
new file mode 100644
index 00000000..c857e7c2
--- /dev/null
+++ b/原yx-image-recognition/src/main/java/com/yuxue/service/PlateService.java
@@ -0,0 +1,16 @@
+package com.yuxue.service;
+
+
+public interface PlateService {
+
+ public Object getProcessStep();
+
+ Object recognise(String filePath, boolean reRecognise);
+
+ Object refreshFileInfo();
+
+ Object recogniseAll();
+
+
+
+}
\ No newline at end of file
diff --git a/原yx-image-recognition/src/main/java/com/yuxue/service/SystemMenuService.java b/原yx-image-recognition/src/main/java/com/yuxue/service/SystemMenuService.java
new file mode 100644
index 00000000..f821be55
--- /dev/null
+++ b/原yx-image-recognition/src/main/java/com/yuxue/service/SystemMenuService.java
@@ -0,0 +1,30 @@
+package com.yuxue.service;
+
+import java.util.List;
+import java.util.Map;
+
+import com.github.pagehelper.PageInfo;
+import com.yuxue.entity.SystemMenuEntity;
+
+
+/**
+ * 服务实现层接口
+ * @author yuxue
+ * @date 2019-06-20 16:15:23
+ */
+public interface SystemMenuService {
+
+ public SystemMenuEntity getByPrimaryKey(Integer id);
+
+ public PageInfo queryByPage(Integer pageNo, Integer pageSize, Map map);
+
+ public List queryByCondition(Map map);
+
+ public Map save(SystemMenuEntity systemMenuEntity);
+
+ public Integer deleteById(Integer id);
+
+ public Integer updateById(SystemMenuEntity systemMenuEntity);
+
+ public Object getUserMenu();
+}
diff --git a/原yx-image-recognition/src/main/java/com/yuxue/service/impl/FileServiceImpl.java b/原yx-image-recognition/src/main/java/com/yuxue/service/impl/FileServiceImpl.java
new file mode 100644
index 00000000..c9b69192
--- /dev/null
+++ b/原yx-image-recognition/src/main/java/com/yuxue/service/impl/FileServiceImpl.java
@@ -0,0 +1,62 @@
+package com.yuxue.service.impl;
+
+import java.io.File;
+import java.util.List;
+
+import org.springframework.stereotype.Service;
+
+import com.alibaba.druid.util.StringUtils;
+import com.alibaba.fastjson.JSONObject;
+import com.google.common.collect.Lists;
+import com.yuxue.constant.Constant;
+import com.yuxue.exception.ResultReturnException;
+import com.yuxue.service.FileService;
+import com.yuxue.util.FileUtil;
+
+
+@Service
+public class FileServiceImpl implements FileService {
+
+ @Override
+ public List<JSONObject> getFileTreeByDir(String dir, String typeFilter) {
+ if(StringUtils.isEmpty(dir)){
+ dir = Constant.DEFAULT_DIR;
+ }
+ if(StringUtils.isEmpty(typeFilter)){
+ typeFilter = Constant.DEFAULT_TYPE;
+ }
+
+ File f = new File(dir);
+ List<File> list = FileUtil.listFile(f, typeFilter, false);
+ List<JSONObject> result = Lists.newArrayList();
+ list.stream().forEach(n->{
+ JSONObject jo = new JSONObject();
+ jo.put("id", n.getAbsolutePath());
+ jo.put("pid", n.getParentFile().getAbsolutePath());
+ jo.put("filePath", n.getAbsolutePath());
+ jo.put("fileName", n.getName());
+ jo.put("isDir", n.isDirectory());
+ result.add(jo);
+ });
+ return result;
+ }
+
+
+ @Override
+ public File readFile(String filePath) {
+
+ File f = new File(filePath);
+ if(!f.exists() || f.isDirectory()) {
+ throw new ResultReturnException("filePath参数异常,找不到指定的文件: " + filePath);
+ }
+
+ if(!f.exists() || f.isDirectory()) {
+ throw new ResultReturnException("读取图片异常:" + f.getName());
+ }
+ return f;
+ }
+
+
+
+
+}
diff --git a/原yx-image-recognition/src/main/java/com/yuxue/service/impl/PlateServiceImpl.java b/原yx-image-recognition/src/main/java/com/yuxue/service/impl/PlateServiceImpl.java
new file mode 100644
index 00000000..547a53d7
--- /dev/null
+++ b/原yx-image-recognition/src/main/java/com/yuxue/service/impl/PlateServiceImpl.java
@@ -0,0 +1,271 @@
+package com.yuxue.service.impl;
+
+import java.io.File;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.Vector;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.bytedeco.javacpp.opencv_core.Mat;
+import org.bytedeco.javacpp.opencv_imgcodecs;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Service;
+import org.springframework.transaction.annotation.Propagation;
+import org.springframework.transaction.annotation.Transactional;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.yuxue.constant.Constant;
+import com.yuxue.easypr.core.CharsRecognise;
+import com.yuxue.easypr.core.CoreFunc;
+import com.yuxue.easypr.core.PlateDetect;
+import com.yuxue.entity.PlateFileEntity;
+import com.yuxue.entity.PlateRecoDebugEntity;
+import com.yuxue.entity.TempPlateFileEntity;
+import com.yuxue.enumtype.PlateColor;
+import com.yuxue.mapper.PlateFileMapper;
+import com.yuxue.mapper.PlateRecoDebugMapper;
+import com.yuxue.mapper.TempPlateFileMapper;
+import com.yuxue.service.PlateService;
+import com.yuxue.util.FileUtil;
+
+
+
+@Service
+public class PlateServiceImpl implements PlateService {
+
+
+ // 车牌定位处理步骤,该map用于表示步骤图片的顺序
+ private static Map<String, Integer> debugMap = Maps.newLinkedHashMap();
+ static {
+ debugMap.put("result", 99);
+ debugMap.put("debug_GaussianBlur", 0); // 高斯模糊
+ debugMap.put("debug_gray", 1); // 图像灰度化
+ debugMap.put("debug_Sobel", 2); // Sobel 算子
+ debugMap.put("debug_threshold", 3); //图像二值化
+ debugMap.put("debug_morphology", 4); // 图像闭操作
+ debugMap.put("debug_Contours", 5); // 提取外部轮廓
+ debugMap.put("debug_result", 6); // 原图处理结果
+ debugMap.put("debug_crop", 7); // 切图
+ debugMap.put("debug_resize", 8); // 切图resize
+ debugMap.put("debug_char_threshold", 9); //
+ // debugMap.put("debug_char_clearLiuDing", 10); // 去除柳钉
+ debugMap.put("debug_specMat", 11); //
+ debugMap.put("debug_chineseMat", 12); //
+ debugMap.put("debug_char_auxRoi", 13); //
+ }
+
+
+ @Autowired
+ private PlateFileMapper plateFileMapper;
+
+ @Autowired
+ private PlateRecoDebugMapper plateRecoDebugMapper;
+
+ @Autowired
+ private TempPlateFileMapper tempPlateFileMapper;
+
+
+ @Override
+ public Object recognise(String filePath, boolean reRecognise) {
+ filePath = filePath.replaceAll("\\\\", "/");
+ File f = new File(filePath);
+ PlateFileEntity e = null;
+
+ Map<String, Object> paramMap = Maps.newHashMap();
+ paramMap.put("filePath", filePath);
+ List<PlateFileEntity> list = plateFileMapper.selectByCondition(paramMap);
+ if(null == list || list.size() <= 0) {
+ if(FileUtil.checkFile(f)) {
+ e = new PlateFileEntity();
+ e.setFileName(f.getName());
+ e.setFilePath(f.getAbsolutePath().replaceAll("\\\\", "/"));
+ e.setFileType(f.getName().substring(f.getName().lastIndexOf(".") + 1));
+ plateFileMapper.insertSelective(e);
+ }
+ reRecognise = true;
+ } else {
+ e = list.get(0);
+ }
+
+ if (null == e) { // 文件不存在,数据库中也没有对应记录
+ return null;
+ }
+
+ if(reRecognise) {
+ doRecognise(f, e, 0); // 重新识别
+ e = plateFileMapper.selectByPrimaryKey(e.getId()); // 重新识别之后,重新获取一下数据
+ }
+
+ // 查询数据库,返回结果
+ paramMap.clear();
+ paramMap.put("parentId", e.getId());
+ e.setDebug(plateRecoDebugMapper.selectByCondition(paramMap));
+
+ return e;
+ }
+
+
+
+ @Override
+ @Transactional(propagation = Propagation.REQUIRED)
+ public Object refreshFileInfo() {
+ File baseDir = new File(Constant.DEFAULT_DIR);
+ if(!baseDir.exists() || !baseDir.isDirectory()) {
+ return null;
+ }
+ // 后续在并行流中向该集合添加元素,这里使用线程安全的List
+ List<TempPlateFileEntity> resultList = Lists.newCopyOnWriteArrayList();
+
+ // 获取baseDir下第一层级的目录, 仅获取文件夹,不递归子目录,遍历
+ List<File> folderList = FileUtil.listFile(baseDir, ";", false);
+ folderList.parallelStream().forEach(folder -> {
+ if(!folder.getName().equals("temp")) {
+ // 遍历每一个文件夹, 递归获取文件夹下的图片
+ List<File> imgList = FileUtil.listFile(folder, Constant.DEFAULT_TYPE, true);
+ if(null != imgList && imgList.size() > 0) {
+ imgList.parallelStream().forEach(n->{
+ TempPlateFileEntity entity = new TempPlateFileEntity();
+ entity.setFilePath(n.getAbsolutePath().replaceAll("\\\\", "/"));
+ entity.setFileName(n.getName());
+ entity.setFileType(n.getName().substring(n.getName().lastIndexOf(".") + 1));
+ resultList.add(entity);
+ });
+ }
+ }
+ });
+
+ tempPlateFileMapper.turncateTable();
+ tempPlateFileMapper.batchInsert(resultList);
+ tempPlateFileMapper.updateFileInfo();
+
+ return 1;
+ }
+
+
+ @Override
+ public Object recogniseAll() {
+ // 查询到还没有进行车牌识别的图片
+ List<PlateFileEntity> list = plateFileMapper.getUnRecogniseList();
+
+ // 开启多线程进行识别
+ Random r = new Random(99);
+ list.parallelStream().forEach(n->{
+ File f = new File(n.getFilePath());
+ if(FileUtil.checkFile(f)) {
+ doRecognise(f, n, r.nextInt());
+ }
+ });
+
+ return 1;
+ }
+
+
+ @Override
+ public Object getProcessStep() {
+ return debugMap;
+ }
+
+
+ /**
+ * 单张图片 车牌识别
+ * 拷贝文件到临时目录
+ * 过程及结果更新数据库
+ * @param f 调用方需要验证文件存在
+ * @param result
+ * @return
+ */
+ public Object doRecognise(File f, PlateFileEntity e, Integer seed) {
+
+ // 插入识别过程图片数据信息 通过temp文件夹的文件,更新数据库
+ // 后续在并行流中向该集合添加元素,这里使用线程安全的List
+ List<PlateRecoDebugEntity> debug = Lists.newCopyOnWriteArrayList();
+
+ Long ct = System.currentTimeMillis();
+ String targetPath = Constant.DEFAULT_TEMP_DIR.concat(ct.toString() + seed)
+ .concat(f.getAbsolutePath().substring(f.getAbsolutePath().lastIndexOf(".")));
+
+ // 先将文件拷贝并且重命名到不包含中文及特殊字符的目录下
+ FileUtil.copyAndRename(f.getAbsolutePath(), targetPath);
+
+ // 开始识别,生成过程及结果切图,将识别结果更新到数据库
+ Mat src = opencv_imgcodecs.imread(targetPath);
+
+ String tempPath = Constant.DEFAULT_TEMP_DIR + ct + "/";
+ FileUtil.createDir(tempPath); // 创建文件夹
+
+ // 车牌检测对象
+ PlateDetect plateDetect = new PlateDetect();
+ plateDetect.setPDLifemode(true);
+ plateDetect.setDebug(true, tempPath); // 将过程的图块保存到盘符
+
+ Vector<Mat> matVector = new Vector<>();
+ if (0 == plateDetect.plateDetect(src, matVector)) { // 定位及判断,获取到车牌图块Mat
+
+ CharsRecognise cr = new CharsRecognise();
+ cr.setCRDebug(true);
+ for (int i = 0; i < matVector.size(); ++i) { // 遍历车牌图块Mat,进行识别
+ Mat img = matVector.get(i);
+
+ String plate = cr.charsRecognise(img, tempPath); // 字符识别
+ PlateColor color = CoreFunc.getPlateType(img, true);
+ String fileName = "result_" + i + ".png";
+
+ // 识别的车牌,保存图片文件
+ String str = tempPath + fileName;
+ // 此方法生成的文件,中文名称都是乱码,试了各种编解码均无效,OpenCV自身的编解码问题。
+ opencv_imgcodecs.imwrite(str, img);
+ // 重命名文件,让生成的文件包含中文
+ // String newName = plate + "_"+ color + ".png";
+ // FileUtil.renameFile(str, newName);
+
+ PlateRecoDebugEntity de = new PlateRecoDebugEntity();
+ de.setRecoPlate(plate);
+ de.setFilePath(str);
+ de.setFileName(fileName);
+ de.setPlateColor(color.desc);
+ de.setParentId(e.getId());
+ de.setDebugType("result");
+ de.setSort(debugMap.get("result"));
+ debug.add(de);
+ }
+ } else {
+ e.setRecoCorrect(3); // 未检测到车牌
+ }
+
+ new File(targetPath).delete(); // 删除拷贝的文件
+
+ e.setTempPath(tempPath);
+
+ List<File> debugList = FileUtil.listFile(new File(tempPath), Constant.DEFAULT_TYPE, false);
+
+ debugList.parallelStream().forEach(d -> {
+ String name = d.getName().substring(0, d.getName().lastIndexOf("."));
+
+ Pattern pattern = Pattern.compile("\\d+$");
+ Matcher matcher = pattern.matcher(name);
+ if(matcher.find()) {
+ name = name.substring(0, name.lastIndexOf("_"));
+ }
+
+ if(!"result".equals(name)) {
+ PlateRecoDebugEntity de = new PlateRecoDebugEntity();
+ de.setRecoPlate("");
+ de.setFilePath(d.getAbsolutePath().replaceAll("\\\\", "/"));
+ de.setFileName(d.getName());
+ de.setPlateColor("");
+ de.setParentId(e.getId());
+ de.setDebugType(name);
+ de.setSort(debugMap.get(name));
+ debug.add(de);
+ }
+ });
+
+ // 更新图片主表信息
+ plateFileMapper.updateByPrimaryKeySelective(e);
+
+ plateRecoDebugMapper.deleteByParentId(e.getId());
+
+ plateRecoDebugMapper.batchInsert(debug);
+
+ return 1;
+ }
+
+
+}
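The tail of doRecognise maps each temp file back to a debugMap key by stripping a trailing numeric index from the file name. A small standalone sketch of that same mapping, using a hypothetical file name, which can help when checking which sort value a given debug image will receive:

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class DebugNameDemo {
        public static void main(String[] args) {
            String fileName = "debug_char_auxRoi_3.jpg";          // hypothetical temp file
            String name = fileName.substring(0, fileName.lastIndexOf("."));
            Matcher matcher = Pattern.compile("\\d+$").matcher(name);
            if (matcher.find()) {                                  // trailing index found
                name = name.substring(0, name.lastIndexOf("_"));
            }
            System.out.println(name);                              // debug_char_auxRoi -> sort 13 in debugMap
        }
    }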
diff --git a/原yx-image-recognition/src/main/java/com/yuxue/service/impl/SystemMenuServiceImpl.java b/原yx-image-recognition/src/main/java/com/yuxue/service/impl/SystemMenuServiceImpl.java
new file mode 100644
index 00000000..79c3cea5
--- /dev/null
+++ b/原yx-image-recognition/src/main/java/com/yuxue/service/impl/SystemMenuServiceImpl.java
@@ -0,0 +1,101 @@
+package com.yuxue.service.impl;
+
+
+import com.github.pagehelper.PageHelper;
+import com.github.pagehelper.PageInfo;
+import com.google.common.collect.Maps;
+import com.yuxue.entity.SystemMenuEntity;
+import com.yuxue.mapper.SystemMenuMapper;
+import com.yuxue.service.SystemMenuService;
+
+import org.springframework.transaction.annotation.Transactional;
+import org.springframework.transaction.annotation.Propagation;
+
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Service;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+/**
+ * 服务实现层
+ * @author yuxue
+ * @date 2019-06-20 16:15:23
+ */
+@Service
+public class SystemMenuServiceImpl implements SystemMenuService {
+
+ @Autowired
+ private SystemMenuMapper systemMenuMapper;
+
+
+ @Override
+ public SystemMenuEntity getByPrimaryKey(Integer id) {
+ SystemMenuEntity entity = systemMenuMapper.selectByPrimaryKey(id);
+ return entity;
+ }
+
+ @Override
+ public PageInfo queryByPage(Integer pageNo, Integer pageSize, Map map) {
+ PageHelper.startPage(pageNo, pageSize);
+ PageInfo page = new PageInfo(systemMenuMapper.selectByCondition(map));
+ return page;
+ }
+
+ @Override
+ public List queryByCondition(Map map) {
+ return systemMenuMapper.selectByCondition(map);
+ }
+
+ @Override
+ @Transactional(propagation = Propagation.REQUIRED)
+ public Map save(SystemMenuEntity entity) {
+ entity.setId(0);
+ systemMenuMapper.insertSelective(entity);
+
+ Map result = new HashMap<>();
+ result.put("id" , entity.getId());
+ return result;
+ }
+
+ @Override
+ @Transactional(propagation = Propagation.REQUIRED)
+ public Integer deleteById(Integer id){
+ return systemMenuMapper.deleteByPrimaryKey(id);
+ }
+
+ @Override
+ @Transactional(propagation = Propagation.REQUIRED)
+ public Integer updateById(SystemMenuEntity systemMenuEntity) {
+ if(null == systemMenuEntity || systemMenuEntity.getId() <= 0){
+ return 0;
+ }
+ return systemMenuMapper.updateByPrimaryKeySelective(systemMenuEntity);
+ }
+
+
+ @Override
+ public Object getUserMenu() {
+ Map<String, Object> map = Maps.newHashMap();
+ //根据角色查询菜单--未完成 //根据层级 sort排序
+ map.put("showFlag", 1);
+ List<SystemMenuEntity> menus = systemMenuMapper.selectByCondition(map);
+
+ //按层级封装,最多三级;Stream是惰性的,这里collect成List之后再返回
+ Map<String, Object> result = Maps.newHashMap();
+
+ result.put("first", menus.stream().filter(n -> n.getMenuLevel() == 1).collect(Collectors.toList()));
+ result.put("second", menus.stream().filter(n -> n.getMenuLevel() == 2).collect(Collectors.toList()));
+ result.put("third", menus.stream().filter(n -> n.getMenuLevel() == 3).collect(Collectors.toList()));
+ return result;
+ }
+
+
+
+}
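Since getUserMenu filters the same list three times, the per-level split can also be done in a single pass; a sketch using Collectors.groupingBy, assuming getMenuLevel() returns the 1/2/3 level used above (MenuGrouping is an illustrative class name, not part of this change):

    import java.util.List;
    import java.util.Map;
    import java.util.stream.Collectors;
    import com.yuxue.entity.SystemMenuEntity;

    public class MenuGrouping {
        // One pass over the menu list: key = menu level (1, 2, 3), value = menus on that level.
        public static Map<Integer, List<SystemMenuEntity>> groupByLevel(List<SystemMenuEntity> menus) {
            return menus.stream().collect(Collectors.groupingBy(SystemMenuEntity::getMenuLevel));
        }
    }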
diff --git a/原yx-image-recognition/src/main/java/com/yuxue/train/ANNTrain.java b/原yx-image-recognition/src/main/java/com/yuxue/train/ANNTrain.java
new file mode 100644
index 00000000..37c1f78a
--- /dev/null
+++ b/原yx-image-recognition/src/main/java/com/yuxue/train/ANNTrain.java
@@ -0,0 +1,215 @@
+package com.yuxue.train;
+
+import java.util.Random;
+import java.util.Vector;
+
+import org.opencv.core.Core;
+import org.opencv.core.CvType;
+import org.opencv.core.Mat;
+import org.opencv.core.TermCriteria;
+import org.opencv.imgcodecs.Imgcodecs;
+import org.opencv.ml.ANN_MLP;
+import org.opencv.ml.Ml;
+import org.opencv.ml.TrainData;
+
+import com.yuxue.constant.Constant;
+import com.yuxue.util.FileUtil;
+import com.yuxue.util.PlateUtil;
+
+
+/**
+ * 基于org.opencv包实现的训练
+ *
+ * 图片文字识别训练
+ * 训练出来的库文件,用于识别图片中的数字及字母
+ *
+ * 测试了一段时间之后,发现把中文独立出来识别,准确率更高一点
+ *
+ * 训练的ann.xml应用:
+ * 1、替换res/model/ann.xml文件
+ * 2、修改com.yuxue.easypr.core.CharsIdentify.charsIdentify(Mat, Boolean, Boolean)方法
+ *
+ * @author yuxue
+ * @date 2020-05-14 22:16
+ */
+public class ANNTrain {
+
+ private ANN_MLP ann = ANN_MLP.create();
+
+ static {
+ System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
+ }
+
+ // 默认的训练操作的根目录
+ private static final String DEFAULT_PATH = "D:/PlateDetect/train/chars_recognise_ann/";
+
+ // 训练模型文件保存位置
+ private static final String MODEL_PATH = DEFAULT_PATH + "ann.xml";
+
+ public void train(int _predictsize, int _neurons) {
+ Mat samples = new Mat(); // 使用push_back,行数列数不能赋初始值
+ Vector<Integer> trainingLabels = new Vector<>();
+ Random rand = new Random();
+ // 加载数字及字母字符
+ for (int i = 0; i < Constant.numCharacter; i++) {
+ String str = DEFAULT_PATH + "learn/" + Constant.strCharacters[i];
+ Vector<String> files = new Vector<>();
+ FileUtil.getFiles(str, files); // 文件名不能包含中文
+
+ // int count = 200; // 控制从训练样本中,抽取指定数量的样本
+ int count = files.size(); // 控制从训练样本中,抽取指定数量的样本
+ for (int j = 0; j < count; j++) {
+
+ String filename = "";
+ if(j < files.size()) {
+ filename = files.get(j);
+ } else {
+ filename = files.get(rand.nextInt(files.size() - 1)); // 样本不足,随机重复提取已有的样本
+ }
+
+ Mat img = Imgcodecs.imread(filename, 0);
+
+ Mat f = PlateUtil.features(img, _predictsize);
+ samples.push_back(f);
+ trainingLabels.add(i); // 每一幅字符图片所对应的字符类别索引下标
+
+ // 增加随机平移样本
+ samples.push_back(PlateUtil.features(PlateUtil.randTranslate(img), _predictsize));
+ trainingLabels.add(i);
+
+ // 增加随机旋转样本
+ samples.push_back(PlateUtil.features(PlateUtil.randRotate(img), _predictsize));
+ trainingLabels.add(i);
+
+ // 增加膨胀样本
+ samples.push_back(PlateUtil.features(PlateUtil.dilate(img), _predictsize));
+ trainingLabels.add(i);
+
+ // 增加腐蚀样本
+ /*samples.push_back(PlateUtil.features(PlateUtil.erode(img), _predictsize));
+ trainingLabels.add(i); */
+ }
+ }
+
+ samples.convertTo(samples, CvType.CV_32F);
+
+ //440 vhist.length + hhist.length + lowData.cols() * lowData.rows();
+ // CV_32FC1 CV_32SC1 CV_32F
+ Mat classes = Mat.zeros(trainingLabels.size(), Constant.strCharacters.length, CvType.CV_32F);
+
+ float[] labels = new float[trainingLabels.size()];
+ for (int i = 0; i < labels.length; ++i) {
+ classes.put(i, trainingLabels.get(i), 1.f);
+ }
+
+ // samples.type() == CV_32F || samples.type() == CV_32S
+ TrainData train_data = TrainData.create(samples, Ml.ROW_SAMPLE, classes);
+
+ ann.clear();
+ Mat layers = new Mat(1, 3, CvType.CV_32F);
+ layers.put(0, 0, samples.cols()); // 样本特征数 140 10*10 + 20+20
+ layers.put(0, 1, _neurons); // 神经元个数
+ layers.put(0, 2, classes.cols()); // 字符数
+
+ ann.setLayerSizes(layers);
+ ann.setActivationFunction(ANN_MLP.SIGMOID_SYM, 1, 1);
+ ann.setTrainMethod(ANN_MLP.BACKPROP);
+ TermCriteria criteria = new TermCriteria(TermCriteria.EPS + TermCriteria.MAX_ITER, 30000, 0.0001);
+ ann.setTermCriteria(criteria);
+ ann.setBackpropWeightScale(0.1);
+ ann.setBackpropMomentumScale(0.1);
+ ann.train(train_data);
+
+ // FileStorage fsto = new FileStorage(MODEL_PATH, FileStorage.WRITE);
+ // ann.write(fsto, "ann");
+ ann.save(MODEL_PATH);
+ }
+
+
+ public void predict() {
+ ann.clear();
+ ann = ANN_MLP.load(MODEL_PATH);
+
+ int total = 0;
+ int correct = 0;
+
+ // 遍历测试样本下的所有文件,计算预测准确率
+ for (int i = 0; i < Constant.strCharacters.length; i++) {
+
+ char c = Constant.strCharacters[i];
+ String path = DEFAULT_PATH + "learn/" + c;
+
+ Vector<String> files = new Vector<>();
+ FileUtil.getFiles(path, files);
+
+ for (String filePath : files) {
+
+ Mat img = Imgcodecs.imread(filePath, 0);
+ Mat f = PlateUtil.features(img, Constant.predictSize);
+
+ int index = 0;
+ double maxVal = -2;
+ Mat output = new Mat(1, Constant.strCharacters.length, CvType.CV_32F);
+ ann.predict(f, output); // 预测结果
+ for (int j = 0; j < Constant.strCharacters.length; j++) {
+ double val = output.get(0, j)[0];
+ if (val > maxVal) {
+ maxVal = val;
+ index = j;
+ }
+ }
+
+ // 膨胀
+ f = PlateUtil.features(PlateUtil.dilate(img), Constant.predictSize);
+ ann.predict(f, output); // 预测结果
+ for (int j = 0; j < Constant.strCharacters.length; j++) {
+ double val = output.get(0, j)[0];
+ if (val > maxVal) {
+ maxVal = val;
+ index = j;
+ }
+ }
+
+ String result = String.valueOf(Constant.strCharacters[index]);
+ if(result.equals(String.valueOf(c))) {
+ correct++;
+ } else {
+ // 删除异常样本
+ /*File f1 = new File(filePath);
+ f1.delete();*/
+
+ System.err.print(filePath);
+ System.err.println("\t预测结果:" + result);
+ }
+ total++;
+ }
+
+ }
+
+ System.out.print("total:" + total);
+ System.out.print("\tcorrect:" + correct);
+ System.out.print("\terror:" + (total - correct));
+ System.out.println("\t计算准确率为:" + correct / (total * 1.0));
+
+ // 实测结果 total:13178 correct:13139 error:39 计算准确率为:0.9970405220822584
+
+ return;
+ }
+
+ public static void main(String[] args) {
+
+ ANNTrain annT = new ANNTrain();
+ // 这里演示只训练model文件夹下的ann.xml,此模型是一个predictSize=10,neurons=40的ANN模型
+ // 可根据需要训练不同的predictSize或者neurons的ANN模型
+ // 根据机器的不同,训练时间不一样,但一般需要10分钟左右,所以慢慢等一会吧
+ // 可以考虑中文,数字字母分开训练跟识别,提高准确性
+ annT.train(Constant.predictSize, Constant.neurons);
+
+ annT.predict();
+
+ System.out.println("The end.");
+ return;
+ }
+
+
+}
\ No newline at end of file
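The two identical scan loops in predict() are an argmax over the ANN output row; a small helper showing just that step, assuming output is the 1×N CV_32F row filled by ann.predict (AnnOutput is an illustrative class name):

    import org.opencv.core.Mat;

    public class AnnOutput {
        // Index of the largest response in a 1-row Mat, i.e. the predicted class.
        public static int argmax(Mat output) {
            int index = 0;
            double maxVal = -Double.MAX_VALUE;
            for (int j = 0; j < output.cols(); j++) {
                double val = output.get(0, j)[0];
                if (val > maxVal) {
                    maxVal = val;
                    index = j;
                }
            }
            return index;
        }
    }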
diff --git a/原yx-image-recognition/src/main/java/com/yuxue/train/CnANNTrain.java b/原yx-image-recognition/src/main/java/com/yuxue/train/CnANNTrain.java
new file mode 100644
index 00000000..6dd4323c
--- /dev/null
+++ b/原yx-image-recognition/src/main/java/com/yuxue/train/CnANNTrain.java
@@ -0,0 +1,228 @@
+package com.yuxue.train;
+
+import java.io.File;
+import java.util.Random;
+import java.util.Vector;
+
+import org.opencv.core.Core;
+import org.opencv.core.CvType;
+import org.opencv.core.Mat;
+import org.opencv.core.TermCriteria;
+import org.opencv.imgcodecs.Imgcodecs;
+import org.opencv.ml.ANN_MLP;
+import org.opencv.ml.Ml;
+import org.opencv.ml.TrainData;
+
+import com.yuxue.constant.Constant;
+import com.yuxue.util.FileUtil;
+import com.yuxue.util.PlateUtil;
+
+
+/**
+ * 基于org.opencv官方包实现的训练
+ *
+ * 图片文字识别训练
+ * 训练出来的库文件,用于识别图片中的中文字符
+ * 测试了一段时间之后,发现把中文独立出来识别,准确率更高一点
+ *
+ * @author yuxue
+ * @date 2020-07-02 22:16
+ */
+public class CnANNTrain {
+
+ private ANN_MLP ann = ANN_MLP.create();
+
+ static {
+ System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
+ }
+
+ // 默认的训练操作的根目录
+ private static final String DEFAULT_PATH = "D:/PlateDetect/train/chars_recognise_ann/";
+
+ // 训练模型文件保存位置
+ private static final String MODEL_PATH = DEFAULT_PATH + "ann_cn.xml";
+
+
+ public void train(int _predictsize, int _neurons) {
+ Mat samples = new Mat(); // 使用push_back,行数列数不能赋初始值
+ Vector<Integer> trainingLabels = new Vector<>();
+ Random rand = new Random();
+
+ // 加载汉字字符
+ for (int i = 0; i < Constant.strChinese.length; i++) {
+ String str = DEFAULT_PATH + "learn/" + Constant.strChinese[i];
+ Vector<String> files = new Vector<>();
+ FileUtil.getFiles(str, files);
+
+ // int count = 300; // 控制从训练样本中,抽取指定数量的样本
+ int count = files.size(); // 不添加随机样本
+ for (int j = 0; j < count; j++) {
+
+ String filename = "";
+ if(j < files.size()) {
+ filename = files.get(j);
+ } else {
+ filename = files.get(rand.nextInt(files.size() - 1)); // 样本不足,随机重复提取已有的样本
+ }
+
+ Mat img = Imgcodecs.imread(filename, 0);
+
+ // 原图样本
+ samples.push_back(PlateUtil.features(img, _predictsize));
+ trainingLabels.add(i);
+
+ // 增加随机平移样本
+ samples.push_back(PlateUtil.features(PlateUtil.randTranslate(img), _predictsize));
+ trainingLabels.add(i);
+
+ // 增加随机旋转样本
+ samples.push_back(PlateUtil.features(PlateUtil.randRotate(img), _predictsize));
+ trainingLabels.add(i);
+
+ // 增加腐蚀样本
+ samples.push_back(PlateUtil.features(PlateUtil.erode(img), _predictsize));
+ trainingLabels.add(i);
+ }
+ }
+
+ samples.convertTo(samples, CvType.CV_32F);
+
+ //440 vhist.length + hhist.length + lowData.cols() * lowData.rows();
+ // CV_32FC1 CV_32SC1 CV_32F
+ Mat classes = Mat.zeros(trainingLabels.size(), Constant.strChinese.length, CvType.CV_32F);
+
+ float[] labels = new float[trainingLabels.size()];
+ for (int i = 0; i < labels.length; ++i) {
+ classes.put(i, trainingLabels.get(i), 1.f);
+ }
+
+ // samples.type() == CV_32F || samples.type() == CV_32S
+ TrainData train_data = TrainData.create(samples, Ml.ROW_SAMPLE, classes);
+
+ ann.clear();
+ Mat layers = new Mat(1, 3, CvType.CV_32F);
+ layers.put(0, 0, samples.cols()); // 样本特征数 140 10*10 + 20+20
+ layers.put(0, 1, _neurons); // 神经元个数
+ layers.put(0, 2, classes.cols()); // 字符数
+
+ ann.setLayerSizes(layers);
+ ann.setActivationFunction(ANN_MLP.SIGMOID_SYM, 1, 1);
+ ann.setTrainMethod(ANN_MLP.BACKPROP);
+ TermCriteria criteria = new TermCriteria(TermCriteria.EPS + TermCriteria.MAX_ITER, 30000, 0.0001);
+ ann.setTermCriteria(criteria);
+ ann.setBackpropWeightScale(0.1);
+ ann.setBackpropMomentumScale(0.1);
+ ann.train(train_data);
+
+ // FileStorage fsto = new FileStorage(MODEL_PATH, FileStorage.WRITE);
+ // ann.write(fsto, "ann");
+ ann.save(MODEL_PATH);
+ }
+
+
+ public void predict() {
+ ann.clear();
+ ann = ANN_MLP.load(MODEL_PATH);
+
+ int total = 0;
+ int correct = 0;
+
+ // 遍历测试样本下的所有文件,计算预测准确率
+ for (int i = 0; i < Constant.strChinese.length; i++) {
+
+ String strChinese = Constant.strChinese[i];
+ String path = DEFAULT_PATH + "learn/" + strChinese;
+ Vector<String> files = new Vector<>();
+ FileUtil.getFiles(path, files);
+
+ for (String filePath : files) {
+ Mat img = Imgcodecs.imread(filePath, 0);
+ Mat f = PlateUtil.features(img, Constant.predictSize);
+
+ int index = 0;
+ double maxVal = -2;
+
+ Mat output = new Mat(1, Constant.strChinese.length, CvType.CV_32F);
+ ann.predict(f, output); // 预测结果
+ for (int j = 0; j < Constant.strChinese.length; j++) {
+ double val = output.get(0, j)[0];
+ if (val > maxVal) {
+ maxVal = val;
+ index = j;
+ }
+ }
+
+ // 腐蚀 -- 识别中文字符效果会好一点,识别数字及字母效果会更差
+ f = PlateUtil.features(PlateUtil.erode(img), Constant.predictSize);
+ ann.predict(f, output); // 预测结果
+ for (int j = 0; j < Constant.strChinese.length; j++) {
+ double val = output.get(0, j)[0];
+ if (val > maxVal) {
+ maxVal = val;
+ index = j;
+ }
+ }
+
+ String result = Constant.strChinese[index];
+
+ if(result.equals(strChinese)) {
+ correct++;
+ } else {
+ // 删除异常样本
+ /*File f1 = new File(filePath);
+ f1.delete();*/
+
+ System.err.print(filePath);
+ System.err.println("\t预测结果:" + Constant.KEY_CHINESE_MAP.get(result));
+ }
+ total++;
+ }
+ }
+ System.out.print("total:" + total);
+ System.out.print("\tcorrect:" + correct);
+ System.out.print("\terror:" + (total - correct));
+ System.out.println("\t计算准确率为:" + correct / (total * 1.0));
+
+ //预测结果:
+ //单字符100样本数 total:3230 correct:2725 error:505 计算准确率为:0.8436532507739938
+ //单字符200样本数 total:3230 correct:2889 error:341 计算准确率为:0.8944272445820434
+ //单字符300样本数 total:3230 correct:2943 error:287 计算准确率为:0.9111455108359133
+ //单字符400样本数 total:3230 correct:2937 error:293 计算准确率为:0.9092879256965944
+ //无随机样本 total:3230 correct:3050 error:180 计算准确率为:0.9442724458204335
+ //无随机,删除异常样本 total:3050 correct:2987 error:63 计算准确率为:0.979344262295082
+ //无随机,删除异常样本 total:2987 correct:2973 error:14 计算准确率为:0.9953130231001004
+ //无随机,删除异常样本 total:2987 correct:2932 error:55 计算准确率为:0.9815868764646802
+ //无随机,删除异常样本 total:2987 correct:2971 error:16 计算准确率为:0.9946434549715434
+
+ // 个人测试多次之后,得出结论:
+ // 1、每个字符下样本数量不一致,最多的299个样本,最少的不到10个样本;从测试结果来看,样本太少会影响预测结果
+ // 2、这里的训练跟测试的样本都是基于相同的样本文件,所以测试结果存在一定的局限性,仅供参考;
+ // 3、测试过程中,使用了随机样本,实际上发现重复样本对预测结果影响不大
+ // 4、中文字符分离出来之后,预测准确性要高很多
+ // 5、随机平移、随机旋转、膨胀、腐蚀,会增加样本数量,同时增加预测准确性
+ // 6、每次重新训练后,结果是不一致的;没有重新训练,多次使用样本预测,结果是一致的
+ // 7、经过多次测试,这里的训练方案跟预测结果,准确率在90%左右
+ // 8、用于训练的样本,尽量要多一点,样本特征丰富一点,这样子可以提高准确性;但是用于预测的样本,要尽量规范、正常
+
+ return;
+ }
+
+
+ public static void main(String[] args) {
+
+ CnANNTrain annT = new CnANNTrain();
+
+ // 这里演示只训练model文件夹下的ann.xml,此模型是一个predictSize=10,neurons=40的ANN模型
+ // 可根据需要训练不同的predictSize或者neurons的ANN模型
+ // 根据机器的不同,训练时间不一样,但一般需要10分钟左右,所以慢慢等一会吧
+ // 可以考虑中文,数字字母分开训练跟识别,提高准确性
+ annT.train(Constant.predictSize, Constant.neurons);
+
+ annT.predict();
+
+ System.out.println("The end.");
+ return;
+ }
+
+
+}
\ No newline at end of file
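PlateUtil.randTranslate and randRotate are referenced but not part of this diff; purely as an illustration of the kind of augmentation being pushed into samples, here is a translate sketch built on warpAffine. The ±2 px offset range and the class name AugmentSketch are assumptions, not values taken from PlateUtil:

    import java.util.Random;
    import org.opencv.core.CvType;
    import org.opencv.core.Mat;
    import org.opencv.core.Size;
    import org.opencv.imgproc.Imgproc;

    public class AugmentSketch {
        private static final Random RAND = new Random();

        // Shift the sample a few pixels in x/y; uncovered border pixels default to 0 (black).
        public static Mat randTranslate(Mat in) {
            int dx = RAND.nextInt(5) - 2;              // assumed range: -2..2 px
            int dy = RAND.nextInt(5) - 2;
            Mat trans = Mat.eye(2, 3, CvType.CV_32F);  // [[1,0,dx],[0,1,dy]]
            trans.put(0, 2, dx);
            trans.put(1, 2, dy);
            Mat out = new Mat();
            Imgproc.warpAffine(in, out, trans, new Size(in.cols(), in.rows()));
            return out;
        }
    }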
diff --git a/原yx-image-recognition/src/main/java/com/yuxue/train/SVMTrain.java b/原yx-image-recognition/src/main/java/com/yuxue/train/SVMTrain.java
new file mode 100644
index 00000000..faba4fb8
--- /dev/null
+++ b/原yx-image-recognition/src/main/java/com/yuxue/train/SVMTrain.java
@@ -0,0 +1,279 @@
+package com.yuxue.train;
+
+import java.io.File;
+import java.util.List;
+
+import org.opencv.core.Core;
+import org.opencv.core.Core.MinMaxLocResult;
+import org.opencv.core.CvType;
+import org.opencv.core.Mat;
+import org.opencv.core.TermCriteria;
+import org.opencv.imgcodecs.Imgcodecs;
+import org.opencv.imgproc.Imgproc;
+import org.opencv.ml.Ml;
+import org.opencv.ml.SVM;
+import org.opencv.ml.TrainData;
+
+import com.google.common.collect.Lists;
+import com.yuxue.constant.Constant;
+import com.yuxue.enumtype.Direction;
+import com.yuxue.util.FileUtil;
+
+/**
+ * 基于org.opencv官方包实现的训练
+ *
+ * windows下环境配置:
+ * 1、官网下载对应版本的opencv:https://opencv.org/releases/page/2/ 当前使用4.0.1版本
+ * 2、双击exe文件安装,将 安装目录下\build\java\x64\opencv_java401.dll 拷贝到\build\x64\vc14\bin\目录下
+ * 3、eclipse添加User Libraries
+ * 4、项目右键build path,添加步骤三新增的lib
+ *
+ * 图片识别车牌训练
+ * 训练出来的库文件,用于判断切图是否包含车牌
+ *
+ * 训练的svm.xml应用:
+ * 1、替换res/model/svm.xml文件
+ * 2、修改com.yuxue.easypr.core.PlateJudge.plateJudge(Mat) 方法
+ * 将样本处理方法切换一下,即将对应被注释掉的模块代码取消注释
+ * @author yuxue
+ * @date 2020-05-13 10:10
+ */
+public class SVMTrain {
+
+ // 默认的训练操作的根目录
+ private static final String DEFAULT_PATH = "D:/PlateDetect/train/plate_detect_svm/";
+
+ // 训练模型文件保存位置
+ private static final String MODEL_PATH = DEFAULT_PATH + "svm2.xml";
+
+ static {
+ System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
+ }
+
+ public static void main(String[] arg) {
+ // 训练, 生成svm.xml库文件
+ train();
+
+ // 识别,判断样本文件是否是车牌
+ predict();
+ }
+
+ public static void train() {
+
+ // 正样本 // 136 × 36 像素 训练的源图像文件要相同大小
+ List<File> imgList0 = FileUtil.listFile(new File(DEFAULT_PATH + "/learn/HasPlate"), Constant.DEFAULT_TYPE, false);
+
+ // 负样本 // 136 × 36 像素 训练的源图像文件要相同大小
+ List<File> imgList1 = FileUtil.listFile(new File(DEFAULT_PATH + "/learn/NoPlate"), Constant.DEFAULT_TYPE, false);
+
+ // 标记:正样本用 0 表示,负样本用 1 表示。
+ int labels[] = createLabelArray(imgList0.size(), imgList1.size());
+ int sample_num = labels.length; // 图片数量
+
+ // 用于存放所有样本的矩阵
+ Mat trainingDataMat = null;
+
+ // 存放标记的Mat,每个图片都要给一个标记
+ Mat labelsMat = new Mat(sample_num, 1, CvType.CV_32SC1);
+ labelsMat.put(0, 0, labels);
+
+ for (int i = 0; i < sample_num; i++) { // 遍历所有的正负样本,处理样本用于生成训练的库文件
+ String path = "";
+ if(i < imgList0.size()) {
+ path = imgList0.get(i).getAbsolutePath();
+ } else {
+ path = imgList1.get(i - imgList0.size()).getAbsolutePath();
+ }
+
+ Mat inMat = Imgcodecs.imread(path); // 读取样本文件
+ Mat dst = getFeature(inMat); // 获取样本文件的特征
+
+ // 创建一个行数为sample_num, 列数为 rows*cols 的矩阵; 用于存放样本
+ if (trainingDataMat == null) {
+ trainingDataMat = new Mat(sample_num, dst.rows() * dst.cols(), CvType.CV_32F);
+ }
+
+ // 将样本矩阵转换成只有一行的矩阵,保存为float数组
+ float[] arr = new float[dst.rows() * dst.cols()];
+ int l = 0;
+ for (int j = 0; j < dst.rows(); j++) { // 遍历行
+ for (int k = 0; k < dst.cols(); k++) { // 遍历列
+ double[] a = dst.get(j, k);
+ arr[l] = (float) a[0];
+ l++;
+ }
+ }
+
+ trainingDataMat.put(i, 0, arr); // 多张图的特征合并到一个矩阵
+ }
+
+ // Imgcodecs.imwrite(DEFAULT_PATH + "trainingDataMat.jpg", trainingDataMat);
+
+ // 配置SVM训练器参数
+ TermCriteria criteria = new TermCriteria(TermCriteria.EPS + TermCriteria.MAX_ITER, 20000, 0.0001);
+ SVM svm = SVM.create();
+ svm.setTermCriteria(criteria); // 指定
+ svm.setKernel(SVM.RBF); // 使用预先定义的内核初始化
+ svm.setType(SVM.C_SVC); // SVM的类型,默认是:SVM.C_SVC
+ svm.setGamma(0.1); // 核函数的参数
+ svm.setNu(0.1); // SVM优化问题参数
+ svm.setC(1); // SVM优化问题的参数C
+ svm.setP(0.1);
+ svm.setDegree(0.1);
+ svm.setCoef0(0.1);
+
+ TrainData td = TrainData.create(trainingDataMat, Ml.ROW_SAMPLE, labelsMat);// 类封装的训练数据
+ boolean success = svm.train(td.getSamples(), Ml.ROW_SAMPLE, td.getResponses());// 训练统计模型
+ System.out.println("svm training result: " + success);
+ svm.save(MODEL_PATH);// 保存模型
+ }
+
+
+ public static void predict() {
+ // 加载训练得到的 xml 模型文件
+ SVM svm = SVM.load(MODEL_PATH);
+
+ // 136 × 36 像素 需要跟训练的源图像文件保持相同大小
+ doPredict(svm, DEFAULT_PATH + "test/A01_NMV802_0.jpg");
+ doPredict(svm, DEFAULT_PATH + "test/debug_resize_1.jpg");
+ doPredict(svm, DEFAULT_PATH + "test/debug_resize_2.jpg");
+ doPredict(svm, DEFAULT_PATH + "test/debug_resize_3.jpg");
+ doPredict(svm, DEFAULT_PATH + "test/S22_KG2187_3.jpg");
+ doPredict(svm, DEFAULT_PATH + "test/S22_KG2187_5.jpg");
+ }
+
+ public static void doPredict(SVM svm, String imgPath) {
+ Mat src = Imgcodecs.imread(imgPath);
+ Mat dst = getFeature(src);
+ // predict 返回训练时设置的标记值:正样本(车牌)返回 0,负样本返回 1
+ float flag = svm.predict(dst);
+
+ if (flag == 0) {
+ System.err.println(imgPath + ": 目标符合");
+ }
+ if (flag == 1) {
+ System.out.println(imgPath + ": 目标不符合");
+ }
+ }
+
+ public static int[] createLabelArray(Integer i1, Integer i2) {
+ int labels[] = new int[i1 + i2];
+
+ for (int i = 0; i < labels.length; i++) {
+ if(i < i1) {
+ labels[i] = 0;
+ } else {
+ labels[i] = 1;
+ }
+ }
+ return labels;
+ }
+
+
+ public static Mat getFeature(Mat inMat) {
+
+ Mat histogram = getHistogramFeatures(inMat);
+ Mat color = getColorFeatures(inMat);
+
+ List<Mat> list = Lists.newArrayList();
+ list.add(histogram);
+ list.add(color);
+
+ Mat dst = new Mat();
+ // hconcat 水平拼接 // vconcat 垂直拼接
+ Core.hconcat(list, dst);
+ return dst;
+ }
+
+
+ public static Mat getHistogramFeatures(Mat src) {
+ Mat img_grey = new Mat();
+ Imgproc.cvtColor(src, img_grey, Imgproc.COLOR_BGR2GRAY);
+
+ Mat img_threshold = new Mat();
+ Imgproc.threshold(img_grey, img_threshold, 0, 255, Imgproc.THRESH_OTSU + Imgproc.THRESH_BINARY);
+
+ // Histogram features
+ float[] vhist = projectedHistogram(img_threshold, Direction.VERTICAL);
+ float[] hhist = projectedHistogram(img_threshold, Direction.HORIZONTAL);
+
+ // 特征向量维度 = 垂直投影直方图长度 + 水平投影直方图长度
+ int numCols = vhist.length + hhist.length;
+
+ Mat features = Mat.zeros(1, numCols, CvType.CV_32F);
+ int j = 0;
+ for (int i = 0; i < vhist.length; i++) {
+ features.put(0, j, vhist[i]);
+ j++;
+ }
+ for (int i = 0; i < hhist.length; i++) {
+ features.put(0, j, hhist[i]);
+ j++;
+ }
+ return features;
+ }
+
+ public static float[] projectedHistogram(Mat inMat, Direction direction){
+ Mat img = new Mat();
+ inMat.copyTo(img);
+ int sz = img.rows();
+ if(Direction.VERTICAL.equals(direction)) {
+ sz = img.cols();
+ }
+ // 统计这一行或一列中,非零元素的个数,并保存到nonZeroMat中
+ float[] nonZeroMat = new float[sz];
+ Core.extractChannel(img, img, 0); // 提取0通道
+ for (int j = 0; j < sz; j++) {
+ Mat data = Direction.HORIZONTAL.equals(direction) ? img.row(j) : img.col(j);
+ int count = Core.countNonZero(data);
+ nonZeroMat[j] = count;
+ }
+ // Normalize histogram
+ float max = 1F;
+ for (int j = 0; j < nonZeroMat.length; j++) {
+ max = Math.max(max, nonZeroMat[j]);
+ }
+ for (int j = 0; j < nonZeroMat.length; j++) {
+ nonZeroMat[j] /= max;
+ }
+ return nonZeroMat;
+ }
+
+
+ public static Mat getColorFeatures(Mat src) {
+ Mat src_hsv = new Mat();
+ Imgproc.cvtColor(src, src_hsv, Imgproc.COLOR_BGR2HSV); // 转HSV空间,后续统计H通道直方图
+
+ int sz = 180;
+ int[] h = new int[180];
+
+ for (int i = 0; i < src_hsv.rows(); i++) {
+ for (int j = 0; j < src_hsv.cols(); j++) {
+ int H = (int) src_hsv.get(i, j)[0];// 0-180
+ if (H > sz - 1) {
+ H = sz - 1;
+ }
+ if (H < 0) {
+ H = 0;
+ }
+ h[H]++;
+ }
+ }
+ // 创建黑色的图
+ Mat features = Mat.zeros(1, sz, CvType.CV_32F);
+
+ for (int j = 0; j < sz; j++) {
+ features.put(0, j, (float)h[j]);
+ }
+
+ MinMaxLocResult m = Core.minMaxLoc(features);
+ double max = m.maxVal;
+
+ if (max > 0) {
+ features.convertTo(features, -1, 1.0f / max, 0);
+ }
+ return features;
+ }
+
+
+}
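For orientation, the row that getFeature feeds to the SVM is the horizontal concatenation of the projected-histogram part and the 180-bin H-channel part. A quick check of that layout, assuming a 136×36 input like the training samples (FeatureLayoutDemo is illustrative; with those dimensions the expected shape is 1 × (136 + 36 + 180) = 1 × 352):

    import org.opencv.core.Core;
    import org.opencv.core.CvType;
    import org.opencv.core.Mat;
    import com.yuxue.train.SVMTrain;

    public class FeatureLayoutDemo {
        public static void main(String[] args) {
            System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
            Mat sample = Mat.zeros(36, 136, CvType.CV_8UC3);  // dummy 136 x 36 plate-sized image
            Mat feature = SVMTrain.getFeature(sample);
            // 136 column bins + 36 row bins + 180 H bins
            System.out.println(feature.rows() + " x " + feature.cols());  // expected: 1 x 352
        }
    }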
diff --git a/原yx-image-recognition/src/main/java/com/yuxue/util/Convert.java b/原yx-image-recognition/src/main/java/com/yuxue/util/Convert.java
new file mode 100644
index 00000000..4e447336
--- /dev/null
+++ b/原yx-image-recognition/src/main/java/com/yuxue/util/Convert.java
@@ -0,0 +1,87 @@
+package com.yuxue.util;
+
+import org.bytedeco.javacpp.BytePointer;
+
+/**
+ * There are 3 kinds of convert functions:
+ * 1. [float|double|int|long] to[Float|Double|Int|Long](BytePointer pointer)
+ * 2. byte[] getBytes([float|double|int|long] value)
+ * 3. [float|double|int|long] to[Float|Double|Int|Long](byte[] value)
+ *
+ * @author lin.yao
+ *
+ */
+public class Convert {
+
+ public static float toFloat(BytePointer pointer) {
+ byte[] buffer = new byte[4];
+ pointer.get(buffer);
+ return toFloat(buffer);
+ }
+
+ public static double toDouble(BytePointer pointer) {
+ byte[] buffer = new byte[8];
+ pointer.get(buffer);
+ return toDouble(buffer);
+ }
+
+ public static int toInt(BytePointer pointer) {
+ byte[] buffer = new byte[4];
+ pointer.get(buffer);
+ return toInt(buffer);
+ }
+
+ public static long toLong(BytePointer pointer) {
+ byte[] buffer = new byte[8];
+ pointer.get(buffer);
+ return toLong(buffer);
+ }
+
+ public static byte[] getBytes(float value) {
+ return getBytes(Float.floatToIntBits(value));
+ }
+
+ public static byte[] getBytes(double value) {
+ return getBytes(Double.doubleToLongBits(value));
+ }
+
+ public static byte[] getBytes(int value) {
+ final int length = 4;
+ byte[] buffer = new byte[length];
+ for (int i = 0; i < length; ++i)
+ buffer[i] = (byte) ((value >> (i * 8)) & 0xFF);
+ return buffer;
+ }
+
+ public static byte[] getBytes(long value) {
+ final int length = 8;
+ byte[] buffer = new byte[length];
+ for (int i = 0; i < length; ++i)
+ buffer[i] = (byte) ((value >> (i * 8)) & 0xFF);
+ return buffer;
+ }
+
+ public static int toInt(byte[] value) {
+ final int length = 4;
+ int n = 0;
+ for (int i = 0; i < length; ++i)
+ n += (value[i] & 0xFF) << (i * 8);
+ return n;
+ }
+
+ public static long toLong(byte[] value) {
+ final int length = 8;
+ long n = 0;
+ for (int i = 0; i < length; ++i)
+ n += ((long) (value[i] & 0xFF)) << (i * 8);
+ return n;
+ }
+
+ public static double toDouble(byte[] value) {
+ return Double.longBitsToDouble(toLong(value));
+ }
+
+ public static float toFloat(byte[] value) {
+ return Float.intBitsToFloat(toInt(value));
+ }
+}
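The byte order produced by getBytes and expected by toFloat/toInt above is little-endian (least significant byte first). A quick round-trip check:

    import java.util.Arrays;
    import com.yuxue.util.Convert;

    public class ConvertDemo {
        public static void main(String[] args) {
            float original = 3.14f;
            byte[] raw = Convert.getBytes(original);   // 4 bytes, least significant byte first
            System.out.println(Arrays.toString(raw));
            System.out.println(Convert.toFloat(raw));  // prints 3.14 again
        }
    }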
diff --git a/原yx-image-recognition/src/main/java/com/yuxue/util/FileUtil.java b/原yx-image-recognition/src/main/java/com/yuxue/util/FileUtil.java
new file mode 100644
index 00000000..55c45114
--- /dev/null
+++ b/原yx-image-recognition/src/main/java/com/yuxue/util/FileUtil.java
@@ -0,0 +1,196 @@
+package com.yuxue.util;
+
+import java.io.File;
+import java.io.FileFilter;
+import java.io.IOException;
+import java.nio.file.FileAlreadyExistsException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Vector;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+
+import com.yuxue.exception.ResultReturnException;
+
+
+/**
+ *
+ * @author yuxue
+ * @date 2020-04-19 15:23
+ */
+public class FileUtil {
+
+ static Lock lock = new ReentrantLock();
+
+ public static boolean copyAndRename(String from, String to) {
+ Path sourcePath = Paths.get(from);
+ Path destinationPath = Paths.get(to);
+ try {
+ Files.copy(sourcePath, destinationPath);
+ } catch(FileAlreadyExistsException e) {
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ return true;
+ }
+
+ public static boolean checkFile(final File file) {
+ if(file.exists() && file.isFile()) {
+ return true;
+ }
+ return false;
+ }
+
+ /**
+ * 重命名文件
+ * @param file
+ * @param newName 可以是文件名,也可以是路径+文件名
+ * @return
+ */
+ public static boolean renameFile(String filePath, String newName) {
+ File file = new File(filePath);
+ return renameFile(file, newName);
+ }
+
+ /**
+ * 重命名文件
+ * @param file
+ * @param newName 可以是文件名,也可以是路径+文件名
+ * @return
+ */
+ public static boolean renameFile(File file, String newName) {
+ if(file.exists()) {
+ String targetPath = null;
+ if(newName.indexOf("/") >= 0 || newName.indexOf("\\\\") >= 0) {
+ targetPath = newName;
+ } else {
+ targetPath = file.getParentFile().getAbsolutePath() + "/" + newName;
+ }
+
+ File targetFile = new File(targetPath);
+ file.renameTo(targetFile);
+ return true;
+ }
+ return false;
+ }
+
+ public static void createDir(String dir) {
+ File file = new File(dir);
+ if(file.exists() && file.isDirectory()) {
+ return ;
+ } else {
+ file.mkdirs();
+ }
+ }
+
+ /**
+ * 删除并重新创建目录
+ * @param dir
+ */
+ public static void recreateDir(final String dir) {
+ new File(dir).delete();
+ new File(dir).mkdir();
+ }
+
+
+ /**
+ * 递归获取文件信息
+ * @param path String类型
+ * @param files
+ */
+ public static void getFiles(final String path, Vector<String> files) {
+ getFiles(new File(path), files);
+ }
+
+
+ /**
+ * 递归获取文件信息
+ * @param dir FIle类型
+ * @param files
+ */
+ private static void getFiles(final File dir, Vector<String> files) {
+ File[] filelist = dir.listFiles();
+ if (null == filelist) { // 目录不存在或者不是目录
+ return;
+ }
+ for (File file : filelist) {
+ if (file.isDirectory()) {
+ getFiles(file, files);
+ } else {
+ files.add(file.getAbsolutePath());
+ }
+ }
+ }
+
+
+ /**
+ *
+ * @param dir
+ * @param fileType
+ * @param recursive
+ * @return
+ */
+ public static List<File> listFile(File dir, final String fileType, boolean recursive) {
+ if (!dir.exists()) {
+ throw new ResultReturnException("目录:" + dir + "不存在");
+ }
+
+ if (!dir.isDirectory()) {
+ throw new ResultReturnException(dir + "不是目录");
+ }
+
+ FileFilter ff = null;
+ if (fileType == null || fileType.length() == 0) {
+ ff = new FileFilter() {
+ @Override
+ public boolean accept(File pathname) {
+ return true;
+ }
+ };
+ } else {
+ ff = new FileFilter() {
+ @Override
+ public boolean accept(File pathname) {
+ if (pathname.isDirectory()) {
+ return true;
+ }
+ String name = pathname.getName().toLowerCase();
+ String format = name.substring(name.lastIndexOf(".") + 1);
+ if (fileType.contains(format)) {
+ return true;
+ } else {
+ return false;
+ }
+ }
+ };
+ }
+ return listFile(dir, ff, recursive);
+ }
+
+
+
+ /**
+ *
+ * @param dir
+ * @param ff
+ * @param recursive 是否遍历子目录
+ * @return
+ */
+ public static List<File> listFile(File dir, FileFilter ff, boolean recursive) {
+ List<File> list = new ArrayList<>();
+ File[] files = dir.listFiles(ff);
+ if (files != null && files.length > 0) {
+ for (File f : files) {
+ // 如果是文件,直接添加到list中
+ if (f.isFile()) {
+ list.add(f);
+ } else if (f.isDirectory() && !f.getName().startsWith(".")) {
+ // 目录本身也加入list;recursive时再递归获取子目录中经过过滤的所有文件
+ list.add(f);
+ if (recursive) {
+ list.addAll(listFile(f, ff, true));
+ }
+ }
+ }
+ }
+ return list;
+ }
+
+}
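A minimal usage sketch of listFile; the directory and the extension filter string are example values only (Constant.DEFAULT_TYPE is the filter normally used elsewhere in this diff, its exact value is not shown here):

    import java.io.File;
    import java.util.List;
    import com.yuxue.util.FileUtil;

    public class ListFileDemo {
        public static void main(String[] args) {
            // true = recurse into sub-directories; "png,jpg,jpeg" is an example filter value
            List<File> images = FileUtil.listFile(new File("D:/PlateDetect/temp/"), "png,jpg,jpeg", true);
            images.forEach(f -> System.out.println(f.getAbsolutePath()));
        }
    }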
diff --git a/原yx-image-recognition/src/main/java/com/yuxue/util/ImageUtil.java b/原yx-image-recognition/src/main/java/com/yuxue/util/ImageUtil.java
new file mode 100644
index 00000000..fc22f67c
--- /dev/null
+++ b/原yx-image-recognition/src/main/java/com/yuxue/util/ImageUtil.java
@@ -0,0 +1,899 @@
+package com.yuxue.util;
+
+import java.time.Duration;
+import java.time.Instant;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.Vector;
+
+import org.opencv.core.Core;
+import org.opencv.core.CvType;
+import org.opencv.core.Mat;
+import org.opencv.core.MatOfPoint;
+import org.opencv.core.MatOfPoint2f;
+import org.opencv.core.Point;
+import org.opencv.core.Rect;
+import org.opencv.core.RotatedRect;
+import org.opencv.core.Scalar;
+import org.opencv.core.Size;
+import org.opencv.imgcodecs.Imgcodecs;
+import org.opencv.imgproc.Imgproc;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.yuxue.constant.Constant;
+import com.yuxue.enumtype.PlateColor;
+
+
+/**
+ * 车牌图片处理工具类
+ * 将原图,经过算法处理,得到车牌的图块
+ * @author yuxue
+ * @date 2020-05-18 12:07
+ */
+public class ImageUtil {
+
+ private static String DEFAULT_BASE_TEST_PATH = "D:/PlateDetect/temp/";
+
+ static {
+ System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
+ }
+
+ // 车牌定位处理步骤,该map用于表示步骤图片的顺序
+ private static Map<String, Integer> debugMap = Maps.newLinkedHashMap();
+ static {
+ debugMap.put("yuantu", 0); // 原图
+ debugMap.put("gaussianBlur", 0); // 高斯模糊
+ debugMap.put("gray", 0); // 图像灰度化
+ debugMap.put("sobel", 0); // Sobel 运算,得到图像的一阶水平方向导数
+ debugMap.put("threshold", 0); //图像二值化
+ debugMap.put("morphology", 0); // 图像闭操作
+ debugMap.put("clearInnerHole", 0); // 降噪
+ debugMap.put("clearSmallConnArea", 0); // 降噪
+ debugMap.put("clearAngleConn", 0); // 降噪
+ debugMap.put("clearHole", 0); // 降噪
+ debugMap.put("contours", 0); // 提取外部轮廓
+ debugMap.put("screenblock", 0); // 外部轮廓筛选
+ debugMap.put("crop", 0); // 切图
+ debugMap.put("resize", 0); // 切图resize
+
+ // 设置index, 用于debug生成文件时候按名称排序
+ Integer index = 100;
+ for (Entry<String, Integer> entry : debugMap.entrySet()) {
+ entry.setValue(index);
+ index ++;
+ }
+ }
+
+ public static void main(String[] args) {
+ Instant start = Instant.now();
+ String tempPath = DEFAULT_BASE_TEST_PATH + "test/";
+ String filename = tempPath + "/100_yuantu.jpg";
+ filename = tempPath + "/100_yuantu1.jpg";
+ // filename = tempPath + "/109_crop_0.png";
+
+ // 读取原图
+ Mat src = Imgcodecs.imread(filename);
+
+ Boolean debug = true;
+
+ // 高斯模糊
+ Mat gsMat = ImageUtil.gaussianBlur(src, debug, tempPath);
+
+ // 灰度图
+ Mat gray = ImageUtil.gray(gsMat, debug, tempPath);
+
+ Mat sobel = ImageUtil.sobel(gray, debug, tempPath);
+
+ Mat threshold = ImageUtil.threshold(sobel, debug, tempPath);
+
+ // Mat scharr = ImageUtil.scharr(gray, debug, tempPath);
+ // Mat threshold = ImageUtil.threshold(scharr, debug, tempPath);
+
+ Mat morphology = ImageUtil.morphology(threshold, debug, tempPath);
+
+ List<MatOfPoint> contours = ImageUtil.contours(src, morphology, debug, tempPath);
+
+ Vector<Mat> rects = ImageUtil.screenBlock(src, contours, debug, tempPath);
+
+ PlateUtil.loadSvmModel("D:/PlateDetect/train/plate_detect_svm/svm2.xml");
+ PlateUtil.loadAnnModel("D:/PlateDetect/train/chars_recognise_ann/ann.xml");
+
+ Vector<Mat> dst = new Vector<>();
+ PlateUtil.hasPlate(rects, dst, debug, tempPath);
+
+ System.err.println("识别到的车牌数量:" + dst.size());
+ dst.stream().forEach(inMat -> {
+ PlateColor color = PlateUtil.getPlateColor(inMat, true, debug, tempPath);
+ System.err.println(color.desc);
+
+ Vector<Mat> charMat = new Vector<>();
+ PlateUtil.charsSegment(inMat, color, charMat, debug, tempPath);
+
+
+ });
+
+ /*String filename = tempPath + "/hsvMat_1590994270425.jpg";
+ Mat src = Imgcodecs.imread(filename);
+ Vector charMat = new Vector();
+ PlateUtil.charsSegment(src, PlateColor.BLUE, charMat, true, tempPath);*/
+
+ Instant end = Instant.now();
+ System.err.println("总耗时:" + Duration.between(start, end).toMillis());
+
+ // ImageUtil.rgb2Hsv(src, debug, tempPath);
+ // ImageUtil.getHSVValue(src, debug, tempPath);
+ }
+
+
+
+ /**
+ * 高斯模糊
+ * @param inMat
+ * @param debug
+ * @return
+ */
+ public static final int DEFAULT_GAUSSIANBLUR_SIZE = 5;
+ public static Mat gaussianBlur(Mat inMat, Boolean debug, String tempPath) {
+ Mat dst = new Mat();
+ Imgproc.GaussianBlur(inMat, dst, new Size(DEFAULT_GAUSSIANBLUR_SIZE, DEFAULT_GAUSSIANBLUR_SIZE), 0, 0, Core.BORDER_DEFAULT);
+ if (debug) {
+ Imgcodecs.imwrite(tempPath + debugMap.get("gaussianBlur") + "_gaussianBlur.jpg", dst);
+ }
+ return dst;
+ }
+
+
+ /**
+ * 将图像进行灰度化
+ * @param inMat
+ * @param debug
+ * @param tempPath
+ * @return
+ */
+ public static Mat gray(Mat inMat, Boolean debug, String tempPath) {
+ Mat dst = new Mat();
+ Imgproc.cvtColor(inMat, dst, Imgproc.COLOR_BGR2GRAY);
+ if (debug) {
+ Imgcodecs.imwrite(tempPath + debugMap.get("gray") + "_gray.jpg", dst);
+ }
+ inMat.release();
+ return dst;
+ }
+
+
+ /**
+ * 对图像进行Sobel 运算,得到图像的一阶水平方向导数
+ * @param inMat 灰度图
+ * @param debug
+ * @param tempPath
+ * @return
+ */
+ public static final int SOBEL_SCALE = 1;
+ public static final int SOBEL_DELTA = 0;
+ public static final int SOBEL_X_WEIGHT = 1;
+ public static final int SOBEL_Y_WEIGHT = 0;
+ public static Mat sobel(Mat inMat, Boolean debug, String tempPath) {
+ Mat dst = new Mat();
+ Mat grad_x = new Mat();
+ Mat grad_y = new Mat();
+ Mat abs_grad_x = new Mat();
+ Mat abs_grad_y = new Mat();
+
+ // Sobel滤波 计算水平方向灰度梯度的绝对值
+ Imgproc.Sobel(inMat, grad_x, CvType.CV_16S, 1, 0, 3, SOBEL_SCALE, SOBEL_DELTA, Core.BORDER_DEFAULT);
+ Core.convertScaleAbs(grad_x, abs_grad_x); // 增强对比度
+
+ Imgproc.Sobel(inMat, grad_y, CvType.CV_16S, 0, 1, 3, SOBEL_SCALE, SOBEL_DELTA, Core.BORDER_DEFAULT);
+ Core.convertScaleAbs(grad_y, abs_grad_y);
+ grad_x.release();
+ grad_y.release();
+
+ // 计算结果梯度
+ Core.addWeighted(abs_grad_x, SOBEL_X_WEIGHT, abs_grad_y, SOBEL_Y_WEIGHT, 0, dst);
+ abs_grad_x.release();
+ abs_grad_y.release();
+
+ if (debug) {
+ Imgcodecs.imwrite(tempPath + debugMap.get("sobel") + "_sobel.jpg", dst);
+ }
+ return dst;
+ }
+
+
+ /**
+ * 对图像进行scharr 运算,得到图像的一阶水平方向导数
+ * @param inMat
+ * @param debug
+ * @param tempPath
+ * @return
+ */
+ public static Mat scharr(Mat inMat, Boolean debug, String tempPath) {
+
+ Mat dst = new Mat();
+
+ Mat grad_x = new Mat();
+ Mat grad_y = new Mat();
+ Mat abs_grad_x = new Mat();
+ Mat abs_grad_y = new Mat();
+
+ //注意求梯度的时候我们使用的是Scharr算法,Sobel算法容易受到图像细节的干扰
+ //所谓梯度运算就是对图像中的像素点进行求导数运算,从而得到相邻两个像素点的差异值 by:Tantuo
+ Imgproc.Scharr(inMat, grad_x, CvType.CV_32F, 1, 0);
+ Imgproc.Scharr(inMat, grad_y, CvType.CV_32F, 0, 1);
+ //openCV中有32位浮点数的CvType用于保存可能是负值的像素数据值
+ Core.convertScaleAbs(grad_x, abs_grad_x);
+ Core.convertScaleAbs(grad_y, abs_grad_y);
+ //openCV中使用release()释放Mat类图像,使用recycle()释放BitMap类图像
+ grad_x.release();
+ grad_y.release();
+
+ Core.addWeighted(abs_grad_x, 0.5, abs_grad_y, 0.5, 0, dst);
+ abs_grad_x.release();
+ abs_grad_y.release();
+ if (debug) {
+ Imgcodecs.imwrite(tempPath + debugMap.get("sobel") + "_sobel.jpg", dst);
+ }
+ return dst;
+ }
+
+
+ /**
+ * 对图像进行二值化。将灰度图像(每个像素点有256个取值可能, 0代表黑色,255代表白色)
+ * 转化为二值图像(每个像素点仅有1和0两个取值可能)
+ * @param inMat
+ * @param debug
+ * @param tempPath
+ * @return
+ */
+ public static Mat threshold(Mat inMat, Boolean debug, String tempPath) {
+ Mat dst = new Mat();
+ Imgproc.threshold(inMat, dst, 100, 255, Imgproc.THRESH_OTSU + Imgproc.THRESH_BINARY);
+ if (debug) {
+ Imgcodecs.imwrite(tempPath + debugMap.get("threshold") + "_threshold.jpg", dst);
+ }
+ inMat.release();
+ return dst;
+ }
+
+
+ /**
+ * 使用闭操作。对图像进行闭操作以后,可以看到车牌区域被连接成一个矩形装的区域
+ * @param inMat
+ * @param debug
+ * @param tempPath
+ * @return
+ */
+ // public static final int DEFAULT_MORPH_SIZE_WIDTH = 15;
+ // public static final int DEFAULT_MORPH_SIZE_HEIGHT = 3;
+ public static final int DEFAULT_MORPH_SIZE_WIDTH = 9;
+ public static final int DEFAULT_MORPH_SIZE_HEIGHT = 3;
+ public static Mat morphology(Mat inMat, Boolean debug, String tempPath) {
+ Mat dst = new Mat(inMat.size(), CvType.CV_8UC1);
+ Size size = new Size(DEFAULT_MORPH_SIZE_WIDTH, DEFAULT_MORPH_SIZE_HEIGHT);
+ Mat element = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, size);
+ Imgproc.morphologyEx(inMat, dst, Imgproc.MORPH_CLOSE, element);
+ if (debug) {
+ Imgcodecs.imwrite(tempPath + debugMap.get("morphology") + "_morphology0.jpg", dst);
+ }
+
+ // 填补内部孔洞,为了去除小连通区域的时候,降低影响
+ Mat a = clearInnerHole(dst, 8, 16, debug, tempPath);
+
+ // 去除小连通区域
+ Mat b = clearSmallConnArea(a, 1, 10, debug, tempPath);
+
+ // 按斜边去除
+ // Mat e = clearAngleConn(b, 5, debug, tempPath);
+
+ // 填补边缘孔洞
+ // Mat d = clearHole(a, 4, 2, debug, tempPath);
+
+ return b;
+ }
+
+
+ /**
+ * Find 轮廓 of possibles plates 求轮廓。求出图中所有的轮廓。
+ * 这个算法会把全图的轮廓都计算出来,因此要进行筛选。
+ * @param src 原图
+ * @param inMat morphology Mat
+ * @param debug
+ * @param tempPath
+ * @return
+ */
+ public static List<MatOfPoint> contours(Mat src, Mat inMat, Boolean debug, String tempPath) {
+ List<MatOfPoint> contours = Lists.newArrayList();
+ Mat hierarchy = new Mat();
+ // 提取外部轮廓
+ // CV_RETR_EXTERNAL只检测最外围轮廓,
+ // CV_RETR_LIST 检测所有的轮廓
+ // CV_CHAIN_APPROX_NONE 保存物体边界上所有连续的轮廓点到contours向量内
+ Imgproc.findContours(inMat, contours, hierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_NONE);
+
+ if (debug) {
+ Mat result = new Mat();
+ src.copyTo(result); // 复制一张图,不在原图上进行操作,防止后续需要使用原图
+ // 将轮廓描绘到原图
+ Imgproc.drawContours(result, contours, -1, new Scalar(0, 0, 255, 255));
+ // 输出带轮廓的原图
+ Imgcodecs.imwrite(tempPath + debugMap.get("contours") + "_contours.jpg", result);
+ }
+ return contours;
+ }
+
+
+ /**
+ * 根据轮廓, 筛选出可能是车牌的图块
+ * @param src
+ * @param matVector
+ * @param debug
+ * @param tempPath
+ * @return
+ */
+ public static final int DEFAULT_ANGLE = 30; // 角度判断所用常量
+ public static final int TYPE = CvType.CV_8UC3;
+ public static Vector<Mat> screenBlock(Mat src, List<MatOfPoint> contours, Boolean debug, String tempPath){
+ Vector<Mat> dst = new Vector<>();
+ List<MatOfPoint> mv = Lists.newArrayList(); // 用于在原图上描绘筛选后的结果
+ for (int i = 0, j = 0; i < contours.size(); i++) {
+ MatOfPoint m1 = contours.get(i);
+ MatOfPoint2f m2 = new MatOfPoint2f();
+ m1.convertTo(m2, CvType.CV_32F);
+ // RotatedRect 该类表示平面上的旋转矩形,有三个属性: 矩形中心点(质心); 边长(长和宽); 旋转角度
+ // boundingRect()得到包覆此轮廓的最小正矩形, minAreaRect()得到包覆轮廓的最小斜矩形
+ RotatedRect mr = Imgproc.minAreaRect(m2);
+
+ double angle = Math.abs(mr.angle);
+
+ if (checkPlateSize(mr) && angle <= DEFAULT_ANGLE) { // 判断尺寸及旋转角度 ±30°,排除不合法的图块
+ mv.add(contours.get(i));
+ Size rect_size = new Size((int) mr.size.width, (int) mr.size.height);
+ if (mr.size.width / mr.size.height < 1) { // 宽度小于高度
+ angle = 90 + angle; // 旋转90°
+ rect_size = new Size(rect_size.height, rect_size.width);
+ }
+
+ // 旋转角度,根据需要是否进行角度旋转
+ /*Mat img_rotated = new Mat();
+ Mat rotmat = Imgproc.getRotationMatrix2D(mr.center, angle, 1); // 旋转
+ Imgproc.warpAffine(src, img_rotated, rotmat, src.size()); // 仿射变换 考虑是否需要进行投影变换?
+ */
+
+ // 切图
+ Mat img_crop = new Mat();
+ Imgproc.getRectSubPix(src, rect_size, mr.center, img_crop);
+ if (debug) {
+ Imgcodecs.imwrite(tempPath + debugMap.get("crop") + "_crop_" + j + ".png", img_crop);
+ }
+
+ // 处理切图,调整为指定大小
+ Mat resized = new Mat(Constant.DEFAULT_HEIGHT, Constant.DEFAULT_WIDTH, TYPE);
+ Imgproc.resize(img_crop, resized, resized.size(), 0, 0, Imgproc.INTER_CUBIC);
+ if (debug) {
+ Imgcodecs.imwrite(tempPath + debugMap.get("resize") + "_resize_" + j + ".png", resized);
+ j++;
+ }
+ dst.add(resized);
+ }
+ }
+ if (debug) {
+ Mat result = new Mat();
+ src.copyTo(result); // 复制一张图,不在原图上进行操作,防止后续需要使用原图
+ // 将轮廓描绘到原图
+ Imgproc.drawContours(result, mv, -1, new Scalar(0, 0, 255, 255));
+ // 输出带轮廓的原图
+ Imgcodecs.imwrite(tempPath + debugMap.get("screenblock") + "_screenblock.jpg", result);
+ }
+ return dst;
+ }
+
+ /**
+ * 对minAreaRect获得的最小外接矩形
+ * 判断面积以及宽高比是否在制定的范围内
+ * 黄牌、蓝牌、绿牌
+ * 国内车牌大小: 440mm*140mm,宽高比 3.142857
+ * @param mr
+ * @return
+ */
+ final static float DEFAULT_ERROR = 0.7f; // 宽高比允许70%误差
+ final static float DEFAULT_ASPECT = 3.142857f;
+ public static final int DEFAULT_VERIFY_MIN = 1;
+ public static final int DEFAULT_VERIFY_MAX = 30;
+ private static boolean checkPlateSize(RotatedRect mr) {
+
+ // 切图面积取值范围
+ int min = 44 * 14 * DEFAULT_VERIFY_MIN;
+ int max = 44 * 14 * DEFAULT_VERIFY_MAX;
+
+ // 切图横纵比取值范围;关键在于纵横比例
+ float rmin = DEFAULT_ASPECT - DEFAULT_ASPECT * DEFAULT_ERROR;
+ float rmax = DEFAULT_ASPECT + DEFAULT_ASPECT * DEFAULT_ERROR;
+
+ // 切图计算面积
+ int area = (int) (mr.size.height * mr.size.width);
+ // 切图宽高比
+ double r = mr.size.width / mr.size.height;
+ /*if (r < 1) { // 注释掉,不处理width 小于height的图片
+ r = mr.size.height / mr.size.width;
+ }*/
+ return min <= area && area <= max && rmin <= r && r <= rmax;
+ }
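Plugging the constants above into checkPlateSize gives concrete bounds; a quick check with no OpenCV dependency (the printed values are simply what the formulas evaluate to):

    public class PlateSizeBounds {
        public static void main(String[] args) {
            float aspect = 3.142857f, error = 0.7f;
            System.out.println("min area  = " + (44 * 14 * 1));             // 616 px
            System.out.println("max area  = " + (44 * 14 * 30));            // 18480 px
            System.out.println("min ratio = " + (aspect - aspect * error)); // ~0.94
            System.out.println("max ratio = " + (aspect + aspect * error)); // ~5.34
        }
    }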
+
+
+ /**
+ * rgb图像转换为hsv图像
+ * @param inMat
+ * @param debug
+ * @param tempPath
+ * @return
+ */
+ public static Mat rgb2Hsv(Mat inMat, Boolean debug, String tempPath) {
+ // 转到HSV空间进行处理
+ Mat dst = new Mat();
+ Imgproc.cvtColor(inMat, dst, Imgproc.COLOR_BGR2HSV);
+ List<Mat> hsvSplit = Lists.newArrayList();
+ Core.split(dst, hsvSplit);
+ // 直方图均衡化是一种常见的增强图像对比度的方法,使用该方法可以增强局部图像的对比度,尤其在数据较为相似的图像中作用更加明显
+ Imgproc.equalizeHist(hsvSplit.get(2), hsvSplit.get(2));
+ Core.merge(hsvSplit, dst);
+
+ if (debug) {
+ // Imgcodecs.imwrite(tempPath + "hsvMat_"+System.currentTimeMillis()+".jpg", dst);
+ }
+ return dst;
+ }
+
+
+ /**
+ * 获取HSV中各个颜色所对应的H的范围
+ * HSV是一种比较直观的颜色模型,所以在许多图像编辑工具中应用比较广泛,这个模型中颜色的参数分别是:色调(H, Hue),饱和度(S,Saturation),明度(V, Value)
+ * 1.PS软件时,H取值范围是0-360,S取值范围是(0%-100%),V取值范围是(0%-100%)。
+ * 2.利用openCV中cvSplit函数的在选择图像IPL_DEPTH_32F类型时,H取值范围是0-360,S取值范围是0-1(0%-100%),V取值范围是0-1(0%-100%)。
+ * 3.利用openCV中cvSplit函数的在选择图像IPL_DEPTH_8UC类型时,H取值范围是0-180,S取值范围是0-255,V取值范围是0-255
+ * @param inMat
+ * @param debug
+ */
+ public static void getHSVValue(Mat inMat, Boolean debug, String tempPath) {
+ int nRows = inMat.rows();
+ int nCols = inMat.cols();
+ Map<Integer, Integer> map = Maps.newHashMap();
+ for (int i = 0; i < nRows; ++i) {
+ for (int j = 0; j < nCols; j += 3) {
+ int H = (int)inMat.get(i, j)[0];
+ // int S = (int)inMat.get(i, j)[1];
+ // int V = (int)inMat.get(i, j)[2];
+ if(map.containsKey(H)) {
+ int count = map.get(H);
+ map.put(H, count+1);
+ } else {
+ map.put(H, 1);
+ }
+ }
+ }
+ Set set = map.keySet();
+ Object[] arr = set.toArray();
+ Arrays.sort(arr);
+ for (Object key : arr) {
+ System.out.println(key + ": " + map.get(key));
+ }
+ return;
+ }
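getHSVValue only prints the H histogram; deciding a plate colour still needs per-colour H ranges. The sketch below uses commonly quoted 8-bit H ranges as placeholders; the exact thresholds are assumptions, not values read from Constant:

    public class HsvRangeSketch {
        // Rough 8-bit H ranges (0-180); placeholder values, tune against real plate samples.
        public static String classifyHue(int h) {
            if (h >= 100 && h <= 124) return "blue";
            if (h >= 11 && h <= 34) return "yellow";
            if (h >= 35 && h <= 77) return "green";
            return "other";
        }
    }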
+
+
+
+ /**
+ * 计算最大内接矩形
+ * https://blog.csdn.net/cfqcfqcfqcfqcfq/article/details/53084090
+ * @param inMat
+ * @return
+ */
+ public static Rect maxAreaRect(Mat threshold, Point point) {
+ int edge[] = new int[4];
+ edge[0] = (int) point.x + 1;//top
+ edge[1] = (int) point.y + 1;//right
+ edge[2] = (int) point.y - 1;//bottom
+ edge[3] = (int) point.x - 1;//left
+
+ boolean[] expand = { true, true, true, true};//扩展标记位
+ int n = 0;
+ while (expand[0] || expand[1] || expand[2] || expand[3]){
+ int edgeID = n % 4;
+ expand[edgeID] = expandEdge(threshold, edge, edgeID);
+ n++;
+ }
+ Point tl = new Point(edge[3], edge[0]);
+ Point br = new Point(edge[1], edge[2]);
+ return new Rect(tl, br);
+ }
+
+
+ /**
+ * @brief expandEdge 扩展边界函数
+ * @param img:输入图像,单通道二值图,深度为8
+ * @param edge 边界数组,存放4条边界值
+ * @param edgeID 当前边界号
+ * @return 布尔值 确定当前边界是否可以扩展
+ */
+ public static boolean expandEdge(Mat img, int edge[], int edgeID) {
+ int nc = img.cols();
+ int nr = img.rows();
+
+ switch (edgeID) {
+ case 0:
+ if (edge[0] >= nr) {
+ return false;
+ }
+ for (int i = edge[3]; i <= edge[1]; ++i) {
+ if (img.get(edge[0], i)[0]== 255) {// 遇见255像素表明碰到边缘线
+ return false;
+ }
+ }
+ edge[0]++;
+ return true;
+ case 1:
+ if (edge[1] >= nc) {
+ return false;
+ }
+ for (int i = edge[2]; i <= edge[0]; ++i) {
+ if (img.get(i, edge[1])[0] == 255)
+ return false;
+ }
+ edge[1]++;
+ return true;
+ case 2:
+ if (edge[2] < 0) {
+ return false;
+ }
+ for (int i = edge[3]; i <= edge[1]; ++i) {
+ if (img.get(edge[2], i)[0] == 255)
+ return false;
+ }
+ edge[2]--;
+ return true;
+ case 3:
+ if (edge[3] < 0) {
+ return false;
+ }
+ for (int i = edge[2]; i <= edge[0]; ++i) {
+ if (img.get(i, edge[3])[0] == 255)
+ return false;
+ }
+ edge[3]--;
+ return true;
+ default:
+ return false;
+ }
+ }
+
+
+ /**
+ * 清除白色区域的内部黑色孔洞
+ * rowLimit != colsLimit, 使用长方形比正方形好
+ * 该算法比较耗时
+ * @param inMat
+ * @param rowLimit
+ * @param colsLimit
+ * @param debug
+ * @param tempPath
+ * @return
+ */
+ public static Mat clearInnerHole(Mat inMat, int rowLimit, int colsLimit, Boolean debug, String tempPath) {
+ Instant start = Instant.now();
+ int uncheck = 0, normal = 2, replace = 3, white = 255, black = 0;
+
+ Mat dst = new Mat(inMat.size(), CvType.CV_8UC1);
+ inMat.copyTo(dst);
+
+ // 初始化的图像全部为0,未检查; 全黑图像
+ Mat label = new Mat(inMat.size(), CvType.CV_8UC1);
+
+ // 标记所有的白色区域
+ for (int i = 0; i < inMat.rows(); i++) {
+ for (int j = 0; j < inMat.cols(); j++) {
+ // 白色点较少,遍历白色点速度快
+ if (inMat.get(i, j)[0] == white && label.get(i, j)[0] == uncheck) { // 对于二值图,0代表黑色,255代表白色
+ label.put(i, j, normal); // 中心点
+
+ // 执行两次,交换row 跟col;
+ int condition = 0;
+ do {
+ int x1 = i;
+ int x2 = i + rowLimit >= inMat.rows() ? inMat.rows() - 1 : i + rowLimit;
+ int y1 = j;
+ int y2 = j + colsLimit >= inMat.cols() ? inMat.cols() - 1 : j + colsLimit ;
+
+ int count = 0;
+ // 遍历四条边
+ for (int k = x1; k < x2; k++) {
+ if(inMat.get(k, y1)[0] == black || inMat.get(k, y2)[0] == black) {
+ count++;
+ }
+ }
+ for (int k = y1; k < y2; k++) {
+ if(inMat.get(x1, k)[0] == black || inMat.get(x2, k)[0] == black) {
+ count++;
+ }
+ }
+
+ // 根据中心点+limit,定位四个角生成一个矩形,
+ // 矩形四条边都是白色,内部的黑点标记为 要被替换的对象
+ if(count == 0 ) {
+ for (int n = x1; n < x2; n++) {
+ for (int m = y1; m < y2; m++) {
+ if (inMat.get(n, m)[0] == black && label.get(n, m)[0] == uncheck) {
+ label.put(n, m, replace);
+ }
+ }
+ }
+ }
+ int ex = rowLimit;
+ rowLimit = colsLimit;
+ colsLimit = ex;
+
+ condition++;
+ } while (condition == 1);
+ }
+ }
+ }
+
+
+ for (int i = 0; i < inMat.rows(); i++) {
+ for (int j = 0; j < inMat.cols(); j++) {
+ if(label.get(i, j)[0] == replace) {
+ dst.put(i, j, white);
+ }
+ }
+ }
+ label.release();
+ if (debug) {
+ Imgcodecs.imwrite(tempPath + debugMap.get("clearInnerHole") + "_clearInnerHole.jpg", dst);
+ Instant end = Instant.now();
+ System.out.println("clearInnerHole执行耗时:" + Duration.between(start, end).toMillis());
+ }
+ return dst;
+ }
+
+
+ /**
+ * 清除二值图像的黑洞
+ * 按矩形清理
+ * @param inMat 二值图像 0代表黑色,255代表白色
+ * @param rowLimit 像素值
+ * @param colsLimit 像素值
+ * @param debug
+ * @param tempPath
+ */
+ public static Mat clearHole(Mat inMat, int rowLimit, int colsLimit, Boolean debug, String tempPath) {
+ Instant start = Instant.now();
+ int uncheck = 0, normal = 2, replace = 3, white = 255, black = 0;
+ Mat dst = new Mat(inMat.size(), CvType.CV_8UC1);
+ inMat.copyTo(dst);
+
+ // 初始化的图像全部为0,未检查; 全黑图像
+ Mat label = new Mat(inMat.size(), CvType.CV_8UC1);
+
+ // 标记所有的白色区域
+ for (int i = 0; i < inMat.rows(); i++) {
+ for (int j = 0; j < inMat.cols(); j++) {
+ if (inMat.get(i, j)[0] == white) { // 对于二值图,0代表黑色,255代表白色
+ label.put(i, j, normal); // 中心点
+
+ // 执行两次,交换row 跟col;
+ int condition = 0;
+ do {
+ int x1 = i;
+ int x2 = i + rowLimit >= inMat.rows() ? inMat.rows() - 1 : i + rowLimit;
+ int y1 = j;
+ int y2 = j + colsLimit >= inMat.cols() ? inMat.cols() - 1 : j + colsLimit ;
+
+ int count = 0;
+ if(inMat.get(x1, y1)[0] == white) {// 左上角
+ count++;
+ }
+ if(inMat.get(x1, y2)[0] == white) { // 左下角
+ count++;
+ }
+ if(inMat.get(x2, y1)[0] == white) { // 右上角
+ count++;
+ }
+ if(inMat.get(x2, y2)[0] == white) { // 右下角
+ count++;
+ }
+
+ // 根据中心点+limit,定位四个角生成一个矩形,
+ // 将四个角都是白色的矩形,内部的黑点标记为 要被替换的对象
+ if(count >=4 ) {
+ for (int n = x1; n < x2; n++) {
+ for (int m = y1; m < y2; m++) {
+ if (inMat.get(n, m)[0] == black && label.get(n, m)[0] == uncheck) {
+ label.put(n, m, replace);
+ }
+ }
+ }
+ }
+
+ int ex = rowLimit;
+ rowLimit = colsLimit;
+ colsLimit = ex;
+
+ condition++;
+ } while (condition == 1);
+ }
+ }
+ }
+
+ for (int i = 0; i < inMat.rows(); i++) {
+ for (int j = 0; j < inMat.cols(); j++) {
+ if(label.get(i, j)[0] == replace) {
+ dst.put(i, j, white); // 黑色替换成白色
+ }
+ }
+ }
+ if (debug) {
+ Imgcodecs.imwrite(tempPath + debugMap.get("clearHole") + "_clearHole.jpg", dst);
+ Instant end = Instant.now();
+ System.out.println("clearHole执行耗时:" + Duration.between(start, end).toMillis());
+ }
+ return dst;
+ }
+
+ /**
+ * 清除二值图像的细小连接
+ * 按水平或者垂直方向清除
+ * @param inMat
+ * @param rowLimit
+ * @param colsLimit
+ * @param debug
+ * @param tempPath
+ * @return
+ */
+ public static Mat clearSmallConnArea(Mat inMat, int rowLimit, int colsLimit, Boolean debug, String tempPath) {
+ Instant start = Instant.now();
+ int uncheck = 0, normal = 2, replace = 3, white = 255, black = 0;
+
+ Mat dst = new Mat(inMat.size(), CvType.CV_8UC1);
+ inMat.copyTo(dst);
+
+ // the label mat starts as all zeros (unchecked), i.e. an all-black image
+ Mat label = new Mat(inMat.size(), CvType.CV_8UC1);
+
+ // mark every black pixel and scan a window around it
+ for (int i = 0; i < inMat.rows(); i++) {
+ for (int j = 0; j < inMat.cols(); j++) {
+ if (inMat.get(i, j)[0] == black) { // in a binary image, 0 is black and 255 is white
+ label.put(i, j, normal); // centre point
+ // run twice, swapping the row and col limits the second time
+ int condition = 0;
+ do {
+ int x1 = i;
+ int x2 = i + rowLimit >= inMat.rows() ? inMat.rows() - 1 : i + rowLimit;
+ int y1 = j;
+ int y2 = j + colsLimit >= inMat.cols() ? inMat.cols() - 1 : j + colsLimit ;
+
+ int count = 0;
+ if(inMat.get(x1, y1)[0] == black) { // top-left corner
+ count++;
+ }
+ if(inMat.get(x1, y2)[0] == black) { // top-right corner
+ count++;
+ }
+ if(inMat.get(x2, y1)[0] == black) { // bottom-left corner
+ count++;
+ }
+ if(inMat.get(x2, y2)[0] == black) { // bottom-right corner
+ count++;
+ }
+
+ // Using the centre point plus the limits, form a rectangle from the four corners;
+ // if all four corners are black, mark the white pixels inside it for replacement.
+ if(count >= 4) {
+ for (int n = x1; n < x2; n++) {
+ for (int m = y1; m < y2; m++) {
+ if (inMat.get(n, m)[0] == white && label.get(n, m)[0] == uncheck) {
+ label.put(n, m, replace);
+ }
+ }
+ }
+ }
+ int ex = rowLimit;
+ rowLimit = colsLimit;
+ colsLimit = ex;
+
+ condition++;
+ } while (condition == 1);
+ }
+ }
+ }
+
+ for (int i = 0; i < inMat.rows(); i++) {
+ for (int j = 0; j < inMat.cols(); j++) {
+ if(label.get(i, j)[0] == replace) {
+ dst.put(i, j, black); // replace white with black
+ }
+ }
+ }
+ if (debug) {
+ Imgcodecs.imwrite(tempPath + debugMap.get("clearSmallConnArea") + "_clearSmallConnArea.jpg", dst);
+ Instant end = Instant.now();
+ System.out.println("clearSmallConnArea took " + Duration.between(start, end).toMillis() + " ms");
+ }
+ return dst;
+ }
+
+
+ /**
+ * Remove thin white connections from a binary image.
+ * Scans along 45-degree diagonals.
+ * @param inMat
+ * @param limit
+ * @param debug
+ * @param tempPath
+ * @return
+ */
+ public static Mat clearAngleConn(Mat inMat, int limit, Boolean debug, String tempPath) {
+ Instant start = Instant.now();
+ int uncheck = 0, normal = 2, replace = 3, white = 255, black = 0;
+
+ Mat dst = new Mat(inMat.size(), CvType.CV_8UC1);
+ inMat.copyTo(dst);
+
+ // the label mat starts as all zeros (unchecked), i.e. an all-black image
+ Mat label = new Mat(inMat.size(), CvType.CV_8UC1);
+
+ // mark every black pixel and scan the diagonals around it
+ for (int i = 0; i < inMat.rows(); i++) {
+ for (int j = 0; j < inMat.cols(); j++) {
+ if (inMat.get(i, j)[0] == black) { // in a binary image, 0 is black and 255 is white
+ label.put(i, j, normal); // centre point
+
+ int x1 = i;
+ int x2 = i + limit >= inMat.rows() ? inMat.rows() - 1 : i + limit;
+ int y1 = j;
+ int y2 = j + limit >= inMat.cols() ? inMat.cols() - 1 : j + limit ;
+
+ // Using the centre point plus the limit, form a rectangle from the four corners;
+ // if both endpoints of a diagonal are black, mark the white pixels on that diagonal for replacement.
+ // the [\] diagonal
+ if(inMat.get(x1, y1)[0] == black && inMat.get(x2, y2)[0] == black) {
+ for (int n = x1, m = y1; n < x2 && m < y2; n++, m++) {
+ if (inMat.get(n, m)[0] == white && label.get(n, m)[0] == uncheck) {
+ label.put(n, m, replace);
+ }
+ }
+ }
+ if(inMat.get(x1, y2)[0] == black && inMat.get(x2, y1)[0] == black) {
+ // the [/] diagonal
+ for (int n = x1, m = y2; n < x2 && m > y1; n++, m--) {
+ if (inMat.get(n, m)[0] == white && label.get(n, m)[0] == uncheck) {
+ label.put(n, m, replace);
+ }
+ }
+ }
+ }
+ }
+ }
+ // replace white with black
+ for (int i = 0; i < inMat.rows(); i++) {
+ for (int j = 0; j < inMat.cols(); j++) {
+ if(label.get(i, j)[0] == replace) {
+ dst.put(i, j, black);
+ }
+ }
+ }
+
+ if (debug) {
+ Imgcodecs.imwrite(tempPath + debugMap.get("clearAngleConn") + "_clearAngleConn.jpg", dst);
+ Instant end = Instant.now();
+ System.out.println("clearAngleConn took " + Duration.between(start, end).toMillis() + " ms");
+ }
+ return dst;
+ }
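+ /*
+ * A typical clean-up chain for a thresholded plate image might look like the sketch below;
+ * the window sizes and the diagonal limit are illustrative assumptions, not values taken from
+ * this class:
+ *
+ *   Mat cleaned = clearHole(binaryPlate, 4, 4, false, tempPath);
+ *   cleaned = clearSmallConnArea(cleaned, 2, 2, false, tempPath);
+ *   cleaned = clearAngleConn(cleaned, 3, false, tempPath);
+ */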
+
+
+
+}
diff --git a/原yx-image-recognition/src/main/java/com/yuxue/util/PlateUtil.java b/原yx-image-recognition/src/main/java/com/yuxue/util/PlateUtil.java
new file mode 100644
index 00000000..8cd2704b
--- /dev/null
+++ b/原yx-image-recognition/src/main/java/com/yuxue/util/PlateUtil.java
@@ -0,0 +1,546 @@
+package com.yuxue.util;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+import java.util.Vector;
+import java.util.Map.Entry;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.opencv.core.Core;
+import org.opencv.core.CvType;
+import org.opencv.core.Mat;
+import org.opencv.core.MatOfPoint;
+import org.opencv.core.Point;
+import org.opencv.core.Rect;
+import org.opencv.core.Scalar;
+import org.opencv.core.Size;
+import org.opencv.imgcodecs.Imgcodecs;
+import org.opencv.imgproc.Imgproc;
+import org.opencv.ml.ANN_MLP;
+import org.opencv.ml.SVM;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.yuxue.constant.Constant;
+import com.yuxue.enumtype.Direction;
+import com.yuxue.enumtype.PlateColor;
+import com.yuxue.train.SVMTrain;
+
+
+/**
+ * Licence plate processing utilities:
+ * splitting a plate crop into single characters
+ * and recognising the characters.
+ * Work in progress.
+ * @author yuxue
+ * @date 2020-05-28 15:11
+ */
+public class PlateUtil {
+
+ // Plate-processing steps; this map fixes the naming order of the debug images
+ private static Map<String, Integer> debugMap = Maps.newLinkedHashMap();
+ static {
+ System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
+
+ debugMap.put("platePredict", 0);
+ debugMap.put("colorMatch", 0);
+ debugMap.put("plateThreshold", 0);
+ debugMap.put("plateContours", 0);
+ debugMap.put("plateRect", 0);
+ debugMap.put("plateCrop", 0);
+ debugMap.put("char_clearLiuDing", 0); // remove the rivets at the top of the plate
+ debugMap.put("specMat", 0);
+ debugMap.put("chineseMat", 0);
+ debugMap.put("char_auxRoi", 0);
+
+ // assign an index so the generated debug files sort by name
+ Integer index = 200;
+ for (Entry<String, Integer> entry : debugMap.entrySet()) {
+ entry.setValue(index);
+ index ++;
+ }
+
+ // Loading the model files here throws an error; the cause has not been tracked down yet
+ /*loadSvmModel("D:/PlateDetect/train/plate_detect_svm/svm2.xml");
+ loadAnnModel("D:/PlateDetect/train/chars_recognise_ann/ann.xml");*/
+ }
+
+ private static SVM svm = SVM.create();
+
+ private static ANN_MLP ann = ANN_MLP.create();
+
+ public static void loadSvmModel(String path) {
+ svm.clear();
+ svm = SVM.load(path);
+ }
+
+ // load the ANN model file (the trained image-to-character network)
+ public static void loadAnnModel(String path) {
+ ann.clear();
+ ann = ANN_MLP.load(path);
+ }
+
+
+ public static void main(String[] args) {
+ /*System.err.println(PlateUtil.isPlate("粤AI234K"));
+ System.err.println(PlateUtil.isPlate("鄂CD3098"));*/
+
+ }
+
+
+ /**
+ * Check whether a string looks like a licence plate number, using the plate regular expression.
+ * @param str
+ * @return
+ */
+ public static Boolean isPlate(String str) {
+ Pattern p = Pattern.compile(Constant.plateReg);
+ Matcher m = p.matcher(str);
+ return m.find();
+ }
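+ // A minimal usage sketch; whether a given string is accepted depends entirely on
+ // Constant.plateReg, whose pattern is not shown in this class:
+ //
+ //   Boolean ok = PlateUtil.isPlate("粤B12345");   // expected true for a standard plate format
+ //   Boolean bad = PlateUtil.isPlate("ABC-123");   // expected false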
+
+
+ /**
+ * Given a set of candidate plate crops, keep the ones the SVM classifies as plates.
+ * @param inMat candidate crops
+ * @param dst crops that contain a plate
+ */
+ public static void hasPlate(Vector<Mat> inMat, Vector<Mat> dst, Boolean debug, String tempPath) {
+ int i = 0;
+ for (Mat src : inMat) {
+ if(src.rows() == Constant.DEFAULT_HEIGHT && src.cols() == Constant.DEFAULT_WIDTH) {
+ Mat samples = SVMTrain.getFeature(src);
+ float flag = svm.predict(samples);
+ if (flag == 0) {
+ dst.add(src);
+ if(debug) {
+ System.err.println("candidate accepted as a plate");
+ Imgcodecs.imwrite(tempPath + debugMap.get("platePredict") + "_platePredict" + i + ".png", src);
+ }
+ i++;
+ } else {
+ System.out.println("candidate rejected");
+ }
+ } else {
+ System.err.println("invalid image block size");
+ }
+ }
+ return;
+ }
+
+
+ /**
+ * Determine the colour of a plate crop.
+ * @param inMat
+ * @return
+ */
+ public static PlateColor getPlateColor(Mat inMat, Boolean adaptive_minsv, Boolean debug, String tempPath) {
+ // match-ratio threshold for accepting a colour
+ final float thresh = 0.70f;
+ if(colorMatch(inMat, PlateColor.GREEN, adaptive_minsv, debug, tempPath) > thresh) {
+ return PlateColor.GREEN;
+ }
+ if(colorMatch(inMat, PlateColor.YELLOW, adaptive_minsv, debug, tempPath) > thresh) {
+ return PlateColor.YELLOW;
+ }
+ if(colorMatch(inMat, PlateColor.BLUE, adaptive_minsv, debug, tempPath) > thresh) {
+ return PlateColor.BLUE;
+ }
+ return PlateColor.UNKNOWN;
+ }
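+ // A minimal usage sketch, assuming plateCrop is a BGR plate image; adaptive_minsv = true
+ // loosens the saturation/value lower bound for hues close to the reference hue (see colorMatch):
+ //
+ //   PlateColor c = PlateUtil.getPlateColor(plateCrop, true, false, "/tmp/");
+ //   if (c == PlateColor.BLUE) { /* handle a blue plate */ }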
+
+
+ /**
+ * Compute the fraction of pixels that match the given plate colour.
+ * @param inMat
+ * @param r
+ * @param adaptive_minsv
+ * @param debug
+ * @param tempPath
+ * @return
+ */
+ public static Float colorMatch(Mat inMat, PlateColor r, Boolean adaptive_minsv, Boolean debug, String tempPath) {
+ final float max_sv = 255;
+ final float minref_sv = 64;
+ final float minabs_sv = 95;
+
+ Mat hsvMat = ImageUtil.rgb2Hsv(inMat, debug, tempPath);
+
+ // reference hue band of the colour template being matched
+ int min_h = r.minH;
+ int max_h = r.maxH;
+ float diff_h = (float) ((max_h - min_h) / 2);
+ int avg_h = (int) (min_h + diff_h);
+
+ for (int i = 0; i < hsvMat.rows(); ++i) {
+ for (int j = 0; j < hsvMat.cols(); j += 3) { // note: j advances three columns at a time, so only every third pixel column is tested
+ int H = (int)hsvMat.get(i, j)[0];
+ int S = (int)hsvMat.get(i, j)[1];
+ int V = (int)hsvMat.get(i, j)[2];
+
+ boolean colorMatched = false;
+
+ if ( min_h < H && H <= max_h) {
+ int Hdiff = Math.abs(H - avg_h);
+ float Hdiff_p = Hdiff / diff_h;
+ float min_sv = 0;
+ if (adaptive_minsv) {
+ min_sv = minref_sv - minref_sv / 2 * (1 - Hdiff_p);
+ } else {
+ min_sv = minabs_sv;
+ }
+ if ((min_sv < S && S <= max_sv) && (min_sv < V && V <= max_sv)) {
+ colorMatched = true;
+ }
+ }
+
+ if (colorMatched == true) {
+ hsvMat.put(i, j, 0, 0, 255);
+ } else {
+ hsvMat.put(i, j, 0, 0, 0);
+ }
+ }
+ }
+
+ // take the V channel of the matched result as a binary grey mask
+ List<Mat> hsvSplit = Lists.newArrayList();
+ Core.split(hsvMat, hsvSplit);
+ Mat gray = hsvSplit.get(2);
+
+ float percent = (float) Core.countNonZero(gray) / (gray.rows() * gray.cols());
+ if (debug) {
+ Imgcodecs.imwrite(tempPath + debugMap.get("colorMatch") + "_colorMatch.jpg", gray);
+ }
+ return percent;
+ }
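+ // Worked example for the adaptive threshold above (values follow directly from the code):
+ // with minref_sv = 64, a pixel whose hue sits exactly at avg_h has Hdiff_p = 0, so
+ // min_sv = 64 - 32 * (1 - 0) = 32; a pixel at the edge of the hue band has Hdiff_p = 1,
+ // so min_sv = 64. The closer the hue is to the reference hue, the lower the saturation
+ // and value bar for counting the pixel as matched.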
+
+
+
+ public static final int DEFAULT_ANGLE = 30; // constant used for angle checks
+
+ /**
+ * Split a plate crop into single-character tiles.
+ * @param inMat input plate crop
+ * @param color plate colour, used to choose the threshold mode
+ * @param charMat output vector of character tiles
+ * @param debug
+ * @param tempPath
+ */
+ public static void charsSegment(Mat inMat, PlateColor color, Vector<Mat> charMat, Boolean debug, String tempPath) {
+ Mat gray = new Mat();
+ Imgproc.cvtColor(inMat, gray, Imgproc.COLOR_BGR2GRAY);
+
+ Mat threshold = new Mat();
+ switch (color) {
+ case BLUE:
+ Imgproc.threshold(gray, threshold, 10, 255, Imgproc.THRESH_OTSU + Imgproc.THRESH_BINARY);
+ break;
+
+ case YELLOW:
+ Imgproc.threshold(gray, threshold, 10, 255, Imgproc.THRESH_OTSU + Imgproc.THRESH_BINARY_INV);
+ break;
+
+ case GREEN:
+ Imgproc.threshold(gray, threshold, 10, 255, Imgproc.THRESH_OTSU + Imgproc.THRESH_BINARY_INV);
+ break;
+
+ default:
+ return;
+ }
+
+ // image clean-up, denoising, etc.
+ if (debug) {
+ Imgcodecs.imwrite(tempPath + debugMap.get("plateThreshold") + "_plateThreshold.jpg", threshold);
+ }
+
+ // extract contours
+ Mat contour = new Mat();
+ threshold.copyTo(contour);
+
+ List<MatOfPoint> contours = Lists.newArrayList();
+ // extract only the external contours
+ Imgproc.findContours(contour, contours, new Mat(), Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_NONE);
+
+ if (debug) {
+ Mat result = new Mat();
+ inMat.copyTo(result);
+ Imgproc.drawContours(result, contours, -1, new Scalar(0, 0, 255, 255));
+ Imgcodecs.imwrite(tempPath + debugMap.get("plateContours") + "_plateContours.jpg", result);
+ }
+
+
+ Vector<Rect> rt = new Vector<>();
+ for (int i = 0; i < contours.size(); i++) {
+ Rect mr = Imgproc.boundingRect(contours.get(i));
+ /*if(debug) {
+ Mat mat = new Mat(threshold, mr);
+ Imgcodecs.imwrite(tempPath + debugMap.get("plateRect") + "_plateRect_" + i + ".jpg", mat);
+ }*/
+ if (checkCharSizes(mr)) {
+ rt.add(mr);
+ }
+ }
+ if(null == rt || rt.size() <= 0) {
+ return;
+ }
+ Vector<Rect> sorted = new Vector<>();
+ sortRect(rt, sorted);
+
+ String plate = "";
+ Vector<Mat> dst = new Vector<>();
+
+ for (int i = 0; i < sorted.size(); i++) {
+ Mat img_crop = new Mat(threshold, sorted.get(i));
+ img_crop = preprocessChar(img_crop);
+ dst.add(img_crop);
+ if(debug) {
+ Imgcodecs.imwrite(tempPath + debugMap.get("plateCrop") + "_plateCrop_" + i + ".jpg", img_crop);
+ }
+
+ Mat f = features(img_crop, Constant.predictSize);
+
+ // character prediction via the ANN
+ Mat output = new Mat(1, 140, CvType.CV_32F);
+ int index = (int) ann.predict(f, output, 0);
+
+ if (index < Constant.numCharacter) {
+ plate += String.valueOf(Constant.strCharacters[index]);
+ } else {
+ String s = Constant.strChinese[index - Constant.numCharacter];
+ plate += Constant.KEY_CHINESE_MAP.get(s);
+ }
+ }
+ System.err.println("===>" + plate);
+
+ return;
+ }
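+ // A minimal usage sketch, assuming the ANN model has been loaded via loadAnnModel first and
+ // that plateCrop is a colour plate crop whose colour is already known:
+ //
+ //   Vector<Mat> tiles = new Vector<>();
+ //   PlateUtil.charsSegment(plateCrop, PlateColor.BLUE, tiles, false, "/tmp/");
+ //   // note: in this version the recognised string is only printed to stderr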
+
+ final static int CHAR_SIZE = 20;
+
+ /**
+ * Character pre-processing: pad each character onto a square canvas and resize it to a uniform size.
+ * @param in
+ * @return
+ */
+ private static Mat preprocessChar(Mat in) {
+ int h = in.rows();
+ int w = in.cols();
+ Mat transformMat = Mat.eye(2, 3, CvType.CV_32F);
+ int m = Math.max(w, h);
+ transformMat.put(0, 2, (m - w) / 2f);
+ transformMat.put(1, 2, (m - h) / 2f);
+
+ Mat warpImage = new Mat(m, m, in.type());
+ Imgproc.warpAffine(in, warpImage, transformMat, warpImage.size(), Imgproc.INTER_LINEAR, Core.BORDER_CONSTANT, new Scalar(0));
+
+ Mat resized = new Mat(CHAR_SIZE, CHAR_SIZE, CvType.CV_8UC3);
+ Imgproc.resize(warpImage, resized, resized.size(), 0, 0, Imgproc.INTER_CUBIC);
+
+ return resized;
+ }
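+ // Worked example of the centring above: for a 12x28 input, m = 28, so the tile is pasted at an
+ // x-offset of (28 - 12) / 2 = 8 and a y-offset of 0 on a 28x28 black canvas, then resized to
+ // CHAR_SIZE x CHAR_SIZE (20x20).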
+
+
+
+ /**
+ * Character size check; drops tiles whose size does not fit.
+ * The aspect-ratio test is of limited value here, since the digit 1 alone already skews it badly.
+ * @param r
+ * @return
+ */
+ public static Boolean checkCharSizes(Rect r) {
+ float minHeight = 15f;
+ float maxHeight = 35f;
+ double charAspect = r.size().width / r.size().height;
+ return charAspect < 1 && minHeight <= r.size().height && r.size().height < maxHeight;
+ }
+
+
+
+ /**
+ * Sort the Rects by x position, left to right.
+ * @param vecRect
+ * @param out
+ */
+ public static void sortRect(Vector<Rect> vecRect, Vector<Rect> out) {
+ Map<Integer, Integer> map = Maps.newHashMap();
+ for (int i = 0; i < vecRect.size(); ++i) {
+ map.put(vecRect.get(i).x, i);
+ }
+ Set<Integer> set = map.keySet();
+ Object[] arr = set.toArray();
+ Arrays.sort(arr);
+ for (Object key : arr) {
+ out.add(vecRect.get(map.get(key)));
+ }
+ return;
+ }
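+ // Design note: the map above keys each Rect by its x coordinate, so two characters sharing the
+ // same x would collide. An equivalent sketch using a comparator (same result for distinct x):
+ //
+ //   Vector<Rect> sorted = new Vector<>(vecRect);
+ //   sorted.sort(java.util.Comparator.comparingInt(r -> r.x));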
+
+
+
+ public static float[] projectedHistogram(final Mat img, Direction direction) {
+ int sz = 0;
+ switch (direction) {
+ case HORIZONTAL:
+ sz = img.rows();
+ break;
+
+ case VERTICAL:
+ sz = img.cols();
+ break;
+
+ default:
+ break;
+ }
+
+ // count the non-zero elements of each row or column and store the counts in nonZeroMat
+ float[] nonZeroMat = new float[sz];
+ Core.extractChannel(img, img, 0);
+ for (int j = 0; j < sz; j++) {
+ Mat data = (direction == Direction.HORIZONTAL) ? img.row(j) : img.col(j);
+ int count = Core.countNonZero(data);
+ nonZeroMat[j] = count;
+ }
+ // Normalize histogram
+ float max = 0;
+ for (int j = 0; j < nonZeroMat.length; ++j) {
+ max = Math.max(max, nonZeroMat[j]);
+ }
+ if (max > 0) {
+ for (int j = 0; j < nonZeroMat.length; ++j) {
+ nonZeroMat[j] /= max;
+ }
+ }
+ return nonZeroMat;
+ }
+
+
+ public static Mat features(Mat in, int sizeData) {
+
+ float[] vhist = projectedHistogram(in, Direction.VERTICAL);
+ float[] hhist = projectedHistogram(in, Direction.HORIZONTAL);
+
+ Mat lowData = new Mat();
+ if (sizeData > 0) {
+ Imgproc.resize(in, lowData, new Size(sizeData, sizeData));
+ }
+
+ int numCols = vhist.length + hhist.length + lowData.cols() * lowData.rows();
+ Mat out = new Mat(1, numCols, CvType.CV_32F);
+
+ int j = 0;
+ for (int i = 0; i < vhist.length; ++i, ++j) {
+ out.put(0, j, vhist[i]);
+ }
+ for (int i = 0; i < hhist.length; ++i, ++j) {
+ out.put(0, j, hhist[i]);
+ }
+
+ for (int x = 0; x < lowData.cols(); x++) {
+ for (int y = 0; y < lowData.rows(); y++, ++j) {
+ double[] val = lowData.get(x, y);
+ out.put(0, j, val[0]);
+ }
+ }
+ return out;
+ }
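+ // Layout of the feature vector built above: one value per column of the tile (vertical
+ // histogram), one per row (horizontal histogram), then the flattened sizeData x sizeData resize.
+ // For a 20x20 tile and a sizeData assumed to be 10 for illustration, that is 20 + 20 + 100 = 140 floats.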
+
+
+
+
+ /**
+ * Dilate the image with a 2x2 rectangular kernel.
+ * @param inMat
+ * @return
+ */
+ public static Mat dilate(Mat inMat) {
+ Mat result = inMat.clone();
+ Mat element = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(2, 2));
+ Imgproc.dilate(inMat, result, element);
+ return result;
+ }
+
+ /**
+ * Erode the image with a 2x2 rectangular kernel.
+ * @param inMat
+ * @return
+ */
+ public static Mat erode(Mat inMat) {
+ Mat result = inMat.clone();
+ Mat element = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(2, 2));
+ Imgproc.erode(inMat, result, element);
+ return result;
+ }
+
+
+ /**
+ * Translate the image by a small random offset.
+ * @param inMat
+ * @return
+ */
+ public static Mat randTranslate(Mat inMat) {
+ Random rand = new Random();
+ Mat result = inMat.clone();
+ int ran_x = rand.nextInt(10000) % 5 - 2; // keep the offset within -2..2 pixels
+ int ran_y = rand.nextInt(10000) % 5 - 2;
+ return translateImg(result, ran_x, ran_y);
+ }
+
+
+ /**
+ * Rotate the image by a small random angle.
+ * @param inMat
+ * @return
+ */
+ public static Mat randRotate(Mat inMat) {
+ Random rand = new Random();
+ Mat result = inMat.clone();
+ float angle = (float) (rand.nextInt(10000) % 15 - 7); // keep the rotation within -7..7 degrees
+ return rotateImg(result, angle);
+ }
+
+
+ /**
+ * Translate an image by (offsetx, offsety).
+ * @param img
+ * @param offsetx
+ * @param offsety
+ * @return
+ */
+ public static Mat translateImg(Mat img, int offsetx, int offsety){
+ Mat dst = new Mat();
+ // build the 2x3 translation matrix
+ Mat trans_mat = Mat.zeros(2, 3, CvType.CV_32FC1);
+ trans_mat.put(0, 0, 1);
+ trans_mat.put(0, 2, offsetx);
+ trans_mat.put(1, 1, 1);
+ trans_mat.put(1, 2, offsety);
+ Imgproc.warpAffine(img, dst, trans_mat, img.size()); // affine warp
+ return dst;
+ }
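+ // The matrix built above is the standard 2x3 affine translation
+ //   [ 1  0  offsetx ]
+ //   [ 0  1  offsety ]
+ // so warpAffine shifts every pixel by (offsetx, offsety); the uncovered border is filled with
+ // the default constant border value (black).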
+
+
+ /**
+ * Rotate an image around its centre by the given angle.
+ * @param source
+ * @param angle
+ * @return
+ */
+ public static Mat rotateImg(Mat source, float angle){
+ Point src_center = new Point(source.cols() / 2.0F, source.rows() / 2.0F);
+ Mat rot_mat = Imgproc.getRotationMatrix2D(src_center, angle, 1);
+ Mat dst = new Mat();
+ // Affine warp; a perspective transform could be used instead. In practice this rotation adds little and can even disturb the prediction.
+ Imgproc.warpAffine(source, dst, rot_mat, source.size());
+ return dst;
+ }
+
+
+
+}
diff --git a/原yx-image-recognition/src/main/resources/DetectFace/AverageMaleFace.jpg b/原yx-image-recognition/src/main/resources/DetectFace/AverageMaleFace.jpg
new file mode 100644
index 00000000..3b96e03b
Binary files /dev/null and b/原yx-image-recognition/src/main/resources/DetectFace/AverageMaleFace.jpg differ
diff --git a/原yx-image-recognition/src/main/resources/DetectFace/huge.png b/原yx-image-recognition/src/main/resources/DetectFace/huge.png
new file mode 100644
index 00000000..f082b0f6
Binary files /dev/null and b/原yx-image-recognition/src/main/resources/DetectFace/huge.png differ
diff --git a/原yx-image-recognition/src/main/resources/application-dev.yml b/原yx-image-recognition/src/main/resources/application-dev.yml
new file mode 100644
index 00000000..ae73f24d
--- /dev/null
+++ b/原yx-image-recognition/src/main/resources/application-dev.yml
@@ -0,0 +1,19 @@
+#server:
+# port: 8888
+
+spring:
+ druid:
+ #datasource:
+ #when launched from eclipse, the file under the target directory is loaded by default
+ url: jdbc:sqlite::resource:yx_image_recognition.db?date_string_format=yyyy-MM-dd HH:mm:ss
+ username: sqlite
+ password: sqlite
+ driver-class-name: org.sqlite.JDBC
+ max-active: 10 #maximum number of connections
+ min-idle: 5 #minimum number of idle connections
+ max-wait: 10000 #maximum wait (ms) when acquiring a connection
+ time-between-eviction-runs-millis: 60000 #interval (ms) between idle-connection checks
+ min-evictable-idle-time-millis: 300000 #minimum idle time (ms) before a connection can be evicted
+
+
+
\ No newline at end of file
diff --git a/原yx-image-recognition/src/main/resources/application-sqlite.yml b/原yx-image-recognition/src/main/resources/application-sqlite.yml
new file mode 100644
index 00000000..2b8bd88e
--- /dev/null
+++ b/原yx-image-recognition/src/main/resources/application-sqlite.yml
@@ -0,0 +1,15 @@
+spring:
+ druid:
+ #datasource:
+ #when launched from eclipse, the file under the target directory is loaded by default
+ # :config refers to the config directory next to the running jar
+ url: jdbc:sqlite:config/yx_image_recognition.db?date_string_format=yyyy-MM-dd HH:mm:ss
+ username: sqlite
+ password: sqlite
+ driver-class-name: org.sqlite.JDBC
+ max-active: 10 #maximum number of connections
+ min-idle: 5 #minimum number of idle connections
+ max-wait: 10000 #maximum wait (ms) when acquiring a connection
+ time-between-eviction-runs-millis: 60000 #interval (ms) between idle-connection checks
+ min-evictable-idle-time-millis: 300000 #minimum idle time (ms) before a connection can be evicted
+
\ No newline at end of file
diff --git a/原yx-image-recognition/src/main/resources/application.yml b/原yx-image-recognition/src/main/resources/application.yml
new file mode 100644
index 00000000..5d4a1aa7
--- /dev/null
+++ b/原yx-image-recognition/src/main/resources/application.yml
@@ -0,0 +1,52 @@
+server:
+ port: 16666
+ servlet:
+ context-path: /
+
+spring:
+ application:
+ name : demo
+ mvc:
+ favicon:
+ enabled: true
+ messages:
+ basename: i18n.login
+
+ jackson:
+ time-zone: GMT+8
+ date-format: yyyy-MM-dd HH:mm:ss
+
+## Environment profile: dev or sqlite
+ profiles:
+ active: dev
+
+## Static page (Thymeleaf) configuration
+ thymeleaf:
+ #hot reload: disable the template cache so page changes take effect immediately
+ cache: false
+ prefix: classpath:static/templates/
+ suffix: .html
+ encoding: UTF-8
+
+## Mybatis config
+mybatis:
+ mapperLocations: classpath:mapper/**/*.xml
+ configLocation: classpath:mybatis.xml
+
+## pagehelper
+pagehelper:
+ helperDialect: sqlite #postgresql
+ reasonable: true
+ supportMethodsArguments: true
+ params: countSql
+ count: countSql
+ returnPageInfo: check
+
+## Logging
+logging:
+ config: classpath:logback-spring.xml
+## Start logging
+ level:
+ root: INFO
+
+
diff --git a/原yx-image-recognition/src/main/resources/banner.txt b/原yx-image-recognition/src/main/resources/banner.txt
new file mode 100644
index 00000000..a8b2db60
--- /dev/null
+++ b/原yx-image-recognition/src/main/resources/banner.txt
@@ -0,0 +1,7 @@
+██╗ ██╗██╗ ██╗██╗ ██╗██╗ ██╗███████╗
+╚██╗ ██╔╝██║ ██║╚██╗██╔╝██║ ██║██╔════╝
+ ╚████╔╝ ██║ ██║ ╚███╔╝ ██║ ██║█████╗
+ ╚██╔╝ ██║ ██║ ██╔██╗ ██║ ██║██╔══╝
+ ██║ ╚██████╔╝██╔╝ ██╗╚██████╔╝███████╗
+ ╚═╝ ╚═════╝ ╚═╝ ╚═╝ ╚═════╝ ╚══════╝
+ :: YX Boot :: Power By SpringBoot (v2.1.0.RELEASE)
diff --git a/原yx-image-recognition/src/main/resources/easypr-c++/README.md b/原yx-image-recognition/src/main/resources/easypr-c++/README.md
new file mode 100644
index 00000000..0a0c03e8
--- /dev/null
+++ b/原yx-image-recognition/src/main/resources/easypr-c++/README.md
@@ -0,0 +1,5 @@
+# easypr-c++
+
+#### Introduction
+C++ implementation from the original EasyPR project. The code is kept here for reference; if you are interested in studying the original EasyPR C++ project, you can read through it yourself.
+
diff --git a/原yx-image-recognition/src/main/resources/easypr-c++/ann_train.cpp b/原yx-image-recognition/src/main/resources/easypr-c++/ann_train.cpp
new file mode 100644
index 00000000..5fc05bbd
--- /dev/null
+++ b/原yx-image-recognition/src/main/resources/easypr-c++/ann_train.cpp
@@ -0,0 +1,324 @@
+#include <numeric>
+#include <ctime>
+
+#include "easypr/train/ann_train.h"
+#include "easypr/config.h"
+#include "easypr/core/chars_identify.h"
+#include "easypr/core/feature.h"
+#include "easypr/core/core_func.h"
+#include "easypr/train/create_data.h"
+#include "easypr/util/util.h"
+
+
+
+// training code from the original C++ implementation
+namespace easypr {
+
+AnnTrain::AnnTrain(const char* chars_folder, const char* xml): chars_folder_(chars_folder), ann_xml_(xml) {
+ ann_ = cv::ml::ANN_MLP::create();
+ type = 0; // type = 0: all characters; type = 1: Chinese characters only
+ kv_ = std::shared_ptr<Kv>(new Kv);
+ kv_->load("resources/text/province_mapping"); // e.g. zh_cuan -> 川, zh_gan1 -> 甘
+}
+
+void AnnTrain::train() {
+
+ int classNumber = 0;
+
+ cv::Mat layers;
+
+ int input_number = 0;
+ int hidden_number = 0;
+ int output_number = 0;
+
+ if (type == 0) {
+ classNumber = kCharsTotalNumber;
+ input_number = kAnnInput;
+ hidden_number = kNeurons;
+ output_number = classNumber;
+ } else if (type == 1) {
+ classNumber = kChineseNumber;
+
+ input_number = kAnnInput;
+ hidden_number = kNeurons;
+ output_number = classNumber;
+ }
+
+ int N = input_number;
+ int m = output_number;
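+ // Rule-of-thumb layer sizes for the optional two-hidden-layer network below; they follow the
+ // usual sqrt-based heuristics for choosing hidden-layer widths from input size N and output size m.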
+ int first_hidden_neurons = int(std::sqrt((m + 2) * N) + 2 * std::sqrt(N / (m + 2)));
+ int second_hidden_neurons = int(m * std::sqrt(N / (m + 2)));
+
+ bool useTLFN = false;
+ if (!useTLFN) {
+ layers.create(1, 3, CV_32SC1);
+ layers.at<int>(0) = input_number;
+ layers.at<int>(1) = hidden_number;
+ layers.at<int>(2) = output_number;
+ } else {
+ // a network with two hidden layers is hard to train, so don't bother
+ fprintf(stdout, ">> Use two-layers neural networks,\n");
+ layers.create(1, 4, CV_32SC1);
+ layers.at<int>(0) = input_number;
+ layers.at<int>(1) = first_hidden_neurons;
+ layers.at<int>(2) = second_hidden_neurons;
+ layers.at<int>(3) = output_number;
+ }
+
+ ann_->setLayerSizes(layers);
+ ann_->setActivationFunction(cv::ml::ANN_MLP::SIGMOID_SYM, 1, 1);
+ ann_->setTrainMethod(cv::ml::ANN_MLP::TrainingMethods::BACKPROP);
+ ann_->setTermCriteria(cvTermCriteria(CV_TERMCRIT_ITER, 30000, 0.0001));
+ ann_->setBackpropWeightScale(0.1);
+ ann_->setBackpropMomentumScale(0.1);
+
+ auto files = Utils::getFiles(chars_folder_);
+ if (files.size() == 0) {
+ fprintf(stdout, "No file found in the train folder!\n");
+ return;
+ }
+
+ // use the raw samples, or the raw samples plus synthesised ones
+ auto traindata = sdata(350);
+
+ ann_->train(traindata);
+
+ ann_->save(ann_xml_);
+
+ test();
+}
+
+// recognise a Chinese character
+std::pair<std::string, std::string> AnnTrain::identifyChinese(cv::Mat input) {
+ cv::Mat feature = charFeatures2(input, kPredictSize);
+ float maxVal = -2;
+ int result = 0;
+
+ cv::Mat output(1, kChineseNumber, CV_32FC1);
+ ann_->predict(feature, output);
+
+ for (int j = 0; j < kChineseNumber; j++) {
+ float val = output.at<float>(j);
+ if (val > maxVal) {
+ maxVal = val;
+ result = j;
+ }
+ }
+
+ auto index = result + kCharsTotalNumber - kChineseNumber;
+ const char* key = kChars[index];
+ std::string s = key;
+ std::string province = kv_->get(s);
+
+ return std::make_pair(s, province);
+}
+
+// recognise a character
+std::pair<std::string, std::string> AnnTrain::identify(cv::Mat input) {
+ cv::Mat feature = charFeatures2(input, kPredictSize);
+ float maxVal = -2;
+ int result = 0;
+
+ cv::Mat output(1, kCharsTotalNumber, CV_32FC1);
+ ann_->predict(feature, output);
+ for (int j = 0; j < kCharsTotalNumber; j++) {
+ float val = output.at<float>(j);
+ if (val > maxVal) {
+ maxVal = val;
+ result = j;
+ }
+ }
+
+ auto index = result;
+ if (index < kCharactersNumber) {
+ return std::make_pair(kChars[index], kChars[index]);
+ }
+ else {
+ const char* key = kChars[index];
+ std::string s = key;
+ std::string province = kv_->get(s);
+ return std::make_pair(s, province);
+ }
+}
+
+// run the test set and report accuracy
+void AnnTrain::test() {
+ assert(chars_folder_);
+
+ int classNumber = 0;
+ if (type == 0) classNumber = kCharsTotalNumber;
+ if (type == 1) classNumber = kChineseNumber;
+
+ int corrects_all = 0, sum_all = 0;
+ std::vector<float> rate_list;
+ for (int i = 0; i < classNumber; ++i) {
+ auto char_key = kChars[i + kCharsTotalNumber - classNumber];
+ char sub_folder[512] = { 0 };
+
+ sprintf(sub_folder, "%s/%s", chars_folder_, char_key);
+ fprintf(stdout, ">> Testing characters %s in %s \n", char_key, sub_folder);
+
+ auto chars_files = utils::getFiles(sub_folder);
+ int corrects = 0, sum = 0;
+ std::vector<std::pair<std::string, std::string>> error_files;
+
+ for (auto file : chars_files) {
+ auto img = cv::imread(file, 0); // read as a grayscale image
+ if (!img.data) {
+ continue;
+ }
+ std::pair<std::string, std::string> ch;
+
+ if (type == 0) ch = identify(img);
+ if (type == 1) ch = identifyChinese(img);
+
+ if (ch.first == char_key) {
+ ++corrects;
+ ++corrects_all;
+ } else {
+ error_files.push_back(std::make_pair(utils::getFileName(file), ch.second));
+ }
+ ++sum;
+ ++sum_all;
+ }
+ float rate = (float)corrects / (sum == 0 ? 1 : sum);
+ rate_list.push_back(rate);
+
+ std::string error_string;
+ auto end = error_files.end();
+ if (error_files.size() >= 10) {
+ end -= static_cast<size_t>(error_files.size() * (1 - 0.1));
+ }
+ for (auto k = error_files.begin(); k != end; ++k) {
+ auto kv = *k;
+ error_string.append(" ").append(kv.first).append(": ").append(kv.second);
+ if (k != end - 1) {
+ error_string.append(",\n");
+ } else {
+ error_string.append("\n ...");
+ }
+ }
+ fprintf(stdout, ">> [\n%s\n ]\n", error_string.c_str());
+ }
+ fprintf(stdout, ">> [sum_all: %d, correct_all: %d, rate: %.4f]\n", sum_all, corrects_all, (float)corrects_all / (sum_all == 0 ? 1 : sum_all));
+
+ double rate_sum = std::accumulate(rate_list.begin(), rate_list.end(), 0.0);
+ double rate_mean = rate_sum / (rate_list.size() == 0 ? 1 : rate_list.size());
+
+ fprintf(stdout, ">> [classNumber: %d, avg_rate: %.4f]\n", classNumber, rate_mean);
+}
+
+// generate a synthesised (augmented) sample
+cv::Mat getSyntheticImage(const Mat& image) {
+ int rand_type = rand();
+ Mat result = image.clone();
+
+ if (rand_type % 2 == 0) {
+ int ran_x = rand() % 5 - 2;
+ int ran_y = rand() % 5 - 2;
+
+ result = translateImg(result, ran_x, ran_y);
+ } else if (rand_type % 2 != 0) {
+ float angle = float(rand() % 15 - 7);
+ result = rotateImg(result, angle);
+ }
+
+ return result;
+}
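+// Each synthesised sample above is either shifted by a small random offset (roughly within
+// ±2 pixels) or rotated by a small random angle (roughly within ±7 degrees), mirroring the
+// randTranslate / randRotate helpers in the Java PlateUtil.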
+
+// build the training data from the character folders, padding each class with synthesised samples
+cv::Ptr<cv::ml::TrainData> AnnTrain::sdata(size_t number_for_count) {
+ assert(chars_folder_);
+
+ cv::Mat samples;
+ std::vector<int> labels;
+
+ int classNumber = 0;
+ if (type == 0) classNumber = kCharsTotalNumber;
+ if (type == 1) classNumber = kChineseNumber;
+
+ srand((unsigned)time(0));
+ for (int i = 0; i < classNumber; ++i) {
+
+ auto char_key = kChars[i + kCharsTotalNumber - classNumber];
+ char sub_folder[512] = { 0 };
+
+ sprintf(sub_folder, "%s/%s", chars_folder_, char_key);
+ fprintf(stdout, ">> Testing characters %s in %s \n", char_key, sub_folder);
+
+ auto chars_files = utils::getFiles(sub_folder);
+ size_t char_size = chars_files.size();
+ fprintf(stdout, ">> Characters count: %d \n", int(char_size));
+
+ std::vector<cv::Mat> matVec;
+ matVec.reserve(number_for_count);
+ for (auto file : chars_files) {
+ auto img = cv::imread(file, 0); // a grayscale image
+ matVec.push_back(img);
+ }
+
+ for (int t = 0; t < (int)number_for_count - (int)char_size; t++) {
+ int rand_range = char_size + t;
+ int ran_num = rand() % rand_range;
+ auto img = matVec.at(ran_num);
+ auto simg = getSyntheticImage(img);
+ matVec.push_back(simg);
+ }
+
+ for (auto img : matVec) {
+ auto fps = charFeatures2(img, kPredictSize);
+ samples.push_back(fps);
+ labels.push_back(i);
+ }
+ }
+
+ cv::Mat samples_;
+ samples.convertTo(samples_, CV_32F);
+ cv::Mat train_classes = cv::Mat::zeros((int)labels.size(), classNumber, CV_32F);
+
+ for (int i = 0; i < train_classes.rows; ++i) {
+ train_classes.at<float>(i, labels[i]) = 1.f;
+ }
+
+ return cv::ml::TrainData::create(samples_, cv::ml::SampleTypes::ROW_SAMPLE, train_classes);
+}
+
+cv::Ptr<cv::ml::TrainData> AnnTrain::tdata() {
+ assert(chars_folder_);
+
+ cv::Mat samples;
+ std::vector<int> labels;
+
+ std::cout << "Collecting chars in " << chars_folder_ << std::endl;
+
+ int classNumber = 0;
+ if (type == 0) classNumber = kCharsTotalNumber;
+ if (type == 1) classNumber = kChineseNumber;
+
+ for (int i = 0; i < classNumber; ++i) {
+ auto char_key = kChars[i + kCharsTotalNumber - classNumber];
+ char sub_folder[512] = {0};
+
+ sprintf(sub_folder, "%s/%s", chars_folder_, char_key);
+ std::cout << " >> Featuring characters " << char_key << " in " << sub_folder << std::endl;
+
+ auto chars_files = utils::getFiles(sub_folder);
+ for (auto file : chars_files) {
+ auto img = cv::imread(file, 0); // read as a grayscale image
+ auto fps = charFeatures2(img, kPredictSize);
+ samples.push_back(fps);
+ labels.push_back(i);
+ }
+ }
+
+ cv::Mat samples_;
+ samples.convertTo(samples_, CV_32F);
+ cv::Mat train_classes = cv::Mat::zeros((int)labels.size(), classNumber, CV_32F);
+
+ for (int i = 0; i < train_classes.rows; ++i) {
+ train_classes.at<float>(i, labels[i]) = 1.f;
+ }
+
+ return cv::ml::TrainData::create(samples_, cv::ml::SampleTypes::ROW_SAMPLE, train_classes);
+}
+}
diff --git a/原yx-image-recognition/src/main/resources/easypr-c++/chars_identify.cpp b/原yx-image-recognition/src/main/resources/easypr-c++/chars_identify.cpp
new file mode 100644
index 00000000..a88d7277
--- /dev/null
+++ b/原yx-image-recognition/src/main/resources/easypr-c++/chars_identify.cpp
@@ -0,0 +1,454 @@
+#include "easypr/core/chars_identify.h"
+#include "easypr/core/character.hpp"
+#include "easypr/core/core_func.h"
+#include "easypr/core/feature.h"
+#include "easypr/core/params.h"
+#include "easypr/config.h"
+
+using namespace cv;
+
+namespace easypr {
+
+CharsIdentify* CharsIdentify::instance_ = nullptr;
+
+CharsIdentify* CharsIdentify::instance() {
+ if (!instance_) {
+ instance_ = new CharsIdentify;
+ }
+ return instance_;
+}
+
+CharsIdentify::CharsIdentify() {
+ LOAD_ANN_MODEL(ann_, kDefaultAnnPath);
+ LOAD_ANN_MODEL(annChinese_, kChineseAnnPath);
+ LOAD_ANN_MODEL(annGray_, kGrayAnnPath);
+
+ kv_ = std::shared_ptr<Kv>(new Kv);
+ kv_->load(kChineseMappingPath);
+
+ extractFeature = getGrayPlusProject;
+}
+
+void CharsIdentify::LoadModel(std::string path) {
+ if (path != std::string(kDefaultAnnPath)) {
+ if (!ann_->empty())
+ ann_->clear();
+ LOAD_ANN_MODEL(ann_, path);
+ }
+}
+
+void CharsIdentify::LoadChineseModel(std::string path) {
+ if (path != std::string(kChineseAnnPath)) {
+ if (!annChinese_->empty())
+ annChinese_->clear();
+ LOAD_ANN_MODEL(annChinese_, path);
+ }
+}
+
+void CharsIdentify::LoadGrayChANN(std::string path) {
+ if (path != std::string(kGrayAnnPath)) {
+ if (!annGray_->empty())
+ annGray_->clear();
+ LOAD_ANN_MODEL(annGray_, path);
+ }
+}
+
+void CharsIdentify::LoadChineseMapping(std::string path) {
+ kv_->clear();
+ kv_->load(path);
+}
+
+void CharsIdentify::classify(cv::Mat featureRows, std::vector<int>& out_maxIndexs,
+ std::vector<float>& out_maxVals, std::vector<bool> isChineseVec){
+ int rowNum = featureRows.rows;
+
+ cv::Mat output(rowNum, kCharsTotalNumber, CV_32FC1);
+ ann_->predict(featureRows, output);
+
+ for (int output_index = 0; output_index < rowNum; output_index++) {
+ Mat output_row = output.row(output_index);
+ int result = 0;
+ float maxVal = -2.f;
+ bool isChinses = isChineseVec[output_index];
+ if (!isChinses) {
+ result = 0;
+ for (int j = 0; j < kCharactersNumber; j++) {
+ float val = output_row.at<float>(j);
+ // std::cout << "j:" << j << "val:" << val << std::endl;
+ if (val > maxVal) {
+ maxVal = val;
+ result = j;
+ }
+ }
+ }
+ else {
+ result = kCharactersNumber;
+ for (int j = kCharactersNumber; j < kCharsTotalNumber; j++) {
+ float val = output_row.at<float>(j);
+ //std::cout << "j:" << j << "val:" << val << std::endl;
+ if (val > maxVal) {
+ maxVal = val;
+ result = j;
+ }
+ }
+ }
+ out_maxIndexs[output_index] = result;
+ out_maxVals[output_index] = maxVal;
+ }
+}
+
+
+void CharsIdentify::classify(std::vector<CCharacter>& charVec){
+ size_t charVecSize = charVec.size();
+
+ if (charVecSize == 0)
+ return;
+
+ Mat featureRows;
+ for (size_t index = 0; index < charVecSize; index++) {
+ Mat charInput = charVec[index].getCharacterMat();
+ Mat feature = charFeatures(charInput, kPredictSize);
+ featureRows.push_back(feature);
+ }
+
+ cv::Mat output(charVecSize, kCharsTotalNumber, CV_32FC1);
+ ann_->predict(featureRows, output);
+
+ for (size_t output_index = 0; output_index < charVecSize; output_index++) {
+ CCharacter& character = charVec[output_index];
+ Mat output_row = output.row(output_index);
+
+ int result = 0;
+ float maxVal = -2.f;
+ std::string label = "";
+
+ bool isChinses = character.getIsChinese();
+ if (!isChinses) {
+ result = 0;
+ for (int j = 0; j < kCharactersNumber; j++) {
+ float val = output_row.at<float>(j);
+ //std::cout << "j:" << j << "val:" << val << std::endl;
+ if (val > maxVal) {
+ maxVal = val;
+ result = j;
+ }
+ }
+ label = std::make_pair(kChars[result], kChars[result]).second;
+ }
+ else {
+ result = kCharactersNumber;
+ for (int j = kCharactersNumber; j < kCharsTotalNumber; j++) {
+ float val = output_row.at<float>(j);
+ //std::cout << "j:" << j << "val:" << val << std::endl;
+ if (val > maxVal) {
+ maxVal = val;
+ result = j;
+ }
+ }
+ const char* key = kChars[result];
+ std::string s = key;
+ std::string province = kv_->get(s);
+ label = std::make_pair(s, province).second;
+ }
+ /*std::cout << "result:" << result << std::endl;
+ std::cout << "maxVal:" << maxVal << std::endl;*/
+ character.setCharacterScore(maxVal);
+ character.setCharacterStr(label);
+ }
+}
+
+
+void CharsIdentify::classifyChineseGray(std::vector<CCharacter>& charVec){
+ size_t charVecSize = charVec.size();
+ if (charVecSize == 0)
+ return;
+
+ Mat featureRows;
+ for (size_t index = 0; index < charVecSize; index++) {
+ Mat charInput = charVec[index].getCharacterMat();
+ cv::Mat feature;
+ extractFeature(charInput, feature);
+ featureRows.push_back(feature);
+ }
+
+ cv::Mat output(charVecSize, kChineseNumber, CV_32FC1);
+ annGray_->predict(featureRows, output);
+
+ for (size_t output_index = 0; output_index < charVecSize; output_index++) {
+ CCharacter& character = charVec[output_index];
+ Mat output_row = output.row(output_index);
+ bool isChinese = true;
+
+ float maxVal = -2;
+ int result = 0;
+
+ for (int j = 0; j < kChineseNumber; j++) {
+ float val = output_row.at<float>(j);
+ //std::cout << "j:" << j << "val:" << val << std::endl;
+ if (val > maxVal) {
+ maxVal = val;
+ result = j;
+ }
+ }
+
+ // no match
+ if (-1 == result) {
+ result = 0;
+ maxVal = 0;
+ isChinese = false;
+ }
+
+ auto index = result + kCharsTotalNumber - kChineseNumber;
+ const char* key = kChars[index];
+ std::string s = key;
+ std::string province = kv_->get(s);
+
+ /*std::cout << "result:" << result << std::endl;
+ std::cout << "maxVal:" << maxVal << std::endl;*/
+
+ character.setCharacterScore(maxVal);
+ character.setCharacterStr(province);
+ character.setIsChinese(isChinese);
+ }
+}
+
+void CharsIdentify::classifyChinese(std::vector<CCharacter>& charVec){
+ size_t charVecSize = charVec.size();
+
+ if (charVecSize == 0)
+ return;
+
+ Mat featureRows;
+ for (size_t index = 0; index < charVecSize; index++) {
+ Mat charInput = charVec[index].getCharacterMat();
+ Mat feature = charFeatures(charInput, kChineseSize);
+ featureRows.push_back(feature);
+ }
+
+ cv::Mat output(charVecSize, kChineseNumber, CV_32FC1);
+ annChinese_->predict(featureRows, output);
+
+ for (size_t output_index = 0; output_index < charVecSize; output_index++) {
+ CCharacter& character = charVec[output_index];
+ Mat output_row = output.row(output_index);
+ bool isChinese = true;
+
+ float maxVal = -2;
+ int result = 0;
+
+ for (int j = 0; j < kChineseNumber; j++) {
+ float val = output_row.at<float>(j);
+ //std::cout << "j:" << j << "val:" << val << std::endl;
+ if (val > maxVal) {
+ maxVal = val;
+ result = j;
+ }
+ }
+
+ // no match
+ if (-1 == result) {
+ result = 0;
+ maxVal = 0;
+ isChinese = false;
+ }
+
+ auto index = result + kCharsTotalNumber - kChineseNumber;
+ const char* key = kChars[index];
+ std::string s = key;
+ std::string province = kv_->get(s);
+
+ /*std::cout << "result:" << result << std::endl;
+ std::cout << "maxVal:" << maxVal << std::endl;*/
+
+ character.setCharacterScore(maxVal);
+ character.setCharacterStr(province);
+ character.setIsChinese(isChinese);
+ }
+}
+
+int CharsIdentify::classify(cv::Mat f, float& maxVal, bool isChinses, bool isAlphabet){
+ int result = 0;
+
+ cv::Mat output(1, kCharsTotalNumber, CV_32FC1);
+ ann_->predict(f, output);
+
+ maxVal = -2.f;
+ if (!isChinses) {
+ if (!isAlphabet) {
+ result = 0;
+ for (int j = 0; j < kCharactersNumber; j++) {
+ float val = output.at<float>(j);
+ // std::cout << "j:" << j << "val:" << val << std::endl;
+ if (val > maxVal) {
+ maxVal = val;
+ result = j;
+ }
+ }
+ }
+ else {
+ result = 0;
+ // begin with 11th char, which is 'A'
+ for (int j = 10; j < kCharactersNumber; j++) {
+ float val = output.at<float>(j);
+ // std::cout << "j:" << j << "val:" << val << std::endl;
+ if (val > maxVal) {
+ maxVal = val;
+ result = j;
+ }
+ }
+ }
+ }
+ else {
+ result = kCharactersNumber;
+ for (int j = kCharactersNumber; j < kCharsTotalNumber; j++) {
+ float val = output.at<float>(j);
+ //std::cout << "j:" << j << "val:" << val << std::endl;
+ if (val > maxVal) {
+ maxVal = val;
+ result = j;
+ }
+ }
+ }
+ //std::cout << "maxVal:" << maxVal << std::endl;
+ return result;
+}
+
+bool CharsIdentify::isCharacter(cv::Mat input, std::string& label, float& maxVal, bool isChinese) {
+ cv::Mat feature = charFeatures(input, kPredictSize);
+ auto index = static_cast<int>(classify(feature, maxVal, isChinese));
+
+ if (isChinese) {
+ //std::cout << "maxVal:" << maxVal << std::endl;
+ }
+
+ float chineseMaxThresh = 0.2f;
+
+ if (maxVal >= 0.9 || (isChinese && maxVal >= chineseMaxThresh)) {
+ if (index < kCharactersNumber) {
+ label = std::make_pair(kChars[index], kChars[index]).second;
+ }
+ else {
+ const char* key = kChars[index];
+ std::string s = key;
+ std::string province = kv_->get(s);
+ label = std::make_pair(s, province).second;
+ }
+ return true;
+ }
+ else
+ return false;
+}
+
+std::pair<std::string, std::string> CharsIdentify::identifyChinese(cv::Mat input, float& out, bool& isChinese) {
+ cv::Mat feature = charFeatures(input, kChineseSize);
+ float maxVal = -2;
+ int result = 0;
+
+ cv::Mat output(1, kChineseNumber, CV_32FC1);
+ annChinese_->predict(feature, output);
+
+ for (int j = 0; j < kChineseNumber; j++) {
+ float val = output.at<float>(j);
+ //std::cout << "j:" << j << "val:" << val << std::endl;
+ if (val > maxVal) {
+ maxVal = val;
+ result = j;
+ }
+ }
+
+ // no match
+ if (-1 == result) {
+ result = 0;
+ maxVal = 0;
+ isChinese = false;
+ }
+ else if (maxVal > 0.9){
+ isChinese = true;
+ }
+
+ auto index = result + kCharsTotalNumber - kChineseNumber;
+ const char* key = kChars[index];
+ std::string s = key;
+ std::string province = kv_->get(s);
+ out = maxVal;
+
+ return std::make_pair(s, province);
+}
+
+std::pair<std::string, std::string> CharsIdentify::identifyChineseGray(cv::Mat input, float& out, bool& isChinese) {
+ cv::Mat feature;
+ extractFeature(input, feature);
+ float maxVal = -2;
+ int result = 0;
+ cv::Mat output(1, kChineseNumber, CV_32FC1);
+ annGray_->predict(feature, output);
+
+ for (int j = 0; j < kChineseNumber; j++) {
+ float val = output.at<float>(j);
+ //std::cout << "j:" << j << "val:" << val << std::endl;
+ if (val > maxVal) {
+ maxVal = val;
+ result = j;
+ }
+ }
+ // no match
+ if (-1 == result) {
+ result = 0;
+ maxVal = 0;
+ isChinese = false;
+ } else if (maxVal > 0.9){
+ isChinese = true;
+ }
+ auto index = result + kCharsTotalNumber - kChineseNumber;
+ const char* key = kChars[index];
+ std::string s = key;
+ std::string province = kv_->get(s);
+ out = maxVal;
+ return std::make_pair(s, province);
+}
+
+
+std::pair<std::string, std::string> CharsIdentify::identify(cv::Mat input, bool isChinese, bool isAlphabet) {
+ cv::Mat feature = charFeatures(input, kPredictSize);
+ float maxVal = -2;
+ auto index = static_cast<int>(classify(feature, maxVal, isChinese, isAlphabet));
+ if (index < kCharactersNumber) {
+ return std::make_pair(kChars[index], kChars[index]);
+ }
+ else {
+ const char* key = kChars[index];
+ std::string s = key;
+ std::string province = kv_->get(s);
+ return std::make_pair(s, province);
+ }
+}
+
+int CharsIdentify::identify(std::vector<cv::Mat> inputs, std::vector<std::pair<std::string, std::string>>& outputs,
+ std::vector<bool> isChineseVec) {
+ Mat featureRows;
+ size_t input_size = inputs.size();
+ for (size_t i = 0; i < input_size; i++) {
+ Mat input = inputs[i];
+ cv::Mat feature = charFeatures(input, kPredictSize);
+ featureRows.push_back(feature);
+ }
+
+ std::vector<int> maxIndexs;
+ std::vector<float> maxVals;
+ classify(featureRows, maxIndexs, maxVals, isChineseVec);
+
+ for (size_t row_index = 0; row_index < input_size; row_index++) {
+ int index = maxIndexs[row_index];
+ if (index < kCharactersNumber) {
+ outputs[row_index] = std::make_pair(kChars[index], kChars[index]);
+ }
+ else {
+ const char* key = kChars[index];
+ std::string s = key;
+ std::string province = kv_->get(s);
+ outputs[row_index] = std::make_pair(s, province);
+ }
+ }
+ return 0;
+}
+}
diff --git a/原yx-image-recognition/src/main/resources/easypr-c++/chars_recognise.cpp b/原yx-image-recognition/src/main/resources/easypr-c++/chars_recognise.cpp
new file mode 100644
index 00000000..d101fd37
--- /dev/null
+++ b/原yx-image-recognition/src/main/resources/easypr-c++/chars_recognise.cpp
@@ -0,0 +1,117 @@
+#include "easypr/core/chars_recognise.h"
+#include "easypr/core/character.hpp"
+#include "easypr/util/util.h"
+#include <ctime>
+
+namespace easypr {
+
+CCharsRecognise::CCharsRecognise() { m_charsSegment = new CCharsSegment(); }
+
+CCharsRecognise::~CCharsRecognise() { SAFE_RELEASE(m_charsSegment); }
+
+int CCharsRecognise::charsRecognise(Mat plate, std::string& plateLicense) {
+ std::vector<Mat> matChars;
+ int result = m_charsSegment->charsSegment(plate, matChars);
+ if (result == 0) {
+ int num = matChars.size();
+ for (int j = 0; j < num; j++)
+ {
+ Mat charMat = matChars.at(j);
+ bool isChinses = false;
+ float maxVal = 0;
+ if (j == 0) {
+ bool judge = true;
+ isChinses = true;
+ auto character = CharsIdentify::instance()->identifyChinese(charMat, maxVal, judge);
+ plateLicense.append(character.second);
+ }
+ else {
+ isChinses = false;
+ auto character = CharsIdentify::instance()->identify(charMat, isChinses);
+ plateLicense.append(character.second);
+ }
+ }
+
+ }
+ if (plateLicense.size() < 7) {
+ return -1;
+ }
+
+ return result;
+}
+
+
+int CCharsRecognise::charsRecognise(CPlate& plate, std::string& plateLicense) {
+ std::vector<Mat> matChars;
+ std::vector<Mat> grayChars;
+ Mat plateMat = plate.getPlateMat();
+ if (0) writeTempImage(plateMat, "plateMat/plate");
+ Color color;
+ if (plate.getPlateLocateType() == CMSER) {
+ color = plate.getPlateColor();
+ }
+ else {
+ int w = plateMat.cols;
+ int h = plateMat.rows;
+ Mat tmpMat = plateMat(Rect_<double>(w * 0.1, h * 0.1, w * 0.8, h * 0.8));
+ color = getPlateType(tmpMat, true);
+ }
+
+ int result = m_charsSegment->charsSegmentUsingOSTU(plateMat, matChars, grayChars, color);
+
+ if (result == 0) {
+ int num = matChars.size();
+ for (int j = 0; j < num; j++)
+ {
+ Mat charMat = matChars.at(j);
+ Mat grayChar = grayChars.at(j);
+ if (color != Color::BLUE)
+ grayChar = 255 - grayChar;
+
+ bool isChinses = false;
+ std::pair<std::string, std::string> character;
+ float maxVal;
+ if (0 == j) {
+ isChinses = true;
+ bool judge = true;
+ character = CharsIdentify::instance()->identifyChineseGray(grayChar, maxVal, judge);
+ plateLicense.append(character.second);
+
+ // set plate chinese mat and str
+ plate.setChineseMat(grayChar);
+ plate.setChineseKey(character.first);
+ if (0) writeTempImage(grayChar, "char_data/" + character.first + "/chars_");
+ }
+ else if (1 == j) {
+ isChinses = false;
+ bool isAbc = true;
+ character = CharsIdentify::instance()->identify(charMat, isChinses, isAbc);
+ plateLicense.append(character.second);
+ }
+ else {
+ isChinses = false;
+ SHOW_IMAGE(charMat, 0);
+ character = CharsIdentify::instance()->identify(charMat, isChinses);
+ plateLicense.append(character.second);
+ }
+
+ CCharacter charResult;
+ charResult.setCharacterMat(charMat);
+ charResult.setCharacterGrayMat(grayChar);
+ if (isChinses)
+ charResult.setCharacterStr(character.first);
+ else
+ charResult.setCharacterStr(character.second);
+
+ plate.addReutCharacter(charResult);
+ }
+ if (plateLicense.size() < 7) {
+ return -1;
+ }
+ }
+
+ return result;
+}
+
+
+}
\ No newline at end of file
diff --git a/原yx-image-recognition/src/main/resources/easypr-c++/chars_segment.cpp b/原yx-image-recognition/src/main/resources/easypr-c++/chars_segment.cpp
new file mode 100644
index 00000000..3edc85a0
--- /dev/null
+++ b/原yx-image-recognition/src/main/resources/easypr-c++/chars_segment.cpp
@@ -0,0 +1,1056 @@
+#include "easypr/core/chars_segment.h"
+#include "easypr/core/chars_identify.h"
+#include "easypr/core/core_func.h"
+#include "easypr/core/params.h"
+#include "easypr/config.h"
+#include "thirdparty/mser/mser2.hpp"
+
+namespace easypr {
+
+const float DEFAULT_BLUEPERCEMT = 0.3f;
+const float DEFAULT_WHITEPERCEMT = 0.1f;
+
+CCharsSegment::CCharsSegment() {
+ m_LiuDingSize = DEFAULT_LIUDING_SIZE;
+ m_theMatWidth = DEFAULT_MAT_WIDTH;
+
+ m_ColorThreshold = DEFAULT_COLORTHRESHOLD;
+ m_BluePercent = DEFAULT_BLUEPERCEMT;
+ m_WhitePercent = DEFAULT_WHITEPERCEMT;
+
+ m_debug = DEFAULT_DEBUG;
+}
+
+
+bool CCharsSegment::verifyCharSizes(Mat r) {
+ // Char sizes 45x90
+ float aspect = 45.0f / 90.0f;
+ float charAspect = (float)r.cols / (float)r.rows;
+ float error = 0.7f;
+ float minHeight = 10.f;
+ float maxHeight = 35.f;
+ // We have a different aspect ratio for number 1, and it can be ~0.2
+ float minAspect = 0.05f;
+ float maxAspect = aspect + aspect * error;
+ // area of pixels
+ int area = cv::countNonZero(r);
+ // bb area
+ int bbArea = r.cols * r.rows;
+ //% of pixel in area
+ int percPixels = area / bbArea;
+
+ if (percPixels <= 1 && charAspect > minAspect && charAspect < maxAspect &&
+ r.rows >= minHeight && r.rows < maxHeight)
+ return true;
+ else
+ return false;
+}
+
+
+Mat CCharsSegment::preprocessChar(Mat in) {
+ // Remap image
+ int h = in.rows;
+ int w = in.cols;
+
+ int charSize = CHAR_SIZE;
+
+ Mat transformMat = Mat::eye(2, 3, CV_32F);
+ int m = max(w, h);
+ transformMat.at<float>(0, 2) = float(m / 2 - w / 2);
+ transformMat.at<float>(1, 2) = float(m / 2 - h / 2);
+
+ Mat warpImage(m, m, in.type());
+ warpAffine(in, warpImage, transformMat, warpImage.size(), INTER_LINEAR,
+ BORDER_CONSTANT, Scalar(0));
+
+ Mat out;
+ resize(warpImage, out, Size(charSize, charSize));
+
+ return out;
+}
+
+
+//! choose the best threshold method for the Chinese character
+void CCharsSegment::judgeChinese(Mat in, Mat& out, Color plateType) {
+ Mat auxRoi = in;
+ float valOstu = -1.f, valAdap = -1.f;
+ Mat roiOstu, roiAdap;
+ bool isChinese = true;
+ if (1) {
+ if (BLUE == plateType) {
+ threshold(auxRoi, roiOstu, 0, 255, CV_THRESH_BINARY + CV_THRESH_OTSU);
+ }
+ else if (YELLOW == plateType) {
+ threshold(auxRoi, roiOstu, 0, 255, CV_THRESH_BINARY_INV + CV_THRESH_OTSU);
+ }
+ else if (WHITE == plateType) {
+ threshold(auxRoi, roiOstu, 0, 255, CV_THRESH_BINARY_INV + CV_THRESH_OTSU);
+ }
+ else {
+ threshold(auxRoi, roiOstu, 0, 255, CV_THRESH_OTSU + CV_THRESH_BINARY);
+ }
+ roiOstu = preprocessChar(roiOstu);
+ if (0) {
+ imshow("roiOstu", roiOstu);
+ waitKey(0);
+ destroyWindow("roiOstu");
+ }
+ auto character = CharsIdentify::instance()->identifyChinese(roiOstu, valOstu, isChinese);
+ }
+ if (1) {
+ if (BLUE == plateType) {
+ adaptiveThreshold(auxRoi, roiAdap, 255, ADAPTIVE_THRESH_MEAN_C, THRESH_BINARY, 3, 0);
+ }
+ else if (YELLOW == plateType) {
+ adaptiveThreshold(auxRoi, roiAdap, 255, ADAPTIVE_THRESH_MEAN_C, THRESH_BINARY_INV, 3, 0);
+ }
+ else if (WHITE == plateType) {
+ adaptiveThreshold(auxRoi, roiAdap, 255, ADAPTIVE_THRESH_MEAN_C, THRESH_BINARY_INV, 3, 0);
+ }
+ else {
+ adaptiveThreshold(auxRoi, roiAdap, 255, ADAPTIVE_THRESH_MEAN_C, THRESH_BINARY, 3, 0);
+ }
+ roiAdap = preprocessChar(roiAdap);
+ auto character = CharsIdentify::instance()->identifyChinese(roiAdap, valAdap, isChinese);
+ }
+
+ //std::cout << "valOstu: " << valOstu << std::endl;
+ //std::cout << "valAdap: " << valAdap << std::endl;
+
+ if (valOstu >= valAdap) {
+ out = roiOstu;
+ }
+ else {
+ out = roiAdap;
+ }
+}
+
+void CCharsSegment::judgeChineseGray(Mat in, Mat& out, Color plateType) {
+ out = in;
+}
+
+bool slideChineseWindow(Mat& image, Rect mr, Mat& newRoi, Color plateType, float slideLengthRatio, bool useAdapThreshold) {
+ std::vector<CCharacter> charCandidateVec;
+
+ Rect maxrect = mr;
+ Point tlPoint = mr.tl();
+
+ bool isChinese = true;
+ int slideLength = int(slideLengthRatio * maxrect.width);
+ int slideStep = 1;
+ int fromX = 0;
+ fromX = tlPoint.x;
+
+ for (int slideX = -slideLength; slideX < slideLength; slideX += slideStep) {
+ float x_slide = 0;
+
+ x_slide = float(fromX + slideX);
+
+ float y_slide = (float)tlPoint.y;
+ Point2f p_slide(x_slide, y_slide);
+
+ //cv::circle(image, p_slide, 2, Scalar(255), 1);
+
+ int chineseWidth = int(maxrect.width);
+ int chineseHeight = int(maxrect.height);
+
+ Rect rect(Point2f(x_slide, y_slide), Size(chineseWidth, chineseHeight));
+
+ if (rect.tl().x < 0 || rect.tl().y < 0 || rect.br().x >= image.cols || rect.br().y >= image.rows)
+ continue;
+
+ Mat auxRoi = image(rect);
+
+ Mat roiOstu, roiAdap;
+ if (1) {
+ if (BLUE == plateType) {
+ threshold(auxRoi, roiOstu, 0, 255, CV_THRESH_BINARY + CV_THRESH_OTSU);
+ }
+ else if (YELLOW == plateType) {
+ threshold(auxRoi, roiOstu, 0, 255, CV_THRESH_BINARY_INV + CV_THRESH_OTSU);
+ }
+ else if (WHITE == plateType) {
+ threshold(auxRoi, roiOstu, 0, 255, CV_THRESH_BINARY_INV + CV_THRESH_OTSU);
+ }
+ else {
+ threshold(auxRoi, roiOstu, 0, 255, CV_THRESH_OTSU + CV_THRESH_BINARY);
+ }
+ roiOstu = preprocessChar(roiOstu, kChineseSize);
+
+ CCharacter charCandidateOstu;
+ charCandidateOstu.setCharacterPos(rect);
+ charCandidateOstu.setCharacterMat(roiOstu);
+ charCandidateOstu.setIsChinese(isChinese);
+ charCandidateVec.push_back(charCandidateOstu);
+ }
+ if (useAdapThreshold) {
+ if (BLUE == plateType) {
+ adaptiveThreshold(auxRoi, roiAdap, 255, ADAPTIVE_THRESH_MEAN_C, THRESH_BINARY, 3, 0);
+ }
+ else if (YELLOW == plateType) {
+ adaptiveThreshold(auxRoi, roiAdap, 255, ADAPTIVE_THRESH_MEAN_C, THRESH_BINARY_INV, 3, 0);
+ }
+ else if (WHITE == plateType) {
+ adaptiveThreshold(auxRoi, roiAdap, 255, ADAPTIVE_THRESH_MEAN_C, THRESH_BINARY_INV, 3, 0);
+ }
+ else {
+ adaptiveThreshold(auxRoi, roiAdap, 255, ADAPTIVE_THRESH_MEAN_C, THRESH_BINARY, 3, 0);
+ }
+ roiAdap = preprocessChar(roiAdap, kChineseSize);
+
+ CCharacter charCandidateAdap;
+ charCandidateAdap.setCharacterPos(rect);
+ charCandidateAdap.setCharacterMat(roiAdap);
+ charCandidateAdap.setIsChinese(isChinese);
+ charCandidateVec.push_back(charCandidateAdap);
+ }
+
+ }
+
+ CharsIdentify::instance()->classifyChinese(charCandidateVec);
+
+ double overlapThresh = 0.1;
+ NMStoCharacter(charCandidateVec, overlapThresh);
+
+ if (charCandidateVec.size() >= 1) {
+ std::sort(charCandidateVec.begin(), charCandidateVec.end(),
+ [](const CCharacter& r1, const CCharacter& r2) {
+ return r1.getCharacterScore() > r2.getCharacterScore();
+ });
+
+ newRoi = charCandidateVec.at(0).getCharacterMat();
+ return true;
+ }
+
+ return false;
+}
+
+bool slideChineseGrayWindow(const Mat& image, Rect& mr, Mat& newRoi, Color plateType, float slideLengthRatio) {
+ std::vector<CCharacter> charCandidateVec;
+
+ Rect maxrect = mr;
+ Point tlPoint = mr.tl();
+
+ bool isChinese = true;
+ int slideLength = int(slideLengthRatio * maxrect.width);
+ int slideStep = 1;
+ int fromX = 0;
+ fromX = tlPoint.x;
+
+ for (int slideX = -slideLength; slideX < slideLength; slideX += slideStep) {
+ float x_slide = 0;
+ x_slide = float(fromX + slideX);
+
+ float y_slide = (float)tlPoint.y;
+
+ int chineseWidth = int(maxrect.width);
+ int chineseHeight = int(maxrect.height);
+
+ Rect rect(Point2f(x_slide, y_slide), Size(chineseWidth, chineseHeight));
+
+ if (rect.tl().x < 0 || rect.tl().y < 0 || rect.br().x >= image.cols || rect.br().y >= image.rows)
+ continue;
+
+ Mat auxRoi = image(rect);
+ Mat grayChinese;
+ grayChinese.create(kGrayCharHeight, kGrayCharWidth, CV_8UC1);
+ resize(auxRoi, grayChinese, grayChinese.size(), 0, 0, INTER_LINEAR);
+
+ CCharacter charCandidateOstu;
+ charCandidateOstu.setCharacterPos(rect);
+ charCandidateOstu.setCharacterMat(grayChinese);
+ charCandidateOstu.setIsChinese(isChinese);
+ charCandidateVec.push_back(charCandidateOstu);
+ }
+
+ CharsIdentify::instance()->classifyChineseGray(charCandidateVec);
+
+ double overlapThresh = 0.1;
+ NMStoCharacter(charCandidateVec, overlapThresh);
+
+ if (charCandidateVec.size() >= 1) {
+ std::sort(charCandidateVec.begin(), charCandidateVec.end(),
+ [](const CCharacter& r1, const CCharacter& r2) {
+ return r1.getCharacterScore() > r2.getCharacterScore();
+ });
+
+ newRoi = charCandidateVec.at(0).getCharacterMat();
+ mr = charCandidateVec.at(0).getCharacterPos();
+ return true;
+ }
+ return false;
+}
+
+
+int CCharsSegment::charsSegment(Mat input, vector<Mat>& resultVec, Color color) {
+ if (!input.data) return 0x01;
+
+ Color plateType = color;
+
+ Mat input_grey;
+ cvtColor(input, input_grey, CV_BGR2GRAY);
+
+ if (0) {
+ imshow("plate", input_grey);
+ waitKey(0);
+ destroyWindow("plate");
+ }
+
+ Mat img_threshold;
+
+ img_threshold = input_grey.clone();
+ spatial_ostu(img_threshold, 8, 2, plateType);
+
+ if (0) {
+ imshow("plate", img_threshold);
+ waitKey(0);
+ destroyWindow("plate");
+ }
+
+ // remove liuding and hor lines
+ // also judge whether it is a plate, using the jump count
+ if (!clearLiuDing(img_threshold)) return 0x02;
+
+ Mat img_contours;
+ img_threshold.copyTo(img_contours);
+
+ vector<vector<Point> > contours;
+ findContours(img_contours,
+ contours, // a vector of contours
+ CV_RETR_EXTERNAL, // retrieve the external contours
+ CV_CHAIN_APPROX_NONE); // all pixels of each contours
+
+ vector<vector<Point> >::iterator itc = contours.begin();
+ vector<Rect> vecRect;
+
+ while (itc != contours.end()) {
+ Rect mr = boundingRect(Mat(*itc));
+ Mat auxRoi(img_threshold, mr);
+
+ if (verifyCharSizes(auxRoi)) vecRect.push_back(mr);
+ ++itc;
+ }
+
+
+ if (vecRect.size() == 0) return 0x03;
+
+ vector<Rect> sortedRect(vecRect);
+ std::sort(sortedRect.begin(), sortedRect.end(),
+ [](const Rect& r1, const Rect& r2) { return r1.x < r2.x; });
+
+ size_t specIndex = 0;
+
+ specIndex = GetSpecificRect(sortedRect);
+
+ Rect chineseRect;
+ if (specIndex < sortedRect.size())
+ chineseRect = GetChineseRect(sortedRect[specIndex]);
+ else
+ return 0x04;
+
+ if (0) {
+ rectangle(img_threshold, chineseRect, Scalar(255));
+ imshow("plate", img_threshold);
+ waitKey(0);
+ destroyWindow("plate");
+ }
+
+ vector<Rect> newSortedRect;
+ newSortedRect.push_back(chineseRect);
+ RebuildRect(sortedRect, newSortedRect, specIndex);
+
+ if (newSortedRect.size() == 0) return 0x05;
+
+ bool useSlideWindow = true;
+ bool useAdapThreshold = true;
+ //bool useAdapThreshold = CParams::instance()->getParam1b();
+
+ for (size_t i = 0; i < newSortedRect.size(); i++) {
+ Rect mr = newSortedRect[i];
+
+ // Mat auxRoi(img_threshold, mr);
+ Mat auxRoi(input_grey, mr);
+ Mat newRoi;
+
+ if (i == 0) {
+ if (useSlideWindow) {
+ float slideLengthRatio = 0.1f;
+ //float slideLengthRatio = CParams::instance()->getParam1f();
+ if (!slideChineseWindow(input_grey, mr, newRoi, plateType, slideLengthRatio, useAdapThreshold))
+ judgeChinese(auxRoi, newRoi, plateType);
+ }
+ else
+ judgeChinese(auxRoi, newRoi, plateType);
+ }
+ else {
+ if (BLUE == plateType) {
+ threshold(auxRoi, newRoi, 0, 255, CV_THRESH_BINARY + CV_THRESH_OTSU);
+ }
+ else if (YELLOW == plateType) {
+ threshold(auxRoi, newRoi, 0, 255, CV_THRESH_BINARY_INV + CV_THRESH_OTSU);
+ }
+ else if (WHITE == plateType) {
+ threshold(auxRoi, newRoi, 0, 255, CV_THRESH_OTSU + CV_THRESH_BINARY_INV);
+ }
+ else {
+ threshold(auxRoi, newRoi, 0, 255, CV_THRESH_OTSU + CV_THRESH_BINARY);
+ }
+
+ newRoi = preprocessChar(newRoi);
+ }
+
+ if (0) {
+ if (i == 0) {
+ imshow("input_grey", input_grey);
+ waitKey(0);
+ destroyWindow("input_grey");
+ }
+ if (i == 0) {
+ imshow("newRoi", newRoi);
+ waitKey(0);
+ destroyWindow("newRoi");
+ }
+ }
+
+ resultVec.push_back(newRoi);
+ }
+
+ return 0;
+}
+
+int CCharsSegment::projectSegment(const Mat& input, Color color, vector<int>& out_indexs) {
+ if (!input.data) return 0x01;
+
+ Color plateType = color;
+ Mat input_grey;
+ cvtColor(input, input_grey, CV_BGR2GRAY);
+ SHOW_IMAGE(input_grey, 0);
+
+ Mat img_threshold;
+ img_threshold = input_grey.clone();
+ spatial_ostu(img_threshold, 8, 2, plateType);
+ SHOW_IMAGE(img_threshold, 0);
+
+ // remove liuding (rivets) and horizontal lines
+ // also judge whether it is a plate using the jump count
+ if (!clearLiuDing(img_threshold)) return 0x02;
+ SHOW_IMAGE(img_threshold, 0);
+
+ Mat vhist = ProjectedHistogram(img_threshold, VERTICAL, 0);
+ Mat showHist = showHistogram(vhist);
+ SHOW_IMAGE(showHist, 1);
+
+ vector<float> values;
+ vector<int> indexs;
+ int size = vhist.cols;
+ for (int i = 0; i < size; i++) {
+ float val = vhist.at<float>(i);
+ values.push_back(1.f - val);
+ }
+ Mat img_test = img_threshold.clone();
+ NMSfor1D(values, indexs);
+
+ out_indexs.resize(size);
+ for (int j = 0; j < size; j++)
+ out_indexs.at(j) = 0;
+ for (int i = 0; i < size; i++) {
+ float val = vhist.at<float>(i);
+ if (indexs.at(i) && val < 0.1f) {
+ out_indexs.at(i) = 1;
+ for (int j = 0; j < img_test.rows; j++) {
+ img_test.at<char>(j, i) = (char)255;
+ }
+ }
+ }
+ SHOW_IMAGE(img_test, 1);
+
+ return 0;
+}
+
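+// Size check for candidate character rects: the aspect ratio (w / h) must lie
+// in [0.10, 0.85], the height in [0.5, 1.0] * kPlateResizeHeight, and the
+// vertical center of the rect in [0.3, 0.7] * kPlateResizeHeight.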
+bool verifyCharRectSizes(Rect r) {
+ // Char sizes 45x90
+ float aspect = 45.0f / 90.0f;
+ float charAspect = (float)r.width / (float)r.height;
+ float error = 0.5f;
+ float minHeight = kPlateResizeHeight * 0.5f;
+ float maxHeight = kPlateResizeHeight * 1.f;
+ // We have a different aspect ratio for number 1, and it can be ~0.2
+ float minAspect = 0.10f; //0.2f;
+
+ float maxAspect = 0.85f; // aspect + aspect * error; //0.8f;
+
+ int ch = r.tl().y + r.height / 2;
+ int min_ch = int(kPlateResizeHeight * 0.3f);
+ int max_ch = int(kPlateResizeHeight * 0.7f);
+ if (ch > max_ch || ch < min_ch)
+ return false;
+
+ float h = (float)r.height;
+ if (h > maxHeight || h < minHeight)
+ return false;
+ if (charAspect < minAspect || charAspect > maxAspect)
+ return false;
+
+ return true;
+}
+
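+// Center the character in a square canvas of side m = max(w, h) with a 2x3
+// affine translation, then resize it to char_size x char_size. For example a
+// 10x20 crop is shifted by (m/2 - w/2, m/2 - h/2) = (5, 0) inside a 20x20
+// canvas before the final resize.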
+Mat preprocessCharMat(Mat in, int char_size) {
+ // Remap image
+ int h = in.rows;
+ int w = in.cols;
+
+ int charSize = char_size;
+
+ Mat transformMat = Mat::eye(2, 3, CV_32F);
+ int m = max(w, h);
+ transformMat.at<float>(0, 2) = float(m / 2 - w / 2);
+ transformMat.at<float>(1, 2) = float(m / 2 - h / 2);
+
+ Mat warpImage(m, m, in.type());
+ warpAffine(in, warpImage, transformMat, warpImage.size(), INTER_LINEAR,
+ BORDER_CONSTANT, Scalar(0));
+
+ Mat out;
+ cv::resize(warpImage, out, Size(charSize, charSize));
+
+ return out;
+}
+
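+// Binarize the gray plate with spatial_ostu (single 1x1 block), remove rivet
+// rows with clearLiuDing, strip the border with clearBorder, then crop the
+// original gray image to that rect and resize it to
+// kPlateResizeWidth x kPlateResizeHeight.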
+Mat clearLiuDingAndBorder(const Mat& grayImage, Color color) {
+ SHOW_IMAGE(grayImage, 0);
+ Mat img_threshold;
+ img_threshold = grayImage.clone();
+ spatial_ostu(img_threshold, 1, 1, color);
+ clearLiuDing(img_threshold);
+ Rect cropRect;
+ clearBorder(img_threshold, cropRect);
+ Mat cropedGrayImage;
+ resize(grayImage(cropRect), cropedGrayImage, Size(kPlateResizeWidth, kPlateResizeHeight));
+ SHOW_IMAGE(cropedGrayImage, 0);
+ return cropedGrayImage;
+}
+
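+// NMS against a ground-truth slot: every candidate is first re-scored as
+//   weighted_score = a * classifier_score + b * w_ratio + c * h_ratio
+// with w_ratio = 1 - |w - gw| / gw (h_ratio analogous) and (a, b, c) =
+// (0.5, 0.5, 0.1), or (0.3, 0.7, 0.1) for '1' so its narrow width counts more.
+// Candidates are then sorted and any rect overlapping a better one by more
+// than `overlap` IoU is erased.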
+void NMStoCharacterByRatio(std::vector<CCharacter> &inVec, double overlap, const Rect groundRect) {
+ // re-weight every candidate's score
+ for (auto& character : inVec) {
+ double score = character.getCharacterScore();
+ //cout << "score:" << score << endl;
+ Rect rect = character.getCharacterPos();
+ int w = rect.width;
+ int h = rect.height;
+ int gw = groundRect.width;
+ int gh = groundRect.height;
+
+ float iou = computeIOU(rect, groundRect);
+
+ int w_diff = abs(w - gw);
+ int h_diff = abs(h - gh);
+
+ //float w_ratio = (float)w / (float)gw;
+ //float h_ratio = (float)h / (float)gh;
+
+ float w_ratio = 1 - (float)w_diff / (float)gw;
+ float h_ratio = 1 - (float)h_diff / (float)gh;
+
+ float a = 0.5f;
+ float b = 0.5f;
+ //cout << "str:" << character.getCharacterStr() << endl;
+ // if the character is '1', its probability is reduced by weighting the width ratio more heavily
+ if ("1" == character.getCharacterStr()) {
+ a = 0.3f; //0.2f;
+ b = 0.7f; //0.8f;
+ }
+ float c = 0.1f;
+ //float weighted_score = a * (float)score + b * w_ratio + c * h_ratio;
+ float weighted_score = a * (float)score + b * w_ratio + c * h_ratio;
+ SHOW_IMAGE(character.getCharacterMat(), 0);
+ character.setCharacterScore((double)weighted_score);
+ //cout << "weighted_score:" << character.getCharacterScore() << endl;
+ }
+
+ std::sort(inVec.begin(), inVec.end());
+
+ std::vector<CCharacter>::iterator it = inVec.begin();
+ for (; it != inVec.end(); ++it) {
+ CCharacter charSrc = *it;
+ // cout << "charScore:" << charSrc.getCharacterScore() << endl;
+ Rect rectSrc = charSrc.getCharacterPos();
+ std::vector<CCharacter>::iterator itc = it + 1;
+
+ for (; itc != inVec.end();) {
+ CCharacter charComp = *itc;
+ Rect rectComp = charComp.getCharacterPos();
+ float iou = computeIOU(rectSrc, rectComp);
+
+ if (iou > overlap) {
+ itc = inVec.erase(itc);
+ }
+ else {
+ ++itc;
+ }
+ }
+ }
+}
+
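+// Return the index of the ground-truth center with the smallest squared
+// Euclidean distance to `center`.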
+int getNearestIndex(Point center, const vector<Point>& groundCenters) {
+ int gc_size = int(groundCenters.size());
+ int index = 0;
+ int min_length = INT_MAX;
+ for (int p = 0; p < gc_size; p++) {
+ Point gc_point = groundCenters.at(p);
+ int length_square = (gc_point.x - center.x) * (gc_point.x - center.x) +
+ (gc_point.y - center.y) * (gc_point.y - center.y);
+ //int length_square = abs(gc_point.x - center.x);
+ if (length_square < min_length) {
+ min_length = length_square;
+ index = p;
+ }
+ }
+ return index;
+}
+
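+// MSER-based segmentation: build ground-truth character slots from the plate
+// geometry (kPlateMaxSymbolCount, kSymbolIndex), detect MSER regions on the
+// gray channel, keep regions that pass verifyCharRectSizes and judegMDOratio2,
+// classify them, assign each candidate to its nearest slot, run
+// NMStoCharacterByRatio per slot, then locate the Chinese rect and output the
+// binarized characters (resultVec) together with gray crops (grayChars).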
+int CCharsSegment::charsSegmentUsingMSER(Mat input, vector<Mat>& resultVec, vector<Mat>& grayChars, Color color) {
+ Mat grayImage;
+ cvtColor(input, grayImage, CV_BGR2GRAY);
+ std::vector<Mat> bgrSplit;
+ split(input, bgrSplit);
+
+ //Mat grayChannel = clearLiuDingAndBorder(grayImage, color); //clearLiuDingAndBorder(grayImage, color);
+ Mat grayChannel = grayImage;
+
+ // Mat cropedGrayImage = grayImage;
+ // generate all channel images
+ vector<Mat> channelImages;
+ bool useThreeChannel = false;
+ channelImages.push_back(grayChannel);
+ if (useThreeChannel) {
+ for (int i = 0; i < 3; i++)
+ channelImages.push_back(bgrSplit.at(i));
+ }
+ int csize = channelImages.size();
+
+ //TODO three channels
+ std::vector<std::vector<Point>> all_contours;
+ std::vector<Rect> all_boxes;
+ all_contours.reserve(32);
+ all_boxes.reserve(32);
+
+ const int imageArea = input.rows * input.cols;
+ const int delta = 1;
+ const int minArea = 30;
+ const double maxAreaRatio = 0.2;
+
+ int type = -1;
+ if (Color::BLUE == color) type = 0;
+ if (Color::YELLOW == color) type = 1;
+ if (Color::WHITE == color) type = 1;
+ if (Color::UNKNOWN == color) type = 0;
+
+ for (int c_index = 0; c_index < csize; c_index++) {
+ Mat cimage = channelImages.at(c_index);
+ Mat testImage = cimage.clone();
+ cvtColor(testImage, testImage, CV_GRAY2BGR);
+
+ const float plateMaxSymbolCount = kPlateMaxSymbolCount;
+ const int symbolIndex = kSymbolIndex;
+ float segmentRatio = plateMaxSymbolCount - int(plateMaxSymbolCount);
+ const int plateMaxCharCount = int(plateMaxSymbolCount);
+
+ vector<vector<CCharacter>> charsVecVec;
+ charsVecVec.resize(plateMaxCharCount);
+
+ vector<Point> groundCenters;
+ groundCenters.reserve(plateMaxCharCount);
+ vector<Rect> groundRects;
+ groundRects.reserve(plateMaxCharCount);
+
+ // compute the ground char rect
+ int avg_char_width = int(kPlateResizeWidth * (1.f / plateMaxSymbolCount));
+ int avg_char_height = int(kPlateResizeHeight * 0.85f);
+
+ int x_axis = 0;
+ int y_axis = int((kPlateResizeHeight - avg_char_height) * 0.5f);
+ for (int j = 0; j < plateMaxSymbolCount; j++) {
+ int char_width = avg_char_width;
+ if (j != symbolIndex) char_width = avg_char_width;
+ else char_width = int(segmentRatio * avg_char_width);
+
+ Rect avg_char_rect = Rect(x_axis, y_axis, char_width, avg_char_height);
+ rectangle(testImage, avg_char_rect, Scalar(0, 0, 255));
+
+ Point center = Point(x_axis + int(char_width * 0.5f), y_axis + int(avg_char_height * 0.5f));
+ circle(testImage, center, 3, Scalar(0, 255, 0));
+ x_axis += char_width;
+
+ if (j != symbolIndex) {
+ groundCenters.push_back(center);
+ groundRects.push_back(avg_char_rect);
+ }
+ }
+ SHOW_IMAGE(testImage, 0);
+
+ Mat showImage = cimage.clone();
+ cvtColor(showImage, showImage, CV_GRAY2BGR);
+ Mat mdoImage = cimage.clone();
+ string candidateLicense;
+
+ Ptr<MSER2> mser;
+ // use the original mser to detect as many characters as possible
+ mser = MSER2::create(delta, minArea, int(maxAreaRatio * imageArea), false);
+ mser->detectRegions(cimage, all_contours, all_boxes, type);
+
+ std::vector<CCharacter> charVec;
+ charVec.reserve(16);
+ size_t size = all_contours.size();
+
+ int char_index = 0;
+ int char_size = 20;
+
+ Mat showMSERImage = cimage.clone();
+ cvtColor(showMSERImage, showMSERImage, CV_GRAY2BGR);
+ // verify char size and output to rects;
+ for (size_t index = 0; index < size; index++) {
+ Rect rect = all_boxes[index];
+ vector<Point> &contour = all_contours[index];
+ rectangle(showMSERImage, rect, Scalar(0,0,255));
+
+ // find character
+ if (verifyCharRectSizes(rect)) {
+ Mat mserMat = adaptive_image_from_points(contour, rect, Size(char_size, char_size));
+ Mat mserInput = preprocessCharMat(mserMat, char_size);
+
+ Rect charRect = rect;
+ Point center(charRect.tl().x + charRect.width / 2, charRect.tl().y + charRect.height / 2);
+ Mat tmpMat;
+ double ostu_level = cv::threshold(cimage(charRect), tmpMat, 0, 255, CV_THRESH_BINARY | CV_THRESH_OTSU);
+ Mat grayCharMat = cimage(charRect);
+ Mat ostuMat;
+ switch (color) {
+ case BLUE: threshold(grayCharMat, ostuMat, 0, 255, CV_THRESH_BINARY + CV_THRESH_OTSU); break;
+ case YELLOW: threshold(grayCharMat, ostuMat, 0, 255, CV_THRESH_BINARY_INV + CV_THRESH_OTSU); break;
+ case WHITE: threshold(grayCharMat, ostuMat, 0, 255, CV_THRESH_OTSU + CV_THRESH_BINARY_INV); break;
+ default: threshold(grayCharMat, ostuMat, 0, 255, CV_THRESH_OTSU + CV_THRESH_BINARY); break;
+ }
+ Mat ostuInput = preprocessChar(ostuMat);
+ // use the judegMDOratio2 function to
+ // remove small line-like regions in characters such as "zh-cuan"
+ if (judegMDOratio2(cimage, rect, contour, mdoImage, 1.2f, true)) {
+ CCharacter charCandidate;
+ //cout << contour.size() << endl;
+ charCandidate.setCharacterPos(charRect);
+ charCandidate.setCharacterMat(ostuInput); //charInput or ostuInput
+ charCandidate.setOstuLevel(ostu_level);
+ charCandidate.setCenterPoint(center);
+ int pos = getNearestIndex(center, groundCenters);
+ charsVecVec.at(pos).push_back(charCandidate);
+ charCandidate.setIndex(pos);
+ charCandidate.setIsChinese(false);
+ charVec.push_back(charCandidate);
+ }
+ }
+ else {
+ SHOW_IMAGE(showMSERImage(rect), 0);
+ }
+ }
+ SHOW_IMAGE(showMSERImage, 0);
+ SHOW_IMAGE(mdoImage, 0);
+
+ // classify all the images;
+ CharsIdentify::instance()->classify(charVec);
+ Rect maxrect = groundRects.at(0);
+
+ // NMS against the seven ground-truth rects
+ bool useGround = true;
+ if (useGround) {
+ for (auto charCandidate : charVec) {
+ int pos = charCandidate.getIndex();
+ charsVecVec.at(pos).push_back(charCandidate);
+ }
+ charVec.clear();
+ for (size_t c = 0; c < charsVecVec.size(); c++) {
+ Mat testImage_2 = cimage.clone();
+ cvtColor(testImage_2, testImage_2, CV_GRAY2BGR);
+ vector<CCharacter>& charPosVec = charsVecVec.at(c);
+ for (auto character : charPosVec) {
+ rectangle(testImage_2, character.getCharacterPos(), Scalar(0, 255, 0));
+ }
+ SHOW_IMAGE(testImage_2, 0);
+
+ double overlapThresh = 0.;
+ NMStoCharacterByRatio(charPosVec, overlapThresh, groundRects.at(c));
+ charPosVec.shrink_to_fit();
+
+ Mat testImage_3 = cimage.clone();
+ cvtColor(testImage_3, testImage_3, CV_GRAY2BGR);
+ for (auto character : charPosVec) {
+ rectangle(testImage_3, character.getCharacterPos(), Scalar(0, 255, 0));
+ }
+
+ // only the last group will contain more than one candidate character
+ if (charsVecVec.size() - 1 == c) {
+ for (auto charPos : charPosVec)
+ charVec.push_back(charPos);
+ }
+ else {
+ if (charPosVec.size() != 0) {
+ CCharacter& inputChar = charPosVec.at(0);
+ charVec.push_back(inputChar);
+ Mat charMat = inputChar.getCharacterMat();
+ SHOW_IMAGE(charMat, 0);
+ }
+ }
+ for (auto charPos : charPosVec) {
+ Rect r = charPos.getCharacterPos();
+ if (r.area() > maxrect.area())
+ maxrect = r;
+ }
+ SHOW_IMAGE(testImage_3, 0);
+ }
+ }
+ else {
+ NMStoCharacterByRatio(charVec, 0.2f, maxrect);
+ }
+
+ if (charVec.size() < kCharsCountInOnePlate) return 0x03;
+ std::sort(charVec.begin(), charVec.end(),[](const CCharacter& r1, const CCharacter& r2) { return r1.getCharacterPos().x < r2.getCharacterPos().x; });
+
+ string predictLicense = "";
+ vector<Rect> sortedRect;
+ for (auto charCandidate : charVec) {
+ sortedRect.push_back(charCandidate.getCharacterPos());
+ predictLicense.append(charCandidate.getCharacterStr());
+ }
+ std::sort(sortedRect.begin(), sortedRect.end(),
+ [](const Rect& r1, const Rect& r2) { return r1.x < r2.x; });
+ cout << "predictLicense: " << predictLicense << endl;
+
+ // find chinese rect
+ size_t specIndex = 0;
+ specIndex = GetSpecificRect(sortedRect);
+ SHOW_IMAGE(showImage(sortedRect[specIndex]), 0);
+
+ Rect chineseRect;
+ if (specIndex < sortedRect.size())
+ chineseRect = GetChineseRect(sortedRect[specIndex]);
+ else
+ return 0x04;
+
+ vector<Rect> newSortedRect;
+ newSortedRect.push_back(chineseRect);
+ if (newSortedRect.size() == 0) return 0x05;
+
+ SHOW_IMAGE(showImage(chineseRect), 0);
+ RebuildRect(sortedRect, newSortedRect, specIndex);
+
+ Mat theImage = channelImages.at(c_index);
+ for (size_t i = 0; i < newSortedRect.size(); i++) {
+ Rect mr = newSortedRect[i];
+ //mr = rectEnlarge(newSortedRect[i], cimage.cols, cimage.rows);
+ Mat auxRoi(theImage, mr);
+ Mat newRoi;
+ if (i == 0) {
+ //Rect large_mr = rectEnlarge(mr, theImage.cols, theImage.rows);
+ Rect large_mr = mr;
+ Mat grayChar(theImage, large_mr);
+ Mat grayChinese;
+ grayChinese.create(kGrayCharHeight, kGrayCharWidth, CV_8UC1);
+ resize(grayChar, grayChinese, grayChinese.size(), 0, 0, INTER_LINEAR);
+
+ Mat newChineseRoi;
+ if (1) {
+ float slideLengthRatio = 0.1f;
+ if (!slideChineseGrayWindow(theImage, large_mr, newChineseRoi, color, slideLengthRatio))
+ judgeChineseGray(grayChinese, newChineseRoi, color);
+ }
+ grayChars.push_back(newChineseRoi);
+ }
+ else {
+ switch (color) {
+ case BLUE: threshold(auxRoi, newRoi, 0, 255, CV_THRESH_BINARY + CV_THRESH_OTSU); break;
+ case YELLOW: threshold(auxRoi, newRoi, 0, 255, CV_THRESH_BINARY_INV + CV_THRESH_OTSU); break;
+ case WHITE: threshold(auxRoi, newRoi, 0, 255, CV_THRESH_OTSU + CV_THRESH_BINARY_INV); break;
+ default: threshold(auxRoi, newRoi, 0, 255, CV_THRESH_OTSU + CV_THRESH_BINARY); break;
+ }
+ newRoi = preprocessChar(newRoi);
+ Rect fit_mr = rectFit(mr, cimage.cols, cimage.rows);
+ Mat grayChar(cimage, fit_mr);
+ grayChars.push_back(grayChar);
+ }
+
+ rectangle(showImage, mr, Scalar(0, 0, 255), 1);
+ resultVec.push_back(newRoi);
+ }
+ SHOW_IMAGE(showImage, 0);
+ }
+
+ return 0;
+}
+
+
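+// Same flow as charsSegment, but gray-scale crops are also returned in
+// grayChars: an enlarged gray crop for the Chinese character and a
+// rectFit-adjusted gray crop for each remaining character.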
+int CCharsSegment::charsSegmentUsingOSTU(Mat input, vector<Mat>& resultVec, vector<Mat>& grayChars, Color color) {
+ if (!input.data) return 0x01;
+
+ Color plateType = color;
+ Mat input_grey;
+ cvtColor(input, input_grey, CV_BGR2GRAY);
+
+ Mat img_threshold;
+ img_threshold = input_grey.clone();
+ spatial_ostu(img_threshold, 8, 2, plateType);
+
+ // remove liuding (rivets) and horizontal lines, also judge whether it is a plate using the jump count
+ if (!clearLiuDing(img_threshold)) return 0x02;
+
+ Mat img_contours;
+ img_threshold.copyTo(img_contours);
+
+ vector<vector<Point> > contours;
+ findContours(img_contours,
+ contours, // a vector of contours
+ CV_RETR_EXTERNAL, // retrieve the external contours
+ CV_CHAIN_APPROX_NONE); // all pixels of each contour
+
+ vector<vector<Point> >::iterator itc = contours.begin();
+ vector<Rect> vecRect;
+ while (itc != contours.end()) {
+ Rect mr = boundingRect(Mat(*itc));
+ Mat auxRoi(img_threshold, mr);
+ if (verifyCharSizes(auxRoi))
+ vecRect.push_back(mr);
+ ++itc;
+ }
+
+ if (vecRect.size() == 0) return 0x03;
+
+ vector<Rect> sortedRect(vecRect);
+ std::sort(sortedRect.begin(), sortedRect.end(),
+ [](const Rect& r1, const Rect& r2) { return r1.x < r2.x; });
+
+ size_t specIndex = 0;
+ specIndex = GetSpecificRect(sortedRect);
+
+ Rect chineseRect;
+ if (specIndex < sortedRect.size())
+ chineseRect = GetChineseRect(sortedRect[specIndex]);
+ else
+ return 0x04;
+
+ if (0) {
+ rectangle(img_threshold, chineseRect, Scalar(255));
+ imshow("plate", img_threshold);
+ waitKey(0);
+ destroyWindow("plate");
+ }
+
+ vector<Rect> newSortedRect;
+ newSortedRect.push_back(chineseRect);
+ RebuildRect(sortedRect, newSortedRect, specIndex);
+
+ if (newSortedRect.size() == 0) return 0x05;
+
+ bool useSlideWindow = true;
+ bool useAdapThreshold = true;
+ //bool useAdapThreshold = CParams::instance()->getParam1b();
+
+ for (size_t i = 0; i < newSortedRect.size(); i++) {
+ Rect mr = newSortedRect[i];
+ Mat auxRoi(input_grey, mr);
+ Mat newRoi;
+
+ if (i == 0) {
+ // generate the gray chinese char
+ Rect large_mr = rectEnlarge(mr, input_grey.cols, input_grey.rows);
+ Mat grayChar(input_grey, large_mr);
+ Mat grayChinese;
+ grayChinese.create(kGrayCharHeight, kGrayCharWidth, CV_8UC1);
+ resize(grayChar, grayChinese, grayChinese.size(), 0, 0, INTER_LINEAR);
+
+ Mat newChineseRoi;
+ if (useSlideWindow) {
+ float slideLengthRatio = 0.1f;
+ if (!slideChineseGrayWindow(input_grey, large_mr, newChineseRoi, plateType, slideLengthRatio))
+ judgeChineseGray(grayChinese, newChineseRoi, plateType);
+ }
+ else {
+ judgeChinese(auxRoi, newRoi, plateType);
+ }
+ grayChars.push_back(newChineseRoi);
+ }
+ else {
+ switch (plateType) {
+ case BLUE: threshold(auxRoi, newRoi, 0, 255, CV_THRESH_BINARY + CV_THRESH_OTSU); break;
+ case YELLOW: threshold(auxRoi, newRoi, 0, 255, CV_THRESH_BINARY_INV + CV_THRESH_OTSU); break;
+ case WHITE: threshold(auxRoi, newRoi, 0, 255, CV_THRESH_OTSU + CV_THRESH_BINARY_INV); break;
+ default: threshold(auxRoi, newRoi, 0, 255, CV_THRESH_OTSU + CV_THRESH_BINARY); break;
+ }
+ newRoi = preprocessChar(newRoi);
+
+ // generate the gray chinese char
+ Rect fit_mr = rectFit(mr, input_grey.cols, input_grey.rows);
+ Mat grayChar(input_grey, fit_mr);
+ grayChars.push_back(grayChar);
+ }
+ resultVec.push_back(newRoi);
+ }
+ return 0;
+}
+
+
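+// Guess the Chinese character rect from the specific (city letter) rect:
+// same height, width enlarged by 15%, shifted left by 1.15 * the new width
+// and clamped to the image border.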
+Rect CCharsSegment::GetChineseRect(const Rect rectSpe) {
+ int height = rectSpe.height;
+ float newwidth = rectSpe.width * 1.15f;
+ int x = rectSpe.x;
+ int y = rectSpe.y;
+
+ int newx = x - int(newwidth * 1.15);
+ newx = newx > 0 ? newx : 0;
+
+ Rect a(newx, y, int(newwidth), height);
+
+ return a;
+}
+
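+// Find the city letter among the x-sorted rects: the last rect that is large
+// enough (width > 0.6 * maxWidth or height > 0.6 * maxHeight) and whose
+// horizontal center falls into the kSymbolIndex-th slot of width
+// m_theMatWidth / kPlateMaxSymbolCount (roughly between 1/7 and 2/7 of the plate).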
+int CCharsSegment::GetSpecificRect(const vector<Rect>& vecRect) {
+ vector<int> xpositions;
+ int maxHeight = 0;
+ int maxWidth = 0;
+
+ for (size_t i = 0; i < vecRect.size(); i++) {
+ xpositions.push_back(vecRect[i].x);
+
+ if (vecRect[i].height > maxHeight) {
+ maxHeight = vecRect[i].height;
+ }
+ if (vecRect[i].width > maxWidth) {
+ maxWidth = vecRect[i].width;
+ }
+ }
+
+ int specIndex = 0;
+ for (size_t i = 0; i < vecRect.size(); i++) {
+ Rect mr = vecRect[i];
+ int midx = mr.x + mr.width / 2;
+
+ // use prior knowledge to find the specific character
+ // whose position lies between 1/7 and 2/7 of the plate width
+ if ((mr.width > maxWidth * 0.6 || mr.height > maxHeight * 0.6) &&
+ (midx < int(m_theMatWidth / kPlateMaxSymbolCount) * kSymbolIndex &&
+ midx > int(m_theMatWidth / kPlateMaxSymbolCount) * (kSymbolIndex - 1))) {
+ specIndex = i;
+ }
+ }
+
+ return specIndex;
+}
+
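+// Starting from the specific (city letter) rect, append at most 6 following
+// rects to the output, which already holds the guessed Chinese rect.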
+int CCharsSegment::RebuildRect(const vector<Rect>& vecRect,
+ vector<Rect>& outRect, int specIndex) {
+ int count = 6;
+ for (size_t i = specIndex; i < vecRect.size() && count; ++i, --count) {
+ outRect.push_back(vecRect[i]);
+ }
+
+ return 0;
+}
+
+}
diff --git a/原yx-image-recognition/src/main/resources/easypr-c++/core_func.cpp b/原yx-image-recognition/src/main/resources/easypr-c++/core_func.cpp
new file mode 100644
index 00000000..a4e419e1
--- /dev/null
+++ b/原yx-image-recognition/src/main/resources/easypr-c++/core_func.cpp
@@ -0,0 +1,2569 @@
+#include "easypr/core/core_func.h"
+#include "easypr/core/plate.hpp"
+#include "easypr/core/chars_identify.h"
+#include "easypr/config.h"
+#include "easypr/core/params.h"
+#include "thirdparty/mser/mser2.hpp"
+#include
+
+namespace easypr {
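+ // colorMatch: convert to HSV, equalize the V channel, then keep only pixels
+ // whose H lies in the range of the requested plate color and whose S and V
+ // are large enough; with adaptive_minsv the minimum S/V threshold is lowered
+ // as H approaches the center of the range, otherwise the fixed minabs_sv is
+ // used. Matched pixels get V = 255, all others 0, and the V channel is
+ // returned (and copied into `match`) as a binary mask.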
+ Mat colorMatch(const Mat &src, Mat &match, const Color r,
+ const bool adaptive_minsv) {
+
+ // if adaptive_minsv is used,
+ // the min value of s and v adapts to h
+ const float max_sv = 255;
+ const float minref_sv = 64;
+
+ const float minabs_sv = 95; //95;
+
+ // H range of blue
+
+ const int min_blue = 100; // 100
+ const int max_blue = 140; // 140
+
+ // H range of yellow
+
+ const int min_yellow = 15; // 15
+ const int max_yellow = 40; // 40
+
+ // H range of white
+
+ const int min_white = 0; // 15
+ const int max_white = 30; // 40
+
+ Mat src_hsv;
+
+ // convert to HSV space
+ cvtColor(src, src_hsv, CV_BGR2HSV);
+
+ std::vector<Mat> hsvSplit;
+ split(src_hsv, hsvSplit);
+ equalizeHist(hsvSplit[2], hsvSplit[2]);
+ merge(hsvSplit, src_hsv);
+
+ // match to find the color
+
+ int min_h = 0;
+ int max_h = 0;
+ switch (r) {
+ case BLUE:
+ min_h = min_blue;
+ max_h = max_blue;
+ break;
+ case YELLOW:
+ min_h = min_yellow;
+ max_h = max_yellow;
+ break;
+ case WHITE:
+ min_h = min_white;
+ max_h = max_white;
+ break;
+ default:
+ // Color::UNKNOWN
+ break;
+ }
+
+ float diff_h = float((max_h - min_h) / 2);
+ float avg_h = min_h + diff_h;
+
+ int channels = src_hsv.channels();
+ int nRows = src_hsv.rows;
+
+ // consider multi channel image
+ int nCols = src_hsv.cols * channels;
+ if (src_hsv.isContinuous()) {
+ nCols *= nRows;
+ nRows = 1;
+ }
+
+ int i, j;
+ uchar* p;
+ float s_all = 0;
+ float v_all = 0;
+ float count = 0;
+ for (i = 0; i < nRows; ++i) {
+ p = src_hsv.ptr<uchar>(i);
+ for (j = 0; j < nCols; j += 3) {
+ int H = int(p[j]); // 0-180
+ int S = int(p[j + 1]); // 0-255
+ int V = int(p[j + 2]); // 0-255
+
+ s_all += S;
+ v_all += V;
+ count++;
+
+ bool colorMatched = false;
+
+ if (H > min_h && H < max_h) {
+ float Hdiff = 0;
+ if (H > avg_h)
+ Hdiff = H - avg_h;
+ else
+ Hdiff = avg_h - H;
+
+ float Hdiff_p = float(Hdiff) / diff_h;
+
+ float min_sv = 0;
+ if (true == adaptive_minsv)
+ min_sv =
+ minref_sv -
+ minref_sv / 2 *
+ (1
+ - Hdiff_p); // inref_sv - minref_sv / 2 * (1 - Hdiff_p)
+ else
+ min_sv = minabs_sv; // add
+
+ if ((S > min_sv && S < max_sv) && (V > min_sv && V < max_sv))
+ colorMatched = true;
+ }
+
+ if (colorMatched == true) {
+ p[j] = 0;
+ p[j + 1] = 0;
+ p[j + 2] = 255;
+ }
+ else {
+ p[j] = 0;
+ p[j + 1] = 0;
+ p[j + 2] = 0;
+ }
+ }
+ }
+
+ // cout << "avg_s:" << s_all / count << endl;
+ // cout << "avg_v:" << v_all / count << endl;
+
+ // get the final binary
+
+ Mat src_grey;
+ std::vector<Mat> hsvSplit_done;
+ split(src_hsv, hsvSplit_done);
+ src_grey = hsvSplit_done[2];
+
+ match = src_grey;
+
+ return src_grey;
+ }
+
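+ // bFindLeftRightBound and its variants slide a window of width
+ // 0.2 * rows over the binary plate from the left and from the right and
+ // return the first columns where the white-pixel ratio inside the window
+ // exceeds a threshold; the three versions differ only in step size and
+ // threshold values.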
+ bool bFindLeftRightBound1(Mat &bound_threshold, int &posLeft, int &posRight) {
+
+ float span = bound_threshold.rows * 0.2f;
+
+ for (int i = 0; i < bound_threshold.cols - span - 1; i += 3) {
+ int whiteCount = 0;
+ for (int k = 0; k < bound_threshold.rows; k++) {
+ for (int l = i; l < i + span; l++) {
+ if (bound_threshold.data[k * bound_threshold.step[0] + l] == 255) {
+ whiteCount++;
+ }
+ }
+ }
+ if (whiteCount * 1.0 / (span * bound_threshold.rows) > 0.15) {
+ posLeft = i;
+ break;
+ }
+ }
+ span = bound_threshold.rows * 0.2f;
+
+
+ for (int i = bound_threshold.cols - 1; i > span; i -= 2) {
+ int whiteCount = 0;
+ for (int k = 0; k < bound_threshold.rows; k++) {
+ for (int l = i; l > i - span; l--) {
+ if (bound_threshold.data[k * bound_threshold.step[0] + l] == 255) {
+ whiteCount++;
+ }
+ }
+ }
+
+ if (whiteCount * 1.0 / (span * bound_threshold.rows) > 0.06) {
+ posRight = i;
+ if (posRight + 5 < bound_threshold.cols) {
+ posRight = posRight + 5;
+ } else {
+ posRight = bound_threshold.cols - 1;
+ }
+
+ break;
+ }
+ }
+
+ if (posLeft < posRight) {
+ return true;
+ }
+ return false;
+ }
+
+ bool bFindLeftRightBound(Mat &bound_threshold, int &posLeft, int &posRight) {
+
+
+ float span = bound_threshold.rows * 0.2f;
+
+ for (int i = 0; i < bound_threshold.cols - span - 1; i += 2) {
+ int whiteCount = 0;
+ for (int k = 0; k < bound_threshold.rows; k++) {
+ for (int l = i; l < i + span; l++) {
+ if (bound_threshold.data[k * bound_threshold.step[0] + l] == 255) {
+ whiteCount++;
+ }
+ }
+ }
+ if (whiteCount * 1.0 / (span * bound_threshold.rows) > 0.36) {
+ posLeft = i;
+ break;
+ }
+ }
+ span = bound_threshold.rows * 0.2f;
+
+
+ for (int i = bound_threshold.cols - 1; i > span; i -= 2) {
+ int whiteCount = 0;
+ for (int k = 0; k < bound_threshold.rows; k++) {
+ for (int l = i; l > i - span; l--) {
+ if (bound_threshold.data[k * bound_threshold.step[0] + l] == 255) {
+ whiteCount++;
+ }
+ }
+ }
+
+ if (whiteCount * 1.0 / (span * bound_threshold.rows) > 0.26) {
+ posRight = i;
+ break;
+ }
+ }
+
+ if (posLeft < posRight) {
+ return true;
+ }
+ return false;
+ }
+
+ bool bFindLeftRightBound2(Mat &bound_threshold, int &posLeft, int &posRight) {
+
+ float span = bound_threshold.rows * 0.2f;
+
+ for (int i = 0; i < bound_threshold.cols - span - 1; i += 3) {
+ int whiteCount = 0;
+ for (int k = 0; k < bound_threshold.rows; k++) {
+ for (int l = i; l < i + span; l++) {
+ if (bound_threshold.data[k * bound_threshold.step[0] + l] == 255) {
+ whiteCount++;
+ }
+ }
+ }
+ if (whiteCount * 1.0 / (span * bound_threshold.rows) > 0.32) {
+ posLeft = i;
+ break;
+ }
+ }
+ span = bound_threshold.rows * 0.2f;
+
+
+ for (int i = bound_threshold.cols - 1; i > span; i -= 3) {
+ int whiteCount = 0;
+ for (int k = 0; k < bound_threshold.rows; k++) {
+ for (int l = i; l > i - span; l--) {
+ if (bound_threshold.data[k * bound_threshold.step[0] + l] == 255) {
+ whiteCount++;
+ }
+ }
+ }
+
+ if (whiteCount * 1.0 / (span * bound_threshold.rows) > 0.22) {
+ posRight = i;
+ break;
+ }
+ }
+
+ if (posLeft < posRight) {
+ return true;
+ }
+ return false;
+ }
+
+
+ bool plateColorJudge(const Mat &src, const Color r, const bool adaptive_minsv,
+ float &percent) {
+
+ const float thresh = 0.45f;
+
+ Mat src_gray;
+ colorMatch(src, src_gray, r, adaptive_minsv);
+
+ percent =
+ float(countNonZero(src_gray)) / float(src_gray.rows * src_gray.cols);
+ // cout << "percent:" << percent << endl;
+
+ if (percent > thresh)
+ return true;
+ else
+ return false;
+ }
+
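+ // getPlateType: test BLUE, YELLOW and WHITE in that order and return the
+ // first color whose matched-pixel ratio exceeds the threshold inside
+ // plateColorJudge; if none matches, BLUE is returned as the default.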
+ Color getPlateType(const Mat &src, const bool adaptive_minsv) {
+ float max_percent = 0;
+ Color max_color = UNKNOWN;
+
+ float blue_percent = 0;
+ float yellow_percent = 0;
+ float white_percent = 0;
+
+ if (plateColorJudge(src, BLUE, adaptive_minsv, blue_percent) == true) {
+ // cout << "BLUE" << endl;
+ return BLUE;
+ } else if (plateColorJudge(src, YELLOW, adaptive_minsv, yellow_percent) ==
+ true) {
+ // cout << "YELLOW" << endl;
+ return YELLOW;
+ } else if (plateColorJudge(src, WHITE, adaptive_minsv, white_percent) ==
+ true) {
+ // cout << "WHITE" << endl;
+ return WHITE;
+ } else {
+ //std::cout << "OTHER" << std::endl;
+
+ /*max_percent = blue_percent > yellow_percent ? blue_percent : yellow_percent;
+ max_color = blue_percent > yellow_percent ? BLUE : YELLOW;
+ max_color = max_percent > white_percent ? max_color : WHITE;*/
+
+ // always return blue
+ return BLUE;
+ }
+ }
+
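+ // clearLiuDingOnly: count the binary transitions ("jumps") in every row; a
+ // row with 7 or fewer jumps is treated as a rivet/border row and zeroed out.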
+ void clearLiuDingOnly(Mat &img) {
+ const int x = 7;
+ Mat jump = Mat::zeros(1, img.rows, CV_32F);
+ for (int i = 0; i < img.rows; i++) {
+ int jumpCount = 0;
+ int whiteCount = 0;
+ for (int j = 0; j < img.cols - 1; j++) {
+ if (img.at<char>(i, j) != img.at<char>(i, j + 1)) jumpCount++;
+
+ if (img.at<uchar>(i, j) == 255) {
+ whiteCount++;
+ }
+ }
+
+ jump.at<float>(i) = (float) jumpCount;
+ }
+
+ for (int i = 0; i < img.rows; i++) {
+ if (jump.at<float>(i) <= x) {
+ for (int j = 0; j < img.cols; j++) {
+ img.at<char>(i, j) = 0;
+ }
+ }
+ }
+ }
+
+ bool clearLiuDing(Mat &img) {
+ std::vector