import os
import sys
import cv2
from PyQt5.QtGui import QPixmap, QImage, qRed, qGreen, qBlue
from PyQt5.QtWidgets import QMainWindow, QApplication, QFileDialog, QMessageBox
import matplotlib.pyplot as plt
import numpy as np
import traceback
from PIL import Image
from PyQt5.QtCore import Qt
from predict import solve


def cvImgtoQtImg(cvImg):  # convert an OpenCV image (grayscale or BGR) to a QImage
    if cvImg.ndim == 2:
        QtImgBuf = cv2.cvtColor(cvImg, cv2.COLOR_GRAY2BGRA)  # single-channel input
    else:
        QtImgBuf = cv2.cvtColor(cvImg, cv2.COLOR_BGR2BGRA)
    QtImg = QImage(QtImgBuf.data, QtImgBuf.shape[1], QtImgBuf.shape[0], QImage.Format_RGB32)
    return QtImg.copy()  # copy so the QImage owns its pixels once QtImgBuf is freed

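
# Illustrative usage sketch (not part of the original app): converting a tiny
# synthetic BGR array and checking the resulting QImage dimensions. The array
# contents and size here are made up for demonstration only.
#
#     buf = np.zeros((4, 4, 3), dtype=np.uint8)   # 4x4 black image, BGR
#     qimg = cvImgtoQtImg(buf)
#     assert qimg.width() == 4 and qimg.height() == 4
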
def QImage2CV(qimg):
    """Convert a QImage to an OpenCV (BGR) ndarray, pixel by pixel."""
    tmp = qimg

    # Create an empty image with numpy
    cv_image = np.zeros((tmp.height(), tmp.width(), 3), dtype=np.uint8)

    for row in range(0, tmp.height()):
        for col in range(0, tmp.width()):
            r = qRed(tmp.pixel(col, row))
            g = qGreen(tmp.pixel(col, row))
            b = qBlue(tmp.pixel(col, row))
            cv_image[row, col, 0] = r
            cv_image[row, col, 1] = g
            cv_image[row, col, 2] = b

    cv_image = cv2.cvtColor(cv_image, cv2.COLOR_RGB2BGR)

    return cv_image

def QPixmap2cv(qtpixmap):
    """Convert a QPixmap to an OpenCV (BGR) ndarray."""
    try:
        qimg = qtpixmap.toImage()
        temp_shape = (qimg.height(), qimg.bytesPerLine() * 8 // qimg.depth())
        temp_shape += (4,)
        ptr = qimg.bits()
        ptr.setsize(qimg.byteCount())
        result = np.array(ptr, dtype=np.uint8).reshape(temp_shape)
        # Format_RGB32 / ARGB32 buffers are laid out B, G, R, A in memory on
        # little-endian machines, so the first three channels are already BGR.
        result = result[..., :3]
    except Exception:
        traceback.print_exc()
        return None

    return result

def FFT2(img):
    # Fourier transform: return the centered log-magnitude spectrum as uint8
    f = np.fft.fft2(img)
    fshift = np.fft.fftshift(f)
    res = 20 * np.log(np.abs(fshift) + 1)  # +1 avoids log(0) at zero-magnitude bins
    res = res - res.min()
    res = res / res.max() * 255
    res = np.array(res, np.uint8)

    # plt.imshow(res, cmap='gray')
    plt.imshow(res)
    plt.axis('off')
    plt.savefig('Img.png', bbox_inches='tight', pad_inches=0.0)
    plt.close()
    result = cv2.imread('Img.png', 0)
    os.remove('Img.png')

    return result

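
# Illustrative usage sketch (FFT2 is not wired into the GUI below): it can be
# applied to any single-channel image loaded with OpenCV; the file names here
# are assumptions for the example.
#
#     gray = cv2.imread('example.png', 0)       # hypothetical input path
#     spectrum = FFT2(gray)                      # uint8 log-magnitude spectrum
#     cv2.imwrite('spectrum.png', spectrum)
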
class MyWindow(QMainWindow):
    def __init__(self, Ui_MainWindow):
        # The QApplication must exist before any QWidget (including this
        # QMainWindow) is constructed.
        app = QApplication(sys.argv)
        super().__init__()
        self.ui = Ui_MainWindow
        MainWindow = QMainWindow()
        self.ui.setupUi(MainWindow)
        self.picpath = ''
        self.openfile_name = ''
        self.pixmapBefore = QPixmap()
        self.pixmapAfter = QPixmap()

        self.ui.PicBefore.setScaledContents(False)
        self.ui.PicBefore.setAlignment(Qt.AlignCenter)
        self.ui.PicAfter.setScaledContents(False)
        self.ui.PicAfter.setAlignment(Qt.AlignCenter)

        # Wire each button to its handler
        self.ui.ImportBtn.clicked.connect(lambda: self.Import())
        self.ui.GrayscaleBtn.clicked.connect(lambda: self.Grayscale())
        self.ui.BinarizationBtn.clicked.connect(lambda: self.Binarization())
        self.ui.geometryBtn.clicked.connect(lambda: self.Geometry())
        self.ui.histogramBtn.clicked.connect(lambda: self.Histogram())
        self.ui.EqualizeBtn.clicked.connect(lambda: self.Equalize())
        self.ui.MeanBtn.clicked.connect(lambda: self.Mean())
        self.ui.BoxBtn.clicked.connect(lambda: self.Box())
        self.ui.vagueBtn.clicked.connect(lambda: self.Vague())
        self.ui.medianBtn.clicked.connect(lambda: self.Median())
        self.ui.RobertsBtn.clicked.connect(lambda: self.Roberts())
        self.ui.PrewittBtn.clicked.connect(lambda: self.Prewitt())
        self.ui.SobelBtn.clicked.connect(lambda: self.Sobel())
        self.ui.LowpassBtn.clicked.connect(lambda: self.Lowpass())
        self.ui.HighpassBtn.clicked.connect(lambda: self.Highpass())
        self.ui.corrosionBtn.clicked.connect(lambda: self.Corrosion())
        self.ui.expansionBtn.clicked.connect(lambda: self.Expansion())
        self.ui.OpenBtn.clicked.connect(lambda: self.Open())
        self.ui.CloseBtn.clicked.connect(lambda: self.Close())
        self.ui.LOGBtn.clicked.connect(lambda: self.LOG())
        self.ui.ScharrBtn.clicked.connect(lambda: self.Scharr())
        self.ui.CannyBtn.clicked.connect(lambda: self.Canny())
        self.ui.SaveBtn.clicked.connect(lambda: self.Save())
        self.ui.feiyan.clicked.connect(lambda: self.Feiyan())

        # Show the window and start the Qt event loop
        MainWindow.show()
        sys.exit(app.exec_())

    def Import(self):
        # Load an image from disk
        self.openfile_name = QFileDialog.getOpenFileName(self, 'Select file', '', "Image Files (*.png *.jpg *.bmp)")[0]
        if not self.openfile_name:
            # No file selected, return immediately
            return

        try:
            self.pixmapBefore = QPixmap(self.openfile_name)
            if self.pixmapBefore.isNull():
                raise ValueError("Unable to load the image file")

            self.picpath = self.openfile_name

            image = cv2.imread(self.picpath)
            if image is None:
                raise ValueError("Unable to read the image file")

            self.ui.Label_H.setText(str(image.shape[0]))
            self.ui.Label_W.setText(str(image.shape[1]))
            self.ui.Label_T.setText(str(image.shape[2]))
            self.ui.Label_Type.setText(str(image.dtype))

            self.resizeImage(self.ui.PicBefore, self.pixmapBefore)

        except Exception as e:
            traceback.print_exc()
            QMessageBox.critical(self, "Error", f"Failed to load the image: {e}")

    def Save(self):
        if self.pixmapAfter.isNull():
            QMessageBox.about(self, 'Save failed', 'There is no processed image to save')
            return
        # Save the processed image
        SaveName = QFileDialog.getSaveFileName(self, 'Select file', '', "Image Files (*.png *.jpg *.bmp)")[0]

        if not SaveName:
            return

        try:
            if isinstance(self.pixmapAfter, QImage):
                result = cv2.imwrite(SaveName, QImage2CV(self.pixmapAfter))
            else:
                result = cv2.imwrite(SaveName, QPixmap2cv(self.pixmapAfter))
            if result:
                QMessageBox.about(self, 'Saved', 'The image was saved successfully')
            else:
                QMessageBox.about(self, 'Save failed', 'The path must not contain Chinese characters or spaces')
        except Exception:
            traceback.print_exc()

    def resizeImage(self, label, pixmap):
        # print("aaa")
        if pixmap:
            label.setPixmap(pixmap.scaled(label.size(), Qt.KeepAspectRatio, Qt.SmoothTransformation))

    def resizeEvent(self, event):
        self.resizeImage(self.ui.PicBefore, self.pixmapBefore)
        self.resizeImage(self.ui.PicAfter, self.pixmapAfter)
        super(MyWindow, self).resizeEvent(event)

    def check(self):
        if self.pixmapBefore.isNull():
            QMessageBox.about(self, 'Operation failed', 'Please import an image first')
            return True
        img = cv2.imread(self.picpath)
        if img is None:
            QMessageBox.about(self, 'Operation failed', 'Unable to read the image')
            return True
        return False

    def Grayscale(self):
        if self.check():
            return
        img = cv2.imread(self.picpath)
        # cv2.imread returns BGR data, so convert with COLOR_BGR2GRAY
        grayImg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        qt_img = cvImgtoQtImg(grayImg)
        self.pixmapAfter = QPixmap.fromImage(qt_img)
        self.resizeImage(self.ui.PicAfter, self.pixmapAfter)

    def Binarization(self):
        if self.check():
            return
        try:
            img = cv2.imread(self.picpath)
            grayImg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            qt_img = cvImgtoQtImg(cv2.threshold(grayImg, 127, 255, cv2.THRESH_BINARY)[1])
            self.pixmapAfter = QPixmap.fromImage(qt_img)
            self.resizeImage(self.ui.PicAfter, self.pixmapAfter)
        except Exception:
            traceback.print_exc()

    def Geometry(self):
        if self.check():
            return
        img = cv2.imread(self.picpath)
        self.ui.PicAfter.setScaledContents(False)
        qt_img = cvImgtoQtImg(cv2.resize(img, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_CUBIC))
        self.pixmapAfter = QPixmap.fromImage(qt_img)
        self.resizeImage(self.ui.PicAfter, self.pixmapAfter)

    def Histogram(self):
        if self.check():
            return
        img = cv2.imread(self.picpath)
        # Grayscale histogram; the upper range bound is exclusive, so use 256
        img = cv2.calcHist([cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)], [0], None, [256], [0, 256])
        plt.plot(img)
        plt.savefig('img.jpg')
        plt.close()
        self.pixmapAfter = QPixmap('img.jpg')
        self.resizeImage(self.ui.PicAfter, self.pixmapAfter)
        os.remove('img.jpg')

    def Equalize(self):
        if self.check():
            return
        img = cv2.imread(self.picpath)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        qt_img = cvImgtoQtImg(cv2.equalizeHist(img))
        self.pixmapAfter = QPixmap.fromImage(qt_img)
        self.resizeImage(self.ui.PicAfter, self.pixmapAfter)

    def Mean(self):
        if self.check():
            return
        img = cv2.imread(self.picpath)
        qt_img = cvImgtoQtImg(cv2.blur(img, (3, 5)))
        self.pixmapAfter = QPixmap.fromImage(qt_img)
        self.resizeImage(self.ui.PicAfter, self.pixmapAfter)

    def Box(self):
        if self.check():
            return
        img = cv2.imread(self.picpath)
        qt_img = cvImgtoQtImg(cv2.boxFilter(img, -1, (3, 5)))
        self.pixmapAfter = QPixmap.fromImage(qt_img)
        self.resizeImage(self.ui.PicAfter, self.pixmapAfter)

    def Vague(self):
        if self.check():
            return
        img = cv2.imread(self.picpath)
        qt_img = cvImgtoQtImg(cv2.GaussianBlur(img, (3, 3), 0))
        self.pixmapAfter = QPixmap.fromImage(qt_img)
        self.resizeImage(self.ui.PicAfter, self.pixmapAfter)

    def Median(self):
        if self.check():
            return
        img = cv2.imread(self.picpath)
        qt_img = cvImgtoQtImg(cv2.medianBlur(img, 5))
        self.pixmapAfter = QPixmap.fromImage(qt_img)
        self.resizeImage(self.ui.PicAfter, self.pixmapAfter)

    def Roberts(self):
        if self.check():
            return
        img = cv2.imread(self.picpath)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        ret, img_binary = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
        kernelx_Robert = np.array([[-1, 0], [0, 1]], dtype=int)
        kernely_Robert = np.array([[0, -1], [1, 0]], dtype=int)
        x_Robert = cv2.filter2D(img_binary, cv2.CV_16S, kernelx_Robert)
        y_Robert = cv2.filter2D(img_binary, cv2.CV_16S, kernely_Robert)
        absX_Robert = cv2.convertScaleAbs(x_Robert)
        absY_Robert = cv2.convertScaleAbs(y_Robert)
        qt_img = cvImgtoQtImg(cv2.addWeighted(absX_Robert, 0.5, absY_Robert, 0.5, 0))
        self.pixmapAfter = QPixmap.fromImage(qt_img)
        self.resizeImage(self.ui.PicAfter, self.pixmapAfter)

    def Prewitt(self):
        if self.check():
            return
        img = cv2.imread(self.picpath)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        ret, img_binary = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
        kernelx_Prewitt = np.array([[1, 1, 1], [0, 0, 0], [-1, -1, -1]], dtype=int)
        kernely_Prewitt = np.array([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]], dtype=int)
        x_Prewitt = cv2.filter2D(img_binary, -1, kernelx_Prewitt)
        y_Prewitt = cv2.filter2D(img_binary, -1, kernely_Prewitt)
        absX_Prewitt = cv2.convertScaleAbs(x_Prewitt)
        absY_Prewitt = cv2.convertScaleAbs(y_Prewitt)
        qt_img = cvImgtoQtImg(cv2.addWeighted(absX_Prewitt, 0.5, absY_Prewitt, 0.5, 0))
        self.pixmapAfter = QPixmap.fromImage(qt_img)
        self.resizeImage(self.ui.PicAfter, self.pixmapAfter)

    def Sobel(self):
        if self.check():
            return
        img = cv2.imread(self.picpath)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        ret, img_binary = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
        x_Sobel = cv2.Sobel(img_binary, cv2.CV_16S, 1, 0)
        y_Sobel = cv2.Sobel(img_binary, cv2.CV_16S, 0, 1)
        absX_Sobel = cv2.convertScaleAbs(x_Sobel)
        absY_Sobel = cv2.convertScaleAbs(y_Sobel)
        qt_img = cvImgtoQtImg(cv2.addWeighted(absX_Sobel, 0.5, absY_Sobel, 0.5, 0))
        self.pixmapAfter = QPixmap.fromImage(qt_img)
        self.resizeImage(self.ui.PicAfter, self.pixmapAfter)

    def Lowpass(self):
        if self.check():
            return
        img = cv2.imread(self.picpath)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img_dft = np.fft.fft2(img)
        dft_shift_low = np.fft.fftshift(img_dft)
        h, w = dft_shift_low.shape[0:2]
        h_center, w_center = int(h / 2), int(w / 2)
        # Keep only a 100x100 window of low frequencies around the spectrum center
        img_black = np.zeros((h, w), np.uint8)
        img_black[h_center - int(100 / 2):h_center + int(100 / 2), w_center - int(100 / 2):w_center + int(100 / 2)] = 1
        dft_shift_low = dft_shift_low * img_black
        idft_shift = np.fft.ifftshift(dft_shift_low)
        ifimg = np.fft.ifft2(idft_shift)
        ifimg = np.abs(ifimg)
        ifimg = np.uint8(np.clip(ifimg, 0, 255))  # uint8, not int8, to avoid wrap-around
        cv2.imwrite('img.jpg', ifimg)
        self.pixmapAfter = QPixmap('img.jpg')
        self.resizeImage(self.ui.PicAfter, self.pixmapAfter)
        os.remove('img.jpg')

    def Highpass(self):
        if self.check():
            return
        img = cv2.imread(self.picpath)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img_dft = np.fft.fft2(img)
        dft_shift = np.fft.fftshift(img_dft)
        h, w = dft_shift.shape[0:2]
        h_center, w_center = int(h / 2), int(w / 2)
        # Zero out a 50x50 window of low frequencies around the spectrum center
        dft_shift[h_center - int(50 / 2):h_center + int(50 / 2),
                  w_center - int(50 / 2):w_center + int(50 / 2)] = 0
        idft_shift = np.fft.ifftshift(dft_shift)
        img_idft = np.fft.ifft2(idft_shift)
        img_idft = np.abs(img_idft)
        img_idft = np.uint8(np.clip(img_idft, 0, 255))  # uint8, not int8, to avoid wrap-around
        cv2.imwrite('img.jpg', img_idft)
        self.pixmapAfter = QPixmap('img.jpg')
        self.resizeImage(self.ui.PicAfter, self.pixmapAfter)
        os.remove('img.jpg')

    def Corrosion(self):
        if self.check():
            return
        img = cv2.imread(self.picpath)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        ret, img_binary = cv2.threshold(img, 55, 255, cv2.THRESH_BINARY)
        img_binary = np.ones(img_binary.shape, np.uint8) * 255 - img_binary  # invert the binary image
        kernel = np.ones((3, 3), np.uint8)
        qt_img = cvImgtoQtImg(cv2.erode(img_binary, kernel))
        self.pixmapAfter = QPixmap.fromImage(qt_img)
        self.resizeImage(self.ui.PicAfter, self.pixmapAfter)

    def Expansion(self):
        if self.check():
            return
        img = cv2.imread(self.picpath)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        ret, img_binary = cv2.threshold(img, 55, 255, cv2.THRESH_BINARY)
        img_binary = np.ones(img_binary.shape, np.uint8) * 255 - img_binary  # invert the binary image
        kernel = np.ones((3, 3), np.uint8)
        qt_img = cvImgtoQtImg(cv2.dilate(img_binary, kernel, iterations=1))
        self.pixmapAfter = QPixmap.fromImage(qt_img)
        self.resizeImage(self.ui.PicAfter, self.pixmapAfter)

    def Open(self):
        if self.check():
            return
        img = cv2.imread(self.picpath)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        ret, img_binary = cv2.threshold(img, 55, 255, cv2.THRESH_BINARY)
        img_binary = np.ones(img_binary.shape, np.uint8) * 255 - img_binary  # invert the binary image
        kernel = np.ones((3, 3), np.uint8)
        qt_img = cvImgtoQtImg(cv2.morphologyEx(img_binary, cv2.MORPH_OPEN, kernel))
        self.pixmapAfter = QPixmap.fromImage(qt_img)
        self.resizeImage(self.ui.PicAfter, self.pixmapAfter)

    def Close(self):
        if self.check():
            return
        img = cv2.imread(self.picpath)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        ret, img_binary = cv2.threshold(img, 55, 255, cv2.THRESH_BINARY)
        img_binary = np.ones(img_binary.shape, np.uint8) * 255 - img_binary  # invert the binary image
        kernel = np.ones((3, 3), np.uint8)
        qt_img = cvImgtoQtImg(cv2.morphologyEx(img_binary, cv2.MORPH_CLOSE, kernel))
        self.pixmapAfter = QPixmap.fromImage(qt_img)
        self.resizeImage(self.ui.PicAfter, self.pixmapAfter)

    def LOG(self):
        if self.check():
            return
        img = cv2.imread(self.picpath)
        img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img_blur = cv2.GaussianBlur(img_gray, (3, 3), 1, 1)
        LOG_result = cv2.Laplacian(img_blur, cv2.CV_16S, ksize=1)
        qt_img = cvImgtoQtImg(cv2.convertScaleAbs(LOG_result))
        self.pixmapAfter = QPixmap.fromImage(qt_img)
        self.resizeImage(self.ui.PicAfter, self.pixmapAfter)

    def Scharr(self):
        if self.check():
            return
        img = cv2.imread(self.picpath)
        img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        Scharr_result = cv2.Scharr(img_gray, cv2.CV_16S, dx=1, dy=0)
        qt_img = cvImgtoQtImg(cv2.convertScaleAbs(Scharr_result))
        self.pixmapAfter = QPixmap.fromImage(qt_img)
        self.resizeImage(self.ui.PicAfter, self.pixmapAfter)

    def Canny(self):
        if self.check():
            return
        img = cv2.imread(self.picpath)
        img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img_blur_canny = cv2.GaussianBlur(img_gray, (7, 7), 1, 1)
        qt_img = cvImgtoQtImg(cv2.Canny(img_blur_canny, 50, 150))
        self.pixmapAfter = QPixmap.fromImage(qt_img)
        self.resizeImage(self.ui.PicAfter, self.pixmapAfter)

    def Feiyan(self):
        # Run the segmentation model from predict.solve on the current image
        if self.check():
            return
        print(self.picpath)
        filename = str(self.convert_path(self.picpath))
        print(filename)
        _ = solve("checkpoint_epoch16.pth", filename, "out.png", num_classes=4)
        self.pixmapAfter = QPixmap("out.png")
        self.resizeImage(self.ui.PicAfter, self.pixmapAfter)

    def convert_path(self, path):
        # Convert forward slashes to Windows-style backslashes
        return path.replace("/", "\\")
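

# Minimal launch sketch (illustrative, not from the original file): MyWindow's
# __init__ builds the UI and starts the Qt event loop itself, so constructing it
# with an instance of the Qt Designer-generated class is enough. The module name
# "mainwindow_ui" is an assumption; use whatever module pyuic5 generated.
#
# if __name__ == '__main__':
#     from mainwindow_ui import Ui_MainWindow  # assumed module name
#     MyWindow(Ui_MainWindow())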