Fix the image-opening bug; fix a float-to-int conversion bug

	modified:   nst/1.jpg
	modified:   sub_windows/sub_window_1.py
	modified:   sub_windows/sub_window_2.py
	modified:   sub_windows/sub_window_3.py
	modified:   sub_windows/sub_window_4.py
	modified:   sub_windows/sub_window_5.py
	modified:   sub_windows/sub_window_6.py
	modified:   sub_windows/sub_window_7.py
	modified:   sub_windows/sub_window_8.py
Branch: master
Author: shawn-sheep (2 years ago)
Parent: d22d223d6d
Commit: 8fb914f6fe

Binary file not shown. (After: Size: 90 KiB)

Binary file not shown. (Before: Size: 11 KiB | After: Size: 95 KiB)
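Taken together, the two fixes that repeat across the sub-windows boil down to the sketch below. It is a minimal illustration distilled from the hunks that follow, not code from the repository: the helper name load_and_scale, the bare label_image/zoom_factor parameters, and the PyQt5 import are assumptions made for the example.

import cv2
from PyQt5.QtGui import QImage


def load_and_scale(label_image, file_path, zoom_factor=1.0):
    # Fix 1: cv2.imread() returns None for a cancelled dialog or an unreadable
    # path, so guard before touching .shape instead of raising AttributeError.
    cv_src_image = cv2.imread(file_path)
    if cv_src_image is None:
        return None

    height, width = cv_src_image.shape[0], cv_src_image.shape[1]
    ui_image = QImage(
        cv2.cvtColor(cv_src_image, cv2.COLOR_BGR2RGB),
        width,
        height,
        QImage.Format_RGB888,
    )

    # Fix 2: label width/height times a float zoom factor is a float, but
    # QImage.scaledToWidth()/scaledToHeight() expect an int, hence the int() cast.
    if width > height:
        return ui_image.scaledToWidth(int(label_image.width() * zoom_factor))
    return ui_image.scaledToHeight(int(label_image.height() * zoom_factor))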

sub_windows/sub_window_1.py

@@ -22,10 +22,18 @@ class SubWindow(QMainWindow):
         self.ui.pushButton_reset.clicked.connect(self.reset)
 
     def open_file(self):
-        file_path, file_type = QFileDialog.getOpenFileName(QFileDialog(), '选择图片', '', '图像文件(*.jpg *.bmp *.png)')
+        file_path, file_type = QFileDialog.getOpenFileName(
+            QFileDialog(), "选择图片", "", "图像文件(*.jpg *.bmp *.png)"
+        )
         self.cv_srcImage = cv2.imread(file_path)
+        if self.cv_srcImage is not None:
         height, width, channels = self.cv_srcImage.shape
-        ui_image = QImage(cv2.cvtColor(self.cv_srcImage, cv2.COLOR_BGR2RGB), width, height, QImage.Format_RGB888)
+        ui_image = QImage(
+            cv2.cvtColor(self.cv_srcImage, cv2.COLOR_BGR2RGB),
+            width,
+            height,
+            QImage.Format_RGB888,
+        )
         if width > height:
             ui_image = ui_image.scaledToWidth(self.ui.label_image.width())
         else:
@@ -37,7 +45,12 @@ class SubWindow(QMainWindow):
     def gray_convert(self):
         gray_image = cv2.cvtColor(self.cv_srcImage, cv2.COLOR_BGR2GRAY)
         height, width = gray_image.shape
-        ui_image = QImage(cv2.cvtColor(gray_image, cv2.COLOR_BGR2RGB), width, height, QImage.Format_RGB888)
+        ui_image = QImage(
+            cv2.cvtColor(gray_image, cv2.COLOR_BGR2RGB),
+            width,
+            height,
+            QImage.Format_RGB888,
+        )
         if width > height:
             ui_image = ui_image.scaledToWidth(self.ui.label_image.width())
         else:
@@ -48,9 +61,16 @@ class SubWindow(QMainWindow):
     def bin_convert(self):
         threshold_value = int(self.ui.spinBox_bin_threshold.value())
         gray_image = cv2.cvtColor(self.cv_srcImage, cv2.COLOR_BGR2GRAY)
-        ret, bin_image = cv2.threshold(gray_image, threshold_value, 255, cv2.THRESH_BINARY)
+        ret, bin_image = cv2.threshold(
+            gray_image, threshold_value, 255, cv2.THRESH_BINARY
+        )
         height, width = bin_image.shape
-        ui_image = QImage(cv2.cvtColor(bin_image, cv2.COLOR_BGR2RGB), width, height, QImage.Format_RGB888)
+        ui_image = QImage(
+            cv2.cvtColor(bin_image, cv2.COLOR_BGR2RGB),
+            width,
+            height,
+            QImage.Format_RGB888,
+        )
         if width > height:
             ui_image = ui_image.scaledToWidth(self.ui.label_image.width())
         else:
@@ -60,7 +80,12 @@ class SubWindow(QMainWindow):
 
     def reset(self):
         height, width, channels = self.cv_srcImage.shape
-        ui_image = QImage(cv2.cvtColor(self.cv_srcImage, cv2.COLOR_BGR2RGB), width, height, QImage.Format_RGB888)
+        ui_image = QImage(
+            cv2.cvtColor(self.cv_srcImage, cv2.COLOR_BGR2RGB),
+            width,
+            height,
+            QImage.Format_RGB888,
+        )
         if width > height:
             ui_image = ui_image.scaledToWidth(self.ui.label_image.width())
         else:
@@ -73,11 +98,11 @@ class SubWindow(QMainWindow):
 
     def _show_image_information(self, current_image_type):
         if current_image_type == 1:
-            self.ui.label_color_space_2.setText('彩色图')
+            self.ui.label_color_space_2.setText("彩色图")
         if current_image_type == 2:
-            self.ui.label_color_space_2.setText('灰度图')
+            self.ui.label_color_space_2.setText("灰度图")
         if current_image_type == 3:
-            self.ui.label_color_space_2.setText('二值图')
+            self.ui.label_color_space_2.setText("二值图")
 
     def _set_pushbutton_enabled(self):
         self.ui.pushButton_gray_convert.setEnabled(True)

sub_windows/sub_window_2.py

@@ -24,10 +24,18 @@ class SubWindow(QMainWindow):
         self.ui.pushButton_screenshot.clicked.connect(self.clip_image)
 
     def open_file(self):
-        file_path, file_type = QFileDialog.getOpenFileName(QFileDialog(), '选择图片', '', '图像文件(*.jpg *.bmp *.png)')
+        file_path, file_type = QFileDialog.getOpenFileName(
+            QFileDialog(), "选择图片", "", "图像文件(*.jpg *.bmp *.png)"
+        )
         self.cv_srcImage = cv2.imread(file_path)
+        if self.cv_srcImage is not None:
         height, width, channels = self.cv_srcImage.shape
-        ui_image = QImage(cv2.cvtColor(self.cv_srcImage, cv2.COLOR_BGR2RGB), width, height, QImage.Format_RGB888)
+        ui_image = QImage(
+            cv2.cvtColor(self.cv_srcImage, cv2.COLOR_BGR2RGB),
+            width,
+            height,
+            QImage.Format_RGB888,
+        )
         if width > height:
             ui_image = ui_image.scaledToWidth(self.ui.label_image.width())
         else:
@@ -40,11 +48,20 @@ class SubWindow(QMainWindow):
         self.zoom_factor += 0.1
         print(self.zoom_factor)
         height, width, channels = self.cv_srcImage.shape
-        ui_image = QImage(cv2.cvtColor(self.cv_srcImage, cv2.COLOR_BGR2RGB), width, height, QImage.Format_RGB888)
+        ui_image = QImage(
+            cv2.cvtColor(self.cv_srcImage, cv2.COLOR_BGR2RGB),
+            width,
+            height,
+            QImage.Format_RGB888,
+        )
         if width > height:
-            ui_image = ui_image.scaledToWidth(self.ui.label_image.width() * self.zoom_factor)
+            ui_image = ui_image.scaledToWidth(
+                int(self.ui.label_image.width() * self.zoom_factor)
+            )
         else:
-            ui_image = ui_image.scaledToHeight(self.ui.label_image.height() * self.zoom_factor)
+            ui_image = ui_image.scaledToHeight(
+                int(self.ui.label_image.height() * self.zoom_factor)
+            )
         self._show_qimage_to_label(ui_image)
 
     def zoom_out(self):
@@ -52,17 +69,31 @@ class SubWindow(QMainWindow):
         self.zoom_factor -= 0.1
         print(self.zoom_factor)
         height, width, channels = self.cv_srcImage.shape
-        ui_image = QImage(cv2.cvtColor(self.cv_srcImage, cv2.COLOR_BGR2RGB), width, height, QImage.Format_RGB888)
+        ui_image = QImage(
+            cv2.cvtColor(self.cv_srcImage, cv2.COLOR_BGR2RGB),
+            width,
+            height,
+            QImage.Format_RGB888,
+        )
         if width > height:
-            ui_image = ui_image.scaledToWidth(self.ui.label_image.width() * self.zoom_factor)
+            ui_image = ui_image.scaledToWidth(
+                int(self.ui.label_image.width() * self.zoom_factor)
+            )
         else:
-            ui_image = ui_image.scaledToHeight(self.ui.label_image.height() * self.zoom_factor)
+            ui_image = ui_image.scaledToHeight(
+                int(self.ui.label_image.height() * self.zoom_factor)
+            )
         self._show_qimage_to_label(ui_image)
 
     def zoom_reset(self):
         self.zoom_factor = 1.0
         height, width, channels = self.cv_srcImage.shape
-        ui_image = QImage(cv2.cvtColor(self.cv_srcImage, cv2.COLOR_BGR2RGB), width, height, QImage.Format_RGB888)
+        ui_image = QImage(
+            cv2.cvtColor(self.cv_srcImage, cv2.COLOR_BGR2RGB),
+            width,
+            height,
+            QImage.Format_RGB888,
+        )
         if width > height:
             ui_image = ui_image.scaledToWidth(self.ui.label_image.width())
         else:
@@ -74,16 +105,20 @@ class SubWindow(QMainWindow):
         anchor_y = int(self.ui.spinBox_anchor_y.value())
         offset_x = int(self.ui.spinBox_X_offset.value())
         offset_y = int(self.ui.spinBox_Y_offset.value())
-        clip_image = self.cv_srcImage.copy()[anchor_y: offset_y - 1, anchor_x: offset_x - 1]
-        cv2.imshow('clip_image', clip_image)
+        clip_image = self.cv_srcImage.copy()[
+            anchor_y : offset_y - 1, anchor_x : offset_x - 1
+        ]
+        cv2.imshow("clip_image", clip_image)
         cv2.waitKey(0)
 
     def _show_zoom_factor(self):
-        self.ui.label_zoom_factor_2.setText(str(self.zoom_factor)[:3] + 'x')
+        self.ui.label_zoom_factor_2.setText(str(self.zoom_factor)[:3] + "x")
 
     def _update_srcImage_size(self):
         height, width, channels = self.cv_srcImage.shape
-        self.ui.label_srcImage_size.setText('原图X轴*Y轴' + str(width) + ' x ' + str(height))
+        self.ui.label_srcImage_size.setText(
+            "原图X轴*Y轴" + str(width) + " x " + str(height)
+        )
         self.ui.spinBox_anchor_x.setMaximum(width)
         self.ui.spinBox_anchor_y.setMaximum(height)
         self.ui.spinBox_X_offset.setMaximum(width)

sub_windows/sub_window_3.py

@@ -81,6 +81,7 @@ class SubWindow(QMainWindow):
     def open_file(self):
         file_path, file_type = QFileDialog.getOpenFileName(QFileDialog(), '选择图片', '', '*.jpg *.bmp *.png *tif')
         self.cv_srcImage = cv2.imread(file_path)
+        if self.cv_srcImage is None:
         height, width, channels = self.cv_srcImage.shape
         ui_image = QImage(cv2.cvtColor(self.cv_srcImage, cv2.COLOR_BGR2RGB), width, height, QImage.Format_RGB888)
         if width > height:

sub_windows/sub_window_4.py

@@ -15,7 +15,7 @@ class SubWindow(QMainWindow):
         self.ui_init()
 
     def ui_init(self):
-        sharpen_type_list = ['Sobel算子', 'Laplace算子', '自定义卷积核']
+        sharpen_type_list = ["Sobel算子", "Laplace算子", "自定义卷积核"]
         self.ui.comboBox_selector.addItems(sharpen_type_list)
         self.ui.comboBox_selector.activated.connect(self.comboBox_selected)
         self.ui.pushButton_open_file.clicked.connect(self.open_file)
@@ -36,24 +36,32 @@ class SubWindow(QMainWindow):
             self.ui.groupBox_sobel_filter.setEnabled(False)
             self.ui.groupBox_laplace_filter.setEnabled(False)
             self.ui.groupBox_custom_filter.setEnabled(False)
-        elif selected == 'Sobel算子':
+        elif selected == "Sobel算子":
             self.ui.groupBox_sobel_filter.setEnabled(True)
             self.ui.groupBox_laplace_filter.setEnabled(False)
             self.ui.groupBox_custom_filter.setEnabled(False)
-        elif selected == 'Laplace算子':
+        elif selected == "Laplace算子":
             self.ui.groupBox_sobel_filter.setEnabled(False)
             self.ui.groupBox_laplace_filter.setEnabled(True)
             self.ui.groupBox_custom_filter.setEnabled(False)
-        elif selected == '自定义卷积核':
+        elif selected == "自定义卷积核":
             self.ui.groupBox_sobel_filter.setEnabled(False)
             self.ui.groupBox_laplace_filter.setEnabled(False)
             self.ui.groupBox_custom_filter.setEnabled(True)
 
     def open_file(self):
-        file_path, file_type = QFileDialog.getOpenFileName(QFileDialog(), '选择图片', '', '图像文件(*.jpg *.bmp *.png)')
+        file_path, file_type = QFileDialog.getOpenFileName(
+            QFileDialog(), "选择图片", "", "图像文件(*.jpg *.bmp *.png)"
+        )
         self.cv_srcImage = cv2.imread(file_path)
+        if self.cv_srcImage is None:
         height, width, channels = self.cv_srcImage.shape
-        ui_image = QImage(cv2.cvtColor(self.cv_srcImage, cv2.COLOR_BGR2RGB), width, height, QImage.Format_RGB888)
+        ui_image = QImage(
+            cv2.cvtColor(self.cv_srcImage, cv2.COLOR_BGR2RGB),
+            width,
+            height,
+            QImage.Format_RGB888,
+        )
         if width > height:
             ui_image = ui_image.scaledToWidth(self.ui.label_image_1.width())
         else:
@@ -80,6 +88,7 @@ class SubWindow(QMainWindow):
             y = cv2.convertScaleAbs(y)
             x_y = cv2.addWeighted(x, 0.5, y, 0.5, 0)
             return x_y
+
         mode = 0
         if self.ui.radioButton_sobel_dx.isChecked():
             mode = 1
@@ -89,7 +98,12 @@ class SubWindow(QMainWindow):
             mode = 0
         self.cv_sharpenImage = _sobel_sharpen_filter(image=self.cv_srcImage, mode=mode)
         height, width = self.cv_sharpenImage.shape
-        ui_image = QImage(cv2.cvtColor(self.cv_sharpenImage, cv2.COLOR_BGR2RGB), width, height, QImage.Format_RGB888)
+        ui_image = QImage(
+            cv2.cvtColor(self.cv_sharpenImage, cv2.COLOR_BGR2RGB),
+            width,
+            height,
+            QImage.Format_RGB888,
+        )
         if width > height:
             ui_image = ui_image.scaledToWidth(self.ui.label_image_2.width())
         else:
@@ -104,10 +118,18 @@ class SubWindow(QMainWindow):
             copyImage = cv2.Laplacian(copyImage, ddepth=cv2.CV_16S, ksize=int(size))
             copyImage = cv2.convertScaleAbs(copyImage)
             return copyImage
+
         size = self.ui.spinBox_laplace_ksize.value()
-        self.cv_sharpenImage = _laplacian_sharpen_filter(image=self.cv_srcImage, size=size)
+        self.cv_sharpenImage = _laplacian_sharpen_filter(
+            image=self.cv_srcImage, size=size
+        )
         height, width = self.cv_sharpenImage.shape
-        ui_image = QImage(cv2.cvtColor(self.cv_sharpenImage, cv2.COLOR_BGR2RGB), width, height, QImage.Format_RGB888)
+        ui_image = QImage(
+            cv2.cvtColor(self.cv_sharpenImage, cv2.COLOR_BGR2RGB),
+            width,
+            height,
+            QImage.Format_RGB888,
+        )
         if width > height:
             ui_image = ui_image.scaledToWidth(self.ui.label_image_2.width())
         else:
@@ -117,21 +139,43 @@ class SubWindow(QMainWindow):
     def custom_filter(self):
         def _custom_filter(image, custom_kernel=None):
             if custom_kernel is None:
-                kernel = np.array([[0, -1.5, 0], [-1.5, 7, -1.5], [0, -1.5, 0]], np.float)
+                kernel = np.array(
+                    [[0, -1.5, 0], [-1.5, 7, -1.5], [0, -1.5, 0]], np.float
+                )
             else:
-                kernel = np.array([[custom_kernel[0], custom_kernel[1], custom_kernel[2]],
-                                   [custom_kernel[3], custom_kernel[4], custom_kernel[5]],
-                                   [custom_kernel[6], custom_kernel[7], custom_kernel[8]]],
-                                  np.float)
+                kernel = np.array(
+                    [
+                        [custom_kernel[0], custom_kernel[1], custom_kernel[2]],
+                        [custom_kernel[3], custom_kernel[4], custom_kernel[5]],
+                        [custom_kernel[6], custom_kernel[7], custom_kernel[8]],
+                    ],
+                    np.float,
+                )
             dst = cv2.filter2D(src=image, ddepth=cv2.CV_16S, kernel=kernel)
             dst = cv2.convertScaleAbs(dst)
             return dst
-        custom_kernel = [self.ui.doubleSpinBox_custom_filter_1.value(), self.ui.doubleSpinBox_custom_filter_2.value(), self.ui.doubleSpinBox_custom_filter_3.value(),
-                         self.ui.doubleSpinBox_custom_filter_4.value(), self.ui.doubleSpinBox_custom_filter_5.value(), self.ui.doubleSpinBox_custom_filter_6.value(),
-                         self.ui.doubleSpinBox_custom_filter_7.value(), self.ui.doubleSpinBox_custom_filter_8.value(), self.ui.doubleSpinBox_custom_filter_9.value()]
-        self.cv_sharpenImage = _custom_filter(image=self.cv_srcImage, custom_kernel=custom_kernel)
+
+        custom_kernel = [
+            self.ui.doubleSpinBox_custom_filter_1.value(),
+            self.ui.doubleSpinBox_custom_filter_2.value(),
+            self.ui.doubleSpinBox_custom_filter_3.value(),
+            self.ui.doubleSpinBox_custom_filter_4.value(),
+            self.ui.doubleSpinBox_custom_filter_5.value(),
+            self.ui.doubleSpinBox_custom_filter_6.value(),
+            self.ui.doubleSpinBox_custom_filter_7.value(),
+            self.ui.doubleSpinBox_custom_filter_8.value(),
+            self.ui.doubleSpinBox_custom_filter_9.value(),
+        ]
+        self.cv_sharpenImage = _custom_filter(
+            image=self.cv_srcImage, custom_kernel=custom_kernel
+        )
         height, width = self.cv_sharpenImage.shape[0], self.cv_sharpenImage.shape[1]
-        ui_image = QImage(cv2.cvtColor(self.cv_sharpenImage, cv2.COLOR_BGR2RGB), width, height, QImage.Format_RGB888)
+        ui_image = QImage(
+            cv2.cvtColor(self.cv_sharpenImage, cv2.COLOR_BGR2RGB),
+            width,
+            height,
+            QImage.Format_RGB888,
+        )
         if width > height:
             ui_image = ui_image.scaledToWidth(self.ui.label_image_2.width())
         else:

sub_windows/sub_window_5.py

@@ -23,11 +23,19 @@ class SubWindow(QMainWindow):
         pass
 
     def open_file(self):
-        file_path, file_type = QFileDialog.getOpenFileName(QFileDialog(), '选择图片', '', '图像文件(*.jpg *.bmp *.png)')
+        file_path, file_type = QFileDialog.getOpenFileName(
+            QFileDialog(), "选择图片", "", "图像文件(*.jpg *.bmp *.png)"
+        )
         self.cv_srcImage = cv2.imread(file_path)
+        if self.cv_srcImage is not None:
         print(self.cv_srcImage.shape)
         height, width = self.cv_srcImage.shape[0], self.cv_srcImage.shape[1]
-        ui_image = QImage(cv2.cvtColor(self.cv_srcImage, cv2.COLOR_BGR2RGB), width, height, QImage.Format_RGB888)
+        ui_image = QImage(
+            cv2.cvtColor(self.cv_srcImage, cv2.COLOR_BGR2RGB),
+            width,
+            height,
+            QImage.Format_RGB888,
+        )
         if width > height:
             ui_image = ui_image.scaledToWidth(self.ui.label_image_1.width())
         else:
@@ -43,9 +51,17 @@ class SubWindow(QMainWindow):
             copyImage = np.clip(copyImage, 0, 255)
             copyImage = np.array(copyImage, dtype=np.uint8)
             return copyImage
-        self.cv_equImage = _brightness_change(image=self.cv_srcImage, p=self.ui.spinBox_brightness_change.value())
+
+        self.cv_equImage = _brightness_change(
+            image=self.cv_srcImage, p=self.ui.spinBox_brightness_change.value()
+        )
         height, width = self.cv_equImage.shape[0], self.cv_equImage.shape[1]
-        ui_image = QImage(cv2.cvtColor(self.cv_equImage, cv2.COLOR_BGR2RGB), width, height, QImage.Format_RGB888)
+        ui_image = QImage(
+            cv2.cvtColor(self.cv_equImage, cv2.COLOR_BGR2RGB),
+            width,
+            height,
+            QImage.Format_RGB888,
+        )
         if width > height:
             ui_image = ui_image.scaledToWidth(self.ui.label_image_2.width())
         else:
@@ -58,7 +74,7 @@ class SubWindow(QMainWindow):
             copyImage = image.copy()
             if copyImage.ndim == 3:
                 ycrcbImage = cv2.cvtColor(copyImage, cv2.COLOR_BGR2YCR_CB)
-                channels = cv2.split(ycrcbImage)
+                channels = list(cv2.split(ycrcbImage))
                 channels[0] = cv2.equalizeHist(src=channels[0])
                 ycrcbImage = cv2.merge([channels[0], channels[1], channels[2]])
                 copyImage = cv2.cvtColor(ycrcbImage, cv2.COLOR_YCR_CB2BGR)
@@ -66,9 +82,15 @@ class SubWindow(QMainWindow):
             elif copyImage.ndim == 2:
                 copyImage = cv2.equalizeHist(src=copyImage)
             return copyImage
+
         self.cv_equImage = _his_equ(image=self.cv_srcImage)
         height, width = self.cv_equImage.shape[0], self.cv_equImage.shape[1]
-        ui_image = QImage(cv2.cvtColor(self.cv_equImage, cv2.COLOR_BGR2RGB), width, height, QImage.Format_RGB888)
+        ui_image = QImage(
+            cv2.cvtColor(self.cv_equImage, cv2.COLOR_BGR2RGB),
+            width,
+            height,
+            QImage.Format_RGB888,
+        )
         if width > height:
             ui_image = ui_image.scaledToWidth(self.ui.label_image_2.width())
         else:
@@ -80,12 +102,22 @@ class SubWindow(QMainWindow):
         if flag == 1:
             histImg = self._calc_gray_hist(image=self.cv_srcImage)
             width, height = histImg.shape[0], histImg.shape[1]
-            ui_image = QImage(cv2.cvtColor(histImg, cv2.COLOR_BGR2RGB), width, height, QImage.Format_RGB888)
+            ui_image = QImage(
+                cv2.cvtColor(histImg, cv2.COLOR_BGR2RGB),
+                width,
+                height,
+                QImage.Format_RGB888,
+            )
             self.ui.label_image_3.setPixmap(QPixmap.fromImage(ui_image))
         elif flag == 2:
             histImg = self._calc_gray_hist(image=self.cv_equImage)
             width, height = histImg.shape[0], histImg.shape[1]
-            ui_image = QImage(cv2.cvtColor(histImg, cv2.COLOR_BGR2RGB), width, height, QImage.Format_RGB888)
+            ui_image = QImage(
+                cv2.cvtColor(histImg, cv2.COLOR_BGR2RGB),
+                width,
+                height,
+                QImage.Format_RGB888,
+            )
             self.ui.label_image_4.setPixmap(QPixmap.fromImage(ui_image))
 
     def _calc_gray_hist(self, image):

sub_windows/sub_window_6.py

@@ -22,10 +22,18 @@ class SubWindow(QMainWindow):
         self.ui.pushButton_save.clicked.connect(self.save)
 
     def open_file(self):
-        file_path, file_type = QFileDialog.getOpenFileName(QFileDialog(), '选择图片', '', '图像文件(*.jpg *.bmp *.png)')
+        file_path, file_type = QFileDialog.getOpenFileName(
+            QFileDialog(), "选择图片", "", "图像文件(*.jpg *.bmp *.png)"
+        )
         self.cv_srcImage = cv2.imread(file_path)
+        if self.cv_srcImage is None:
         height, width = self.cv_srcImage.shape[0], self.cv_srcImage.shape[1]
-        ui_image = QImage(cv2.cvtColor(self.cv_srcImage, cv2.COLOR_BGR2RGB), width, height, QImage.Format_RGB888)
+        ui_image = QImage(
+            cv2.cvtColor(self.cv_srcImage, cv2.COLOR_BGR2RGB),
+            width,
+            height,
+            QImage.Format_RGB888,
+        )
         if width > height:
             ui_image = ui_image.scaledToWidth(self.ui.label_image_1.width())
         else:
@@ -38,28 +46,35 @@ class SubWindow(QMainWindow):
         # 判断radio
         mask_flag = 6
         if self.ui.radioButton_s1.isChecked():
-            print('s1')
+            print("s1")
             mask_flag = 1
         elif self.ui.radioButton_s3.isChecked():
-            print('s3')
+            print("s3")
             mask_flag = 3
         elif self.ui.radioButton_s6.isChecked():
-            print('s6')
+            print("s6")
             mask_flag = 6
         elif self.ui.radioButton_s10.isChecked():
-            print('s10')
+            print("s10")
             mask_flag = 10
         elif self.ui.radioButton_s15.isChecked():
-            print('s15')
+            print("s15")
             mask_flag = 15
         elif self.ui.radioButton_s21.isChecked():
-            print('s21')
+            print("s21")
             mask_flag = 21
         # DCT处理
-        compressImage = self._dct_test(image=self.cv_srcImage, block=8, mask_flag=mask_flag)
+        compressImage = self._dct_test(
+            image=self.cv_srcImage, block=8, mask_flag=mask_flag
+        )
         self.saveImage = compressImage.copy()
         height, width = compressImage.shape[0], compressImage.shape[1]
-        ui_image = QImage(cv2.cvtColor(compressImage, cv2.COLOR_BGR2RGB), width, height, QImage.Format_RGB888)
+        ui_image = QImage(
+            cv2.cvtColor(compressImage, cv2.COLOR_BGR2RGB),
+            width,
+            height,
+            QImage.Format_RGB888,
+        )
         if width > height:
             ui_image = ui_image.scaledToWidth(self.ui.label_image_1.width())
         else:
@@ -67,54 +82,78 @@ class SubWindow(QMainWindow):
         self.ui.label_image_2.setPixmap(QPixmap.fromImage(ui_image))
 
     def _dct_test(self, image, block=8, mask_flag=10):
-        mask_21 = np.uint8([[1, 1, 1, 1, 1, 1, 0, 0],
-                            [1, 1, 1, 1, 1, 0, 0, 0],
-                            [1, 1, 1, 1, 0, 0, 0, 0],
-                            [1, 1, 1, 0, 0, 0, 0, 0],
-                            [1, 1, 0, 0, 0, 0, 0, 0],
-                            [1, 0, 0, 0, 0, 0, 0, 0],
-                            [0, 0, 0, 0, 0, 0, 0, 0],
-                            [0, 0, 0, 0, 0, 0, 0, 0]])
-        mask_15 = np.uint8([[1, 1, 1, 1, 1, 0, 0, 0],
-                            [1, 1, 1, 1, 0, 0, 0, 0],
-                            [1, 1, 1, 0, 0, 0, 0, 0],
-                            [1, 1, 0, 0, 0, 0, 0, 0],
-                            [1, 0, 0, 0, 0, 0, 0, 0],
-                            [0, 0, 0, 0, 0, 0, 0, 0],
-                            [0, 0, 0, 0, 0, 0, 0, 0],
-                            [0, 0, 0, 0, 0, 0, 0, 0]])
-        mask_10 = np.uint8([[1, 1, 1, 1, 0, 0, 0, 0],
-                            [1, 1, 1, 0, 0, 0, 0, 0],
-                            [1, 1, 0, 0, 0, 0, 0, 0],
-                            [1, 0, 0, 0, 0, 0, 0, 0],
-                            [0, 0, 0, 0, 0, 0, 0, 0],
-                            [0, 0, 0, 0, 0, 0, 0, 0],
-                            [0, 0, 0, 0, 0, 0, 0, 0],
-                            [0, 0, 0, 0, 0, 0, 0, 0]])
-        mask_6 = np.uint8([[1, 1, 1, 0, 0, 0, 0, 0],
-                           [1, 1, 0, 0, 0, 0, 0, 0],
-                           [1, 0, 0, 0, 0, 0, 0, 0],
-                           [0, 0, 0, 0, 0, 0, 0, 0],
-                           [0, 0, 0, 0, 0, 0, 0, 0],
-                           [0, 0, 0, 0, 0, 0, 0, 0],
-                           [0, 0, 0, 0, 0, 0, 0, 0],
-                           [0, 0, 0, 0, 0, 0, 0, 0]])
-        mask_3 = np.uint8([[1, 1, 0, 0, 0, 0, 0, 0],
-                           [1, 0, 0, 0, 0, 0, 0, 0],
-                           [0, 0, 0, 0, 0, 0, 0, 0],
-                           [0, 0, 0, 0, 0, 0, 0, 0],
-                           [0, 0, 0, 0, 0, 0, 0, 0],
-                           [0, 0, 0, 0, 0, 0, 0, 0],
-                           [0, 0, 0, 0, 0, 0, 0, 0],
-                           [0, 0, 0, 0, 0, 0, 0, 0]])
-        mask_1 = np.uint8([[1, 0, 0, 0, 0, 0, 0, 0],
-                           [0, 0, 0, 0, 0, 0, 0, 0],
-                           [0, 0, 0, 0, 0, 0, 0, 0],
-                           [0, 0, 0, 0, 0, 0, 0, 0],
-                           [0, 0, 0, 0, 0, 0, 0, 0],
-                           [0, 0, 0, 0, 0, 0, 0, 0],
-                           [0, 0, 0, 0, 0, 0, 0, 0],
-                           [0, 0, 0, 0, 0, 0, 0, 0]])
+        mask_21 = np.uint8(
+            [
+                [1, 1, 1, 1, 1, 1, 0, 0],
+                [1, 1, 1, 1, 1, 0, 0, 0],
+                [1, 1, 1, 1, 0, 0, 0, 0],
+                [1, 1, 1, 0, 0, 0, 0, 0],
+                [1, 1, 0, 0, 0, 0, 0, 0],
+                [1, 0, 0, 0, 0, 0, 0, 0],
+                [0, 0, 0, 0, 0, 0, 0, 0],
+                [0, 0, 0, 0, 0, 0, 0, 0],
+            ]
+        )
+        mask_15 = np.uint8(
+            [
+                [1, 1, 1, 1, 1, 0, 0, 0],
+                [1, 1, 1, 1, 0, 0, 0, 0],
+                [1, 1, 1, 0, 0, 0, 0, 0],
+                [1, 1, 0, 0, 0, 0, 0, 0],
+                [1, 0, 0, 0, 0, 0, 0, 0],
+                [0, 0, 0, 0, 0, 0, 0, 0],
+                [0, 0, 0, 0, 0, 0, 0, 0],
+                [0, 0, 0, 0, 0, 0, 0, 0],
+            ]
+        )
+        mask_10 = np.uint8(
+            [
+                [1, 1, 1, 1, 0, 0, 0, 0],
+                [1, 1, 1, 0, 0, 0, 0, 0],
+                [1, 1, 0, 0, 0, 0, 0, 0],
+                [1, 0, 0, 0, 0, 0, 0, 0],
+                [0, 0, 0, 0, 0, 0, 0, 0],
+                [0, 0, 0, 0, 0, 0, 0, 0],
+                [0, 0, 0, 0, 0, 0, 0, 0],
+                [0, 0, 0, 0, 0, 0, 0, 0],
+            ]
+        )
+        mask_6 = np.uint8(
+            [
+                [1, 1, 1, 0, 0, 0, 0, 0],
+                [1, 1, 0, 0, 0, 0, 0, 0],
+                [1, 0, 0, 0, 0, 0, 0, 0],
+                [0, 0, 0, 0, 0, 0, 0, 0],
+                [0, 0, 0, 0, 0, 0, 0, 0],
+                [0, 0, 0, 0, 0, 0, 0, 0],
+                [0, 0, 0, 0, 0, 0, 0, 0],
+                [0, 0, 0, 0, 0, 0, 0, 0],
+            ]
+        )
+        mask_3 = np.uint8(
+            [
+                [1, 1, 0, 0, 0, 0, 0, 0],
+                [1, 0, 0, 0, 0, 0, 0, 0],
+                [0, 0, 0, 0, 0, 0, 0, 0],
+                [0, 0, 0, 0, 0, 0, 0, 0],
+                [0, 0, 0, 0, 0, 0, 0, 0],
+                [0, 0, 0, 0, 0, 0, 0, 0],
+                [0, 0, 0, 0, 0, 0, 0, 0],
+                [0, 0, 0, 0, 0, 0, 0, 0],
+            ]
+        )
+        mask_1 = np.uint8(
+            [
+                [1, 0, 0, 0, 0, 0, 0, 0],
+                [0, 0, 0, 0, 0, 0, 0, 0],
+                [0, 0, 0, 0, 0, 0, 0, 0],
+                [0, 0, 0, 0, 0, 0, 0, 0],
+                [0, 0, 0, 0, 0, 0, 0, 0],
+                [0, 0, 0, 0, 0, 0, 0, 0],
+                [0, 0, 0, 0, 0, 0, 0, 0],
+                [0, 0, 0, 0, 0, 0, 0, 0],
+            ]
+        )
         mask = mask_10
         if mask_flag == 1:
             mask = mask_1
@@ -131,21 +170,34 @@ class SubWindow(QMainWindow):
         srcImage = cv2.cvtColor(image.copy(), cv2.COLOR_BGR2YUV)
         retImage = np.zeros(
-            ((srcImage.shape[0] // block + 1) * block, (srcImage.shape[1] // block + 1) * block, srcImage.ndim),
-            np.float32)
+            (
+                (srcImage.shape[0] // block + 1) * block,
+                (srcImage.shape[1] // block + 1) * block,
+                srcImage.ndim,
+            ),
+            np.float32,
+        )
 
         channels = cv2.split(srcImage)
         Y_channel_float = np.array(channels[0], dtype=np.float32)
         U_channel_float = np.array(channels[1], dtype=np.float32)
         V_channel_float = np.array(channels[2], dtype=np.float32)
-        retImage[0: Y_channel_float.shape[0], 0: Y_channel_float.shape[1], 0] = Y_channel_float
-        retImage[0: U_channel_float.shape[0], 0: U_channel_float.shape[1], 1] = U_channel_float
-        retImage[0: V_channel_float.shape[0], 0: V_channel_float.shape[1], 2] = V_channel_float
+        retImage[
+            0 : Y_channel_float.shape[0], 0 : Y_channel_float.shape[1], 0
+        ] = Y_channel_float
+        retImage[
+            0 : U_channel_float.shape[0], 0 : U_channel_float.shape[1], 1
+        ] = U_channel_float
+        retImage[
+            0 : V_channel_float.shape[0], 0 : V_channel_float.shape[1], 2
+        ] = V_channel_float
 
         T = np.zeros((block, block), np.float64)
         T[0, :] = 1 * np.sqrt(1 / block)
         for i in range(1, block):
             for j in range(0, block):
-                T[i, j] = np.cos(np.pi * i * (2 * j + 1) / (2 * block)) * np.sqrt(2 / block)
+                T[i, j] = np.cos(np.pi * i * (2 * j + 1) / (2 * block)) * np.sqrt(
+                    2 / block
+                )
 
         for y_offset in range(int(retImage.shape[0] / block)):
             for x_offset in range(int(retImage.shape[1] / block)):
@@ -156,12 +208,18 @@ class SubWindow(QMainWindow):
                     # subImg = cv2.idct(subImg)
                     # retImage[y_offset * block: y_offset * block + block, x_offset * block: x_offset * block + block, c] = subImg
                     # 自建的方法
-                    subImg = retImage[y_offset * block: y_offset * block + block,
-                                      x_offset * block: x_offset * block + block, c]
+                    subImg = retImage[
+                        y_offset * block : y_offset * block + block,
+                        x_offset * block : x_offset * block + block,
+                        c,
+                    ]
                     dctImg = np.dot(np.dot(T, subImg), np.transpose(T)) * mask
                     subImg = np.dot(np.dot(np.transpose(T), dctImg), T)
-                    retImage[y_offset * block: y_offset * block + block, x_offset * block: x_offset * block + block,
-                             c] = subImg
+                    retImage[
+                        y_offset * block : y_offset * block + block,
+                        x_offset * block : x_offset * block + block,
+                        c,
+                    ] = subImg
         retImage = cv2.cvtColor(np.uint8(retImage), cv2.COLOR_YUV2BGR)
         retImage = retImage[0 : srcImage.shape[0], 0 : srcImage.shape[1]]
         return retImage
@@ -169,11 +227,8 @@ class SubWindow(QMainWindow):
     def save(self):
         if self.saveImage is None:
             return
-        jpg_image = cv2.imencode('.jpg', self.saveImage)[1]
-        fp = open('.././dctCompressImage.jpg', 'wb')
+        jpg_image = cv2.imencode(".jpg", self.saveImage)[1]
+        fp = open(".././dctCompressImage.jpg", "wb")
         fp.write(jpg_image)
         fp.close()
-        print('ok')
+        print("ok")
-
-
-

sub_windows/sub_window_7.py

@@ -19,10 +19,18 @@ class SubWindow(QMainWindow):
         self.ui.pushButton_canny.clicked.connect(self.canny_process)
 
     def open_file(self):
-        file_path, file_type = QFileDialog.getOpenFileName(QFileDialog(), '选择图片', '', '图像文件(*.jpg *.bmp *.png)')
+        file_path, file_type = QFileDialog.getOpenFileName(
+            QFileDialog(), "选择图片", "", "图像文件(*.jpg *.bmp *.png)"
+        )
         self.cv_srcImage = cv2.imread(file_path)
+        if self.cv_srcImage is None:
         height, width = self.cv_srcImage.shape[0], self.cv_srcImage.shape[1]
-        ui_image = QImage(cv2.cvtColor(self.cv_srcImage, cv2.COLOR_BGR2RGB), width, height, QImage.Format_RGB888)
+        ui_image = QImage(
+            cv2.cvtColor(self.cv_srcImage, cv2.COLOR_BGR2RGB),
+            width,
+            height,
+            QImage.Format_RGB888,
+        )
         if width > height:
             ui_image = ui_image.scaledToWidth(self.ui.label_image_1.width())
         else:
@@ -36,7 +44,12 @@ class SubWindow(QMainWindow):
         high_th = int(self.ui.spinBox_high_th.value())
         edgeImg = cv2.Canny(self.cv_srcImage.copy(), low_th, high_th)
         height, width = edgeImg.shape[0], edgeImg.shape[1]
-        ui_image = QImage(cv2.cvtColor(edgeImg, cv2.COLOR_BGR2RGB), width, height, QImage.Format_RGB888)
+        ui_image = QImage(
+            cv2.cvtColor(edgeImg, cv2.COLOR_BGR2RGB),
+            width,
+            height,
+            QImage.Format_RGB888,
+        )
         if width > height:
             ui_image = ui_image.scaledToWidth(self.ui.label_image_1.width())
         else:

sub_windows/sub_window_8.py

@@ -14,7 +14,7 @@ class SubWindow(QMainWindow):
         self.ui.setupUi(self)
         self.FaceDetect_ = None
         self.ui_init()
-        self.face_cascade = cv2.CascadeClassifier('./static/cascade.xml')
+        self.face_cascade = cv2.CascadeClassifier("./static/cascade.xml")
 
     def ui_init(self):
         self.ui.pushButton_video_captrue.clicked.connect(self.video_captrue)
@@ -23,28 +23,37 @@ class SubWindow(QMainWindow):
         self.FaceDetect_.DetectOneFrame.connect(self.update_frame_to_label)
 
     def open_file(self):
-        file_path, file_type = QFileDialog.getOpenFileName(QFileDialog(), '选择图片', '', '图像文件(*.jpg *.bmp *.png)')
+        file_path, file_type = QFileDialog.getOpenFileName(
+            QFileDialog(), "选择图片", "", "图像文件(*.jpg *.bmp *.png)"
+        )
         self.cv_srcImage = cv2.imread(file_path)
+        if self.cv_srcImage is None:
         gray = cv2.cvtColor(self.cv_srcImage, cv2.COLOR_BGR2GRAY)
-        faces = self.face_cascade.detectMultiScale(gray, scaleFactor=1.15, minNeighbors=5, minSize=(5, 5))
-        for (x, y, w, h) in faces:
+        faces = self.face_cascade.detectMultiScale(
+            gray, scaleFactor=1.15, minNeighbors=5, minSize=(5, 5)
+        )
+        for x, y, w, h in faces:
             cv2.rectangle(self.cv_srcImage, (x, y), (x + w, y + w), (0, 255, 0), 5)
         height, width = self.cv_srcImage.shape[0], self.cv_srcImage.shape[1]
-        ui_image = QImage(cv2.cvtColor(self.cv_srcImage, cv2.COLOR_BGR2RGB), width, height, QImage.Format_RGB888)
+        ui_image = QImage(
+            cv2.cvtColor(self.cv_srcImage, cv2.COLOR_BGR2RGB),
+            width,
+            height,
+            QImage.Format_RGB888,
+        )
         if width > height:
             ui_image = ui_image.scaledToWidth(self.ui.label_image_1.width())
         else:
             ui_image = ui_image.scaledToHeight(self.ui.label_image_1.height())
         self.ui.label_image_1.setPixmap(QPixmap.fromImage(ui_image))
 
     def video_captrue(self):
         if not self.FaceDetect_.working:
             self.FaceDetect_.working = True
             self.FaceDetect_.start()
         else:
             self.FaceDetect_.working = None
-            self.ui.label_image_1.setText('停止捕捉')
+            self.ui.label_image_1.setText("停止捕捉")
-
 
     def update_frame_to_label(self, frame):
         self.ui.label_image_1.setPixmap(QPixmap.fromImage(frame))
@@ -58,7 +67,7 @@ class FaceDetect(QThread):
         self.working = None
 
     def run(self):
-        face_cascade = cv2.CascadeClassifier('./static/cascade.xml')
+        face_cascade = cv2.CascadeClassifier("./static/cascade.xml")
         capture = cv2.VideoCapture(0, cv2.CAP_DSHOW)
         codec = cv2.VideoWriter_fourcc("M", "J", "P", "G")
         capture.set(cv2.CAP_PROP_FOURCC, codec)
@@ -67,10 +76,17 @@ class FaceDetect(QThread):
             (height, width, channels) = frame_color.shape
             frame_color = cv2.flip(frame_color, flipCode=1)  # 镜像
             gray = cv2.cvtColor(frame_color, cv2.COLOR_BGR2GRAY)
-            faces = face_cascade.detectMultiScale(gray, scaleFactor=1.15, minNeighbors=5, minSize=(5, 5))
-            for (x, y, w, h) in faces:
+            faces = face_cascade.detectMultiScale(
+                gray, scaleFactor=1.15, minNeighbors=5, minSize=(5, 5)
+            )
+            for x, y, w, h in faces:
                 cv2.rectangle(frame_color, (x, y), (x + w, y + w), (255, 255, 0), 4)
-            ui_image = QImage(cv2.cvtColor(frame_color, cv2.COLOR_BGR2RGB), width, height, QImage.Format_RGB888)
+            ui_image = QImage(
+                cv2.cvtColor(frame_color, cv2.COLOR_BGR2RGB),
+                width,
+                height,
+                QImage.Format_RGB888,
+            )
             self.DetectOneFrame.emit(ui_image)
         capture.release()
-        print('结束人脸检测')
+        print("结束人脸检测")
