diff --git a/dctCompressImage.jpg b/dctCompressImage.jpg new file mode 100644 index 0000000..bd11afa Binary files /dev/null and b/dctCompressImage.jpg differ diff --git a/nst/1.jpg b/nst/1.jpg index 577b685..80ccaca 100644 Binary files a/nst/1.jpg and b/nst/1.jpg differ diff --git a/sub_windows/sub_window_1.py b/sub_windows/sub_window_1.py index 7bdef8d..f9379a8 100644 --- a/sub_windows/sub_window_1.py +++ b/sub_windows/sub_window_1.py @@ -22,22 +22,35 @@ class SubWindow(QMainWindow): self.ui.pushButton_reset.clicked.connect(self.reset) def open_file(self): - file_path, file_type = QFileDialog.getOpenFileName(QFileDialog(), '选择图片', '', '图像文件(*.jpg *.bmp *.png)') + file_path, file_type = QFileDialog.getOpenFileName( + QFileDialog(), "选择图片", "", "图像文件(*.jpg *.bmp *.png)" + ) self.cv_srcImage = cv2.imread(file_path) - height, width, channels = self.cv_srcImage.shape - ui_image = QImage(cv2.cvtColor(self.cv_srcImage, cv2.COLOR_BGR2RGB), width, height, QImage.Format_RGB888) - if width > height: - ui_image = ui_image.scaledToWidth(self.ui.label_image.width()) - else: - ui_image = ui_image.scaledToHeight(self.ui.label_image.height()) - self._update_qimage_to_label(ui_image) - self._show_image_information(1) - self._set_pushbutton_enabled() + if self.cv_srcImage is not None: + height, width, channels = self.cv_srcImage.shape + ui_image = QImage( + cv2.cvtColor(self.cv_srcImage, cv2.COLOR_BGR2RGB), + width, + height, + QImage.Format_RGB888, + ) + if width > height: + ui_image = ui_image.scaledToWidth(self.ui.label_image.width()) + else: + ui_image = ui_image.scaledToHeight(self.ui.label_image.height()) + self._update_qimage_to_label(ui_image) + self._show_image_information(1) + self._set_pushbutton_enabled() def gray_convert(self): gray_image = cv2.cvtColor(self.cv_srcImage, cv2.COLOR_BGR2GRAY) height, width = gray_image.shape - ui_image = QImage(cv2.cvtColor(gray_image, cv2.COLOR_BGR2RGB), width, height, QImage.Format_RGB888) + ui_image = QImage( + cv2.cvtColor(gray_image, cv2.COLOR_GRAY2RGB), + width, + height, + QImage.Format_RGB888, + ) if width > height: ui_image = ui_image.scaledToWidth(self.ui.label_image.width()) else: @@ -48,9 +61,16 @@ class SubWindow(QMainWindow): def bin_convert(self): threshold_value = int(self.ui.spinBox_bin_threshold.value()) gray_image = cv2.cvtColor(self.cv_srcImage, cv2.COLOR_BGR2GRAY) - ret, bin_image = cv2.threshold(gray_image, threshold_value, 255, cv2.THRESH_BINARY) + ret, bin_image = cv2.threshold( + gray_image, threshold_value, 255, cv2.THRESH_BINARY + ) height, width = bin_image.shape - ui_image = QImage(cv2.cvtColor(bin_image, cv2.COLOR_BGR2RGB), width, height, QImage.Format_RGB888) + ui_image = QImage( + cv2.cvtColor(bin_image, cv2.COLOR_GRAY2RGB), + width, + height, + QImage.Format_RGB888, + ) if width > height: ui_image = ui_image.scaledToWidth(self.ui.label_image.width()) else: @@ -60,7 +80,12 @@ class SubWindow(QMainWindow): def reset(self): height, width, channels = self.cv_srcImage.shape - ui_image = QImage(cv2.cvtColor(self.cv_srcImage, cv2.COLOR_BGR2RGB), width, height, QImage.Format_RGB888) + ui_image = QImage( + cv2.cvtColor(self.cv_srcImage, cv2.COLOR_BGR2RGB), + width, + height, + QImage.Format_RGB888, + ) if width > height: ui_image = ui_image.scaledToWidth(self.ui.label_image.width()) else: @@ -73,13 +98,13 @@ class SubWindow(QMainWindow): def _show_image_information(self, current_image_type): if current_image_type == 1: - self.ui.label_color_space_2.setText('彩色图') + self.ui.label_color_space_2.setText("彩色图") if current_image_type == 2: -
self.ui.label_color_space_2.setText('灰度图') + self.ui.label_color_space_2.setText("灰度图") if current_image_type == 3: - self.ui.label_color_space_2.setText('二值图') + self.ui.label_color_space_2.setText("二值图") def _set_pushbutton_enabled(self): self.ui.pushButton_gray_convert.setEnabled(True) self.ui.pushButton_bin_convert.setEnabled(True) - self.ui.pushButton_reset.setEnabled(True) \ No newline at end of file + self.ui.pushButton_reset.setEnabled(True) diff --git a/sub_windows/sub_window_2.py b/sub_windows/sub_window_2.py index cc81031..23f14f9 100644 --- a/sub_windows/sub_window_2.py +++ b/sub_windows/sub_window_2.py @@ -24,27 +24,44 @@ class SubWindow(QMainWindow): self.ui.pushButton_screenshot.clicked.connect(self.clip_image) def open_file(self): - file_path, file_type = QFileDialog.getOpenFileName(QFileDialog(), '选择图片', '', '图像文件(*.jpg *.bmp *.png)') + file_path, file_type = QFileDialog.getOpenFileName( + QFileDialog(), "选择图片", "", "图像文件(*.jpg *.bmp *.png)" + ) self.cv_srcImage = cv2.imread(file_path) - height, width, channels = self.cv_srcImage.shape - ui_image = QImage(cv2.cvtColor(self.cv_srcImage, cv2.COLOR_BGR2RGB), width, height, QImage.Format_RGB888) - if width > height: - ui_image = ui_image.scaledToWidth(self.ui.label_image.width()) - else: - ui_image = ui_image.scaledToHeight(self.ui.label_image.height()) - self.zoom_factor = 1.0 - self._show_qimage_to_label(ui_image) + if self.cv_srcImage is not None: + height, width, channels = self.cv_srcImage.shape + ui_image = QImage( + cv2.cvtColor(self.cv_srcImage, cv2.COLOR_BGR2RGB), + width, + height, + QImage.Format_RGB888, + ) + if width > height: + ui_image = ui_image.scaledToWidth(self.ui.label_image.width()) + else: + ui_image = ui_image.scaledToHeight(self.ui.label_image.height()) + self.zoom_factor = 1.0 + self._show_qimage_to_label(ui_image) def zoom_in(self): if 10 > self.zoom_factor > 0.1: self.zoom_factor += 0.1 print(self.zoom_factor) height, width, channels = self.cv_srcImage.shape - ui_image = QImage(cv2.cvtColor(self.cv_srcImage, cv2.COLOR_BGR2RGB), width, height, QImage.Format_RGB888) + ui_image = QImage( + cv2.cvtColor(self.cv_srcImage, cv2.COLOR_BGR2RGB), + width, + height, + QImage.Format_RGB888, + ) if width > height: - ui_image = ui_image.scaledToWidth(self.ui.label_image.width() * self.zoom_factor) + ui_image = ui_image.scaledToWidth( + int(self.ui.label_image.width() * self.zoom_factor) + ) else: - ui_image = ui_image.scaledToHeight(self.ui.label_image.height() * self.zoom_factor) + ui_image = ui_image.scaledToHeight( + int(self.ui.label_image.height() * self.zoom_factor) + ) self._show_qimage_to_label(ui_image) def zoom_out(self): @@ -52,17 +69,31 @@ class SubWindow(QMainWindow): self.zoom_factor -= 0.1 print(self.zoom_factor) height, width, channels = self.cv_srcImage.shape - ui_image = QImage(cv2.cvtColor(self.cv_srcImage, cv2.COLOR_BGR2RGB), width, height, QImage.Format_RGB888) + ui_image = QImage( + cv2.cvtColor(self.cv_srcImage, cv2.COLOR_BGR2RGB), + width, + height, + QImage.Format_RGB888, + ) if width > height: - ui_image = ui_image.scaledToWidth(self.ui.label_image.width() * self.zoom_factor) + ui_image = ui_image.scaledToWidth( + int(self.ui.label_image.width() * self.zoom_factor) + ) else: - ui_image = ui_image.scaledToHeight(self.ui.label_image.height() * self.zoom_factor) + ui_image = ui_image.scaledToHeight( + int(self.ui.label_image.height() * self.zoom_factor) + ) self._show_qimage_to_label(ui_image) def zoom_reset(self): self.zoom_factor = 1.0 height, width, channels = self.cv_srcImage.shape - 
ui_image = QImage(cv2.cvtColor(self.cv_srcImage, cv2.COLOR_BGR2RGB), width, height, QImage.Format_RGB888) + ui_image = QImage( + cv2.cvtColor(self.cv_srcImage, cv2.COLOR_BGR2RGB), + width, + height, + QImage.Format_RGB888, + ) if width > height: ui_image = ui_image.scaledToWidth(self.ui.label_image.width()) else: @@ -74,16 +105,20 @@ class SubWindow(QMainWindow): anchor_y = int(self.ui.spinBox_anchor_y.value()) offset_x = int(self.ui.spinBox_X_offset.value()) offset_y = int(self.ui.spinBox_Y_offset.value()) - clip_image = self.cv_srcImage.copy()[anchor_y: offset_y - 1, anchor_x: offset_x - 1] - cv2.imshow('clip_image', clip_image) + clip_image = self.cv_srcImage.copy()[ + anchor_y : offset_y - 1, anchor_x : offset_x - 1 + ] + cv2.imshow("clip_image", clip_image) cv2.waitKey(0) def _show_zoom_factor(self): - self.ui.label_zoom_factor_2.setText(str(self.zoom_factor)[:3] + 'x') + self.ui.label_zoom_factor_2.setText(str(self.zoom_factor)[:3] + "x") def _update_srcImage_size(self): height, width, channels = self.cv_srcImage.shape - self.ui.label_srcImage_size.setText('原图X轴*Y轴:' + str(width) + ' x ' + str(height)) + self.ui.label_srcImage_size.setText( + "原图X轴*Y轴:" + str(width) + " x " + str(height) + ) self.ui.spinBox_anchor_x.setMaximum(width) self.ui.spinBox_anchor_y.setMaximum(height) self.ui.spinBox_X_offset.setMaximum(width) @@ -96,4 +131,4 @@ class SubWindow(QMainWindow): def _show_qimage_to_label(self, qimage): self.ui.label_image.setPixmap(QPixmap.fromImage(qimage)) self._show_zoom_factor() - self._update_srcImage_size() \ No newline at end of file + self._update_srcImage_size() diff --git a/sub_windows/sub_window_3.py b/sub_windows/sub_window_3.py index ec6d6aa..dc026cf 100644 --- a/sub_windows/sub_window_3.py +++ b/sub_windows/sub_window_3.py @@ -81,13 +81,14 @@ class SubWindow(QMainWindow): def open_file(self): file_path, file_type = QFileDialog.getOpenFileName(QFileDialog(), '选择图片', '', '*.jpg *.bmp *.png *tif') self.cv_srcImage = cv2.imread(file_path) - height, width, channels = self.cv_srcImage.shape - ui_image = QImage(cv2.cvtColor(self.cv_srcImage, cv2.COLOR_BGR2RGB), width, height, QImage.Format_RGB888) - if width > height: - ui_image = ui_image.scaledToWidth(self.ui.label_image_1.width()) - else: - ui_image = ui_image.scaledToHeight(self.ui.label_image_1.height()) - self.ui.label_image_1.setPixmap(QPixmap.fromImage(ui_image)) + if self.cv_srcImage is not None: + height, width, channels = self.cv_srcImage.shape + ui_image = QImage(cv2.cvtColor(self.cv_srcImage, cv2.COLOR_BGR2RGB), width, height, QImage.Format_RGB888) + if width > height: + ui_image = ui_image.scaledToWidth(self.ui.label_image_1.width()) + else: + ui_image = ui_image.scaledToHeight(self.ui.label_image_1.height()) + self.ui.label_image_1.setPixmap(QPixmap.fromImage(ui_image)) def comboBox_noise_type_selected(self): selected = self.ui.comboBox_noise_type.currentText() diff --git a/sub_windows/sub_window_4.py b/sub_windows/sub_window_4.py index d15aefa..dae3e89 100644 --- a/sub_windows/sub_window_4.py +++ b/sub_windows/sub_window_4.py @@ -15,7 +15,7 @@ class SubWindow(QMainWindow): self.ui_init() def ui_init(self): - sharpen_type_list = ['Sobel算子', 'Laplace算子', '自定义卷积核'] + sharpen_type_list = ["Sobel算子", "Laplace算子", "自定义卷积核"] self.ui.comboBox_selector.addItems(sharpen_type_list) self.ui.comboBox_selector.activated.connect(self.comboBox_selected) self.ui.pushButton_open_file.clicked.connect(self.open_file) @@ -36,29 +36,37 @@ class SubWindow(QMainWindow): self.ui.groupBox_sobel_filter.setEnabled(False)
self.ui.groupBox_laplace_filter.setEnabled(False) self.ui.groupBox_custom_filter.setEnabled(False) - elif selected == 'Sobel算子': + elif selected == "Sobel算子": self.ui.groupBox_sobel_filter.setEnabled(True) self.ui.groupBox_laplace_filter.setEnabled(False) self.ui.groupBox_custom_filter.setEnabled(False) - elif selected == 'Laplace算子': + elif selected == "Laplace算子": self.ui.groupBox_sobel_filter.setEnabled(False) self.ui.groupBox_laplace_filter.setEnabled(True) self.ui.groupBox_custom_filter.setEnabled(False) - elif selected == '自定义卷积核': + elif selected == "自定义卷积核": self.ui.groupBox_sobel_filter.setEnabled(False) self.ui.groupBox_laplace_filter.setEnabled(False) self.ui.groupBox_custom_filter.setEnabled(True) def open_file(self): - file_path, file_type = QFileDialog.getOpenFileName(QFileDialog(), '选择图片', '', '图像文件(*.jpg *.bmp *.png)') + file_path, file_type = QFileDialog.getOpenFileName( + QFileDialog(), "选择图片", "", "图像文件(*.jpg *.bmp *.png)" + ) self.cv_srcImage = cv2.imread(file_path) - height, width, channels = self.cv_srcImage.shape - ui_image = QImage(cv2.cvtColor(self.cv_srcImage, cv2.COLOR_BGR2RGB), width, height, QImage.Format_RGB888) - if width > height: - ui_image = ui_image.scaledToWidth(self.ui.label_image_1.width()) - else: - ui_image = ui_image.scaledToHeight(self.ui.label_image_1.height()) - self.ui.label_image_1.setPixmap(QPixmap.fromImage(ui_image)) + if self.cv_srcImage is not None: + height, width, channels = self.cv_srcImage.shape + ui_image = QImage( + cv2.cvtColor(self.cv_srcImage, cv2.COLOR_BGR2RGB), + width, + height, + QImage.Format_RGB888, + ) + if width > height: + ui_image = ui_image.scaledToWidth(self.ui.label_image_1.width()) + else: + ui_image = ui_image.scaledToHeight(self.ui.label_image_1.height()) + self.ui.label_image_1.setPixmap(QPixmap.fromImage(ui_image)) def sobel_sharpen_filter(self): def _sobel_sharpen_filter(image, mode=0): @@ -80,6 +88,7 @@ class SubWindow(QMainWindow): y = cv2.convertScaleAbs(y) x_y = cv2.addWeighted(x, 0.5, y, 0.5, 0) return x_y + mode = 0 if self.ui.radioButton_sobel_dx.isChecked(): mode = 1 @@ -89,7 +98,12 @@ class SubWindow(QMainWindow): mode = 0 self.cv_sharpenImage = _sobel_sharpen_filter(image=self.cv_srcImage, mode=mode) height, width = self.cv_sharpenImage.shape - ui_image = QImage(cv2.cvtColor(self.cv_sharpenImage, cv2.COLOR_BGR2RGB), width, height, QImage.Format_RGB888) + ui_image = QImage( + cv2.cvtColor(self.cv_sharpenImage, cv2.COLOR_BGR2RGB), + width, + height, + QImage.Format_RGB888, + ) if width > height: ui_image = ui_image.scaledToWidth(self.ui.label_image_2.width()) else: @@ -104,10 +118,18 @@ class SubWindow(QMainWindow): copyImage = cv2.Laplacian(copyImage, ddepth=cv2.CV_16S, ksize=int(size)) copyImage = cv2.convertScaleAbs(copyImage) return copyImage + size = self.ui.spinBox_laplace_ksize.value() - self.cv_sharpenImage = _laplacian_sharpen_filter(image=self.cv_srcImage, size=size) + self.cv_sharpenImage = _laplacian_sharpen_filter( + image=self.cv_srcImage, size=size + ) height, width = self.cv_sharpenImage.shape - ui_image = QImage(cv2.cvtColor(self.cv_sharpenImage, cv2.COLOR_BGR2RGB), width, height, QImage.Format_RGB888) + ui_image = QImage( + cv2.cvtColor(self.cv_sharpenImage, cv2.COLOR_BGR2RGB), + width, + height, + QImage.Format_RGB888, + ) if width > height: ui_image = ui_image.scaledToWidth(self.ui.label_image_2.width()) else: @@ -117,21 +139,43 @@ class SubWindow(QMainWindow): def custom_filter(self): def _custom_filter(image, custom_kernel=None): if custom_kernel is None: - kernel = np.array([[0, -1.5,
0], [-1.5, 7, -1.5], [0, -1.5, 0]], np.float) + kernel = np.array( + [[0, -1.5, 0], [-1.5, 7, -1.5], [0, -1.5, 0]], np.float64 + ) else: - kernel = np.array([[custom_kernel[0], custom_kernel[1], custom_kernel[2]], - [custom_kernel[3], custom_kernel[4], custom_kernel[5]], - [custom_kernel[6], custom_kernel[7], custom_kernel[8]]], - np.float) + kernel = np.array( + [ + [custom_kernel[0], custom_kernel[1], custom_kernel[2]], + [custom_kernel[3], custom_kernel[4], custom_kernel[5]], + [custom_kernel[6], custom_kernel[7], custom_kernel[8]], + ], + np.float64, + ) dst = cv2.filter2D(src=image, ddepth=cv2.CV_16S, kernel=kernel) dst = cv2.convertScaleAbs(dst) return dst - custom_kernel = [self.ui.doubleSpinBox_custom_filter_1.value(), self.ui.doubleSpinBox_custom_filter_2.value(), self.ui.doubleSpinBox_custom_filter_3.value(), - self.ui.doubleSpinBox_custom_filter_4.value(), self.ui.doubleSpinBox_custom_filter_5.value(), self.ui.doubleSpinBox_custom_filter_6.value(), - self.ui.doubleSpinBox_custom_filter_7.value(), self.ui.doubleSpinBox_custom_filter_8.value(), self.ui.doubleSpinBox_custom_filter_9.value()] - self.cv_sharpenImage = _custom_filter(image=self.cv_srcImage, custom_kernel=custom_kernel) + + custom_kernel = [ + self.ui.doubleSpinBox_custom_filter_1.value(), + self.ui.doubleSpinBox_custom_filter_2.value(), + self.ui.doubleSpinBox_custom_filter_3.value(), + self.ui.doubleSpinBox_custom_filter_4.value(), + self.ui.doubleSpinBox_custom_filter_5.value(), + self.ui.doubleSpinBox_custom_filter_6.value(), + self.ui.doubleSpinBox_custom_filter_7.value(), + self.ui.doubleSpinBox_custom_filter_8.value(), + self.ui.doubleSpinBox_custom_filter_9.value(), + ] + self.cv_sharpenImage = _custom_filter( + image=self.cv_srcImage, custom_kernel=custom_kernel + ) height, width = self.cv_sharpenImage.shape[0], self.cv_sharpenImage.shape[1] - ui_image = QImage(cv2.cvtColor(self.cv_sharpenImage, cv2.COLOR_BGR2RGB), width, height, QImage.Format_RGB888) + ui_image = QImage( + cv2.cvtColor(self.cv_sharpenImage, cv2.COLOR_BGR2RGB), + width, + height, + QImage.Format_RGB888, + ) if width > height: ui_image = ui_image.scaledToWidth(self.ui.label_image_2.width()) else: diff --git a/sub_windows/sub_window_5.py b/sub_windows/sub_window_5.py index 8f16e57..5f50976 100644 --- a/sub_windows/sub_window_5.py +++ b/sub_windows/sub_window_5.py @@ -23,17 +23,25 @@ class SubWindow(QMainWindow): pass def open_file(self): - file_path, file_type = QFileDialog.getOpenFileName(QFileDialog(), '选择图片', '', '图像文件(*.jpg *.bmp *.png)') + file_path, file_type = QFileDialog.getOpenFileName( + QFileDialog(), "选择图片", "", "图像文件(*.jpg *.bmp *.png)" + ) self.cv_srcImage = cv2.imread(file_path) - print(self.cv_srcImage.shape) - height, width = self.cv_srcImage.shape[0], self.cv_srcImage.shape[1] - ui_image = QImage(cv2.cvtColor(self.cv_srcImage, cv2.COLOR_BGR2RGB), width, height, QImage.Format_RGB888) - if width > height: - ui_image = ui_image.scaledToWidth(self.ui.label_image_1.width()) - else: - ui_image = ui_image.scaledToHeight(self.ui.label_image_1.height()) - self.ui.label_image_1.setPixmap(QPixmap.fromImage(ui_image)) - self._show_hist_image(flag=1) + if self.cv_srcImage is not None: + print(self.cv_srcImage.shape) + height, width = self.cv_srcImage.shape[0], self.cv_srcImage.shape[1] + ui_image = QImage( + cv2.cvtColor(self.cv_srcImage, cv2.COLOR_BGR2RGB), + width, + height, + QImage.Format_RGB888, + ) + if width > height: + ui_image = ui_image.scaledToWidth(self.ui.label_image_1.width()) + else: + ui_image =
ui_image.scaledToHeight(self.ui.label_image_1.height()) + self.ui.label_image_1.setPixmap(QPixmap.fromImage(ui_image)) + self._show_hist_image(flag=1) def brightness_change(self): def _brightness_change(image, p=0): @@ -43,9 +51,17 @@ class SubWindow(QMainWindow): copyImage = np.clip(copyImage, 0, 255) copyImage = np.array(copyImage, dtype=np.uint8) return copyImage - self.cv_equImage = _brightness_change(image=self.cv_srcImage, p=self.ui.spinBox_brightness_change.value()) + + self.cv_equImage = _brightness_change( + image=self.cv_srcImage, p=self.ui.spinBox_brightness_change.value() + ) height, width = self.cv_equImage.shape[0], self.cv_equImage.shape[1] - ui_image = QImage(cv2.cvtColor(self.cv_equImage, cv2.COLOR_BGR2RGB), width, height, QImage.Format_RGB888) + ui_image = QImage( + cv2.cvtColor(self.cv_equImage, cv2.COLOR_BGR2RGB), + width, + height, + QImage.Format_RGB888, + ) if width > height: ui_image = ui_image.scaledToWidth(self.ui.label_image_2.width()) else: @@ -58,7 +74,7 @@ class SubWindow(QMainWindow): copyImage = image.copy() if copyImage.ndim == 3: ycrcbImage = cv2.cvtColor(copyImage, cv2.COLOR_BGR2YCR_CB) - channels = cv2.split(ycrcbImage) + channels = list(cv2.split(ycrcbImage)) channels[0] = cv2.equalizeHist(src=channels[0]) ycrcbImage = cv2.merge([channels[0], channels[1], channels[2]]) copyImage = cv2.cvtColor(ycrcbImage, cv2.COLOR_YCR_CB2BGR) @@ -66,9 +82,15 @@ class SubWindow(QMainWindow): elif copyImage.ndim == 2: copyImage = cv2.equalizeHist(src=copyImage) return copyImage + self.cv_equImage = _his_equ(image=self.cv_srcImage) height, width = self.cv_equImage.shape[0], self.cv_equImage.shape[1] - ui_image = QImage(cv2.cvtColor(self.cv_equImage, cv2.COLOR_BGR2RGB), width, height, QImage.Format_RGB888) + ui_image = QImage( + cv2.cvtColor(self.cv_equImage, cv2.COLOR_BGR2RGB), + width, + height, + QImage.Format_RGB888, + ) if width > height: ui_image = ui_image.scaledToWidth(self.ui.label_image_2.width()) else: @@ -80,12 +102,22 @@ class SubWindow(QMainWindow): if flag == 1: histImg = self._calc_gray_hist(image=self.cv_srcImage) width, height = histImg.shape[0], histImg.shape[1] - ui_image = QImage(cv2.cvtColor(histImg, cv2.COLOR_BGR2RGB), width, height, QImage.Format_RGB888) + ui_image = QImage( + cv2.cvtColor(histImg, cv2.COLOR_BGR2RGB), + width, + height, + QImage.Format_RGB888, + ) self.ui.label_image_3.setPixmap(QPixmap.fromImage(ui_image)) elif flag == 2: histImg = self._calc_gray_hist(image=self.cv_equImage) width, height = histImg.shape[0], histImg.shape[1] - ui_image = QImage(cv2.cvtColor(histImg, cv2.COLOR_BGR2RGB), width, height, QImage.Format_RGB888) + ui_image = QImage( + cv2.cvtColor(histImg, cv2.COLOR_BGR2RGB), + width, + height, + QImage.Format_RGB888, + ) self.ui.label_image_4.setPixmap(QPixmap.fromImage(ui_image)) def _calc_gray_hist(self, image): diff --git a/sub_windows/sub_window_6.py b/sub_windows/sub_window_6.py index 3d85944..4a5a020 100644 --- a/sub_windows/sub_window_6.py +++ b/sub_windows/sub_window_6.py @@ -22,15 +22,23 @@ class SubWindow(QMainWindow): self.ui.pushButton_save.clicked.connect(self.save) def open_file(self): - file_path, file_type = QFileDialog.getOpenFileName(QFileDialog(), '选择图片', '', '图像文件(*.jpg *.bmp *.png)') + file_path, file_type = QFileDialog.getOpenFileName( + QFileDialog(), "选择图片", "", "图像文件(*.jpg *.bmp *.png)" + ) self.cv_srcImage = cv2.imread(file_path) - height, width = self.cv_srcImage.shape[0], self.cv_srcImage.shape[1] - ui_image = QImage(cv2.cvtColor(self.cv_srcImage, cv2.COLOR_BGR2RGB), width, height, 
QImage.Format_RGB888) - if width > height: - ui_image = ui_image.scaledToWidth(self.ui.label_image_1.width()) - else: - ui_image = ui_image.scaledToHeight(self.ui.label_image_1.height()) - self.ui.label_image_1.setPixmap(QPixmap.fromImage(ui_image)) + if self.cv_srcImage is not None: + height, width = self.cv_srcImage.shape[0], self.cv_srcImage.shape[1] + ui_image = QImage( + cv2.cvtColor(self.cv_srcImage, cv2.COLOR_BGR2RGB), + width, + height, + QImage.Format_RGB888, + ) + if width > height: + ui_image = ui_image.scaledToWidth(self.ui.label_image_1.width()) + else: + ui_image = ui_image.scaledToHeight(self.ui.label_image_1.height()) + self.ui.label_image_1.setPixmap(QPixmap.fromImage(ui_image)) def dct_process(self): if self.cv_srcImage is None: @@ -38,28 +46,35 @@ # 判断radio mask_flag = 6 if self.ui.radioButton_s1.isChecked(): - print('s1') + print("s1") mask_flag = 1 elif self.ui.radioButton_s3.isChecked(): - print('s3') + print("s3") mask_flag = 3 elif self.ui.radioButton_s6.isChecked(): - print('s6') + print("s6") mask_flag = 6 elif self.ui.radioButton_s10.isChecked(): - print('s10') + print("s10") mask_flag = 10 elif self.ui.radioButton_s15.isChecked(): - print('s15') + print("s15") mask_flag = 15 elif self.ui.radioButton_s21.isChecked(): - print('s21') + print("s21") mask_flag = 21 # DCT处理 - compressImage = self._dct_test(image=self.cv_srcImage, block=8, mask_flag=mask_flag) + compressImage = self._dct_test( + image=self.cv_srcImage, block=8, mask_flag=mask_flag + ) self.saveImage = compressImage.copy() height, width = compressImage.shape[0], compressImage.shape[1] - ui_image = QImage(cv2.cvtColor(compressImage, cv2.COLOR_BGR2RGB), width, height, QImage.Format_RGB888) + ui_image = QImage( + cv2.cvtColor(compressImage, cv2.COLOR_BGR2RGB), + width, + height, + QImage.Format_RGB888, + ) if width > height: ui_image = ui_image.scaledToWidth(self.ui.label_image_1.width()) else: @@ -67,54 +82,78 @@ class SubWindow(QMainWindow): self.ui.label_image_2.setPixmap(QPixmap.fromImage(ui_image)) def _dct_test(self, image, block=8, mask_flag=10): - mask_21 = np.uint8([[1, 1, 1, 1, 1, 1, 0, 0], - [1, 1, 1, 1, 1, 0, 0, 0], - [1, 1, 1, 1, 0, 0, 0, 0], - [1, 1, 1, 0, 0, 0, 0, 0], - [1, 1, 0, 0, 0, 0, 0, 0], - [1, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0]]) - mask_15 = np.uint8([[1, 1, 1, 1, 1, 0, 0, 0], - [1, 1, 1, 1, 0, 0, 0, 0], - [1, 1, 1, 0, 0, 0, 0, 0], - [1, 1, 0, 0, 0, 0, 0, 0], - [1, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0]]) - mask_10 = np.uint8([[1, 1, 1, 1, 0, 0, 0, 0], - [1, 1, 1, 0, 0, 0, 0, 0], - [1, 1, 0, 0, 0, 0, 0, 0], - [1, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0]]) - mask_6 = np.uint8([[1, 1, 1, 0, 0, 0, 0, 0], - [1, 1, 0, 0, 0, 0, 0, 0], - [1, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0]]) - mask_3 = np.uint8([[1, 1, 0, 0, 0, 0, 0, 0], - [1, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0]]) - mask_1 = np.uint8([[1, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0,
0, 0, 0]]) + mask_21 = np.uint8( + [ + [1, 1, 1, 1, 1, 1, 0, 0], + [1, 1, 1, 1, 1, 0, 0, 0], + [1, 1, 1, 1, 0, 0, 0, 0], + [1, 1, 1, 0, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 0, 0, 0], + [1, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + ] + ) + mask_15 = np.uint8( + [ + [1, 1, 1, 1, 1, 0, 0, 0], + [1, 1, 1, 1, 0, 0, 0, 0], + [1, 1, 1, 0, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 0, 0, 0], + [1, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + ] + ) + mask_10 = np.uint8( + [ + [1, 1, 1, 1, 0, 0, 0, 0], + [1, 1, 1, 0, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 0, 0, 0], + [1, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + ] + ) + mask_6 = np.uint8( + [ + [1, 1, 1, 0, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 0, 0, 0], + [1, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + ] + ) + mask_3 = np.uint8( + [ + [1, 1, 0, 0, 0, 0, 0, 0], + [1, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + ] + ) + mask_1 = np.uint8( + [ + [1, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + ] + ) mask = mask_10 if mask_flag == 1: mask = mask_1 @@ -131,21 +170,34 @@ class SubWindow(QMainWindow): srcImage = cv2.cvtColor(image.copy(), cv2.COLOR_BGR2YUV) retImage = np.zeros( - ((srcImage.shape[0] // block + 1) * block, (srcImage.shape[1] // block + 1) * block, srcImage.ndim), - np.float32) + ( + (srcImage.shape[0] // block + 1) * block, + (srcImage.shape[1] // block + 1) * block, + srcImage.ndim, + ), + np.float32, + ) channels = cv2.split(srcImage) Y_channel_float = np.array(channels[0], dtype=np.float32) U_channel_float = np.array(channels[1], dtype=np.float32) V_channel_float = np.array(channels[2], dtype=np.float32) - retImage[0: Y_channel_float.shape[0], 0: Y_channel_float.shape[1], 0] = Y_channel_float - retImage[0: U_channel_float.shape[0], 0: U_channel_float.shape[1], 1] = U_channel_float - retImage[0: V_channel_float.shape[0], 0: V_channel_float.shape[1], 2] = V_channel_float + retImage[ + 0 : Y_channel_float.shape[0], 0 : Y_channel_float.shape[1], 0 + ] = Y_channel_float + retImage[ + 0 : U_channel_float.shape[0], 0 : U_channel_float.shape[1], 1 + ] = U_channel_float + retImage[ + 0 : V_channel_float.shape[0], 0 : V_channel_float.shape[1], 2 + ] = V_channel_float T = np.zeros((block, block), np.float64) T[0, :] = 1 * np.sqrt(1 / block) for i in range(1, block): for j in range(0, block): - T[i, j] = np.cos(np.pi * i * (2 * j + 1) / (2 * block)) * np.sqrt(2 / block) + T[i, j] = np.cos(np.pi * i * (2 * j + 1) / (2 * block)) * np.sqrt( + 2 / block + ) for y_offset in range(int(retImage.shape[0] / block)): for x_offset in range(int(retImage.shape[1] / block)): @@ -156,24 +208,27 @@ class SubWindow(QMainWindow): # subImg = cv2.idct(subImg) # retImage[y_offset * block: y_offset * block + block, x_offset * block: x_offset * block + block, c] = subImg # 自建的方法 - subImg = retImage[y_offset * block: y_offset * block + block, - x_offset * block: x_offset * block + block, c] + subImg = retImage[ + y_offset * block : y_offset * block + block, + x_offset * block : 
x_offset * block + block, + c, + ] dctImg = np.dot(np.dot(T, subImg), np.transpose(T)) * mask subImg = np.dot(np.dot(np.transpose(T), dctImg), T) - retImage[y_offset * block: y_offset * block + block, x_offset * block: x_offset * block + block, - c] = subImg + retImage[ + y_offset * block : y_offset * block + block, + x_offset * block : x_offset * block + block, + c, + ] = subImg retImage = cv2.cvtColor(np.uint8(retImage), cv2.COLOR_YUV2BGR) - retImage = retImage[0: srcImage.shape[0], 0: srcImage.shape[1]] + retImage = retImage[0 : srcImage.shape[0], 0 : srcImage.shape[1]] return retImage def save(self): if self.saveImage is None: return - jpg_image = cv2.imencode('.jpg', self.saveImage)[1] - fp = open('.././dctCompressImage.jpg', 'wb') + jpg_image = cv2.imencode(".jpg", self.saveImage)[1] + fp = open(".././dctCompressImage.jpg", "wb") fp.write(jpg_image) fp.close() - print('ok') - - - + print("ok") diff --git a/sub_windows/sub_window_7.py b/sub_windows/sub_window_7.py index f336bc7..f5fc559 100644 --- a/sub_windows/sub_window_7.py +++ b/sub_windows/sub_window_7.py @@ -19,15 +19,23 @@ class SubWindow(QMainWindow): self.ui.pushButton_canny.clicked.connect(self.canny_process) def open_file(self): - file_path, file_type = QFileDialog.getOpenFileName(QFileDialog(), '选择图片', '', '图像文件(*.jpg *.bmp *.png)') + file_path, file_type = QFileDialog.getOpenFileName( + QFileDialog(), "选择图片", "", "图像文件(*.jpg *.bmp *.png)" + ) self.cv_srcImage = cv2.imread(file_path) - height, width = self.cv_srcImage.shape[0], self.cv_srcImage.shape[1] - ui_image = QImage(cv2.cvtColor(self.cv_srcImage, cv2.COLOR_BGR2RGB), width, height, QImage.Format_RGB888) - if width > height: - ui_image = ui_image.scaledToWidth(self.ui.label_image_1.width()) - else: - ui_image = ui_image.scaledToHeight(self.ui.label_image_1.height()) - self.ui.label_image_1.setPixmap(QPixmap.fromImage(ui_image)) + if self.cv_srcImage is not None: + height, width = self.cv_srcImage.shape[0], self.cv_srcImage.shape[1] + ui_image = QImage( + cv2.cvtColor(self.cv_srcImage, cv2.COLOR_BGR2RGB), + width, + height, + QImage.Format_RGB888, + ) + if width > height: + ui_image = ui_image.scaledToWidth(self.ui.label_image_1.width()) + else: + ui_image = ui_image.scaledToHeight(self.ui.label_image_1.height()) + self.ui.label_image_1.setPixmap(QPixmap.fromImage(ui_image)) def canny_process(self): if self.cv_srcImage is None: @@ -36,7 +44,12 @@ class SubWindow(QMainWindow): high_th = int(self.ui.spinBox_high_th.value()) edgeImg = cv2.Canny(self.cv_srcImage.copy(), low_th, high_th) height, width = edgeImg.shape[0], edgeImg.shape[1] - ui_image = QImage(cv2.cvtColor(edgeImg, cv2.COLOR_BGR2RGB), width, height, QImage.Format_RGB888) + ui_image = QImage( + cv2.cvtColor(edgeImg, cv2.COLOR_GRAY2RGB), + width, + height, + QImage.Format_RGB888, + ) if width > height: ui_image = ui_image.scaledToWidth(self.ui.label_image_1.width()) else: diff --git a/sub_windows/sub_window_8.py b/sub_windows/sub_window_8.py index e2c113e..dfd4281 100644 --- a/sub_windows/sub_window_8.py +++ b/sub_windows/sub_window_8.py @@ -14,7 +14,7 @@ class SubWindow(QMainWindow): self.ui.setupUi(self) self.FaceDetect_ = None self.ui_init() - self.face_cascade = cv2.CascadeClassifier('./static/cascade.xml') + self.face_cascade = cv2.CascadeClassifier("./static/cascade.xml") def ui_init(self): self.ui.pushButton_video_captrue.clicked.connect(self.video_captrue) @@ -23,20 +23,29 @@ class SubWindow(QMainWindow): self.FaceDetect_.DetectOneFrame.connect(self.update_frame_to_label) def open_file(self): - file_path,
file_type = QFileDialog.getOpenFileName(QFileDialog(), '选择图片', '', '图像文件(*.jpg *.bmp *.png)') + file_path, file_type = QFileDialog.getOpenFileName( + QFileDialog(), "选择图片", "", "图像文件(*.jpg *.bmp *.png)" + ) self.cv_srcImage = cv2.imread(file_path) - gray = cv2.cvtColor(self.cv_srcImage, cv2.COLOR_BGR2GRAY) - faces = self.face_cascade.detectMultiScale(gray, scaleFactor=1.15, minNeighbors=5, minSize=(5, 5)) - for (x, y, w, h) in faces: - cv2.rectangle(self.cv_srcImage, (x, y), (x + w, y + w), (0, 255, 0), 5) - height, width = self.cv_srcImage.shape[0], self.cv_srcImage.shape[1] - ui_image = QImage(cv2.cvtColor(self.cv_srcImage, cv2.COLOR_BGR2RGB), width, height, QImage.Format_RGB888) - if width > height: - ui_image = ui_image.scaledToWidth(self.ui.label_image_1.width()) - else: - ui_image = ui_image.scaledToHeight(self.ui.label_image_1.height()) - self.ui.label_image_1.setPixmap(QPixmap.fromImage(ui_image)) - + if self.cv_srcImage is not None: + gray = cv2.cvtColor(self.cv_srcImage, cv2.COLOR_BGR2GRAY) + faces = self.face_cascade.detectMultiScale( + gray, scaleFactor=1.15, minNeighbors=5, minSize=(5, 5) + ) + for x, y, w, h in faces: + cv2.rectangle(self.cv_srcImage, (x, y), (x + w, y + h), (0, 255, 0), 5) + height, width = self.cv_srcImage.shape[0], self.cv_srcImage.shape[1] + ui_image = QImage( + cv2.cvtColor(self.cv_srcImage, cv2.COLOR_BGR2RGB), + width, + height, + QImage.Format_RGB888, + ) + if width > height: + ui_image = ui_image.scaledToWidth(self.ui.label_image_1.width()) + else: + ui_image = ui_image.scaledToHeight(self.ui.label_image_1.height()) + self.ui.label_image_1.setPixmap(QPixmap.fromImage(ui_image)) def video_captrue(self): if not self.FaceDetect_.working: @@ -44,7 +53,7 @@ class SubWindow(QMainWindow): self.FaceDetect_.start() else: self.FaceDetect_.working = None - self.ui.label_image_1.setText('停止捕捉') + self.ui.label_image_1.setText("停止捕捉") def update_frame_to_label(self, frame): self.ui.label_image_1.setPixmap(QPixmap.fromImage(frame)) @@ -58,7 +67,7 @@ class FaceDetect(QThread): self.working = None def run(self): - face_cascade = cv2.CascadeClassifier('./static/cascade.xml') + face_cascade = cv2.CascadeClassifier("./static/cascade.xml") capture = cv2.VideoCapture(0, cv2.CAP_DSHOW) codec = cv2.VideoWriter_fourcc("M", "J", "P", "G") capture.set(cv2.CAP_PROP_FOURCC, codec) @@ -67,10 +76,17 @@ class FaceDetect(QThread): (height, width, channels) = frame_color.shape frame_color = cv2.flip(frame_color, flipCode=1) # 镜像 gray = cv2.cvtColor(frame_color, cv2.COLOR_BGR2GRAY) - faces = face_cascade.detectMultiScale(gray, scaleFactor=1.15, minNeighbors=5, minSize=(5, 5)) - for (x, y, w, h) in faces: + faces = face_cascade.detectMultiScale( + gray, scaleFactor=1.15, minNeighbors=5, minSize=(5, 5) + ) + for x, y, w, h in faces: cv2.rectangle(frame_color, (x, y), (x + w, y + w), (255, 255, 0), 4) - ui_image = QImage(cv2.cvtColor(frame_color, cv2.COLOR_BGR2RGB), width, height, QImage.Format_RGB888) + ui_image = QImage( + cv2.cvtColor(frame_color, cv2.COLOR_BGR2RGB), + width, + height, + QImage.Format_RGB888, + ) self.DetectOneFrame.emit(ui_image) capture.release() - print('结束人脸检测') + print("结束人脸检测")